mirror of https://github.com/postgrespro/pg_probackup.git
commit 384cf6dcfd (parent 8846e1997a)
@@ -928,7 +928,7 @@ check_server_version(PGconn *conn, PGNodeInfo *nodeInfo)
 			 nodeInfo->server_version_str, "9.6");

 	if (nodeInfo->pgpro_support)
-		res = pgut_execute(conn, "SELECT pgpro_edition()", 0, NULL);
+		res = pgut_execute(conn, "SELECT pg_catalog.pgpro_edition()", 0, NULL);

 	/*
 	 * Check major version of connected PostgreSQL and major version of
@@ -1120,7 +1120,7 @@ pgpro_support(PGconn *conn)
 	PGresult   *res;

 	res = pgut_execute(conn,
-					   "SELECT proname FROM pg_proc WHERE proname='pgpro_edition'",
+					   "SELECT proname FROM pg_catalog.pg_proc WHERE proname='pgpro_edition'::name AND pronamespace='pg_catalog'::regnamespace::oid",
 					   0, NULL);

 	if (PQresultStatus(res) == PGRES_TUPLES_OK &&
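Context for the two hunks above: on vanilla PostgreSQL there is no pg_catalog.pgpro_edition(), so an unqualified SELECT pgpro_edition() falls through the search_path and can land on an attacker-created function in public, while the old pg_proc probe accepted a matching row from any schema. The new tests/CVE_2018_1058.py plants exactly such a trap; below is a minimal SQL sketch of that scenario (the trap function mirrors the test, the probe is the patched query above):

    -- Trap: any role with CREATE on schema public (granted by default before PostgreSQL 15)
    CREATE FUNCTION public.pgpro_edition() RETURNS text AS $$
    BEGIN
        RAISE 'pg_probackup vulnerable!';
    END
    $$ LANGUAGE plpgsql;

    -- Unqualified call: pg_catalog is searched first but has no pgpro_edition() here,
    -- so resolution falls through to public and fires the trap.
    SELECT pgpro_edition();

    -- Patched probe: only reports Postgres Pro support when the function really lives
    -- in pg_catalog, so the trap above is ignored.
    SELECT proname FROM pg_catalog.pg_proc
    WHERE proname = 'pgpro_edition'::name
      AND pronamespace = 'pg_catalog'::regnamespace::oid;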
@@ -1159,7 +1159,7 @@ get_database_map(PGconn *conn)
 	 */
 	res = pgut_execute_extended(conn,
 						  "SELECT oid, datname FROM pg_catalog.pg_database "
-						  "WHERE datname NOT IN ('template1', 'template0')",
+						  "WHERE datname NOT IN ('template1'::name, 'template0'::name)",
 						  0, NULL, true, true);

 	/* Don't error out, simply return NULL. See comment above. */
@@ -357,10 +357,10 @@ get_index_list(const char *dbname, bool first_db_with_amcheck,

 	res = pgut_execute(db_conn, "SELECT "
 						"extname, nspname, extversion "
-						"FROM pg_namespace n "
-						"JOIN pg_extension e "
+						"FROM pg_catalog.pg_namespace n "
+						"JOIN pg_catalog.pg_extension e "
 						"ON n.oid=e.extnamespace "
-						"WHERE e.extname IN ('amcheck', 'amcheck_next') "
+						"WHERE e.extname IN ('amcheck'::name, 'amcheck_next'::name) "
 						"ORDER BY extversion DESC "
 						"LIMIT 1",
 						0, NULL);
@@ -556,8 +556,8 @@ do_amcheck(ConnectionOptions conn_opt, PGconn *conn)

 	res_db = pgut_execute(conn,
 						  "SELECT datname, oid, dattablespace "
-						  "FROM pg_database "
-						  "WHERE datname NOT IN ('template0', 'template1')",
+						  "FROM pg_catalog.pg_database "
+						  "WHERE datname NOT IN ('template0'::name, 'template1'::name)",
 						  0, NULL);

 	/* we don't need this connection anymore */
@@ -169,7 +169,7 @@ get_ptrack_version(PGconn *backup_conn, PGNodeInfo *nodeInfo)

 	res_db = pgut_execute(backup_conn,
 						  "SELECT extnamespace::regnamespace, extversion "
-						  "FROM pg_catalog.pg_extension WHERE extname = 'ptrack'",
+						  "FROM pg_catalog.pg_extension WHERE extname = 'ptrack'::name",
 						  0, NULL);

 	if (PQntuples(res_db) > 0)
@@ -187,7 +187,7 @@ get_ptrack_version(PGconn *backup_conn, PGNodeInfo *nodeInfo)
 	/* ptrack 1.x is supported, save version */
 	PQclear(res_db);
 	res_db = pgut_execute(backup_conn,
-						  "SELECT proname FROM pg_proc WHERE proname='ptrack_version'",
+						  "SELECT proname FROM pg_catalog.pg_proc WHERE proname='ptrack_version'::name",
 						  0, NULL);

 	if (PQntuples(res_db) == 0)
@@ -285,7 +285,7 @@ pg_ptrack_clear(PGconn *backup_conn, int ptrack_version_num)

 	params[0] = palloc(64);
 	params[1] = palloc(64);
-	res_db = pgut_execute(backup_conn, "SELECT datname, oid, dattablespace FROM pg_database",
+	res_db = pgut_execute(backup_conn, "SELECT datname, oid, dattablespace FROM pg_catalog.pg_database",
 						  0, NULL);

 	for(i = 0; i < PQntuples(res_db); i++)
@@ -335,7 +335,7 @@ pg_ptrack_get_and_clear_db(Oid dbOid, Oid tblspcOid, PGconn *backup_conn)

 	sprintf(params[0], "%i", dbOid);
 	res_db = pgut_execute(backup_conn,
-						  "SELECT datname FROM pg_database WHERE oid=$1",
+						  "SELECT datname FROM pg_catalog.pg_database WHERE oid=$1",
 						  1, (const char **) params);
 	/*
 	 * If database is not found, it's not an error.
@@ -169,7 +169,7 @@ get_current_timeline(PGconn *conn)
 	char	   *val;

 	res = pgut_execute_extended(conn,
-				   "SELECT timeline_id FROM pg_control_checkpoint()", 0, NULL, true, true);
+				   "SELECT timeline_id FROM pg_catalog.pg_control_checkpoint()", 0, NULL, true, true);

 	if (PQresultStatus(res) == PGRES_TUPLES_OK)
 		val = PQgetvalue(res, 0, 0);
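This timeline probe is one of the calls the new test_basic_backup_modified_search_path shadows: with search_path set to 'public,pg_catalog' (as the test configures the node), pg_catalog is no longer searched first, so an unqualified pg_control_checkpoint() resolves to a same-named function in public. A minimal SQL sketch under that assumption (the trap mirrors the one created in the test):

    SET search_path = public, pg_catalog;   -- public now shadows the catalog

    CREATE FUNCTION public.pg_control_checkpoint(OUT timeline_id integer, OUT dummy integer)
    RETURNS record AS $$
    BEGIN
        RAISE '% vulnerable!', 'pg_probackup';
    END
    $$ LANGUAGE plpgsql;

    SELECT timeline_id FROM pg_control_checkpoint();             -- hits the trap in public
    SELECT timeline_id FROM pg_catalog.pg_control_checkpoint();  -- always the genuine function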
@@ -20,6 +20,12 @@
 #include "common/string.h"
 #endif

+#if PG_VERSION_NUM >= 100000
+#include "common/connect.h"
+#else
+#include "fe_utils/connect.h"
+#endif
+
 #include <time.h>

 #include "pgut.h"
@@ -257,7 +263,7 @@ pgut_connect(const char *host, const char *port,
 			pthread_lock(&atexit_callback_disconnect_mutex);
 			pgut_atexit_push(pgut_disconnect_callback, conn);
 			pthread_mutex_unlock(&atexit_callback_disconnect_mutex);
-			return conn;
+			break;
 		}

 		if (conn && PQconnectionNeedsPassword(conn) && prompt_password)
@@ -279,6 +285,28 @@ pgut_connect(const char *host, const char *port,
 		PQfinish(conn);
 		return NULL;
 	}
+
+	/*
+	 * Fix for CVE-2018-1058. This code was taken with small modification from
+	 * src/bin/pg_basebackup/streamutil.c:GetConnection()
+	 */
+	if (dbname != NULL)
+	{
+		PGresult   *res;
+
+		res = PQexec(conn, ALWAYS_SECURE_SEARCH_PATH_SQL);
+		if (PQresultStatus(res) != PGRES_TUPLES_OK)
+		{
+			elog(ERROR, "could not clear search_path: %s",
+				 PQerrorMessage(conn));
+			PQclear(res);
+			PQfinish(conn);
+			return NULL;
+		}
+		PQclear(res);
+	}
+
 	return conn;
 }

 PGconn *
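ALWAYS_SECURE_SEARCH_PATH_SQL comes from the PostgreSQL client headers pulled in by the include block added above (common/connect.h on newer server sources, fe_utils/connect.h on older ones). In those headers it is defined as a query along the lines of the one below, which pins an empty search_path for the whole session so that unqualified names can only resolve to pg_catalog (and the temp schema); the explicit pg_catalog. prefixes elsewhere in this patch then act as defense in depth:

    SELECT pg_catalog.set_config('search_path', '', false);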
tests/CVE_2018_1058.py (new file)
@@ -0,0 +1,143 @@
import os
import unittest
from .helpers.ptrack_helpers import ProbackupTest, ProbackupException

module_name = 'CVE-2018-1058'

class CVE_2018_1058(ProbackupTest, unittest.TestCase):

    # @unittest.skip("skip")
    def test_basic_default_search_path(self):
        """"""
        fname = self.id().split('.')[3]
        backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
        node = self.make_simple_node(
            base_dir=os.path.join(module_name, fname, 'node'),
            set_replication=True)

        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        node.slow_start()

        node.safe_psql(
            'postgres',
            "CREATE FUNCTION public.pgpro_edition() "
            "RETURNS text "
            "AS $$ "
            "BEGIN "
            " RAISE 'pg_probackup vulnerable!'; "
            "END "
            "$$ LANGUAGE plpgsql")

        self.backup_node(backup_dir, 'node', node, backup_type='full', options=['--stream'])

        # Clean after yourself
        self.del_test_dir(module_name, fname)

    # @unittest.skip("skip")
    def test_basic_backup_modified_search_path(self):
        """"""
        fname = self.id().split('.')[3]
        backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
        node = self.make_simple_node(
            base_dir=os.path.join(module_name, fname, 'node'),
            set_replication=True)
        self.set_auto_conf(node, options={'search_path': 'public,pg_catalog'})

        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        node.slow_start()

        node.safe_psql(
            'postgres',
            "CREATE FUNCTION public.pg_control_checkpoint(OUT timeline_id integer, OUT dummy integer) "
            "RETURNS record "
            "AS $$ "
            "BEGIN "
            " RAISE '% vulnerable!', 'pg_probackup'; "
            "END "
            "$$ LANGUAGE plpgsql")

        node.safe_psql(
            'postgres',
            "CREATE FUNCTION public.pg_proc(OUT proname name, OUT dummy integer) "
            "RETURNS record "
            "AS $$ "
            "BEGIN "
            " RAISE '% vulnerable!', 'pg_probackup'; "
            "END "
            "$$ LANGUAGE plpgsql; "
            "CREATE VIEW public.pg_proc AS SELECT proname FROM public.pg_proc()")

        self.backup_node(backup_dir, 'node', node, backup_type='full', options=['--stream'])

        log_file = os.path.join(node.logs_dir, 'postgresql.log')
        with open(log_file, 'r') as f:
            log_content = f.read()
        self.assertFalse(
            'pg_probackup vulnerable!' in log_content)

        # Clean after yourself
        self.del_test_dir(module_name, fname)

    # @unittest.skip("skip")
    def test_basic_checkdb_modified_search_path(self):
        """"""
        fname = self.id().split('.')[3]
        node = self.make_simple_node(
            base_dir=os.path.join(module_name, fname, 'node'),
            initdb_params=['--data-checksums'])
        self.set_auto_conf(node, options={'search_path': 'public,pg_catalog'})
        node.slow_start()

        node.safe_psql(
            'postgres',
            "CREATE FUNCTION public.pg_database(OUT datname name, OUT oid oid, OUT dattablespace oid) "
            "RETURNS record "
            "AS $$ "
            "BEGIN "
            " RAISE 'pg_probackup vulnerable!'; "
            "END "
            "$$ LANGUAGE plpgsql; "
            "CREATE VIEW public.pg_database AS SELECT * FROM public.pg_database()")

        node.safe_psql(
            'postgres',
            "CREATE FUNCTION public.pg_extension(OUT extname name, OUT extnamespace oid, OUT extversion text) "
            "RETURNS record "
            "AS $$ "
            "BEGIN "
            " RAISE 'pg_probackup vulnerable!'; "
            "END "
            "$$ LANGUAGE plpgsql; "
            "CREATE FUNCTION public.pg_namespace(OUT oid oid, OUT nspname name) "
            "RETURNS record "
            "AS $$ "
            "BEGIN "
            " RAISE 'pg_probackup vulnerable!'; "
            "END "
            "$$ LANGUAGE plpgsql; "
            "CREATE VIEW public.pg_extension AS SELECT * FROM public.pg_extension();"
            "CREATE VIEW public.pg_namespace AS SELECT * FROM public.pg_namespace();"
            )

        try:
            self.checkdb_node(
                options=[
                    '--amcheck',
                    '--skip-block-validation',
                    '-d', 'postgres', '-p', str(node.port)])
            self.assertEqual(
                1, 0,
                "Expecting Error because amcheck{,_next} not installed\n"
                " Output: {0} \n CMD: {1}".format(
                    repr(self.output), self.cmd))
        except ProbackupException as e:
            self.assertIn(
                "WARNING: Extension 'amcheck' or 'amcheck_next' are not installed in database postgres",
                e.message,
                "\n Unexpected Error Message: {0}\n CMD: {1}".format(
                    repr(e.message), self.cmd))

        # Clean after yourself
        self.del_test_dir(module_name, fname)
@@ -6,7 +6,8 @@ from . import init, merge, option, show, compatibility, \
     retention, pgpro560, pgpro589, pgpro2068, false_positive, replica, \
     compression, page, ptrack, archive, exclude, cfs_backup, cfs_restore, \
     cfs_validate_backup, auth_test, time_stamp, snapfs, logging, \
-    locking, remote, external, config, checkdb, set_backup, incr_restore
+    locking, remote, external, config, checkdb, set_backup, incr_restore, \
+    CVE_2018_1058


 def load_tests(loader, tests, pattern):
@@ -55,6 +56,7 @@ def load_tests(loader, tests, pattern):
     suite.addTests(loader.loadTestsFromModule(snapfs))
     suite.addTests(loader.loadTestsFromModule(time_stamp))
     suite.addTests(loader.loadTestsFromModule(validate))
+    suite.addTests(loader.loadTestsFromModule(CVE_2018_1058))

     return suite