From 4493d69d80b803d1c48ffd49f2e6d33a35e2f734 Mon Sep 17 00:00:00 2001
From: Dmitry Kovalenko
Date: Wed, 19 Feb 2025 11:10:21 +0300
Subject: [PATCH 01/90] OsOperations::cwd() is corrected (#182)

* OsOperations::cwd() is corrected

This patch fixes the following problems:
- It does not work on Windows
- It always returns LOCAL path
---
 testgres/operations/local_ops.py  |  3 +++
 testgres/operations/os_ops.py     |  7 +------
 testgres/operations/remote_ops.py |  4 ++++
 tests/test_local.py               | 17 +++++++++++++++++
 tests/test_remote.py              | 10 ++++++++++
 5 files changed, 35 insertions(+), 6 deletions(-)

diff --git a/testgres/operations/local_ops.py b/testgres/operations/local_ops.py
index 8bdb22cd..fc3e3954 100644
--- a/testgres/operations/local_ops.py
+++ b/testgres/operations/local_ops.py
@@ -152,6 +152,9 @@ def exec_command(self, cmd, wait_exit=False, verbose=False, expect_error=False,
     def environ(self, var_name):
         return os.environ.get(var_name)
 
+    def cwd(self):
+        return os.getcwd()
+
     def find_executable(self, executable):
         return find_executable(executable)
 
diff --git a/testgres/operations/os_ops.py b/testgres/operations/os_ops.py
index 35525b3c..00880863 100644
--- a/testgres/operations/os_ops.py
+++ b/testgres/operations/os_ops.py
@@ -1,6 +1,5 @@
 import getpass
 import locale
-import sys
 
 try:
     import psycopg2 as pglib  # noqa: F401
@@ -39,11 +38,7 @@ def environ(self, var_name):
         raise NotImplementedError()
 
     def cwd(self):
-        if sys.platform == 'linux':
-            cmd = 'pwd'
-        elif sys.platform == 'win32':
-            cmd = 'cd'
-        return self.exec_command(cmd).decode().rstrip()
+        raise NotImplementedError()
 
     def find_executable(self, executable):
         raise NotImplementedError()
diff --git a/testgres/operations/remote_ops.py b/testgres/operations/remote_ops.py
index 2f34ecec..3ebc2e60 100644
--- a/testgres/operations/remote_ops.py
+++ b/testgres/operations/remote_ops.py
@@ -138,6 +138,10 @@ def environ(self, var_name: str) -> str:
         cmd = "echo ${}".format(var_name)
         return self.exec_command(cmd, encoding=get_default_encoding()).strip()
 
+    def cwd(self):
+        cmd = 'pwd'
+        return self.exec_command(cmd, encoding=get_default_encoding()).rstrip()
+
     def find_executable(self, executable):
         search_paths = self.environ("PATH")
         if not search_paths:
diff --git a/tests/test_local.py b/tests/test_local.py
index d7adce17..568a4bc5 100644
--- a/tests/test_local.py
+++ b/tests/test_local.py
@@ -256,3 +256,20 @@ def test_isdir_false__file(self):
         response = self.operations.isdir(name)
 
         assert response is False
+
+    def test_cwd(self):
+        """
+        Test cwd.
+        """
+        v = self.operations.cwd()
+
+        assert v is not None
+        assert type(v) == str  # noqa: E721
+
+        expectedValue = os.getcwd()
+        assert expectedValue is not None
+        assert type(expectedValue) == str  # noqa: E721
+        assert expectedValue != ""  # research
+
+        # Comp result
+        assert v == expectedValue
diff --git a/tests/test_remote.py b/tests/test_remote.py
index 7071a9d9..30c5d348 100755
--- a/tests/test_remote.py
+++ b/tests/test_remote.py
@@ -392,3 +392,13 @@ def test_isdir_false__file(self):
         response = self.operations.isdir(name)
 
         assert response is False
+
+    def test_cwd(self):
+        """
+        Test cwd.
+ """ + v = self.operations.cwd() + + assert v is not None + assert type(v) == str # noqa: E721 + assert v != "" From 22c476347d4e8bfae832e641e00428ed4b6d14a2 Mon Sep 17 00:00:00 2001 From: Dmitry Kovalenko Date: Wed, 19 Feb 2025 15:26:56 +0300 Subject: [PATCH 02/90] OsOps::write method is corrected (#183) LocalOperations - [BUG FIX] (read_write=true and truncate=false) writes to begging of a file - Preparation of data is added (verification/encoding/decoding/eol) RemoteOperations - Preparation of data is corrected (verification/encoding/decoding/eol) - Temp file is always opened with "w+"/"w+b" modes. Tests are added. --- testgres/operations/local_ops.py | 53 ++++++++++++++++++------ testgres/operations/remote_ops.py | 35 ++++++++++------ tests/test_local.py | 69 +++++++++++++++++++++++++++++++ tests/test_remote.py | 69 +++++++++++++++++++++++++++++++ 4 files changed, 201 insertions(+), 25 deletions(-) diff --git a/testgres/operations/local_ops.py b/testgres/operations/local_ops.py index fc3e3954..5c79bb7e 100644 --- a/testgres/operations/local_ops.py +++ b/testgres/operations/local_ops.py @@ -235,27 +235,54 @@ def write(self, filename, data, truncate=False, binary=False, read_and_write=Fal Args: filename: The file path where the data will be written. data: The data to be written to the file. - truncate: If True, the file will be truncated before writing ('w' or 'wb' option); - if False (default), data will be appended ('a' or 'ab' option). - binary: If True, the data will be written in binary mode ('wb' or 'ab' option); - if False (default), the data will be written in text mode ('w' or 'a' option). - read_and_write: If True, the file will be opened with read and write permissions ('r+' option); - if False (default), only write permission will be used ('w', 'a', 'wb', or 'ab' option) + truncate: If True, the file will be truncated before writing ('w' option); + if False (default), data will be appended ('a' option). + binary: If True, the data will be written in binary mode ('b' option); + if False (default), the data will be written in text mode. + read_and_write: If True, the file will be opened with read and write permissions ('+' option); + if False (default), only write permission will be used. 
""" - # If it is a bytes str or list if isinstance(data, bytes) or isinstance(data, list) and all(isinstance(item, bytes) for item in data): binary = True - mode = "wb" if binary else "w" - if not truncate: - mode = "ab" if binary else "a" + + mode = "w" if truncate else "a" + if read_and_write: - mode = "r+b" if binary else "r+" + mode += "+" + + # If it is a bytes str or list + if binary: + mode += "b" + + assert type(mode) == str # noqa: E721 + assert mode != "" with open(filename, mode) as file: if isinstance(data, list): - file.writelines(data) + data2 = [__class__._prepare_line_to_write(s, binary) for s in data] + file.writelines(data2) else: - file.write(data) + data2 = __class__._prepare_data_to_write(data, binary) + file.write(data2) + + def _prepare_line_to_write(data, binary): + data = __class__._prepare_data_to_write(data, binary) + + if binary: + assert type(data) == bytes # noqa: E721 + return data.rstrip(b'\n') + b'\n' + + assert type(data) == str # noqa: E721 + return data.rstrip('\n') + '\n' + + def _prepare_data_to_write(data, binary): + if isinstance(data, bytes): + return data if binary else data.decode() + + if isinstance(data, str): + return data if not binary else data.encode() + + raise InvalidOperationException("Unknown type of data type [{0}].".format(type(data).__name__)) def touch(self, filename): """ diff --git a/testgres/operations/remote_ops.py b/testgres/operations/remote_ops.py index 3ebc2e60..f690e063 100644 --- a/testgres/operations/remote_ops.py +++ b/testgres/operations/remote_ops.py @@ -278,10 +278,6 @@ def write(self, filename, data, truncate=False, binary=False, read_and_write=Fal if not encoding: encoding = get_default_encoding() mode = "wb" if binary else "w" - if not truncate: - mode = "ab" if binary else "a" - if read_and_write: - mode = "r+b" if binary else "r+" with tempfile.NamedTemporaryFile(mode=mode, delete=False) as tmp_file: # For scp the port is specified by a "-P" option @@ -292,16 +288,12 @@ def write(self, filename, data, truncate=False, binary=False, read_and_write=Fal subprocess.run(scp_cmd, check=False) # The file might not exist yet tmp_file.seek(0, os.SEEK_END) - if isinstance(data, bytes) and not binary: - data = data.decode(encoding) - elif isinstance(data, str) and binary: - data = data.encode(encoding) - if isinstance(data, list): - data = [(s if isinstance(s, str) else s.decode(get_default_encoding())).rstrip('\n') + '\n' for s in data] - tmp_file.writelines(data) + data2 = [__class__._prepare_line_to_write(s, binary, encoding) for s in data] + tmp_file.writelines(data2) else: - tmp_file.write(data) + data2 = __class__._prepare_data_to_write(data, binary, encoding) + tmp_file.write(data2) tmp_file.flush() scp_cmd = ['scp'] + scp_args + [tmp_file.name, f"{self.ssh_dest}:{filename}"] @@ -313,6 +305,25 @@ def write(self, filename, data, truncate=False, binary=False, read_and_write=Fal os.remove(tmp_file.name) + def _prepare_line_to_write(data, binary, encoding): + data = __class__._prepare_data_to_write(data, binary, encoding) + + if binary: + assert type(data) == bytes # noqa: E721 + return data.rstrip(b'\n') + b'\n' + + assert type(data) == str # noqa: E721 + return data.rstrip('\n') + '\n' + + def _prepare_data_to_write(data, binary, encoding): + if isinstance(data, bytes): + return data if binary else data.decode(encoding) + + if isinstance(data, str): + return data if not binary else data.encode(encoding) + + raise InvalidOperationException("Unknown type of data type [{0}].".format(type(data).__name__)) + def touch(self, 
filename): """ Create a new file or update the access and modification times of an existing file on the remote server. diff --git a/tests/test_local.py b/tests/test_local.py index 568a4bc5..4051bfb5 100644 --- a/tests/test_local.py +++ b/tests/test_local.py @@ -2,6 +2,7 @@ import pytest import re +import tempfile from testgres import ExecUtilException from testgres import InvalidOperationException @@ -273,3 +274,71 @@ def test_cwd(self): # Comp result assert v == expectedValue + + class tagWriteData001: + def __init__(self, sign, source, cp_rw, cp_truncate, cp_binary, cp_data, result): + self.sign = sign + self.source = source + self.call_param__rw = cp_rw + self.call_param__truncate = cp_truncate + self.call_param__binary = cp_binary + self.call_param__data = cp_data + self.result = result + + sm_write_data001 = [ + tagWriteData001("A001", "1234567890", False, False, False, "ABC", "1234567890ABC"), + tagWriteData001("A002", b"1234567890", False, False, True, b"ABC", b"1234567890ABC"), + + tagWriteData001("B001", "1234567890", False, True, False, "ABC", "ABC"), + tagWriteData001("B002", "1234567890", False, True, False, "ABC1234567890", "ABC1234567890"), + tagWriteData001("B003", b"1234567890", False, True, True, b"ABC", b"ABC"), + tagWriteData001("B004", b"1234567890", False, True, True, b"ABC1234567890", b"ABC1234567890"), + + tagWriteData001("C001", "1234567890", True, False, False, "ABC", "1234567890ABC"), + tagWriteData001("C002", b"1234567890", True, False, True, b"ABC", b"1234567890ABC"), + + tagWriteData001("D001", "1234567890", True, True, False, "ABC", "ABC"), + tagWriteData001("D002", "1234567890", True, True, False, "ABC1234567890", "ABC1234567890"), + tagWriteData001("D003", b"1234567890", True, True, True, b"ABC", b"ABC"), + tagWriteData001("D004", b"1234567890", True, True, True, b"ABC1234567890", b"ABC1234567890"), + + tagWriteData001("E001", "\0001234567890\000", False, False, False, "\000ABC\000", "\0001234567890\000\000ABC\000"), + tagWriteData001("E002", b"\0001234567890\000", False, False, True, b"\000ABC\000", b"\0001234567890\000\000ABC\000"), + + tagWriteData001("F001", "a\nb\n", False, False, False, ["c", "d"], "a\nb\nc\nd\n"), + tagWriteData001("F002", b"a\nb\n", False, False, True, [b"c", b"d"], b"a\nb\nc\nd\n"), + + tagWriteData001("G001", "a\nb\n", False, False, False, ["c\n\n", "d\n"], "a\nb\nc\nd\n"), + tagWriteData001("G002", b"a\nb\n", False, False, True, [b"c\n\n", b"d\n"], b"a\nb\nc\nd\n"), + ] + + @pytest.fixture( + params=sm_write_data001, + ids=[x.sign for x in sm_write_data001], + ) + def write_data001(self, request): + assert isinstance(request, pytest.FixtureRequest) + assert type(request.param) == __class__.tagWriteData001 # noqa: E721 + return request.param + + def test_write(self, write_data001): + assert type(write_data001) == __class__.tagWriteData001 # noqa: E721 + + mode = "w+b" if write_data001.call_param__binary else "w+" + + with tempfile.NamedTemporaryFile(mode=mode, delete=True) as tmp_file: + tmp_file.write(write_data001.source) + tmp_file.flush() + + self.operations.write( + tmp_file.name, + write_data001.call_param__data, + read_and_write=write_data001.call_param__rw, + truncate=write_data001.call_param__truncate, + binary=write_data001.call_param__binary) + + tmp_file.seek(0) + + s = tmp_file.read() + + assert s == write_data001.result diff --git a/tests/test_remote.py b/tests/test_remote.py index 30c5d348..3e6b79dd 100755 --- a/tests/test_remote.py +++ b/tests/test_remote.py @@ -2,6 +2,7 @@ import pytest import re +import tempfile 
from testgres import ExecUtilException from testgres import InvalidOperationException @@ -402,3 +403,71 @@ def test_cwd(self): assert v is not None assert type(v) == str # noqa: E721 assert v != "" + + class tagWriteData001: + def __init__(self, sign, source, cp_rw, cp_truncate, cp_binary, cp_data, result): + self.sign = sign + self.source = source + self.call_param__rw = cp_rw + self.call_param__truncate = cp_truncate + self.call_param__binary = cp_binary + self.call_param__data = cp_data + self.result = result + + sm_write_data001 = [ + tagWriteData001("A001", "1234567890", False, False, False, "ABC", "1234567890ABC"), + tagWriteData001("A002", b"1234567890", False, False, True, b"ABC", b"1234567890ABC"), + + tagWriteData001("B001", "1234567890", False, True, False, "ABC", "ABC"), + tagWriteData001("B002", "1234567890", False, True, False, "ABC1234567890", "ABC1234567890"), + tagWriteData001("B003", b"1234567890", False, True, True, b"ABC", b"ABC"), + tagWriteData001("B004", b"1234567890", False, True, True, b"ABC1234567890", b"ABC1234567890"), + + tagWriteData001("C001", "1234567890", True, False, False, "ABC", "1234567890ABC"), + tagWriteData001("C002", b"1234567890", True, False, True, b"ABC", b"1234567890ABC"), + + tagWriteData001("D001", "1234567890", True, True, False, "ABC", "ABC"), + tagWriteData001("D002", "1234567890", True, True, False, "ABC1234567890", "ABC1234567890"), + tagWriteData001("D003", b"1234567890", True, True, True, b"ABC", b"ABC"), + tagWriteData001("D004", b"1234567890", True, True, True, b"ABC1234567890", b"ABC1234567890"), + + tagWriteData001("E001", "\0001234567890\000", False, False, False, "\000ABC\000", "\0001234567890\000\000ABC\000"), + tagWriteData001("E002", b"\0001234567890\000", False, False, True, b"\000ABC\000", b"\0001234567890\000\000ABC\000"), + + tagWriteData001("F001", "a\nb\n", False, False, False, ["c", "d"], "a\nb\nc\nd\n"), + tagWriteData001("F002", b"a\nb\n", False, False, True, [b"c", b"d"], b"a\nb\nc\nd\n"), + + tagWriteData001("G001", "a\nb\n", False, False, False, ["c\n\n", "d\n"], "a\nb\nc\nd\n"), + tagWriteData001("G002", b"a\nb\n", False, False, True, [b"c\n\n", b"d\n"], b"a\nb\nc\nd\n"), + ] + + @pytest.fixture( + params=sm_write_data001, + ids=[x.sign for x in sm_write_data001], + ) + def write_data001(self, request): + assert isinstance(request, pytest.FixtureRequest) + assert type(request.param) == __class__.tagWriteData001 # noqa: E721 + return request.param + + def test_write(self, write_data001): + assert type(write_data001) == __class__.tagWriteData001 # noqa: E721 + + mode = "w+b" if write_data001.call_param__binary else "w+" + + with tempfile.NamedTemporaryFile(mode=mode, delete=True) as tmp_file: + tmp_file.write(write_data001.source) + tmp_file.flush() + + self.operations.write( + tmp_file.name, + write_data001.call_param__data, + read_and_write=write_data001.call_param__rw, + truncate=write_data001.call_param__truncate, + binary=write_data001.call_param__binary) + + tmp_file.seek(0) + + s = tmp_file.read() + + assert s == write_data001.result From 7fd2f07af75391503fb1c79a1bc9ec1f763923af Mon Sep 17 00:00:00 2001 From: Dmitry Kovalenko Date: Thu, 20 Feb 2025 14:56:02 +0300 Subject: [PATCH 03/90] RemoteOperations::exec_command updated (#185) - Exact enumeration of supported 'cmd' types - Refactoring --- testgres/operations/remote_ops.py | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/testgres/operations/remote_ops.py b/testgres/operations/remote_ops.py index f690e063..a24fce50 100644 --- 
a/testgres/operations/remote_ops.py +++ b/testgres/operations/remote_ops.py @@ -78,14 +78,17 @@ def exec_command(self, cmd, wait_exit=False, verbose=False, expect_error=False, assert input_prepared is None or (type(input_prepared) == bytes) # noqa: E721 - ssh_cmd = [] - if isinstance(cmd, str): - ssh_cmd = ['ssh', self.ssh_dest] + self.ssh_args + [cmd] - elif isinstance(cmd, list): - ssh_cmd = ['ssh', self.ssh_dest] + self.ssh_args + [subprocess.list2cmdline(cmd)] + if type(cmd) == str: # noqa: E721 + cmd_s = cmd + elif type(cmd) == list: # noqa: E721 + cmd_s = subprocess.list2cmdline(cmd) else: raise ValueError("Invalid 'cmd' argument type - {0}".format(type(cmd).__name__)) + assert type(cmd_s) == str # noqa: E721 + + ssh_cmd = ['ssh', self.ssh_dest] + self.ssh_args + [cmd_s] + process = subprocess.Popen(ssh_cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE) assert not (process is None) if get_process: From e44aa9813daf04e8170f57c277923100aa04eadc Mon Sep 17 00:00:00 2001 From: Dmitry Kovalenko Date: Fri, 21 Feb 2025 20:04:08 +0300 Subject: [PATCH 04/90] RemoteOperations::exec_command explicitly transfers LANG, LANGUAGE and LC_* envvars to the server side (#187) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * RemoteOperations::exec_command updated - Exact enumeration of supported 'cmd' types - Refactoring * RemoteOperations::exec_command explicitly transfers LANG, LANGUAGE and LC_* envvars to the server side It should help resolve a problem with replacing a LANG variable by ssh-server. History. On our internal tests we got a problem on the Debian 11 and PostgresPro STD-13. One test returned the error from initdb: initdb: error: collations with different collate and ctype values ("en_US.UTF-8" and "C.UTF-8" accordingly) are not supported by ICU - TestRunner set variable LANG="C" - Python set variable LC_CTYPE="C.UTF-8" - Test call inidb through command "ssh test@localhost inidb -D ...." - SSH-server replaces LANG with value "en_US.UTF-8" (from etc/default/locale) - initdb calculate collate through this value of LANG variable and get en_US.UTF-8 So we have that: - ctype is C.UTF-8 - collate is en_US.UTF-8 ICU on the Debuan-11 (uconv v2.1 ICU 67.1) does not suppot this combination and inidb rturns the error. This patch generates a new command line for ssh: ssh test@localhost "LANG=\"...\";LC_xxx=\"...\";" It resolves this problem with initdb and should help resolve other problems with execution of command through SSH. Amen. * New tests in TestgresRemoteTests are added New tests: - test_init__LANG_С - test_init__unk_LANG_and_LC_CTYPE * TestgresRemoteTests.test_init__unk_LANG_and_LC_CTYPE is updated Let's test bad data with '\' and '"' symbols. 
* Static methods are marked with @staticmethod [thanks to Victoria Shepard] The following methods of RemoteOperations were corrected: - _make_exec_env_list - _does_put_envvar_into_exec_cmd - _quote_envvar * TestRemoteOperations::_quote_envvar is updated (typification) --- testgres/operations/remote_ops.py | 46 +++++++++++++++++- tests/test_simple_remote.py | 79 +++++++++++++++++++++++++++++++ 2 files changed, 124 insertions(+), 1 deletion(-) diff --git a/testgres/operations/remote_ops.py b/testgres/operations/remote_ops.py index a24fce50..af4c59f9 100644 --- a/testgres/operations/remote_ops.py +++ b/testgres/operations/remote_ops.py @@ -87,7 +87,12 @@ def exec_command(self, cmd, wait_exit=False, verbose=False, expect_error=False, assert type(cmd_s) == str # noqa: E721 - ssh_cmd = ['ssh', self.ssh_dest] + self.ssh_args + [cmd_s] + cmd_items = __class__._make_exec_env_list() + cmd_items.append(cmd_s) + + env_cmd_s = ';'.join(cmd_items) + + ssh_cmd = ['ssh', self.ssh_dest] + self.ssh_args + [env_cmd_s] process = subprocess.Popen(ssh_cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE) assert not (process is None) @@ -510,6 +515,45 @@ def db_connect(self, dbname, user, password=None, host="localhost", port=5432): ) return conn + @staticmethod + def _make_exec_env_list() -> list[str]: + result = list[str]() + for envvar in os.environ.items(): + if not __class__._does_put_envvar_into_exec_cmd(envvar[0]): + continue + qvalue = __class__._quote_envvar(envvar[1]) + assert type(qvalue) == str # noqa: E721 + result.append(envvar[0] + "=" + qvalue) + continue + + return result + + sm_envs_for_exec_cmd = ["LANG", "LANGUAGE"] + + @staticmethod + def _does_put_envvar_into_exec_cmd(name: str) -> bool: + assert type(name) == str # noqa: E721 + name = name.upper() + if name.startswith("LC_"): + return True + if name in __class__.sm_envs_for_exec_cmd: + return True + return False + + @staticmethod + def _quote_envvar(value: str) -> str: + assert type(value) == str # noqa: E721 + result = "\"" + for ch in value: + if ch == "\"": + result += "\\\"" + elif ch == "\\": + result += "\\\\" + else: + result += ch + result += "\"" + return result + def normalize_error(error): if isinstance(error, bytes): diff --git a/tests/test_simple_remote.py b/tests/test_simple_remote.py index c8dd2964..2b581ac9 100755 --- a/tests/test_simple_remote.py +++ b/tests/test_simple_remote.py @@ -119,6 +119,79 @@ def test_custom_init(self): # there should be no trust entries at all self.assertFalse(any('trust' in s for s in lines)) + def test_init__LANG_С(self): + # PBCKP-1744 + prev_LANG = os.environ.get("LANG") + + try: + os.environ["LANG"] = "C" + + with get_remote_node(conn_params=conn_params) as node: + node.init().start() + finally: + __class__.helper__restore_envvar("LANG", prev_LANG) + + def test_init__unk_LANG_and_LC_CTYPE(self): + # PBCKP-1744 + prev_LANG = os.environ.get("LANG") + prev_LANGUAGE = os.environ.get("LANGUAGE") + prev_LC_CTYPE = os.environ.get("LC_CTYPE") + prev_LC_COLLATE = os.environ.get("LC_COLLATE") + + try: + # TODO: Pass unkData through test parameter. 
+ unkDatas = [ + ("UNKNOWN_LANG", "UNKNOWN_CTYPE"), + ("\"UNKNOWN_LANG\"", "\"UNKNOWN_CTYPE\""), + ("\\UNKNOWN_LANG\\", "\\UNKNOWN_CTYPE\\"), + ("\"UNKNOWN_LANG", "UNKNOWN_CTYPE\""), + ("\\UNKNOWN_LANG", "UNKNOWN_CTYPE\\"), + ("\\", "\\"), + ("\"", "\""), + ] + + for unkData in unkDatas: + logging.info("----------------------") + logging.info("Unk LANG is [{0}]".format(unkData[0])) + logging.info("Unk LC_CTYPE is [{0}]".format(unkData[1])) + + os.environ["LANG"] = unkData[0] + os.environ.pop("LANGUAGE", None) + os.environ["LC_CTYPE"] = unkData[1] + os.environ.pop("LC_COLLATE", None) + + assert os.environ.get("LANG") == unkData[0] + assert not ("LANGUAGE" in os.environ.keys()) + assert os.environ.get("LC_CTYPE") == unkData[1] + assert not ("LC_COLLATE" in os.environ.keys()) + + while True: + try: + with get_remote_node(conn_params=conn_params): + pass + except testgres.exceptions.ExecUtilException as e: + # + # Example of an error message: + # + # warning: setlocale: LC_CTYPE: cannot change locale (UNKNOWN_CTYPE): No such file or directory + # postgres (PostgreSQL) 14.12 + # + errMsg = str(e) + + logging.info("Error message is: {0}".format(errMsg)) + + assert "LC_CTYPE" in errMsg + assert unkData[1] in errMsg + assert "warning: setlocale: LC_CTYPE: cannot change locale (" + unkData[1] + "): No such file or directory" in errMsg + assert "postgres" in errMsg + break + raise Exception("We expected an error!") + finally: + __class__.helper__restore_envvar("LANG", prev_LANG) + __class__.helper__restore_envvar("LANGUAGE", prev_LANGUAGE) + __class__.helper__restore_envvar("LC_CTYPE", prev_LC_CTYPE) + __class__.helper__restore_envvar("LC_COLLATE", prev_LC_COLLATE) + def test_double_init(self): with get_remote_node(conn_params=conn_params).init() as node: # can't initialize node more than once @@ -994,6 +1067,12 @@ def test_child_process_dies(self): # try to handle children list -- missing processes will have ptype "ProcessType.Unknown" [ProcessProxy(p) for p in children] + def helper__restore_envvar(name, prev_value): + if prev_value is None: + os.environ.pop(name, None) + else: + os.environ[name] = prev_value + if __name__ == '__main__': if os_ops.environ('ALT_CONFIG'): From 44b99f08084d706a3cc9a67fa255579def9cb201 Mon Sep 17 00:00:00 2001 From: Dmitry Kovalenko Date: Fri, 21 Feb 2025 22:11:00 +0300 Subject: [PATCH 05/90] TestRemoteOperations::test_makedirs_and_rmdirs_success is updated (#188) The new checks are added. --- tests/test_remote.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tests/test_remote.py b/tests/test_remote.py index 3e6b79dd..4330b92f 100755 --- a/tests/test_remote.py +++ b/tests/test_remote.py @@ -83,10 +83,12 @@ def test_makedirs_and_rmdirs_success(self): # Test makedirs self.operations.makedirs(path) + assert os.path.exists(path) assert self.operations.path_exists(path) # Test rmdirs self.operations.rmdirs(path) + assert not os.path.exists(path) assert not self.operations.path_exists(path) def test_makedirs_and_rmdirs_failure(self): From 40eaf7de413eb4b4eb94ed32111837f8aaa7507b Mon Sep 17 00:00:00 2001 From: Dmitry Kovalenko Date: Sun, 23 Feb 2025 19:34:57 +0300 Subject: [PATCH 06/90] Usage of @staticmethod (#189) * Usage of @staticmethod All the known static methods are marked with @staticmethod decorators. * Fix for "ERROR: Unexpected indentation. [docutils]" This commit should fix an unstable error in CI: /pg/testgres/testgres/api.py:docstring of testgres.api.get_remote_node:5: ERROR: Unexpected indentation. 
[docutils] --- testgres/api.py | 4 +--- testgres/node.py | 3 +++ testgres/operations/helpers.py | 3 +++ testgres/operations/local_ops.py | 2 ++ testgres/operations/raise_error.py | 4 ++++ testgres/operations/remote_ops.py | 2 ++ tests/test_simple.py | 2 ++ 7 files changed, 17 insertions(+), 3 deletions(-) diff --git a/testgres/api.py b/testgres/api.py index e4b1cdd5..6a96ee84 100644 --- a/testgres/api.py +++ b/testgres/api.py @@ -47,8 +47,6 @@ def get_remote_node(name=None, conn_params=None): Simply a wrapper around :class:`.PostgresNode` constructor for remote node. See :meth:`.PostgresNode.__init__` for details. For remote connection you can add the next parameter: - conn_params = ConnectionParams(host='127.0.0.1', - ssh_key=None, - username=default_username()) + conn_params = ConnectionParams(host='127.0.0.1', ssh_key=None, username=default_username()) """ return get_new_node(name=name, conn_params=conn_params) diff --git a/testgres/node.py b/testgres/node.py index b85a62f2..8a712753 100644 --- a/testgres/node.py +++ b/testgres/node.py @@ -1939,6 +1939,7 @@ def make_simple( return node + @staticmethod def _gettempdir_for_socket(): platform_system_name = platform.system().lower() @@ -1966,6 +1967,7 @@ def _gettempdir_for_socket(): return "/tmp" + @staticmethod def _gettempdir(): v = tempfile.gettempdir() @@ -1984,6 +1986,7 @@ def _gettempdir(): # OK return v + @staticmethod def _raise_bugcheck(msg): assert type(msg) == str # noqa: E721 assert msg != "" diff --git a/testgres/operations/helpers.py b/testgres/operations/helpers.py index b50f0baa..03e97edc 100644 --- a/testgres/operations/helpers.py +++ b/testgres/operations/helpers.py @@ -2,6 +2,7 @@ class Helpers: + @staticmethod def _make_get_default_encoding_func(): # locale.getencoding is added in Python 3.11 if hasattr(locale, 'getencoding'): @@ -13,6 +14,7 @@ def _make_get_default_encoding_func(): # Prepared pointer on function to get a name of system codepage _get_default_encoding_func = _make_get_default_encoding_func() + @staticmethod def GetDefaultEncoding(): # # Original idea/source was: @@ -36,6 +38,7 @@ def GetDefaultEncoding(): # Is it an unexpected situation? 
return 'UTF-8' + @staticmethod def PrepareProcessInput(input, encoding): if not input: return None diff --git a/testgres/operations/local_ops.py b/testgres/operations/local_ops.py index 5c79bb7e..91070fe7 100644 --- a/testgres/operations/local_ops.py +++ b/testgres/operations/local_ops.py @@ -265,6 +265,7 @@ def write(self, filename, data, truncate=False, binary=False, read_and_write=Fal data2 = __class__._prepare_data_to_write(data, binary) file.write(data2) + @staticmethod def _prepare_line_to_write(data, binary): data = __class__._prepare_data_to_write(data, binary) @@ -275,6 +276,7 @@ def _prepare_line_to_write(data, binary): assert type(data) == str # noqa: E721 return data.rstrip('\n') + '\n' + @staticmethod def _prepare_data_to_write(data, binary): if isinstance(data, bytes): return data if binary else data.decode() diff --git a/testgres/operations/raise_error.py b/testgres/operations/raise_error.py index 0e760e74..6031b238 100644 --- a/testgres/operations/raise_error.py +++ b/testgres/operations/raise_error.py @@ -3,6 +3,7 @@ class RaiseError: + @staticmethod def UtilityExitedWithNonZeroCode(cmd, exit_code, msg_arg, error, out): assert type(exit_code) == int # noqa: E721 @@ -20,12 +21,14 @@ def UtilityExitedWithNonZeroCode(cmd, exit_code, msg_arg, error, out): out=out, error=error) + @staticmethod def _TranslateDataIntoString(data): if type(data) == bytes: # noqa: E721 return __class__._TranslateDataIntoString__FromBinary(data) return str(data) + @staticmethod def _TranslateDataIntoString__FromBinary(data): assert type(data) == bytes # noqa: E721 @@ -36,6 +39,7 @@ def _TranslateDataIntoString__FromBinary(data): return "#cannot_decode_text" + @staticmethod def _BinaryIsASCII(data): assert type(data) == bytes # noqa: E721 diff --git a/testgres/operations/remote_ops.py b/testgres/operations/remote_ops.py index af4c59f9..51f5b2e8 100644 --- a/testgres/operations/remote_ops.py +++ b/testgres/operations/remote_ops.py @@ -313,6 +313,7 @@ def write(self, filename, data, truncate=False, binary=False, read_and_write=Fal os.remove(tmp_file.name) + @staticmethod def _prepare_line_to_write(data, binary, encoding): data = __class__._prepare_data_to_write(data, binary, encoding) @@ -323,6 +324,7 @@ def _prepare_line_to_write(data, binary, encoding): assert type(data) == str # noqa: E721 return data.rstrip('\n') + '\n' + @staticmethod def _prepare_data_to_write(data, binary, encoding): if isinstance(data, bytes): return data if binary else data.decode(encoding) diff --git a/tests/test_simple.py b/tests/test_simple.py index 4e6fb573..a751f0a3 100644 --- a/tests/test_simple.py +++ b/tests/test_simple.py @@ -1125,6 +1125,7 @@ def __exit__(self, type, value, traceback): __class__.sm_prev_testgres_reserve_port = None __class__.sm_prev_testgres_release_port = None + @staticmethod def _proxy__reserve_port(): assert type(__class__.sm_DummyPortMaxUsage) == int # noqa: E721 assert type(__class__.sm_DummyPortTotalUsage) == int # noqa: E721 @@ -1144,6 +1145,7 @@ def _proxy__reserve_port(): __class__.sm_DummyPortCurrentUsage += 1 return __class__.sm_DummyPortNumber + @staticmethod def _proxy__release_port(dummyPortNumber): assert type(dummyPortNumber) == int # noqa: E721 From 8824946283a305c0ee2b997aeb513efda8a073e7 Mon Sep 17 00:00:00 2001 From: "d.kovalenko" Date: Sun, 23 Feb 2025 23:18:57 +0300 Subject: [PATCH 07/90] TestgresRemoteTests::helper__restore_envvar is marked with @staticmethod --- tests/test_simple_remote.py | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/test_simple_remote.py 
b/tests/test_simple_remote.py index 2b581ac9..9d12e618 100755 --- a/tests/test_simple_remote.py +++ b/tests/test_simple_remote.py @@ -1067,6 +1067,7 @@ def test_child_process_dies(self): # try to handle children list -- missing processes will have ptype "ProcessType.Unknown" [ProcessProxy(p) for p in children] + @staticmethod def helper__restore_envvar(name, prev_value): if prev_value is None: os.environ.pop(name, None) From fe03c24d35b8713f01df4d015859d1d099dfcd58 Mon Sep 17 00:00:00 2001 From: "d.kovalenko" Date: Mon, 24 Feb 2025 13:49:43 +0300 Subject: [PATCH 08/90] TestgresRemoteTests.test_init__unk_LANG_and_LC_CTYPE is corrected Vanilla PG18 returns "...PostgreSQL 18devel" --- tests/test_simple_remote.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/test_simple_remote.py b/tests/test_simple_remote.py index 9d12e618..8b44623a 100755 --- a/tests/test_simple_remote.py +++ b/tests/test_simple_remote.py @@ -183,7 +183,7 @@ def test_init__unk_LANG_and_LC_CTYPE(self): assert "LC_CTYPE" in errMsg assert unkData[1] in errMsg assert "warning: setlocale: LC_CTYPE: cannot change locale (" + unkData[1] + "): No such file or directory" in errMsg - assert "postgres" in errMsg + assert ("postgres" in errMsg) or ("PostgreSQL" in errMsg) break raise Exception("We expected an error!") finally: From 50fc4c5fecc9aa722dbc660762a9d120f12e92fa Mon Sep 17 00:00:00 2001 From: "d.kovalenko" Date: Mon, 24 Feb 2025 14:12:50 +0300 Subject: [PATCH 09/90] [BUG FIX] PostgresNode::start is corrected Incorrect code to build a warning message. --- testgres/node.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/testgres/node.py b/testgres/node.py index 8a712753..512650c1 100644 --- a/testgres/node.py +++ b/testgres/node.py @@ -841,8 +841,7 @@ def LOCAL__raise_cannot_start_node__std(from_exception): log_files0 = log_files1 logging.warning( - "Detected a conflict with using the port {0}. " - "Trying another port after a {1}-second sleep...".format(self.port, timeout) + "Detected a conflict with using the port {0}. Trying another port after a {1}-second sleep...".format(self.port, timeout) ) time.sleep(timeout) timeout = min(2 * timeout, 5) From 2b34236e9f8677cd20b75b37840841dbe0968b09 Mon Sep 17 00:00:00 2001 From: "d.kovalenko" Date: Mon, 24 Feb 2025 15:03:23 +0300 Subject: [PATCH 10/90] execute_utility2, get_bin_path2, get_pg_config2 are added This the functions with explicit os_ops argument. testgres/utils.py - [add] def execute_utility2(os_ops: OsOperations, args, logfile=None, verbose=False) - [add] def get_bin_path2(os_ops: OsOperations, filename) - [add] def get_pg_config2(os_ops: OsOperations, pg_config_path): ATTENTION get_pg_config does not change tconf.os_ops now testgres/cache.py - cached_initdb - [add] make_utility_path - it is used for pg_resetwal, too. --- testgres/backup.py | 13 +++++++++---- testgres/cache.py | 24 ++++++++++++++++++------ testgres/node.py | 30 +++++++++++++++--------------- testgres/utils.py | 46 +++++++++++++++++++++++++++++++++++----------- 4 files changed, 77 insertions(+), 36 deletions(-) diff --git a/testgres/backup.py b/testgres/backup.py index cecb0f7b..619c0270 100644 --- a/testgres/backup.py +++ b/testgres/backup.py @@ -15,9 +15,11 @@ from .exceptions import BackupException +from .operations.os_ops import OsOperations + from .utils import \ - get_bin_path, \ - execute_utility, \ + get_bin_path2, \ + execute_utility2, \ clean_on_error @@ -44,6 +46,9 @@ def __init__(self, username: database user name. 
xlog_method: none | fetch | stream (see docs) """ + assert node.os_ops is not None + assert isinstance(node.os_ops, OsOperations) + if not options: options = [] self.os_ops = node.os_ops @@ -73,7 +78,7 @@ def __init__(self, data_dir = os.path.join(self.base_dir, DATA_DIR) _params = [ - get_bin_path("pg_basebackup"), + get_bin_path2(self.os_ops, "pg_basebackup"), "-p", str(node.port), "-h", node.host, "-U", username, @@ -81,7 +86,7 @@ def __init__(self, "-X", xlog_method.value ] # yapf: disable _params += options - execute_utility(_params, self.log_file) + execute_utility2(self.os_ops, _params, self.log_file) def __enter__(self): return self diff --git a/testgres/cache.py b/testgres/cache.py index f17b54b5..61d44868 100644 --- a/testgres/cache.py +++ b/testgres/cache.py @@ -15,8 +15,8 @@ ExecUtilException from .utils import \ - get_bin_path, \ - execute_utility + get_bin_path2, \ + execute_utility2 from .operations.local_ops import LocalOperations from .operations.os_ops import OsOperations @@ -27,11 +27,23 @@ def cached_initdb(data_dir, logfile=None, params=None, os_ops: OsOperations = Lo Perform initdb or use cached node files. """ + assert os_ops is not None + assert isinstance(os_ops, OsOperations) + + def make_utility_path(name): + assert name is not None + assert type(name) == str + + if bin_path: + return os.path.join(bin_path, name) + + return get_bin_path2(os_ops, name) + def call_initdb(initdb_dir, log=logfile): try: - initdb_path = os.path.join(bin_path, 'initdb') if bin_path else get_bin_path("initdb") + initdb_path = make_utility_path("initdb") _params = [initdb_path, "-D", initdb_dir, "-N"] - execute_utility(_params + (params or []), log) + execute_utility2(os_ops, _params + (params or []), log) except ExecUtilException as e: raise_from(InitNodeException("Failed to run initdb"), e) @@ -63,8 +75,8 @@ def call_initdb(initdb_dir, log=logfile): os_ops.write(pg_control, new_pg_control, truncate=True, binary=True, read_and_write=True) # XXX: build new WAL segment with our system id - _params = [get_bin_path("pg_resetwal"), "-D", data_dir, "-f"] - execute_utility(_params, logfile) + _params = [make_utility_path("pg_resetwal"), "-D", data_dir, "-f"] + execute_utility2(os_ops, _params, logfile) except ExecUtilException as e: msg = "Failed to reset WAL for system id" diff --git a/testgres/node.py b/testgres/node.py index 512650c1..56899b90 100644 --- a/testgres/node.py +++ b/testgres/node.py @@ -89,9 +89,9 @@ from .utils import \ PgVer, \ eprint, \ - get_bin_path, \ + get_bin_path2, \ get_pg_version, \ - execute_utility, \ + execute_utility2, \ options_string, \ clean_on_error @@ -301,7 +301,7 @@ def base_dir(self): @property def bin_dir(self): if not self._bin_dir: - self._bin_dir = os.path.dirname(get_bin_path("pg_config")) + self._bin_dir = os.path.dirname(get_bin_path2(self.os_ops, "pg_config")) return self._bin_dir @property @@ -684,7 +684,7 @@ def status(self): "-D", self.data_dir, "status" ] # yapf: disable - status_code, out, error = execute_utility(_params, self.utils_log_file, verbose=True) + status_code, out, error = execute_utility2(self.os_ops, _params, self.utils_log_file, verbose=True) if error and 'does not exist' in error: return NodeStatus.Uninitialized elif 'no server running' in out: @@ -710,7 +710,7 @@ def get_control_data(self): _params += ["-D"] if self._pg_version >= PgVer('9.5') else [] _params += [self.data_dir] - data = execute_utility(_params, self.utils_log_file) + data = execute_utility2(self.os_ops, _params, self.utils_log_file) out_dict = {} @@ -793,7 
+793,7 @@ def start(self, params=[], wait=True): def LOCAL__start_node(): # 'error' will be None on Windows - _, _, error = execute_utility(_params, self.utils_log_file, verbose=True) + _, _, error = execute_utility2(self.os_ops, _params, self.utils_log_file, verbose=True) assert error is None or type(error) == str # noqa: E721 if error and 'does not exist' in error: raise Exception(error) @@ -882,7 +882,7 @@ def stop(self, params=[], wait=True): "stop" ] + params # yapf: disable - execute_utility(_params, self.utils_log_file) + execute_utility2(self.os_ops, _params, self.utils_log_file) self._maybe_stop_logger() self.is_started = False @@ -924,7 +924,7 @@ def restart(self, params=[]): ] + params # yapf: disable try: - error_code, out, error = execute_utility(_params, self.utils_log_file, verbose=True) + error_code, out, error = execute_utility2(self.os_ops, _params, self.utils_log_file, verbose=True) if error and 'could not start server' in error: raise ExecUtilException except ExecUtilException as e: @@ -953,7 +953,7 @@ def reload(self, params=[]): "reload" ] + params # yapf: disable - execute_utility(_params, self.utils_log_file) + execute_utility2(self.os_ops, _params, self.utils_log_file) return self @@ -975,7 +975,7 @@ def promote(self, dbname=None, username=None): "promote" ] # yapf: disable - execute_utility(_params, self.utils_log_file) + execute_utility2(self.os_ops, _params, self.utils_log_file) # for versions below 10 `promote` is asynchronous so we need to wait # until it actually becomes writable @@ -1010,7 +1010,7 @@ def pg_ctl(self, params): "-w" # wait ] + params # yapf: disable - return execute_utility(_params, self.utils_log_file) + return execute_utility2(self.os_ops, _params, self.utils_log_file) def free_port(self): """ @@ -1230,7 +1230,7 @@ def tmpfile(): "-F", format.value ] # yapf: disable - execute_utility(_params, self.utils_log_file) + execute_utility2(self.os_ops, _params, self.utils_log_file) return filename @@ -1259,7 +1259,7 @@ def restore(self, filename, dbname=None, username=None): # try pg_restore if dump is binary format, and psql if not try: - execute_utility(_params, self.utils_log_name) + execute_utility2(self.os_ops, _params, self.utils_log_name) except ExecUtilException: self.psql(filename=filename, dbname=dbname, username=username) @@ -1612,7 +1612,7 @@ def pgbench_run(self, dbname=None, username=None, options=[], **kwargs): # should be the last one _params.append(dbname) - return execute_utility(_params, self.utils_log_file) + return execute_utility2(self.os_ops, _params, self.utils_log_file) def connect(self, dbname=None, @@ -1809,7 +1809,7 @@ def _get_bin_path(self, filename): if self.bin_dir: bin_path = os.path.join(self.bin_dir, filename) else: - bin_path = get_bin_path(filename) + bin_path = get_bin_path2(self.os_ops, filename) return bin_path def _escape_config_value(value): diff --git a/testgres/utils.py b/testgres/utils.py index 4bd232b1..9645fc3b 100644 --- a/testgres/utils.py +++ b/testgres/utils.py @@ -16,6 +16,8 @@ from .helpers.port_manager import PortManager from .exceptions import ExecUtilException from .config import testgres_config as tconf +from .operations.os_ops import OsOperations +from .operations.remote_ops import RemoteOperations # rows returned by PG_CONFIG _pg_config_data = {} @@ -68,7 +70,14 @@ def execute_utility(args, logfile=None, verbose=False): Returns: stdout of executed utility. 
""" - exit_status, out, error = tconf.os_ops.exec_command(args, verbose=True) + return execute_utility2(tconf.os_ops, args, logfile, verbose) + + +def execute_utility2(os_ops: OsOperations, args, logfile=None, verbose=False): + assert os_ops is not None + assert isinstance(os_ops, OsOperations) + + exit_status, out, error = os_ops.exec_command(args, verbose=True) # decode result out = '' if not out else out if isinstance(out, bytes): @@ -79,11 +88,11 @@ def execute_utility(args, logfile=None, verbose=False): # write new log entry if possible if logfile: try: - tconf.os_ops.write(filename=logfile, data=args, truncate=True) + os_ops.write(filename=logfile, data=args, truncate=True) if out: # comment-out lines lines = [u'\n'] + ['# ' + line for line in out.splitlines()] + [u'\n'] - tconf.os_ops.write(filename=logfile, data=lines) + os_ops.write(filename=logfile, data=lines) except IOError: raise ExecUtilException( "Problem with writing to logfile `{}` during run command `{}`".format(logfile, args)) @@ -98,25 +107,32 @@ def get_bin_path(filename): Return absolute path to an executable using PG_BIN or PG_CONFIG. This function does nothing if 'filename' is already absolute. """ + return get_bin_path2(tconf.os_ops, filename) + + +def get_bin_path2(os_ops: OsOperations, filename): + assert os_ops is not None + assert isinstance(os_ops, OsOperations) + # check if it's already absolute if os.path.isabs(filename): return filename - if tconf.os_ops.remote: + if isinstance(os_ops, RemoteOperations): pg_config = os.environ.get("PG_CONFIG_REMOTE") or os.environ.get("PG_CONFIG") else: # try PG_CONFIG - get from local machine pg_config = os.environ.get("PG_CONFIG") if pg_config: - bindir = get_pg_config()["BINDIR"] + bindir = get_pg_config(pg_config, os_ops)["BINDIR"] return os.path.join(bindir, filename) # try PG_BIN - pg_bin = tconf.os_ops.environ("PG_BIN") + pg_bin = os_ops.environ("PG_BIN") if pg_bin: return os.path.join(pg_bin, filename) - pg_config_path = tconf.os_ops.find_executable('pg_config') + pg_config_path = os_ops.find_executable('pg_config') if pg_config_path: bindir = get_pg_config(pg_config_path)["BINDIR"] return os.path.join(bindir, filename) @@ -129,12 +145,20 @@ def get_pg_config(pg_config_path=None, os_ops=None): Return output of pg_config (provided that it is installed). NOTE: this function caches the result by default (see GlobalConfig). 
""" - if os_ops: - tconf.os_ops = os_ops + + if os_ops is None: + os_ops = tconf.os_ops + + return get_pg_config2(os_ops, pg_config_path) + + +def get_pg_config2(os_ops: OsOperations, pg_config_path): + assert os_ops is not None + assert isinstance(os_ops, OsOperations) def cache_pg_config_data(cmd): # execute pg_config and get the output - out = tconf.os_ops.exec_command(cmd, encoding='utf-8') + out = os_ops.exec_command(cmd, encoding='utf-8') data = {} for line in out.splitlines(): @@ -158,7 +182,7 @@ def cache_pg_config_data(cmd): return _pg_config_data # try specified pg_config path or PG_CONFIG - if tconf.os_ops.remote: + if isinstance(os_ops, RemoteOperations): pg_config = pg_config_path or os.environ.get("PG_CONFIG_REMOTE") or os.environ.get("PG_CONFIG") else: # try PG_CONFIG - get from local machine From ed3ef60be4c09b135d86737755c2d4cbb7c5657f Mon Sep 17 00:00:00 2001 From: "d.kovalenko" Date: Mon, 24 Feb 2025 16:38:42 +0300 Subject: [PATCH 11/90] Code style (flake8) --- testgres/cache.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/testgres/cache.py b/testgres/cache.py index 61d44868..3ac63326 100644 --- a/testgres/cache.py +++ b/testgres/cache.py @@ -32,11 +32,11 @@ def cached_initdb(data_dir, logfile=None, params=None, os_ops: OsOperations = Lo def make_utility_path(name): assert name is not None - assert type(name) == str + assert type(name) == str # noqa: E721 if bin_path: return os.path.join(bin_path, name) - + return get_bin_path2(os_ops, name) def call_initdb(initdb_dir, log=logfile): From 0eeb705151b23b7dabb0b677ff9371279389a6cf Mon Sep 17 00:00:00 2001 From: Dmitry Kovalenko Date: Wed, 26 Feb 2025 11:17:29 +0300 Subject: [PATCH 12/90] Tests are based on pytest (#192) * Using pytest [pytest.raises] * Using pytest [pytest.skip] * Using pytest [assertIsNotNone] * Using pytest [assertFalse] * Using pytest [assertTrue] * Using pytest [assertEqual] * Using pytest [assertNotEqual] * Using pytest [assertGreaterEqual] * Using pytest [assertGreater] * Using pytest [assertIn] * Using pytest [assertListEqual] * unittest is not used * Code style (flake8) * Execution signature is removed * run_tests.sh installs pytest * run_tests.sh is updated run tests through pytest explicitly * Total refactoring of tests - TestgresRemoteTests does not use global variables and code - Explicit work with ..testgres folder * Code style (flake8) * Root __init__.py is added It is required for tests. * Code style (flake8) * pytest.ini is added * TestgresTests::test_ports_management is corrected Let's send warning about a garbage in the container "bound_ports" and continue working. * coding: utf-8 * Cleanup * CI runs all the tests of testgres. * Add install ssh (cherry picked from commit fec1e7ac9d6e2bfb43a01f0e370336ba5ed8e971) * Revert "Add install ssh" This reverts commit 537a9acb9dfb26d82251d2d68796a55989be8317. * Revert "CI runs all the tests of testgres." This reverts commit 2d2532c77e8d7521552c0f3511c119e90d55573e. * Test of probackup plugin is restored It works now (was runned with a fresh probackup2 and vanilla 18devel). * The test suite of a probackup plugin is based on pytest * Probackup plugin is updated Probackup plugin tests - They are skipped if PGPROBACKUPBIN is not defined Global variable init_params is None when PGPROBACKUPBIN is not defined or version is not processed * CI test use 4 cores * testgres.plugins.probackup2.Init was restored [thanks to Yuri Sokolov] * pytest.ini is updated [testpaths] Enumeration of all the known folders with tests. 
* test_child_pids (local, remote) is updated Multiple attempts and logging are added. * test_child_process_dies is updated Multiple attempts are added. --------- Co-authored-by: vshepard --- __init__.py | 0 pytest.ini | 9 + run_tests.sh | 12 +- testgres/plugins/__init__.py | 8 +- .../pg_probackup2/init_helpers.py | 13 +- .../pg_probackup2/tests/__init__.py | 0 .../pg_probackup2/tests/basic_test.py | 80 --- .../pg_probackup2/tests/test_basic.py | 95 +++ tests/helpers/run_conditions.py | 1 + tests/test_local.py | 7 +- tests/test_remote.py | 9 +- tests/test_simple.py | 607 +++++++++-------- tests/test_simple_remote.py | 615 ++++++++++-------- 13 files changed, 839 insertions(+), 617 deletions(-) create mode 100644 __init__.py create mode 100644 pytest.ini create mode 100644 testgres/plugins/pg_probackup2/pg_probackup2/tests/__init__.py delete mode 100644 testgres/plugins/pg_probackup2/pg_probackup2/tests/basic_test.py create mode 100644 testgres/plugins/pg_probackup2/pg_probackup2/tests/test_basic.py diff --git a/__init__.py b/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/pytest.ini b/pytest.ini new file mode 100644 index 00000000..c94eabc2 --- /dev/null +++ b/pytest.ini @@ -0,0 +1,9 @@ +[pytest] +testpaths = ["./tests", "./testgres/plugins/pg_probackup2/pg_probackup2/tests"] +addopts = --strict-markers +markers = +#log_file = logs/pytest.log +log_file_level = NOTSET +log_file_format = %(levelname)8s [%(asctime)s] %(message)s +log_file_date_format=%Y-%m-%d %H:%M:%S + diff --git a/run_tests.sh b/run_tests.sh index 73c459be..e9d58b54 100755 --- a/run_tests.sh +++ b/run_tests.sh @@ -22,11 +22,11 @@ export VIRTUAL_ENV_DISABLE_PROMPT=1 source $VENV_PATH/bin/activate # install utilities -$PIP install coverage flake8 psutil Sphinx +$PIP install coverage flake8 psutil Sphinx pytest pytest-xdist psycopg2 six psutil # install testgres' dependencies export PYTHONPATH=$(pwd) -$PIP install . +# $PIP install . # test code quality flake8 . 
@@ -38,21 +38,19 @@ rm -f $COVERAGE_FILE # run tests (PATH) -time coverage run -a tests/test_simple.py +time coverage run -a -m pytest -l -v -n 4 -k "TestgresTests" # run tests (PG_BIN) time \ PG_BIN=$(dirname $(which pg_config)) \ - ALT_CONFIG=1 \ - coverage run -a tests/test_simple.py + coverage run -a -m pytest -l -v -n 4 -k "TestgresTests" # run tests (PG_CONFIG) time \ PG_CONFIG=$(which pg_config) \ - ALT_CONFIG=1 \ - coverage run -a tests/test_simple.py + coverage run -a -m pytest -l -v -n 4 -k "TestgresTests" # show coverage diff --git a/testgres/plugins/__init__.py b/testgres/plugins/__init__.py index 8c19a23b..824eadc6 100644 --- a/testgres/plugins/__init__.py +++ b/testgres/plugins/__init__.py @@ -1,7 +1,7 @@ -from pg_probackup2.gdb import GDBobj -from pg_probackup2.app import ProbackupApp, ProbackupException -from pg_probackup2.init_helpers import init_params -from pg_probackup2.storage.fs_backup import FSTestBackupDir +from .pg_probackup2.pg_probackup2.gdb import GDBobj +from .pg_probackup2.pg_probackup2.app import ProbackupApp, ProbackupException +from .pg_probackup2.pg_probackup2.init_helpers import init_params +from .pg_probackup2.pg_probackup2.storage.fs_backup import FSTestBackupDir __all__ = [ "ProbackupApp", "ProbackupException", "init_params", "FSTestBackupDir", "GDBobj" diff --git a/testgres/plugins/pg_probackup2/pg_probackup2/init_helpers.py b/testgres/plugins/pg_probackup2/pg_probackup2/init_helpers.py index 078fdbab..c4570a39 100644 --- a/testgres/plugins/pg_probackup2/pg_probackup2/init_helpers.py +++ b/testgres/plugins/pg_probackup2/pg_probackup2/init_helpers.py @@ -121,8 +121,7 @@ def __init__(self): self.probackup_path = probackup_path_tmp if not self.probackup_path: - logging.error('pg_probackup binary is not found') - exit(1) + raise Exception('pg_probackup binary is not found') if os.name == 'posix': self.EXTERNAL_DIRECTORY_DELIMITER = ':' @@ -213,11 +212,15 @@ def __init__(self): if self.probackup_version.split('.')[0].isdigit(): self.major_version = int(self.probackup_version.split('.')[0]) else: - logging.error('Can\'t process pg_probackup version \"{}\": the major version is expected to be a number'.format(self.probackup_version)) - sys.exit(1) + raise Exception('Can\'t process pg_probackup version \"{}\": the major version is expected to be a number'.format(self.probackup_version)) def test_env(self): return self._test_env.copy() -init_params = Init() +try: + init_params = Init() +except Exception as e: + logging.error(str(e)) + logging.warning("testgres.plugins.probackup2.init_params is set to None.") + init_params = None diff --git a/testgres/plugins/pg_probackup2/pg_probackup2/tests/__init__.py b/testgres/plugins/pg_probackup2/pg_probackup2/tests/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/testgres/plugins/pg_probackup2/pg_probackup2/tests/basic_test.py b/testgres/plugins/pg_probackup2/pg_probackup2/tests/basic_test.py deleted file mode 100644 index b63531ec..00000000 --- a/testgres/plugins/pg_probackup2/pg_probackup2/tests/basic_test.py +++ /dev/null @@ -1,80 +0,0 @@ -import logging -import os -import shutil -import unittest -import testgres -from pg_probackup2.app import ProbackupApp -from pg_probackup2.init_helpers import Init, init_params -from pg_probackup2.app import build_backup_dir - - -class TestUtils: - @staticmethod - def get_module_and_function_name(test_id): - try: - module_name = test_id.split('.')[-2] - fname = test_id.split('.')[-1] - except IndexError: - logging.warning(f"Couldn't get module name and 
function name from test_id: `{test_id}`") - module_name, fname = test_id.split('(')[1].split('.')[1], test_id.split('(')[0] - return module_name, fname - - -class ProbackupTest(unittest.TestCase): - def setUp(self): - self.setup_test_environment() - self.setup_test_paths() - self.setup_backup_dir() - self.setup_probackup() - - def setup_test_environment(self): - self.output = None - self.cmd = None - self.nodes_to_cleanup = [] - self.module_name, self.fname = TestUtils.get_module_and_function_name(self.id()) - self.test_env = Init().test_env() - - def setup_test_paths(self): - self.rel_path = os.path.join(self.module_name, self.fname) - self.test_path = os.path.join(init_params.tmp_path, self.rel_path) - os.makedirs(self.test_path) - self.pb_log_path = os.path.join(self.test_path, "pb_log") - - def setup_backup_dir(self): - self.backup_dir = build_backup_dir(self, 'backup') - self.backup_dir.cleanup() - - def setup_probackup(self): - self.pg_node = testgres.NodeApp(self.test_path, self.nodes_to_cleanup) - self.pb = ProbackupApp(self, self.pg_node, self.pb_log_path, self.test_env, - auto_compress_alg='zlib', backup_dir=self.backup_dir) - - def tearDown(self): - if os.path.exists(self.test_path): - shutil.rmtree(self.test_path) - - -class BasicTest(ProbackupTest): - def test_full_backup(self): - # Setting up a simple test node - node = self.pg_node.make_simple('node', pg_options={"fsync": "off", "synchronous_commit": "off"}) - - # Initialize and configure Probackup - self.pb.init() - self.pb.add_instance('node', node) - self.pb.set_archiving('node', node) - - # Start the node and initialize pgbench - node.slow_start() - node.pgbench_init(scale=100, no_vacuum=True) - - # Perform backup and validation - backup_id = self.pb.backup_node('node', node) - out = self.pb.validate('node', backup_id) - - # Check if the backup is valid - self.assertIn(f"INFO: Backup {backup_id} is valid", out) - - -if __name__ == "__main__": - unittest.main() diff --git a/testgres/plugins/pg_probackup2/pg_probackup2/tests/test_basic.py b/testgres/plugins/pg_probackup2/pg_probackup2/tests/test_basic.py new file mode 100644 index 00000000..ba788623 --- /dev/null +++ b/testgres/plugins/pg_probackup2/pg_probackup2/tests/test_basic.py @@ -0,0 +1,95 @@ +from __future__ import annotations + +import os +import shutil +import pytest + +from ...... 
import testgres +from ...pg_probackup2.app import ProbackupApp +from ...pg_probackup2.init_helpers import Init, init_params +from ..storage.fs_backup import FSTestBackupDir + + +class ProbackupTest: + pg_node: testgres.PostgresNode + + @staticmethod + def probackup_is_available() -> bool: + p = os.environ.get("PGPROBACKUPBIN") + + if p is None: + return False + + if not os.path.exists(p): + return False + + return True + + @pytest.fixture(autouse=True, scope="function") + def implicit_fixture(self, request: pytest.FixtureRequest): + assert isinstance(request, pytest.FixtureRequest) + self.helper__setUp(request) + yield + self.helper__tearDown() + + def helper__setUp(self, request: pytest.FixtureRequest): + assert isinstance(request, pytest.FixtureRequest) + + self.helper__setup_test_environment(request) + self.helper__setup_test_paths() + self.helper__setup_backup_dir() + self.helper__setup_probackup() + + def helper__setup_test_environment(self, request: pytest.FixtureRequest): + assert isinstance(request, pytest.FixtureRequest) + + self.output = None + self.cmd = None + self.nodes_to_cleanup = [] + self.module_name, self.fname = request.node.cls.__name__, request.node.name + self.test_env = Init().test_env() + + def helper__setup_test_paths(self): + self.rel_path = os.path.join(self.module_name, self.fname) + self.test_path = os.path.join(init_params.tmp_path, self.rel_path) + os.makedirs(self.test_path, exist_ok=True) + self.pb_log_path = os.path.join(self.test_path, "pb_log") + + def helper__setup_backup_dir(self): + self.backup_dir = self.helper__build_backup_dir('backup') + self.backup_dir.cleanup() + + def helper__setup_probackup(self): + self.pg_node = testgres.NodeApp(self.test_path, self.nodes_to_cleanup) + self.pb = ProbackupApp(self, self.pg_node, self.pb_log_path, self.test_env, + auto_compress_alg='zlib', backup_dir=self.backup_dir) + + def helper__tearDown(self): + if os.path.exists(self.test_path): + shutil.rmtree(self.test_path) + + def helper__build_backup_dir(self, backup='backup'): + return FSTestBackupDir(rel_path=self.rel_path, backup=backup) + + +@pytest.mark.skipif(not ProbackupTest.probackup_is_available(), reason="Check that PGPROBACKUPBIN is defined and is valid.") +class TestBasic(ProbackupTest): + def test_full_backup(self): + # Setting up a simple test node + node = self.pg_node.make_simple('node', pg_options={"fsync": "off", "synchronous_commit": "off"}) + + # Initialize and configure Probackup + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) + + # Start the node and initialize pgbench + node.slow_start() + node.pgbench_init(scale=100, no_vacuum=True) + + # Perform backup and validation + backup_id = self.pb.backup_node('node', node) + out = self.pb.validate('node', backup_id) + + # Check if the backup is valid + assert f"INFO: Backup {backup_id} is valid" in out diff --git a/tests/helpers/run_conditions.py b/tests/helpers/run_conditions.py index 8d57f753..11357c30 100644 --- a/tests/helpers/run_conditions.py +++ b/tests/helpers/run_conditions.py @@ -1,3 +1,4 @@ +# coding: utf-8 import pytest import platform diff --git a/tests/test_local.py b/tests/test_local.py index 4051bfb5..60a96c18 100644 --- a/tests/test_local.py +++ b/tests/test_local.py @@ -1,12 +1,13 @@ +# coding: utf-8 import os import pytest import re import tempfile -from testgres import ExecUtilException -from testgres import InvalidOperationException -from testgres import LocalOperations +from ..testgres import ExecUtilException +from ..testgres import 
InvalidOperationException +from ..testgres import LocalOperations from .helpers.run_conditions import RunConditions diff --git a/tests/test_remote.py b/tests/test_remote.py index 4330b92f..8b167e9f 100755 --- a/tests/test_remote.py +++ b/tests/test_remote.py @@ -1,13 +1,14 @@ +# coding: utf-8 import os import pytest import re import tempfile -from testgres import ExecUtilException -from testgres import InvalidOperationException -from testgres import RemoteOperations -from testgres import ConnectionParams +from ..testgres import ExecUtilException +from ..testgres import InvalidOperationException +from ..testgres import RemoteOperations +from ..testgres import ConnectionParams class TestRemoteOperations: diff --git a/tests/test_simple.py b/tests/test_simple.py index a751f0a3..6c433cd4 100644 --- a/tests/test_simple.py +++ b/tests/test_simple.py @@ -1,22 +1,22 @@ -#!/usr/bin/env python # coding: utf-8 - import os import re import subprocess import tempfile -import testgres import time import six -import unittest +import pytest import psutil +import platform import logging.config from contextlib import contextmanager from shutil import rmtree -from testgres import \ +from .. import testgres + +from ..testgres import \ InitNodeException, \ StartNodeException, \ ExecUtilException, \ @@ -27,31 +27,32 @@ InvalidOperationException, \ NodeApp -from testgres import \ +from ..testgres import \ TestgresConfig, \ configure_testgres, \ scoped_config, \ pop_config -from testgres import \ +from ..testgres import \ NodeStatus, \ ProcessType, \ IsolationLevel, \ get_new_node -from testgres import \ +from ..testgres import \ get_bin_path, \ get_pg_config, \ get_pg_version -from testgres import \ +from ..testgres import \ First, \ Any # NOTE: those are ugly imports -from testgres import bound_ports -from testgres.utils import PgVer, parse_pg_version -from testgres.node import ProcessProxy +from ..testgres import bound_ports +from ..testgres.utils import PgVer, parse_pg_version +from ..testgres.utils import file_tail +from ..testgres.node import ProcessProxy def pg_version_ge(version): @@ -105,11 +106,11 @@ def removing(f): rmtree(f, ignore_errors=True) -class TestgresTests(unittest.TestCase): +class TestgresTests: def test_node_repr(self): with get_new_node() as node: pattern = r"PostgresNode\(name='.+', port=.+, base_dir='.+'\)" - self.assertIsNotNone(re.match(pattern, str(node))) + assert re.match(pattern, str(node)) is not None def test_custom_init(self): with get_new_node() as node: @@ -126,15 +127,15 @@ def test_custom_init(self): lines = conf.readlines() # check number of lines - self.assertGreaterEqual(len(lines), 6) + assert (len(lines) >= 6) # there should be no trust entries at all - self.assertFalse(any('trust' in s for s in lines)) + assert not (any('trust' in s for s in lines)) def test_double_init(self): with get_new_node().init() as node: # can't initialize node more than once - with self.assertRaises(InitNodeException): + with pytest.raises(expected_exception=InitNodeException): node.init() def test_init_after_cleanup(self): @@ -143,10 +144,11 @@ def test_init_after_cleanup(self): node.cleanup() node.init().start().execute('select 1') - @unittest.skipUnless(util_exists('pg_resetwal.exe' if os.name == 'nt' else 'pg_resetwal'), 'pgbench might be missing') - @unittest.skipUnless(pg_version_ge('9.6'), 'requires 9.6+') def test_init_unique_system_id(self): # this function exists in PostgreSQL 9.6+ + __class__.helper__skip_test_if_util_not_exist("pg_resetwal") + 
__class__.helper__skip_test_if_pg_version_is_not_ge("9.6") + query = 'select system_identifier from pg_control_system()' with scoped_config(cache_initdb=False): @@ -155,8 +157,8 @@ def test_init_unique_system_id(self): with scoped_config(cache_initdb=True, cached_initdb_unique=True) as config: - self.assertTrue(config.cache_initdb) - self.assertTrue(config.cached_initdb_unique) + assert (config.cache_initdb) + assert (config.cached_initdb_unique) # spawn two nodes; ids must be different with get_new_node().init().start() as node1, \ @@ -166,37 +168,37 @@ def test_init_unique_system_id(self): id2 = node2.execute(query)[0] # ids must increase - self.assertGreater(id1, id0) - self.assertGreater(id2, id1) + assert (id1 > id0) + assert (id2 > id1) def test_node_exit(self): base_dir = None - with self.assertRaises(QueryException): + with pytest.raises(expected_exception=QueryException): with get_new_node().init() as node: base_dir = node.base_dir node.safe_psql('select 1') # we should save the DB for "debugging" - self.assertTrue(os.path.exists(base_dir)) + assert (os.path.exists(base_dir)) rmtree(base_dir, ignore_errors=True) with get_new_node().init() as node: base_dir = node.base_dir # should have been removed by default - self.assertFalse(os.path.exists(base_dir)) + assert not (os.path.exists(base_dir)) def test_double_start(self): with get_new_node().init().start() as node: # can't start node more than once node.start() - self.assertTrue(node.is_started) + assert (node.is_started) def test_uninitialized_start(self): with get_new_node() as node: # node is not initialized yet - with self.assertRaises(StartNodeException): + with pytest.raises(expected_exception=StartNodeException): node.start() def test_restart(self): @@ -205,13 +207,13 @@ def test_restart(self): # restart, ok res = node.execute('select 1') - self.assertEqual(res, [(1, )]) + assert (res == [(1, )]) node.restart() res = node.execute('select 2') - self.assertEqual(res, [(2, )]) + assert (res == [(2, )]) # restart, fail - with self.assertRaises(StartNodeException): + with pytest.raises(expected_exception=StartNodeException): node.append_conf('pg_hba.conf', 'DUMMY') node.restart() @@ -228,106 +230,107 @@ def test_reload(self): # check new value cmm_new = node.execute('show client_min_messages') - self.assertEqual('debug1', cmm_new[0][0].lower()) - self.assertNotEqual(cmm_old, cmm_new) + assert ('debug1' == cmm_new[0][0].lower()) + assert (cmm_old != cmm_new) def test_pg_ctl(self): with get_new_node() as node: node.init().start() status = node.pg_ctl(['status']) - self.assertTrue('PID' in status) + assert ('PID' in status) def test_status(self): - self.assertTrue(NodeStatus.Running) - self.assertFalse(NodeStatus.Stopped) - self.assertFalse(NodeStatus.Uninitialized) + assert (NodeStatus.Running) + assert not (NodeStatus.Stopped) + assert not (NodeStatus.Uninitialized) # check statuses after each operation with get_new_node() as node: - self.assertEqual(node.pid, 0) - self.assertEqual(node.status(), NodeStatus.Uninitialized) + assert (node.pid == 0) + assert (node.status() == NodeStatus.Uninitialized) node.init() - self.assertEqual(node.pid, 0) - self.assertEqual(node.status(), NodeStatus.Stopped) + assert (node.pid == 0) + assert (node.status() == NodeStatus.Stopped) node.start() - self.assertNotEqual(node.pid, 0) - self.assertEqual(node.status(), NodeStatus.Running) + assert (node.pid != 0) + assert (node.status() == NodeStatus.Running) node.stop() - self.assertEqual(node.pid, 0) - self.assertEqual(node.status(), 
NodeStatus.Stopped) + assert (node.pid == 0) + assert (node.status() == NodeStatus.Stopped) node.cleanup() - self.assertEqual(node.pid, 0) - self.assertEqual(node.status(), NodeStatus.Uninitialized) + assert (node.pid == 0) + assert (node.status() == NodeStatus.Uninitialized) def test_psql(self): with get_new_node().init().start() as node: # check returned values (1 arg) res = node.psql('select 1') - self.assertEqual(rm_carriage_returns(res), (0, b'1\n', b'')) + assert (rm_carriage_returns(res) == (0, b'1\n', b'')) # check returned values (2 args) res = node.psql('postgres', 'select 2') - self.assertEqual(rm_carriage_returns(res), (0, b'2\n', b'')) + assert (rm_carriage_returns(res) == (0, b'2\n', b'')) # check returned values (named) res = node.psql(query='select 3', dbname='postgres') - self.assertEqual(rm_carriage_returns(res), (0, b'3\n', b'')) + assert (rm_carriage_returns(res) == (0, b'3\n', b'')) # check returned values (1 arg) res = node.safe_psql('select 4') - self.assertEqual(rm_carriage_returns(res), b'4\n') + assert (rm_carriage_returns(res) == b'4\n') # check returned values (2 args) res = node.safe_psql('postgres', 'select 5') - self.assertEqual(rm_carriage_returns(res), b'5\n') + assert (rm_carriage_returns(res) == b'5\n') # check returned values (named) res = node.safe_psql(query='select 6', dbname='postgres') - self.assertEqual(rm_carriage_returns(res), b'6\n') + assert (rm_carriage_returns(res) == b'6\n') # check feeding input node.safe_psql('create table horns (w int)') node.safe_psql('copy horns from stdin (format csv)', input=b"1\n2\n3\n\\.\n") _sum = node.safe_psql('select sum(w) from horns') - self.assertEqual(rm_carriage_returns(_sum), b'6\n') + assert (rm_carriage_returns(_sum) == b'6\n') # check psql's default args, fails - with self.assertRaises(QueryException): + with pytest.raises(expected_exception=QueryException): node.psql() node.stop() # check psql on stopped node, fails - with self.assertRaises(QueryException): + with pytest.raises(expected_exception=QueryException): node.safe_psql('select 1') def test_safe_psql__expect_error(self): with get_new_node().init().start() as node: err = node.safe_psql('select_or_not_select 1', expect_error=True) - self.assertTrue(type(err) == str) # noqa: E721 - self.assertIn('select_or_not_select', err) - self.assertIn('ERROR: syntax error at or near "select_or_not_select"', err) + assert (type(err) == str) # noqa: E721 + assert ('select_or_not_select' in err) + assert ('ERROR: syntax error at or near "select_or_not_select"' in err) # --------- - with self.assertRaises(InvalidOperationException) as ctx: + with pytest.raises( + expected_exception=InvalidOperationException, + match="^" + re.escape("Exception was expected, but query finished successfully: `select 1;`.") + "$" + ): node.safe_psql("select 1;", expect_error=True) - self.assertEqual(str(ctx.exception), "Exception was expected, but query finished successfully: `select 1;`.") - # --------- res = node.safe_psql("select 1;", expect_error=False) - self.assertEqual(rm_carriage_returns(res), b'1\n') + assert (rm_carriage_returns(res) == b'1\n') def test_transactions(self): with get_new_node().init().start() as node: @@ -341,12 +344,12 @@ def test_transactions(self): con.begin() con.execute('insert into test values (2)') res = con.execute('select * from test order by val asc') - self.assertListEqual(res, [(1, ), (2, )]) + assert (res == [(1, ), (2, )]) con.rollback() con.begin() res = con.execute('select * from test') - self.assertListEqual(res, [(1, )]) + assert (res == 
[(1, )]) con.rollback() con.begin() @@ -357,15 +360,15 @@ def test_control_data(self): with get_new_node() as node: # node is not initialized yet - with self.assertRaises(ExecUtilException): + with pytest.raises(expected_exception=ExecUtilException): node.get_control_data() node.init() data = node.get_control_data() # check returned dict - self.assertIsNotNone(data) - self.assertTrue(any('pg_control' in s for s in data.keys())) + assert data is not None + assert (any('pg_control' in s for s in data.keys())) def test_backup_simple(self): with get_new_node() as master: @@ -374,7 +377,7 @@ def test_backup_simple(self): master.init(allow_streaming=True) # node must be running - with self.assertRaises(BackupException): + with pytest.raises(expected_exception=BackupException): master.backup() # it's time to start node @@ -386,7 +389,7 @@ def test_backup_simple(self): with master.backup(xlog_method='stream') as backup: with backup.spawn_primary().start() as slave: res = slave.execute('select * from test order by i asc') - self.assertListEqual(res, [(1, ), (2, ), (3, ), (4, )]) + assert (res == [(1, ), (2, ), (3, ), (4, )]) def test_backup_multiple(self): with get_new_node() as node: @@ -394,12 +397,12 @@ def test_backup_multiple(self): with node.backup(xlog_method='fetch') as backup1, \ node.backup(xlog_method='fetch') as backup2: - self.assertNotEqual(backup1.base_dir, backup2.base_dir) + assert (backup1.base_dir != backup2.base_dir) with node.backup(xlog_method='fetch') as backup: with backup.spawn_primary('node1', destroy=False) as node1, \ backup.spawn_primary('node2', destroy=False) as node2: - self.assertNotEqual(node1.base_dir, node2.base_dir) + assert (node1.base_dir != node2.base_dir) def test_backup_exhaust(self): with get_new_node() as node: @@ -411,15 +414,17 @@ def test_backup_exhaust(self): pass # now let's try to create one more node - with self.assertRaises(BackupException): + with pytest.raises(expected_exception=BackupException): backup.spawn_primary() def test_backup_wrong_xlog_method(self): with get_new_node() as node: node.init(allow_streaming=True).start() - with self.assertRaises(BackupException, - msg='Invalid xlog_method "wrong"'): + with pytest.raises( + expected_exception=BackupException, + match="^" + re.escape('Invalid xlog_method "wrong"') + "$" + ): node.backup(xlog_method='wrong') def test_pg_ctl_wait_option(self): @@ -440,17 +445,18 @@ def test_replicate(self): with node.replicate().start() as replica: res = replica.execute('select 1') - self.assertListEqual(res, [(1, )]) + assert (res == [(1, )]) node.execute('create table test (val int)', commit=True) replica.catchup() res = node.execute('select * from test') - self.assertListEqual(res, []) + assert (res == []) - @unittest.skipUnless(pg_version_ge('9.6'), 'requires 9.6+') def test_synchronous_replication(self): + __class__.helper__skip_test_if_pg_version_is_not_ge("9.6") + with get_new_node() as master: old_version = not pg_version_ge('9.6') @@ -465,12 +471,12 @@ def test_synchronous_replication(self): standby2.start() # check formatting - self.assertEqual( - '1 ("{}", "{}")'.format(standby1.name, standby2.name), - str(First(1, (standby1, standby2)))) # yapf: disable - self.assertEqual( - 'ANY 1 ("{}", "{}")'.format(standby1.name, standby2.name), - str(Any(1, (standby1, standby2)))) # yapf: disable + assert ( + '1 ("{}", "{}")'.format(standby1.name, standby2.name) == str(First(1, (standby1, standby2))) + ) # yapf: disable + assert ( + 'ANY 1 ("{}", "{}")'.format(standby1.name, standby2.name) == str(Any(1, 
(standby1, standby2))) + ) # yapf: disable # set synchronous_standby_names master.set_synchronous_standbys(First(2, [standby1, standby2])) @@ -488,10 +494,11 @@ def test_synchronous_replication(self): master.safe_psql( 'insert into abc select generate_series(1, 1000000)') res = standby1.safe_psql('select count(*) from abc') - self.assertEqual(rm_carriage_returns(res), b'1000000\n') + assert (rm_carriage_returns(res) == b'1000000\n') - @unittest.skipUnless(pg_version_ge('10'), 'requires 10+') def test_logical_replication(self): + __class__.helper__skip_test_if_pg_version_is_not_ge("10") + with get_new_node() as node1, get_new_node() as node2: node1.init(allow_logical=True) node1.start() @@ -510,7 +517,7 @@ def test_logical_replication(self): # wait until changes apply on subscriber and check them sub.catchup() res = node2.execute('select * from test') - self.assertListEqual(res, [(1, 1), (2, 2)]) + assert (res == [(1, 1), (2, 2)]) # disable and put some new data sub.disable() @@ -520,7 +527,7 @@ def test_logical_replication(self): sub.enable() sub.catchup() res = node2.execute('select * from test') - self.assertListEqual(res, [(1, 1), (2, 2), (3, 3)]) + assert (res == [(1, 1), (2, 2), (3, 3)]) # Add new tables. Since we added "all tables" to publication # (default behaviour of publish() method) we don't need @@ -534,7 +541,7 @@ def test_logical_replication(self): node1.safe_psql('insert into test2 values (\'a\'), (\'b\')') sub.catchup() res = node2.execute('select * from test2') - self.assertListEqual(res, [('a', ), ('b', )]) + assert (res == [('a', ), ('b', )]) # drop subscription sub.drop() @@ -548,20 +555,21 @@ def test_logical_replication(self): node1.safe_psql('insert into test values (4, 4)') sub.catchup() res = node2.execute('select * from test') - self.assertListEqual(res, [(1, 1), (2, 2), (3, 3), (4, 4)]) + assert (res == [(1, 1), (2, 2), (3, 3), (4, 4)]) # explicitly add table - with self.assertRaises(ValueError): + with pytest.raises(expected_exception=ValueError): pub.add_tables([]) # fail pub.add_tables(['test2']) node1.safe_psql('insert into test2 values (\'c\')') sub.catchup() res = node2.execute('select * from test2') - self.assertListEqual(res, [('a', ), ('b', )]) + assert (res == [('a', ), ('b', )]) - @unittest.skipUnless(pg_version_ge('10'), 'requires 10+') def test_logical_catchup(self): """ Runs catchup for 100 times to be sure that it is consistent """ + __class__.helper__skip_test_if_pg_version_is_not_ge("10") + with get_new_node() as node1, get_new_node() as node2: node1.init(allow_logical=True) node1.start() @@ -579,16 +587,14 @@ def test_logical_catchup(self): node1.execute('insert into test values ({0}, {0})'.format(i)) sub.catchup() res = node2.execute('select * from test') - self.assertListEqual(res, [( - i, - i, - )]) + assert (res == [(i, i, )]) node1.execute('delete from test') - @unittest.skipIf(pg_version_ge('10'), 'requires <10') def test_logical_replication_fail(self): + __class__.helper__skip_test_if_pg_version_is_ge("10") + with get_new_node() as node: - with self.assertRaises(InitNodeException): + with pytest.raises(expected_exception=InitNodeException): node.init(allow_logical=True) def test_replication_slots(self): @@ -599,7 +605,7 @@ def test_replication_slots(self): replica.execute('select 1') # cannot create new slot with the same name - with self.assertRaises(TestgresException): + with pytest.raises(expected_exception=TestgresException): node.replicate(slot='slot1') def test_incorrect_catchup(self): @@ -607,7 +613,7 @@ def 
test_incorrect_catchup(self): node.init(allow_streaming=True).start() # node has no master, can't catch up - with self.assertRaises(TestgresException): + with pytest.raises(expected_exception=TestgresException): node.catchup() def test_promotion(self): @@ -622,7 +628,7 @@ def test_promotion(self): # make standby becomes writable master replica.safe_psql('insert into abc values (1)') res = replica.safe_psql('select * from abc') - self.assertEqual(rm_carriage_returns(res), b'1\n') + assert (rm_carriage_returns(res) == b'1\n') def test_dump(self): query_create = 'create table test as select generate_series(1, 2) as val' @@ -635,20 +641,20 @@ def test_dump(self): with removing(node1.dump(format=format)) as dump: with get_new_node().init().start() as node3: if format == 'directory': - self.assertTrue(os.path.isdir(dump)) + assert (os.path.isdir(dump)) else: - self.assertTrue(os.path.isfile(dump)) + assert (os.path.isfile(dump)) # restore dump node3.restore(filename=dump) res = node3.execute(query_select) - self.assertListEqual(res, [(1, ), (2, )]) + assert (res == [(1, ), (2, )]) def test_users(self): with get_new_node().init().start() as node: node.psql('create role test_user login') value = node.safe_psql('select 1', username='test_user') value = rm_carriage_returns(value) - self.assertEqual(value, b'1\n') + assert (value == b'1\n') def test_poll_query_until(self): with get_new_node() as node: @@ -661,15 +667,15 @@ def test_poll_query_until(self): node.poll_query_until(query=check_time.format(start_time)) end_time = node.execute(get_time)[0][0] - self.assertTrue(end_time - start_time >= 5) + assert (end_time - start_time >= 5) # check 0 columns - with self.assertRaises(QueryException): + with pytest.raises(expected_exception=QueryException): node.poll_query_until( query='select from pg_catalog.pg_class limit 1') # check None, fail - with self.assertRaises(QueryException): + with pytest.raises(expected_exception=QueryException): node.poll_query_until(query='create table abc (val int)') # check None, ok @@ -682,7 +688,7 @@ def test_poll_query_until(self): expected=None) # check arbitrary expected value, fail - with self.assertRaises(TimeoutException): + with pytest.raises(expected_exception=TimeoutException): node.poll_query_until(query='select 3', expected=1, max_attempts=3, @@ -692,17 +698,17 @@ def test_poll_query_until(self): node.poll_query_until(query='select 2', expected=2) # check timeout - with self.assertRaises(TimeoutException): + with pytest.raises(expected_exception=TimeoutException): node.poll_query_until(query='select 1 > 2', max_attempts=3, sleep_time=0.01) # check ProgrammingError, fail - with self.assertRaises(testgres.ProgrammingError): + with pytest.raises(expected_exception=testgres.ProgrammingError): node.poll_query_until(query='dummy1') # check ProgrammingError, ok - with self.assertRaises(TimeoutException): + with pytest.raises(expected_exception=(TimeoutException)): node.poll_query_until(query='dummy2', max_attempts=3, sleep_time=0.01, @@ -754,16 +760,17 @@ def test_logging(self): # check that master's port is found with open(logfile.name, 'r') as log: lines = log.readlines() - self.assertTrue(any(node_name in s for s in lines)) + assert (any(node_name in s for s in lines)) # test logger after stop/start/restart master.stop() master.start() master.restart() - self.assertTrue(master._logger.is_alive()) + assert (master._logger.is_alive()) - @unittest.skipUnless(util_exists('pgbench.exe' if os.name == 'nt' else 'pgbench'), 'pgbench might be missing') def 
test_pgbench(self): + __class__.helper__skip_test_if_util_not_exist("pgbench") + with get_new_node().init().start() as node: # initialize pgbench DB and run benchmarks @@ -780,13 +787,13 @@ def test_pgbench(self): proc.stdout.close() - self.assertTrue('tps' in out) + assert ('tps' in out) def test_pg_config(self): # check same instances a = get_pg_config() b = get_pg_config() - self.assertEqual(id(a), id(b)) + assert (id(a) == id(b)) # save right before config change c1 = get_pg_config() @@ -794,26 +801,26 @@ def test_pg_config(self): # modify setting for this scope with scoped_config(cache_pg_config=False) as config: # sanity check for value - self.assertFalse(config.cache_pg_config) + assert not (config.cache_pg_config) # save right after config change c2 = get_pg_config() # check different instances after config change - self.assertNotEqual(id(c1), id(c2)) + assert (id(c1) != id(c2)) # check different instances a = get_pg_config() b = get_pg_config() - self.assertNotEqual(id(a), id(b)) + assert (id(a) != id(b)) def test_config_stack(self): # no such option - with self.assertRaises(TypeError): + with pytest.raises(expected_exception=TypeError): configure_testgres(dummy=True) # we have only 1 config in stack - with self.assertRaises(IndexError): + with pytest.raises(expected_exception=IndexError): pop_config() d0 = TestgresConfig.cached_initdb_dir @@ -821,22 +828,22 @@ def test_config_stack(self): d2 = 'dummy_def' with scoped_config(cached_initdb_dir=d1) as c1: - self.assertEqual(c1.cached_initdb_dir, d1) + assert (c1.cached_initdb_dir == d1) with scoped_config(cached_initdb_dir=d2) as c2: stack_size = len(testgres.config.config_stack) # try to break a stack - with self.assertRaises(TypeError): + with pytest.raises(expected_exception=TypeError): with scoped_config(dummy=True): pass - self.assertEqual(c2.cached_initdb_dir, d2) - self.assertEqual(len(testgres.config.config_stack), stack_size) + assert (c2.cached_initdb_dir == d2) + assert (len(testgres.config.config_stack) == stack_size) - self.assertEqual(c1.cached_initdb_dir, d1) + assert (c1.cached_initdb_dir == d1) - self.assertEqual(TestgresConfig.cached_initdb_dir, d0) + assert (TestgresConfig.cached_initdb_dir == d0) def test_unix_sockets(self): with get_new_node() as node: @@ -854,17 +861,15 @@ def test_auto_name(self): with get_new_node().init(allow_streaming=True).start() as m: with m.replicate().start() as r: # check that nodes are running - self.assertTrue(m.status()) - self.assertTrue(r.status()) + assert (m.status()) + assert (r.status()) # check their names - self.assertNotEqual(m.name, r.name) - self.assertTrue('testgres' in m.name) - self.assertTrue('testgres' in r.name) + assert (m.name != r.name) + assert ('testgres' in m.name) + assert ('testgres' in r.name) def test_file_tail(self): - from testgres.utils import file_tail - s1 = "the quick brown fox jumped over that lazy dog\n" s2 = "abc\n" s3 = "def\n" @@ -879,13 +884,13 @@ def test_file_tail(self): f.seek(0) lines = file_tail(f, 3) - self.assertEqual(lines[0], s1) - self.assertEqual(lines[1], s2) - self.assertEqual(lines[2], s3) + assert (lines[0] == s1) + assert (lines[1] == s2) + assert (lines[2] == s3) f.seek(0) lines = file_tail(f, 1) - self.assertEqual(lines[0], s3) + assert (lines[0] == s3) def test_isolation_levels(self): with get_new_node().init().start() as node: @@ -903,24 +908,42 @@ def test_isolation_levels(self): con.begin(IsolationLevel.Serializable).commit() # check wrong level - with self.assertRaises(QueryException): + with 
pytest.raises(expected_exception=QueryException): con.begin('Garbage').commit() def test_ports_management(self): - # check that no ports have been bound yet - self.assertEqual(len(bound_ports), 0) + assert bound_ports is not None + assert type(bound_ports) == set # noqa: E721 + + if len(bound_ports) != 0: + logging.warning("bound_ports is not empty: {0}".format(bound_ports)) + + stage0__bound_ports = bound_ports.copy() with get_new_node() as node: - # check that we've just bound a port - self.assertEqual(len(bound_ports), 1) + assert bound_ports is not None + assert type(bound_ports) == set # noqa: E721 + + assert node.port is not None + assert type(node.port) == int # noqa: E721 + + logging.info("node port is {0}".format(node.port)) - # check that bound_ports contains our port - port_1 = list(bound_ports)[0] - port_2 = node.port - self.assertEqual(port_1, port_2) + assert node.port in bound_ports + assert node.port not in stage0__bound_ports + + assert stage0__bound_ports <= bound_ports + assert len(stage0__bound_ports) + 1 == len(bound_ports) + + stage1__bound_ports = stage0__bound_ports.copy() + stage1__bound_ports.add(node.port) + + assert stage1__bound_ports == bound_ports # check that port has been freed successfully - self.assertEqual(len(bound_ports), 0) + assert bound_ports is not None + assert type(bound_ports) == set # noqa: E721 + assert bound_ports == stage0__bound_ports def test_exceptions(self): str(StartNodeException('msg', [('file', 'lines')])) @@ -939,22 +962,22 @@ def test_version_management(self): g = PgVer('15.3.1bihabeta1') k = PgVer('15.3.1') - self.assertTrue(a == b) - self.assertTrue(b > c) - self.assertTrue(a > c) - self.assertTrue(d > e) - self.assertTrue(e > f) - self.assertTrue(d > f) - self.assertTrue(h > f) - self.assertTrue(h == i) - self.assertTrue(g == k) - self.assertTrue(g > h) + assert (a == b) + assert (b > c) + assert (a > c) + assert (d > e) + assert (e > f) + assert (d > f) + assert (h > f) + assert (h == i) + assert (g == k) + assert (g > h) version = get_pg_version() with get_new_node() as node: - self.assertTrue(isinstance(version, six.string_types)) - self.assertTrue(isinstance(node.version, PgVer)) - self.assertEqual(node.version, PgVer(version)) + assert (isinstance(version, six.string_types)) + assert (isinstance(node.version, PgVer)) + assert (node.version == PgVer(version)) def test_child_pids(self): master_processes = [ @@ -977,53 +1000,128 @@ def test_child_pids(self): ProcessType.WalReceiver, ] + def LOCAL__test_auxiliary_pids( + node: testgres.PostgresNode, + expectedTypes: list[ProcessType] + ) -> list[ProcessType]: + # returns list of the absence processes + assert node is not None + assert type(node) == testgres.PostgresNode # noqa: E721 + assert expectedTypes is not None + assert type(expectedTypes) == list # noqa: E721 + + pids = node.auxiliary_pids + assert pids is not None # noqa: E721 + assert type(pids) == dict # noqa: E721 + + result = list[ProcessType]() + for ptype in expectedTypes: + if not (ptype in pids): + result.append(ptype) + return result + + def LOCAL__check_auxiliary_pids__multiple_attempts( + node: testgres.PostgresNode, + expectedTypes: list[ProcessType]): + assert node is not None + assert type(node) == testgres.PostgresNode # noqa: E721 + assert expectedTypes is not None + assert type(expectedTypes) == list # noqa: E721 + + nAttempt = 0 + + while nAttempt < 5: + nAttempt += 1 + + logging.info("Test pids of [{0}] node. 
Attempt #{1}.".format( + node.name, + nAttempt + )) + + if nAttempt > 1: + time.sleep(1) + + absenceList = LOCAL__test_auxiliary_pids(node, expectedTypes) + assert absenceList is not None + assert type(absenceList) == list # noqa: E721 + if len(absenceList) == 0: + logging.info("Bingo!") + return + + logging.info("These processes are not found: {0}.".format(absenceList)) + continue + + raise Exception("Node {0} does not have the following processes: {1}.".format( + node.name, + absenceList + )) + with get_new_node().init().start() as master: # master node doesn't have a source walsender! - with self.assertRaises(TestgresException): + with pytest.raises(expected_exception=TestgresException): master.source_walsender with master.connect() as con: - self.assertGreater(con.pid, 0) + assert (con.pid > 0) with master.replicate().start() as replica: # test __str__ method str(master.child_processes[0]) - master_pids = master.auxiliary_pids - for ptype in master_processes: - self.assertIn(ptype, master_pids) + LOCAL__check_auxiliary_pids__multiple_attempts( + master, + master_processes) + + LOCAL__check_auxiliary_pids__multiple_attempts( + replica, + repl_processes) - replica_pids = replica.auxiliary_pids - for ptype in repl_processes: - self.assertIn(ptype, replica_pids) + master_pids = master.auxiliary_pids # there should be exactly 1 source walsender for replica - self.assertEqual(len(master_pids[ProcessType.WalSender]), 1) + assert (len(master_pids[ProcessType.WalSender]) == 1) pid1 = master_pids[ProcessType.WalSender][0] pid2 = replica.source_walsender.pid - self.assertEqual(pid1, pid2) + assert (pid1 == pid2) replica.stop() # there should be no walsender after we've stopped replica - with self.assertRaises(TestgresException): + with pytest.raises(expected_exception=TestgresException): replica.source_walsender def test_child_process_dies(self): # test for FileNotFound exception during child_processes() function cmd = ["timeout", "60"] if os.name == 'nt' else ["sleep", "60"] - with subprocess.Popen(cmd, shell=True) as process: # shell=True might be needed on Windows - self.assertEqual(process.poll(), None) - # collect list of processes currently running - children = psutil.Process(os.getpid()).children() - # kill a process, so received children dictionary becomes invalid - process.kill() - process.wait() - # try to handle children list -- missing processes will have ptype "ProcessType.Unknown" - [ProcessProxy(p) for p in children] + nAttempt = 0 + + while True: + if nAttempt == 5: + raise Exception("Max attempt number is exceed.") + + nAttempt += 1 + + logging.info("Attempt #{0}".format(nAttempt)) + + with subprocess.Popen(cmd, shell=True) as process: # shell=True might be needed on Windows + r = process.poll() + + if r is not None: + logging.warning("process.pool() returns an unexpected result: {0}.".format(r)) + continue + + assert r is None + # collect list of processes currently running + children = psutil.Process(os.getpid()).children() + # kill a process, so received children dictionary becomes invalid + process.kill() + process.wait() + # try to handle children list -- missing processes will have ptype "ProcessType.Unknown" + [ProcessProxy(p) for p in children] + break def test_upgrade_node(self): old_bin_dir = os.path.dirname(get_bin_path("pg_config")) @@ -1036,7 +1134,7 @@ def test_upgrade_node(self): node_new.init(cached=False) res = node_new.upgrade_from(old_node=node_old) node_new.start() - self.assertTrue(b'Upgrade Complete' in res) + assert (b'Upgrade Complete' in res) def 
test_parse_pg_version(self): # Linux Mint @@ -1051,25 +1149,26 @@ def test_parse_pg_version(self): def test_the_same_port(self): with get_new_node() as node: node.init().start() - self.assertTrue(node._should_free_port) - self.assertEqual(type(node.port), int) + assert (node._should_free_port) + assert (type(node.port) == int) # noqa: E721 node_port_copy = node.port - self.assertEqual(rm_carriage_returns(node.safe_psql("SELECT 1;")), b'1\n') + assert (rm_carriage_returns(node.safe_psql("SELECT 1;")) == b'1\n') with get_new_node(port=node.port) as node2: - self.assertEqual(type(node2.port), int) - self.assertEqual(node2.port, node.port) - self.assertFalse(node2._should_free_port) - - with self.assertRaises(StartNodeException) as ctx: + assert (type(node2.port) == int) # noqa: E721 + assert (node2.port == node.port) + assert not (node2._should_free_port) + + with pytest.raises( + expected_exception=StartNodeException, + match=re.escape("Cannot start node") + ): node2.init().start() - self.assertIn("Cannot start node", str(ctx.exception)) - # node is still working - self.assertEqual(node.port, node_port_copy) - self.assertTrue(node._should_free_port) - self.assertEqual(rm_carriage_returns(node.safe_psql("SELECT 3;")), b'3\n') + assert (node.port == node_port_copy) + assert (node._should_free_port) + assert (rm_carriage_returns(node.safe_psql("SELECT 3;")) == b'3\n') class tagPortManagerProxy: sm_prev_testgres_reserve_port = None @@ -1174,31 +1273,31 @@ def test_port_rereserve_during_node_start(self): with get_new_node() as node1: node1.init().start() - self.assertTrue(node1._should_free_port) - self.assertEqual(type(node1.port), int) # noqa: E721 + assert (node1._should_free_port) + assert (type(node1.port) == int) # noqa: E721 node1_port_copy = node1.port - self.assertEqual(rm_carriage_returns(node1.safe_psql("SELECT 1;")), b'1\n') + assert (rm_carriage_returns(node1.safe_psql("SELECT 1;")) == b'1\n') with __class__.tagPortManagerProxy(node1.port, C_COUNT_OF_BAD_PORT_USAGE): assert __class__.tagPortManagerProxy.sm_DummyPortNumber == node1.port with get_new_node() as node2: - self.assertTrue(node2._should_free_port) - self.assertEqual(node2.port, node1.port) + assert (node2._should_free_port) + assert (node2.port == node1.port) node2.init().start() - self.assertNotEqual(node2.port, node1.port) - self.assertTrue(node2._should_free_port) - self.assertEqual(__class__.tagPortManagerProxy.sm_DummyPortCurrentUsage, 0) - self.assertEqual(__class__.tagPortManagerProxy.sm_DummyPortTotalUsage, C_COUNT_OF_BAD_PORT_USAGE) - self.assertTrue(node2.is_started) + assert (node2.port != node1.port) + assert (node2._should_free_port) + assert (__class__.tagPortManagerProxy.sm_DummyPortCurrentUsage == 0) + assert (__class__.tagPortManagerProxy.sm_DummyPortTotalUsage == C_COUNT_OF_BAD_PORT_USAGE) + assert (node2.is_started) - self.assertEqual(rm_carriage_returns(node2.safe_psql("SELECT 2;")), b'2\n') + assert (rm_carriage_returns(node2.safe_psql("SELECT 2;")) == b'2\n') # node1 is still working - self.assertEqual(node1.port, node1_port_copy) - self.assertTrue(node1._should_free_port) - self.assertEqual(rm_carriage_returns(node1.safe_psql("SELECT 3;")), b'3\n') + assert (node1.port == node1_port_copy) + assert (node1._should_free_port) + assert (rm_carriage_returns(node1.safe_psql("SELECT 3;")) == b'3\n') def test_port_conflict(self): assert testgres.PostgresNode._C_MAX_START_ATEMPTS > 1 @@ -1207,35 +1306,36 @@ def test_port_conflict(self): with get_new_node() as node1: node1.init().start() - 
self.assertTrue(node1._should_free_port) - self.assertEqual(type(node1.port), int) # noqa: E721 + assert (node1._should_free_port) + assert (type(node1.port) == int) # noqa: E721 node1_port_copy = node1.port - self.assertEqual(rm_carriage_returns(node1.safe_psql("SELECT 1;")), b'1\n') + assert (rm_carriage_returns(node1.safe_psql("SELECT 1;")) == b'1\n') with __class__.tagPortManagerProxy(node1.port, C_COUNT_OF_BAD_PORT_USAGE): assert __class__.tagPortManagerProxy.sm_DummyPortNumber == node1.port with get_new_node() as node2: - self.assertTrue(node2._should_free_port) - self.assertEqual(node2.port, node1.port) + assert (node2._should_free_port) + assert (node2.port == node1.port) - with self.assertRaises(StartNodeException) as ctx: + with pytest.raises( + expected_exception=StartNodeException, + match=re.escape("Cannot start node after multiple attempts") + ): node2.init().start() - self.assertIn("Cannot start node after multiple attempts", str(ctx.exception)) - - self.assertEqual(node2.port, node1.port) - self.assertTrue(node2._should_free_port) - self.assertEqual(__class__.tagPortManagerProxy.sm_DummyPortCurrentUsage, 1) - self.assertEqual(__class__.tagPortManagerProxy.sm_DummyPortTotalUsage, C_COUNT_OF_BAD_PORT_USAGE) - self.assertFalse(node2.is_started) + assert (node2.port == node1.port) + assert (node2._should_free_port) + assert (__class__.tagPortManagerProxy.sm_DummyPortCurrentUsage == 1) + assert (__class__.tagPortManagerProxy.sm_DummyPortTotalUsage == C_COUNT_OF_BAD_PORT_USAGE) + assert not (node2.is_started) # node2 must release our dummyPort (node1.port) - self.assertEqual(__class__.tagPortManagerProxy.sm_DummyPortCurrentUsage, 0) + assert (__class__.tagPortManagerProxy.sm_DummyPortCurrentUsage == 0) # node1 is still working - self.assertEqual(node1.port, node1_port_copy) - self.assertTrue(node1._should_free_port) - self.assertEqual(rm_carriage_returns(node1.safe_psql("SELECT 3;")), b'3\n') + assert (node1.port == node1_port_copy) + assert (node1._should_free_port) + assert (rm_carriage_returns(node1.safe_psql("SELECT 3;")) == b'3\n') def test_simple_with_bin_dir(self): with get_new_node() as node: @@ -1300,29 +1400,28 @@ def test_set_auto_conf(self): content = f.read() for x in testData: - self.assertIn( - x[0] + " = " + x[2], - content, - x[0] + " stored wrong" - ) - - -if __name__ == '__main__': - if os.environ.get('ALT_CONFIG'): - suite = unittest.TestSuite() - - # Small subset of tests for alternative configs (PG_BIN or PG_CONFIG) - suite.addTest(TestgresTests('test_pg_config')) - suite.addTest(TestgresTests('test_pg_ctl')) - suite.addTest(TestgresTests('test_psql')) - suite.addTest(TestgresTests('test_replicate')) - - print('Running tests for alternative config:') - for t in suite: - print(t) - print() - - runner = unittest.TextTestRunner() - runner.run(suite) - else: - unittest.main() + assert x[0] + " = " + x[2] in content + + @staticmethod + def helper__skip_test_if_util_not_exist(name: str): + assert type(name) == str # noqa: E721 + + if platform.system().lower() == "windows": + name2 = name + ".exe" + else: + name2 = name + + if not util_exists(name2): + pytest.skip('might be missing') + + @staticmethod + def helper__skip_test_if_pg_version_is_not_ge(version: str): + assert type(version) == str # noqa: E721 + if not pg_version_ge(version): + pytest.skip('requires {0}+'.format(version)) + + @staticmethod + def helper__skip_test_if_pg_version_is_ge(version: str): + assert type(version) == str # noqa: E721 + if pg_version_ge(version): + pytest.skip('requires 
<{0}'.format(version)) diff --git a/tests/test_simple_remote.py b/tests/test_simple_remote.py index 8b44623a..e7cc5e5c 100755 --- a/tests/test_simple_remote.py +++ b/tests/test_simple_remote.py @@ -1,22 +1,21 @@ -#!/usr/bin/env python # coding: utf-8 - import os import re import subprocess import tempfile -import testgres import time import six -import unittest +import pytest import psutil import logging.config from contextlib import contextmanager -from testgres.exceptions import \ +from .. import testgres + +from ..testgres.exceptions import \ InitNodeException, \ StartNodeException, \ ExecUtilException, \ @@ -26,38 +25,33 @@ TestgresException, \ InvalidOperationException -from testgres.config import \ +from ..testgres.config import \ TestgresConfig, \ configure_testgres, \ scoped_config, \ pop_config, testgres_config -from testgres import \ +from ..testgres import \ NodeStatus, \ ProcessType, \ IsolationLevel, \ get_remote_node, \ RemoteOperations -from testgres import \ +from ..testgres import \ get_bin_path, \ get_pg_config, \ get_pg_version -from testgres import \ +from ..testgres import \ First, \ Any # NOTE: those are ugly imports -from testgres import bound_ports -from testgres.utils import PgVer -from testgres.node import ProcessProxy, ConnectionParams - -conn_params = ConnectionParams(host=os.getenv('RDBMS_TESTPOOL1_HOST') or '127.0.0.1', - username=os.getenv('USER'), - ssh_key=os.getenv('RDBMS_TESTPOOL_SSHKEY')) -os_ops = RemoteOperations(conn_params) -testgres_config.set_os_ops(os_ops=os_ops) +from ..testgres import bound_ports +from ..testgres.utils import PgVer +from ..testgres.utils import file_tail +from ..testgres.node import ProcessProxy, ConnectionParams def pg_version_ge(version): @@ -68,16 +62,16 @@ def pg_version_ge(version): def util_exists(util): def good_properties(f): - return (os_ops.path_exists(f) and # noqa: W504 - os_ops.isfile(f) and # noqa: W504 - os_ops.is_executable(f)) # yapf: disable + return (testgres_config.os_ops.path_exists(f) and # noqa: W504 + testgres_config.os_ops.isfile(f) and # noqa: W504 + testgres_config.os_ops.is_executable(f)) # yapf: disable # try to resolve it if good_properties(get_bin_path(util)): return True # check if util is in PATH - for path in os_ops.environ("PATH").split(os_ops.pathsep): + for path in testgres_config.os_ops.environ("PATH").split(testgres_config.os_ops.pathsep): if good_properties(os.path.join(path, util)): return True @@ -87,37 +81,56 @@ def removing(f): try: yield f finally: - if os_ops.isfile(f): - os_ops.remove_file(f) + if testgres_config.os_ops.isfile(f): + testgres_config.os_ops.remove_file(f) + + elif testgres_config.os_ops.isdir(f): + testgres_config.os_ops.rmdirs(f, ignore_errors=True) - elif os_ops.isdir(f): - os_ops.rmdirs(f, ignore_errors=True) +class TestgresRemoteTests: + sm_conn_params = ConnectionParams( + host=os.getenv('RDBMS_TESTPOOL1_HOST') or '127.0.0.1', + username=os.getenv('USER'), + ssh_key=os.getenv('RDBMS_TESTPOOL_SSHKEY')) + + sm_os_ops = RemoteOperations(sm_conn_params) + + @pytest.fixture(autouse=True, scope="class") + def implicit_fixture(self): + prev_ops = testgres_config.os_ops + assert prev_ops is not None + assert __class__.sm_os_ops is not None + testgres_config.set_os_ops(os_ops=__class__.sm_os_ops) + assert testgres_config.os_ops is __class__.sm_os_ops + yield + assert testgres_config.os_ops is __class__.sm_os_ops + testgres_config.set_os_ops(os_ops=prev_ops) + assert testgres_config.os_ops is prev_ops -class TestgresRemoteTests(unittest.TestCase): def test_node_repr(self): 
- with get_remote_node(conn_params=conn_params) as node: + with __class__.helper__get_node() as node: pattern = r"PostgresNode\(name='.+', port=.+, base_dir='.+'\)" - self.assertIsNotNone(re.match(pattern, str(node))) + assert re.match(pattern, str(node)) is not None def test_custom_init(self): - with get_remote_node(conn_params=conn_params) as node: + with __class__.helper__get_node() as node: # enable page checksums node.init(initdb_params=['-k']).start() - with get_remote_node(conn_params=conn_params) as node: + with __class__.helper__get_node() as node: node.init( allow_streaming=True, initdb_params=['--auth-local=reject', '--auth-host=reject']) hba_file = os.path.join(node.data_dir, 'pg_hba.conf') - lines = os_ops.readlines(hba_file) + lines = node.os_ops.readlines(hba_file) # check number of lines - self.assertGreaterEqual(len(lines), 6) + assert (len(lines) >= 6) # there should be no trust entries at all - self.assertFalse(any('trust' in s for s in lines)) + assert not (any('trust' in s for s in lines)) def test_init__LANG_С(self): # PBCKP-1744 @@ -126,7 +139,7 @@ def test_init__LANG_С(self): try: os.environ["LANG"] = "C" - with get_remote_node(conn_params=conn_params) as node: + with __class__.helper__get_node() as node: node.init().start() finally: __class__.helper__restore_envvar("LANG", prev_LANG) @@ -167,9 +180,9 @@ def test_init__unk_LANG_and_LC_CTYPE(self): while True: try: - with get_remote_node(conn_params=conn_params): + with __class__.helper__get_node(): pass - except testgres.exceptions.ExecUtilException as e: + except ExecUtilException as e: # # Example of an error message: # @@ -193,88 +206,89 @@ def test_init__unk_LANG_and_LC_CTYPE(self): __class__.helper__restore_envvar("LC_COLLATE", prev_LC_COLLATE) def test_double_init(self): - with get_remote_node(conn_params=conn_params).init() as node: + with __class__.helper__get_node().init() as node: # can't initialize node more than once - with self.assertRaises(InitNodeException): + with pytest.raises(expected_exception=InitNodeException): node.init() def test_init_after_cleanup(self): - with get_remote_node(conn_params=conn_params) as node: + with __class__.helper__get_node() as node: node.init().start().execute('select 1') node.cleanup() node.init().start().execute('select 1') - @unittest.skipUnless(util_exists('pg_resetwal'), 'might be missing') - @unittest.skipUnless(pg_version_ge('9.6'), 'requires 9.6+') def test_init_unique_system_id(self): # this function exists in PostgreSQL 9.6+ + __class__.helper__skip_test_if_util_not_exist("pg_resetwal") + __class__.helper__skip_test_if_pg_version_is_not_ge('9.6') + query = 'select system_identifier from pg_control_system()' with scoped_config(cache_initdb=False): - with get_remote_node(conn_params=conn_params).init().start() as node0: + with __class__.helper__get_node().init().start() as node0: id0 = node0.execute(query)[0] with scoped_config(cache_initdb=True, cached_initdb_unique=True) as config: - self.assertTrue(config.cache_initdb) - self.assertTrue(config.cached_initdb_unique) + assert (config.cache_initdb) + assert (config.cached_initdb_unique) # spawn two nodes; ids must be different - with get_remote_node(conn_params=conn_params).init().start() as node1, \ - get_remote_node(conn_params=conn_params).init().start() as node2: + with __class__.helper__get_node().init().start() as node1, \ + __class__.helper__get_node().init().start() as node2: id1 = node1.execute(query)[0] id2 = node2.execute(query)[0] # ids must increase - self.assertGreater(id1, id0) - 
self.assertGreater(id2, id1) + assert (id1 > id0) + assert (id2 > id1) def test_node_exit(self): - with self.assertRaises(QueryException): - with get_remote_node(conn_params=conn_params).init() as node: + with pytest.raises(expected_exception=QueryException): + with __class__.helper__get_node().init() as node: base_dir = node.base_dir node.safe_psql('select 1') # we should save the DB for "debugging" - self.assertTrue(os_ops.path_exists(base_dir)) - os_ops.rmdirs(base_dir, ignore_errors=True) + assert (__class__.sm_os_ops.path_exists(base_dir)) + __class__.sm_os_ops.rmdirs(base_dir, ignore_errors=True) - with get_remote_node(conn_params=conn_params).init() as node: + with __class__.helper__get_node().init() as node: base_dir = node.base_dir # should have been removed by default - self.assertFalse(os_ops.path_exists(base_dir)) + assert not (__class__.sm_os_ops.path_exists(base_dir)) def test_double_start(self): - with get_remote_node(conn_params=conn_params).init().start() as node: + with __class__.helper__get_node().init().start() as node: # can't start node more than once node.start() - self.assertTrue(node.is_started) + assert (node.is_started) def test_uninitialized_start(self): - with get_remote_node(conn_params=conn_params) as node: + with __class__.helper__get_node() as node: # node is not initialized yet - with self.assertRaises(StartNodeException): + with pytest.raises(expected_exception=StartNodeException): node.start() def test_restart(self): - with get_remote_node(conn_params=conn_params) as node: + with __class__.helper__get_node() as node: node.init().start() # restart, ok res = node.execute('select 1') - self.assertEqual(res, [(1,)]) + assert (res == [(1,)]) node.restart() res = node.execute('select 2') - self.assertEqual(res, [(2,)]) + assert (res == [(2,)]) # restart, fail - with self.assertRaises(StartNodeException): + with pytest.raises(expected_exception=StartNodeException): node.append_conf('pg_hba.conf', 'DUMMY') node.restart() def test_reload(self): - with get_remote_node(conn_params=conn_params) as node: + with __class__.helper__get_node() as node: node.init().start() # change client_min_messages and save old value @@ -286,108 +300,109 @@ def test_reload(self): # check new value cmm_new = node.execute('show client_min_messages') - self.assertEqual('debug1', cmm_new[0][0].lower()) - self.assertNotEqual(cmm_old, cmm_new) + assert ('debug1' == cmm_new[0][0].lower()) + assert (cmm_old != cmm_new) def test_pg_ctl(self): - with get_remote_node(conn_params=conn_params) as node: + with __class__.helper__get_node() as node: node.init().start() status = node.pg_ctl(['status']) - self.assertTrue('PID' in status) + assert ('PID' in status) def test_status(self): - self.assertTrue(NodeStatus.Running) - self.assertFalse(NodeStatus.Stopped) - self.assertFalse(NodeStatus.Uninitialized) + assert (NodeStatus.Running) + assert not (NodeStatus.Stopped) + assert not (NodeStatus.Uninitialized) # check statuses after each operation - with get_remote_node(conn_params=conn_params) as node: - self.assertEqual(node.pid, 0) - self.assertEqual(node.status(), NodeStatus.Uninitialized) + with __class__.helper__get_node() as node: + assert (node.pid == 0) + assert (node.status() == NodeStatus.Uninitialized) node.init() - self.assertEqual(node.pid, 0) - self.assertEqual(node.status(), NodeStatus.Stopped) + assert (node.pid == 0) + assert (node.status() == NodeStatus.Stopped) node.start() - self.assertNotEqual(node.pid, 0) - self.assertEqual(node.status(), NodeStatus.Running) + assert (node.pid != 0) 
+ assert (node.status() == NodeStatus.Running) node.stop() - self.assertEqual(node.pid, 0) - self.assertEqual(node.status(), NodeStatus.Stopped) + assert (node.pid == 0) + assert (node.status() == NodeStatus.Stopped) node.cleanup() - self.assertEqual(node.pid, 0) - self.assertEqual(node.status(), NodeStatus.Uninitialized) + assert (node.pid == 0) + assert (node.status() == NodeStatus.Uninitialized) def test_psql(self): - with get_remote_node(conn_params=conn_params).init().start() as node: + with __class__.helper__get_node().init().start() as node: # check returned values (1 arg) res = node.psql('select 1') - self.assertEqual(res, (0, b'1\n', b'')) + assert (res == (0, b'1\n', b'')) # check returned values (2 args) res = node.psql('postgres', 'select 2') - self.assertEqual(res, (0, b'2\n', b'')) + assert (res == (0, b'2\n', b'')) # check returned values (named) res = node.psql(query='select 3', dbname='postgres') - self.assertEqual(res, (0, b'3\n', b'')) + assert (res == (0, b'3\n', b'')) # check returned values (1 arg) res = node.safe_psql('select 4') - self.assertEqual(res, b'4\n') + assert (res == b'4\n') # check returned values (2 args) res = node.safe_psql('postgres', 'select 5') - self.assertEqual(res, b'5\n') + assert (res == b'5\n') # check returned values (named) res = node.safe_psql(query='select 6', dbname='postgres') - self.assertEqual(res, b'6\n') + assert (res == b'6\n') # check feeding input node.safe_psql('create table horns (w int)') node.safe_psql('copy horns from stdin (format csv)', input=b"1\n2\n3\n\\.\n") _sum = node.safe_psql('select sum(w) from horns') - self.assertEqual(_sum, b'6\n') + assert (_sum == b'6\n') # check psql's default args, fails - with self.assertRaises(QueryException): + with pytest.raises(expected_exception=QueryException): node.psql() node.stop() # check psql on stopped node, fails - with self.assertRaises(QueryException): + with pytest.raises(expected_exception=QueryException): node.safe_psql('select 1') def test_safe_psql__expect_error(self): - with get_remote_node(conn_params=conn_params).init().start() as node: + with __class__.helper__get_node().init().start() as node: err = node.safe_psql('select_or_not_select 1', expect_error=True) - self.assertTrue(type(err) == str) # noqa: E721 - self.assertIn('select_or_not_select', err) - self.assertIn('ERROR: syntax error at or near "select_or_not_select"', err) + assert (type(err) == str) # noqa: E721 + assert ('select_or_not_select' in err) + assert ('ERROR: syntax error at or near "select_or_not_select"' in err) # --------- - with self.assertRaises(InvalidOperationException) as ctx: + with pytest.raises( + expected_exception=InvalidOperationException, + match="^" + re.escape("Exception was expected, but query finished successfully: `select 1;`.") + "$" + ): node.safe_psql("select 1;", expect_error=True) - self.assertEqual(str(ctx.exception), "Exception was expected, but query finished successfully: `select 1;`.") - # --------- res = node.safe_psql("select 1;", expect_error=False) - self.assertEqual(res, b'1\n') + assert (res == b'1\n') def test_transactions(self): - with get_remote_node(conn_params=conn_params).init().start() as node: + with __class__.helper__get_node().init().start() as node: with node.connect() as con: con.begin() con.execute('create table test(val int)') @@ -397,12 +412,12 @@ def test_transactions(self): con.begin() con.execute('insert into test values (2)') res = con.execute('select * from test order by val asc') - self.assertListEqual(res, [(1,), (2,)]) + assert (res == [(1,), 
(2,)]) con.rollback() con.begin() res = con.execute('select * from test') - self.assertListEqual(res, [(1,)]) + assert (res == [(1,)]) con.rollback() con.begin() @@ -410,25 +425,25 @@ def test_transactions(self): con.commit() def test_control_data(self): - with get_remote_node(conn_params=conn_params) as node: + with __class__.helper__get_node() as node: # node is not initialized yet - with self.assertRaises(ExecUtilException): + with pytest.raises(expected_exception=ExecUtilException): node.get_control_data() node.init() data = node.get_control_data() # check returned dict - self.assertIsNotNone(data) - self.assertTrue(any('pg_control' in s for s in data.keys())) + assert data is not None + assert (any('pg_control' in s for s in data.keys())) def test_backup_simple(self): - with get_remote_node(conn_params=conn_params) as master: + with __class__.helper__get_node() as master: # enable streaming for backups master.init(allow_streaming=True) # node must be running - with self.assertRaises(BackupException): + with pytest.raises(expected_exception=BackupException): master.backup() # it's time to start node @@ -440,23 +455,23 @@ def test_backup_simple(self): with master.backup(xlog_method='stream') as backup: with backup.spawn_primary().start() as slave: res = slave.execute('select * from test order by i asc') - self.assertListEqual(res, [(1,), (2,), (3,), (4,)]) + assert (res == [(1,), (2,), (3,), (4,)]) def test_backup_multiple(self): - with get_remote_node(conn_params=conn_params) as node: + with __class__.helper__get_node() as node: node.init(allow_streaming=True).start() with node.backup(xlog_method='fetch') as backup1, \ node.backup(xlog_method='fetch') as backup2: - self.assertNotEqual(backup1.base_dir, backup2.base_dir) + assert (backup1.base_dir != backup2.base_dir) with node.backup(xlog_method='fetch') as backup: with backup.spawn_primary('node1', destroy=False) as node1, \ backup.spawn_primary('node2', destroy=False) as node2: - self.assertNotEqual(node1.base_dir, node2.base_dir) + assert (node1.base_dir != node2.base_dir) def test_backup_exhaust(self): - with get_remote_node(conn_params=conn_params) as node: + with __class__.helper__get_node() as node: node.init(allow_streaming=True).start() with node.backup(xlog_method='fetch') as backup: @@ -465,19 +480,21 @@ def test_backup_exhaust(self): pass # now let's try to create one more node - with self.assertRaises(BackupException): + with pytest.raises(expected_exception=BackupException): backup.spawn_primary() def test_backup_wrong_xlog_method(self): - with get_remote_node(conn_params=conn_params) as node: + with __class__.helper__get_node() as node: node.init(allow_streaming=True).start() - with self.assertRaises(BackupException, - msg='Invalid xlog_method "wrong"'): + with pytest.raises( + expected_exception=BackupException, + match="^" + re.escape('Invalid xlog_method "wrong"') + "$" + ): node.backup(xlog_method='wrong') def test_pg_ctl_wait_option(self): - with get_remote_node(conn_params=conn_params) as node: + with __class__.helper__get_node() as node: node.init().start(wait=False) while True: try: @@ -489,23 +506,24 @@ def test_pg_ctl_wait_option(self): pass def test_replicate(self): - with get_remote_node(conn_params=conn_params) as node: + with __class__.helper__get_node() as node: node.init(allow_streaming=True).start() with node.replicate().start() as replica: res = replica.execute('select 1') - self.assertListEqual(res, [(1,)]) + assert (res == [(1,)]) node.execute('create table test (val int)', commit=True) 
replica.catchup() res = node.execute('select * from test') - self.assertListEqual(res, []) + assert (res == []) - @unittest.skipUnless(pg_version_ge('9.6'), 'requires 9.6+') def test_synchronous_replication(self): - with get_remote_node(conn_params=conn_params) as master: + __class__.helper__skip_test_if_pg_version_is_not_ge("9.6") + + with __class__.helper__get_node() as master: old_version = not pg_version_ge('9.6') master.init(allow_streaming=True).start() @@ -519,12 +537,12 @@ def test_synchronous_replication(self): standby2.start() # check formatting - self.assertEqual( - '1 ("{}", "{}")'.format(standby1.name, standby2.name), - str(First(1, (standby1, standby2)))) # yapf: disable - self.assertEqual( - 'ANY 1 ("{}", "{}")'.format(standby1.name, standby2.name), - str(Any(1, (standby1, standby2)))) # yapf: disable + assert ( + '1 ("{}", "{}")'.format(standby1.name, standby2.name) == str(First(1, (standby1, standby2))) + ) # yapf: disable + assert ( + 'ANY 1 ("{}", "{}")'.format(standby1.name, standby2.name) == str(Any(1, (standby1, standby2))) + ) # yapf: disable # set synchronous_standby_names master.set_synchronous_standbys(First(2, [standby1, standby2])) @@ -542,11 +560,12 @@ def test_synchronous_replication(self): master.safe_psql( 'insert into abc select generate_series(1, 1000000)') res = standby1.safe_psql('select count(*) from abc') - self.assertEqual(res, b'1000000\n') + assert (res == b'1000000\n') - @unittest.skipUnless(pg_version_ge('10'), 'requires 10+') def test_logical_replication(self): - with get_remote_node(conn_params=conn_params) as node1, get_remote_node(conn_params=conn_params) as node2: + __class__.helper__skip_test_if_pg_version_is_not_ge("10") + + with __class__.helper__get_node() as node1, __class__.helper__get_node() as node2: node1.init(allow_logical=True) node1.start() node2.init().start() @@ -564,7 +583,7 @@ def test_logical_replication(self): # wait until changes apply on subscriber and check them sub.catchup() res = node2.execute('select * from test') - self.assertListEqual(res, [(1, 1), (2, 2)]) + assert (res == [(1, 1), (2, 2)]) # disable and put some new data sub.disable() @@ -574,7 +593,7 @@ def test_logical_replication(self): sub.enable() sub.catchup() res = node2.execute('select * from test') - self.assertListEqual(res, [(1, 1), (2, 2), (3, 3)]) + assert (res == [(1, 1), (2, 2), (3, 3)]) # Add new tables. 
Since we added "all tables" to publication # (default behaviour of publish() method) we don't need @@ -588,7 +607,7 @@ def test_logical_replication(self): node1.safe_psql('insert into test2 values (\'a\'), (\'b\')') sub.catchup() res = node2.execute('select * from test2') - self.assertListEqual(res, [('a',), ('b',)]) + assert (res == [('a',), ('b',)]) # drop subscription sub.drop() @@ -602,21 +621,22 @@ def test_logical_replication(self): node1.safe_psql('insert into test values (4, 4)') sub.catchup() res = node2.execute('select * from test') - self.assertListEqual(res, [(1, 1), (2, 2), (3, 3), (4, 4)]) + assert (res == [(1, 1), (2, 2), (3, 3), (4, 4)]) # explicitly add table - with self.assertRaises(ValueError): + with pytest.raises(expected_exception=ValueError): pub.add_tables([]) # fail pub.add_tables(['test2']) node1.safe_psql('insert into test2 values (\'c\')') sub.catchup() res = node2.execute('select * from test2') - self.assertListEqual(res, [('a',), ('b',)]) + assert (res == [('a',), ('b',)]) - @unittest.skipUnless(pg_version_ge('10'), 'requires 10+') def test_logical_catchup(self): """ Runs catchup for 100 times to be sure that it is consistent """ - with get_remote_node(conn_params=conn_params) as node1, get_remote_node(conn_params=conn_params) as node2: + __class__.helper__skip_test_if_pg_version_is_not_ge("10") + + with __class__.helper__get_node() as node1, __class__.helper__get_node() as node2: node1.init(allow_logical=True) node1.start() node2.init().start() @@ -633,39 +653,37 @@ def test_logical_catchup(self): node1.execute('insert into test values ({0}, {0})'.format(i)) sub.catchup() res = node2.execute('select * from test') - self.assertListEqual(res, [( - i, - i, - )]) + assert (res == [(i, i, )]) node1.execute('delete from test') - @unittest.skipIf(pg_version_ge('10'), 'requires <10') def test_logical_replication_fail(self): - with get_remote_node(conn_params=conn_params) as node: - with self.assertRaises(InitNodeException): + __class__.helper__skip_test_if_pg_version_is_ge("10") + + with __class__.helper__get_node() as node: + with pytest.raises(expected_exception=InitNodeException): node.init(allow_logical=True) def test_replication_slots(self): - with get_remote_node(conn_params=conn_params) as node: + with __class__.helper__get_node() as node: node.init(allow_streaming=True).start() with node.replicate(slot='slot1').start() as replica: replica.execute('select 1') # cannot create new slot with the same name - with self.assertRaises(TestgresException): + with pytest.raises(expected_exception=TestgresException): node.replicate(slot='slot1') def test_incorrect_catchup(self): - with get_remote_node(conn_params=conn_params) as node: + with __class__.helper__get_node() as node: node.init(allow_streaming=True).start() # node has no master, can't catch up - with self.assertRaises(TestgresException): + with pytest.raises(expected_exception=TestgresException): node.catchup() def test_promotion(self): - with get_remote_node(conn_params=conn_params) as master: + with __class__.helper__get_node() as master: master.init().start() master.safe_psql('create table abc(id serial)') @@ -676,35 +694,35 @@ def test_promotion(self): # make standby becomes writable master replica.safe_psql('insert into abc values (1)') res = replica.safe_psql('select * from abc') - self.assertEqual(res, b'1\n') + assert (res == b'1\n') def test_dump(self): query_create = 'create table test as select generate_series(1, 2) as val' query_select = 'select * from test order by val asc' - with 
get_remote_node(conn_params=conn_params).init().start() as node1: + with __class__.helper__get_node().init().start() as node1: node1.execute(query_create) for format in ['plain', 'custom', 'directory', 'tar']: with removing(node1.dump(format=format)) as dump: - with get_remote_node(conn_params=conn_params).init().start() as node3: + with __class__.helper__get_node().init().start() as node3: if format == 'directory': - self.assertTrue(os_ops.isdir(dump)) + assert (node1.os_ops.isdir(dump)) else: - self.assertTrue(os_ops.isfile(dump)) + assert (node1.os_ops.isfile(dump)) # restore dump node3.restore(filename=dump) res = node3.execute(query_select) - self.assertListEqual(res, [(1,), (2,)]) + assert (res == [(1,), (2,)]) def test_users(self): - with get_remote_node(conn_params=conn_params).init().start() as node: + with __class__.helper__get_node().init().start() as node: node.psql('create role test_user login') value = node.safe_psql('select 1', username='test_user') - self.assertEqual(b'1\n', value) + assert (b'1\n' == value) def test_poll_query_until(self): - with get_remote_node(conn_params=conn_params) as node: + with __class__.helper__get_node() as node: node.init().start() get_time = 'select extract(epoch from now())' @@ -714,15 +732,15 @@ def test_poll_query_until(self): node.poll_query_until(query=check_time.format(start_time)) end_time = node.execute(get_time)[0][0] - self.assertTrue(end_time - start_time >= 5) + assert (end_time - start_time >= 5) # check 0 columns - with self.assertRaises(QueryException): + with pytest.raises(expected_exception=QueryException): node.poll_query_until( query='select from pg_catalog.pg_class limit 1') # check None, fail - with self.assertRaises(QueryException): + with pytest.raises(expected_exception=QueryException): node.poll_query_until(query='create table abc (val int)') # check None, ok @@ -735,7 +753,7 @@ def test_poll_query_until(self): expected=None) # check arbitrary expected value, fail - with self.assertRaises(TimeoutException): + with pytest.raises(expected_exception=TimeoutException): node.poll_query_until(query='select 3', expected=1, max_attempts=3, @@ -745,17 +763,17 @@ def test_poll_query_until(self): node.poll_query_until(query='select 2', expected=2) # check timeout - with self.assertRaises(TimeoutException): + with pytest.raises(expected_exception=TimeoutException): node.poll_query_until(query='select 1 > 2', max_attempts=3, sleep_time=0.01) # check ProgrammingError, fail - with self.assertRaises(testgres.ProgrammingError): + with pytest.raises(expected_exception=testgres.ProgrammingError): node.poll_query_until(query='dummy1') # check ProgrammingError, ok - with self.assertRaises(TimeoutException): + with pytest.raises(expected_exception=TimeoutException): node.poll_query_until(query='dummy2', max_attempts=3, sleep_time=0.01, @@ -808,17 +826,18 @@ def test_logging(self): # check that master's port is found with open(logfile.name, 'r') as log: lines = log.readlines() - self.assertTrue(any(node_name in s for s in lines)) + assert (any(node_name in s for s in lines)) # test logger after stop/start/restart master.stop() master.start() master.restart() - self.assertTrue(master._logger.is_alive()) + assert (master._logger.is_alive()) - @unittest.skipUnless(util_exists('pgbench'), 'might be missing') def test_pgbench(self): - with get_remote_node(conn_params=conn_params).init().start() as node: + __class__.helper__skip_test_if_util_not_exist("pgbench") + + with __class__.helper__get_node().init().start() as node: # initialize pgbench DB 
and run benchmarks node.pgbench_init(scale=2, foreign_keys=True, options=['-q']).pgbench_run(time=2) @@ -828,13 +847,13 @@ def test_pgbench(self): stderr=subprocess.STDOUT, options=['-T3']) out = proc.communicate()[0] - self.assertTrue(b'tps = ' in out) + assert (b'tps = ' in out) def test_pg_config(self): # check same instances a = get_pg_config() b = get_pg_config() - self.assertEqual(id(a), id(b)) + assert (id(a) == id(b)) # save right before config change c1 = get_pg_config() @@ -842,26 +861,26 @@ def test_pg_config(self): # modify setting for this scope with scoped_config(cache_pg_config=False) as config: # sanity check for value - self.assertFalse(config.cache_pg_config) + assert not (config.cache_pg_config) # save right after config change c2 = get_pg_config() # check different instances after config change - self.assertNotEqual(id(c1), id(c2)) + assert (id(c1) != id(c2)) # check different instances a = get_pg_config() b = get_pg_config() - self.assertNotEqual(id(a), id(b)) + assert (id(a) != id(b)) def test_config_stack(self): # no such option - with self.assertRaises(TypeError): + with pytest.raises(expected_exception=TypeError): configure_testgres(dummy=True) # we have only 1 config in stack - with self.assertRaises(IndexError): + with pytest.raises(expected_exception=IndexError): pop_config() d0 = TestgresConfig.cached_initdb_dir @@ -869,54 +888,52 @@ def test_config_stack(self): d2 = 'dummy_def' with scoped_config(cached_initdb_dir=d1) as c1: - self.assertEqual(c1.cached_initdb_dir, d1) + assert (c1.cached_initdb_dir == d1) with scoped_config(cached_initdb_dir=d2) as c2: stack_size = len(testgres.config.config_stack) # try to break a stack - with self.assertRaises(TypeError): + with pytest.raises(expected_exception=TypeError): with scoped_config(dummy=True): pass - self.assertEqual(c2.cached_initdb_dir, d2) - self.assertEqual(len(testgres.config.config_stack), stack_size) + assert (c2.cached_initdb_dir == d2) + assert (len(testgres.config.config_stack) == stack_size) - self.assertEqual(c1.cached_initdb_dir, d1) + assert (c1.cached_initdb_dir == d1) - self.assertEqual(TestgresConfig.cached_initdb_dir, d0) + assert (TestgresConfig.cached_initdb_dir == d0) def test_unix_sockets(self): - with get_remote_node(conn_params=conn_params) as node: + with __class__.helper__get_node() as node: node.init(unix_sockets=False, allow_streaming=True) node.start() res_exec = node.execute('select 1') res_psql = node.safe_psql('select 1') - self.assertEqual(res_exec, [(1,)]) - self.assertEqual(res_psql, b'1\n') + assert (res_exec == [(1,)]) + assert (res_psql == b'1\n') with node.replicate().start() as r: res_exec = r.execute('select 1') res_psql = r.safe_psql('select 1') - self.assertEqual(res_exec, [(1,)]) - self.assertEqual(res_psql, b'1\n') + assert (res_exec == [(1,)]) + assert (res_psql == b'1\n') def test_auto_name(self): - with get_remote_node(conn_params=conn_params).init(allow_streaming=True).start() as m: + with __class__.helper__get_node().init(allow_streaming=True).start() as m: with m.replicate().start() as r: # check that nodes are running - self.assertTrue(m.status()) - self.assertTrue(r.status()) + assert (m.status()) + assert (r.status()) # check their names - self.assertNotEqual(m.name, r.name) - self.assertTrue('testgres' in m.name) - self.assertTrue('testgres' in r.name) + assert (m.name != r.name) + assert ('testgres' in m.name) + assert ('testgres' in r.name) def test_file_tail(self): - from testgres.utils import file_tail - s1 = "the quick brown fox jumped over that lazy 
dog\n" s2 = "abc\n" s3 = "def\n" @@ -931,16 +948,16 @@ def test_file_tail(self): f.seek(0) lines = file_tail(f, 3) - self.assertEqual(lines[0], s1) - self.assertEqual(lines[1], s2) - self.assertEqual(lines[2], s3) + assert (lines[0] == s1) + assert (lines[1] == s2) + assert (lines[2] == s3) f.seek(0) lines = file_tail(f, 1) - self.assertEqual(lines[0], s3) + assert (lines[0] == s3) def test_isolation_levels(self): - with get_remote_node(conn_params=conn_params).init().start() as node: + with __class__.helper__get_node().init().start() as node: with node.connect() as con: # string levels con.begin('Read Uncommitted').commit() @@ -955,24 +972,24 @@ def test_isolation_levels(self): con.begin(IsolationLevel.Serializable).commit() # check wrong level - with self.assertRaises(QueryException): + with pytest.raises(expected_exception=QueryException): con.begin('Garbage').commit() def test_ports_management(self): # check that no ports have been bound yet - self.assertEqual(len(bound_ports), 0) + assert (len(bound_ports) == 0) - with get_remote_node(conn_params=conn_params) as node: + with __class__.helper__get_node() as node: # check that we've just bound a port - self.assertEqual(len(bound_ports), 1) + assert (len(bound_ports) == 1) # check that bound_ports contains our port port_1 = list(bound_ports)[0] port_2 = node.port - self.assertEqual(port_1, port_2) + assert (port_1 == port_2) # check that port has been freed successfully - self.assertEqual(len(bound_ports), 0) + assert (len(bound_ports) == 0) def test_exceptions(self): str(StartNodeException('msg', [('file', 'lines')])) @@ -987,18 +1004,18 @@ def test_version_management(self): e = PgVer('15rc1') f = PgVer('15beta4') - self.assertTrue(a == b) - self.assertTrue(b > c) - self.assertTrue(a > c) - self.assertTrue(d > e) - self.assertTrue(e > f) - self.assertTrue(d > f) + assert (a == b) + assert (b > c) + assert (a > c) + assert (d > e) + assert (e > f) + assert (d > f) version = get_pg_version() - with get_remote_node(conn_params=conn_params) as node: - self.assertTrue(isinstance(version, six.string_types)) - self.assertTrue(isinstance(node.version, PgVer)) - self.assertEqual(node.version, PgVer(version)) + with __class__.helper__get_node() as node: + assert (isinstance(version, six.string_types)) + assert (isinstance(node.version, PgVer)) + assert (node.version == PgVer(version)) def test_child_pids(self): master_processes = [ @@ -1021,51 +1038,132 @@ def test_child_pids(self): ProcessType.WalReceiver, ] - with get_remote_node(conn_params=conn_params).init().start() as master: + def LOCAL__test_auxiliary_pids( + node: testgres.PostgresNode, + expectedTypes: list[ProcessType] + ) -> list[ProcessType]: + # returns list of the absence processes + assert node is not None + assert type(node) == testgres.PostgresNode # noqa: E721 + assert expectedTypes is not None + assert type(expectedTypes) == list # noqa: E721 + + pids = node.auxiliary_pids + assert pids is not None # noqa: E721 + assert type(pids) == dict # noqa: E721 + + result = list[ProcessType]() + for ptype in expectedTypes: + if not (ptype in pids): + result.append(ptype) + return result + + def LOCAL__check_auxiliary_pids__multiple_attempts( + node: testgres.PostgresNode, + expectedTypes: list[ProcessType]): + assert node is not None + assert type(node) == testgres.PostgresNode # noqa: E721 + assert expectedTypes is not None + assert type(expectedTypes) == list # noqa: E721 + + nAttempt = 0 + + while nAttempt < 5: + nAttempt += 1 + + logging.info("Test pids of [{0}] node. 
Attempt #{1}.".format( + node.name, + nAttempt + )) + + if nAttempt > 1: + time.sleep(1) + + absenceList = LOCAL__test_auxiliary_pids(node, expectedTypes) + assert absenceList is not None + assert type(absenceList) == list # noqa: E721 + if len(absenceList) == 0: + logging.info("Bingo!") + return + + logging.info("These processes are not found: {0}.".format(absenceList)) + continue + + raise Exception("Node {0} does not have the following processes: {1}.".format( + node.name, + absenceList + )) + + with __class__.helper__get_node().init().start() as master: # master node doesn't have a source walsender! - with self.assertRaises(TestgresException): + with pytest.raises(expected_exception=TestgresException): master.source_walsender with master.connect() as con: - self.assertGreater(con.pid, 0) + assert (con.pid > 0) with master.replicate().start() as replica: # test __str__ method str(master.child_processes[0]) - master_pids = master.auxiliary_pids - for ptype in master_processes: - self.assertIn(ptype, master_pids) + LOCAL__check_auxiliary_pids__multiple_attempts( + master, + master_processes) - replica_pids = replica.auxiliary_pids - for ptype in repl_processes: - self.assertIn(ptype, replica_pids) + LOCAL__check_auxiliary_pids__multiple_attempts( + replica, + repl_processes) + + master_pids = master.auxiliary_pids # there should be exactly 1 source walsender for replica - self.assertEqual(len(master_pids[ProcessType.WalSender]), 1) + assert (len(master_pids[ProcessType.WalSender]) == 1) pid1 = master_pids[ProcessType.WalSender][0] pid2 = replica.source_walsender.pid - self.assertEqual(pid1, pid2) + assert (pid1 == pid2) replica.stop() # there should be no walsender after we've stopped replica - with self.assertRaises(TestgresException): + with pytest.raises(expected_exception=TestgresException): replica.source_walsender + # TODO: Why does not this test work with remote host? 
def test_child_process_dies(self): - # test for FileNotFound exception during child_processes() function - with subprocess.Popen(["sleep", "60"]) as process: - self.assertEqual(process.poll(), None) - # collect list of processes currently running - children = psutil.Process(os.getpid()).children() - # kill a process, so received children dictionary becomes invalid - process.kill() - process.wait() - # try to handle children list -- missing processes will have ptype "ProcessType.Unknown" - [ProcessProxy(p) for p in children] + nAttempt = 0 + + while True: + if nAttempt == 5: + raise Exception("Max attempt number is exceed.") + + nAttempt += 1 + + logging.info("Attempt #{0}".format(nAttempt)) + + # test for FileNotFound exception during child_processes() function + with subprocess.Popen(["sleep", "60"]) as process: + r = process.poll() + + if r is not None: + logging.warning("process.pool() returns an unexpected result: {0}.".format(r)) + continue + + assert r is None + # collect list of processes currently running + children = psutil.Process(os.getpid()).children() + # kill a process, so received children dictionary becomes invalid + process.kill() + process.wait() + # try to handle children list -- missing processes will have ptype "ProcessType.Unknown" + [ProcessProxy(p) for p in children] + break + + @staticmethod + def helper__get_node(): + assert __class__.sm_conn_params is not None + return get_remote_node(conn_params=__class__.sm_conn_params) @staticmethod def helper__restore_envvar(name, prev_value): @@ -1074,23 +1172,20 @@ def helper__restore_envvar(name, prev_value): else: os.environ[name] = prev_value + @staticmethod + def helper__skip_test_if_util_not_exist(name: str): + assert type(name) == str # noqa: E721 + if not util_exists(name): + pytest.skip('might be missing') -if __name__ == '__main__': - if os_ops.environ('ALT_CONFIG'): - suite = unittest.TestSuite() - - # Small subset of tests for alternative configs (PG_BIN or PG_CONFIG) - suite.addTest(TestgresRemoteTests('test_pg_config')) - suite.addTest(TestgresRemoteTests('test_pg_ctl')) - suite.addTest(TestgresRemoteTests('test_psql')) - suite.addTest(TestgresRemoteTests('test_replicate')) - - print('Running tests for alternative config:') - for t in suite: - print(t) - print() + @staticmethod + def helper__skip_test_if_pg_version_is_not_ge(version: str): + assert type(version) == str # noqa: E721 + if not pg_version_ge(version): + pytest.skip('requires {0}+'.format(version)) - runner = unittest.TextTestRunner() - runner.run(suite) - else: - unittest.main() + @staticmethod + def helper__skip_test_if_pg_version_is_ge(version: str): + assert type(version) == str # noqa: E721 + if pg_version_ge(version): + pytest.skip('requires <{0}'.format(version)) From 9911ed0c928b602c17a3b0845b24910bf291925a Mon Sep 17 00:00:00 2001 From: Dmitry Kovalenko Date: Thu, 27 Feb 2025 13:48:05 +0300 Subject: [PATCH 13/90] [run_tests.sh] A right way for obtaining of BINDIR and PG_CONFIG is used (#196) A problem was detected in container with Ubuntu 24.04 tests works with "/usr/bin/pg_config" but real pg_config is "/usr/lib/postgresql/17/bin/pg_config" To resovle this problem we will call "pg_config --bindir" and use it result for BINDIR and PG_CONFIG. 
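For illustration, the same derivation can be sketched in Python with the standard library; this is only a sketch of the idea behind the patch (it assumes a pg_config binary is reachable on PATH) and is not part of run_tests.sh:

    import os
    import subprocess

    # Ask pg_config itself where the server binaries live, instead of taking
    # the directory of whatever pg_config is found first on PATH (on Ubuntu
    # 24.04 /usr/bin/pg_config is not the real versioned binary).
    bindir = subprocess.run(
        ["pg_config", "--bindir"],
        check=True, capture_output=True, text=True,
    ).stdout.strip()

    pg_config = os.path.join(bindir, "pg_config")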
--- run_tests.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/run_tests.sh b/run_tests.sh index e9d58b54..5cbbac60 100755 --- a/run_tests.sh +++ b/run_tests.sh @@ -43,13 +43,13 @@ time coverage run -a -m pytest -l -v -n 4 -k "TestgresTests" # run tests (PG_BIN) time \ - PG_BIN=$(dirname $(which pg_config)) \ + PG_BIN=$(pg_config --bindir) \ coverage run -a -m pytest -l -v -n 4 -k "TestgresTests" # run tests (PG_CONFIG) time \ - PG_CONFIG=$(which pg_config) \ + PG_CONFIG=$(pg_config --bindir)/pg_config \ coverage run -a -m pytest -l -v -n 4 -k "TestgresTests" From 6d67da2170becb944fc768ff4938f88b37e0f2a4 Mon Sep 17 00:00:00 2001 From: Dmitry Kovalenko Date: Thu, 27 Feb 2025 14:30:43 +0300 Subject: [PATCH 14/90] Update README.md Build status is corrected A correct URL to get a starus image is https://api.travis-ci.com/postgrespro/testgres.svg?branch=master --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index f0071a90..a3b854f8 100644 --- a/README.md +++ b/README.md @@ -1,4 +1,4 @@ -[![Build Status](https://travis-ci.com/postgrespro/testgres.svg?branch=master)](https://app.travis-ci.com/github/postgrespro/testgres/branches) +[![Build Status](https://api.travis-ci.com/postgrespro/testgres.svg?branch=master)](https://travis-ci.com/github/postgrespro/testgres) [![codecov](https://codecov.io/gh/postgrespro/testgres/branch/master/graph/badge.svg)](https://codecov.io/gh/postgrespro/testgres) [![PyPI version](https://badge.fury.io/py/testgres.svg)](https://badge.fury.io/py/testgres) From 3cc162715e3bc3c49b944e03038665dbd4445221 Mon Sep 17 00:00:00 2001 From: Dmitry Kovalenko Date: Fri, 28 Feb 2025 12:04:05 +0300 Subject: [PATCH 15/90] TestgresRemoteTests::test_ports_management is corrected (#198) It is synchronized with TestgresTests::test_ports_management. 
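The synchronized check is built around snapshotting the module-level bound_ports registry; a condensed sketch of that pattern (illustration only, with an assumed import location for bound_ports — the full test follows in the diff below):

    from testgres import get_new_node
    from testgres.utils import bound_ports  # assumed location of the port registry used by the tests

    ports_before = set(bound_ports)          # snapshot before any node exists

    with get_new_node() as node:
        # the new node's port must be registered, and it must be a new one
        assert node.port in bound_ports
        assert node.port not in ports_before
        assert bound_ports == ports_before | {node.port}

    # once the node is released, its port is unregistered again
    assert set(bound_ports) == ports_before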
--- tests/test_simple_remote.py | 36 +++++++++++++++++++++++++++--------- 1 file changed, 27 insertions(+), 9 deletions(-) diff --git a/tests/test_simple_remote.py b/tests/test_simple_remote.py index e7cc5e5c..d4a28a2b 100755 --- a/tests/test_simple_remote.py +++ b/tests/test_simple_remote.py @@ -976,20 +976,38 @@ def test_isolation_levels(self): con.begin('Garbage').commit() def test_ports_management(self): - # check that no ports have been bound yet - assert (len(bound_ports) == 0) + assert bound_ports is not None + assert type(bound_ports) == set # noqa: E721 + + if len(bound_ports) != 0: + logging.warning("bound_ports is not empty: {0}".format(bound_ports)) + + stage0__bound_ports = bound_ports.copy() with __class__.helper__get_node() as node: - # check that we've just bound a port - assert (len(bound_ports) == 1) + assert bound_ports is not None + assert type(bound_ports) == set # noqa: E721 + + assert node.port is not None + assert type(node.port) == int # noqa: E721 + + logging.info("node port is {0}".format(node.port)) + + assert node.port in bound_ports + assert node.port not in stage0__bound_ports + + assert stage0__bound_ports <= bound_ports + assert len(stage0__bound_ports) + 1 == len(bound_ports) + + stage1__bound_ports = stage0__bound_ports.copy() + stage1__bound_ports.add(node.port) - # check that bound_ports contains our port - port_1 = list(bound_ports)[0] - port_2 = node.port - assert (port_1 == port_2) + assert stage1__bound_ports == bound_ports # check that port has been freed successfully - assert (len(bound_ports) == 0) + assert bound_ports is not None + assert type(bound_ports) == set # noqa: E721 + assert bound_ports == stage0__bound_ports def test_exceptions(self): str(StartNodeException('msg', [('file', 'lines')])) From de432edafd63e3f054cefaefcbe69b4b3ae90e4f Mon Sep 17 00:00:00 2001 From: Dmitry Kovalenko Date: Fri, 28 Feb 2025 19:43:38 +0300 Subject: [PATCH 16/90] execute_utility2 is updated (ignore_errors) (#201) - New parameters "ignore_errors" is added. Default value is False. - Asserts are added. --- testgres/utils.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/testgres/utils.py b/testgres/utils.py index 9645fc3b..76d42b02 100644 --- a/testgres/utils.py +++ b/testgres/utils.py @@ -73,11 +73,13 @@ def execute_utility(args, logfile=None, verbose=False): return execute_utility2(tconf.os_ops, args, logfile, verbose) -def execute_utility2(os_ops: OsOperations, args, logfile=None, verbose=False): +def execute_utility2(os_ops: OsOperations, args, logfile=None, verbose=False, ignore_errors=False): assert os_ops is not None assert isinstance(os_ops, OsOperations) + assert type(verbose) == bool # noqa: E721 + assert type(ignore_errors) == bool # noqa: E721 - exit_status, out, error = os_ops.exec_command(args, verbose=True) + exit_status, out, error = os_ops.exec_command(args, verbose=True, ignore_errors=ignore_errors) # decode result out = '' if not out else out if isinstance(out, bytes): From 22826e001bd32314da0bef24101a916f011a0fdc Mon Sep 17 00:00:00 2001 From: Dmitry Kovalenko Date: Sat, 1 Mar 2025 09:57:49 +0300 Subject: [PATCH 17/90] PostgresNode::pid is improved (#199) * PostgresNode::pid is improved - We do multiple attempts to read pid file. - We process a case when we see that node is stopped between test and read. - We process a case when pid-file is empty. * PostgresNode::pid is updated Assert is added. * execute_utility2 is updated (ignore_errors) - New parameters "ignore_errors" is added. Default value is False. 
- Asserts are added. * PostgresNode::_try_shutdown is rewrited (normalization) * PostgresNode::pid uses the data from "pg_ctl status" output. * PostgresNode::_try_shutdown is correct (return None) This method returns nothing (None). --- testgres/consts.py | 4 + testgres/node.py | 247 +++++++++++++++++++++++++++++++++++++-------- 2 files changed, 211 insertions(+), 40 deletions(-) diff --git a/testgres/consts.py b/testgres/consts.py index 98c84af6..89c49ab7 100644 --- a/testgres/consts.py +++ b/testgres/consts.py @@ -35,3 +35,7 @@ # logical replication settings LOGICAL_REPL_MAX_CATCHUP_ATTEMPTS = 60 + +PG_CTL__STATUS__OK = 0 +PG_CTL__STATUS__NODE_IS_STOPPED = 3 +PG_CTL__STATUS__BAD_DATADIR = 4 diff --git a/testgres/node.py b/testgres/node.py index 56899b90..859fe742 100644 --- a/testgres/node.py +++ b/testgres/node.py @@ -49,7 +49,9 @@ RECOVERY_CONF_FILE, \ PG_LOG_FILE, \ UTILS_LOG_FILE, \ - PG_PID_FILE + PG_CTL__STATUS__OK, \ + PG_CTL__STATUS__NODE_IS_STOPPED, \ + PG_CTL__STATUS__BAD_DATADIR \ from .consts import \ MAX_LOGICAL_REPLICATION_WORKERS, \ @@ -208,14 +210,136 @@ def pid(self): Return postmaster's PID if node is running, else 0. """ - if self.status(): - pid_file = os.path.join(self.data_dir, PG_PID_FILE) - lines = self.os_ops.readlines(pid_file) - pid = int(lines[0]) if lines else None - return pid + self__data_dir = self.data_dir - # for clarity - return 0 + _params = [ + self._get_bin_path('pg_ctl'), + "-D", self__data_dir, + "status" + ] # yapf: disable + + status_code, out, error = execute_utility2( + self.os_ops, + _params, + self.utils_log_file, + verbose=True, + ignore_errors=True) + + assert type(status_code) == int # noqa: E721 + assert type(out) == str # noqa: E721 + assert type(error) == str # noqa: E721 + + # ----------------- + if status_code == PG_CTL__STATUS__NODE_IS_STOPPED: + return 0 + + # ----------------- + if status_code == PG_CTL__STATUS__BAD_DATADIR: + return 0 + + # ----------------- + if status_code != PG_CTL__STATUS__OK: + errMsg = "Getting of a node status [data_dir is {0}] failed.".format(self__data_dir) + + raise ExecUtilException( + message=errMsg, + command=_params, + exit_code=status_code, + out=out, + error=error, + ) + + # ----------------- + assert status_code == PG_CTL__STATUS__OK + + if out == "": + __class__._throw_error__pg_ctl_returns_an_empty_string( + _params + ) + + C_PID_PREFIX = "(PID: " + + i = out.find(C_PID_PREFIX) + + if i == -1: + __class__._throw_error__pg_ctl_returns_an_unexpected_string( + out, + _params + ) + + assert i > 0 + assert i < len(out) + assert len(C_PID_PREFIX) <= len(out) + assert i <= len(out) - len(C_PID_PREFIX) + + i += len(C_PID_PREFIX) + start_pid_s = i + + while True: + if i == len(out): + __class__._throw_error__pg_ctl_returns_an_unexpected_string( + out, + _params + ) + + ch = out[i] + + if ch == ")": + break + + if ch.isdigit(): + i += 1 + continue + + __class__._throw_error__pg_ctl_returns_an_unexpected_string( + out, + _params + ) + assert False + + if i == start_pid_s: + __class__._throw_error__pg_ctl_returns_an_unexpected_string( + out, + _params + ) + + # TODO: Let's verify a length of pid string. 
+ + pid = int(out[start_pid_s:i]) + + if pid == 0: + __class__._throw_error__pg_ctl_returns_a_zero_pid( + out, + _params + ) + + assert pid != 0 + return pid + + @staticmethod + def _throw_error__pg_ctl_returns_an_empty_string(_params): + errLines = [] + errLines.append("Utility pg_ctl returns empty string.") + errLines.append("Command line is {0}".format(_params)) + raise RuntimeError("\n".join(errLines)) + + @staticmethod + def _throw_error__pg_ctl_returns_an_unexpected_string(out, _params): + errLines = [] + errLines.append("Utility pg_ctl returns an unexpected string:") + errLines.append(out) + errLines.append("------------") + errLines.append("Command line is {0}".format(_params)) + raise RuntimeError("\n".join(errLines)) + + @staticmethod + def _throw_error__pg_ctl_returns_a_zero_pid(out, _params): + errLines = [] + errLines.append("Utility pg_ctl returns a zero pid. Output string is:") + errLines.append(out) + errLines.append("------------") + errLines.append("Command line is {0}".format(_params)) + raise RuntimeError("\n".join(errLines)) @property def auxiliary_pids(self): @@ -338,41 +462,84 @@ def version(self): return self._pg_version def _try_shutdown(self, max_attempts, with_force=False): + assert type(max_attempts) == int # noqa: E721 + assert type(with_force) == bool # noqa: E721 + assert max_attempts > 0 + attempts = 0 + + # try stopping server N times + while attempts < max_attempts: + attempts += 1 + try: + self.stop() + except ExecUtilException: + continue # one more time + except Exception: + eprint('cannot stop node {}'.format(self.name)) + break + + return # OK + + # If force stopping is enabled and PID is valid + if not with_force: + return + node_pid = self.pid + assert node_pid is not None + assert type(node_pid) == int # noqa: E721 - if node_pid > 0: - # try stopping server N times - while attempts < max_attempts: - try: - self.stop() - break # OK - except ExecUtilException: - pass # one more time - except Exception: - eprint('cannot stop node {}'.format(self.name)) - break - - attempts += 1 - - # If force stopping is enabled and PID is valid - if with_force and node_pid != 0: - # If we couldn't stop the node - p_status_output = self.os_ops.exec_command(cmd=f'ps -o pid= -p {node_pid}', shell=True, ignore_errors=True).decode('utf-8') - if self.status() != NodeStatus.Stopped and p_status_output and str(node_pid) in p_status_output: - try: - eprint(f'Force stopping node {self.name} with PID {node_pid}') - self.os_ops.kill(node_pid, signal.SIGKILL, expect_error=False) - except Exception: - # The node has already stopped - pass - - # Check that node stopped - print only column pid without headers - p_status_output = self.os_ops.exec_command(f'ps -o pid= -p {node_pid}', shell=True, ignore_errors=True).decode('utf-8') - if p_status_output and str(node_pid) in p_status_output: - eprint(f'Failed to stop node {self.name}.') - else: - eprint(f'Node {self.name} has been stopped successfully.') + if node_pid == 0: + return + + # TODO: [2025-02-28] It is really the old ugly code. We have to rewrite it! 
+ + ps_command = ['ps', '-o', 'pid=', '-p', str(node_pid)] + + ps_output = self.os_ops.exec_command(cmd=ps_command, shell=True, ignore_errors=True).decode('utf-8') + assert type(ps_output) == str # noqa: E721 + + if ps_output == "": + return + + if ps_output != str(node_pid): + __class__._throw_bugcheck__unexpected_result_of_ps( + ps_output, + ps_command) + + try: + eprint('Force stopping node {0} with PID {1}'.format(self.name, node_pid)) + self.os_ops.kill(node_pid, signal.SIGKILL, expect_error=False) + except Exception: + # The node has already stopped + pass + + # Check that node stopped - print only column pid without headers + ps_output = self.os_ops.exec_command(cmd=ps_command, shell=True, ignore_errors=True).decode('utf-8') + assert type(ps_output) == str # noqa: E721 + + if ps_output == "": + eprint('Node {0} has been stopped successfully.'.format(self.name)) + return + + if ps_output == str(node_pid): + eprint('Failed to stop node {0}.'.format(self.name)) + return + + __class__._throw_bugcheck__unexpected_result_of_ps( + ps_output, + ps_command) + + @staticmethod + def _throw_bugcheck__unexpected_result_of_ps(result, cmd): + assert type(result) == str # noqa: E721 + assert type(cmd) == list # noqa: E721 + errLines = [] + errLines.append("[BUG CHECK] Unexpected result of command ps:") + errLines.append(result) + errLines.append("-----") + errLines.append("Command line is {0}".format(cmd)) + raise RuntimeError("\n".join(errLines)) def _assign_master(self, master): """NOTE: this is a private method!""" From b0f90d94d81a6470e2ef4f904e79ff1114d69bed Mon Sep 17 00:00:00 2001 From: Dmitry Kovalenko Date: Sat, 1 Mar 2025 11:13:27 +0300 Subject: [PATCH 18/90] [RemoteOperations] A call of mktemp is fixed (#202) When we define a template we have to use "-t" option. It forces mktemp to return a path instead name. The following methods of RemoteOperations are fixed: - mkdtemp - mkstemp --- testgres/operations/remote_ops.py | 46 +++++++++++++++++++------------ 1 file changed, 28 insertions(+), 18 deletions(-) diff --git a/testgres/operations/remote_ops.py b/testgres/operations/remote_ops.py index 51f5b2e8..2a4e5c78 100644 --- a/testgres/operations/remote_ops.py +++ b/testgres/operations/remote_ops.py @@ -247,32 +247,42 @@ def mkdtemp(self, prefix=None): - prefix (str): The prefix of the temporary directory name. """ if prefix: - command = ["ssh"] + self.ssh_args + [self.ssh_dest, f"mktemp -d {prefix}XXXXX"] + command = ["mktemp", "-d", "-t", prefix + "XXXXX"] else: - command = ["ssh"] + self.ssh_args + [self.ssh_dest, "mktemp -d"] + command = ["mktemp", "-d"] - result = subprocess.run(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True) + exit_status, result, error = self.exec_command(command, verbose=True, encoding=get_default_encoding(), ignore_errors=True) - if result.returncode == 0: - temp_dir = result.stdout.strip() - if not os.path.isabs(temp_dir): - temp_dir = os.path.join('/home', self.username, temp_dir) - return temp_dir - else: - raise ExecUtilException(f"Could not create temporary directory. Error: {result.stderr}") + assert type(result) == str # noqa: E721 + assert type(error) == str # noqa: E721 + + if exit_status != 0: + raise ExecUtilException("Could not create temporary directory. Error code: {0}. Error message: {1}".format(exit_status, error)) + + temp_dir = result.strip() + return temp_dir def mkstemp(self, prefix=None): + """ + Creates a temporary file in the remote server. + Args: + - prefix (str): The prefix of the temporary directory name. 
+ """ if prefix: - temp_dir = self.exec_command("mktemp {}XXXXX".format(prefix), encoding=get_default_encoding()) + command = ["mktemp", "-t", prefix + "XXXXX"] else: - temp_dir = self.exec_command("mktemp", encoding=get_default_encoding()) + command = ["mktemp"] - if temp_dir: - if not os.path.isabs(temp_dir): - temp_dir = os.path.join('/home', self.username, temp_dir.strip()) - return temp_dir - else: - raise ExecUtilException("Could not create temporary directory.") + exit_status, result, error = self.exec_command(command, verbose=True, encoding=get_default_encoding(), ignore_errors=True) + + assert type(result) == str # noqa: E721 + assert type(error) == str # noqa: E721 + + if exit_status != 0: + raise ExecUtilException("Could not create temporary file. Error code: {0}. Error message: {1}".format(exit_status, error)) + + temp_file = result.strip() + return temp_file def copytree(self, src, dst): if not os.path.isabs(dst): From 71772122b6485a71c6831dbe11690d970c809f38 Mon Sep 17 00:00:00 2001 From: Dmitry Kovalenko Date: Sat, 1 Mar 2025 23:39:42 +0300 Subject: [PATCH 19/90] TestRemoteOperations::test_is_executable_true is corrected (#204) Let's test a real pg_config. --- tests/test_remote.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/tests/test_remote.py b/tests/test_remote.py index 8b167e9f..e457de07 100755 --- a/tests/test_remote.py +++ b/tests/test_remote.py @@ -8,7 +8,9 @@ from ..testgres import ExecUtilException from ..testgres import InvalidOperationException from ..testgres import RemoteOperations +from ..testgres import LocalOperations from ..testgres import ConnectionParams +from ..testgres import utils as testgres_utils class TestRemoteOperations: @@ -59,7 +61,11 @@ def test_is_executable_true(self): """ Test is_executable for an existing executable. """ - cmd = os.getenv('PG_CONFIG') + local_ops = LocalOperations() + cmd = testgres_utils.get_bin_path2(local_ops, "pg_config") + cmd = local_ops.exec_command([cmd, "--bindir"], encoding="utf-8") + cmd = cmd.rstrip() + cmd = os.path.join(cmd, "pg_config") response = self.operations.is_executable(cmd) assert response is True From 0ffd5f0c4c5ff682ffaec8ee7a6145d55e904aec Mon Sep 17 00:00:00 2001 From: Dmitry Kovalenko Date: Sat, 1 Mar 2025 23:40:29 +0300 Subject: [PATCH 20/90] Total refactoring of os_ops::execute_command (#203) * Total refactoring of os_ops::execute_command Main - We check only an exit code to detect an error. - If someone utility returns a result through an exit code, a caller side should set ignore_errors=true and process this case itself. - If expect_error is true and no errors occurred, we raise an InvalidOperationException. * The old behaviour of RaiseError.UtilityExitedWithNonZeroCode is restored Let's rollback the new code to avoid problems with probackup2' tests. 
--- testgres/operations/local_ops.py | 34 +++++----- testgres/operations/raise_error.py | 31 +++++---- testgres/operations/remote_ops.py | 101 +++++++++++++++++++---------- testgres/utils.py | 13 ++-- tests/test_local.py | 5 +- tests/test_remote.py | 10 +-- tests/test_simple_remote.py | 43 ++++++------ 7 files changed, 141 insertions(+), 96 deletions(-) diff --git a/testgres/operations/local_ops.py b/testgres/operations/local_ops.py index 91070fe7..51003174 100644 --- a/testgres/operations/local_ops.py +++ b/testgres/operations/local_ops.py @@ -23,20 +23,6 @@ from distutils import rmtree CMD_TIMEOUT_SEC = 60 -error_markers = [b'error', b'Permission denied', b'fatal'] -err_out_markers = [b'Failure'] - - -def has_errors(output=None, error=None): - if output: - if isinstance(output, str): - output = output.encode(get_default_encoding()) - return any(marker in output for marker in err_out_markers) - if error: - if isinstance(error, str): - error = error.encode(get_default_encoding()) - return any(marker in error for marker in error_markers) - return False class LocalOperations(OsOperations): @@ -134,19 +120,29 @@ def exec_command(self, cmd, wait_exit=False, verbose=False, expect_error=False, process, output, error = self._run_command(cmd, shell, input, stdin, stdout, stderr, get_process, timeout, encoding) if get_process: return process - if not ignore_errors and ((process.returncode != 0 or has_errors(output=output, error=error)) and not expect_error): + + if expect_error: + if process.returncode == 0: + raise InvalidOperationException("We expected an execution error.") + elif ignore_errors: + pass + elif process.returncode == 0: + pass + else: + assert not expect_error + assert not ignore_errors + assert process.returncode != 0 RaiseError.UtilityExitedWithNonZeroCode( cmd=cmd, exit_code=process.returncode, msg_arg=error or output, error=error, - out=output - ) + out=output) if verbose: return process.returncode, output, error - else: - return output + + return output # Environment setup def environ(self, var_name): diff --git a/testgres/operations/raise_error.py b/testgres/operations/raise_error.py index 6031b238..0d14be5a 100644 --- a/testgres/operations/raise_error.py +++ b/testgres/operations/raise_error.py @@ -7,13 +7,27 @@ class RaiseError: def UtilityExitedWithNonZeroCode(cmd, exit_code, msg_arg, error, out): assert type(exit_code) == int # noqa: E721 - msg_arg_s = __class__._TranslateDataIntoString(msg_arg).strip() + msg_arg_s = __class__._TranslateDataIntoString(msg_arg) assert type(msg_arg_s) == str # noqa: E721 + msg_arg_s = msg_arg_s.strip() if msg_arg_s == "": msg_arg_s = "#no_error_message" - message = "Utility exited with non-zero code. Error: `" + msg_arg_s + "`" + message = "Utility exited with non-zero code (" + str(exit_code) + "). 
Error: `" + msg_arg_s + "`" + raise ExecUtilException( + message=message, + command=cmd, + exit_code=exit_code, + out=out, + error=error) + + @staticmethod + def CommandExecutionError(cmd, exit_code, message, error, out): + assert type(exit_code) == int # noqa: E721 + assert type(message) == str # noqa: E721 + assert message != "" + raise ExecUtilException( message=message, command=cmd, @@ -23,6 +37,9 @@ def UtilityExitedWithNonZeroCode(cmd, exit_code, msg_arg, error, out): @staticmethod def _TranslateDataIntoString(data): + if data is None: + return "" + if type(data) == bytes: # noqa: E721 return __class__._TranslateDataIntoString__FromBinary(data) @@ -38,13 +55,3 @@ def _TranslateDataIntoString__FromBinary(data): pass return "#cannot_decode_text" - - @staticmethod - def _BinaryIsASCII(data): - assert type(data) == bytes # noqa: E721 - - for b in data: - if not (b >= 0 and b <= 127): - return False - - return True diff --git a/testgres/operations/remote_ops.py b/testgres/operations/remote_ops.py index 2a4e5c78..dc392bee 100644 --- a/testgres/operations/remote_ops.py +++ b/testgres/operations/remote_ops.py @@ -100,41 +100,40 @@ def exec_command(self, cmd, wait_exit=False, verbose=False, expect_error=False, return process try: - result, error = process.communicate(input=input_prepared, timeout=timeout) + output, error = process.communicate(input=input_prepared, timeout=timeout) except subprocess.TimeoutExpired: process.kill() raise ExecUtilException("Command timed out after {} seconds.".format(timeout)) - exit_status = process.returncode - - assert type(result) == bytes # noqa: E721 + assert type(output) == bytes # noqa: E721 assert type(error) == bytes # noqa: E721 - if not error: - error_found = False - else: - error_found = exit_status != 0 or any( - marker in error for marker in [b'error', b'Permission denied', b'fatal', b'No such file or directory'] - ) - - assert type(error_found) == bool # noqa: E721 - if encoding: - result = result.decode(encoding) + output = output.decode(encoding) error = error.decode(encoding) - if not ignore_errors and error_found and not expect_error: + if expect_error: + if process.returncode == 0: + raise InvalidOperationException("We expected an execution error.") + elif ignore_errors: + pass + elif process.returncode == 0: + pass + else: + assert not expect_error + assert not ignore_errors + assert process.returncode != 0 RaiseError.UtilityExitedWithNonZeroCode( cmd=cmd, - exit_code=exit_status, + exit_code=process.returncode, msg_arg=error, error=error, - out=result) + out=output) if verbose: - return exit_status, result, error - else: - return result + return process.returncode, output, error + + return output # Environment setup def environ(self, var_name: str) -> str: @@ -165,8 +164,30 @@ def find_executable(self, executable): def is_executable(self, file): # Check if the file is executable - is_exec = self.exec_command("test -x {} && echo OK".format(file)) - return is_exec == b"OK\n" + command = ["test", "-x", file] + + exit_status, output, error = self.exec_command(cmd=command, encoding=get_default_encoding(), ignore_errors=True, verbose=True) + + assert type(output) == str # noqa: E721 + assert type(error) == str # noqa: E721 + + if exit_status == 0: + return True + + if exit_status == 1: + return False + + errMsg = "Test operation returns an unknown result code: {0}. 
File name is [{1}].".format( + exit_status, + file) + + RaiseError.CommandExecutionError( + cmd=command, + exit_code=exit_status, + msg_arg=errMsg, + error=error, + out=output + ) def set_env(self, var_name: str, var_val: str): """ @@ -251,15 +272,21 @@ def mkdtemp(self, prefix=None): else: command = ["mktemp", "-d"] - exit_status, result, error = self.exec_command(command, verbose=True, encoding=get_default_encoding(), ignore_errors=True) + exec_exitcode, exec_output, exec_error = self.exec_command(command, verbose=True, encoding=get_default_encoding(), ignore_errors=True) - assert type(result) == str # noqa: E721 - assert type(error) == str # noqa: E721 + assert type(exec_exitcode) == int # noqa: E721 + assert type(exec_output) == str # noqa: E721 + assert type(exec_error) == str # noqa: E721 - if exit_status != 0: - raise ExecUtilException("Could not create temporary directory. Error code: {0}. Error message: {1}".format(exit_status, error)) + if exec_exitcode != 0: + RaiseError.CommandExecutionError( + cmd=command, + exit_code=exec_exitcode, + message="Could not create temporary directory.", + error=exec_error, + out=exec_output) - temp_dir = result.strip() + temp_dir = exec_output.strip() return temp_dir def mkstemp(self, prefix=None): @@ -273,15 +300,21 @@ def mkstemp(self, prefix=None): else: command = ["mktemp"] - exit_status, result, error = self.exec_command(command, verbose=True, encoding=get_default_encoding(), ignore_errors=True) + exec_exitcode, exec_output, exec_error = self.exec_command(command, verbose=True, encoding=get_default_encoding(), ignore_errors=True) - assert type(result) == str # noqa: E721 - assert type(error) == str # noqa: E721 + assert type(exec_exitcode) == int # noqa: E721 + assert type(exec_output) == str # noqa: E721 + assert type(exec_error) == str # noqa: E721 - if exit_status != 0: - raise ExecUtilException("Could not create temporary file. Error code: {0}. 
Error message: {1}".format(exit_status, error)) + if exec_exitcode != 0: + RaiseError.CommandExecutionError( + cmd=command, + exit_code=exec_exitcode, + message="Could not create temporary file.", + error=exec_error, + out=exec_output) - temp_file = result.strip() + temp_file = exec_output.strip() return temp_file def copytree(self, src, dst): diff --git a/testgres/utils.py b/testgres/utils.py index 76d42b02..093eaff6 100644 --- a/testgres/utils.py +++ b/testgres/utils.py @@ -18,6 +18,7 @@ from .config import testgres_config as tconf from .operations.os_ops import OsOperations from .operations.remote_ops import RemoteOperations +from .operations.helpers import Helpers as OsHelpers # rows returned by PG_CONFIG _pg_config_data = {} @@ -79,13 +80,13 @@ def execute_utility2(os_ops: OsOperations, args, logfile=None, verbose=False, ig assert type(verbose) == bool # noqa: E721 assert type(ignore_errors) == bool # noqa: E721 - exit_status, out, error = os_ops.exec_command(args, verbose=True, ignore_errors=ignore_errors) - # decode result + exit_status, out, error = os_ops.exec_command( + args, + verbose=True, + ignore_errors=ignore_errors, + encoding=OsHelpers.GetDefaultEncoding()) + out = '' if not out else out - if isinstance(out, bytes): - out = out.decode('utf-8') - if isinstance(error, bytes): - error = error.decode('utf-8') # write new log entry if possible if logfile: diff --git a/tests/test_local.py b/tests/test_local.py index 60a96c18..ee5e19a0 100644 --- a/tests/test_local.py +++ b/tests/test_local.py @@ -40,10 +40,11 @@ def test_exec_command_failure(self): try: self.operations.exec_command(cmd, wait_exit=True, shell=True) except ExecUtilException as e: - error = e.message + assert e.message == "Utility exited with non-zero code (127). Error: `/bin/sh: 1: nonexistent_command: not found`" + assert type(e.error) == bytes # noqa: E721 + assert e.error.strip() == b"/bin/sh: 1: nonexistent_command: not found" break raise Exception("We wait an exception!") - assert error == "Utility exited with non-zero code. Error: `/bin/sh: 1: nonexistent_command: not found`" def test_exec_command_failure__expect_error(self): """ diff --git a/tests/test_remote.py b/tests/test_remote.py index e457de07..6114e29e 100755 --- a/tests/test_remote.py +++ b/tests/test_remote.py @@ -40,10 +40,11 @@ def test_exec_command_failure(self): try: self.operations.exec_command(cmd, verbose=True, wait_exit=True) except ExecUtilException as e: - error = e.message + assert e.message == "Utility exited with non-zero code (127). Error: `bash: line 1: nonexistent_command: command not found`" + assert type(e.error) == bytes # noqa: E721 + assert e.error.strip() == b"bash: line 1: nonexistent_command: command not found" break raise Exception("We wait an exception!") - assert error == 'Utility exited with non-zero code. Error: `bash: line 1: nonexistent_command: command not found`' def test_exec_command_failure__expect_error(self): """ @@ -114,10 +115,11 @@ def test_makedirs_and_rmdirs_failure(self): try: self.operations.rmdirs(path, verbose=True) except ExecUtilException as e: - error = e.message + assert e.message == "Utility exited with non-zero code (1). Error: `rm: cannot remove '/root/test_dir': Permission denied`" + assert type(e.error) == bytes # noqa: E721 + assert e.error.strip() == b"rm: cannot remove '/root/test_dir': Permission denied" break raise Exception("We wait an exception!") - assert error == "Utility exited with non-zero code. 
Error: `rm: cannot remove '/root/test_dir': Permission denied`" def test_listdir(self): """ diff --git a/tests/test_simple_remote.py b/tests/test_simple_remote.py index d4a28a2b..74b10635 100755 --- a/tests/test_simple_remote.py +++ b/tests/test_simple_remote.py @@ -178,27 +178,32 @@ def test_init__unk_LANG_and_LC_CTYPE(self): assert os.environ.get("LC_CTYPE") == unkData[1] assert not ("LC_COLLATE" in os.environ.keys()) - while True: + assert os.getenv('LANG') == unkData[0] + assert os.getenv('LANGUAGE') is None + assert os.getenv('LC_CTYPE') == unkData[1] + assert os.getenv('LC_COLLATE') is None + + exc: ExecUtilException = None + with __class__.helper__get_node() as node: try: - with __class__.helper__get_node(): - pass - except ExecUtilException as e: - # - # Example of an error message: - # - # warning: setlocale: LC_CTYPE: cannot change locale (UNKNOWN_CTYPE): No such file or directory - # postgres (PostgreSQL) 14.12 - # - errMsg = str(e) - - logging.info("Error message is: {0}".format(errMsg)) - - assert "LC_CTYPE" in errMsg - assert unkData[1] in errMsg - assert "warning: setlocale: LC_CTYPE: cannot change locale (" + unkData[1] + "): No such file or directory" in errMsg - assert ("postgres" in errMsg) or ("PostgreSQL" in errMsg) - break + node.init() # IT RAISES! + except InitNodeException as e: + exc = e.__cause__ + assert exc is not None + assert isinstance(exc, ExecUtilException) + + if exc is None: raise Exception("We expected an error!") + + assert isinstance(exc, ExecUtilException) + + errMsg = str(exc) + logging.info("Error message is {0}: {1}".format(type(exc).__name__, errMsg)) + + assert "warning: setlocale: LC_CTYPE: cannot change locale (" + unkData[1] + ")" in errMsg + assert "initdb: error: invalid locale settings; check LANG and LC_* environment variables" in errMsg + continue + finally: __class__.helper__restore_envvar("LANG", prev_LANG) __class__.helper__restore_envvar("LANGUAGE", prev_LANGUAGE) From 7abca7f5e36ea20c6d8dd5dcc29b34ca5c9090c1 Mon Sep 17 00:00:00 2001 From: Dmitry Kovalenko Date: Sun, 2 Mar 2025 17:33:10 +0300 Subject: [PATCH 21/90] xxx::test_logging is corrected (local, remote) (#205) - these tests configure logging wrong and create the conflicts with root logger - these tests (local and remote) conflict with each other --- tests/test_simple.py | 142 +++++++++++++++++++++------------- tests/test_simple_remote.py | 147 +++++++++++++++++++++++------------- 2 files changed, 184 insertions(+), 105 deletions(-) diff --git a/tests/test_simple.py b/tests/test_simple.py index 6c433cd4..37c3db44 100644 --- a/tests/test_simple.py +++ b/tests/test_simple.py @@ -8,8 +8,8 @@ import pytest import psutil import platform - -import logging.config +import logging +import uuid from contextlib import contextmanager from shutil import rmtree @@ -718,55 +718,95 @@ def test_poll_query_until(self): node.poll_query_until('select true') def test_logging(self): - logfile = tempfile.NamedTemporaryFile('w', delete=True) - - log_conf = { - 'version': 1, - 'handlers': { - 'file': { - 'class': 'logging.FileHandler', - 'filename': logfile.name, - 'formatter': 'base_format', - 'level': logging.DEBUG, - }, - }, - 'formatters': { - 'base_format': { - 'format': '%(node)-5s: %(message)s', - }, - }, - 'root': { - 'handlers': ('file', ), - 'level': 'DEBUG', - }, - } - - logging.config.dictConfig(log_conf) - - with scoped_config(use_python_logging=True): - node_name = 'master' - - with get_new_node(name=node_name) as master: - master.init().start() - - # execute a dummy query a few times - 
for i in range(20): - master.execute('select 1') - time.sleep(0.01) - - # let logging worker do the job - time.sleep(0.1) - - # check that master's port is found - with open(logfile.name, 'r') as log: - lines = log.readlines() - assert (any(node_name in s for s in lines)) - - # test logger after stop/start/restart - master.stop() - master.start() - master.restart() - assert (master._logger.is_alive()) + C_MAX_ATTEMPTS = 50 + # This name is used for testgres logging, too. + C_NODE_NAME = "testgres_tests." + __class__.__name__ + "test_logging-master-" + uuid.uuid4().hex + + logging.info("Node name is [{0}]".format(C_NODE_NAME)) + + with tempfile.NamedTemporaryFile('w', delete=True) as logfile: + formatter = logging.Formatter(fmt="%(node)-5s: %(message)s") + handler = logging.FileHandler(filename=logfile.name) + handler.formatter = formatter + logger = logging.getLogger(C_NODE_NAME) + assert logger is not None + assert len(logger.handlers) == 0 + + try: + # It disables to log on the root level + logger.propagate = False + logger.addHandler(handler) + + with scoped_config(use_python_logging=True): + with get_new_node(name=C_NODE_NAME) as master: + logging.info("Master node is initilizing") + master.init() + + logging.info("Master node is starting") + master.start() + + logging.info("Dummy query is executed a few times") + for _ in range(20): + master.execute('select 1') + time.sleep(0.01) + + # let logging worker do the job + time.sleep(0.1) + + logging.info("Master node log file is checking") + nAttempt = 0 + + while True: + assert nAttempt <= C_MAX_ATTEMPTS + if nAttempt == C_MAX_ATTEMPTS: + raise Exception("Test failed!") + + # let logging worker do the job + time.sleep(0.1) + + nAttempt += 1 + + logging.info("Attempt {0}".format(nAttempt)) + + # check that master's port is found + with open(logfile.name, 'r') as log: + lines = log.readlines() + + assert lines is not None + assert type(lines) == list # noqa: E721 + + def LOCAL__test_lines(): + for s in lines: + if any(C_NODE_NAME in s for s in lines): + logging.info("OK. We found the node_name in a line \"{0}\"".format(s)) + return True + return False + + if LOCAL__test_lines(): + break + + logging.info("Master node log file does not have an expected information.") + continue + + # test logger after stop/start/restart + logging.info("Master node is stopping...") + master.stop() + logging.info("Master node is staring again...") + master.start() + logging.info("Master node is restaring...") + master.restart() + assert (master._logger.is_alive()) + finally: + # It is a hack code to logging cleanup + logging._acquireLock() + assert logging.Logger.manager is not None + assert C_NODE_NAME in logging.Logger.manager.loggerDict.keys() + logging.Logger.manager.loggerDict.pop(C_NODE_NAME, None) + assert not (C_NODE_NAME in logging.Logger.manager.loggerDict.keys()) + assert not (handler in logging._handlers.values()) + logging._releaseLock() + # GO HOME! 
+ return def test_pgbench(self): __class__.helper__skip_test_if_util_not_exist("pgbench") diff --git a/tests/test_simple_remote.py b/tests/test_simple_remote.py index 74b10635..a62085ce 100755 --- a/tests/test_simple_remote.py +++ b/tests/test_simple_remote.py @@ -8,8 +8,8 @@ import six import pytest import psutil - -import logging.config +import logging +import uuid from contextlib import contextmanager @@ -788,56 +788,95 @@ def test_poll_query_until(self): node.poll_query_until('select true') def test_logging(self): - # FAIL - logfile = tempfile.NamedTemporaryFile('w', delete=True) - - log_conf = { - 'version': 1, - 'handlers': { - 'file': { - 'class': 'logging.FileHandler', - 'filename': logfile.name, - 'formatter': 'base_format', - 'level': logging.DEBUG, - }, - }, - 'formatters': { - 'base_format': { - 'format': '%(node)-5s: %(message)s', - }, - }, - 'root': { - 'handlers': ('file',), - 'level': 'DEBUG', - }, - } - - logging.config.dictConfig(log_conf) - - with scoped_config(use_python_logging=True): - node_name = 'master' - - with get_remote_node(name=node_name) as master: - master.init().start() - - # execute a dummy query a few times - for i in range(20): - master.execute('select 1') - time.sleep(0.01) - - # let logging worker do the job - time.sleep(0.1) - - # check that master's port is found - with open(logfile.name, 'r') as log: - lines = log.readlines() - assert (any(node_name in s for s in lines)) - - # test logger after stop/start/restart - master.stop() - master.start() - master.restart() - assert (master._logger.is_alive()) + C_MAX_ATTEMPTS = 50 + # This name is used for testgres logging, too. + C_NODE_NAME = "testgres_tests." + __class__.__name__ + "test_logging-master-" + uuid.uuid4().hex + + logging.info("Node name is [{0}]".format(C_NODE_NAME)) + + with tempfile.NamedTemporaryFile('w', delete=True) as logfile: + formatter = logging.Formatter(fmt="%(node)-5s: %(message)s") + handler = logging.FileHandler(filename=logfile.name) + handler.formatter = formatter + logger = logging.getLogger(C_NODE_NAME) + assert logger is not None + assert len(logger.handlers) == 0 + + try: + # It disables to log on the root level + logger.propagate = False + logger.addHandler(handler) + + with scoped_config(use_python_logging=True): + with __class__.helper__get_node(name=C_NODE_NAME) as master: + logging.info("Master node is initilizing") + master.init() + + logging.info("Master node is starting") + master.start() + + logging.info("Dummy query is executed a few times") + for _ in range(20): + master.execute('select 1') + time.sleep(0.01) + + # let logging worker do the job + time.sleep(0.1) + + logging.info("Master node log file is checking") + nAttempt = 0 + + while True: + assert nAttempt <= C_MAX_ATTEMPTS + if nAttempt == C_MAX_ATTEMPTS: + raise Exception("Test failed!") + + # let logging worker do the job + time.sleep(0.1) + + nAttempt += 1 + + logging.info("Attempt {0}".format(nAttempt)) + + # check that master's port is found + with open(logfile.name, 'r') as log: + lines = log.readlines() + + assert lines is not None + assert type(lines) == list # noqa: E721 + + def LOCAL__test_lines(): + for s in lines: + if any(C_NODE_NAME in s for s in lines): + logging.info("OK. 
We found the node_name in a line \"{0}\"".format(s)) + return True + return False + + if LOCAL__test_lines(): + break + + logging.info("Master node log file does not have an expected information.") + continue + + # test logger after stop/start/restart + logging.info("Master node is stopping...") + master.stop() + logging.info("Master node is staring again...") + master.start() + logging.info("Master node is restaring...") + master.restart() + assert (master._logger.is_alive()) + finally: + # It is a hack code to logging cleanup + logging._acquireLock() + assert logging.Logger.manager is not None + assert C_NODE_NAME in logging.Logger.manager.loggerDict.keys() + logging.Logger.manager.loggerDict.pop(C_NODE_NAME, None) + assert not (C_NODE_NAME in logging.Logger.manager.loggerDict.keys()) + assert not (handler in logging._handlers.values()) + logging._releaseLock() + # GO HOME! + return def test_pgbench(self): __class__.helper__skip_test_if_util_not_exist("pgbench") @@ -1184,9 +1223,9 @@ def test_child_process_dies(self): break @staticmethod - def helper__get_node(): + def helper__get_node(name=None): assert __class__.sm_conn_params is not None - return get_remote_node(conn_params=__class__.sm_conn_params) + return get_remote_node(name=name, conn_params=__class__.sm_conn_params) @staticmethod def helper__restore_envvar(name, prev_value): From e1a5bb451186c871df56f79f78af78dd63d3381e Mon Sep 17 00:00:00 2001 From: Dmitry Kovalenko Date: Mon, 3 Mar 2025 12:37:26 +0300 Subject: [PATCH 22/90] Updating of CI-tests (#197) * Test dockerfile for ubuntu 24.04 is added * Cleanup (Dockerfile--ubuntu-24_04) * Cleanup. Including of 'postgres' in 'sudo' group is not required. * [run_tests.sh] A right way for obtaining of BINDIR and PG_CONFIG is used A problem was detected in container with Ubuntu 24.04 tests works with "/usr/bin/pg_config" but real pg_config is "/usr/lib/postgresql/17/bin/pg_config" To resovle this problem we will call "pg_config --bindir" and use it result for BINDIR and PG_CONFIG. * Dockerfile--ubuntu-24_04 is updated Let's use /pg/testgres/run_tests.sh directly. * Dockerfile--ubuntu-24_04 is updated (cleanup) curl is installing twice. * Dockerfile--ubuntu-24_04 is updated (cleanup) [del] musl-dev [del] mc * Dockerfile--ubuntu-24_04 is formatted * CI-test on Ubuntu 24.04 is added. * Dockerfile--std.tmpl is updated (refactoring) /pg/testgres/run_tests.sh is used directly. * Dockerfile--ubuntu-24_04.tmpl is updated * PostgresNode::pid is improved - We do multiple attempts to read pid file. - We process a case when we see that node is stopped between test and read. - We process a case when pid-file is empty. * PostgresNode::pid is updated Assert is added. * execute_utility2 is updated (ignore_errors) - New parameters "ignore_errors" is added. Default value is False. - Asserts are added. * PostgresNode::_try_shutdown is rewrited (normalization) * PostgresNode::pid uses the data from "pg_ctl status" output. * PostgresNode::_try_shutdown is correct (return None) This method returns nothing (None). * [RemoteOperations] A call of mktemp is fixed When we define a template we have to use "-t" option. It forces mktemp to return a path instead name. The following methods of RemoteOperations are fixed: - mkdtemp - mkstemp * Total refactoring of os_ops::execute_command Main - We check only an exit code to detect an error. - If someone utility returns a result through an exit code, a caller side should set ignore_errors=true and process this case itself. 
- If expect_error is true and no errors occurred, we raise an InvalidOperationException. * Dockerfile--ubuntu-24_04.tmpl is updated The folder "home/postgres" is not required now. * The old behaviour of RaiseError.UtilityExitedWithNonZeroCode is restored Let's rollback the new code to avoid problems with probackup2' tests. * TestRemoteOperations::test_is_executable_true is corrected Let's test a real pg_config. * xxx::test_logging is corrected (local, remote) - these tests configure logging wrong and create the conflicts with root logger - these tests (local and remote) conflict with each other * TEST_FILTER is added * CI on Ubuntu 24.04 runs all the tests --- .travis.yml | 28 ++++------ Dockerfile.tmpl => Dockerfile--std.tmpl | 6 +-- Dockerfile--ubuntu-24_04.tmpl | 69 +++++++++++++++++++++++++ mk_dockerfile.sh | 2 +- run_tests.sh | 7 +-- 5 files changed, 86 insertions(+), 26 deletions(-) rename Dockerfile.tmpl => Dockerfile--std.tmpl (80%) create mode 100644 Dockerfile--ubuntu-24_04.tmpl diff --git a/.travis.yml b/.travis.yml index 6f63a67b..4110835a 100644 --- a/.travis.yml +++ b/.travis.yml @@ -20,23 +20,17 @@ notifications: on_failure: always env: - - PYTHON_VERSION=3 PG_VERSION=17 - - PYTHON_VERSION=3 PG_VERSION=16 - - PYTHON_VERSION=3 PG_VERSION=15 - - PYTHON_VERSION=3 PG_VERSION=14 - - PYTHON_VERSION=3 PG_VERSION=13 - - PYTHON_VERSION=3 PG_VERSION=12 - - PYTHON_VERSION=3 PG_VERSION=11 - - PYTHON_VERSION=3 PG_VERSION=10 -# - PYTHON_VERSION=3 PG_VERSION=9.6 -# - PYTHON_VERSION=3 PG_VERSION=9.5 -# - PYTHON_VERSION=3 PG_VERSION=9.4 -# - PYTHON_VERSION=2 PG_VERSION=10 -# - PYTHON_VERSION=2 PG_VERSION=9.6 -# - PYTHON_VERSION=2 PG_VERSION=9.5 -# - PYTHON_VERSION=2 PG_VERSION=9.4 + - TEST_PLATFORM=std PYTHON_VERSION=3 PG_VERSION=17 + - TEST_PLATFORM=std PYTHON_VERSION=3 PG_VERSION=16 + - TEST_PLATFORM=std PYTHON_VERSION=3 PG_VERSION=15 + - TEST_PLATFORM=std PYTHON_VERSION=3 PG_VERSION=14 + - TEST_PLATFORM=std PYTHON_VERSION=3 PG_VERSION=13 + - TEST_PLATFORM=std PYTHON_VERSION=3 PG_VERSION=12 + - TEST_PLATFORM=std PYTHON_VERSION=3 PG_VERSION=11 + - TEST_PLATFORM=std PYTHON_VERSION=3 PG_VERSION=10 + - TEST_PLATFORM=ubuntu-24_04 PYTHON_VERSION=3 PG_VERSION=17 matrix: allow_failures: - - env: PYTHON_VERSION=3 PG_VERSION=11 - - env: PYTHON_VERSION=3 PG_VERSION=10 + - env: TEST_PLATFORM=std PYTHON_VERSION=3 PG_VERSION=11 + - env: TEST_PLATFORM=std PYTHON_VERSION=3 PG_VERSION=10 diff --git a/Dockerfile.tmpl b/Dockerfile--std.tmpl similarity index 80% rename from Dockerfile.tmpl rename to Dockerfile--std.tmpl index dc5878b6..d844c9a3 100644 --- a/Dockerfile.tmpl +++ b/Dockerfile--std.tmpl @@ -11,13 +11,9 @@ RUN if [ "${PYTHON_VERSION}" = "3" ] ; then \ fi ENV LANG=C.UTF-8 -RUN mkdir -p /pg -COPY run_tests.sh /run.sh -RUN chmod 755 /run.sh - ADD . 
/pg/testgres WORKDIR /pg/testgres RUN chown -R postgres:postgres /pg USER postgres -ENTRYPOINT PYTHON_VERSION=${PYTHON_VERSION} /run.sh +ENTRYPOINT PYTHON_VERSION=${PYTHON_VERSION} bash run_tests.sh diff --git a/Dockerfile--ubuntu-24_04.tmpl b/Dockerfile--ubuntu-24_04.tmpl new file mode 100644 index 00000000..99be5343 --- /dev/null +++ b/Dockerfile--ubuntu-24_04.tmpl @@ -0,0 +1,69 @@ +FROM ubuntu:24.04 + +RUN apt update +RUN apt install -y sudo curl ca-certificates postgresql-common + +RUN bash /usr/share/postgresql-common/pgdg/apt.postgresql.org.sh -y + +RUN install -d /usr/share/postgresql-common/pgdg +RUN curl -o /usr/share/postgresql-common/pgdg/apt.postgresql.org.asc --fail https://www.postgresql.org/media/keys/ACCC4CF8.asc + +# It does not work +# RUN sh -c 'echo "deb [signed-by=/usr/share/postgresql-common/pgdg/apt.postgresql.org.asc] https://apt.postgresql.org/pub/repos/apt $(lsb_release -cs)-pgdg main" > /etc/apt/sources.list.d/pgdg.list' + +RUN apt update +RUN apt install -y postgresql-${PG_VERSION} + +RUN apt install -y python3 python3-dev python3-virtualenv +# RUN apt install -y mc + +# It is required for psycopg2 +RUN apt install -y libpq-dev +RUN apt install -y openssh-server + +# [2025-02-26] It adds the user 'postgres' in the group 'sudo' +# [2025-02-27] It is not required. +# RUN adduser postgres sudo + +ADD . /pg/testgres +WORKDIR /pg/testgres +RUN chown -R postgres /pg + +EXPOSE 22 + +RUN ssh-keygen -A + +# It enables execution of "sudo service ssh start" without password +RUN sh -c "echo postgres ALL=NOPASSWD:/usr/sbin/service ssh start" >> /etc/sudoers + +USER postgres + +ENV LANG=C.UTF-8 + +#ENTRYPOINT PYTHON_VERSION=3.12 /run.sh +ENTRYPOINT sh -c " \ +#set -eux; \ +echo HELLO FROM ENTRYPOINT; \ +echo HOME DIR IS [`realpath ~/`]; \ +echo POINT 1; \ +chmod go-w /var/lib/postgresql; \ +echo POINT 1.5; \ +mkdir -p ~/.ssh; \ +echo POINT 2; \ +service ssh enable; \ +echo POINT 3; \ +sudo service ssh start; \ +echo POINT 4; \ +ssh-keyscan -H localhost >> ~/.ssh/known_hosts; \ +echo POINT 5; \ +ssh-keyscan -H 127.0.0.1 >> ~/.ssh/known_hosts; \ +echo POINT 6; \ +ssh-keygen -t rsa -f ~/.ssh/id_rsa -q -N ''; \ +echo ----; \ +cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys; \ +echo ----; \ +chmod 600 ~/.ssh/authorized_keys; \ +echo ----; \ +ls -la ~/.ssh/; \ +echo ----; \ +TEST_FILTER="" PYTHON_VERSION=${PYTHON_VERSION} bash run_tests.sh;" diff --git a/mk_dockerfile.sh b/mk_dockerfile.sh index d2aa3a8a..8f7876a3 100755 --- a/mk_dockerfile.sh +++ b/mk_dockerfile.sh @@ -1,2 +1,2 @@ set -eu -sed -e 's/${PYTHON_VERSION}/'${PYTHON_VERSION}/g -e 's/${PG_VERSION}/'${PG_VERSION}/g Dockerfile.tmpl > Dockerfile +sed -e 's/${PYTHON_VERSION}/'${PYTHON_VERSION}/g -e 's/${PG_VERSION}/'${PG_VERSION}/g Dockerfile--${TEST_PLATFORM}.tmpl > Dockerfile diff --git a/run_tests.sh b/run_tests.sh index 5cbbac60..021f9d9f 100755 --- a/run_tests.sh +++ b/run_tests.sh @@ -4,6 +4,7 @@ set -eux +if [ -z ${TEST_FILTER+x} ]; then export TEST_FILTER="TestgresTests"; fi # choose python version echo python version is $PYTHON_VERSION @@ -38,19 +39,19 @@ rm -f $COVERAGE_FILE # run tests (PATH) -time coverage run -a -m pytest -l -v -n 4 -k "TestgresTests" +time coverage run -a -m pytest -l -v -n 4 -k "${TEST_FILTER}" # run tests (PG_BIN) time \ PG_BIN=$(pg_config --bindir) \ - coverage run -a -m pytest -l -v -n 4 -k "TestgresTests" + coverage run -a -m pytest -l -v -n 4 -k "${TEST_FILTER}" # run tests (PG_CONFIG) time \ PG_CONFIG=$(pg_config --bindir)/pg_config \ - coverage run -a -m pytest -l -v -n 4 -k 
"TestgresTests" + coverage run -a -m pytest -l -v -n 4 -k "${TEST_FILTER}" # show coverage From 3a1d08b3156b56609d02568d34771a6a9a535aa8 Mon Sep 17 00:00:00 2001 From: Dmitry Kovalenko Date: Tue, 4 Mar 2025 07:06:08 +0300 Subject: [PATCH 23/90] RemoteOperations::path_exists is updated (#206) - command is passed through list - we process all the result codes of test --- testgres/operations/remote_ops.py | 26 ++++++++++++++++++++++++-- tests/test_remote.py | 26 ++++++++++++++++---------- 2 files changed, 40 insertions(+), 12 deletions(-) diff --git a/testgres/operations/remote_ops.py b/testgres/operations/remote_ops.py index dc392bee..60d5265c 100644 --- a/testgres/operations/remote_ops.py +++ b/testgres/operations/remote_ops.py @@ -247,8 +247,30 @@ def listdir(self, path): return result.splitlines() def path_exists(self, path): - result = self.exec_command("test -e {}; echo $?".format(path), encoding=get_default_encoding()) - return int(result.strip()) == 0 + command = ["test", "-e", path] + + exit_status, output, error = self.exec_command(cmd=command, encoding=get_default_encoding(), ignore_errors=True, verbose=True) + + assert type(output) == str # noqa: E721 + assert type(error) == str # noqa: E721 + + if exit_status == 0: + return True + + if exit_status == 1: + return False + + errMsg = "Test operation returns an unknown result code: {0}. Path is [{1}].".format( + exit_status, + path) + + RaiseError.CommandExecutionError( + cmd=command, + exit_code=exit_status, + msg_arg=errMsg, + error=error, + out=output + ) @property def pathsep(self): diff --git a/tests/test_remote.py b/tests/test_remote.py index 6114e29e..85e65c24 100755 --- a/tests/test_remote.py +++ b/tests/test_remote.py @@ -130,23 +130,29 @@ def test_listdir(self): assert isinstance(files, list) - def test_path_exists_true(self): + def test_path_exists_true__directory(self): """ - Test path_exists for an existing path. + Test path_exists for an existing directory. """ - path = "/etc" - response = self.operations.path_exists(path) + assert self.operations.path_exists("/etc") is True - assert response is True + def test_path_exists_true__file(self): + """ + Test path_exists for an existing file. + """ + assert self.operations.path_exists(__file__) is True - def test_path_exists_false(self): + def test_path_exists_false__directory(self): """ - Test path_exists for a non-existing path. + Test path_exists for a non-existing directory. """ - path = "/nonexistent_path" - response = self.operations.path_exists(path) + assert self.operations.path_exists("/nonexistent_path") is False - assert response is False + def test_path_exists_false__file(self): + """ + Test path_exists for a non-existing file. + """ + assert self.operations.path_exists("/etc/nonexistent_path.txt") is False def test_write_text_file(self): """ From ddcaea03a1d69cca22a172aeedbef79ba82f5d40 Mon Sep 17 00:00:00 2001 From: Dmitry Kovalenko Date: Tue, 4 Mar 2025 10:56:30 +0300 Subject: [PATCH 24/90] os_ops::rmdirs (local, remote) was refactored (#207) * os_ops::rmdirs (local, remote) was refactored LocalOperations::rmdirs - parameter 'retries' was remaned with 'attempts' - if ignore_errors we raise an error RemoteOperations::rmdirs - parameter 'verbose' was removed - method returns bool - we prevent to delete a file * [TestRemoteOperations] New tests for rmdirs are added. 
* test_pg_ctl_wait_option (local, remote) is corrected --- testgres/operations/local_ops.py | 41 ++++++++++---- testgres/operations/remote_ops.py | 42 +++++++++++--- tests/test_remote.py | 92 +++++++++++++++++++++++++++---- tests/test_simple.py | 65 ++++++++++++++++++---- tests/test_simple_remote.py | 65 ++++++++++++++++++---- 5 files changed, 255 insertions(+), 50 deletions(-) diff --git a/testgres/operations/local_ops.py b/testgres/operations/local_ops.py index 51003174..0fa7d0ad 100644 --- a/testgres/operations/local_ops.py +++ b/testgres/operations/local_ops.py @@ -174,7 +174,8 @@ def makedirs(self, path, remove_existing=False): except FileExistsError: pass - def rmdirs(self, path, ignore_errors=True, retries=3, delay=1): + # [2025-02-03] Old name of parameter attempts is "retries". + def rmdirs(self, path, ignore_errors=True, attempts=3, delay=1): """ Removes a directory and its contents, retrying on failure. @@ -183,18 +184,38 @@ def rmdirs(self, path, ignore_errors=True, retries=3, delay=1): :param retries: Number of attempts to remove the directory. :param delay: Delay between attempts in seconds. """ - for attempt in range(retries): + assert type(path) == str # noqa: E721 + assert type(ignore_errors) == bool # noqa: E721 + assert type(attempts) == int # noqa: E721 + assert type(delay) == int or type(delay) == float # noqa: E721 + assert attempts > 0 + assert delay >= 0 + + attempt = 0 + while True: + assert attempt < attempts + attempt += 1 try: - rmtree(path, ignore_errors=ignore_errors) - if not os.path.exists(path): - return True + rmtree(path) except FileNotFoundError: - return True + pass except Exception as e: - logging.error(f"Error: Failed to remove directory {path} on attempt {attempt + 1}: {e}") - time.sleep(delay) - logging.error(f"Error: Failed to remove directory {path} after {retries} attempts.") - return False + if attempt < attempt: + errMsg = "Failed to remove directory {0} on attempt {1} ({2}): {3}".format( + path, attempt, type(e).__name__, e + ) + logging.warning(errMsg) + time.sleep(delay) + continue + + assert attempt == attempts + if not ignore_errors: + raise + + return False + + # OK! + return True def listdir(self, path): return os.listdir(path) diff --git a/testgres/operations/remote_ops.py b/testgres/operations/remote_ops.py index 60d5265c..767df567 100644 --- a/testgres/operations/remote_ops.py +++ b/testgres/operations/remote_ops.py @@ -4,6 +4,7 @@ import subprocess import tempfile import io +import logging # we support both pg8000 and psycopg2 try: @@ -222,20 +223,45 @@ def makedirs(self, path, remove_existing=False): raise Exception("Couldn't create dir {} because of error {}".format(path, error)) return result - def rmdirs(self, path, verbose=False, ignore_errors=True): + def rmdirs(self, path, ignore_errors=True): """ Remove a directory in the remote server. Args: - path (str): The path to the directory to be removed. - - verbose (bool): If True, return exit status, result, and error. - ignore_errors (bool): If True, do not raise error if directory does not exist. 
""" - cmd = "rm -rf {}".format(path) - exit_status, result, error = self.exec_command(cmd, verbose=True) - if verbose: - return exit_status, result, error - else: - return result + assert type(path) == str # noqa: E721 + assert type(ignore_errors) == bool # noqa: E721 + + # ENOENT = 2 - No such file or directory + # ENOTDIR = 20 - Not a directory + + cmd1 = [ + "if", "[", "-d", path, "]", ";", + "then", "rm", "-rf", path, ";", + "elif", "[", "-e", path, "]", ";", + "then", "{", "echo", "cannot remove '" + path + "': it is not a directory", ">&2", ";", "exit", "20", ";", "}", ";", + "else", "{", "echo", "directory '" + path + "' does not exist", ">&2", ";", "exit", "2", ";", "}", ";", + "fi" + ] + + cmd2 = ["sh", "-c", subprocess.list2cmdline(cmd1)] + + try: + self.exec_command(cmd2, encoding=Helpers.GetDefaultEncoding()) + except ExecUtilException as e: + if e.exit_code == 2: # No such file or directory + return True + + if not ignore_errors: + raise + + errMsg = "Failed to remove directory {0} ({1}): {2}".format( + path, type(e).__name__, e + ) + logging.warning(errMsg) + return False + return True def listdir(self, path): """ diff --git a/tests/test_remote.py b/tests/test_remote.py index 85e65c24..b1c4e58c 100755 --- a/tests/test_remote.py +++ b/tests/test_remote.py @@ -99,9 +99,9 @@ def test_makedirs_and_rmdirs_success(self): assert not os.path.exists(path) assert not self.operations.path_exists(path) - def test_makedirs_and_rmdirs_failure(self): + def test_makedirs_failure(self): """ - Test makedirs and rmdirs for directory creation and removal failure. + Test makedirs for failure. """ # Try to create a directory in a read-only location path = "/root/test_dir" @@ -110,16 +110,84 @@ def test_makedirs_and_rmdirs_failure(self): with pytest.raises(Exception): self.operations.makedirs(path) - # Test rmdirs - while True: - try: - self.operations.rmdirs(path, verbose=True) - except ExecUtilException as e: - assert e.message == "Utility exited with non-zero code (1). 
Error: `rm: cannot remove '/root/test_dir': Permission denied`" - assert type(e.error) == bytes # noqa: E721 - assert e.error.strip() == b"rm: cannot remove '/root/test_dir': Permission denied" - break - raise Exception("We wait an exception!") + def test_rmdirs(self): + path = self.operations.mkdtemp() + assert os.path.exists(path) + + assert self.operations.rmdirs(path, ignore_errors=False) is True + assert not os.path.exists(path) + + def test_rmdirs__01_with_subfolder(self): + # folder with subfolder + path = self.operations.mkdtemp() + assert os.path.exists(path) + + dir1 = os.path.join(path, "dir1") + assert not os.path.exists(dir1) + + self.operations.makedirs(dir1) + assert os.path.exists(dir1) + + assert self.operations.rmdirs(path, ignore_errors=False) is True + assert not os.path.exists(path) + assert not os.path.exists(dir1) + + def test_rmdirs__02_with_file(self): + # folder with file + path = self.operations.mkdtemp() + assert os.path.exists(path) + + file1 = os.path.join(path, "file1.txt") + assert not os.path.exists(file1) + + self.operations.touch(file1) + assert os.path.exists(file1) + + assert self.operations.rmdirs(path, ignore_errors=False) is True + assert not os.path.exists(path) + assert not os.path.exists(file1) + + def test_rmdirs__03_with_subfolder_and_file(self): + # folder with subfolder and file + path = self.operations.mkdtemp() + assert os.path.exists(path) + + dir1 = os.path.join(path, "dir1") + assert not os.path.exists(dir1) + + self.operations.makedirs(dir1) + assert os.path.exists(dir1) + + file1 = os.path.join(dir1, "file1.txt") + assert not os.path.exists(file1) + + self.operations.touch(file1) + assert os.path.exists(file1) + + assert self.operations.rmdirs(path, ignore_errors=False) is True + assert not os.path.exists(path) + assert not os.path.exists(dir1) + assert not os.path.exists(file1) + + def test_rmdirs__try_to_delete_nonexist_path(self): + path = "/root/test_dir" + + assert self.operations.rmdirs(path, ignore_errors=False) is True + + def test_rmdirs__try_to_delete_file(self): + path = self.operations.mkstemp() + assert os.path.exists(path) + + with pytest.raises(ExecUtilException) as x: + self.operations.rmdirs(path, ignore_errors=False) + + assert os.path.exists(path) + assert type(x.value) == ExecUtilException # noqa: E721 + assert x.value.message == "Utility exited with non-zero code (20). 
Error: `cannot remove '" + path + "': it is not a directory`" + assert type(x.value.error) == str # noqa: E721 + assert x.value.error.strip() == "cannot remove '" + path + "': it is not a directory" + assert type(x.value.exit_code) == int # noqa: E721 + assert x.value.exit_code == 20 def test_listdir(self): """ diff --git a/tests/test_simple.py b/tests/test_simple.py index 37c3db44..d9844fed 100644 --- a/tests/test_simple.py +++ b/tests/test_simple.py @@ -428,16 +428,61 @@ def test_backup_wrong_xlog_method(self): node.backup(xlog_method='wrong') def test_pg_ctl_wait_option(self): - with get_new_node() as node: - node.init().start(wait=False) - while True: - try: - node.stop(wait=False) - break - except ExecUtilException: - # it's ok to get this exception here since node - # could be not started yet - pass + C_MAX_ATTEMPTS = 50 + + node = get_new_node() + assert node.status() == testgres.NodeStatus.Uninitialized + node.init() + assert node.status() == testgres.NodeStatus.Stopped + node.start(wait=False) + nAttempt = 0 + while True: + if nAttempt == C_MAX_ATTEMPTS: + raise Exception("Could not stop node.") + + nAttempt += 1 + + if nAttempt > 1: + logging.info("Wait 1 second.") + time.sleep(1) + logging.info("") + + logging.info("Try to stop node. Attempt #{0}.".format(nAttempt)) + + try: + node.stop(wait=False) + break + except ExecUtilException as e: + # it's ok to get this exception here since node + # could be not started yet + logging.info("Node is not stopped. Exception ({0}): {1}".format(type(e).__name__, e)) + continue + + logging.info("OK. Stop command was executed. Let's wait while our node will stop really.") + nAttempt = 0 + while True: + if nAttempt == C_MAX_ATTEMPTS: + raise Exception("Could not stop node.") + + nAttempt += 1 + if nAttempt > 1: + logging.info("Wait 1 second.") + time.sleep(1) + logging.info("") + + logging.info("Attempt #{0}.".format(nAttempt)) + s1 = node.status() + + if s1 == testgres.NodeStatus.Running: + continue + + if s1 == testgres.NodeStatus.Stopped: + break + + raise Exception("Unexpected node status: {0}.".format(s1)) + + logging.info("OK. Node is stopped.") + node.cleanup() def test_replicate(self): with get_new_node() as node: diff --git a/tests/test_simple_remote.py b/tests/test_simple_remote.py index a62085ce..42527dbc 100755 --- a/tests/test_simple_remote.py +++ b/tests/test_simple_remote.py @@ -499,16 +499,61 @@ def test_backup_wrong_xlog_method(self): node.backup(xlog_method='wrong') def test_pg_ctl_wait_option(self): - with __class__.helper__get_node() as node: - node.init().start(wait=False) - while True: - try: - node.stop(wait=False) - break - except ExecUtilException: - # it's ok to get this exception here since node - # could be not started yet - pass + C_MAX_ATTEMPTS = 50 + + node = __class__.helper__get_node() + assert node.status() == testgres.NodeStatus.Uninitialized + node.init() + assert node.status() == testgres.NodeStatus.Stopped + node.start(wait=False) + nAttempt = 0 + while True: + if nAttempt == C_MAX_ATTEMPTS: + raise Exception("Could not stop node.") + + nAttempt += 1 + + if nAttempt > 1: + logging.info("Wait 1 second.") + time.sleep(1) + logging.info("") + + logging.info("Try to stop node. Attempt #{0}.".format(nAttempt)) + + try: + node.stop(wait=False) + break + except ExecUtilException as e: + # it's ok to get this exception here since node + # could be not started yet + logging.info("Node is not stopped. Exception ({0}): {1}".format(type(e).__name__, e)) + continue + + logging.info("OK. Stop command was executed. 
Let's wait while our node will stop really.") + nAttempt = 0 + while True: + if nAttempt == C_MAX_ATTEMPTS: + raise Exception("Could not stop node.") + + nAttempt += 1 + if nAttempt > 1: + logging.info("Wait 1 second.") + time.sleep(1) + logging.info("") + + logging.info("Attempt #{0}.".format(nAttempt)) + s1 = node.status() + + if s1 == testgres.NodeStatus.Running: + continue + + if s1 == testgres.NodeStatus.Stopped: + break + + raise Exception("Unexpected node status: {0}.".format(s1)) + + logging.info("OK. Node is stopped.") + node.cleanup() def test_replicate(self): with __class__.helper__get_node() as node: From e47cded2a784bfbc64c9a70aceedfe26f9a3b802 Mon Sep 17 00:00:00 2001 From: "d.kovalenko" Date: Tue, 11 Mar 2025 10:27:59 +0300 Subject: [PATCH 25/90] [remote_ops] A problem with mktemp on Alpine Linux is fixed Five 'X' in template is not enough - Alpine returns "mktemp: : Invalid argument" error. Six 'X' is OK. --- testgres/operations/remote_ops.py | 4 ++-- tests/test_local.py | 17 +++++++++++++++++ tests/test_remote.py | 17 +++++++++++++++++ 3 files changed, 36 insertions(+), 2 deletions(-) diff --git a/testgres/operations/remote_ops.py b/testgres/operations/remote_ops.py index 767df567..11d9cd37 100644 --- a/testgres/operations/remote_ops.py +++ b/testgres/operations/remote_ops.py @@ -316,7 +316,7 @@ def mkdtemp(self, prefix=None): - prefix (str): The prefix of the temporary directory name. """ if prefix: - command = ["mktemp", "-d", "-t", prefix + "XXXXX"] + command = ["mktemp", "-d", "-t", prefix + "XXXXXX"] else: command = ["mktemp", "-d"] @@ -344,7 +344,7 @@ def mkstemp(self, prefix=None): - prefix (str): The prefix of the temporary directory name. """ if prefix: - command = ["mktemp", "-t", prefix + "XXXXX"] + command = ["mktemp", "-t", prefix + "XXXXXX"] else: command = ["mktemp"] diff --git a/tests/test_local.py b/tests/test_local.py index ee5e19a0..826c3f51 100644 --- a/tests/test_local.py +++ b/tests/test_local.py @@ -4,6 +4,7 @@ import pytest import re import tempfile +import logging from ..testgres import ExecUtilException from ..testgres import InvalidOperationException @@ -18,6 +19,22 @@ class TestLocalOperations: def setup(self): self.operations = LocalOperations() + def test_mkdtemp__default(self): + path = self.operations.mkdtemp() + logging.info("Path is [{0}].".format(path)) + assert os.path.exists(path) + os.rmdir(path) + assert not os.path.exists(path) + + def test_mkdtemp__custom(self): + C_TEMPLATE = "abcdef" + path = self.operations.mkdtemp(C_TEMPLATE) + logging.info("Path is [{0}].".format(path)) + assert os.path.exists(path) + assert C_TEMPLATE in os.path.basename(path) + os.rmdir(path) + assert not os.path.exists(path) + def test_exec_command_success(self): """ Test exec_command for successful command execution. 
diff --git a/tests/test_remote.py b/tests/test_remote.py index b1c4e58c..17c76c2c 100755 --- a/tests/test_remote.py +++ b/tests/test_remote.py @@ -4,6 +4,7 @@ import pytest import re import tempfile +import logging from ..testgres import ExecUtilException from ..testgres import InvalidOperationException @@ -110,6 +111,22 @@ def test_makedirs_failure(self): with pytest.raises(Exception): self.operations.makedirs(path) + def test_mkdtemp__default(self): + path = self.operations.mkdtemp() + logging.info("Path is [{0}].".format(path)) + assert os.path.exists(path) + os.rmdir(path) + assert not os.path.exists(path) + + def test_mkdtemp__custom(self): + C_TEMPLATE = "abcdef" + path = self.operations.mkdtemp(C_TEMPLATE) + logging.info("Path is [{0}].".format(path)) + assert os.path.exists(path) + assert C_TEMPLATE in os.path.basename(path) + os.rmdir(path) + assert not os.path.exists(path) + def test_rmdirs(self): path = self.operations.mkdtemp() assert os.path.exists(path) From cf6f4cc6f6111a07302e4f6644baccbf58bb158a Mon Sep 17 00:00:00 2001 From: "d.kovalenko" Date: Tue, 11 Mar 2025 11:38:34 +0300 Subject: [PATCH 26/90] [remote] Tests are updated to support Alpine Linux --- tests/test_local.py | 16 +++++++++++++--- tests/test_remote.py | 36 ++++++++++++++++++++++++++++-------- tests/test_simple_remote.py | 10 +++++++++- 3 files changed, 50 insertions(+), 12 deletions(-) diff --git a/tests/test_local.py b/tests/test_local.py index 826c3f51..68e7db33 100644 --- a/tests/test_local.py +++ b/tests/test_local.py @@ -57,9 +57,17 @@ def test_exec_command_failure(self): try: self.operations.exec_command(cmd, wait_exit=True, shell=True) except ExecUtilException as e: - assert e.message == "Utility exited with non-zero code (127). Error: `/bin/sh: 1: nonexistent_command: not found`" + assert type(e.exit_code) == int # noqa: E721 + assert e.exit_code == 127 + + assert type(e.message) == str # noqa: E721 assert type(e.error) == bytes # noqa: E721 - assert e.error.strip() == b"/bin/sh: 1: nonexistent_command: not found" + + assert e.message.startswith("Utility exited with non-zero code (127). Error:") + assert "nonexistent_command" in e.message + assert "not found" in e.message + assert b"nonexistent_command" in e.error + assert b"not found" in e.error break raise Exception("We wait an exception!") @@ -73,9 +81,11 @@ def test_exec_command_failure__expect_error(self): exit_status, result, error = self.operations.exec_command(cmd, verbose=True, wait_exit=True, shell=True, expect_error=True) - assert error == b'/bin/sh: 1: nonexistent_command: not found\n' assert exit_status == 127 assert result == b'' + assert type(error) == bytes # noqa: E721 + assert b"nonexistent_command" in error + assert b"not found" in error def test_read__text(self): """ diff --git a/tests/test_remote.py b/tests/test_remote.py index 17c76c2c..1f771c62 100755 --- a/tests/test_remote.py +++ b/tests/test_remote.py @@ -41,9 +41,17 @@ def test_exec_command_failure(self): try: self.operations.exec_command(cmd, verbose=True, wait_exit=True) except ExecUtilException as e: - assert e.message == "Utility exited with non-zero code (127). Error: `bash: line 1: nonexistent_command: command not found`" + assert type(e.exit_code) == int # noqa: E721 + assert e.exit_code == 127 + + assert type(e.message) == str # noqa: E721 assert type(e.error) == bytes # noqa: E721 - assert e.error.strip() == b"bash: line 1: nonexistent_command: command not found" + + assert e.message.startswith("Utility exited with non-zero code (127). 
Error:") + assert "nonexistent_command" in e.message + assert "not found" in e.message + assert b"nonexistent_command" in e.error + assert b"not found" in e.error break raise Exception("We wait an exception!") @@ -55,9 +63,11 @@ def test_exec_command_failure__expect_error(self): exit_status, result, error = self.operations.exec_command(cmd, verbose=True, wait_exit=True, shell=True, expect_error=True) - assert error == b'bash: line 1: nonexistent_command: command not found\n' assert exit_status == 127 assert result == b'' + assert type(error) == bytes # noqa: E721 + assert b"nonexistent_command" in error + assert b"not found" in error def test_is_executable_true(self): """ @@ -344,11 +354,13 @@ def test_read__unknown_file(self): Test RemoteOperations::read with unknown file. """ - with pytest.raises( - ExecUtilException, - match=re.escape("cat: /dummy: No such file or directory")): + with pytest.raises(ExecUtilException) as x: self.operations.read("/dummy") + assert "Utility exited with non-zero code (1)." in str(x.value) + assert "No such file or directory" in str(x.value) + assert "/dummy" in str(x.value) + def test_read_binary__spec(self): """ Test RemoteOperations::read_binary. @@ -388,9 +400,13 @@ def test_read_binary__spec__unk_file(self): Test RemoteOperations::read_binary with unknown file. """ - with pytest.raises(ExecUtilException, match=re.escape("tail: cannot open '/dummy' for reading: No such file or directory")): + with pytest.raises(ExecUtilException) as x: self.operations.read_binary("/dummy", 0) + assert "Utility exited with non-zero code (1)." in str(x.value) + assert "No such file or directory" in str(x.value) + assert "/dummy" in str(x.value) + def test_read_binary__spec__negative_offset(self): """ Test RemoteOperations::read_binary with negative offset. @@ -419,9 +435,13 @@ def test_get_file_size__unk_file(self): Test RemoteOperations::get_file_size. """ - with pytest.raises(ExecUtilException, match=re.escape("du: cannot access '/dummy': No such file or directory")): + with pytest.raises(ExecUtilException) as x: self.operations.get_file_size("/dummy") + assert "Utility exited with non-zero code (1)." in str(x.value) + assert "No such file or directory" in str(x.value) + assert "/dummy" in str(x.value) + def test_touch(self): """ Test touch for creating a new file or updating access and modification times of an existing file. 
diff --git a/tests/test_simple_remote.py b/tests/test_simple_remote.py index 42527dbc..cdad161c 100755 --- a/tests/test_simple_remote.py +++ b/tests/test_simple_remote.py @@ -163,6 +163,8 @@ def test_init__unk_LANG_and_LC_CTYPE(self): ("\"", "\""), ] + errorIsDetected = False + for unkData in unkDatas: logging.info("----------------------") logging.info("Unk LANG is [{0}]".format(unkData[0])) @@ -193,7 +195,10 @@ def test_init__unk_LANG_and_LC_CTYPE(self): assert isinstance(exc, ExecUtilException) if exc is None: - raise Exception("We expected an error!") + logging.warning("We expected an error!") + continue + + errorIsDetected = True assert isinstance(exc, ExecUtilException) @@ -204,6 +209,9 @@ def test_init__unk_LANG_and_LC_CTYPE(self): assert "initdb: error: invalid locale settings; check LANG and LC_* environment variables" in errMsg continue + if not errorIsDetected: + pytest.xfail("All the bad data are processed without errors!") + finally: __class__.helper__restore_envvar("LANG", prev_LANG) __class__.helper__restore_envvar("LANGUAGE", prev_LANGUAGE) From 44f280bc7aa51cce5374fb19c1c234b379e4fbf0 Mon Sep 17 00:00:00 2001 From: Dmitry Kovalenko Date: Tue, 11 Mar 2025 18:19:12 +0300 Subject: [PATCH 27/90] [CI] A run of all tests (local, remote) on Alpine Linux [PY3, PG17] is added (#210) * Dockerfile to run all tests on Alpine Linux * A run of all the tests on Alpine Linux [PY3, PG17] is added * [CI] The run of [STD, PY3, PG17] is removed It was replaced with [ALPINE, PY3, PG17]. * Test platform "alpine" was renamed with "std.all" --- .travis.yml | 2 +- Dockerfile--std.all.tmpl | 60 ++++++++++++++++++++++++++++++++++++++++ 2 files changed, 61 insertions(+), 1 deletion(-) create mode 100644 Dockerfile--std.all.tmpl diff --git a/.travis.yml b/.travis.yml index 4110835a..7fb34808 100644 --- a/.travis.yml +++ b/.travis.yml @@ -20,7 +20,6 @@ notifications: on_failure: always env: - - TEST_PLATFORM=std PYTHON_VERSION=3 PG_VERSION=17 - TEST_PLATFORM=std PYTHON_VERSION=3 PG_VERSION=16 - TEST_PLATFORM=std PYTHON_VERSION=3 PG_VERSION=15 - TEST_PLATFORM=std PYTHON_VERSION=3 PG_VERSION=14 @@ -28,6 +27,7 @@ env: - TEST_PLATFORM=std PYTHON_VERSION=3 PG_VERSION=12 - TEST_PLATFORM=std PYTHON_VERSION=3 PG_VERSION=11 - TEST_PLATFORM=std PYTHON_VERSION=3 PG_VERSION=10 + - TEST_PLATFORM=std.all PYTHON_VERSION=3 PG_VERSION=17 - TEST_PLATFORM=ubuntu-24_04 PYTHON_VERSION=3 PG_VERSION=17 matrix: diff --git a/Dockerfile--std.all.tmpl b/Dockerfile--std.all.tmpl new file mode 100644 index 00000000..dfd9ab20 --- /dev/null +++ b/Dockerfile--std.all.tmpl @@ -0,0 +1,60 @@ +FROM postgres:${PG_VERSION}-alpine + +ENV PYTHON=python${PYTHON_VERSION} +RUN if [ "${PYTHON_VERSION}" = "2" ] ; then \ + apk add --no-cache curl python2 python2-dev build-base musl-dev \ + linux-headers py-virtualenv py-pip; \ + fi +RUN if [ "${PYTHON_VERSION}" = "3" ] ; then \ + apk add --no-cache curl python3 python3-dev build-base musl-dev \ + linux-headers py-virtualenv; \ + fi + +#RUN apk add --no-cache mc + +# Full version of "ps" command +RUN apk add --no-cache procps + +RUN apk add --no-cache openssh +RUN apk add --no-cache sudo + +ENV LANG=C.UTF-8 + +RUN addgroup -S sudo +RUN adduser postgres sudo + +EXPOSE 22 +RUN ssh-keygen -A + +ADD . 
/pg/testgres +WORKDIR /pg/testgres +RUN chown -R postgres:postgres /pg + +# It allows to use sudo without password +RUN sh -c "echo \"postgres ALL=(ALL:ALL) NOPASSWD:ALL\"">>/etc/sudoers + +# THIS CMD IS NEEDED TO CONNECT THROUGH SSH WITHOUT PASSWORD +RUN sh -c "echo "postgres:*" | chpasswd -e" + +USER postgres + +# THIS CMD IS NEEDED TO CONNECT THROUGH SSH WITHOUT PASSWORD +RUN chmod 700 ~/ + +RUN mkdir -p ~/.ssh +#RUN chmod 700 ~/.ssh + +#ENTRYPOINT PYTHON_VERSION=${PYTHON_VERSION} bash run_tests.sh + +ENTRYPOINT sh -c " \ +set -eux; \ +echo HELLO FROM ENTRYPOINT; \ +echo HOME DIR IS [`realpath ~/`]; \ +ssh-keygen -t rsa -f ~/.ssh/id_rsa -q -N ''; \ +cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys; \ +chmod 600 ~/.ssh/authorized_keys; \ +ls -la ~/.ssh/; \ +sudo /usr/sbin/sshd; \ +ssh-keyscan -H localhost >> ~/.ssh/known_hosts; \ +ssh-keyscan -H 127.0.0.1 >> ~/.ssh/known_hosts; \ +TEST_FILTER=\"\" PYTHON_VERSION=${PYTHON_VERSION} bash run_tests.sh;" From 600572857f100c37218d29ec5c4d6fa8c6d9d6c6 Mon Sep 17 00:00:00 2001 From: Dmitry Kovalenko Date: Tue, 11 Mar 2025 21:00:07 +0300 Subject: [PATCH 28/90] PsUtilProcessProxy is updated (refactoring) (#212) --- testgres/operations/remote_ops.py | 18 +++++++++++++----- 1 file changed, 13 insertions(+), 5 deletions(-) diff --git a/testgres/operations/remote_ops.py b/testgres/operations/remote_ops.py index 11d9cd37..d6917c82 100644 --- a/testgres/operations/remote_ops.py +++ b/testgres/operations/remote_ops.py @@ -26,17 +26,25 @@ class PsUtilProcessProxy: def __init__(self, ssh, pid): + assert isinstance(ssh, RemoteOperations) + assert type(pid) == int # noqa: E721 self.ssh = ssh self.pid = pid def kill(self): - command = "kill {}".format(self.pid) - self.ssh.exec_command(command) + assert isinstance(self.ssh, RemoteOperations) + assert type(self.pid) == int # noqa: E721 + command = ["kill", str(self.pid)] + self.ssh.exec_command(command, encoding=get_default_encoding()) def cmdline(self): - command = "ps -p {} -o cmd --no-headers".format(self.pid) - stdin, stdout, stderr = self.ssh.exec_command(command, verbose=True, encoding=get_default_encoding()) - cmdline = stdout.strip() + assert isinstance(self.ssh, RemoteOperations) + assert type(self.pid) == int # noqa: E721 + command = ["ps", "-p", str(self.pid), "-o", "cmd", "--no-headers"] + output = self.ssh.exec_command(command, encoding=get_default_encoding()) + assert type(output) == str # noqa: E721 + cmdline = output.strip() + # TODO: This code work wrong if command line contains quoted values. Yes? 
return cmdline.split() From 1a4655cb9011c3a01cd0e7d56606ba36fff40e43 Mon Sep 17 00:00:00 2001 From: Dmitry Kovalenko Date: Tue, 11 Mar 2025 21:46:10 +0300 Subject: [PATCH 29/90] Dockerfile for Ubuntu 2024.04 is updated (#211) * Dockerfile for Ubuntu 2024.04 updated - Using "chmod 700 ~/" instead "chmod go-w /var/lib/postgresql" - "~/.ssh" folder is prepared at image level - Entrypoint startup code is cleaned from trace messages * Dockerfile--ubuntu-24_04.tmpl is updated (double quotes) --- Dockerfile--ubuntu-24_04.tmpl | 19 ++++--------------- 1 file changed, 4 insertions(+), 15 deletions(-) diff --git a/Dockerfile--ubuntu-24_04.tmpl b/Dockerfile--ubuntu-24_04.tmpl index 99be5343..fd1136d8 100644 --- a/Dockerfile--ubuntu-24_04.tmpl +++ b/Dockerfile--ubuntu-24_04.tmpl @@ -37,33 +37,22 @@ RUN ssh-keygen -A RUN sh -c "echo postgres ALL=NOPASSWD:/usr/sbin/service ssh start" >> /etc/sudoers USER postgres - ENV LANG=C.UTF-8 +RUN chmod 700 ~/ +RUN mkdir -p ~/.ssh + #ENTRYPOINT PYTHON_VERSION=3.12 /run.sh ENTRYPOINT sh -c " \ #set -eux; \ echo HELLO FROM ENTRYPOINT; \ echo HOME DIR IS [`realpath ~/`]; \ -echo POINT 1; \ -chmod go-w /var/lib/postgresql; \ -echo POINT 1.5; \ -mkdir -p ~/.ssh; \ -echo POINT 2; \ service ssh enable; \ -echo POINT 3; \ sudo service ssh start; \ -echo POINT 4; \ ssh-keyscan -H localhost >> ~/.ssh/known_hosts; \ -echo POINT 5; \ ssh-keyscan -H 127.0.0.1 >> ~/.ssh/known_hosts; \ -echo POINT 6; \ ssh-keygen -t rsa -f ~/.ssh/id_rsa -q -N ''; \ -echo ----; \ cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys; \ -echo ----; \ chmod 600 ~/.ssh/authorized_keys; \ -echo ----; \ ls -la ~/.ssh/; \ -echo ----; \ -TEST_FILTER="" PYTHON_VERSION=${PYTHON_VERSION} bash run_tests.sh;" +TEST_FILTER=\"\" PYTHON_VERSION=${PYTHON_VERSION} bash run_tests.sh;" From 438e84508c7c0b3d9f05336ffdd1df44ba23c36b Mon Sep 17 00:00:00 2001 From: Dmitry Kovalenko Date: Wed, 12 Mar 2025 08:48:16 +0300 Subject: [PATCH 30/90] test_pg_ctl_wait_option is updated (#213) When node is not stopped, we read and output a content of node log file to provide an additional information about this problem. It should help find a reason of unexpected problem with this test in CI. --- tests/test_simple.py | 13 +++++++++++++ tests/test_simple_remote.py | 13 +++++++++++++ 2 files changed, 26 insertions(+) diff --git a/tests/test_simple.py b/tests/test_simple.py index d9844fed..e13cf095 100644 --- a/tests/test_simple.py +++ b/tests/test_simple.py @@ -438,6 +438,19 @@ def test_pg_ctl_wait_option(self): nAttempt = 0 while True: if nAttempt == C_MAX_ATTEMPTS: + # + # [2025-03-11] + # We have an unexpected problem with this test in CI + # Let's get an additional information about this test failure. + # + logging.error("Node was not stopped.") + if not node.os_ops.path_exists(node.pg_log_file): + logging.warning("Node log does not exist.") + else: + logging.info("Let's read node log file [{0}]".format(node.pg_log_file)) + logFileData = node.os_ops.read(node.pg_log_file, binary=False) + logging.info("Node log file content:\n{0}".format(logFileData)) + raise Exception("Could not stop node.") nAttempt += 1 diff --git a/tests/test_simple_remote.py b/tests/test_simple_remote.py index cdad161c..d484f1e3 100755 --- a/tests/test_simple_remote.py +++ b/tests/test_simple_remote.py @@ -517,6 +517,19 @@ def test_pg_ctl_wait_option(self): nAttempt = 0 while True: if nAttempt == C_MAX_ATTEMPTS: + # + # [2025-03-11] + # We have an unexpected problem with this test in CI + # Let's get an additional information about this test failure. 
+ # + logging.error("Node was not stopped.") + if not node.os_ops.path_exists(node.pg_log_file): + logging.warning("Node log does not exist.") + else: + logging.info("Let's read node log file [{0}]".format(node.pg_log_file)) + logFileData = node.os_ops.read(node.pg_log_file, binary=False) + logging.info("Node log file content:\n{0}".format(logFileData)) + raise Exception("Could not stop node.") nAttempt += 1 From f2c000c28e4f250c5dc319be557217d8433306cd Mon Sep 17 00:00:00 2001 From: Victoria Shepard <5807469+demonolock@users.noreply.github.com> Date: Wed, 12 Mar 2025 06:52:03 +0100 Subject: [PATCH 31/90] Fix auto conf test (#214) * Fix test_set_auto_conf for Postgresql 10, 11 * Remove allow failures --------- Co-authored-by: vshepard --- .travis.yml | 5 ----- tests/test_simple.py | 7 ++++--- 2 files changed, 4 insertions(+), 8 deletions(-) diff --git a/.travis.yml b/.travis.yml index 7fb34808..997945b5 100644 --- a/.travis.yml +++ b/.travis.yml @@ -29,8 +29,3 @@ env: - TEST_PLATFORM=std PYTHON_VERSION=3 PG_VERSION=10 - TEST_PLATFORM=std.all PYTHON_VERSION=3 PG_VERSION=17 - TEST_PLATFORM=ubuntu-24_04 PYTHON_VERSION=3 PG_VERSION=17 - -matrix: - allow_failures: - - env: TEST_PLATFORM=std PYTHON_VERSION=3 PG_VERSION=11 - - env: TEST_PLATFORM=std PYTHON_VERSION=3 PG_VERSION=10 diff --git a/tests/test_simple.py b/tests/test_simple.py index e13cf095..e886a39c 100644 --- a/tests/test_simple.py +++ b/tests/test_simple.py @@ -1464,9 +1464,6 @@ def test_set_auto_conf(self): ["archive_command", "cp '%p' \"/mnt/server/archivedir/%f\"", "'cp \\'%p\\' \"/mnt/server/archivedir/%f\""], - ["restore_command", - 'cp "/mnt/server/archivedir/%f" \'%p\'', - "'cp \"/mnt/server/archivedir/%f\" \\'%p\\''"], ["log_line_prefix", "'\n\r\t\b\\\"", "'\\\'\\n\\r\\t\\b\\\\\""], @@ -1480,6 +1477,10 @@ def test_set_auto_conf(self): 3, "3"] ] + if pg_version_ge('12'): + testData.append(["restore_command", + 'cp "/mnt/server/archivedir/%f" \'%p\'', + "'cp \"/mnt/server/archivedir/%f\" \\'%p\\''"]) with get_new_node() as node: node.init().start() From e3eb6ae1a8ed63a6df516e59e406b674f4e5ea96 Mon Sep 17 00:00:00 2001 From: Dmitry Kovalenko Date: Wed, 12 Mar 2025 21:49:38 +0300 Subject: [PATCH 32/90] Refactoring of dockerfiles (#215) * Refactoring of dockerfiles Let's to try using a pure dockerfile' logic. It is the fist step. - We use docker instead docker-composer - We upgrade and use only "std" plaform. Other plaforms will be upgraded later * It is better to run a docker with "-t" options. This allows the colors in output data. * A problem with PYTHON_VERSION is fixed * Dockerfile--std.all.tmpl is updated * Dockerfile--ubuntu-24_04.tmpl * Dockerfile--std.tmpl is updated (formatting) * docker-compose.yml and mk_dockerfile.sh are deleted * [CI] Platform "std.all" was renamed with "std_all" Let's avoid using a "point" symbol in name of file. This symbol may create a problem in the future if we decide to use configuration docker files without extensions. 
* [CI] Platform name has the one format Dockerfiles--[-].tmpl - "std_all" -> "std-all" - "ubuntu-24_04" -> "ubuntu_24_04" * Dockerfile--ubuntu_24_04.tmpl is updated (minimization) --- .travis.yml | 9 ++-- ...--std.all.tmpl => Dockerfile--std-all.tmpl | 31 ++++++++------ Dockerfile--std.tmpl | 30 +++++++++----- ...4_04.tmpl => Dockerfile--ubuntu_24_04.tmpl | 41 ++++++++++++------- docker-compose.yml | 4 -- mk_dockerfile.sh | 2 - 6 files changed, 69 insertions(+), 48 deletions(-) rename Dockerfile--std.all.tmpl => Dockerfile--std-all.tmpl (60%) rename Dockerfile--ubuntu-24_04.tmpl => Dockerfile--ubuntu_24_04.tmpl (71%) delete mode 100644 docker-compose.yml delete mode 100755 mk_dockerfile.sh diff --git a/.travis.yml b/.travis.yml index 997945b5..3a889845 100644 --- a/.travis.yml +++ b/.travis.yml @@ -8,11 +8,10 @@ services: - docker install: - - ./mk_dockerfile.sh - - docker-compose build + - docker build --build-arg PG_VERSION="${PG_VERSION}" --build-arg PYTHON_VERSION="${PYTHON_VERSION}" -t tests -f Dockerfile--${TEST_PLATFORM}.tmpl . script: - - docker-compose run $(bash <(curl -s https://codecov.io/env)) tests + - docker run $(bash <(curl -s https://codecov.io/env)) -t tests notifications: email: @@ -27,5 +26,5 @@ env: - TEST_PLATFORM=std PYTHON_VERSION=3 PG_VERSION=12 - TEST_PLATFORM=std PYTHON_VERSION=3 PG_VERSION=11 - TEST_PLATFORM=std PYTHON_VERSION=3 PG_VERSION=10 - - TEST_PLATFORM=std.all PYTHON_VERSION=3 PG_VERSION=17 - - TEST_PLATFORM=ubuntu-24_04 PYTHON_VERSION=3 PG_VERSION=17 + - TEST_PLATFORM=std-all PYTHON_VERSION=3 PG_VERSION=17 + - TEST_PLATFORM=ubuntu_24_04 PYTHON_VERSION=3 PG_VERSION=17 diff --git a/Dockerfile--std.all.tmpl b/Dockerfile--std-all.tmpl similarity index 60% rename from Dockerfile--std.all.tmpl rename to Dockerfile--std-all.tmpl index dfd9ab20..c41c5a06 100644 --- a/Dockerfile--std.all.tmpl +++ b/Dockerfile--std-all.tmpl @@ -1,14 +1,21 @@ -FROM postgres:${PG_VERSION}-alpine - -ENV PYTHON=python${PYTHON_VERSION} -RUN if [ "${PYTHON_VERSION}" = "2" ] ; then \ - apk add --no-cache curl python2 python2-dev build-base musl-dev \ - linux-headers py-virtualenv py-pip; \ - fi -RUN if [ "${PYTHON_VERSION}" = "3" ] ; then \ - apk add --no-cache curl python3 python3-dev build-base musl-dev \ - linux-headers py-virtualenv; \ - fi +ARG PG_VERSION +ARG PYTHON_VERSION + +# --------------------------------------------- base1 +FROM postgres:${PG_VERSION}-alpine as base1 + +# --------------------------------------------- base2_with_python-2 +FROM base1 as base2_with_python-2 +RUN apk add --no-cache curl python2 python2-dev build-base musl-dev linux-headers py-virtualenv py-pip +ENV PYTHON_VERSION=2 + +# --------------------------------------------- base2_with_python-3 +FROM base1 as base2_with_python-3 +RUN apk add --no-cache curl python3 python3-dev build-base musl-dev linux-headers py-virtualenv +ENV PYTHON_VERSION=3 + +# --------------------------------------------- final +FROM base2_with_python-${PYTHON_VERSION} as final #RUN apk add --no-cache mc @@ -57,4 +64,4 @@ ls -la ~/.ssh/; \ sudo /usr/sbin/sshd; \ ssh-keyscan -H localhost >> ~/.ssh/known_hosts; \ ssh-keyscan -H 127.0.0.1 >> ~/.ssh/known_hosts; \ -TEST_FILTER=\"\" PYTHON_VERSION=${PYTHON_VERSION} bash run_tests.sh;" +TEST_FILTER=\"\" bash run_tests.sh;" diff --git a/Dockerfile--std.tmpl b/Dockerfile--std.tmpl index d844c9a3..91886ede 100644 --- a/Dockerfile--std.tmpl +++ b/Dockerfile--std.tmpl @@ -1,14 +1,22 @@ -FROM postgres:${PG_VERSION}-alpine +ARG PG_VERSION +ARG PYTHON_VERSION + +# 
--------------------------------------------- base1 +FROM postgres:${PG_VERSION}-alpine as base1 + +# --------------------------------------------- base2_with_python-2 +FROM base1 as base2_with_python-2 +RUN apk add --no-cache curl python2 python2-dev build-base musl-dev linux-headers py-virtualenv py-pip +ENV PYTHON_VERSION=2 + +# --------------------------------------------- base2_with_python-3 +FROM base1 as base2_with_python-3 +RUN apk add --no-cache curl python3 python3-dev build-base musl-dev linux-headers py-virtualenv +ENV PYTHON_VERSION=3 + +# --------------------------------------------- final +FROM base2_with_python-${PYTHON_VERSION} as final -ENV PYTHON=python${PYTHON_VERSION} -RUN if [ "${PYTHON_VERSION}" = "2" ] ; then \ - apk add --no-cache curl python2 python2-dev build-base musl-dev \ - linux-headers py-virtualenv py-pip; \ - fi -RUN if [ "${PYTHON_VERSION}" = "3" ] ; then \ - apk add --no-cache curl python3 python3-dev build-base musl-dev \ - linux-headers py-virtualenv; \ - fi ENV LANG=C.UTF-8 ADD . /pg/testgres @@ -16,4 +24,4 @@ WORKDIR /pg/testgres RUN chown -R postgres:postgres /pg USER postgres -ENTRYPOINT PYTHON_VERSION=${PYTHON_VERSION} bash run_tests.sh +ENTRYPOINT bash run_tests.sh diff --git a/Dockerfile--ubuntu-24_04.tmpl b/Dockerfile--ubuntu_24_04.tmpl similarity index 71% rename from Dockerfile--ubuntu-24_04.tmpl rename to Dockerfile--ubuntu_24_04.tmpl index fd1136d8..c1ddeab6 100644 --- a/Dockerfile--ubuntu-24_04.tmpl +++ b/Dockerfile--ubuntu_24_04.tmpl @@ -1,7 +1,17 @@ -FROM ubuntu:24.04 +ARG PG_VERSION +ARG PYTHON_VERSION + +# --------------------------------------------- base1 +FROM ubuntu:24.04 as base1 +ARG PG_VERSION + +RUN apt update +RUN apt install -y sudo curl ca-certificates +RUN apt update +RUN apt install -y openssh-server RUN apt update -RUN apt install -y sudo curl ca-certificates postgresql-common +RUN apt install -y postgresql-common RUN bash /usr/share/postgresql-common/pgdg/apt.postgresql.org.sh -y @@ -14,21 +24,12 @@ RUN curl -o /usr/share/postgresql-common/pgdg/apt.postgresql.org.asc --fail http RUN apt update RUN apt install -y postgresql-${PG_VERSION} -RUN apt install -y python3 python3-dev python3-virtualenv # RUN apt install -y mc -# It is required for psycopg2 -RUN apt install -y libpq-dev -RUN apt install -y openssh-server - # [2025-02-26] It adds the user 'postgres' in the group 'sudo' # [2025-02-27] It is not required. # RUN adduser postgres sudo -ADD . /pg/testgres -WORKDIR /pg/testgres -RUN chown -R postgres /pg - EXPOSE 22 RUN ssh-keygen -A @@ -36,13 +37,25 @@ RUN ssh-keygen -A # It enables execution of "sudo service ssh start" without password RUN sh -c "echo postgres ALL=NOPASSWD:/usr/sbin/service ssh start" >> /etc/sudoers -USER postgres +# --------------------------------------------- base2_with_python-3 +FROM base1 as base2_with_python-3 +RUN apt install -y python3 python3-dev python3-virtualenv libpq-dev +ENV PYTHON_VERSION=3 + +# --------------------------------------------- final +FROM base2_with_python-${PYTHON_VERSION} as final + +ADD . 
/pg/testgres +WORKDIR /pg/testgres +RUN chown -R postgres /pg + ENV LANG=C.UTF-8 +USER postgres + RUN chmod 700 ~/ RUN mkdir -p ~/.ssh -#ENTRYPOINT PYTHON_VERSION=3.12 /run.sh ENTRYPOINT sh -c " \ #set -eux; \ echo HELLO FROM ENTRYPOINT; \ @@ -55,4 +68,4 @@ ssh-keygen -t rsa -f ~/.ssh/id_rsa -q -N ''; \ cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys; \ chmod 600 ~/.ssh/authorized_keys; \ ls -la ~/.ssh/; \ -TEST_FILTER=\"\" PYTHON_VERSION=${PYTHON_VERSION} bash run_tests.sh;" +TEST_FILTER=\"\" bash ./run_tests.sh;" diff --git a/docker-compose.yml b/docker-compose.yml deleted file mode 100644 index 86edf9a4..00000000 --- a/docker-compose.yml +++ /dev/null @@ -1,4 +0,0 @@ -version: '3.8' -services: - tests: - build: . diff --git a/mk_dockerfile.sh b/mk_dockerfile.sh deleted file mode 100755 index 8f7876a3..00000000 --- a/mk_dockerfile.sh +++ /dev/null @@ -1,2 +0,0 @@ -set -eu -sed -e 's/${PYTHON_VERSION}/'${PYTHON_VERSION}/g -e 's/${PG_VERSION}/'${PG_VERSION}/g Dockerfile--${TEST_PLATFORM}.tmpl > Dockerfile From 230e5620d68ffef92f31fe67b4ed30881950847d Mon Sep 17 00:00:00 2001 From: Dmitry Kovalenko Date: Fri, 14 Mar 2025 00:10:11 +0300 Subject: [PATCH 33/90] RemoteOperations::listdir is corrected (#217) - It returns list[str] - New asserts are added Tests for listdir are updated. --- testgres/operations/remote_ops.py | 8 ++++++-- tests/test_local.py | 11 +++++++++++ tests/test_remote.py | 4 +++- 3 files changed, 20 insertions(+), 3 deletions(-) diff --git a/testgres/operations/remote_ops.py b/testgres/operations/remote_ops.py index d6917c82..60161e3c 100644 --- a/testgres/operations/remote_ops.py +++ b/testgres/operations/remote_ops.py @@ -277,8 +277,12 @@ def listdir(self, path): Args: path (str): The path to the directory. """ - result = self.exec_command("ls {}".format(path)) - return result.splitlines() + command = ["ls", path] + output = self.exec_command(cmd=command, encoding=get_default_encoding()) + assert type(output) == str # noqa: E721 + result = output.splitlines() + assert type(result) == list # noqa: E721 + return result def path_exists(self, path): command = ["test", "-e", path] diff --git a/tests/test_local.py b/tests/test_local.py index 68e7db33..3ae93f76 100644 --- a/tests/test_local.py +++ b/tests/test_local.py @@ -87,6 +87,17 @@ def test_exec_command_failure__expect_error(self): assert b"nonexistent_command" in error assert b"not found" in error + def test_listdir(self): + """ + Test listdir for listing directory contents. + """ + path = "/etc" + files = self.operations.listdir(path) + assert isinstance(files, list) + for f in files: + assert f is not None + assert type(f) == str # noqa: E721 + def test_read__text(self): """ Test LocalOperations::read for text data. 
diff --git a/tests/test_remote.py b/tests/test_remote.py index 1f771c62..2c37e2c1 100755 --- a/tests/test_remote.py +++ b/tests/test_remote.py @@ -222,8 +222,10 @@ def test_listdir(self): """ path = "/etc" files = self.operations.listdir(path) - assert isinstance(files, list) + for f in files: + assert f is not None + assert type(f) == str # noqa: E721 def test_path_exists_true__directory(self): """ From cc4361c6fce77df6380aed8e023241b2cdde915f Mon Sep 17 00:00:00 2001 From: "d.kovalenko" Date: Fri, 14 Mar 2025 18:44:28 +0300 Subject: [PATCH 34/90] get_process_children is updated --- testgres/operations/local_ops.py | 1 + testgres/operations/remote_ops.py | 7 ++++--- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/testgres/operations/local_ops.py b/testgres/operations/local_ops.py index 0fa7d0ad..6ae1cf2b 100644 --- a/testgres/operations/local_ops.py +++ b/testgres/operations/local_ops.py @@ -418,6 +418,7 @@ def get_pid(self): return os.getpid() def get_process_children(self, pid): + assert type(pid) == int # noqa: E721 return psutil.Process(pid).children() # Database control diff --git a/testgres/operations/remote_ops.py b/testgres/operations/remote_ops.py index 60161e3c..625a184b 100644 --- a/testgres/operations/remote_ops.py +++ b/testgres/operations/remote_ops.py @@ -599,15 +599,16 @@ def get_pid(self): return int(self.exec_command("echo $$", encoding=get_default_encoding())) def get_process_children(self, pid): - command = ["ssh"] + self.ssh_args + [self.ssh_dest, f"pgrep -P {pid}"] + assert type(pid) == int # noqa: E721 + command = ["ssh"] + self.ssh_args + [self.ssh_dest, "pgrep", "-P", str(pid)] result = subprocess.run(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True) if result.returncode == 0: children = result.stdout.strip().splitlines() return [PsUtilProcessProxy(self, int(child_pid.strip())) for child_pid in children] - else: - raise ExecUtilException(f"Error in getting process children. Error: {result.stderr}") + + raise ExecUtilException(f"Error in getting process children. Error: {result.stderr}") # Database control def db_connect(self, dbname, user, password=None, host="localhost", port=5432): From bab1d8ec8671261787689dec99a4a3ff10499fd7 Mon Sep 17 00:00:00 2001 From: "d.kovalenko" Date: Fri, 14 Mar 2025 18:49:58 +0300 Subject: [PATCH 35/90] os_ops::readlines is updated (revision) --- testgres/operations/local_ops.py | 16 +++++++++++++++- testgres/operations/remote_ops.py | 27 +++++++++++++++++++++++---- 2 files changed, 38 insertions(+), 5 deletions(-) diff --git a/testgres/operations/local_ops.py b/testgres/operations/local_ops.py index 6ae1cf2b..93a64787 100644 --- a/testgres/operations/local_ops.py +++ b/testgres/operations/local_ops.py @@ -352,12 +352,26 @@ def readlines(self, filename, num_lines=0, binary=False, encoding=None): Read lines from a local file. If num_lines is greater than 0, only the last num_lines lines will be read. 
""" + assert type(num_lines) == int # noqa: E721 + assert type(filename) == str # noqa: E721 + assert type(binary) == bool # noqa: E721 + assert encoding is None or type(encoding) == str # noqa: E721 assert num_lines >= 0 + + if binary: + assert encoding is None + pass + elif encoding is None: + encoding = get_default_encoding() + assert type(encoding) == str # noqa: E721 + else: + assert type(encoding) == str # noqa: E721 + pass + mode = 'rb' if binary else 'r' if num_lines == 0: with open(filename, mode, encoding=encoding) as file: # open in binary mode return file.readlines() - else: bufsize = 8192 buffers = 1 diff --git a/testgres/operations/remote_ops.py b/testgres/operations/remote_ops.py index 625a184b..e1ad6dac 100644 --- a/testgres/operations/remote_ops.py +++ b/testgres/operations/remote_ops.py @@ -484,18 +484,37 @@ def _read__binary(self, filename): return content def readlines(self, filename, num_lines=0, binary=False, encoding=None): + assert type(num_lines) == int # noqa: E721 + assert type(filename) == str # noqa: E721 + assert type(binary) == bool # noqa: E721 + assert encoding is None or type(encoding) == str # noqa: E721 + if num_lines > 0: - cmd = "tail -n {} {}".format(num_lines, filename) + cmd = ["tail", "-n", str(num_lines), filename] + else: + cmd = ["cat", filename] + + if binary: + assert encoding is None + pass + elif encoding is None: + encoding = get_default_encoding() + assert type(encoding) == str # noqa: E721 else: - cmd = "cat {}".format(filename) + assert type(encoding) == str # noqa: E721 + pass result = self.exec_command(cmd, encoding=encoding) + assert result is not None - if not binary and result: - lines = result.decode(encoding or get_default_encoding()).splitlines() + if binary: + assert type(result) == bytes # noqa: E721 + lines = result.splitlines() else: + assert type(result) == str # noqa: E721 lines = result.splitlines() + assert type(lines) == list # noqa: E721 return lines def read_binary(self, filename, offset): From c07f1127f110bf7dac28ef6f28cad363d404d551 Mon Sep 17 00:00:00 2001 From: Dmitry Kovalenko Date: Sun, 16 Mar 2025 14:07:04 +0300 Subject: [PATCH 36/90] Initialization of Helpers._get_default_encoding_func is corrected [py3.9] (#221) Python 3.9 does not undestand the following code: _get_default_encoding_func = _make_get_default_encoding_func() ERROR - TypeError: 'staticmethod' object is not callable https://app.travis-ci.com/github/postgrespro/testgres/jobs/631402370 The code: _get_default_encoding_func = _make_get_default_encoding_func.__func__() is processed without problems. --- testgres/operations/helpers.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/testgres/operations/helpers.py b/testgres/operations/helpers.py index 03e97edc..ebbf0f73 100644 --- a/testgres/operations/helpers.py +++ b/testgres/operations/helpers.py @@ -12,7 +12,7 @@ def _make_get_default_encoding_func(): return locale.getpreferredencoding # Prepared pointer on function to get a name of system codepage - _get_default_encoding_func = _make_get_default_encoding_func() + _get_default_encoding_func = _make_get_default_encoding_func.__func__() @staticmethod def GetDefaultEncoding(): From ea114966771982f0a2fabaaeedd8c89bf8ec1feb Mon Sep 17 00:00:00 2001 From: "d.kovalenko" Date: Sun, 16 Mar 2025 18:04:31 +0300 Subject: [PATCH 37/90] testgres.utils.get_pg_version2 is added This a version of get_pg_version that requires and uses an explicit os_ops object. 
--- testgres/utils.py | 16 +++++++++++++--- 1 file changed, 13 insertions(+), 3 deletions(-) diff --git a/testgres/utils.py b/testgres/utils.py index 093eaff6..a988effe 100644 --- a/testgres/utils.py +++ b/testgres/utils.py @@ -203,19 +203,29 @@ def cache_pg_config_data(cmd): return cache_pg_config_data("pg_config") -def get_pg_version(bin_dir=None): +def get_pg_version2(os_ops: OsOperations, bin_dir=None): """ Return PostgreSQL version provided by postmaster. """ + assert os_ops is not None + assert isinstance(os_ops, OsOperations) # Get raw version (e.g., postgres (PostgreSQL) 9.5.7) - postgres_path = os.path.join(bin_dir, 'postgres') if bin_dir else get_bin_path('postgres') + postgres_path = os.path.join(bin_dir, 'postgres') if bin_dir else get_bin_path2(os_ops, 'postgres') _params = [postgres_path, '--version'] - raw_ver = tconf.os_ops.exec_command(_params, encoding='utf-8') + raw_ver = os_ops.exec_command(_params, encoding='utf-8') return parse_pg_version(raw_ver) +def get_pg_version(bin_dir=None): + """ + Return PostgreSQL version provided by postmaster. + """ + + return get_pg_version2(tconf.os_ops, bin_dir) + + def parse_pg_version(version_out): # Generalize removal of system-specific suffixes (anything in parentheses) raw_ver = re.sub(r'\([^)]*\)', '', version_out).strip() From ac0a2bbd373b5d5852bfb3b3ff0ff6caec2c6633 Mon Sep 17 00:00:00 2001 From: "d.kovalenko" Date: Mon, 17 Mar 2025 16:10:55 +0300 Subject: [PATCH 38/90] PostgresNode is updated [os_ops and clone_with_new_name_and_base_dir] 1) Constructor of PostgresNode can get an explicit os_ops object 2) PostgresNode::os_ops property is added 3) New method PostgresNode::clone_with_new_name_and_base_dir is added It is used to right clone an object in NodeBackup::spawn_primary --- testgres/backup.py | 15 +++++++++++-- testgres/node.py | 54 +++++++++++++++++++++++++++++++++++++++------- 2 files changed, 59 insertions(+), 10 deletions(-) diff --git a/testgres/backup.py b/testgres/backup.py index 619c0270..388697b7 100644 --- a/testgres/backup.py +++ b/testgres/backup.py @@ -147,8 +147,19 @@ def spawn_primary(self, name=None, destroy=True): base_dir = self._prepare_dir(destroy) # Build a new PostgresNode - NodeClass = self.original_node.__class__ - with clean_on_error(NodeClass(name=name, base_dir=base_dir, conn_params=self.original_node.os_ops.conn_params)) as node: + assert self.original_node is not None + + if (hasattr(self.original_node, "clone_with_new_name_and_base_dir")): + node = self.original_node.clone_with_new_name_and_base_dir(name=name, base_dir=base_dir) + else: + # For backward compatibility + NodeClass = self.original_node.__class__ + node = NodeClass(name=name, base_dir=base_dir, conn_params=self.original_node.os_ops.conn_params) + + assert node is not None + assert type(node) == self.original_node.__class__ # noqa: E721 + + with clean_on_error(node) as node: # New nodes should always remove dir tree node._should_rm_dirs = True diff --git a/testgres/node.py b/testgres/node.py index 859fe742..6d2417c4 100644 --- a/testgres/node.py +++ b/testgres/node.py @@ -100,6 +100,7 @@ from .backup import NodeBackup from .operations.os_ops import ConnectionParams +from .operations.os_ops import OsOperations from .operations.local_ops import LocalOperations from .operations.remote_ops import RemoteOperations @@ -135,7 +136,7 @@ class PostgresNode(object): _C_MAX_START_ATEMPTS = 5 def __init__(self, name=None, base_dir=None, port=None, conn_params: ConnectionParams = ConnectionParams(), - bin_dir=None, prefix=None): + bin_dir=None, 
prefix=None, os_ops=None): """ PostgresNode constructor. @@ -157,17 +158,20 @@ def __init__(self, name=None, base_dir=None, port=None, conn_params: ConnectionP # basic self.name = name or generate_app_name() - if testgres_config.os_ops: - self.os_ops = testgres_config.os_ops - elif conn_params.ssh_key: - self.os_ops = RemoteOperations(conn_params) + if os_ops is None: + os_ops = __class__._get_os_ops(conn_params) else: - self.os_ops = LocalOperations(conn_params) + assert conn_params is None + pass - self.host = self.os_ops.host + assert os_ops is not None + assert isinstance(os_ops, OsOperations) + self._os_ops = os_ops + + self.host = os_ops.host self.port = port or utils.reserve_port() - self.ssh_key = self.os_ops.ssh_key + self.ssh_key = os_ops.ssh_key # defaults for __exit__() self.cleanup_on_good_exit = testgres_config.node_cleanup_on_good_exit @@ -204,6 +208,40 @@ def __repr__(self): return "{}(name='{}', port={}, base_dir='{}')".format( self.__class__.__name__, self.name, self.port, self.base_dir) + @staticmethod + def _get_os_ops(conn_params: ConnectionParams) -> OsOperations: + if testgres_config.os_ops: + return testgres_config.os_ops + + assert type(conn_params) == ConnectionParams # noqa: E721 + + if conn_params.ssh_key: + return RemoteOperations(conn_params) + + return LocalOperations(conn_params) + + def clone_with_new_name_and_base_dir(self, name: str, base_dir: str): + assert name is None or type(name) == str # noqa: E721 + assert base_dir is None or type(base_dir) == str # noqa: E721 + + assert __class__ == PostgresNode + + node = PostgresNode( + name=name, + base_dir=base_dir, + conn_params=None, + bin_dir=self._bin_dir, + prefix=self._prefix, + os_ops=self._os_ops) + + return node + + @property + def os_ops(self) -> OsOperations: + assert self._os_ops is not None + assert isinstance(self._os_ops, OsOperations) + return self._os_ops + @property def pid(self): """ From dcb7f24592b157297606c29985621a28ebeebeaa Mon Sep 17 00:00:00 2001 From: "d.kovalenko" Date: Mon, 17 Mar 2025 16:34:30 +0300 Subject: [PATCH 39/90] TestTestgresCommon - generic tests for local and remote modes. This commit replaces #216 --- run_tests.sh | 4 +- tests/test_simple.py | 979 ---------------------------- tests/test_simple_remote.py | 993 +---------------------------- tests/test_testgres_common.py | 1131 +++++++++++++++++++++++++++++++++ 4 files changed, 1142 insertions(+), 1965 deletions(-) create mode 100644 tests/test_testgres_common.py diff --git a/run_tests.sh b/run_tests.sh index 021f9d9f..0fecde60 100755 --- a/run_tests.sh +++ b/run_tests.sh @@ -4,7 +4,9 @@ set -eux -if [ -z ${TEST_FILTER+x} ]; then export TEST_FILTER="TestgresTests"; fi +if [ -z ${TEST_FILTER+x} ]; \ +then export TEST_FILTER="TestgresTests or (TestTestgresCommon and (not remote_ops))"; \ +fi # choose python version echo python version is $PYTHON_VERSION diff --git a/tests/test_simple.py b/tests/test_simple.py index e886a39c..f648e558 100644 --- a/tests/test_simple.py +++ b/tests/test_simple.py @@ -2,29 +2,16 @@ import os import re import subprocess -import tempfile -import time -import six import pytest import psutil import platform import logging -import uuid - -from contextlib import contextmanager -from shutil import rmtree from .. 
import testgres from ..testgres import \ - InitNodeException, \ StartNodeException, \ ExecUtilException, \ - BackupException, \ - QueryException, \ - TimeoutException, \ - TestgresException, \ - InvalidOperationException, \ NodeApp from ..testgres import \ @@ -34,9 +21,6 @@ pop_config from ..testgres import \ - NodeStatus, \ - ProcessType, \ - IsolationLevel, \ get_new_node from ..testgres import \ @@ -44,14 +28,9 @@ get_pg_config, \ get_pg_version -from ..testgres import \ - First, \ - Any - # NOTE: those are ugly imports from ..testgres import bound_ports from ..testgres.utils import PgVer, parse_pg_version -from ..testgres.utils import file_tail from ..testgres.node import ProcessProxy @@ -95,17 +74,6 @@ def rm_carriage_returns(out): return out -@contextmanager -def removing(f): - try: - yield f - finally: - if os.path.isfile(f): - os.remove(f) - elif os.path.isdir(f): - rmtree(f, ignore_errors=True) - - class TestgresTests: def test_node_repr(self): with get_new_node() as node: @@ -132,740 +100,6 @@ def test_custom_init(self): # there should be no trust entries at all assert not (any('trust' in s for s in lines)) - def test_double_init(self): - with get_new_node().init() as node: - # can't initialize node more than once - with pytest.raises(expected_exception=InitNodeException): - node.init() - - def test_init_after_cleanup(self): - with get_new_node() as node: - node.init().start().execute('select 1') - node.cleanup() - node.init().start().execute('select 1') - - def test_init_unique_system_id(self): - # this function exists in PostgreSQL 9.6+ - __class__.helper__skip_test_if_util_not_exist("pg_resetwal") - __class__.helper__skip_test_if_pg_version_is_not_ge("9.6") - - query = 'select system_identifier from pg_control_system()' - - with scoped_config(cache_initdb=False): - with get_new_node().init().start() as node0: - id0 = node0.execute(query)[0] - - with scoped_config(cache_initdb=True, - cached_initdb_unique=True) as config: - assert (config.cache_initdb) - assert (config.cached_initdb_unique) - - # spawn two nodes; ids must be different - with get_new_node().init().start() as node1, \ - get_new_node().init().start() as node2: - - id1 = node1.execute(query)[0] - id2 = node2.execute(query)[0] - - # ids must increase - assert (id1 > id0) - assert (id2 > id1) - - def test_node_exit(self): - base_dir = None - - with pytest.raises(expected_exception=QueryException): - with get_new_node().init() as node: - base_dir = node.base_dir - node.safe_psql('select 1') - - # we should save the DB for "debugging" - assert (os.path.exists(base_dir)) - rmtree(base_dir, ignore_errors=True) - - with get_new_node().init() as node: - base_dir = node.base_dir - - # should have been removed by default - assert not (os.path.exists(base_dir)) - - def test_double_start(self): - with get_new_node().init().start() as node: - # can't start node more than once - node.start() - assert (node.is_started) - - def test_uninitialized_start(self): - with get_new_node() as node: - # node is not initialized yet - with pytest.raises(expected_exception=StartNodeException): - node.start() - - def test_restart(self): - with get_new_node() as node: - node.init().start() - - # restart, ok - res = node.execute('select 1') - assert (res == [(1, )]) - node.restart() - res = node.execute('select 2') - assert (res == [(2, )]) - - # restart, fail - with pytest.raises(expected_exception=StartNodeException): - node.append_conf('pg_hba.conf', 'DUMMY') - node.restart() - - def test_reload(self): - with get_new_node() as node: - 
node.init().start() - - # change client_min_messages and save old value - cmm_old = node.execute('show client_min_messages') - node.append_conf(client_min_messages='DEBUG1') - - # reload config - node.reload() - - # check new value - cmm_new = node.execute('show client_min_messages') - assert ('debug1' == cmm_new[0][0].lower()) - assert (cmm_old != cmm_new) - - def test_pg_ctl(self): - with get_new_node() as node: - node.init().start() - - status = node.pg_ctl(['status']) - assert ('PID' in status) - - def test_status(self): - assert (NodeStatus.Running) - assert not (NodeStatus.Stopped) - assert not (NodeStatus.Uninitialized) - - # check statuses after each operation - with get_new_node() as node: - assert (node.pid == 0) - assert (node.status() == NodeStatus.Uninitialized) - - node.init() - - assert (node.pid == 0) - assert (node.status() == NodeStatus.Stopped) - - node.start() - - assert (node.pid != 0) - assert (node.status() == NodeStatus.Running) - - node.stop() - - assert (node.pid == 0) - assert (node.status() == NodeStatus.Stopped) - - node.cleanup() - - assert (node.pid == 0) - assert (node.status() == NodeStatus.Uninitialized) - - def test_psql(self): - with get_new_node().init().start() as node: - - # check returned values (1 arg) - res = node.psql('select 1') - assert (rm_carriage_returns(res) == (0, b'1\n', b'')) - - # check returned values (2 args) - res = node.psql('postgres', 'select 2') - assert (rm_carriage_returns(res) == (0, b'2\n', b'')) - - # check returned values (named) - res = node.psql(query='select 3', dbname='postgres') - assert (rm_carriage_returns(res) == (0, b'3\n', b'')) - - # check returned values (1 arg) - res = node.safe_psql('select 4') - assert (rm_carriage_returns(res) == b'4\n') - - # check returned values (2 args) - res = node.safe_psql('postgres', 'select 5') - assert (rm_carriage_returns(res) == b'5\n') - - # check returned values (named) - res = node.safe_psql(query='select 6', dbname='postgres') - assert (rm_carriage_returns(res) == b'6\n') - - # check feeding input - node.safe_psql('create table horns (w int)') - node.safe_psql('copy horns from stdin (format csv)', - input=b"1\n2\n3\n\\.\n") - _sum = node.safe_psql('select sum(w) from horns') - assert (rm_carriage_returns(_sum) == b'6\n') - - # check psql's default args, fails - with pytest.raises(expected_exception=QueryException): - node.psql() - - node.stop() - - # check psql on stopped node, fails - with pytest.raises(expected_exception=QueryException): - node.safe_psql('select 1') - - def test_safe_psql__expect_error(self): - with get_new_node().init().start() as node: - err = node.safe_psql('select_or_not_select 1', expect_error=True) - assert (type(err) == str) # noqa: E721 - assert ('select_or_not_select' in err) - assert ('ERROR: syntax error at or near "select_or_not_select"' in err) - - # --------- - with pytest.raises( - expected_exception=InvalidOperationException, - match="^" + re.escape("Exception was expected, but query finished successfully: `select 1;`.") + "$" - ): - node.safe_psql("select 1;", expect_error=True) - - # --------- - res = node.safe_psql("select 1;", expect_error=False) - assert (rm_carriage_returns(res) == b'1\n') - - def test_transactions(self): - with get_new_node().init().start() as node: - - with node.connect() as con: - con.begin() - con.execute('create table test(val int)') - con.execute('insert into test values (1)') - con.commit() - - con.begin() - con.execute('insert into test values (2)') - res = con.execute('select * from test order by val asc') - 
assert (res == [(1, ), (2, )]) - con.rollback() - - con.begin() - res = con.execute('select * from test') - assert (res == [(1, )]) - con.rollback() - - con.begin() - con.execute('drop table test') - con.commit() - - def test_control_data(self): - with get_new_node() as node: - - # node is not initialized yet - with pytest.raises(expected_exception=ExecUtilException): - node.get_control_data() - - node.init() - data = node.get_control_data() - - # check returned dict - assert data is not None - assert (any('pg_control' in s for s in data.keys())) - - def test_backup_simple(self): - with get_new_node() as master: - - # enable streaming for backups - master.init(allow_streaming=True) - - # node must be running - with pytest.raises(expected_exception=BackupException): - master.backup() - - # it's time to start node - master.start() - - # fill node with some data - master.psql('create table test as select generate_series(1, 4) i') - - with master.backup(xlog_method='stream') as backup: - with backup.spawn_primary().start() as slave: - res = slave.execute('select * from test order by i asc') - assert (res == [(1, ), (2, ), (3, ), (4, )]) - - def test_backup_multiple(self): - with get_new_node() as node: - node.init(allow_streaming=True).start() - - with node.backup(xlog_method='fetch') as backup1, \ - node.backup(xlog_method='fetch') as backup2: - assert (backup1.base_dir != backup2.base_dir) - - with node.backup(xlog_method='fetch') as backup: - with backup.spawn_primary('node1', destroy=False) as node1, \ - backup.spawn_primary('node2', destroy=False) as node2: - assert (node1.base_dir != node2.base_dir) - - def test_backup_exhaust(self): - with get_new_node() as node: - node.init(allow_streaming=True).start() - - with node.backup(xlog_method='fetch') as backup: - # exhaust backup by creating new node - with backup.spawn_primary(): - pass - - # now let's try to create one more node - with pytest.raises(expected_exception=BackupException): - backup.spawn_primary() - - def test_backup_wrong_xlog_method(self): - with get_new_node() as node: - node.init(allow_streaming=True).start() - - with pytest.raises( - expected_exception=BackupException, - match="^" + re.escape('Invalid xlog_method "wrong"') + "$" - ): - node.backup(xlog_method='wrong') - - def test_pg_ctl_wait_option(self): - C_MAX_ATTEMPTS = 50 - - node = get_new_node() - assert node.status() == testgres.NodeStatus.Uninitialized - node.init() - assert node.status() == testgres.NodeStatus.Stopped - node.start(wait=False) - nAttempt = 0 - while True: - if nAttempt == C_MAX_ATTEMPTS: - # - # [2025-03-11] - # We have an unexpected problem with this test in CI - # Let's get an additional information about this test failure. - # - logging.error("Node was not stopped.") - if not node.os_ops.path_exists(node.pg_log_file): - logging.warning("Node log does not exist.") - else: - logging.info("Let's read node log file [{0}]".format(node.pg_log_file)) - logFileData = node.os_ops.read(node.pg_log_file, binary=False) - logging.info("Node log file content:\n{0}".format(logFileData)) - - raise Exception("Could not stop node.") - - nAttempt += 1 - - if nAttempt > 1: - logging.info("Wait 1 second.") - time.sleep(1) - logging.info("") - - logging.info("Try to stop node. Attempt #{0}.".format(nAttempt)) - - try: - node.stop(wait=False) - break - except ExecUtilException as e: - # it's ok to get this exception here since node - # could be not started yet - logging.info("Node is not stopped. 
Exception ({0}): {1}".format(type(e).__name__, e)) - continue - - logging.info("OK. Stop command was executed. Let's wait while our node will stop really.") - nAttempt = 0 - while True: - if nAttempt == C_MAX_ATTEMPTS: - raise Exception("Could not stop node.") - - nAttempt += 1 - if nAttempt > 1: - logging.info("Wait 1 second.") - time.sleep(1) - logging.info("") - - logging.info("Attempt #{0}.".format(nAttempt)) - s1 = node.status() - - if s1 == testgres.NodeStatus.Running: - continue - - if s1 == testgres.NodeStatus.Stopped: - break - - raise Exception("Unexpected node status: {0}.".format(s1)) - - logging.info("OK. Node is stopped.") - node.cleanup() - - def test_replicate(self): - with get_new_node() as node: - node.init(allow_streaming=True).start() - - with node.replicate().start() as replica: - res = replica.execute('select 1') - assert (res == [(1, )]) - - node.execute('create table test (val int)', commit=True) - - replica.catchup() - - res = node.execute('select * from test') - assert (res == []) - - def test_synchronous_replication(self): - __class__.helper__skip_test_if_pg_version_is_not_ge("9.6") - - with get_new_node() as master: - old_version = not pg_version_ge('9.6') - - master.init(allow_streaming=True).start() - - if not old_version: - master.append_conf('synchronous_commit = remote_apply') - - # create standby - with master.replicate() as standby1, master.replicate() as standby2: - standby1.start() - standby2.start() - - # check formatting - assert ( - '1 ("{}", "{}")'.format(standby1.name, standby2.name) == str(First(1, (standby1, standby2))) - ) # yapf: disable - assert ( - 'ANY 1 ("{}", "{}")'.format(standby1.name, standby2.name) == str(Any(1, (standby1, standby2))) - ) # yapf: disable - - # set synchronous_standby_names - master.set_synchronous_standbys(First(2, [standby1, standby2])) - master.restart() - - # the following part of the test is only applicable to newer - # versions of PostgresQL - if not old_version: - master.safe_psql('create table abc(a int)') - - # Create a large transaction that will take some time to apply - # on standby to check that it applies synchronously - # (If set synchronous_commit to 'on' or other lower level then - # standby most likely won't catchup so fast and test will fail) - master.safe_psql( - 'insert into abc select generate_series(1, 1000000)') - res = standby1.safe_psql('select count(*) from abc') - assert (rm_carriage_returns(res) == b'1000000\n') - - def test_logical_replication(self): - __class__.helper__skip_test_if_pg_version_is_not_ge("10") - - with get_new_node() as node1, get_new_node() as node2: - node1.init(allow_logical=True) - node1.start() - node2.init().start() - - create_table = 'create table test (a int, b int)' - node1.safe_psql(create_table) - node2.safe_psql(create_table) - - # create publication / create subscription - pub = node1.publish('mypub') - sub = node2.subscribe(pub, 'mysub') - - node1.safe_psql('insert into test values (1, 1), (2, 2)') - - # wait until changes apply on subscriber and check them - sub.catchup() - res = node2.execute('select * from test') - assert (res == [(1, 1), (2, 2)]) - - # disable and put some new data - sub.disable() - node1.safe_psql('insert into test values (3, 3)') - - # enable and ensure that data successfully transferred - sub.enable() - sub.catchup() - res = node2.execute('select * from test') - assert (res == [(1, 1), (2, 2), (3, 3)]) - - # Add new tables. 
Since we added "all tables" to publication - # (default behaviour of publish() method) we don't need - # to explicitly perform pub.add_tables() - create_table = 'create table test2 (c char)' - node1.safe_psql(create_table) - node2.safe_psql(create_table) - sub.refresh() - - # put new data - node1.safe_psql('insert into test2 values (\'a\'), (\'b\')') - sub.catchup() - res = node2.execute('select * from test2') - assert (res == [('a', ), ('b', )]) - - # drop subscription - sub.drop() - pub.drop() - - # create new publication and subscription for specific table - # (omitting copying data as it's already done) - pub = node1.publish('newpub', tables=['test']) - sub = node2.subscribe(pub, 'newsub', copy_data=False) - - node1.safe_psql('insert into test values (4, 4)') - sub.catchup() - res = node2.execute('select * from test') - assert (res == [(1, 1), (2, 2), (3, 3), (4, 4)]) - - # explicitly add table - with pytest.raises(expected_exception=ValueError): - pub.add_tables([]) # fail - pub.add_tables(['test2']) - node1.safe_psql('insert into test2 values (\'c\')') - sub.catchup() - res = node2.execute('select * from test2') - assert (res == [('a', ), ('b', )]) - - def test_logical_catchup(self): - """ Runs catchup for 100 times to be sure that it is consistent """ - __class__.helper__skip_test_if_pg_version_is_not_ge("10") - - with get_new_node() as node1, get_new_node() as node2: - node1.init(allow_logical=True) - node1.start() - node2.init().start() - - create_table = 'create table test (key int primary key, val int); ' - node1.safe_psql(create_table) - node1.safe_psql('alter table test replica identity default') - node2.safe_psql(create_table) - - # create publication / create subscription - sub = node2.subscribe(node1.publish('mypub'), 'mysub') - - for i in range(0, 100): - node1.execute('insert into test values ({0}, {0})'.format(i)) - sub.catchup() - res = node2.execute('select * from test') - assert (res == [(i, i, )]) - node1.execute('delete from test') - - def test_logical_replication_fail(self): - __class__.helper__skip_test_if_pg_version_is_ge("10") - - with get_new_node() as node: - with pytest.raises(expected_exception=InitNodeException): - node.init(allow_logical=True) - - def test_replication_slots(self): - with get_new_node() as node: - node.init(allow_streaming=True).start() - - with node.replicate(slot='slot1').start() as replica: - replica.execute('select 1') - - # cannot create new slot with the same name - with pytest.raises(expected_exception=TestgresException): - node.replicate(slot='slot1') - - def test_incorrect_catchup(self): - with get_new_node() as node: - node.init(allow_streaming=True).start() - - # node has no master, can't catch up - with pytest.raises(expected_exception=TestgresException): - node.catchup() - - def test_promotion(self): - with get_new_node() as master: - master.init().start() - master.safe_psql('create table abc(id serial)') - - with master.replicate().start() as replica: - master.stop() - replica.promote() - - # make standby becomes writable master - replica.safe_psql('insert into abc values (1)') - res = replica.safe_psql('select * from abc') - assert (rm_carriage_returns(res) == b'1\n') - - def test_dump(self): - query_create = 'create table test as select generate_series(1, 2) as val' - query_select = 'select * from test order by val asc' - - with get_new_node().init().start() as node1: - - node1.execute(query_create) - for format in ['plain', 'custom', 'directory', 'tar']: - with removing(node1.dump(format=format)) as dump: - with 
get_new_node().init().start() as node3: - if format == 'directory': - assert (os.path.isdir(dump)) - else: - assert (os.path.isfile(dump)) - # restore dump - node3.restore(filename=dump) - res = node3.execute(query_select) - assert (res == [(1, ), (2, )]) - - def test_users(self): - with get_new_node().init().start() as node: - node.psql('create role test_user login') - value = node.safe_psql('select 1', username='test_user') - value = rm_carriage_returns(value) - assert (value == b'1\n') - - def test_poll_query_until(self): - with get_new_node() as node: - node.init().start() - - get_time = 'select extract(epoch from now())' - check_time = 'select extract(epoch from now()) - {} >= 5' - - start_time = node.execute(get_time)[0][0] - node.poll_query_until(query=check_time.format(start_time)) - end_time = node.execute(get_time)[0][0] - - assert (end_time - start_time >= 5) - - # check 0 columns - with pytest.raises(expected_exception=QueryException): - node.poll_query_until( - query='select from pg_catalog.pg_class limit 1') - - # check None, fail - with pytest.raises(expected_exception=QueryException): - node.poll_query_until(query='create table abc (val int)') - - # check None, ok - node.poll_query_until(query='create table def()', - expected=None) # returns nothing - - # check 0 rows equivalent to expected=None - node.poll_query_until( - query='select * from pg_catalog.pg_class where true = false', - expected=None) - - # check arbitrary expected value, fail - with pytest.raises(expected_exception=TimeoutException): - node.poll_query_until(query='select 3', - expected=1, - max_attempts=3, - sleep_time=0.01) - - # check arbitrary expected value, ok - node.poll_query_until(query='select 2', expected=2) - - # check timeout - with pytest.raises(expected_exception=TimeoutException): - node.poll_query_until(query='select 1 > 2', - max_attempts=3, - sleep_time=0.01) - - # check ProgrammingError, fail - with pytest.raises(expected_exception=testgres.ProgrammingError): - node.poll_query_until(query='dummy1') - - # check ProgrammingError, ok - with pytest.raises(expected_exception=(TimeoutException)): - node.poll_query_until(query='dummy2', - max_attempts=3, - sleep_time=0.01, - suppress={testgres.ProgrammingError}) - - # check 1 arg, ok - node.poll_query_until('select true') - - def test_logging(self): - C_MAX_ATTEMPTS = 50 - # This name is used for testgres logging, too. - C_NODE_NAME = "testgres_tests." 
+ __class__.__name__ + "test_logging-master-" + uuid.uuid4().hex - - logging.info("Node name is [{0}]".format(C_NODE_NAME)) - - with tempfile.NamedTemporaryFile('w', delete=True) as logfile: - formatter = logging.Formatter(fmt="%(node)-5s: %(message)s") - handler = logging.FileHandler(filename=logfile.name) - handler.formatter = formatter - logger = logging.getLogger(C_NODE_NAME) - assert logger is not None - assert len(logger.handlers) == 0 - - try: - # It disables to log on the root level - logger.propagate = False - logger.addHandler(handler) - - with scoped_config(use_python_logging=True): - with get_new_node(name=C_NODE_NAME) as master: - logging.info("Master node is initilizing") - master.init() - - logging.info("Master node is starting") - master.start() - - logging.info("Dummy query is executed a few times") - for _ in range(20): - master.execute('select 1') - time.sleep(0.01) - - # let logging worker do the job - time.sleep(0.1) - - logging.info("Master node log file is checking") - nAttempt = 0 - - while True: - assert nAttempt <= C_MAX_ATTEMPTS - if nAttempt == C_MAX_ATTEMPTS: - raise Exception("Test failed!") - - # let logging worker do the job - time.sleep(0.1) - - nAttempt += 1 - - logging.info("Attempt {0}".format(nAttempt)) - - # check that master's port is found - with open(logfile.name, 'r') as log: - lines = log.readlines() - - assert lines is not None - assert type(lines) == list # noqa: E721 - - def LOCAL__test_lines(): - for s in lines: - if any(C_NODE_NAME in s for s in lines): - logging.info("OK. We found the node_name in a line \"{0}\"".format(s)) - return True - return False - - if LOCAL__test_lines(): - break - - logging.info("Master node log file does not have an expected information.") - continue - - # test logger after stop/start/restart - logging.info("Master node is stopping...") - master.stop() - logging.info("Master node is staring again...") - master.start() - logging.info("Master node is restaring...") - master.restart() - assert (master._logger.is_alive()) - finally: - # It is a hack code to logging cleanup - logging._acquireLock() - assert logging.Logger.manager is not None - assert C_NODE_NAME in logging.Logger.manager.loggerDict.keys() - logging.Logger.manager.loggerDict.pop(C_NODE_NAME, None) - assert not (C_NODE_NAME in logging.Logger.manager.loggerDict.keys()) - assert not (handler in logging._handlers.values()) - logging._releaseLock() - # GO HOME! 
- return - def test_pgbench(self): __class__.helper__skip_test_if_util_not_exist("pgbench") @@ -955,60 +189,6 @@ def test_unix_sockets(self): r.execute('select 1') r.safe_psql('select 1') - def test_auto_name(self): - with get_new_node().init(allow_streaming=True).start() as m: - with m.replicate().start() as r: - # check that nodes are running - assert (m.status()) - assert (r.status()) - - # check their names - assert (m.name != r.name) - assert ('testgres' in m.name) - assert ('testgres' in r.name) - - def test_file_tail(self): - s1 = "the quick brown fox jumped over that lazy dog\n" - s2 = "abc\n" - s3 = "def\n" - - with tempfile.NamedTemporaryFile(mode='r+', delete=True) as f: - sz = 0 - while sz < 3 * 8192: - sz += len(s1) - f.write(s1) - f.write(s2) - f.write(s3) - - f.seek(0) - lines = file_tail(f, 3) - assert (lines[0] == s1) - assert (lines[1] == s2) - assert (lines[2] == s3) - - f.seek(0) - lines = file_tail(f, 1) - assert (lines[0] == s3) - - def test_isolation_levels(self): - with get_new_node().init().start() as node: - with node.connect() as con: - # string levels - con.begin('Read Uncommitted').commit() - con.begin('Read Committed').commit() - con.begin('Repeatable Read').commit() - con.begin('Serializable').commit() - - # enum levels - con.begin(IsolationLevel.ReadUncommitted).commit() - con.begin(IsolationLevel.ReadCommitted).commit() - con.begin(IsolationLevel.RepeatableRead).commit() - con.begin(IsolationLevel.Serializable).commit() - - # check wrong level - with pytest.raises(expected_exception=QueryException): - con.begin('Garbage').commit() - def test_ports_management(self): assert bound_ports is not None assert type(bound_ports) == set # noqa: E721 @@ -1043,153 +223,6 @@ def test_ports_management(self): assert type(bound_ports) == set # noqa: E721 assert bound_ports == stage0__bound_ports - def test_exceptions(self): - str(StartNodeException('msg', [('file', 'lines')])) - str(ExecUtilException('msg', 'cmd', 1, 'out')) - str(QueryException('msg', 'query')) - - def test_version_management(self): - a = PgVer('10.0') - b = PgVer('10') - c = PgVer('9.6.5') - d = PgVer('15.0') - e = PgVer('15rc1') - f = PgVer('15beta4') - h = PgVer('15.3biha') - i = PgVer('15.3') - g = PgVer('15.3.1bihabeta1') - k = PgVer('15.3.1') - - assert (a == b) - assert (b > c) - assert (a > c) - assert (d > e) - assert (e > f) - assert (d > f) - assert (h > f) - assert (h == i) - assert (g == k) - assert (g > h) - - version = get_pg_version() - with get_new_node() as node: - assert (isinstance(version, six.string_types)) - assert (isinstance(node.version, PgVer)) - assert (node.version == PgVer(version)) - - def test_child_pids(self): - master_processes = [ - ProcessType.AutovacuumLauncher, - ProcessType.BackgroundWriter, - ProcessType.Checkpointer, - ProcessType.StatsCollector, - ProcessType.WalSender, - ProcessType.WalWriter, - ] - - if pg_version_ge('10'): - master_processes.append(ProcessType.LogicalReplicationLauncher) - - if pg_version_ge('14'): - master_processes.remove(ProcessType.StatsCollector) - - repl_processes = [ - ProcessType.Startup, - ProcessType.WalReceiver, - ] - - def LOCAL__test_auxiliary_pids( - node: testgres.PostgresNode, - expectedTypes: list[ProcessType] - ) -> list[ProcessType]: - # returns list of the absence processes - assert node is not None - assert type(node) == testgres.PostgresNode # noqa: E721 - assert expectedTypes is not None - assert type(expectedTypes) == list # noqa: E721 - - pids = node.auxiliary_pids - assert pids is not None # noqa: E721 - assert 
type(pids) == dict # noqa: E721 - - result = list[ProcessType]() - for ptype in expectedTypes: - if not (ptype in pids): - result.append(ptype) - return result - - def LOCAL__check_auxiliary_pids__multiple_attempts( - node: testgres.PostgresNode, - expectedTypes: list[ProcessType]): - assert node is not None - assert type(node) == testgres.PostgresNode # noqa: E721 - assert expectedTypes is not None - assert type(expectedTypes) == list # noqa: E721 - - nAttempt = 0 - - while nAttempt < 5: - nAttempt += 1 - - logging.info("Test pids of [{0}] node. Attempt #{1}.".format( - node.name, - nAttempt - )) - - if nAttempt > 1: - time.sleep(1) - - absenceList = LOCAL__test_auxiliary_pids(node, expectedTypes) - assert absenceList is not None - assert type(absenceList) == list # noqa: E721 - if len(absenceList) == 0: - logging.info("Bingo!") - return - - logging.info("These processes are not found: {0}.".format(absenceList)) - continue - - raise Exception("Node {0} does not have the following processes: {1}.".format( - node.name, - absenceList - )) - - with get_new_node().init().start() as master: - - # master node doesn't have a source walsender! - with pytest.raises(expected_exception=TestgresException): - master.source_walsender - - with master.connect() as con: - assert (con.pid > 0) - - with master.replicate().start() as replica: - - # test __str__ method - str(master.child_processes[0]) - - LOCAL__check_auxiliary_pids__multiple_attempts( - master, - master_processes) - - LOCAL__check_auxiliary_pids__multiple_attempts( - replica, - repl_processes) - - master_pids = master.auxiliary_pids - - # there should be exactly 1 source walsender for replica - assert (len(master_pids[ProcessType.WalSender]) == 1) - pid1 = master_pids[ProcessType.WalSender][0] - pid2 = replica.source_walsender.pid - assert (pid1 == pid2) - - replica.stop() - - # there should be no walsender after we've stopped replica - with pytest.raises(expected_exception=TestgresException): - replica.source_walsender - def test_child_process_dies(self): # test for FileNotFound exception during child_processes() function cmd = ["timeout", "60"] if os.name == 'nt' else ["sleep", "60"] @@ -1512,15 +545,3 @@ def helper__skip_test_if_util_not_exist(name: str): if not util_exists(name2): pytest.skip('might be missing') - - @staticmethod - def helper__skip_test_if_pg_version_is_not_ge(version: str): - assert type(version) == str # noqa: E721 - if not pg_version_ge(version): - pytest.skip('requires {0}+'.format(version)) - - @staticmethod - def helper__skip_test_if_pg_version_is_ge(version: str): - assert type(version) == str # noqa: E721 - if pg_version_ge(version): - pytest.skip('requires <{0}'.format(version)) diff --git a/tests/test_simple_remote.py b/tests/test_simple_remote.py index d484f1e3..c16fe53f 100755 --- a/tests/test_simple_remote.py +++ b/tests/test_simple_remote.py @@ -2,28 +2,19 @@ import os import re import subprocess -import tempfile -import time -import six import pytest import psutil import logging -import uuid -from contextlib import contextmanager +from .helpers.os_ops_descrs import OsOpsDescrs +from .helpers.os_ops_descrs import OsOperations from .. 
import testgres from ..testgres.exceptions import \ InitNodeException, \ - StartNodeException, \ - ExecUtilException, \ - BackupException, \ - QueryException, \ - TimeoutException, \ - TestgresException, \ - InvalidOperationException + ExecUtilException from ..testgres.config import \ TestgresConfig, \ @@ -31,33 +22,13 @@ scoped_config, \ pop_config, testgres_config -from ..testgres import \ - NodeStatus, \ - ProcessType, \ - IsolationLevel, \ - get_remote_node, \ - RemoteOperations - from ..testgres import \ get_bin_path, \ - get_pg_config, \ - get_pg_version - -from ..testgres import \ - First, \ - Any + get_pg_config # NOTE: those are ugly imports from ..testgres import bound_ports -from ..testgres.utils import PgVer -from ..testgres.utils import file_tail -from ..testgres.node import ProcessProxy, ConnectionParams - - -def pg_version_ge(version): - cur_ver = PgVer(get_pg_version()) - min_ver = PgVer(version) - return cur_ver >= min_ver +from ..testgres.node import ProcessProxy def util_exists(util): @@ -76,25 +47,8 @@ def good_properties(f): return True -@contextmanager -def removing(f): - try: - yield f - finally: - if testgres_config.os_ops.isfile(f): - testgres_config.os_ops.remove_file(f) - - elif testgres_config.os_ops.isdir(f): - testgres_config.os_ops.rmdirs(f, ignore_errors=True) - - class TestgresRemoteTests: - sm_conn_params = ConnectionParams( - host=os.getenv('RDBMS_TESTPOOL1_HOST') or '127.0.0.1', - username=os.getenv('USER'), - ssh_key=os.getenv('RDBMS_TESTPOOL_SSHKEY')) - - sm_os_ops = RemoteOperations(sm_conn_params) + sm_os_ops = OsOpsDescrs.sm_remote_os_ops @pytest.fixture(autouse=True, scope="class") def implicit_fixture(self): @@ -218,732 +172,6 @@ def test_init__unk_LANG_and_LC_CTYPE(self): __class__.helper__restore_envvar("LC_CTYPE", prev_LC_CTYPE) __class__.helper__restore_envvar("LC_COLLATE", prev_LC_COLLATE) - def test_double_init(self): - with __class__.helper__get_node().init() as node: - # can't initialize node more than once - with pytest.raises(expected_exception=InitNodeException): - node.init() - - def test_init_after_cleanup(self): - with __class__.helper__get_node() as node: - node.init().start().execute('select 1') - node.cleanup() - node.init().start().execute('select 1') - - def test_init_unique_system_id(self): - # this function exists in PostgreSQL 9.6+ - __class__.helper__skip_test_if_util_not_exist("pg_resetwal") - __class__.helper__skip_test_if_pg_version_is_not_ge('9.6') - - query = 'select system_identifier from pg_control_system()' - - with scoped_config(cache_initdb=False): - with __class__.helper__get_node().init().start() as node0: - id0 = node0.execute(query)[0] - - with scoped_config(cache_initdb=True, - cached_initdb_unique=True) as config: - assert (config.cache_initdb) - assert (config.cached_initdb_unique) - - # spawn two nodes; ids must be different - with __class__.helper__get_node().init().start() as node1, \ - __class__.helper__get_node().init().start() as node2: - id1 = node1.execute(query)[0] - id2 = node2.execute(query)[0] - - # ids must increase - assert (id1 > id0) - assert (id2 > id1) - - def test_node_exit(self): - with pytest.raises(expected_exception=QueryException): - with __class__.helper__get_node().init() as node: - base_dir = node.base_dir - node.safe_psql('select 1') - - # we should save the DB for "debugging" - assert (__class__.sm_os_ops.path_exists(base_dir)) - __class__.sm_os_ops.rmdirs(base_dir, ignore_errors=True) - - with __class__.helper__get_node().init() as node: - base_dir = node.base_dir - - # should 
have been removed by default - assert not (__class__.sm_os_ops.path_exists(base_dir)) - - def test_double_start(self): - with __class__.helper__get_node().init().start() as node: - # can't start node more than once - node.start() - assert (node.is_started) - - def test_uninitialized_start(self): - with __class__.helper__get_node() as node: - # node is not initialized yet - with pytest.raises(expected_exception=StartNodeException): - node.start() - - def test_restart(self): - with __class__.helper__get_node() as node: - node.init().start() - - # restart, ok - res = node.execute('select 1') - assert (res == [(1,)]) - node.restart() - res = node.execute('select 2') - assert (res == [(2,)]) - - # restart, fail - with pytest.raises(expected_exception=StartNodeException): - node.append_conf('pg_hba.conf', 'DUMMY') - node.restart() - - def test_reload(self): - with __class__.helper__get_node() as node: - node.init().start() - - # change client_min_messages and save old value - cmm_old = node.execute('show client_min_messages') - node.append_conf(client_min_messages='DEBUG1') - - # reload config - node.reload() - - # check new value - cmm_new = node.execute('show client_min_messages') - assert ('debug1' == cmm_new[0][0].lower()) - assert (cmm_old != cmm_new) - - def test_pg_ctl(self): - with __class__.helper__get_node() as node: - node.init().start() - - status = node.pg_ctl(['status']) - assert ('PID' in status) - - def test_status(self): - assert (NodeStatus.Running) - assert not (NodeStatus.Stopped) - assert not (NodeStatus.Uninitialized) - - # check statuses after each operation - with __class__.helper__get_node() as node: - assert (node.pid == 0) - assert (node.status() == NodeStatus.Uninitialized) - - node.init() - - assert (node.pid == 0) - assert (node.status() == NodeStatus.Stopped) - - node.start() - - assert (node.pid != 0) - assert (node.status() == NodeStatus.Running) - - node.stop() - - assert (node.pid == 0) - assert (node.status() == NodeStatus.Stopped) - - node.cleanup() - - assert (node.pid == 0) - assert (node.status() == NodeStatus.Uninitialized) - - def test_psql(self): - with __class__.helper__get_node().init().start() as node: - # check returned values (1 arg) - res = node.psql('select 1') - assert (res == (0, b'1\n', b'')) - - # check returned values (2 args) - res = node.psql('postgres', 'select 2') - assert (res == (0, b'2\n', b'')) - - # check returned values (named) - res = node.psql(query='select 3', dbname='postgres') - assert (res == (0, b'3\n', b'')) - - # check returned values (1 arg) - res = node.safe_psql('select 4') - assert (res == b'4\n') - - # check returned values (2 args) - res = node.safe_psql('postgres', 'select 5') - assert (res == b'5\n') - - # check returned values (named) - res = node.safe_psql(query='select 6', dbname='postgres') - assert (res == b'6\n') - - # check feeding input - node.safe_psql('create table horns (w int)') - node.safe_psql('copy horns from stdin (format csv)', - input=b"1\n2\n3\n\\.\n") - _sum = node.safe_psql('select sum(w) from horns') - assert (_sum == b'6\n') - - # check psql's default args, fails - with pytest.raises(expected_exception=QueryException): - node.psql() - - node.stop() - - # check psql on stopped node, fails - with pytest.raises(expected_exception=QueryException): - node.safe_psql('select 1') - - def test_safe_psql__expect_error(self): - with __class__.helper__get_node().init().start() as node: - err = node.safe_psql('select_or_not_select 1', expect_error=True) - assert (type(err) == str) # noqa: E721 - assert 
('select_or_not_select' in err) - assert ('ERROR: syntax error at or near "select_or_not_select"' in err) - - # --------- - with pytest.raises( - expected_exception=InvalidOperationException, - match="^" + re.escape("Exception was expected, but query finished successfully: `select 1;`.") + "$" - ): - node.safe_psql("select 1;", expect_error=True) - - # --------- - res = node.safe_psql("select 1;", expect_error=False) - assert (res == b'1\n') - - def test_transactions(self): - with __class__.helper__get_node().init().start() as node: - with node.connect() as con: - con.begin() - con.execute('create table test(val int)') - con.execute('insert into test values (1)') - con.commit() - - con.begin() - con.execute('insert into test values (2)') - res = con.execute('select * from test order by val asc') - assert (res == [(1,), (2,)]) - con.rollback() - - con.begin() - res = con.execute('select * from test') - assert (res == [(1,)]) - con.rollback() - - con.begin() - con.execute('drop table test') - con.commit() - - def test_control_data(self): - with __class__.helper__get_node() as node: - # node is not initialized yet - with pytest.raises(expected_exception=ExecUtilException): - node.get_control_data() - - node.init() - data = node.get_control_data() - - # check returned dict - assert data is not None - assert (any('pg_control' in s for s in data.keys())) - - def test_backup_simple(self): - with __class__.helper__get_node() as master: - # enable streaming for backups - master.init(allow_streaming=True) - - # node must be running - with pytest.raises(expected_exception=BackupException): - master.backup() - - # it's time to start node - master.start() - - # fill node with some data - master.psql('create table test as select generate_series(1, 4) i') - - with master.backup(xlog_method='stream') as backup: - with backup.spawn_primary().start() as slave: - res = slave.execute('select * from test order by i asc') - assert (res == [(1,), (2,), (3,), (4,)]) - - def test_backup_multiple(self): - with __class__.helper__get_node() as node: - node.init(allow_streaming=True).start() - - with node.backup(xlog_method='fetch') as backup1, \ - node.backup(xlog_method='fetch') as backup2: - assert (backup1.base_dir != backup2.base_dir) - - with node.backup(xlog_method='fetch') as backup: - with backup.spawn_primary('node1', destroy=False) as node1, \ - backup.spawn_primary('node2', destroy=False) as node2: - assert (node1.base_dir != node2.base_dir) - - def test_backup_exhaust(self): - with __class__.helper__get_node() as node: - node.init(allow_streaming=True).start() - - with node.backup(xlog_method='fetch') as backup: - # exhaust backup by creating new node - with backup.spawn_primary(): - pass - - # now let's try to create one more node - with pytest.raises(expected_exception=BackupException): - backup.spawn_primary() - - def test_backup_wrong_xlog_method(self): - with __class__.helper__get_node() as node: - node.init(allow_streaming=True).start() - - with pytest.raises( - expected_exception=BackupException, - match="^" + re.escape('Invalid xlog_method "wrong"') + "$" - ): - node.backup(xlog_method='wrong') - - def test_pg_ctl_wait_option(self): - C_MAX_ATTEMPTS = 50 - - node = __class__.helper__get_node() - assert node.status() == testgres.NodeStatus.Uninitialized - node.init() - assert node.status() == testgres.NodeStatus.Stopped - node.start(wait=False) - nAttempt = 0 - while True: - if nAttempt == C_MAX_ATTEMPTS: - # - # [2025-03-11] - # We have an unexpected problem with this test in CI - # Let's get an 
additional information about this test failure. - # - logging.error("Node was not stopped.") - if not node.os_ops.path_exists(node.pg_log_file): - logging.warning("Node log does not exist.") - else: - logging.info("Let's read node log file [{0}]".format(node.pg_log_file)) - logFileData = node.os_ops.read(node.pg_log_file, binary=False) - logging.info("Node log file content:\n{0}".format(logFileData)) - - raise Exception("Could not stop node.") - - nAttempt += 1 - - if nAttempt > 1: - logging.info("Wait 1 second.") - time.sleep(1) - logging.info("") - - logging.info("Try to stop node. Attempt #{0}.".format(nAttempt)) - - try: - node.stop(wait=False) - break - except ExecUtilException as e: - # it's ok to get this exception here since node - # could be not started yet - logging.info("Node is not stopped. Exception ({0}): {1}".format(type(e).__name__, e)) - continue - - logging.info("OK. Stop command was executed. Let's wait while our node will stop really.") - nAttempt = 0 - while True: - if nAttempt == C_MAX_ATTEMPTS: - raise Exception("Could not stop node.") - - nAttempt += 1 - if nAttempt > 1: - logging.info("Wait 1 second.") - time.sleep(1) - logging.info("") - - logging.info("Attempt #{0}.".format(nAttempt)) - s1 = node.status() - - if s1 == testgres.NodeStatus.Running: - continue - - if s1 == testgres.NodeStatus.Stopped: - break - - raise Exception("Unexpected node status: {0}.".format(s1)) - - logging.info("OK. Node is stopped.") - node.cleanup() - - def test_replicate(self): - with __class__.helper__get_node() as node: - node.init(allow_streaming=True).start() - - with node.replicate().start() as replica: - res = replica.execute('select 1') - assert (res == [(1,)]) - - node.execute('create table test (val int)', commit=True) - - replica.catchup() - - res = node.execute('select * from test') - assert (res == []) - - def test_synchronous_replication(self): - __class__.helper__skip_test_if_pg_version_is_not_ge("9.6") - - with __class__.helper__get_node() as master: - old_version = not pg_version_ge('9.6') - - master.init(allow_streaming=True).start() - - if not old_version: - master.append_conf('synchronous_commit = remote_apply') - - # create standby - with master.replicate() as standby1, master.replicate() as standby2: - standby1.start() - standby2.start() - - # check formatting - assert ( - '1 ("{}", "{}")'.format(standby1.name, standby2.name) == str(First(1, (standby1, standby2))) - ) # yapf: disable - assert ( - 'ANY 1 ("{}", "{}")'.format(standby1.name, standby2.name) == str(Any(1, (standby1, standby2))) - ) # yapf: disable - - # set synchronous_standby_names - master.set_synchronous_standbys(First(2, [standby1, standby2])) - master.restart() - - # the following part of the test is only applicable to newer - # versions of PostgresQL - if not old_version: - master.safe_psql('create table abc(a int)') - - # Create a large transaction that will take some time to apply - # on standby to check that it applies synchronously - # (If set synchronous_commit to 'on' or other lower level then - # standby most likely won't catchup so fast and test will fail) - master.safe_psql( - 'insert into abc select generate_series(1, 1000000)') - res = standby1.safe_psql('select count(*) from abc') - assert (res == b'1000000\n') - - def test_logical_replication(self): - __class__.helper__skip_test_if_pg_version_is_not_ge("10") - - with __class__.helper__get_node() as node1, __class__.helper__get_node() as node2: - node1.init(allow_logical=True) - node1.start() - node2.init().start() - - create_table = 
'create table test (a int, b int)' - node1.safe_psql(create_table) - node2.safe_psql(create_table) - - # create publication / create subscription - pub = node1.publish('mypub') - sub = node2.subscribe(pub, 'mysub') - - node1.safe_psql('insert into test values (1, 1), (2, 2)') - - # wait until changes apply on subscriber and check them - sub.catchup() - res = node2.execute('select * from test') - assert (res == [(1, 1), (2, 2)]) - - # disable and put some new data - sub.disable() - node1.safe_psql('insert into test values (3, 3)') - - # enable and ensure that data successfully transferred - sub.enable() - sub.catchup() - res = node2.execute('select * from test') - assert (res == [(1, 1), (2, 2), (3, 3)]) - - # Add new tables. Since we added "all tables" to publication - # (default behaviour of publish() method) we don't need - # to explicitly perform pub.add_tables() - create_table = 'create table test2 (c char)' - node1.safe_psql(create_table) - node2.safe_psql(create_table) - sub.refresh() - - # put new data - node1.safe_psql('insert into test2 values (\'a\'), (\'b\')') - sub.catchup() - res = node2.execute('select * from test2') - assert (res == [('a',), ('b',)]) - - # drop subscription - sub.drop() - pub.drop() - - # create new publication and subscription for specific table - # (omitting copying data as it's already done) - pub = node1.publish('newpub', tables=['test']) - sub = node2.subscribe(pub, 'newsub', copy_data=False) - - node1.safe_psql('insert into test values (4, 4)') - sub.catchup() - res = node2.execute('select * from test') - assert (res == [(1, 1), (2, 2), (3, 3), (4, 4)]) - - # explicitly add table - with pytest.raises(expected_exception=ValueError): - pub.add_tables([]) # fail - pub.add_tables(['test2']) - node1.safe_psql('insert into test2 values (\'c\')') - sub.catchup() - res = node2.execute('select * from test2') - assert (res == [('a',), ('b',)]) - - def test_logical_catchup(self): - """ Runs catchup for 100 times to be sure that it is consistent """ - __class__.helper__skip_test_if_pg_version_is_not_ge("10") - - with __class__.helper__get_node() as node1, __class__.helper__get_node() as node2: - node1.init(allow_logical=True) - node1.start() - node2.init().start() - - create_table = 'create table test (key int primary key, val int); ' - node1.safe_psql(create_table) - node1.safe_psql('alter table test replica identity default') - node2.safe_psql(create_table) - - # create publication / create subscription - sub = node2.subscribe(node1.publish('mypub'), 'mysub') - - for i in range(0, 100): - node1.execute('insert into test values ({0}, {0})'.format(i)) - sub.catchup() - res = node2.execute('select * from test') - assert (res == [(i, i, )]) - node1.execute('delete from test') - - def test_logical_replication_fail(self): - __class__.helper__skip_test_if_pg_version_is_ge("10") - - with __class__.helper__get_node() as node: - with pytest.raises(expected_exception=InitNodeException): - node.init(allow_logical=True) - - def test_replication_slots(self): - with __class__.helper__get_node() as node: - node.init(allow_streaming=True).start() - - with node.replicate(slot='slot1').start() as replica: - replica.execute('select 1') - - # cannot create new slot with the same name - with pytest.raises(expected_exception=TestgresException): - node.replicate(slot='slot1') - - def test_incorrect_catchup(self): - with __class__.helper__get_node() as node: - node.init(allow_streaming=True).start() - - # node has no master, can't catch up - with 
pytest.raises(expected_exception=TestgresException): - node.catchup() - - def test_promotion(self): - with __class__.helper__get_node() as master: - master.init().start() - master.safe_psql('create table abc(id serial)') - - with master.replicate().start() as replica: - master.stop() - replica.promote() - - # make standby becomes writable master - replica.safe_psql('insert into abc values (1)') - res = replica.safe_psql('select * from abc') - assert (res == b'1\n') - - def test_dump(self): - query_create = 'create table test as select generate_series(1, 2) as val' - query_select = 'select * from test order by val asc' - - with __class__.helper__get_node().init().start() as node1: - - node1.execute(query_create) - for format in ['plain', 'custom', 'directory', 'tar']: - with removing(node1.dump(format=format)) as dump: - with __class__.helper__get_node().init().start() as node3: - if format == 'directory': - assert (node1.os_ops.isdir(dump)) - else: - assert (node1.os_ops.isfile(dump)) - # restore dump - node3.restore(filename=dump) - res = node3.execute(query_select) - assert (res == [(1,), (2,)]) - - def test_users(self): - with __class__.helper__get_node().init().start() as node: - node.psql('create role test_user login') - value = node.safe_psql('select 1', username='test_user') - assert (b'1\n' == value) - - def test_poll_query_until(self): - with __class__.helper__get_node() as node: - node.init().start() - - get_time = 'select extract(epoch from now())' - check_time = 'select extract(epoch from now()) - {} >= 5' - - start_time = node.execute(get_time)[0][0] - node.poll_query_until(query=check_time.format(start_time)) - end_time = node.execute(get_time)[0][0] - - assert (end_time - start_time >= 5) - - # check 0 columns - with pytest.raises(expected_exception=QueryException): - node.poll_query_until( - query='select from pg_catalog.pg_class limit 1') - - # check None, fail - with pytest.raises(expected_exception=QueryException): - node.poll_query_until(query='create table abc (val int)') - - # check None, ok - node.poll_query_until(query='create table def()', - expected=None) # returns nothing - - # check 0 rows equivalent to expected=None - node.poll_query_until( - query='select * from pg_catalog.pg_class where true = false', - expected=None) - - # check arbitrary expected value, fail - with pytest.raises(expected_exception=TimeoutException): - node.poll_query_until(query='select 3', - expected=1, - max_attempts=3, - sleep_time=0.01) - - # check arbitrary expected value, ok - node.poll_query_until(query='select 2', expected=2) - - # check timeout - with pytest.raises(expected_exception=TimeoutException): - node.poll_query_until(query='select 1 > 2', - max_attempts=3, - sleep_time=0.01) - - # check ProgrammingError, fail - with pytest.raises(expected_exception=testgres.ProgrammingError): - node.poll_query_until(query='dummy1') - - # check ProgrammingError, ok - with pytest.raises(expected_exception=TimeoutException): - node.poll_query_until(query='dummy2', - max_attempts=3, - sleep_time=0.01, - suppress={testgres.ProgrammingError}) - - # check 1 arg, ok - node.poll_query_until('select true') - - def test_logging(self): - C_MAX_ATTEMPTS = 50 - # This name is used for testgres logging, too. - C_NODE_NAME = "testgres_tests." 
+ __class__.__name__ + "test_logging-master-" + uuid.uuid4().hex - - logging.info("Node name is [{0}]".format(C_NODE_NAME)) - - with tempfile.NamedTemporaryFile('w', delete=True) as logfile: - formatter = logging.Formatter(fmt="%(node)-5s: %(message)s") - handler = logging.FileHandler(filename=logfile.name) - handler.formatter = formatter - logger = logging.getLogger(C_NODE_NAME) - assert logger is not None - assert len(logger.handlers) == 0 - - try: - # It disables to log on the root level - logger.propagate = False - logger.addHandler(handler) - - with scoped_config(use_python_logging=True): - with __class__.helper__get_node(name=C_NODE_NAME) as master: - logging.info("Master node is initilizing") - master.init() - - logging.info("Master node is starting") - master.start() - - logging.info("Dummy query is executed a few times") - for _ in range(20): - master.execute('select 1') - time.sleep(0.01) - - # let logging worker do the job - time.sleep(0.1) - - logging.info("Master node log file is checking") - nAttempt = 0 - - while True: - assert nAttempt <= C_MAX_ATTEMPTS - if nAttempt == C_MAX_ATTEMPTS: - raise Exception("Test failed!") - - # let logging worker do the job - time.sleep(0.1) - - nAttempt += 1 - - logging.info("Attempt {0}".format(nAttempt)) - - # check that master's port is found - with open(logfile.name, 'r') as log: - lines = log.readlines() - - assert lines is not None - assert type(lines) == list # noqa: E721 - - def LOCAL__test_lines(): - for s in lines: - if any(C_NODE_NAME in s for s in lines): - logging.info("OK. We found the node_name in a line \"{0}\"".format(s)) - return True - return False - - if LOCAL__test_lines(): - break - - logging.info("Master node log file does not have an expected information.") - continue - - # test logger after stop/start/restart - logging.info("Master node is stopping...") - master.stop() - logging.info("Master node is staring again...") - master.start() - logging.info("Master node is restaring...") - master.restart() - assert (master._logger.is_alive()) - finally: - # It is a hack code to logging cleanup - logging._acquireLock() - assert logging.Logger.manager is not None - assert C_NODE_NAME in logging.Logger.manager.loggerDict.keys() - logging.Logger.manager.loggerDict.pop(C_NODE_NAME, None) - assert not (C_NODE_NAME in logging.Logger.manager.loggerDict.keys()) - assert not (handler in logging._handlers.values()) - logging._releaseLock() - # GO HOME! 
- return - def test_pgbench(self): __class__.helper__skip_test_if_util_not_exist("pgbench") @@ -1031,60 +259,6 @@ def test_unix_sockets(self): assert (res_exec == [(1,)]) assert (res_psql == b'1\n') - def test_auto_name(self): - with __class__.helper__get_node().init(allow_streaming=True).start() as m: - with m.replicate().start() as r: - # check that nodes are running - assert (m.status()) - assert (r.status()) - - # check their names - assert (m.name != r.name) - assert ('testgres' in m.name) - assert ('testgres' in r.name) - - def test_file_tail(self): - s1 = "the quick brown fox jumped over that lazy dog\n" - s2 = "abc\n" - s3 = "def\n" - - with tempfile.NamedTemporaryFile(mode='r+', delete=True) as f: - sz = 0 - while sz < 3 * 8192: - sz += len(s1) - f.write(s1) - f.write(s2) - f.write(s3) - - f.seek(0) - lines = file_tail(f, 3) - assert (lines[0] == s1) - assert (lines[1] == s2) - assert (lines[2] == s3) - - f.seek(0) - lines = file_tail(f, 1) - assert (lines[0] == s3) - - def test_isolation_levels(self): - with __class__.helper__get_node().init().start() as node: - with node.connect() as con: - # string levels - con.begin('Read Uncommitted').commit() - con.begin('Read Committed').commit() - con.begin('Repeatable Read').commit() - con.begin('Serializable').commit() - - # enum levels - con.begin(IsolationLevel.ReadUncommitted).commit() - con.begin(IsolationLevel.ReadCommitted).commit() - con.begin(IsolationLevel.RepeatableRead).commit() - con.begin(IsolationLevel.Serializable).commit() - - # check wrong level - with pytest.raises(expected_exception=QueryException): - con.begin('Garbage').commit() - def test_ports_management(self): assert bound_ports is not None assert type(bound_ports) == set # noqa: E721 @@ -1119,145 +293,6 @@ def test_ports_management(self): assert type(bound_ports) == set # noqa: E721 assert bound_ports == stage0__bound_ports - def test_exceptions(self): - str(StartNodeException('msg', [('file', 'lines')])) - str(ExecUtilException('msg', 'cmd', 1, 'out')) - str(QueryException('msg', 'query')) - - def test_version_management(self): - a = PgVer('10.0') - b = PgVer('10') - c = PgVer('9.6.5') - d = PgVer('15.0') - e = PgVer('15rc1') - f = PgVer('15beta4') - - assert (a == b) - assert (b > c) - assert (a > c) - assert (d > e) - assert (e > f) - assert (d > f) - - version = get_pg_version() - with __class__.helper__get_node() as node: - assert (isinstance(version, six.string_types)) - assert (isinstance(node.version, PgVer)) - assert (node.version == PgVer(version)) - - def test_child_pids(self): - master_processes = [ - ProcessType.AutovacuumLauncher, - ProcessType.BackgroundWriter, - ProcessType.Checkpointer, - ProcessType.StatsCollector, - ProcessType.WalSender, - ProcessType.WalWriter, - ] - - if pg_version_ge('10'): - master_processes.append(ProcessType.LogicalReplicationLauncher) - - if pg_version_ge('14'): - master_processes.remove(ProcessType.StatsCollector) - - repl_processes = [ - ProcessType.Startup, - ProcessType.WalReceiver, - ] - - def LOCAL__test_auxiliary_pids( - node: testgres.PostgresNode, - expectedTypes: list[ProcessType] - ) -> list[ProcessType]: - # returns list of the absence processes - assert node is not None - assert type(node) == testgres.PostgresNode # noqa: E721 - assert expectedTypes is not None - assert type(expectedTypes) == list # noqa: E721 - - pids = node.auxiliary_pids - assert pids is not None # noqa: E721 - assert type(pids) == dict # noqa: E721 - - result = list[ProcessType]() - for ptype in expectedTypes: - if not (ptype in pids): 
- result.append(ptype) - return result - - def LOCAL__check_auxiliary_pids__multiple_attempts( - node: testgres.PostgresNode, - expectedTypes: list[ProcessType]): - assert node is not None - assert type(node) == testgres.PostgresNode # noqa: E721 - assert expectedTypes is not None - assert type(expectedTypes) == list # noqa: E721 - - nAttempt = 0 - - while nAttempt < 5: - nAttempt += 1 - - logging.info("Test pids of [{0}] node. Attempt #{1}.".format( - node.name, - nAttempt - )) - - if nAttempt > 1: - time.sleep(1) - - absenceList = LOCAL__test_auxiliary_pids(node, expectedTypes) - assert absenceList is not None - assert type(absenceList) == list # noqa: E721 - if len(absenceList) == 0: - logging.info("Bingo!") - return - - logging.info("These processes are not found: {0}.".format(absenceList)) - continue - - raise Exception("Node {0} does not have the following processes: {1}.".format( - node.name, - absenceList - )) - - with __class__.helper__get_node().init().start() as master: - - # master node doesn't have a source walsender! - with pytest.raises(expected_exception=TestgresException): - master.source_walsender - - with master.connect() as con: - assert (con.pid > 0) - - with master.replicate().start() as replica: - - # test __str__ method - str(master.child_processes[0]) - - LOCAL__check_auxiliary_pids__multiple_attempts( - master, - master_processes) - - LOCAL__check_auxiliary_pids__multiple_attempts( - replica, - repl_processes) - - master_pids = master.auxiliary_pids - - # there should be exactly 1 source walsender for replica - assert (len(master_pids[ProcessType.WalSender]) == 1) - pid1 = master_pids[ProcessType.WalSender][0] - pid2 = replica.source_walsender.pid - assert (pid1 == pid2) - - replica.stop() - - # there should be no walsender after we've stopped replica - with pytest.raises(expected_exception=TestgresException): - replica.source_walsender - # TODO: Why does not this test work with remote host? 
def test_child_process_dies(self): nAttempt = 0 @@ -1290,8 +325,8 @@ def test_child_process_dies(self): @staticmethod def helper__get_node(name=None): - assert __class__.sm_conn_params is not None - return get_remote_node(name=name, conn_params=__class__.sm_conn_params) + assert isinstance(__class__.sm_os_ops, OsOperations) + return testgres.PostgresNode(name, conn_params=None, os_ops=__class__.sm_os_ops) @staticmethod def helper__restore_envvar(name, prev_value): @@ -1305,15 +340,3 @@ def helper__skip_test_if_util_not_exist(name: str): assert type(name) == str # noqa: E721 if not util_exists(name): pytest.skip('might be missing') - - @staticmethod - def helper__skip_test_if_pg_version_is_not_ge(version: str): - assert type(version) == str # noqa: E721 - if not pg_version_ge(version): - pytest.skip('requires {0}+'.format(version)) - - @staticmethod - def helper__skip_test_if_pg_version_is_ge(version: str): - assert type(version) == str # noqa: E721 - if pg_version_ge(version): - pytest.skip('requires <{0}'.format(version)) diff --git a/tests/test_testgres_common.py b/tests/test_testgres_common.py new file mode 100644 index 00000000..49740b61 --- /dev/null +++ b/tests/test_testgres_common.py @@ -0,0 +1,1131 @@ +from .helpers.os_ops_descrs import OsOpsDescr +from .helpers.os_ops_descrs import OsOpsDescrs +from .helpers.os_ops_descrs import OsOperations + +from ..testgres.node import PgVer +from ..testgres.node import PostgresNode +from ..testgres.utils import get_pg_version2 +from ..testgres.utils import file_tail +from ..testgres.utils import get_bin_path2 +from ..testgres import ProcessType +from ..testgres import NodeStatus +from ..testgres import IsolationLevel +from ..testgres import TestgresException +from ..testgres import InitNodeException +from ..testgres import StartNodeException +from ..testgres import QueryException +from ..testgres import ExecUtilException +from ..testgres import TimeoutException +from ..testgres import InvalidOperationException +from ..testgres import BackupException +from ..testgres import ProgrammingError +from ..testgres import scoped_config +from ..testgres import First, Any + +from contextlib import contextmanager + +import pytest +import six +import logging +import time +import tempfile +import uuid +import os +import re + + +@contextmanager +def removing(os_ops: OsOperations, f): + assert isinstance(os_ops, OsOperations) + + try: + yield f + finally: + if os_ops.isfile(f): + os_ops.remove_file(f) + + elif os_ops.isdir(f): + os_ops.rmdirs(f, ignore_errors=True) + + +class TestTestgresCommon: + sm_os_ops_descrs: list[OsOpsDescr] = [ + OsOpsDescrs.sm_local_os_ops_descr, + OsOpsDescrs.sm_remote_os_ops_descr + ] + + @pytest.fixture( + params=[descr.os_ops for descr in sm_os_ops_descrs], + ids=[descr.sign for descr in sm_os_ops_descrs] + ) + def os_ops(self, request: pytest.FixtureRequest) -> OsOperations: + assert isinstance(request, pytest.FixtureRequest) + assert isinstance(request.param, OsOperations) + return request.param + + def test_version_management(self, os_ops: OsOperations): + assert isinstance(os_ops, OsOperations) + + a = PgVer('10.0') + b = PgVer('10') + c = PgVer('9.6.5') + d = PgVer('15.0') + e = PgVer('15rc1') + f = PgVer('15beta4') + h = PgVer('15.3biha') + i = PgVer('15.3') + g = PgVer('15.3.1bihabeta1') + k = PgVer('15.3.1') + + assert (a == b) + assert (b > c) + assert (a > c) + assert (d > e) + assert (e > f) + assert (d > f) + assert (h > f) + assert (h == i) + assert (g == k) + assert (g > h) + + version = get_pg_version2(os_ops) + + 
with __class__.helper__get_node(os_ops) as node: + assert (isinstance(version, six.string_types)) + assert (isinstance(node.version, PgVer)) + assert (node.version == PgVer(version)) + + def test_double_init(self, os_ops: OsOperations): + assert isinstance(os_ops, OsOperations) + + with __class__.helper__get_node(os_ops).init() as node: + # can't initialize node more than once + with pytest.raises(expected_exception=InitNodeException): + node.init() + + def test_init_after_cleanup(self, os_ops: OsOperations): + assert isinstance(os_ops, OsOperations) + + with __class__.helper__get_node(os_ops) as node: + node.init().start().execute('select 1') + node.cleanup() + node.init().start().execute('select 1') + + def test_init_unique_system_id(self, os_ops: OsOperations): + assert isinstance(os_ops, OsOperations) + + # this function exists in PostgreSQL 9.6+ + current_version = get_pg_version2(os_ops) + + __class__.helper__skip_test_if_util_not_exist(os_ops, "pg_resetwal") + __class__.helper__skip_test_if_pg_version_is_not_ge(current_version, '9.6') + + query = 'select system_identifier from pg_control_system()' + + with scoped_config(cache_initdb=False): + with __class__.helper__get_node(os_ops).init().start() as node0: + id0 = node0.execute(query)[0] + + with scoped_config(cache_initdb=True, + cached_initdb_unique=True) as config: + assert (config.cache_initdb) + assert (config.cached_initdb_unique) + + # spawn two nodes; ids must be different + with __class__.helper__get_node(os_ops).init().start() as node1, \ + __class__.helper__get_node(os_ops).init().start() as node2: + id1 = node1.execute(query)[0] + id2 = node2.execute(query)[0] + + # ids must increase + assert (id1 > id0) + assert (id2 > id1) + + def test_node_exit(self, os_ops: OsOperations): + assert isinstance(os_ops, OsOperations) + + with pytest.raises(expected_exception=QueryException): + with __class__.helper__get_node(os_ops).init() as node: + base_dir = node.base_dir + node.safe_psql('select 1') + + # we should save the DB for "debugging" + assert (os_ops.path_exists(base_dir)) + os_ops.rmdirs(base_dir, ignore_errors=True) + + with __class__.helper__get_node(os_ops).init() as node: + base_dir = node.base_dir + + # should have been removed by default + assert not (os_ops.path_exists(base_dir)) + + def test_double_start(self, os_ops: OsOperations): + assert isinstance(os_ops, OsOperations) + + with __class__.helper__get_node(os_ops).init().start() as node: + # can't start node more than once + node.start() + assert (node.is_started) + + def test_uninitialized_start(self, os_ops: OsOperations): + assert isinstance(os_ops, OsOperations) + + with __class__.helper__get_node(os_ops) as node: + # node is not initialized yet + with pytest.raises(expected_exception=StartNodeException): + node.start() + + def test_restart(self, os_ops: OsOperations): + assert isinstance(os_ops, OsOperations) + + with __class__.helper__get_node(os_ops) as node: + node.init().start() + + # restart, ok + res = node.execute('select 1') + assert (res == [(1,)]) + node.restart() + res = node.execute('select 2') + assert (res == [(2,)]) + + # restart, fail + with pytest.raises(expected_exception=StartNodeException): + node.append_conf('pg_hba.conf', 'DUMMY') + node.restart() + + def test_reload(self, os_ops: OsOperations): + assert isinstance(os_ops, OsOperations) + + with __class__.helper__get_node(os_ops) as node: + node.init().start() + + # change client_min_messages and save old value + cmm_old = node.execute('show client_min_messages') + 
node.append_conf(client_min_messages='DEBUG1') + + # reload config + node.reload() + + # check new value + cmm_new = node.execute('show client_min_messages') + assert ('debug1' == cmm_new[0][0].lower()) + assert (cmm_old != cmm_new) + + def test_pg_ctl(self, os_ops: OsOperations): + assert isinstance(os_ops, OsOperations) + + with __class__.helper__get_node(os_ops) as node: + node.init().start() + + status = node.pg_ctl(['status']) + assert ('PID' in status) + + def test_status(self, os_ops: OsOperations): + assert isinstance(os_ops, OsOperations) + + assert (NodeStatus.Running) + assert not (NodeStatus.Stopped) + assert not (NodeStatus.Uninitialized) + + # check statuses after each operation + with __class__.helper__get_node(os_ops) as node: + assert (node.pid == 0) + assert (node.status() == NodeStatus.Uninitialized) + + node.init() + + assert (node.pid == 0) + assert (node.status() == NodeStatus.Stopped) + + node.start() + + assert (node.pid != 0) + assert (node.status() == NodeStatus.Running) + + node.stop() + + assert (node.pid == 0) + assert (node.status() == NodeStatus.Stopped) + + node.cleanup() + + assert (node.pid == 0) + assert (node.status() == NodeStatus.Uninitialized) + + def test_child_pids(self, os_ops: OsOperations): + assert isinstance(os_ops, OsOperations) + + master_processes = [ + ProcessType.AutovacuumLauncher, + ProcessType.BackgroundWriter, + ProcessType.Checkpointer, + ProcessType.StatsCollector, + ProcessType.WalSender, + ProcessType.WalWriter, + ] + + postgresVersion = get_pg_version2(os_ops) + + if __class__.helper__pg_version_ge(postgresVersion, '10'): + master_processes.append(ProcessType.LogicalReplicationLauncher) + + if __class__.helper__pg_version_ge(postgresVersion, '14'): + master_processes.remove(ProcessType.StatsCollector) + + repl_processes = [ + ProcessType.Startup, + ProcessType.WalReceiver, + ] + + def LOCAL__test_auxiliary_pids( + node: PostgresNode, + expectedTypes: list[ProcessType] + ) -> list[ProcessType]: + # returns list of the absence processes + assert node is not None + assert type(node) == PostgresNode # noqa: E721 + assert expectedTypes is not None + assert type(expectedTypes) == list # noqa: E721 + + pids = node.auxiliary_pids + assert pids is not None # noqa: E721 + assert type(pids) == dict # noqa: E721 + + result = list[ProcessType]() + for ptype in expectedTypes: + if not (ptype in pids): + result.append(ptype) + return result + + def LOCAL__check_auxiliary_pids__multiple_attempts( + node: PostgresNode, + expectedTypes: list[ProcessType]): + assert node is not None + assert type(node) == PostgresNode # noqa: E721 + assert expectedTypes is not None + assert type(expectedTypes) == list # noqa: E721 + + nAttempt = 0 + + while nAttempt < 5: + nAttempt += 1 + + logging.info("Test pids of [{0}] node. Attempt #{1}.".format( + node.name, + nAttempt + )) + + if nAttempt > 1: + time.sleep(1) + + absenceList = LOCAL__test_auxiliary_pids(node, expectedTypes) + assert absenceList is not None + assert type(absenceList) == list # noqa: E721 + if len(absenceList) == 0: + logging.info("Bingo!") + return + + logging.info("These processes are not found: {0}.".format(absenceList)) + continue + + raise Exception("Node {0} does not have the following processes: {1}.".format( + node.name, + absenceList + )) + + with __class__.helper__get_node(os_ops).init().start() as master: + + # master node doesn't have a source walsender! 
+ with pytest.raises(expected_exception=TestgresException): + master.source_walsender + + with master.connect() as con: + assert (con.pid > 0) + + with master.replicate().start() as replica: + + # test __str__ method + str(master.child_processes[0]) + + LOCAL__check_auxiliary_pids__multiple_attempts( + master, + master_processes) + + LOCAL__check_auxiliary_pids__multiple_attempts( + replica, + repl_processes) + + master_pids = master.auxiliary_pids + + # there should be exactly 1 source walsender for replica + assert (len(master_pids[ProcessType.WalSender]) == 1) + pid1 = master_pids[ProcessType.WalSender][0] + pid2 = replica.source_walsender.pid + assert (pid1 == pid2) + + replica.stop() + + # there should be no walsender after we've stopped replica + with pytest.raises(expected_exception=TestgresException): + replica.source_walsender + + def test_exceptions(self): + str(StartNodeException('msg', [('file', 'lines')])) + str(ExecUtilException('msg', 'cmd', 1, 'out')) + str(QueryException('msg', 'query')) + + def test_auto_name(self, os_ops: OsOperations): + assert isinstance(os_ops, OsOperations) + + with __class__.helper__get_node(os_ops).init(allow_streaming=True).start() as m: + with m.replicate().start() as r: + # check that nodes are running + assert (m.status()) + assert (r.status()) + + # check their names + assert (m.name != r.name) + assert ('testgres' in m.name) + assert ('testgres' in r.name) + + def test_file_tail(self): + s1 = "the quick brown fox jumped over that lazy dog\n" + s2 = "abc\n" + s3 = "def\n" + + with tempfile.NamedTemporaryFile(mode='r+', delete=True) as f: + sz = 0 + while sz < 3 * 8192: + sz += len(s1) + f.write(s1) + f.write(s2) + f.write(s3) + + f.seek(0) + lines = file_tail(f, 3) + assert (lines[0] == s1) + assert (lines[1] == s2) + assert (lines[2] == s3) + + f.seek(0) + lines = file_tail(f, 1) + assert (lines[0] == s3) + + def test_isolation_levels(self, os_ops: OsOperations): + assert isinstance(os_ops, OsOperations) + with __class__.helper__get_node(os_ops).init().start() as node: + with node.connect() as con: + # string levels + con.begin('Read Uncommitted').commit() + con.begin('Read Committed').commit() + con.begin('Repeatable Read').commit() + con.begin('Serializable').commit() + + # enum levels + con.begin(IsolationLevel.ReadUncommitted).commit() + con.begin(IsolationLevel.ReadCommitted).commit() + con.begin(IsolationLevel.RepeatableRead).commit() + con.begin(IsolationLevel.Serializable).commit() + + # check wrong level + with pytest.raises(expected_exception=QueryException): + con.begin('Garbage').commit() + + def test_users(self, os_ops: OsOperations): + assert isinstance(os_ops, OsOperations) + with __class__.helper__get_node(os_ops).init().start() as node: + node.psql('create role test_user login') + value = node.safe_psql('select 1', username='test_user') + value = __class__.helper__rm_carriage_returns(value) + assert (value == b'1\n') + + def test_poll_query_until(self, os_ops: OsOperations): + assert isinstance(os_ops, OsOperations) + with __class__.helper__get_node(os_ops) as node: + node.init().start() + + get_time = 'select extract(epoch from now())' + check_time = 'select extract(epoch from now()) - {} >= 5' + + start_time = node.execute(get_time)[0][0] + node.poll_query_until(query=check_time.format(start_time)) + end_time = node.execute(get_time)[0][0] + + assert (end_time - start_time >= 5) + + # check 0 columns + with pytest.raises(expected_exception=QueryException): + node.poll_query_until( + query='select from pg_catalog.pg_class 
limit 1') + + # check None, fail + with pytest.raises(expected_exception=QueryException): + node.poll_query_until(query='create table abc (val int)') + + # check None, ok + node.poll_query_until(query='create table def()', + expected=None) # returns nothing + + # check 0 rows equivalent to expected=None + node.poll_query_until( + query='select * from pg_catalog.pg_class where true = false', + expected=None) + + # check arbitrary expected value, fail + with pytest.raises(expected_exception=TimeoutException): + node.poll_query_until(query='select 3', + expected=1, + max_attempts=3, + sleep_time=0.01) + + # check arbitrary expected value, ok + node.poll_query_until(query='select 2', expected=2) + + # check timeout + with pytest.raises(expected_exception=TimeoutException): + node.poll_query_until(query='select 1 > 2', + max_attempts=3, + sleep_time=0.01) + + # check ProgrammingError, fail + with pytest.raises(expected_exception=ProgrammingError): + node.poll_query_until(query='dummy1') + + # check ProgrammingError, ok + with pytest.raises(expected_exception=(TimeoutException)): + node.poll_query_until(query='dummy2', + max_attempts=3, + sleep_time=0.01, + suppress={ProgrammingError}) + + # check 1 arg, ok + node.poll_query_until('select true') + + def test_logging(self, os_ops: OsOperations): + assert isinstance(os_ops, OsOperations) + C_MAX_ATTEMPTS = 50 + # This name is used for testgres logging, too. + C_NODE_NAME = "testgres_tests." + __class__.__name__ + "test_logging-master-" + uuid.uuid4().hex + + logging.info("Node name is [{0}]".format(C_NODE_NAME)) + + with tempfile.NamedTemporaryFile('w', delete=True) as logfile: + formatter = logging.Formatter(fmt="%(node)-5s: %(message)s") + handler = logging.FileHandler(filename=logfile.name) + handler.formatter = formatter + logger = logging.getLogger(C_NODE_NAME) + assert logger is not None + assert len(logger.handlers) == 0 + + try: + # It disables to log on the root level + logger.propagate = False + logger.addHandler(handler) + + with scoped_config(use_python_logging=True): + with __class__.helper__get_node(os_ops, name=C_NODE_NAME) as master: + logging.info("Master node is initilizing") + master.init() + + logging.info("Master node is starting") + master.start() + + logging.info("Dummy query is executed a few times") + for _ in range(20): + master.execute('select 1') + time.sleep(0.01) + + # let logging worker do the job + time.sleep(0.1) + + logging.info("Master node log file is checking") + nAttempt = 0 + + while True: + assert nAttempt <= C_MAX_ATTEMPTS + if nAttempt == C_MAX_ATTEMPTS: + raise Exception("Test failed!") + + # let logging worker do the job + time.sleep(0.1) + + nAttempt += 1 + + logging.info("Attempt {0}".format(nAttempt)) + + # check that master's port is found + with open(logfile.name, 'r') as log: + lines = log.readlines() + + assert lines is not None + assert type(lines) == list # noqa: E721 + + def LOCAL__test_lines(): + for s in lines: + if any(C_NODE_NAME in s for s in lines): + logging.info("OK. 
We found the node_name in a line \"{0}\"".format(s)) + return True + return False + + if LOCAL__test_lines(): + break + + logging.info("Master node log file does not have an expected information.") + continue + + # test logger after stop/start/restart + logging.info("Master node is stopping...") + master.stop() + logging.info("Master node is staring again...") + master.start() + logging.info("Master node is restaring...") + master.restart() + assert (master._logger.is_alive()) + finally: + # It is a hack code to logging cleanup + logging._acquireLock() + assert logging.Logger.manager is not None + assert C_NODE_NAME in logging.Logger.manager.loggerDict.keys() + logging.Logger.manager.loggerDict.pop(C_NODE_NAME, None) + assert not (C_NODE_NAME in logging.Logger.manager.loggerDict.keys()) + assert not (handler in logging._handlers.values()) + logging._releaseLock() + # GO HOME! + return + + def test_psql(self, os_ops: OsOperations): + assert isinstance(os_ops, OsOperations) + with __class__.helper__get_node(os_ops).init().start() as node: + + # check returned values (1 arg) + res = node.psql('select 1') + assert (__class__.helper__rm_carriage_returns(res) == (0, b'1\n', b'')) + + # check returned values (2 args) + res = node.psql('postgres', 'select 2') + assert (__class__.helper__rm_carriage_returns(res) == (0, b'2\n', b'')) + + # check returned values (named) + res = node.psql(query='select 3', dbname='postgres') + assert (__class__.helper__rm_carriage_returns(res) == (0, b'3\n', b'')) + + # check returned values (1 arg) + res = node.safe_psql('select 4') + assert (__class__.helper__rm_carriage_returns(res) == b'4\n') + + # check returned values (2 args) + res = node.safe_psql('postgres', 'select 5') + assert (__class__.helper__rm_carriage_returns(res) == b'5\n') + + # check returned values (named) + res = node.safe_psql(query='select 6', dbname='postgres') + assert (__class__.helper__rm_carriage_returns(res) == b'6\n') + + # check feeding input + node.safe_psql('create table horns (w int)') + node.safe_psql('copy horns from stdin (format csv)', + input=b"1\n2\n3\n\\.\n") + _sum = node.safe_psql('select sum(w) from horns') + assert (__class__.helper__rm_carriage_returns(_sum) == b'6\n') + + # check psql's default args, fails + with pytest.raises(expected_exception=QueryException): + node.psql() + + node.stop() + + # check psql on stopped node, fails + with pytest.raises(expected_exception=QueryException): + node.safe_psql('select 1') + + def test_safe_psql__expect_error(self, os_ops: OsOperations): + assert isinstance(os_ops, OsOperations) + with __class__.helper__get_node(os_ops).init().start() as node: + err = node.safe_psql('select_or_not_select 1', expect_error=True) + assert (type(err) == str) # noqa: E721 + assert ('select_or_not_select' in err) + assert ('ERROR: syntax error at or near "select_or_not_select"' in err) + + # --------- + with pytest.raises( + expected_exception=InvalidOperationException, + match="^" + re.escape("Exception was expected, but query finished successfully: `select 1;`.") + "$" + ): + node.safe_psql("select 1;", expect_error=True) + + # --------- + res = node.safe_psql("select 1;", expect_error=False) + assert (__class__.helper__rm_carriage_returns(res) == b'1\n') + + def test_transactions(self, os_ops: OsOperations): + assert isinstance(os_ops, OsOperations) + with __class__.helper__get_node(os_ops).init().start() as node: + + with node.connect() as con: + con.begin() + con.execute('create table test(val int)') + con.execute('insert into test values (1)') + 
con.commit() + + con.begin() + con.execute('insert into test values (2)') + res = con.execute('select * from test order by val asc') + assert (res == [(1, ), (2, )]) + con.rollback() + + con.begin() + res = con.execute('select * from test') + assert (res == [(1, )]) + con.rollback() + + con.begin() + con.execute('drop table test') + con.commit() + + def test_control_data(self, os_ops: OsOperations): + assert isinstance(os_ops, OsOperations) + with __class__.helper__get_node(os_ops) as node: + + # node is not initialized yet + with pytest.raises(expected_exception=ExecUtilException): + node.get_control_data() + + node.init() + data = node.get_control_data() + + # check returned dict + assert data is not None + assert (any('pg_control' in s for s in data.keys())) + + def test_backup_simple(self, os_ops: OsOperations): + assert isinstance(os_ops, OsOperations) + with __class__.helper__get_node(os_ops) as master: + + # enable streaming for backups + master.init(allow_streaming=True) + + # node must be running + with pytest.raises(expected_exception=BackupException): + master.backup() + + # it's time to start node + master.start() + + # fill node with some data + master.psql('create table test as select generate_series(1, 4) i') + + with master.backup(xlog_method='stream') as backup: + with backup.spawn_primary().start() as slave: + res = slave.execute('select * from test order by i asc') + assert (res == [(1, ), (2, ), (3, ), (4, )]) + + def test_backup_multiple(self, os_ops: OsOperations): + assert isinstance(os_ops, OsOperations) + with __class__.helper__get_node(os_ops) as node: + node.init(allow_streaming=True).start() + + with node.backup(xlog_method='fetch') as backup1, \ + node.backup(xlog_method='fetch') as backup2: + assert (backup1.base_dir != backup2.base_dir) + + with node.backup(xlog_method='fetch') as backup: + with backup.spawn_primary('node1', destroy=False) as node1, \ + backup.spawn_primary('node2', destroy=False) as node2: + assert (node1.base_dir != node2.base_dir) + + def test_backup_exhaust(self, os_ops: OsOperations): + assert isinstance(os_ops, OsOperations) + with __class__.helper__get_node(os_ops) as node: + node.init(allow_streaming=True).start() + + with node.backup(xlog_method='fetch') as backup: + # exhaust backup by creating new node + with backup.spawn_primary(): + pass + + # now let's try to create one more node + with pytest.raises(expected_exception=BackupException): + backup.spawn_primary() + + def test_backup_wrong_xlog_method(self, os_ops: OsOperations): + assert isinstance(os_ops, OsOperations) + with __class__.helper__get_node(os_ops) as node: + node.init(allow_streaming=True).start() + + with pytest.raises( + expected_exception=BackupException, + match="^" + re.escape('Invalid xlog_method "wrong"') + "$" + ): + node.backup(xlog_method='wrong') + + def test_pg_ctl_wait_option(self, os_ops: OsOperations): + assert isinstance(os_ops, OsOperations) + C_MAX_ATTEMPTS = 50 + + node = __class__.helper__get_node(os_ops) + assert node.status() == NodeStatus.Uninitialized + node.init() + assert node.status() == NodeStatus.Stopped + node.start(wait=False) + nAttempt = 0 + while True: + if nAttempt == C_MAX_ATTEMPTS: + # + # [2025-03-11] + # We have an unexpected problem with this test in CI + # Let's get an additional information about this test failure. 
+ # + logging.error("Node was not stopped.") + if not node.os_ops.path_exists(node.pg_log_file): + logging.warning("Node log does not exist.") + else: + logging.info("Let's read node log file [{0}]".format(node.pg_log_file)) + logFileData = node.os_ops.read(node.pg_log_file, binary=False) + logging.info("Node log file content:\n{0}".format(logFileData)) + + raise Exception("Could not stop node.") + + nAttempt += 1 + + if nAttempt > 1: + logging.info("Wait 1 second.") + time.sleep(1) + logging.info("") + + logging.info("Try to stop node. Attempt #{0}.".format(nAttempt)) + + try: + node.stop(wait=False) + break + except ExecUtilException as e: + # it's ok to get this exception here since node + # could be not started yet + logging.info("Node is not stopped. Exception ({0}): {1}".format(type(e).__name__, e)) + continue + + logging.info("OK. Stop command was executed. Let's wait while our node will stop really.") + nAttempt = 0 + while True: + if nAttempt == C_MAX_ATTEMPTS: + raise Exception("Could not stop node.") + + nAttempt += 1 + if nAttempt > 1: + logging.info("Wait 1 second.") + time.sleep(1) + logging.info("") + + logging.info("Attempt #{0}.".format(nAttempt)) + s1 = node.status() + + if s1 == NodeStatus.Running: + continue + + if s1 == NodeStatus.Stopped: + break + + raise Exception("Unexpected node status: {0}.".format(s1)) + + logging.info("OK. Node is stopped.") + node.cleanup() + + def test_replicate(self, os_ops: OsOperations): + assert isinstance(os_ops, OsOperations) + with __class__.helper__get_node(os_ops) as node: + node.init(allow_streaming=True).start() + + with node.replicate().start() as replica: + res = replica.execute('select 1') + assert (res == [(1, )]) + + node.execute('create table test (val int)', commit=True) + + replica.catchup() + + res = node.execute('select * from test') + assert (res == []) + + def test_synchronous_replication(self, os_ops: OsOperations): + assert isinstance(os_ops, OsOperations) + + current_version = get_pg_version2(os_ops) + + __class__.helper__skip_test_if_pg_version_is_not_ge(current_version, "9.6") + + with __class__.helper__get_node(os_ops) as master: + old_version = not __class__.helper__pg_version_ge(current_version, '9.6') + + master.init(allow_streaming=True).start() + + if not old_version: + master.append_conf('synchronous_commit = remote_apply') + + # create standby + with master.replicate() as standby1, master.replicate() as standby2: + standby1.start() + standby2.start() + + # check formatting + assert ( + '1 ("{}", "{}")'.format(standby1.name, standby2.name) == str(First(1, (standby1, standby2))) + ) # yapf: disable + assert ( + 'ANY 1 ("{}", "{}")'.format(standby1.name, standby2.name) == str(Any(1, (standby1, standby2))) + ) # yapf: disable + + # set synchronous_standby_names + master.set_synchronous_standbys(First(2, [standby1, standby2])) + master.restart() + + # the following part of the test is only applicable to newer + # versions of PostgresQL + if not old_version: + master.safe_psql('create table abc(a int)') + + # Create a large transaction that will take some time to apply + # on standby to check that it applies synchronously + # (If set synchronous_commit to 'on' or other lower level then + # standby most likely won't catchup so fast and test will fail) + master.safe_psql( + 'insert into abc select generate_series(1, 1000000)') + res = standby1.safe_psql('select count(*) from abc') + assert (__class__.helper__rm_carriage_returns(res) == b'1000000\n') + + def test_logical_replication(self, os_ops: OsOperations): + 
assert isinstance(os_ops, OsOperations) + + current_version = get_pg_version2(os_ops) + + __class__.helper__skip_test_if_pg_version_is_not_ge(current_version, "10") + + with __class__.helper__get_node(os_ops) as node1, __class__.helper__get_node(os_ops) as node2: + node1.init(allow_logical=True) + node1.start() + node2.init().start() + + create_table = 'create table test (a int, b int)' + node1.safe_psql(create_table) + node2.safe_psql(create_table) + + # create publication / create subscription + pub = node1.publish('mypub') + sub = node2.subscribe(pub, 'mysub') + + node1.safe_psql('insert into test values (1, 1), (2, 2)') + + # wait until changes apply on subscriber and check them + sub.catchup() + res = node2.execute('select * from test') + assert (res == [(1, 1), (2, 2)]) + + # disable and put some new data + sub.disable() + node1.safe_psql('insert into test values (3, 3)') + + # enable and ensure that data successfully transferred + sub.enable() + sub.catchup() + res = node2.execute('select * from test') + assert (res == [(1, 1), (2, 2), (3, 3)]) + + # Add new tables. Since we added "all tables" to publication + # (default behaviour of publish() method) we don't need + # to explicitly perform pub.add_tables() + create_table = 'create table test2 (c char)' + node1.safe_psql(create_table) + node2.safe_psql(create_table) + sub.refresh() + + # put new data + node1.safe_psql('insert into test2 values (\'a\'), (\'b\')') + sub.catchup() + res = node2.execute('select * from test2') + assert (res == [('a', ), ('b', )]) + + # drop subscription + sub.drop() + pub.drop() + + # create new publication and subscription for specific table + # (omitting copying data as it's already done) + pub = node1.publish('newpub', tables=['test']) + sub = node2.subscribe(pub, 'newsub', copy_data=False) + + node1.safe_psql('insert into test values (4, 4)') + sub.catchup() + res = node2.execute('select * from test') + assert (res == [(1, 1), (2, 2), (3, 3), (4, 4)]) + + # explicitly add table + with pytest.raises(expected_exception=ValueError): + pub.add_tables([]) # fail + pub.add_tables(['test2']) + node1.safe_psql('insert into test2 values (\'c\')') + sub.catchup() + res = node2.execute('select * from test2') + assert (res == [('a', ), ('b', )]) + + def test_logical_catchup(self, os_ops: OsOperations): + assert isinstance(os_ops, OsOperations) + """ Runs catchup for 100 times to be sure that it is consistent """ + + current_version = get_pg_version2(os_ops) + + __class__.helper__skip_test_if_pg_version_is_not_ge(current_version, "10") + + with __class__.helper__get_node(os_ops) as node1, __class__.helper__get_node(os_ops) as node2: + node1.init(allow_logical=True) + node1.start() + node2.init().start() + + create_table = 'create table test (key int primary key, val int); ' + node1.safe_psql(create_table) + node1.safe_psql('alter table test replica identity default') + node2.safe_psql(create_table) + + # create publication / create subscription + sub = node2.subscribe(node1.publish('mypub'), 'mysub') + + for i in range(0, 100): + node1.execute('insert into test values ({0}, {0})'.format(i)) + sub.catchup() + res = node2.execute('select * from test') + assert (res == [(i, i, )]) + node1.execute('delete from test') + + def test_logical_replication_fail(self, os_ops: OsOperations): + assert isinstance(os_ops, OsOperations) + + current_version = get_pg_version2(os_ops) + + __class__.helper__skip_test_if_pg_version_is_ge(current_version, "10") + + with __class__.helper__get_node(os_ops) as node: + with 
pytest.raises(expected_exception=InitNodeException): + node.init(allow_logical=True) + + def test_replication_slots(self, os_ops: OsOperations): + assert isinstance(os_ops, OsOperations) + with __class__.helper__get_node(os_ops) as node: + node.init(allow_streaming=True).start() + + with node.replicate(slot='slot1').start() as replica: + replica.execute('select 1') + + # cannot create new slot with the same name + with pytest.raises(expected_exception=TestgresException): + node.replicate(slot='slot1') + + def test_incorrect_catchup(self, os_ops: OsOperations): + assert isinstance(os_ops, OsOperations) + with __class__.helper__get_node(os_ops) as node: + node.init(allow_streaming=True).start() + + # node has no master, can't catch up + with pytest.raises(expected_exception=TestgresException): + node.catchup() + + def test_promotion(self, os_ops: OsOperations): + assert isinstance(os_ops, OsOperations) + with __class__.helper__get_node(os_ops) as master: + master.init().start() + master.safe_psql('create table abc(id serial)') + + with master.replicate().start() as replica: + master.stop() + replica.promote() + + # make standby becomes writable master + replica.safe_psql('insert into abc values (1)') + res = replica.safe_psql('select * from abc') + assert (__class__.helper__rm_carriage_returns(res) == b'1\n') + + def test_dump(self, os_ops: OsOperations): + assert isinstance(os_ops, OsOperations) + query_create = 'create table test as select generate_series(1, 2) as val' + query_select = 'select * from test order by val asc' + + with __class__.helper__get_node(os_ops).init().start() as node1: + + node1.execute(query_create) + for format in ['plain', 'custom', 'directory', 'tar']: + with removing(os_ops, node1.dump(format=format)) as dump: + with __class__.helper__get_node(os_ops).init().start() as node3: + if format == 'directory': + assert (os.path.isdir(dump)) + else: + assert (os.path.isfile(dump)) + # restore dump + node3.restore(filename=dump) + res = node3.execute(query_select) + assert (res == [(1, ), (2, )]) + + @staticmethod + def helper__get_node(os_ops: OsOperations, name=None): + assert isinstance(os_ops, OsOperations) + return PostgresNode(name, conn_params=None, os_ops=os_ops) + + @staticmethod + def helper__skip_test_if_pg_version_is_not_ge(ver1: str, ver2: str): + assert type(ver1) == str # noqa: E721 + assert type(ver2) == str # noqa: E721 + if not __class__.helper__pg_version_ge(ver1, ver2): + pytest.skip('requires {0}+'.format(ver2)) + + @staticmethod + def helper__skip_test_if_pg_version_is_ge(ver1: str, ver2: str): + assert type(ver1) == str # noqa: E721 + assert type(ver2) == str # noqa: E721 + if __class__.helper__pg_version_ge(ver1, ver2): + pytest.skip('requires <{0}'.format(ver2)) + + @staticmethod + def helper__pg_version_ge(ver1: str, ver2: str) -> bool: + assert type(ver1) == str # noqa: E721 + assert type(ver2) == str # noqa: E721 + v1 = PgVer(ver1) + v2 = PgVer(ver2) + return v1 >= v2 + + @staticmethod + def helper__rm_carriage_returns(out): + """ + In Windows we have additional '\r' symbols in output. + Let's get rid of them. 
+ """ + if isinstance(out, (int, float, complex)): + return out + + if isinstance(out, tuple): + return tuple(__class__.helper__rm_carriage_returns(item) for item in out) + + if isinstance(out, bytes): + return out.replace(b'\r', b'') + + assert type(out) == str # noqa: E721 + return out.replace('\r', '') + + @staticmethod + def helper__skip_test_if_util_not_exist(os_ops: OsOperations, name: str): + assert isinstance(os_ops, OsOperations) + assert type(name) == str # noqa: E721 + if not __class__.helper__util_exists(os_ops, name): + pytest.skip('might be missing') + + @staticmethod + def helper__util_exists(os_ops: OsOperations, util): + assert isinstance(os_ops, OsOperations) + + def good_properties(f): + return (os_ops.path_exists(f) and # noqa: W504 + os_ops.isfile(f) and # noqa: W504 + os_ops.is_executable(f)) # yapf: disable + + # try to resolve it + if good_properties(get_bin_path2(os_ops, util)): + return True + + # check if util is in PATH + for path in os_ops.environ("PATH").split(os.pathsep): + if good_properties(os.path.join(path, util)): + return True From 0b2c629318ea98333b6824fcade369efa349df34 Mon Sep 17 00:00:00 2001 From: "d.kovalenko" Date: Mon, 17 Mar 2025 17:07:41 +0300 Subject: [PATCH 40/90] os_ops_descrs.py is added --- tests/helpers/os_ops_descrs.py | 32 ++++++++++++++++++++++++++++++++ 1 file changed, 32 insertions(+) create mode 100644 tests/helpers/os_ops_descrs.py diff --git a/tests/helpers/os_ops_descrs.py b/tests/helpers/os_ops_descrs.py new file mode 100644 index 00000000..02297adb --- /dev/null +++ b/tests/helpers/os_ops_descrs.py @@ -0,0 +1,32 @@ +from ...testgres.operations.os_ops import OsOperations +from ...testgres.operations.os_ops import ConnectionParams +from ...testgres.operations.local_ops import LocalOperations +from ...testgres.operations.remote_ops import RemoteOperations + +import os + + +class OsOpsDescr: + os_ops: OsOperations + sign: str + + def __init__(self, os_ops: OsOperations, sign: str): + assert isinstance(os_ops, OsOperations) + assert type(sign) == str # noqa: E721 + self.os_ops = os_ops + self.sign = sign + + +class OsOpsDescrs: + sm_remote_conn_params = ConnectionParams( + host=os.getenv('RDBMS_TESTPOOL1_HOST') or '127.0.0.1', + username=os.getenv('USER'), + ssh_key=os.getenv('RDBMS_TESTPOOL_SSHKEY')) + + sm_remote_os_ops = RemoteOperations(sm_remote_conn_params) + + sm_remote_os_ops_descr = OsOpsDescr(sm_remote_os_ops, "remote_ops") + + sm_local_os_ops = LocalOperations() + + sm_local_os_ops_descr = OsOpsDescr(sm_local_os_ops, "local_ops") From b597bf893633dd5e6701c925f8fd70b1f7fda128 Mon Sep 17 00:00:00 2001 From: Dmitry Kovalenko Date: Tue, 18 Mar 2025 19:25:11 +0300 Subject: [PATCH 41/90] Warnings with pytest are fixed (#223) 1) [pytest.ini] testpaths has another format. It is a spaces separated list. pytest warning: PytestConfigWarning: No files were found in testpaths; consider removing or adjusting your testpaths configuration. Searching recursively from the current directory instead. 2) pytest tries to find the test function in TestgresException class. Let's rename it to avoid this problem. pytest warning: PytestCollectionWarning: cannot collect test class 'TestgresException' because it has a __init__ constructor (from: tests/test_simple.py) class TestgresException(Exception): Of course, we can add __test__=False in TestgresException but it is not a good solution. 
--- pytest.ini | 2 +- tests/test_testgres_common.py | 14 +++++++++----- 2 files changed, 10 insertions(+), 6 deletions(-) diff --git a/pytest.ini b/pytest.ini index c94eabc2..9f5fa375 100644 --- a/pytest.ini +++ b/pytest.ini @@ -1,5 +1,5 @@ [pytest] -testpaths = ["./tests", "./testgres/plugins/pg_probackup2/pg_probackup2/tests"] +testpaths = tests testgres/plugins/pg_probackup2/pg_probackup2/tests addopts = --strict-markers markers = #log_file = logs/pytest.log diff --git a/tests/test_testgres_common.py b/tests/test_testgres_common.py index 49740b61..f42964c8 100644 --- a/tests/test_testgres_common.py +++ b/tests/test_testgres_common.py @@ -10,7 +10,11 @@ from ..testgres import ProcessType from ..testgres import NodeStatus from ..testgres import IsolationLevel -from ..testgres import TestgresException + +# New name prevents to collect test-functions in TestgresException and fixes +# the problem with pytest warning. +from ..testgres import TestgresException as testgres_TestgresException + from ..testgres import InitNodeException from ..testgres import StartNodeException from ..testgres import QueryException @@ -336,7 +340,7 @@ def LOCAL__check_auxiliary_pids__multiple_attempts( with __class__.helper__get_node(os_ops).init().start() as master: # master node doesn't have a source walsender! - with pytest.raises(expected_exception=TestgresException): + with pytest.raises(expected_exception=testgres_TestgresException): master.source_walsender with master.connect() as con: @@ -366,7 +370,7 @@ def LOCAL__check_auxiliary_pids__multiple_attempts( replica.stop() # there should be no walsender after we've stopped replica - with pytest.raises(expected_exception=TestgresException): + with pytest.raises(expected_exception=testgres_TestgresException): replica.source_walsender def test_exceptions(self): @@ -1013,7 +1017,7 @@ def test_replication_slots(self, os_ops: OsOperations): replica.execute('select 1') # cannot create new slot with the same name - with pytest.raises(expected_exception=TestgresException): + with pytest.raises(expected_exception=testgres_TestgresException): node.replicate(slot='slot1') def test_incorrect_catchup(self, os_ops: OsOperations): @@ -1022,7 +1026,7 @@ def test_incorrect_catchup(self, os_ops: OsOperations): node.init(allow_streaming=True).start() # node has no master, can't catch up - with pytest.raises(expected_exception=TestgresException): + with pytest.raises(expected_exception=testgres_TestgresException): node.catchup() def test_promotion(self, os_ops: OsOperations): From 87dbecb3a1c190099b43fc1510c92a6baa35fea3 Mon Sep 17 00:00:00 2001 From: "d.kovalenko" Date: Tue, 18 Mar 2025 19:27:44 +0300 Subject: [PATCH 42/90] OsOperations::remove_file is added It seems to me we forgot to add it. 
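The hunk below only declares remove_file() on the base class, where it raises NotImplementedError; concrete backends are expected to override it. A minimal sketch of such an override for a local-filesystem backend (the class name here is hypothetical and not part of this patch):

    import os

    class ExampleLocalFsOps:
        # Hypothetical override: remove a regular file from the local filesystem.
        def remove_file(self, filename):
            assert type(filename) == str  # noqa: E721
            os.remove(filename)
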
--- testgres/operations/os_ops.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/testgres/operations/os_ops.py b/testgres/operations/os_ops.py index 00880863..f20a7a30 100644 --- a/testgres/operations/os_ops.py +++ b/testgres/operations/os_ops.py @@ -108,6 +108,10 @@ def isdir(self, dirname): def get_file_size(self, filename): raise NotImplementedError() + def remove_file(self, filename): + assert type(filename) == str # noqa: E721 + raise NotImplementedError() + # Processes control def kill(self, pid, signal): # Kill the process From 25c6a2fe3d0557f0a60d719c74e2f044cf22f6ac Mon Sep 17 00:00:00 2001 From: Dmitry Kovalenko Date: Tue, 18 Mar 2025 21:34:25 +0300 Subject: [PATCH 43/90] Log files (#224) Let's start writing test events into files to provide an access to the tests execution information. --- .gitignore | 1 + tests/conftest.py | 510 ++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 511 insertions(+) create mode 100644 tests/conftest.py diff --git a/.gitignore b/.gitignore index 038d1952..238181b5 100644 --- a/.gitignore +++ b/.gitignore @@ -5,6 +5,7 @@ dist/ build/ docs/build/ +logs/ env/ venv/ diff --git a/tests/conftest.py b/tests/conftest.py new file mode 100644 index 00000000..e37c3c77 --- /dev/null +++ b/tests/conftest.py @@ -0,0 +1,510 @@ +# ///////////////////////////////////////////////////////////////////////////// +# PyTest Configuration + +import _pytest.outcomes + +import pluggy +import pytest +import _pytest +import os +import logging +import pathlib +import math +import datetime + +# ///////////////////////////////////////////////////////////////////////////// + +C_ROOT_DIR__RELATIVE = ".." + +# ///////////////////////////////////////////////////////////////////////////// +# TestConfigPropNames + + +class TestConfigPropNames: + TEST_CFG__LOG_DIR = "TEST_CFG__LOG_DIR" + + +# ///////////////////////////////////////////////////////////////////////////// +# TestStartupData__Helper + + +class TestStartupData__Helper: + sm_StartTS = datetime.datetime.now() + + # -------------------------------------------------------------------- + def GetStartTS() -> datetime.datetime: + assert type(__class__.sm_StartTS) == datetime.datetime # noqa: E721 + return __class__.sm_StartTS + + # -------------------------------------------------------------------- + def CalcRootDir() -> str: + r = os.path.abspath(__file__) + r = os.path.dirname(r) + r = os.path.join(r, C_ROOT_DIR__RELATIVE) + r = os.path.abspath(r) + return r + + # -------------------------------------------------------------------- + def CalcCurrentTestWorkerSignature() -> str: + currentPID = os.getpid() + assert type(currentPID) + + startTS = __class__.sm_StartTS + assert type(startTS) + + result = "pytest-{0:04d}{1:02d}{2:02d}_{3:02d}{4:02d}{5:02d}".format( + startTS.year, + startTS.month, + startTS.day, + startTS.hour, + startTS.minute, + startTS.second, + ) + + gwid = os.environ.get("PYTEST_XDIST_WORKER") + + if gwid is not None: + result += "--xdist_" + str(gwid) + + result += "--" + "pid" + str(currentPID) + return result + + +# ///////////////////////////////////////////////////////////////////////////// +# TestStartupData + + +class TestStartupData: + sm_RootDir: str = TestStartupData__Helper.CalcRootDir() + sm_CurrentTestWorkerSignature: str = ( + TestStartupData__Helper.CalcCurrentTestWorkerSignature() + ) + + # -------------------------------------------------------------------- + def GetRootDir() -> str: + assert type(__class__.sm_RootDir) == str # noqa: E721 + return __class__.sm_RootDir + + # 
-------------------------------------------------------------------- + def GetCurrentTestWorkerSignature() -> str: + assert type(__class__.sm_CurrentTestWorkerSignature) == str # noqa: E721 + return __class__.sm_CurrentTestWorkerSignature + + +# /////////////////////////////////////////////////////////////////////////////# ///////////////////////////////////////////////////////////////////////////// +# Fixtures + + +# ///////////////////////////////////////////////////////////////////////////// +# TEST_PROCESS_STATS + + +class TEST_PROCESS_STATS: + cTotalTests: int = 0 + cNotExecutedTests: int = 0 + cExecutedTests: int = 0 + cPassedTests: int = 0 + cFailedTests: int = 0 + cXFailedTests: int = 0 + cSkippedTests: int = 0 + cNotXFailedTests: int = 0 + cUnexpectedTests: int = 0 + + FailedTests = list[str]() + XFailedTests = list[str]() + NotXFailedTests = list[str]() + + # -------------------------------------------------------------------- + def incrementTotalTestCount() -> None: + __class__.cTotalTests += 1 + + # -------------------------------------------------------------------- + def incrementNotExecutedTestCount() -> None: + __class__.cNotExecutedTests += 1 + + # -------------------------------------------------------------------- + def incrementExecutedTestCount() -> int: + __class__.cExecutedTests += 1 + return __class__.cExecutedTests + + # -------------------------------------------------------------------- + def incrementPassedTestCount() -> None: + __class__.cPassedTests += 1 + + # -------------------------------------------------------------------- + def incrementFailedTestCount(testID: str) -> None: + assert type(testID) == str # noqa: E721 + assert type(__class__.FailedTests) == list # noqa: E721 + + __class__.FailedTests.append(testID) # raise? + __class__.cFailedTests += 1 + + # -------------------------------------------------------------------- + def incrementXFailedTestCount(testID: str) -> None: + assert type(testID) == str # noqa: E721 + assert type(__class__.XFailedTests) == list # noqa: E721 + + __class__.XFailedTests.append(testID) # raise? + __class__.cXFailedTests += 1 + + # -------------------------------------------------------------------- + def incrementSkippedTestCount() -> None: + __class__.cSkippedTests += 1 + + # -------------------------------------------------------------------- + def incrementNotXFailedTests(testID: str) -> None: + assert type(testID) == str # noqa: E721 + assert type(__class__.NotXFailedTests) == list # noqa: E721 + + __class__.NotXFailedTests.append(testID) # raise? 
+ __class__.cNotXFailedTests += 1 + + # -------------------------------------------------------------------- + def incrementUnexpectedTests() -> None: + __class__.cUnexpectedTests += 1 + + +# ///////////////////////////////////////////////////////////////////////////// + + +def timedelta_to_human_text(delta: datetime.timedelta) -> str: + assert isinstance(delta, datetime.timedelta) + + C_SECONDS_IN_MINUTE = 60 + C_SECONDS_IN_HOUR = 60 * C_SECONDS_IN_MINUTE + + v = delta.seconds + + cHours = int(v / C_SECONDS_IN_HOUR) + v = v - cHours * C_SECONDS_IN_HOUR + cMinutes = int(v / C_SECONDS_IN_MINUTE) + cSeconds = v - cMinutes * C_SECONDS_IN_MINUTE + + result = "" if delta.days == 0 else "{0} day(s) ".format(delta.days) + + result = result + "{:02d}:{:02d}:{:02d}.{:06d}".format( + cHours, cMinutes, cSeconds, delta.microseconds + ) + + return result + + +# ///////////////////////////////////////////////////////////////////////////// + + +def helper__makereport__setup( + item: pytest.Function, call: pytest.CallInfo, outcome: pluggy.Result +): + assert item is not None + assert call is not None + assert outcome is not None + assert type(item) == pytest.Function # noqa: E721 + assert type(call) == pytest.CallInfo # noqa: E721 + assert type(outcome) == pluggy.Result # noqa: E721 + + # logging.info("pytest_runtest_makereport - setup") + + TEST_PROCESS_STATS.incrementTotalTestCount() + + rep: pytest.TestReport = outcome.get_result() + assert rep is not None + assert type(rep) == pytest.TestReport # noqa: E721 + + if rep.outcome == "skipped": + TEST_PROCESS_STATS.incrementNotExecutedTestCount() + return + + assert rep.outcome == "passed" + + testNumber = TEST_PROCESS_STATS.incrementExecutedTestCount() + + testID = "" + + if item.cls is not None: + testID = item.cls.__module__ + "." + item.cls.__name__ + "::" + + testID = testID + item.name + + if testNumber > 1: + logging.info("") + + logging.info("******************************************************") + logging.info("* START TEST {0}".format(testID)) + logging.info("*") + logging.info("* Path : {0}".format(item.path)) + logging.info("* Number: {0}".format(testNumber)) + logging.info("*") + + +# ------------------------------------------------------------------------ +def helper__makereport__call( + item: pytest.Function, call: pytest.CallInfo, outcome: pluggy.Result +): + assert item is not None + assert call is not None + assert outcome is not None + assert type(item) == pytest.Function # noqa: E721 + assert type(call) == pytest.CallInfo # noqa: E721 + assert type(outcome) == pluggy.Result # noqa: E721 + + # logging.info("pytest_runtest_makereport - call") + + rep = outcome.get_result() + assert rep is not None + assert type(rep) == pytest.TestReport # noqa: E721 + + # -------- + testID = "" + + if item.cls is not None: + testID = item.cls.__module__ + "." 
+ item.cls.__name__ + "::" + + testID = testID + item.name + + # -------- + assert call.start <= call.stop + + startDT = datetime.datetime.fromtimestamp(call.start) + assert type(startDT) == datetime.datetime # noqa: E721 + stopDT = datetime.datetime.fromtimestamp(call.stop) + assert type(stopDT) == datetime.datetime # noqa: E721 + + testDurration = stopDT - startDT + assert type(testDurration) == datetime.timedelta # noqa: E721 + + # -------- + exitStatus = None + if rep.outcome == "skipped": + assert call.excinfo is not None # research + assert call.excinfo.value is not None # research + + if type(call.excinfo.value) == _pytest.outcomes.Skipped: # noqa: E721 + assert not hasattr(rep, "wasxfail") + + TEST_PROCESS_STATS.incrementSkippedTestCount() + + exitStatus = "SKIPPED" + reasonText = str(call.excinfo.value) + reasonMsg = "SKIP REASON: {0}" + + elif type(call.excinfo.value) == _pytest.outcomes.XFailed: # noqa: E721 + TEST_PROCESS_STATS.incrementXFailedTestCount(testID) + + exitStatus = "XFAILED" + reasonText = str(call.excinfo.value) + reasonMsg = "XFAIL REASON: {0}" + else: + exitStatus = "XFAILED" + assert hasattr(rep, "wasxfail") + assert rep.wasxfail is not None + assert type(rep.wasxfail) == str # noqa: E721 + + TEST_PROCESS_STATS.incrementXFailedTestCount(testID) + + reasonText = rep.wasxfail + reasonMsg = "XFAIL REASON: {0}" + + logging.error(call.excinfo.value) + + if reasonText != "": + logging.info("*") + logging.info("* " + reasonMsg.format(reasonText)) + + elif rep.outcome == "failed": + assert call.excinfo is not None + assert call.excinfo.value is not None + + TEST_PROCESS_STATS.incrementFailedTestCount(testID) + + logging.error(call.excinfo.value) + exitStatus = "FAILED" + elif rep.outcome == "passed": + assert call.excinfo is None + + if hasattr(rep, "wasxfail"): + assert type(rep.wasxfail) == str # noqa: E721 + + TEST_PROCESS_STATS.incrementNotXFailedTests(testID) + + warnMsg = "Test is marked as xfail" + + if rep.wasxfail != "": + warnMsg += " [" + rep.wasxfail + "]" + + logging.warning(warnMsg) + exitStatus = "NOT XFAILED" + else: + assert not hasattr(rep, "wasxfail") + + TEST_PROCESS_STATS.incrementPassedTestCount() + exitStatus = "PASSED" + else: + TEST_PROCESS_STATS.incrementUnexpectedTests() + exitStatus = "UNEXPECTED [{0}]".format(rep.outcome) + assert False + + # -------- + logging.info("*") + logging.info("* DURATION : {0}".format(timedelta_to_human_text(testDurration))) + logging.info("*") + logging.info("* EXIT STATUS : {0}".format(exitStatus)) + logging.info("*") + logging.info("* STOP TEST {0}".format(testID)) + + +# ///////////////////////////////////////////////////////////////////////////// + + +@pytest.hookimpl(hookwrapper=True) +def pytest_runtest_makereport(item: pytest.Function, call: pytest.CallInfo): + assert item is not None + assert call is not None + assert type(item) == pytest.Function # noqa: E721 + assert type(call) == pytest.CallInfo # noqa: E721 + + # logging.info("[pytest_runtest_makereport][#001][{0}][{1}]".format(item.name, call.when)) + + outcome: pluggy.Result = yield + assert outcome is not None + assert type(outcome) == pluggy.Result # noqa: E721 + + # logging.info("[pytest_runtest_makereport][#002][{0}][{1}]".format(item.name, call.when)) + + rep: pytest.TestReport = outcome.get_result() + assert rep is not None + assert type(rep) == pytest.TestReport # noqa: E721 + + if call.when == "collect": + return + + if call.when == "setup": + helper__makereport__setup(item, call, outcome) + return + + if call.when == "call": + 
helper__makereport__call(item, call, outcome) + return + + if call.when == "teardown": + return + + assert False + + +# ///////////////////////////////////////////////////////////////////////////// + + +def helper__calc_W(n: int) -> int: + assert n > 0 + + x = int(math.log10(n)) + assert type(x) == int # noqa: E721 + assert x >= 0 + x += 1 + return x + + +# ------------------------------------------------------------------------ +def helper__print_test_list(tests: list[str]) -> None: + assert type(tests) == list # noqa: E721 + + assert helper__calc_W(9) == 1 + assert helper__calc_W(10) == 2 + assert helper__calc_W(11) == 2 + assert helper__calc_W(99) == 2 + assert helper__calc_W(100) == 3 + assert helper__calc_W(101) == 3 + assert helper__calc_W(999) == 3 + assert helper__calc_W(1000) == 4 + assert helper__calc_W(1001) == 4 + + W = helper__calc_W(len(tests)) + + templateLine = "{0:0" + str(W) + "d}. {1}" + + nTest = 0 + + while nTest < len(tests): + testID = tests[nTest] + assert type(testID) == str # noqa: E721 + nTest += 1 + logging.info(templateLine.format(nTest, testID)) + + +# ///////////////////////////////////////////////////////////////////////////// + + +@pytest.fixture(autouse=True, scope="session") +def run_after_tests(request: pytest.FixtureRequest): + assert isinstance(request, pytest.FixtureRequest) + + yield + + logging.info("--------------------------- [FAILED TESTS]") + logging.info("") + + assert len(TEST_PROCESS_STATS.FailedTests) == TEST_PROCESS_STATS.cFailedTests + + if len(TEST_PROCESS_STATS.FailedTests) > 0: + helper__print_test_list(TEST_PROCESS_STATS.FailedTests) + logging.info("") + + logging.info("--------------------------- [XFAILED TESTS]") + logging.info("") + + assert len(TEST_PROCESS_STATS.XFailedTests) == TEST_PROCESS_STATS.cXFailedTests + + if len(TEST_PROCESS_STATS.XFailedTests) > 0: + helper__print_test_list(TEST_PROCESS_STATS.XFailedTests) + logging.info("") + + logging.info("--------------------------- [NOT XFAILED TESTS]") + logging.info("") + + assert ( + len(TEST_PROCESS_STATS.NotXFailedTests) == TEST_PROCESS_STATS.cNotXFailedTests + ) + + if len(TEST_PROCESS_STATS.NotXFailedTests) > 0: + helper__print_test_list(TEST_PROCESS_STATS.NotXFailedTests) + logging.info("") + + logging.info("--------------------------- [SUMMARY STATISTICS]") + logging.info("") + logging.info("[TESTS]") + logging.info(" TOTAL : {0}".format(TEST_PROCESS_STATS.cTotalTests)) + logging.info(" EXECUTED : {0}".format(TEST_PROCESS_STATS.cExecutedTests)) + logging.info(" NOT EXECUTED: {0}".format(TEST_PROCESS_STATS.cNotExecutedTests)) + logging.info("") + logging.info(" PASSED : {0}".format(TEST_PROCESS_STATS.cPassedTests)) + logging.info(" FAILED : {0}".format(TEST_PROCESS_STATS.cFailedTests)) + logging.info(" XFAILED : {0}".format(TEST_PROCESS_STATS.cXFailedTests)) + logging.info(" NOT XFAILED : {0}".format(TEST_PROCESS_STATS.cNotXFailedTests)) + logging.info(" SKIPPED : {0}".format(TEST_PROCESS_STATS.cSkippedTests)) + logging.info(" UNEXPECTED : {0}".format(TEST_PROCESS_STATS.cUnexpectedTests)) + logging.info("") + + +# ///////////////////////////////////////////////////////////////////////////// + + +@pytest.hookimpl(trylast=True) +def pytest_configure(config: pytest.Config) -> None: + assert isinstance(config, pytest.Config) + + log_name = TestStartupData.GetCurrentTestWorkerSignature() + log_name += ".log" + + if TestConfigPropNames.TEST_CFG__LOG_DIR in os.environ: + log_path_v = os.environ[TestConfigPropNames.TEST_CFG__LOG_DIR] + log_path = pathlib.Path(log_path_v) + else: + 
log_path = config.rootpath.joinpath("logs") + + log_path.mkdir(exist_ok=True) + + logging_plugin = config.pluginmanager.get_plugin("logging-plugin") + logging_plugin.set_log_path(str(log_path / log_name)) + + +# ///////////////////////////////////////////////////////////////////////////// From 81a5eb43b7398cd3c436ffe32b21e20c4c9712b4 Mon Sep 17 00:00:00 2001 From: dura0ok Date: Wed, 19 Mar 2025 23:08:05 +0700 Subject: [PATCH 44/90] add FUSE support to plugin pg_probackup2 (#184) --- .../pg_probackup2/pg_probackup2/app.py | 43 ++++++++++++++++--- 1 file changed, 37 insertions(+), 6 deletions(-) diff --git a/testgres/plugins/pg_probackup2/pg_probackup2/app.py b/testgres/plugins/pg_probackup2/pg_probackup2/app.py index 57492814..d47cf51f 100644 --- a/testgres/plugins/pg_probackup2/pg_probackup2/app.py +++ b/testgres/plugins/pg_probackup2/pg_probackup2/app.py @@ -45,6 +45,7 @@ class ProbackupApp: def __init__(self, test_class: unittest.TestCase, pg_node, pb_log_path, test_env, auto_compress_alg, backup_dir, probackup_path=None): + self.process = None self.test_class = test_class self.pg_node = pg_node self.pb_log_path = pb_log_path @@ -60,8 +61,35 @@ def __init__(self, test_class: unittest.TestCase, self.test_class.output = None self.execution_time = None + def form_daemon_process(self, cmdline, env): + def stream_output(stream: subprocess.PIPE) -> None: + try: + for line in iter(stream.readline, ''): + print(line) + self.test_class.output += line + finally: + stream.close() + + self.process = subprocess.Popen( + cmdline, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + text=True, + env=env + ) + logging.info(f"Process started in background with PID: {self.process.pid}") + + if self.process.stdout and self.process.stderr: + stdout_thread = threading.Thread(target=stream_output, args=(self.process.stdout,), daemon=True) + stderr_thread = threading.Thread(target=stream_output, args=(self.process.stderr,), daemon=True) + + stdout_thread.start() + stderr_thread.start() + + return self.process.pid + def run(self, command, gdb=False, old_binary=False, return_id=True, env=None, - skip_log_directory=False, expect_error=False, use_backup_dir=True): + skip_log_directory=False, expect_error=False, use_backup_dir=True, daemonize=False): """ Run pg_probackup backup_dir: target directory for making backup @@ -118,11 +146,14 @@ def run(self, command, gdb=False, old_binary=False, return_id=True, env=None, logging.warning("pg_probackup gdb suspended, waiting gdb connection on localhost:{0}".format(gdb_port)) start_time = time.time() - self.test_class.output = subprocess.check_output( - cmdline, - stderr=subprocess.STDOUT, - env=env - ).decode('utf-8', errors='replace') + if daemonize: + return self.form_daemon_process(cmdline, env) + else: + self.test_class.output = subprocess.check_output( + cmdline, + stderr=subprocess.STDOUT, + env=env + ).decode('utf-8', errors='replace') end_time = time.time() self.execution_time = end_time - start_time From ddfaff401f0f55b5a51bb0490d12dda1631acbe3 Mon Sep 17 00:00:00 2001 From: asavchkov Date: Thu, 20 Mar 2025 10:13:51 +0700 Subject: [PATCH 45/90] pg-probackup2 version 0.0.6 --- testgres/plugins/pg_probackup2/setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/testgres/plugins/pg_probackup2/setup.py b/testgres/plugins/pg_probackup2/setup.py index 619b8d39..8bcfe7b4 100644 --- a/testgres/plugins/pg_probackup2/setup.py +++ b/testgres/plugins/pg_probackup2/setup.py @@ -4,7 +4,7 @@ from distutils.core import setup setup( - version='0.0.5', + 
version='0.0.6', name='testgres_pg_probackup2', packages=['pg_probackup2', 'pg_probackup2.storage'], description='Plugin for testgres that manages pg_probackup2', From f0bf7a8994b5dd39e5c2f841381d5e4be5e22297 Mon Sep 17 00:00:00 2001 From: Dmitry Kovalenko Date: Fri, 21 Mar 2025 14:15:24 +0300 Subject: [PATCH 46/90] CI files are updated (#225) * [CI] work with 'time' is corrected AltLinux 10 does not support the sequential "time coverage run ...". Because this OS does not has a builtin command 'time' in bash. https://forum.altlinux.org/index.php?topic=48342.0 We will install 'time' manually and use another command " time coverage run ..." that works without problems but it requires to install 'time' on Ubuntu 2024.04, too. AlpineLinux processes a new command line without any problems. * [CI] An initization of python virtualenv is simplified Let's avoid creating useless environment variables. --- Dockerfile--ubuntu_24_04.tmpl | 1 + run_tests.sh | 27 +++++++++------------------ 2 files changed, 10 insertions(+), 18 deletions(-) diff --git a/Dockerfile--ubuntu_24_04.tmpl b/Dockerfile--ubuntu_24_04.tmpl index c1ddeab6..3bdc6640 100644 --- a/Dockerfile--ubuntu_24_04.tmpl +++ b/Dockerfile--ubuntu_24_04.tmpl @@ -9,6 +9,7 @@ RUN apt update RUN apt install -y sudo curl ca-certificates RUN apt update RUN apt install -y openssh-server +RUN apt install -y time RUN apt update RUN apt install -y postgresql-common diff --git a/run_tests.sh b/run_tests.sh index 0fecde60..a40a97cf 100755 --- a/run_tests.sh +++ b/run_tests.sh @@ -8,24 +8,17 @@ if [ -z ${TEST_FILTER+x} ]; \ then export TEST_FILTER="TestgresTests or (TestTestgresCommon and (not remote_ops))"; \ fi -# choose python version -echo python version is $PYTHON_VERSION -VIRTUALENV="virtualenv --python=/usr/bin/python$PYTHON_VERSION" -PIP="pip$PYTHON_VERSION" - # fail early echo check that pg_config is in PATH command -v pg_config -# prepare environment -VENV_PATH=/tmp/testgres_venv +# prepare python environment +VENV_PATH="/tmp/testgres_venv" rm -rf $VENV_PATH -$VIRTUALENV $VENV_PATH +virtualenv --python="/usr/bin/python${PYTHON_VERSION}" "${VENV_PATH}" export VIRTUAL_ENV_DISABLE_PROMPT=1 -source $VENV_PATH/bin/activate - -# install utilities -$PIP install coverage flake8 psutil Sphinx pytest pytest-xdist psycopg2 six psutil +source "${VENV_PATH}/bin/activate" +pip install coverage flake8 psutil Sphinx pytest pytest-xdist psycopg2 six psutil # install testgres' dependencies export PYTHONPATH=$(pwd) @@ -45,15 +38,13 @@ time coverage run -a -m pytest -l -v -n 4 -k "${TEST_FILTER}" # run tests (PG_BIN) -time \ - PG_BIN=$(pg_config --bindir) \ - coverage run -a -m pytest -l -v -n 4 -k "${TEST_FILTER}" +PG_BIN=$(pg_config --bindir) \ +time coverage run -a -m pytest -l -v -n 4 -k "${TEST_FILTER}" # run tests (PG_CONFIG) -time \ - PG_CONFIG=$(pg_config --bindir)/pg_config \ - coverage run -a -m pytest -l -v -n 4 -k "${TEST_FILTER}" +PG_CONFIG=$(pg_config --bindir)/pg_config \ +time coverage run -a -m pytest -l -v -n 4 -k "${TEST_FILTER}" # show coverage From 76fa94cfa6d2645abbb59a027074bd97b9f389ee Mon Sep 17 00:00:00 2001 From: Dmitry Kovalenko Date: Mon, 24 Mar 2025 13:04:41 +0300 Subject: [PATCH 47/90] [CI] Run tests on AltLinux 10 and 11 (#219) This patch adds an automated tests of testgres on AltLinux 10 and 11. We will execute only "local" tests because AltLinux has an unexpected problem with SSH connection - it is created too slowly. 
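For reference, one quick way to confirm the slow SSH connection setup is to time a no-op command over ssh. The sketch below is only an illustration and is not part of this patch; the host name and attempt count are arbitrary:

    # measure_ssh.py - hypothetical helper, not shipped with testgres.
    # Times "ssh <host> true", i.e. the pure cost of establishing a connection.
    import subprocess
    import time

    def measure_ssh_roundtrip(host="localhost", attempts=3):
        timings = []
        for _ in range(attempts):
            started = time.monotonic()
            # "true" exits immediately, so the measured time is dominated
            # by connection setup (key exchange, auth, session start).
            subprocess.run(["ssh", host, "true"], check=True)
            timings.append(time.monotonic() - started)
        return timings

    if __name__ == "__main__":
        for n, t in enumerate(measure_ssh_roundtrip(), start=1):
            print("attempt {0}: {1:.3f}s".format(n, t))

If the timings on AltLinux are several times higher than on the other platforms, keeping the remote tests excluded remains justified.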
--- .travis.yml | 2 + Dockerfile--altlinux_10.tmpl | 118 +++++++++++++++++++++++++++++++++++ Dockerfile--altlinux_11.tmpl | 118 +++++++++++++++++++++++++++++++++++ 3 files changed, 238 insertions(+) create mode 100644 Dockerfile--altlinux_10.tmpl create mode 100644 Dockerfile--altlinux_11.tmpl diff --git a/.travis.yml b/.travis.yml index 3a889845..7557a2ce 100644 --- a/.travis.yml +++ b/.travis.yml @@ -28,3 +28,5 @@ env: - TEST_PLATFORM=std PYTHON_VERSION=3 PG_VERSION=10 - TEST_PLATFORM=std-all PYTHON_VERSION=3 PG_VERSION=17 - TEST_PLATFORM=ubuntu_24_04 PYTHON_VERSION=3 PG_VERSION=17 + - TEST_PLATFORM=altlinux_10 PYTHON_VERSION=3 PG_VERSION=17 + - TEST_PLATFORM=altlinux_11 PYTHON_VERSION=3 PG_VERSION=17 diff --git a/Dockerfile--altlinux_10.tmpl b/Dockerfile--altlinux_10.tmpl new file mode 100644 index 00000000..e60e9320 --- /dev/null +++ b/Dockerfile--altlinux_10.tmpl @@ -0,0 +1,118 @@ +ARG PG_VERSION +ARG PYTHON_VERSION + +# --------------------------------------------- base1 +FROM alt:p10 as base1 +ARG PG_VERSION + +RUN apt-get update +RUN apt-get install -y sudo curl ca-certificates +RUN apt-get update +RUN apt-get install -y openssh-server openssh-clients +RUN apt-get install -y time + +# RUN apt-get install -y mc + +RUN apt-get install -y libsqlite3-devel + +EXPOSE 22 + +RUN ssh-keygen -A + +# --------------------------------------------- postgres +FROM base1 as base1_with_dev_tools + +RUN apt-get update + +RUN apt-get install -y git +RUN apt-get install -y gcc +RUN apt-get install -y make + +RUN apt-get install -y meson +RUN apt-get install -y flex +RUN apt-get install -y bison + +RUN apt-get install -y pkg-config +RUN apt-get install -y libssl-devel +RUN apt-get install -y libicu-devel +RUN apt-get install -y libzstd-devel +RUN apt-get install -y zlib-devel +RUN apt-get install -y liblz4-devel +RUN apt-get install -y libzstd-devel +RUN apt-get install -y libxml2-devel + +# --------------------------------------------- postgres +FROM base1_with_dev_tools as base1_with_pg-17 + +RUN git clone https://github.com/postgres/postgres.git -b REL_17_STABLE /pg/postgres/source + +WORKDIR /pg/postgres/source + +RUN ./configure --prefix=/pg/postgres/install --with-zlib --with-openssl --without-readline --with-lz4 --with-zstd --with-libxml +RUN make -j 4 install +RUN make -j 4 -C contrib install + +# SETUP PG_CONFIG +# When pg_config symlink in /usr/local/bin it returns a real (right) result of --bindir +RUN ln -s /pg/postgres/install/bin/pg_config -t /usr/local/bin + +# SETUP PG CLIENT LIBRARY +# libpq.so.5 is enough +RUN ln -s /pg/postgres/install/lib/libpq.so.5.17 /usr/lib64/libpq.so.5 + +# --------------------------------------------- base2_with_python-3 +FROM base1_with_pg-${PG_VERSION} as base2_with_python-3 +RUN apt-get install -y python3 +RUN apt-get install -y python3-dev +RUN apt-get install -y python3-module-virtualenv +RUN apt-get install -y python3-modules-sqlite3 + +# AltLinux does not have "generic" virtualenv utility. Let's create it. +RUN if [[ -f "/usr/bin/virtualenv" ]] ; then \ + echo AAA; \ + elif [[ -f "/usr/bin/virtualenv3" ]] ; then \ + ln -s /usr/bin/virtualenv3 /usr/bin/virtualenv; \ + else \ + echo "/usr/bin/virtualenv is not created!"; \ + exit 1; \ + fi + +ENV PYTHON_VERSION=3 + +# --------------------------------------------- final +FROM base2_with_python-${PYTHON_VERSION} as final + +RUN adduser test -G wheel + +# It enables execution of "sudo service ssh start" without password +RUN sh -c "echo \"WHEEL_USERS ALL=(ALL:ALL) NOPASSWD: ALL\"" >> /etc/sudoers + +ADD . 
/pg/testgres +WORKDIR /pg/testgres +RUN chown -R test /pg/testgres + +ENV LANG=C.UTF-8 + +USER test + +RUN chmod 700 ~/ +RUN mkdir -p ~/.ssh + +# +# Altlinux 10 and 11 too slowly create a new SSH connection (x6). +# +# So, we exclude the "remote" tests until this problem has been resolved. +# + +ENTRYPOINT sh -c " \ +set -eux; \ +echo HELLO FROM ENTRYPOINT; \ +echo HOME DIR IS [`realpath ~/`]; \ +sudo /usr/sbin/sshd; \ +ssh-keyscan -H localhost >> ~/.ssh/known_hosts; \ +ssh-keyscan -H 127.0.0.1 >> ~/.ssh/known_hosts; \ +ssh-keygen -t rsa -f ~/.ssh/id_rsa -q -N ''; \ +cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys; \ +chmod 600 ~/.ssh/authorized_keys; \ +ls -la ~/.ssh/; \ +TEST_FILTER=\"TestgresTests or (TestTestgresCommon and (not remote_ops))\" bash ./run_tests.sh;" diff --git a/Dockerfile--altlinux_11.tmpl b/Dockerfile--altlinux_11.tmpl new file mode 100644 index 00000000..4b591632 --- /dev/null +++ b/Dockerfile--altlinux_11.tmpl @@ -0,0 +1,118 @@ +ARG PG_VERSION +ARG PYTHON_VERSION + +# --------------------------------------------- base1 +FROM alt:p11 as base1 +ARG PG_VERSION + +RUN apt-get update +RUN apt-get install -y sudo curl ca-certificates +RUN apt-get update +RUN apt-get install -y openssh-server openssh-clients +RUN apt-get install -y time + +# RUN apt-get install -y mc + +RUN apt-get install -y libsqlite3-devel + +EXPOSE 22 + +RUN ssh-keygen -A + +# --------------------------------------------- postgres +FROM base1 as base1_with_dev_tools + +RUN apt-get update + +RUN apt-get install -y git +RUN apt-get install -y gcc +RUN apt-get install -y make + +RUN apt-get install -y meson +RUN apt-get install -y flex +RUN apt-get install -y bison + +RUN apt-get install -y pkg-config +RUN apt-get install -y libssl-devel +RUN apt-get install -y libicu-devel +RUN apt-get install -y libzstd-devel +RUN apt-get install -y zlib-devel +RUN apt-get install -y liblz4-devel +RUN apt-get install -y libzstd-devel +RUN apt-get install -y libxml2-devel + +# --------------------------------------------- postgres +FROM base1_with_dev_tools as base1_with_pg-17 + +RUN git clone https://github.com/postgres/postgres.git -b REL_17_STABLE /pg/postgres/source + +WORKDIR /pg/postgres/source + +RUN ./configure --prefix=/pg/postgres/install --with-zlib --with-openssl --without-readline --with-lz4 --with-zstd --with-libxml +RUN make -j 4 install +RUN make -j 4 -C contrib install + +# SETUP PG_CONFIG +# When pg_config symlink in /usr/local/bin it returns a real (right) result of --bindir +RUN ln -s /pg/postgres/install/bin/pg_config -t /usr/local/bin + +# SETUP PG CLIENT LIBRARY +# libpq.so.5 is enough +RUN ln -s /pg/postgres/install/lib/libpq.so.5.17 /usr/lib64/libpq.so.5 + +# --------------------------------------------- base2_with_python-3 +FROM base1_with_pg-${PG_VERSION} as base2_with_python-3 +RUN apt-get install -y python3 +RUN apt-get install -y python3-dev +RUN apt-get install -y python3-module-virtualenv +RUN apt-get install -y python3-modules-sqlite3 + +# AltLinux does not have "generic" virtualenv utility. Let's create it. 
+RUN if [[ -f "/usr/bin/virtualenv" ]] ; then \ + echo AAA; \ + elif [[ -f "/usr/bin/virtualenv3" ]] ; then \ + ln -s /usr/bin/virtualenv3 /usr/bin/virtualenv; \ + else \ + echo "/usr/bin/virtualenv is not created!"; \ + exit 1; \ + fi + +ENV PYTHON_VERSION=3 + +# --------------------------------------------- final +FROM base2_with_python-${PYTHON_VERSION} as final + +RUN adduser test -G wheel + +# It enables execution of "sudo service ssh start" without password +RUN sh -c "echo \"WHEEL_USERS ALL=(ALL:ALL) NOPASSWD: ALL\"" >> /etc/sudoers + +ADD . /pg/testgres +WORKDIR /pg/testgres +RUN chown -R test /pg/testgres + +ENV LANG=C.UTF-8 + +USER test + +RUN chmod 700 ~/ +RUN mkdir -p ~/.ssh + +# +# Altlinux 10 and 11 too slowly create a new SSH connection (x6). +# +# So, we exclude the "remote" tests until this problem has been resolved. +# + +ENTRYPOINT sh -c " \ +set -eux; \ +echo HELLO FROM ENTRYPOINT; \ +echo HOME DIR IS [`realpath ~/`]; \ +sudo /usr/sbin/sshd; \ +ssh-keyscan -H localhost >> ~/.ssh/known_hosts; \ +ssh-keyscan -H 127.0.0.1 >> ~/.ssh/known_hosts; \ +ssh-keygen -t rsa -f ~/.ssh/id_rsa -q -N ''; \ +cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys; \ +chmod 600 ~/.ssh/authorized_keys; \ +ls -la ~/.ssh/; \ +TEST_FILTER=\"TestgresTests or (TestTestgresCommon and (not remote_ops))\" bash ./run_tests.sh;" From bc18e5b9362d9634c468c616e40282cf7e30e5da Mon Sep 17 00:00:00 2001 From: Dmitry Kovalenko Date: Fri, 28 Mar 2025 15:55:58 +0300 Subject: [PATCH 48/90] conftest.py is updated [refactoring] (#226) New code can process a failure in fixtures and builds a list of these cases (achtung tests). --- tests/conftest.py | 146 +++++++++++++++++++++++++++++++--------------- 1 file changed, 98 insertions(+), 48 deletions(-) diff --git a/tests/conftest.py b/tests/conftest.py index e37c3c77..0f65838e 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -1,17 +1,18 @@ # ///////////////////////////////////////////////////////////////////////////// # PyTest Configuration -import _pytest.outcomes - import pluggy import pytest -import _pytest import os import logging import pathlib import math import datetime +import _pytest.outcomes +import _pytest.unittest +import _pytest.logging + # ///////////////////////////////////////////////////////////////////////////// C_ROOT_DIR__RELATIVE = ".." @@ -91,10 +92,6 @@ def GetCurrentTestWorkerSignature() -> str: return __class__.sm_CurrentTestWorkerSignature -# /////////////////////////////////////////////////////////////////////////////# ///////////////////////////////////////////////////////////////////////////// -# Fixtures - - # ///////////////////////////////////////////////////////////////////////////// # TEST_PROCESS_STATS @@ -109,10 +106,12 @@ class TEST_PROCESS_STATS: cSkippedTests: int = 0 cNotXFailedTests: int = 0 cUnexpectedTests: int = 0 + cAchtungTests: int = 0 FailedTests = list[str]() XFailedTests = list[str]() NotXFailedTests = list[str]() + AchtungTests = list[str]() # -------------------------------------------------------------------- def incrementTotalTestCount() -> None: @@ -163,6 +162,14 @@ def incrementNotXFailedTests(testID: str) -> None: def incrementUnexpectedTests() -> None: __class__.cUnexpectedTests += 1 + # -------------------------------------------------------------------- + def incrementAchtungTestCount(testID: str) -> None: + assert type(testID) == str # noqa: E721 + assert type(__class__.AchtungTests) == list # noqa: E721 + + __class__.AchtungTests.append(testID) # raise? 
+ __class__.cAchtungTests += 1 + # ///////////////////////////////////////////////////////////////////////////// @@ -198,10 +205,13 @@ def helper__makereport__setup( assert item is not None assert call is not None assert outcome is not None - assert type(item) == pytest.Function # noqa: E721 + # it may be pytest.Function or _pytest.unittest.TestCaseFunction + assert isinstance(item, pytest.Function) assert type(call) == pytest.CallInfo # noqa: E721 assert type(outcome) == pluggy.Result # noqa: E721 + C_LINE1 = "******************************************************" + # logging.info("pytest_runtest_makereport - setup") TEST_PROCESS_STATS.incrementTotalTestCount() @@ -214,10 +224,6 @@ def helper__makereport__setup( TEST_PROCESS_STATS.incrementNotExecutedTestCount() return - assert rep.outcome == "passed" - - testNumber = TEST_PROCESS_STATS.incrementExecutedTestCount() - testID = "" if item.cls is not None: @@ -225,15 +231,35 @@ def helper__makereport__setup( testID = testID + item.name - if testNumber > 1: - logging.info("") + if rep.outcome == "passed": + testNumber = TEST_PROCESS_STATS.incrementExecutedTestCount() + + logging.info(C_LINE1) + logging.info("* START TEST {0}".format(testID)) + logging.info("*") + logging.info("* Path : {0}".format(item.path)) + logging.info("* Number: {0}".format(testNumber)) + logging.info("*") + return + + assert rep.outcome != "passed" + + TEST_PROCESS_STATS.incrementAchtungTestCount(testID) - logging.info("******************************************************") - logging.info("* START TEST {0}".format(testID)) + logging.info(C_LINE1) + logging.info("* ACHTUNG TEST {0}".format(testID)) logging.info("*") logging.info("* Path : {0}".format(item.path)) - logging.info("* Number: {0}".format(testNumber)) + logging.info("* Outcome is [{0}]".format(rep.outcome)) + + if rep.outcome == "failed": + assert call.excinfo is not None + assert call.excinfo.value is not None + logging.info("*") + logging.error(call.excinfo.value) + logging.info("*") + return # ------------------------------------------------------------------------ @@ -243,12 +269,11 @@ def helper__makereport__call( assert item is not None assert call is not None assert outcome is not None - assert type(item) == pytest.Function # noqa: E721 + # it may be pytest.Function or _pytest.unittest.TestCaseFunction + assert isinstance(item, pytest.Function) assert type(call) == pytest.CallInfo # noqa: E721 assert type(outcome) == pluggy.Result # noqa: E721 - # logging.info("pytest_runtest_makereport - call") - rep = outcome.get_result() assert rep is not None assert type(rep) == pytest.TestReport # noqa: E721 @@ -341,7 +366,8 @@ def helper__makereport__call( else: TEST_PROCESS_STATS.incrementUnexpectedTests() exitStatus = "UNEXPECTED [{0}]".format(rep.outcome) - assert False + # [2025-03-28] It may create a useless problem in new environment. 
+ # assert False # -------- logging.info("*") @@ -350,6 +376,7 @@ def helper__makereport__call( logging.info("* EXIT STATUS : {0}".format(exitStatus)) logging.info("*") logging.info("* STOP TEST {0}".format(testID)) + logging.info("*") # ///////////////////////////////////////////////////////////////////////////// @@ -359,17 +386,14 @@ def helper__makereport__call( def pytest_runtest_makereport(item: pytest.Function, call: pytest.CallInfo): assert item is not None assert call is not None - assert type(item) == pytest.Function # noqa: E721 + # it may be pytest.Function or _pytest.unittest.TestCaseFunction + assert isinstance(item, pytest.Function) assert type(call) == pytest.CallInfo # noqa: E721 - # logging.info("[pytest_runtest_makereport][#001][{0}][{1}]".format(item.name, call.when)) - outcome: pluggy.Result = yield assert outcome is not None assert type(outcome) == pluggy.Result # noqa: E721 - # logging.info("[pytest_runtest_makereport][#002][{0}][{1}]".format(item.name, call.when)) - rep: pytest.TestReport = outcome.get_result() assert rep is not None assert type(rep) == pytest.TestReport # noqa: E721 @@ -440,41 +464,61 @@ def run_after_tests(request: pytest.FixtureRequest): yield - logging.info("--------------------------- [FAILED TESTS]") - logging.info("") - - assert len(TEST_PROCESS_STATS.FailedTests) == TEST_PROCESS_STATS.cFailedTests - - if len(TEST_PROCESS_STATS.FailedTests) > 0: - helper__print_test_list(TEST_PROCESS_STATS.FailedTests) - logging.info("") + C_LINE1 = "---------------------------" - logging.info("--------------------------- [XFAILED TESTS]") - logging.info("") + def LOCAL__print_line1_with_header(header: str): + assert type(C_LINE1) == str # noqa: E721 + assert type(header) == str # noqa: E721 + assert header != "" + logging.info(C_LINE1 + " [" + header + "]") - assert len(TEST_PROCESS_STATS.XFailedTests) == TEST_PROCESS_STATS.cXFailedTests + def LOCAL__print_test_list(header: str, test_count: int, test_list: list[str]): + assert type(header) == str # noqa: E721 + assert type(test_count) == int # noqa: E721 + assert type(test_list) == list # noqa: E721 + assert header != "" + assert test_count >= 0 + assert len(test_list) == test_count - if len(TEST_PROCESS_STATS.XFailedTests) > 0: - helper__print_test_list(TEST_PROCESS_STATS.XFailedTests) + LOCAL__print_line1_with_header(header) logging.info("") + if len(test_list) > 0: + helper__print_test_list(test_list) + logging.info("") + + # fmt: off + LOCAL__print_test_list( + "ACHTUNG TESTS", + TEST_PROCESS_STATS.cAchtungTests, + TEST_PROCESS_STATS.AchtungTests, + ) - logging.info("--------------------------- [NOT XFAILED TESTS]") - logging.info("") + LOCAL__print_test_list( + "FAILED TESTS", + TEST_PROCESS_STATS.cFailedTests, + TEST_PROCESS_STATS.FailedTests + ) - assert ( - len(TEST_PROCESS_STATS.NotXFailedTests) == TEST_PROCESS_STATS.cNotXFailedTests + LOCAL__print_test_list( + "XFAILED TESTS", + TEST_PROCESS_STATS.cXFailedTests, + TEST_PROCESS_STATS.XFailedTests, ) - if len(TEST_PROCESS_STATS.NotXFailedTests) > 0: - helper__print_test_list(TEST_PROCESS_STATS.NotXFailedTests) - logging.info("") + LOCAL__print_test_list( + "NOT XFAILED TESTS", + TEST_PROCESS_STATS.cNotXFailedTests, + TEST_PROCESS_STATS.NotXFailedTests, + ) + # fmt: on - logging.info("--------------------------- [SUMMARY STATISTICS]") + LOCAL__print_line1_with_header("SUMMARY STATISTICS") logging.info("") logging.info("[TESTS]") logging.info(" TOTAL : {0}".format(TEST_PROCESS_STATS.cTotalTests)) logging.info(" EXECUTED : 
{0}".format(TEST_PROCESS_STATS.cExecutedTests)) logging.info(" NOT EXECUTED: {0}".format(TEST_PROCESS_STATS.cNotExecutedTests)) + logging.info(" ACHTUNG : {0}".format(TEST_PROCESS_STATS.cAchtungTests)) logging.info("") logging.info(" PASSED : {0}".format(TEST_PROCESS_STATS.cPassedTests)) logging.info(" FAILED : {0}".format(TEST_PROCESS_STATS.cFailedTests)) @@ -503,7 +547,13 @@ def pytest_configure(config: pytest.Config) -> None: log_path.mkdir(exist_ok=True) - logging_plugin = config.pluginmanager.get_plugin("logging-plugin") + logging_plugin: _pytest.logging.LoggingPlugin = config.pluginmanager.get_plugin( + "logging-plugin" + ) + + assert logging_plugin is not None + assert isinstance(logging_plugin, _pytest.logging.LoggingPlugin) + logging_plugin.set_log_path(str(log_path / log_name)) From a5d6df452371206da124309ef7cddaf44d023299 Mon Sep 17 00:00:00 2001 From: Dmitry Kovalenko Date: Fri, 28 Mar 2025 23:36:42 +0300 Subject: [PATCH 49/90] [BUG FIX] PostgresNode must use get_pg_version2 (#227) --- testgres/node.py | 23 ++++++++++++----------- 1 file changed, 12 insertions(+), 11 deletions(-) diff --git a/testgres/node.py b/testgres/node.py index 6d2417c4..2b5fc6d1 100644 --- a/testgres/node.py +++ b/testgres/node.py @@ -92,7 +92,7 @@ PgVer, \ eprint, \ get_bin_path2, \ - get_pg_version, \ + get_pg_version2, \ execute_utility2, \ options_string, \ clean_on_error @@ -148,16 +148,6 @@ def __init__(self, name=None, base_dir=None, port=None, conn_params: ConnectionP """ # private - self._pg_version = PgVer(get_pg_version(bin_dir)) - self._should_free_port = port is None - self._base_dir = base_dir - self._bin_dir = bin_dir - self._prefix = prefix - self._logger = None - self._master = None - - # basic - self.name = name or generate_app_name() if os_ops is None: os_ops = __class__._get_os_ops(conn_params) else: @@ -168,6 +158,17 @@ def __init__(self, name=None, base_dir=None, port=None, conn_params: ConnectionP assert isinstance(os_ops, OsOperations) self._os_ops = os_ops + self._pg_version = PgVer(get_pg_version2(os_ops, bin_dir)) + self._should_free_port = port is None + self._base_dir = base_dir + self._bin_dir = bin_dir + self._prefix = prefix + self._logger = None + self._master = None + + # basic + self.name = name or generate_app_name() + self.host = os_ops.host self.port = port or utils.reserve_port() From 56ae1a8ceb31a34cbf382d7884411389e293d83c Mon Sep 17 00:00:00 2001 From: "d.kovalenko" Date: Fri, 28 Mar 2025 23:38:48 +0300 Subject: [PATCH 50/90] get_pg_version2 is updated --- testgres/utils.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/testgres/utils.py b/testgres/utils.py index a988effe..62e95ff6 100644 --- a/testgres/utils.py +++ b/testgres/utils.py @@ -212,8 +212,8 @@ def get_pg_version2(os_ops: OsOperations, bin_dir=None): # Get raw version (e.g., postgres (PostgreSQL) 9.5.7) postgres_path = os.path.join(bin_dir, 'postgres') if bin_dir else get_bin_path2(os_ops, 'postgres') - _params = [postgres_path, '--version'] - raw_ver = os_ops.exec_command(_params, encoding='utf-8') + cmd = [postgres_path, '--version'] + raw_ver = os_ops.exec_command(cmd, encoding='utf-8') return parse_pg_version(raw_ver) From ee441caa91ebd96f0a1ac8244da36269a0d70d98 Mon Sep 17 00:00:00 2001 From: "d.kovalenko" Date: Fri, 28 Mar 2025 23:39:40 +0300 Subject: [PATCH 51/90] get_pg_config2 is updated --- testgres/utils.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/testgres/utils.py b/testgres/utils.py index 62e95ff6..92383571 100644 --- 
a/testgres/utils.py +++ b/testgres/utils.py @@ -185,11 +185,15 @@ def cache_pg_config_data(cmd): return _pg_config_data # try specified pg_config path or PG_CONFIG + if pg_config_path: + return cache_pg_config_data(pg_config_path) + if isinstance(os_ops, RemoteOperations): - pg_config = pg_config_path or os.environ.get("PG_CONFIG_REMOTE") or os.environ.get("PG_CONFIG") + pg_config = os.environ.get("PG_CONFIG_REMOTE") or os.environ.get("PG_CONFIG") else: # try PG_CONFIG - get from local machine - pg_config = pg_config_path or os.environ.get("PG_CONFIG") + pg_config = os.environ.get("PG_CONFIG") + if pg_config: return cache_pg_config_data(pg_config) From 939ca6dfd29d6c6680ffd4f6441fbcae94da66f4 Mon Sep 17 00:00:00 2001 From: "d.kovalenko" Date: Fri, 28 Mar 2025 23:40:33 +0300 Subject: [PATCH 52/90] TestTestgresCommon::test_get_pg_config2 is added --- tests/test_testgres_common.py | 26 ++++++++++++++++++++++++++ 1 file changed, 26 insertions(+) diff --git a/tests/test_testgres_common.py b/tests/test_testgres_common.py index f42964c8..2440b8f0 100644 --- a/tests/test_testgres_common.py +++ b/tests/test_testgres_common.py @@ -5,6 +5,7 @@ from ..testgres.node import PgVer from ..testgres.node import PostgresNode from ..testgres.utils import get_pg_version2 +from ..testgres.utils import get_pg_config2 from ..testgres.utils import file_tail from ..testgres.utils import get_bin_path2 from ..testgres import ProcessType @@ -1064,6 +1065,31 @@ def test_dump(self, os_ops: OsOperations): res = node3.execute(query_select) assert (res == [(1, ), (2, )]) + def test_get_pg_config2(self, os_ops: OsOperations): + # check same instances + a = get_pg_config2(os_ops, None) + b = get_pg_config2(os_ops, None) + assert (id(a) == id(b)) + + # save right before config change + c1 = get_pg_config2(os_ops, None) + + # modify setting for this scope + with scoped_config(cache_pg_config=False) as config: + # sanity check for value + assert not (config.cache_pg_config) + + # save right after config change + c2 = get_pg_config2(os_ops, None) + + # check different instances after config change + assert (id(c1) != id(c2)) + + # check different instances + a = get_pg_config2(os_ops, None) + b = get_pg_config2(os_ops, None) + assert (id(a) != id(b)) + @staticmethod def helper__get_node(os_ops: OsOperations, name=None): assert isinstance(os_ops, OsOperations) From ca545891c7961b11b481f7befdc6e918289f454a Mon Sep 17 00:00:00 2001 From: Dmitry Kovalenko Date: Mon, 31 Mar 2025 13:45:52 +0300 Subject: [PATCH 53/90] pytest_runtest_makereport is updated (cleanup) (#228) Local 'rep' is not used. 
--- tests/conftest.py | 4 ---- 1 file changed, 4 deletions(-) diff --git a/tests/conftest.py b/tests/conftest.py index 0f65838e..e27eaeb3 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -394,10 +394,6 @@ def pytest_runtest_makereport(item: pytest.Function, call: pytest.CallInfo): assert outcome is not None assert type(outcome) == pluggy.Result # noqa: E721 - rep: pytest.TestReport = outcome.get_result() - assert rep is not None - assert type(rep) == pytest.TestReport # noqa: E721 - if call.when == "collect": return From 712de460eb9a49998b88f32f14c096d77705dfd4 Mon Sep 17 00:00:00 2001 From: Dmitry Kovalenko Date: Mon, 31 Mar 2025 14:42:48 +0300 Subject: [PATCH 54/90] helper__build_test_id is added (conftest refactoring) (#229) --- tests/conftest.py | 30 ++++++++++++++++++------------ 1 file changed, 18 insertions(+), 12 deletions(-) diff --git a/tests/conftest.py b/tests/conftest.py index e27eaeb3..c6306454 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -199,6 +199,22 @@ def timedelta_to_human_text(delta: datetime.timedelta) -> str: # ///////////////////////////////////////////////////////////////////////////// +def helper__build_test_id(item: pytest.Function) -> str: + assert item is not None + assert isinstance(item, pytest.Function) + + testID = "" + + if item.cls is not None: + testID = item.cls.__module__ + "." + item.cls.__name__ + "::" + + testID = testID + item.name + + return testID + +# ///////////////////////////////////////////////////////////////////////////// + + def helper__makereport__setup( item: pytest.Function, call: pytest.CallInfo, outcome: pluggy.Result ): @@ -224,12 +240,7 @@ def helper__makereport__setup( TEST_PROCESS_STATS.incrementNotExecutedTestCount() return - testID = "" - - if item.cls is not None: - testID = item.cls.__module__ + "." + item.cls.__name__ + "::" - - testID = testID + item.name + testID = helper__build_test_id(item) if rep.outcome == "passed": testNumber = TEST_PROCESS_STATS.incrementExecutedTestCount() @@ -279,12 +290,7 @@ def helper__makereport__call( assert type(rep) == pytest.TestReport # noqa: E721 # -------- - testID = "" - - if item.cls is not None: - testID = item.cls.__module__ + "." + item.cls.__name__ + "::" - - testID = testID + item.name + testID = helper__build_test_id(item) # -------- assert call.start <= call.stop From cf19df91f33e44a7a130e04bca8b7d8cc1af155c Mon Sep 17 00:00:00 2001 From: "d.kovalenko" Date: Mon, 31 Mar 2025 14:50:58 +0300 Subject: [PATCH 55/90] pytest_runtest_makereport is updated (refactoring+documentation) --- tests/conftest.py | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/tests/conftest.py b/tests/conftest.py index c6306454..ae528536 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -390,6 +390,12 @@ def helper__makereport__call( @pytest.hookimpl(hookwrapper=True) def pytest_runtest_makereport(item: pytest.Function, call: pytest.CallInfo): + # + # https://docs.pytest.org/en/7.1.x/how-to/writing_hook_functions.html#hookwrapper-executing-around-other-hooks + # + # Note that hook wrappers don’t return results themselves, + # they merely perform tracing or other side effects around the actual hook implementations. 
+ # assert item is not None assert call is not None # it may be pytest.Function or _pytest.unittest.TestCaseFunction @@ -400,6 +406,8 @@ def pytest_runtest_makereport(item: pytest.Function, call: pytest.CallInfo): assert outcome is not None assert type(outcome) == pluggy.Result # noqa: E721 + assert type(call.when) == str + if call.when == "collect": return @@ -414,7 +422,9 @@ def pytest_runtest_makereport(item: pytest.Function, call: pytest.CallInfo): if call.when == "teardown": return - assert False + errMsg = "[pytest_runtest_makereport] unknown 'call.when' value: [{0}].".format(call.when) + + raise RuntimeError(errMsg) # ///////////////////////////////////////////////////////////////////////////// From d9db881901a202e76651c0940efd91e372fd56c8 Mon Sep 17 00:00:00 2001 From: "d.kovalenko" Date: Mon, 31 Mar 2025 17:31:38 +0300 Subject: [PATCH 56/90] [FIX] Formatting [flake8] --- tests/conftest.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/conftest.py b/tests/conftest.py index ae528536..196dbf39 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -406,7 +406,7 @@ def pytest_runtest_makereport(item: pytest.Function, call: pytest.CallInfo): assert outcome is not None assert type(outcome) == pluggy.Result # noqa: E721 - assert type(call.when) == str + assert type(call.when) == str # noqa: E721 if call.when == "collect": return From 4502b86c3e0ffa4178411951074878cacbd42ca2 Mon Sep 17 00:00:00 2001 From: "d.kovalenko" Date: Mon, 31 Mar 2025 17:34:59 +0300 Subject: [PATCH 57/90] helper__makereport__call is updated [revision] --- tests/conftest.py | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/tests/conftest.py b/tests/conftest.py index 196dbf39..ee03d1c3 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -316,14 +316,14 @@ def helper__makereport__call( exitStatus = "SKIPPED" reasonText = str(call.excinfo.value) - reasonMsg = "SKIP REASON: {0}" + reasonMsgTempl = "SKIP REASON: {0}" elif type(call.excinfo.value) == _pytest.outcomes.XFailed: # noqa: E721 TEST_PROCESS_STATS.incrementXFailedTestCount(testID) exitStatus = "XFAILED" reasonText = str(call.excinfo.value) - reasonMsg = "XFAIL REASON: {0}" + reasonMsgTempl = "XFAIL REASON: {0}" else: exitStatus = "XFAILED" assert hasattr(rep, "wasxfail") @@ -333,13 +333,16 @@ def helper__makereport__call( TEST_PROCESS_STATS.incrementXFailedTestCount(testID) reasonText = rep.wasxfail - reasonMsg = "XFAIL REASON: {0}" + reasonMsgTempl = "XFAIL REASON: {0}" logging.error(call.excinfo.value) + assert type(reasonText) == str # noqa: E721 + if reasonText != "": + assert type(reasonMsgTempl) == str # noqa: E721 logging.info("*") - logging.info("* " + reasonMsg.format(reasonText)) + logging.info("* " + reasonMsgTempl.format(reasonText)) elif rep.outcome == "failed": assert call.excinfo is not None From 2090fbce9f6095da51364e03ab21763171ed650b Mon Sep 17 00:00:00 2001 From: "d.kovalenko" Date: Mon, 31 Mar 2025 23:02:12 +0300 Subject: [PATCH 58/90] conftest is updated [formatting+comments] --- tests/conftest.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/tests/conftest.py b/tests/conftest.py index ee03d1c3..9e8ea368 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -399,6 +399,8 @@ def pytest_runtest_makereport(item: pytest.Function, call: pytest.CallInfo): # Note that hook wrappers don’t return results themselves, # they merely perform tracing or other side effects around the actual hook implementations. 
# + # https://docs.pytest.org/en/7.1.x/reference/reference.html#test-running-runtest-hooks + # assert item is not None assert call is not None # it may be pytest.Function or _pytest.unittest.TestCaseFunction @@ -425,7 +427,9 @@ def pytest_runtest_makereport(item: pytest.Function, call: pytest.CallInfo): if call.when == "teardown": return - errMsg = "[pytest_runtest_makereport] unknown 'call.when' value: [{0}].".format(call.when) + errMsg = "[pytest_runtest_makereport] unknown 'call.when' value: [{0}].".format( + call.when + ) raise RuntimeError(errMsg) From b91714142fb77d1af7450bfa58abf2cecf529c4d Mon Sep 17 00:00:00 2001 From: "d.kovalenko" Date: Tue, 1 Apr 2025 09:01:54 +0300 Subject: [PATCH 59/90] PostgresNode::start is updated [no logging.error] It does not uses logging.error when it can't reallocate port number. It throws exception only. Why? Our new test infrastructure will process logging.error and will increment an error counter. As result - some tests start failing. --- testgres/node.py | 3 +-- tests/test_simple.py | 2 +- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/testgres/node.py b/testgres/node.py index 2b5fc6d1..c8ae4204 100644 --- a/testgres/node.py +++ b/testgres/node.py @@ -1038,8 +1038,7 @@ def LOCAL__raise_cannot_start_node__std(from_exception): assert nAttempt > 0 assert nAttempt <= __class__._C_MAX_START_ATEMPTS if nAttempt == __class__._C_MAX_START_ATEMPTS: - logging.error("Reached maximum retry attempts. Unable to start node.") - LOCAL__raise_cannot_start_node(e, "Cannot start node after multiple attempts") + LOCAL__raise_cannot_start_node(e, "Cannot start node after multiple attempts.") log_files1 = self._collect_log_files() if not self._detect_port_conflict(log_files0, log_files1): diff --git a/tests/test_simple.py b/tests/test_simple.py index f648e558..6ca52cb0 100644 --- a/tests/test_simple.py +++ b/tests/test_simple.py @@ -450,7 +450,7 @@ def test_port_conflict(self): with pytest.raises( expected_exception=StartNodeException, - match=re.escape("Cannot start node after multiple attempts") + match=re.escape("Cannot start node after multiple attempts.") ): node2.init().start() From a9137dfeabfd0ac14b177811fef5c73296803734 Mon Sep 17 00:00:00 2001 From: Dmitry Kovalenko Date: Tue, 1 Apr 2025 16:15:30 +0300 Subject: [PATCH 60/90] [conftest] Advanced processing of logging (#230) * [conftest] Advanced processing of logging This patch does the following things: - it processes the calls of logging.error as test errors - it prints the number of errors/warnings for each test - it prints the total stats of errors/warnings/duration --- tests/conftest.py | 462 +++++++++++++++++++++++++++++++--- tests/test_conftest.py--devel | 80 ++++++ 2 files changed, 507 insertions(+), 35 deletions(-) create mode 100644 tests/test_conftest.py--devel diff --git a/tests/conftest.py b/tests/conftest.py index 9e8ea368..ff3b3cb4 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -8,6 +8,7 @@ import pathlib import math import datetime +import typing import _pytest.outcomes import _pytest.unittest @@ -105,71 +106,169 @@ class TEST_PROCESS_STATS: cXFailedTests: int = 0 cSkippedTests: int = 0 cNotXFailedTests: int = 0 + cWarningTests: int = 0 cUnexpectedTests: int = 0 cAchtungTests: int = 0 - FailedTests = list[str]() - XFailedTests = list[str]() + FailedTests = list[str, int]() + XFailedTests = list[str, int]() NotXFailedTests = list[str]() + WarningTests = list[str, int]() AchtungTests = list[str]() + cTotalDuration: datetime.timedelta = datetime.timedelta() + + 
cTotalErrors: int = 0 + cTotalWarnings: int = 0 + # -------------------------------------------------------------------- def incrementTotalTestCount() -> None: + assert type(__class__.cTotalTests) == int # noqa: E721 + assert __class__.cTotalTests >= 0 + __class__.cTotalTests += 1 + assert __class__.cTotalTests > 0 + # -------------------------------------------------------------------- def incrementNotExecutedTestCount() -> None: + assert type(__class__.cNotExecutedTests) == int # noqa: E721 + assert __class__.cNotExecutedTests >= 0 + __class__.cNotExecutedTests += 1 + assert __class__.cNotExecutedTests > 0 + # -------------------------------------------------------------------- def incrementExecutedTestCount() -> int: + assert type(__class__.cExecutedTests) == int # noqa: E721 + assert __class__.cExecutedTests >= 0 + __class__.cExecutedTests += 1 + + assert __class__.cExecutedTests > 0 return __class__.cExecutedTests # -------------------------------------------------------------------- def incrementPassedTestCount() -> None: + assert type(__class__.cPassedTests) == int # noqa: E721 + assert __class__.cPassedTests >= 0 + __class__.cPassedTests += 1 + assert __class__.cPassedTests > 0 + # -------------------------------------------------------------------- - def incrementFailedTestCount(testID: str) -> None: + def incrementFailedTestCount(testID: str, errCount: int) -> None: assert type(testID) == str # noqa: E721 + assert type(errCount) == int # noqa: E721 + assert errCount > 0 assert type(__class__.FailedTests) == list # noqa: E721 + assert type(__class__.cFailedTests) == int # noqa: E721 + assert __class__.cFailedTests >= 0 - __class__.FailedTests.append(testID) # raise? + __class__.FailedTests.append((testID, errCount)) # raise? __class__.cFailedTests += 1 + assert len(__class__.FailedTests) > 0 + assert __class__.cFailedTests > 0 + assert len(__class__.FailedTests) == __class__.cFailedTests + + # -------- + assert type(__class__.cTotalErrors) == int # noqa: E721 + assert __class__.cTotalErrors >= 0 + + __class__.cTotalErrors += errCount + + assert __class__.cTotalErrors > 0 + # -------------------------------------------------------------------- - def incrementXFailedTestCount(testID: str) -> None: + def incrementXFailedTestCount(testID: str, errCount: int) -> None: assert type(testID) == str # noqa: E721 + assert type(errCount) == int # noqa: E721 + assert errCount >= 0 assert type(__class__.XFailedTests) == list # noqa: E721 + assert type(__class__.cXFailedTests) == int # noqa: E721 + assert __class__.cXFailedTests >= 0 - __class__.XFailedTests.append(testID) # raise? + __class__.XFailedTests.append((testID, errCount)) # raise? __class__.cXFailedTests += 1 + assert len(__class__.XFailedTests) > 0 + assert __class__.cXFailedTests > 0 + assert len(__class__.XFailedTests) == __class__.cXFailedTests + # -------------------------------------------------------------------- def incrementSkippedTestCount() -> None: + assert type(__class__.cSkippedTests) == int # noqa: E721 + assert __class__.cSkippedTests >= 0 + __class__.cSkippedTests += 1 + assert __class__.cSkippedTests > 0 + # -------------------------------------------------------------------- def incrementNotXFailedTests(testID: str) -> None: assert type(testID) == str # noqa: E721 assert type(__class__.NotXFailedTests) == list # noqa: E721 + assert type(__class__.cNotXFailedTests) == int # noqa: E721 + assert __class__.cNotXFailedTests >= 0 __class__.NotXFailedTests.append(testID) # raise? 
__class__.cNotXFailedTests += 1 + assert len(__class__.NotXFailedTests) > 0 + assert __class__.cNotXFailedTests > 0 + assert len(__class__.NotXFailedTests) == __class__.cNotXFailedTests + + # -------------------------------------------------------------------- + def incrementWarningTestCount(testID: str, warningCount: int) -> None: + assert type(testID) == str # noqa: E721 + assert type(warningCount) == int # noqa: E721 + assert testID != "" + assert warningCount > 0 + assert type(__class__.WarningTests) == list # noqa: E721 + assert type(__class__.cWarningTests) == int # noqa: E721 + assert __class__.cWarningTests >= 0 + + __class__.WarningTests.append((testID, warningCount)) # raise? + __class__.cWarningTests += 1 + + assert len(__class__.WarningTests) > 0 + assert __class__.cWarningTests > 0 + assert len(__class__.WarningTests) == __class__.cWarningTests + + # -------- + assert type(__class__.cTotalWarnings) == int # noqa: E721 + assert __class__.cTotalWarnings >= 0 + + __class__.cTotalWarnings += warningCount + + assert __class__.cTotalWarnings > 0 + # -------------------------------------------------------------------- def incrementUnexpectedTests() -> None: + assert type(__class__.cUnexpectedTests) == int # noqa: E721 + assert __class__.cUnexpectedTests >= 0 + __class__.cUnexpectedTests += 1 + assert __class__.cUnexpectedTests > 0 + # -------------------------------------------------------------------- def incrementAchtungTestCount(testID: str) -> None: assert type(testID) == str # noqa: E721 assert type(__class__.AchtungTests) == list # noqa: E721 + assert type(__class__.cAchtungTests) == int # noqa: E721 + assert __class__.cAchtungTests >= 0 __class__.AchtungTests.append(testID) # raise? __class__.cAchtungTests += 1 + assert len(__class__.AchtungTests) > 0 + assert __class__.cAchtungTests > 0 + assert len(__class__.AchtungTests) == __class__.cAchtungTests + # ///////////////////////////////////////////////////////////////////////////// @@ -212,6 +311,12 @@ def helper__build_test_id(item: pytest.Function) -> str: return testID + +# ///////////////////////////////////////////////////////////////////////////// + +g_error_msg_count_key = pytest.StashKey[int]() +g_warning_msg_count_key = pytest.StashKey[int]() + # ///////////////////////////////////////////////////////////////////////////// @@ -285,6 +390,16 @@ def helper__makereport__call( assert type(call) == pytest.CallInfo # noqa: E721 assert type(outcome) == pluggy.Result # noqa: E721 + # -------- + item_error_msg_count = item.stash.get(g_error_msg_count_key, 0) + assert type(item_error_msg_count) == int # noqa: E721 + assert item_error_msg_count >= 0 + + item_warning_msg_count = item.stash.get(g_warning_msg_count_key, 0) + assert type(item_warning_msg_count) == int # noqa: E721 + assert item_warning_msg_count >= 0 + + # -------- rep = outcome.get_result() assert rep is not None assert type(rep) == pytest.TestReport # noqa: E721 @@ -312,30 +427,35 @@ def helper__makereport__call( if type(call.excinfo.value) == _pytest.outcomes.Skipped: # noqa: E721 assert not hasattr(rep, "wasxfail") - TEST_PROCESS_STATS.incrementSkippedTestCount() - exitStatus = "SKIPPED" reasonText = str(call.excinfo.value) reasonMsgTempl = "SKIP REASON: {0}" - elif type(call.excinfo.value) == _pytest.outcomes.XFailed: # noqa: E721 - TEST_PROCESS_STATS.incrementXFailedTestCount(testID) + TEST_PROCESS_STATS.incrementSkippedTestCount() + elif type(call.excinfo.value) == _pytest.outcomes.XFailed: # noqa: E721 exitStatus = "XFAILED" reasonText = 
str(call.excinfo.value) reasonMsgTempl = "XFAIL REASON: {0}" + + TEST_PROCESS_STATS.incrementXFailedTestCount(testID, item_error_msg_count) + else: exitStatus = "XFAILED" assert hasattr(rep, "wasxfail") assert rep.wasxfail is not None assert type(rep.wasxfail) == str # noqa: E721 - TEST_PROCESS_STATS.incrementXFailedTestCount(testID) - reasonText = rep.wasxfail reasonMsgTempl = "XFAIL REASON: {0}" - logging.error(call.excinfo.value) + if type(call.excinfo.value) == SIGNAL_EXCEPTION: # noqa: E721 + pass + else: + logging.error(call.excinfo.value) + item_error_msg_count += 1 + + TEST_PROCESS_STATS.incrementXFailedTestCount(testID, item_error_msg_count) assert type(reasonText) == str # noqa: E721 @@ -348,9 +468,16 @@ def helper__makereport__call( assert call.excinfo is not None assert call.excinfo.value is not None - TEST_PROCESS_STATS.incrementFailedTestCount(testID) + if type(call.excinfo.value) == SIGNAL_EXCEPTION: # noqa: E721 + assert item_error_msg_count > 0 + pass + else: + logging.error(call.excinfo.value) + item_error_msg_count += 1 + + assert item_error_msg_count > 0 + TEST_PROCESS_STATS.incrementFailedTestCount(testID, item_error_msg_count) - logging.error(call.excinfo.value) exitStatus = "FAILED" elif rep.outcome == "passed": assert call.excinfo is None @@ -360,12 +487,12 @@ def helper__makereport__call( TEST_PROCESS_STATS.incrementNotXFailedTests(testID) - warnMsg = "Test is marked as xfail" + warnMsg = "NOTE: Test is marked as xfail" if rep.wasxfail != "": warnMsg += " [" + rep.wasxfail + "]" - logging.warning(warnMsg) + logging.info(warnMsg) exitStatus = "NOT XFAILED" else: assert not hasattr(rep, "wasxfail") @@ -378,11 +505,25 @@ def helper__makereport__call( # [2025-03-28] It may create a useless problem in new environment. # assert False + # -------- + if item_warning_msg_count > 0: + TEST_PROCESS_STATS.incrementWarningTestCount(testID, item_warning_msg_count) + + # -------- + assert type(TEST_PROCESS_STATS.cTotalDuration) == datetime.timedelta # noqa: E721 + assert type(testDurration) == datetime.timedelta # noqa: E721 + + TEST_PROCESS_STATS.cTotalDuration += testDurration + + assert testDurration <= TEST_PROCESS_STATS.cTotalDuration + # -------- logging.info("*") - logging.info("* DURATION : {0}".format(timedelta_to_human_text(testDurration))) + logging.info("* DURATION : {0}".format(timedelta_to_human_text(testDurration))) logging.info("*") - logging.info("* EXIT STATUS : {0}".format(exitStatus)) + logging.info("* EXIT STATUS : {0}".format(exitStatus)) + logging.info("* ERROR COUNT : {0}".format(item_error_msg_count)) + logging.info("* WARNING COUNT: {0}".format(item_warning_msg_count)) logging.info("*") logging.info("* STOP TEST {0}".format(testID)) logging.info("*") @@ -437,6 +578,186 @@ def pytest_runtest_makereport(item: pytest.Function, call: pytest.CallInfo): # ///////////////////////////////////////////////////////////////////////////// +class LogErrorWrapper2: + _old_method: any + _counter: typing.Optional[int] + + # -------------------------------------------------------------------- + def __init__(self): + self._old_method = None + self._counter = None + + # -------------------------------------------------------------------- + def __enter__(self): + assert self._old_method is None + assert self._counter is None + + self._old_method = logging.error + self._counter = 0 + + logging.error = self + return self + + # -------------------------------------------------------------------- + def __exit__(self, exc_type, exc_val, exc_tb): + assert self._old_method is not 
None + assert self._counter is not None + + assert logging.error is self + + logging.error = self._old_method + + self._old_method = None + self._counter = None + return False + + # -------------------------------------------------------------------- + def __call__(self, *args, **kwargs): + assert self._old_method is not None + assert self._counter is not None + + assert type(self._counter) == int # noqa: E721 + assert self._counter >= 0 + + r = self._old_method(*args, **kwargs) + + self._counter += 1 + assert self._counter > 0 + + return r + + +# ///////////////////////////////////////////////////////////////////////////// + + +class LogWarningWrapper2: + _old_method: any + _counter: typing.Optional[int] + + # -------------------------------------------------------------------- + def __init__(self): + self._old_method = None + self._counter = None + + # -------------------------------------------------------------------- + def __enter__(self): + assert self._old_method is None + assert self._counter is None + + self._old_method = logging.warning + self._counter = 0 + + logging.warning = self + return self + + # -------------------------------------------------------------------- + def __exit__(self, exc_type, exc_val, exc_tb): + assert self._old_method is not None + assert self._counter is not None + + assert logging.warning is self + + logging.warning = self._old_method + + self._old_method = None + self._counter = None + return False + + # -------------------------------------------------------------------- + def __call__(self, *args, **kwargs): + assert self._old_method is not None + assert self._counter is not None + + assert type(self._counter) == int # noqa: E721 + assert self._counter >= 0 + + r = self._old_method(*args, **kwargs) + + self._counter += 1 + assert self._counter > 0 + + return r + + +# ///////////////////////////////////////////////////////////////////////////// + + +class SIGNAL_EXCEPTION(Exception): + def __init__(self): + pass + + +# ///////////////////////////////////////////////////////////////////////////// + + +@pytest.hookimpl(hookwrapper=True) +def pytest_pyfunc_call(pyfuncitem: pytest.Function): + assert pyfuncitem is not None + assert isinstance(pyfuncitem, pytest.Function) + + debug__log_error_method = logging.error + assert debug__log_error_method is not None + + debug__log_warning_method = logging.warning + assert debug__log_warning_method is not None + + pyfuncitem.stash[g_error_msg_count_key] = 0 + pyfuncitem.stash[g_warning_msg_count_key] = 0 + + try: + with LogErrorWrapper2() as logErrorWrapper, LogWarningWrapper2() as logWarningWrapper: + assert type(logErrorWrapper) == LogErrorWrapper2 # noqa: E721 + assert logErrorWrapper._old_method is not None + assert type(logErrorWrapper._counter) == int # noqa: E721 + assert logErrorWrapper._counter == 0 + assert logging.error is logErrorWrapper + + assert type(logWarningWrapper) == LogWarningWrapper2 # noqa: E721 + assert logWarningWrapper._old_method is not None + assert type(logWarningWrapper._counter) == int # noqa: E721 + assert logWarningWrapper._counter == 0 + assert logging.warning is logWarningWrapper + + r: pluggy.Result = yield + + assert r is not None + assert type(r) == pluggy.Result # noqa: E721 + + assert logErrorWrapper._old_method is not None + assert type(logErrorWrapper._counter) == int # noqa: E721 + assert logErrorWrapper._counter >= 0 + assert logging.error is logErrorWrapper + + assert logWarningWrapper._old_method is not None + assert type(logWarningWrapper._counter) == int # noqa: 
E721 + assert logWarningWrapper._counter >= 0 + assert logging.warning is logWarningWrapper + + assert g_error_msg_count_key in pyfuncitem.stash + assert g_warning_msg_count_key in pyfuncitem.stash + + assert pyfuncitem.stash[g_error_msg_count_key] == 0 + assert pyfuncitem.stash[g_warning_msg_count_key] == 0 + + pyfuncitem.stash[g_error_msg_count_key] = logErrorWrapper._counter + pyfuncitem.stash[g_warning_msg_count_key] = logWarningWrapper._counter + + if r.exception is not None: + pass + elif logErrorWrapper._counter == 0: + pass + else: + assert logErrorWrapper._counter > 0 + r.force_exception(SIGNAL_EXCEPTION()) + finally: + assert logging.error is debug__log_error_method + assert logging.warning is debug__log_warning_method + pass + + +# ///////////////////////////////////////////////////////////////////////////// + + def helper__calc_W(n: int) -> int: assert n > 0 @@ -467,11 +788,42 @@ def helper__print_test_list(tests: list[str]) -> None: nTest = 0 - while nTest < len(tests): - testID = tests[nTest] - assert type(testID) == str # noqa: E721 + for t in tests: + assert type(t) == str # noqa: E721 + assert t != "" nTest += 1 - logging.info(templateLine.format(nTest, testID)) + logging.info(templateLine.format(nTest, t)) + + +# ------------------------------------------------------------------------ +def helper__print_test_list2(tests: list[str, int]) -> None: + assert type(tests) == list # noqa: E721 + + assert helper__calc_W(9) == 1 + assert helper__calc_W(10) == 2 + assert helper__calc_W(11) == 2 + assert helper__calc_W(99) == 2 + assert helper__calc_W(100) == 3 + assert helper__calc_W(101) == 3 + assert helper__calc_W(999) == 3 + assert helper__calc_W(1000) == 4 + assert helper__calc_W(1001) == 4 + + W = helper__calc_W(len(tests)) + + templateLine = "{0:0" + str(W) + "d}. 
{1} ({2})" + + nTest = 0 + + for t in tests: + assert type(t) == tuple # noqa: E721 + assert len(t) == 2 + assert type(t[0]) == str # noqa: E721 + assert type(t[1]) == int # noqa: E721 + assert t[0] != "" + assert t[1] >= 0 + nTest += 1 + logging.info(templateLine.format(nTest, t[0], t[1])) # ///////////////////////////////////////////////////////////////////////////// @@ -505,6 +857,22 @@ def LOCAL__print_test_list(header: str, test_count: int, test_list: list[str]): helper__print_test_list(test_list) logging.info("") + def LOCAL__print_test_list2( + header: str, test_count: int, test_list: list[str, int] + ): + assert type(header) == str # noqa: E721 + assert type(test_count) == int # noqa: E721 + assert type(test_list) == list # noqa: E721 + assert header != "" + assert test_count >= 0 + assert len(test_list) == test_count + + LOCAL__print_line1_with_header(header) + logging.info("") + if len(test_list) > 0: + helper__print_test_list2(test_list) + logging.info("") + # fmt: off LOCAL__print_test_list( "ACHTUNG TESTS", @@ -512,13 +880,13 @@ def LOCAL__print_test_list(header: str, test_count: int, test_list: list[str]): TEST_PROCESS_STATS.AchtungTests, ) - LOCAL__print_test_list( + LOCAL__print_test_list2( "FAILED TESTS", TEST_PROCESS_STATS.cFailedTests, TEST_PROCESS_STATS.FailedTests ) - LOCAL__print_test_list( + LOCAL__print_test_list2( "XFAILED TESTS", TEST_PROCESS_STATS.cXFailedTests, TEST_PROCESS_STATS.XFailedTests, @@ -529,22 +897,46 @@ def LOCAL__print_test_list(header: str, test_count: int, test_list: list[str]): TEST_PROCESS_STATS.cNotXFailedTests, TEST_PROCESS_STATS.NotXFailedTests, ) + + LOCAL__print_test_list2( + "WARNING TESTS", + TEST_PROCESS_STATS.cWarningTests, + TEST_PROCESS_STATS.WarningTests, + ) # fmt: on LOCAL__print_line1_with_header("SUMMARY STATISTICS") logging.info("") logging.info("[TESTS]") - logging.info(" TOTAL : {0}".format(TEST_PROCESS_STATS.cTotalTests)) - logging.info(" EXECUTED : {0}".format(TEST_PROCESS_STATS.cExecutedTests)) - logging.info(" NOT EXECUTED: {0}".format(TEST_PROCESS_STATS.cNotExecutedTests)) - logging.info(" ACHTUNG : {0}".format(TEST_PROCESS_STATS.cAchtungTests)) + logging.info(" TOTAL : {0}".format(TEST_PROCESS_STATS.cTotalTests)) + logging.info(" EXECUTED : {0}".format(TEST_PROCESS_STATS.cExecutedTests)) + logging.info(" NOT EXECUTED : {0}".format(TEST_PROCESS_STATS.cNotExecutedTests)) + logging.info(" ACHTUNG : {0}".format(TEST_PROCESS_STATS.cAchtungTests)) + logging.info("") + logging.info(" PASSED : {0}".format(TEST_PROCESS_STATS.cPassedTests)) + logging.info(" FAILED : {0}".format(TEST_PROCESS_STATS.cFailedTests)) + logging.info(" XFAILED : {0}".format(TEST_PROCESS_STATS.cXFailedTests)) + logging.info(" NOT XFAILED : {0}".format(TEST_PROCESS_STATS.cNotXFailedTests)) + logging.info(" SKIPPED : {0}".format(TEST_PROCESS_STATS.cSkippedTests)) + logging.info(" WITH WARNINGS: {0}".format(TEST_PROCESS_STATS.cWarningTests)) + logging.info(" UNEXPECTED : {0}".format(TEST_PROCESS_STATS.cUnexpectedTests)) + logging.info("") + + assert type(TEST_PROCESS_STATS.cTotalDuration) == datetime.timedelta # noqa: E721 + + LOCAL__print_line1_with_header("TIME") + logging.info("") + logging.info( + " TOTAL DURATION: {0}".format( + timedelta_to_human_text(TEST_PROCESS_STATS.cTotalDuration) + ) + ) + logging.info("") + + LOCAL__print_line1_with_header("TOTAL INFORMATION") logging.info("") - logging.info(" PASSED : {0}".format(TEST_PROCESS_STATS.cPassedTests)) - logging.info(" FAILED : {0}".format(TEST_PROCESS_STATS.cFailedTests)) - logging.info(" XFAILED : 
{0}".format(TEST_PROCESS_STATS.cXFailedTests)) - logging.info(" NOT XFAILED : {0}".format(TEST_PROCESS_STATS.cNotXFailedTests)) - logging.info(" SKIPPED : {0}".format(TEST_PROCESS_STATS.cSkippedTests)) - logging.info(" UNEXPECTED : {0}".format(TEST_PROCESS_STATS.cUnexpectedTests)) + logging.info(" TOTAL ERROR COUNT : {0}".format(TEST_PROCESS_STATS.cTotalErrors)) + logging.info(" TOTAL WARNING COUNT: {0}".format(TEST_PROCESS_STATS.cTotalWarnings)) logging.info("") diff --git a/tests/test_conftest.py--devel b/tests/test_conftest.py--devel new file mode 100644 index 00000000..67c1dafe --- /dev/null +++ b/tests/test_conftest.py--devel @@ -0,0 +1,80 @@ +import pytest +import logging + + +class TestConfest: + def test_failed(self): + raise Exception("TEST EXCEPTION!") + + def test_ok(self): + pass + + @pytest.mark.skip() + def test_mark_skip__no_reason(self): + pass + + @pytest.mark.xfail() + def test_mark_xfail__no_reason(self): + raise Exception("XFAIL EXCEPTION") + + @pytest.mark.xfail() + def test_mark_xfail__no_reason___no_error(self): + pass + + @pytest.mark.skip(reason="reason") + def test_mark_skip__with_reason(self): + pass + + @pytest.mark.xfail(reason="reason") + def test_mark_xfail__with_reason(self): + raise Exception("XFAIL EXCEPTION") + + @pytest.mark.xfail(reason="reason") + def test_mark_xfail__with_reason___no_error(self): + pass + + def test_exc_skip__no_reason(self): + pytest.skip() + + def test_exc_xfail__no_reason(self): + pytest.xfail() + + def test_exc_skip__with_reason(self): + pytest.skip(reason="SKIP REASON") + + def test_exc_xfail__with_reason(self): + pytest.xfail(reason="XFAIL EXCEPTION") + + def test_log_error(self): + logging.error("IT IS A LOG ERROR!") + + def test_log_error_and_exc(self): + logging.error("IT IS A LOG ERROR!") + + raise Exception("TEST EXCEPTION!") + + def test_log_error_and_warning(self): + logging.error("IT IS A LOG ERROR!") + logging.warning("IT IS A LOG WARNING!") + logging.error("IT IS THE SECOND LOG ERROR!") + logging.warning("IT IS THE SECOND LOG WARNING!") + + @pytest.mark.xfail() + def test_log_error_and_xfail_mark_without_reason(self): + logging.error("IT IS A LOG ERROR!") + + @pytest.mark.xfail(reason="It is a reason message") + def test_log_error_and_xfail_mark_with_reason(self): + logging.error("IT IS A LOG ERROR!") + + @pytest.mark.xfail() + def test_two_log_error_and_xfail_mark_without_reason(self): + logging.error("IT IS THE FIRST LOG ERROR!") + logging.info("----------") + logging.error("IT IS THE SECOND LOG ERROR!") + + @pytest.mark.xfail(reason="It is a reason message") + def test_two_log_error_and_xfail_mark_with_reason(self): + logging.error("IT IS THE FIRST LOG ERROR!") + logging.info("----------") + logging.error("IT IS THE SECOND LOG ERROR!") From 5a19e0b4d677bee7264c4ce3d7a323fc9f1e5559 Mon Sep 17 00:00:00 2001 From: Dmitry Kovalenko Date: Wed, 2 Apr 2025 14:33:51 +0300 Subject: [PATCH 61/90] TestOsOpsCommon is added [generic os_ops tests] (#231) Plus: - [BUG FIX] LocalOperations::is_executable is corrected - [FIX] OsOperations::mkstemp is added --- testgres/operations/local_ops.py | 3 +- testgres/operations/os_ops.py | 3 + tests/test_local.py | 327 ---------------- tests/test_os_ops_common.py | 650 +++++++++++++++++++++++++++++++ tests/test_remote.py | 523 ------------------------- 5 files changed, 655 insertions(+), 851 deletions(-) create mode 100644 tests/test_os_ops_common.py diff --git a/testgres/operations/local_ops.py b/testgres/operations/local_ops.py index 93a64787..35e94210 100644 --- 
a/testgres/operations/local_ops.py +++ b/testgres/operations/local_ops.py @@ -156,7 +156,8 @@ def find_executable(self, executable): def is_executable(self, file): # Check if the file is executable - return os.stat(file).st_mode & stat.S_IXUSR + assert stat.S_IXUSR != 0 + return (os.stat(file).st_mode & stat.S_IXUSR) == stat.S_IXUSR def set_env(self, var_name, var_val): # Check if the directory is already in PATH diff --git a/testgres/operations/os_ops.py b/testgres/operations/os_ops.py index f20a7a30..3c606871 100644 --- a/testgres/operations/os_ops.py +++ b/testgres/operations/os_ops.py @@ -77,6 +77,9 @@ def pathsep(self): def mkdtemp(self, prefix=None): raise NotImplementedError() + def mkstemp(self, prefix=None): + raise NotImplementedError() + def copytree(self, src, dst): raise NotImplementedError() diff --git a/tests/test_local.py b/tests/test_local.py index 3ae93f76..7b5e488d 100644 --- a/tests/test_local.py +++ b/tests/test_local.py @@ -3,15 +3,9 @@ import pytest import re -import tempfile -import logging -from ..testgres import ExecUtilException -from ..testgres import InvalidOperationException from ..testgres import LocalOperations -from .helpers.run_conditions import RunConditions - class TestLocalOperations: @@ -19,138 +13,6 @@ class TestLocalOperations: def setup(self): self.operations = LocalOperations() - def test_mkdtemp__default(self): - path = self.operations.mkdtemp() - logging.info("Path is [{0}].".format(path)) - assert os.path.exists(path) - os.rmdir(path) - assert not os.path.exists(path) - - def test_mkdtemp__custom(self): - C_TEMPLATE = "abcdef" - path = self.operations.mkdtemp(C_TEMPLATE) - logging.info("Path is [{0}].".format(path)) - assert os.path.exists(path) - assert C_TEMPLATE in os.path.basename(path) - os.rmdir(path) - assert not os.path.exists(path) - - def test_exec_command_success(self): - """ - Test exec_command for successful command execution. - """ - RunConditions.skip_if_windows() - - cmd = "python3 --version" - response = self.operations.exec_command(cmd, wait_exit=True, shell=True) - - assert b'Python 3.' in response - - def test_exec_command_failure(self): - """ - Test exec_command for command execution failure. - """ - RunConditions.skip_if_windows() - - cmd = "nonexistent_command" - while True: - try: - self.operations.exec_command(cmd, wait_exit=True, shell=True) - except ExecUtilException as e: - assert type(e.exit_code) == int # noqa: E721 - assert e.exit_code == 127 - - assert type(e.message) == str # noqa: E721 - assert type(e.error) == bytes # noqa: E721 - - assert e.message.startswith("Utility exited with non-zero code (127). Error:") - assert "nonexistent_command" in e.message - assert "not found" in e.message - assert b"nonexistent_command" in e.error - assert b"not found" in e.error - break - raise Exception("We wait an exception!") - - def test_exec_command_failure__expect_error(self): - """ - Test exec_command for command execution failure. - """ - RunConditions.skip_if_windows() - - cmd = "nonexistent_command" - - exit_status, result, error = self.operations.exec_command(cmd, verbose=True, wait_exit=True, shell=True, expect_error=True) - - assert exit_status == 127 - assert result == b'' - assert type(error) == bytes # noqa: E721 - assert b"nonexistent_command" in error - assert b"not found" in error - - def test_listdir(self): - """ - Test listdir for listing directory contents. 
- """ - path = "/etc" - files = self.operations.listdir(path) - assert isinstance(files, list) - for f in files: - assert f is not None - assert type(f) == str # noqa: E721 - - def test_read__text(self): - """ - Test LocalOperations::read for text data. - """ - filename = __file__ # current file - - with open(filename, 'r') as file: # open in a text mode - response0 = file.read() - - assert type(response0) == str # noqa: E721 - - response1 = self.operations.read(filename) - assert type(response1) == str # noqa: E721 - assert response1 == response0 - - response2 = self.operations.read(filename, encoding=None, binary=False) - assert type(response2) == str # noqa: E721 - assert response2 == response0 - - response3 = self.operations.read(filename, encoding="") - assert type(response3) == str # noqa: E721 - assert response3 == response0 - - response4 = self.operations.read(filename, encoding="UTF-8") - assert type(response4) == str # noqa: E721 - assert response4 == response0 - - def test_read__binary(self): - """ - Test LocalOperations::read for binary data. - """ - filename = __file__ # current file - - with open(filename, 'rb') as file: # open in a binary mode - response0 = file.read() - - assert type(response0) == bytes # noqa: E721 - - response1 = self.operations.read(filename, binary=True) - assert type(response1) == bytes # noqa: E721 - assert response1 == response0 - - def test_read__binary_and_encoding(self): - """ - Test LocalOperations::read for binary data and encoding. - """ - filename = __file__ # current file - - with pytest.raises( - InvalidOperationException, - match=re.escape("Enconding is not allowed for read binary operation")): - self.operations.read(filename, encoding="", binary=True) - def test_read__unknown_file(self): """ Test LocalOperations::read with unknown file. @@ -159,40 +21,6 @@ def test_read__unknown_file(self): with pytest.raises(FileNotFoundError, match=re.escape("[Errno 2] No such file or directory: '/dummy'")): self.operations.read("/dummy") - def test_read_binary__spec(self): - """ - Test LocalOperations::read_binary. - """ - filename = __file__ # current file - - with open(filename, 'rb') as file: # open in a binary mode - response0 = file.read() - - assert type(response0) == bytes # noqa: E721 - - response1 = self.operations.read_binary(filename, 0) - assert type(response1) == bytes # noqa: E721 - assert response1 == response0 - - response2 = self.operations.read_binary(filename, 1) - assert type(response2) == bytes # noqa: E721 - assert len(response2) < len(response1) - assert len(response2) + 1 == len(response1) - assert response2 == response1[1:] - - response3 = self.operations.read_binary(filename, len(response1)) - assert type(response3) == bytes # noqa: E721 - assert len(response3) == 0 - - response4 = self.operations.read_binary(filename, len(response2)) - assert type(response4) == bytes # noqa: E721 - assert len(response4) == 1 - assert response4[0] == response1[len(response1) - 1] - - response5 = self.operations.read_binary(filename, len(response1) + 1) - assert type(response5) == bytes # noqa: E721 - assert len(response5) == 0 - def test_read_binary__spec__unk_file(self): """ Test LocalOperations::read_binary with unknown file. @@ -203,29 +31,6 @@ def test_read_binary__spec__unk_file(self): match=re.escape("[Errno 2] No such file or directory: '/dummy'")): self.operations.read_binary("/dummy", 0) - def test_read_binary__spec__negative_offset(self): - """ - Test LocalOperations::read_binary with negative offset. 
- """ - - with pytest.raises( - ValueError, - match=re.escape("Negative 'offset' is not supported.")): - self.operations.read_binary(__file__, -1) - - def test_get_file_size(self): - """ - Test LocalOperations::get_file_size. - """ - filename = __file__ # current file - - sz0 = os.path.getsize(filename) - assert type(sz0) == int # noqa: E721 - - sz1 = self.operations.get_file_size(filename) - assert type(sz1) == int # noqa: E721 - assert sz1 == sz0 - def test_get_file_size__unk_file(self): """ Test LocalOperations::get_file_size. @@ -234,70 +39,6 @@ def test_get_file_size__unk_file(self): with pytest.raises(FileNotFoundError, match=re.escape("[Errno 2] No such file or directory: '/dummy'")): self.operations.get_file_size("/dummy") - def test_isfile_true(self): - """ - Test isfile for an existing file. - """ - filename = __file__ - - response = self.operations.isfile(filename) - - assert response is True - - def test_isfile_false__not_exist(self): - """ - Test isfile for a non-existing file. - """ - filename = os.path.join(os.path.dirname(__file__), "nonexistent_file.txt") - - response = self.operations.isfile(filename) - - assert response is False - - def test_isfile_false__directory(self): - """ - Test isfile for a firectory. - """ - name = os.path.dirname(__file__) - - assert self.operations.isdir(name) - - response = self.operations.isfile(name) - - assert response is False - - def test_isdir_true(self): - """ - Test isdir for an existing directory. - """ - name = os.path.dirname(__file__) - - response = self.operations.isdir(name) - - assert response is True - - def test_isdir_false__not_exist(self): - """ - Test isdir for a non-existing directory. - """ - name = os.path.join(os.path.dirname(__file__), "it_is_nonexistent_directory") - - response = self.operations.isdir(name) - - assert response is False - - def test_isdir_false__file(self): - """ - Test isdir for a file. - """ - name = __file__ - - assert self.operations.isfile(name) - - response = self.operations.isdir(name) - - assert response is False - def test_cwd(self): """ Test cwd. 
@@ -314,71 +55,3 @@ def test_cwd(self): # Comp result assert v == expectedValue - - class tagWriteData001: - def __init__(self, sign, source, cp_rw, cp_truncate, cp_binary, cp_data, result): - self.sign = sign - self.source = source - self.call_param__rw = cp_rw - self.call_param__truncate = cp_truncate - self.call_param__binary = cp_binary - self.call_param__data = cp_data - self.result = result - - sm_write_data001 = [ - tagWriteData001("A001", "1234567890", False, False, False, "ABC", "1234567890ABC"), - tagWriteData001("A002", b"1234567890", False, False, True, b"ABC", b"1234567890ABC"), - - tagWriteData001("B001", "1234567890", False, True, False, "ABC", "ABC"), - tagWriteData001("B002", "1234567890", False, True, False, "ABC1234567890", "ABC1234567890"), - tagWriteData001("B003", b"1234567890", False, True, True, b"ABC", b"ABC"), - tagWriteData001("B004", b"1234567890", False, True, True, b"ABC1234567890", b"ABC1234567890"), - - tagWriteData001("C001", "1234567890", True, False, False, "ABC", "1234567890ABC"), - tagWriteData001("C002", b"1234567890", True, False, True, b"ABC", b"1234567890ABC"), - - tagWriteData001("D001", "1234567890", True, True, False, "ABC", "ABC"), - tagWriteData001("D002", "1234567890", True, True, False, "ABC1234567890", "ABC1234567890"), - tagWriteData001("D003", b"1234567890", True, True, True, b"ABC", b"ABC"), - tagWriteData001("D004", b"1234567890", True, True, True, b"ABC1234567890", b"ABC1234567890"), - - tagWriteData001("E001", "\0001234567890\000", False, False, False, "\000ABC\000", "\0001234567890\000\000ABC\000"), - tagWriteData001("E002", b"\0001234567890\000", False, False, True, b"\000ABC\000", b"\0001234567890\000\000ABC\000"), - - tagWriteData001("F001", "a\nb\n", False, False, False, ["c", "d"], "a\nb\nc\nd\n"), - tagWriteData001("F002", b"a\nb\n", False, False, True, [b"c", b"d"], b"a\nb\nc\nd\n"), - - tagWriteData001("G001", "a\nb\n", False, False, False, ["c\n\n", "d\n"], "a\nb\nc\nd\n"), - tagWriteData001("G002", b"a\nb\n", False, False, True, [b"c\n\n", b"d\n"], b"a\nb\nc\nd\n"), - ] - - @pytest.fixture( - params=sm_write_data001, - ids=[x.sign for x in sm_write_data001], - ) - def write_data001(self, request): - assert isinstance(request, pytest.FixtureRequest) - assert type(request.param) == __class__.tagWriteData001 # noqa: E721 - return request.param - - def test_write(self, write_data001): - assert type(write_data001) == __class__.tagWriteData001 # noqa: E721 - - mode = "w+b" if write_data001.call_param__binary else "w+" - - with tempfile.NamedTemporaryFile(mode=mode, delete=True) as tmp_file: - tmp_file.write(write_data001.source) - tmp_file.flush() - - self.operations.write( - tmp_file.name, - write_data001.call_param__data, - read_and_write=write_data001.call_param__rw, - truncate=write_data001.call_param__truncate, - binary=write_data001.call_param__binary) - - tmp_file.seek(0) - - s = tmp_file.read() - - assert s == write_data001.result diff --git a/tests/test_os_ops_common.py b/tests/test_os_ops_common.py new file mode 100644 index 00000000..c3944c3b --- /dev/null +++ b/tests/test_os_ops_common.py @@ -0,0 +1,650 @@ +# coding: utf-8 +from .helpers.os_ops_descrs import OsOpsDescr +from .helpers.os_ops_descrs import OsOpsDescrs +from .helpers.os_ops_descrs import OsOperations +from .helpers.run_conditions import RunConditions + +import os + +import pytest +import re +import tempfile +import logging + +from ..testgres import InvalidOperationException +from ..testgres import ExecUtilException + + +class TestOsOpsCommon: + 
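+    # Generic OsOperations tests: the "os_ops" fixture below parametrizes every
+    # test in this class over both backends (LocalOperations and RemoteOperations),
+    # so a single test body exercises the local and the remote implementation.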
sm_os_ops_descrs: list[OsOpsDescr] = [ + OsOpsDescrs.sm_local_os_ops_descr, + OsOpsDescrs.sm_remote_os_ops_descr + ] + + @pytest.fixture( + params=[descr.os_ops for descr in sm_os_ops_descrs], + ids=[descr.sign for descr in sm_os_ops_descrs] + ) + def os_ops(self, request: pytest.FixtureRequest) -> OsOperations: + assert isinstance(request, pytest.FixtureRequest) + assert isinstance(request.param, OsOperations) + return request.param + + def test_exec_command_success(self, os_ops: OsOperations): + """ + Test exec_command for successful command execution. + """ + assert isinstance(os_ops, OsOperations) + + RunConditions.skip_if_windows() + + cmd = ["sh", "-c", "python3 --version"] + + response = os_ops.exec_command(cmd) + + assert b'Python 3.' in response + + def test_exec_command_failure(self, os_ops: OsOperations): + """ + Test exec_command for command execution failure. + """ + assert isinstance(os_ops, OsOperations) + + RunConditions.skip_if_windows() + + cmd = ["sh", "-c", "nonexistent_command"] + + while True: + try: + os_ops.exec_command(cmd) + except ExecUtilException as e: + assert type(e.exit_code) == int # noqa: E721 + assert e.exit_code == 127 + + assert type(e.message) == str # noqa: E721 + assert type(e.error) == bytes # noqa: E721 + + assert e.message.startswith("Utility exited with non-zero code (127). Error:") + assert "nonexistent_command" in e.message + assert "not found" in e.message + assert b"nonexistent_command" in e.error + assert b"not found" in e.error + break + raise Exception("We wait an exception!") + + def test_exec_command_failure__expect_error(self, os_ops: OsOperations): + """ + Test exec_command for command execution failure. + """ + assert isinstance(os_ops, OsOperations) + + RunConditions.skip_if_windows() + + cmd = ["sh", "-c", "nonexistent_command"] + + exit_status, result, error = os_ops.exec_command(cmd, verbose=True, expect_error=True) + + assert exit_status == 127 + assert result == b'' + assert type(error) == bytes # noqa: E721 + assert b"nonexistent_command" in error + assert b"not found" in error + + def test_is_executable_true(self, os_ops: OsOperations): + """ + Test is_executable for an existing executable. + """ + assert isinstance(os_ops, OsOperations) + + RunConditions.skip_if_windows() + + response = os_ops.is_executable("/bin/sh") + + assert response is True + + def test_is_executable_false(self, os_ops: OsOperations): + """ + Test is_executable for a non-executable. + """ + assert isinstance(os_ops, OsOperations) + + response = os_ops.is_executable(__file__) + + assert response is False + + def test_makedirs_and_rmdirs_success(self, os_ops: OsOperations): + """ + Test makedirs and rmdirs for successful directory creation and removal. + """ + assert isinstance(os_ops, OsOperations) + + RunConditions.skip_if_windows() + + cmd = "pwd" + pwd = os_ops.exec_command(cmd, wait_exit=True, encoding='utf-8').strip() + + path = "{}/test_dir".format(pwd) + + # Test makedirs + os_ops.makedirs(path) + assert os.path.exists(path) + assert os_ops.path_exists(path) + + # Test rmdirs + os_ops.rmdirs(path) + assert not os.path.exists(path) + assert not os_ops.path_exists(path) + + def test_makedirs_failure(self, os_ops: OsOperations): + """ + Test makedirs for failure. 
+ """ + # Try to create a directory in a read-only location + assert isinstance(os_ops, OsOperations) + + RunConditions.skip_if_windows() + + path = "/root/test_dir" + + # Test makedirs + with pytest.raises(Exception): + os_ops.makedirs(path) + + def test_listdir(self, os_ops: OsOperations): + """ + Test listdir for listing directory contents. + """ + assert isinstance(os_ops, OsOperations) + + RunConditions.skip_if_windows() + + path = "/etc" + files = os_ops.listdir(path) + assert isinstance(files, list) + for f in files: + assert f is not None + assert type(f) == str # noqa: E721 + + def test_path_exists_true__directory(self, os_ops: OsOperations): + """ + Test path_exists for an existing directory. + """ + assert isinstance(os_ops, OsOperations) + + RunConditions.skip_if_windows() + + assert os_ops.path_exists("/etc") is True + + def test_path_exists_true__file(self, os_ops: OsOperations): + """ + Test path_exists for an existing file. + """ + assert isinstance(os_ops, OsOperations) + + RunConditions.skip_if_windows() + + assert os_ops.path_exists(__file__) is True + + def test_path_exists_false__directory(self, os_ops: OsOperations): + """ + Test path_exists for a non-existing directory. + """ + assert isinstance(os_ops, OsOperations) + + RunConditions.skip_if_windows() + + assert os_ops.path_exists("/nonexistent_path") is False + + def test_path_exists_false__file(self, os_ops: OsOperations): + """ + Test path_exists for a non-existing file. + """ + assert isinstance(os_ops, OsOperations) + + RunConditions.skip_if_windows() + + assert os_ops.path_exists("/etc/nonexistent_path.txt") is False + + def test_mkdtemp__default(self, os_ops: OsOperations): + assert isinstance(os_ops, OsOperations) + + path = os_ops.mkdtemp() + logging.info("Path is [{0}].".format(path)) + assert os.path.exists(path) + os.rmdir(path) + assert not os.path.exists(path) + + def test_mkdtemp__custom(self, os_ops: OsOperations): + assert isinstance(os_ops, OsOperations) + + C_TEMPLATE = "abcdef" + path = os_ops.mkdtemp(C_TEMPLATE) + logging.info("Path is [{0}].".format(path)) + assert os.path.exists(path) + assert C_TEMPLATE in os.path.basename(path) + os.rmdir(path) + assert not os.path.exists(path) + + def test_rmdirs(self, os_ops: OsOperations): + assert isinstance(os_ops, OsOperations) + + path = os_ops.mkdtemp() + assert os.path.exists(path) + + assert os_ops.rmdirs(path, ignore_errors=False) is True + assert not os.path.exists(path) + + def test_rmdirs__01_with_subfolder(self, os_ops: OsOperations): + assert isinstance(os_ops, OsOperations) + + # folder with subfolder + path = os_ops.mkdtemp() + assert os.path.exists(path) + + dir1 = os.path.join(path, "dir1") + assert not os.path.exists(dir1) + + os_ops.makedirs(dir1) + assert os.path.exists(dir1) + + assert os_ops.rmdirs(path, ignore_errors=False) is True + assert not os.path.exists(path) + assert not os.path.exists(dir1) + + def test_rmdirs__02_with_file(self, os_ops: OsOperations): + assert isinstance(os_ops, OsOperations) + + # folder with file + path = os_ops.mkdtemp() + assert os.path.exists(path) + + file1 = os.path.join(path, "file1.txt") + assert not os.path.exists(file1) + + os_ops.touch(file1) + assert os.path.exists(file1) + + assert os_ops.rmdirs(path, ignore_errors=False) is True + assert not os.path.exists(path) + assert not os.path.exists(file1) + + def test_rmdirs__03_with_subfolder_and_file(self, os_ops: OsOperations): + assert isinstance(os_ops, OsOperations) + + # folder with subfolder and file + path = os_ops.mkdtemp() + assert 
os.path.exists(path) + + dir1 = os.path.join(path, "dir1") + assert not os.path.exists(dir1) + + os_ops.makedirs(dir1) + assert os.path.exists(dir1) + + file1 = os.path.join(dir1, "file1.txt") + assert not os.path.exists(file1) + + os_ops.touch(file1) + assert os.path.exists(file1) + + assert os_ops.rmdirs(path, ignore_errors=False) is True + assert not os.path.exists(path) + assert not os.path.exists(dir1) + assert not os.path.exists(file1) + + def test_write_text_file(self, os_ops: OsOperations): + """ + Test write for writing data to a text file. + """ + assert isinstance(os_ops, OsOperations) + + RunConditions.skip_if_windows() + + filename = os_ops.mkstemp() + data = "Hello, world!" + + os_ops.write(filename, data, truncate=True) + os_ops.write(filename, data) + + response = os_ops.read(filename) + + assert response == data + data + + os_ops.remove_file(filename) + + def test_write_binary_file(self, os_ops: OsOperations): + """ + Test write for writing data to a binary file. + """ + assert isinstance(os_ops, OsOperations) + + RunConditions.skip_if_windows() + + filename = "/tmp/test_file.bin" + data = b"\x00\x01\x02\x03" + + os_ops.write(filename, data, binary=True, truncate=True) + + response = os_ops.read(filename, binary=True) + + assert response == data + + def test_read_text_file(self, os_ops: OsOperations): + """ + Test read for reading data from a text file. + """ + assert isinstance(os_ops, OsOperations) + + RunConditions.skip_if_windows() + + filename = "/etc/hosts" + + response = os_ops.read(filename) + + assert isinstance(response, str) + + def test_read_binary_file(self, os_ops: OsOperations): + """ + Test read for reading data from a binary file. + """ + assert isinstance(os_ops, OsOperations) + + RunConditions.skip_if_windows() + + filename = "/usr/bin/python3" + + response = os_ops.read(filename, binary=True) + + assert isinstance(response, bytes) + + def test_read__text(self, os_ops: OsOperations): + """ + Test OsOperations::read for text data. + """ + assert isinstance(os_ops, OsOperations) + + filename = __file__ # current file + + with open(filename, 'r') as file: # open in a text mode + response0 = file.read() + + assert type(response0) == str # noqa: E721 + + response1 = os_ops.read(filename) + assert type(response1) == str # noqa: E721 + assert response1 == response0 + + response2 = os_ops.read(filename, encoding=None, binary=False) + assert type(response2) == str # noqa: E721 + assert response2 == response0 + + response3 = os_ops.read(filename, encoding="") + assert type(response3) == str # noqa: E721 + assert response3 == response0 + + response4 = os_ops.read(filename, encoding="UTF-8") + assert type(response4) == str # noqa: E721 + assert response4 == response0 + + def test_read__binary(self, os_ops: OsOperations): + """ + Test OsOperations::read for binary data. + """ + filename = __file__ # current file + + with open(filename, 'rb') as file: # open in a binary mode + response0 = file.read() + + assert type(response0) == bytes # noqa: E721 + + response1 = os_ops.read(filename, binary=True) + assert type(response1) == bytes # noqa: E721 + assert response1 == response0 + + def test_read__binary_and_encoding(self, os_ops: OsOperations): + """ + Test OsOperations::read for binary data and encoding. 
+ """ + assert isinstance(os_ops, OsOperations) + + filename = __file__ # current file + + with pytest.raises( + InvalidOperationException, + match=re.escape("Enconding is not allowed for read binary operation")): + os_ops.read(filename, encoding="", binary=True) + + def test_read_binary__spec(self, os_ops: OsOperations): + """ + Test OsOperations::read_binary. + """ + assert isinstance(os_ops, OsOperations) + + filename = __file__ # currnt file + + with open(filename, 'rb') as file: # open in a binary mode + response0 = file.read() + + assert type(response0) == bytes # noqa: E721 + + response1 = os_ops.read_binary(filename, 0) + assert type(response1) == bytes # noqa: E721 + assert response1 == response0 + + response2 = os_ops.read_binary(filename, 1) + assert type(response2) == bytes # noqa: E721 + assert len(response2) < len(response1) + assert len(response2) + 1 == len(response1) + assert response2 == response1[1:] + + response3 = os_ops.read_binary(filename, len(response1)) + assert type(response3) == bytes # noqa: E721 + assert len(response3) == 0 + + response4 = os_ops.read_binary(filename, len(response2)) + assert type(response4) == bytes # noqa: E721 + assert len(response4) == 1 + assert response4[0] == response1[len(response1) - 1] + + response5 = os_ops.read_binary(filename, len(response1) + 1) + assert type(response5) == bytes # noqa: E721 + assert len(response5) == 0 + + def test_read_binary__spec__negative_offset(self, os_ops: OsOperations): + """ + Test OsOperations::read_binary with negative offset. + """ + assert isinstance(os_ops, OsOperations) + + with pytest.raises( + ValueError, + match=re.escape("Negative 'offset' is not supported.")): + os_ops.read_binary(__file__, -1) + + def test_get_file_size(self, os_ops: OsOperations): + """ + Test OsOperations::get_file_size. + """ + assert isinstance(os_ops, OsOperations) + + filename = __file__ # current file + + sz0 = os.path.getsize(filename) + assert type(sz0) == int # noqa: E721 + + sz1 = os_ops.get_file_size(filename) + assert type(sz1) == int # noqa: E721 + assert sz1 == sz0 + + def test_isfile_true(self, os_ops: OsOperations): + """ + Test isfile for an existing file. + """ + assert isinstance(os_ops, OsOperations) + + filename = __file__ + + response = os_ops.isfile(filename) + + assert response is True + + def test_isfile_false__not_exist(self, os_ops: OsOperations): + """ + Test isfile for a non-existing file. + """ + assert isinstance(os_ops, OsOperations) + + filename = os.path.join(os.path.dirname(__file__), "nonexistent_file.txt") + + response = os_ops.isfile(filename) + + assert response is False + + def test_isfile_false__directory(self, os_ops: OsOperations): + """ + Test isfile for a firectory. + """ + assert isinstance(os_ops, OsOperations) + + name = os.path.dirname(__file__) + + assert os_ops.isdir(name) + + response = os_ops.isfile(name) + + assert response is False + + def test_isdir_true(self, os_ops: OsOperations): + """ + Test isdir for an existing directory. + """ + assert isinstance(os_ops, OsOperations) + + name = os.path.dirname(__file__) + + response = os_ops.isdir(name) + + assert response is True + + def test_isdir_false__not_exist(self, os_ops: OsOperations): + """ + Test isdir for a non-existing directory. + """ + assert isinstance(os_ops, OsOperations) + + name = os.path.join(os.path.dirname(__file__), "it_is_nonexistent_directory") + + response = os_ops.isdir(name) + + assert response is False + + def test_isdir_false__file(self, os_ops: OsOperations): + """ + Test isdir for a file. 
+ """ + assert isinstance(os_ops, OsOperations) + + name = __file__ + + assert os_ops.isfile(name) + + response = os_ops.isdir(name) + + assert response is False + + def test_cwd(self, os_ops: OsOperations): + """ + Test cwd. + """ + assert isinstance(os_ops, OsOperations) + + v = os_ops.cwd() + + assert v is not None + assert type(v) == str # noqa: E721 + assert v != "" + + class tagWriteData001: + def __init__(self, sign, source, cp_rw, cp_truncate, cp_binary, cp_data, result): + self.sign = sign + self.source = source + self.call_param__rw = cp_rw + self.call_param__truncate = cp_truncate + self.call_param__binary = cp_binary + self.call_param__data = cp_data + self.result = result + + sm_write_data001 = [ + tagWriteData001("A001", "1234567890", False, False, False, "ABC", "1234567890ABC"), + tagWriteData001("A002", b"1234567890", False, False, True, b"ABC", b"1234567890ABC"), + + tagWriteData001("B001", "1234567890", False, True, False, "ABC", "ABC"), + tagWriteData001("B002", "1234567890", False, True, False, "ABC1234567890", "ABC1234567890"), + tagWriteData001("B003", b"1234567890", False, True, True, b"ABC", b"ABC"), + tagWriteData001("B004", b"1234567890", False, True, True, b"ABC1234567890", b"ABC1234567890"), + + tagWriteData001("C001", "1234567890", True, False, False, "ABC", "1234567890ABC"), + tagWriteData001("C002", b"1234567890", True, False, True, b"ABC", b"1234567890ABC"), + + tagWriteData001("D001", "1234567890", True, True, False, "ABC", "ABC"), + tagWriteData001("D002", "1234567890", True, True, False, "ABC1234567890", "ABC1234567890"), + tagWriteData001("D003", b"1234567890", True, True, True, b"ABC", b"ABC"), + tagWriteData001("D004", b"1234567890", True, True, True, b"ABC1234567890", b"ABC1234567890"), + + tagWriteData001("E001", "\0001234567890\000", False, False, False, "\000ABC\000", "\0001234567890\000\000ABC\000"), + tagWriteData001("E002", b"\0001234567890\000", False, False, True, b"\000ABC\000", b"\0001234567890\000\000ABC\000"), + + tagWriteData001("F001", "a\nb\n", False, False, False, ["c", "d"], "a\nb\nc\nd\n"), + tagWriteData001("F002", b"a\nb\n", False, False, True, [b"c", b"d"], b"a\nb\nc\nd\n"), + + tagWriteData001("G001", "a\nb\n", False, False, False, ["c\n\n", "d\n"], "a\nb\nc\nd\n"), + tagWriteData001("G002", b"a\nb\n", False, False, True, [b"c\n\n", b"d\n"], b"a\nb\nc\nd\n"), + ] + + @pytest.fixture( + params=sm_write_data001, + ids=[x.sign for x in sm_write_data001], + ) + def write_data001(self, request): + assert isinstance(request, pytest.FixtureRequest) + assert type(request.param) == __class__.tagWriteData001 # noqa: E721 + return request.param + + def test_write(self, write_data001: tagWriteData001, os_ops: OsOperations): + assert type(write_data001) == __class__.tagWriteData001 # noqa: E721 + assert isinstance(os_ops, OsOperations) + + mode = "w+b" if write_data001.call_param__binary else "w+" + + with tempfile.NamedTemporaryFile(mode=mode, delete=True) as tmp_file: + tmp_file.write(write_data001.source) + tmp_file.flush() + + os_ops.write( + tmp_file.name, + write_data001.call_param__data, + read_and_write=write_data001.call_param__rw, + truncate=write_data001.call_param__truncate, + binary=write_data001.call_param__binary) + + tmp_file.seek(0) + + s = tmp_file.read() + + assert s == write_data001.result + + def test_touch(self, os_ops: OsOperations): + """ + Test touch for creating a new file or updating access and modification times of an existing file. 
+ """ + assert isinstance(os_ops, OsOperations) + + filename = os_ops.mkstemp() + + # TODO: this test does not check the result of 'touch' command! + + os_ops.touch(filename) + + assert os_ops.isfile(filename) + + os_ops.remove_file(filename) diff --git a/tests/test_remote.py b/tests/test_remote.py index 2c37e2c1..565b2d20 100755 --- a/tests/test_remote.py +++ b/tests/test_remote.py @@ -2,16 +2,10 @@ import os import pytest -import re -import tempfile -import logging from ..testgres import ExecUtilException -from ..testgres import InvalidOperationException from ..testgres import RemoteOperations -from ..testgres import LocalOperations from ..testgres import ConnectionParams -from ..testgres import utils as testgres_utils class TestRemoteOperations: @@ -23,179 +17,6 @@ def setup(self): ssh_key=os.getenv('RDBMS_TESTPOOL_SSHKEY')) self.operations = RemoteOperations(conn_params) - def test_exec_command_success(self): - """ - Test exec_command for successful command execution. - """ - cmd = "python3 --version" - response = self.operations.exec_command(cmd, wait_exit=True) - - assert b'Python 3.' in response - - def test_exec_command_failure(self): - """ - Test exec_command for command execution failure. - """ - cmd = "nonexistent_command" - while True: - try: - self.operations.exec_command(cmd, verbose=True, wait_exit=True) - except ExecUtilException as e: - assert type(e.exit_code) == int # noqa: E721 - assert e.exit_code == 127 - - assert type(e.message) == str # noqa: E721 - assert type(e.error) == bytes # noqa: E721 - - assert e.message.startswith("Utility exited with non-zero code (127). Error:") - assert "nonexistent_command" in e.message - assert "not found" in e.message - assert b"nonexistent_command" in e.error - assert b"not found" in e.error - break - raise Exception("We wait an exception!") - - def test_exec_command_failure__expect_error(self): - """ - Test exec_command for command execution failure. - """ - cmd = "nonexistent_command" - - exit_status, result, error = self.operations.exec_command(cmd, verbose=True, wait_exit=True, shell=True, expect_error=True) - - assert exit_status == 127 - assert result == b'' - assert type(error) == bytes # noqa: E721 - assert b"nonexistent_command" in error - assert b"not found" in error - - def test_is_executable_true(self): - """ - Test is_executable for an existing executable. - """ - local_ops = LocalOperations() - cmd = testgres_utils.get_bin_path2(local_ops, "pg_config") - cmd = local_ops.exec_command([cmd, "--bindir"], encoding="utf-8") - cmd = cmd.rstrip() - cmd = os.path.join(cmd, "pg_config") - response = self.operations.is_executable(cmd) - - assert response is True - - def test_is_executable_false(self): - """ - Test is_executable for a non-executable. - """ - cmd = "python" - response = self.operations.is_executable(cmd) - - assert response is False - - def test_makedirs_and_rmdirs_success(self): - """ - Test makedirs and rmdirs for successful directory creation and removal. - """ - cmd = "pwd" - pwd = self.operations.exec_command(cmd, wait_exit=True, encoding='utf-8').strip() - - path = "{}/test_dir".format(pwd) - - # Test makedirs - self.operations.makedirs(path) - assert os.path.exists(path) - assert self.operations.path_exists(path) - - # Test rmdirs - self.operations.rmdirs(path) - assert not os.path.exists(path) - assert not self.operations.path_exists(path) - - def test_makedirs_failure(self): - """ - Test makedirs for failure. 
- """ - # Try to create a directory in a read-only location - path = "/root/test_dir" - - # Test makedirs - with pytest.raises(Exception): - self.operations.makedirs(path) - - def test_mkdtemp__default(self): - path = self.operations.mkdtemp() - logging.info("Path is [{0}].".format(path)) - assert os.path.exists(path) - os.rmdir(path) - assert not os.path.exists(path) - - def test_mkdtemp__custom(self): - C_TEMPLATE = "abcdef" - path = self.operations.mkdtemp(C_TEMPLATE) - logging.info("Path is [{0}].".format(path)) - assert os.path.exists(path) - assert C_TEMPLATE in os.path.basename(path) - os.rmdir(path) - assert not os.path.exists(path) - - def test_rmdirs(self): - path = self.operations.mkdtemp() - assert os.path.exists(path) - - assert self.operations.rmdirs(path, ignore_errors=False) is True - assert not os.path.exists(path) - - def test_rmdirs__01_with_subfolder(self): - # folder with subfolder - path = self.operations.mkdtemp() - assert os.path.exists(path) - - dir1 = os.path.join(path, "dir1") - assert not os.path.exists(dir1) - - self.operations.makedirs(dir1) - assert os.path.exists(dir1) - - assert self.operations.rmdirs(path, ignore_errors=False) is True - assert not os.path.exists(path) - assert not os.path.exists(dir1) - - def test_rmdirs__02_with_file(self): - # folder with file - path = self.operations.mkdtemp() - assert os.path.exists(path) - - file1 = os.path.join(path, "file1.txt") - assert not os.path.exists(file1) - - self.operations.touch(file1) - assert os.path.exists(file1) - - assert self.operations.rmdirs(path, ignore_errors=False) is True - assert not os.path.exists(path) - assert not os.path.exists(file1) - - def test_rmdirs__03_with_subfolder_and_file(self): - # folder with subfolder and file - path = self.operations.mkdtemp() - assert os.path.exists(path) - - dir1 = os.path.join(path, "dir1") - assert not os.path.exists(dir1) - - self.operations.makedirs(dir1) - assert os.path.exists(dir1) - - file1 = os.path.join(dir1, "file1.txt") - assert not os.path.exists(file1) - - self.operations.touch(file1) - assert os.path.exists(file1) - - assert self.operations.rmdirs(path, ignore_errors=False) is True - assert not os.path.exists(path) - assert not os.path.exists(dir1) - assert not os.path.exists(file1) - def test_rmdirs__try_to_delete_nonexist_path(self): path = "/root/test_dir" @@ -216,141 +37,6 @@ def test_rmdirs__try_to_delete_file(self): assert type(x.value.exit_code) == int # noqa: E721 assert x.value.exit_code == 20 - def test_listdir(self): - """ - Test listdir for listing directory contents. - """ - path = "/etc" - files = self.operations.listdir(path) - assert isinstance(files, list) - for f in files: - assert f is not None - assert type(f) == str # noqa: E721 - - def test_path_exists_true__directory(self): - """ - Test path_exists for an existing directory. - """ - assert self.operations.path_exists("/etc") is True - - def test_path_exists_true__file(self): - """ - Test path_exists for an existing file. - """ - assert self.operations.path_exists(__file__) is True - - def test_path_exists_false__directory(self): - """ - Test path_exists for a non-existing directory. - """ - assert self.operations.path_exists("/nonexistent_path") is False - - def test_path_exists_false__file(self): - """ - Test path_exists for a non-existing file. - """ - assert self.operations.path_exists("/etc/nonexistent_path.txt") is False - - def test_write_text_file(self): - """ - Test write for writing data to a text file. 
- """ - filename = "/tmp/test_file.txt" - data = "Hello, world!" - - self.operations.write(filename, data, truncate=True) - self.operations.write(filename, data) - - response = self.operations.read(filename) - - assert response == data + data - - def test_write_binary_file(self): - """ - Test write for writing data to a binary file. - """ - filename = "/tmp/test_file.bin" - data = b"\x00\x01\x02\x03" - - self.operations.write(filename, data, binary=True, truncate=True) - - response = self.operations.read(filename, binary=True) - - assert response == data - - def test_read_text_file(self): - """ - Test read for reading data from a text file. - """ - filename = "/etc/hosts" - - response = self.operations.read(filename) - - assert isinstance(response, str) - - def test_read_binary_file(self): - """ - Test read for reading data from a binary file. - """ - filename = "/usr/bin/python3" - - response = self.operations.read(filename, binary=True) - - assert isinstance(response, bytes) - - def test_read__text(self): - """ - Test RemoteOperations::read for text data. - """ - filename = __file__ # current file - - with open(filename, 'r') as file: # open in a text mode - response0 = file.read() - - assert type(response0) == str # noqa: E721 - - response1 = self.operations.read(filename) - assert type(response1) == str # noqa: E721 - assert response1 == response0 - - response2 = self.operations.read(filename, encoding=None, binary=False) - assert type(response2) == str # noqa: E721 - assert response2 == response0 - - response3 = self.operations.read(filename, encoding="") - assert type(response3) == str # noqa: E721 - assert response3 == response0 - - response4 = self.operations.read(filename, encoding="UTF-8") - assert type(response4) == str # noqa: E721 - assert response4 == response0 - - def test_read__binary(self): - """ - Test RemoteOperations::read for binary data. - """ - filename = __file__ # current file - - with open(filename, 'rb') as file: # open in a binary mode - response0 = file.read() - - assert type(response0) == bytes # noqa: E721 - - response1 = self.operations.read(filename, binary=True) - assert type(response1) == bytes # noqa: E721 - assert response1 == response0 - - def test_read__binary_and_encoding(self): - """ - Test RemoteOperations::read for binary data and encoding. - """ - filename = __file__ # current file - - with pytest.raises( - InvalidOperationException, - match=re.escape("Enconding is not allowed for read binary operation")): - self.operations.read(filename, encoding="", binary=True) - def test_read__unknown_file(self): """ Test RemoteOperations::read with unknown file. @@ -363,40 +49,6 @@ def test_read__unknown_file(self): assert "No such file or directory" in str(x.value) assert "/dummy" in str(x.value) - def test_read_binary__spec(self): - """ - Test RemoteOperations::read_binary. 
- """ - filename = __file__ # currnt file - - with open(filename, 'rb') as file: # open in a binary mode - response0 = file.read() - - assert type(response0) == bytes # noqa: E721 - - response1 = self.operations.read_binary(filename, 0) - assert type(response1) == bytes # noqa: E721 - assert response1 == response0 - - response2 = self.operations.read_binary(filename, 1) - assert type(response2) == bytes # noqa: E721 - assert len(response2) < len(response1) - assert len(response2) + 1 == len(response1) - assert response2 == response1[1:] - - response3 = self.operations.read_binary(filename, len(response1)) - assert type(response3) == bytes # noqa: E721 - assert len(response3) == 0 - - response4 = self.operations.read_binary(filename, len(response2)) - assert type(response4) == bytes # noqa: E721 - assert len(response4) == 1 - assert response4[0] == response1[len(response1) - 1] - - response5 = self.operations.read_binary(filename, len(response1) + 1) - assert type(response5) == bytes # noqa: E721 - assert len(response5) == 0 - def test_read_binary__spec__unk_file(self): """ Test RemoteOperations::read_binary with unknown file. @@ -409,29 +61,6 @@ def test_read_binary__spec__unk_file(self): assert "No such file or directory" in str(x.value) assert "/dummy" in str(x.value) - def test_read_binary__spec__negative_offset(self): - """ - Test RemoteOperations::read_binary with negative offset. - """ - - with pytest.raises( - ValueError, - match=re.escape("Negative 'offset' is not supported.")): - self.operations.read_binary(__file__, -1) - - def test_get_file_size(self): - """ - Test RemoteOperations::get_file_size. - """ - filename = __file__ # current file - - sz0 = os.path.getsize(filename) - assert type(sz0) == int # noqa: E721 - - sz1 = self.operations.get_file_size(filename) - assert type(sz1) == int # noqa: E721 - assert sz1 == sz0 - def test_get_file_size__unk_file(self): """ Test RemoteOperations::get_file_size. @@ -443,155 +72,3 @@ def test_get_file_size__unk_file(self): assert "Utility exited with non-zero code (1)." in str(x.value) assert "No such file or directory" in str(x.value) assert "/dummy" in str(x.value) - - def test_touch(self): - """ - Test touch for creating a new file or updating access and modification times of an existing file. - """ - filename = "/tmp/test_file.txt" - - self.operations.touch(filename) - - assert self.operations.isfile(filename) - - def test_isfile_true(self): - """ - Test isfile for an existing file. - """ - filename = __file__ - - response = self.operations.isfile(filename) - - assert response is True - - def test_isfile_false__not_exist(self): - """ - Test isfile for a non-existing file. - """ - filename = os.path.join(os.path.dirname(__file__), "nonexistent_file.txt") - - response = self.operations.isfile(filename) - - assert response is False - - def test_isfile_false__directory(self): - """ - Test isfile for a firectory. - """ - name = os.path.dirname(__file__) - - assert self.operations.isdir(name) - - response = self.operations.isfile(name) - - assert response is False - - def test_isdir_true(self): - """ - Test isdir for an existing directory. - """ - name = os.path.dirname(__file__) - - response = self.operations.isdir(name) - - assert response is True - - def test_isdir_false__not_exist(self): - """ - Test isdir for a non-existing directory. 
- """ - name = os.path.join(os.path.dirname(__file__), "it_is_nonexistent_directory") - - response = self.operations.isdir(name) - - assert response is False - - def test_isdir_false__file(self): - """ - Test isdir for a file. - """ - name = __file__ - - assert self.operations.isfile(name) - - response = self.operations.isdir(name) - - assert response is False - - def test_cwd(self): - """ - Test cwd. - """ - v = self.operations.cwd() - - assert v is not None - assert type(v) == str # noqa: E721 - assert v != "" - - class tagWriteData001: - def __init__(self, sign, source, cp_rw, cp_truncate, cp_binary, cp_data, result): - self.sign = sign - self.source = source - self.call_param__rw = cp_rw - self.call_param__truncate = cp_truncate - self.call_param__binary = cp_binary - self.call_param__data = cp_data - self.result = result - - sm_write_data001 = [ - tagWriteData001("A001", "1234567890", False, False, False, "ABC", "1234567890ABC"), - tagWriteData001("A002", b"1234567890", False, False, True, b"ABC", b"1234567890ABC"), - - tagWriteData001("B001", "1234567890", False, True, False, "ABC", "ABC"), - tagWriteData001("B002", "1234567890", False, True, False, "ABC1234567890", "ABC1234567890"), - tagWriteData001("B003", b"1234567890", False, True, True, b"ABC", b"ABC"), - tagWriteData001("B004", b"1234567890", False, True, True, b"ABC1234567890", b"ABC1234567890"), - - tagWriteData001("C001", "1234567890", True, False, False, "ABC", "1234567890ABC"), - tagWriteData001("C002", b"1234567890", True, False, True, b"ABC", b"1234567890ABC"), - - tagWriteData001("D001", "1234567890", True, True, False, "ABC", "ABC"), - tagWriteData001("D002", "1234567890", True, True, False, "ABC1234567890", "ABC1234567890"), - tagWriteData001("D003", b"1234567890", True, True, True, b"ABC", b"ABC"), - tagWriteData001("D004", b"1234567890", True, True, True, b"ABC1234567890", b"ABC1234567890"), - - tagWriteData001("E001", "\0001234567890\000", False, False, False, "\000ABC\000", "\0001234567890\000\000ABC\000"), - tagWriteData001("E002", b"\0001234567890\000", False, False, True, b"\000ABC\000", b"\0001234567890\000\000ABC\000"), - - tagWriteData001("F001", "a\nb\n", False, False, False, ["c", "d"], "a\nb\nc\nd\n"), - tagWriteData001("F002", b"a\nb\n", False, False, True, [b"c", b"d"], b"a\nb\nc\nd\n"), - - tagWriteData001("G001", "a\nb\n", False, False, False, ["c\n\n", "d\n"], "a\nb\nc\nd\n"), - tagWriteData001("G002", b"a\nb\n", False, False, True, [b"c\n\n", b"d\n"], b"a\nb\nc\nd\n"), - ] - - @pytest.fixture( - params=sm_write_data001, - ids=[x.sign for x in sm_write_data001], - ) - def write_data001(self, request): - assert isinstance(request, pytest.FixtureRequest) - assert type(request.param) == __class__.tagWriteData001 # noqa: E721 - return request.param - - def test_write(self, write_data001): - assert type(write_data001) == __class__.tagWriteData001 # noqa: E721 - - mode = "w+b" if write_data001.call_param__binary else "w+" - - with tempfile.NamedTemporaryFile(mode=mode, delete=True) as tmp_file: - tmp_file.write(write_data001.source) - tmp_file.flush() - - self.operations.write( - tmp_file.name, - write_data001.call_param__data, - read_and_write=write_data001.call_param__rw, - truncate=write_data001.call_param__truncate, - binary=write_data001.call_param__binary) - - tmp_file.seek(0) - - s = tmp_file.read() - - assert s == write_data001.result From 1e4a1e37aa8fba5f5254a5a8fd00b2028cfaf4de Mon Sep 17 00:00:00 2001 From: Dmitry Kovalenko Date: Wed, 2 Apr 2025 17:58:33 +0300 Subject: [PATCH 62/90] Refactoring 
of tests (#232) * TestLocalOperations is refactored * TestRemoteOperations is refactored --- tests/test_local.py | 31 ++++++++++++++++------------- tests/test_remote.py | 47 ++++++++++++++++++++++++-------------------- 2 files changed, 43 insertions(+), 35 deletions(-) diff --git a/tests/test_local.py b/tests/test_local.py index 7b5e488d..e82df989 100644 --- a/tests/test_local.py +++ b/tests/test_local.py @@ -1,27 +1,27 @@ # coding: utf-8 +from .helpers.os_ops_descrs import OsOpsDescrs +from .helpers.os_ops_descrs import OsOperations + import os import pytest import re -from ..testgres import LocalOperations - class TestLocalOperations: + @pytest.fixture + def os_ops(self): + return OsOpsDescrs.sm_local_os_ops - @pytest.fixture(scope="function", autouse=True) - def setup(self): - self.operations = LocalOperations() - - def test_read__unknown_file(self): + def test_read__unknown_file(self, os_ops: OsOperations): """ Test LocalOperations::read with unknown file. """ with pytest.raises(FileNotFoundError, match=re.escape("[Errno 2] No such file or directory: '/dummy'")): - self.operations.read("/dummy") + os_ops.read("/dummy") - def test_read_binary__spec__unk_file(self): + def test_read_binary__spec__unk_file(self, os_ops: OsOperations): """ Test LocalOperations::read_binary with unknown file. """ @@ -29,21 +29,24 @@ def test_read_binary__spec__unk_file(self): with pytest.raises( FileNotFoundError, match=re.escape("[Errno 2] No such file or directory: '/dummy'")): - self.operations.read_binary("/dummy", 0) + os_ops.read_binary("/dummy", 0) - def test_get_file_size__unk_file(self): + def test_get_file_size__unk_file(self, os_ops: OsOperations): """ Test LocalOperations::get_file_size. """ + assert isinstance(os_ops, OsOperations) with pytest.raises(FileNotFoundError, match=re.escape("[Errno 2] No such file or directory: '/dummy'")): - self.operations.get_file_size("/dummy") + os_ops.get_file_size("/dummy") - def test_cwd(self): + def test_cwd(self, os_ops: OsOperations): """ Test cwd. 
""" - v = self.operations.cwd() + assert isinstance(os_ops, OsOperations) + + v = os_ops.cwd() assert v is not None assert type(v) == str # noqa: E721 diff --git a/tests/test_remote.py b/tests/test_remote.py index 565b2d20..a37e258e 100755 --- a/tests/test_remote.py +++ b/tests/test_remote.py @@ -1,33 +1,35 @@ # coding: utf-8 -import os -import pytest +from .helpers.os_ops_descrs import OsOpsDescrs +from .helpers.os_ops_descrs import OsOperations from ..testgres import ExecUtilException -from ..testgres import RemoteOperations -from ..testgres import ConnectionParams + +import os +import pytest class TestRemoteOperations: + @pytest.fixture + def os_ops(self): + return OsOpsDescrs.sm_remote_os_ops - @pytest.fixture(scope="function", autouse=True) - def setup(self): - conn_params = ConnectionParams(host=os.getenv('RDBMS_TESTPOOL1_HOST') or '127.0.0.1', - username=os.getenv('USER'), - ssh_key=os.getenv('RDBMS_TESTPOOL_SSHKEY')) - self.operations = RemoteOperations(conn_params) + def test_rmdirs__try_to_delete_nonexist_path(self, os_ops: OsOperations): + assert isinstance(os_ops, OsOperations) - def test_rmdirs__try_to_delete_nonexist_path(self): path = "/root/test_dir" - assert self.operations.rmdirs(path, ignore_errors=False) is True + assert os_ops.rmdirs(path, ignore_errors=False) is True + + def test_rmdirs__try_to_delete_file(self, os_ops: OsOperations): + assert isinstance(os_ops, OsOperations) - def test_rmdirs__try_to_delete_file(self): - path = self.operations.mkstemp() + path = os_ops.mkstemp() + assert type(path) == str # noqa: E721 assert os.path.exists(path) with pytest.raises(ExecUtilException) as x: - self.operations.rmdirs(path, ignore_errors=False) + os_ops.rmdirs(path, ignore_errors=False) assert os.path.exists(path) assert type(x.value) == ExecUtilException # noqa: E721 @@ -37,37 +39,40 @@ def test_rmdirs__try_to_delete_file(self): assert type(x.value.exit_code) == int # noqa: E721 assert x.value.exit_code == 20 - def test_read__unknown_file(self): + def test_read__unknown_file(self, os_ops: OsOperations): """ Test RemoteOperations::read with unknown file. """ + assert isinstance(os_ops, OsOperations) with pytest.raises(ExecUtilException) as x: - self.operations.read("/dummy") + os_ops.read("/dummy") assert "Utility exited with non-zero code (1)." in str(x.value) assert "No such file or directory" in str(x.value) assert "/dummy" in str(x.value) - def test_read_binary__spec__unk_file(self): + def test_read_binary__spec__unk_file(self, os_ops: OsOperations): """ Test RemoteOperations::read_binary with unknown file. """ + assert isinstance(os_ops, OsOperations) with pytest.raises(ExecUtilException) as x: - self.operations.read_binary("/dummy", 0) + os_ops.read_binary("/dummy", 0) assert "Utility exited with non-zero code (1)." in str(x.value) assert "No such file or directory" in str(x.value) assert "/dummy" in str(x.value) - def test_get_file_size__unk_file(self): + def test_get_file_size__unk_file(self, os_ops: OsOperations): """ Test RemoteOperations::get_file_size. """ + assert isinstance(os_ops, OsOperations) with pytest.raises(ExecUtilException) as x: - self.operations.get_file_size("/dummy") + os_ops.get_file_size("/dummy") assert "Utility exited with non-zero code (1)." 
in str(x.value) assert "No such file or directory" in str(x.value) From 46598b476888606c7b197f263f4d828253cf9d1d Mon Sep 17 00:00:00 2001 From: Dmitry Kovalenko Date: Wed, 2 Apr 2025 20:48:19 +0300 Subject: [PATCH 63/90] Test files and classes were renamed (#233) CI for AltLinux (10, 11) tests all the "local" cases. --- Dockerfile--altlinux_10.tmpl | 2 +- Dockerfile--altlinux_11.tmpl | 2 +- run_tests.sh | 2 +- tests/{test_local.py => test_os_ops_local.py} | 2 +- tests/{test_remote.py => test_os_ops_remote.py} | 2 +- tests/{test_simple.py => test_testgres_local.py} | 2 +- tests/{test_simple_remote.py => test_testgres_remote.py} | 2 +- 7 files changed, 7 insertions(+), 7 deletions(-) rename tests/{test_local.py => test_os_ops_local.py} (98%) rename tests/{test_remote.py => test_os_ops_remote.py} (98%) rename tests/{test_simple.py => test_testgres_local.py} (99%) rename tests/{test_simple_remote.py => test_testgres_remote.py} (99%) diff --git a/Dockerfile--altlinux_10.tmpl b/Dockerfile--altlinux_10.tmpl index e60e9320..a75e35a0 100644 --- a/Dockerfile--altlinux_10.tmpl +++ b/Dockerfile--altlinux_10.tmpl @@ -115,4 +115,4 @@ ssh-keygen -t rsa -f ~/.ssh/id_rsa -q -N ''; \ cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys; \ chmod 600 ~/.ssh/authorized_keys; \ ls -la ~/.ssh/; \ -TEST_FILTER=\"TestgresTests or (TestTestgresCommon and (not remote_ops))\" bash ./run_tests.sh;" +TEST_FILTER=\"TestTestgresLocal or TestOsOpsLocal or local_ops\" bash ./run_tests.sh;" diff --git a/Dockerfile--altlinux_11.tmpl b/Dockerfile--altlinux_11.tmpl index 4b591632..5b43da20 100644 --- a/Dockerfile--altlinux_11.tmpl +++ b/Dockerfile--altlinux_11.tmpl @@ -115,4 +115,4 @@ ssh-keygen -t rsa -f ~/.ssh/id_rsa -q -N ''; \ cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys; \ chmod 600 ~/.ssh/authorized_keys; \ ls -la ~/.ssh/; \ -TEST_FILTER=\"TestgresTests or (TestTestgresCommon and (not remote_ops))\" bash ./run_tests.sh;" +TEST_FILTER=\"TestTestgresLocal or TestOsOpsLocal or local_ops\" bash ./run_tests.sh;" diff --git a/run_tests.sh b/run_tests.sh index a40a97cf..8202aff5 100755 --- a/run_tests.sh +++ b/run_tests.sh @@ -5,7 +5,7 @@ set -eux if [ -z ${TEST_FILTER+x} ]; \ -then export TEST_FILTER="TestgresTests or (TestTestgresCommon and (not remote_ops))"; \ +then export TEST_FILTER="TestTestgresLocal or (TestTestgresCommon and (not remote_ops))"; \ fi # fail early diff --git a/tests/test_local.py b/tests/test_os_ops_local.py similarity index 98% rename from tests/test_local.py rename to tests/test_os_ops_local.py index e82df989..2e3c30b7 100644 --- a/tests/test_local.py +++ b/tests/test_os_ops_local.py @@ -8,7 +8,7 @@ import re -class TestLocalOperations: +class TestOsOpsLocal: @pytest.fixture def os_ops(self): return OsOpsDescrs.sm_local_os_ops diff --git a/tests/test_remote.py b/tests/test_os_ops_remote.py similarity index 98% rename from tests/test_remote.py rename to tests/test_os_ops_remote.py index a37e258e..58b09242 100755 --- a/tests/test_remote.py +++ b/tests/test_os_ops_remote.py @@ -9,7 +9,7 @@ import pytest -class TestRemoteOperations: +class TestOsOpsRemote: @pytest.fixture def os_ops(self): return OsOpsDescrs.sm_remote_os_ops diff --git a/tests/test_simple.py b/tests/test_testgres_local.py similarity index 99% rename from tests/test_simple.py rename to tests/test_testgres_local.py index 6ca52cb0..01f975a0 100644 --- a/tests/test_simple.py +++ b/tests/test_testgres_local.py @@ -74,7 +74,7 @@ def rm_carriage_returns(out): return out -class TestgresTests: +class TestTestgresLocal: def test_node_repr(self): with 
get_new_node() as node: pattern = r"PostgresNode\(name='.+', port=.+, base_dir='.+'\)" diff --git a/tests/test_simple_remote.py b/tests/test_testgres_remote.py similarity index 99% rename from tests/test_simple_remote.py rename to tests/test_testgres_remote.py index c16fe53f..2142e5ba 100755 --- a/tests/test_simple_remote.py +++ b/tests/test_testgres_remote.py @@ -47,7 +47,7 @@ def good_properties(f): return True -class TestgresRemoteTests: +class TestTestgresRemote: sm_os_ops = OsOpsDescrs.sm_remote_os_ops @pytest.fixture(autouse=True, scope="class") From ecd5427b7db2181a8b36b01797576d837e05eb46 Mon Sep 17 00:00:00 2001 From: "d.kovalenko" Date: Wed, 2 Apr 2025 21:55:28 +0300 Subject: [PATCH 64/90] PostgresNode::source_walsender is updated [revision] --- testgres/node.py | 4 +++- tests/test_testgres_common.py | 1 + 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/testgres/node.py b/testgres/node.py index c8ae4204..4ab98ea1 100644 --- a/testgres/node.py +++ b/testgres/node.py @@ -431,9 +431,11 @@ def source_walsender(self): where application_name = %s """ - if not self.master: + if self.master is None: raise TestgresException("Node doesn't have a master") + assert type(self.master) == PostgresNode + # master should be on the same host assert self.master.host == self.host diff --git a/tests/test_testgres_common.py b/tests/test_testgres_common.py index 2440b8f0..7cfb203e 100644 --- a/tests/test_testgres_common.py +++ b/tests/test_testgres_common.py @@ -348,6 +348,7 @@ def LOCAL__check_auxiliary_pids__multiple_attempts( assert (con.pid > 0) with master.replicate().start() as replica: + assert type(replica) == PostgresNode # test __str__ method str(master.child_processes[0]) From afee4a55a04893240fe674db7b134b646b005531 Mon Sep 17 00:00:00 2001 From: "d.kovalenko" Date: Wed, 2 Apr 2025 22:03:55 +0300 Subject: [PATCH 65/90] [FIX] formatting (flake8) --- testgres/node.py | 2 +- tests/test_testgres_common.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/testgres/node.py b/testgres/node.py index 4ab98ea1..1f8fca6e 100644 --- a/testgres/node.py +++ b/testgres/node.py @@ -434,7 +434,7 @@ def source_walsender(self): if self.master is None: raise TestgresException("Node doesn't have a master") - assert type(self.master) == PostgresNode + assert type(self.master) == PostgresNode # noqa: E721 # master should be on the same host assert self.master.host == self.host diff --git a/tests/test_testgres_common.py b/tests/test_testgres_common.py index 7cfb203e..4e23c4af 100644 --- a/tests/test_testgres_common.py +++ b/tests/test_testgres_common.py @@ -348,7 +348,7 @@ def LOCAL__check_auxiliary_pids__multiple_attempts( assert (con.pid > 0) with master.replicate().start() as replica: - assert type(replica) == PostgresNode + assert type(replica) == PostgresNode # noqa: E721 # test __str__ method str(master.child_processes[0]) From 6e7d315ab45a3065d1440819a369f7781c54a87e Mon Sep 17 00:00:00 2001 From: "d.kovalenko" Date: Sat, 5 Apr 2025 20:23:55 +0300 Subject: [PATCH 66/90] [FIX] The call of RaiseError.CommandExecutionError is corrected [message arg] --- testgres/operations/remote_ops.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/testgres/operations/remote_ops.py b/testgres/operations/remote_ops.py index e1ad6dac..a3ecf637 100644 --- a/testgres/operations/remote_ops.py +++ b/testgres/operations/remote_ops.py @@ -193,7 +193,7 @@ def is_executable(self, file): RaiseError.CommandExecutionError( cmd=command, exit_code=exit_status, - msg_arg=errMsg, + 
message=errMsg, error=error, out=output ) @@ -305,7 +305,7 @@ def path_exists(self, path): RaiseError.CommandExecutionError( cmd=command, exit_code=exit_status, - msg_arg=errMsg, + message=errMsg, error=error, out=output ) From 14bc733db60712c99df20faea3efe659f9d9dafb Mon Sep 17 00:00:00 2001 From: Dmitry Kovalenko Date: Sun, 6 Apr 2025 19:14:21 +0300 Subject: [PATCH 67/90] PortManager (#234) Main - Old PortManager was deleted - PostresNode uses new "abstract" interface PortManager to reserve/release port number. - PostgresNode.free_port always resets port number - OsOperations::is_free_port is added - PostgresNode.start raises exception (InvalidOperationException) if node is None Refactoring - PostgresNode::port is RO-property now. It throws if port is None - PostgresNode::host is RO-property now - PostgresNode::ssh_key is RO-property now - PostgresNode::name is RO-property now --- Dockerfile--altlinux_10.tmpl | 2 +- Dockerfile--altlinux_11.tmpl | 2 +- Dockerfile--ubuntu_24_04.tmpl | 1 + run_tests.sh | 2 +- setup.py | 2 +- testgres/__init__.py | 6 +- testgres/exceptions.py | 4 + testgres/helpers/__init__.py | 0 testgres/helpers/port_manager.py | 41 --- testgres/node.py | 167 +++++++-- testgres/operations/local_ops.py | 11 + testgres/operations/os_ops.py | 4 + testgres/operations/remote_ops.py | 48 +++ testgres/port_manager.py | 102 +++++ testgres/utils.py | 32 +- tests/helpers/global_data.py | 78 ++++ tests/helpers/os_ops_descrs.py | 32 -- tests/test_os_ops_common.py | 105 +++++- tests/test_os_ops_local.py | 4 +- tests/test_os_ops_remote.py | 4 +- tests/test_testgres_common.py | 592 ++++++++++++++++++++++-------- tests/test_testgres_local.py | 57 --- tests/test_testgres_remote.py | 128 +------ 23 files changed, 993 insertions(+), 431 deletions(-) delete mode 100644 testgres/helpers/__init__.py delete mode 100644 testgres/helpers/port_manager.py create mode 100644 testgres/port_manager.py create mode 100644 tests/helpers/global_data.py delete mode 100644 tests/helpers/os_ops_descrs.py diff --git a/Dockerfile--altlinux_10.tmpl b/Dockerfile--altlinux_10.tmpl index a75e35a0..d78b05f5 100644 --- a/Dockerfile--altlinux_10.tmpl +++ b/Dockerfile--altlinux_10.tmpl @@ -115,4 +115,4 @@ ssh-keygen -t rsa -f ~/.ssh/id_rsa -q -N ''; \ cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys; \ chmod 600 ~/.ssh/authorized_keys; \ ls -la ~/.ssh/; \ -TEST_FILTER=\"TestTestgresLocal or TestOsOpsLocal or local_ops\" bash ./run_tests.sh;" +TEST_FILTER=\"TestTestgresLocal or TestOsOpsLocal or local\" bash ./run_tests.sh;" diff --git a/Dockerfile--altlinux_11.tmpl b/Dockerfile--altlinux_11.tmpl index 5b43da20..5c88585d 100644 --- a/Dockerfile--altlinux_11.tmpl +++ b/Dockerfile--altlinux_11.tmpl @@ -115,4 +115,4 @@ ssh-keygen -t rsa -f ~/.ssh/id_rsa -q -N ''; \ cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys; \ chmod 600 ~/.ssh/authorized_keys; \ ls -la ~/.ssh/; \ -TEST_FILTER=\"TestTestgresLocal or TestOsOpsLocal or local_ops\" bash ./run_tests.sh;" +TEST_FILTER=\"TestTestgresLocal or TestOsOpsLocal or local\" bash ./run_tests.sh;" diff --git a/Dockerfile--ubuntu_24_04.tmpl b/Dockerfile--ubuntu_24_04.tmpl index 3bdc6640..7a559776 100644 --- a/Dockerfile--ubuntu_24_04.tmpl +++ b/Dockerfile--ubuntu_24_04.tmpl @@ -10,6 +10,7 @@ RUN apt install -y sudo curl ca-certificates RUN apt update RUN apt install -y openssh-server RUN apt install -y time +RUN apt install -y netcat-traditional RUN apt update RUN apt install -y postgresql-common diff --git a/run_tests.sh b/run_tests.sh index 8202aff5..65c17dbf 100755 --- 
a/run_tests.sh +++ b/run_tests.sh @@ -5,7 +5,7 @@ set -eux if [ -z ${TEST_FILTER+x} ]; \ -then export TEST_FILTER="TestTestgresLocal or (TestTestgresCommon and (not remote_ops))"; \ +then export TEST_FILTER="TestTestgresLocal or (TestTestgresCommon and (not remote))"; \ fi # fail early diff --git a/setup.py b/setup.py index 3f2474dd..b47a1d8a 100755 --- a/setup.py +++ b/setup.py @@ -29,7 +29,7 @@ setup( version='1.10.5', name='testgres', - packages=['testgres', 'testgres.operations', 'testgres.helpers'], + packages=['testgres', 'testgres.operations'], description='Testing utility for PostgreSQL and its extensions', url='https://github.com/postgrespro/testgres', long_description=readme, diff --git a/testgres/__init__.py b/testgres/__init__.py index 665548d6..339ae62e 100644 --- a/testgres/__init__.py +++ b/testgres/__init__.py @@ -34,6 +34,7 @@ DumpFormat from .node import PostgresNode, NodeApp +from .node import PortManager from .utils import \ reserve_port, \ @@ -53,8 +54,6 @@ from .operations.local_ops import LocalOperations from .operations.remote_ops import RemoteOperations -from .helpers.port_manager import PortManager - __all__ = [ "get_new_node", "get_remote_node", @@ -64,7 +63,8 @@ "TestgresException", "ExecUtilException", "QueryException", "TimeoutException", "CatchUpException", "StartNodeException", "InitNodeException", "BackupException", "InvalidOperationException", "XLogMethod", "IsolationLevel", "NodeStatus", "ProcessType", "DumpFormat", "PostgresNode", "NodeApp", + "PortManager", "reserve_port", "release_port", "bound_ports", "get_bin_path", "get_pg_config", "get_pg_version", - "First", "Any", "PortManager", + "First", "Any", "OsOperations", "LocalOperations", "RemoteOperations", "ConnectionParams" ] diff --git a/testgres/exceptions.py b/testgres/exceptions.py index d61d4691..20c1a8cf 100644 --- a/testgres/exceptions.py +++ b/testgres/exceptions.py @@ -7,6 +7,10 @@ class TestgresException(Exception): pass +class PortForException(TestgresException): + pass + + @six.python_2_unicode_compatible class ExecUtilException(TestgresException): def __init__(self, message=None, command=None, exit_code=0, out=None, error=None): diff --git a/testgres/helpers/__init__.py b/testgres/helpers/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/testgres/helpers/port_manager.py b/testgres/helpers/port_manager.py deleted file mode 100644 index cfc5c096..00000000 --- a/testgres/helpers/port_manager.py +++ /dev/null @@ -1,41 +0,0 @@ -import socket -import random -from typing import Set, Iterable, Optional - - -class PortForException(Exception): - pass - - -class PortManager: - def __init__(self, ports_range=(1024, 65535)): - self.ports_range = ports_range - - @staticmethod - def is_port_free(port: int) -> bool: - """Check if a port is free to use.""" - with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s: - try: - s.bind(("", port)) - return True - except OSError: - return False - - def find_free_port(self, ports: Optional[Set[int]] = None, exclude_ports: Optional[Iterable[int]] = None) -> int: - """Return a random unused port number.""" - if ports is None: - ports = set(range(1024, 65535)) - - assert type(ports) == set # noqa: E721 - - if exclude_ports is not None: - assert isinstance(exclude_ports, Iterable) - ports.difference_update(exclude_ports) - - sampled_ports = random.sample(tuple(ports), min(len(ports), 100)) - - for port in sampled_ports: - if self.is_port_free(port): - return port - - raise PortForException("Can't select a port") diff --git 
a/testgres/node.py b/testgres/node.py index 1f8fca6e..5039fc43 100644 --- a/testgres/node.py +++ b/testgres/node.py @@ -1,4 +1,6 @@ # coding: utf-8 +from __future__ import annotations + import logging import os import random @@ -10,6 +12,7 @@ from queue import Queue import time +import typing try: from collections.abc import Iterable @@ -80,14 +83,16 @@ BackupException, \ InvalidOperationException +from .port_manager import PortManager +from .port_manager import PortManager__ThisHost +from .port_manager import PortManager__Generic + from .logger import TestgresLogger from .pubsub import Publication, Subscription from .standby import First -from . import utils - from .utils import \ PgVer, \ eprint, \ @@ -126,17 +131,31 @@ def __getattr__(self, name): return getattr(self.process, name) def __repr__(self): - return '{}(ptype={}, process={})'.format(self.__class__.__name__, - str(self.ptype), - repr(self.process)) + return '{}(ptype={}, process={})'.format( + self.__class__.__name__, + str(self.ptype), + repr(self.process)) class PostgresNode(object): # a max number of node start attempts _C_MAX_START_ATEMPTS = 5 - def __init__(self, name=None, base_dir=None, port=None, conn_params: ConnectionParams = ConnectionParams(), - bin_dir=None, prefix=None, os_ops=None): + _name: typing.Optional[str] + _port: typing.Optional[int] + _should_free_port: bool + _os_ops: OsOperations + _port_manager: PortManager + + def __init__(self, + name=None, + base_dir=None, + port: typing.Optional[int] = None, + conn_params: ConnectionParams = ConnectionParams(), + bin_dir=None, + prefix=None, + os_ops: typing.Optional[OsOperations] = None, + port_manager: typing.Optional[PortManager] = None): """ PostgresNode constructor. @@ -145,21 +164,26 @@ def __init__(self, name=None, base_dir=None, port=None, conn_params: ConnectionP port: port to accept connections. base_dir: path to node's data directory. bin_dir: path to node's binary directory. + os_ops: None or correct OS operation object. + port_manager: None or correct port manager object. 
""" + assert port is None or type(port) == int # noqa: E721 + assert os_ops is None or isinstance(os_ops, OsOperations) + assert port_manager is None or isinstance(port_manager, PortManager) # private if os_ops is None: - os_ops = __class__._get_os_ops(conn_params) + self._os_ops = __class__._get_os_ops(conn_params) else: assert conn_params is None + assert isinstance(os_ops, OsOperations) + self._os_ops = os_ops pass - assert os_ops is not None - assert isinstance(os_ops, OsOperations) - self._os_ops = os_ops + assert self._os_ops is not None + assert isinstance(self._os_ops, OsOperations) - self._pg_version = PgVer(get_pg_version2(os_ops, bin_dir)) - self._should_free_port = port is None + self._pg_version = PgVer(get_pg_version2(self._os_ops, bin_dir)) self._base_dir = base_dir self._bin_dir = bin_dir self._prefix = prefix @@ -167,12 +191,29 @@ def __init__(self, name=None, base_dir=None, port=None, conn_params: ConnectionP self._master = None # basic - self.name = name or generate_app_name() + self._name = name or generate_app_name() + + if port is not None: + assert type(port) == int # noqa: E721 + assert port_manager is None + self._port = port + self._should_free_port = False + self._port_manager = None + else: + if port_manager is not None: + assert isinstance(port_manager, PortManager) + self._port_manager = port_manager + else: + self._port_manager = __class__._get_port_manager(self._os_ops) - self.host = os_ops.host - self.port = port or utils.reserve_port() + assert self._port_manager is not None + assert isinstance(self._port_manager, PortManager) - self.ssh_key = os_ops.ssh_key + self._port = self._port_manager.reserve_port() # raises + assert type(self._port) == int # noqa: E721 + self._should_free_port = True + + assert type(self._port) == int # noqa: E721 # defaults for __exit__() self.cleanup_on_good_exit = testgres_config.node_cleanup_on_good_exit @@ -207,7 +248,11 @@ def __exit__(self, type, value, traceback): def __repr__(self): return "{}(name='{}', port={}, base_dir='{}')".format( - self.__class__.__name__, self.name, self.port, self.base_dir) + self.__class__.__name__, + self.name, + str(self._port) if self._port is not None else "None", + self.base_dir + ) @staticmethod def _get_os_ops(conn_params: ConnectionParams) -> OsOperations: @@ -221,19 +266,39 @@ def _get_os_ops(conn_params: ConnectionParams) -> OsOperations: return LocalOperations(conn_params) + @staticmethod + def _get_port_manager(os_ops: OsOperations) -> PortManager: + assert os_ops is not None + assert isinstance(os_ops, OsOperations) + + if isinstance(os_ops, LocalOperations): + return PortManager__ThisHost() + + # TODO: Throw the exception "Please define a port manager." ? 
+ return PortManager__Generic(os_ops) + def clone_with_new_name_and_base_dir(self, name: str, base_dir: str): assert name is None or type(name) == str # noqa: E721 assert base_dir is None or type(base_dir) == str # noqa: E721 assert __class__ == PostgresNode + if self._port_manager is None: + raise InvalidOperationException("PostgresNode without PortManager can't be cloned.") + + assert self._port_manager is not None + assert isinstance(self._port_manager, PortManager) + assert self._os_ops is not None + assert isinstance(self._os_ops, OsOperations) + node = PostgresNode( name=name, base_dir=base_dir, conn_params=None, bin_dir=self._bin_dir, prefix=self._prefix, - os_ops=self._os_ops) + os_ops=self._os_ops, + port_manager=self._port_manager) return node @@ -243,6 +308,33 @@ def os_ops(self) -> OsOperations: assert isinstance(self._os_ops, OsOperations) return self._os_ops + @property + def name(self) -> str: + if self._name is None: + raise InvalidOperationException("PostgresNode name is not defined.") + assert type(self._name) == str # noqa: E721 + return self._name + + @property + def host(self) -> str: + assert self._os_ops is not None + assert isinstance(self._os_ops, OsOperations) + return self._os_ops.host + + @property + def port(self) -> int: + if self._port is None: + raise InvalidOperationException("PostgresNode port is not defined.") + + assert type(self._port) == int # noqa: E721 + return self._port + + @property + def ssh_key(self) -> typing.Optional[str]: + assert self._os_ops is not None + assert isinstance(self._os_ops, OsOperations) + return self._os_ops.ssh_key + @property def pid(self): """ @@ -993,6 +1085,11 @@ def start(self, params=[], wait=True): if self.is_started: return self + if self._port is None: + raise InvalidOperationException("Can't start PostgresNode. Port is not defined.") + + assert type(self._port) == int # noqa: E721 + _params = [self._get_bin_path("pg_ctl"), "-D", self.data_dir, "-l", self.pg_log_file, @@ -1023,6 +1120,8 @@ def LOCAL__raise_cannot_start_node__std(from_exception): LOCAL__raise_cannot_start_node__std(e) else: assert self._should_free_port + assert self._port_manager is not None + assert isinstance(self._port_manager, PortManager) assert __class__._C_MAX_START_ATEMPTS > 1 log_files0 = self._collect_log_files() @@ -1048,20 +1147,20 @@ def LOCAL__raise_cannot_start_node__std(from_exception): log_files0 = log_files1 logging.warning( - "Detected a conflict with using the port {0}. Trying another port after a {1}-second sleep...".format(self.port, timeout) + "Detected a conflict with using the port {0}. Trying another port after a {1}-second sleep...".format(self._port, timeout) ) time.sleep(timeout) timeout = min(2 * timeout, 5) - cur_port = self.port - new_port = utils.reserve_port() # can raise + cur_port = self._port + new_port = self._port_manager.reserve_port() # can raise try: options = {'port': new_port} self.set_auto_conf(options) except: # noqa: E722 - utils.release_port(new_port) + self._port_manager.release_port(new_port) raise - self.port = new_port - utils.release_port(cur_port) + self._port = new_port + self._port_manager.release_port(cur_port) continue break self._maybe_start_logger() @@ -1222,14 +1321,22 @@ def pg_ctl(self, params): def free_port(self): """ Reclaim port owned by this node. - NOTE: does not free auto selected ports. + NOTE: this method does not release manually defined port but reset it. 
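        [Editor's note, not part of the patch] A minimal sketch of the new contract,
        assuming a PostgresNode instance named `node` whose port is no longer needed:
        after free_port() the port number is always reset, so reading node.port
        afterwards raises InvalidOperationException.

            node.free_port()
            try:
                _ = node.port
            except InvalidOperationException:
                pass  # expected: the port is no longer defined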
""" + assert type(self._should_free_port) == bool # noqa: E721 + + if not self._should_free_port: + self._port = None + else: + assert type(self._port) == int # noqa: E721 + + assert self._port_manager is not None + assert isinstance(self._port_manager, PortManager) - if self._should_free_port: - port = self.port + port = self._port self._should_free_port = False - self.port = None - utils.release_port(port) + self._port = None + self._port_manager.release_port(port) def cleanup(self, max_attempts=3, full=False): """ diff --git a/testgres/operations/local_ops.py b/testgres/operations/local_ops.py index 35e94210..39c81405 100644 --- a/testgres/operations/local_ops.py +++ b/testgres/operations/local_ops.py @@ -6,6 +6,7 @@ import subprocess import tempfile import time +import socket import psutil @@ -436,6 +437,16 @@ def get_process_children(self, pid): assert type(pid) == int # noqa: E721 return psutil.Process(pid).children() + def is_port_free(self, number: int) -> bool: + assert type(number) == int # noqa: E721 + + with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s: + try: + s.bind(("", number)) + return True + except OSError: + return False + # Database control def db_connect(self, dbname, user, password=None, host="localhost", port=5432): conn = pglib.connect( diff --git a/testgres/operations/os_ops.py b/testgres/operations/os_ops.py index 3c606871..489a7cb2 100644 --- a/testgres/operations/os_ops.py +++ b/testgres/operations/os_ops.py @@ -127,6 +127,10 @@ def get_pid(self): def get_process_children(self, pid): raise NotImplementedError() + def is_port_free(self, number: int): + assert type(number) == int # noqa: E721 + raise NotImplementedError() + # Database control def db_connect(self, dbname, user, password=None, host="localhost", port=5432): raise NotImplementedError() diff --git a/testgres/operations/remote_ops.py b/testgres/operations/remote_ops.py index a3ecf637..ee747e52 100644 --- a/testgres/operations/remote_ops.py +++ b/testgres/operations/remote_ops.py @@ -629,6 +629,54 @@ def get_process_children(self, pid): raise ExecUtilException(f"Error in getting process children. Error: {result.stderr}") + def is_port_free(self, number: int) -> bool: + assert type(number) == int # noqa: E721 + + cmd = ["nc", "-w", "5", "-z", "-v", "localhost", str(number)] + + exit_status, output, error = self.exec_command(cmd=cmd, encoding=get_default_encoding(), ignore_errors=True, verbose=True) + + assert type(output) == str # noqa: E721 + assert type(error) == str # noqa: E721 + + if exit_status == 0: + return __class__._is_port_free__process_0(error) + + if exit_status == 1: + return __class__._is_port_free__process_1(error) + + errMsg = "nc returns an unknown result code: {0}".format(exit_status) + + RaiseError.CommandExecutionError( + cmd=cmd, + exit_code=exit_status, + message=errMsg, + error=error, + out=output + ) + + @staticmethod + def _is_port_free__process_0(error: str) -> bool: + assert type(error) == str # noqa: E721 + # + # Example of error text: + # "Connection to localhost (127.0.0.1) 1024 port [tcp/*] succeeded!\n" + # + # May be here is needed to check error message? + # + return False + + @staticmethod + def _is_port_free__process_1(error: str) -> bool: + assert type(error) == str # noqa: E721 + # + # Example of error text: + # "nc: connect to localhost (127.0.0.1) port 1024 (tcp) failed: Connection refused\n" + # + # May be here is needed to check error message? 
+ # + return True + # Database control def db_connect(self, dbname, user, password=None, host="localhost", port=5432): conn = pglib.connect( diff --git a/testgres/port_manager.py b/testgres/port_manager.py new file mode 100644 index 00000000..164661e7 --- /dev/null +++ b/testgres/port_manager.py @@ -0,0 +1,102 @@ +from .operations.os_ops import OsOperations + +from .exceptions import PortForException + +from . import utils + +import threading +import random + + +class PortManager: + def __init__(self): + super().__init__() + + def reserve_port(self) -> int: + raise NotImplementedError("PortManager::reserve_port is not implemented.") + + def release_port(self, number: int) -> None: + assert type(number) == int # noqa: E721 + raise NotImplementedError("PortManager::release_port is not implemented.") + + +class PortManager__ThisHost(PortManager): + sm_single_instance: PortManager = None + sm_single_instance_guard = threading.Lock() + + def __init__(self): + pass + + def __new__(cls) -> PortManager: + assert __class__ == PortManager__ThisHost + assert __class__.sm_single_instance_guard is not None + + if __class__.sm_single_instance is None: + with __class__.sm_single_instance_guard: + __class__.sm_single_instance = super().__new__(cls) + assert __class__.sm_single_instance + assert type(__class__.sm_single_instance) == __class__ # noqa: E721 + return __class__.sm_single_instance + + def reserve_port(self) -> int: + return utils.reserve_port() + + def release_port(self, number: int) -> None: + assert type(number) == int # noqa: E721 + return utils.release_port(number) + + +class PortManager__Generic(PortManager): + _os_ops: OsOperations + _guard: object + # TODO: is there better to use bitmap fot _available_ports? + _available_ports: set[int] + _reserved_ports: set[int] + + def __init__(self, os_ops: OsOperations): + assert os_ops is not None + assert isinstance(os_ops, OsOperations) + self._os_ops = os_ops + self._guard = threading.Lock() + self._available_ports = set[int](range(1024, 65535)) + self._reserved_ports = set[int]() + + def reserve_port(self) -> int: + assert self._guard is not None + assert type(self._available_ports) == set # noqa: E721t + assert type(self._reserved_ports) == set # noqa: E721 + + with self._guard: + t = tuple(self._available_ports) + assert len(t) == len(self._available_ports) + sampled_ports = random.sample(t, min(len(t), 100)) + t = None + + for port in sampled_ports: + assert not (port in self._reserved_ports) + assert port in self._available_ports + + if not self._os_ops.is_port_free(port): + continue + + self._reserved_ports.add(port) + self._available_ports.discard(port) + assert port in self._reserved_ports + assert not (port in self._available_ports) + return port + + raise PortForException("Can't select a port.") + + def release_port(self, number: int) -> None: + assert type(number) == int # noqa: E721 + + assert self._guard is not None + assert type(self._reserved_ports) == set # noqa: E721 + + with self._guard: + assert number in self._reserved_ports + assert not (number in self._available_ports) + self._available_ports.add(number) + self._reserved_ports.discard(number) + assert not (number in self._reserved_ports) + assert number in self._available_ports diff --git a/testgres/utils.py b/testgres/utils.py index 92383571..10ae81b6 100644 --- a/testgres/utils.py +++ b/testgres/utils.py @@ -6,6 +6,8 @@ import os import sys +import socket +import random from contextlib import contextmanager from packaging.version import Version, InvalidVersion @@ -13,7 
+15,7 @@ from six import iteritems -from .helpers.port_manager import PortManager +from .exceptions import PortForException from .exceptions import ExecUtilException from .config import testgres_config as tconf from .operations.os_ops import OsOperations @@ -41,11 +43,28 @@ def internal__reserve_port(): """ Generate a new port and add it to 'bound_ports'. """ - port_mng = PortManager() - port = port_mng.find_free_port(exclude_ports=bound_ports) - bound_ports.add(port) + def LOCAL__is_port_free(port: int) -> bool: + """Check if a port is free to use.""" + with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s: + try: + s.bind(("", port)) + return True + except OSError: + return False - return port + ports = set(range(1024, 65535)) + assert type(ports) == set # noqa: E721 + assert type(bound_ports) == set # noqa: E721 + ports.difference_update(bound_ports) + + sampled_ports = random.sample(tuple(ports), min(len(ports), 100)) + + for port in sampled_ports: + if LOCAL__is_port_free(port): + bound_ports.add(port) + return port + + raise PortForException("Can't select a port") def internal__release_port(port): @@ -53,6 +72,9 @@ def internal__release_port(port): Free port provided by reserve_port(). """ + assert type(port) == int # noqa: E721 + assert port in bound_ports + bound_ports.discard(port) diff --git a/tests/helpers/global_data.py b/tests/helpers/global_data.py new file mode 100644 index 00000000..c21d7dd8 --- /dev/null +++ b/tests/helpers/global_data.py @@ -0,0 +1,78 @@ +from ...testgres.operations.os_ops import OsOperations +from ...testgres.operations.os_ops import ConnectionParams +from ...testgres.operations.local_ops import LocalOperations +from ...testgres.operations.remote_ops import RemoteOperations + +from ...testgres.node import PortManager +from ...testgres.node import PortManager__ThisHost +from ...testgres.node import PortManager__Generic + +import os + + +class OsOpsDescr: + sign: str + os_ops: OsOperations + + def __init__(self, sign: str, os_ops: OsOperations): + assert type(sign) == str # noqa: E721 + assert isinstance(os_ops, OsOperations) + self.sign = sign + self.os_ops = os_ops + + +class OsOpsDescrs: + sm_remote_conn_params = ConnectionParams( + host=os.getenv('RDBMS_TESTPOOL1_HOST') or '127.0.0.1', + username=os.getenv('USER'), + ssh_key=os.getenv('RDBMS_TESTPOOL_SSHKEY')) + + sm_remote_os_ops = RemoteOperations(sm_remote_conn_params) + + sm_remote_os_ops_descr = OsOpsDescr("remote_ops", sm_remote_os_ops) + + sm_local_os_ops = LocalOperations() + + sm_local_os_ops_descr = OsOpsDescr("local_ops", sm_local_os_ops) + + +class PortManagers: + sm_remote_port_manager = PortManager__Generic(OsOpsDescrs.sm_remote_os_ops) + + sm_local_port_manager = PortManager__ThisHost() + + sm_local2_port_manager = PortManager__Generic(OsOpsDescrs.sm_local_os_ops) + + +class PostgresNodeService: + sign: str + os_ops: OsOperations + port_manager: PortManager + + def __init__(self, sign: str, os_ops: OsOperations, port_manager: PortManager): + assert type(sign) == str # noqa: E721 + assert isinstance(os_ops, OsOperations) + assert isinstance(port_manager, PortManager) + self.sign = sign + self.os_ops = os_ops + self.port_manager = port_manager + + +class PostgresNodeServices: + sm_remote = PostgresNodeService( + "remote", + OsOpsDescrs.sm_remote_os_ops, + PortManagers.sm_remote_port_manager + ) + + sm_local = PostgresNodeService( + "local", + OsOpsDescrs.sm_local_os_ops, + PortManagers.sm_local_port_manager + ) + + sm_local2 = PostgresNodeService( + "local2", + 
OsOpsDescrs.sm_local_os_ops, + PortManagers.sm_local2_port_manager + ) diff --git a/tests/helpers/os_ops_descrs.py b/tests/helpers/os_ops_descrs.py deleted file mode 100644 index 02297adb..00000000 --- a/tests/helpers/os_ops_descrs.py +++ /dev/null @@ -1,32 +0,0 @@ -from ...testgres.operations.os_ops import OsOperations -from ...testgres.operations.os_ops import ConnectionParams -from ...testgres.operations.local_ops import LocalOperations -from ...testgres.operations.remote_ops import RemoteOperations - -import os - - -class OsOpsDescr: - os_ops: OsOperations - sign: str - - def __init__(self, os_ops: OsOperations, sign: str): - assert isinstance(os_ops, OsOperations) - assert type(sign) == str # noqa: E721 - self.os_ops = os_ops - self.sign = sign - - -class OsOpsDescrs: - sm_remote_conn_params = ConnectionParams( - host=os.getenv('RDBMS_TESTPOOL1_HOST') or '127.0.0.1', - username=os.getenv('USER'), - ssh_key=os.getenv('RDBMS_TESTPOOL_SSHKEY')) - - sm_remote_os_ops = RemoteOperations(sm_remote_conn_params) - - sm_remote_os_ops_descr = OsOpsDescr(sm_remote_os_ops, "remote_ops") - - sm_local_os_ops = LocalOperations() - - sm_local_os_ops_descr = OsOpsDescr(sm_local_os_ops, "local_ops") diff --git a/tests/test_os_ops_common.py b/tests/test_os_ops_common.py index c3944c3b..7d183775 100644 --- a/tests/test_os_ops_common.py +++ b/tests/test_os_ops_common.py @@ -1,7 +1,7 @@ # coding: utf-8 -from .helpers.os_ops_descrs import OsOpsDescr -from .helpers.os_ops_descrs import OsOpsDescrs -from .helpers.os_ops_descrs import OsOperations +from .helpers.global_data import OsOpsDescr +from .helpers.global_data import OsOpsDescrs +from .helpers.global_data import OsOperations from .helpers.run_conditions import RunConditions import os @@ -10,6 +10,8 @@ import re import tempfile import logging +import socket +import threading from ..testgres import InvalidOperationException from ..testgres import ExecUtilException @@ -648,3 +650,100 @@ def test_touch(self, os_ops: OsOperations): assert os_ops.isfile(filename) os_ops.remove_file(filename) + + def test_is_port_free__true(self, os_ops: OsOperations): + assert isinstance(os_ops, OsOperations) + + C_LIMIT = 128 + + ports = set(range(1024, 65535)) + assert type(ports) == set # noqa: E721 + + ok_count = 0 + no_count = 0 + + for port in ports: + with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s: + try: + s.bind(("", port)) + except OSError: + continue + + r = os_ops.is_port_free(port) + + if r: + ok_count += 1 + logging.info("OK. Port {} is free.".format(port)) + else: + no_count += 1 + logging.warning("NO. 
Port {} is not free.".format(port)) + + if ok_count == C_LIMIT: + return + + if no_count == C_LIMIT: + raise RuntimeError("To many false positive test attempts.") + + if ok_count == 0: + raise RuntimeError("No one free port was found.") + + def test_is_port_free__false(self, os_ops: OsOperations): + assert isinstance(os_ops, OsOperations) + + C_LIMIT = 10 + + ports = set(range(1024, 65535)) + assert type(ports) == set # noqa: E721 + + def LOCAL_server(s: socket.socket): + assert s is not None + assert type(s) == socket.socket # noqa: E721 + + try: + while True: + r = s.accept() + + if r is None: + break + except Exception as e: + assert e is not None + pass + + ok_count = 0 + no_count = 0 + + for port in ports: + with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s: + try: + s.bind(("", port)) + except OSError: + continue + + th = threading.Thread(target=LOCAL_server, args=[s]) + + s.listen(10) + + assert type(th) == threading.Thread # noqa: E721 + th.start() + + try: + r = os_ops.is_port_free(port) + finally: + s.shutdown(2) + th.join() + + if not r: + ok_count += 1 + logging.info("OK. Port {} is not free.".format(port)) + else: + no_count += 1 + logging.warning("NO. Port {} does not accept connection.".format(port)) + + if ok_count == C_LIMIT: + return + + if no_count == C_LIMIT: + raise RuntimeError("To many false positive test attempts.") + + if ok_count == 0: + raise RuntimeError("No one free port was found.") diff --git a/tests/test_os_ops_local.py b/tests/test_os_ops_local.py index 2e3c30b7..f60c3fc9 100644 --- a/tests/test_os_ops_local.py +++ b/tests/test_os_ops_local.py @@ -1,6 +1,6 @@ # coding: utf-8 -from .helpers.os_ops_descrs import OsOpsDescrs -from .helpers.os_ops_descrs import OsOperations +from .helpers.global_data import OsOpsDescrs +from .helpers.global_data import OsOperations import os diff --git a/tests/test_os_ops_remote.py b/tests/test_os_ops_remote.py index 58b09242..338e49f3 100755 --- a/tests/test_os_ops_remote.py +++ b/tests/test_os_ops_remote.py @@ -1,7 +1,7 @@ # coding: utf-8 -from .helpers.os_ops_descrs import OsOpsDescrs -from .helpers.os_ops_descrs import OsOperations +from .helpers.global_data import OsOpsDescrs +from .helpers.global_data import OsOperations from ..testgres import ExecUtilException diff --git a/tests/test_testgres_common.py b/tests/test_testgres_common.py index 4e23c4af..b286a1c6 100644 --- a/tests/test_testgres_common.py +++ b/tests/test_testgres_common.py @@ -1,6 +1,7 @@ -from .helpers.os_ops_descrs import OsOpsDescr -from .helpers.os_ops_descrs import OsOpsDescrs -from .helpers.os_ops_descrs import OsOperations +from .helpers.global_data import PostgresNodeService +from .helpers.global_data import PostgresNodeServices +from .helpers.global_data import OsOperations +from .helpers.global_data import PortManager from ..testgres.node import PgVer from ..testgres.node import PostgresNode @@ -37,6 +38,8 @@ import uuid import os import re +import subprocess +import typing @contextmanager @@ -54,22 +57,25 @@ def removing(os_ops: OsOperations, f): class TestTestgresCommon: - sm_os_ops_descrs: list[OsOpsDescr] = [ - OsOpsDescrs.sm_local_os_ops_descr, - OsOpsDescrs.sm_remote_os_ops_descr + sm_node_svcs: list[PostgresNodeService] = [ + PostgresNodeServices.sm_local, + PostgresNodeServices.sm_local2, + PostgresNodeServices.sm_remote, ] @pytest.fixture( - params=[descr.os_ops for descr in sm_os_ops_descrs], - ids=[descr.sign for descr in sm_os_ops_descrs] + params=sm_node_svcs, + ids=[descr.sign for descr in sm_node_svcs] ) - def os_ops(self, 
request: pytest.FixtureRequest) -> OsOperations: + def node_svc(self, request: pytest.FixtureRequest) -> PostgresNodeService: assert isinstance(request, pytest.FixtureRequest) - assert isinstance(request.param, OsOperations) + assert isinstance(request.param, PostgresNodeService) + assert isinstance(request.param.os_ops, OsOperations) + assert isinstance(request.param.port_manager, PortManager) return request.param - def test_version_management(self, os_ops: OsOperations): - assert isinstance(os_ops, OsOperations) + def test_version_management(self, node_svc: PostgresNodeService): + assert isinstance(node_svc, PostgresNodeService) a = PgVer('10.0') b = PgVer('10') @@ -93,42 +99,42 @@ def test_version_management(self, os_ops: OsOperations): assert (g == k) assert (g > h) - version = get_pg_version2(os_ops) + version = get_pg_version2(node_svc.os_ops) - with __class__.helper__get_node(os_ops) as node: + with __class__.helper__get_node(node_svc) as node: assert (isinstance(version, six.string_types)) assert (isinstance(node.version, PgVer)) assert (node.version == PgVer(version)) - def test_double_init(self, os_ops: OsOperations): - assert isinstance(os_ops, OsOperations) + def test_double_init(self, node_svc: PostgresNodeService): + assert isinstance(node_svc, PostgresNodeService) - with __class__.helper__get_node(os_ops).init() as node: + with __class__.helper__get_node(node_svc).init() as node: # can't initialize node more than once with pytest.raises(expected_exception=InitNodeException): node.init() - def test_init_after_cleanup(self, os_ops: OsOperations): - assert isinstance(os_ops, OsOperations) + def test_init_after_cleanup(self, node_svc: PostgresNodeService): + assert isinstance(node_svc, PostgresNodeService) - with __class__.helper__get_node(os_ops) as node: + with __class__.helper__get_node(node_svc) as node: node.init().start().execute('select 1') node.cleanup() node.init().start().execute('select 1') - def test_init_unique_system_id(self, os_ops: OsOperations): - assert isinstance(os_ops, OsOperations) + def test_init_unique_system_id(self, node_svc: PostgresNodeService): + assert isinstance(node_svc, PostgresNodeService) # this function exists in PostgreSQL 9.6+ - current_version = get_pg_version2(os_ops) + current_version = get_pg_version2(node_svc.os_ops) - __class__.helper__skip_test_if_util_not_exist(os_ops, "pg_resetwal") + __class__.helper__skip_test_if_util_not_exist(node_svc.os_ops, "pg_resetwal") __class__.helper__skip_test_if_pg_version_is_not_ge(current_version, '9.6') query = 'select system_identifier from pg_control_system()' with scoped_config(cache_initdb=False): - with __class__.helper__get_node(os_ops).init().start() as node0: + with __class__.helper__get_node(node_svc).init().start() as node0: id0 = node0.execute(query)[0] with scoped_config(cache_initdb=True, @@ -137,8 +143,8 @@ def test_init_unique_system_id(self, os_ops: OsOperations): assert (config.cached_initdb_unique) # spawn two nodes; ids must be different - with __class__.helper__get_node(os_ops).init().start() as node1, \ - __class__.helper__get_node(os_ops).init().start() as node2: + with __class__.helper__get_node(node_svc).init().start() as node1, \ + __class__.helper__get_node(node_svc).init().start() as node2: id1 = node1.execute(query)[0] id2 = node2.execute(query)[0] @@ -146,44 +152,44 @@ def test_init_unique_system_id(self, os_ops: OsOperations): assert (id1 > id0) assert (id2 > id1) - def test_node_exit(self, os_ops: OsOperations): - assert isinstance(os_ops, OsOperations) + def 
test_node_exit(self, node_svc: PostgresNodeService): + assert isinstance(node_svc, PostgresNodeService) with pytest.raises(expected_exception=QueryException): - with __class__.helper__get_node(os_ops).init() as node: + with __class__.helper__get_node(node_svc).init() as node: base_dir = node.base_dir node.safe_psql('select 1') # we should save the DB for "debugging" - assert (os_ops.path_exists(base_dir)) - os_ops.rmdirs(base_dir, ignore_errors=True) + assert (node_svc.os_ops.path_exists(base_dir)) + node_svc.os_ops.rmdirs(base_dir, ignore_errors=True) - with __class__.helper__get_node(os_ops).init() as node: + with __class__.helper__get_node(node_svc).init() as node: base_dir = node.base_dir # should have been removed by default - assert not (os_ops.path_exists(base_dir)) + assert not (node_svc.os_ops.path_exists(base_dir)) - def test_double_start(self, os_ops: OsOperations): - assert isinstance(os_ops, OsOperations) + def test_double_start(self, node_svc: PostgresNodeService): + assert isinstance(node_svc, PostgresNodeService) - with __class__.helper__get_node(os_ops).init().start() as node: + with __class__.helper__get_node(node_svc).init().start() as node: # can't start node more than once node.start() assert (node.is_started) - def test_uninitialized_start(self, os_ops: OsOperations): - assert isinstance(os_ops, OsOperations) + def test_uninitialized_start(self, node_svc: PostgresNodeService): + assert isinstance(node_svc, PostgresNodeService) - with __class__.helper__get_node(os_ops) as node: + with __class__.helper__get_node(node_svc) as node: # node is not initialized yet with pytest.raises(expected_exception=StartNodeException): node.start() - def test_restart(self, os_ops: OsOperations): - assert isinstance(os_ops, OsOperations) + def test_restart(self, node_svc: PostgresNodeService): + assert isinstance(node_svc, PostgresNodeService) - with __class__.helper__get_node(os_ops) as node: + with __class__.helper__get_node(node_svc) as node: node.init().start() # restart, ok @@ -198,10 +204,10 @@ def test_restart(self, os_ops: OsOperations): node.append_conf('pg_hba.conf', 'DUMMY') node.restart() - def test_reload(self, os_ops: OsOperations): - assert isinstance(os_ops, OsOperations) + def test_reload(self, node_svc: PostgresNodeService): + assert isinstance(node_svc, PostgresNodeService) - with __class__.helper__get_node(os_ops) as node: + with __class__.helper__get_node(node_svc) as node: node.init().start() # change client_min_messages and save old value @@ -216,24 +222,24 @@ def test_reload(self, os_ops: OsOperations): assert ('debug1' == cmm_new[0][0].lower()) assert (cmm_old != cmm_new) - def test_pg_ctl(self, os_ops: OsOperations): - assert isinstance(os_ops, OsOperations) + def test_pg_ctl(self, node_svc: PostgresNodeService): + assert isinstance(node_svc, PostgresNodeService) - with __class__.helper__get_node(os_ops) as node: + with __class__.helper__get_node(node_svc) as node: node.init().start() status = node.pg_ctl(['status']) assert ('PID' in status) - def test_status(self, os_ops: OsOperations): - assert isinstance(os_ops, OsOperations) + def test_status(self, node_svc: PostgresNodeService): + assert isinstance(node_svc, PostgresNodeService) assert (NodeStatus.Running) assert not (NodeStatus.Stopped) assert not (NodeStatus.Uninitialized) # check statuses after each operation - with __class__.helper__get_node(os_ops) as node: + with __class__.helper__get_node(node_svc) as node: assert (node.pid == 0) assert (node.status() == NodeStatus.Uninitialized) @@ -257,8 +263,8 @@ 
def test_status(self, os_ops: OsOperations): assert (node.pid == 0) assert (node.status() == NodeStatus.Uninitialized) - def test_child_pids(self, os_ops: OsOperations): - assert isinstance(os_ops, OsOperations) + def test_child_pids(self, node_svc: PostgresNodeService): + assert isinstance(node_svc, PostgresNodeService) master_processes = [ ProcessType.AutovacuumLauncher, @@ -269,7 +275,7 @@ def test_child_pids(self, os_ops: OsOperations): ProcessType.WalWriter, ] - postgresVersion = get_pg_version2(os_ops) + postgresVersion = get_pg_version2(node_svc.os_ops) if __class__.helper__pg_version_ge(postgresVersion, '10'): master_processes.append(ProcessType.LogicalReplicationLauncher) @@ -338,7 +344,7 @@ def LOCAL__check_auxiliary_pids__multiple_attempts( absenceList )) - with __class__.helper__get_node(os_ops).init().start() as master: + with __class__.helper__get_node(node_svc).init().start() as master: # master node doesn't have a source walsender! with pytest.raises(expected_exception=testgres_TestgresException): @@ -380,10 +386,10 @@ def test_exceptions(self): str(ExecUtilException('msg', 'cmd', 1, 'out')) str(QueryException('msg', 'query')) - def test_auto_name(self, os_ops: OsOperations): - assert isinstance(os_ops, OsOperations) + def test_auto_name(self, node_svc: PostgresNodeService): + assert isinstance(node_svc, PostgresNodeService) - with __class__.helper__get_node(os_ops).init(allow_streaming=True).start() as m: + with __class__.helper__get_node(node_svc).init(allow_streaming=True).start() as m: with m.replicate().start() as r: # check that nodes are running assert (m.status()) @@ -417,9 +423,9 @@ def test_file_tail(self): lines = file_tail(f, 1) assert (lines[0] == s3) - def test_isolation_levels(self, os_ops: OsOperations): - assert isinstance(os_ops, OsOperations) - with __class__.helper__get_node(os_ops).init().start() as node: + def test_isolation_levels(self, node_svc: PostgresNodeService): + assert isinstance(node_svc, PostgresNodeService) + with __class__.helper__get_node(node_svc).init().start() as node: with node.connect() as con: # string levels con.begin('Read Uncommitted').commit() @@ -437,17 +443,17 @@ def test_isolation_levels(self, os_ops: OsOperations): with pytest.raises(expected_exception=QueryException): con.begin('Garbage').commit() - def test_users(self, os_ops: OsOperations): - assert isinstance(os_ops, OsOperations) - with __class__.helper__get_node(os_ops).init().start() as node: + def test_users(self, node_svc: PostgresNodeService): + assert isinstance(node_svc, PostgresNodeService) + with __class__.helper__get_node(node_svc).init().start() as node: node.psql('create role test_user login') value = node.safe_psql('select 1', username='test_user') value = __class__.helper__rm_carriage_returns(value) assert (value == b'1\n') - def test_poll_query_until(self, os_ops: OsOperations): - assert isinstance(os_ops, OsOperations) - with __class__.helper__get_node(os_ops) as node: + def test_poll_query_until(self, node_svc: PostgresNodeService): + assert isinstance(node_svc, PostgresNodeService) + with __class__.helper__get_node(node_svc) as node: node.init().start() get_time = 'select extract(epoch from now())' @@ -507,8 +513,8 @@ def test_poll_query_until(self, os_ops: OsOperations): # check 1 arg, ok node.poll_query_until('select true') - def test_logging(self, os_ops: OsOperations): - assert isinstance(os_ops, OsOperations) + def test_logging(self, node_svc: PostgresNodeService): + assert isinstance(node_svc, PostgresNodeService) C_MAX_ATTEMPTS = 50 # This name 
is used for testgres logging, too. C_NODE_NAME = "testgres_tests." + __class__.__name__ + "test_logging-master-" + uuid.uuid4().hex @@ -529,7 +535,7 @@ def test_logging(self, os_ops: OsOperations): logger.addHandler(handler) with scoped_config(use_python_logging=True): - with __class__.helper__get_node(os_ops, name=C_NODE_NAME) as master: + with __class__.helper__get_node(node_svc, name=C_NODE_NAME) as master: logging.info("Master node is initilizing") master.init() @@ -599,9 +605,9 @@ def LOCAL__test_lines(): # GO HOME! return - def test_psql(self, os_ops: OsOperations): - assert isinstance(os_ops, OsOperations) - with __class__.helper__get_node(os_ops).init().start() as node: + def test_psql(self, node_svc: PostgresNodeService): + assert isinstance(node_svc, PostgresNodeService) + with __class__.helper__get_node(node_svc).init().start() as node: # check returned values (1 arg) res = node.psql('select 1') @@ -636,17 +642,20 @@ def test_psql(self, os_ops: OsOperations): # check psql's default args, fails with pytest.raises(expected_exception=QueryException): - node.psql() + r = node.psql() # raises! + logging.error("node.psql returns [{}]".format(r)) node.stop() # check psql on stopped node, fails with pytest.raises(expected_exception=QueryException): - node.safe_psql('select 1') + # [2025-04-03] This call does not raise exception! I do not know why. + r = node.safe_psql('select 1') # raises! + logging.error("node.safe_psql returns [{}]".format(r)) - def test_safe_psql__expect_error(self, os_ops: OsOperations): - assert isinstance(os_ops, OsOperations) - with __class__.helper__get_node(os_ops).init().start() as node: + def test_safe_psql__expect_error(self, node_svc: PostgresNodeService): + assert isinstance(node_svc, PostgresNodeService) + with __class__.helper__get_node(node_svc).init().start() as node: err = node.safe_psql('select_or_not_select 1', expect_error=True) assert (type(err) == str) # noqa: E721 assert ('select_or_not_select' in err) @@ -663,9 +672,9 @@ def test_safe_psql__expect_error(self, os_ops: OsOperations): res = node.safe_psql("select 1;", expect_error=False) assert (__class__.helper__rm_carriage_returns(res) == b'1\n') - def test_transactions(self, os_ops: OsOperations): - assert isinstance(os_ops, OsOperations) - with __class__.helper__get_node(os_ops).init().start() as node: + def test_transactions(self, node_svc: PostgresNodeService): + assert isinstance(node_svc, PostgresNodeService) + with __class__.helper__get_node(node_svc).init().start() as node: with node.connect() as con: con.begin() @@ -688,9 +697,9 @@ def test_transactions(self, os_ops: OsOperations): con.execute('drop table test') con.commit() - def test_control_data(self, os_ops: OsOperations): - assert isinstance(os_ops, OsOperations) - with __class__.helper__get_node(os_ops) as node: + def test_control_data(self, node_svc: PostgresNodeService): + assert isinstance(node_svc, PostgresNodeService) + with __class__.helper__get_node(node_svc) as node: # node is not initialized yet with pytest.raises(expected_exception=ExecUtilException): @@ -703,9 +712,9 @@ def test_control_data(self, os_ops: OsOperations): assert data is not None assert (any('pg_control' in s for s in data.keys())) - def test_backup_simple(self, os_ops: OsOperations): - assert isinstance(os_ops, OsOperations) - with __class__.helper__get_node(os_ops) as master: + def test_backup_simple(self, node_svc: PostgresNodeService): + assert isinstance(node_svc, PostgresNodeService) + with __class__.helper__get_node(node_svc) as master: # enable 
streaming for backups master.init(allow_streaming=True) @@ -725,9 +734,9 @@ def test_backup_simple(self, os_ops: OsOperations): res = slave.execute('select * from test order by i asc') assert (res == [(1, ), (2, ), (3, ), (4, )]) - def test_backup_multiple(self, os_ops: OsOperations): - assert isinstance(os_ops, OsOperations) - with __class__.helper__get_node(os_ops) as node: + def test_backup_multiple(self, node_svc: PostgresNodeService): + assert isinstance(node_svc, PostgresNodeService) + with __class__.helper__get_node(node_svc) as node: node.init(allow_streaming=True).start() with node.backup(xlog_method='fetch') as backup1, \ @@ -739,9 +748,9 @@ def test_backup_multiple(self, os_ops: OsOperations): backup.spawn_primary('node2', destroy=False) as node2: assert (node1.base_dir != node2.base_dir) - def test_backup_exhaust(self, os_ops: OsOperations): - assert isinstance(os_ops, OsOperations) - with __class__.helper__get_node(os_ops) as node: + def test_backup_exhaust(self, node_svc: PostgresNodeService): + assert isinstance(node_svc, PostgresNodeService) + with __class__.helper__get_node(node_svc) as node: node.init(allow_streaming=True).start() with node.backup(xlog_method='fetch') as backup: @@ -753,9 +762,9 @@ def test_backup_exhaust(self, os_ops: OsOperations): with pytest.raises(expected_exception=BackupException): backup.spawn_primary() - def test_backup_wrong_xlog_method(self, os_ops: OsOperations): - assert isinstance(os_ops, OsOperations) - with __class__.helper__get_node(os_ops) as node: + def test_backup_wrong_xlog_method(self, node_svc: PostgresNodeService): + assert isinstance(node_svc, PostgresNodeService) + with __class__.helper__get_node(node_svc) as node: node.init(allow_streaming=True).start() with pytest.raises( @@ -764,11 +773,11 @@ def test_backup_wrong_xlog_method(self, os_ops: OsOperations): ): node.backup(xlog_method='wrong') - def test_pg_ctl_wait_option(self, os_ops: OsOperations): - assert isinstance(os_ops, OsOperations) + def test_pg_ctl_wait_option(self, node_svc: PostgresNodeService): + assert isinstance(node_svc, PostgresNodeService) C_MAX_ATTEMPTS = 50 - node = __class__.helper__get_node(os_ops) + node = __class__.helper__get_node(node_svc) assert node.status() == NodeStatus.Uninitialized node.init() assert node.status() == NodeStatus.Stopped @@ -835,9 +844,9 @@ def test_pg_ctl_wait_option(self, os_ops: OsOperations): logging.info("OK. 
Node is stopped.") node.cleanup() - def test_replicate(self, os_ops: OsOperations): - assert isinstance(os_ops, OsOperations) - with __class__.helper__get_node(os_ops) as node: + def test_replicate(self, node_svc: PostgresNodeService): + assert isinstance(node_svc, PostgresNodeService) + with __class__.helper__get_node(node_svc) as node: node.init(allow_streaming=True).start() with node.replicate().start() as replica: @@ -851,14 +860,14 @@ def test_replicate(self, os_ops: OsOperations): res = node.execute('select * from test') assert (res == []) - def test_synchronous_replication(self, os_ops: OsOperations): - assert isinstance(os_ops, OsOperations) + def test_synchronous_replication(self, node_svc: PostgresNodeService): + assert isinstance(node_svc, PostgresNodeService) - current_version = get_pg_version2(os_ops) + current_version = get_pg_version2(node_svc.os_ops) __class__.helper__skip_test_if_pg_version_is_not_ge(current_version, "9.6") - with __class__.helper__get_node(os_ops) as master: + with __class__.helper__get_node(node_svc) as master: old_version = not __class__.helper__pg_version_ge(current_version, '9.6') master.init(allow_streaming=True).start() @@ -897,14 +906,14 @@ def test_synchronous_replication(self, os_ops: OsOperations): res = standby1.safe_psql('select count(*) from abc') assert (__class__.helper__rm_carriage_returns(res) == b'1000000\n') - def test_logical_replication(self, os_ops: OsOperations): - assert isinstance(os_ops, OsOperations) + def test_logical_replication(self, node_svc: PostgresNodeService): + assert isinstance(node_svc, PostgresNodeService) - current_version = get_pg_version2(os_ops) + current_version = get_pg_version2(node_svc.os_ops) __class__.helper__skip_test_if_pg_version_is_not_ge(current_version, "10") - with __class__.helper__get_node(os_ops) as node1, __class__.helper__get_node(os_ops) as node2: + with __class__.helper__get_node(node_svc) as node1, __class__.helper__get_node(node_svc) as node2: node1.init(allow_logical=True) node1.start() node2.init().start() @@ -971,15 +980,15 @@ def test_logical_replication(self, os_ops: OsOperations): res = node2.execute('select * from test2') assert (res == [('a', ), ('b', )]) - def test_logical_catchup(self, os_ops: OsOperations): - assert isinstance(os_ops, OsOperations) + def test_logical_catchup(self, node_svc: PostgresNodeService): + assert isinstance(node_svc, PostgresNodeService) """ Runs catchup for 100 times to be sure that it is consistent """ - current_version = get_pg_version2(os_ops) + current_version = get_pg_version2(node_svc.os_ops) __class__.helper__skip_test_if_pg_version_is_not_ge(current_version, "10") - with __class__.helper__get_node(os_ops) as node1, __class__.helper__get_node(os_ops) as node2: + with __class__.helper__get_node(node_svc) as node1, __class__.helper__get_node(node_svc) as node2: node1.init(allow_logical=True) node1.start() node2.init().start() @@ -999,20 +1008,20 @@ def test_logical_catchup(self, os_ops: OsOperations): assert (res == [(i, i, )]) node1.execute('delete from test') - def test_logical_replication_fail(self, os_ops: OsOperations): - assert isinstance(os_ops, OsOperations) + def test_logical_replication_fail(self, node_svc: PostgresNodeService): + assert isinstance(node_svc, PostgresNodeService) - current_version = get_pg_version2(os_ops) + current_version = get_pg_version2(node_svc.os_ops) __class__.helper__skip_test_if_pg_version_is_ge(current_version, "10") - with __class__.helper__get_node(os_ops) as node: + with __class__.helper__get_node(node_svc) as 
node: with pytest.raises(expected_exception=InitNodeException): node.init(allow_logical=True) - def test_replication_slots(self, os_ops: OsOperations): - assert isinstance(os_ops, OsOperations) - with __class__.helper__get_node(os_ops) as node: + def test_replication_slots(self, node_svc: PostgresNodeService): + assert isinstance(node_svc, PostgresNodeService) + with __class__.helper__get_node(node_svc) as node: node.init(allow_streaming=True).start() with node.replicate(slot='slot1').start() as replica: @@ -1022,18 +1031,18 @@ def test_replication_slots(self, os_ops: OsOperations): with pytest.raises(expected_exception=testgres_TestgresException): node.replicate(slot='slot1') - def test_incorrect_catchup(self, os_ops: OsOperations): - assert isinstance(os_ops, OsOperations) - with __class__.helper__get_node(os_ops) as node: + def test_incorrect_catchup(self, node_svc: PostgresNodeService): + assert isinstance(node_svc, PostgresNodeService) + with __class__.helper__get_node(node_svc) as node: node.init(allow_streaming=True).start() # node has no master, can't catch up with pytest.raises(expected_exception=testgres_TestgresException): node.catchup() - def test_promotion(self, os_ops: OsOperations): - assert isinstance(os_ops, OsOperations) - with __class__.helper__get_node(os_ops) as master: + def test_promotion(self, node_svc: PostgresNodeService): + assert isinstance(node_svc, PostgresNodeService) + with __class__.helper__get_node(node_svc) as master: master.init().start() master.safe_psql('create table abc(id serial)') @@ -1046,17 +1055,17 @@ def test_promotion(self, os_ops: OsOperations): res = replica.safe_psql('select * from abc') assert (__class__.helper__rm_carriage_returns(res) == b'1\n') - def test_dump(self, os_ops: OsOperations): - assert isinstance(os_ops, OsOperations) + def test_dump(self, node_svc: PostgresNodeService): + assert isinstance(node_svc, PostgresNodeService) query_create = 'create table test as select generate_series(1, 2) as val' query_select = 'select * from test order by val asc' - with __class__.helper__get_node(os_ops).init().start() as node1: + with __class__.helper__get_node(node_svc).init().start() as node1: node1.execute(query_create) for format in ['plain', 'custom', 'directory', 'tar']: - with removing(os_ops, node1.dump(format=format)) as dump: - with __class__.helper__get_node(os_ops).init().start() as node3: + with removing(node_svc.os_ops, node1.dump(format=format)) as dump: + with __class__.helper__get_node(node_svc).init().start() as node3: if format == 'directory': assert (os.path.isdir(dump)) else: @@ -1066,14 +1075,16 @@ def test_dump(self, os_ops: OsOperations): res = node3.execute(query_select) assert (res == [(1, ), (2, )]) - def test_get_pg_config2(self, os_ops: OsOperations): + def test_get_pg_config2(self, node_svc: PostgresNodeService): + assert isinstance(node_svc, PostgresNodeService) + # check same instances - a = get_pg_config2(os_ops, None) - b = get_pg_config2(os_ops, None) + a = get_pg_config2(node_svc.os_ops, None) + b = get_pg_config2(node_svc.os_ops, None) assert (id(a) == id(b)) # save right before config change - c1 = get_pg_config2(os_ops, None) + c1 = get_pg_config2(node_svc.os_ops, None) # modify setting for this scope with scoped_config(cache_pg_config=False) as config: @@ -1081,20 +1092,315 @@ def test_get_pg_config2(self, os_ops: OsOperations): assert not (config.cache_pg_config) # save right after config change - c2 = get_pg_config2(os_ops, None) + c2 = get_pg_config2(node_svc.os_ops, None) # check different instances 
after config change assert (id(c1) != id(c2)) # check different instances - a = get_pg_config2(os_ops, None) - b = get_pg_config2(os_ops, None) + a = get_pg_config2(node_svc.os_ops, None) + b = get_pg_config2(node_svc.os_ops, None) assert (id(a) != id(b)) + def test_pgbench(self, node_svc: PostgresNodeService): + assert isinstance(node_svc, PostgresNodeService) + + __class__.helper__skip_test_if_util_not_exist(node_svc.os_ops, "pgbench") + + with __class__.helper__get_node(node_svc).init().start() as node: + # initialize pgbench DB and run benchmarks + node.pgbench_init( + scale=2, + foreign_keys=True, + options=['-q'] + ).pgbench_run(time=2) + + # run TPC-B benchmark + proc = node.pgbench(stdout=subprocess.PIPE, + stderr=subprocess.STDOUT, + options=['-T3']) + out = proc.communicate()[0] + assert (b'tps = ' in out) + + def test_unix_sockets(self, node_svc: PostgresNodeService): + assert isinstance(node_svc, PostgresNodeService) + + with __class__.helper__get_node(node_svc) as node: + node.init(unix_sockets=False, allow_streaming=True) + node.start() + + res_exec = node.execute('select 1') + assert (res_exec == [(1,)]) + res_psql = node.safe_psql('select 1') + assert (res_psql == b'1\n') + + with node.replicate() as r: + assert type(r) == PostgresNode # noqa: E721 + r.start() + res_exec = r.execute('select 1') + assert (res_exec == [(1,)]) + res_psql = r.safe_psql('select 1') + assert (res_psql == b'1\n') + + def test_the_same_port(self, node_svc: PostgresNodeService): + assert isinstance(node_svc, PostgresNodeService) + + with __class__.helper__get_node(node_svc) as node: + node.init().start() + assert (node._should_free_port) + assert (type(node.port) == int) # noqa: E721 + node_port_copy = node.port + r = node.safe_psql("SELECT 1;") + assert (__class__.helper__rm_carriage_returns(r) == b'1\n') + + with __class__.helper__get_node(node_svc, port=node.port) as node2: + assert (type(node2.port) == int) # noqa: E721 + assert (node2.port == node.port) + assert not (node2._should_free_port) + + with pytest.raises( + expected_exception=StartNodeException, + match=re.escape("Cannot start node") + ): + node2.init().start() + + # node is still working + assert (node.port == node_port_copy) + assert (node._should_free_port) + r = node.safe_psql("SELECT 3;") + assert (__class__.helper__rm_carriage_returns(r) == b'3\n') + + class tagPortManagerProxy(PortManager): + m_PrevPortManager: PortManager + + m_DummyPortNumber: int + m_DummyPortMaxUsage: int + + m_DummyPortCurrentUsage: int + m_DummyPortTotalUsage: int + + def __init__(self, prevPortManager: PortManager, dummyPortNumber: int, dummyPortMaxUsage: int): + assert isinstance(prevPortManager, PortManager) + assert type(dummyPortNumber) == int # noqa: E721 + assert type(dummyPortMaxUsage) == int # noqa: E721 + assert dummyPortNumber >= 0 + assert dummyPortMaxUsage >= 0 + + super().__init__() + + self.m_PrevPortManager = prevPortManager + + self.m_DummyPortNumber = dummyPortNumber + self.m_DummyPortMaxUsage = dummyPortMaxUsage + + self.m_DummyPortCurrentUsage = 0 + self.m_DummyPortTotalUsage = 0 + + def __enter__(self): + return self + + def __exit__(self, type, value, traceback): + assert self.m_DummyPortCurrentUsage == 0 + + assert self.m_PrevPortManager is not None + + def reserve_port(self) -> int: + assert type(self.m_DummyPortMaxUsage) == int # noqa: E721 + assert type(self.m_DummyPortTotalUsage) == int # noqa: E721 + assert type(self.m_DummyPortCurrentUsage) == int # noqa: E721 + assert self.m_DummyPortTotalUsage >= 0 + assert 
self.m_DummyPortCurrentUsage >= 0 + + assert self.m_DummyPortTotalUsage <= self.m_DummyPortMaxUsage + assert self.m_DummyPortCurrentUsage <= self.m_DummyPortTotalUsage + + assert self.m_PrevPortManager is not None + assert isinstance(self.m_PrevPortManager, PortManager) + + if self.m_DummyPortTotalUsage == self.m_DummyPortMaxUsage: + return self.m_PrevPortManager.reserve_port() + + self.m_DummyPortTotalUsage += 1 + self.m_DummyPortCurrentUsage += 1 + return self.m_DummyPortNumber + + def release_port(self, dummyPortNumber: int): + assert type(dummyPortNumber) == int # noqa: E721 + + assert type(self.m_DummyPortMaxUsage) == int # noqa: E721 + assert type(self.m_DummyPortTotalUsage) == int # noqa: E721 + assert type(self.m_DummyPortCurrentUsage) == int # noqa: E721 + assert self.m_DummyPortTotalUsage >= 0 + assert self.m_DummyPortCurrentUsage >= 0 + + assert self.m_DummyPortTotalUsage <= self.m_DummyPortMaxUsage + assert self.m_DummyPortCurrentUsage <= self.m_DummyPortTotalUsage + + assert self.m_PrevPortManager is not None + assert isinstance(self.m_PrevPortManager, PortManager) + + if self.m_DummyPortCurrentUsage > 0 and dummyPortNumber == self.m_DummyPortNumber: + assert self.m_DummyPortTotalUsage > 0 + self.m_DummyPortCurrentUsage -= 1 + return + + return self.m_PrevPortManager.release_port(dummyPortNumber) + + def test_port_rereserve_during_node_start(self, node_svc: PostgresNodeService): + assert type(node_svc) == PostgresNodeService # noqa: E721 + assert PostgresNode._C_MAX_START_ATEMPTS == 5 + + C_COUNT_OF_BAD_PORT_USAGE = 3 + + with __class__.helper__get_node(node_svc) as node1: + node1.init().start() + assert node1._should_free_port + assert type(node1.port) == int # noqa: E721 + node1_port_copy = node1.port + assert __class__.helper__rm_carriage_returns(node1.safe_psql("SELECT 1;")) == b'1\n' + + with __class__.tagPortManagerProxy(node_svc.port_manager, node1.port, C_COUNT_OF_BAD_PORT_USAGE) as proxy: + assert proxy.m_DummyPortNumber == node1.port + with __class__.helper__get_node(node_svc, port_manager=proxy) as node2: + assert node2._should_free_port + assert node2.port == node1.port + + node2.init().start() + + assert node2.port != node1.port + assert node2._should_free_port + assert proxy.m_DummyPortCurrentUsage == 0 + assert proxy.m_DummyPortTotalUsage == C_COUNT_OF_BAD_PORT_USAGE + assert node2.is_started + r = node2.safe_psql("SELECT 2;") + assert __class__.helper__rm_carriage_returns(r) == b'2\n' + + # node1 is still working + assert node1.port == node1_port_copy + assert node1._should_free_port + r = node1.safe_psql("SELECT 3;") + assert __class__.helper__rm_carriage_returns(r) == b'3\n' + + def test_port_conflict(self, node_svc: PostgresNodeService): + assert type(node_svc) == PostgresNodeService # noqa: E721 + assert PostgresNode._C_MAX_START_ATEMPTS > 1 + + C_COUNT_OF_BAD_PORT_USAGE = PostgresNode._C_MAX_START_ATEMPTS + + with __class__.helper__get_node(node_svc) as node1: + node1.init().start() + assert node1._should_free_port + assert type(node1.port) == int # noqa: E721 + node1_port_copy = node1.port + assert __class__.helper__rm_carriage_returns(node1.safe_psql("SELECT 1;")) == b'1\n' + + with __class__.tagPortManagerProxy(node_svc.port_manager, node1.port, C_COUNT_OF_BAD_PORT_USAGE) as proxy: + assert proxy.m_DummyPortNumber == node1.port + with __class__.helper__get_node(node_svc, port_manager=proxy) as node2: + assert node2._should_free_port + assert node2.port == node1.port + + with pytest.raises( + expected_exception=StartNodeException, + 
match=re.escape("Cannot start node after multiple attempts.") + ): + node2.init().start() + + assert node2.port == node1.port + assert node2._should_free_port + assert proxy.m_DummyPortCurrentUsage == 1 + assert proxy.m_DummyPortTotalUsage == C_COUNT_OF_BAD_PORT_USAGE + assert not node2.is_started + + # node2 must release our dummyPort (node1.port) + assert (proxy.m_DummyPortCurrentUsage == 0) + + # node1 is still working + assert node1.port == node1_port_copy + assert node1._should_free_port + r = node1.safe_psql("SELECT 3;") + assert __class__.helper__rm_carriage_returns(r) == b'3\n' + + def test_try_to_get_port_after_free_manual_port(self, node_svc: PostgresNodeService): + assert type(node_svc) == PostgresNodeService # noqa: E721 + + assert node_svc.port_manager is not None + assert isinstance(node_svc.port_manager, PortManager) + + with __class__.helper__get_node(node_svc) as node1: + assert node1 is not None + assert type(node1) == PostgresNode # noqa: E721 + assert node1.port is not None + assert type(node1.port) == int # noqa: E721 + with __class__.helper__get_node(node_svc, port=node1.port, port_manager=None) as node2: + assert node2 is not None + assert type(node1) == PostgresNode # noqa: E721 + assert node2 is not node1 + assert node2.port is not None + assert type(node2.port) == int # noqa: E721 + assert node2.port == node1.port + + logging.info("Release node2 port") + node2.free_port() + + logging.info("try to get node2.port...") + with pytest.raises( + InvalidOperationException, + match="^" + re.escape("PostgresNode port is not defined.") + "$" + ): + p = node2.port + assert p is None + + def test_try_to_start_node_after_free_manual_port(self, node_svc: PostgresNodeService): + assert type(node_svc) == PostgresNodeService # noqa: E721 + + assert node_svc.port_manager is not None + assert isinstance(node_svc.port_manager, PortManager) + + with __class__.helper__get_node(node_svc) as node1: + assert node1 is not None + assert type(node1) == PostgresNode # noqa: E721 + assert node1.port is not None + assert type(node1.port) == int # noqa: E721 + with __class__.helper__get_node(node_svc, port=node1.port, port_manager=None) as node2: + assert node2 is not None + assert type(node1) == PostgresNode # noqa: E721 + assert node2 is not node1 + assert node2.port is not None + assert type(node2.port) == int # noqa: E721 + assert node2.port == node1.port + + logging.info("Release node2 port") + node2.free_port() + + logging.info("node2 is trying to start...") + with pytest.raises( + InvalidOperationException, + match="^" + re.escape("Can't start PostgresNode. 
Port is not defined.") + "$" + ): + node2.start() + @staticmethod - def helper__get_node(os_ops: OsOperations, name=None): - assert isinstance(os_ops, OsOperations) - return PostgresNode(name, conn_params=None, os_ops=os_ops) + def helper__get_node( + node_svc: PostgresNodeService, + name: typing.Optional[str] = None, + port: typing.Optional[int] = None, + port_manager: typing.Optional[PortManager] = None + ) -> PostgresNode: + assert isinstance(node_svc, PostgresNodeService) + assert isinstance(node_svc.os_ops, OsOperations) + assert isinstance(node_svc.port_manager, PortManager) + + if port_manager is None: + port_manager = node_svc.port_manager + + return PostgresNode( + name, + port=port, + conn_params=None, + os_ops=node_svc.os_ops, + port_manager=port_manager if port is None else None + ) @staticmethod def helper__skip_test_if_pg_version_is_not_ge(ver1: str, ver2: str): diff --git a/tests/test_testgres_local.py b/tests/test_testgres_local.py index 01f975a0..bef80d0f 100644 --- a/tests/test_testgres_local.py +++ b/tests/test_testgres_local.py @@ -100,27 +100,6 @@ def test_custom_init(self): # there should be no trust entries at all assert not (any('trust' in s for s in lines)) - def test_pgbench(self): - __class__.helper__skip_test_if_util_not_exist("pgbench") - - with get_new_node().init().start() as node: - - # initialize pgbench DB and run benchmarks - node.pgbench_init(scale=2, foreign_keys=True, - options=['-q']).pgbench_run(time=2) - - # run TPC-B benchmark - proc = node.pgbench(stdout=subprocess.PIPE, - stderr=subprocess.STDOUT, - options=['-T3']) - - out, _ = proc.communicate() - out = out.decode('utf-8') - - proc.stdout.close() - - assert ('tps' in out) - def test_pg_config(self): # check same instances a = get_pg_config() @@ -177,18 +156,6 @@ def test_config_stack(self): assert (TestgresConfig.cached_initdb_dir == d0) - def test_unix_sockets(self): - with get_new_node() as node: - node.init(unix_sockets=False, allow_streaming=True) - node.start() - - node.execute('select 1') - node.safe_psql('select 1') - - with node.replicate().start() as r: - r.execute('select 1') - r.safe_psql('select 1') - def test_ports_management(self): assert bound_ports is not None assert type(bound_ports) == set # noqa: E721 @@ -277,30 +244,6 @@ def test_parse_pg_version(self): # Macos assert parse_pg_version("postgres (PostgreSQL) 14.9 (Homebrew)") == "14.9" - def test_the_same_port(self): - with get_new_node() as node: - node.init().start() - assert (node._should_free_port) - assert (type(node.port) == int) # noqa: E721 - node_port_copy = node.port - assert (rm_carriage_returns(node.safe_psql("SELECT 1;")) == b'1\n') - - with get_new_node(port=node.port) as node2: - assert (type(node2.port) == int) # noqa: E721 - assert (node2.port == node.port) - assert not (node2._should_free_port) - - with pytest.raises( - expected_exception=StartNodeException, - match=re.escape("Cannot start node") - ): - node2.init().start() - - # node is still working - assert (node.port == node_port_copy) - assert (node._should_free_port) - assert (rm_carriage_returns(node.safe_psql("SELECT 3;")) == b'3\n') - class tagPortManagerProxy: sm_prev_testgres_reserve_port = None sm_prev_testgres_release_port = None diff --git a/tests/test_testgres_remote.py b/tests/test_testgres_remote.py index 2142e5ba..ef4bd0c8 100755 --- a/tests/test_testgres_remote.py +++ b/tests/test_testgres_remote.py @@ -1,14 +1,12 @@ # coding: utf-8 import os import re -import subprocess import pytest -import psutil import logging -from 
.helpers.os_ops_descrs import OsOpsDescrs -from .helpers.os_ops_descrs import OsOperations +from .helpers.global_data import PostgresNodeService +from .helpers.global_data import PostgresNodeServices from .. import testgres @@ -27,8 +25,6 @@ get_pg_config # NOTE: those are ugly imports -from ..testgres import bound_ports -from ..testgres.node import ProcessProxy def util_exists(util): @@ -48,17 +44,17 @@ def good_properties(f): class TestTestgresRemote: - sm_os_ops = OsOpsDescrs.sm_remote_os_ops - @pytest.fixture(autouse=True, scope="class") def implicit_fixture(self): + cur_os_ops = PostgresNodeServices.sm_remote.os_ops + assert cur_os_ops is not None + prev_ops = testgres_config.os_ops assert prev_ops is not None - assert __class__.sm_os_ops is not None - testgres_config.set_os_ops(os_ops=__class__.sm_os_ops) - assert testgres_config.os_ops is __class__.sm_os_ops + testgres_config.set_os_ops(os_ops=cur_os_ops) + assert testgres_config.os_ops is cur_os_ops yield - assert testgres_config.os_ops is __class__.sm_os_ops + assert testgres_config.os_ops is cur_os_ops testgres_config.set_os_ops(os_ops=prev_ops) assert testgres_config.os_ops is prev_ops @@ -172,21 +168,6 @@ def test_init__unk_LANG_and_LC_CTYPE(self): __class__.helper__restore_envvar("LC_CTYPE", prev_LC_CTYPE) __class__.helper__restore_envvar("LC_COLLATE", prev_LC_COLLATE) - def test_pgbench(self): - __class__.helper__skip_test_if_util_not_exist("pgbench") - - with __class__.helper__get_node().init().start() as node: - # initialize pgbench DB and run benchmarks - node.pgbench_init(scale=2, foreign_keys=True, - options=['-q']).pgbench_run(time=2) - - # run TPC-B benchmark - proc = node.pgbench(stdout=subprocess.PIPE, - stderr=subprocess.STDOUT, - options=['-T3']) - out = proc.communicate()[0] - assert (b'tps = ' in out) - def test_pg_config(self): # check same instances a = get_pg_config() @@ -243,90 +224,19 @@ def test_config_stack(self): assert (TestgresConfig.cached_initdb_dir == d0) - def test_unix_sockets(self): - with __class__.helper__get_node() as node: - node.init(unix_sockets=False, allow_streaming=True) - node.start() - - res_exec = node.execute('select 1') - res_psql = node.safe_psql('select 1') - assert (res_exec == [(1,)]) - assert (res_psql == b'1\n') - - with node.replicate().start() as r: - res_exec = r.execute('select 1') - res_psql = r.safe_psql('select 1') - assert (res_exec == [(1,)]) - assert (res_psql == b'1\n') - - def test_ports_management(self): - assert bound_ports is not None - assert type(bound_ports) == set # noqa: E721 - - if len(bound_ports) != 0: - logging.warning("bound_ports is not empty: {0}".format(bound_ports)) - - stage0__bound_ports = bound_ports.copy() - - with __class__.helper__get_node() as node: - assert bound_ports is not None - assert type(bound_ports) == set # noqa: E721 - - assert node.port is not None - assert type(node.port) == int # noqa: E721 - - logging.info("node port is {0}".format(node.port)) - - assert node.port in bound_ports - assert node.port not in stage0__bound_ports - - assert stage0__bound_ports <= bound_ports - assert len(stage0__bound_ports) + 1 == len(bound_ports) - - stage1__bound_ports = stage0__bound_ports.copy() - stage1__bound_ports.add(node.port) - - assert stage1__bound_ports == bound_ports - - # check that port has been freed successfully - assert bound_ports is not None - assert type(bound_ports) == set # noqa: E721 - assert bound_ports == stage0__bound_ports - - # TODO: Why does not this test work with remote host? 
- def test_child_process_dies(self): - nAttempt = 0 - - while True: - if nAttempt == 5: - raise Exception("Max attempt number is exceed.") - - nAttempt += 1 - - logging.info("Attempt #{0}".format(nAttempt)) - - # test for FileNotFound exception during child_processes() function - with subprocess.Popen(["sleep", "60"]) as process: - r = process.poll() - - if r is not None: - logging.warning("process.pool() returns an unexpected result: {0}.".format(r)) - continue - - assert r is None - # collect list of processes currently running - children = psutil.Process(os.getpid()).children() - # kill a process, so received children dictionary becomes invalid - process.kill() - process.wait() - # try to handle children list -- missing processes will have ptype "ProcessType.Unknown" - [ProcessProxy(p) for p in children] - break - @staticmethod def helper__get_node(name=None): - assert isinstance(__class__.sm_os_ops, OsOperations) - return testgres.PostgresNode(name, conn_params=None, os_ops=__class__.sm_os_ops) + svc = PostgresNodeServices.sm_remote + + assert isinstance(svc, PostgresNodeService) + assert isinstance(svc.os_ops, testgres.OsOperations) + assert isinstance(svc.port_manager, testgres.PortManager) + + return testgres.PostgresNode( + name, + conn_params=None, + os_ops=svc.os_ops, + port_manager=svc.port_manager) @staticmethod def helper__restore_envvar(name, prev_value): From 307ef5fc523d2156d711b1493ee8d7d98f24ba82 Mon Sep 17 00:00:00 2001 From: Dmitry Kovalenko Date: Mon, 7 Apr 2025 07:31:53 +0300 Subject: [PATCH 68/90] Test refactoring (#236) * TestUtils is added * TestTestgresCommon::test_node_repr is added * test_get_pg_config2 moved to TestUtils * TestTestgresCommon.test_custom_init is added * TestConfig is added --- tests/test_config.py | 41 +++++++++++++++ tests/test_testgres_common.py | 54 ++++++++++---------- tests/test_testgres_local.py | 96 ++++------------------------------- tests/test_testgres_remote.py | 73 +++----------------------- tests/test_utils.py | 62 ++++++++++++++++++++++ 5 files changed, 145 insertions(+), 181 deletions(-) create mode 100644 tests/test_config.py create mode 100644 tests/test_utils.py diff --git a/tests/test_config.py b/tests/test_config.py new file mode 100644 index 00000000..05702e9a --- /dev/null +++ b/tests/test_config.py @@ -0,0 +1,41 @@ +from ..testgres import TestgresConfig +from ..testgres import configure_testgres +from ..testgres import scoped_config +from ..testgres import pop_config + +from .. 
import testgres + +import pytest + + +class TestConfig: + def test_config_stack(self): + # no such option + with pytest.raises(expected_exception=TypeError): + configure_testgres(dummy=True) + + # we have only 1 config in stack + with pytest.raises(expected_exception=IndexError): + pop_config() + + d0 = TestgresConfig.cached_initdb_dir + d1 = 'dummy_abc' + d2 = 'dummy_def' + + with scoped_config(cached_initdb_dir=d1) as c1: + assert (c1.cached_initdb_dir == d1) + + with scoped_config(cached_initdb_dir=d2) as c2: + stack_size = len(testgres.config.config_stack) + + # try to break a stack + with pytest.raises(expected_exception=TypeError): + with scoped_config(dummy=True): + pass + + assert (c2.cached_initdb_dir == d2) + assert (len(testgres.config.config_stack) == stack_size) + + assert (c1.cached_initdb_dir == d1) + + assert (TestgresConfig.cached_initdb_dir == d0) diff --git a/tests/test_testgres_common.py b/tests/test_testgres_common.py index b286a1c6..c384dfb2 100644 --- a/tests/test_testgres_common.py +++ b/tests/test_testgres_common.py @@ -6,7 +6,6 @@ from ..testgres.node import PgVer from ..testgres.node import PostgresNode from ..testgres.utils import get_pg_version2 -from ..testgres.utils import get_pg_config2 from ..testgres.utils import file_tail from ..testgres.utils import get_bin_path2 from ..testgres import ProcessType @@ -106,6 +105,32 @@ def test_version_management(self, node_svc: PostgresNodeService): assert (isinstance(node.version, PgVer)) assert (node.version == PgVer(version)) + def test_node_repr(self, node_svc: PostgresNodeService): + with __class__.helper__get_node(node_svc).init() as node: + pattern = r"PostgresNode\(name='.+', port=.+, base_dir='.+'\)" + assert re.match(pattern, str(node)) is not None + + def test_custom_init(self, node_svc: PostgresNodeService): + assert isinstance(node_svc, PostgresNodeService) + + with __class__.helper__get_node(node_svc) as node: + # enable page checksums + node.init(initdb_params=['-k']).start() + + with __class__.helper__get_node(node_svc) as node: + node.init( + allow_streaming=True, + initdb_params=['--auth-local=reject', '--auth-host=reject']) + + hba_file = os.path.join(node.data_dir, 'pg_hba.conf') + lines = node.os_ops.readlines(hba_file) + + # check number of lines + assert (len(lines) >= 6) + + # there should be no trust entries at all + assert not (any('trust' in s for s in lines)) + def test_double_init(self, node_svc: PostgresNodeService): assert isinstance(node_svc, PostgresNodeService) @@ -1075,33 +1100,6 @@ def test_dump(self, node_svc: PostgresNodeService): res = node3.execute(query_select) assert (res == [(1, ), (2, )]) - def test_get_pg_config2(self, node_svc: PostgresNodeService): - assert isinstance(node_svc, PostgresNodeService) - - # check same instances - a = get_pg_config2(node_svc.os_ops, None) - b = get_pg_config2(node_svc.os_ops, None) - assert (id(a) == id(b)) - - # save right before config change - c1 = get_pg_config2(node_svc.os_ops, None) - - # modify setting for this scope - with scoped_config(cache_pg_config=False) as config: - # sanity check for value - assert not (config.cache_pg_config) - - # save right after config change - c2 = get_pg_config2(node_svc.os_ops, None) - - # check different instances after config change - assert (id(c1) != id(c2)) - - # check different instances - a = get_pg_config2(node_svc.os_ops, None) - b = get_pg_config2(node_svc.os_ops, None) - assert (id(a) != id(b)) - def test_pgbench(self, node_svc: PostgresNodeService): assert isinstance(node_svc, 
PostgresNodeService) diff --git a/tests/test_testgres_local.py b/tests/test_testgres_local.py index bef80d0f..9dbd455b 100644 --- a/tests/test_testgres_local.py +++ b/tests/test_testgres_local.py @@ -9,28 +9,18 @@ from .. import testgres -from ..testgres import \ - StartNodeException, \ - ExecUtilException, \ - NodeApp - -from ..testgres import \ - TestgresConfig, \ - configure_testgres, \ - scoped_config, \ - pop_config - -from ..testgres import \ - get_new_node - -from ..testgres import \ - get_bin_path, \ - get_pg_config, \ - get_pg_version +from ..testgres import StartNodeException +from ..testgres import ExecUtilException +from ..testgres import NodeApp +from ..testgres import scoped_config +from ..testgres import get_new_node +from ..testgres import get_bin_path +from ..testgres import get_pg_config +from ..testgres import get_pg_version # NOTE: those are ugly imports -from ..testgres import bound_ports -from ..testgres.utils import PgVer, parse_pg_version +from ..testgres.utils import bound_ports +from ..testgres.utils import PgVer from ..testgres.node import ProcessProxy @@ -75,31 +65,6 @@ def rm_carriage_returns(out): class TestTestgresLocal: - def test_node_repr(self): - with get_new_node() as node: - pattern = r"PostgresNode\(name='.+', port=.+, base_dir='.+'\)" - assert re.match(pattern, str(node)) is not None - - def test_custom_init(self): - with get_new_node() as node: - # enable page checksums - node.init(initdb_params=['-k']).start() - - with get_new_node() as node: - node.init( - allow_streaming=True, - initdb_params=['--auth-local=reject', '--auth-host=reject']) - - hba_file = os.path.join(node.data_dir, 'pg_hba.conf') - with open(hba_file, 'r') as conf: - lines = conf.readlines() - - # check number of lines - assert (len(lines) >= 6) - - # there should be no trust entries at all - assert not (any('trust' in s for s in lines)) - def test_pg_config(self): # check same instances a = get_pg_config() @@ -125,37 +90,6 @@ def test_pg_config(self): b = get_pg_config() assert (id(a) != id(b)) - def test_config_stack(self): - # no such option - with pytest.raises(expected_exception=TypeError): - configure_testgres(dummy=True) - - # we have only 1 config in stack - with pytest.raises(expected_exception=IndexError): - pop_config() - - d0 = TestgresConfig.cached_initdb_dir - d1 = 'dummy_abc' - d2 = 'dummy_def' - - with scoped_config(cached_initdb_dir=d1) as c1: - assert (c1.cached_initdb_dir == d1) - - with scoped_config(cached_initdb_dir=d2) as c2: - stack_size = len(testgres.config.config_stack) - - # try to break a stack - with pytest.raises(expected_exception=TypeError): - with scoped_config(dummy=True): - pass - - assert (c2.cached_initdb_dir == d2) - assert (len(testgres.config.config_stack) == stack_size) - - assert (c1.cached_initdb_dir == d1) - - assert (TestgresConfig.cached_initdb_dir == d0) - def test_ports_management(self): assert bound_ports is not None assert type(bound_ports) == set # noqa: E721 @@ -234,16 +168,6 @@ def test_upgrade_node(self): node_new.start() assert (b'Upgrade Complete' in res) - def test_parse_pg_version(self): - # Linux Mint - assert parse_pg_version("postgres (PostgreSQL) 15.5 (Ubuntu 15.5-1.pgdg22.04+1)") == "15.5" - # Linux Ubuntu - assert parse_pg_version("postgres (PostgreSQL) 12.17") == "12.17" - # Windows - assert parse_pg_version("postgres (PostgreSQL) 11.4") == "11.4" - # Macos - assert parse_pg_version("postgres (PostgreSQL) 14.9 (Homebrew)") == "14.9" - class tagPortManagerProxy: sm_prev_testgres_reserve_port = None 
sm_prev_testgres_release_port = None diff --git a/tests/test_testgres_remote.py b/tests/test_testgres_remote.py index ef4bd0c8..e38099b7 100755 --- a/tests/test_testgres_remote.py +++ b/tests/test_testgres_remote.py @@ -1,6 +1,5 @@ # coding: utf-8 import os -import re import pytest import logging @@ -10,19 +9,14 @@ from .. import testgres -from ..testgres.exceptions import \ - InitNodeException, \ - ExecUtilException +from ..testgres.exceptions import InitNodeException +from ..testgres.exceptions import ExecUtilException -from ..testgres.config import \ - TestgresConfig, \ - configure_testgres, \ - scoped_config, \ - pop_config, testgres_config +from ..testgres.config import scoped_config +from ..testgres.config import testgres_config -from ..testgres import \ - get_bin_path, \ - get_pg_config +from ..testgres import get_bin_path +from ..testgres import get_pg_config # NOTE: those are ugly imports @@ -58,30 +52,6 @@ def implicit_fixture(self): testgres_config.set_os_ops(os_ops=prev_ops) assert testgres_config.os_ops is prev_ops - def test_node_repr(self): - with __class__.helper__get_node() as node: - pattern = r"PostgresNode\(name='.+', port=.+, base_dir='.+'\)" - assert re.match(pattern, str(node)) is not None - - def test_custom_init(self): - with __class__.helper__get_node() as node: - # enable page checksums - node.init(initdb_params=['-k']).start() - - with __class__.helper__get_node() as node: - node.init( - allow_streaming=True, - initdb_params=['--auth-local=reject', '--auth-host=reject']) - - hba_file = os.path.join(node.data_dir, 'pg_hba.conf') - lines = node.os_ops.readlines(hba_file) - - # check number of lines - assert (len(lines) >= 6) - - # there should be no trust entries at all - assert not (any('trust' in s for s in lines)) - def test_init__LANG_С(self): # PBCKP-1744 prev_LANG = os.environ.get("LANG") @@ -193,37 +163,6 @@ def test_pg_config(self): b = get_pg_config() assert (id(a) != id(b)) - def test_config_stack(self): - # no such option - with pytest.raises(expected_exception=TypeError): - configure_testgres(dummy=True) - - # we have only 1 config in stack - with pytest.raises(expected_exception=IndexError): - pop_config() - - d0 = TestgresConfig.cached_initdb_dir - d1 = 'dummy_abc' - d2 = 'dummy_def' - - with scoped_config(cached_initdb_dir=d1) as c1: - assert (c1.cached_initdb_dir == d1) - - with scoped_config(cached_initdb_dir=d2) as c2: - stack_size = len(testgres.config.config_stack) - - # try to break a stack - with pytest.raises(expected_exception=TypeError): - with scoped_config(dummy=True): - pass - - assert (c2.cached_initdb_dir == d2) - assert (len(testgres.config.config_stack) == stack_size) - - assert (c1.cached_initdb_dir == d1) - - assert (TestgresConfig.cached_initdb_dir == d0) - @staticmethod def helper__get_node(name=None): svc = PostgresNodeServices.sm_remote diff --git a/tests/test_utils.py b/tests/test_utils.py new file mode 100644 index 00000000..d4a4c9ad --- /dev/null +++ b/tests/test_utils.py @@ -0,0 +1,62 @@ +from .helpers.global_data import OsOpsDescr +from .helpers.global_data import OsOpsDescrs +from .helpers.global_data import OsOperations + +from ..testgres.utils import parse_pg_version +from ..testgres.utils import get_pg_config2 +from ..testgres import scoped_config + +import pytest + + +class TestUtils: + sm_os_ops_descrs: list[OsOpsDescr] = [ + OsOpsDescrs.sm_local_os_ops_descr, + OsOpsDescrs.sm_remote_os_ops_descr + ] + + @pytest.fixture( + params=[descr.os_ops for descr in sm_os_ops_descrs], + ids=[descr.sign for descr in 
sm_os_ops_descrs] + ) + def os_ops(self, request: pytest.FixtureRequest) -> OsOperations: + assert isinstance(request, pytest.FixtureRequest) + assert isinstance(request.param, OsOperations) + return request.param + + def test_parse_pg_version(self): + # Linux Mint + assert parse_pg_version("postgres (PostgreSQL) 15.5 (Ubuntu 15.5-1.pgdg22.04+1)") == "15.5" + # Linux Ubuntu + assert parse_pg_version("postgres (PostgreSQL) 12.17") == "12.17" + # Windows + assert parse_pg_version("postgres (PostgreSQL) 11.4") == "11.4" + # Macos + assert parse_pg_version("postgres (PostgreSQL) 14.9 (Homebrew)") == "14.9" + + def test_get_pg_config2(self, os_ops: OsOperations): + assert isinstance(os_ops, OsOperations) + + # check same instances + a = get_pg_config2(os_ops, None) + b = get_pg_config2(os_ops, None) + assert (id(a) == id(b)) + + # save right before config change + c1 = get_pg_config2(os_ops, None) + + # modify setting for this scope + with scoped_config(cache_pg_config=False) as config: + # sanity check for value + assert not (config.cache_pg_config) + + # save right after config change + c2 = get_pg_config2(os_ops, None) + + # check different instances after config change + assert (id(c1) != id(c2)) + + # check different instances + a = get_pg_config2(os_ops, None) + b = get_pg_config2(os_ops, None) + assert (id(a) != id(b)) From da2c493473fb124612a8e2f1baa74ac6b6ff980e Mon Sep 17 00:00:00 2001 From: Dmitry Kovalenko Date: Mon, 7 Apr 2025 12:38:03 +0300 Subject: [PATCH 69/90] OsOperation::db_connect is removed (#237) * OsOperation::db_connect is removed OsOperation does not work with databases. It provides an only OS functional. TODO: CI must explicitly test pg8000 and psycopg2. --- testgres/connection.py | 12 +++++++----- testgres/operations/local_ops.py | 13 +------------ testgres/operations/os_ops.py | 12 ------------ testgres/operations/remote_ops.py | 20 -------------------- 4 files changed, 8 insertions(+), 49 deletions(-) diff --git a/testgres/connection.py b/testgres/connection.py index ccedd135..b8dc49a9 100644 --- a/testgres/connection.py +++ b/testgres/connection.py @@ -42,11 +42,13 @@ def __init__(self, self._node = node - self._connection = node.os_ops.db_connect(dbname=dbname, - user=username, - password=password, - host=node.host, - port=node.port) + self._connection = pglib.connect( + database=dbname, + user=username, + password=password, + host=node.host, + port=node.port + ) self._connection.autocommit = autocommit self._cursor = self.connection.cursor() diff --git a/testgres/operations/local_ops.py b/testgres/operations/local_ops.py index 39c81405..9785d462 100644 --- a/testgres/operations/local_ops.py +++ b/testgres/operations/local_ops.py @@ -12,7 +12,7 @@ from ..exceptions import ExecUtilException from ..exceptions import InvalidOperationException -from .os_ops import ConnectionParams, OsOperations, pglib, get_default_encoding +from .os_ops import ConnectionParams, OsOperations, get_default_encoding from .raise_error import RaiseError from .helpers import Helpers @@ -446,14 +446,3 @@ def is_port_free(self, number: int) -> bool: return True except OSError: return False - - # Database control - def db_connect(self, dbname, user, password=None, host="localhost", port=5432): - conn = pglib.connect( - host=host, - port=port, - database=dbname, - user=user, - password=password, - ) - return conn diff --git a/testgres/operations/os_ops.py b/testgres/operations/os_ops.py index 489a7cb2..d25e76bc 100644 --- a/testgres/operations/os_ops.py +++ b/testgres/operations/os_ops.py @@ 
-1,14 +1,6 @@ import getpass import locale -try: - import psycopg2 as pglib # noqa: F401 -except ImportError: - try: - import pg8000 as pglib # noqa: F401 - except ImportError: - raise ImportError("You must have psycopg2 or pg8000 modules installed") - class ConnectionParams: def __init__(self, host='127.0.0.1', port=None, ssh_key=None, username=None): @@ -130,7 +122,3 @@ def get_process_children(self, pid): def is_port_free(self, number: int): assert type(number) == int # noqa: E721 raise NotImplementedError() - - # Database control - def db_connect(self, dbname, user, password=None, host="localhost", port=5432): - raise NotImplementedError() diff --git a/testgres/operations/remote_ops.py b/testgres/operations/remote_ops.py index ee747e52..25d02f38 100644 --- a/testgres/operations/remote_ops.py +++ b/testgres/operations/remote_ops.py @@ -6,15 +6,6 @@ import io import logging -# we support both pg8000 and psycopg2 -try: - import psycopg2 as pglib -except ImportError: - try: - import pg8000 as pglib - except ImportError: - raise ImportError("You must have psycopg2 or pg8000 modules installed") - from ..exceptions import ExecUtilException from ..exceptions import InvalidOperationException from .os_ops import OsOperations, ConnectionParams, get_default_encoding @@ -677,17 +668,6 @@ def _is_port_free__process_1(error: str) -> bool: # return True - # Database control - def db_connect(self, dbname, user, password=None, host="localhost", port=5432): - conn = pglib.connect( - host=host, - port=port, - database=dbname, - user=user, - password=password, - ) - return conn - @staticmethod def _make_exec_env_list() -> list[str]: result = list[str]() From 24014744bd031e34c2e2490127c40613edec01d5 Mon Sep 17 00:00:00 2001 From: Dmitry Kovalenko Date: Thu, 10 Apr 2025 09:07:05 +0300 Subject: [PATCH 70/90] A support of Python v3.8 (#238) * A support of Python 3.8 [typing] Python 3.8 does not support tuple[...], list[...], set[...] and so on. We will use the analogues from typing package: typing.Tuple[...], typing.List[...] and typing.Set[...]. 
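  A minimal illustration of this substitution (the names below are arbitrary examples;
  subscripting the built-in containers, e.g. list[str] or tuple[str, int], is only
  available on Python 3.9+, while the typing equivalents also work on Python 3.8):

      import typing

      names: typing.List[str] = ["node1", "node2"]          # instead of list[str]
      addr: typing.Tuple[str, int] = ("localhost", 5432)    # instead of tuple[str, int]
      ports: typing.Set[int] = {5432, 5433}                 # instead of set[int]
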
* [CI] Jobs for testing with python 3.8.0, 3.8.latest, 3.9.latest, 3.10.latest and 3.11.latest [std2-all][alpine] are added --- .travis.yml | 5 ++ Dockerfile--std2-all.tmpl | 96 +++++++++++++++++++++++++++++++ run_tests2.sh | 68 ++++++++++++++++++++++ testgres/operations/remote_ops.py | 5 +- testgres/port_manager.py | 9 +-- tests/conftest.py | 22 ++++--- tests/test_os_ops_common.py | 3 +- tests/test_testgres_common.py | 10 ++-- tests/test_utils.py | 3 +- 9 files changed, 199 insertions(+), 22 deletions(-) create mode 100644 Dockerfile--std2-all.tmpl create mode 100755 run_tests2.sh diff --git a/.travis.yml b/.travis.yml index 7557a2ce..55b7afa9 100644 --- a/.travis.yml +++ b/.travis.yml @@ -19,6 +19,11 @@ notifications: on_failure: always env: + - TEST_PLATFORM=std2-all PYTHON_VERSION=3.8.0 PG_VERSION=17 + - TEST_PLATFORM=std2-all PYTHON_VERSION=3.8 PG_VERSION=17 + - TEST_PLATFORM=std2-all PYTHON_VERSION=3.9 PG_VERSION=17 + - TEST_PLATFORM=std2-all PYTHON_VERSION=3.10 PG_VERSION=17 + - TEST_PLATFORM=std2-all PYTHON_VERSION=3.11 PG_VERSION=17 - TEST_PLATFORM=std PYTHON_VERSION=3 PG_VERSION=16 - TEST_PLATFORM=std PYTHON_VERSION=3 PG_VERSION=15 - TEST_PLATFORM=std PYTHON_VERSION=3 PG_VERSION=14 diff --git a/Dockerfile--std2-all.tmpl b/Dockerfile--std2-all.tmpl new file mode 100644 index 00000000..10d8280c --- /dev/null +++ b/Dockerfile--std2-all.tmpl @@ -0,0 +1,96 @@ +ARG PG_VERSION +ARG PYTHON_VERSION + +# --------------------------------------------- base1 +FROM postgres:${PG_VERSION}-alpine as base1 + +# --------------------------------------------- base2_with_python-3 +FROM base1 as base2_with_python-3 +RUN apk add --no-cache curl python3 python3-dev build-base musl-dev linux-headers + +# For pyenv +RUN apk add patch +RUN apk add git +RUN apk add xz-dev +RUN apk add zip +RUN apk add zlib-dev +RUN apk add libffi-dev +RUN apk add readline-dev +RUN apk add openssl openssl-dev +RUN apk add sqlite-dev +RUN apk add bzip2-dev + +# --------------------------------------------- base3_with_python-3.8.0 +FROM base2_with_python-3 as base3_with_python-3.8.0 +ENV PYTHON_VERSION=3.8.0 + +# --------------------------------------------- base3_with_python-3.8 +FROM base2_with_python-3 as base3_with_python-3.8 +ENV PYTHON_VERSION=3.8 + +# --------------------------------------------- base3_with_python-3.9 +FROM base2_with_python-3 as base3_with_python-3.9 +ENV PYTHON_VERSION=3.9 + +# --------------------------------------------- base3_with_python-3.10 +FROM base2_with_python-3 as base3_with_python-3.10 +ENV PYTHON_VERSION=3.10 + +# --------------------------------------------- base3_with_python-3.11 +FROM base2_with_python-3 as base3_with_python-3.11 +ENV PYTHON_VERSION=3.11 + +# --------------------------------------------- final +FROM base3_with_python-${PYTHON_VERSION} as final + +#RUN apk add --no-cache mc + +# Full version of "ps" command +RUN apk add --no-cache procps + +RUN apk add --no-cache openssh +RUN apk add --no-cache sudo + +ENV LANG=C.UTF-8 + +RUN addgroup -S sudo +RUN adduser postgres sudo + +EXPOSE 22 +RUN ssh-keygen -A + +ADD . 
/pg/testgres +WORKDIR /pg/testgres +RUN chown -R postgres:postgres /pg + +# It allows to use sudo without password +RUN sh -c "echo \"postgres ALL=(ALL:ALL) NOPASSWD:ALL\"">>/etc/sudoers + +# THIS CMD IS NEEDED TO CONNECT THROUGH SSH WITHOUT PASSWORD +RUN sh -c "echo "postgres:*" | chpasswd -e" + +USER postgres + +RUN curl https://raw.githubusercontent.com/pyenv/pyenv-installer/master/bin/pyenv-installer | bash + +RUN ~/.pyenv/bin/pyenv install ${PYTHON_VERSION} + +# THIS CMD IS NEEDED TO CONNECT THROUGH SSH WITHOUT PASSWORD +RUN chmod 700 ~/ + +RUN mkdir -p ~/.ssh +#RUN chmod 700 ~/.ssh + +ENTRYPOINT sh -c " \ +set -eux; \ +echo HELLO FROM ENTRYPOINT; \ +echo HOME DIR IS [`realpath ~/`]; \ +ssh-keygen -t rsa -f ~/.ssh/id_rsa -q -N ''; \ +cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys; \ +chmod 600 ~/.ssh/authorized_keys; \ +ls -la ~/.ssh/; \ +sudo /usr/sbin/sshd; \ +ssh-keyscan -H localhost >> ~/.ssh/known_hosts; \ +ssh-keyscan -H 127.0.0.1 >> ~/.ssh/known_hosts; \ +export PATH=\"~/.pyenv/bin:$PATH\"; \ +TEST_FILTER=\"\" bash run_tests2.sh;" diff --git a/run_tests2.sh b/run_tests2.sh new file mode 100755 index 00000000..173b19dc --- /dev/null +++ b/run_tests2.sh @@ -0,0 +1,68 @@ +#!/usr/bin/env bash + +# Copyright (c) 2017-2025 Postgres Professional + +set -eux + +eval "$(pyenv init -)" +eval "$(pyenv virtualenv-init -)" + +pyenv virtualenv --force ${PYTHON_VERSION} cur +pyenv activate cur + +if [ -z ${TEST_FILTER+x} ]; \ +then export TEST_FILTER="TestTestgresLocal or (TestTestgresCommon and (not remote))"; \ +fi + +# fail early +echo check that pg_config is in PATH +command -v pg_config + +# prepare python environment +VENV_PATH="/tmp/testgres_venv" +rm -rf $VENV_PATH +python -m venv "${VENV_PATH}" +export VIRTUAL_ENV_DISABLE_PROMPT=1 +source "${VENV_PATH}/bin/activate" +pip install coverage flake8 psutil Sphinx pytest pytest-xdist psycopg2 six psutil + +# install testgres' dependencies +export PYTHONPATH=$(pwd) +# $PIP install . + +# test code quality +flake8 . + + +# remove existing coverage file +export COVERAGE_FILE=.coverage +rm -f $COVERAGE_FILE + + +# run tests (PATH) +time coverage run -a -m pytest -l -v -n 4 -k "${TEST_FILTER}" + + +# run tests (PG_BIN) +PG_BIN=$(pg_config --bindir) \ +time coverage run -a -m pytest -l -v -n 4 -k "${TEST_FILTER}" + + +# run tests (PG_CONFIG) +PG_CONFIG=$(pg_config --bindir)/pg_config \ +time coverage run -a -m pytest -l -v -n 4 -k "${TEST_FILTER}" + + +# show coverage +coverage report + +# build documentation +cd docs +make html +cd .. 
+ +# attempt to fix codecov +set +eux + +# send coverage stats to Codecov +bash <(curl -s https://codecov.io/bash) diff --git a/testgres/operations/remote_ops.py b/testgres/operations/remote_ops.py index 25d02f38..33b61ac2 100644 --- a/testgres/operations/remote_ops.py +++ b/testgres/operations/remote_ops.py @@ -5,6 +5,7 @@ import tempfile import io import logging +import typing from ..exceptions import ExecUtilException from ..exceptions import InvalidOperationException @@ -669,8 +670,8 @@ def _is_port_free__process_1(error: str) -> bool: return True @staticmethod - def _make_exec_env_list() -> list[str]: - result = list[str]() + def _make_exec_env_list() -> typing.List[str]: + result: typing.List[str] = list() for envvar in os.environ.items(): if not __class__._does_put_envvar_into_exec_cmd(envvar[0]): continue diff --git a/testgres/port_manager.py b/testgres/port_manager.py index 164661e7..e2530470 100644 --- a/testgres/port_manager.py +++ b/testgres/port_manager.py @@ -6,6 +6,7 @@ import threading import random +import typing class PortManager: @@ -50,16 +51,16 @@ class PortManager__Generic(PortManager): _os_ops: OsOperations _guard: object # TODO: is there better to use bitmap fot _available_ports? - _available_ports: set[int] - _reserved_ports: set[int] + _available_ports: typing.Set[int] + _reserved_ports: typing.Set[int] def __init__(self, os_ops: OsOperations): assert os_ops is not None assert isinstance(os_ops, OsOperations) self._os_ops = os_ops self._guard = threading.Lock() - self._available_ports = set[int](range(1024, 65535)) - self._reserved_ports = set[int]() + self._available_ports: typing.Set[int] = set(range(1024, 65535)) + self._reserved_ports: typing.Set[int] = set() def reserve_port(self) -> int: assert self._guard is not None diff --git a/tests/conftest.py b/tests/conftest.py index ff3b3cb4..6f2f9e41 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -26,6 +26,10 @@ class TestConfigPropNames: TEST_CFG__LOG_DIR = "TEST_CFG__LOG_DIR" +# ///////////////////////////////////////////////////////////////////////////// + +T_TUPLE__str_int = typing.Tuple[str, int] + # ///////////////////////////////////////////////////////////////////////////// # TestStartupData__Helper @@ -110,11 +114,11 @@ class TEST_PROCESS_STATS: cUnexpectedTests: int = 0 cAchtungTests: int = 0 - FailedTests = list[str, int]() - XFailedTests = list[str, int]() - NotXFailedTests = list[str]() - WarningTests = list[str, int]() - AchtungTests = list[str]() + FailedTests: typing.List[T_TUPLE__str_int] = list() + XFailedTests: typing.List[T_TUPLE__str_int] = list() + NotXFailedTests: typing.List[str] = list() + WarningTests: typing.List[T_TUPLE__str_int] = list() + AchtungTests: typing.List[str] = list() cTotalDuration: datetime.timedelta = datetime.timedelta() @@ -769,7 +773,7 @@ def helper__calc_W(n: int) -> int: # ------------------------------------------------------------------------ -def helper__print_test_list(tests: list[str]) -> None: +def helper__print_test_list(tests: typing.List[str]) -> None: assert type(tests) == list # noqa: E721 assert helper__calc_W(9) == 1 @@ -796,7 +800,7 @@ def helper__print_test_list(tests: list[str]) -> None: # ------------------------------------------------------------------------ -def helper__print_test_list2(tests: list[str, int]) -> None: +def helper__print_test_list2(tests: typing.List[T_TUPLE__str_int]) -> None: assert type(tests) == list # noqa: E721 assert helper__calc_W(9) == 1 @@ -843,7 +847,7 @@ def LOCAL__print_line1_with_header(header: str): assert 
header != "" logging.info(C_LINE1 + " [" + header + "]") - def LOCAL__print_test_list(header: str, test_count: int, test_list: list[str]): + def LOCAL__print_test_list(header: str, test_count: int, test_list: typing.List[str]): assert type(header) == str # noqa: E721 assert type(test_count) == int # noqa: E721 assert type(test_list) == list # noqa: E721 @@ -858,7 +862,7 @@ def LOCAL__print_test_list(header: str, test_count: int, test_list: list[str]): logging.info("") def LOCAL__print_test_list2( - header: str, test_count: int, test_list: list[str, int] + header: str, test_count: int, test_list: typing.List[T_TUPLE__str_int] ): assert type(header) == str # noqa: E721 assert type(test_count) == int # noqa: E721 diff --git a/tests/test_os_ops_common.py b/tests/test_os_ops_common.py index 7d183775..ecfff5b2 100644 --- a/tests/test_os_ops_common.py +++ b/tests/test_os_ops_common.py @@ -12,13 +12,14 @@ import logging import socket import threading +import typing from ..testgres import InvalidOperationException from ..testgres import ExecUtilException class TestOsOpsCommon: - sm_os_ops_descrs: list[OsOpsDescr] = [ + sm_os_ops_descrs: typing.List[OsOpsDescr] = [ OsOpsDescrs.sm_local_os_ops_descr, OsOpsDescrs.sm_remote_os_ops_descr ] diff --git a/tests/test_testgres_common.py b/tests/test_testgres_common.py index c384dfb2..e1252de2 100644 --- a/tests/test_testgres_common.py +++ b/tests/test_testgres_common.py @@ -56,7 +56,7 @@ def removing(os_ops: OsOperations, f): class TestTestgresCommon: - sm_node_svcs: list[PostgresNodeService] = [ + sm_node_svcs: typing.List[PostgresNodeService] = [ PostgresNodeServices.sm_local, PostgresNodeServices.sm_local2, PostgresNodeServices.sm_remote, @@ -315,8 +315,8 @@ def test_child_pids(self, node_svc: PostgresNodeService): def LOCAL__test_auxiliary_pids( node: PostgresNode, - expectedTypes: list[ProcessType] - ) -> list[ProcessType]: + expectedTypes: typing.List[ProcessType] + ) -> typing.List[ProcessType]: # returns list of the absence processes assert node is not None assert type(node) == PostgresNode # noqa: E721 @@ -327,7 +327,7 @@ def LOCAL__test_auxiliary_pids( assert pids is not None # noqa: E721 assert type(pids) == dict # noqa: E721 - result = list[ProcessType]() + result: typing.List[ProcessType] = list() for ptype in expectedTypes: if not (ptype in pids): result.append(ptype) @@ -335,7 +335,7 @@ def LOCAL__test_auxiliary_pids( def LOCAL__check_auxiliary_pids__multiple_attempts( node: PostgresNode, - expectedTypes: list[ProcessType]): + expectedTypes: typing.List[ProcessType]): assert node is not None assert type(node) == PostgresNode # noqa: E721 assert expectedTypes is not None diff --git a/tests/test_utils.py b/tests/test_utils.py index d4a4c9ad..c05bd2fe 100644 --- a/tests/test_utils.py +++ b/tests/test_utils.py @@ -7,10 +7,11 @@ from ..testgres import scoped_config import pytest +import typing class TestUtils: - sm_os_ops_descrs: list[OsOpsDescr] = [ + sm_os_ops_descrs: typing.List[OsOpsDescr] = [ OsOpsDescrs.sm_local_os_ops_descr, OsOpsDescrs.sm_remote_os_ops_descr ] From ac782bb3354c596171c9d0498494fbb4b828463b Mon Sep 17 00:00:00 2001 From: Dmitry Kovalenko Date: Tue, 15 Apr 2025 14:50:35 +0300 Subject: [PATCH 71/90] [New] OsOps::execute_command supports a transfer of environment variables (exec_env) (#239) * [New] OsOps::execute_command supports a transfer of environment variables (exec_env) New feature allows to pass environment variables to an executed program. If variable in exec_env has None value, then this variable will be unset. 
PostgresNode::start and PostgresNode::slow_start supports exec_env. --- testgres/node.py | 12 +++--- testgres/operations/local_ops.py | 66 ++++++++++++++++++++++++++++--- testgres/operations/remote_ops.py | 44 ++++++++++++++++----- testgres/utils.py | 13 +++++- tests/test_os_ops_common.py | 64 ++++++++++++++++++++++++++++++ 5 files changed, 177 insertions(+), 22 deletions(-) diff --git a/testgres/node.py b/testgres/node.py index 5039fc43..3a294044 100644 --- a/testgres/node.py +++ b/testgres/node.py @@ -1020,7 +1020,7 @@ def get_control_data(self): return out_dict - def slow_start(self, replica=False, dbname='template1', username=None, max_attempts=0): + def slow_start(self, replica=False, dbname='template1', username=None, max_attempts=0, exec_env=None): """ Starts the PostgreSQL instance and then polls the instance until it reaches the expected state (primary or replica). The state is checked @@ -1033,7 +1033,9 @@ def slow_start(self, replica=False, dbname='template1', username=None, max_attem If False, waits for the instance to be in primary mode. Default is False. max_attempts: """ - self.start() + assert exec_env is None or type(exec_env) == dict # noqa: E721 + + self.start(exec_env=exec_env) if replica: query = 'SELECT pg_is_in_recovery()' @@ -1065,7 +1067,7 @@ def _detect_port_conflict(self, log_files0, log_files1): return True return False - def start(self, params=[], wait=True): + def start(self, params=[], wait=True, exec_env=None): """ Starts the PostgreSQL node using pg_ctl if node has not been started. By default, it waits for the operation to complete before returning. @@ -1079,7 +1081,7 @@ def start(self, params=[], wait=True): Returns: This instance of :class:`.PostgresNode`. """ - + assert exec_env is None or type(exec_env) == dict # noqa: E721 assert __class__._C_MAX_START_ATEMPTS > 1 if self.is_started: @@ -1098,7 +1100,7 @@ def start(self, params=[], wait=True): def LOCAL__start_node(): # 'error' will be None on Windows - _, _, error = execute_utility2(self.os_ops, _params, self.utils_log_file, verbose=True) + _, _, error = execute_utility2(self.os_ops, _params, self.utils_log_file, verbose=True, exec_env=exec_env) assert error is None or type(error) == str # noqa: E721 if error and 'does not exist' in error: raise Exception(error) diff --git a/testgres/operations/local_ops.py b/testgres/operations/local_ops.py index 9785d462..74323bb8 100644 --- a/testgres/operations/local_ops.py +++ b/testgres/operations/local_ops.py @@ -9,6 +9,7 @@ import socket import psutil +import typing from ..exceptions import ExecUtilException from ..exceptions import InvalidOperationException @@ -46,9 +47,34 @@ def _process_output(encoding, temp_file_path): output = output.decode(encoding) return output, None # In Windows stderr writing in stdout - def _run_command__nt(self, cmd, shell, input, stdin, stdout, stderr, get_process, timeout, encoding): + def _run_command__nt(self, cmd, shell, input, stdin, stdout, stderr, get_process, timeout, encoding, exec_env=None): + assert exec_env is None or type(exec_env) == dict # noqa: E721 + # TODO: why don't we use the data from input? 
+ extParams: typing.Dict[str, str] = dict() + + if exec_env is None: + pass + elif len(exec_env) == 0: + pass + else: + env = os.environ.copy() + assert type(env) == dict # noqa: E721 + for v in exec_env.items(): + assert type(v) == tuple # noqa: E721 + assert len(v) == 2 + assert type(v[0]) == str # noqa: E721 + assert v[0] != "" + + if v[1] is None: + env.pop(v[0], None) + else: + assert type(v[1]) == str # noqa: E721 + env[v[0]] = v[1] + + extParams["env"] = env + with tempfile.NamedTemporaryFile(mode='w+b', delete=False) as temp_file: stdout = temp_file stderr = subprocess.STDOUT @@ -58,6 +84,7 @@ def _run_command__nt(self, cmd, shell, input, stdin, stdout, stderr, get_process stdin=stdin or subprocess.PIPE if input is not None else None, stdout=stdout, stderr=stderr, + **extParams, ) if get_process: return process, None, None @@ -69,19 +96,45 @@ def _run_command__nt(self, cmd, shell, input, stdin, stdout, stderr, get_process output, error = self._process_output(encoding, temp_file_path) return process, output, error - def _run_command__generic(self, cmd, shell, input, stdin, stdout, stderr, get_process, timeout, encoding): + def _run_command__generic(self, cmd, shell, input, stdin, stdout, stderr, get_process, timeout, encoding, exec_env=None): + assert exec_env is None or type(exec_env) == dict # noqa: E721 + input_prepared = None if not get_process: input_prepared = Helpers.PrepareProcessInput(input, encoding) # throw assert input_prepared is None or (type(input_prepared) == bytes) # noqa: E721 + extParams: typing.Dict[str, str] = dict() + + if exec_env is None: + pass + elif len(exec_env) == 0: + pass + else: + env = os.environ.copy() + assert type(env) == dict # noqa: E721 + for v in exec_env.items(): + assert type(v) == tuple # noqa: E721 + assert len(v) == 2 + assert type(v[0]) == str # noqa: E721 + assert v[0] != "" + + if v[1] is None: + env.pop(v[0], None) + else: + assert type(v[1]) == str # noqa: E721 + env[v[0]] = v[1] + + extParams["env"] = env + process = subprocess.Popen( cmd, shell=shell, stdin=stdin or subprocess.PIPE if input is not None else None, stdout=stdout or subprocess.PIPE, stderr=stderr or subprocess.PIPE, + **extParams ) assert not (process is None) if get_process: @@ -100,25 +153,26 @@ def _run_command__generic(self, cmd, shell, input, stdin, stdout, stderr, get_pr error = error.decode(encoding) return process, output, error - def _run_command(self, cmd, shell, input, stdin, stdout, stderr, get_process, timeout, encoding): + def _run_command(self, cmd, shell, input, stdin, stdout, stderr, get_process, timeout, encoding, exec_env=None): """Execute a command and return the process and its output.""" if os.name == 'nt' and stdout is None: # Windows method = __class__._run_command__nt else: # Other OS method = __class__._run_command__generic - return method(self, cmd, shell, input, stdin, stdout, stderr, get_process, timeout, encoding) + return method(self, cmd, shell, input, stdin, stdout, stderr, get_process, timeout, encoding, exec_env=exec_env) def exec_command(self, cmd, wait_exit=False, verbose=False, expect_error=False, encoding=None, shell=False, text=False, input=None, stdin=None, stdout=None, stderr=None, get_process=False, timeout=None, - ignore_errors=False): + ignore_errors=False, exec_env=None): """ Execute a command in a subprocess and handle the output based on the provided parameters. 
""" assert type(expect_error) == bool # noqa: E721 assert type(ignore_errors) == bool # noqa: E721 + assert exec_env is None or type(exec_env) == dict # noqa: E721 - process, output, error = self._run_command(cmd, shell, input, stdin, stdout, stderr, get_process, timeout, encoding) + process, output, error = self._run_command(cmd, shell, input, stdin, stdout, stderr, get_process, timeout, encoding, exec_env=exec_env) if get_process: return process diff --git a/testgres/operations/remote_ops.py b/testgres/operations/remote_ops.py index 33b61ac2..e722a2cb 100644 --- a/testgres/operations/remote_ops.py +++ b/testgres/operations/remote_ops.py @@ -64,7 +64,8 @@ def __enter__(self): def exec_command(self, cmd, wait_exit=False, verbose=False, expect_error=False, encoding=None, shell=True, text=False, input=None, stdin=None, stdout=None, - stderr=None, get_process=None, timeout=None, ignore_errors=False): + stderr=None, get_process=None, timeout=None, ignore_errors=False, + exec_env=None): """ Execute a command in the SSH session. Args: @@ -72,6 +73,7 @@ def exec_command(self, cmd, wait_exit=False, verbose=False, expect_error=False, """ assert type(expect_error) == bool # noqa: E721 assert type(ignore_errors) == bool # noqa: E721 + assert exec_env is None or type(exec_env) == dict # noqa: E721 input_prepared = None if not get_process: @@ -88,7 +90,7 @@ def exec_command(self, cmd, wait_exit=False, verbose=False, expect_error=False, assert type(cmd_s) == str # noqa: E721 - cmd_items = __class__._make_exec_env_list() + cmd_items = __class__._make_exec_env_list(exec_env=exec_env) cmd_items.append(cmd_s) env_cmd_s = ';'.join(cmd_items) @@ -670,14 +672,38 @@ def _is_port_free__process_1(error: str) -> bool: return True @staticmethod - def _make_exec_env_list() -> typing.List[str]: - result: typing.List[str] = list() + def _make_exec_env_list(exec_env: typing.Dict) -> typing.List[str]: + env: typing.Dict[str, str] = dict() + + # ---------------------------------- SYSTEM ENV for envvar in os.environ.items(): - if not __class__._does_put_envvar_into_exec_cmd(envvar[0]): - continue - qvalue = __class__._quote_envvar(envvar[1]) - assert type(qvalue) == str # noqa: E721 - result.append(envvar[0] + "=" + qvalue) + if __class__._does_put_envvar_into_exec_cmd(envvar[0]): + env[envvar[0]] = envvar[1] + + # ---------------------------------- EXEC (LOCAL) ENV + if exec_env is None: + pass + else: + for envvar in exec_env.items(): + assert type(envvar) == tuple # noqa: E721 + assert len(envvar) == 2 + assert type(envvar[0]) == str # noqa: E721 + env[envvar[0]] = envvar[1] + + # ---------------------------------- FINAL BUILD + result: typing.List[str] = list() + for envvar in env.items(): + assert type(envvar) == tuple # noqa: E721 + assert len(envvar) == 2 + assert type(envvar[0]) == str # noqa: E721 + + if envvar[1] is None: + result.append("unset " + envvar[0]) + else: + assert type(envvar[1]) == str # noqa: E721 + qvalue = __class__._quote_envvar(envvar[1]) + assert type(qvalue) == str # noqa: E721 + result.append(envvar[0] + "=" + qvalue) continue return result diff --git a/testgres/utils.py b/testgres/utils.py index 10ae81b6..2ff6f2a0 100644 --- a/testgres/utils.py +++ b/testgres/utils.py @@ -96,17 +96,26 @@ def execute_utility(args, logfile=None, verbose=False): return execute_utility2(tconf.os_ops, args, logfile, verbose) -def execute_utility2(os_ops: OsOperations, args, logfile=None, verbose=False, ignore_errors=False): +def execute_utility2( + os_ops: OsOperations, + args, + logfile=None, + verbose=False, + 
ignore_errors=False, + exec_env=None, +): assert os_ops is not None assert isinstance(os_ops, OsOperations) assert type(verbose) == bool # noqa: E721 assert type(ignore_errors) == bool # noqa: E721 + assert exec_env is None or type(exec_env) == dict # noqa: E721 exit_status, out, error = os_ops.exec_command( args, verbose=True, ignore_errors=ignore_errors, - encoding=OsHelpers.GetDefaultEncoding()) + encoding=OsHelpers.GetDefaultEncoding(), + exec_env=exec_env) out = '' if not out else out diff --git a/tests/test_os_ops_common.py b/tests/test_os_ops_common.py index ecfff5b2..17c3151c 100644 --- a/tests/test_os_ops_common.py +++ b/tests/test_os_ops_common.py @@ -93,6 +93,70 @@ def test_exec_command_failure__expect_error(self, os_ops: OsOperations): assert b"nonexistent_command" in error assert b"not found" in error + def test_exec_command_with_exec_env(self, os_ops: OsOperations): + assert isinstance(os_ops, OsOperations) + + RunConditions.skip_if_windows() + + C_ENV_NAME = "TESTGRES_TEST__EXEC_ENV_20250414" + + cmd = ["sh", "-c", "echo ${}".format(C_ENV_NAME)] + + exec_env = {C_ENV_NAME: "Hello!"} + + response = os_ops.exec_command(cmd, exec_env=exec_env) + assert response is not None + assert type(response) == bytes # noqa: E721 + assert response == b'Hello!\n' + + response = os_ops.exec_command(cmd) + assert response is not None + assert type(response) == bytes # noqa: E721 + assert response == b'\n' + + def test_exec_command__test_unset(self, os_ops: OsOperations): + assert isinstance(os_ops, OsOperations) + + RunConditions.skip_if_windows() + + C_ENV_NAME = "LANG" + + cmd = ["sh", "-c", "echo ${}".format(C_ENV_NAME)] + + response1 = os_ops.exec_command(cmd) + assert response1 is not None + assert type(response1) == bytes # noqa: E721 + + if response1 == b'\n': + logging.warning("Environment variable {} is not defined.".format(C_ENV_NAME)) + return + + exec_env = {C_ENV_NAME: None} + response2 = os_ops.exec_command(cmd, exec_env=exec_env) + assert response2 is not None + assert type(response2) == bytes # noqa: E721 + assert response2 == b'\n' + + response3 = os_ops.exec_command(cmd) + assert response3 is not None + assert type(response3) == bytes # noqa: E721 + assert response3 == response1 + + def test_exec_command__test_unset_dummy_var(self, os_ops: OsOperations): + assert isinstance(os_ops, OsOperations) + + RunConditions.skip_if_windows() + + C_ENV_NAME = "TESTGRES_TEST__DUMMY_VAR_20250414" + + cmd = ["sh", "-c", "echo ${}".format(C_ENV_NAME)] + + exec_env = {C_ENV_NAME: None} + response2 = os_ops.exec_command(cmd, exec_env=exec_env) + assert response2 is not None + assert type(response2) == bytes # noqa: E721 + assert response2 == b'\n' + def test_is_executable_true(self, os_ops: OsOperations): """ Test is_executable for an existing executable. 
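The tests above pin down the exec_env contract. A condensed usage sketch, illustrative only: the variable name is invented for the example, a POSIX shell is assumed, and exec_command returns raw bytes because no encoding is passed.

import os
from testgres.operations.local_ops import LocalOperations

os_ops = LocalOperations()
cmd = ["sh", "-c", "echo $MY_TEST_VAR"]

# Set a variable for this command only.
out = os_ops.exec_command(cmd, exec_env={"MY_TEST_VAR": "Hello!"})
assert out == b"Hello!\n"

# A None value asks exec_command to unset the variable for the child process.
out = os_ops.exec_command(cmd, exec_env={"MY_TEST_VAR": None})
assert out == b"\n"

# The caller's own environment is left untouched.
assert "MY_TEST_VAR" not in os.environ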
From daa2b7b146dc5ed41c34f7d75ec0b8dcb8db00bd Mon Sep 17 00:00:00 2001 From: Victoria Shepard <5807469+demonolock@users.noreply.github.com> Date: Tue, 15 Apr 2025 20:32:33 +0200 Subject: [PATCH 72/90] Add maintain command (#175) --- .../plugins/pg_probackup2/pg_probackup2/app.py | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/testgres/plugins/pg_probackup2/pg_probackup2/app.py b/testgres/plugins/pg_probackup2/pg_probackup2/app.py index d47cf51f..5166e9b8 100644 --- a/testgres/plugins/pg_probackup2/pg_probackup2/app.py +++ b/testgres/plugins/pg_probackup2/pg_probackup2/app.py @@ -842,5 +842,22 @@ def archive_get(self, instance, wal_file_name, wal_file_path, options=None, expe ] return self.run(cmd + options, expect_error=expect_error) + def maintain( + self, instance=None, backup_id=None, + options=None, old_binary=False, gdb=False, expect_error=False + ): + if options is None: + options = [] + cmd_list = [ + 'maintain', + ] + if instance: + cmd_list += ['--instance={0}'.format(instance)] + if backup_id: + cmd_list += ['-i', backup_id] + + return self.run(cmd_list + options, old_binary=old_binary, gdb=gdb, + expect_error=expect_error) + def build_backup_dir(self, backup='backup'): return fs_backup_class(rel_path=self.rel_path, backup=backup) From 330fd9abaf081a47ecd8f88767f18cb0bf3dcd2c Mon Sep 17 00:00:00 2001 From: Alexey Savchkov Date: Mon, 21 Apr 2025 20:09:27 +0700 Subject: [PATCH 73/90] Up versions --- setup.py | 2 +- testgres/plugins/pg_probackup2/setup.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/setup.py b/setup.py index b47a1d8a..2c44b18f 100755 --- a/setup.py +++ b/setup.py @@ -27,7 +27,7 @@ readme = f.read() setup( - version='1.10.5', + version='1.11.0', name='testgres', packages=['testgres', 'testgres.operations'], description='Testing utility for PostgreSQL and its extensions', diff --git a/testgres/plugins/pg_probackup2/setup.py b/testgres/plugins/pg_probackup2/setup.py index 8bcfe7b4..7a3212e4 100644 --- a/testgres/plugins/pg_probackup2/setup.py +++ b/testgres/plugins/pg_probackup2/setup.py @@ -4,7 +4,7 @@ from distutils.core import setup setup( - version='0.0.6', + version='0.1.0', name='testgres_pg_probackup2', packages=['pg_probackup2', 'pg_probackup2.storage'], description='Plugin for testgres that manages pg_probackup2', From 6e5e4f5a9eb7f7a02df7056dcded7e0b68a6d1da Mon Sep 17 00:00:00 2001 From: Victoria Shepard <5807469+demonolock@users.noreply.github.com> Date: Thu, 24 Apr 2025 14:00:30 +0000 Subject: [PATCH 74/90] remove __init__.py from the root --- __init__.py | 0 1 file changed, 0 insertions(+), 0 deletions(-) delete mode 100644 __init__.py diff --git a/__init__.py b/__init__.py deleted file mode 100644 index e69de29b..00000000 From 1a662f138f9e03b750b91455113ea3b58c0c986e Mon Sep 17 00:00:00 2001 From: Dmitry Kovalenko Date: Fri, 25 Apr 2025 15:33:13 +0300 Subject: [PATCH 75/90] [FIX] Tests include testgres by right way through import (#241) When we do not have root __init__.py tests must to import testgres through "import testgres" not through "from import testgres" --- .../pg_probackup2/tests/test_basic.py | 2 +- tests/helpers/global_data.py | 16 ++++---- tests/test_config.py | 10 ++--- tests/test_os_ops_common.py | 4 +- tests/test_os_ops_remote.py | 2 +- tests/test_testgres_common.py | 40 +++++++++---------- tests/test_testgres_local.py | 24 +++++------ tests/test_testgres_remote.py | 14 +++---- tests/test_utils.py | 6 +-- 9 files changed, 59 insertions(+), 59 deletions(-) diff --git 
a/testgres/plugins/pg_probackup2/pg_probackup2/tests/test_basic.py b/testgres/plugins/pg_probackup2/pg_probackup2/tests/test_basic.py index ba788623..f22a62bf 100644 --- a/testgres/plugins/pg_probackup2/pg_probackup2/tests/test_basic.py +++ b/testgres/plugins/pg_probackup2/pg_probackup2/tests/test_basic.py @@ -4,7 +4,7 @@ import shutil import pytest -from ...... import testgres +import testgres from ...pg_probackup2.app import ProbackupApp from ...pg_probackup2.init_helpers import Init, init_params from ..storage.fs_backup import FSTestBackupDir diff --git a/tests/helpers/global_data.py b/tests/helpers/global_data.py index c21d7dd8..51bf4485 100644 --- a/tests/helpers/global_data.py +++ b/tests/helpers/global_data.py @@ -1,11 +1,11 @@ -from ...testgres.operations.os_ops import OsOperations -from ...testgres.operations.os_ops import ConnectionParams -from ...testgres.operations.local_ops import LocalOperations -from ...testgres.operations.remote_ops import RemoteOperations - -from ...testgres.node import PortManager -from ...testgres.node import PortManager__ThisHost -from ...testgres.node import PortManager__Generic +from testgres.operations.os_ops import OsOperations +from testgres.operations.os_ops import ConnectionParams +from testgres.operations.local_ops import LocalOperations +from testgres.operations.remote_ops import RemoteOperations + +from testgres.node import PortManager +from testgres.node import PortManager__ThisHost +from testgres.node import PortManager__Generic import os diff --git a/tests/test_config.py b/tests/test_config.py index 05702e9a..a80a11f1 100644 --- a/tests/test_config.py +++ b/tests/test_config.py @@ -1,9 +1,9 @@ -from ..testgres import TestgresConfig -from ..testgres import configure_testgres -from ..testgres import scoped_config -from ..testgres import pop_config +from testgres import TestgresConfig +from testgres import configure_testgres +from testgres import scoped_config +from testgres import pop_config -from .. 
import testgres +import testgres import pytest diff --git a/tests/test_os_ops_common.py b/tests/test_os_ops_common.py index 17c3151c..9c4d8857 100644 --- a/tests/test_os_ops_common.py +++ b/tests/test_os_ops_common.py @@ -14,8 +14,8 @@ import threading import typing -from ..testgres import InvalidOperationException -from ..testgres import ExecUtilException +from testgres import InvalidOperationException +from testgres import ExecUtilException class TestOsOpsCommon: diff --git a/tests/test_os_ops_remote.py b/tests/test_os_ops_remote.py index 338e49f3..65830218 100755 --- a/tests/test_os_ops_remote.py +++ b/tests/test_os_ops_remote.py @@ -3,7 +3,7 @@ from .helpers.global_data import OsOpsDescrs from .helpers.global_data import OsOperations -from ..testgres import ExecUtilException +from testgres import ExecUtilException import os import pytest diff --git a/tests/test_testgres_common.py b/tests/test_testgres_common.py index e1252de2..f4e5996f 100644 --- a/tests/test_testgres_common.py +++ b/tests/test_testgres_common.py @@ -3,29 +3,29 @@ from .helpers.global_data import OsOperations from .helpers.global_data import PortManager -from ..testgres.node import PgVer -from ..testgres.node import PostgresNode -from ..testgres.utils import get_pg_version2 -from ..testgres.utils import file_tail -from ..testgres.utils import get_bin_path2 -from ..testgres import ProcessType -from ..testgres import NodeStatus -from ..testgres import IsolationLevel +from testgres.node import PgVer +from testgres.node import PostgresNode +from testgres.utils import get_pg_version2 +from testgres.utils import file_tail +from testgres.utils import get_bin_path2 +from testgres import ProcessType +from testgres import NodeStatus +from testgres import IsolationLevel # New name prevents to collect test-functions in TestgresException and fixes # the problem with pytest warning. -from ..testgres import TestgresException as testgres_TestgresException - -from ..testgres import InitNodeException -from ..testgres import StartNodeException -from ..testgres import QueryException -from ..testgres import ExecUtilException -from ..testgres import TimeoutException -from ..testgres import InvalidOperationException -from ..testgres import BackupException -from ..testgres import ProgrammingError -from ..testgres import scoped_config -from ..testgres import First, Any +from testgres import TestgresException as testgres_TestgresException + +from testgres import InitNodeException +from testgres import StartNodeException +from testgres import QueryException +from testgres import ExecUtilException +from testgres import TimeoutException +from testgres import InvalidOperationException +from testgres import BackupException +from testgres import ProgrammingError +from testgres import scoped_config +from testgres import First, Any from contextlib import contextmanager diff --git a/tests/test_testgres_local.py b/tests/test_testgres_local.py index 9dbd455b..1dd98fe3 100644 --- a/tests/test_testgres_local.py +++ b/tests/test_testgres_local.py @@ -7,21 +7,21 @@ import platform import logging -from .. 
import testgres +import testgres -from ..testgres import StartNodeException -from ..testgres import ExecUtilException -from ..testgres import NodeApp -from ..testgres import scoped_config -from ..testgres import get_new_node -from ..testgres import get_bin_path -from ..testgres import get_pg_config -from ..testgres import get_pg_version +from testgres import StartNodeException +from testgres import ExecUtilException +from testgres import NodeApp +from testgres import scoped_config +from testgres import get_new_node +from testgres import get_bin_path +from testgres import get_pg_config +from testgres import get_pg_version # NOTE: those are ugly imports -from ..testgres.utils import bound_ports -from ..testgres.utils import PgVer -from ..testgres.node import ProcessProxy +from testgres.utils import bound_ports +from testgres.utils import PgVer +from testgres.node import ProcessProxy def pg_version_ge(version): diff --git a/tests/test_testgres_remote.py b/tests/test_testgres_remote.py index e38099b7..87cc0269 100755 --- a/tests/test_testgres_remote.py +++ b/tests/test_testgres_remote.py @@ -7,16 +7,16 @@ from .helpers.global_data import PostgresNodeService from .helpers.global_data import PostgresNodeServices -from .. import testgres +import testgres -from ..testgres.exceptions import InitNodeException -from ..testgres.exceptions import ExecUtilException +from testgres.exceptions import InitNodeException +from testgres.exceptions import ExecUtilException -from ..testgres.config import scoped_config -from ..testgres.config import testgres_config +from testgres.config import scoped_config +from testgres.config import testgres_config -from ..testgres import get_bin_path -from ..testgres import get_pg_config +from testgres import get_bin_path +from testgres import get_pg_config # NOTE: those are ugly imports diff --git a/tests/test_utils.py b/tests/test_utils.py index c05bd2fe..39e9dda0 100644 --- a/tests/test_utils.py +++ b/tests/test_utils.py @@ -2,9 +2,9 @@ from .helpers.global_data import OsOpsDescrs from .helpers.global_data import OsOperations -from ..testgres.utils import parse_pg_version -from ..testgres.utils import get_pg_config2 -from ..testgres import scoped_config +from testgres.utils import parse_pg_version +from testgres.utils import get_pg_config2 +from testgres import scoped_config import pytest import typing From 94d75725bb4d3bfb6667804e075e9b4d46e062d6 Mon Sep 17 00:00:00 2001 From: Dmitry Kovalenko Date: Mon, 28 Apr 2025 08:37:16 +0300 Subject: [PATCH 76/90] [#240] Using of node.psql with other host and port (#242) * [FIX] Tests include testgres by right way through import When we do not have root __init__.py tests must to import testgres through "import testgres" not through "from import testgres" * [#240] Using of node.psql with other host and port This patch adds the support of using other host and port in the following methods: - PostgresNode.psql (explicit new args: host and port) - PostgresNode.safe_psql (indirectly through **kwargs) It allows to run psql utility from one PostgreSQL instance to work with another one. If explicit host and port are not defined (are None), PostgresNode will use own ones. This patch closes #240. 
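An illustrative call sketch (node setup is abbreviated, the query is a placeholder, and stripping the output sidesteps Windows line endings):

from testgres import get_new_node

with get_new_node().init().start() as node1, \
        get_new_node().init().start() as node2:
    # psql binary taken from node1, server address taken from node2
    rc, out, err = node1.psql(
        dbname="postgres",
        query="select 1",
        host=node2.host,
        port=node2.port,
    )
    assert rc == 0 and out.strip() == b"1"

    # safe_psql forwards host/port through **kwargs and returns stdout only
    out = node1.safe_psql("select 1", host=node2.host, port=node2.port)
    assert out.strip() == b"1"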
--- testgres/node.py | 29 +++++++++++- tests/test_testgres_common.py | 83 +++++++++++++++++++++++++++++++++++ 2 files changed, 110 insertions(+), 2 deletions(-) diff --git a/testgres/node.py b/testgres/node.py index 3a294044..41504e89 100644 --- a/testgres/node.py +++ b/testgres/node.py @@ -1372,6 +1372,8 @@ def psql(self, dbname=None, username=None, input=None, + host: typing.Optional[str] = None, + port: typing.Optional[int] = None, **variables): """ Execute a query using psql. @@ -1382,6 +1384,8 @@ def psql(self, dbname: database name to connect to. username: database user name. input: raw input to be passed. + host: an explicit host of server. + port: an explicit port of server. **variables: vars to be set before execution. Returns: @@ -1393,6 +1397,10 @@ def psql(self, >>> psql(query='select 3', ON_ERROR_STOP=1) """ + assert host is None or type(host) == str # noqa: E721 + assert port is None or type(port) == int # noqa: E721 + assert type(variables) == dict # noqa: E721 + return self._psql( ignore_errors=True, query=query, @@ -1400,6 +1408,8 @@ def psql(self, dbname=dbname, username=username, input=input, + host=host, + port=port, **variables ) @@ -1411,7 +1421,11 @@ def _psql( dbname=None, username=None, input=None, + host: typing.Optional[str] = None, + port: typing.Optional[int] = None, **variables): + assert host is None or type(host) == str # noqa: E721 + assert port is None or type(port) == int # noqa: E721 assert type(variables) == dict # noqa: E721 # @@ -1424,10 +1438,21 @@ def _psql( else: raise Exception("Input data must be None or bytes.") + if host is None: + host = self.host + + if port is None: + port = self.port + + assert host is not None + assert port is not None + assert type(host) == str # noqa: E721 + assert type(port) == int # noqa: E721 + psql_params = [ self._get_bin_path("psql"), - "-p", str(self.port), - "-h", self.host, + "-p", str(port), + "-h", host, "-U", username or self.os_ops.username, "-d", dbname or default_dbname(), "-X", # no .psqlrc diff --git a/tests/test_testgres_common.py b/tests/test_testgres_common.py index f4e5996f..21fa00df 100644 --- a/tests/test_testgres_common.py +++ b/tests/test_testgres_common.py @@ -678,6 +678,89 @@ def test_psql(self, node_svc: PostgresNodeService): r = node.safe_psql('select 1') # raises! 
logging.error("node.safe_psql returns [{}]".format(r)) + def test_psql__another_port(self, node_svc: PostgresNodeService): + assert isinstance(node_svc, PostgresNodeService) + with __class__.helper__get_node(node_svc).init() as node1: + with __class__.helper__get_node(node_svc).init() as node2: + node1.start() + node2.start() + assert node1.port != node2.port + assert node1.host == node2.host + + node1.stop() + + logging.info("test table in node2 is creating ...") + node2.safe_psql( + dbname="postgres", + query="create table test (id integer);" + ) + + logging.info("try to find test table through node1.psql ...") + res = node1.psql( + dbname="postgres", + query="select count(*) from pg_class where relname='test'", + host=node2.host, + port=node2.port, + ) + assert (__class__.helper__rm_carriage_returns(res) == (0, b'1\n', b'')) + + def test_psql__another_bad_host(self, node_svc: PostgresNodeService): + assert isinstance(node_svc, PostgresNodeService) + with __class__.helper__get_node(node_svc).init() as node: + logging.info("try to execute node1.psql ...") + res = node.psql( + dbname="postgres", + query="select count(*) from pg_class where relname='test'", + host="DUMMY_HOST_NAME", + port=node.port, + ) + + res2 = __class__.helper__rm_carriage_returns(res) + + assert res2[0] != 0 + assert b"DUMMY_HOST_NAME" in res[2] + + def test_safe_psql__another_port(self, node_svc: PostgresNodeService): + assert isinstance(node_svc, PostgresNodeService) + with __class__.helper__get_node(node_svc).init() as node1: + with __class__.helper__get_node(node_svc).init() as node2: + node1.start() + node2.start() + assert node1.port != node2.port + assert node1.host == node2.host + + node1.stop() + + logging.info("test table in node2 is creating ...") + node2.safe_psql( + dbname="postgres", + query="create table test (id integer);" + ) + + logging.info("try to find test table through node1.psql ...") + res = node1.safe_psql( + dbname="postgres", + query="select count(*) from pg_class where relname='test'", + host=node2.host, + port=node2.port, + ) + assert (__class__.helper__rm_carriage_returns(res) == b'1\n') + + def test_safe_psql__another_bad_host(self, node_svc: PostgresNodeService): + assert isinstance(node_svc, PostgresNodeService) + with __class__.helper__get_node(node_svc).init() as node: + logging.info("try to execute node1.psql ...") + + with pytest.raises(expected_exception=Exception) as x: + node.safe_psql( + dbname="postgres", + query="select count(*) from pg_class where relname='test'", + host="DUMMY_HOST_NAME", + port=node.port, + ) + + assert "DUMMY_HOST_NAME" in str(x.value) + def test_safe_psql__expect_error(self, node_svc: PostgresNodeService): assert isinstance(node_svc, PostgresNodeService) with __class__.helper__get_node(node_svc).init().start() as node: From 2f550d8787130551e7c72d07070bf3dcc11dd586 Mon Sep 17 00:00:00 2001 From: Dmitry Kovalenko Date: Thu, 1 May 2025 23:21:48 +0300 Subject: [PATCH 77/90] Testgres tests create log dir in exact place (#243) When we do not define TEST_CFG__LOG_DIR it is expected the testgres tests will create a log directory in a root of testgres project folder. We used config.rootpath for detect this folder in pytest_configure function. It was occurred that config.rootpath can point to another (unexpected) place. So we will use exact code to calculate testgres project folder (see TestStartupData.GetRootLogDir) to avid this problem. 
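The selection rule itself is small; a condensed sketch of the fallback, with an illustrative helper name:

import os

def calc_root_log_dir(root_dir: str) -> str:
    # An explicit TEST_CFG__LOG_DIR always wins; otherwise tests log into
    # <root_dir>/logs, independent of pytest's rootpath.
    if "TEST_CFG__LOG_DIR" in os.environ:
        return os.environ["TEST_CFG__LOG_DIR"]
    return os.path.join(root_dir, "logs")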
--- tests/conftest.py | 28 +++++++++++++++++++++------- 1 file changed, 21 insertions(+), 7 deletions(-) diff --git a/tests/conftest.py b/tests/conftest.py index 6f2f9e41..9e74879b 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -50,6 +50,17 @@ def CalcRootDir() -> str: r = os.path.abspath(r) return r + # -------------------------------------------------------------------- + def CalcRootLogDir() -> str: + if TestConfigPropNames.TEST_CFG__LOG_DIR in os.environ: + resultPath = os.environ[TestConfigPropNames.TEST_CFG__LOG_DIR] + else: + rootDir = __class__.CalcRootDir() + resultPath = os.path.join(rootDir, "logs") + + assert type(resultPath) == str # noqa: E721 + return resultPath + # -------------------------------------------------------------------- def CalcCurrentTestWorkerSignature() -> str: currentPID = os.getpid() @@ -86,11 +97,18 @@ class TestStartupData: TestStartupData__Helper.CalcCurrentTestWorkerSignature() ) + sm_RootLogDir: str = TestStartupData__Helper.CalcRootLogDir() + # -------------------------------------------------------------------- def GetRootDir() -> str: assert type(__class__.sm_RootDir) == str # noqa: E721 return __class__.sm_RootDir + # -------------------------------------------------------------------- + def GetRootLogDir() -> str: + assert type(__class__.sm_RootLogDir) == str # noqa: E721 + return __class__.sm_RootLogDir + # -------------------------------------------------------------------- def GetCurrentTestWorkerSignature() -> str: assert type(__class__.sm_CurrentTestWorkerSignature) == str # noqa: E721 @@ -954,13 +972,9 @@ def pytest_configure(config: pytest.Config) -> None: log_name = TestStartupData.GetCurrentTestWorkerSignature() log_name += ".log" - if TestConfigPropNames.TEST_CFG__LOG_DIR in os.environ: - log_path_v = os.environ[TestConfigPropNames.TEST_CFG__LOG_DIR] - log_path = pathlib.Path(log_path_v) - else: - log_path = config.rootpath.joinpath("logs") + log_dir = TestStartupData.GetRootLogDir() - log_path.mkdir(exist_ok=True) + pathlib.Path(log_dir).mkdir(exist_ok=True) logging_plugin: _pytest.logging.LoggingPlugin = config.pluginmanager.get_plugin( "logging-plugin" @@ -969,7 +983,7 @@ def pytest_configure(config: pytest.Config) -> None: assert logging_plugin is not None assert isinstance(logging_plugin, _pytest.logging.LoggingPlugin) - logging_plugin.set_log_path(str(log_path / log_name)) + logging_plugin.set_log_path(os.path.join(log_dir, log_name)) # ///////////////////////////////////////////////////////////////////////////// From 5f8f5dd2e5684a340f640141f01b3edd5ebca5a9 Mon Sep 17 00:00:00 2001 From: Dmitry Kovalenko Date: Sun, 4 May 2025 06:10:29 +0300 Subject: [PATCH 78/90] [test] TestTestgresLocal.test_upgrade_node is corrected (#246) Let's "release" all our test nodes correctly. 
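The pattern behind this and the following test fixes, as a minimal sketch (the prefix is arbitrary):

from testgres import get_new_node

with get_new_node(prefix="demo") as node:
    node.init()
    node.start()
    assert node.safe_psql("select 1").strip() == b"1"
# on exit the node is stopped and its reserved port is released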
--- tests/test_testgres_local.py | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/tests/test_testgres_local.py b/tests/test_testgres_local.py index 1dd98fe3..53c9e0f6 100644 --- a/tests/test_testgres_local.py +++ b/tests/test_testgres_local.py @@ -158,15 +158,15 @@ def test_child_process_dies(self): def test_upgrade_node(self): old_bin_dir = os.path.dirname(get_bin_path("pg_config")) new_bin_dir = os.path.dirname(get_bin_path("pg_config")) - node_old = get_new_node(prefix='node_old', bin_dir=old_bin_dir) - node_old.init() - node_old.start() - node_old.stop() - node_new = get_new_node(prefix='node_new', bin_dir=new_bin_dir) - node_new.init(cached=False) - res = node_new.upgrade_from(old_node=node_old) - node_new.start() - assert (b'Upgrade Complete' in res) + with get_new_node(prefix='node_old', bin_dir=old_bin_dir) as node_old: + node_old.init() + node_old.start() + node_old.stop() + with get_new_node(prefix='node_new', bin_dir=new_bin_dir) as node_new: + node_new.init(cached=False) + res = node_new.upgrade_from(old_node=node_old) + node_new.start() + assert (b'Upgrade Complete' in res) class tagPortManagerProxy: sm_prev_testgres_reserve_port = None From 0b331e6839002da9b51fb3ca6ca7db228373dff0 Mon Sep 17 00:00:00 2001 From: Dmitry Kovalenko Date: Sun, 4 May 2025 16:01:58 +0300 Subject: [PATCH 79/90] Releasing of reserved port in tests (#248) * [test] TestTestgresLocal.test_pg_ctl_wait_option is corrected Let's "release" all our test nodes correctly. * [test] TestTestgresLocal.test_simple_with_bin_dir is corrected Let's "release" all our test nodes correctly. --- tests/test_testgres_common.py | 15 ++++++++++++--- tests/test_testgres_local.py | 8 ++++---- 2 files changed, 16 insertions(+), 7 deletions(-) diff --git a/tests/test_testgres_common.py b/tests/test_testgres_common.py index 21fa00df..b71dc5ad 100644 --- a/tests/test_testgres_common.py +++ b/tests/test_testgres_common.py @@ -883,10 +883,20 @@ def test_backup_wrong_xlog_method(self, node_svc: PostgresNodeService): def test_pg_ctl_wait_option(self, node_svc: PostgresNodeService): assert isinstance(node_svc, PostgresNodeService) - C_MAX_ATTEMPTS = 50 + with __class__.helper__get_node(node_svc) as node: + self.impl__test_pg_ctl_wait_option(node_svc, node) - node = __class__.helper__get_node(node_svc) + def impl__test_pg_ctl_wait_option( + self, + node_svc: PostgresNodeService, + node: PostgresNode + ) -> None: + assert isinstance(node_svc, PostgresNodeService) + assert isinstance(node, PostgresNode) assert node.status() == NodeStatus.Uninitialized + + C_MAX_ATTEMPTS = 50 + node.init() assert node.status() == NodeStatus.Stopped node.start(wait=False) @@ -950,7 +960,6 @@ def test_pg_ctl_wait_option(self, node_svc: PostgresNodeService): raise Exception("Unexpected node status: {0}.".format(s1)) logging.info("OK. 
Node is stopped.") - node.cleanup() def test_replicate(self, node_svc: PostgresNodeService): assert isinstance(node_svc, PostgresNodeService) diff --git a/tests/test_testgres_local.py b/tests/test_testgres_local.py index 53c9e0f6..63e5f37e 100644 --- a/tests/test_testgres_local.py +++ b/tests/test_testgres_local.py @@ -341,10 +341,10 @@ def test_simple_with_bin_dir(self): bin_dir = node.bin_dir app = NodeApp() - correct_bin_dir = app.make_simple(base_dir=node.base_dir, bin_dir=bin_dir) - correct_bin_dir.slow_start() - correct_bin_dir.safe_psql("SELECT 1;") - correct_bin_dir.stop() + with app.make_simple(base_dir=node.base_dir, bin_dir=bin_dir) as correct_bin_dir: + correct_bin_dir.slow_start() + correct_bin_dir.safe_psql("SELECT 1;") + correct_bin_dir.stop() while True: try: From c3b25b22f6004d592e1f53d0366486d367fb3b48 Mon Sep 17 00:00:00 2001 From: Dmitry Kovalenko Date: Sun, 4 May 2025 22:39:18 +0300 Subject: [PATCH 80/90] [#249] Fix of port number leak in NodeBackup::spawn_replica (#250) This patch has the following changes: 1) It adds a new argument release_resources to PostgresNode::cleanup method. Default value is False. 2) It fixes a port number leak in NodeBackup::spawn_replica through explicit call of PostgresNode::cleanup(release_resources=True). Closes #249. --- testgres/backup.py | 11 ++++++++--- testgres/node.py | 12 +++++++++--- 2 files changed, 17 insertions(+), 6 deletions(-) diff --git a/testgres/backup.py b/testgres/backup.py index 388697b7..857c46d4 100644 --- a/testgres/backup.py +++ b/testgres/backup.py @@ -184,14 +184,19 @@ def spawn_replica(self, name=None, destroy=True, slot=None): """ # Build a new PostgresNode - with clean_on_error(self.spawn_primary(name=name, - destroy=destroy)) as node: + node = self.spawn_primary(name=name, destroy=destroy) + assert node is not None + try: # Assign it a master and a recovery file (private magic) node._assign_master(self.original_node) node._create_recovery_conf(username=self.username, slot=slot) + except: # noqa: E722 + # TODO: Pass 'final=True' ? + node.cleanup(release_resources=True) + raise - return node + return node def cleanup(self): """ diff --git a/testgres/node.py b/testgres/node.py index 41504e89..defc0b40 100644 --- a/testgres/node.py +++ b/testgres/node.py @@ -231,8 +231,6 @@ def __enter__(self): return self def __exit__(self, type, value, traceback): - self.free_port() - # NOTE: Ctrl+C does not count! got_exception = type is not None and type != KeyboardInterrupt @@ -246,6 +244,8 @@ def __exit__(self, type, value, traceback): else: self._try_shutdown(attempts) + self._release_resources() + def __repr__(self): return "{}(name='{}', port={}, base_dir='{}')".format( self.__class__.__name__, @@ -663,6 +663,9 @@ def _try_shutdown(self, max_attempts, with_force=False): ps_output, ps_command) + def _release_resources(self): + self.free_port() + @staticmethod def _throw_bugcheck__unexpected_result_of_ps(result, cmd): assert type(result) == str # noqa: E721 @@ -1340,7 +1343,7 @@ def free_port(self): self._port = None self._port_manager.release_port(port) - def cleanup(self, max_attempts=3, full=False): + def cleanup(self, max_attempts=3, full=False, release_resources=False): """ Stop node if needed and remove its data/logs directory. NOTE: take a look at TestgresConfig.node_cleanup_full. 
@@ -1363,6 +1366,9 @@ def cleanup(self, max_attempts=3, full=False): self.os_ops.rmdirs(rm_dir, ignore_errors=False) + if release_resources: + self._release_resources() + return self @method_decorator(positional_args_hack(['dbname', 'query'])) From a683c65ae222e1980aa141f8d215c9fc3eac9383 Mon Sep 17 00:00:00 2001 From: Dmitry Kovalenko Date: Mon, 5 May 2025 15:12:45 +0300 Subject: [PATCH 81/90] [Refactoring] Default port manager functions now use PortManager__Generic and LocalOperations (#251) * [Refactoring] Default port manager functions now use PortManager__Generic and LocalOperations This patch deletes a duplication of port manager code. Now utils.reserve_port and utils.release_port works through _old_port_manager - it is a global instance of PortManager__Generic that uses a global instance of LocalOperations. This commit is a part of work for #247. * [BUG FIX] PortManager__ThisHost::__new__ had MT-problem After MT-lock we must to check __class__.sm_single_instance again. Refactoring - PortManager__ThisHost::__new__ is replaced with an explicit PortManager__ThisHost::get_single_instance() - PortManager__ThisHost::__init__ is deleted --- setup.py | 2 +- testgres/impl/port_manager__generic.py | 64 ++++++++++++++++ testgres/impl/port_manager__this_host.py | 33 +++++++++ testgres/node.py | 6 +- testgres/port_manager.py | 93 ------------------------ testgres/utils.py | 42 ++++------- tests/helpers/global_data.py | 2 +- 7 files changed, 115 insertions(+), 127 deletions(-) create mode 100755 testgres/impl/port_manager__generic.py create mode 100755 testgres/impl/port_manager__this_host.py diff --git a/setup.py b/setup.py index 2c44b18f..0b209181 100755 --- a/setup.py +++ b/setup.py @@ -29,7 +29,7 @@ setup( version='1.11.0', name='testgres', - packages=['testgres', 'testgres.operations'], + packages=['testgres', 'testgres.operations', 'testgres.impl'], description='Testing utility for PostgreSQL and its extensions', url='https://github.com/postgrespro/testgres', long_description=readme, diff --git a/testgres/impl/port_manager__generic.py b/testgres/impl/port_manager__generic.py new file mode 100755 index 00000000..a51af2bd --- /dev/null +++ b/testgres/impl/port_manager__generic.py @@ -0,0 +1,64 @@ +from ..operations.os_ops import OsOperations + +from ..port_manager import PortManager +from ..exceptions import PortForException + +import threading +import random +import typing + + +class PortManager__Generic(PortManager): + _os_ops: OsOperations + _guard: object + # TODO: is there better to use bitmap fot _available_ports? 
+ _available_ports: typing.Set[int] + _reserved_ports: typing.Set[int] + + def __init__(self, os_ops: OsOperations): + assert os_ops is not None + assert isinstance(os_ops, OsOperations) + self._os_ops = os_ops + self._guard = threading.Lock() + self._available_ports: typing.Set[int] = set(range(1024, 65535)) + self._reserved_ports: typing.Set[int] = set() + + def reserve_port(self) -> int: + assert self._guard is not None + assert type(self._available_ports) == set # noqa: E721t + assert type(self._reserved_ports) == set # noqa: E721 + + with self._guard: + t = tuple(self._available_ports) + assert len(t) == len(self._available_ports) + sampled_ports = random.sample(t, min(len(t), 100)) + t = None + + for port in sampled_ports: + assert not (port in self._reserved_ports) + assert port in self._available_ports + + if not self._os_ops.is_port_free(port): + continue + + self._reserved_ports.add(port) + self._available_ports.discard(port) + assert port in self._reserved_ports + assert not (port in self._available_ports) + return port + + raise PortForException("Can't select a port.") + + def release_port(self, number: int) -> None: + assert type(number) == int # noqa: E721 + + assert self._guard is not None + assert type(self._reserved_ports) == set # noqa: E721 + + with self._guard: + assert number in self._reserved_ports + assert not (number in self._available_ports) + self._available_ports.add(number) + self._reserved_ports.discard(number) + assert not (number in self._reserved_ports) + assert number in self._available_ports diff --git a/testgres/impl/port_manager__this_host.py b/testgres/impl/port_manager__this_host.py new file mode 100755 index 00000000..0d56f356 --- /dev/null +++ b/testgres/impl/port_manager__this_host.py @@ -0,0 +1,33 @@ +from ..port_manager import PortManager + +from .. 
import utils + +import threading + + +class PortManager__ThisHost(PortManager): + sm_single_instance: PortManager = None + sm_single_instance_guard = threading.Lock() + + @staticmethod + def get_single_instance() -> PortManager: + assert __class__ == PortManager__ThisHost + assert __class__.sm_single_instance_guard is not None + + if __class__.sm_single_instance is not None: + assert type(__class__.sm_single_instance) == __class__ # noqa: E721 + return __class__.sm_single_instance + + with __class__.sm_single_instance_guard: + if __class__.sm_single_instance is None: + __class__.sm_single_instance = __class__() + assert __class__.sm_single_instance is not None + assert type(__class__.sm_single_instance) == __class__ # noqa: E721 + return __class__.sm_single_instance + + def reserve_port(self) -> int: + return utils.reserve_port() + + def release_port(self, number: int) -> None: + assert type(number) == int # noqa: E721 + return utils.release_port(number) diff --git a/testgres/node.py b/testgres/node.py index defc0b40..dd1a45d3 100644 --- a/testgres/node.py +++ b/testgres/node.py @@ -84,8 +84,8 @@ InvalidOperationException from .port_manager import PortManager -from .port_manager import PortManager__ThisHost -from .port_manager import PortManager__Generic +from .impl.port_manager__this_host import PortManager__ThisHost +from .impl.port_manager__generic import PortManager__Generic from .logger import TestgresLogger @@ -272,7 +272,7 @@ def _get_port_manager(os_ops: OsOperations) -> PortManager: assert isinstance(os_ops, OsOperations) if isinstance(os_ops, LocalOperations): - return PortManager__ThisHost() + return PortManager__ThisHost.get_single_instance() # TODO: Throw the exception "Please define a port manager." ? return PortManager__Generic(os_ops) diff --git a/testgres/port_manager.py b/testgres/port_manager.py index e2530470..1ae696c8 100644 --- a/testgres/port_manager.py +++ b/testgres/port_manager.py @@ -1,14 +1,3 @@ -from .operations.os_ops import OsOperations - -from .exceptions import PortForException - -from . import utils - -import threading -import random -import typing - - class PortManager: def __init__(self): super().__init__() @@ -19,85 +8,3 @@ def reserve_port(self) -> int: def release_port(self, number: int) -> None: assert type(number) == int # noqa: E721 raise NotImplementedError("PortManager::release_port is not implemented.") - - -class PortManager__ThisHost(PortManager): - sm_single_instance: PortManager = None - sm_single_instance_guard = threading.Lock() - - def __init__(self): - pass - - def __new__(cls) -> PortManager: - assert __class__ == PortManager__ThisHost - assert __class__.sm_single_instance_guard is not None - - if __class__.sm_single_instance is None: - with __class__.sm_single_instance_guard: - __class__.sm_single_instance = super().__new__(cls) - assert __class__.sm_single_instance - assert type(__class__.sm_single_instance) == __class__ # noqa: E721 - return __class__.sm_single_instance - - def reserve_port(self) -> int: - return utils.reserve_port() - - def release_port(self, number: int) -> None: - assert type(number) == int # noqa: E721 - return utils.release_port(number) - - -class PortManager__Generic(PortManager): - _os_ops: OsOperations - _guard: object - # TODO: is there better to use bitmap fot _available_ports? 
- _available_ports: typing.Set[int] - _reserved_ports: typing.Set[int] - - def __init__(self, os_ops: OsOperations): - assert os_ops is not None - assert isinstance(os_ops, OsOperations) - self._os_ops = os_ops - self._guard = threading.Lock() - self._available_ports: typing.Set[int] = set(range(1024, 65535)) - self._reserved_ports: typing.Set[int] = set() - - def reserve_port(self) -> int: - assert self._guard is not None - assert type(self._available_ports) == set # noqa: E721t - assert type(self._reserved_ports) == set # noqa: E721 - - with self._guard: - t = tuple(self._available_ports) - assert len(t) == len(self._available_ports) - sampled_ports = random.sample(t, min(len(t), 100)) - t = None - - for port in sampled_ports: - assert not (port in self._reserved_ports) - assert port in self._available_ports - - if not self._os_ops.is_port_free(port): - continue - - self._reserved_ports.add(port) - self._available_ports.discard(port) - assert port in self._reserved_ports - assert not (port in self._available_ports) - return port - - raise PortForException("Can't select a port.") - - def release_port(self, number: int) -> None: - assert type(number) == int # noqa: E721 - - assert self._guard is not None - assert type(self._reserved_ports) == set # noqa: E721 - - with self._guard: - assert number in self._reserved_ports - assert not (number in self._available_ports) - self._available_ports.add(number) - self._reserved_ports.discard(number) - assert not (number in self._reserved_ports) - assert number in self._available_ports diff --git a/testgres/utils.py b/testgres/utils.py index 2ff6f2a0..6603c929 100644 --- a/testgres/utils.py +++ b/testgres/utils.py @@ -6,8 +6,6 @@ import os import sys -import socket -import random from contextlib import contextmanager from packaging.version import Version, InvalidVersion @@ -15,18 +13,27 @@ from six import iteritems -from .exceptions import PortForException from .exceptions import ExecUtilException from .config import testgres_config as tconf from .operations.os_ops import OsOperations from .operations.remote_ops import RemoteOperations +from .operations.local_ops import LocalOperations from .operations.helpers import Helpers as OsHelpers +from .impl.port_manager__generic import PortManager__Generic + # rows returned by PG_CONFIG _pg_config_data = {} +_local_operations = LocalOperations() + +# +# The old, global "port manager" always worked with LOCAL system +# +_old_port_manager = PortManager__Generic(_local_operations) + # ports used by nodes -bound_ports = set() +bound_ports = _old_port_manager._reserved_ports # re-export version type @@ -43,28 +50,7 @@ def internal__reserve_port(): """ Generate a new port and add it to 'bound_ports'. 
""" - def LOCAL__is_port_free(port: int) -> bool: - """Check if a port is free to use.""" - with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s: - try: - s.bind(("", port)) - return True - except OSError: - return False - - ports = set(range(1024, 65535)) - assert type(ports) == set # noqa: E721 - assert type(bound_ports) == set # noqa: E721 - ports.difference_update(bound_ports) - - sampled_ports = random.sample(tuple(ports), min(len(ports), 100)) - - for port in sampled_ports: - if LOCAL__is_port_free(port): - bound_ports.add(port) - return port - - raise PortForException("Can't select a port") + return _old_port_manager.reserve_port() def internal__release_port(port): @@ -73,9 +59,7 @@ def internal__release_port(port): """ assert type(port) == int # noqa: E721 - assert port in bound_ports - - bound_ports.discard(port) + return _old_port_manager.release_port(port) reserve_port = internal__reserve_port diff --git a/tests/helpers/global_data.py b/tests/helpers/global_data.py index 51bf4485..07ac083d 100644 --- a/tests/helpers/global_data.py +++ b/tests/helpers/global_data.py @@ -39,7 +39,7 @@ class OsOpsDescrs: class PortManagers: sm_remote_port_manager = PortManager__Generic(OsOpsDescrs.sm_remote_os_ops) - sm_local_port_manager = PortManager__ThisHost() + sm_local_port_manager = PortManager__ThisHost.get_single_instance() sm_local2_port_manager = PortManager__Generic(OsOpsDescrs.sm_local_os_ops) From 6972bfcdab479088ac881375db92944be87487f4 Mon Sep 17 00:00:00 2001 From: Dmitry Kovalenko Date: Mon, 5 May 2025 15:27:43 +0300 Subject: [PATCH 82/90] [#244] PostgresNode now uses os_ops only (#245) This patch implements the proposal #244 - detach PostgresNode from ConnectionParams object. It will use os_ops object only. conn_params is saved but must be None. It will be removed in the future. 
--- testgres/node.py | 30 +++++++++++++++--------------- tests/test_testgres_common.py | 1 - tests/test_testgres_remote.py | 1 - 3 files changed, 15 insertions(+), 17 deletions(-) diff --git a/testgres/node.py b/testgres/node.py index dd1a45d3..80ac5ee8 100644 --- a/testgres/node.py +++ b/testgres/node.py @@ -107,7 +107,6 @@ from .operations.os_ops import ConnectionParams from .operations.os_ops import OsOperations from .operations.local_ops import LocalOperations -from .operations.remote_ops import RemoteOperations InternalError = pglib.InternalError ProgrammingError = pglib.ProgrammingError @@ -151,7 +150,7 @@ def __init__(self, name=None, base_dir=None, port: typing.Optional[int] = None, - conn_params: ConnectionParams = ConnectionParams(), + conn_params: ConnectionParams = None, bin_dir=None, prefix=None, os_ops: typing.Optional[OsOperations] = None, @@ -171,11 +170,15 @@ def __init__(self, assert os_ops is None or isinstance(os_ops, OsOperations) assert port_manager is None or isinstance(port_manager, PortManager) + if conn_params is not None: + assert type(conn_params) == ConnectionParams # noqa: E721 + + raise InvalidOperationException("conn_params is deprecated, please use os_ops parameter instead.") + # private if os_ops is None: - self._os_ops = __class__._get_os_ops(conn_params) + self._os_ops = __class__._get_os_ops() else: - assert conn_params is None assert isinstance(os_ops, OsOperations) self._os_ops = os_ops pass @@ -200,11 +203,14 @@ def __init__(self, self._should_free_port = False self._port_manager = None else: - if port_manager is not None: + if port_manager is None: + self._port_manager = __class__._get_port_manager(self._os_ops) + elif os_ops is None: + raise InvalidOperationException("When port_manager is not None you have to define os_ops, too.") + else: assert isinstance(port_manager, PortManager) + assert self._os_ops is os_ops self._port_manager = port_manager - else: - self._port_manager = __class__._get_port_manager(self._os_ops) assert self._port_manager is not None assert isinstance(self._port_manager, PortManager) @@ -255,16 +261,11 @@ def __repr__(self): ) @staticmethod - def _get_os_ops(conn_params: ConnectionParams) -> OsOperations: + def _get_os_ops() -> OsOperations: if testgres_config.os_ops: return testgres_config.os_ops - assert type(conn_params) == ConnectionParams # noqa: E721 - - if conn_params.ssh_key: - return RemoteOperations(conn_params) - - return LocalOperations(conn_params) + return LocalOperations() @staticmethod def _get_port_manager(os_ops: OsOperations) -> PortManager: @@ -294,7 +295,6 @@ def clone_with_new_name_and_base_dir(self, name: str, base_dir: str): node = PostgresNode( name=name, base_dir=base_dir, - conn_params=None, bin_dir=self._bin_dir, prefix=self._prefix, os_ops=self._os_ops, diff --git a/tests/test_testgres_common.py b/tests/test_testgres_common.py index b71dc5ad..5b926bc8 100644 --- a/tests/test_testgres_common.py +++ b/tests/test_testgres_common.py @@ -1487,7 +1487,6 @@ def helper__get_node( return PostgresNode( name, port=port, - conn_params=None, os_ops=node_svc.os_ops, port_manager=port_manager if port is None else None ) diff --git a/tests/test_testgres_remote.py b/tests/test_testgres_remote.py index 87cc0269..6a8d068b 100755 --- a/tests/test_testgres_remote.py +++ b/tests/test_testgres_remote.py @@ -173,7 +173,6 @@ def helper__get_node(name=None): return testgres.PostgresNode( name, - conn_params=None, os_ops=svc.os_ops, port_manager=svc.port_manager) From df4f545eb47427f2997dbe7eeb18e80ff64c5686 Mon Sep 17 
00:00:00 2001 From: Dmitry Kovalenko Date: Tue, 6 May 2025 09:39:26 +0300 Subject: [PATCH 83/90] LocalOperations::get_single_instance is added (#252) * LocalOperations::get_single_instance is added This patch forces testgres to use a single instance of LocalOperations that is created with default parameters. Note that, PortManager__ThisHost is used only when PostgresNode uses this single local_ops instance. --- testgres/cache.py | 8 ++++++-- testgres/config.py | 3 ++- testgres/node.py | 23 +++++++++++++++++++---- testgres/operations/local_ops.py | 20 ++++++++++++++++++++ testgres/utils.py | 4 +--- tests/helpers/global_data.py | 2 +- 6 files changed, 49 insertions(+), 11 deletions(-) diff --git a/testgres/cache.py b/testgres/cache.py index 3ac63326..499cce91 100644 --- a/testgres/cache.py +++ b/testgres/cache.py @@ -22,12 +22,16 @@ from .operations.os_ops import OsOperations -def cached_initdb(data_dir, logfile=None, params=None, os_ops: OsOperations = LocalOperations(), bin_path=None, cached=True): +def cached_initdb(data_dir, logfile=None, params=None, os_ops: OsOperations = None, bin_path=None, cached=True): """ Perform initdb or use cached node files. """ - assert os_ops is not None + assert os_ops is None or isinstance(os_ops, OsOperations) + + if os_ops is None: + os_ops = LocalOperations.get_single_instance() + assert isinstance(os_ops, OsOperations) def make_utility_path(name): diff --git a/testgres/config.py b/testgres/config.py index 67d467d3..55d52426 100644 --- a/testgres/config.py +++ b/testgres/config.py @@ -50,8 +50,9 @@ class GlobalConfig(object): _cached_initdb_dir = None """ underlying class attribute for cached_initdb_dir property """ - os_ops = LocalOperations() + os_ops = LocalOperations.get_single_instance() """ OsOperation object that allows work on remote host """ + @property def cached_initdb_dir(self): """ path to a temp directory for cached initdb. """ diff --git a/testgres/node.py b/testgres/node.py index 80ac5ee8..66783e08 100644 --- a/testgres/node.py +++ b/testgres/node.py @@ -93,6 +93,8 @@ from .standby import First +from . import utils + from .utils import \ PgVer, \ eprint, \ @@ -265,14 +267,17 @@ def _get_os_ops() -> OsOperations: if testgres_config.os_ops: return testgres_config.os_ops - return LocalOperations() + return LocalOperations.get_single_instance() @staticmethod def _get_port_manager(os_ops: OsOperations) -> PortManager: assert os_ops is not None assert isinstance(os_ops, OsOperations) - if isinstance(os_ops, LocalOperations): + if os_ops is LocalOperations.get_single_instance(): + assert utils._old_port_manager is not None + assert type(utils._old_port_manager) == PortManager__Generic # noqa: E721 + assert utils._old_port_manager._os_ops is os_ops return PortManager__ThisHost.get_single_instance() # TODO: Throw the exception "Please define a port manager." ? 
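
The identity check above (os_ops is LocalOperations.get_single_instance()) only works because get_single_instance() always returns the same object. The pattern behind it is a lazily created, lock-guarded singleton; here is a self-contained sketch of that idiom with an invented class name (the real code lives in LocalOperations and PortManager__ThisHost):

    # Demonstration of the double-checked-locking singleton used by
    # LocalOperations.get_single_instance(); DemoOps itself is not testgres code.
    import threading

    class DemoOps:
        sm_single_instance = None
        sm_single_instance_guard = threading.Lock()

        @staticmethod
        def get_single_instance() -> "DemoOps":
            # Fast path: once published, the instance is returned without locking.
            if DemoOps.sm_single_instance is not None:
                return DemoOps.sm_single_instance
            # Slow path: only the first caller constructs the object.
            with DemoOps.sm_single_instance_guard:
                if DemoOps.sm_single_instance is None:
                    DemoOps.sm_single_instance = DemoOps()
            return DemoOps.sm_single_instance

    assert DemoOps.get_single_instance() is DemoOps.get_single_instance()

The unlocked fast path is safe here because the attribute is assigned exactly once, under the lock, and is never reset afterwards.
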
@@ -816,10 +821,13 @@ def init(self, initdb_params=None, cached=True, **kwargs): """ # initialize this PostgreSQL node + assert self._os_ops is not None + assert isinstance(self._os_ops, OsOperations) + cached_initdb( data_dir=self.data_dir, logfile=self.utils_log_file, - os_ops=self.os_ops, + os_ops=self._os_ops, params=initdb_params, bin_path=self.bin_dir, cached=False) @@ -2186,7 +2194,14 @@ def _escape_config_value(value): class NodeApp: - def __init__(self, test_path=None, nodes_to_cleanup=None, os_ops=LocalOperations()): + def __init__(self, test_path=None, nodes_to_cleanup=None, os_ops=None): + assert os_ops is None or isinstance(os_ops, OsOperations) + + if os_ops is None: + os_ops = LocalOperations.get_single_instance() + + assert isinstance(os_ops, OsOperations) + if test_path: if os.path.isabs(test_path): self.test_path = test_path diff --git a/testgres/operations/local_ops.py b/testgres/operations/local_ops.py index 74323bb8..b9fd7aef 100644 --- a/testgres/operations/local_ops.py +++ b/testgres/operations/local_ops.py @@ -10,6 +10,7 @@ import psutil import typing +import threading from ..exceptions import ExecUtilException from ..exceptions import InvalidOperationException @@ -28,6 +29,9 @@ class LocalOperations(OsOperations): + sm_single_instance: OsOperations = None + sm_single_instance_guard = threading.Lock() + def __init__(self, conn_params=None): if conn_params is None: conn_params = ConnectionParams() @@ -38,6 +42,22 @@ def __init__(self, conn_params=None): self.remote = False self.username = conn_params.username or getpass.getuser() + @staticmethod + def get_single_instance() -> OsOperations: + assert __class__ == LocalOperations + assert __class__.sm_single_instance_guard is not None + + if __class__.sm_single_instance is not None: + assert type(__class__.sm_single_instance) == __class__ # noqa: E721 + return __class__.sm_single_instance + + with __class__.sm_single_instance_guard: + if __class__.sm_single_instance is None: + __class__.sm_single_instance = __class__() + assert __class__.sm_single_instance is not None + assert type(__class__.sm_single_instance) == __class__ # noqa: E721 + return __class__.sm_single_instance + @staticmethod def _process_output(encoding, temp_file_path): """Process the output of a command from a temporary file.""" diff --git a/testgres/utils.py b/testgres/utils.py index 6603c929..d231eec3 100644 --- a/testgres/utils.py +++ b/testgres/utils.py @@ -25,12 +25,10 @@ # rows returned by PG_CONFIG _pg_config_data = {} -_local_operations = LocalOperations() - # # The old, global "port manager" always worked with LOCAL system # -_old_port_manager = PortManager__Generic(_local_operations) +_old_port_manager = PortManager__Generic(LocalOperations.get_single_instance()) # ports used by nodes bound_ports = _old_port_manager._reserved_ports diff --git a/tests/helpers/global_data.py b/tests/helpers/global_data.py index 07ac083d..f3df41a3 100644 --- a/tests/helpers/global_data.py +++ b/tests/helpers/global_data.py @@ -31,7 +31,7 @@ class OsOpsDescrs: sm_remote_os_ops_descr = OsOpsDescr("remote_ops", sm_remote_os_ops) - sm_local_os_ops = LocalOperations() + sm_local_os_ops = LocalOperations.get_single_instance() sm_local_os_ops_descr = OsOpsDescr("local_ops", sm_local_os_ops) From a0a85065f59ac40a8e8f951e88efa346cfb9d695 Mon Sep 17 00:00:00 2001 From: Dmitry Kovalenko Date: Tue, 6 May 2025 14:52:14 +0300 Subject: [PATCH 84/90] New OsOperations methods: makedir, rmdir (#253) Signatures: def makedir(self, path: str) def rmdir(self, path: str) It is a part 
of work for #247. --- testgres/operations/local_ops.py | 8 + testgres/operations/os_ops.py | 8 + testgres/operations/remote_ops.py | 10 ++ tests/test_os_ops_common.py | 268 ++++++++++++++++++++++++++++++ 4 files changed, 294 insertions(+) diff --git a/testgres/operations/local_ops.py b/testgres/operations/local_ops.py index b9fd7aef..d33e8b65 100644 --- a/testgres/operations/local_ops.py +++ b/testgres/operations/local_ops.py @@ -250,6 +250,10 @@ def makedirs(self, path, remove_existing=False): except FileExistsError: pass + def makedir(self, path: str): + assert type(path) == str # noqa: E721 + os.mkdir(path) + # [2025-02-03] Old name of parameter attempts is "retries". def rmdirs(self, path, ignore_errors=True, attempts=3, delay=1): """ @@ -293,6 +297,10 @@ def rmdirs(self, path, ignore_errors=True, attempts=3, delay=1): # OK! return True + def rmdir(self, path: str): + assert type(path) == str # noqa: E721 + os.rmdir(path) + def listdir(self, path): return os.listdir(path) diff --git a/testgres/operations/os_ops.py b/testgres/operations/os_ops.py index d25e76bc..a4e1d9a2 100644 --- a/testgres/operations/os_ops.py +++ b/testgres/operations/os_ops.py @@ -53,9 +53,17 @@ def get_name(self): def makedirs(self, path, remove_existing=False): raise NotImplementedError() + def makedir(self, path: str): + assert type(path) == str # noqa: E721 + raise NotImplementedError() + def rmdirs(self, path, ignore_errors=True): raise NotImplementedError() + def rmdir(self, path: str): + assert type(path) == str # noqa: E721 + raise NotImplementedError() + def listdir(self, path): raise NotImplementedError() diff --git a/testgres/operations/remote_ops.py b/testgres/operations/remote_ops.py index e722a2cb..09406f79 100644 --- a/testgres/operations/remote_ops.py +++ b/testgres/operations/remote_ops.py @@ -225,6 +225,11 @@ def makedirs(self, path, remove_existing=False): raise Exception("Couldn't create dir {} because of error {}".format(path, error)) return result + def makedir(self, path: str): + assert type(path) == str # noqa: E721 + cmd = ["mkdir", path] + self.exec_command(cmd) + def rmdirs(self, path, ignore_errors=True): """ Remove a directory in the remote server. @@ -265,6 +270,11 @@ def rmdirs(self, path, ignore_errors=True): return False return True + def rmdir(self, path: str): + assert type(path) == str # noqa: E721 + cmd = ["rmdir", path] + self.exec_command(cmd) + def listdir(self, path): """ List all files and directories in a directory. 
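
Both implementations give makedir() the same useful property: directory creation is atomic, so when several workers race on one path, exactly one of them succeeds. The multi-threaded test added below leans on that. A minimal local-only sketch of the same locking idiom, using plain os.mkdir instead of the OsOperations layer (worker and number counts are arbitrary):

    # Standalone illustration of mkdir-as-a-lock; not part of the test suite.
    import os
    import shutil
    import tempfile
    import threading

    lock_root = tempfile.mkdtemp(prefix="mkdir-lock-demo--")
    winners = {}                    # number -> id of the worker that reserved it
    guard = threading.Lock()

    def worker(worker_id: int, numbers: int) -> None:
        for n in range(numbers):
            path = os.path.join(lock_root, "{}.lock".format(n))
            try:
                os.mkdir(path)      # atomic: only one caller may create it
            except FileExistsError:
                continue            # another worker already reserved this number
            with guard:
                assert n not in winners
                winners[n] = worker_id

    threads = [threading.Thread(target=worker, args=(i, 50)) for i in range(4)]
    for t in threads:
        t.start()
    for t in threads:
        t.join()

    assert sorted(winners) == list(range(50))   # every number reserved exactly once
    shutil.rmtree(lock_root)
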
diff --git a/tests/test_os_ops_common.py b/tests/test_os_ops_common.py index 9c4d8857..149050f9 100644 --- a/tests/test_os_ops_common.py +++ b/tests/test_os_ops_common.py @@ -13,10 +13,14 @@ import socket import threading import typing +import uuid from testgres import InvalidOperationException from testgres import ExecUtilException +from concurrent.futures import ThreadPoolExecutor +from concurrent.futures import Future as ThreadFuture + class TestOsOpsCommon: sm_os_ops_descrs: typing.List[OsOpsDescr] = [ @@ -812,3 +816,267 @@ def LOCAL_server(s: socket.socket): if ok_count == 0: raise RuntimeError("No one free port was found.") + + class tagData_OS_OPS__NUMS: + os_ops_descr: OsOpsDescr + nums: int + + def __init__(self, os_ops_descr: OsOpsDescr, nums: int): + assert isinstance(os_ops_descr, OsOpsDescr) + assert type(nums) == int # noqa: E721 + + self.os_ops_descr = os_ops_descr + self.nums = nums + + sm_test_exclusive_creation__mt__data = [ + tagData_OS_OPS__NUMS(OsOpsDescrs.sm_local_os_ops_descr, 100000), + tagData_OS_OPS__NUMS(OsOpsDescrs.sm_remote_os_ops_descr, 120), + ] + + @pytest.fixture( + params=sm_test_exclusive_creation__mt__data, + ids=[x.os_ops_descr.sign for x in sm_test_exclusive_creation__mt__data] + ) + def data001(self, request: pytest.FixtureRequest) -> tagData_OS_OPS__NUMS: + assert isinstance(request, pytest.FixtureRequest) + return request.param + + def test_mkdir__mt(self, data001: tagData_OS_OPS__NUMS): + assert type(data001) == __class__.tagData_OS_OPS__NUMS # noqa: E721 + + N_WORKERS = 4 + N_NUMBERS = data001.nums + assert type(N_NUMBERS) == int # noqa: E721 + + os_ops = data001.os_ops_descr.os_ops + assert isinstance(os_ops, OsOperations) + + lock_dir_prefix = "test_mkdir_mt--" + uuid.uuid4().hex + + lock_dir = os_ops.mkdtemp(prefix=lock_dir_prefix) + + logging.info("A lock file [{}] is creating ...".format(lock_dir)) + + assert os.path.exists(lock_dir) + + def MAKE_PATH(lock_dir: str, num: int) -> str: + assert type(lock_dir) == str # noqa: E721 + assert type(num) == int # noqa: E721 + return os.path.join(lock_dir, str(num) + ".lock") + + def LOCAL_WORKER(os_ops: OsOperations, + workerID: int, + lock_dir: str, + cNumbers: int, + reservedNumbers: typing.Set[int]) -> None: + assert isinstance(os_ops, OsOperations) + assert type(workerID) == int # noqa: E721 + assert type(lock_dir) == str # noqa: E721 + assert type(cNumbers) == int # noqa: E721 + assert type(reservedNumbers) == set # noqa: E721 + assert cNumbers > 0 + assert len(reservedNumbers) == 0 + + assert os.path.exists(lock_dir) + + def LOG_INFO(template: str, *args: list) -> None: + assert type(template) == str # noqa: E721 + assert type(args) == tuple # noqa: E721 + + msg = template.format(*args) + assert type(msg) == str # noqa: E721 + + logging.info("[Worker #{}] {}".format(workerID, msg)) + return + + LOG_INFO("HELLO! I am here!") + + for num in range(cNumbers): + assert not (num in reservedNumbers) + + file_path = MAKE_PATH(lock_dir, num) + + try: + os_ops.makedir(file_path) + except Exception as e: + LOG_INFO( + "Can't reserve {}. Error ({}): {}", + num, + type(e).__name__, + str(e) + ) + continue + + LOG_INFO("Number {} is reserved!", num) + assert os_ops.path_exists(file_path) + reservedNumbers.add(num) + continue + + n_total = cNumbers + n_ok = len(reservedNumbers) + assert n_ok <= n_total + + LOG_INFO("Finish! OK: {}. 
FAILED: {}.", n_ok, n_total - n_ok) + return + + # ----------------------- + logging.info("Worker are creating ...") + + threadPool = ThreadPoolExecutor( + max_workers=N_WORKERS, + thread_name_prefix="ex_creator" + ) + + class tadWorkerData: + future: ThreadFuture + reservedNumbers: typing.Set[int] + + workerDatas: typing.List[tadWorkerData] = list() + + nErrors = 0 + + try: + for n in range(N_WORKERS): + logging.info("worker #{} is creating ...".format(n)) + + workerDatas.append(tadWorkerData()) + + workerDatas[n].reservedNumbers = set() + + workerDatas[n].future = threadPool.submit( + LOCAL_WORKER, + os_ops, + n, + lock_dir, + N_NUMBERS, + workerDatas[n].reservedNumbers + ) + + assert workerDatas[n].future is not None + + logging.info("OK. All the workers were created!") + except Exception as e: + nErrors += 1 + logging.error("A problem is detected ({}): {}".format(type(e).__name__, str(e))) + + logging.info("Will wait for stop of all the workers...") + + nWorkers = 0 + + assert type(workerDatas) == list # noqa: E721 + + for i in range(len(workerDatas)): + worker = workerDatas[i].future + + if worker is None: + continue + + nWorkers += 1 + + assert isinstance(worker, ThreadFuture) + + try: + logging.info("Wait for worker #{}".format(i)) + worker.result() + except Exception as e: + nErrors += 1 + logging.error("Worker #{} finished with error ({}): {}".format( + i, + type(e).__name__, + str(e), + )) + continue + + assert nWorkers == N_WORKERS + + if nErrors != 0: + raise RuntimeError("Some problems were detected. Please examine the log messages.") + + logging.info("OK. Let's check worker results!") + + reservedNumbers: typing.Dict[int, int] = dict() + + for i in range(N_WORKERS): + logging.info("Worker #{} is checked ...".format(i)) + + workerNumbers = workerDatas[i].reservedNumbers + assert type(workerNumbers) == set # noqa: E721 + + for n in workerNumbers: + if n < 0 or n >= N_NUMBERS: + nErrors += 1 + logging.error("Unexpected number {}".format(n)) + continue + + if n in reservedNumbers.keys(): + nErrors += 1 + logging.error("Number {} was already reserved by worker #{}".format( + n, + reservedNumbers[n] + )) + else: + reservedNumbers[n] = i + + file_path = MAKE_PATH(lock_dir, n) + if not os_ops.path_exists(file_path): + nErrors += 1 + logging.error("File {} is not found!".format(file_path)) + continue + + continue + + logging.info("OK. Let's check reservedNumbers!") + + for n in range(N_NUMBERS): + if not (n in reservedNumbers.keys()): + nErrors += 1 + logging.error("Number {} is not reserved!".format(n)) + continue + + file_path = MAKE_PATH(lock_dir, n) + if not os_ops.path_exists(file_path): + nErrors += 1 + logging.error("File {} is not found!".format(file_path)) + continue + + # OK! + continue + + logging.info("Verification is finished! Total error count is {}.".format(nErrors)) + + if nErrors == 0: + logging.info("Root lock-directory [{}] will be deleted.".format( + lock_dir + )) + + for n in range(N_NUMBERS): + file_path = MAKE_PATH(lock_dir, n) + try: + os_ops.rmdir(file_path) + except Exception as e: + nErrors += 1 + logging.error("Cannot delete directory [{}]. Error ({}): {}".format( + file_path, + type(e).__name__, + str(e) + )) + continue + + if os_ops.path_exists(file_path): + nErrors += 1 + logging.error("Directory {} is not deleted!".format(file_path)) + continue + + if nErrors == 0: + try: + os_ops.rmdir(lock_dir) + except Exception as e: + nErrors += 1 + logging.error("Cannot delete directory [{}]. 
Error ({}): {}".format( + lock_dir, + type(e).__name__, + str(e) + )) + + logging.info("Test is finished! Total error count is {}.".format(nErrors)) + return From edd64db5284a6b34e275e022a4cc673e1032a6ea Mon Sep 17 00:00:00 2001 From: Dmitry Kovalenko Date: Tue, 6 May 2025 15:17:15 +0300 Subject: [PATCH 85/90] OsOperations::get_tempdir() is added (#254) Signature: def get_tempdir(self) -> str --- testgres/operations/local_ops.py | 7 +++++++ testgres/operations/os_ops.py | 3 +++ testgres/operations/remote_ops.py | 28 ++++++++++++++++++++++++++ tests/test_os_ops_common.py | 33 +++++++++++++++++++++++++++++++ 4 files changed, 71 insertions(+) diff --git a/testgres/operations/local_ops.py b/testgres/operations/local_ops.py index d33e8b65..ccf1ab82 100644 --- a/testgres/operations/local_ops.py +++ b/testgres/operations/local_ops.py @@ -528,3 +528,10 @@ def is_port_free(self, number: int) -> bool: return True except OSError: return False + + def get_tempdir(self) -> str: + r = tempfile.gettempdir() + assert r is not None + assert type(r) == str # noqa: E721 + assert os.path.exists(r) + return r diff --git a/testgres/operations/os_ops.py b/testgres/operations/os_ops.py index a4e1d9a2..45e4f71c 100644 --- a/testgres/operations/os_ops.py +++ b/testgres/operations/os_ops.py @@ -130,3 +130,6 @@ def get_process_children(self, pid): def is_port_free(self, number: int): assert type(number) == int # noqa: E721 raise NotImplementedError() + + def get_tempdir(self) -> str: + raise NotImplementedError() diff --git a/testgres/operations/remote_ops.py b/testgres/operations/remote_ops.py index 09406f79..a478b453 100644 --- a/testgres/operations/remote_ops.py +++ b/testgres/operations/remote_ops.py @@ -659,6 +659,34 @@ def is_port_free(self, number: int) -> bool: out=output ) + def get_tempdir(self) -> str: + command = ["mktemp", "-u", "-d"] + + exec_exitcode, exec_output, exec_error = self.exec_command( + command, + verbose=True, + encoding=get_default_encoding(), + ignore_errors=True + ) + + assert type(exec_exitcode) == int # noqa: E721 + assert type(exec_output) == str # noqa: E721 + assert type(exec_error) == str # noqa: E721 + + if exec_exitcode != 0: + RaiseError.CommandExecutionError( + cmd=command, + exit_code=exec_exitcode, + message="Could not detect a temporary directory.", + error=exec_error, + out=exec_output) + + temp_subdir = exec_output.strip() + assert type(temp_subdir) == str # noqa: E721 + temp_dir = os.path.dirname(temp_subdir) + assert type(temp_dir) == str # noqa: E721 + return temp_dir + @staticmethod def _is_port_free__process_0(error: str) -> bool: assert type(error) == str # noqa: E721 diff --git a/tests/test_os_ops_common.py b/tests/test_os_ops_common.py index 149050f9..5ae3a61f 100644 --- a/tests/test_os_ops_common.py +++ b/tests/test_os_ops_common.py @@ -817,6 +817,39 @@ def LOCAL_server(s: socket.socket): if ok_count == 0: raise RuntimeError("No one free port was found.") + def test_get_tmpdir(self, os_ops: OsOperations): + assert isinstance(os_ops, OsOperations) + + dir = os_ops.get_tempdir() + assert type(dir) == str # noqa: E721 + assert os_ops.path_exists(dir) + assert os.path.exists(dir) + + file_path = os.path.join(dir, "testgres--" + uuid.uuid4().hex + ".tmp") + + os_ops.write(file_path, "1234", binary=False) + + assert os_ops.path_exists(file_path) + assert os.path.exists(file_path) + + d = os_ops.read(file_path, binary=False) + + assert d == "1234" + + os_ops.remove_file(file_path) + + assert not os_ops.path_exists(file_path) + assert not os.path.exists(file_path) + + def 
test_get_tmpdir__compare_with_py_info(self, os_ops: OsOperations): + assert isinstance(os_ops, OsOperations) + + actual_dir = os_ops.get_tempdir() + assert actual_dir is not None + assert type(actual_dir) == str # noqa: E721 + expected_dir = str(tempfile.tempdir) + assert actual_dir == expected_dir + class tagData_OS_OPS__NUMS: os_ops_descr: OsOpsDescr nums: int From 3bb59c64b45826f2a50cfb7d423de1c86721946b Mon Sep 17 00:00:00 2001 From: Dmitry Kovalenko Date: Wed, 7 May 2025 22:10:08 +0300 Subject: [PATCH 86/90] [#235] test_pg_ctl_wait_option detects a port conflict (#257) This patch must fix a problem in test_pg_ctl_wait_option when his PostgreSQL instance conflicts with another one. For this, we added two new things: - PostgresNodeLogReader - PostgresNodeUtils PostgresNodeLogReader reads server logs. PostgresNodeUtils provides an utility to detect a port conflict. PostgresNode::start also uses these new classes. --- testgres/node.py | 208 +++++++++++++++++++++++++++------- tests/test_testgres_common.py | 37 +++++- 2 files changed, 200 insertions(+), 45 deletions(-) diff --git a/testgres/node.py b/testgres/node.py index 66783e08..9a2f4e77 100644 --- a/testgres/node.py +++ b/testgres/node.py @@ -784,28 +784,6 @@ def _collect_special_files(self): return result - def _collect_log_files(self): - # dictionary of log files + size in bytes - - files = [ - self.pg_log_file - ] # yapf: disable - - result = {} - - for f in files: - # skip missing files - if not self.os_ops.path_exists(f): - continue - - file_size = self.os_ops.get_file_size(f) - assert type(file_size) == int # noqa: E721 - assert file_size >= 0 - - result[f] = file_size - - return result - def init(self, initdb_params=None, cached=True, **kwargs): """ Perform initdb for this node. @@ -1062,22 +1040,6 @@ def slow_start(self, replica=False, dbname='template1', username=None, max_attem OperationalError}, max_attempts=max_attempts) - def _detect_port_conflict(self, log_files0, log_files1): - assert type(log_files0) == dict # noqa: E721 - assert type(log_files1) == dict # noqa: E721 - - for file in log_files1.keys(): - read_pos = 0 - - if file in log_files0.keys(): - read_pos = log_files0[file] # the previous size - - file_content = self.os_ops.read_binary(file, read_pos) - file_content_s = file_content.decode() - if 'Is another postmaster already running on port' in file_content_s: - return True - return False - def start(self, params=[], wait=True, exec_env=None): """ Starts the PostgreSQL node using pg_ctl if node has not been started. @@ -1137,8 +1099,7 @@ def LOCAL__raise_cannot_start_node__std(from_exception): assert isinstance(self._port_manager, PortManager) assert __class__._C_MAX_START_ATEMPTS > 1 - log_files0 = self._collect_log_files() - assert type(log_files0) == dict # noqa: E721 + log_reader = PostgresNodeLogReader(self, from_beginnig=False) nAttempt = 0 timeout = 1 @@ -1154,11 +1115,11 @@ def LOCAL__raise_cannot_start_node__std(from_exception): if nAttempt == __class__._C_MAX_START_ATEMPTS: LOCAL__raise_cannot_start_node(e, "Cannot start node after multiple attempts.") - log_files1 = self._collect_log_files() - if not self._detect_port_conflict(log_files0, log_files1): + is_it_port_conflict = PostgresNodeUtils.delect_port_conflict(log_reader) + + if not is_it_port_conflict: LOCAL__raise_cannot_start_node__std(e) - log_files0 = log_files1 logging.warning( "Detected a conflict with using the port {0}. 
Trying another port after a {1}-second sleep...".format(self._port, timeout) ) @@ -2192,6 +2153,167 @@ def _escape_config_value(value): return result +class PostgresNodeLogReader: + class LogInfo: + position: int + + def __init__(self, position: int): + self.position = position + + # -------------------------------------------------------------------- + class LogDataBlock: + _file_name: str + _position: int + _data: str + + def __init__( + self, + file_name: str, + position: int, + data: str + ): + assert type(file_name) == str # noqa: E721 + assert type(position) == int # noqa: E721 + assert type(data) == str # noqa: E721 + assert file_name != "" + assert position >= 0 + self._file_name = file_name + self._position = position + self._data = data + + @property + def file_name(self) -> str: + assert type(self._file_name) == str # noqa: E721 + assert self._file_name != "" + return self._file_name + + @property + def position(self) -> int: + assert type(self._position) == int # noqa: E721 + assert self._position >= 0 + return self._position + + @property + def data(self) -> str: + assert type(self._data) == str # noqa: E721 + return self._data + + # -------------------------------------------------------------------- + _node: PostgresNode + _logs: typing.Dict[str, LogInfo] + + # -------------------------------------------------------------------- + def __init__(self, node: PostgresNode, from_beginnig: bool): + assert node is not None + assert isinstance(node, PostgresNode) + assert type(from_beginnig) == bool # noqa: E721 + + self._node = node + + if from_beginnig: + self._logs = dict() + else: + self._logs = self._collect_logs() + + assert type(self._logs) == dict # noqa: E721 + return + + def read(self) -> typing.List[LogDataBlock]: + assert self._node is not None + assert isinstance(self._node, PostgresNode) + + cur_logs: typing.Dict[__class__.LogInfo] = self._collect_logs() + assert cur_logs is not None + assert type(cur_logs) == dict # noqa: E721 + + assert type(self._logs) == dict # noqa: E721 + + result = list() + + for file_name, cur_log_info in cur_logs.items(): + assert type(file_name) == str # noqa: E721 + assert type(cur_log_info) == __class__.LogInfo # noqa: E721 + + read_pos = 0 + + if file_name in self._logs.keys(): + prev_log_info = self._logs[file_name] + assert type(prev_log_info) == __class__.LogInfo # noqa: E721 + read_pos = prev_log_info.position # the previous size + + file_content_b = self._node.os_ops.read_binary(file_name, read_pos) + assert type(file_content_b) == bytes # noqa: E721 + + # + # A POTENTIAL PROBLEM: file_content_b may contain an incompleted UTF-8 symbol. + # + file_content_s = file_content_b.decode() + assert type(file_content_s) == str # noqa: E721 + + next_read_pos = read_pos + len(file_content_b) + + # It is a research/paranoja check. + # When we will process partial UTF-8 symbol, it must be adjusted. 
+ assert cur_log_info.position <= next_read_pos + + cur_log_info.position = next_read_pos + + block = __class__.LogDataBlock( + file_name, + read_pos, + file_content_s + ) + + result.append(block) + + # A new check point + self._logs = cur_logs + + return result + + def _collect_logs(self) -> typing.Dict[LogInfo]: + assert self._node is not None + assert isinstance(self._node, PostgresNode) + + files = [ + self._node.pg_log_file + ] # yapf: disable + + result = dict() + + for f in files: + assert type(f) == str # noqa: E721 + + # skip missing files + if not self._node.os_ops.path_exists(f): + continue + + file_size = self._node.os_ops.get_file_size(f) + assert type(file_size) == int # noqa: E721 + assert file_size >= 0 + + result[f] = __class__.LogInfo(file_size) + + return result + + +class PostgresNodeUtils: + @staticmethod + def delect_port_conflict(log_reader: PostgresNodeLogReader) -> bool: + assert type(log_reader) == PostgresNodeLogReader # noqa: E721 + + blocks = log_reader.read() + assert type(blocks) == list # noqa: E721 + + for block in blocks: + assert type(block) == PostgresNodeLogReader.LogDataBlock # noqa: E721 + + if 'Is another postmaster already running on port' in block.data: + return True + + return False + + class NodeApp: def __init__(self, test_path=None, nodes_to_cleanup=None, os_ops=None): diff --git a/tests/test_testgres_common.py b/tests/test_testgres_common.py index 5b926bc8..cf203a67 100644 --- a/tests/test_testgres_common.py +++ b/tests/test_testgres_common.py @@ -5,6 +5,8 @@ from testgres.node import PgVer from testgres.node import PostgresNode +from testgres.node import PostgresNodeLogReader +from testgres.node import PostgresNodeUtils from testgres.utils import get_pg_version2 from testgres.utils import file_tail from testgres.utils import get_bin_path2 @@ -883,8 +885,29 @@ def test_backup_wrong_xlog_method(self, node_svc: PostgresNodeService): def test_pg_ctl_wait_option(self, node_svc: PostgresNodeService): assert isinstance(node_svc, PostgresNodeService) - with __class__.helper__get_node(node_svc) as node: - self.impl__test_pg_ctl_wait_option(node_svc, node) + + C_MAX_ATTEMPT = 5 + + nAttempt = 0 + + while True: + if nAttempt == C_MAX_ATTEMPT: + raise Exception("PostgresSQL did not start.") + + nAttempt += 1 + logging.info("------------------------ NODE #{}".format( + nAttempt + )) + + with __class__.helper__get_node(node_svc, port=12345) as node: + if self.impl__test_pg_ctl_wait_option(node_svc, node): + break + continue + + logging.info("OK. Test is passed. Number of attempts is {}".format( + nAttempt + )) + return def impl__test_pg_ctl_wait_option( self, @@ -899,9 +922,18 @@ def impl__test_pg_ctl_wait_option( node.init() assert node.status() == NodeStatus.Stopped + + node_log_reader = PostgresNodeLogReader(node, from_beginnig=True) + node.start(wait=False) nAttempt = 0 while True: + if PostgresNodeUtils.delect_port_conflict(node_log_reader): + logging.info("Node port {} conflicted with another PostgreSQL instance.".format( + node.port + )) + return False + if nAttempt == C_MAX_ATTEMPTS: # # [2025-03-11] @@ -960,6 +992,7 @@ def impl__test_pg_ctl_wait_option( raise Exception("Unexpected node status: {0}.".format(s1)) logging.info("OK. 
Node is stopped.") + return True def test_replicate(self, node_svc: PostgresNodeService): assert isinstance(node_svc, PostgresNodeService) From 4c6bb1714b4102504a86cb200c9dd04d7036acaa Mon Sep 17 00:00:00 2001 From: Dmitry Kovalenko Date: Wed, 7 May 2025 22:16:04 +0300 Subject: [PATCH 87/90] Update README.md The version of supported Python is corrected. --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index a3b854f8..defbc8b3 100644 --- a/README.md +++ b/README.md @@ -6,7 +6,7 @@ # testgres -PostgreSQL testing utility. Both Python 2.7 and 3.3+ are supported. +PostgreSQL testing utility. Python 3.8+ is supported. ## Installation From 354de695b13be37ad481f92a084df9809704eea1 Mon Sep 17 00:00:00 2001 From: "d.kovalenko" Date: Wed, 7 May 2025 22:22:54 +0300 Subject: [PATCH 88/90] [CI] Python 2 is not supported any more. --- Dockerfile--std-all.tmpl | 5 ----- Dockerfile--std.tmpl | 5 ----- 2 files changed, 10 deletions(-) diff --git a/Dockerfile--std-all.tmpl b/Dockerfile--std-all.tmpl index c41c5a06..d19f52a6 100644 --- a/Dockerfile--std-all.tmpl +++ b/Dockerfile--std-all.tmpl @@ -4,11 +4,6 @@ ARG PYTHON_VERSION # --------------------------------------------- base1 FROM postgres:${PG_VERSION}-alpine as base1 -# --------------------------------------------- base2_with_python-2 -FROM base1 as base2_with_python-2 -RUN apk add --no-cache curl python2 python2-dev build-base musl-dev linux-headers py-virtualenv py-pip -ENV PYTHON_VERSION=2 - # --------------------------------------------- base2_with_python-3 FROM base1 as base2_with_python-3 RUN apk add --no-cache curl python3 python3-dev build-base musl-dev linux-headers py-virtualenv diff --git a/Dockerfile--std.tmpl b/Dockerfile--std.tmpl index 91886ede..67aa30b4 100644 --- a/Dockerfile--std.tmpl +++ b/Dockerfile--std.tmpl @@ -4,11 +4,6 @@ ARG PYTHON_VERSION # --------------------------------------------- base1 FROM postgres:${PG_VERSION}-alpine as base1 -# --------------------------------------------- base2_with_python-2 -FROM base1 as base2_with_python-2 -RUN apk add --no-cache curl python2 python2-dev build-base musl-dev linux-headers py-virtualenv py-pip -ENV PYTHON_VERSION=2 - # --------------------------------------------- base2_with_python-3 FROM base1 as base2_with_python-3 RUN apk add --no-cache curl python3 python3-dev build-base musl-dev linux-headers py-virtualenv From d48477d5e32ab0870229c5f676172111fd6ba3ee Mon Sep 17 00:00:00 2001 From: Dmitry Kovalenko Date: Mon, 12 May 2025 07:18:23 +0300 Subject: [PATCH 89/90] [#258] Problems in ProbackupTest and TestBasic(ProbackupTest) are fixed (#259) * [#258] Declaration of ProbackupTest::pg_node is corrected ProbackupTest::pg_node is testgres.NodeApp, not testgres.PostgresNode. Asserts are added. * [#258] TestBasic::test_full_backup cleans node object Node cleanup is added. TODO: we should to stop node only and cleanup his data in conftest. 
--- .../pg_probackup2/tests/test_basic.py | 35 ++++++++++++------- 1 file changed, 22 insertions(+), 13 deletions(-) diff --git a/testgres/plugins/pg_probackup2/pg_probackup2/tests/test_basic.py b/testgres/plugins/pg_probackup2/pg_probackup2/tests/test_basic.py index f22a62bf..2540ddb0 100644 --- a/testgres/plugins/pg_probackup2/pg_probackup2/tests/test_basic.py +++ b/testgres/plugins/pg_probackup2/pg_probackup2/tests/test_basic.py @@ -11,7 +11,7 @@ class ProbackupTest: - pg_node: testgres.PostgresNode + pg_node: testgres.NodeApp @staticmethod def probackup_is_available() -> bool: @@ -75,21 +75,30 @@ def helper__build_backup_dir(self, backup='backup'): @pytest.mark.skipif(not ProbackupTest.probackup_is_available(), reason="Check that PGPROBACKUPBIN is defined and is valid.") class TestBasic(ProbackupTest): def test_full_backup(self): + assert self.pg_node is not None + assert type(self.pg_node) == testgres.NodeApp # noqa: E721 + assert self.pb is not None + assert type(self.pb) == ProbackupApp # noqa: E721 + # Setting up a simple test node node = self.pg_node.make_simple('node', pg_options={"fsync": "off", "synchronous_commit": "off"}) - # Initialize and configure Probackup - self.pb.init() - self.pb.add_instance('node', node) - self.pb.set_archiving('node', node) + assert node is not None + assert type(node) == testgres.PostgresNode # noqa: E721 + + with node: + # Initialize and configure Probackup + self.pb.init() + self.pb.add_instance('node', node) + self.pb.set_archiving('node', node) - # Start the node and initialize pgbench - node.slow_start() - node.pgbench_init(scale=100, no_vacuum=True) + # Start the node and initialize pgbench + node.slow_start() + node.pgbench_init(scale=100, no_vacuum=True) - # Perform backup and validation - backup_id = self.pb.backup_node('node', node) - out = self.pb.validate('node', backup_id) + # Perform backup and validation + backup_id = self.pb.backup_node('node', node) + out = self.pb.validate('node', backup_id) - # Check if the backup is valid - assert f"INFO: Backup {backup_id} is valid" in out + # Check if the backup is valid + assert f"INFO: Backup {backup_id} is valid" in out From 0470d305af4af16edd8ffa56d05a5b90cad1e128 Mon Sep 17 00:00:00 2001 From: Dmitry Kovalenko Date: Mon, 12 May 2025 18:29:10 +0300 Subject: [PATCH 90/90] conftest is updated (#260) - the calls of logging.root.handle are handled (LogWrapper2) - critical errors are processed --- tests/conftest.py | 200 +++++++++++++++++++++++----------------------- 1 file changed, 100 insertions(+), 100 deletions(-) diff --git a/tests/conftest.py b/tests/conftest.py index 9e74879b..111edc87 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -338,6 +338,7 @@ def helper__build_test_id(item: pytest.Function) -> str: g_error_msg_count_key = pytest.StashKey[int]() g_warning_msg_count_key = pytest.StashKey[int]() +g_critical_msg_count_key = pytest.StashKey[int]() # ///////////////////////////////////////////////////////////////////////////// @@ -413,10 +414,17 @@ def helper__makereport__call( assert type(outcome) == pluggy.Result # noqa: E721 # -------- - item_error_msg_count = item.stash.get(g_error_msg_count_key, 0) - assert type(item_error_msg_count) == int # noqa: E721 - assert item_error_msg_count >= 0 + item_error_msg_count1 = item.stash.get(g_error_msg_count_key, 0) + assert type(item_error_msg_count1) == int # noqa: E721 + assert item_error_msg_count1 >= 0 + item_error_msg_count2 = item.stash.get(g_critical_msg_count_key, 0) + assert type(item_error_msg_count2) == int # noqa: E721 + 
assert item_error_msg_count2 >= 0 + + item_error_msg_count = item_error_msg_count1 + item_error_msg_count2 + + # -------- item_warning_msg_count = item.stash.get(g_warning_msg_count_key, 0) assert type(item_warning_msg_count) == int # noqa: E721 assert item_warning_msg_count >= 0 @@ -600,103 +608,87 @@ def pytest_runtest_makereport(item: pytest.Function, call: pytest.CallInfo): # ///////////////////////////////////////////////////////////////////////////// -class LogErrorWrapper2: +class LogWrapper2: _old_method: any - _counter: typing.Optional[int] + _err_counter: typing.Optional[int] + _warn_counter: typing.Optional[int] + + _critical_counter: typing.Optional[int] # -------------------------------------------------------------------- def __init__(self): self._old_method = None - self._counter = None + self._err_counter = None + self._warn_counter = None + + self._critical_counter = None # -------------------------------------------------------------------- def __enter__(self): assert self._old_method is None - assert self._counter is None - - self._old_method = logging.error - self._counter = 0 - - logging.error = self - return self - - # -------------------------------------------------------------------- - def __exit__(self, exc_type, exc_val, exc_tb): - assert self._old_method is not None - assert self._counter is not None - - assert logging.error is self - - logging.error = self._old_method - - self._old_method = None - self._counter = None - return False - - # -------------------------------------------------------------------- - def __call__(self, *args, **kwargs): - assert self._old_method is not None - assert self._counter is not None - - assert type(self._counter) == int # noqa: E721 - assert self._counter >= 0 - - r = self._old_method(*args, **kwargs) - - self._counter += 1 - assert self._counter > 0 - - return r - - -# ///////////////////////////////////////////////////////////////////////////// + assert self._err_counter is None + assert self._warn_counter is None + assert self._critical_counter is None -class LogWarningWrapper2: - _old_method: any - _counter: typing.Optional[int] + assert logging.root is not None + assert isinstance(logging.root, logging.RootLogger) - # -------------------------------------------------------------------- - def __init__(self): - self._old_method = None - self._counter = None + self._old_method = logging.root.handle + self._err_counter = 0 + self._warn_counter = 0 - # -------------------------------------------------------------------- - def __enter__(self): - assert self._old_method is None - assert self._counter is None + self._critical_counter = 0 - self._old_method = logging.warning - self._counter = 0 - - logging.warning = self + logging.root.handle = self return self # -------------------------------------------------------------------- def __exit__(self, exc_type, exc_val, exc_tb): assert self._old_method is not None - assert self._counter is not None + assert self._err_counter is not None + assert self._warn_counter is not None + + assert logging.root is not None + assert isinstance(logging.root, logging.RootLogger) - assert logging.warning is self + assert logging.root.handle is self - logging.warning = self._old_method + logging.root.handle = self._old_method self._old_method = None - self._counter = None + self._err_counter = None + self._warn_counter = None + self._critical_counter = None return False # -------------------------------------------------------------------- - def __call__(self, *args, **kwargs): + def __call__(self, 
record: logging.LogRecord): + assert record is not None + assert isinstance(record, logging.LogRecord) assert self._old_method is not None - assert self._counter is not None - - assert type(self._counter) == int # noqa: E721 - assert self._counter >= 0 - - r = self._old_method(*args, **kwargs) - - self._counter += 1 - assert self._counter > 0 + assert self._err_counter is not None + assert self._warn_counter is not None + assert self._critical_counter is not None + + assert type(self._err_counter) == int # noqa: E721 + assert self._err_counter >= 0 + assert type(self._warn_counter) == int # noqa: E721 + assert self._warn_counter >= 0 + assert type(self._critical_counter) == int # noqa: E721 + assert self._critical_counter >= 0 + + r = self._old_method(record) + + if record.levelno == logging.ERROR: + self._err_counter += 1 + assert self._err_counter > 0 + elif record.levelno == logging.WARNING: + self._warn_counter += 1 + assert self._warn_counter > 0 + elif record.levelno == logging.CRITICAL: + self._critical_counter += 1 + assert self._critical_counter > 0 return r @@ -717,6 +709,13 @@ def pytest_pyfunc_call(pyfuncitem: pytest.Function): assert pyfuncitem is not None assert isinstance(pyfuncitem, pytest.Function) + assert logging.root is not None + assert isinstance(logging.root, logging.RootLogger) + assert logging.root.handle is not None + + debug__log_handle_method = logging.root.handle + assert debug__log_handle_method is not None + debug__log_error_method = logging.error assert debug__log_error_method is not None @@ -725,55 +724,56 @@ def pytest_pyfunc_call(pyfuncitem: pytest.Function): pyfuncitem.stash[g_error_msg_count_key] = 0 pyfuncitem.stash[g_warning_msg_count_key] = 0 + pyfuncitem.stash[g_critical_msg_count_key] = 0 try: - with LogErrorWrapper2() as logErrorWrapper, LogWarningWrapper2() as logWarningWrapper: - assert type(logErrorWrapper) == LogErrorWrapper2 # noqa: E721 - assert logErrorWrapper._old_method is not None - assert type(logErrorWrapper._counter) == int # noqa: E721 - assert logErrorWrapper._counter == 0 - assert logging.error is logErrorWrapper - - assert type(logWarningWrapper) == LogWarningWrapper2 # noqa: E721 - assert logWarningWrapper._old_method is not None - assert type(logWarningWrapper._counter) == int # noqa: E721 - assert logWarningWrapper._counter == 0 - assert logging.warning is logWarningWrapper + with LogWrapper2() as logWrapper: + assert type(logWrapper) == LogWrapper2 # noqa: E721 + assert logWrapper._old_method is not None + assert type(logWrapper._err_counter) == int # noqa: E721 + assert logWrapper._err_counter == 0 + assert type(logWrapper._warn_counter) == int # noqa: E721 + assert logWrapper._warn_counter == 0 + assert type(logWrapper._critical_counter) == int # noqa: E721 + assert logWrapper._critical_counter == 0 + assert logging.root.handle is logWrapper r: pluggy.Result = yield assert r is not None assert type(r) == pluggy.Result # noqa: E721 - assert logErrorWrapper._old_method is not None - assert type(logErrorWrapper._counter) == int # noqa: E721 - assert logErrorWrapper._counter >= 0 - assert logging.error is logErrorWrapper - - assert logWarningWrapper._old_method is not None - assert type(logWarningWrapper._counter) == int # noqa: E721 - assert logWarningWrapper._counter >= 0 - assert logging.warning is logWarningWrapper + assert logWrapper._old_method is not None + assert type(logWrapper._err_counter) == int # noqa: E721 + assert logWrapper._err_counter >= 0 + assert type(logWrapper._warn_counter) == int # noqa: E721 + assert 
logWrapper._warn_counter >= 0 + assert type(logWrapper._critical_counter) == int # noqa: E721 + assert logWrapper._critical_counter >= 0 + assert logging.root.handle is logWrapper assert g_error_msg_count_key in pyfuncitem.stash assert g_warning_msg_count_key in pyfuncitem.stash + assert g_critical_msg_count_key in pyfuncitem.stash assert pyfuncitem.stash[g_error_msg_count_key] == 0 assert pyfuncitem.stash[g_warning_msg_count_key] == 0 + assert pyfuncitem.stash[g_critical_msg_count_key] == 0 - pyfuncitem.stash[g_error_msg_count_key] = logErrorWrapper._counter - pyfuncitem.stash[g_warning_msg_count_key] = logWarningWrapper._counter + pyfuncitem.stash[g_error_msg_count_key] = logWrapper._err_counter + pyfuncitem.stash[g_warning_msg_count_key] = logWrapper._warn_counter + pyfuncitem.stash[g_critical_msg_count_key] = logWrapper._critical_counter if r.exception is not None: pass - elif logErrorWrapper._counter == 0: - pass - else: - assert logErrorWrapper._counter > 0 + elif logWrapper._err_counter > 0: + r.force_exception(SIGNAL_EXCEPTION()) + elif logWrapper._critical_counter > 0: r.force_exception(SIGNAL_EXCEPTION()) finally: assert logging.error is debug__log_error_method assert logging.warning is debug__log_warning_method + assert logging.root.handle == debug__log_handle_method pass
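
The interception performed by LogWrapper2 above can be reproduced in isolation: temporarily swap logging.root.handle for a callable that counts records per level, then restore the original method on exit. A self-contained sketch of the technique; the class below is invented for the illustration and is not part of conftest.py:

    # Minimal demonstration of hooking logging.root.handle to count log records.
    import logging

    class CountingHandle:
        def __init__(self):
            self._old_handle = None
            self.counts = {logging.WARNING: 0, logging.ERROR: 0, logging.CRITICAL: 0}

        def __enter__(self):
            self._old_handle = logging.root.handle
            logging.root.handle = self          # handled root records now pass through us
            return self

        def __exit__(self, exc_type, exc_val, exc_tb):
            logging.root.handle = self._old_handle
            return False

        def __call__(self, record: logging.LogRecord):
            if record.levelno in self.counts:
                self.counts[record.levelno] += 1
            return self._old_handle(record)     # still emit the record normally

    with CountingHandle() as counter:
        logging.error("boom")
        logging.warning("careful")

    assert counter.counts[logging.ERROR] == 1
    assert counter.counts[logging.WARNING] == 1

Note that the hook only sees records issued on the root logger itself, such as the module-level logging.error / logging.warning calls used throughout these tests; records emitted on child loggers go through their own handle method and reach the root only via its handlers.
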