##########################################################################
#
# pgAdmin 4 - PostgreSQL Tools
#
# Copyright (C) 2013 - 2023, The pgAdmin Development Team
# This software is released under the PostgreSQL Licence
#
##########################################################################

""" This file collect all modules/files present in tests directory and add
|
|
|
|
them to TestSuite. """

import argparse
import atexit
import logging
import os
import signal
import sys
import traceback
import json
import secrets
import threading
import time
import unittest
import asyncio
from selenium.webdriver.firefox.options import Options as FirefoxOptions

if sys.platform == "win32":
    asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())
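
# Python 3.8+ defaults to the ProactorEventLoop on Windows; the override
# above restores the selector-based loop that some of the libraries used by
# the suite still require.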

if sys.version_info < (3, 4):
    raise RuntimeError('The test suite must be run under Python 3.4 or '
                       'later.')

import builtins

# Ensure the global server mode is set.
builtins.SERVER_MODE = None

logger = logging.getLogger(__name__)
file_name = os.path.basename(__file__)

from testscenarios import scenarios

CURRENT_PATH = os.path.dirname(os.path.realpath(__file__))

# Set sys path to current directory so that we can import pgadmin package
root = os.path.dirname(CURRENT_PATH)

if sys.path[0] != root:
    sys.path.insert(0, root)
    os.chdir(root)

import config
from pgadmin import create_app

if config.SERVER_MODE is True:
    config.SECURITY_RECOVERABLE = True
    config.SECURITY_CHANGEABLE = True
    config.SECURITY_POST_CHANGE_VIEW = 'browser.change_password'

# Disable master password for test cases
config.MASTER_PASSWORD_REQUIRED = False

from regression import test_setup
from regression.feature_utils.app_starter import AppStarter

# Delete the SQLite db file if it exists
if os.path.isfile(config.TEST_SQLITE_PATH):
    os.remove(config.TEST_SQLITE_PATH)

os.environ["PGADMIN_TESTING_MODE"] = "1"

# Disable upgrade checks - no need during testing, and it'll cause an error
# if there's no network connection when it runs.
config.UPGRADE_CHECK_ENABLED = False

pgadmin_credentials = test_setup.config_data

# Set environment variables for email and password
os.environ['PGADMIN_SETUP_EMAIL'] = ''
os.environ['PGADMIN_SETUP_PASSWORD'] = ''
if pgadmin_credentials and \
        'pgAdmin4_login_credentials' in pgadmin_credentials and \
        all(item in pgadmin_credentials['pgAdmin4_login_credentials']
            for item in ['login_username', 'login_password']):
    pgadmin_credentials = pgadmin_credentials[
        'pgAdmin4_login_credentials']
    os.environ['PGADMIN_SETUP_EMAIL'] = str(pgadmin_credentials[
        'login_username'])
    os.environ['PGADMIN_SETUP_PASSWORD'] = str(pgadmin_credentials[
        'login_password'])
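
# For reference, the block above expects a test_config.json entry shaped
# roughly like this (the values here are placeholders, not real credentials):
#
#   "pgAdmin4_login_credentials": {
#       "login_username": "user@example.com",
#       "login_password": "secret"
#   }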

# Execute the setup file
exec(open("setup.py").read())

# Get the config database schema version. We store this in pgadmin.model
# as it turns out that putting it in the config files isn't a great idea
from pgadmin.model import SCHEMA_VERSION

# Delay the import of test_utils as it needs the updated config.SQLITE_PATH
from regression.python_test_utils import test_utils
from regression.python_test_utils.csrf_test_client import TestClient

config.SETTINGS_SCHEMA_VERSION = SCHEMA_VERSION
from pgadmin.utils.constants import LDAP

# Override some other defaults
from logging import WARNING

config.CONSOLE_LOG_LEVEL = WARNING

# Create the app
app = create_app()

app.PGADMIN_INT_KEY = ''
app.config.update({'SESSION_COOKIE_DOMAIN': None})
driver = None
app_starter = None
handle_cleanup = None
app.PGADMIN_RUNTIME = True
if config.SERVER_MODE is True:
    app.PGADMIN_RUNTIME = False
app.config['WTF_CSRF_ENABLED'] = True

# Authentication sources
app.PGADMIN_EXTERNAL_AUTH_SOURCE = LDAP

app.test_client_class = TestClient
test_client = app.test_client()
test_client.setApp(app)


class CaptureMail:
    # A hack Mail service that simply captures what would be sent.
    def __init__(self, app):
        app.extensions["mail"] = self
        self.sent = []
        self.ascii_attachments = []

    def send(self, msg):
        self.sent.append(msg.body)

    def pop(self):
        if self.sent:
            return self.sent.pop(0)
        return None


CaptureMail(app)
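
# Example (illustrative): a test that triggers an outgoing mail, such as a
# password reset, can inspect the captured message body afterwards with
#
#   body = app.extensions["mail"].pop()
#
# pop() returns the oldest captured body, or None if nothing was sent.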

setattr(unittest.result.TestResult, "passed", [])

unittest.runner.TextTestResult.addSuccess = test_utils.add_success

# Override apply_scenario method as we need custom test description/name
scenarios.apply_scenario = test_utils.apply_scenario


def get_suite(module_list, test_server, test_app_client, server_information,
              test_db_name, driver_passed, parallel_ui_test):
    """
    This function adds the tests to the test suite and returns the modified
    test suite.
    :param module_list: test module list
    :type module_list: list
    :param test_server: server details
    :type test_server: dict
    :param test_app_client: test client
    :type test_app_client: pgadmin app object
    :param server_information: server information
    :param test_db_name: database name
    :type test_db_name: string
    :param driver_passed: driver object to run selenium tests
    :type driver_passed: webdriver object
    :param parallel_ui_test: whether ui tests are to be run in parallel
    :type parallel_ui_test: boolean
    :return pgadmin_suite: test suite with test cases
    :rtype: TestSuite
    """
    modules = []
    pgadmin_suite = unittest.TestSuite()

    # Get each test module and add it into the list
    for key, klass in module_list:
        # Separate each test class from the list of classes and store it
        # in modules
        for item in klass:
            modules.append(item)

    # Set the test client to each module & generate the scenarios
    for module in modules:
        obj = module()
        obj.setApp(app)
        obj.setTestClient(test_app_client)
        obj.setTestServer(test_server)
        obj.setDriver(driver_passed)
        obj.setParallelUI_tests(parallel_ui_test)
        obj.setServerInformation(server_information)
        obj.setTestDatabaseName(test_db_name)
        scenario = scenarios.generate_scenarios(obj)
        pgadmin_suite.addTests(scenario)

    return pgadmin_suite


def get_test_modules(arguments):
    """
    This function loads all the modules in the tests directory into the
    testing environment.

    :param arguments: command line arguments for the module name for
     which the test suite will run
    :type arguments: dict
    :return module list: test module list
    :rtype: list
    """
    from pgadmin.utils.route import TestsGeneratorRegistry

    exclude_pkgs = []
    global driver, app_starter, handle_cleanup

    if not config.SERVER_MODE:
        # The following test cases are applicable only in server mode
        exclude_pkgs.extend([
            "browser.tests.test_change_password",
            "browser.tests.test_gravatar_image_display",
            "browser.tests.test_login",
            "browser.tests.test_logout",
            "browser.tests.test_reset_password",
            "browser.tests.test_ldap_login",
            "browser.tests.test_ldap_with_mocking",
        ])
    if arguments['exclude'] is not None:
        exclude_pkgs += arguments['exclude'].split(',')

    if 'feature_tests' not in exclude_pkgs and \
            (arguments['pkg'] is None or arguments['pkg'] == "all" or
             arguments['pkg'] == "feature_tests"):

        if arguments['pkg'] == "feature_tests":
            exclude_pkgs.extend(['resql'])

        if not test_utils.is_parallel_ui_tests(args):
            driver = setup_webdriver_specification(arguments)
            app_starter = AppStarter(driver, config)
            app_starter.start_app()

    handle_cleanup = test_utils.get_cleanup_handler(test_client, app_starter)
    # Register cleanup function to clean up on exit
    atexit.register(handle_cleanup)

    # Load test modules
    module_list = load_modules(arguments, exclude_pkgs)
    return module_list


def setup_webdriver_specification(arguments):
    """
    Returns a web-driver object set up according to the values passed in
    the arguments.
    :param arguments:
    :return: webdriver object
    """
    from selenium import webdriver
    from selenium.webdriver.chrome.options import Options
    from selenium.webdriver.common.desired_capabilities import \
        DesiredCapabilities

    default_browser = 'chrome'

    # Check if the default browser is provided through the command line. If
    # provided, use that browser as the default browser, else check for the
    # setting provided in the test_config.json file.
    if (
        'default_browser' in arguments and
        arguments['default_browser'] is not None
    ):
        default_browser = arguments['default_browser'].lower()
    elif (
        test_setup.config_data and
        "default_browser" in test_setup.config_data
    ):
        default_browser = test_setup.config_data[
            'default_browser'].lower()

    if default_browser == 'firefox':
        options = FirefoxOptions()
        cap = DesiredCapabilities.FIREFOX
        cap['requireWindowFocus'] = True
        cap['enablePersistentHover'] = False
        profile = webdriver.FirefoxProfile()
        profile.set_preference("dom.disable_beforeunload", True)
        options.profile = profile
        driver_local = webdriver.Firefox(options=options)
        driver_local.implicitly_wait(1)
    else:
        options = Options()
        if test_setup.config_data and \
                'headless_chrome' in test_setup.config_data and \
                test_setup.config_data['headless_chrome']:
            options.add_argument("--headless")
        options.add_argument("--no-sandbox")
        options.add_argument("--disable-setuid-sandbox")
        options.add_argument("--window-size=1790,1080")
        options.add_argument("--disable-infobars")
        # options.add_experimental_option('w3c', False)
        driver_local = webdriver.Chrome(options=options)

    # Maximize the browser window
    driver_local.maximize_window()
    return driver_local


def load_modules(arguments, exclude_pkgs):
    """
    Returns the list of test modules, formed by removing the packages given
    in exclude_pkgs.
    :param arguments:
    :param exclude_pkgs:
    :return:
    """
    from pgadmin.utils.route import TestsGeneratorRegistry
    # Load the test modules which are in the given package
    # (i.e. in arguments.pkg)
    for_modules = []
    if arguments['modules'] is not None:
        for_modules = arguments['modules'].split(',')

    if arguments['pkg'] is None or arguments['pkg'] == "all":
        TestsGeneratorRegistry.load_generators(arguments['pkg'],
                                               'pgadmin', exclude_pkgs)
    elif arguments['pkg'] is not None and arguments['pkg'] == "resql":
        # Load the reverse engineering sql test module
        TestsGeneratorRegistry.load_generators(arguments['pkg'],
                                               'pgadmin', exclude_pkgs,
                                               for_modules,
                                               is_resql_only=True)
    elif arguments['pkg'] is not None and arguments['pkg'] == "feature_tests":
        # Load the feature test module
        TestsGeneratorRegistry.load_generators(arguments['pkg'],
                                               'regression.%s' %
                                               arguments['pkg'],
                                               exclude_pkgs,
                                               for_modules)
    else:
        TestsGeneratorRegistry.load_generators(arguments['pkg'],
                                               'pgadmin.%s' %
                                               arguments['pkg'],
                                               exclude_pkgs,
                                               for_modules)

    # Sort the module list so that the test suite executes the test cases
    # sequentially
    module_list = TestsGeneratorRegistry.registry.items()
    module_list = sorted(module_list, key=lambda module_tuple: module_tuple[0])
    return module_list


def add_arguments():
    """
    This function parses the command line arguments (e.g. the project's
    package name, such as browser) and adds them to the parser.

    :return args: command line argument for pgadmin's package name
    :rtype: argparse namespace
    """
    parser = argparse.ArgumentParser(description='Test suite for pgAdmin4')
    parser.add_argument(
        '--pkg',
        help='Executes the test cases of a particular package and '
             'sub-packages'
    )
    parser.add_argument(
        '--exclude',
        help='Skips execution of the test cases of a particular package and '
             'sub-packages'
    )
    parser.add_argument(
        '--default_browser',
        help='Executes the feature tests in a specific browser'
    )
    parser.add_argument(
        '--modules',
        help='Executes the feature tests for specific modules in pkg'
    )
    parser.add_argument('--parallel', nargs='?', const=True,
                        type=bool, default=False,
                        help='Enable parallel Feature Tests')
    arg = parser.parse_args()

    return arg
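

# Example invocations (illustrative; assumes this file is run directly as a
# script, e.g. saved as runtests.py, using the options defined above):
#
#   python runtests.py --pkg browser
#   python runtests.py --exclude feature_tests,browser.tests.test_login
#   python runtests.py --pkg feature_tests --parallel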


def sig_handler(signo, frame):
    global handle_cleanup
    if handle_cleanup:
        handle_cleanup()


def update_test_result(test_cases, test_result_dict):
    """
    This function updates the test results into the appropriate test
    behaviours, i.e. passed/failed/skipped.
    :param test_cases: test cases
    :type test_cases: dict
    :param test_result_dict: test result to be stored
    :type test_result_dict: dict
    :return: None
    """
    for test_case in test_cases:
        test_class_name = test_case[0].__class__.__name__
        test_scenario_name = getattr(
            test_case[0], 'scenario_name', str(test_case[0])
        )
        if test_class_name in test_result_dict:
            test_result_dict[test_class_name].append(
                {test_scenario_name: test_case[1]})
        else:
            test_result_dict[test_class_name] = \
                [{test_scenario_name: test_case[1]}]


def get_tests_result(test_suite):
    """This function returns the total number of tests run, along with the
    failed, skipped and passed test case details."""
    try:
        total_ran = test_suite.testsRun
        passed_cases_result = {}
        failed_cases_result = {}
        skipped_cases_result = {}
        if total_ran:
            passed = test_suite.passed
            failures = test_suite.failures
            errors = test_suite.errors
            skipped = test_suite.skipped
            if passed:
                update_test_result(passed, passed_cases_result)
            if failures:
                update_test_result(failures, failed_cases_result)
            if errors:
                update_test_result(errors, failed_cases_result)
            if skipped:
                update_test_result(skipped, skipped_cases_result)

        return total_ran, failed_cases_result, skipped_cases_result, \
            passed_cases_result
    except Exception:
        traceback.print_exc(file=sys.stderr)


class StreamToLogger:
    def __init__(self, logger, log_level=logging.INFO):
        self.terminal = sys.stderr
        self.logger = logger
        self.log_level = log_level
        self.linebuf = ''

    def write(self, buf):
        """
        This function writes the log to the logger file as well as to the
        console.

        :param buf: log message
        :type buf: str
        :return: None
        """
        self.terminal.write(buf)
        for line in buf.rstrip().splitlines():
            self.logger.log(self.log_level, line.rstrip())

    def flush(self):
        # Required as part of the stream interface used by the logger
        pass


def execute_test(test_module_list_passed, server_passed, driver_passed,
                 parallel_ui_test=False):
    """
    This function actually executes the tests.
    :param test_module_list_passed: test modules
    :param server_passed: server details
    :param driver_passed: webdriver object
    :param parallel_ui_test: whether ui tests are to be run in parallel
    :return:
    """
    server_information = None
    try:
        print("\n=============Running the test cases for '%s' ============="
              % server_passed['name'], file=sys.stderr)
        # Create test server
        server_information = \
            test_utils.create_parent_server_node(server_passed)

        # Create the test database with a random number to avoid conflicts in
        # parallel execution on different platforms. This database will be
        # used across all feature tests.
        test_db_name = "acceptance_test_db" + \
                       str(secrets.choice(range(10000, 65535)))
        connection = test_utils.get_db_connection(
            server_passed['db'],
            server_passed['username'],
            server_passed['db_password'],
            server_passed['host'],
            server_passed['port'],
            server_passed['sslmode']
        )

        # Add the server version in the server information
        server_information['server_version'] = connection.info.server_version
        server_information['type'] = server_passed['type']

        # Drop the database if it already exists.
        test_utils.drop_database(connection, test_db_name)

        # Create the database
        test_utils.create_database(server_passed, test_db_name)

        # Configure preferences for the test cases
        test_utils.configure_preferences(
            default_binary_path=server_passed['default_binary_paths'])

        # Create users to run selenoid tests in parallel
        if parallel_ui_test:
            server_passed['login_details'] = \
                test_utils.create_users_for_parallel_tests(test_client)

        # Get the unit test suite
        suite = get_suite(test_module_list_passed,
                          server_passed,
                          test_client,
                          server_information, test_db_name, driver_passed,
                          parallel_ui_test=parallel_ui_test)

        # Run the unit test suite created above
        tests = unittest.TextTestRunner(stream=sys.stderr,
                                        descriptions=True,
                                        verbosity=2).run(suite)

        # Process the results
        ran_tests, failed_cases, skipped_cases, passed_cases = \
            get_tests_result(tests)

        # This is required when some tests are running in parallel
        # & some sequentially in the case of parallel ui tests
        if threading.current_thread().name == "sequential_tests":
            try:
                if test_result[server_passed['name']][0] is not None:
                    ran_tests = test_result[server_passed['name']][0] + \
                        ran_tests
                    failed_cases.update(test_result[server_passed['name']][1])
                    skipped_cases.update(
                        test_result[server_passed['name']][2])
                    passed_cases.update(test_result[server_passed['name']][3])
                    test_result[server_passed['name']] = [ran_tests,
                                                          failed_cases,
                                                          skipped_cases,
                                                          passed_cases]
            except KeyError:
                pass

        # Add the final results server-wise in the test_result dict
        test_result[server_passed['name']] = [ran_tests, failed_cases,
                                              skipped_cases, passed_cases]

        # Set an empty list for the 'passed' parameter for each testRun,
        # so that it will not append the same test case name
        # unittest.result.TestResult.passed = []

        # Drop the testing database created initially
        if connection:
            test_utils.drop_database(connection, test_db_name)
            connection.close()
    except Exception as exc:
        traceback.print_exc(file=sys.stderr)
        print(str(exc))
        print("Exception in {0} {1}".format(
            threading.current_thread().ident,
            threading.current_thread().name))
        # Mark failure as true
        if 'other session using the database.' not in str(exc) and \
                'other sessions using the database.' not in str(exc):
            global failure
            failure = True
    finally:
        # Delete the test server
        if server_information:
            test_utils.delete_server(test_client, server_information)
        # Delete the web-driver instance
        thread_name = "parallel_tests" + server_passed['name']
        if threading.current_thread().name == thread_name:
            test_utils.quit_webdriver(driver_passed)
            time.sleep(20)

        # Print info about completed tests
        print(
            "\n=============Completed the test cases for '%s'============="
            % server_passed['name'], file=sys.stderr)


def run_parallel_tests(url_client, servers_details, parallel_tests_lists,
                       name_of_browser, version_of_browser, max_thread_count):
    """
    Function used to run tests in parallel.
    :param url_client:
    :param servers_details:
    :param parallel_tests_lists:
    :param name_of_browser:
    :param version_of_browser:
    :param max_thread_count:
    """
    driver_object = None
    try:
        # Thread list
        threads_list = []
        # Create a thread for each server
        for ser in servers_details:
            while True:
                # If active thread count <= max_thread_count, add new thread
                if threading.active_count() <= max_thread_count:
                    # Get a remote web-driver instance at server level
                    driver_object = \
                        test_utils.get_remote_webdriver(hub_url,
                                                        name_of_browser,
                                                        version_of_browser,
                                                        ser['name'],
                                                        url_client)
                    # Launch the client url in the browser
                    test_utils.launch_url_in_browser(
                        driver_object, url_client, timeout=60)

                    # Add a name for the thread
                    thread_name = "parallel_tests" + ser['name']

                    # Start the thread
                    t = threading.Thread(target=execute_test,
                                         name=thread_name,
                                         args=(parallel_tests_lists, ser,
                                               driver_object, True))
                    threads_list.append(t)
                    t.start()
                    time.sleep(10)
                    break
                # else sleep for 10 seconds
                else:
                    time.sleep(10)

        # Wait for all the threads to finish
        for t in threads_list:
            t.join()

    except Exception as exc:
        # Print the exception stack trace
        traceback.print_exc(file=sys.stderr)
        print('Exception before starting tests for ' + ser['name'],
              file=sys.stderr)
        print(str(exc), file=sys.stderr)

        # Mark failure as true
        global failure
        failure = True

        # Clean up the driver object created
        if driver_object is not None:
            driver_object.quit()


def run_sequential_tests(url_client, servers_details, sequential_tests_lists,
                         name_of_browser, version_of_browser):
    """
    This function is used to execute tests that need to be run in a
    sequential manner.
    :param url_client:
    :param servers_details:
    :param sequential_tests_lists:
    :param name_of_browser:
    :param version_of_browser:
    :return:
    """
    driver_object = None
    try:
        # Get a remote web-driver instance
        driver_object = test_utils.get_remote_webdriver(hub_url,
                                                        name_of_browser,
                                                        version_of_browser,
                                                        "Sequential_Tests",
                                                        url_client)

        # Launch the client url in the browser
        test_utils.launch_url_in_browser(driver_object, url_client)

        # Add a name for the thread
        thread_name = "sequential_tests"

        # Start the thread
        for ser in servers_details:
            t = threading.Thread(target=execute_test,
                                 name=thread_name,
                                 args=(sequential_tests_lists, ser,
                                       driver_object, True))
            t.start()
            t.join()
    except Exception as exc:
        # Print the exception stack trace
        traceback.print_exc(file=sys.stderr)
        print(str(exc))
    finally:
        # Clean up the driver object created
        test_utils.quit_webdriver(driver_object)


def print_test_results():
    print(
        "\n==============================================================="
        "=======",
        file=sys.stderr
    )
    print("Test Result Summary", file=sys.stderr)
    print(
        "==================================================================="
        "===\n", file=sys.stderr
    )

    test_result_json = {}
    for server_res in test_result:
        failed_cases = test_result[server_res][1]
        skipped_cases = test_result[server_res][2]
        passed_cases = test_result[server_res][3]
        skipped_cases, skipped_cases_json = test_utils.get_scenario_name(
            skipped_cases)
        failed_cases, failed_cases_json = test_utils.get_scenario_name(
            failed_cases)

        total_failed = sum(list((len(value)) for key, value in
                                failed_cases.items()))
        total_skipped = sum(list((len(value)) for key, value in
                                 skipped_cases.items()))

        total_passed_cases = int(
            test_result[server_res][0]) - total_failed - total_skipped

        if len(failed_cases) > 0:
            global failure
            failure = True

        print(
            "%s:\n\n\t%s test%s passed\n\t%s test%s failed%s%s"
            "\n\t%s test%s skipped%s%s\n" %
            (server_res, total_passed_cases,
             (total_passed_cases != 1 and "s" or ""),
             total_failed, (total_failed != 1 and "s" or ""),
             (total_failed != 0 and ":\n\t\t" or ""),
             "\n\t\t".join("{0} ({1})".format(key, ",\n\t\t\t\t\t".join(
                 map(str, value))) for key, value in failed_cases.items()),
             total_skipped, (total_skipped != 1 and "s" or ""),
             (total_skipped != 0 and ":\n\t\t" or ""),
             "\n\t\t".join("{0} ({1})".format(key, ",\n\t\t\t\t\t".join(
                 map(str, value))) for key, value in skipped_cases.items())),
            file=sys.stderr)

        temp_dict_for_server = {
            server_res: {
                "tests_passed": [total_passed_cases, passed_cases],
                "tests_failed": [total_failed, failed_cases_json],
                "tests_skipped": [total_skipped, skipped_cases_json]
            }
        }
        test_result_json.update(temp_dict_for_server)

    # Dump the test results into a json file
    json_file_path = CURRENT_PATH + "/test_result.json"
    with open(json_file_path, 'w') as outfile:
        json.dump(test_result_json, outfile, indent=2)

    print(
        "==================================================================="
        "===\n",
        file=sys.stderr
    )


if __name__ == '__main__':
    # Failure detected?
    failure = False
    test_result = dict()
    cov = None

    # Set signal handlers for cleanup
    signal_list = dir(signal)
    required_signal_list = ['SIGTERM', 'SIGABRT', 'SIGQUIT', 'SIGINT']
    # Get the OS-wise supported signals
    supported_signal_list = [sig for sig in required_signal_list if
                             sig in signal_list]
    for sig in supported_signal_list:
        signal.signal(getattr(signal, sig), sig_handler)

    # Set basic logging configuration for the log file
    fh = logging.FileHandler(CURRENT_PATH + '/' +
                             'regression.log', 'w', 'utf-8')
    fh.setLevel(logging.DEBUG)
    fh.setFormatter(logging.Formatter('[%(thread)d] ' +
                                      config.FILE_LOG_FORMAT))

    logger = logging.getLogger()
    logger.addHandler(fh)

    # Create a logger to write the log in the logger file as well as on
    # the console
    stderr_logger = logging.getLogger('STDERR')
    sys.stderr = StreamToLogger(stderr_logger, logging.ERROR)
    args = vars(add_arguments())
    # Get the test module list
    try:
        test_module_list = get_test_modules(args)
    except Exception as e:
        print(str(e))
        sys.exit(1)
    # Login the test client
    test_utils.login_tester_account(test_client)

    servers_info = test_utils.get_config_data()
    node_name = "all"
    if args['pkg'] is not None:
        node_name = args['pkg'].split('.')[-1]

    is_parallel_ui_tests = test_utils.is_parallel_ui_tests(args)
    # Check if feature tests are included & the parallel tests switch passed
    if test_utils.is_feature_test_included(args) and is_parallel_ui_tests:
        if config.SERVER_MODE:
            try:
                # Get the selenium config dict
                selenoid_config = test_setup.config_data['selenoid_config']

                # Set the DEFAULT_SERVER value
                default_server = selenoid_config['pgAdmin_default_server']
                os.environ["PGADMIN_CONFIG_DEFAULT_SERVER"] = str(
                    default_server)
                config.DEFAULT_SERVER = str(default_server)

                # Get the hub url
                hub_url = selenoid_config['selenoid_url']

                # Get the selenium grid status & the list of available
                # browsers out of those passed
                selenium_grid_status, list_of_browsers = test_utils.\
                    get_selenium_grid_status_and_browser_list(hub_url, args)

                # Execute tests if selenium-grid is up
                if selenium_grid_status and len(list_of_browsers) > 0:
                    app_starter_local = None
                    # Run across browsers
                    for browser_info in list_of_browsers:
                        try:
                            # Browser info
                            browser_name, browser_version = \
                                test_utils.get_browser_details(browser_info,
                                                               hub_url)

                            # Test lists can be executed in
                            # parallel & sequentially
                            parallel_tests, sequential_tests = \
                                test_utils.get_parallel_sequential_module_list(
                                    test_module_list)

                            # Print the test summary
                            test_utils.print_test_summary(
                                test_module_list, parallel_tests,
                                sequential_tests,
                                browser_name, browser_version)

                            # Create the app from the source code
                            app_starter_local = AppStarter(None, config)
                            client_url = app_starter_local.start_app()

                            if config.DEBUG:
                                pgAdmin_wait_time = \
                                    selenoid_config['pgAdmin_max_up_time']
                                print('pgAdmin is launched with DEBUG=True, '
                                      'hence sleeping for %s seconds.' %
                                      pgAdmin_wait_time,
                                      file=sys.stderr)

                                time.sleep(int(pgAdmin_wait_time))

                            # Running parallel tests
                            if len(parallel_tests) > 0:
                                parallel_sessions = \
                                    int(selenoid_config[
                                        'max_parallel_sessions'])

                                run_parallel_tests(
                                    client_url, servers_info, parallel_tests,
                                    browser_name, browser_version,
                                    parallel_sessions)

                            # Sequential tests
                            if len(sequential_tests) > 0:
                                run_sequential_tests(
                                    client_url, servers_info,
                                    sequential_tests,
                                    browser_name, browser_version)

                            # Clean up the environment
                            if app_starter_local:
                                app_starter_local.stop_app()

                            # Pause before printing the results in order
                            # not to mix the output
                            time.sleep(5)

                            print(
                                "\n============= Test execution with {0} is "
                                "completed.=============".format(
                                    browser_name),
                                file=sys.stderr)
                            print_test_results()

                        except SystemExit:
                            if app_starter_local:
                                app_starter_local.stop_app()
                            if handle_cleanup:
                                handle_cleanup()
                            raise
                else:
                    print(
                        "\n============= Either Selenium Grid is NOT up OR"
                        " the browser list is empty =============",
                        file=sys.stderr)
                    failure = True
            except Exception as exc:
                # Print the exception stack trace
                traceback.print_exc(file=sys.stderr)
                print(str(exc))
                failure = True
            del os.environ["PGADMIN_CONFIG_DEFAULT_SERVER"]
        else:
            print(
                "\n============= Please turn on Server Mode to run selenoid "
                "tests =============", file=sys.stderr)
            failure = True
    else:
        try:
            for server in servers_info:
                thread = threading.Thread(target=execute_test, args=(
                    test_module_list, server, driver))
                thread.start()
                thread.join()
        except SystemExit:
            if handle_cleanup:
                handle_cleanup()
            raise
        print_test_results()

    print("Please check output in file: %s/regression.log\n" % CURRENT_PATH)

    # Unset the environment variable
    del os.environ["PGADMIN_TESTING_MODE"]

    if failure:
        sys.exit(1)
    else:
        sys.exit(0)