Merge pull request #2364 from pypeclub/feature/OP-2042_nuke-testing-class

Nuke testing class
Petr Kalis 2021-12-13 13:14:10 +01:00 committed by GitHub
commit 4a32cf9f3c
21 changed files with 548 additions and 165 deletions

View file

@ -356,9 +356,22 @@ def run(script):
"--pyargs",
help="Run tests from package",
default=None)
def runtests(folder, mark, pyargs):
@click.option("-t",
"--test_data_folder",
help="Unzipped directory path of test file",
default=None)
@click.option("-s",
"--persist",
help="Persist test DB and published files after test end",
default=None)
@click.option("-a",
"--app_variant",
help="Provide specific app variant for test, empty for latest",
default=None)
def runtests(folder, mark, pyargs, test_data_folder, persist, app_variant):
"""Run all automatic tests after proper initialization via start.py"""
PypeCommands().run_tests(folder, mark, pyargs)
PypeCommands().run_tests(folder, mark, pyargs, test_data_folder,
persist, app_variant)
@main.command()

View file

@ -42,10 +42,14 @@ class NukeRenderLocal(openpype.api.Extractor):
self.log.info("Start frame: {}".format(first_frame))
self.log.info("End frame: {}".format(last_frame))
# write node url might contain nuke's TCL expression
# as [python ...]/path...
path = node["file"].evaluate()
# Ensure output directory exists.
directory = os.path.dirname(node["file"].value())
if not os.path.exists(directory):
os.makedirs(directory)
out_dir = os.path.dirname(path)
if not os.path.exists(out_dir):
os.makedirs(out_dir)
# Render frames
nuke.execute(
@ -58,15 +62,12 @@ class NukeRenderLocal(openpype.api.Extractor):
if "slate" in families:
first_frame += 1
path = node['file'].value()
out_dir = os.path.dirname(path)
ext = node["file_type"].value()
if "representations" not in instance.data:
instance.data["representations"] = []
collected_frames = os.listdir(out_dir)
if len(collected_frames) == 1:
repre = {
'name': ext,
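The switch from `value()` to `evaluate()` above is the core of this change. A minimal sketch of the difference, assuming a Nuke write node whose file knob holds an expression (the expression and paths are illustrative):
```
# illustrative only, runs inside Nuke's Python interpreter
raw_value = node["file"].value()     # returns the expression text verbatim,
                                     # eg. "[python {...}]/renders/shot.####.exr"
resolved = node["file"].evaluate()   # resolves the TCL/Python expression
out_dir = os.path.dirname(resolved)  # safe to pass to os.makedirs
```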

View file

@ -67,7 +67,9 @@ class ValidateRenderedFrames(pyblish.api.InstancePlugin):
if not repre.get("files"):
msg = ("no frames were collected, "
"you need to render them")
"you need to render them.\n"
"Check properties of write node (group) and"
"select 'Local' option in 'Publish' dropdown.")
self.log.error(msg)
raise ValidationException(msg)

View file

@ -716,6 +716,8 @@ class ApplicationLaunchContext:
# subprocess.Popen launch arguments (first argument in constructor)
self.launch_args = executable.as_args()
self.launch_args.extend(application.arguments)
if self.data.get("app_args"):
self.launch_args.extend(self.data.pop("app_args"))
# Handle launch environments
env = self.data.pop("env", None)

View file

@ -307,7 +307,6 @@ class HostDirmap:
mapping = {}
if not project_settings["global"]["sync_server"]["enabled"]:
log.debug("Site Sync not enabled")
return mapping
from openpype.settings.lib import get_site_local_overrides

View file

@ -2,6 +2,7 @@ import os
from datetime import datetime
import sys
from bson.objectid import ObjectId
import collections
import pyblish.util
import pyblish.api
@ -140,7 +141,9 @@ def find_variant_key(application_manager, host):
found_variant_key = None
# finds most up-to-date variant if any installed
for variant_key, variant in app_group.variants.items():
sorted_variants = collections.OrderedDict(
sorted(app_group.variants.items()))
for variant_key, variant in sorted_variants.items():
for executable in variant.executables:
if executable.exists():
found_variant_key = variant_key

View file

@ -216,6 +216,7 @@ class PypeCommands:
task_name,
app_name
)
print("env:: {}".format(env))
os.environ.update(env)
os.environ["OPENPYPE_PUBLISH_DATA"] = batch_dir
@ -340,7 +341,8 @@ class PypeCommands:
def validate_jsons(self):
pass
def run_tests(self, folder, mark, pyargs):
def run_tests(self, folder, mark, pyargs,
test_data_folder, persist, app_variant):
"""
Runs tests from 'folder'
@ -348,25 +350,39 @@ class PypeCommands:
folder (str): relative path to folder with tests
mark (str): label to run tests marked by it (slow etc)
pyargs (str): package path to test
test_data_folder (str): url to a folder with unzipped test data
persist (bool): True to keep the test DB and published files after
    the test ends
app_variant (str): variant (eg. 2020 for AE), empty to use the
    latest installed version
"""
print("run_tests")
import subprocess
if folder:
folder = " ".join(list(folder))
else:
folder = "../tests"
mark_str = pyargs_str = ''
# disable warnings and show captured stdout even if success
args = ["--disable-pytest-warnings", "-rP", folder]
if mark:
mark_str = "-m {}".format(mark)
args.extend(["-m", mark])
if pyargs:
pyargs_str = "--pyargs {}".format(pyargs)
args.extend(["--pyargs", pyargs])
cmd = "pytest {} {} {}".format(folder, mark_str, pyargs_str)
print("Running {}".format(cmd))
subprocess.run(cmd)
if test_data_folder:
args.extend(["--test_data_folder", test_data_folder])
if persist:
args.extend(["--persist", persist])
if app_variant:
args.extend(["--app_variant", app_variant])
print("run_tests args: {}".format(args))
import pytest
pytest.main(args)
def syncserver(self, active_site):
"""Start running sync_server in background."""

View file

@ -142,7 +142,7 @@
"icon": "{}/app_icons/nuke.png",
"host_name": "nuke",
"environment": {
"NUKE_PATH": "{OPENPYPE_STUDIO_PLUGINS}/nuke"
"NUKE_PATH": ["{NUKE_PATH}", "{OPENPYPE_STUDIO_PLUGINS}/nuke"]
},
"variants": {
"13-0": {
@ -248,7 +248,7 @@
"icon": "{}/app_icons/nuke.png",
"host_name": "nuke",
"environment": {
"NUKE_PATH": "{OPENPYPE_STUDIO_PLUGINS}/nuke"
"NUKE_PATH": ["{NUKE_PATH}", "{OPENPYPE_STUDIO_PLUGINS}/nuke"]
},
"variants": {
"13-0": {

View file

@ -925,7 +925,9 @@ def boot():
sys.exit(1)
os.environ["OPENPYPE_MONGO"] = openpype_mongo
os.environ["OPENPYPE_DATABASE_NAME"] = "openpype" # name of Pype database
# name of Pype database
os.environ["OPENPYPE_DATABASE_NAME"] = \
os.environ.get("OPENPYPE_DATABASE_NAME") or "openpype"
_print(">>> run disk mapping command ...")
run_disk_mapping_commands(openpype_mongo)

View file

@ -14,12 +14,12 @@ How to run:
----------
- single test class could be run by PyCharm and its pytest runner directly
- OR
- use Openpype command 'runtests' from command line
-- `${OPENPYPE_ROOT}/start.py runtests`
- use Openpype command 'runtests' from command line (`.venv` in ${OPENPYPE_ROOT} must be activated to use configured Python!)
-- `${OPENPYPE_ROOT}/python start.py runtests`
By default, this command will run all tests in ${OPENPYPE_ROOT}/tests.
A specific location can be provided to this command as an argument, either as an absolute path or as a path relative to ${OPENPYPE_ROOT}.
(eg. `${OPENPYPE_ROOT}/start.py runtests ../tests/integration`) will trigger only tests in `integration` folder.
(eg. `${OPENPYPE_ROOT}/python start.py runtests ../tests/integration` will trigger only tests in the `integration` folder).
See `${OPENPYPE_ROOT}/cli.py:runtests` for other arguments.
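The optional arguments introduced in this PR (see `cli.py:runtests` and `tests/conftest.py`) can be appended to the same command; the values below are only illustrative:
-- `${OPENPYPE_ROOT}/python start.py runtests ../tests/integration --app_variant 2022` runs against an explicit app variant instead of the latest installed one
-- add `--persist true` to keep the test DBs (`test_db`, `test_openpype`) and published files after the test ends
-- add `--test_data_folder /path/to/unzipped_test_data` to reuse an already downloaded and unzipped test data folder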

View file

@ -5,33 +5,64 @@ Contains end-to-end tests for automatic testing of OP.
Should run headless publishes on all hosts to check basic publish use cases
automatically and limit regression issues.
How to run
----------
- activate `{OPENPYPE_ROOT}/.venv`
- run in cmd
`{OPENPYPE_ROOT}/.venv/Scripts/python.exe {OPENPYPE_ROOT}/start.py runtests {OPENPYPE_ROOT}/tests/integration`
- add `hosts/APP_NAME` after the integration part to limit the run to a specific app (eg. `{OPENPYPE_ROOT}/tests/integration/hosts/maya`)
OR use built executables
`openpype_console runtests {ABS_PATH}/tests/integration`
How to check logs/errors from app
--------------------------------
Keep PERSIST set to True in the class and check the `test_openpype.logs` collection.
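A minimal sketch of how that collection can be inspected with pymongo, assuming `OPENPYPE_MONGO` points to the same Mongo instance the test used and that the database/collection names match `test_openpype.logs` above:
```
import os
from pymongo import MongoClient

client = MongoClient(os.environ["OPENPYPE_MONGO"])
for record in client["test_openpype"]["logs"].find():
    # field names depend on the OpenPype log handler, so print whole documents
    print(record)
```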
How to create test for publishing from host
------------------------------------------
- Extend PublishTest
- Extend PublishTest in `tests/lib/testing_classes.py`
- Use `resources\test_data.zip` skeleton file as a template for testing input data
- Put the workfile into `test_data.zip/input/workfile`
- If you require DB dumps other than the base ones, provide them in `test_data.zip/input/dumps`
-- (Check the commented code in `db_handler.py` for how to dump a specific DB. Currently all collections are dumped.)
- Implement `last_workfile_path`
- Implement `startup_scripts` - it must point the host to the startup script saved in `test_data.zip/input/startup`
-- Script must contain something like
-- Script must contain something like (pseudocode)
```
import openpype
import pyblish.util
from avalon import api, HOST  # HOST - host module, eg. avalon.nuke
from openpype.api import Logger
log = Logger().get_logger(__name__)
api.install(HOST)
pyblish.util.publish()
log_lines = []
for result in pyblish.util.publish_iter():
for record in result["records"]: # for logging to test_openpype DB
log_lines.append("{}: {}".format(
result["plugin"].label, record.msg))
if result["error"]:
err_fmt = "Failed {plugin.__name__}: {error} -- {error.traceback}"
log.error(err_fmt.format(**result))
EXIT_APP (command to exit host)
```
(Install and publish methods must be triggered only AFTER host app is fully initialized!)
- Zip `test_data.zip`, named it with descriptive name, upload it to Google Drive, right click - `Get link`, copy hash id
- If you would like to add any command line arguments for your host app, add them to `test_data.zip/input/app_args/app_args.json` (as a json list)
- Provide any required environment variables to `test_data.zip/input/env_vars/env_vars.json` (as a json dictionary)
- Zip `test_data.zip`, give it a descriptive name, upload it to Google Drive, right click - `Get link`, copy the hash id (the file must be accessible to anyone with the link!)
- Put this hash id and the zip file name into TEST_FILES [(HASH_ID, FILE_NAME, MD5_OPTIONAL)]. If you want to check the MD5 of the downloaded file, provide the md5 value of the zipped file.
- Implement any assert checks you need in the extended class
- Run the test class manually (via PyCharm or a pytest runner (TODO))
- If you want test to compare expected files to published one, set PERSIST to True, run test manually
- If you want the test to visually compare expected files to the published ones, set PERSIST to True and run the test manually
-- Locate the temporary `publish` subfolder of the temporary folder (its path is printed in the debugging console log)
-- Copy the whole folder content into the `expected` subfolder of the .zip file
-- By default tests compare only the structure of `expected` and the published output (eg. if you want to save space, replace published files with empty files, but keep the expected names!)
-- Zip and upload again, change PERSIST back to False
- Use the `TEST_DATA_FOLDER` variable in your class to reuse already downloaded and unzipped test data (for faster creation of tests)
- Keep `APP_VARIANT` empty to run the test on the latest installed version of the app, or provide an explicit value ('2022' for Photoshop, for example)
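A minimal skeleton combining the steps above, modelled on the Maya and Nuke test classes added in this PR (the class names, Google Drive hash and workfile name are placeholders):
```
import os
import pytest

from tests.lib.assert_classes import DBAssert
from tests.lib.testing_classes import HostFixtures


class MyHostTestClass(HostFixtures):
    @pytest.fixture(scope="module")
    def last_workfile_path(self, download_test_data, output_folder_url):
        # real hosts usually copy the workfile from input/workfile into
        # output_folder_url first (see the maya/nuke lib.py modules);
        # this minimal version just yields the input path
        yield os.path.join(download_test_data, "input", "workfile",
                           "test_project_test_asset_TestTask_v001.ext")

    @pytest.fixture(scope="module")
    def startup_scripts(self, monkeypatch_session, download_test_data):
        # point the host to test_data.zip/input/startup, eg. via an env var
        pass


class TestPublishInMyHost(MyHostTestClass):
    PERSIST = False  # True - keep test_db, test_openpype and published files
    TEST_FILES = [("GOOGLE_DRIVE_HASH_ID", "test_myhost_publish.zip", "")]
    APP = "myhost"
    APP_VARIANT = ""  # empty - use the latest installed variant
    TIMEOUT = 120     # publish timeout in seconds

    def test_db_asserts(self, dbcon, publish_finished):
        failures = [DBAssert.count_of_types(dbcon, "version", 2)]
        assert not any(failures)
```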

View file

@ -0,0 +1,35 @@
# -*- coding: utf-8 -*-
# adds command line arguments for 'runtests' as fixtures
import pytest
def pytest_addoption(parser):
parser.addoption(
"--test_data_folder", action="store", default=None,
help="Provide url of a folder of unzipped test file"
)
parser.addoption(
"--persist", action="store", default=None,
help="True - keep test_db, test_openpype, outputted test files"
)
parser.addoption(
"--app_variant", action="store", default=None,
help="Keep empty to locate latest installed variant or explicit"
)
@pytest.fixture(scope="module")
def test_data_folder(request):
return request.config.getoption("--test_data_folder")
@pytest.fixture(scope="module")
def persist(request):
return request.config.getoption("--persist")
@pytest.fixture(scope="module")
def app_variant(request):
return request.config.getoption("--app_variant")

View file

@ -0,0 +1,41 @@
import os
import pytest
import shutil
from tests.lib.testing_classes import HostFixtures
class MayaTestClass(HostFixtures):
@pytest.fixture(scope="module")
def last_workfile_path(self, download_test_data, output_folder_url):
"""Get last_workfile_path from source data.
Maya expects workfile in proper folder, so copy is done first.
"""
src_path = os.path.join(download_test_data,
"input",
"workfile",
"test_project_test_asset_TestTask_v001.mb")
dest_folder = os.path.join(output_folder_url,
self.PROJECT,
self.ASSET,
"work",
self.TASK)
if not os.path.exists(dest_folder):
    os.makedirs(dest_folder)
dest_path = os.path.join(dest_folder,
"test_project_test_asset_TestTask_v001.mb")
shutil.copy(src_path, dest_path)
yield dest_path
@pytest.fixture(scope="module")
def startup_scripts(self, monkeypatch_session, download_test_data):
"""Points Maya to userSetup file from input data"""
startup_path = os.path.join(download_test_data,
"input",
"startup")
original_pythonpath = os.environ.get("PYTHONPATH", "")
monkeypatch_session.setenv("PYTHONPATH",
"{}{}{}".format(startup_path,
os.pathsep,
original_pythonpath))

View file

@ -1,11 +1,7 @@
import pytest
import os
import shutil
from tests.lib.testing_classes import PublishTest
from tests.integration.hosts.maya.lib import MayaTestClass
class TestPublishInMaya(PublishTest):
class TestPublishInMaya(MayaTestClass):
"""Basic test case for publishing in Maya
Shouldn't be run standalone, only via the 'runtests' pype command! (??)
@ -13,60 +9,31 @@ class TestPublishInMaya(PublishTest):
Uses generic TestCase to prepare fixtures for test data, testing DBs,
env vars.
Opens Maya, run publish on prepared workile.
Always pulls and uses test data from GDrive!
Opens Maya, runs publish on prepared workfile.
Then checks content of DB (if subset, version, representations were
created).
Checks tmp folder if all expected files were published.
How to run:
(in cmd with activated {OPENPYPE_ROOT}/.venv)
{OPENPYPE_ROOT}/.venv/Scripts/python.exe {OPENPYPE_ROOT}/start.py runtests ../tests/integration/hosts/maya # noqa: E501
"""
PERSIST = True
PERSIST = False
TEST_FILES = [
("1pOwjA_VVBc6ooTZyFxtAwLS2KZHaBlkY", "test_maya_publish.zip", "")
("1BTSIIULJTuDc8VvXseuiJV_fL6-Bu7FP", "test_maya_publish.zip", "")
]
APP = "maya"
APP_VARIANT = "2019"
APP_NAME = "{}/{}".format(APP, APP_VARIANT)
# keep empty to locate the latest installed variant, or set an explicit one
APP_VARIANT = ""
TIMEOUT = 120 # publish timeout
@pytest.fixture(scope="module")
def last_workfile_path(self, download_test_data):
"""Get last_workfile_path from source data.
Maya expects workfile in proper folder, so copy is done first.
"""
src_path = os.path.join(download_test_data,
"input",
"workfile",
"test_project_test_asset_TestTask_v001.mb")
dest_folder = os.path.join(download_test_data,
self.PROJECT,
self.ASSET,
"work",
self.TASK)
os.makedirs(dest_folder)
dest_path = os.path.join(dest_folder,
"test_project_test_asset_TestTask_v001.mb")
shutil.copy(src_path, dest_path)
yield dest_path
@pytest.fixture(scope="module")
def startup_scripts(self, monkeypatch_session, download_test_data):
"""Points Maya to userSetup file from input data"""
startup_path = os.path.join(download_test_data,
"input",
"startup")
original_pythonpath = os.environ.get("PYTHONPATH")
monkeypatch_session.setenv("PYTHONPATH",
"{}{}{}".format(startup_path,
os.pathsep,
original_pythonpath))
def test_db_asserts(self, dbcon, publish_finished):
"""Host and input data dependent expected results in DB."""
print("test_db_asserts")

View file

@ -0,0 +1,44 @@
import os
import pytest
import shutil
from tests.lib.testing_classes import HostFixtures
class NukeTestClass(HostFixtures):
@pytest.fixture(scope="module")
def last_workfile_path(self, download_test_data, output_folder_url):
"""Get last_workfile_path from source data.
"""
source_file_name = "test_project_test_asset_CompositingInNuke_v001.nk"
src_path = os.path.join(download_test_data,
"input",
"workfile",
source_file_name)
dest_folder = os.path.join(output_folder_url,
self.PROJECT,
self.ASSET,
"work",
self.TASK)
if not os.path.exists(dest_folder):
os.makedirs(dest_folder)
dest_path = os.path.join(dest_folder,
source_file_name)
shutil.copy(src_path, dest_path)
yield dest_path
@pytest.fixture(scope="module")
def startup_scripts(self, monkeypatch_session, download_test_data):
"""Points Nuke to userSetup file from input data"""
startup_path = os.path.join(download_test_data,
"input",
"startup")
original_nuke_path = os.environ.get("NUKE_PATH", "")
monkeypatch_session.setenv("NUKE_PATH",
"{}{}{}".format(startup_path,
os.pathsep,
original_nuke_path))

View file

@ -0,0 +1,74 @@
import logging
from tests.lib.assert_classes import DBAssert
from tests.integration.hosts.nuke.lib import NukeTestClass
log = logging.getLogger("test_publish_in_nuke")
class TestPublishInNuke(NukeTestClass):
"""Basic test case for publishing in Nuke
Uses generic TestCase to prepare fixtures for test data, testing DBs,
env vars.
Opens Nuke, runs publish on prepared workfile.
Then checks content of DB (if subset, version, representations were
created).
Checks tmp folder if all expected files were published.
How to run:
(in cmd with activated {OPENPYPE_ROOT}/.venv)
{OPENPYPE_ROOT}/.venv/Scripts/python.exe {OPENPYPE_ROOT}/start.py runtests ../tests/integration/hosts/nuke # noqa: E501
To check logs/errors from the launched app's publish process, keep PERSIST
set to True and check the `test_openpype.logs` collection.
"""
# https://drive.google.com/file/d/1SUurHj2aiQ21ZIMJfGVBI2KjR8kIjBGI/view?usp=sharing # noqa: E501
TEST_FILES = [
("1SUurHj2aiQ21ZIMJfGVBI2KjR8kIjBGI", "test_Nuke_publish.zip", "")
]
APP = "nuke"
TIMEOUT = 120 # publish timeout
# could be overwritten by command line arguments
# keep empty to locate the latest installed variant, or set an explicit one
APP_VARIANT = ""
PERSIST = True # True - keep test_db, test_openpype, outputted test files
TEST_DATA_FOLDER = None
def test_db_asserts(self, dbcon, publish_finished):
"""Host and input data dependent expected results in DB."""
print("test_db_asserts")
failures = []
failures.append(DBAssert.count_of_types(dbcon, "version", 2))
failures.append(
DBAssert.count_of_types(dbcon, "version", 0, name={"$ne": 1}))
failures.append(
DBAssert.count_of_types(dbcon, "subset", 1,
name="renderCompositingInNukeMain"))
failures.append(
DBAssert.count_of_types(dbcon, "subset", 1,
name="workfileTest_task"))
failures.append(
DBAssert.count_of_types(dbcon, "representation", 4))
additional_args = {"context.subset": "renderCompositingInNukeMain",
"context.ext": "exr"}
failures.append(
DBAssert.count_of_types(dbcon, "representation", 1,
additional_args=additional_args))
assert not any(failures)
if __name__ == "__main__":
test_case = TestPublishInNuke()

View file

@ -0,0 +1,34 @@
import os
import pytest
import shutil
from tests.lib.testing_classes import HostFixtures
class PhotoshopTestClass(HostFixtures):
@pytest.fixture(scope="module")
def last_workfile_path(self, download_test_data, output_folder_url):
"""Get last_workfile_path from source data.
Photoshop expects the workfile in a proper folder, so it is copied first.
"""
src_path = os.path.join(download_test_data,
"input",
"workfile",
"test_project_test_asset_TestTask_v001.psd")
dest_folder = os.path.join(output_folder_url,
self.PROJECT,
self.ASSET,
"work",
self.TASK)
if not os.path.exists(dest_folder):
    os.makedirs(dest_folder)
dest_path = os.path.join(dest_folder,
"test_project_test_asset_TestTask_v001.psd")
shutil.copy(src_path, dest_path)
yield dest_path
@pytest.fixture(scope="module")
def startup_scripts(self, monkeypatch_session, download_test_data):
"""Points Maya to userSetup file from input data"""
pass

View file

@ -1,63 +1,37 @@
import pytest
import os
import shutil
from tests.lib.testing_classes import PublishTest
from tests.integration.hosts.photoshop.lib import PhotoshopTestClass
class TestPublishInPhotoshop(PublishTest):
class TestPublishInPhotoshop(PhotoshopTestClass):
"""Basic test case for publishing in Photoshop
Uses generic TestCase to prepare fixtures for test data, testing DBs,
env vars.
Opens Maya, run publish on prepared workile.
Always pulls and uses test data from GDrive!
Opens Photoshop, runs publish on prepared workfile.
Then checks content of DB (if subset, version, representations were
created).
Checks tmp folder if all expected files were published.
How to run:
(in cmd with activated {OPENPYPE_ROOT}/.venv)
{OPENPYPE_ROOT}/.venv/Scripts/python.exe {OPENPYPE_ROOT}/start.py runtests ../tests/integration/hosts/photoshop # noqa: E501
"""
PERSIST = True
PERSIST = False
TEST_FILES = [
("1Bciy2pCwMKl1UIpxuPnlX_LHMo_Xkq0K", "test_photoshop_publish.zip", "")
("1zD2v5cBgkyOm_xIgKz3WKn8aFB_j8qC-", "test_photoshop_publish.zip", "")
]
APP = "photoshop"
APP_VARIANT = "2020"
APP_NAME = "{}/{}".format(APP, APP_VARIANT)
# keep empty to locate the latest installed variant, or set an explicit one
APP_VARIANT = ""
TIMEOUT = 120 # publish timeout
@pytest.fixture(scope="module")
def last_workfile_path(self, download_test_data):
"""Get last_workfile_path from source data.
Maya expects workfile in proper folder, so copy is done first.
"""
src_path = os.path.join(download_test_data,
"input",
"workfile",
"test_project_test_asset_TestTask_v001.psd")
dest_folder = os.path.join(download_test_data,
self.PROJECT,
self.ASSET,
"work",
self.TASK)
os.makedirs(dest_folder)
dest_path = os.path.join(dest_folder,
"test_project_test_asset_TestTask_v001.psd")
shutil.copy(src_path, dest_path)
yield dest_path
@pytest.fixture(scope="module")
def startup_scripts(self, monkeypatch_session, download_test_data):
"""Points Maya to userSetup file from input data"""
os.environ["IS_HEADLESS"] = "true"
def test_db_asserts(self, dbcon, publish_finished):
"""Host and input data dependent expected results in DB."""
print("test_db_asserts")

View file

@ -0,0 +1,45 @@
"""Classed and methods for comparing expected and published items in DBs"""
class DBAssert:
@classmethod
def count_of_types(cls, dbcon, queried_type, expected, **kwargs):
"""Queries 'dbcon' and counts documents of type 'queried_type'
Args:
dbcon (AvalonMongoDB)
queried_type (str): type of document ("asset", "version"...)
expected (int): expected number of matching documents
**kwargs: any number of additional keyword arguments used as
    query filters; the special argument 'additional_args' (dict)
    is merged into the query directly, eg. {"context.subset": "XXX"}
"""
args = {"type": queried_type}
for key, val in kwargs.items():
if key == "additional_args":
args.update(val)
else:
args[key] = val
msg = None
no_of_docs = dbcon.count_documents(args)
if expected != no_of_docs:
msg = "Not expected no of versions. "\
"Expected {}, found {}".format(expected, no_of_docs)
args.pop("type")
detail_str = " "
if args:
detail_str = " with {}".format(args)
status = "successful"
if msg:
status = "failed"
print("Comparing count of {}{} {}".format(queried_type,
detail_str,
status))
return msg

View file

@ -112,9 +112,17 @@ class DBHandler:
source 'db_name'
"""
db_name_out = db_name_out or db_name
if self._db_exists(db_name) and not overwrite:
raise RuntimeError("DB {} already exists".format(db_name_out) +
"Run with overwrite=True")
if self._db_exists(db_name_out):
    if not overwrite:
        raise RuntimeError("DB {} already exists. ".format(db_name_out) +
                           "Run with overwrite=True")
    else:
        if collection:
            # drop only the single collection so the dump restores cleanly
            self.client[db_name_out].drop_collection(collection)
        else:
            self.teardown(db_name_out)
dir_path = os.path.join(dump_dir, db_name)
if not os.path.exists(dir_path):
@ -136,7 +144,8 @@ class DBHandler:
print("Dropping {} database".format(db_name))
self.client.drop_database(db_name)
def backup_to_dump(self, db_name, dump_dir, overwrite=False):
def backup_to_dump(self, db_name, dump_dir, overwrite=False,
collection=None):
"""
Helper method for running mongodump for specific 'db_name'
"""
@ -148,7 +157,8 @@ class DBHandler:
raise RuntimeError("Backup already exists, "
"run with overwrite=True")
query = self._dump_query(self.uri, dump_dir, db_name=db_name)
query = self._dump_query(self.uri, dump_dir,
db_name=db_name, collection=collection)
print("Mongodump query:: {}".format(query))
subprocess.run(query)
@ -163,7 +173,7 @@ class DBHandler:
if collection:
if not db_name:
raise ValueError("db_name must be present")
coll_part = "--nsInclude={}.{}".format(db_name, collection)
coll_part = "--collection={}".format(collection)
query = "\"{}\" --uri=\"{}\" --out={} {} {}".format(
"mongodump", uri, output_path, db_part, coll_part
)
@ -187,7 +197,8 @@ class DBHandler:
drop_part = "--drop"
if db_name_out:
db_part += " --nsTo={}.*".format(db_name_out)
collection_str = collection or '*'
db_part += " --nsTo={}.{}".format(db_name_out, collection_str)
query = "\"{}\" --uri=\"{}\" --dir=\"{}\" {} {} {}".format(
"mongorestore", uri, dump_dir, db_part, coll_part, drop_part
@ -217,15 +228,16 @@ class DBHandler:
return query
# Examples
# handler = DBHandler(uri="mongodb://localhost:27017")
#
# backup_dir = "c:\\projects\\dumps"
# #
# handler.backup_to_dump("openpype", backup_dir, True)
# # handler.setup_from_dump("test_db", backup_dir, True)
# # handler.setup_from_sql_file("test_db", "c:\\projects\\sql\\item.sql",
# # collection="test_project",
# # drop=False, mode="upsert")
# handler.setup_from_sql("test_db", "c:\\projects\\sql",
# backup_dir = "c:\\projects\\test_nuke_publish\\input\\dumps"
# # #
# handler.backup_to_dump("avalon", backup_dir, True, collection="test_project")
# handler.setup_from_dump("test_db", backup_dir, True, db_name_out="avalon", collection="test_project")
# handler.setup_from_sql_file("test_db", "c:\\projects\\sql\\item.sql",
# collection="test_project",
# drop=False, mode="upsert")
# handler.setup_from_sql("test_db", "c:\\projects\\sql",
# collection="test_project",
# drop=False, mode="upsert")

View file

@ -7,10 +7,13 @@ import pytest
import tempfile
import shutil
import glob
import platform
from tests.lib.db_handler import DBHandler
from tests.lib.file_handler import RemoteFileHandler
from openpype.lib.remote_publish import find_variant_key
class BaseTest:
"""Empty base test class"""
@ -45,6 +48,8 @@ class ModuleUnitTest(BaseTest):
ASSET = "test_asset"
TASK = "test_task"
TEST_DATA_FOLDER = None
@pytest.fixture(scope='session')
def monkeypatch_session(self):
"""Monkeypatch couldn't be used with module or session fixtures."""
@ -54,25 +59,31 @@ class ModuleUnitTest(BaseTest):
m.undo()
@pytest.fixture(scope="module")
def download_test_data(self):
tmpdir = tempfile.mkdtemp()
for test_file in self.TEST_FILES:
file_id, file_name, md5 = test_file
def download_test_data(self, test_data_folder, persist=False):
test_data_folder = test_data_folder or self.TEST_DATA_FOLDER
if test_data_folder:
print("Using existing folder {}".format(test_data_folder))
yield test_data_folder
else:
tmpdir = tempfile.mkdtemp()
for test_file in self.TEST_FILES:
file_id, file_name, md5 = test_file
f_name, ext = os.path.splitext(file_name)
f_name, ext = os.path.splitext(file_name)
RemoteFileHandler.download_file_from_google_drive(file_id,
str(tmpdir),
file_name)
RemoteFileHandler.download_file_from_google_drive(file_id,
str(tmpdir),
file_name)
if ext.lstrip('.') in RemoteFileHandler.IMPLEMENTED_ZIP_FORMATS:
RemoteFileHandler.unzip(os.path.join(tmpdir, file_name))
print("Temporary folder created:: {}".format(tmpdir))
yield tmpdir
if ext.lstrip('.') in RemoteFileHandler.IMPLEMENTED_ZIP_FORMATS: # noqa: E501
RemoteFileHandler.unzip(os.path.join(tmpdir, file_name))
print("Temporary folder created:: {}".format(tmpdir))
yield tmpdir
if not self.PERSIST:
print("Removing {}".format(tmpdir))
shutil.rmtree(tmpdir)
persist = persist or self.PERSIST
if not persist:
print("Removing {}".format(tmpdir))
shutil.rmtree(tmpdir)
@pytest.fixture(scope="module")
def env_var(self, monkeypatch_session, download_test_data):
@ -97,13 +108,24 @@ class ModuleUnitTest(BaseTest):
value = value.format(**all_vars)
print("Setting {}:{}".format(key, value))
monkeypatch_session.setenv(key, str(value))
import openpype
# reset connection to openpype DB with new env var
import openpype.settings.lib as sett_lib
sett_lib._SETTINGS_HANDLER = None
sett_lib._LOCAL_SETTINGS_HANDLER = None
sett_lib.create_settings_handler()
sett_lib.create_local_settings_handler()
import openpype
openpype_root = os.path.dirname(os.path.dirname(openpype.__file__))
# ?? why 2 of those
monkeypatch_session.setenv("OPENPYPE_ROOT", openpype_root)
monkeypatch_session.setenv("OPENPYPE_REPOS_ROOT", openpype_root)
# for remapping purposes (currently in Nuke)
monkeypatch_session.setenv("TEST_SOURCE_FOLDER", download_test_data)
@pytest.fixture(scope="module")
def db_setup(self, download_test_data, env_var, monkeypatch_session):
"""Restore prepared MongoDB dumps into selected DB."""
@ -111,10 +133,12 @@ class ModuleUnitTest(BaseTest):
uri = os.environ.get("OPENPYPE_MONGO")
db_handler = DBHandler(uri)
db_handler.setup_from_dump(self.TEST_DB_NAME, backup_dir, True,
db_handler.setup_from_dump(self.TEST_DB_NAME, backup_dir,
overwrite=True,
db_name_out=self.TEST_DB_NAME)
db_handler.setup_from_dump("openpype", backup_dir, True,
db_handler.setup_from_dump("openpype", backup_dir,
overwrite=True,
db_name_out=self.TEST_OPENPYPE_NAME)
yield db_handler
@ -167,31 +191,76 @@ class PublishTest(ModuleUnitTest):
"""
APP = ""
APP_VARIANT = ""
APP_NAME = "{}/{}".format(APP, APP_VARIANT)
TIMEOUT = 120 # publish timeout
@pytest.fixture(scope="module")
def last_workfile_path(self, download_test_data):
raise NotImplementedError
# could be overwritten by command line arguments
# command line value takes precedence
# keep empty to locate the latest installed variant, or set an explicit one
APP_VARIANT = ""
PERSIST = True # True - keep test_db, test_openpype, outputted test files
TEST_DATA_FOLDER = None # use specific folder of unzipped test file
@pytest.fixture(scope="module")
def startup_scripts(self, monkeypatch_session, download_test_data):
raise NotImplementedError
def app_name(self, app_variant):
"""Returns calculated value for ApplicationManager. Eg.(nuke/12-2)"""
from openpype.lib import ApplicationManager
app_variant = app_variant or self.APP_VARIANT
application_manager = ApplicationManager()
if not app_variant:
app_variant = find_variant_key(application_manager, self.APP)
yield "{}/{}".format(self.APP, app_variant)
@pytest.fixture(scope="module")
def output_folder_url(self, download_test_data):
"""Returns location of published data, cleans it first if exists."""
path = os.path.join(download_test_data, "output")
if os.path.exists(path):
print("Purging {}".format(path))
shutil.rmtree(path)
yield path
@pytest.fixture(scope="module")
def app_args(self, download_test_data):
"""Returns additional application arguments from a test file.
Test zip file should contain file at:
FOLDER_DIR/input/app_args/app_args.json
containing a list of command line arguments (like '-x' etc.)
"""
app_args = []
args_url = os.path.join(download_test_data, "input",
"app_args", "app_args.json")
if not os.path.exists(args_url):
print("App argument file {} doesn't exist".format(args_url))
else:
try:
with open(args_url) as json_file:
app_args = json.load(json_file)
if not isinstance(app_args, list):
raise ValueError
except ValueError:
print("{} doesn't contain valid JSON".format(args_url))
six.reraise(*sys.exc_info())
yield app_args
@pytest.fixture(scope="module")
def launched_app(self, dbcon, download_test_data, last_workfile_path,
startup_scripts):
startup_scripts, app_args, app_name, output_folder_url):
"""Launch host app"""
# set publishing folders
root_key = "config.roots.work.{}".format("windows") # TEMP
platform_str = platform.system().lower()
root_key = "config.roots.work.{}".format(platform_str)
dbcon.update_one(
{"type": "project"},
{"$set":
{
root_key: download_test_data
root_key: output_folder_url
}}
)
@ -217,8 +286,11 @@ class PublishTest(ModuleUnitTest):
"asset_name": self.ASSET,
"task_name": self.TASK
}
if app_args:
data["app_args"] = app_args
yield application_manager.launch(self.APP_NAME, **data)
app_process = application_manager.launch(app_name, **data)
yield app_process
@pytest.fixture(scope="module")
def publish_finished(self, dbcon, launched_app, download_test_data):
@ -236,23 +308,26 @@ class PublishTest(ModuleUnitTest):
yield True
def test_folder_structure_same(self, dbcon, publish_finished,
download_test_data):
download_test_data, output_folder_url):
"""Check if expected and published subfolders contain same files.
Compares only presence, not size nor content!
"""
published_dir_base = download_test_data
published_dir = os.path.join(published_dir_base,
published_dir = os.path.join(output_folder_url,
self.PROJECT,
self.ASSET,
self.TASK,
"**")
expected_dir_base = os.path.join(published_dir_base,
"expected")
expected_dir = os.path.join(expected_dir_base,
self.PROJECT,
self.ASSET,
self.TASK,
"**")
print("Comparing published:'{}' : expected:'{}'".format(published_dir,
expected_dir))
published = set(f.replace(published_dir_base, '') for f in
glob.glob(published_dir, recursive=True) if
f != published_dir_base and os.path.exists(f))
@ -262,3 +337,16 @@ class PublishTest(ModuleUnitTest):
not_matched = expected.difference(published)
assert not not_matched, "Missing {} files".format(not_matched)
class HostFixtures(PublishTest):
"""Host specific fixtures. Should be implemented once per host."""
@pytest.fixture(scope="module")
def last_workfile_path(self, download_test_data, output_folder_url):
"""Returns url of workfile"""
raise NotImplementedError
@pytest.fixture(scope="module")
def startup_scripts(self, monkeypatch_session, download_test_data):
""""Adds init scripts (like userSetup) to expected location"""
raise NotImplementedError