diff --git a/openpype/hooks/pre_foundry_apps.py b/openpype/hooks/pre_foundry_apps.py
new file mode 100644
index 0000000000..85f68c6b60
--- /dev/null
+++ b/openpype/hooks/pre_foundry_apps.py
@@ -0,0 +1,28 @@
+import subprocess
+from openpype.lib import PreLaunchHook
+
+
+class LaunchFoundryAppsWindows(PreLaunchHook):
+    """Foundry applications require a specific way of launching.
+
+    Nuke is executed "like" a python process, so on Windows the
+    `CREATE_NEW_CONSOLE` flag must be passed to trigger creation of a new
+    console. At the same time the newly created console won't create its own
+    stdout and stderr handlers, so they should not be redirected to DEVNULL.
+    """
+
+    # Should run as the last hook because it modifies launch kwargs
+    order = 1000
+    app_groups = ["nuke", "nukex", "hiero", "nukestudio"]
+    platforms = ["windows"]
+
+    def execute(self):
+        # Change `creationflags` to CREATE_NEW_CONSOLE
+        # - on Windows Nuke will create a new window with its own console
+        # Set `stdout` and `stderr` to None so the newly created console
+        # does not have its output redirected to DEVNULL in the build
+        self.launch_context.kwargs.update({
+            "creationflags": subprocess.CREATE_NEW_CONSOLE,
+            "stdout": None,
+            "stderr": None
+        })
diff --git a/openpype/hooks/pre_non_python_host_launch.py b/openpype/hooks/pre_non_python_host_launch.py
index 393a878f76..0447f4a06f 100644
--- a/openpype/hooks/pre_non_python_host_launch.py
+++ b/openpype/hooks/pre_non_python_host_launch.py
@@ -49,5 +49,7 @@ class NonPythonHostHook(PreLaunchHook):
         if remainders:
             self.launch_context.launch_args.extend(remainders)
 
+        # This must be set, otherwise it wouldn't be possible to catch the
+        # output when a built OpenPype is used.
         self.launch_context.kwargs["stdout"] = subprocess.DEVNULL
-        self.launch_context.kwargs["stderr"] = subprocess.STDOUT
+        self.launch_context.kwargs["stderr"] = subprocess.DEVNULL
diff --git a/openpype/hooks/pre_with_windows_shell.py b/openpype/hooks/pre_with_windows_shell.py
deleted file mode 100644
index 441ab1a675..0000000000
--- a/openpype/hooks/pre_with_windows_shell.py
+++ /dev/null
@@ -1,44 +0,0 @@
-import os
-import subprocess
-from openpype.lib import PreLaunchHook
-
-
-class LaunchWithWindowsShell(PreLaunchHook):
-    """Add shell command before executable.
-
-    Some hosts have issues when are launched directly from python in that case
-    it is possible to prepend shell executable which will trigger process
-    instead.
-    """
-
-    # Should be as last hook because must change launch arguments to string
-    order = 1000
-    app_groups = ["nuke", "nukex", "hiero", "nukestudio"]
-    platforms = ["windows"]
-
-    def execute(self):
-        launch_args = self.launch_context.clear_launch_args(
-            self.launch_context.launch_args)
-        new_args = [
-            # Get comspec which is cmd.exe in most cases.
-            os.environ.get("COMSPEC", "cmd.exe"),
-            # NOTE change to "/k" if want to keep console opened
-            "/c",
-            # Convert arguments to command line arguments (as string)
-            "\"{}\"".format(
-                subprocess.list2cmdline(launch_args)
-            )
-        ]
-        # Convert list to string
-        # WARNING this only works if is used as string
-        args_string = " ".join(new_args)
-        self.log.info((
-            "Modified launch arguments to be launched with shell \"{}\"."
-        ).format(args_string))
-
-        # Replace launch args with new one
-        self.launch_context.launch_args = args_string
-        # Change `creationflags` to CREATE_NEW_CONSOLE
-        self.launch_context.kwargs["creationflags"] = (
-            subprocess.CREATE_NEW_CONSOLE
-        )
diff --git a/openpype/hosts/nuke/api/lib.py b/openpype/hosts/nuke/api/lib.py
index eefbcc5d20..5f898a9a67 100644
--- a/openpype/hosts/nuke/api/lib.py
+++ b/openpype/hosts/nuke/api/lib.py
@@ -1739,3 +1739,68 @@ def process_workfile_builder():
         log.info("Opening last workfile...")
         # open workfile
         open_file(last_workfile_path)
+
+
+def recreate_instance(origin_node, avalon_data=None):
+    """Recreate input instance with different data.
+
+    Args:
+        origin_node (nuke.Node): Nuke node to recreate from
+        avalon_data (dict, optional): data to be used in new node avalon_data
+
+    Returns:
+        nuke.Node: newly created node
+    """
+    knobs_wl = ["render", "publish", "review", "ypos",
+                "use_limit", "first", "last"]
+    # get data from avalon knobs
+    data = anlib.get_avalon_knob_data(
+        origin_node)
+
+    # add input data to avalon data
+    if avalon_data:
+        data.update(avalon_data)
+
+    # capture all node knobs whose names match the whitelist
+    knobs_data = {k: origin_node[k].value()
+                  for k in origin_node.knobs()
+                  for key in knobs_wl
+                  if key in k}
+
+    # get node dependencies
+    inputs = origin_node.dependencies()
+    outputs = origin_node.dependent()
+
+    # remove the node
+    nuke.delete(origin_node)
+
+    # create new node
+    # get appropriate plugin class
+    creator_plugin = None
+    for Creator in api.discover(api.Creator):
+        if Creator.__name__ == data["creator"]:
+            creator_plugin = Creator
+            break
+
+    # fail early instead of crashing on a None call below
+    if creator_plugin is None:
+        raise ValueError(
+            "Creator plugin \"{}\" was not found".format(data["creator"]))
+
+    # create write node with creator
+    new_node_name = data["subset"]
+    new_node = creator_plugin(new_node_name, data["asset"]).process()
+
+    # set whitelisted knobs on the new node
+    for _k, _v in knobs_data.items():
+        try:
+            new_node[_k].setValue(_v)
+        except Exception as e:
+            log.warning("Failed to set knob \"{}\": {}".format(_k, e))
+
+    # connect to original inputs
+    for i, n in enumerate(inputs):
+        new_node.setInput(i, n)
+
+    # connect to outputs
+    if len(outputs) > 0:
+        for dn in outputs:
+            dn.setInput(0, new_node)
+
+    return new_node
diff --git a/openpype/hosts/standalonepublisher/plugins/publish/collect_editorial_instances.py b/openpype/hosts/standalonepublisher/plugins/publish/collect_editorial_instances.py
index dbf2574a9d..60a8cf48fc 100644
--- a/openpype/hosts/standalonepublisher/plugins/publish/collect_editorial_instances.py
+++ b/openpype/hosts/standalonepublisher/plugins/publish/collect_editorial_instances.py
@@ -181,7 +181,8 @@ class CollectInstances(pyblish.api.InstancePlugin):
                 }
             })
             for subset, properities in self.subsets.items():
-                if properities["version"] == 0:
+                version = properities.get("version")
+                if version == 0:  # 'None == 0' is False; missing key is safe
                     properities.pop("version")
 
                 # adding Review-able instance
diff --git a/openpype/hosts/standalonepublisher/plugins/publish/collect_texture.py b/openpype/hosts/standalonepublisher/plugins/publish/collect_texture.py
new file mode 100644
index 0000000000..d70a0a75b8
--- /dev/null
+++ b/openpype/hosts/standalonepublisher/plugins/publish/collect_texture.py
@@ -0,0 +1,456 @@
+import os
+import re
+import pyblish.api
+import json
+
+from avalon.api import format_template_with_optional_keys
+
+from openpype.lib import prepare_template_data
+
+
+class CollectTextures(pyblish.api.ContextPlugin):
+    """Collect workfile (and its resource_files) and textures.
+ + Currently implements use case with Mari and Substance Painter, where + one workfile is main (.mra - Mari) with possible additional workfiles + (.spp - Substance) + + + Provides: + 1 instance per workfile (with 'resources' filled if needed) + (workfile family) + 1 instance per group of textures + (textures family) + """ + + order = pyblish.api.CollectorOrder + label = "Collect Textures" + hosts = ["standalonepublisher"] + families = ["texture_batch"] + actions = [] + + # from presets + main_workfile_extensions = ['mra'] + other_workfile_extensions = ['spp', 'psd'] + texture_extensions = ["exr", "dpx", "jpg", "jpeg", "png", "tiff", "tga", + "gif", "svg"] + + # additional families (ftrack etc.) + workfile_families = [] + textures_families = [] + + color_space = ["linsRGB", "raw", "acesg"] + + # currently implemented placeholders ["color_space"] + # describing patterns in file names splitted by regex groups + input_naming_patterns = { + # workfile: corridorMain_v001.mra > + # texture: corridorMain_aluminiumID_v001_baseColor_linsRGB_1001.exr + "workfile": r'^([^.]+)(_[^_.]*)?_v([0-9]{3,}).+', + "textures": r'^([^_.]+)_([^_.]+)_v([0-9]{3,})_([^_.]+)_({color_space})_(1[0-9]{3}).+', # noqa + } + # matching regex group position to 'input_naming_patterns' + input_naming_groups = { + "workfile": ('asset', 'filler', 'version'), + "textures": ('asset', 'shader', 'version', 'channel', 'color_space', + 'udim') + } + + workfile_subset_template = "textures{Subset}Workfile" + # implemented keys: ["color_space", "channel", "subset", "shader"] + texture_subset_template = "textures{Subset}_{Shader}_{Channel}" + + def process(self, context): + self.context = context + + resource_files = {} + workfile_files = {} + representations = {} + version_data = {} + asset_builds = set() + asset = None + for instance in context: + if not self.input_naming_patterns: + raise ValueError("Naming patterns are not configured. 
\n" + "Ask admin to provide naming conventions " + "for workfiles and textures.") + + if not asset: + asset = instance.data["asset"] # selected from SP + + parsed_subset = instance.data["subset"].replace( + instance.data["family"], '') + + fill_pairs = { + "subset": parsed_subset + } + + fill_pairs = prepare_template_data(fill_pairs) + workfile_subset = format_template_with_optional_keys( + fill_pairs, self.workfile_subset_template) + + processed_instance = False + for repre in instance.data["representations"]: + ext = repre["ext"].replace('.', '') + asset_build = version = None + + if isinstance(repre["files"], list): + repre_file = repre["files"][0] + else: + repre_file = repre["files"] + + if ext in self.main_workfile_extensions or \ + ext in self.other_workfile_extensions: + + asset_build = self._get_asset_build( + repre_file, + self.input_naming_patterns["workfile"], + self.input_naming_groups["workfile"], + self.color_space + ) + version = self._get_version( + repre_file, + self.input_naming_patterns["workfile"], + self.input_naming_groups["workfile"], + self.color_space + ) + asset_builds.add((asset_build, version, + workfile_subset, 'workfile')) + processed_instance = True + + if not representations.get(workfile_subset): + representations[workfile_subset] = [] + + if ext in self.main_workfile_extensions: + # workfiles can have only single representation + # currently OP is not supporting different extensions in + # representation files + representations[workfile_subset] = [repre] + + workfile_files[asset_build] = repre_file + + if ext in self.other_workfile_extensions: + # add only if not added already from main + if not representations.get(workfile_subset): + representations[workfile_subset] = [repre] + + # only overwrite if not present + if not workfile_files.get(asset_build): + workfile_files[asset_build] = repre_file + + if not resource_files.get(workfile_subset): + resource_files[workfile_subset] = [] + item = { + "files": [os.path.join(repre["stagingDir"], + repre["files"])], + "source": "standalone publisher" + } + resource_files[workfile_subset].append(item) + + if ext in self.texture_extensions: + c_space = self._get_color_space( + repre_file, + self.color_space + ) + + channel = self._get_channel_name( + repre_file, + self.input_naming_patterns["textures"], + self.input_naming_groups["textures"], + self.color_space + ) + + shader = self._get_shader_name( + repre_file, + self.input_naming_patterns["textures"], + self.input_naming_groups["textures"], + self.color_space + ) + + formatting_data = { + "color_space": c_space or '', # None throws exception + "channel": channel or '', + "shader": shader or '', + "subset": parsed_subset or '' + } + + fill_pairs = prepare_template_data(formatting_data) + subset = format_template_with_optional_keys( + fill_pairs, self.texture_subset_template) + + asset_build = self._get_asset_build( + repre_file, + self.input_naming_patterns["textures"], + self.input_naming_groups["textures"], + self.color_space + ) + version = self._get_version( + repre_file, + self.input_naming_patterns["textures"], + self.input_naming_groups["textures"], + self.color_space + ) + if not representations.get(subset): + representations[subset] = [] + representations[subset].append(repre) + + ver_data = { + "color_space": c_space or '', + "channel_name": channel or '', + "shader_name": shader or '' + } + version_data[subset] = ver_data + + asset_builds.add( + (asset_build, version, subset, "textures")) + processed_instance = True + + if processed_instance: + 
self.context.remove(instance) + + self._create_new_instances(context, + asset, + asset_builds, + resource_files, + representations, + version_data, + workfile_files) + + def _create_new_instances(self, context, asset, asset_builds, + resource_files, representations, + version_data, workfile_files): + """Prepare new instances from collected data. + + Args: + context (ContextPlugin) + asset (string): selected asset from SP + asset_builds (set) of tuples + (asset_build, version, subset, family) + resource_files (list) of resource dicts - to store additional + files to main workfile + representations (list) of dicts - to store workfile info OR + all collected texture files, key is asset_build + version_data (dict) - prepared to store into version doc in DB + workfile_files (dict) - to store workfile to add to textures + key is asset_build + """ + # sort workfile first + asset_builds = sorted(asset_builds, + key=lambda tup: tup[3], reverse=True) + + # workfile must have version, textures might + main_version = None + for asset_build, version, subset, family in asset_builds: + if not main_version: + main_version = version + new_instance = context.create_instance(subset) + new_instance.data.update( + { + "subset": subset, + "asset": asset, + "label": subset, + "name": subset, + "family": family, + "version": int(version or main_version or 1), + "asset_build": asset_build # remove in validator + } + ) + + workfile = workfile_files.get(asset_build) + + if resource_files.get(subset): + # add resources only when workfile is main style + for ext in self.main_workfile_extensions: + if ext in workfile: + new_instance.data.update({ + "resources": resource_files.get(subset) + }) + break + + # store origin + if family == 'workfile': + families = self.workfile_families + + new_instance.data["source"] = "standalone publisher" + else: + families = self.textures_families + + repre = representations.get(subset)[0] + new_instance.context.data["currentFile"] = os.path.join( + repre["stagingDir"], workfile or 'dummy.txt') + + new_instance.data["families"] = families + + # add data for version document + ver_data = version_data.get(subset) + if ver_data: + if workfile: + ver_data['workfile'] = workfile + + new_instance.data.update( + {"versionData": ver_data} + ) + + upd_representations = representations.get(subset) + if upd_representations and family != 'workfile': + upd_representations = self._update_representations( + upd_representations) + + new_instance.data["representations"] = upd_representations + + self.log.debug("new instance - {}:: {}".format( + family, + json.dumps(new_instance.data, indent=4))) + + def _get_asset_build(self, name, + input_naming_patterns, input_naming_groups, + color_spaces): + """Loops through configured workfile patterns to find asset name. + + Asset name used to bind workfile and its textures. + + Args: + name (str): workfile name + input_naming_patterns (list): + [workfile_pattern] or [texture_pattern] + input_naming_groups (list) + ordinal position of regex groups matching to input_naming.. 
+ color_spaces (list) - predefined color spaces + """ + asset_name = "NOT_AVAIL" + + return self._parse(name, input_naming_patterns, input_naming_groups, + color_spaces, 'asset') or asset_name + + def _get_version(self, name, input_naming_patterns, input_naming_groups, + color_spaces): + found = self._parse(name, input_naming_patterns, input_naming_groups, + color_spaces, 'version') + + if found: + return found.replace('v', '') + + self.log.info("No version found in the name {}".format(name)) + + def _get_udim(self, name, input_naming_patterns, input_naming_groups, + color_spaces): + """Parses from 'name' udim value.""" + found = self._parse(name, input_naming_patterns, input_naming_groups, + color_spaces, 'udim') + if found: + return found + + self.log.warning("Didn't find UDIM in {}".format(name)) + + def _get_color_space(self, name, color_spaces): + """Looks for color_space from a list in a file name. + + Color space seems not to be recognizable by regex pattern, set of + known space spaces must be provided. + """ + color_space = None + found = [cs for cs in color_spaces if + re.search("_{}_".format(cs), name)] + + if not found: + self.log.warning("No color space found in {}".format(name)) + else: + if len(found) > 1: + msg = "Multiple color spaces found in {}->{}".format(name, + found) + self.log.warning(msg) + + color_space = found[0] + + return color_space + + def _get_shader_name(self, name, input_naming_patterns, + input_naming_groups, color_spaces): + """Return parsed shader name. + + Shader name is needed for overlapping udims (eg. udims might be + used for different materials, shader needed to not overwrite). + + Unknown format of channel name and color spaces >> cs are known + list - 'color_space' used as a placeholder + """ + found = self._parse(name, input_naming_patterns, input_naming_groups, + color_spaces, 'shader') + if found: + return found + + self.log.warning("Didn't find shader in {}".format(name)) + + def _get_channel_name(self, name, input_naming_patterns, + input_naming_groups, color_spaces): + """Return parsed channel name. + + Unknown format of channel name and color spaces >> cs are known + list - 'color_space' used as a placeholder + """ + found = self._parse(name, input_naming_patterns, input_naming_groups, + color_spaces, 'channel') + if found: + return found + + self.log.warning("Didn't find channel in {}".format(name)) + + def _parse(self, name, input_naming_patterns, input_naming_groups, + color_spaces, key): + """Universal way to parse 'name' with configurable regex groups. + + Args: + name (str): workfile name + input_naming_patterns (list): + [workfile_pattern] or [texture_pattern] + input_naming_groups (list) + ordinal position of regex groups matching to input_naming.. 
+ color_spaces (list) - predefined color spaces + + Raises: + ValueError - if broken 'input_naming_groups' + """ + for input_pattern in input_naming_patterns: + for cs in color_spaces: + pattern = input_pattern.replace('{color_space}', cs) + regex_result = re.findall(pattern, name) + if regex_result: + idx = list(input_naming_groups).index(key) + if idx < 0: + msg = "input_naming_groups must " +\ + "have '{}' key".format(key) + raise ValueError(msg) + + try: + parsed_value = regex_result[0][idx] + return parsed_value + except IndexError: + self.log.warning("Wrong index, probably " + "wrong name {}".format(name)) + + def _update_representations(self, upd_representations): + """Frames dont have sense for textures, add collected udims instead.""" + udims = [] + for repre in upd_representations: + repre.pop("frameStart", None) + repre.pop("frameEnd", None) + repre.pop("fps", None) + + # ignore unique name from SP, use extension instead + # SP enforces unique name, here different subsets >> unique repres + repre["name"] = repre["ext"].replace('.', '') + + files = repre.get("files", []) + if not isinstance(files, list): + files = [files] + + for file_name in files: + udim = self._get_udim(file_name, + self.input_naming_patterns["textures"], + self.input_naming_groups["textures"], + self.color_space) + udims.append(udim) + + repre["udim"] = udims # must be this way, used for filling path + + return upd_representations diff --git a/openpype/hosts/standalonepublisher/plugins/publish/extract_resources.py b/openpype/hosts/standalonepublisher/plugins/publish/extract_resources.py new file mode 100644 index 0000000000..1183180833 --- /dev/null +++ b/openpype/hosts/standalonepublisher/plugins/publish/extract_resources.py @@ -0,0 +1,42 @@ +import os +import pyblish.api + + +class ExtractResources(pyblish.api.InstancePlugin): + """ + Extracts files from instance.data["resources"]. + + These files are additional (textures etc.), currently not stored in + representations! + + Expects collected 'resourcesDir'. (list of dicts with 'files' key and + list of source urls) + + Provides filled 'transfers' (list of tuples (source_url, target_url)) + """ + + label = "Extract Resources SP" + hosts = ["standalonepublisher"] + order = pyblish.api.ExtractorOrder + + families = ["workfile"] + + def process(self, instance): + if not instance.data.get("resources"): + self.log.info("No resources") + return + + if not instance.data.get("transfers"): + instance.data["transfers"] = [] + + publish_dir = instance.data["resourcesDir"] + + transfers = [] + for resource in instance.data["resources"]: + for file_url in resource.get("files", []): + file_name = os.path.basename(file_url) + dest_url = os.path.join(publish_dir, file_name) + transfers.append((file_url, dest_url)) + + self.log.info("transfers:: {}".format(transfers)) + instance.data["transfers"].extend(transfers) diff --git a/openpype/hosts/standalonepublisher/plugins/publish/extract_workfile_location.py b/openpype/hosts/standalonepublisher/plugins/publish/extract_workfile_location.py new file mode 100644 index 0000000000..18bf0394ae --- /dev/null +++ b/openpype/hosts/standalonepublisher/plugins/publish/extract_workfile_location.py @@ -0,0 +1,43 @@ +import os +import pyblish.api + + +class ExtractWorkfileUrl(pyblish.api.ContextPlugin): + """ + Modifies 'workfile' field to contain link to published workfile. + + Expects that batch contains only single workfile and matching + (multiple) textures. 
+ """ + + label = "Extract Workfile Url SP" + hosts = ["standalonepublisher"] + order = pyblish.api.ExtractorOrder + + families = ["textures"] + + def process(self, context): + filepath = None + + # first loop for workfile + for instance in context: + if instance.data["family"] == 'workfile': + anatomy = context.data['anatomy'] + template_data = instance.data.get("anatomyData") + rep_name = instance.data.get("representations")[0].get("name") + template_data["representation"] = rep_name + template_data["ext"] = rep_name + anatomy_filled = anatomy.format(template_data) + template_filled = anatomy_filled["publish"]["path"] + filepath = os.path.normpath(template_filled) + self.log.info("Using published scene for render {}".format( + filepath)) + + if not filepath: + self.log.info("Texture batch doesn't contain workfile.") + return + + # then apply to all textures + for instance in context: + if instance.data["family"] == 'textures': + instance.data["versionData"]["workfile"] = filepath diff --git a/openpype/hosts/standalonepublisher/plugins/publish/validate_texture_batch.py b/openpype/hosts/standalonepublisher/plugins/publish/validate_texture_batch.py new file mode 100644 index 0000000000..af200b59e0 --- /dev/null +++ b/openpype/hosts/standalonepublisher/plugins/publish/validate_texture_batch.py @@ -0,0 +1,22 @@ +import pyblish.api +import openpype.api + + +class ValidateTextureBatch(pyblish.api.InstancePlugin): + """Validates that some texture files are present.""" + + label = "Validate Texture Presence" + hosts = ["standalonepublisher"] + order = openpype.api.ValidateContentsOrder + families = ["workfile"] + optional = False + + def process(self, instance): + present = False + for instance in instance.context: + if instance.data["family"] == "textures": + self.log.info("Some textures present.") + + return + + assert present, "No textures found in published batch!" diff --git a/openpype/hosts/standalonepublisher/plugins/publish/validate_texture_has_workfile.py b/openpype/hosts/standalonepublisher/plugins/publish/validate_texture_has_workfile.py new file mode 100644 index 0000000000..7cd540668c --- /dev/null +++ b/openpype/hosts/standalonepublisher/plugins/publish/validate_texture_has_workfile.py @@ -0,0 +1,20 @@ +import pyblish.api +import openpype.api + + +class ValidateTextureHasWorkfile(pyblish.api.InstancePlugin): + """Validates that textures have appropriate workfile attached. + + Workfile is optional, disable this Validator after Refresh if you are + sure it is not needed. 
+ """ + label = "Validate Texture Has Workfile" + hosts = ["standalonepublisher"] + order = openpype.api.ValidateContentsOrder + families = ["textures"] + optional = True + + def process(self, instance): + wfile = instance.data["versionData"].get("workfile") + + assert wfile, "Textures are missing attached workfile" diff --git a/openpype/hosts/standalonepublisher/plugins/publish/validate_texture_name.py b/openpype/hosts/standalonepublisher/plugins/publish/validate_texture_name.py new file mode 100644 index 0000000000..92f930c3fc --- /dev/null +++ b/openpype/hosts/standalonepublisher/plugins/publish/validate_texture_name.py @@ -0,0 +1,50 @@ +import pyblish.api +import openpype.api + + +class ValidateTextureBatchNaming(pyblish.api.InstancePlugin): + """Validates that all instances had properly formatted name.""" + + label = "Validate Texture Batch Naming" + hosts = ["standalonepublisher"] + order = openpype.api.ValidateContentsOrder + families = ["workfile", "textures"] + optional = False + + def process(self, instance): + file_name = instance.data["representations"][0]["files"] + if isinstance(file_name, list): + file_name = file_name[0] + + msg = "Couldnt find asset name in '{}'\n".format(file_name) + \ + "File name doesn't follow configured pattern.\n" + \ + "Please rename the file." + assert "NOT_AVAIL" not in instance.data["asset_build"], msg + + instance.data.pop("asset_build") + + if instance.data["family"] == "textures": + file_name = instance.data["representations"][0]["files"][0] + self._check_proper_collected(instance.data["versionData"], + file_name) + + def _check_proper_collected(self, versionData, file_name): + """ + Loop through collected versionData to check if name parsing was OK. + Args: + versionData: (dict) + + Returns: + raises AssertionException + """ + missing_key_values = [] + for key, value in versionData.items(): + if not value: + missing_key_values.append(key) + + msg = "Collected data {} doesn't contain values for {}".format( + versionData, missing_key_values) + "\n" + \ + "Name of the texture file doesn't match expected pattern.\n" + \ + "Please rename file(s) {}".format(file_name) + + assert not missing_key_values, msg diff --git a/openpype/hosts/standalonepublisher/plugins/publish/validate_texture_versions.py b/openpype/hosts/standalonepublisher/plugins/publish/validate_texture_versions.py new file mode 100644 index 0000000000..90d0e8e512 --- /dev/null +++ b/openpype/hosts/standalonepublisher/plugins/publish/validate_texture_versions.py @@ -0,0 +1,38 @@ +import pyblish.api +import openpype.api + + +class ValidateTextureBatchVersions(pyblish.api.InstancePlugin): + """Validates that versions match in workfile and textures. + + Workfile is optional, so if you are sure, you can disable this + validator after Refresh. + + Validates that only single version is published at a time. 
+ """ + label = "Validate Texture Batch Versions" + hosts = ["standalonepublisher"] + order = openpype.api.ValidateContentsOrder + families = ["textures"] + optional = False + + def process(self, instance): + wfile = instance.data["versionData"].get("workfile") + + version_str = "v{:03d}".format(instance.data["version"]) + + if not wfile: # no matching workfile, do not check versions + self.log.info("No workfile present for textures") + return + + msg = "Not matching version: texture v{:03d} - workfile {}" + assert version_str in wfile, \ + msg.format( + instance.data["version"], wfile + ) + + present_versions = set() + for instance in instance.context: + present_versions.add(instance.data["version"]) + + assert len(present_versions) == 1, "Too many versions in a batch!" diff --git a/openpype/hosts/standalonepublisher/plugins/publish/validate_texture_workfiles.py b/openpype/hosts/standalonepublisher/plugins/publish/validate_texture_workfiles.py new file mode 100644 index 0000000000..aa3aad71db --- /dev/null +++ b/openpype/hosts/standalonepublisher/plugins/publish/validate_texture_workfiles.py @@ -0,0 +1,29 @@ +import pyblish.api +import openpype.api + + +class ValidateTextureBatchWorkfiles(pyblish.api.InstancePlugin): + """Validates that textures workfile has collected resources (optional). + + Collected recourses means secondary workfiles (in most cases). + """ + + label = "Validate Texture Workfile Has Resources" + hosts = ["standalonepublisher"] + order = openpype.api.ValidateContentsOrder + families = ["workfile"] + optional = True + + # from presets + main_workfile_extensions = ['mra'] + + def process(self, instance): + if instance.data["family"] == "workfile": + ext = instance.data["representations"][0]["ext"] + if ext not in self.main_workfile_extensions: + self.log.warning("Only secondary workfile present!") + return + + msg = "No secondary workfiles present for workfile {}".\ + format(instance.data["name"]) + assert instance.data.get("resources"), msg diff --git a/openpype/hosts/tvpaint/plugins/publish/collect_workfile_data.py b/openpype/hosts/tvpaint/plugins/publish/collect_workfile_data.py index d8bb03f541..79cc01740a 100644 --- a/openpype/hosts/tvpaint/plugins/publish/collect_workfile_data.py +++ b/openpype/hosts/tvpaint/plugins/publish/collect_workfile_data.py @@ -155,6 +155,7 @@ class CollectWorkfileData(pyblish.api.ContextPlugin): "sceneMarkInState": mark_in_state == "set", "sceneMarkOut": int(mark_out_frame), "sceneMarkOutState": mark_out_state == "set", + "sceneStartFrame": int(lib.execute_george("tv_startframe")), "sceneBgColor": self._get_bg_color() } self.log.debug( diff --git a/openpype/hosts/tvpaint/plugins/publish/extract_sequence.py b/openpype/hosts/tvpaint/plugins/publish/extract_sequence.py index 536df2adb0..1df7512588 100644 --- a/openpype/hosts/tvpaint/plugins/publish/extract_sequence.py +++ b/openpype/hosts/tvpaint/plugins/publish/extract_sequence.py @@ -49,6 +49,14 @@ class ExtractSequence(pyblish.api.Extractor): family_lowered = instance.data["family"].lower() mark_in = instance.context.data["sceneMarkIn"] mark_out = instance.context.data["sceneMarkOut"] + + # Scene start frame offsets the output files, so we need to offset the + # marks. 
+ scene_start_frame = instance.context.data["sceneStartFrame"] + difference = scene_start_frame - mark_in + mark_in += difference + mark_out += difference + # Frame start/end may be stored as float frame_start = int(instance.data["frameStart"]) frame_end = int(instance.data["frameEnd"]) @@ -98,7 +106,7 @@ class ExtractSequence(pyblish.api.Extractor): self.log.warning(( "Lowering representation range to {} frames." " Changed frame end {} -> {}" - ).format(output_range + 1, mark_out, new_mark_out)) + ).format(output_range + 1, mark_out, new_output_frame_end)) output_frame_end = new_output_frame_end # ------------------------------------------------------------------- diff --git a/openpype/hosts/tvpaint/plugins/publish/validate_start_frame.py b/openpype/hosts/tvpaint/plugins/publish/validate_start_frame.py new file mode 100644 index 0000000000..d769d47736 --- /dev/null +++ b/openpype/hosts/tvpaint/plugins/publish/validate_start_frame.py @@ -0,0 +1,27 @@ +import pyblish.api +from avalon.tvpaint import lib + + +class RepairStartFrame(pyblish.api.Action): + """Repair start frame.""" + + label = "Repair" + icon = "wrench" + on = "failed" + + def process(self, context, plugin): + lib.execute_george("tv_startframe 0") + + +class ValidateStartFrame(pyblish.api.ContextPlugin): + """Validate start frame being at frame 0.""" + + label = "Validate Start Frame" + order = pyblish.api.ValidatorOrder + hosts = ["tvpaint"] + actions = [RepairStartFrame] + optional = True + + def process(self, context): + start_frame = lib.execute_george("tv_startframe") + assert int(start_frame) == 0, "Start frame has to be frame 0." diff --git a/openpype/lib/applications.py b/openpype/lib/applications.py index fb86d06150..e1b304a351 100644 --- a/openpype/lib/applications.py +++ b/openpype/lib/applications.py @@ -1,4 +1,5 @@ import os +import sys import re import copy import json @@ -708,6 +709,10 @@ class ApplicationLaunchContext: ) self.kwargs["creationflags"] = flags + if not sys.stdout: + self.kwargs["stdout"] = subprocess.DEVNULL + self.kwargs["stderr"] = subprocess.DEVNULL + self.prelaunch_hooks = None self.postlaunch_hooks = None diff --git a/openpype/modules/ftrack/event_handlers_server/action_prepare_project.py b/openpype/modules/ftrack/event_handlers_server/action_prepare_project.py index 12d687bbf2..3a96ae3311 100644 --- a/openpype/modules/ftrack/event_handlers_server/action_prepare_project.py +++ b/openpype/modules/ftrack/event_handlers_server/action_prepare_project.py @@ -1,6 +1,8 @@ import json +from avalon.api import AvalonMongoDB from openpype.api import ProjectSettings +from openpype.lib import create_project from openpype.modules.ftrack.lib import ( ServerAction, @@ -21,8 +23,24 @@ class PrepareProjectServer(ServerAction): role_list = ["Pypeclub", "Administrator", "Project Manager"] - # Key to store info about trigerring create folder structure + settings_key = "prepare_project" + item_splitter = {"type": "label", "value": "---"} + _keys_order = ( + "fps", + "frameStart", + "frameEnd", + "handleStart", + "handleEnd", + "clipIn", + "clipOut", + "resolutionHeight", + "resolutionWidth", + "pixelAspect", + "applications", + "tools_env", + "library_project", + ) def discover(self, session, entities, event): """Show only on project.""" @@ -47,13 +65,7 @@ class PrepareProjectServer(ServerAction): project_entity = entities[0] project_name = project_entity["full_name"] - try: - project_settings = ProjectSettings(project_name) - except ValueError: - return { - "message": "Project is not synchronized yet", - 
"success": False - } + project_settings = ProjectSettings(project_name) project_anatom_settings = project_settings["project_anatomy"] root_items = self.prepare_root_items(project_anatom_settings) @@ -78,14 +90,13 @@ class PrepareProjectServer(ServerAction): items.extend(ca_items) - # This item will be last (before enumerators) - # - sets value of auto synchronization - auto_sync_name = "avalon_auto_sync" + # This item will be last before enumerators + # Set value of auto synchronization auto_sync_value = project_entity["custom_attributes"].get( CUST_ATTR_AUTO_SYNC, False ) auto_sync_item = { - "name": auto_sync_name, + "name": CUST_ATTR_AUTO_SYNC, "type": "boolean", "value": auto_sync_value, "label": "AutoSync to Avalon" @@ -199,7 +210,18 @@ class PrepareProjectServer(ServerAction): str([key for key in attributes_to_set]) )) - for key, in_data in attributes_to_set.items(): + attribute_keys = set(attributes_to_set.keys()) + keys_order = [] + for key in self._keys_order: + if key in attribute_keys: + keys_order.append(key) + + attribute_keys = attribute_keys - set(keys_order) + for key in sorted(attribute_keys): + keys_order.append(key) + + for key in keys_order: + in_data = attributes_to_set[key] attr = in_data["object"] # initial item definition @@ -225,7 +247,7 @@ class PrepareProjectServer(ServerAction): multiselect_enumerators.append(self.item_splitter) multiselect_enumerators.append({ "type": "label", - "value": in_data["label"] + "value": "