diff --git a/CHANGELOG.md b/CHANGELOG.md
index 5e76d7b76a..f75f68a5bd 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,8 +1,35 @@
 # Changelog
 
-## [3.2.0-nightly.7](https://github.com/pypeclub/OpenPype/tree/HEAD)
+## [3.3.0-nightly.4](https://github.com/pypeclub/OpenPype/tree/HEAD)
 
-[Full Changelog](https://github.com/pypeclub/OpenPype/compare/2.18.4...HEAD)
+[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.2.0...HEAD)
+
+**🚀 Enhancements**
+
+- Ftrack push attributes action adds traceback to job [\#1843](https://github.com/pypeclub/OpenPype/pull/1843)
+- Prepare project action enhance [\#1838](https://github.com/pypeclub/OpenPype/pull/1838)
+- nuke: settings create missing default subsets [\#1829](https://github.com/pypeclub/OpenPype/pull/1829)
+- Update poetry lock [\#1823](https://github.com/pypeclub/OpenPype/pull/1823)
+- Settings: settings for plugins [\#1819](https://github.com/pypeclub/OpenPype/pull/1819)
+- Maya: Deadline custom settings [\#1797](https://github.com/pypeclub/OpenPype/pull/1797)
+
+**🐛 Bug fixes**
+
+- Nuke: updating effects subset fail [\#1841](https://github.com/pypeclub/OpenPype/pull/1841)
+- nuke: write render node skipped with crop [\#1836](https://github.com/pypeclub/OpenPype/pull/1836)
+- Project folder structure overrides [\#1813](https://github.com/pypeclub/OpenPype/pull/1813)
+- Maya: fix yeti settings path in extractor [\#1809](https://github.com/pypeclub/OpenPype/pull/1809)
+- Failsafe for cross project containers. [\#1806](https://github.com/pypeclub/OpenPype/pull/1806)
+- Houdini colector formatting keys fix [\#1802](https://github.com/pypeclub/OpenPype/pull/1802)
+
+**Merged pull requests:**
+
+- Add support for pyenv-win on windows [\#1822](https://github.com/pypeclub/OpenPype/pull/1822)
+- PS, AE - send actual context when another webserver is running [\#1811](https://github.com/pypeclub/OpenPype/pull/1811)
+
+## [3.2.0](https://github.com/pypeclub/OpenPype/tree/3.2.0) (2021-07-13)
+
+[Full Changelog](https://github.com/pypeclub/OpenPype/compare/CI/3.2.0-nightly.7...3.2.0)
 
 **🚀 Enhancements**
 
@@ -11,6 +38,7 @@
 - Ftrack Multiple notes as server action [\#1795](https://github.com/pypeclub/OpenPype/pull/1795)
 - Settings conditional dict [\#1777](https://github.com/pypeclub/OpenPype/pull/1777)
 - Settings application use python 2 only where needed [\#1776](https://github.com/pypeclub/OpenPype/pull/1776)
+- Settings UI copy/paste [\#1769](https://github.com/pypeclub/OpenPype/pull/1769)
 - Workfile tool widths [\#1766](https://github.com/pypeclub/OpenPype/pull/1766)
 - Push hierarchical attributes care about task parent changes [\#1763](https://github.com/pypeclub/OpenPype/pull/1763)
 - Application executables with environment variables [\#1757](https://github.com/pypeclub/OpenPype/pull/1757)
@@ -42,8 +70,6 @@
 - StandalonePublisher: failing collector for editorial [\#1738](https://github.com/pypeclub/OpenPype/pull/1738)
 - Local settings UI crash on missing defaults [\#1737](https://github.com/pypeclub/OpenPype/pull/1737)
 - TVPaint white background on thumbnail [\#1735](https://github.com/pypeclub/OpenPype/pull/1735)
-- Application without executables [\#1679](https://github.com/pypeclub/OpenPype/pull/1679)
-- Unreal: launching on Linux [\#1672](https://github.com/pypeclub/OpenPype/pull/1672)
 
 **Merged pull requests:**
 
@@ -69,7 +95,7 @@
 
 - Tools names forwards compatibility [\#1727](https://github.com/pypeclub/OpenPype/pull/1727)
 
-**Merged pull requests:**
+**⚠️ Deprecations**
 
 - global: removing obsolete ftrack validator plugin [\#1710](https://github.com/pypeclub/OpenPype/pull/1710)
@@ -93,24 +119,11 @@
 
 - Log Viewer with OpenPype style [\#1703](https://github.com/pypeclub/OpenPype/pull/1703)
 - Scrolling in OpenPype info widget [\#1702](https://github.com/pypeclub/OpenPype/pull/1702)
-- OpenPype style in modules [\#1694](https://github.com/pypeclub/OpenPype/pull/1694)
-- Sort applications and tools alphabetically in Settings UI [\#1689](https://github.com/pypeclub/OpenPype/pull/1689)
-- \#683 - Validate Frame Range in Standalone Publisher [\#1683](https://github.com/pypeclub/OpenPype/pull/1683)
-- Hiero: old container versions identify with red color [\#1682](https://github.com/pypeclub/OpenPype/pull/1682)
 
 **🐛 Bug fixes**
 
 - Nuke: broken publishing rendered frames [\#1707](https://github.com/pypeclub/OpenPype/pull/1707)
 - Standalone publisher Thumbnail export args [\#1705](https://github.com/pypeclub/OpenPype/pull/1705)
-- Bad zip can break OpenPype start [\#1691](https://github.com/pypeclub/OpenPype/pull/1691)
-- Hiero: published whole edit mov [\#1687](https://github.com/pypeclub/OpenPype/pull/1687)
-- Ftrack subprocess handle of stdout/stderr [\#1675](https://github.com/pypeclub/OpenPype/pull/1675)
-- Settings list race condifiton and mutable dict list conversion [\#1671](https://github.com/pypeclub/OpenPype/pull/1671)
-
-**Merged pull requests:**
-
-- update dependencies [\#1697](https://github.com/pypeclub/OpenPype/pull/1697)
-- Bump normalize-url from 4.5.0 to 4.5.1 in /website [\#1686](https://github.com/pypeclub/OpenPype/pull/1686)
 
 # Changelog
diff --git a/openpype/hooks/pre_foundry_apps.py b/openpype/hooks/pre_foundry_apps.py
new file mode 100644
index 0000000000..85f68c6b60
--- /dev/null
+++ b/openpype/hooks/pre_foundry_apps.py
@@ -0,0 +1,28 @@
+import subprocess
+from openpype.lib import PreLaunchHook
+
+
+class LaunchFoundryAppsWindows(PreLaunchHook):
+    """Foundry applications have a specific way of launching.
+
+    Nuke is executed "like" a python process, so on Windows the
+    `CREATE_NEW_CONSOLE` flag must be passed to trigger creation of a new
+    console. At the same time the newly created console won't create its own
+    stdout and stderr handlers, so they should not be redirected to DEVNULL.
+    """
+
+    # Should be the last hook because it modifies the launch arguments
+    order = 1000
+    app_groups = ["nuke", "nukex", "hiero", "nukestudio"]
+    platforms = ["windows"]
+
+    def execute(self):
+        # Change `creationflags` to CREATE_NEW_CONSOLE
+        # - on Windows, Nuke will create a new window with its own console.
+        # Set `stdout` and `stderr` to None so the newly created console
+        # does not get its output redirected to DEVNULL in a built OpenPype.
+        self.launch_context.kwargs.update({
+            "creationflags": subprocess.CREATE_NEW_CONSOLE,
+            "stdout": None,
+            "stderr": None
+        })
diff --git a/openpype/hooks/pre_non_python_host_launch.py b/openpype/hooks/pre_non_python_host_launch.py
index 393a878f76..0447f4a06f 100644
--- a/openpype/hooks/pre_non_python_host_launch.py
+++ b/openpype/hooks/pre_non_python_host_launch.py
@@ -49,5 +49,9 @@ class NonPythonHostHook(PreLaunchHook):
         if remainders:
             self.launch_context.launch_args.extend(remainders)
 
+        # This must be set, otherwise it wouldn't be possible to catch
+        # the output when a built OpenPype is used.
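+        # Note: subprocess.DEVNULL exists on Python 3.3+ only; older
+        # interpreters would need an explicitly opened os.devnull instead.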
self.launch_context.kwargs["stdout"] = subprocess.DEVNULL - self.launch_context.kwargs["stderr"] = subprocess.STDOUT + self.launch_context.kwargs["stderr"] = subprocess.DEVNULL diff --git a/openpype/hooks/pre_with_windows_shell.py b/openpype/hooks/pre_with_windows_shell.py deleted file mode 100644 index 441ab1a675..0000000000 --- a/openpype/hooks/pre_with_windows_shell.py +++ /dev/null @@ -1,44 +0,0 @@ -import os -import subprocess -from openpype.lib import PreLaunchHook - - -class LaunchWithWindowsShell(PreLaunchHook): - """Add shell command before executable. - - Some hosts have issues when are launched directly from python in that case - it is possible to prepend shell executable which will trigger process - instead. - """ - - # Should be as last hook because must change launch arguments to string - order = 1000 - app_groups = ["nuke", "nukex", "hiero", "nukestudio"] - platforms = ["windows"] - - def execute(self): - launch_args = self.launch_context.clear_launch_args( - self.launch_context.launch_args) - new_args = [ - # Get comspec which is cmd.exe in most cases. - os.environ.get("COMSPEC", "cmd.exe"), - # NOTE change to "/k" if want to keep console opened - "/c", - # Convert arguments to command line arguments (as string) - "\"{}\"".format( - subprocess.list2cmdline(launch_args) - ) - ] - # Convert list to string - # WARNING this only works if is used as string - args_string = " ".join(new_args) - self.log.info(( - "Modified launch arguments to be launched with shell \"{}\"." - ).format(args_string)) - - # Replace launch args with new one - self.launch_context.launch_args = args_string - # Change `creationflags` to CREATE_NEW_CONSOLE - self.launch_context.kwargs["creationflags"] = ( - subprocess.CREATE_NEW_CONSOLE - ) diff --git a/openpype/hosts/aftereffects/plugins/publish/validate_instance_asset.py b/openpype/hosts/aftereffects/plugins/publish/validate_instance_asset.py new file mode 100644 index 0000000000..eff89adcb3 --- /dev/null +++ b/openpype/hosts/aftereffects/plugins/publish/validate_instance_asset.py @@ -0,0 +1,61 @@ +from avalon import api +import pyblish.api +import openpype.api +from avalon import aftereffects + + +class ValidateInstanceAssetRepair(pyblish.api.Action): + """Repair the instance asset with value from Context.""" + + label = "Repair" + icon = "wrench" + on = "failed" + + def process(self, context, plugin): + + # Get the errored instances + failed = [] + for result in context.data["results"]: + if (result["error"] is not None and result["instance"] is not None + and result["instance"] not in failed): + failed.append(result["instance"]) + + # Apply pyblish.logic to get the instances for the plug-in + instances = pyblish.api.instances_by_plugin(failed, plugin) + stub = aftereffects.stub() + for instance in instances: + data = stub.read(instance[0]) + + data["asset"] = api.Session["AVALON_ASSET"] + stub.imprint(instance[0], data) + + +class ValidateInstanceAsset(pyblish.api.InstancePlugin): + """Validate the instance asset is the current selected context asset. + + As it might happen that multiple worfiles are opened at same time, + switching between them would mess with selected context. (From Launcher + or Ftrack). + + In that case outputs might be output under wrong asset! + + Repair action will use Context asset value (from Workfiles or Launcher) + Closing and reopening with Workfiles will refresh Context value. 
+ """ + + label = "Validate Instance Asset" + hosts = ["aftereffects"] + actions = [ValidateInstanceAssetRepair] + order = openpype.api.ValidateContentsOrder + + def process(self, instance): + instance_asset = instance.data["asset"] + current_asset = api.Session["AVALON_ASSET"] + msg = ( + f"Instance asset {instance_asset} is not the same " + f"as current context {current_asset}. PLEASE DO:\n" + f"Repair with 'A' action to use '{current_asset}'.\n" + f"If that's not correct value, close workfile and " + f"reopen via Workfiles!" + ) + assert instance_asset == current_asset, msg diff --git a/openpype/hosts/maya/plugins/publish/extract_yeti_rig.py b/openpype/hosts/maya/plugins/publish/extract_yeti_rig.py index b9bed47fa5..56d5dfe901 100644 --- a/openpype/hosts/maya/plugins/publish/extract_yeti_rig.py +++ b/openpype/hosts/maya/plugins/publish/extract_yeti_rig.py @@ -133,10 +133,10 @@ class ExtractYetiRig(openpype.api.Extractor): image_search_path = resources_dir = instance.data["resourcesDir"] settings = instance.data.get("rigsettings", None) - if settings: - settings["imageSearchPath"] = image_search_path - with open(settings_path, "w") as fp: - json.dump(settings, fp, ensure_ascii=False) + assert settings, "Yeti rig settings were not collected." + settings["imageSearchPath"] = image_search_path + with open(settings_path, "w") as fp: + json.dump(settings, fp, ensure_ascii=False) # add textures to transfers if 'transfers' not in instance.data: @@ -192,12 +192,12 @@ class ExtractYetiRig(openpype.api.Extractor): 'stagingDir': dirname } ) - self.log.info("settings file: {}".format(settings)) + self.log.info("settings file: {}".format(settings_path)) instance.data["representations"].append( { 'name': 'rigsettings', 'ext': 'rigsettings', - 'files': os.path.basename(settings), + 'files': os.path.basename(settings_path), 'stagingDir': dirname } ) diff --git a/openpype/hosts/nuke/api/lib.py b/openpype/hosts/nuke/api/lib.py index d7f3fdc6ba..eefbcc5d20 100644 --- a/openpype/hosts/nuke/api/lib.py +++ b/openpype/hosts/nuke/api/lib.py @@ -113,6 +113,14 @@ def check_inventory_versions(): "_id": io.ObjectId(avalon_knob_data["representation"]) }) + # Failsafe for not finding the representation. 
+        if not representation:
+            log.warning(
+                "Could not find the representation on "
+                "node \"{}\"".format(node.name())
+            )
+            continue
+
         # Get start frame from version data
         version = io.find_one({
             "type": "version",
@@ -391,13 +399,14 @@ def create_write_node(name, data, input=None, prenodes=None,
     if prenodes:
         for node in prenodes:
            # get attributes
-            name = node["name"]
+            pre_node_name = node["name"]
            klass = node["class"]
            knobs = node["knobs"]
            dependent = node["dependent"]
 
            # create node
-            now_node = nuke.createNode(klass, "name {}".format(name))
+            now_node = nuke.createNode(
+                klass, "name {}".format(pre_node_name))
            now_node.hideControlPanel()
 
            # add data to knob
@@ -476,27 +485,27 @@ def create_write_node(name, data, input=None, prenodes=None,
 
     linked_knob_names.append("Render")
 
-    for name in linked_knob_names:
-        if "_grp-start_" in name:
+    for _k_name in linked_knob_names:
+        if "_grp-start_" in _k_name:
             knob = nuke.Tab_Knob(
                 "rnd_attr", "Rendering attributes", nuke.TABBEGINCLOSEDGROUP)
             GN.addKnob(knob)
-        elif "_grp-end_" in name:
+        elif "_grp-end_" in _k_name:
             knob = nuke.Tab_Knob(
                 "rnd_attr_end", "Rendering attributes", nuke.TABENDGROUP)
             GN.addKnob(knob)
         else:
-            if "___" in name:
+            if "___" in _k_name:
                 # add devider
                 GN.addKnob(nuke.Text_Knob(""))
             else:
-                # add linked knob by name
+                # add linked knob by _k_name
                 link = nuke.Link_Knob("")
-                link.makeLink(write_node.name(), name)
-                link.setName(name)
+                link.makeLink(write_node.name(), _k_name)
+                link.setName(_k_name)
 
                 # make render
-                if "Render" in name:
+                if "Render" in _k_name:
                     link.setLabel("Render Local")
                     link.setFlag(0x1000)
                 GN.addKnob(link)
diff --git a/openpype/hosts/nuke/plugins/load/load_effects.py b/openpype/hosts/nuke/plugins/load/load_effects.py
index 6306767f37..8ba1b6b7c1 100644
--- a/openpype/hosts/nuke/plugins/load/load_effects.py
+++ b/openpype/hosts/nuke/plugins/load/load_effects.py
@@ -214,7 +214,7 @@ class LoadEffects(api.Loader):
                         self.log.warning(e)
                         continue
 
-                if isinstance(v, list) and len(v) > 3:
+                if isinstance(v, list) and len(v) > 4:
                     node[k].setAnimated()
                     for i, value in enumerate(v):
                         if isinstance(value, list):
diff --git a/openpype/hosts/nuke/plugins/load/load_effects_ip.py b/openpype/hosts/nuke/plugins/load/load_effects_ip.py
index 6c71f2ae16..d0cab26842 100644
--- a/openpype/hosts/nuke/plugins/load/load_effects_ip.py
+++ b/openpype/hosts/nuke/plugins/load/load_effects_ip.py
@@ -217,7 +217,7 @@ class LoadEffectsInputProcess(api.Loader):
                         self.log.warning(e)
                         continue
 
-                if isinstance(v, list) and len(v) > 3:
+                if isinstance(v, list) and len(v) > 4:
                     node[k].setAnimated()
                     for i, value in enumerate(v):
                         if isinstance(value, list):
@@ -239,10 +239,10 @@ class LoadEffectsInputProcess(api.Loader):
         output = nuke.createNode("Output")
         output.setInput(0, pre_node)
 
-        # try to place it under Viewer1
-        if not self.connect_active_viewer(GN):
-            nuke.delete(GN)
-            return
+        # # try to place it under Viewer1
+        # if not self.connect_active_viewer(GN):
+        #     nuke.delete(GN)
+        #     return
 
         # get all versions in list
         versions = io.find({
@@ -298,7 +298,11 @@ class LoadEffectsInputProcess(api.Loader):
             viewer["input_process_node"].setValue(group_node_name)
 
         # put backdrop under
-        lib.create_backdrop(label="Input Process", layer=2, nodes=[viewer, group_node], color="0x7c7faaff")
+        lib.create_backdrop(
+            label="Input Process",
+            layer=2,
+            nodes=[viewer, group_node],
+            color="0x7c7faaff")
 
         return True
diff --git a/openpype/hosts/photoshop/plugins/publish/validate_instance_asset.py b/openpype/hosts/photoshop/plugins/publish/validate_instance_asset.py
index a1de02f319..4dc1972074 100644
--- a/openpype/hosts/photoshop/plugins/publish/validate_instance_asset.py
+++ b/openpype/hosts/photoshop/plugins/publish/validate_instance_asset.py
@@ -1,5 +1,4 @@
-import os
-
+from avalon import api
 import pyblish.api
 import openpype.api
 from avalon import photoshop
@@ -27,12 +26,20 @@ class ValidateInstanceAssetRepair(pyblish.api.Action):
         for instance in instances:
             data = stub.read(instance[0])
 
-            data["asset"] = os.environ["AVALON_ASSET"]
+            data["asset"] = api.Session["AVALON_ASSET"]
             stub.imprint(instance[0], data)
 
 
 class ValidateInstanceAsset(pyblish.api.InstancePlugin):
-    """Validate the instance asset is the current asset."""
+    """Validate the instance asset is the currently selected context asset.
+
+    As multiple workfiles might be opened, switching between them could
+    mess with the selected context.
+    In that case outputs might be published under the wrong asset!
+
+    The Repair action will use the Context asset value (from Workfiles
+    or Launcher). Closing and reopening via Workfiles refreshes the value.
+    """
 
     label = "Validate Instance Asset"
     hosts = ["photoshop"]
@@ -41,9 +48,12 @@ class ValidateInstanceAsset(pyblish.api.InstancePlugin):
 
     def process(self, instance):
         instance_asset = instance.data["asset"]
-        current_asset = os.environ["AVALON_ASSET"]
+        current_asset = api.Session["AVALON_ASSET"]
         msg = (
-            "Instance asset is not the same as current asset:"
-            f"\nInstance: {instance_asset}\nCurrent: {current_asset}"
+            f"Instance asset {instance_asset} is not the same "
+            f"as the current context {current_asset}. PLEASE DO:\n"
+            f"Repair with the 'A' action to use '{current_asset}'.\n"
+            f"If that's not the correct value, close the workfile "
+            f"and reopen it via Workfiles!"
         )
         assert instance_asset == current_asset, msg
diff --git a/openpype/hosts/standalonepublisher/plugins/publish/collect_editorial.py b/openpype/hosts/standalonepublisher/plugins/publish/collect_editorial.py
index fc9d95d3d7..0a1d29ccdc 100644
--- a/openpype/hosts/standalonepublisher/plugins/publish/collect_editorial.py
+++ b/openpype/hosts/standalonepublisher/plugins/publish/collect_editorial.py
@@ -2,7 +2,7 @@
 Optional:
     presets -> extensions (
         example of use:
-            [".mov", ".mp4"]
+            ["mov", "mp4"]
     )
     presets -> source_dir (
         example of use:
@@ -11,6 +11,7 @@ Optional:
             "{root[work]}/{project[name]}/inputs"
             "./input"
            "../input"
+            ""
     )
 """
 
@@ -48,7 +49,7 @@ class CollectEditorial(pyblish.api.InstancePlugin):
     actions = []
 
     # presets
-    extensions = [".mov", ".mp4"]
+    extensions = ["mov", "mp4"]
     source_dir = None
 
     def process(self, instance):
@@ -72,7 +73,7 @@ class CollectEditorial(pyblish.api.InstancePlugin):
         video_path = None
         basename = os.path.splitext(os.path.basename(file_path))[0]
 
-        if self.source_dir:
+        if self.source_dir != "":
             source_dir = self.source_dir.replace("\\", "/")
             if ("./" in source_dir) or ("../" in source_dir):
                 # get current working dir
@@ -98,7 +99,7 @@ class CollectEditorial(pyblish.api.InstancePlugin):
                 if os.path.splitext(f)[0] not in basename:
                     continue
                 # filter out by respected extensions
-                if os.path.splitext(f)[1] not in self.extensions:
+                if os.path.splitext(f)[1][1:] not in self.extensions:
                     continue
                 video_path = os.path.join(
                     staging_dir, f
diff --git a/openpype/hosts/standalonepublisher/plugins/publish/collect_editorial_instances.py b/openpype/hosts/standalonepublisher/plugins/publish/collect_editorial_instances.py
index 3474cbcdde..60a8cf48fc 100644
--- a/openpype/hosts/standalonepublisher/plugins/publish/collect_editorial_instances.py
+++ b/openpype/hosts/standalonepublisher/plugins/publish/collect_editorial_instances.py
@@ -17,16 +17,12 @@ class CollectInstances(pyblish.api.InstancePlugin):
         "referenceMain": {
             "family": "review",
             "families": ["clip"],
-            "extensions": [".mp4"]
+            "extensions": ["mp4"]
         },
         "audioMain": {
             "family": "audio",
             "families": ["clip"],
-            "extensions": [".wav"],
-        },
-        "shotMain": {
-            "family": "shot",
-            "families": []
+            "extensions": ["wav"],
         }
     }
 
     timeline_frame_start = 900000  # starndard edl default (10:00:00:00)
@@ -178,7 +174,17 @@ class CollectInstances(pyblish.api.InstancePlugin):
                     data_key: instance.data.get(data_key)})
 
             # adding subsets to context as instances
+            self.subsets.update({
+                "shotMain": {
+                    "family": "shot",
+                    "families": []
+                }
+            })
             for subset, properities in self.subsets.items():
+                version = properities.get("version")
+                if version == 0:
+                    properities.pop("version")
+
                 # adding Review-able instance
                 subset_instance_data = instance_data.copy()
                 subset_instance_data.update(properities)
diff --git a/openpype/hosts/standalonepublisher/plugins/publish/collect_editorial_resources.py b/openpype/hosts/standalonepublisher/plugins/publish/collect_editorial_resources.py
index e262009637..ffa24cfd93 100644
--- a/openpype/hosts/standalonepublisher/plugins/publish/collect_editorial_resources.py
+++ b/openpype/hosts/standalonepublisher/plugins/publish/collect_editorial_resources.py
@@ -177,19 +177,23 @@ class CollectInstanceResources(pyblish.api.InstancePlugin):
         collection_head_name = None
         # loop trough collections and create representations
         for _collection in collections:
-            ext = _collection.tail
+            ext = _collection.tail[1:]
             collection_head_name = _collection.head
             frame_start = list(_collection.indexes)[0]
             frame_end = list(_collection.indexes)[-1]
             repre_data = {
                 "frameStart": frame_start,
                 "frameEnd": frame_end,
-                "name": ext[1:],
-                "ext": ext[1:],
+                "name": ext,
+                "ext": ext,
                 "files": [item for item in _collection],
                 "stagingDir": staging_dir
             }
 
+            if instance_data.get("keepSequence"):
+                repre_data_keep = deepcopy(repre_data)
+                instance_data["representations"].append(repre_data_keep)
+
             if "review" in instance_data["families"]:
                 repre_data.update({
                     "thumbnail": True,
@@ -208,20 +212,20 @@ class CollectInstanceResources(pyblish.api.InstancePlugin):
 
         # loop trough reminders and create representations
         for _reminding_file in remainder:
-            ext = os.path.splitext(_reminding_file)[-1]
+            ext = os.path.splitext(_reminding_file)[-1][1:]
             if ext not in instance_data["extensions"]:
                 continue
 
             if collection_head_name and (
-                (collection_head_name + ext[1:]) not in _reminding_file
-            ) and (ext in [".mp4", ".mov"]):
+                (collection_head_name + ext) not in _reminding_file
+            ) and (ext in ["mp4", "mov"]):
                 self.log.info(f"Skipping file: {_reminding_file}")
                 continue
 
             frame_start = 1
             frame_end = 1
             repre_data = {
-                "name": ext[1:],
-                "ext": ext[1:],
+                "name": ext,
+                "ext": ext,
                 "files": _reminding_file,
                 "stagingDir": staging_dir
             }
diff --git a/openpype/hosts/standalonepublisher/plugins/publish/collect_hierarchy.py b/openpype/hosts/standalonepublisher/plugins/publish/collect_hierarchy.py
index be36f30f4b..ba2aed4bfc 100644
--- a/openpype/hosts/standalonepublisher/plugins/publish/collect_hierarchy.py
+++ b/openpype/hosts/standalonepublisher/plugins/publish/collect_hierarchy.py
@@ -131,20 +131,21 @@ class CollectHierarchyInstance(pyblish.api.ContextPlugin):
             tasks_to_add = dict()
             project_tasks = io.find_one({"type": "project"})["config"]["tasks"]
             for task_name, task_data in self.shot_add_tasks.items():
-                try:
-                    if task_data["type"] in project_tasks.keys():
-                        tasks_to_add.update({task_name: task_data})
-                    else:
-                        raise KeyError(
-                            "Wrong FtrackTaskType `{}` for `{}` is not"
-                            " existing in `{}``".format(
-                                task_data["type"],
-                                task_name,
-                                list(project_tasks.keys())))
-                except KeyError as error:
+                _task_data = deepcopy(task_data)
+
+                # fix the enumerator value coming from settings
+                _task_data["type"] = task_data["type"][0]
+
+                # check if the task type is in project task types
+                if _task_data["type"] in project_tasks.keys():
+                    tasks_to_add.update({task_name: _task_data})
+                else:
                     raise KeyError(
-                        "Wrong presets: `{0}`".format(error)
-                    )
+                        "Wrong FtrackTaskType `{}` for `{}`:"
+                        " not found in `{}`".format(
+                            _task_data["type"],
+                            task_name,
+                            list(project_tasks.keys())))
 
             instance.data["tasks"] = tasks_to_add
         else:
diff --git a/openpype/hosts/standalonepublisher/plugins/publish/collect_texture.py b/openpype/hosts/standalonepublisher/plugins/publish/collect_texture.py
new file mode 100644
index 0000000000..d70a0a75b8
--- /dev/null
+++ b/openpype/hosts/standalonepublisher/plugins/publish/collect_texture.py
@@ -0,0 +1,458 @@
+import os
+import re
+import pyblish.api
+import json
+
+from avalon.api import format_template_with_optional_keys
+
+from openpype.lib import prepare_template_data
+
+
+class CollectTextures(pyblish.api.ContextPlugin):
+    """Collect workfile (and its resource_files) and textures.
+
+    Currently implements the use case with Mari and Substance Painter,
+    where one workfile is the main one (.mra - Mari) with possible
+    additional workfiles (.spp - Substance).
+
+
+    Provides:
+        1 instance per workfile (with 'resources' filled if needed)
+            (workfile family)
+        1 instance per group of textures
+            (textures family)
+    """
+
+    order = pyblish.api.CollectorOrder
+    label = "Collect Textures"
+    hosts = ["standalonepublisher"]
+    families = ["texture_batch"]
+    actions = []
+
+    # from presets
+    main_workfile_extensions = ['mra']
+    other_workfile_extensions = ['spp', 'psd']
+    texture_extensions = ["exr", "dpx", "jpg", "jpeg", "png", "tiff", "tga",
+                          "gif", "svg"]
+
+    # additional families (ftrack etc.)
+    workfile_families = []
+    textures_families = []
+
+    color_space = ["linsRGB", "raw", "acesg"]
+
+    # currently implemented placeholders ["color_space"]
+    # describing patterns in file names split by regex groups
+    input_naming_patterns = {
+        # workfile: corridorMain_v001.mra >
+        # texture: corridorMain_aluminiumID_v001_baseColor_linsRGB_1001.exr
+        "workfile": r'^([^.]+)(_[^_.]*)?_v([0-9]{3,}).+',
+        "textures": r'^([^_.]+)_([^_.]+)_v([0-9]{3,})_([^_.]+)_({color_space})_(1[0-9]{3}).+',  # noqa
+    }
+    # matching regex group position to 'input_naming_patterns'
+    input_naming_groups = {
+        "workfile": ('asset', 'filler', 'version'),
+        "textures": ('asset', 'shader', 'version', 'channel', 'color_space',
+                     'udim')
+    }
+
+    workfile_subset_template = "textures{Subset}Workfile"
+    # implemented keys: ["color_space", "channel", "subset", "shader"]
+    texture_subset_template = "textures{Subset}_{Shader}_{Channel}"
+
+    def process(self, context):
+        self.context = context
+
+        resource_files = {}
+        workfile_files = {}
+        representations = {}
+        version_data = {}
+        asset_builds = set()
+        asset = None
+        for instance in context:
+            if not self.input_naming_patterns:
+                raise ValueError("Naming patterns are not configured.\n"
\n" + "Ask admin to provide naming conventions " + "for workfiles and textures.") + + if not asset: + asset = instance.data["asset"] # selected from SP + + parsed_subset = instance.data["subset"].replace( + instance.data["family"], '') + + fill_pairs = { + "subset": parsed_subset + } + + fill_pairs = prepare_template_data(fill_pairs) + workfile_subset = format_template_with_optional_keys( + fill_pairs, self.workfile_subset_template) + + processed_instance = False + for repre in instance.data["representations"]: + ext = repre["ext"].replace('.', '') + asset_build = version = None + + if isinstance(repre["files"], list): + repre_file = repre["files"][0] + else: + repre_file = repre["files"] + + if ext in self.main_workfile_extensions or \ + ext in self.other_workfile_extensions: + + asset_build = self._get_asset_build( + repre_file, + self.input_naming_patterns["workfile"], + self.input_naming_groups["workfile"], + self.color_space + ) + version = self._get_version( + repre_file, + self.input_naming_patterns["workfile"], + self.input_naming_groups["workfile"], + self.color_space + ) + asset_builds.add((asset_build, version, + workfile_subset, 'workfile')) + processed_instance = True + + if not representations.get(workfile_subset): + representations[workfile_subset] = [] + + if ext in self.main_workfile_extensions: + # workfiles can have only single representation + # currently OP is not supporting different extensions in + # representation files + representations[workfile_subset] = [repre] + + workfile_files[asset_build] = repre_file + + if ext in self.other_workfile_extensions: + # add only if not added already from main + if not representations.get(workfile_subset): + representations[workfile_subset] = [repre] + + # only overwrite if not present + if not workfile_files.get(asset_build): + workfile_files[asset_build] = repre_file + + if not resource_files.get(workfile_subset): + resource_files[workfile_subset] = [] + item = { + "files": [os.path.join(repre["stagingDir"], + repre["files"])], + "source": "standalone publisher" + } + resource_files[workfile_subset].append(item) + + if ext in self.texture_extensions: + c_space = self._get_color_space( + repre_file, + self.color_space + ) + + channel = self._get_channel_name( + repre_file, + self.input_naming_patterns["textures"], + self.input_naming_groups["textures"], + self.color_space + ) + + shader = self._get_shader_name( + repre_file, + self.input_naming_patterns["textures"], + self.input_naming_groups["textures"], + self.color_space + ) + + formatting_data = { + "color_space": c_space or '', # None throws exception + "channel": channel or '', + "shader": shader or '', + "subset": parsed_subset or '' + } + + fill_pairs = prepare_template_data(formatting_data) + subset = format_template_with_optional_keys( + fill_pairs, self.texture_subset_template) + + asset_build = self._get_asset_build( + repre_file, + self.input_naming_patterns["textures"], + self.input_naming_groups["textures"], + self.color_space + ) + version = self._get_version( + repre_file, + self.input_naming_patterns["textures"], + self.input_naming_groups["textures"], + self.color_space + ) + if not representations.get(subset): + representations[subset] = [] + representations[subset].append(repre) + + ver_data = { + "color_space": c_space or '', + "channel_name": channel or '', + "shader_name": shader or '' + } + version_data[subset] = ver_data + + asset_builds.add( + (asset_build, version, subset, "textures")) + processed_instance = True + + if processed_instance: + 
+                self.context.remove(instance)
+
+        self._create_new_instances(context,
+                                   asset,
+                                   asset_builds,
+                                   resource_files,
+                                   representations,
+                                   version_data,
+                                   workfile_files)
+
+    def _create_new_instances(self, context, asset, asset_builds,
+                              resource_files, representations,
+                              version_data, workfile_files):
+        """Prepare new instances from collected data.
+
+        Args:
+            context (ContextPlugin)
+            asset (string): selected asset from SP
+            asset_builds (set) of tuples
+                (asset_build, version, subset, family)
+            resource_files (list) of resource dicts - to store additional
+                files to main workfile
+            representations (list) of dicts - to store workfile info OR
+                all collected texture files, key is asset_build
+            version_data (dict) - prepared to store into version doc in DB
+            workfile_files (dict) - to store workfile to add to textures
+                key is asset_build
+        """
+        # sort workfile first
+        asset_builds = sorted(asset_builds,
+                              key=lambda tup: tup[3], reverse=True)
+
+        # workfile must have version, textures might
+        main_version = None
+        for asset_build, version, subset, family in asset_builds:
+            if not main_version:
+                main_version = version
+            new_instance = context.create_instance(subset)
+            new_instance.data.update(
+                {
+                    "subset": subset,
+                    "asset": asset,
+                    "label": subset,
+                    "name": subset,
+                    "family": family,
+                    "version": int(version or main_version or 1),
+                    "asset_build": asset_build  # remove in validator
+                }
+            )
+
+            workfile = workfile_files.get(asset_build)
+
+            if resource_files.get(subset):
+                # add resources only when workfile is main style
+                for ext in self.main_workfile_extensions:
+                    if ext in workfile:
+                        new_instance.data.update({
+                            "resources": resource_files.get(subset)
+                        })
+                        break
+
+            # store origin
+            if family == 'workfile':
+                families = self.workfile_families
+
+                new_instance.data["source"] = "standalone publisher"
+            else:
+                families = self.textures_families
+
+                repre = representations.get(subset)[0]
+                new_instance.context.data["currentFile"] = os.path.join(
+                    repre["stagingDir"], workfile or 'dummy.txt')
+
+            new_instance.data["families"] = families
+
+            # add data for version document
+            ver_data = version_data.get(subset)
+            if ver_data:
+                if workfile:
+                    ver_data['workfile'] = workfile
+
+                new_instance.data.update(
+                    {"versionData": ver_data}
+                )
+
+            upd_representations = representations.get(subset)
+            if upd_representations and family != 'workfile':
+                upd_representations = self._update_representations(
+                    upd_representations)
+
+            new_instance.data["representations"] = upd_representations
+
+            self.log.debug("new instance - {}:: {}".format(
+                family,
+                json.dumps(new_instance.data, indent=4)))
+
+    def _get_asset_build(self, name,
+                         input_naming_patterns, input_naming_groups,
+                         color_spaces):
+        """Loops through configured workfile patterns to find asset name.
+
+        Asset name is used to bind the workfile and its textures.
+
+        Args:
+            name (str): workfile name
+            input_naming_patterns (list):
+                [workfile_pattern] or [texture_pattern]
+            input_naming_groups (list)
+                ordinal position of regex groups matching to input_naming..
+            color_spaces (list) - predefined color spaces
+        """
+        asset_name = "NOT_AVAIL"
+
+        return self._parse(name, input_naming_patterns, input_naming_groups,
+                           color_spaces, 'asset') or asset_name
+
+    def _get_version(self, name, input_naming_patterns, input_naming_groups,
+                     color_spaces):
+        found = self._parse(name, input_naming_patterns, input_naming_groups,
+                            color_spaces, 'version')
+
+        if found:
+            return found.replace('v', '')
+
+        self.log.info("No version found in the name {}".format(name))
+
+    def _get_udim(self, name, input_naming_patterns, input_naming_groups,
+                  color_spaces):
+        """Parses the udim value from 'name'."""
+        found = self._parse(name, input_naming_patterns, input_naming_groups,
+                            color_spaces, 'udim')
+        if found:
+            return found
+
+        self.log.warning("Didn't find UDIM in {}".format(name))
+
+    def _get_color_space(self, name, color_spaces):
+        """Looks for a color_space from a list in a file name.
+
+        Color space seems not to be recognizable by a regex pattern; a set
+        of known color spaces must be provided.
+        """
+        color_space = None
+        found = [cs for cs in color_spaces if
+                 re.search("_{}_".format(cs), name)]
+
+        if not found:
+            self.log.warning("No color space found in {}".format(name))
+        else:
+            if len(found) > 1:
+                msg = "Multiple color spaces found in {}->{}".format(name,
+                                                                     found)
+                self.log.warning(msg)
+
+            color_space = found[0]
+
+        return color_space
+
+    def _get_shader_name(self, name, input_naming_patterns,
+                         input_naming_groups, color_spaces):
+        """Return parsed shader name.
+
+        Shader name is needed for overlapping udims (eg. udims might be
+        used for different materials; the shader is needed to not overwrite).
+
+        The channel name format is unknown; color spaces are a known
+        list - 'color_space' is used as a placeholder.
+        """
+        found = self._parse(name, input_naming_patterns, input_naming_groups,
+                            color_spaces, 'shader')
+        if found:
+            return found
+
+        self.log.warning("Didn't find shader in {}".format(name))
+
+    def _get_channel_name(self, name, input_naming_patterns,
+                          input_naming_groups, color_spaces):
+        """Return parsed channel name.
+
+        The channel name format is unknown; color spaces are a known
+        list - 'color_space' is used as a placeholder.
+        """
+        found = self._parse(name, input_naming_patterns, input_naming_groups,
+                            color_spaces, 'channel')
+        if found:
+            return found
+
+        self.log.warning("Didn't find channel in {}".format(name))
+
+    def _parse(self, name, input_naming_patterns, input_naming_groups,
+               color_spaces, key):
+        """Universal way to parse 'name' with configurable regex groups.
+
+        Args:
+            name (str): workfile name
+            input_naming_patterns (list):
+                [workfile_pattern] or [texture_pattern]
+            input_naming_groups (list)
+                ordinal position of regex groups matching to input_naming..
+            color_spaces (list) - predefined color spaces
+
+        Raises:
+            ValueError - if broken 'input_naming_groups'
+        """
+        for input_pattern in input_naming_patterns:
+            for cs in color_spaces:
+                pattern = input_pattern.replace('{color_space}', cs)
+                regex_result = re.findall(pattern, name)
+                if regex_result:
+                    if key not in input_naming_groups:
+                        msg = "input_naming_groups must " +\
+                              "have '{}' key".format(key)
+                        raise ValueError(msg)
+                    idx = list(input_naming_groups).index(key)
+
+                    try:
+                        parsed_value = regex_result[0][idx]
+                        return parsed_value
+                    except IndexError:
+                        self.log.warning("Wrong index, probably "
+                                         "wrong name {}".format(name))
+
+    def _update_representations(self, upd_representations):
+        """Frames don't make sense for textures; add collected udims."""
+        udims = []
+        for repre in upd_representations:
+            repre.pop("frameStart", None)
+            repre.pop("frameEnd", None)
+            repre.pop("fps", None)
+
+            # ignore unique name from SP, use extension instead
+            # SP enforces unique names; different subsets >> unique repres
+            repre["name"] = repre["ext"].replace('.', '')
+
+            files = repre.get("files", [])
+            if not isinstance(files, list):
+                files = [files]
+
+            for file_name in files:
+                udim = self._get_udim(file_name,
+                                      self.input_naming_patterns["textures"],
+                                      self.input_naming_groups["textures"],
+                                      self.color_space)
+                udims.append(udim)
+
+            repre["udim"] = udims  # must be this way, used for filling path
+
+        return upd_representations
diff --git a/openpype/hosts/standalonepublisher/plugins/publish/extract_resources.py b/openpype/hosts/standalonepublisher/plugins/publish/extract_resources.py
new file mode 100644
index 0000000000..1183180833
--- /dev/null
+++ b/openpype/hosts/standalonepublisher/plugins/publish/extract_resources.py
@@ -0,0 +1,42 @@
+import os
+import pyblish.api
+
+
+class ExtractResources(pyblish.api.InstancePlugin):
+    """
+    Extracts files from instance.data["resources"].
+
+    These files are additional (textures etc.), currently not stored in
+    representations!
+
+    Expects collected 'resourcesDir' and 'resources' (a list of dicts
+    with a 'files' key holding source urls).
+
+    Provides filled 'transfers' (list of tuples (source_url, target_url))
+    """
+
+    label = "Extract Resources SP"
+    hosts = ["standalonepublisher"]
+    order = pyblish.api.ExtractorOrder
+
+    families = ["workfile"]
+
+    def process(self, instance):
+        if not instance.data.get("resources"):
+            self.log.info("No resources")
+            return
+
+        if not instance.data.get("transfers"):
+            instance.data["transfers"] = []
+
+        publish_dir = instance.data["resourcesDir"]
+
+        transfers = []
+        for resource in instance.data["resources"]:
+            for file_url in resource.get("files", []):
+                file_name = os.path.basename(file_url)
+                dest_url = os.path.join(publish_dir, file_name)
+                transfers.append((file_url, dest_url))
+
+        self.log.info("transfers:: {}".format(transfers))
+        instance.data["transfers"].extend(transfers)
diff --git a/openpype/hosts/standalonepublisher/plugins/publish/extract_workfile_location.py b/openpype/hosts/standalonepublisher/plugins/publish/extract_workfile_location.py
new file mode 100644
index 0000000000..18bf0394ae
--- /dev/null
+++ b/openpype/hosts/standalonepublisher/plugins/publish/extract_workfile_location.py
@@ -0,0 +1,44 @@
+import os
+import pyblish.api
+
+
+class ExtractWorkfileUrl(pyblish.api.ContextPlugin):
+    """
+    Modifies the 'workfile' field to link to the published workfile.
+
+    Expects that the batch contains only a single workfile and the
+    matching (multiple) textures.
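+    (Runs once per context; the resolved path is applied to all textures.)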
+ """ + + label = "Extract Workfile Url SP" + hosts = ["standalonepublisher"] + order = pyblish.api.ExtractorOrder + + families = ["textures"] + + def process(self, context): + filepath = None + + # first loop for workfile + for instance in context: + if instance.data["family"] == 'workfile': + anatomy = context.data['anatomy'] + template_data = instance.data.get("anatomyData") + rep_name = instance.data.get("representations")[0].get("name") + template_data["representation"] = rep_name + template_data["ext"] = rep_name + anatomy_filled = anatomy.format(template_data) + template_filled = anatomy_filled["publish"]["path"] + filepath = os.path.normpath(template_filled) + self.log.info("Using published scene for render {}".format( + filepath)) + + if not filepath: + self.log.info("Texture batch doesn't contain workfile.") + return + + # then apply to all textures + for instance in context: + if instance.data["family"] == 'textures': + instance.data["versionData"]["workfile"] = filepath diff --git a/openpype/hosts/standalonepublisher/plugins/publish/validate_texture_batch.py b/openpype/hosts/standalonepublisher/plugins/publish/validate_texture_batch.py new file mode 100644 index 0000000000..af200b59e0 --- /dev/null +++ b/openpype/hosts/standalonepublisher/plugins/publish/validate_texture_batch.py @@ -0,0 +1,22 @@ +import pyblish.api +import openpype.api + + +class ValidateTextureBatch(pyblish.api.InstancePlugin): + """Validates that some texture files are present.""" + + label = "Validate Texture Presence" + hosts = ["standalonepublisher"] + order = openpype.api.ValidateContentsOrder + families = ["workfile"] + optional = False + + def process(self, instance): + present = False + for instance in instance.context: + if instance.data["family"] == "textures": + self.log.info("Some textures present.") + + return + + assert present, "No textures found in published batch!" diff --git a/openpype/hosts/standalonepublisher/plugins/publish/validate_texture_has_workfile.py b/openpype/hosts/standalonepublisher/plugins/publish/validate_texture_has_workfile.py new file mode 100644 index 0000000000..7cd540668c --- /dev/null +++ b/openpype/hosts/standalonepublisher/plugins/publish/validate_texture_has_workfile.py @@ -0,0 +1,20 @@ +import pyblish.api +import openpype.api + + +class ValidateTextureHasWorkfile(pyblish.api.InstancePlugin): + """Validates that textures have appropriate workfile attached. + + Workfile is optional, disable this Validator after Refresh if you are + sure it is not needed. 
+ """ + label = "Validate Texture Has Workfile" + hosts = ["standalonepublisher"] + order = openpype.api.ValidateContentsOrder + families = ["textures"] + optional = True + + def process(self, instance): + wfile = instance.data["versionData"].get("workfile") + + assert wfile, "Textures are missing attached workfile" diff --git a/openpype/hosts/standalonepublisher/plugins/publish/validate_texture_name.py b/openpype/hosts/standalonepublisher/plugins/publish/validate_texture_name.py new file mode 100644 index 0000000000..92f930c3fc --- /dev/null +++ b/openpype/hosts/standalonepublisher/plugins/publish/validate_texture_name.py @@ -0,0 +1,50 @@ +import pyblish.api +import openpype.api + + +class ValidateTextureBatchNaming(pyblish.api.InstancePlugin): + """Validates that all instances had properly formatted name.""" + + label = "Validate Texture Batch Naming" + hosts = ["standalonepublisher"] + order = openpype.api.ValidateContentsOrder + families = ["workfile", "textures"] + optional = False + + def process(self, instance): + file_name = instance.data["representations"][0]["files"] + if isinstance(file_name, list): + file_name = file_name[0] + + msg = "Couldnt find asset name in '{}'\n".format(file_name) + \ + "File name doesn't follow configured pattern.\n" + \ + "Please rename the file." + assert "NOT_AVAIL" not in instance.data["asset_build"], msg + + instance.data.pop("asset_build") + + if instance.data["family"] == "textures": + file_name = instance.data["representations"][0]["files"][0] + self._check_proper_collected(instance.data["versionData"], + file_name) + + def _check_proper_collected(self, versionData, file_name): + """ + Loop through collected versionData to check if name parsing was OK. + Args: + versionData: (dict) + + Returns: + raises AssertionException + """ + missing_key_values = [] + for key, value in versionData.items(): + if not value: + missing_key_values.append(key) + + msg = "Collected data {} doesn't contain values for {}".format( + versionData, missing_key_values) + "\n" + \ + "Name of the texture file doesn't match expected pattern.\n" + \ + "Please rename file(s) {}".format(file_name) + + assert not missing_key_values, msg diff --git a/openpype/hosts/standalonepublisher/plugins/publish/validate_texture_versions.py b/openpype/hosts/standalonepublisher/plugins/publish/validate_texture_versions.py new file mode 100644 index 0000000000..90d0e8e512 --- /dev/null +++ b/openpype/hosts/standalonepublisher/plugins/publish/validate_texture_versions.py @@ -0,0 +1,38 @@ +import pyblish.api +import openpype.api + + +class ValidateTextureBatchVersions(pyblish.api.InstancePlugin): + """Validates that versions match in workfile and textures. + + Workfile is optional, so if you are sure, you can disable this + validator after Refresh. + + Validates that only single version is published at a time. 
+ """ + label = "Validate Texture Batch Versions" + hosts = ["standalonepublisher"] + order = openpype.api.ValidateContentsOrder + families = ["textures"] + optional = False + + def process(self, instance): + wfile = instance.data["versionData"].get("workfile") + + version_str = "v{:03d}".format(instance.data["version"]) + + if not wfile: # no matching workfile, do not check versions + self.log.info("No workfile present for textures") + return + + msg = "Not matching version: texture v{:03d} - workfile {}" + assert version_str in wfile, \ + msg.format( + instance.data["version"], wfile + ) + + present_versions = set() + for instance in instance.context: + present_versions.add(instance.data["version"]) + + assert len(present_versions) == 1, "Too many versions in a batch!" diff --git a/openpype/hosts/standalonepublisher/plugins/publish/validate_texture_workfiles.py b/openpype/hosts/standalonepublisher/plugins/publish/validate_texture_workfiles.py new file mode 100644 index 0000000000..aa3aad71db --- /dev/null +++ b/openpype/hosts/standalonepublisher/plugins/publish/validate_texture_workfiles.py @@ -0,0 +1,29 @@ +import pyblish.api +import openpype.api + + +class ValidateTextureBatchWorkfiles(pyblish.api.InstancePlugin): + """Validates that textures workfile has collected resources (optional). + + Collected recourses means secondary workfiles (in most cases). + """ + + label = "Validate Texture Workfile Has Resources" + hosts = ["standalonepublisher"] + order = openpype.api.ValidateContentsOrder + families = ["workfile"] + optional = True + + # from presets + main_workfile_extensions = ['mra'] + + def process(self, instance): + if instance.data["family"] == "workfile": + ext = instance.data["representations"][0]["ext"] + if ext not in self.main_workfile_extensions: + self.log.warning("Only secondary workfile present!") + return + + msg = "No secondary workfiles present for workfile {}".\ + format(instance.data["name"]) + assert instance.data.get("resources"), msg diff --git a/openpype/hosts/tvpaint/plugins/publish/collect_workfile_data.py b/openpype/hosts/tvpaint/plugins/publish/collect_workfile_data.py index d8bb03f541..79cc01740a 100644 --- a/openpype/hosts/tvpaint/plugins/publish/collect_workfile_data.py +++ b/openpype/hosts/tvpaint/plugins/publish/collect_workfile_data.py @@ -155,6 +155,7 @@ class CollectWorkfileData(pyblish.api.ContextPlugin): "sceneMarkInState": mark_in_state == "set", "sceneMarkOut": int(mark_out_frame), "sceneMarkOutState": mark_out_state == "set", + "sceneStartFrame": int(lib.execute_george("tv_startframe")), "sceneBgColor": self._get_bg_color() } self.log.debug( diff --git a/openpype/hosts/tvpaint/plugins/publish/extract_sequence.py b/openpype/hosts/tvpaint/plugins/publish/extract_sequence.py index 536df2adb0..1df7512588 100644 --- a/openpype/hosts/tvpaint/plugins/publish/extract_sequence.py +++ b/openpype/hosts/tvpaint/plugins/publish/extract_sequence.py @@ -49,6 +49,14 @@ class ExtractSequence(pyblish.api.Extractor): family_lowered = instance.data["family"].lower() mark_in = instance.context.data["sceneMarkIn"] mark_out = instance.context.data["sceneMarkOut"] + + # Scene start frame offsets the output files, so we need to offset the + # marks. 
+        scene_start_frame = instance.context.data["sceneStartFrame"]
+        difference = scene_start_frame - mark_in
+        mark_in += difference
+        mark_out += difference
+
         # Frame start/end may be stored as float
         frame_start = int(instance.data["frameStart"])
         frame_end = int(instance.data["frameEnd"])
@@ -98,7 +106,7 @@ class ExtractSequence(pyblish.api.Extractor):
             self.log.warning((
                 "Lowering representation range to {} frames."
                 " Changed frame end {} -> {}"
-            ).format(output_range + 1, mark_out, new_mark_out))
+            ).format(output_range + 1, mark_out, new_output_frame_end))
             output_frame_end = new_output_frame_end
 
         # -------------------------------------------------------------------
diff --git a/openpype/hosts/tvpaint/plugins/publish/validate_start_frame.py b/openpype/hosts/tvpaint/plugins/publish/validate_start_frame.py
new file mode 100644
index 0000000000..d769d47736
--- /dev/null
+++ b/openpype/hosts/tvpaint/plugins/publish/validate_start_frame.py
@@ -0,0 +1,27 @@
+import pyblish.api
+from avalon.tvpaint import lib
+
+
+class RepairStartFrame(pyblish.api.Action):
+    """Repair start frame."""
+
+    label = "Repair"
+    icon = "wrench"
+    on = "failed"
+
+    def process(self, context, plugin):
+        lib.execute_george("tv_startframe 0")
+
+
+class ValidateStartFrame(pyblish.api.ContextPlugin):
+    """Validate that the start frame is at frame 0."""
+
+    label = "Validate Start Frame"
+    order = pyblish.api.ValidatorOrder
+    hosts = ["tvpaint"]
+    actions = [RepairStartFrame]
+    optional = True
+
+    def process(self, context):
+        start_frame = lib.execute_george("tv_startframe")
+        assert int(start_frame) == 0, "Start frame has to be frame 0."
diff --git a/openpype/lib/applications.py b/openpype/lib/applications.py
index fb86d06150..e1b304a351 100644
--- a/openpype/lib/applications.py
+++ b/openpype/lib/applications.py
@@ -1,4 +1,5 @@
 import os
+import sys
 import re
 import copy
 import json
@@ -708,6 +709,10 @@ class ApplicationLaunchContext:
             )
             self.kwargs["creationflags"] = flags
 
+        if not sys.stdout:
+            self.kwargs["stdout"] = subprocess.DEVNULL
+            self.kwargs["stderr"] = subprocess.DEVNULL
+
         self.prelaunch_hooks = None
         self.postlaunch_hooks = None
 
diff --git a/openpype/modules/ftrack/event_handlers_server/action_prepare_project.py b/openpype/modules/ftrack/event_handlers_server/action_prepare_project.py
index 12d687bbf2..3a96ae3311 100644
--- a/openpype/modules/ftrack/event_handlers_server/action_prepare_project.py
+++ b/openpype/modules/ftrack/event_handlers_server/action_prepare_project.py
@@ -1,6 +1,8 @@
 import json
 
+from avalon.api import AvalonMongoDB
 from openpype.api import ProjectSettings
+from openpype.lib import create_project
 
 from openpype.modules.ftrack.lib import (
     ServerAction,
@@ -21,8 +23,24 @@ class PrepareProjectServer(ServerAction):
 
     role_list = ["Pypeclub", "Administrator", "Project Manager"]
 
-    # Key to store info about trigerring create folder structure
+    settings_key = "prepare_project"
 
     item_splitter = {"type": "label", "value": "---"}
+    _keys_order = (
+        "fps",
+        "frameStart",
+        "frameEnd",
+        "handleStart",
+        "handleEnd",
+        "clipIn",
+        "clipOut",
+        "resolutionHeight",
+        "resolutionWidth",
+        "pixelAspect",
+        "applications",
+        "tools_env",
+        "library_project",
+    )
 
     def discover(self, session, entities, event):
         """Show only on project."""
@@ -47,13 +65,7 @@ class PrepareProjectServer(ServerAction):
         project_entity = entities[0]
         project_name = project_entity["full_name"]
 
-        try:
-            project_settings = ProjectSettings(project_name)
-        except ValueError:
-            return {
-                "message": "Project is not synchronized yet",
"success": False - } + project_settings = ProjectSettings(project_name) project_anatom_settings = project_settings["project_anatomy"] root_items = self.prepare_root_items(project_anatom_settings) @@ -78,14 +90,13 @@ class PrepareProjectServer(ServerAction): items.extend(ca_items) - # This item will be last (before enumerators) - # - sets value of auto synchronization - auto_sync_name = "avalon_auto_sync" + # This item will be last before enumerators + # Set value of auto synchronization auto_sync_value = project_entity["custom_attributes"].get( CUST_ATTR_AUTO_SYNC, False ) auto_sync_item = { - "name": auto_sync_name, + "name": CUST_ATTR_AUTO_SYNC, "type": "boolean", "value": auto_sync_value, "label": "AutoSync to Avalon" @@ -199,7 +210,18 @@ class PrepareProjectServer(ServerAction): str([key for key in attributes_to_set]) )) - for key, in_data in attributes_to_set.items(): + attribute_keys = set(attributes_to_set.keys()) + keys_order = [] + for key in self._keys_order: + if key in attribute_keys: + keys_order.append(key) + + attribute_keys = attribute_keys - set(keys_order) + for key in sorted(attribute_keys): + keys_order.append(key) + + for key in keys_order: + in_data = attributes_to_set[key] attr = in_data["object"] # initial item definition @@ -225,7 +247,7 @@ class PrepareProjectServer(ServerAction): multiselect_enumerators.append(self.item_splitter) multiselect_enumerators.append({ "type": "label", - "value": in_data["label"] + "value": "