diff --git a/pype/hosts/harmony/__init__.py b/pype/hosts/harmony/__init__.py index 3cae695852..d4b7d91fdb 100644 --- a/pype/hosts/harmony/__init__.py +++ b/pype/hosts/harmony/__init__.py @@ -151,27 +151,31 @@ def application_launch(): def export_template(backdrops, nodes, filepath): func = """function func(args) { - // Add an extra node just so a new group can be created. + var temp_node = node.add("Top", "temp_note", "NOTE", 0, 0, 0); var template_group = node.createGroup(temp_node, "temp_group"); node.deleteNode( template_group + "/temp_note" ); - // This will make Node View to focus on the new group. + selection.clearSelection(); + for (var f = 0; f < args[1].length; f++) + { + selection.addNodeToSelection(args[1][f]); + } + + Action.perform("copy()", "Node View"); + selection.clearSelection(); selection.addNodeToSelection(template_group); Action.perform("onActionEnterGroup()", "Node View"); + Action.perform("paste()", "Node View"); // Recreate backdrops in group. for (var i = 0 ; i < args[0].length; i++) { + MessageLog.trace(args[0][i]); Backdrop.addBackdrop(template_group, args[0][i]); }; - // Copy-paste the selected nodes into the new group. - var drag_object = copyPaste.copy(args[1], 1, frame.numberOf, ""); - copyPaste.pasteNewNodes(drag_object, template_group, ""); - - // Select all nodes within group and export as template. Action.perform( "selectAll()", "Node View" ); copyPaste.createTemplateFromSelection(args[2], args[3]); diff --git a/pype/modules/ftrack/actions/action_delivery.py b/pype/modules/ftrack/actions/action_delivery.py index a2048222e5..7dbb7c65e8 100644 --- a/pype/modules/ftrack/actions/action_delivery.py +++ b/pype/modules/ftrack/actions/action_delivery.py @@ -81,13 +81,15 @@ class Delivery(BaseAction): anatomy = Anatomy(project_name) new_anatomies = [] first = None - for key in (anatomy.templates.get("delivery") or {}): - new_anatomies.append({ - "label": key, - "value": key - }) - if first is None: - first = key + for key, template in (anatomy.templates.get("delivery") or {}).items(): + # Use only keys with `{root}` or `{root[*]}` in value + if isinstance(template, str) and "{root" in template: + new_anatomies.append({ + "label": key, + "value": key + }) + if first is None: + first = key skipped = False # Add message if there are any common components @@ -226,12 +228,7 @@ class Delivery(BaseAction): if location_path: location_path = os.path.normpath(location_path) if not os.path.exists(location_path): - return { - "success": False, - "message": ( - "Entered location path does not exists. 
\"{}\"" - ).format(location_path) - } + os.makedirs(location_path) self.db_con.install() self.db_con.Session["AVALON_PROJECT"] = project_name @@ -293,6 +290,20 @@ class Delivery(BaseAction): repres_to_deliver.append(repre) anatomy = Anatomy(project_name) + + format_dict = {} + if location_path: + location_path = location_path.replace("\\", "/") + root_names = anatomy.root_names_from_templates( + anatomy.templates["delivery"] + ) + if root_names is None: + format_dict["root"] = location_path + else: + format_dict["root"] = {} + for name in root_names: + format_dict["root"][name] = location_path + for repre in repres_to_deliver: # Get destination repre path anatomy_data = copy.deepcopy(repre["context"]) @@ -339,25 +350,33 @@ class Delivery(BaseAction): repre_path = self.path_from_represenation(repre, anatomy) # TODO add backup solution where root of path from component # is repalced with root - if not frame: - self.process_single_file( - repre_path, anatomy, anatomy_name, anatomy_data - ) + args = ( + repre_path, + anatomy, + anatomy_name, + anatomy_data, + format_dict + ) + if not frame: + self.process_single_file(*args) else: - self.process_sequence( - repre_path, anatomy, anatomy_name, anatomy_data - ) + self.process_sequence(*args) self.db_con.uninstall() return self.report() def process_single_file( - self, repre_path, anatomy, anatomy_name, anatomy_data + self, repre_path, anatomy, anatomy_name, anatomy_data, format_dict ): anatomy_filled = anatomy.format(anatomy_data) - delivery_path = anatomy_filled["delivery"][anatomy_name] + if format_dict: + template_result = anatomy_filled["delivery"][anatomy_name] + delivery_path = template_result.rootless.format(**format_dict) + else: + delivery_path = anatomy_filled["delivery"][anatomy_name] + delivery_folder = os.path.dirname(delivery_path) if not os.path.exists(delivery_folder): os.makedirs(delivery_folder) @@ -365,7 +384,7 @@ class Delivery(BaseAction): self.copy_file(repre_path, delivery_path) def process_sequence( - self, repre_path, anatomy, anatomy_name, anatomy_data + self, repre_path, anatomy, anatomy_name, anatomy_data, format_dict ): dir_path, file_name = os.path.split(str(repre_path)) @@ -408,8 +427,12 @@ class Delivery(BaseAction): anatomy_data["frame"] = frame_indicator anatomy_filled = anatomy.format(anatomy_data) - delivery_path = anatomy_filled["delivery"][anatomy_name] - print(delivery_path) + if format_dict: + template_result = anatomy_filled["delivery"][anatomy_name] + delivery_path = template_result.rootless.format(**format_dict) + else: + delivery_path = anatomy_filled["delivery"][anatomy_name] + delivery_folder = os.path.dirname(delivery_path) dst_head, dst_tail = delivery_path.split(frame_indicator) dst_padding = src_collection.padding diff --git a/pype/modules/ftrack/lib/avalon_sync.py b/pype/modules/ftrack/lib/avalon_sync.py index c5c9eb9054..885b9d25cc 100644 --- a/pype/modules/ftrack/lib/avalon_sync.py +++ b/pype/modules/ftrack/lib/avalon_sync.py @@ -70,7 +70,7 @@ def get_avalon_attr(session, split_hierarchical=True): cust_attrs_query = ( "select id, entity_type, object_type_id, is_hierarchical, default" " from CustomAttributeConfiguration" - " where group.name = \"avalon\"" + " where group.name in (\"avalon\", \"pype\")" ) all_avalon_attr = session.query(cust_attrs_query).all() for cust_attr in all_avalon_attr: diff --git a/pype/modules/standalonepublish/publish.py b/pype/modules/standalonepublish/publish.py index dd65030f7a..0a30d5f2cb 100644 --- a/pype/modules/standalonepublish/publish.py +++ 
b/pype/modules/standalonepublish/publish.py @@ -5,14 +5,14 @@ import tempfile import random import string -from avalon import io, api -from avalon.tools import publish as av_publish - +from avalon import io import pype -from pype.api import execute +from pype.api import execute, Logger import pyblish.api -from . import PUBLISH_PATHS + + +log = Logger().get_logger("standalonepublisher") def set_context(project, asset, task, app): @@ -61,105 +61,71 @@ def set_context(project, asset, task, app): def publish(data, gui=True): # cli pyblish seems like better solution return cli_publish(data, gui) - # # this uses avalon pyblish launch tool - # avalon_api_publish(data, gui) - - -def avalon_api_publish(data, gui=True): - ''' Launches Pyblish (GUI by default) - :param data: Should include data for pyblish and standalone collector - :type data: dict - :param gui: Pyblish will be launched in GUI mode if set to True - :type gui: bool - ''' - io.install() - - # Create hash name folder in temp - chars = "".join([random.choice(string.ascii_letters) for i in range(15)]) - staging_dir = tempfile.mkdtemp(chars) - - # create also json and fill with data - json_data_path = staging_dir + os.path.basename(staging_dir) + '.json' - with open(json_data_path, 'w') as outfile: - json.dump(data, outfile) - - args = [ - "-pp", os.pathsep.join(pyblish.api.registered_paths()) - ] - - envcopy = os.environ.copy() - envcopy["PYBLISH_HOSTS"] = "standalonepublisher" - envcopy["SAPUBLISH_INPATH"] = json_data_path - - if gui: - av_publish.show() - else: - returncode = execute([ - sys.executable, "-u", "-m", "pyblish" - ] + args, env=envcopy) - - io.uninstall() def cli_publish(data, gui=True): + from . import PUBLISH_PATHS + + PUBLISH_SCRIPT_PATH = os.path.join(os.path.dirname(__file__), "publish.py") io.install() - pyblish.api.deregister_all_plugins() - # Registers Global pyblish plugins - pype.install() - # Registers Standalone pyblish plugins - for path in PUBLISH_PATHS: - pyblish.api.register_plugin_path(path) - - project_plugins_paths = os.environ.get("PYPE_PROJECT_PLUGINS") - project_name = os.environ["AVALON_PROJECT"] - if project_plugins_paths and project_name: - for path in project_plugins_paths.split(os.pathsep): - if not path: - continue - plugin_path = os.path.join(path, project_name, "plugins") - if os.path.exists(plugin_path): - pyblish.api.register_plugin_path(plugin_path) - api.register_plugin_path(api.Loader, plugin_path) - api.register_plugin_path(api.Creator, plugin_path) - # Create hash name folder in temp chars = "".join([random.choice(string.ascii_letters) for i in range(15)]) staging_dir = tempfile.mkdtemp(chars) - # create json for return data - return_data_path = ( - staging_dir + os.path.basename(staging_dir) + 'return.json' - ) # create also json and fill with data json_data_path = staging_dir + os.path.basename(staging_dir) + '.json' with open(json_data_path, 'w') as outfile: json.dump(data, outfile) - args = [ - "-pp", os.pathsep.join(pyblish.api.registered_paths()) - ] - - if gui: - args += ["gui"] - envcopy = os.environ.copy() envcopy["PYBLISH_HOSTS"] = "standalonepublisher" envcopy["SAPUBLISH_INPATH"] = json_data_path - envcopy["SAPUBLISH_OUTPATH"] = return_data_path - envcopy["PYBLISH_GUI"] = "pyblish_pype" + envcopy["PYBLISHGUI"] = "pyblish_pype" + envcopy["PUBLISH_PATHS"] = os.pathsep.join(PUBLISH_PATHS) - returncode = execute([ - sys.executable, "-u", "-m", "pyblish" - ] + args, env=envcopy) + result = execute( + [sys.executable, PUBLISH_SCRIPT_PATH], + env=envcopy + ) result = {} if 
os.path.exists(json_data_path): with open(json_data_path, "r") as f: result = json.load(f) + log.info(f"Publish result: {result}") + io.uninstall() - # TODO: check if was pyblish successful - # if successful return True - print('Check result here') + return False + + +def main(env): + from avalon.tools import publish + # Registers pype's Global pyblish plugins + pype.install() + + # Register additional paths + addition_paths_str = env.get("PUBLISH_PATHS") or "" + addition_paths = addition_paths_str.split(os.pathsep) + for path in addition_paths: + path = os.path.normpath(path) + if not os.path.exists(path): + continue + pyblish.api.register_plugin_path(path) + + # Register project specific plugins + project_name = os.environ["AVALON_PROJECT"] + project_plugins_paths = env.get("PYPE_PROJECT_PLUGINS") or "" + for path in project_plugins_paths.split(os.pathsep): + plugin_path = os.path.join(path, project_name, "plugins") + if os.path.exists(plugin_path): + pyblish.api.register_plugin_path(plugin_path) + + return publish.show() + + +if __name__ == "__main__": + result = main(os.environ) + sys.exit(not bool(result)) diff --git a/pype/modules/standalonepublish/widgets/widget_drop_frame.py b/pype/modules/standalonepublish/widgets/widget_drop_frame.py index c91e906f45..57547a3d5f 100644 --- a/pype/modules/standalonepublish/widgets/widget_drop_frame.py +++ b/pype/modules/standalonepublish/widgets/widget_drop_frame.py @@ -357,7 +357,7 @@ class DropDataFrame(QtWidgets.QFrame): if data['name'] == item.in_data['name']: found = True break - paths = data['files'] + paths = list(data['files']) paths.extend(item.in_data['files']) c, r = clique.assemble(paths) if len(c) == 0: @@ -392,7 +392,7 @@ class DropDataFrame(QtWidgets.QFrame): else: if data['name'] != item.in_data['name']: continue - if data['files'] == item.in_data['files']: + if data['files'] == list(item.in_data['files']): found = True break a_name = 'merge' diff --git a/pype/plugins/celaction/publish/collect_render_path.py b/pype/plugins/celaction/publish/collect_render_path.py index a3918a52b6..9cbb0e4880 100644 --- a/pype/plugins/celaction/publish/collect_render_path.py +++ b/pype/plugins/celaction/publish/collect_render_path.py @@ -12,7 +12,7 @@ class CollectRenderPath(pyblish.api.InstancePlugin): # Presets anatomy_render_key = None - anatomy_publish_render_key = None + publish_render_metadata = None def process(self, instance): anatomy = instance.context.data["anatomy"] @@ -28,7 +28,7 @@ class CollectRenderPath(pyblish.api.InstancePlugin): # get anatomy rendering keys anatomy_render_key = self.anatomy_render_key or "render" - anatomy_publish_render_key = self.anatomy_publish_render_key or "render" + publish_render_metadata = self.publish_render_metadata or "render" # get folder and path for rendering images from celaction render_dir = anatomy_filled[anatomy_render_key]["folder"] @@ -46,8 +46,11 @@ class CollectRenderPath(pyblish.api.InstancePlugin): instance.data["path"] = render_path # get anatomy for published renders folder path - if anatomy_filled.get(anatomy_publish_render_key): - instance.data["publishRenderFolder"] = anatomy_filled[ - anatomy_publish_render_key]["folder"] + if anatomy_filled.get(publish_render_metadata): + instance.data["publishRenderMetadataFolder"] = anatomy_filled[ + publish_render_metadata]["folder"] + self.log.info("Metadata render path: `{}`".format( + instance.data["publishRenderMetadataFolder"] + )) self.log.info(f"Render output path set to: `{render_path}`") diff --git 
a/pype/plugins/celaction/publish/integrate_version_up.py b/pype/plugins/celaction/publish/integrate_version_up.py index 1822ceabcb..e15c5d5bf6 100644 --- a/pype/plugins/celaction/publish/integrate_version_up.py +++ b/pype/plugins/celaction/publish/integrate_version_up.py @@ -1,5 +1,5 @@ import shutil -import re +import pype import pyblish.api @@ -12,57 +12,9 @@ class VersionUpScene(pyblish.api.ContextPlugin): def process(self, context): current_file = context.data.get('currentFile') - v_up = get_version_up(current_file) + v_up = pype.lib.version_up(current_file) self.log.debug('Current file is: {}'.format(current_file)) self.log.debug('Version up: {}'.format(v_up)) shutil.copy2(current_file, v_up) self.log.info('Scene saved into new version: {}'.format(v_up)) - - -def version_get(string, prefix, suffix=None): - """Extract version information from filenames used by DD (and Weta, apparently) - These are _v# or /v# or .v# where v is a prefix string, in our case - we use "v" for render version and "c" for camera track version. - See the version.py and camera.py plugins for usage.""" - - if string is None: - raise ValueError("Empty version string - no match") - - regex = r"[/_.]{}\d+".format(prefix) - matches = re.findall(regex, string, re.IGNORECASE) - if not len(matches): - msg = f"No `_{prefix}#` found in `{string}`" - raise ValueError(msg) - return (matches[-1:][0][1], re.search(r"\d+", matches[-1:][0]).group()) - - -def version_set(string, prefix, oldintval, newintval): - """Changes version information from filenames used by DD (and Weta, apparently) - These are _v# or /v# or .v# where v is a prefix string, in our case - we use "v" for render version and "c" for camera track version. - See the version.py and camera.py plugins for usage.""" - - regex = r"[/_.]{}\d+".format(prefix) - matches = re.findall(regex, string, re.IGNORECASE) - if not len(matches): - return "" - - # Filter to retain only version strings with matching numbers - matches = filter(lambda s: int(s[2:]) == oldintval, matches) - - # Replace all version strings with matching numbers - for match in matches: - # use expression instead of expr so 0 prefix does not make octal - fmt = "%%(#)0%dd" % (len(match) - 2) - newfullvalue = match[0] + prefix + str(fmt % {"#": newintval}) - string = re.sub(match, newfullvalue, string) - return string - - -def get_version_up(path): - """ Returns the next version of the path """ - - (prefix, v) = version_get(path, 'v') - v = int(v) - return version_set(path, prefix, v, v + 1) diff --git a/pype/plugins/premiere/publish/integrate_ftrack_component_overwrite.py b/pype/plugins/ftrack/publish/integrate_ftrack_component_overwrite.py similarity index 100% rename from pype/plugins/premiere/publish/integrate_ftrack_component_overwrite.py rename to pype/plugins/ftrack/publish/integrate_ftrack_component_overwrite.py diff --git a/pype/plugins/ftrack/publish/integrate_hierarchy_ftrack.py b/pype/plugins/ftrack/publish/integrate_hierarchy_ftrack.py index a0059c55a6..cc569ce2d1 100644 --- a/pype/plugins/ftrack/publish/integrate_hierarchy_ftrack.py +++ b/pype/plugins/ftrack/publish/integrate_hierarchy_ftrack.py @@ -35,7 +35,7 @@ class IntegrateHierarchyToFtrack(pyblish.api.ContextPlugin): order = pyblish.api.IntegratorOrder - 0.04 label = 'Integrate Hierarchy To Ftrack' - families = ["clip", "shot"] + families = ["shot"] optional = False def process(self, context): diff --git a/pype/plugins/global/publish/extract_burnin.py b/pype/plugins/global/publish/extract_burnin.py index 83ad4af1c2..e1508b9131 100644 --- 
a/pype/plugins/global/publish/extract_burnin.py +++ b/pype/plugins/global/publish/extract_burnin.py @@ -19,7 +19,14 @@ class ExtractBurnin(pype.api.Extractor): label = "Extract burnins" order = pyblish.api.ExtractorOrder + 0.03 families = ["review", "burnin"] - hosts = ["nuke", "maya", "shell", "nukestudio", "premiere"] + hosts = [ + "nuke", + "maya", + "shell", + "nukestudio", + "premiere", + "standalonepublisher" + ] optional = True positions = [ diff --git a/pype/plugins/global/publish/extract_hierarchy_avalon.py b/pype/plugins/global/publish/extract_hierarchy_avalon.py index 83cf03b042..ab8226f6ef 100644 --- a/pype/plugins/global/publish/extract_hierarchy_avalon.py +++ b/pype/plugins/global/publish/extract_hierarchy_avalon.py @@ -7,7 +7,7 @@ class ExtractHierarchyToAvalon(pyblish.api.ContextPlugin): order = pyblish.api.ExtractorOrder - 0.01 label = "Extract Hierarchy To Avalon" - families = ["clip", "shot", "editorial"] + families = ["clip", "shot"] def process(self, context): if "hierarchyContext" not in context.data: diff --git a/pype/plugins/global/publish/extract_review.py b/pype/plugins/global/publish/extract_review.py index 30d1de8328..a16c3ce256 100644 --- a/pype/plugins/global/publish/extract_review.py +++ b/pype/plugins/global/publish/extract_review.py @@ -22,7 +22,15 @@ class ExtractReview(pyblish.api.InstancePlugin): label = "Extract Review" order = pyblish.api.ExtractorOrder + 0.02 families = ["review"] - hosts = ["nuke", "maya", "shell", "nukestudio", "premiere", "harmony"] + hosts = [ + "nuke", + "maya", + "shell", + "nukestudio", + "premiere", + "harmony", + "standalonepublisher" + ] # Supported extensions image_exts = ["exr", "jpg", "jpeg", "png", "dpx"] diff --git a/pype/plugins/global/publish/submit_publish_job.py b/pype/plugins/global/publish/submit_publish_job.py index 9f89466c31..1ea31d4a0b 100644 --- a/pype/plugins/global/publish/submit_publish_job.py +++ b/pype/plugins/global/publish/submit_publish_job.py @@ -206,7 +206,8 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): def _create_metadata_path(self, instance): ins_data = instance.data # Ensure output dir exists - output_dir = ins_data.get("publishRenderFolder", ins_data["outputDir"]) + output_dir = ins_data.get( + "publishRenderMetadataFolder", ins_data["outputDir"]) try: if not os.path.isdir(output_dir): diff --git a/pype/plugins/harmony/publish/extract_render.py b/pype/plugins/harmony/publish/extract_render.py index fe1352f9f9..45b52e0307 100644 --- a/pype/plugins/harmony/publish/extract_render.py +++ b/pype/plugins/harmony/publish/extract_render.py @@ -72,19 +72,27 @@ class ExtractRender(pyblish.api.InstancePlugin): self.log.info(output.decode("utf-8")) # Collect rendered files. + self.log.debug(path) files = os.listdir(path) + self.log.debug(files) collections, remainder = clique.assemble(files, minimum_items=1) assert not remainder, ( "There should not be a remainder for {0}: {1}".format( instance[0], remainder ) ) - assert len(collections) == 1, ( - "There should only be one image sequence in {}. Found: {}".format( - path, len(collections) - ) - ) - collection = collections[0] + self.log.debug(collections) + if len(collections) > 1: + for col in collections: + if len(list(col)) > 1: + collection = col + else: + # assert len(collections) == 1, ( + # "There should only be one image sequence in {}. Found: {}".format( + # path, len(collections) + # ) + # ) + collection = collections[0] # Generate thumbnail. 
thumbnail_path = os.path.join(path, "thumbnail.png") diff --git a/pype/plugins/harmony/publish/validate_scene_settings.py b/pype/plugins/harmony/publish/validate_scene_settings.py index aa9a70bd85..d7895804bd 100644 --- a/pype/plugins/harmony/publish/validate_scene_settings.py +++ b/pype/plugins/harmony/publish/validate_scene_settings.py @@ -28,8 +28,11 @@ class ValidateSceneSettings(pyblish.api.InstancePlugin): hosts = ["harmony"] actions = [ValidateSceneSettingsRepair] + frame_check_filter = ["_ch_", "_pr_", "_intd_", "_extd_"] + def process(self, instance): expected_settings = pype.hosts.harmony.get_asset_settings() + self.log.info(expected_settings) # Harmony is expected to start at 1. frame_start = expected_settings["frameStart"] @@ -37,6 +40,12 @@ class ValidateSceneSettings(pyblish.api.InstancePlugin): expected_settings["frameEnd"] = frame_end - frame_start + 1 expected_settings["frameStart"] = 1 + self.log.info(instance.context.data['anatomyData']['asset']) + + if any(string in instance.context.data['anatomyData']['asset'] + for string in self.frame_check_filter): + expected_settings.pop("frameEnd") + func = """function func() { return { diff --git a/pype/plugins/maya/load/load_ass.py b/pype/plugins/maya/load/load_ass.py index 210b1fde1e..ffe70c39e8 100644 --- a/pype/plugins/maya/load/load_ass.py +++ b/pype/plugins/maya/load/load_ass.py @@ -98,15 +98,19 @@ class AssProxyLoader(pype.hosts.maya.plugin.ReferenceLoader): node = container["objectName"] + representation["context"].pop("frame", None) path = api.get_representation_path(representation) + print(path) # path = self.fname + print(self.fname) proxyPath = os.path.splitext(path)[0] + ".ma" + print(proxyPath) # Get reference node from container members members = cmds.sets(node, query=True, nodesOnly=True) reference_node = self._get_reference_node(members) - assert os.path.exists(path), "%s does not exist." % proxyPath + assert os.path.exists(proxyPath), "%s does not exist." 
% proxyPath try: content = cmds.file(proxyPath, diff --git a/pype/plugins/nukestudio/publish/collect_hierarchy_context.py b/pype/plugins/nukestudio/publish/collect_hierarchy_context.py index 38040f8c51..a41e987bdb 100644 --- a/pype/plugins/nukestudio/publish/collect_hierarchy_context.py +++ b/pype/plugins/nukestudio/publish/collect_hierarchy_context.py @@ -210,6 +210,7 @@ class CollectHierarchyInstance(pyblish.api.ContextPlugin): self.log.debug( "assets_shared: {assets_shared}".format(**locals())) + class CollectHierarchyContext(pyblish.api.ContextPlugin): '''Collecting Hierarchy from instaces and building context hierarchy tree diff --git a/pype/plugins/photoshop/publish/extract_image.py b/pype/plugins/photoshop/publish/extract_image.py index da3197c7da..6dfccdc4f2 100644 --- a/pype/plugins/photoshop/publish/extract_image.py +++ b/pype/plugins/photoshop/publish/extract_image.py @@ -13,6 +13,7 @@ class ExtractImage(pype.api.Extractor): label = "Extract Image" hosts = ["photoshop"] families = ["image"] + formats = ["png", "jpg"] def process(self, instance): @@ -32,20 +33,22 @@ class ExtractImage(pype.api.Extractor): if layer.id not in extract_ids: layer.Visible = False - save_options = { - "png": photoshop.com_objects.PNGSaveOptions(), - "jpg": photoshop.com_objects.JPEGSaveOptions() - } + save_options = {} + if "png" in self.formats: + save_options["png"] = photoshop.com_objects.PNGSaveOptions() + if "jpg" in self.formats: + save_options["jpg"] = photoshop.com_objects.JPEGSaveOptions() + file_basename = os.path.splitext( + photoshop.app().ActiveDocument.Name + )[0] for extension, save_option in save_options.items(): + _filename = "{}.{}".format(file_basename, extension) + files[extension] = _filename + + full_filename = os.path.join(staging_dir, _filename) photoshop.app().ActiveDocument.SaveAs( - staging_dir, save_option, True - ) - files[extension] = "{} copy.{}".format( - os.path.splitext( - photoshop.app().ActiveDocument.Name - )[0], - extension + full_filename, save_option, True ) representations = [] diff --git a/pype/plugins/photoshop/publish/extract_review.py b/pype/plugins/photoshop/publish/extract_review.py index 8aebd1ea87..078ee53899 100644 --- a/pype/plugins/photoshop/publish/extract_review.py +++ b/pype/plugins/photoshop/publish/extract_review.py @@ -24,9 +24,10 @@ class ExtractReview(pype.api.Extractor): layers.append(image_instance[0]) # Perform extraction - output_image = "{} copy.jpg".format( + output_image = "{}.jpg".format( os.path.splitext(photoshop.app().ActiveDocument.Name)[0] ) + output_image_path = os.path.join(staging_dir, output_image) with photoshop.maintained_visibility(): # Hide all other layers. 
            extract_ids = [
@@ -39,7 +40,9 @@ class ExtractReview(pype.api.Extractor):
                 layer.Visible = False

         photoshop.app().ActiveDocument.SaveAs(
-            staging_dir, photoshop.com_objects.JPEGSaveOptions(), True
+            output_image_path,
+            photoshop.com_objects.JPEGSaveOptions(),
+            True
         )

         ffmpeg_path = pype.lib.get_ffmpeg_tool_path("ffmpeg")
@@ -56,7 +59,7 @@ class ExtractReview(pype.api.Extractor):
         thumbnail_path = os.path.join(staging_dir, "thumbnail.jpg")
         args = [
             ffmpeg_path, "-y",
-            "-i", os.path.join(staging_dir, output_image),
+            "-i", output_image_path,
             "-vf", "scale=300:-1",
             "-vframes", "1", thumbnail_path
@@ -77,7 +80,7 @@ class ExtractReview(pype.api.Extractor):
         mov_path = os.path.join(staging_dir, "review.mov")
         args = [
             ffmpeg_path, "-y",
-            "-i", os.path.join(staging_dir, output_image),
+            "-i", output_image_path,
             "-vf", "pad=ceil(iw/2)*2:ceil(ih/2)*2",
             "-vframes", "1", mov_path
diff --git a/pype/plugins/standalonepublisher/publish/collect_clip_instances.py b/pype/plugins/standalonepublisher/publish/collect_clip_instances.py
new file mode 100644
index 0000000000..3d9773d0b2
--- /dev/null
+++ b/pype/plugins/standalonepublisher/publish/collect_clip_instances.py
@@ -0,0 +1,187 @@
+import os
+import opentimelineio as otio
+import tempfile
+import pyblish.api
+from pype import lib as plib
+
+
+class CollectClipInstances(pyblish.api.InstancePlugin):
+    """Collect Clip instances from editorial's OTIO sequence"""
+
+    order = pyblish.api.CollectorOrder + 0.01
+    label = "Collect Clips"
+    hosts = ["standalonepublisher"]
+    families = ["editorial"]
+
+    # presets
+    subsets = {
+        "referenceMain": {
+            "family": "review",
+            "families": ["review", "ftrack"],
+            "ftrackFamily": "review",
+            "extension": ".mp4"
+        },
+        "audioMain": {
+            "family": "audio",
+            "families": ["ftrack"],
+            "ftrackFamily": "audio",
+            "extension": ".wav",
+            "version": 1
+        },
+        "shotMain": {
+            "family": "shot",
+            "families": []
+        }
+    }
+    timeline_frame_offset = None  # if 900000 for edl default then -900000
+    custom_start_frame = None
+
+    def process(self, instance):
+        staging_dir = os.path.normpath(
+            tempfile.mkdtemp(prefix="pyblish_tmp_")
+        )
+        # get context
+        context = instance.context
+
+        # attribute for checking duplicates during creation
+        if not context.data.get("assetNameCheck"):
+            context.data["assetNameCheck"] = list()
+
+        # create asset_names conversion table
+        if not context.data.get("assetsShared"):
+            context.data["assetsShared"] = dict()
+
+        # get timeline otio data
+        timeline = instance.data["otio_timeline"]
+        fps = plib.get_asset()["data"]["fps"]
+
+        tracks = timeline.each_child(
+            descended_from_type=otio.schema.track.Track
+        )
+
+        # get data from avalon
+        asset_entity = instance.context.data["assetEntity"]
+        asset_data = asset_entity["data"]
+        asset_name = asset_entity["name"]
+
+        # Timeline data.
+        handle_start = int(asset_data["handleStart"])
+        handle_end = int(asset_data["handleEnd"])
+
+        instances = []
+        for track in tracks:
+            try:
+                track_start_frame = (
+                    abs(track.source_range.start_time.value)
+                )
+            except AttributeError:
+                track_start_frame = 0
+
+            for clip in track.each_child():
+                if clip.name is None:
+                    continue
+
+                # skip all generators like black empty
+                if isinstance(
+                        clip.media_reference,
+                        otio.schema.GeneratorReference):
+                    continue
+
+                # Transitions are ignored, because Clips have the full frame
+                # range.
+                if isinstance(clip, otio.schema.transition.Transition):
+                    continue
+
+                # basic unique asset name
+                clip_name = os.path.splitext(clip.name)[0].lower()
+                name = f"{asset_name.split('_')[0]}_{clip_name}"
+
+                if name not in context.data["assetNameCheck"]:
+                    context.data["assetNameCheck"].append(name)
+                else:
+                    self.log.warning(f"duplicate shot name: {name}")
+
+                # frame ranges data
+                clip_in = clip.range_in_parent().start_time.value
+                clip_out = clip.range_in_parent().end_time_inclusive().value
+
+                # add offset in case there is any
+                if self.timeline_frame_offset:
+                    clip_in += self.timeline_frame_offset
+                    clip_out += self.timeline_frame_offset
+
+                clip_duration = clip.duration().value
+                self.log.info(f"clip duration: {clip_duration}")
+
+                source_in = clip.trimmed_range().start_time.value
+                source_out = source_in + clip_duration
+                source_in_h = source_in - handle_start
+                source_out_h = source_out + handle_end
+
+                clip_in_h = clip_in - handle_start
+                clip_out_h = clip_out + handle_end
+
+                # define starting frame for future shot
+                if self.custom_start_frame is not None:
+                    frame_start = self.custom_start_frame
+                else:
+                    frame_start = clip_in
+
+                frame_end = frame_start + (clip_duration - 1)
+
+                # create shared new instance data
+                instance_data = {
+                    "stagingDir": staging_dir,
+
+                    # shared attributes
+                    "asset": name,
+                    "assetShareName": name,
+                    "editorialVideoPath": instance.data[
+                        "editorialVideoPath"],
+                    "item": clip,
+
+                    # parent time properties
+                    "trackStartFrame": track_start_frame,
+                    "handleStart": handle_start,
+                    "handleEnd": handle_end,
+                    "fps": fps,
+
+                    # media source
+                    "sourceIn": source_in,
+                    "sourceOut": source_out,
+                    "sourceInH": source_in_h,
+                    "sourceOutH": source_out_h,
+
+                    # timeline
+                    "clipIn": clip_in,
+                    "clipOut": clip_out,
+                    "clipDuration": clip_duration,
+                    "clipInH": clip_in_h,
+                    "clipOutH": clip_out_h,
+                    "clipDurationH": clip_duration + handle_start + handle_end,
+
+                    # task
+                    "frameStart": frame_start,
+                    "frameEnd": frame_end,
+                    "frameStartH": frame_start - handle_start,
+                    "frameEndH": frame_end + handle_end
+                }
+
+                # adding subsets to context as instances
+                for subset, properties in self.subsets.items():
+                    # adding Review-able instance
+                    subset_instance_data = instance_data.copy()
+                    subset_instance_data.update(properties)
+                    subset_instance_data.update({
+                        # unique attributes
+                        "name": f"{subset}_{name}",
+                        "label": f"{subset} {name} ({clip_in}-{clip_out})",
+                        "subset": subset
+                    })
+                    instances.append(instance.context.create_instance(
+                        **subset_instance_data))
+
+                context.data["assetsShared"][name] = {
+                    "_clipIn": clip_in,
+                    "_clipOut": clip_out
+                }
diff --git a/pype/plugins/standalonepublisher/publish/collect_context.py b/pype/plugins/standalonepublisher/publish/collect_context.py
index 8bd4e609ab..4dcb25f927 100644
--- a/pype/plugins/standalonepublisher/publish/collect_context.py
+++ b/pype/plugins/standalonepublisher/publish/collect_context.py
@@ -36,18 +36,6 @@ class CollectContextDataSAPublish(pyblish.api.ContextPlugin):
     def process(self, context):
         # get json paths from os and load them
         io.install()
-        input_json_path = os.environ.get("SAPUBLISH_INPATH")
-        output_json_path = os.environ.get("SAPUBLISH_OUTPATH")
-
-        # context.data["stagingDir"] = os.path.dirname(input_json_path)
-        context.data["returnJsonPath"] = output_json_path
-
-        with open(input_json_path, "r") as f:
-            in_data = json.load(f)
-
-        asset_name = in_data["asset"]
-        family = in_data["family"]
-        subset = in_data["subset"]

         # Load presets
         presets = context.data.get("presets")
@@ -57,19 +45,92 @@ class CollectContextDataSAPublish(pyblish.api.ContextPlugin):
             presets = config.get_presets()

         project = io.find_one({"type": "project"})
-        asset = io.find_one({"type": "asset", "name": asset_name})
         context.data["project"] = project
+
+        # get json file context
+        input_json_path = os.environ.get("SAPUBLISH_INPATH")
+
+        with open(input_json_path, "r") as f:
+            in_data = json.load(f)
+        self.log.debug(f"_ in_data: {in_data}")
+
+        self.asset_name = in_data["asset"]
+        self.family = in_data["family"]
+        asset = io.find_one({"type": "asset", "name": self.asset_name})
         context.data["asset"] = asset

+        # exception for editorial
+        if "editorial" in self.family:
+            # avoid subset name duplication
+            if not context.data.get("subsetNamesCheck"):
+                context.data["subsetNamesCheck"] = list()
+
+            in_data_list = list()
+            representations = in_data.pop("representations")
+            for repre in representations:
+                in_data_copy = in_data.copy()
+                ext = repre["ext"][1:]
+                subset = in_data_copy["subset"]
+                # filter out non-editorial files
+                if ext not in ["edl", "xml"]:
+                    in_data_copy["representations"] = [repre]
+                    in_data_copy["subset"] = f"{ext}{subset}"
+                    in_data_list.append(in_data_copy)
+
+                files = repre.pop("files")
+
+                # delete unneeded keys
+                delete_repr_keys = ["frameStart", "frameEnd"]
+                for k in delete_repr_keys:
+                    if repre.get(k):
+                        repre.pop(k)
+
+                # convert files to list if it isn't one
+                if not isinstance(files, list):
+                    files = [files]
+
+                self.log.debug(f"_ files: {files}")
+                for index, f in enumerate(files):
+                    index += 1
+                    # copy dictionaries
+                    in_data_copy = in_data_copy.copy()
+                    repre_new = repre.copy()
+
+                    repre_new["files"] = f
+                    repre_new["name"] = ext
+                    in_data_copy["representations"] = [repre_new]
+
+                    # create subset name
+                    new_subset = f"{ext}{index}{subset}"
+                    while new_subset in context.data["subsetNamesCheck"]:
+                        index += 1
+                        new_subset = f"{ext}{index}{subset}"
+
+                    context.data["subsetNamesCheck"].append(new_subset)
+                    in_data_copy["subset"] = new_subset
+                    in_data_list.append(in_data_copy)
+                    self.log.info(f"Creating subset: {ext}{index}{subset}")
+        else:
+            in_data_list = [in_data]
+
+        self.log.debug(f"_ in_data_list: {in_data_list}")
+
+        for in_data in in_data_list:
+            # create instance
+            self.create_instance(context, in_data)
+
+    def create_instance(self, context, in_data):
+        subset = in_data["subset"]
+
+        instance = context.create_instance(subset)
         instance.data.update(
             {
                 "subset": subset,
-                "asset": asset_name,
+                "asset": self.asset_name,
                 "label": subset,
                 "name": subset,
-                "family": family,
+                "family": self.family,
                 "version": in_data.get("version", 1),
                 "frameStart": in_data.get("representations", [None])[0].get(
                     "frameStart", None
@@ -77,7 +138,7 @@ class CollectContextDataSAPublish(pyblish.api.ContextPlugin):
                 "frameEnd": in_data.get("representations", [None])[0].get(
                     "frameEnd", None
                 ),
-                "families": [family, "ftrack"],
+                "families": [self.family, "ftrack"],
             }
         )
         self.log.info("collected instance: {}".format(instance.data))
@@ -105,5 +166,3 @@ class CollectContextDataSAPublish(pyblish.api.ContextPlugin):
             self.log.debug("Adding review family")

             instance.data["representations"].append(component)
-
-        self.log.info(in_data)
diff --git a/pype/plugins/standalonepublisher/publish/collect_editorial.py b/pype/plugins/standalonepublisher/publish/collect_editorial.py
new file mode 100644
index 0000000000..a31125d9a8
--- /dev/null
+++ b/pype/plugins/standalonepublisher/publish/collect_editorial.py
@@ -0,0 +1,82 @@
+import os
+import opentimelineio as otio
+import pyblish.api
+from pype import lib as plib
+
+
+class OTIO_View(pyblish.api.Action):
+    """Currently disabled because OTIO requires PySide2. Issue on Qt.py:
+    https://github.com/PixarAnimationStudios/OpenTimelineIO/issues/289
+    """
+
+    label = "OTIO View"
+    icon = "wrench"
+    on = "failed"
+
+    def process(self, context, plugin):
+        instance = context[0]
+        representation = instance.data["representations"][0]
+        file_path = os.path.join(
+            representation["stagingDir"], representation["files"]
+        )
+        plib._subprocess(["otioview", file_path])
+
+
+class CollectEditorial(pyblish.api.InstancePlugin):
+    """Collect Editorial OTIO timeline"""
+
+    order = pyblish.api.CollectorOrder
+    label = "Collect Editorial"
+    hosts = ["standalonepublisher"]
+    families = ["editorial"]
+    actions = []
+
+    # presets
+    extensions = [".mov"]
+
+    def process(self, instance):
+        # remove context test attribute
+        if instance.context.data.get("subsetNamesCheck"):
+            instance.context.data.pop("subsetNamesCheck")
+
+        self.log.debug(f"__ instance: `{instance}`")
+        # get representation with editorial file
+        for representation in instance.data["representations"]:
+            self.log.debug(f"__ representation: `{representation}`")
+            # make editorial sequence file path
+            staging_dir = representation["stagingDir"]
+            file_path = os.path.join(
+                staging_dir, str(representation["files"])
+            )
+            instance.context.data["currentFile"] = file_path
+
+            # get video file path
+            video_path = None
+            basename = os.path.splitext(os.path.basename(file_path))[0]
+            for f in os.listdir(staging_dir):
+                self.log.debug(f"__ test file: `{f}`")
+                # filter out by not sharing the same name
+                if os.path.splitext(f)[0] not in basename:
+                    continue
+                # filter out by respected extensions
+                if os.path.splitext(f)[1] not in self.extensions:
+                    continue
+                video_path = os.path.join(
+                    staging_dir, f
+                )
+            self.log.debug(f"__ video_path: `{video_path}`")
+            instance.data["editorialVideoPath"] = video_path
+            instance.data["stagingDir"] = staging_dir
+
+            # get editorial sequence file into otio timeline object
+            extension = os.path.splitext(file_path)[1]
+            kwargs = {}
+            if extension == ".edl":
+                # EDL has no frame rate embedded so needs explicit
+                # frame rate else 24 is assumed.
+ kwargs["rate"] = plib.get_asset()["data"]["fps"] + + instance.data["otio_timeline"] = otio.adapters.read_from_file( + file_path, **kwargs) + + self.log.info(f"Added OTIO timeline from: `{file_path}`") diff --git a/pype/plugins/standalonepublisher/publish/collect_hierarchy.py b/pype/plugins/standalonepublisher/publish/collect_hierarchy.py new file mode 100644 index 0000000000..b5d37d0a6c --- /dev/null +++ b/pype/plugins/standalonepublisher/publish/collect_hierarchy.py @@ -0,0 +1,242 @@ +import pyblish.api +import re +import os +from avalon import io + + +class CollectHierarchyInstance(pyblish.api.ContextPlugin): + """Collecting hierarchy context from `parents` and `hierarchy` data + present in `clip` family instances coming from the request json data file + + It will add `hierarchical_context` into each instance for integrate + plugins to be able to create needed parents for the context if they + don't exist yet + """ + + label = "Collect Hierarchy Clip" + order = pyblish.api.CollectorOrder + 0.101 + hosts = ["standalonepublisher"] + families = ["shot"] + + # presets + shot_rename_template = None + shot_rename_search_patterns = None + shot_add_hierarchy = None + shot_add_tasks = None + + def convert_to_entity(self, key, value): + # ftrack compatible entity types + types = {"shot": "Shot", + "folder": "Folder", + "episode": "Episode", + "sequence": "Sequence", + "track": "Sequence", + } + # convert to entity type + entity_type = types.get(key, None) + + # return if any + if entity_type: + return {"entityType": entity_type, "entityName": value} + + def rename_with_hierarchy(self, instance): + search_text = "" + parent_name = instance.context.data["assetEntity"]["name"] + clip = instance.data["item"] + clip_name = os.path.splitext(clip.name)[0].lower() + if self.shot_rename_search_patterns: + search_text += parent_name + clip_name + instance.data["anatomyData"].update({"clip_name": clip_name}) + for type, pattern in self.shot_rename_search_patterns.items(): + p = re.compile(pattern) + match = p.findall(search_text) + if not match: + continue + instance.data["anatomyData"][type] = match[-1] + + # format to new shot name + instance.data["asset"] = self.shot_rename_template.format( + **instance.data["anatomyData"]) + + def create_hierarchy(self, instance): + parents = list() + hierarchy = "" + visual_hierarchy = [instance.context.data["assetEntity"]] + while True: + visual_parent = io.find_one( + {"_id": visual_hierarchy[-1]["data"]["visualParent"]} + ) + if visual_parent: + visual_hierarchy.append(visual_parent) + else: + visual_hierarchy.append( + instance.context.data["projectEntity"]) + break + + # add current selection context hierarchy from standalonepublisher + for entity in reversed(visual_hierarchy): + parents.append({ + "entityType": entity["data"]["entityType"], + "entityName": entity["name"] + }) + + if self.shot_add_hierarchy: + # fill the parents parts from presets + shot_add_hierarchy = self.shot_add_hierarchy.copy() + hierarchy_parents = shot_add_hierarchy["parents"].copy() + for parent in hierarchy_parents: + hierarchy_parents[parent] = hierarchy_parents[parent].format( + **instance.data["anatomyData"]) + prnt = self.convert_to_entity( + parent, hierarchy_parents[parent]) + parents.append(prnt) + + hierarchy = shot_add_hierarchy[ + "parents_path"].format(**hierarchy_parents) + + instance.data["hierarchy"] = hierarchy + instance.data["parents"] = parents + self.log.debug(f"Hierarchy: {hierarchy}") + + if self.shot_add_tasks: + instance.data["tasks"] = self.shot_add_tasks + else: 
+            instance.data["tasks"] = list()
+
+        # updating hierarchy data
+        instance.data["anatomyData"].update({
+            "asset": instance.data["asset"],
+            "task": "conform"
+        })
+
+    def process(self, context):
+        for instance in context:
+            if instance.data["family"] in self.families:
+                self.processing_instance(instance)
+
+    def processing_instance(self, instance):
+        self.log.info(f"_ instance: {instance}")
+        # adding anatomyData for burnins
+        instance.data["anatomyData"] = instance.context.data["anatomyData"]
+
+        asset = instance.data["asset"]
+        assets_shared = instance.context.data.get("assetsShared")
+
+        frame_start = instance.data["frameStart"]
+        frame_end = instance.data["frameEnd"]
+
+        if self.shot_rename_template:
+            self.rename_with_hierarchy(instance)
+
+        self.create_hierarchy(instance)
+
+        shot_name = instance.data["asset"]
+        self.log.debug(f"Shot Name: {shot_name}")
+
+        if instance.data["hierarchy"] not in shot_name:
+            self.log.warning("wrong parent")
+
+        label = f"{shot_name} ({frame_start}-{frame_end})"
+        instance.data["label"] = label
+
+        # dealing with shared attributes across instances
+        # with the same asset name
+        if assets_shared.get(asset):
+            asset_shared = assets_shared.get(asset)
+        else:
+            asset_shared = assets_shared[asset]
+
+        asset_shared.update({
+            "asset": instance.data["asset"],
+            "hierarchy": instance.data["hierarchy"],
+            "parents": instance.data["parents"],
+            "tasks": instance.data["tasks"]
+        })
+
+
+class CollectHierarchyContext(pyblish.api.ContextPlugin):
+    '''Collecting hierarchy from instances and building
+    context hierarchy tree
+    '''
+
+    label = "Collect Hierarchy Context"
+    order = pyblish.api.CollectorOrder + 0.102
+    hosts = ["standalonepublisher"]
+
+    def update_dict(self, ex_dict, new_dict):
+        for key in ex_dict:
+            if key in new_dict and isinstance(ex_dict[key], dict):
+                new_dict[key] = self.update_dict(ex_dict[key], new_dict[key])
+            else:
+                if ex_dict.get(key) and new_dict.get(key):
+                    continue
+                else:
+                    new_dict[key] = ex_dict[key]
+
+        return new_dict
+
+    def process(self, context):
+        instances = context
+        # create hierarchyContext attr if context has none
+        assets_shared = context.data.get("assetsShared")
+        final_context = {}
+        for instance in instances:
+            if 'editorial' in instance.data.get('family', ''):
+                continue
+            # inject assetsShared to other instances with
+            # the same `assetShareName` attribute in data
+            asset_shared_name = instance.data.get("assetShareName")
+
+            s_asset_data = assets_shared.get(asset_shared_name)
+            if s_asset_data:
+                instance.data["asset"] = s_asset_data["asset"]
+                instance.data["parents"] = s_asset_data["parents"]
+                instance.data["hierarchy"] = s_asset_data["hierarchy"]
+                instance.data["tasks"] = s_asset_data["tasks"]
+
+            # generate hierarchy data only on shot instances
+            if 'shot' not in instance.data.get('family', ''):
+                continue
+
+            name = instance.data["asset"]
+
+            # get handles
+            handle_start = int(instance.data["handleStart"])
+            handle_end = int(instance.data["handleEnd"])
+
+            in_info = {}
+
+            # suppose that all instances are Shots
+            in_info['entity_type'] = 'Shot'
+
+            # get custom attributes of the shot
+
+            in_info['custom_attributes'] = {
+                "handleStart": handle_start,
+                "handleEnd": handle_end,
+                "frameStart": instance.data["frameStart"],
+                "frameEnd": instance.data["frameEnd"],
+                "clipIn": instance.data["clipIn"],
+                "clipOut": instance.data["clipOut"],
+                'fps': instance.data["fps"]
+            }
+
+            in_info['tasks'] = instance.data['tasks']
+
+            parents = instance.data.get('parents', [])
+
+            actual = {name: in_info}
+
+            for parent in
reversed(parents): + next_dict = {} + parent_name = parent["entityName"] + next_dict[parent_name] = {} + next_dict[parent_name]["entity_type"] = parent["entityType"] + next_dict[parent_name]["childs"] = actual + actual = next_dict + + final_context = self.update_dict(final_context, actual) + + # adding hierarchy context to instance + context.data["hierarchyContext"] = final_context + self.log.info("Hierarchy instance collected") diff --git a/pype/plugins/standalonepublisher/publish/collect_shots.py b/pype/plugins/standalonepublisher/publish/collect_shots.py deleted file mode 100644 index 4f682bd808..0000000000 --- a/pype/plugins/standalonepublisher/publish/collect_shots.py +++ /dev/null @@ -1,147 +0,0 @@ -import os - -import opentimelineio as otio -from bson import json_util - -import pyblish.api -from pype import lib -from avalon import io - - -class OTIO_View(pyblish.api.Action): - """Currently disabled because OTIO requires PySide2. Issue on Qt.py: - https://github.com/PixarAnimationStudios/OpenTimelineIO/issues/289 - """ - - label = "OTIO View" - icon = "wrench" - on = "failed" - - def process(self, context, plugin): - instance = context[0] - representation = instance.data["representations"][0] - file_path = os.path.join( - representation["stagingDir"], representation["files"] - ) - lib._subprocess(["otioview", file_path]) - - -class CollectShots(pyblish.api.InstancePlugin): - """Collect Anatomy object into Context""" - - order = pyblish.api.CollectorOrder - label = "Collect Shots" - hosts = ["standalonepublisher"] - families = ["editorial"] - actions = [] - - def process(self, instance): - representation = instance.data["representations"][0] - file_path = os.path.join( - representation["stagingDir"], representation["files"] - ) - instance.context.data["editorialPath"] = file_path - - extension = os.path.splitext(file_path)[1][1:] - kwargs = {} - if extension == "edl": - # EDL has no frame rate embedded so needs explicit frame rate else - # 24 is asssumed. - kwargs["rate"] = lib.get_asset()["data"]["fps"] - - timeline = otio.adapters.read_from_file(file_path, **kwargs) - tracks = timeline.each_child( - descended_from_type=otio.schema.track.Track - ) - asset_entity = instance.context.data["assetEntity"] - asset_name = asset_entity["name"] - - # Ask user for sequence start. Usually 10:00:00:00. - sequence_start_frame = 900000 - - # Project specific prefix naming. This needs to be replaced with some - # options to be more flexible. - asset_name = asset_name.split("_")[0] - - instances = [] - for track in tracks: - track_start_frame = ( - abs(track.source_range.start_time.value) - sequence_start_frame - ) - for child in track.each_child(): - - # Transitions are ignored, because Clips have the full frame - # range. 
- if isinstance(child, otio.schema.transition.Transition): - continue - - if child.name is None: - continue - - # Hardcoded to expect a shot name of "[name].[extension]" - child_name = os.path.splitext(child.name)[0].lower() - name = f"{asset_name}_{child_name}" - - frame_start = track_start_frame - frame_start += child.range_in_parent().start_time.value - frame_end = track_start_frame - frame_end += child.range_in_parent().end_time_inclusive().value - - label = f"{name} (framerange: {frame_start}-{frame_end})" - instances.append( - instance.context.create_instance(**{ - "name": name, - "label": label, - "frameStart": frame_start, - "frameEnd": frame_end, - "family": "shot", - "families": ["review", "ftrack"], - "ftrackFamily": "review", - "asset": name, - "subset": "shotMain", - "representations": [], - "source": file_path - }) - ) - - visual_hierarchy = [asset_entity] - while True: - visual_parent = io.find_one( - {"_id": visual_hierarchy[-1]["data"]["visualParent"]} - ) - if visual_parent: - visual_hierarchy.append(visual_parent) - else: - visual_hierarchy.append(instance.context.data["projectEntity"]) - break - - context_hierarchy = None - for entity in visual_hierarchy: - childs = {} - if context_hierarchy: - name = context_hierarchy.pop("name") - childs = {name: context_hierarchy} - else: - for instance in instances: - childs[instance.data["name"]] = { - "childs": {}, - "entity_type": "Shot", - "custom_attributes": { - "frameStart": instance.data["frameStart"], - "frameEnd": instance.data["frameEnd"] - } - } - - context_hierarchy = { - "entity_type": entity["data"]["entityType"], - "childs": childs, - "name": entity["name"] - } - - name = context_hierarchy.pop("name") - context_hierarchy = {name: context_hierarchy} - instance.context.data["hierarchyContext"] = context_hierarchy - self.log.info( - "Hierarchy:\n" + - json_util.dumps(context_hierarchy, sort_keys=True, indent=4) - ) diff --git a/pype/plugins/standalonepublisher/publish/extract_shot.py b/pype/plugins/standalonepublisher/publish/extract_shot.py deleted file mode 100644 index d58ddfe8d5..0000000000 --- a/pype/plugins/standalonepublisher/publish/extract_shot.py +++ /dev/null @@ -1,96 +0,0 @@ -import os - -import clique - -import pype.api -import pype.lib - - -class ExtractShot(pype.api.Extractor): - """Extract shot "mov" and "wav" files.""" - - label = "Extract Shot" - hosts = ["standalonepublisher"] - families = ["shot"] - - def process(self, instance): - staging_dir = self.staging_dir(instance) - self.log.info("Outputting shot to {}".format(staging_dir)) - - editorial_path = instance.context.data["editorialPath"] - basename = os.path.splitext(os.path.basename(editorial_path))[0] - - # Generate mov file. 
-        fps = pype.lib.get_asset()["data"]["fps"]
-        input_path = os.path.join(
-            os.path.dirname(editorial_path), basename + ".mov"
-        )
-        shot_mov = os.path.join(staging_dir, instance.data["name"] + ".mov")
-        ffmpeg_path = pype.lib.get_ffmpeg_tool_path("ffmpeg")
-        args = [
-            ffmpeg_path,
-            "-ss", str(instance.data["frameStart"] / fps),
-            "-i", input_path,
-            "-t", str(
-                (instance.data["frameEnd"] - instance.data["frameStart"] + 1) /
-                fps
-            ),
-            "-crf", "18",
-            "-pix_fmt", "yuv420p",
-            shot_mov
-        ]
-        self.log.info(f"Processing: {args}")
-        output = pype.lib._subprocess(args)
-        self.log.info(output)
-
-        instance.data["representations"].append({
-            "name": "mov",
-            "ext": "mov",
-            "files": os.path.basename(shot_mov),
-            "stagingDir": staging_dir,
-            "frameStart": instance.data["frameStart"],
-            "frameEnd": instance.data["frameEnd"],
-            "fps": fps,
-            "thumbnail": True,
-            "tags": ["review", "ftrackreview"]
-        })
-
-        # Generate jpegs.
-        shot_jpegs = os.path.join(
-            staging_dir, instance.data["name"] + ".%04d.jpeg"
-        )
-        args = [ffmpeg_path, "-i", shot_mov, shot_jpegs]
-        self.log.info(f"Processing: {args}")
-        output = pype.lib._subprocess(args)
-        self.log.info(output)
-
-        collection = clique.Collection(
-            head=instance.data["name"] + ".", tail='.jpeg', padding=4
-        )
-        for f in os.listdir(staging_dir):
-            if collection.match(f):
-                collection.add(f)
-
-        instance.data["representations"].append({
-            "name": "jpeg",
-            "ext": "jpeg",
-            "files": list(collection),
-            "stagingDir": staging_dir
-        })
-
-        # Generate wav file.
-        shot_wav = os.path.join(staging_dir, instance.data["name"] + ".wav")
-        args = [ffmpeg_path, "-i", shot_mov, shot_wav]
-        self.log.info(f"Processing: {args}")
-        output = pype.lib._subprocess(args)
-        self.log.info(output)
-
-        instance.data["representations"].append({
-            "name": "wav",
-            "ext": "wav",
-            "files": os.path.basename(shot_wav),
-            "stagingDir": staging_dir
-        })
-
-        # Required for extract_review plugin (L222 onwards).
-        instance.data["fps"] = fps
diff --git a/pype/plugins/standalonepublisher/publish/extract_shot_data.py b/pype/plugins/standalonepublisher/publish/extract_shot_data.py
new file mode 100644
index 0000000000..6cbc2c7882
--- /dev/null
+++ b/pype/plugins/standalonepublisher/publish/extract_shot_data.py
@@ -0,0 +1,123 @@
+import os
+import clique
+import pype.api
+
+from pprint import pformat
+
+
+class ExtractShotData(pype.api.Extractor):
+    """Extract shot "mov" and "wav" files."""
+
+    label = "Extract Shot Data"
+    hosts = ["standalonepublisher"]
+    families = ["review", "audio"]
+
+    # presets
+    add_representation = None  # ".jpeg"
+
+    def process(self, instance):
+        representation = instance.data.get("representations")
+        self.log.debug(f"_ representation: {representation}")
+
+        if not representation:
+            instance.data["representations"] = list()
+
+        # get ffmpeg path
+        ffmpeg_path = pype.lib.get_ffmpeg_tool_path("ffmpeg")
+
+        # get staging dir
+        staging_dir = self.staging_dir(instance)
+        self.log.info("Staging dir set to: `{}`".format(staging_dir))
+
+        # Generate mov file.
+        fps = instance.data["fps"]
+        video_file_path = instance.data["editorialVideoPath"]
+        ext = instance.data.get("extension", ".mov")
+
+        clip_trimmed_path = os.path.join(
+            staging_dir, instance.data["name"] + ext)
+        #
+        # # check video file metadata
+        # input_data = plib.ffprobe_streams(video_file_path)[0]
+        # self.log.debug(f"__ input_data: `{input_data}`")
+
+        start = float(instance.data["clipInH"])
+        dur = float(instance.data["clipDurationH"])
+
+        if ext in ".wav":
+            start += 0.5
+
+        args = [
+            ffmpeg_path,
+            "-ss", str(start / fps),
+            "-i", f"\"{video_file_path}\"",
+            "-t", str(dur / fps)
+        ]
+        if ext in [".mov", ".mp4"]:
+            args.extend([
+                "-crf", "18",
+                "-pix_fmt", "yuv420p"])
+        elif ext in ".wav":
+            args.extend([
+                "-vn -acodec pcm_s16le",
+                "-ar 48000 -ac 2"
+            ])
+
+        # add output path
+        args.append(f"\"{clip_trimmed_path}\"")
+
+        self.log.info(f"Processing: {args}")
+        ffmpeg_args = " ".join(args)
+        output = pype.api.subprocess(ffmpeg_args)
+        self.log.info(output)
+
+        repre = {
+            "name": ext[1:],
+            "ext": ext[1:],
+            "files": os.path.basename(clip_trimmed_path),
+            "stagingDir": staging_dir,
+            "frameStart": int(instance.data["frameStart"]),
+            "frameEnd": int(instance.data["frameEnd"]),
+            "frameStartFtrack": int(instance.data["frameStartH"]),
+            "frameEndFtrack": int(instance.data["frameEndH"]),
+            "fps": fps,
+        }
+
+        if ext[1:] in ["mov", "mp4"]:
+            repre.update({
+                "thumbnail": True,
+                "tags": ["review", "ftrackreview", "delete"]})
+
+        instance.data["representations"].append(repre)
+
+        if self.add_representation:
+            # Generate jpegs.
+            clip_img_sequence = os.path.join(
+                staging_dir, instance.data["name"] + ".%04d.jpeg"
+            )
+            args = [
+                ffmpeg_path, "-i",
+                f"\"{clip_trimmed_path}\"",
+                f"\"{clip_img_sequence}\""
+            ]
+            self.log.info(f"Processing: {args}")
+            output = pype.lib._subprocess(args)
+            self.log.info(output)
+
+            # collect jpeg sequence if editorial data for publish
+            # are image sequence
+            collection = clique.Collection(
+                head=instance.data["name"] + ".", tail='.jpeg', padding=4
+            )
+            for f in os.listdir(staging_dir):
+                if collection.match(f):
+                    collection.add(f)
+
+            instance.data["representations"].append({
+                "name": "jpeg",
+                "ext": "jpeg",
+                "files": list(collection),
+                "stagingDir": staging_dir
+            })
+
+        self.log.debug(f"Instance data: {pformat(instance.data)}")
diff --git a/pype/plugins/standalonepublisher/publish/validate_editorial_resources.py b/pype/plugins/standalonepublisher/publish/validate_editorial_resources.py
index 961641b8fa..ebc449c4ec 100644
--- a/pype/plugins/standalonepublisher/publish/validate_editorial_resources.py
+++ b/pype/plugins/standalonepublisher/publish/validate_editorial_resources.py
@@ -9,20 +9,10 @@ class ValidateEditorialResources(pyblish.api.InstancePlugin):

     label = "Validate Editorial Resources"
     hosts = ["standalonepublisher"]
-    families = ["editorial"]
+    families = ["audio", "review"]

     order = pype.api.ValidateContentsOrder

     def process(self, instance):
-        representation = instance.data["representations"][0]
-        staging_dir = representation["stagingDir"]
-        basename = os.path.splitext(
-            os.path.basename(representation["files"])
-        )[0]
-
-        files = [x for x in os.listdir(staging_dir)]
-
-        # Check for "mov" file.
-        filename = basename + ".mov"
-        filepath = os.path.join(staging_dir, filename)
-        msg = f"Missing \"{filepath}\"."
-        assert filename in files, msg
+        check_file = instance.data["editorialVideoPath"]
+        msg = f"Missing \"{check_file}\"."
+ assert check_file, msg diff --git a/pype/plugins/standalonepublisher/publish/validate_shots.py b/pype/plugins/standalonepublisher/publish/validate_shot_duplicates.py similarity index 77% rename from pype/plugins/standalonepublisher/publish/validate_shots.py rename to pype/plugins/standalonepublisher/publish/validate_shot_duplicates.py index 3267af7685..04d2f3ea6c 100644 --- a/pype/plugins/standalonepublisher/publish/validate_shots.py +++ b/pype/plugins/standalonepublisher/publish/validate_shot_duplicates.py @@ -2,10 +2,10 @@ import pyblish.api import pype.api -class ValidateShots(pyblish.api.ContextPlugin): - """Validate there is a "mov" next to the editorial file.""" +class ValidateShotDuplicates(pyblish.api.ContextPlugin): + """Validating no duplicate names are in context.""" - label = "Validate Shots" + label = "Validate Shot Duplicates" hosts = ["standalonepublisher"] order = pype.api.ValidateContentsOrder diff --git a/pype/scripts/otio_burnin.py b/pype/scripts/otio_burnin.py index 104ff0255c..16e24757dd 100644 --- a/pype/scripts/otio_burnin.py +++ b/pype/scripts/otio_burnin.py @@ -528,6 +528,9 @@ def burnins_from_data( if pix_fmt: ffmpeg_args.append("-pix_fmt {}".format(pix_fmt)) + # Use group one (same as `-intra` argument, which is deprecated) + ffmpeg_args.append("-g 1") + ffmpeg_args_str = " ".join(ffmpeg_args) burnin.render( output_path, args=ffmpeg_args_str, overwrite=overwrite, **data diff --git a/pype/version.py b/pype/version.py index 7f6646a762..d70304e62c 100644 --- a/pype/version.py +++ b/pype/version.py @@ -1 +1 @@ -__version__ = "2.11.0" +__version__ = "2.11.4"
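
Reviewer note on the delivery-path change in `action_delivery.py`: when the user enters a custom location, the action now formats the template's rootless variant with a substitute root instead of the project root. A minimal sketch of the idea, assuming only that `Anatomy.format()` returns a result whose `rootless` attribute is a `{root}`/`{root[name]}`-style template string; the function name here is illustrative, not pype's API:

```python
def format_with_custom_root(rootless_template, location_path, root_names=None):
    # Mirrors the `format_dict` built in the action: a plain "root" value
    # when templates use `{root}`, or one entry per named root for
    # `{root[name]}` templates.
    if root_names is None:
        format_dict = {"root": location_path}
    else:
        format_dict = {"root": {name: location_path for name in root_names}}
    return rootless_template.format(**format_dict)

# e.g. format_with_custom_root(
#     "{root[delivery]}/PROJ/sh010/v001/sh010.exr",
#     "/mnt/client_drop",
#     root_names=["delivery"],
# ) -> "/mnt/client_drop/PROJ/sh010/v001/sh010.exr"
```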
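Reviewer note on `collect_clip_instances.py`: the collector walks the OTIO timeline track by track and clip by clip. A trimmed sketch of that traversal, using only the API calls that appear in this diff (the explicit `rate` is needed for EDLs, which carry no frame rate; the file name is a placeholder):

```python
import opentimelineio as otio

timeline = otio.adapters.read_from_file("editorial.edl", rate=25.0)
for track in timeline.each_child(descended_from_type=otio.schema.track.Track):
    for clip in track.each_child():
        # transitions are skipped; clips already carry the full frame range
        if clip.name is None or isinstance(
                clip, otio.schema.transition.Transition):
            continue
        clip_in = clip.range_in_parent().start_time.value          # timeline in
        clip_out = clip.range_in_parent().end_time_inclusive().value
        source_in = clip.trimmed_range().start_time.value          # media in
        print(clip.name, clip_in, clip_out, source_in)
```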
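Reviewer note on `extract_shot_data.py`: clip ranges are tracked in frames, so the extractor divides by the asset fps to get the seconds that ffmpeg's `-ss`/`-t` expect. A sketch of that conversion under the same assumptions, with plain `subprocess` standing in for `pype.api.subprocess`:

```python
import subprocess

def trim_clip(ffmpeg_path, video_path, out_path, clip_in_h, clip_duration_h, fps):
    """Cut the handle-extended range [clipInH, clipInH + clipDurationH)
    out of the source movie, matching the args built in the extractor."""
    args = [
        ffmpeg_path,
        "-ss", str(clip_in_h / fps),       # start offset in seconds
        "-i", video_path,
        "-t", str(clip_duration_h / fps),  # duration in seconds
        "-crf", "18",
        "-pix_fmt", "yuv420p",
        out_path,
    ]
    subprocess.check_output(args)
```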