diff --git a/pype/ftrack/events/event_sync_to_avalon.py b/pype/ftrack/events/event_sync_to_avalon.py
index 53de588bcc..faf7539540 100644
--- a/pype/ftrack/events/event_sync_to_avalon.py
+++ b/pype/ftrack/events/event_sync_to_avalon.py
@@ -26,11 +26,7 @@ class SyncToAvalonEvent(BaseEvent):
 
     dbcon = DbConnector()
 
-    ignore_entTypes = [
-        "socialfeed", "socialnotification", "note",
-        "assetversion", "job", "user", "reviewsessionobject", "timer",
-        "timelog", "auth_userrole", "appointment", "notelabellink"
-    ]
+    interest_entTypes = ["show", "task"]
     ignore_ent_types = ["Milestone"]
     ignore_keys = ["statusid", "thumbid"]
 
@@ -137,9 +133,10 @@ class SyncToAvalonEvent(BaseEvent):
         if self._avalon_ents_by_id is None:
             self._avalon_ents_by_id = {}
             proj, ents = self.avalon_entities
-            self._avalon_ents_by_id[proj["_id"]] = proj
-            for ent in ents:
-                self._avalon_ents_by_id[ent["_id"]] = ent
+            if proj:
+                self._avalon_ents_by_id[proj["_id"]] = proj
+                for ent in ents:
+                    self._avalon_ents_by_id[ent["_id"]] = ent
         return self._avalon_ents_by_id
 
     @property
@@ -159,13 +156,14 @@ class SyncToAvalonEvent(BaseEvent):
         if self._avalon_ents_by_ftrack_id is None:
             self._avalon_ents_by_ftrack_id = {}
             proj, ents = self.avalon_entities
-            ftrack_id = proj["data"]["ftrackId"]
-            self._avalon_ents_by_ftrack_id[ftrack_id] = proj
-            for ent in ents:
-                ftrack_id = ent["data"].get("ftrackId")
-                if ftrack_id is None:
-                    continue
-                self._avalon_ents_by_ftrack_id[ftrack_id] = ent
+            if proj:
+                ftrack_id = proj["data"]["ftrackId"]
+                self._avalon_ents_by_ftrack_id[ftrack_id] = proj
+                for ent in ents:
+                    ftrack_id = ent["data"].get("ftrackId")
+                    if ftrack_id is None:
+                        continue
+                    self._avalon_ents_by_ftrack_id[ftrack_id] = ent
         return self._avalon_ents_by_ftrack_id
 
     @property
@@ -508,7 +506,7 @@ class SyncToAvalonEvent(BaseEvent):
         found_actions = set()
         for ent_info in entities_info:
             entityType = ent_info["entityType"]
-            if entityType in self.ignore_entTypes:
+            if entityType not in self.interest_entTypes:
                 continue
 
             entity_type = ent_info.get("entity_type")
diff --git a/pype/ftrack/ftrack_server/sub_event_status.py b/pype/ftrack/ftrack_server/sub_event_status.py
index 1a15a1f28d..d3e6a3d647 100644
--- a/pype/ftrack/ftrack_server/sub_event_status.py
+++ b/pype/ftrack/ftrack_server/sub_event_status.py
@@ -369,13 +369,6 @@ def main(args):
     # store socket connection object
     ObjectFactory.sock = sock
 
-    statuse_names = {
-        "main": "Main process",
-        "storer": "Event Storer",
-        "processor": "Event Processor"
-    }
-
-    ObjectFactory.status_factory = StatusFactory(statuse_names)
     ObjectFactory.status_factory["main"].update(server_info)
     _returncode = 0
     try:
@@ -429,6 +422,13 @@ if __name__ == "__main__":
     signal.signal(signal.SIGINT, signal_handler)
     signal.signal(signal.SIGTERM, signal_handler)
 
+    statuse_names = {
+        "main": "Main process",
+        "storer": "Event Storer",
+        "processor": "Event Processor"
+    }
+    ObjectFactory.status_factory = StatusFactory(statuse_names)
+
     checker_thread = OutputChecker()
     ObjectFactory.checker_thread = checker_thread
     checker_thread.start()
diff --git a/pype/plugins/global/publish/collect_avalon_entities.py b/pype/plugins/global/publish/collect_avalon_entities.py
index a429b3fc84..20899361c5 100644
--- a/pype/plugins/global/publish/collect_avalon_entities.py
+++ b/pype/plugins/global/publish/collect_avalon_entities.py
@@ -30,7 +30,7 @@ class CollectAvalonEntities(pyblish.api.ContextPlugin):
         assert project_entity, (
             "Project '{0}' was not found."
         ).format(project_name)
-        self.log.debug("Collected Project entity \"{}\"".format(project_entity))
+        self.log.debug("Collected Project \"{}\"".format(project_entity))
 
         asset_entity = io.find_one({
             "type": "asset",
@@ -41,7 +41,12 @@ class CollectAvalonEntities(pyblish.api.ContextPlugin):
             "No asset found by the name '{0}' in project '{1}'"
         ).format(asset_name, project_name)
 
-        self.log.debug("Collected Asset entity \"{}\"".format(asset_entity))
+        self.log.debug("Collected Asset \"{}\"".format(asset_entity))
 
         context.data["projectEntity"] = project_entity
         context.data["assetEntity"] = asset_entity
+
+        data = asset_entity['data']
+        context.data['handles'] = int(data.get("handles", 0))
+        context.data["handleStart"] = int(data.get("handleStart", 0))
+        context.data["handleEnd"] = int(data.get("handleEnd", 0))
diff --git a/pype/plugins/global/publish/collect_instance_anatomy_data.py b/pype/plugins/global/publish/collect_instance_anatomy_data.py
index 4afcac118c..06a25b7c8a 100644
--- a/pype/plugins/global/publish/collect_instance_anatomy_data.py
+++ b/pype/plugins/global/publish/collect_instance_anatomy_data.py
@@ -110,11 +110,13 @@ class CollectInstanceAnatomyData(pyblish.api.InstancePlugin):
 
         pixel_aspect = instance.data.get("pixelAspect")
         if pixel_aspect:
-            anatomy_data["pixel_aspect"] = float("{:0.2f}".format(pixel_aspect))
+            anatomy_data["pixel_aspect"] = float("{:0.2f}".format(
+                float(pixel_aspect)))
 
         fps = instance.data.get("fps")
-        if resolution_height:
-            anatomy_data["fps"] = float("{:0.2f}".format(fps))
+        if fps:
+            anatomy_data["fps"] = float("{:0.2f}".format(
+                float(fps)))
 
         instance.data["projectEntity"] = project_entity
         instance.data["assetEntity"] = asset_entity
diff --git a/pype/plugins/global/publish/extract_burnin.py b/pype/plugins/global/publish/extract_burnin.py
index d09ba91f72..faecbb47a7 100644
--- a/pype/plugins/global/publish/extract_burnin.py
+++ b/pype/plugins/global/publish/extract_burnin.py
@@ -4,7 +4,6 @@ import copy
 
 import pype.api
 import pyblish
-from pypeapp import config
 
 
 class ExtractBurnin(pype.api.Extractor):
@@ -26,13 +25,24 @@ class ExtractBurnin(pype.api.Extractor):
         if "representations" not in instance.data:
             raise RuntimeError("Burnin needs already created mov to work on.")
 
+        context_data = instance.context.data
+
         version = instance.data.get(
             'version', instance.context.data.get('version'))
         frame_start = int(instance.data.get("frameStart") or 0)
         frame_end = int(instance.data.get("frameEnd") or 1)
+        handle_start = instance.data.get("handleStart",
+                                         context_data.get("handleStart"))
+        handle_end = instance.data.get("handleEnd",
+                                       context_data.get("handleEnd"))
         duration = frame_end - frame_start + 1
 
         prep_data = copy.deepcopy(instance.data["anatomyData"])
+
+        if "slate.farm" in instance.data["families"]:
+            frame_start += 1
+            duration -= 1
+
         prep_data.update({
             "frame_start": frame_start,
             "frame_end": frame_end,
@@ -42,22 +52,6 @@ class ExtractBurnin(pype.api.Extractor):
             "intent": instance.context.data.get("intent", "")
         })
 
-        slate_frame_start = frame_start
-        slate_frame_end = frame_end
-        slate_duration = duration
-
-        # exception for slate workflow
-        if "slate" in instance.data["families"]:
-            slate_frame_start = frame_start - 1
-            slate_frame_end = frame_end
-            slate_duration = slate_frame_end - slate_frame_start + 1
-
-        prep_data.update({
-            "slate_frame_start": slate_frame_start,
-            "slate_frame_end": slate_frame_end,
-            "slate_duration": slate_duration
-        })
-
         # get anatomy project
         anatomy = instance.context.data['anatomy']
 
@@ -70,6 +64,9 @@ class ExtractBurnin(pype.api.Extractor):
 
             is_sequence = "sequence" in repre.get("tags", [])
 
+            # no handles switch from profile tags
+            no_handles = "no-handles" in repre.get("tags", [])
+
             stagingdir = repre["stagingDir"]
             filename = "{0}".format(repre["files"])
 
@@ -101,6 +98,41 @@ class ExtractBurnin(pype.api.Extractor):
             filled_anatomy = anatomy.format_all(_prep_data)
             _prep_data["anatomy"] = filled_anatomy.get_solved()
 
+            # copy frame range variables
+            frame_start_cp = frame_start
+            frame_end_cp = frame_end
+            duration_cp = duration
+
+            if no_handles:
+                frame_start_cp = frame_start + handle_start
+                frame_end_cp = frame_end - handle_end
+                duration_cp = frame_end_cp - frame_start_cp + 1
+                _prep_data.update({
+                    "frame_start": frame_start_cp,
+                    "frame_end": frame_end_cp,
+                    "duration": duration_cp,
+                })
+
+            # dealing with slates
+            slate_frame_start = frame_start_cp
+            slate_frame_end = frame_end_cp
+            slate_duration = duration_cp
+
+            # exception for slate workflow
+            if ("slate" in instance.data["families"]):
+                if "slate-frame" in repre.get("tags", []):
+                    slate_frame_start = frame_start_cp - 1
+                    slate_frame_end = frame_end_cp
+                    slate_duration = duration_cp + 1
+
+            self.log.debug("__1 slate_frame_start: {}".format(slate_frame_start))
+
+            _prep_data.update({
+                "slate_frame_start": slate_frame_start,
+                "slate_frame_end": slate_frame_end,
+                "slate_duration": slate_duration
+            })
+
             burnin_data = {
                 "input": full_movie_path.replace("\\", "/"),
                 "codec": repre.get("codec", []),
diff --git a/pype/plugins/global/publish/extract_review.py b/pype/plugins/global/publish/extract_review.py
index f5dba108c5..23e582edd2 100644
--- a/pype/plugins/global/publish/extract_review.py
+++ b/pype/plugins/global/publish/extract_review.py
@@ -12,7 +12,8 @@ class ExtractReview(pyblish.api.InstancePlugin):
     otherwise the representation is ignored.
 
     All new represetnations are created and encoded by ffmpeg following
-    presets found in `pype-config/presets/plugins/global/publish.json:ExtractReview:outputs`. To change the file extension
+    presets found in `pype-config/presets/plugins/global/
+    publish.json:ExtractReview:outputs`. To change the file extension
     filter values use preset's attributes `ext_filter`
     """
 
@@ -31,12 +32,22 @@ class ExtractReview(pyblish.api.InstancePlugin):
         output_profiles = self.outputs or {}
 
         inst_data = instance.data
-        fps = inst_data.get("fps")
-        start_frame = inst_data.get("frameStart")
+        context_data = instance.context.data
+        fps = float(inst_data.get("fps"))
+        frame_start = inst_data.get("frameStart")
+        frame_end = inst_data.get("frameEnd")
+        handle_start = inst_data.get("handleStart",
+                                     context_data.get("handleStart"))
+        handle_end = inst_data.get("handleEnd",
+                                   context_data.get("handleEnd"))
+        pixel_aspect = inst_data.get("pixelAspect", 1)
         resolution_width = inst_data.get("resolutionWidth", to_width)
         resolution_height = inst_data.get("resolutionHeight", to_height)
-        pixel_aspect = inst_data.get("pixelAspect", 1)
         self.log.debug("Families In: `{}`".format(inst_data["families"]))
+        self.log.debug("__ frame_start: {}".format(frame_start))
+        self.log.debug("__ frame_end: {}".format(frame_end))
+        self.log.debug("__ handle_start: {}".format(handle_start))
+        self.log.debug("__ handle_end: {}".format(handle_end))
 
         # get representation and loop them
         representations = inst_data["representations"]
@@ -73,6 +84,9 @@ class ExtractReview(pyblish.api.InstancePlugin):
                 is_sequence = ("sequence" in p_tags) and (ext in (
                     "png", "jpg", "jpeg"))
 
+                # no handles switch from profile tags
+                no_handles = "no-handles" in p_tags
+
                 self.log.debug("Profile name: {}".format(name))
 
                 if not ext:
@@ -142,6 +156,7 @@ class ExtractReview(pyblish.api.InstancePlugin):
                 self.log.info("new_tags: `{}`".format(new_tags))
 
                 input_args = []
+                output_args = []
 
                 # overrides output file
                 input_args.append("-y")
@@ -152,12 +167,23 @@ class ExtractReview(pyblish.api.InstancePlugin):
                 # necessary input data
                 # adds start arg only if image sequence
                 if isinstance(repre["files"], list):
+                    if frame_start != repre.get("detectedStart", frame_start):
+                        frame_start = repre.get("detectedStart")
+
+                    # exclude handle if no handles defined
+                    if no_handles:
+                        frame_start_no_handles = frame_start + handle_start
+                        frame_end_no_handles = frame_end - handle_end
 
-                    if start_frame != repre.get("detectedStart", start_frame):
-                        start_frame = repre.get("detectedStart")
                     input_args.append(
                         "-start_number {0} -framerate {1}".format(
-                            start_frame, fps))
+                            frame_start, fps))
+                else:
+                    if no_handles:
+                        start_sec = float(handle_start) / fps
+                        input_args.append("-ss {:0.2f}".format(start_sec))
+                        frame_start_no_handles = frame_start + handle_start
+                        frame_end_no_handles = frame_end - handle_end
 
                 input_args.append("-i {}".format(full_input_path))
 
@@ -191,7 +217,6 @@ class ExtractReview(pyblish.api.InstancePlugin):
                         ]
                     )
 
-                output_args = []
                 codec_args = profile.get('codec', [])
                 output_args.extend(codec_args)
                 # preset's output data
@@ -238,6 +263,13 @@ class ExtractReview(pyblish.api.InstancePlugin):
                     # In case audio is longer than video.
output_args.append("-shortest") + if no_handles: + duration_sec = float( + (frame_end - ( + frame_start + handle_start + ) + 1) - handle_end) / fps + output_args.append("-t {:0.2f}".format(duration_sec)) + # output filename output_args.append(full_output_path) @@ -351,14 +383,19 @@ class ExtractReview(pyblish.api.InstancePlugin): "codec": codec_args, "_profile": profile, "resolutionHeight": resolution_height, - "resolutionWidth": resolution_width, + "resolutionWidth": resolution_width }) if is_sequence: repre_new.update({ "stagingDir": stg_dir, "files": os.listdir(stg_dir) }) - + if no_handles: + repre_new.update({ + "outputName": name + "_noHandles", + "startFrameReview": frame_start_no_handles, + "endFrameReview": frame_end_no_handles + }) if repre_new.get('preview'): repre_new.pop("preview") if repre_new.get('thumbnail'): diff --git a/pype/plugins/global/publish/integrate_new.py b/pype/plugins/global/publish/integrate_new.py index 8935127e9e..1085f18d1a 100644 --- a/pype/plugins/global/publish/integrate_new.py +++ b/pype/plugins/global/publish/integrate_new.py @@ -81,6 +81,7 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): "image" "source", "assembly", + "textures", "fbx" ] exclude_families = ["clip"] diff --git a/pype/plugins/nuke/publish/collect_asset_info.py b/pype/plugins/nuke/publish/collect_asset_info.py deleted file mode 100644 index 8a8791ec36..0000000000 --- a/pype/plugins/nuke/publish/collect_asset_info.py +++ /dev/null @@ -1,25 +0,0 @@ -from avalon import api, io -import pyblish.api - - -class CollectAssetInfo(pyblish.api.ContextPlugin): - """Collect framerate.""" - - order = pyblish.api.CollectorOrder - label = "Collect Asset Info" - hosts = [ - "nuke", - "nukeassist" - ] - - def process(self, context): - asset_data = io.find_one({ - "type": "asset", - "name": api.Session["AVALON_ASSET"] - }) - self.log.info("asset_data: {}".format(asset_data)) - - context.data['handles'] = int(asset_data["data"].get("handles", 0)) - context.data["handleStart"] = int(asset_data["data"].get( - "handleStart", 0)) - context.data["handleEnd"] = int(asset_data["data"].get("handleEnd", 0)) diff --git a/pype/plugins/nuke/publish/collect_writes.py b/pype/plugins/nuke/publish/collect_writes.py index 76c2e8fa75..993b8574f5 100644 --- a/pype/plugins/nuke/publish/collect_writes.py +++ b/pype/plugins/nuke/publish/collect_writes.py @@ -41,6 +41,9 @@ class CollectNukeWrites(pyblish.api.InstancePlugin): handle_end = instance.context.data["handleEnd"] first_frame = int(nuke.root()["first_frame"].getValue()) last_frame = int(nuke.root()["last_frame"].getValue()) + frame_length = int( + last_frame - first_frame + 1 + ) if node["use_limit"].getValue(): handles = 0 @@ -81,8 +84,26 @@ class CollectNukeWrites(pyblish.api.InstancePlugin): collected_frames = [f for f in os.listdir(output_dir) if ext in f] if collected_frames: - representation['frameStart'] = "%0{}d".format( + collected_frames_len = len(collected_frames) + frame_start_str = "%0{}d".format( len(str(last_frame))) % first_frame + representation['frameStart'] = frame_start_str + + # in case slate is expected and not yet rendered + self.log.debug("_ frame_length: {}".format(frame_length)) + self.log.debug( + "_ collected_frames_len: {}".format( + collected_frames_len)) + # this will only run if slate frame is not already + # rendered from previews publishes + if "slate" in instance.data["families"] \ + and (frame_length == collected_frames_len): + frame_slate_str = "%0{}d".format( + len(str(last_frame))) % (first_frame - 1) + slate_frame = 
collected_frames[0].replace( + frame_start_str, frame_slate_str) + collected_frames.insert(0, slate_frame) + representation['files'] = collected_frames instance.data["representations"].append(representation) except Exception: diff --git a/pype/plugins/nuke/publish/extract_slate_frame.py b/pype/plugins/nuke/publish/extract_slate_frame.py index 4d43f38859..488f9bd31d 100644 --- a/pype/plugins/nuke/publish/extract_slate_frame.py +++ b/pype/plugins/nuke/publish/extract_slate_frame.py @@ -33,6 +33,7 @@ class ExtractSlateFrame(pype.api.Extractor): self.render_slate(instance) def render_slate(self, instance): + node_subset_name = instance.data.get("name", None) node = instance[0] # group node self.log.info("Creating staging dir...") @@ -47,6 +48,10 @@ class ExtractSlateFrame(pype.api.Extractor): self.log.info( "StagingDir `{0}`...".format(instance.data["stagingDir"])) + frame_length = int( + instance.data["frameEnd"] - instance.data["frameStart"] + 1 + ) + temporary_nodes = [] collection = instance.data.get("collection", None) @@ -56,10 +61,16 @@ class ExtractSlateFrame(pype.api.Extractor): "{head}{padding}{tail}")) fhead = collection.format("{head}") + collected_frames_len = int(len(collection.indexes)) + # get first and last frame first_frame = min(collection.indexes) - 1 - - if "slate" in instance.data["families"]: + self.log.info('frame_length: {}'.format(frame_length)) + self.log.info( + 'len(collection.indexes): {}'.format(collected_frames_len) + ) + if ("slate" in instance.data["families"]) \ + and (frame_length != collected_frames_len): first_frame += 1 last_frame = first_frame @@ -103,6 +114,8 @@ class ExtractSlateFrame(pype.api.Extractor): # Render frames nuke.execute(write_node.name(), int(first_frame), int(last_frame)) + # also render slate as sequence frame + nuke.execute(node_subset_name, int(first_frame), int(last_frame)) self.log.debug( "slate frame path: {}".format(instance.data["slateFrame"])) diff --git a/pype/plugins/nuke/publish/validate_rendered_frames.py b/pype/plugins/nuke/publish/validate_rendered_frames.py index 169ea1ecb5..8a8bf3cc5e 100644 --- a/pype/plugins/nuke/publish/validate_rendered_frames.py +++ b/pype/plugins/nuke/publish/validate_rendered_frames.py @@ -76,7 +76,8 @@ class ValidateRenderedFrames(pyblish.api.InstancePlugin): 'len(collection.indexes): {}'.format(collected_frames_len) ) - if "slate" in instance.data["families"]: + if ("slate" in instance.data["families"]) \ + and (frame_length != collected_frames_len): collected_frames_len -= 1 assert (collected_frames_len == frame_length), (
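Reviewer note (illustrative only, not part of the changeset): a minimal sketch of the "no-handles" arithmetic that the extract_review.py changes above apply when a profile carries the "no-handles" tag. Variable names mirror the patch; the helper function itself is hypothetical, and it assumes handles are given in frames and fps in frames per second. ffmpeg's -ss (input seek) and -t (output duration) flags are used the same way the patch uses them.

# Illustrative sketch only -- names mirror extract_review.py; this helper is
# hypothetical and not part of the patch.
def no_handles_args(frame_start, frame_end, handle_start, handle_end, fps):
    """Return ffmpeg input/output args and the review range without handles."""
    # content range once the leading/trailing handles are stripped
    frame_start_no_handles = frame_start + handle_start
    frame_end_no_handles = frame_end - handle_end

    # seek past the leading handle on the input, in seconds
    input_args = ["-ss {:0.2f}".format(float(handle_start) / fps)]

    # keep only the handle-free duration on the output, in seconds
    duration_sec = float(
        (frame_end - (frame_start + handle_start) + 1) - handle_end) / fps
    output_args = ["-t {:0.2f}".format(duration_sec)]

    return input_args, output_args, (frame_start_no_handles,
                                     frame_end_no_handles)

# e.g. frames 1001-1100 with 10-frame handles at 25.0 fps gives
# "-ss 0.40", "-t 3.20" and a review range of (1011, 1090).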