Merge branch '2.x/develop' into feature/tiled-exr-to-scanline

This commit is contained in:
Ondrej Samohel 2020-10-01 13:58:02 +02:00
commit face948e3f
No known key found for this signature in database
GPG key ID: 8A29C663C672C2B7
309 changed files with 15423 additions and 1677 deletions

View file

@ -97,6 +97,7 @@ class IntegrateFtrackApi(pyblish.api.InstancePlugin):
except Exception:
tp, value, tb = sys.exc_info()
session.rollback()
session._configure_locations()
six.reraise(tp, value, tb)
def process(self, instance):
@ -178,6 +179,7 @@ class IntegrateFtrackApi(pyblish.api.InstancePlugin):
except Exception:
tp, value, tb = sys.exc_info()
session.rollback()
session._configure_locations()
six.reraise(tp, value, tb)
# Adding metadata
@ -228,6 +230,7 @@ class IntegrateFtrackApi(pyblish.api.InstancePlugin):
except Exception:
tp, value, tb = sys.exc_info()
session.rollback()
session._configure_locations()
six.reraise(tp, value, tb)
# Adding metadata
@ -242,6 +245,7 @@ class IntegrateFtrackApi(pyblish.api.InstancePlugin):
session.commit()
except Exception:
session.rollback()
session._configure_locations()
self.log.warning((
"Comment was not possible to set for AssetVersion"
"\"{0}\". Can't set it's value to: \"{1}\""
@ -258,6 +262,7 @@ class IntegrateFtrackApi(pyblish.api.InstancePlugin):
continue
except Exception:
session.rollback()
session._configure_locations()
self.log.warning((
"Custom Attrubute \"{0}\""
@ -272,6 +277,7 @@ class IntegrateFtrackApi(pyblish.api.InstancePlugin):
except Exception:
tp, value, tb = sys.exc_info()
session.rollback()
session._configure_locations()
six.reraise(tp, value, tb)
# Component
@ -316,6 +322,7 @@ class IntegrateFtrackApi(pyblish.api.InstancePlugin):
except Exception:
tp, value, tb = sys.exc_info()
session.rollback()
session._configure_locations()
six.reraise(tp, value, tb)
# Reset members in memory
@ -432,6 +439,7 @@ class IntegrateFtrackApi(pyblish.api.InstancePlugin):
except Exception:
tp, value, tb = sys.exc_info()
session.rollback()
session._configure_locations()
six.reraise(tp, value, tb)
if assetversion_entity not in used_asset_versions:
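
Every hunk above repeats the same recovery step: after a failed commit the session is rolled back, its locations are re-configured, and the original exception is re-raised with its traceback. A minimal standalone sketch of that pattern, assuming an ftrack_api-style session (the helper name is hypothetical):

import sys

import six


def commit_with_recovery(session):
    # sketch of the recurring pattern above; "session" is assumed to be
    # an ftrack_api session exposing commit(), rollback() and the
    # private _configure_locations()
    try:
        session.commit()
    except Exception:
        tp, value, tb = sys.exc_info()
        session.rollback()
        # rollback() resets cached server state, so location plugins
        # must be configured again before the session is reused
        session._configure_locations()
        six.reraise(tp, value, tb)
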

View file

@ -88,8 +88,14 @@ class IntegrateFtrackInstance(pyblish.api.InstancePlugin):
instance.data["frameEnd"] - instance.data["frameStart"]
)
if not comp.get('fps'):
comp['fps'] = instance.context.data['fps']
fps = comp.get('fps')
if fps is None:
fps = instance.data.get(
"fps", instance.context.data['fps']
)
comp['fps'] = fps
location = self.get_ftrack_location(
'ftrack.server', ft_session
)
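
The new lookup resolves fps in three steps: the component itself, then the instance, then the publishing context. A small sketch with plain dicts standing in for the pyblish objects (resolve_fps is a hypothetical helper):

def resolve_fps(comp, instance_data, context_data):
    # fallback order from the hunk above: component value first,
    # then the instance, then the publishing context
    fps = comp.get("fps")
    if fps is None:
        fps = instance_data.get("fps", context_data["fps"])
    comp["fps"] = fps
    return fps


print(resolve_fps({}, {"fps": 24}, {"fps": 25}))  # -> 24
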

View file

@ -145,4 +145,5 @@ class IntegrateFtrackNote(pyblish.api.InstancePlugin):
except Exception:
tp, value, tb = sys.exc_info()
session.rollback()
session._configure_locations()
six.reraise(tp, value, tb)

View file

@ -2,6 +2,7 @@ import sys
import six
import pyblish.api
from avalon import io
from pprint import pformat
try:
from pype.modules.ftrack.lib.avalon_sync import CUST_ATTR_AUTO_SYNC
@ -40,9 +41,14 @@ class IntegrateHierarchyToFtrack(pyblish.api.ContextPlugin):
def process(self, context):
self.context = context
if "hierarchyContext" not in context.data:
if "hierarchyContext" not in self.context.data:
return
hierarchy_context = self.context.data["hierarchyContext"]
self.log.debug(
f"__ hierarchy_context: `{pformat(hierarchy_context)}`")
self.session = self.context.data["ftrackSession"]
project_name = self.context.data["projectEntity"]["name"]
query = 'Project where full_name is "{}"'.format(project_name)
@ -55,7 +61,7 @@ class IntegrateHierarchyToFtrack(pyblish.api.ContextPlugin):
self.ft_project = None
input_data = context.data["hierarchyContext"]
input_data = hierarchy_context
# temporarily disable ftrack project's autosync
if auto_sync_state:
@ -128,6 +134,7 @@ class IntegrateHierarchyToFtrack(pyblish.api.ContextPlugin):
except Exception:
tp, value, tb = sys.exc_info()
self.session.rollback()
self.session._configure_locations()
six.reraise(tp, value, tb)
# TASKS
@ -156,6 +163,7 @@ class IntegrateHierarchyToFtrack(pyblish.api.ContextPlugin):
except Exception:
tp, value, tb = sys.exc_info()
self.session.rollback()
self.session._configure_locations()
six.reraise(tp, value, tb)
# Incoming links.
@ -165,8 +173,31 @@ class IntegrateHierarchyToFtrack(pyblish.api.ContextPlugin):
except Exception:
tp, value, tb = sys.exc_info()
self.session.rollback()
self.session._configure_locations()
six.reraise(tp, value, tb)
# Create notes.
user = self.session.query(
"User where username is \"{}\"".format(self.session.api_user)
).first()
if user:
for comment in entity_data.get("comments", []):
entity.create_note(comment, user)
else:
self.log.warning(
"Was not able to query current User {}".format(
self.session.api_user
)
)
try:
self.session.commit()
except Exception:
tp, value, tb = sys.exc_info()
self.session.rollback()
self.session._configure_locations()
six.reraise(tp, value, tb)
# Import children.
if 'childs' in entity_data:
self.import_to_ftrack(
entity_data['childs'], entity)
@ -180,6 +211,7 @@ class IntegrateHierarchyToFtrack(pyblish.api.ContextPlugin):
except Exception:
tp, value, tb = sys.exc_info()
self.session.rollback()
self.session._configure_locations()
six.reraise(tp, value, tb)
# Create new links.
@ -221,6 +253,7 @@ class IntegrateHierarchyToFtrack(pyblish.api.ContextPlugin):
except Exception:
tp, value, tb = sys.exc_info()
self.session.rollback()
self.session._configure_locations()
six.reraise(tp, value, tb)
return task
@ -235,6 +268,7 @@ class IntegrateHierarchyToFtrack(pyblish.api.ContextPlugin):
except Exception:
tp, value, tb = sys.exc_info()
self.session.rollback()
self.session._configure_locations()
six.reraise(tp, value, tb)
return entity
@ -249,7 +283,8 @@ class IntegrateHierarchyToFtrack(pyblish.api.ContextPlugin):
except Exception:
tp, value, tb = sys.exc_info()
self.session.rollback()
raise
self.session._configure_locations()
six.reraise(tp, value, tb)
def auto_sync_on(self, project):
@ -262,4 +297,5 @@ class IntegrateHierarchyToFtrack(pyblish.api.ContextPlugin):
except Exception:
tp, value, tb = sys.exc_info()
self.session.rollback()
raise
self.session._configure_locations()
six.reraise(tp, value, tb)

View file

@ -20,8 +20,8 @@ class CopyFile(api.Loader):
def copy_file_to_clipboard(path):
from avalon.vendor.Qt import QtCore, QtWidgets
app = QtWidgets.QApplication.instance()
assert app, "Must have running QApplication instance"
clipboard = QtWidgets.QApplication.clipboard()
assert clipboard, "Must have running QApplication instance"
# Build mime data for clipboard
data = QtCore.QMimeData()
@ -29,5 +29,4 @@ class CopyFile(api.Loader):
data.setUrls([url])
# Set to Clipboard
clipboard = app.clipboard()
clipboard.setMimeData(data)

View file

@ -19,11 +19,10 @@ class CopyFilePath(api.Loader):
@staticmethod
def copy_path_to_clipboard(path):
from avalon.vendor.Qt import QtCore, QtWidgets
from avalon.vendor.Qt import QtWidgets
app = QtWidgets.QApplication.instance()
assert app, "Must have running QApplication instance"
clipboard = QtWidgets.QApplication.clipboard()
assert clipboard, "Must have running QApplication instance"
# Set to Clipboard
clipboard = app.clipboard()
clipboard.setText(os.path.normpath(path))

View file

@ -25,7 +25,8 @@ class ExtractBurnin(pype.api.Extractor):
"shell",
"nukestudio",
"premiere",
"standalonepublisher"
"standalonepublisher",
"harmony"
]
optional = True
@ -194,11 +195,14 @@ class ExtractBurnin(pype.api.Extractor):
if "delete" in new_repre["tags"]:
new_repre["tags"].remove("delete")
# Update name and outputName to be able have multiple outputs
# Join previous "outputName" with filename suffix
new_name = "_".join([new_repre["outputName"], filename_suffix])
new_repre["name"] = new_name
new_repre["outputName"] = new_name
if len(repre_burnin_defs.keys()) > 1:
# Update name and outputName to be
# able to have multiple outputs in case of more burnin presets
# Join previous "outputName" with filename suffix
new_name = "_".join(
[new_repre["outputName"], filename_suffix])
new_repre["name"] = new_name
new_repre["outputName"] = new_name
# Prepare paths and files for process.
self.input_output_paths(new_repre, temp_data, filename_suffix)
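
The rename now happens only when more than one burnin preset is defined, so a single-preset representation keeps its original name. A sketch of the rule with made-up preset and representation names:

repre_burnin_defs = {"burnin": {}, "burnin_client": {}}  # two presets
new_repre = {"name": "h264", "outputName": "h264"}
filename_suffix = "burnin"

if len(repre_burnin_defs.keys()) > 1:
    # join previous "outputName" with the filename suffix, exactly as
    # in the hunk above, so each preset gets a distinct output
    new_name = "_".join([new_repre["outputName"], filename_suffix])
    new_repre["name"] = new_name
    new_repre["outputName"] = new_name

print(new_repre["name"])  # -> "h264_burnin"
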

View file

@ -10,6 +10,7 @@ class ExtractHierarchyToAvalon(pyblish.api.ContextPlugin):
families = ["clip", "shot"]
def process(self, context):
# processing starts here
if "hierarchyContext" not in context.data:
self.log.info("skipping IntegrateHierarchyToAvalon")
return
@ -17,7 +18,29 @@ class ExtractHierarchyToAvalon(pyblish.api.ContextPlugin):
if not io.Session:
io.install()
input_data = context.data["hierarchyContext"]
active_assets = []
hierarchy_context = context.data["hierarchyContext"]
hierarchy_assets = self._get_assets(hierarchy_context)
# filter only the active publishing instances
for instance in context:
if instance.data.get("publish") is False:
continue
if not instance.data.get("asset"):
continue
active_assets.append(instance.data["asset"])
# filter out only assets which are activated as instances
new_hierarchy_assets = {k: v for k, v in hierarchy_assets.items()
if k in active_assets}
# modify the hierarchy context so there are only filtered assets
self._set_assets(hierarchy_context, new_hierarchy_assets)
input_data = context.data["hierarchyContext"] = hierarchy_context
self.project = None
self.import_to_avalon(input_data)
@ -83,7 +106,6 @@ class ExtractHierarchyToAvalon(pyblish.api.ContextPlugin):
for task_name in new_tasks:
if task_name not in cur_entity_data["tasks"]:
cur_entity_data["tasks"].append(task_name)
cur_entity_data.update(data)
data = cur_entity_data
else:
@ -150,3 +172,41 @@ class ExtractHierarchyToAvalon(pyblish.api.ContextPlugin):
entity_id = io.insert_one(item).inserted_id
return io.find_one({"_id": entity_id})
def _get_assets(self, input_dict):
""" Returns only asset dictionary.
Usually the last part of deep dictionary which
is not having any children
"""
for key in input_dict.keys():
# check if child key is available
if input_dict[key].get("childs"):
# loop deeper
return self._get_assets(input_dict[key]["childs"])
else:
# give the dictionary with assets
return input_dict
def _set_assets(self, input_dict, new_assets=None):
""" Modify the hierarchy context dictionary.
It will replace the asset dictionary with only the filtered one.
"""
for key in input_dict.keys():
# check if child key is available
if input_dict[key].get("childs"):
# return if this is just for testing purposes and no
# new_assets argument is available
if not new_assets:
return True
# test for availability of deeper inner children
if self._set_assets(input_dict[key]["childs"]):
# if children are still available one level deeper
# then process further
self._set_assets(input_dict[key]["childs"], new_assets)
else:
# or just assign the filtered asset dictionary
input_dict[key]["childs"] = new_assets
else:
# the test didn't find more children in the input dictionary
return None
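
To make the filtering above concrete, here is a hypothetical hierarchyContext and an illustrative re-implementation of the _get_assets traversal:

def get_leaf_assets(input_dict):
    # illustrative re-implementation of _get_assets: descend through
    # "childs" keys until a level without children is reached
    for value in input_dict.values():
        if value.get("childs"):
            return get_leaf_assets(value["childs"])
    return input_dict


hierarchy = {  # hypothetical hierarchyContext
    "shots": {"childs": {
        "sc010": {"childs": {
            "sh0010": {"tasks": ["animation"]},
            "sh0020": {"tasks": ["comp"]},
        }}
    }}
}
print(get_leaf_assets(hierarchy))  # -> {"sh0010": ..., "sh0020": ...}
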

View file

@ -81,6 +81,11 @@ class ExtractJpegEXR(pyblish.api.InstancePlugin):
jpeg_items.append("-i {}".format(full_input_path))
# output arguments from presets
jpeg_items.extend(ffmpeg_args.get("output") or [])
# If its a movie file, we just want one frame.
if repre["ext"] == "mov":
jpeg_items.append("-vframes 1")
# output file
jpeg_items.append(full_output_path)
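
The extra flag keeps jpeg extraction from a movie cheap. The equivalent standalone call, sketched with placeholder file names:

import subprocess

# hypothetical paths; "-vframes 1" stops ffmpeg after a single output
# frame, so a whole movie is not transcoded just for one jpeg
cmd = ["ffmpeg", "-y", "-i", "review.mov", "-vframes", "1", "review.jpg"]
subprocess.run(cmd, check=True)
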

View file

@ -6,6 +6,8 @@ import copy
import clique
import errno
import six
import re
import shutil
from pymongo import DeleteOne, InsertOne
import pyblish.api
@ -680,6 +682,14 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
instance.data.get('subsetGroup')}}
)
# Update families on subset.
families = [instance.data["family"]]
families.extend(instance.data.get("families", []))
io.update_many(
{"type": "subset", "_id": io.ObjectId(subset["_id"])},
{"$set": {"data.families": families}}
)
return subset
def create_version(self, subset, version_number, data=None):
@ -952,21 +962,37 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
"""
if integrated_file_sizes:
for file_url, _file_size in integrated_file_sizes.items():
if not os.path.exists(file_url):
self.log.debug(
"File {} was not found.".format(file_url)
)
continue
try:
if mode == 'remove':
self.log.debug("Removing file ...{}".format(file_url))
self.log.debug("Removing file {}".format(file_url))
os.remove(file_url)
if mode == 'finalize':
self.log.debug("Renaming file ...{}".format(file_url))
import re
os.rename(file_url,
re.sub('\.{}$'.format(self.TMP_FILE_EXT),
'',
file_url)
)
new_name = re.sub(
r'\.{}$'.format(self.TMP_FILE_EXT),
'',
file_url
)
except FileNotFoundError:
pass # file not there, nothing to delete
if os.path.exists(new_name):
self.log.debug(
"Overwriting file {} to {}".format(
file_url, new_name
)
)
shutil.copy(file_url, new_name)
else:
self.log.debug(
"Renaming file {} to {}".format(
file_url, new_name
)
)
os.rename(file_url, new_name)
except OSError:
self.log.error("Cannot {} file {}".format(mode, file_url),
exc_info=True)
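
A condensed sketch of the new "finalize" branch, assuming TMP_FILE_EXT is a plain extension string (the helper name is made up):

import os
import re
import shutil

TMP_FILE_EXT = "tmp"  # assumed value of self.TMP_FILE_EXT


def finalize_file(file_url):
    # strip the trailing temp extension; copy over an existing target,
    # otherwise just rename, mirroring the branch above
    new_name = re.sub(r"\.{}$".format(TMP_FILE_EXT), "", file_url)
    if os.path.exists(new_name):
        shutil.copy(file_url, new_name)
    else:
        os.rename(file_url, new_name)
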

View file

@ -429,7 +429,7 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
"to render, don't know what to do "
"with them.")
col = rem[0]
_, ext = os.path.splitext(col)
ext = os.path.splitext(col)[1].lstrip(".")
else:
# but we really expect only one collection.
# Nothing else makes sense.
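
The one-line change above matters because os.path.splitext keeps the leading dot. A tiny sketch with a made-up collection name:

import os

col = "beauty.####.exr"  # hypothetical collection string
ext = os.path.splitext(col)[1].lstrip(".")
print(ext)  # -> "exr"; splitext alone would give ".exr"
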
@ -729,7 +729,8 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
"resolutionWidth": data.get("resolutionWidth", 1920),
"resolutionHeight": data.get("resolutionHeight", 1080),
"multipartExr": data.get("multipartExr", False),
"jobBatchName": data.get("jobBatchName", "")
"jobBatchName": data.get("jobBatchName", ""),
"review": data.get("review", True)
}
if "prerender" in instance.data["families"]:

View file

@ -0,0 +1,31 @@
import pyblish.api
import os
class ValidateIntent(pyblish.api.ContextPlugin):
"""Validate intent of the publish.
It is required to fill the intent of this publish. Check the log
for more details
"""
order = pyblish.api.ValidatorOrder
label = "Validate Intent"
# TODO: this should be off by default and only activated via config
tasks = ["animation"]
hosts = ["harmony"]
if os.environ.get("AVALON_TASK") not in tasks:
active = False
def process(self, context):
msg = (
"Please make sure that you select the intent of this publish."
)
intent = context.data.get("intent")
self.log.debug(intent)
assert intent, msg
intent_value = intent.get("value")
assert intent_value != "", msg

View file

@ -10,7 +10,7 @@ class ValidateVersion(pyblish.api.InstancePlugin):
order = pyblish.api.ValidatorOrder
label = "Validate Version"
hosts = ["nuke", "maya", "blender"]
hosts = ["nuke", "maya", "blender", "standalonepublisher"]
def process(self, instance):
version = instance.data.get("version")

View file

@ -31,7 +31,7 @@ func
class ImportAudioLoader(api.Loader):
"""Import audio."""
families = ["shot"]
families = ["shot", "audio"]
representations = ["wav"]
label = "Import Audio"

View file

@ -230,7 +230,7 @@ class ImageSequenceLoader(api.Loader):
"""Load images
Stores the imported asset in a container named after the asset.
"""
families = ["shot", "render", "image", "plate"]
families = ["shot", "render", "image", "plate", "reference"]
representations = ["jpeg", "png", "jpg"]
def load(self, context, name=None, namespace=None, data=None):

View file

@ -4,6 +4,7 @@ import subprocess
import pyblish.api
from avalon import harmony
import pype.lib
import clique
@ -43,6 +44,9 @@ class ExtractRender(pyblish.api.InstancePlugin):
frame_start = result[4]
frame_end = result[5]
audio_path = result[6]
if audio_path:
instance.data["audio"] = [{"filename": audio_path}]
instance.data["fps"] = frame_rate
# Set output path to temp folder.
path = tempfile.mkdtemp()
@ -87,17 +91,13 @@ class ExtractRender(pyblish.api.InstancePlugin):
if len(list(col)) > 1:
collection = col
else:
# assert len(collections) == 1, (
# "There should only be one image sequence in {}. Found: {}".format(
# path, len(collections)
# )
# )
collection = collections[0]
# Generate thumbnail.
thumbnail_path = os.path.join(path, "thumbnail.png")
ffmpeg_path = pype.lib.get_ffmpeg_tool_path("ffmpeg")
args = [
"ffmpeg", "-y",
ffmpeg_path, "-y",
"-i", os.path.join(path, list(collections[0])[0]),
"-vf", "scale=300:-1",
"-vframes", "1",
@ -117,57 +117,17 @@ class ExtractRender(pyblish.api.InstancePlugin):
self.log.debug(output.decode("utf-8"))
# Generate mov.
mov_path = os.path.join(path, instance.data["name"] + ".mov")
if os.path.isfile(audio_path):
args = [
"ffmpeg", "-y",
"-i", audio_path,
"-i",
os.path.join(path, collection.head + "%04d" + collection.tail),
mov_path
]
else:
args = [
"ffmpeg", "-y",
"-i",
os.path.join(path, collection.head + "%04d" + collection.tail),
mov_path
]
process = subprocess.Popen(
args,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
stdin=subprocess.PIPE
)
output = process.communicate()[0]
if process.returncode != 0:
raise ValueError(output.decode("utf-8"))
self.log.debug(output.decode("utf-8"))
# Generate representations.
extension = collection.tail[1:]
representation = {
"name": extension,
"ext": extension,
"files": list(collection),
"stagingDir": path
}
movie = {
"name": "mov",
"ext": "mov",
"files": os.path.basename(mov_path),
"stagingDir": path,
"frameStart": frame_start,
"frameEnd": frame_end,
"fps": frame_rate,
"preview": True,
"tags": ["review", "ftrackreview"]
"tags": ["review"],
"fps": frame_rate
}
thumbnail = {
"name": "thumbnail",
"ext": "png",
@ -175,7 +135,7 @@ class ExtractRender(pyblish.api.InstancePlugin):
"stagingDir": path,
"tags": ["thumbnail"]
}
instance.data["representations"] = [representation, movie, thumbnail]
instance.data["representations"] = [representation, thumbnail]
# Required for extract_review plugin (L222 onwards).
instance.data["frameStart"] = frame_start

View file

@ -12,6 +12,7 @@ class CreateReview(avalon.maya.Creator):
icon = "video-camera"
defaults = ['Main']
keepImages = False
isolate = False
def __init__(self, *args, **kwargs):
super(CreateReview, self).__init__(*args, **kwargs)
@ -22,6 +23,7 @@ class CreateReview(avalon.maya.Creator):
for key, value in animation_data.items():
data[key] = value
data["isolate"] = self.isolate
data["keepImages"] = self.keepImages
self.data = data

View file

@ -1,7 +1,7 @@
from maya import cmds, mel
import pymel.core as pc
from avalon import api
from avalon import api, io
from avalon.maya.pipeline import containerise
from avalon.maya import lib
@ -58,6 +58,13 @@ class AudioLoader(api.Loader):
type="string"
)
# Set frame range.
version = io.find_one({"_id": representation["parent"]})
subset = io.find_one({"_id": version["parent"]})
asset = io.find_one({"_id": subset["parent"]})
audio_node.sourceStart.set(1 - asset["data"]["frameStart"])
audio_node.sourceEnd.set(asset["data"]["frameEnd"])
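
Both Maya loaders in this commit resolve the asset by walking parent links through avalon's database. A sketch of that chain (the helper name is hypothetical):

from avalon import io


def asset_frame_range(representation):
    # walk representation -> version -> subset -> asset, as the
    # loaders above do, and return the asset's frame range
    version = io.find_one({"_id": representation["parent"]})
    subset = io.find_one({"_id": version["parent"]})
    asset = io.find_one({"_id": subset["parent"]})
    return asset["data"]["frameStart"], asset["data"]["frameEnd"]
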
def switch(self, container, representation):
self.update(container, representation)

View file

@ -1,7 +1,7 @@
import pymel.core as pc
import maya.cmds as cmds
from avalon import api
from avalon import api, io
from avalon.maya.pipeline import containerise
from avalon.maya import lib
from Qt import QtWidgets
@ -12,7 +12,7 @@ class ImagePlaneLoader(api.Loader):
families = ["plate", "render"]
label = "Create imagePlane on selected camera."
representations = ["mov", "exr", "preview"]
representations = ["mov", "exr", "preview", "png"]
icon = "image"
color = "orange"
@ -29,6 +29,8 @@ class ImagePlaneLoader(api.Loader):
# Getting camera from selection.
selection = pc.ls(selection=True)
camera = None
if len(selection) > 1:
QtWidgets.QMessageBox.critical(
None,
@ -39,25 +41,29 @@ class ImagePlaneLoader(api.Loader):
return
if len(selection) < 1:
QtWidgets.QMessageBox.critical(
result = QtWidgets.QMessageBox.critical(
None,
"Error!",
"No camera selected.",
QtWidgets.QMessageBox.Ok
"No camera selected. Do you want to create a camera?",
QtWidgets.QMessageBox.Ok,
QtWidgets.QMessageBox.Cancel
)
return
relatives = pc.listRelatives(selection[0], shapes=True)
if not pc.ls(relatives, type="camera"):
QtWidgets.QMessageBox.critical(
None,
"Error!",
"Selected node is not a camera.",
QtWidgets.QMessageBox.Ok
)
return
camera = selection[0]
if result == QtWidgets.QMessageBox.Ok:
camera = pc.createNode("camera")
else:
return
else:
relatives = pc.listRelatives(selection[0], shapes=True)
if pc.ls(relatives, type="camera"):
camera = selection[0]
else:
QtWidgets.QMessageBox.critical(
None,
"Error!",
"Selected node is not a camera.",
QtWidgets.QMessageBox.Ok
)
return
try:
camera.displayResolution.set(1)
@ -81,6 +87,7 @@ class ImagePlaneLoader(api.Loader):
image_plane_shape.frameOffset.set(1 - start_frame)
image_plane_shape.frameIn.set(start_frame)
image_plane_shape.frameOut.set(end_frame)
image_plane_shape.frameCache.set(end_frame)
image_plane_shape.useFrameExtension.set(1)
movie_representations = ["mov", "preview"]
@ -140,6 +147,17 @@ class ImagePlaneLoader(api.Loader):
type="string"
)
# Set frame range.
version = io.find_one({"_id": representation["parent"]})
subset = io.find_one({"_id": version["parent"]})
asset = io.find_one({"_id": subset["parent"]})
start_frame = asset["data"]["frameStart"]
end_frame = asset["data"]["frameEnd"]
image_plane_shape.frameOffset.set(1 - start_frame)
image_plane_shape.frameIn.set(start_frame)
image_plane_shape.frameOut.set(end_frame)
image_plane_shape.frameCache.set(end_frame)
def switch(self, container, representation):
self.update(container, representation)

View file

@ -64,6 +64,7 @@ class CollectReview(pyblish.api.InstancePlugin):
data['handles'] = instance.data.get('handles', None)
data['step'] = instance.data['step']
data['fps'] = instance.data['fps']
data["isolate"] = instance.data["isolate"]
cmds.setAttr(str(instance) + '.active', 1)
self.log.debug('data {}'.format(instance.context[i].data))
instance.context[i].data.update(data)

View file

@ -76,6 +76,11 @@ class ExtractPlayblast(pype.api.Extractor):
pm.currentTime(refreshFrameInt - 1, edit=True)
pm.currentTime(refreshFrameInt, edit=True)
# Isolate view is requested by having objects in the set besides a
# camera.
if instance.data.get("isolate"):
preset["isolate"] = instance.data["setMembers"]
with maintained_time():
filename = preset.get("filename", "%TEMP%")

View file

@ -77,6 +77,11 @@ class ExtractThumbnail(pype.api.Extractor):
pm.currentTime(refreshFrameInt - 1, edit=True)
pm.currentTime(refreshFrameInt, edit=True)
# Isolate view is requested by having objects in the set besides a
# camera.
if instance.data.get("isolate"):
preset["isolate"] = instance.data["setMembers"]
with maintained_time():
filename = preset.get("filename", "%TEMP%")

View file

@ -25,6 +25,7 @@ import re
import hashlib
from datetime import datetime
import itertools
from collections import OrderedDict
import clique
import requests
@ -67,7 +68,7 @@ payload_skeleton = {
def _format_tiles(
filename, index, tiles_x, tiles_y,
width, height, prefix, origin="blc"):
width, height, prefix):
"""Generate tile entries for Deadline tile job.
Returns two dictionaries - one that can be directly used in Deadline
@ -113,12 +114,14 @@ def _format_tiles(
"""
tile = 0
out = {"JobInfo": {}, "PluginInfo": {}}
cfg = {}
cfg = OrderedDict()
w_space = width / tiles_x
h_space = height / tiles_y
cfg["TilesCropped"] = "False"
for tile_x in range(1, tiles_x + 1):
for tile_y in range(1, tiles_y + 1):
for tile_y in reversed(range(1, tiles_y + 1)):
tile_prefix = "_tile_{}x{}_{}x{}_".format(
tile_x, tile_y,
tiles_x,
@ -143,14 +146,13 @@ def _format_tiles(
cfg["Tile{}".format(tile)] = new_filename
cfg["Tile{}Tile".format(tile)] = new_filename
cfg["Tile{}FileName".format(tile)] = new_filename
cfg["Tile{}X".format(tile)] = (tile_x - 1) * w_space
if origin == "blc":
cfg["Tile{}Y".format(tile)] = (tile_y - 1) * h_space
else:
cfg["Tile{}Y".format(tile)] = int(height) - ((tile_y - 1) * h_space) # noqa: E501
cfg["Tile{}Width".format(tile)] = tile_x * w_space
cfg["Tile{}Height".format(tile)] = tile_y * h_space
cfg["Tile{}Y".format(tile)] = int(height) - (tile_y * h_space)
cfg["Tile{}Width".format(tile)] = w_space
cfg["Tile{}Height".format(tile)] = h_space
tile += 1
return out, cfg
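
The tile loop now emits constant-size tiles with Y measured from the image top, replacing the previous origin switch. A standalone sketch of just the geometry (tile_regions is illustrative, not part of the plugin):

def tile_regions(width, height, tiles_x, tiles_y):
    # sketch of the reworked geometry: fixed-size tiles, Y measured
    # from the image top, rows emitted in reversed order
    w_space = width / tiles_x
    h_space = height / tiles_y
    for tile_x in range(1, tiles_x + 1):
        for tile_y in reversed(range(1, tiles_y + 1)):
            yield ((tile_x - 1) * w_space,
                   int(height) - (tile_y * h_space),
                   w_space,
                   h_space)


print(list(tile_regions(1920, 1080, 2, 2)))
# -> [(0.0, 0.0, 960.0, 540.0), (0.0, 540.0, 960.0, 540.0),
#     (960.0, 0.0, 960.0, 540.0), (960.0, 540.0, 960.0, 540.0)]
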
@ -538,7 +540,7 @@ class MayaSubmitDeadline(pyblish.api.InstancePlugin):
"AuxFiles": [],
"JobInfo": {
"BatchName": payload["JobInfo"]["BatchName"],
"Frames": 0,
"Frames": 1,
"Name": "{} - Tile Assembly Job".format(
payload["JobInfo"]["Name"]),
"OutputDirectory0":
@ -590,7 +592,7 @@ class MayaSubmitDeadline(pyblish.api.InstancePlugin):
payload["JobInfo"]["Name"],
frame,
instance.data.get("tilesX") * instance.data.get("tilesY") # noqa: E501
)
)
self.log.info(
"... preparing job {}".format(
new_payload["JobInfo"]["Name"]))

View file

@ -0,0 +1,233 @@
import re
import nuke
from avalon.vendor import qargparse
from avalon import api, io
from pype.hosts.nuke import presets
class LoadImage(api.Loader):
"""Load still image into Nuke"""
families = [
"render2d", "source", "plate",
"render", "prerender", "review",
"image"
]
representations = ["exr", "dpx", "jpg", "jpeg", "png", "psd"]
label = "Load Image"
order = -10
icon = "image"
color = "white"
options = [
qargparse.Integer(
"frame_number",
label="Frame Number",
default=int(nuke.root()["first_frame"].getValue()),
min=1,
max=999999,
help="What frame is reading from?"
)
]
def load(self, context, name, namespace, options):
from avalon.nuke import (
containerise,
viewer_update_and_undo_stop
)
self.log.info("__ options: `{}`".format(options))
frame_number = options.get("frame_number", 1)
version = context['version']
version_data = version.get("data", {})
repr_id = context["representation"]["_id"]
self.log.info("version_data: {}\n".format(version_data))
self.log.debug(
"Representation id `{}` ".format(repr_id))
last = first = int(frame_number)
# Fallback to asset name when namespace is None
if namespace is None:
namespace = context['asset']['name']
file = self.fname
if not file:
repr_id = context["representation"]["_id"]
self.log.warning(
"Representation id `{}` is failing to load".format(repr_id))
return
file = file.replace("\\", "/")
repr_cont = context["representation"]["context"]
frame = repr_cont.get("frame")
if frame:
padding = len(frame)
file = file.replace(
frame,
format(frame_number, "0{}".format(padding)))
read_name = "Read_{0}_{1}_{2}".format(
repr_cont["asset"],
repr_cont["subset"],
repr_cont["representation"])
# Create the Loader with the filename path set
with viewer_update_and_undo_stop():
r = nuke.createNode(
"Read",
"name {}".format(read_name))
r["file"].setValue(file)
# Set colorspace defined in version data
colorspace = context["version"]["data"].get("colorspace")
if colorspace:
r["colorspace"].setValue(str(colorspace))
# load nuke presets for Read's colorspace
read_clrs_presets = presets.get_colorspace_preset().get(
"nuke", {}).get("read", {})
# check if any colorspace preset for read is matching
preset_clrsp = next((read_clrs_presets[k]
for k in read_clrs_presets
if bool(re.search(k, file))),
None)
if preset_clrsp is not None:
r["colorspace"].setValue(str(preset_clrsp))
r["origfirst"].setValue(first)
r["first"].setValue(first)
r["origlast"].setValue(last)
r["last"].setValue(last)
# add additional metadata from the version to imprint Avalon knob
add_keys = ["source", "colorspace", "author", "fps", "version"]
data_imprint = {
"frameStart": first,
"frameEnd": last
}
for k in add_keys:
if k == 'version':
data_imprint.update({k: context["version"]['name']})
else:
data_imprint.update(
{k: context["version"]['data'].get(k, str(None))})
data_imprint.update({"objectName": read_name})
r["tile_color"].setValue(int("0x4ecd25ff", 16))
return containerise(r,
name=name,
namespace=namespace,
context=context,
loader=self.__class__.__name__,
data=data_imprint)
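
The frame substitution above relies on replacing the padded token from the representation context with the requested frame at the same padding. A sketch with made-up values:

file = "/renders/plateMain.0042.exr"  # hypothetical resolved path
frame = "0042"  # padded token from the representation context
frame_number = 17
padding = len(frame)
print(file.replace(frame, format(frame_number, "0{}".format(padding))))
# -> /renders/plateMain.0017.exr
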
def switch(self, container, representation):
self.update(container, representation)
def update(self, container, representation):
"""Update the Loader's path
Nuke automatically tries to reset some variables when changing
the loader's path to a new file. These automatic changes are to its
inputs:
"""
from avalon.nuke import (
update_container
)
node = nuke.toNode(container["objectName"])
frame_number = node["first"].value()
assert node.Class() == "Read", "Must be Read"
repr_cont = representation["context"]
file = api.get_representation_path(representation)
if not file:
repr_id = representation["_id"]
self.log.warning(
"Representation id `{}` is failing to load".format(repr_id))
return
file = file.replace("\\", "/")
frame = repr_cont.get("frame")
if frame:
padding = len(frame)
file = file.replace(
frame,
format(frame_number, "0{}".format(padding)))
# Get start frame from version data
version = io.find_one({
"type": "version",
"_id": representation["parent"]
})
# get all versions in list
versions = io.find({
"type": "version",
"parent": version["parent"]
}).distinct('name')
max_version = max(versions)
version_data = version.get("data", {})
last = first = int(frame_number)
# Set the global in to the start frame of the sequence
node["origfirst"].setValue(first)
node["first"].setValue(first)
node["origlast"].setValue(last)
node["last"].setValue(last)
updated_dict = {}
updated_dict.update({
"representation": str(representation["_id"]),
"frameStart": str(first),
"frameEnd": str(last),
"version": str(version.get("name")),
"colorspace": version_data.get("colorspace"),
"source": version_data.get("source"),
"fps": str(version_data.get("fps")),
"author": version_data.get("author"),
"outputDir": version_data.get("outputDir"),
})
# change color of node
if version.get("name") not in [max_version]:
node["tile_color"].setValue(int("0xd84f20ff", 16))
else:
node["tile_color"].setValue(int("0x4ecd25ff", 16))
# Update the imprinted representation
update_container(
node,
updated_dict
)
self.log.info("udated to version: {}".format(version.get("name")))
def remove(self, container):
from avalon.nuke import viewer_update_and_undo_stop
node = nuke.toNode(container['objectName'])
assert node.Class() == "Read", "Must be Read"
with viewer_update_and_undo_stop():
nuke.delete(node)

View file

@ -120,12 +120,12 @@ class LoadSequence(api.Loader):
if "#" not in file:
frame = repr_cont.get("frame")
padding = len(frame)
file = file.replace(frame, "#"*padding)
file = file.replace(frame, "#" * padding)
read_name = "Read_{0}_{1}_{2}".format(
repr_cont["asset"],
repr_cont["subset"],
repr_cont["representation"])
repr_cont["asset"],
repr_cont["subset"],
repr_cont["representation"])
# Create the Loader with the filename path set
with viewer_update_and_undo_stop():
@ -250,7 +250,7 @@ class LoadSequence(api.Loader):
if "#" not in file:
frame = repr_cont.get("frame")
padding = len(frame)
file = file.replace(frame, "#"*padding)
file = file.replace(frame, "#" * padding)
# Get start frame from version data
version = io.find_one({
@ -276,10 +276,10 @@ class LoadSequence(api.Loader):
last = version_data.get("frameEnd")
if first is None:
self.log.warning("Missing start frame for updated version"
"assuming starts at frame 0 for: "
"{} ({})".format(
node['name'].value(), representation))
self.log.warning(
"Missing start frame for updated version"
"assuming starts at frame 0 for: "
"{} ({})".format(node['name'].value(), representation))
first = 0
first -= self.handle_start

View file

@ -15,10 +15,12 @@ class ExtractThumbnail(pype.api.Extractor):
order = pyblish.api.ExtractorOrder + 0.01
label = "Extract Thumbnail"
families = ["review", "render.farm"]
families = ["review"]
hosts = ["nuke"]
def process(self, instance):
if "render.farm" in instance.data["families"]:
return
with anlib.maintained_selection():
self.log.debug("instance: {}".format(instance))

View file

@ -273,8 +273,6 @@ class CollectHierarchyContext(pyblish.api.ContextPlugin):
instance.data["clipOut"] -
instance.data["clipIn"])
self.log.debug(
"__ instance.data[parents]: {}".format(
instance.data["parents"]
@ -319,6 +317,7 @@ class CollectHierarchyContext(pyblish.api.ContextPlugin):
})
in_info['tasks'] = instance.data['tasks']
in_info["comments"] = instance.data.get("comments", [])
parents = instance.data.get('parents', [])
self.log.debug("__ in_info: {}".format(in_info))

View file

@ -40,11 +40,12 @@ class CollectShots(api.InstancePlugin):
data["name"] = data["subset"] + "_" + data["asset"]
data["label"] = (
"{} - {} - tasks:{} - assetbuilds:{}".format(
"{} - {} - tasks: {} - assetbuilds: {} - comments: {}".format(
data["asset"],
data["subset"],
data["tasks"],
[x["name"] for x in data.get("assetbuilds", [])]
[x["name"] for x in data.get("assetbuilds", [])],
len(data.get("comments", []))
)
)

View file

@ -17,7 +17,7 @@ class CollectClipTagComments(api.InstancePlugin):
for tag in instance.data["tags"]:
if tag["name"].lower() == "comment":
instance.data["comments"].append(
tag.metadata().dict()["tag.note"]
tag["metadata"]["tag.note"]
)
# Find tags on the source clip.

View file

@ -1,5 +1,6 @@
from avalon import api, photoshop
from avalon import api
from avalon.vendor import Qt
from avalon import photoshop
class CreateImage(api.Creator):
@ -13,11 +14,12 @@ class CreateImage(api.Creator):
groups = []
layers = []
create_group = False
group_constant = photoshop.get_com_objects().constants().psLayerSet
stub = photoshop.stub()
if (self.options or {}).get("useSelection"):
multiple_instances = False
selection = photoshop.get_selected_layers()
selection = stub.get_selected_layers()
self.log.info("selection {}".format(selection))
if len(selection) > 1:
# Ask user whether to create one image or image per selected
# item.
@ -40,19 +42,18 @@ class CreateImage(api.Creator):
if multiple_instances:
for item in selection:
if item.LayerType == group_constant:
if item.group:
groups.append(item)
else:
layers.append(item)
else:
group = photoshop.group_selected_layers()
group.Name = self.name
group = stub.group_selected_layers(self.name)
groups.append(group)
elif len(selection) == 1:
# One selected item. Use group if its a LayerSet (group), else
# create a new group.
if selection[0].LayerType == group_constant:
if selection[0].group:
groups.append(selection[0])
else:
layers.append(selection[0])
@ -63,16 +64,14 @@ class CreateImage(api.Creator):
create_group = True
if create_group:
group = photoshop.app().ActiveDocument.LayerSets.Add()
group.Name = self.name
group = stub.create_group(self.name)
groups.append(group)
for layer in layers:
photoshop.select_layers([layer])
group = photoshop.group_selected_layers()
group.Name = layer.Name
stub.select_layers([layer])
group = stub.group_selected_layers(layer.name)
groups.append(group)
for group in groups:
self.data.update({"subset": "image" + group.Name})
photoshop.imprint(group, self.data)
self.data.update({"subset": "image" + group.name})
stub.imprint(group, self.data)

View file

@ -1,5 +1,7 @@
from avalon import api, photoshop
stub = photoshop.stub()
class ImageLoader(api.Loader):
"""Load images
@ -12,7 +14,7 @@ class ImageLoader(api.Loader):
def load(self, context, name=None, namespace=None, data=None):
with photoshop.maintained_selection():
layer = photoshop.import_smart_object(self.fname)
layer = stub.import_smart_object(self.fname)
self[:] = [layer]
@ -28,11 +30,11 @@ class ImageLoader(api.Loader):
layer = container.pop("layer")
with photoshop.maintained_selection():
photoshop.replace_smart_object(
stub.replace_smart_object(
layer, api.get_representation_path(representation)
)
photoshop.imprint(
stub.imprint(
layer, {"representation": str(representation["_id"])}
)

View file

@ -1,6 +1,7 @@
import os
import pyblish.api
from avalon import photoshop
@ -13,5 +14,5 @@ class CollectCurrentFile(pyblish.api.ContextPlugin):
def process(self, context):
context.data["currentFile"] = os.path.normpath(
photoshop.app().ActiveDocument.FullName
photoshop.stub().get_active_document_full_name()
).replace("\\", "/")

View file

@ -1,9 +1,9 @@
import pythoncom
from avalon import photoshop
import pyblish.api
from avalon import photoshop
class CollectInstances(pyblish.api.ContextPlugin):
"""Gather instances by LayerSet and file metadata
@ -27,8 +27,11 @@ class CollectInstances(pyblish.api.ContextPlugin):
# can be.
pythoncom.CoInitialize()
for layer in photoshop.get_layers_in_document():
layer_data = photoshop.read(layer)
stub = photoshop.stub()
layers = stub.get_layers()
layers_meta = stub.get_layers_metadata()
for layer in layers:
layer_data = stub.read(layer, layers_meta)
# Skip layers without metadata.
if layer_data is None:
@ -38,18 +41,19 @@ class CollectInstances(pyblish.api.ContextPlugin):
if "container" in layer_data["id"]:
continue
child_layers = [*layer.Layers]
if not child_layers:
self.log.info("%s skipped, it was empty." % layer.Name)
continue
# child_layers = [*layer.Layers]
# self.log.debug("child_layers {}".format(child_layers))
# if not child_layers:
# self.log.info("%s skipped, it was empty." % layer.Name)
# continue
instance = context.create_instance(layer.Name)
instance = context.create_instance(layer.name)
instance.append(layer)
instance.data.update(layer_data)
instance.data["families"] = self.families_mapping[
layer_data["family"]
]
instance.data["publish"] = layer.Visible
instance.data["publish"] = layer.visible
# Produce diagnostic message for any graphical
# user interface interested in visualising it.
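
The Photoshop hunks in this commit consistently swap direct COM calls for the avalon photoshop stub. A minimal usage sketch, limited to stub methods that appear in these diffs:

from avalon import photoshop

stub = photoshop.stub()
for layer in stub.get_layers():
    layer_data = stub.read(layer)  # None when no metadata is imprinted
    if layer_data is None:
        continue
    # only touch visibility when it actually needs to change, to
    # limit round trips to the client, as the hunks above do
    if not layer.visible:
        stub.set_visible(layer.id, True)
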

View file

@ -21,35 +21,37 @@ class ExtractImage(pype.api.Extractor):
self.log.info("Outputting image to {}".format(staging_dir))
# Perform extraction
stub = photoshop.stub()
files = {}
with photoshop.maintained_selection():
self.log.info("Extracting %s" % str(list(instance)))
with photoshop.maintained_visibility():
# Hide all other layers.
extract_ids = [
x.id for x in photoshop.get_layers_in_layers([instance[0]])
]
for layer in photoshop.get_layers_in_document():
if layer.id not in extract_ids:
layer.Visible = False
extract_ids = set([ll.id for ll in stub.
get_layers_in_layers([instance[0]])])
save_options = {}
for layer in stub.get_layers():
# limit unnecessary calls to client
if layer.visible and layer.id not in extract_ids:
stub.set_visible(layer.id, False)
if not layer.visible and layer.id in extract_ids:
stub.set_visible(layer.id, True)
save_options = []
if "png" in self.formats:
save_options["png"] = photoshop.com_objects.PNGSaveOptions()
save_options.append('png')
if "jpg" in self.formats:
save_options["jpg"] = photoshop.com_objects.JPEGSaveOptions()
save_options.append('jpg')
file_basename = os.path.splitext(
photoshop.app().ActiveDocument.Name
stub.get_active_document_name()
)[0]
for extension, save_option in save_options.items():
for extension in save_options:
_filename = "{}.{}".format(file_basename, extension)
files[extension] = _filename
full_filename = os.path.join(staging_dir, _filename)
photoshop.app().ActiveDocument.SaveAs(
full_filename, save_option, True
)
stub.saveAs(full_filename, extension, True)
representations = []
for extension, filename in files.items():

View file

@ -13,10 +13,11 @@ class ExtractReview(pype.api.Extractor):
families = ["review"]
def process(self, instance):
staging_dir = self.staging_dir(instance)
self.log.info("Outputting image to {}".format(staging_dir))
stub = photoshop.stub()
layers = []
for image_instance in instance.context:
if image_instance.data["family"] != "image":
@ -25,25 +26,22 @@ class ExtractReview(pype.api.Extractor):
# Perform extraction
output_image = "{}.jpg".format(
os.path.splitext(photoshop.app().ActiveDocument.Name)[0]
os.path.splitext(stub.get_active_document_name())[0]
)
output_image_path = os.path.join(staging_dir, output_image)
with photoshop.maintained_visibility():
# Hide all other layers.
extract_ids = [
x.id for x in photoshop.get_layers_in_layers(layers)
]
for layer in photoshop.get_layers_in_document():
if layer.id in extract_ids:
layer.Visible = True
else:
layer.Visible = False
extract_ids = set([ll.id for ll in stub.
get_layers_in_layers(layers)])
self.log.info("extract_ids {}".format(extract_ids))
for layer in stub.get_layers():
# limit unnecessary calls to client
if layer.visible and layer.id not in extract_ids:
stub.set_visible(layer.id, False)
if not layer.visible and layer.id in extract_ids:
stub.set_visible(layer.id, True)
photoshop.app().ActiveDocument.SaveAs(
output_image_path,
photoshop.com_objects.JPEGSaveOptions(),
True
)
stub.saveAs(output_image_path, 'jpg', True)
ffmpeg_path = pype.lib.get_ffmpeg_tool_path("ffmpeg")
@ -66,8 +64,6 @@ class ExtractReview(pype.api.Extractor):
]
output = pype.lib._subprocess(args)
self.log.debug(output)
instance.data["representations"].append({
"name": "thumbnail",
"ext": "jpg",
@ -75,7 +71,6 @@ class ExtractReview(pype.api.Extractor):
"stagingDir": staging_dir,
"tags": ["thumbnail"]
})
# Generate mov.
mov_path = os.path.join(staging_dir, "review.mov")
args = [
@ -86,9 +81,7 @@ class ExtractReview(pype.api.Extractor):
mov_path
]
output = pype.lib._subprocess(args)
self.log.debug(output)
instance.data["representations"].append({
"name": "mov",
"ext": "mov",

View file

@ -11,4 +11,4 @@ class ExtractSaveScene(pype.api.Extractor):
families = ["workfile"]
def process(self, instance):
photoshop.app().ActiveDocument.Save()
photoshop.stub().save()

View file

@ -1,6 +1,7 @@
import pyblish.api
from pype.action import get_errored_plugins_from_data
from pype.lib import version_up
from avalon import photoshop
@ -24,6 +25,6 @@ class IncrementWorkfile(pyblish.api.InstancePlugin):
)
scene_path = version_up(instance.context.data["currentFile"])
photoshop.app().ActiveDocument.SaveAs(scene_path)
photoshop.stub().saveAs(scene_path, 'psd', True)
self.log.info("Incremented workfile to: {}".format(scene_path))

View file

@ -23,11 +23,12 @@ class ValidateInstanceAssetRepair(pyblish.api.Action):
# Apply pyblish.logic to get the instances for the plug-in
instances = pyblish.api.instances_by_plugin(failed, plugin)
stub = photoshop.stub()
for instance in instances:
data = photoshop.read(instance[0])
data = stub.read(instance[0])
data["asset"] = os.environ["AVALON_ASSET"]
photoshop.imprint(instance[0], data)
stub.imprint(instance[0], data)
class ValidateInstanceAsset(pyblish.api.InstancePlugin):

View file

@ -21,13 +21,14 @@ class ValidateNamingRepair(pyblish.api.Action):
# Apply pyblish.logic to get the instances for the plug-in
instances = pyblish.api.instances_by_plugin(failed, plugin)
stub = photoshop.stub()
for instance in instances:
self.log.info("validate_naming instance {}".format(instance))
name = instance.data["name"].replace(" ", "_")
instance[0].Name = name
data = photoshop.read(instance[0])
data = stub.read(instance[0])
data["subset"] = "image" + name
photoshop.imprint(instance[0], data)
stub.imprint(instance[0], data)
return True

View file

@ -0,0 +1,79 @@
from pprint import pformat
from pype.hosts import resolve
from pype.hosts.resolve import lib
class CreateShotClip(resolve.Creator):
"""Publishable clip"""
label = "Shot"
family = "clip"
icon = "film"
defaults = ["Main"]
gui_name = "Pype sequencial rename with hirerarchy"
gui_info = "Define sequencial rename and fill hierarchy data."
gui_inputs = {
"clipName": "{episode}{sequence}{shot}",
"hierarchy": "{folder}/{sequence}/{shot}",
"countFrom": 10,
"steps": 10,
"hierarchyData": {
"folder": "shots",
"shot": "sh####",
"track": "{track}",
"sequence": "sc010",
"episode": "ep01"
}
}
presets = None
def process(self):
# overwrite gui inputs from presets
for k, v in self.gui_inputs.items():
if isinstance(v, dict):
# nested dictionary (only one level allowed)
for _k, _v in v.items():
if self.presets.get(_k):
self.gui_inputs[k][_k] = self.presets[_k]
if self.presets.get(k):
self.gui_inputs[k] = self.presets[k]
# open widget for plugins inputs
widget = self.widget(self.gui_name, self.gui_info, self.gui_inputs)
widget.exec_()
print(f"__ selected_clips: {self.selected}")
if len(self.selected) < 1:
return
if not widget.result:
print("Operation aborted")
return
# sequence attrs
sq_frame_start = self.sequence.GetStartFrame()
sq_markers = self.sequence.GetMarkers()
print(f"__ sq_frame_start: {pformat(sq_frame_start)}")
print(f"__ seq_markers: {pformat(sq_markers)}")
# create media bin for compound clips (trackItems)
mp_folder = resolve.create_current_sequence_media_bin(self.sequence)
print(f"_ mp_folder: {mp_folder.GetName()}")
lib.rename_add = 0
for i, t_data in enumerate(self.selected):
lib.rename_index = i
# clear color after it is done
t_data["clip"]["item"].ClearClipColor()
# convert track item to timeline media pool item
resolve.create_compound_clip(
t_data,
mp_folder,
rename=True,
**dict(
{"presets": widget.result})
)

View file

@ -0,0 +1,162 @@
import os
from pyblish import api
from pype.hosts import resolve
import json
class CollectClips(api.ContextPlugin):
"""Collect all Track items selection."""
order = api.CollectorOrder + 0.01
label = "Collect Clips"
hosts = ["resolve"]
def process(self, context):
# create asset_names conversion table
if not context.data.get("assetsShared"):
self.log.debug("Created `assetsShared` in context")
context.data["assetsShared"] = dict()
projectdata = context.data["projectEntity"]["data"]
selection = resolve.get_current_track_items(
filter=True, selecting_color="Pink")
for clip_data in selection:
data = dict()
# get basic objects form data
project = clip_data["project"]
sequence = clip_data["sequence"]
clip = clip_data["clip"]
# sequence attrs
sq_frame_start = sequence.GetStartFrame()
self.log.debug(f"sq_frame_start: {sq_frame_start}")
sq_markers = sequence.GetMarkers()
# get details of objects
clip_item = clip["item"]
track = clip_data["track"]
mp = project.GetMediaPool()
# get clip attributes
clip_metadata = resolve.get_pype_clip_metadata(clip_item)
clip_metadata = json.loads(clip_metadata)
self.log.debug(f"clip_metadata: {clip_metadata}")
compound_source_prop = clip_metadata["sourceProperties"]
self.log.debug(f"compound_source_prop: {compound_source_prop}")
asset_name = clip_item.GetName()
mp_item = clip_item.GetMediaPoolItem()
mp_prop = mp_item.GetClipProperty()
source_first = int(compound_source_prop["Start"])
source_last = int(compound_source_prop["End"])
source_duration = compound_source_prop["Frames"]
fps = float(mp_prop["FPS"])
self.log.debug(f"source_first: {source_first}")
self.log.debug(f"source_last: {source_last}")
self.log.debug(f"source_duration: {source_duration}")
self.log.debug(f"fps: {fps}")
source_path = os.path.normpath(
compound_source_prop["File Path"])
source_name = compound_source_prop["File Name"]
source_id = clip_metadata["sourceId"]
self.log.debug(f"source_path: {source_path}")
self.log.debug(f"source_name: {source_name}")
self.log.debug(f"source_id: {source_id}")
clip_left_offset = int(clip_item.GetLeftOffset())
clip_right_offset = int(clip_item.GetRightOffset())
self.log.debug(f"clip_left_offset: {clip_left_offset}")
self.log.debug(f"clip_right_offset: {clip_right_offset}")
# source in/out
source_in = int(source_first + clip_left_offset)
source_out = int(source_first + clip_right_offset)
self.log.debug(f"source_in: {source_in}")
self.log.debug(f"source_out: {source_out}")
clip_in = int(clip_item.GetStart() - sq_frame_start)
clip_out = int(clip_item.GetEnd() - sq_frame_start)
clip_duration = int(clip_item.GetDuration())
self.log.debug(f"clip_in: {clip_in}")
self.log.debug(f"clip_out: {clip_out}")
self.log.debug(f"clip_duration: {clip_duration}")
is_sequence = False
self.log.debug(
"__ assets_shared: {}".format(
context.data["assetsShared"]))
# Check for clips with the same range
# this is for testing if any vertically neighbouring
# clips have already been processed
clip_matching_with_range = next(
(k for k, v in context.data["assetsShared"].items()
if (v.get("_clipIn", 0) == clip_in)
and (v.get("_clipOut", 0) == clip_out)
), False)
# check if the clip name is the same in the matched
# vertically neighbouring clip
# if it is, it is correct, so reset the variable to False
# to avoid raising the wrong-name exception
if asset_name in str(clip_matching_with_range):
clip_matching_with_range = False
# raise the wrong-name exception if one was found
assert (not clip_matching_with_range), (
"matching clip: {asset}"
" timeline range ({clip_in}:{clip_out})"
" conflicting with {clip_matching_with_range}"
" >> rename any of clips to be the same as the other <<"
).format(
**locals())
if ("[" in source_name) and ("]" in source_name):
is_sequence = True
data.update({
"name": "_".join([
track["name"], asset_name, source_name]),
"item": clip_item,
"source": mp_item,
# "timecodeStart": str(source.timecodeStart()),
"timelineStart": sq_frame_start,
"sourcePath": source_path,
"sourceFileHead": source_name,
"isSequence": is_sequence,
"track": track["name"],
"trackIndex": track["index"],
"sourceFirst": source_first,
"sourceIn": source_in,
"sourceOut": source_out,
"mediaDuration": source_duration,
"clipIn": clip_in,
"clipOut": clip_out,
"clipDuration": clip_duration,
"asset": asset_name,
"subset": "plateMain",
"family": "clip",
"families": [],
"handleStart": projectdata.get("handleStart", 0),
"handleEnd": projectdata.get("handleEnd", 0)})
instance = context.create_instance(**data)
self.log.info("Created instance: {}".format(instance))
self.log.info("Created instance.data: {}".format(instance.data))
context.data["assetsShared"][asset_name] = {
"_clipIn": clip_in,
"_clipOut": clip_out
}
self.log.info(
"context.data[\"assetsShared\"]: {}".format(
context.data["assetsShared"]))
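
The duplicate-range guard can be read in isolation: two vertically neighbouring clips may share a timeline range only if they also share a name. A sketch with invented clip names:

assets_shared = {"sh0010": {"_clipIn": 100, "_clipOut": 148}}


def conflicting_clip(asset_name, clip_in, clip_out):
    # sketch of the range check above: find a previously collected
    # clip occupying the same timeline range...
    match = next(
        (k for k, v in assets_shared.items()
         if v.get("_clipIn", 0) == clip_in
         and v.get("_clipOut", 0) == clip_out), False)
    # ...and ignore it when the names agree (same clip, other track)
    if asset_name in str(match):
        return False
    return match


print(conflicting_clip("sh0020", 100, 148))  # -> "sh0010": conflict
print(conflicting_clip("sh0010", 100, 148))  # -> False: same name
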

View file

@ -1,17 +0,0 @@
import pyblish.api
from pype.hosts.resolve.utils import get_resolve_module
class CollectProject(pyblish.api.ContextPlugin):
"""Collect Project object"""
order = pyblish.api.CollectorOrder - 0.1
label = "Collect Project"
hosts = ["resolve"]
def process(self, context):
resolve = get_resolve_module()
PM = resolve.GetProjectManager()
P = PM.GetCurrentProject()
self.log.info(P.GetName())

View file

@ -0,0 +1,29 @@
import os
import pyblish.api
from pype.hosts.resolve.utils import get_resolve_module
class CollectProject(pyblish.api.ContextPlugin):
"""Collect Project object"""
order = pyblish.api.CollectorOrder - 0.1
label = "Collect Project"
hosts = ["resolve"]
def process(self, context):
exported_project_ext = ".drp"
current_dir = os.getenv("AVALON_WORKDIR")
resolve = get_resolve_module()
PM = resolve.GetProjectManager()
P = PM.GetCurrentProject()
name = P.GetName()
fname = name + exported_project_ext
current_file = os.path.join(current_dir, fname)
normalised = os.path.normpath(current_file)
context.data["project"] = P
context.data["currentFile"] = normalised
self.log.info(name)
self.log.debug(normalised)

View file

@ -123,7 +123,7 @@ class CollectContextDataSAPublish(pyblish.api.ContextPlugin):
"label": subset,
"name": subset,
"family": in_data["family"],
"version": in_data.get("version", 1),
# "version": in_data.get("version", 1),
"frameStart": in_data.get("representations", [None])[0].get(
"frameStart", None
),

View file

@ -32,7 +32,7 @@ class CollectEditorial(pyblish.api.InstancePlugin):
actions = []
# presets
extensions = [".mov"]
extensions = [".mov", ".mp4"]
def process(self, instance):
# remove context test attribute

View file

@ -9,7 +9,7 @@ class CollectPsdInstances(pyblish.api.InstancePlugin):
"""
label = "Collect Psd Instances"
order = pyblish.api.CollectorOrder + 0.492
order = pyblish.api.CollectorOrder + 0.489
hosts = ["standalonepublisher"]
families = ["background_batch"]
@ -34,8 +34,6 @@ class CollectPsdInstances(pyblish.api.InstancePlugin):
context = instance.context
asset_data = instance.data["assetEntity"]
asset_name = instance.data["asset"]
anatomy_data = instance.data["anatomyData"]
for subset_name, subset_data in self.subsets.items():
instance_name = f"{asset_name}_{subset_name}"
task = subset_data.get("task", "background")
@ -55,16 +53,8 @@ class CollectPsdInstances(pyblish.api.InstancePlugin):
new_instance.data["label"] = f"{instance_name}"
new_instance.data["subset"] = subset_name
new_instance.data["task"] = task
# fix anatomy data
anatomy_data_new = copy.deepcopy(anatomy_data)
# updating hierarchy data
anatomy_data_new.update({
"asset": asset_data["name"],
"task": task,
"subset": subset_name
})
new_instance.data["anatomyData"] = anatomy_data_new
if subset_name in self.unchecked_by_default:
new_instance.data["publish"] = False