Merge branch 'develop' into feature/PYPE-666_anatomy_context_data

Jakub Jezek, 2020-02-14 16:53:35 +01:00
commit 2312dffbd1
34 changed files with 650 additions and 722 deletions

View file

@ -9,7 +9,7 @@ from pypeapp import config
import logging
log = logging.getLogger(__name__)
__version__ = "2.3.0"
__version__ = "2.5.0"
PACKAGE_DIR = os.path.dirname(__file__)
PLUGINS_DIR = os.path.join(PACKAGE_DIR, "plugins")

View file

@ -0,0 +1,313 @@
import os
import requests
import errno
from bson.objectid import ObjectId
from pype.ftrack import BaseAction
from pype.ftrack.lib import (
get_project_from_entity,
get_avalon_entities_for_assetversion
)
from pypeapp import Anatomy
from pype.ftrack.lib.io_nonsingleton import DbConnector
class StoreThumbnailsToAvalon(BaseAction):
# Action identifier
identifier = "store.thubmnail.to.avalon"
# Action label
label = "Pype Admin"
# Action variant
variant = "- Store Thumbnails to avalon"
# Action description
    description = 'Store AssetVersion thumbnails to Avalon'
# roles that are allowed to register this action
role_list = ["Pypeclub", "Administrator", "Project Manager"]
icon = '{}/ftrack/action_icons/PypeAdmin.svg'.format(
os.environ.get('PYPE_STATICS_SERVER', '')
)
thumbnail_key = "AVALON_THUMBNAIL_ROOT"
db_con = DbConnector()
def discover(self, session, entities, event):
for entity in entities:
if entity.entity_type.lower() == "assetversion":
return True
return False
def launch(self, session, entities, event):
# DEBUG LINE
# root_path = r"C:\Users\jakub.trllo\Desktop\Tests\ftrack_thumbnails"
thumbnail_roots = os.environ.get(self.thumbnail_key)
if not thumbnail_roots:
return {
"success": False,
"message": "`{}` environment is not set".format(
self.thumbnail_key
)
}
existing_thumbnail_root = None
for path in thumbnail_roots.split(os.pathsep):
if os.path.exists(path):
existing_thumbnail_root = path
break
if existing_thumbnail_root is None:
return {
"success": False,
"message": (
"Can't access paths, set in `{}` ({})"
).format(self.thumbnail_key, thumbnail_roots)
}
project = get_project_from_entity(entities[0])
project_name = project["full_name"]
anatomy = Anatomy(project_name)
if "publish" not in anatomy.templates:
msg = "Anatomy does not have set publish key!"
self.log.warning(msg)
return {
"success": False,
"message": msg
}
if "thumbnail" not in anatomy.templates["publish"]:
            msg = (
                "\"thumbnail\" template is not set in Anatomy"
                " for project \"{}\""
            ).format(project_name)
self.log.warning(msg)
return {
"success": False,
"message": msg
}
example_template_data = {
"_id": "ID",
"thumbnail_root": "THUBMNAIL_ROOT",
"thumbnail_type": "THUMBNAIL_TYPE",
"ext": ".EXT",
"project": {
"name": "PROJECT_NAME",
"code": "PROJECT_CODE"
},
"asset": "ASSET_NAME",
"subset": "SUBSET_NAME",
"version": "VERSION_NAME",
"hierarchy": "HIERARCHY"
}
tmp_filled = anatomy.format_all(example_template_data)
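        # format_all is assumed to return per-template results that expose
        # "solved", "missing_keys" and "invalid_types" (inferred from the
        # checks below)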
thumbnail_result = tmp_filled["publish"]["thumbnail"]
if not thumbnail_result.solved:
missing_keys = thumbnail_result.missing_keys
invalid_types = thumbnail_result.invalid_types
submsg = ""
if missing_keys:
submsg += "Missing keys: {}".format(", ".join(
["\"{}\"".format(key) for key in missing_keys]
))
if invalid_types:
items = []
for key, value in invalid_types.items():
items.append("{}{}".format(str(key), str(value)))
submsg += "Invalid types: {}".format(", ".join(items))
msg = (
"Thumbnail Anatomy template expects more keys than action"
" can offer. {}"
).format(submsg)
self.log.warning(msg)
return {
"success": False,
"message": msg
}
thumbnail_template = anatomy.templates["publish"]["thumbnail"]
self.db_con.install()
for entity in entities:
            # Skip if entity is not AssetVersion (should never happen, but...)
if entity.entity_type.lower() != "assetversion":
continue
            # Skip if AssetVersion doesn't have a thumbnail
thumbnail_ent = entity["thumbnail"]
if thumbnail_ent is None:
self.log.debug((
"Skipping. AssetVersion don't "
"have set thumbnail. {}"
).format(entity["id"]))
continue
avalon_ents_result = get_avalon_entities_for_assetversion(
entity, self.db_con
)
version_full_path = (
"Asset: \"{project_name}/{asset_path}\""
" | Subset: \"{subset_name}\""
" | Version: \"{version_name}\""
).format(**avalon_ents_result)
version = avalon_ents_result["version"]
if not version:
self.log.warning((
"AssetVersion does not have version in avalon. {}"
).format(version_full_path))
continue
thumbnail_id = version["data"].get("thumbnail_id")
if thumbnail_id:
self.log.info((
"AssetVersion skipped, already has thubmanil set. {}"
).format(version_full_path))
continue
# Get thumbnail extension
file_ext = thumbnail_ent["file_type"]
if not file_ext.startswith("."):
file_ext = ".{}".format(file_ext)
avalon_project = avalon_ents_result["project"]
avalon_asset = avalon_ents_result["asset"]
hierarchy = ""
parents = avalon_asset["data"].get("parents") or []
if parents:
hierarchy = "/".join(parents)
# Prepare anatomy template fill data
# 1. Create new id for thumbnail entity
thumbnail_id = ObjectId()
template_data = {
"_id": str(thumbnail_id),
"thumbnail_root": existing_thumbnail_root,
"thumbnail_type": "thumbnail",
"ext": file_ext,
"project": {
"name": avalon_project["name"],
"code": avalon_project["data"].get("code")
},
"asset": avalon_ents_result["asset_name"],
"subset": avalon_ents_result["subset_name"],
"version": avalon_ents_result["version_name"],
"hierarchy": hierarchy
}
anatomy_filled = anatomy.format(template_data)
thumbnail_path = anatomy_filled["publish"]["thumbnail"]
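            # "ext" is stored with a leading dot while the template may add
            # its own, which can yield ".." in the path; the next line
            # collapses it (rationale assumed, not stated in the commit)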
thumbnail_path = thumbnail_path.replace("..", ".")
thumbnail_path = os.path.normpath(thumbnail_path)
downloaded = False
for loc in (thumbnail_ent.get("component_locations") or []):
res_id = loc.get("resource_identifier")
if not res_id:
continue
                thumbnail_url = self.get_thumbnail_url(res_id)
                if self.download_file(thumbnail_url, thumbnail_path):
downloaded = True
break
if not downloaded:
self.log.warning(
"Could not download thumbnail for {}".format(
version_full_path
)
)
continue
# Clean template data from keys that are dynamic
template_data.pop("_id")
template_data.pop("thumbnail_root")
thumbnail_entity = {
"_id": thumbnail_id,
"type": "thumbnail",
"schema": "pype:thumbnail-1.0",
"data": {
"template": thumbnail_template,
"template_data": template_data
}
}
# Create thumbnail entity
self.db_con.insert_one(thumbnail_entity)
self.log.debug(
"Creating entity in database {}".format(str(thumbnail_entity))
)
# Set thumbnail id for version
self.db_con.update_one(
{"_id": version["_id"]},
{"$set": {"data.thumbnail_id": thumbnail_id}}
)
self.db_con.update_one(
{"_id": avalon_asset["_id"]},
{"$set": {"data.thumbnail_id": thumbnail_id}}
)
return True
def get_thumbnail_url(self, resource_identifier, size=None):
        # TODO: use ftrack_api method instead (find a way to use it)
url_string = (
u'{url}/component/thumbnail?id={id}&username={username}'
u'&apiKey={apiKey}'
)
url = url_string.format(
url=self.session.server_url,
id=resource_identifier,
username=self.session.api_user,
apiKey=self.session.api_key
)
if size:
url += u'&size={0}'.format(size)
return url
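    # Illustrative result (server, user and component id are placeholders,
    # not values from this commit):
    #   get_thumbnail_url("8d1f23ab", size=300)
    #   -> "https://studio.ftrackapp.com/component/thumbnail?id=8d1f23ab
    #       &username=jakub&apiKey=...&size=300"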
def download_file(self, source_url, dst_file_path):
dir_path = os.path.dirname(dst_file_path)
try:
os.makedirs(dir_path)
except OSError as exc:
if exc.errno != errno.EEXIST:
self.log.warning(
"Could not create folder: \"{}\"".format(dir_path)
)
return False
self.log.debug(
"Downloading file \"{}\" -> \"{}\"".format(
source_url, dst_file_path
)
)
file_open = open(dst_file_path, "wb")
try:
file_open.write(requests.get(source_url).content)
except Exception:
self.log.warning(
"Download of image `{}` failed.".format(source_url)
)
return False
finally:
file_open.close()
return True
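    # requests.get(source_url).content above buffers the whole image in
    # memory; for larger files a streamed variant is a common alternative.
    # A minimal sketch under that assumption (not part of this action):
    #   with requests.get(source_url, stream=True) as resp:
    #       resp.raise_for_status()
    #       with open(dst_file_path, "wb") as f:
    #           for chunk in resp.iter_content(chunk_size=8192):
    #               f.write(chunk)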
def register(session, plugins_presets={}):
StoreThumbnailsToAvalon(session, plugins_presets).register()
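# Hypothetical bootstrap for running the action standalone (standard
# ftrack_api event-hub usage; the session setup is an assumption):
#   import ftrack_api
#   session = ftrack_api.Session(auto_connect_event_hub=True)
#   register(session)
#   session.event_hub.wait()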

View file

@ -1643,7 +1643,7 @@ class SyncToAvalonEvent(BaseEvent):
new_name, "task", schema_patterns=self.regex_schemas
)
if not passed_regex:
-            self.regex_failed.append(ent_infos["entityId"])
+            self.regex_failed.append(ent_info["entityId"])
continue
if new_name not in self.task_changes_by_avalon_id[mongo_id]:

View file

@ -4,3 +4,8 @@ from .ftrack_app_handler import *
from .ftrack_event_handler import *
from .ftrack_action_handler import *
from .ftrack_base_handler import *
from .lib import (
get_project_from_entity,
get_avalon_entities_for_assetversion
)

pype/ftrack/lib/lib.py (new file, 135 lines)
View file

@ -0,0 +1,135 @@
from bson.objectid import ObjectId
from .avalon_sync import CustAttrIdKey
import avalon.io
def get_project_from_entity(entity):
# TODO add more entities
ent_type_lowered = entity.entity_type.lower()
if ent_type_lowered == "project":
return entity
elif ent_type_lowered == "assetversion":
return entity["asset"]["parent"]["project"]
elif "project" in entity:
return entity["project"]
return None
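# Illustrative usage (the entity id is a placeholder):
#   asset_version = session.get("AssetVersion", "9c1c0a42")
#   project = get_project_from_entity(asset_version)
#   project["full_name"]  # e.g. "my_project"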
def get_avalon_entities_for_assetversion(asset_version, db_con=None):
output = {
"success": True,
"message": None,
"project": None,
"project_name": None,
"asset": None,
"asset_name": None,
"asset_path": None,
"subset": None,
"subset_name": None,
"version": None,
"version_name": None,
"representations": None
}
if db_con is None:
db_con = avalon.io
db_con.install()
ft_asset = asset_version["asset"]
subset_name = ft_asset["name"]
version = asset_version["version"]
parent = ft_asset["parent"]
ent_path = "/".join(
[ent["name"] for ent in parent["link"]]
)
project = get_project_from_entity(asset_version)
project_name = project["full_name"]
output["project_name"] = project_name
output["asset_name"] = parent["name"]
output["asset_path"] = ent_path
output["subset_name"] = subset_name
output["version_name"] = version
db_con.Session["AVALON_PROJECT"] = project_name
avalon_project = db_con.find_one({"type": "project"})
output["project"] = avalon_project
if not avalon_project:
output["success"] = False
output["message"] = "Project not synchronized to avalon `{}`".format(
project_name
)
return output
asset_ent = None
asset_mongo_id = parent["custom_attributes"].get(CustAttrIdKey)
if asset_mongo_id:
try:
asset_mongo_id = ObjectId(asset_mongo_id)
asset_ent = db_con.find_one({
"type": "asset",
"_id": asset_mongo_id
})
except Exception:
pass
if not asset_ent:
asset_ent = db_con.find_one({
"type": "asset",
"data.ftrackId": parent["id"]
})
output["asset"] = asset_ent
if not asset_ent:
output["success"] = False
output["message"] = "Not synchronized entity to avalon `{}`".format(
ent_path
)
return output
asset_mongo_id = asset_ent["_id"]
subset_ent = db_con.find_one({
"type": "subset",
"parent": asset_mongo_id,
"name": subset_name
})
output["subset"] = subset_ent
if not subset_ent:
output["success"] = False
output["message"] = (
"Subset `{}` does not exist under Asset `{}`"
).format(subset_name, ent_path)
return output
version_ent = db_con.find_one({
"type": "version",
"name": version,
"parent": subset_ent["_id"]
})
output["version"] = version_ent
if not version_ent:
output["success"] = False
output["message"] = (
"Version `{}` does not exist under Subset `{}` | Asset `{}`"
).format(version, subset_name, ent_path)
return output
repre_ents = list(db_con.find({
"type": "representation",
"parent": version_ent["_id"]
}))
output["representations"] = repre_ents
return output
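# Illustrative usage (callers are expected to check "success" and "message"
# before using the returned entities):
#   result = get_avalon_entities_for_assetversion(asset_version, db_con)
#   if not result["success"]:
#       log.warning(result["message"])
#   else:
#       version_doc = result["version"]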

View file

@ -33,42 +33,6 @@ if os.getenv("PYBLISH_GUI", None):
pyblish.register_gui(os.getenv("PYBLISH_GUI", None))
# class NukeHandler(logging.Handler):
# '''
# Nuke Handler - emits logs into nuke's script editor.
# warning will emit nuke.warning()
# critical and fatal would popup msg dialog to alert of the error.
# '''
#
# def __init__(self):
# logging.Handler.__init__(self)
# self.set_name("Pype_Nuke_Handler")
#
# def emit(self, record):
# # Formated message:
# msg = self.format(record)
#
# if record.levelname.lower() in [
# # "warning",
# "critical",
# "fatal",
# "error"
# ]:
# msg = self.format(record)
# nuke.message(msg)
#
#
# '''Adding Nuke Logging Handler'''
# log.info([handler.get_name() for handler in logging.root.handlers[:]])
# nuke_handler = NukeHandler()
# if nuke_handler.get_name() \
# not in [handler.get_name()
# for handler in logging.root.handlers[:]]:
# logging.getLogger().addHandler(nuke_handler)
# logging.getLogger().setLevel(logging.INFO)
# log.info([handler.get_name() for handler in logging.root.handlers[:]])
def reload_config():
"""Attempt to reload pipeline at run-time.

View file

@ -196,7 +196,7 @@ def format_anatomy(data):
"root": api.Session["AVALON_PROJECTS"],
"subset": data["avalon"]["subset"],
"asset": data["avalon"]["asset"],
"task": api.Session["AVALON_TASK"].lower(),
"task": api.Session["AVALON_TASK"],
"family": data["avalon"]["family"],
"project": {"name": project_document["name"],
"code": project_document["data"].get("code", '')},
@ -374,7 +374,7 @@ def create_write_node(name, data, input=None, prenodes=None):
now_node.setInput(0, prev_node)
# imprinting group node
avalon.nuke.imprint(GN, data["avalon"], tab="Pype")
avalon.nuke.imprint(GN, data["avalon"])
divider = nuke.Text_Knob('')
GN.addKnob(divider)
@ -645,15 +645,105 @@ class WorkfileSettings(object):
write_dict (dict): nuke write node as dictionary
'''
# TODO: complete this function so any write node in
# scene will have fixed colorspace following presets for the project
if not isinstance(write_dict, dict):
msg = "set_root_colorspace(): argument should be dictionary"
nuke.message(msg)
log.error(msg)
return
log.debug("__ set_writes_colorspace(): {}".format(write_dict))
from avalon.nuke import get_avalon_knob_data
for node in nuke.allNodes():
if node.Class() in ["Viewer", "Dot"]:
continue
# get data from avalon knob
avalon_knob_data = get_avalon_knob_data(node, ["avalon:", "ak:"])
if not avalon_knob_data:
continue
if avalon_knob_data["id"] != "pyblish.avalon.instance":
continue
# establish families
families = [avalon_knob_data["family"]]
if avalon_knob_data.get("families"):
families.append(avalon_knob_data.get("families"))
# except disabled nodes but exclude backdrops in test
for fmly, knob in write_dict.items():
write = None
if (fmly in families):
# Add all nodes in group instances.
if node.Class() == "Group":
node.begin()
for x in nuke.allNodes():
if x.Class() == "Write":
write = x
node.end()
elif node.Class() == "Write":
write = node
                    else:
                        log.warning("Wrong write node Class")

                    if write is None:
                        continue

                    write["colorspace"].setValue(str(knob["colorspace"]))
log.info(
"Setting `{0}` to `{1}`".format(
write.name(),
knob["colorspace"]))
def set_reads_colorspace(self, reads):
""" Setting colorspace to Read nodes
        Loops through all Read nodes and tries to set colorspace based on regex rules in presets
"""
changes = dict()
for n in nuke.allNodes():
file = nuke.filename(n)
if not n.Class() == "Read":
continue
# load nuke presets for Read's colorspace
read_clrs_presets = get_colorspace_preset().get(
"nuke", {}).get("read", {})
            # check if any read colorspace preset is matching
preset_clrsp = next((read_clrs_presets[k]
for k in read_clrs_presets
if bool(re.search(k, file))),
None)
log.debug(preset_clrsp)
if preset_clrsp is not None:
current = n["colorspace"].value()
future = str(preset_clrsp)
if current != future:
changes.update({
n.name(): {
"from": current,
"to": future
}
})
log.debug(changes)
if changes:
msg = "Read nodes are not set to correct colospace:\n\n"
for nname, knobs in changes.items():
msg += str(" - node: '{0}' is now '{1}' "
"but should be '{2}'\n").format(
nname, knobs["from"], knobs["to"]
)
msg += "\nWould you like to change it?"
if nuke.ask(msg):
for nname, knobs in changes.items():
n = nuke.toNode(nname)
n["colorspace"].setValue(knobs["to"])
log.info(
"Setting `{0}` to `{1}`".format(
nname,
knobs["to"]))
def set_colorspace(self):
        ''' Setting colorspace following presets
@ -671,6 +761,7 @@ class WorkfileSettings(object):
msg = "set_colorspace(): missing `viewer` settings in template"
nuke.message(msg)
log.error(msg)
try:
self.set_writes_colorspace(nuke_colorspace["write"])
except AttributeError:
@ -678,6 +769,10 @@ class WorkfileSettings(object):
nuke.message(msg)
log.error(msg)
reads = nuke_colorspace.get("read")
if reads:
self.set_reads_colorspace(reads)
try:
for key in nuke_colorspace:
log.debug("Preset's colorspace key: {}".format(key))
@ -975,7 +1070,7 @@ class BuildWorkfile(WorkfileSettings):
"project": {"name": self._project["name"],
"code": self._project["data"].get("code", '')},
"asset": self._asset or os.environ["AVALON_ASSET"],
"task": kwargs.get("task") or api.Session["AVALON_TASK"].lower(),
"task": kwargs.get("task") or api.Session["AVALON_TASK"],
"hierarchy": kwargs.get("hierarchy") or pype.get_hierarchy(),
"version": kwargs.get("version", {}).get("name", 1),
"user": getpass.getuser(),

View file

@ -51,7 +51,7 @@ if os.getenv("PYBLISH_GUI", None):
pyblish.register_gui(os.getenv("PYBLISH_GUI", None))
-def install(config):
+def install():
"""
Installing Nukestudio integration for avalon

View file

@ -1,24 +0,0 @@
import pyblish.api
class WriteToRender(pyblish.api.InstancePlugin):
"""Swith Render knob on write instance to on,
so next time publish will be set to render
"""
order = pyblish.api.ExtractorOrder + 0.1
label = "Write to render next"
optional = True
hosts = ["nuke", "nukeassist"]
families = ["write"]
def process(self, instance):
return
if [f for f in instance.data["families"]
if ".frames" in f]:
instance[0]["render"].setValue(True)
self.log.info("Swith write node render to `on`")
else:
            # switch to off
            instance[0]["render"].setValue(False)
            self.log.info("Switch write node render to `Off`")

View file

@ -1,14 +0,0 @@
import pyblish.api
import nuke
class CollectActiveViewer(pyblish.api.ContextPlugin):
"""Collect any active viewer from nodes
"""
order = pyblish.api.CollectorOrder + 0.3
label = "Collect Active Viewer"
hosts = ["nuke"]
def process(self, context):
context.data["ActiveViewer"] = nuke.activeViewer()

View file

@ -1,22 +0,0 @@
import pyblish
class ExtractFramesToIntegrate(pyblish.api.InstancePlugin):
"""Extract rendered frames for integrator
"""
order = pyblish.api.ExtractorOrder
label = "Extract rendered frames"
hosts = ["nuke"]
families = ["render"]
    def process(self, instance):
return
# staging_dir = instance.data.get('stagingDir', None)
# output_dir = instance.data.get('outputDir', None)
#
# if not staging_dir:
# staging_dir = output_dir
# instance.data['stagingDir'] = staging_dir
# # instance.data['transfer'] = False

View file

@ -1,116 +0,0 @@
import os
import nuke
import pyblish.api
class Extract(pyblish.api.InstancePlugin):
"""Super class for write and writegeo extractors."""
order = pyblish.api.ExtractorOrder
optional = True
label = "Extract Nuke [super]"
hosts = ["nuke"]
match = pyblish.api.Subset
# targets = ["process.local"]
def execute(self, instance):
# Get frame range
node = instance[0]
first_frame = nuke.root()["first_frame"].value()
last_frame = nuke.root()["last_frame"].value()
if node["use_limit"].value():
first_frame = node["first"].value()
last_frame = node["last"].value()
# Render frames
nuke.execute(node.name(), int(first_frame), int(last_frame))
class ExtractNukeWrite(Extract):
""" Extract output from write nodes. """
families = ["write", "local"]
label = "Extract Write"
def process(self, instance):
self.execute(instance)
# Validate output
for filename in list(instance.data["collection"]):
if not os.path.exists(filename):
instance.data["collection"].remove(filename)
self.log.warning("\"{0}\" didn't render.".format(filename))
class ExtractNukeCache(Extract):
label = "Cache"
families = ["cache", "local"]
def process(self, instance):
self.execute(instance)
# Validate output
msg = "\"{0}\" didn't render.".format(instance.data["output_path"])
assert os.path.exists(instance.data["output_path"]), msg
class ExtractNukeCamera(Extract):
label = "Camera"
families = ["camera", "local"]
def process(self, instance):
node = instance[0]
node["writeGeometries"].setValue(False)
node["writePointClouds"].setValue(False)
node["writeAxes"].setValue(False)
file_path = node["file"].getValue()
node["file"].setValue(instance.data["output_path"])
self.execute(instance)
node["writeGeometries"].setValue(True)
node["writePointClouds"].setValue(True)
node["writeAxes"].setValue(True)
node["file"].setValue(file_path)
# Validate output
msg = "\"{0}\" didn't render.".format(instance.data["output_path"])
assert os.path.exists(instance.data["output_path"]), msg
class ExtractNukeGeometry(Extract):
label = "Geometry"
families = ["geometry", "local"]
def process(self, instance):
node = instance[0]
node["writeCameras"].setValue(False)
node["writePointClouds"].setValue(False)
node["writeAxes"].setValue(False)
file_path = node["file"].getValue()
node["file"].setValue(instance.data["output_path"])
self.execute(instance)
node["writeCameras"].setValue(True)
node["writePointClouds"].setValue(True)
node["writeAxes"].setValue(True)
node["file"].setValue(file_path)
# Validate output
msg = "\"{0}\" didn't render.".format(instance.data["output_path"])
assert os.path.exists(instance.data["output_path"]), msg

View file

@ -1,40 +0,0 @@
import pyblish.api
import os
import pype
import shutil
class ExtractScript(pype.api.Extractor):
"""Publish script
"""
label = 'Extract Script'
order = pyblish.api.ExtractorOrder - 0.05
optional = True
hosts = ['nuke']
families = ["workfile"]
def process(self, instance):
self.log.debug("instance extracting: {}".format(instance.data))
current_script = instance.context.data["currentFile"]
# Define extract output file path
stagingdir = self.staging_dir(instance)
filename = "{0}".format(instance.data["name"])
path = os.path.join(stagingdir, filename)
self.log.info("Performing extraction..")
shutil.copy(current_script, path)
if "representations" not in instance.data:
instance.data["representations"] = list()
representation = {
'name': 'nk',
'ext': '.nk',
'files': filename,
"stagingDir": stagingdir,
}
instance.data["representations"].append(representation)
self.log.info("Extracted instance '%s' to: %s" % (instance.name, path))

View file

@ -1,27 +0,0 @@
import pyblish.api
import shutil
import os
class CopyStagingDir(pyblish.api.InstancePlugin):
"""Copy data rendered into temp local directory
"""
order = pyblish.api.IntegratorOrder - 2
label = "Copy data from temp dir"
hosts = ["nuke", "nukeassist"]
families = ["render.local"]
def process(self, instance):
temp_dir = instance.data.get("stagingDir")
output_dir = instance.data.get("outputDir")
# copy data to correct dir
if not os.path.exists(output_dir):
os.makedirs(output_dir)
self.log.info("output dir has been created")
for f in os.listdir(temp_dir):
self.log.info("copy file to correct destination: {}".format(f))
shutil.copy(os.path.join(temp_dir, os.path.basename(f)),
os.path.join(output_dir, os.path.basename(f)))

View file

@ -1,98 +0,0 @@
import re
import os
import json
import subprocess
import pyblish.api
from pype.action import get_errored_plugins_from_data
def _get_script():
"""Get path to the image sequence script"""
# todo: use a more elegant way to get the python script
try:
from pype.fusion.scripts import publish_filesequence
except Exception:
raise RuntimeError("Expected module 'publish_imagesequence'"
"to be available")
module_path = publish_filesequence.__file__
if module_path.endswith(".pyc"):
module_path = module_path[:-len(".pyc")] + ".py"
return module_path
class PublishImageSequence(pyblish.api.InstancePlugin):
"""Publish the generated local image sequences."""
order = pyblish.api.IntegratorOrder
label = "Publish Rendered Image Sequence(s)"
hosts = ["fusion"]
families = ["saver.renderlocal"]
def process(self, instance):
# Skip this plug-in if the ExtractImageSequence failed
errored_plugins = get_errored_plugins_from_data(instance.context)
if any(plugin.__name__ == "FusionRenderLocal" for plugin in
errored_plugins):
raise RuntimeError("Fusion local render failed, "
"publishing images skipped.")
subset = instance.data["subset"]
ext = instance.data["ext"]
# Regex to match resulting renders
regex = "^{subset}.*[0-9]+{ext}+$".format(subset=re.escape(subset),
ext=re.escape(ext))
# The instance has most of the information already stored
metadata = {
"regex": regex,
"frameStart": instance.context.data["frameStart"],
"frameEnd": instance.context.data["frameEnd"],
"families": ["imagesequence"],
}
# Write metadata and store the path in the instance
output_directory = instance.data["outputDir"]
path = os.path.join(output_directory,
"{}_metadata.json".format(subset))
with open(path, "w") as f:
json.dump(metadata, f)
assert os.path.isfile(path), ("Stored path is not a file for %s"
% instance.data["name"])
# Suppress any subprocess console
startupinfo = subprocess.STARTUPINFO()
startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
startupinfo.wShowWindow = subprocess.SW_HIDE
process = subprocess.Popen(["python", _get_script(),
"--paths", path],
bufsize=1,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
startupinfo=startupinfo)
while True:
output = process.stdout.readline()
# Break when there is no output or a return code has been given
if output == '' and process.poll() is not None:
process.stdout.close()
break
if output:
line = output.strip()
if line.startswith("ERROR"):
self.log.error(line)
else:
self.log.info(line)
if process.returncode != 0:
raise RuntimeError("Process quit with non-zero "
"return code: {}".format(process.returncode))

View file

@ -1,24 +0,0 @@
import pyblish.api
import nuke
class ValidateActiveViewer(pyblish.api.ContextPlugin):
"""Validate presentse of the active viewer from nodes
"""
order = pyblish.api.ValidatorOrder
label = "Validate Active Viewer"
hosts = ["nuke"]
def process(self, context):
viewer_process_node = context.data.get("ViewerProcess")
assert viewer_process_node, (
"Missing active viewer process! Please click on output write node and push key number 1-9"
)
active_viewer = context.data["ActiveViewer"]
active_input = active_viewer.activeInput()
assert active_input is not None, (
"Missing active viewer input! Please click on output write node and push key number 1-9"
)

View file

@ -1,36 +0,0 @@
import os
import pyblish.api
import pype.utils
@pyblish.api.log
class RepairNukeWriteNodeVersionAction(pyblish.api.Action):
label = "Repair"
on = "failed"
icon = "wrench"
def process(self, context, plugin):
import pype.nuke.lib as nukelib
instances = pype.utils.filter_instances(context, plugin)
for instance in instances:
node = instance[0]
render_path = nukelib.get_render_path(node)
self.log.info("render_path: {}".format(render_path))
node['file'].setValue(render_path.replace("\\", "/"))
class ValidateVersionMatch(pyblish.api.InstancePlugin):
"""Checks if write version matches workfile version"""
label = "Validate Version Match"
order = pyblish.api.ValidatorOrder
actions = [RepairNukeWriteNodeVersionAction]
hosts = ["nuke"]
families = ['write']
def process(self, instance):
        assert instance.data['version'] == instance.context.data['version'], (
            "Version in write doesn't match version of the workfile")

View file

@ -1,59 +0,0 @@
import pyblish.api
import pype.api
import pype.nuke.actions
class RepairWriteFamiliesAction(pyblish.api.Action):
label = "Fix Write's render attributes"
on = "failed"
icon = "wrench"
def process(self, instance, plugin):
self.log.info("instance {}".format(instance))
instance["render"].setValue(True)
self.log.info("Rendering toggled ON")
@pyblish.api.log
class ValidateWriteFamilies(pyblish.api.InstancePlugin):
""" Validates write families. """
order = pyblish.api.ValidatorOrder
label = "Valitade writes families"
hosts = ["nuke"]
families = ["write"]
actions = [pype.nuke.actions.SelectInvalidAction, pype.api.RepairAction]
    @classmethod
    def get_invalid(cls, instance):
if not [f for f in instance.data["families"]
if ".frames" in f]:
return
if not instance.data.get('files'):
return (instance)
def process(self, instance):
self.log.debug('instance.data["files"]: {}'.format(instance.data['files']))
        invalid = self.get_invalid(instance)
if invalid:
raise ValueError(str("`{}`: Switch `Render` on! "
"> {}".format(__name__, invalid)))
# if any(".frames" in f for f in instance.data["families"]):
# if not instance.data["files"]:
# raise ValueError("instance {} is set to publish frames\
# but no files were collected, render the frames first or\
# check 'render' checkbox onthe no to 'ON'".format(instance)))
#
#
# self.log.info("Checked correct writes families")
@classmethod
def repair(cls, instance):
cls.log.info("instance {}".format(instance))
instance[0]["render"].setValue(True)
cls.log.info("Rendering toggled ON")

View file

@ -2,6 +2,7 @@ from avalon.nuke.pipeline import Creator
from avalon.nuke import lib as anlib
import nuke
class CreateBackdrop(Creator):
"""Add Publishable Backdrop"""
@ -35,8 +36,8 @@ class CreateBackdrop(Creator):
return instance
else:
msg = "Please select nodes you "
"wish to add to a container"
msg = str("Please select nodes you "
"wish to add to a container")
self.log.error(msg)
nuke.message(msg)
return

View file

@ -240,77 +240,6 @@ class LoadBackdropNodes(api.Loader):
return update_container(GN, data_imprint)
def connect_active_viewer(self, group_node):
"""
        Finds the active viewer, places the node under it, and adds
        the name of the group into the viewer's Input Process
Arguments:
group_node (nuke node): nuke group node object
"""
group_node_name = group_node["name"].value()
viewer = [n for n in nuke.allNodes() if "Viewer1" in n["name"].value()]
if len(viewer) > 0:
viewer = viewer[0]
else:
if not (len(nodes) < 2):
msg = "Please create Viewer node before you "
"run this action again"
self.log.error(msg)
nuke.message(msg)
return None
# get coordinates of Viewer1
xpos = viewer["xpos"].value()
ypos = viewer["ypos"].value()
ypos += 150
viewer["ypos"].setValue(ypos)
# set coordinates to group node
group_node["xpos"].setValue(xpos)
group_node["ypos"].setValue(ypos + 50)
# add group node name to Viewer Input Process
viewer["input_process_node"].setValue(group_node_name)
# put backdrop under
pnlib.create_backdrop(label="Input Process", layer=2,
nodes=[viewer, group_node], color="0x7c7faaff")
return True
def get_item(self, data, trackIndex, subTrackIndex):
return {key: val for key, val in data.items()
if subTrackIndex == val["subTrackIndex"]
if trackIndex == val["trackIndex"]}
def byteify(self, input):
"""
Converts unicode strings to strings
        It goes through the whole dictionary
Arguments:
input (dict/str): input
Returns:
dict: with fixed values and keys
"""
if isinstance(input, dict):
return {self.byteify(key): self.byteify(value)
for key, value in input.iteritems()}
elif isinstance(input, list):
return [self.byteify(element) for element in input]
elif isinstance(input, unicode):
return input.encode('utf-8')
else:
return input
def switch(self, container, representation):
self.update(container, representation)

View file

@ -4,9 +4,7 @@ import contextlib
from avalon import api, io
from pype.nuke import presets
from pype.api import Logger
log = Logger().get_logger(__name__, "nuke")
from pypeapp import config
@contextlib.contextmanager
@ -34,14 +32,14 @@ def preserve_trim(node):
if start_at_frame:
node['frame_mode'].setValue("start at")
node['frame'].setValue(str(script_start))
log.info("start frame of Read was set to"
"{}".format(script_start))
print("start frame of Read was set to"
"{}".format(script_start))
if offset_frame:
node['frame_mode'].setValue("offset")
node['frame'].setValue(str((script_start + offset_frame)))
log.info("start frame of Read was set to"
"{}".format(script_start))
print("start frame of Read was set to"
"{}".format(script_start))
def loader_shift(node, frame, relative=True):
@ -70,11 +68,37 @@ def loader_shift(node, frame, relative=True):
return int(script_start)
def add_review_presets_config():
returning = {
"families": list(),
"representations": list()
}
review_presets = config.get_presets()["plugins"]["global"]["publish"].get(
"ExtractReview", {})
outputs = review_presets.get("outputs", {})
#
    for output, properties in outputs.items():
        returning["representations"].append(output)
        returning["families"] += properties.get("families", [])
return returning
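# Illustrative result, assuming the ExtractReview presets define "h264"
# and "mov" outputs (actual values come from the presets, not this commit):
#   {"families": ["review", "ftrack"], "representations": ["h264", "mov"]}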
class LoadMov(api.Loader):
"""Load mov file into Nuke"""
+    presets = add_review_presets_config()

-    families = ["write", "source", "plate", "render", "review"]
+    families = [
+        "source",
+        "plate",
+        "render",
+        "review"] + presets["families"]

-    representations = ["wipmov", "h264", "mov", "preview", "review", "mp4"]
+    representations = [
+        "mov",
+        "preview",
+        "review",
+        "mp4"] + presets["representations"]
label = "Load mov"
order = -10
@ -115,7 +139,7 @@ class LoadMov(api.Loader):
if not file:
repr_id = context["representation"]["_id"]
-            log.warning(
+            self.log.warning(
"Representation id `{}` is failing to load".format(repr_id))
return
@ -211,7 +235,7 @@ class LoadMov(api.Loader):
if not file:
repr_id = representation["_id"]
-            log.warning(
+            self.log.warning(
"Representation id `{}` is failing to load".format(repr_id))
return
@ -246,9 +270,10 @@ class LoadMov(api.Loader):
colorspace = version_data.get("colorspace")
if first is None:
log.warning("Missing start frame for updated version"
"assuming starts at frame 0 for: "
"{} ({})".format(node['name'].value(), representation))
self.log.warning("Missing start frame for updated version"
"assuming starts at frame 0 for: "
"{} ({})".format(
node['name'].value(), representation))
first = 0
# fix handle start and end if none are available
@ -264,7 +289,7 @@ class LoadMov(api.Loader):
# Update the loader's path whilst preserving some values
with preserve_trim(node):
node["file"].setValue(file)
log.info("__ node['file']: {}".format(node["file"].value()))
self.log.info("__ node['file']: {}".format(node["file"].value()))
# Set the global in to the start frame of the sequence
loader_shift(node, first, relative=True)
@ -290,7 +315,6 @@ class LoadMov(api.Loader):
if preset_clrsp is not None:
node["colorspace"].setValue(str(preset_clrsp))
updated_dict = {}
updated_dict.update({
"representation": str(representation["_id"]),
@ -316,7 +340,7 @@ class LoadMov(api.Loader):
update_container(
node, updated_dict
)
log.info("udated to version: {}".format(version.get("name")))
self.log.info("udated to version: {}".format(version.get("name")))
def remove(self, container):

View file

@ -5,10 +5,6 @@ import contextlib
from avalon import api, io
from pype.nuke import presets
from pype.api import Logger
log = Logger().get_logger(__name__, "nuke")
@contextlib.contextmanager
def preserve_trim(node):
@ -35,14 +31,14 @@ def preserve_trim(node):
if start_at_frame:
node['frame_mode'].setValue("start at")
node['frame'].setValue(str(script_start))
log.info("start frame of Read was set to"
"{}".format(script_start))
print("start frame of Read was set to"
"{}".format(script_start))
if offset_frame:
node['frame_mode'].setValue("offset")
node['frame'].setValue(str((script_start + offset_frame)))
log.info("start frame of Read was set to"
"{}".format(script_start))
print("start frame of Read was set to"
"{}".format(script_start))
def loader_shift(node, frame, relative=True):
@ -74,7 +70,7 @@ def loader_shift(node, frame, relative=True):
class LoadSequence(api.Loader):
"""Load image sequence into Nuke"""
families = ["write", "source", "plate", "render"]
families = ["render2d", "source", "plate", "render"]
representations = ["exr", "dpx", "jpg", "jpeg", "png"]
label = "Load sequence"
@ -91,7 +87,7 @@ class LoadSequence(api.Loader):
version = context['version']
version_data = version.get("data", {})
log.info("version_data: {}\n".format(version_data))
self.log.info("version_data: {}\n".format(version_data))
self.first_frame = int(nuke.root()["first_frame"].getValue())
self.handle_start = version_data.get("handleStart", 0)
@ -111,7 +107,7 @@ class LoadSequence(api.Loader):
if not file:
repr_id = context["representation"]["_id"]
-            log.warning(
+            self.log.warning(
"Representation id `{}` is failing to load".format(repr_id))
return
@ -242,7 +238,7 @@ class LoadSequence(api.Loader):
if not file:
repr_id = representation["_id"]
-            log.warning(
+            self.log.warning(
"Representation id `{}` is failing to load".format(repr_id))
return
@ -277,9 +273,10 @@ class LoadSequence(api.Loader):
last = version_data.get("frameEnd")
if first is None:
log.warning("Missing start frame for updated version"
"assuming starts at frame 0 for: "
"{} ({})".format(node['name'].value(), representation))
self.log.warning("Missing start frame for updated version"
"assuming starts at frame 0 for: "
"{} ({})".format(
node['name'].value(), representation))
first = 0
first -= self.handle_start
@ -288,7 +285,7 @@ class LoadSequence(api.Loader):
# Update the loader's path whilst preserving some values
with preserve_trim(node):
node["file"].setValue(file)
log.info("__ node['file']: {}".format(node["file"].value()))
self.log.info("__ node['file']: {}".format(node["file"].value()))
# Set the global in to the start frame of the sequence
loader_shift(node, first, relative=True)
@ -328,7 +325,7 @@ class LoadSequence(api.Loader):
node,
updated_dict
)
log.info("udated to version: {}".format(version.get("name")))
self.log.info("udated to version: {}".format(version.get("name")))
def remove(self, container):

View file

@ -58,7 +58,11 @@ class CollectBackdrops(pyblish.api.InstancePlugin):
last_frame = int(nuke.root()["last_frame"].getValue())
# get version
-        version = pype.get_version_from_path(nuke.root().name())
+        version = instance.context.data.get('version')
+        if not version:
+            raise RuntimeError("Script name has no version in the name.")
+        instance.data['version'] = version
# Add version data to instance

View file

@ -15,12 +15,6 @@ class ValidateScript(pyblish.api.InstancePlugin):
def process(self, instance):
ctx_data = instance.context.data
asset_name = ctx_data["asset"]
# asset = io.find_one({
# "type": "asset",
# "name": asset_name
# })
asset = lib.get_asset(asset_name)
asset_data = asset["data"]

View file

@ -105,7 +105,6 @@ class CollectClips(api.ContextPlugin):
"asset": asset,
"family": "clip",
"families": [],
"handles": 0,
"handleStart": projectdata.get("handleStart", 0),
"handleEnd": projectdata.get("handleEnd", 0),
"version": int(version)})

View file

@ -11,7 +11,9 @@ class CollectVideoTracksLuts(pyblish.api.InstancePlugin):
def process(self, instance):
self.log.debug("Finding soft effect for subset: `{}`".format(instance.data.get("subset")))
self.log.debug(
"Finding soft effect for subset: `{}`".format(
instance.data.get("subset")))
# taking active sequence
subset = instance.data.get("subset")
@ -41,8 +43,12 @@ class CollectVideoTracksLuts(pyblish.api.InstancePlugin):
if len(instance.data.get("effectTrackItems", {}).keys()) > 0:
instance.data["families"] += ["lut"]
self.log.debug("effects.keys: {}".format(instance.data.get("effectTrackItems", {}).keys()))
self.log.debug("effects: {}".format(instance.data.get("effectTrackItems", {})))
self.log.debug(
"effects.keys: {}".format(
instance.data.get("effectTrackItems", {}).keys()))
self.log.debug(
"effects: {}".format(
instance.data.get("effectTrackItems", {})))
def add_effect(self, instance, track_index, item):
track = item.parentTrack().name()

View file

@ -24,7 +24,6 @@ class CollectClipHandles(api.ContextPlugin):
continue
# get handles
handles = int(instance.data["handles"])
handle_start = int(instance.data["handleStart"])
handle_end = int(instance.data["handleEnd"])
@ -38,19 +37,16 @@ class CollectClipHandles(api.ContextPlugin):
self.log.debug("Adding to shared assets: `{}`".format(
instance.data["name"]))
asset_shared.update({
"handles": handles,
"handleStart": handle_start,
"handleEnd": handle_end
})
for instance in filtered_instances:
if not instance.data.get("main") and not instance.data.get("handleTag"):
self.log.debug("Synchronize handles on: `{}`".format(
instance.data["name"]))
name = instance.data["asset"]
s_asset_data = assets_shared.get(name)
instance.data["handles"] = s_asset_data.get("handles", 0)
instance.data["handleStart"] = s_asset_data.get(
"handleStart", 0
)

View file

@ -263,7 +263,6 @@ class CollectHierarchyContext(pyblish.api.ContextPlugin):
# get custom attributes of the shot
if instance.data.get("main"):
in_info['custom_attributes'] = {
-                'handles': int(instance.data.get('handles', 0)),
"handleStart": handle_start,
"handleEnd": handle_end,
"frameStart": instance.data["frameStart"],

View file

@ -134,7 +134,6 @@ class CollectPlatesData(api.InstancePlugin):
# add to data of representation
version_data.update({
"handles": version_data['handleStart'],
"colorspace": item.sourceMediaColourTransform(),
"colorspaceScript": instance.context.data["colorspace"],
"families": [f for f in families if 'ftrack' not in f],
@ -156,8 +155,9 @@ class CollectPlatesData(api.InstancePlugin):
ext=ext
)
-        start_frame = source_first_frame + instance.data["sourceInH"]
-        duration = instance.data["sourceOutH"] - instance.data["sourceInH"]
+        start_frame = int(source_first_frame + instance.data["sourceInH"])
+        duration = int(
+            instance.data["sourceOutH"] - instance.data["sourceInH"])
end_frame = start_frame + duration
self.log.debug("start_frame: `{}`".format(start_frame))
self.log.debug("end_frame: `{}`".format(end_frame))

View file

@ -125,7 +125,7 @@ class CollectReviews(api.InstancePlugin):
thumb_path,
format='png'
)
self.log.debug("__ sourceIn: `{}`".format(instance.data["sourceIn"]))
self.log.debug("__ thumbnail: `{}`, frame: `{}`".format(thumbnail, thumb_frame))
@ -145,7 +145,10 @@ class CollectReviews(api.InstancePlugin):
item = instance.data["item"]
transfer_data = [
"handleStart", "handleEnd", "sourceIn", "sourceOut", "frameStart", "frameEnd", "sourceInH", "sourceOutH", "clipIn", "clipOut", "clipInH", "clipOutH", "asset", "track", "version"
"handleStart", "handleEnd", "sourceIn", "sourceOut",
"frameStart", "frameEnd", "sourceInH", "sourceOutH",
"clipIn", "clipOut", "clipInH", "clipOutH", "asset",
"track", "version"
]
version_data = dict()
@ -154,7 +157,6 @@ class CollectReviews(api.InstancePlugin):
# add to data of representation
version_data.update({
"handles": version_data['handleStart'],
"colorspace": item.sourceMediaColourTransform(),
"families": instance.data["families"],
"subset": instance.data["subset"],

View file

@ -38,7 +38,9 @@ class CollectClipTagHandles(api.ContextPlugin):
# gets arguments if there are any
t_args = t_metadata.get("tag.args", "")
assert t_args, self.log.error("Tag with Handles is missing Args. Use only handle start/end")
assert t_args, self.log.error(
"Tag with Handles is missing Args. "
"Use only handle start/end")
t_args = json.loads(t_args.replace("'", "\""))
# add in start
@ -55,8 +57,8 @@ class CollectClipTagHandles(api.ContextPlugin):
# adding handles to asset_shared on context
if instance.data.get("handleEnd"):
assets_shared_a["handleEnd"] = instance.data["handleEnd"]
assets_shared_a[
"handleEnd"] = instance.data["handleEnd"]
if instance.data.get("handleStart"):
assets_shared_a["handleStart"] = instance.data["handleStart"]
if instance.data.get("handles"):
assets_shared_a["handles"] = instance.data["handles"]
assets_shared_a[
"handleStart"] = instance.data["handleStart"]

View file

@ -19,9 +19,12 @@ class ExtractVideoTracksLuts(pyblish.api.InstancePlugin):
item = instance.data["item"]
effects = instance.data.get("effectTrackItems")
instance.data["families"] = [f for f in instance.data.get("families", []) if f not in ["lut"]]
instance.data["families"] = [f for f in instance.data.get(
"families", []) if f not in ["lut"]]
self.log.debug("___ instance.data[families]: `{}`".format(instance.data["families"]))
self.log.debug(
"__ instance.data[families]: `{}`".format(
instance.data["families"]))
# skip any without effects
if not effects:
@ -106,7 +109,6 @@ class ExtractVideoTracksLuts(pyblish.api.InstancePlugin):
# add to data of representation
version_data.update({
"handles": version_data['handleStart'],
"colorspace": item.sourceMediaColourTransform(),
"colorspaceScript": instance.context.data["colorspace"],
"families": ["plate", "lut"],
@ -136,7 +138,7 @@ class ExtractVideoTracksLuts(pyblish.api.InstancePlugin):
def copy_linked_files(self, effect, dst_dir):
for k, v in effect["node"].items():
if k in "file" and v is not '':
if k in "file" and v != '':
base_name = os.path.basename(v)
dst = os.path.join(dst_dir, base_name).replace("\\", "/")

View file

@ -1,79 +0,0 @@
import pyblish
from avalon import io
from pype.action import get_errored_instances_from_context
import pype.api as pype
@pyblish.api.log
class RepairNukestudioVersionUp(pyblish.api.Action):
label = "Version Up Workfile"
on = "failed"
icon = "wrench"
def process(self, context, plugin):
errored_instances = get_errored_instances_from_context(context)
# Apply pyblish logic to get the instances for the plug-in
instances = pyblish.api.instances_by_plugin(errored_instances, plugin)
if instances:
project = context.data["activeProject"]
path = context.data.get("currentFile")
new_path = pype.version_up(path)
if project:
project.saveAs(new_path)
self.log.info("Project workfile version was fixed")
class ValidateVersion(pyblish.api.InstancePlugin):
"""Validate clip's versions.
"""
order = pyblish.api.ValidatorOrder
families = ["plate"]
label = "Validate Version"
actions = [RepairNukestudioVersionUp]
hosts = ["nukestudio"]
def process(self, instance):
version = int(instance.data.get("version", 0))
asset_name = instance.data.get("asset", None)
subset_name = instance.data.get("subset", None)
assert version, "The file is missing version string! example: filename_v001.hrox `{}`"
self.log.debug("Collected version: `{0}`".format(version))
found_v = 0
try:
io.install()
project = io.find_one({"type": "project"})
asset = io.find_one({
"type": "asset",
"name": asset_name,
"parent": project["_id"]
})
subset = io.find_one({
"type": "subset",
"parent": asset["_id"],
"name": subset_name
})
version_db = io.find_one({
'type': 'version',
'parent': subset["_id"],
'name': version
}) or {}
found_v = version_db.get("name", 0)
self.log.debug("Found version: `{0}`".format(found_v))
except Exception as e:
self.log.debug("Problem to get data from database for asset `{0}` subset `{1}`. Error: `{2}`".format(asset_name, subset_name, e))
assert (found_v != version), "Version must not be the same as in database `{0}`, Versions file: `{1}`, db: `{2}`".format(asset_name, version, found_v)