Merge branch 'release/2.6.0'

Milan Kolar 2020-03-10 20:41:18 +01:00
commit b42efa4717
109 changed files with 6050 additions and 4416 deletions

View file

@ -9,8 +9,9 @@ from pypeapp import config
import logging
log = logging.getLogger(__name__)
__version__ = "2.5.0"
__version__ = "2.6.0"
PROJECT_PLUGINS_PATH = os.environ.get("PYPE_PROJECT_PLUGINS")
PACKAGE_DIR = os.path.dirname(__file__)
PLUGINS_DIR = os.path.join(PACKAGE_DIR, "plugins")
@ -72,6 +73,18 @@ def install():
pyblish.register_discovery_filter(filter_pyblish_plugins)
avalon.register_plugin_path(avalon.Loader, LOAD_PATH)
# Register project specific plugins
project_name = os.environ.get("AVALON_PROJECT")
if PROJECT_PLUGINS_PATH and project_name:
for path in PROJECT_PLUGINS_PATH.split(os.pathsep):
if not path:
continue
plugin_path = os.path.join(path, project_name, "plugins")
if os.path.exists(plugin_path):
pyblish.register_plugin_path(plugin_path)
avalon.register_plugin_path(avalon.Loader, plugin_path)
avalon.register_plugin_path(avalon.Creator, plugin_path)
# apply monkey patched discover to original one
avalon.discover = patched_discover
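# Illustrative example (hypothetical paths): with PYPE_PROJECT_PLUGINS set
# to "/studio/plugins" and AVALON_PROJECT set to "my_project", the loop
# above registers "/studio/plugins/my_project/plugins" for pyblish
# publishing and for avalon Loader and Creator discovery, if that folder
# exists.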

View file

@ -0,0 +1,534 @@
import os
import collections
import uuid
import clique
from pymongo import UpdateOne
from pype.ftrack import BaseAction
from pype.ftrack.lib.io_nonsingleton import DbConnector
import avalon.pipeline
class DeleteOldVersions(BaseAction):
identifier = "delete.old.versions"
label = "Pype Admin"
variant = "- Delete old versions"
description = (
"Delete files from older publishes so project can be"
" archived with only lates versions."
)
role_list = ["Pypeclub", "Project Manager", "Administrator"]
icon = '{}/ftrack/action_icons/PypeAdmin.svg'.format(
os.environ.get('PYPE_STATICS_SERVER', '')
)
dbcon = DbConnector()
inteface_title = "Choose your preferences"
splitter_item = {"type": "label", "value": "---"}
sequence_splitter = "__sequence_splitter__"
def discover(self, session, entities, event):
''' Validation '''
selection = event["data"].get("selection") or []
for entity in selection:
entity_type = (entity.get("entityType") or "").lower()
if entity_type == "assetversion":
return True
return False
def interface(self, session, entities, event):
items = []
root = os.environ.get("AVALON_PROJECTS")
if not root:
msg = "Root path to projects is not set."
items.append({
"type": "label",
"value": "<i><b>ERROR:</b> {}</i>".format(msg)
})
self.show_interface(
items=items, title=self.interface_title, event=event
)
return {
"success": False,
"message": msg
}
if not os.path.exists(root):
msg = "Root path does not exists \"{}\".".format(str(root))
items.append({
"type": "label",
"value": "<i><b>ERROR:</b> {}</i>".format(msg)
})
self.show_interface(
items=items, title=self.interface_title, event=event
)
return {
"success": False,
"message": msg
}
values = event["data"].get("values")
if values:
versions_count = int(values["last_versions_count"])
if versions_count >= 1:
return
items.append({
"type": "label",
"value": (
"# You have to keep at least 1 version!"
)
})
items.append({
"type": "label",
"value": (
"<i><b>WARNING:</b> This will remove published files of older"
" versions from disk so we don't recommend use"
" this action on \"live\" project.</i>"
)
})
items.append(self.splitter_item)
# How many versions to keep
items.append({
"type": "label",
"value": "## Choose how many versions you want to keep:"
})
items.append({
"type": "label",
"value": (
"<i><b>NOTE:</b> We do recommend to keep 2 versions.</i>"
)
})
items.append({
"type": "number",
"name": "last_versions_count",
"label": "Versions",
"value": 2
})
items.append(self.splitter_item)
items.append({
"type": "label",
"value": (
"## Remove publish folder even if there"
" are other than published files:"
)
})
items.append({
"type": "label",
"value": (
"<i><b>WARNING:</b> This may remove more than you want.</i>"
)
})
items.append({
"type": "boolean",
"name": "force_delete_publish_folder",
"label": "Are You sure?",
"value": False
})
return {
"items": items,
"title": self.inteface_title
}
def launch(self, session, entities, event):
values = event["data"].get("values")
if not values:
return
versions_count = int(values["last_versions_count"])
force_to_remove = values["force_delete_publish_folder"]
_val1 = "OFF"
if force_to_remove:
_val1 = "ON"
_val3 = "s"
if versions_count == 1:
_val3 = ""
self.log.debug((
"Process started. Force to delete publish folder is set to [{0}]"
" and will keep {1} latest version{2}."
).format(_val1, versions_count, _val3))
self.dbcon.install()
project = None
avalon_asset_names = []
asset_versions_by_parent_id = collections.defaultdict(list)
subset_names_by_asset_name = collections.defaultdict(list)
ftrack_assets_by_name = {}
for entity in entities:
ftrack_asset = entity["asset"]
parent_ent = ftrack_asset["parent"]
parent_ftrack_id = parent_ent["id"]
parent_name = parent_ent["name"]
if parent_name not in avalon_asset_names:
avalon_asset_names.append(parent_name)
# Group asset versions by parent entity
asset_versions_by_parent_id[parent_ftrack_id].append(entity)
# Get project
if project is None:
project = parent_ent["project"]
# Collect subset names per asset
subset_name = ftrack_asset["name"]
subset_names_by_asset_name[parent_name].append(subset_name)
if subset_name not in ftrack_assets_by_name:
ftrack_assets_by_name[subset_name] = ftrack_asset
# Set Mongo collection
project_name = project["full_name"]
self.dbcon.Session["AVALON_PROJECT"] = project_name
self.log.debug("Project is set to {}".format(project_name))
# Get Assets from avalon database
assets = list(self.dbcon.find({
"type": "asset",
"name": {"$in": avalon_asset_names}
}))
asset_id_to_name_map = {
asset["_id"]: asset["name"] for asset in assets
}
asset_ids = list(asset_id_to_name_map.keys())
self.log.debug("Collected assets ({})".format(len(asset_ids)))
# Get Subsets
subsets = list(self.dbcon.find({
"type": "subset",
"parent": {"$in": asset_ids}
}))
subsets_by_id = {}
subset_ids = []
for subset in subsets:
asset_id = subset["parent"]
asset_name = asset_id_to_name_map[asset_id]
available_subsets = subset_names_by_asset_name[asset_name]
if subset["name"] not in available_subsets:
continue
subset_ids.append(subset["_id"])
subsets_by_id[subset["_id"]] = subset
self.log.debug("Collected subsets ({})".format(len(subset_ids)))
# Get Versions
versions = list(self.dbcon.find({
"type": "version",
"parent": {"$in": subset_ids}
}))
versions_by_parent = collections.defaultdict(list)
for ent in versions:
versions_by_parent[ent["parent"]].append(ent)
def sort_func(ent):
return int(ent["name"])
all_last_versions = []
for parent_id, _versions in versions_by_parent.items():
for idx, version in enumerate(
sorted(_versions, key=sort_func, reverse=True)
):
if idx >= versions_count:
break
all_last_versions.append(version)
self.log.debug("Collected versions ({})".format(len(versions)))
# Filter latest versions
for version in all_last_versions:
versions.remove(version)
# Update versions_by_parent without filtered versions
versions_by_parent = collections.defaultdict(list)
for ent in versions:
versions_by_parent[ent["parent"]].append(ent)
# Filter already deleted versions
versions_to_pop = []
for version in versions:
version_tags = version["data"].get("tags")
if version_tags and "deleted" in version_tags:
versions_to_pop.append(version)
for version in versions_to_pop:
subset = subsets_by_id[version["parent"]]
asset_id = subset["parent"]
asset_name = asset_id_to_name_map[asset_id]
msg = "Asset: \"{}\" | Subset: \"{}\" | Version: \"{}\"".format(
asset_name, subset["name"], version["name"]
)
self.log.warning((
"Skipping version. Already tagged as `deleted`. < {} >"
).format(msg))
versions.remove(version)
version_ids = [ent["_id"] for ent in versions]
self.log.debug(
"Filtered versions to delete ({})".format(len(version_ids))
)
if not version_ids:
msg = "Skipping processing. Nothing to delete."
self.log.debug(msg)
return {
"success": True,
"message": msg
}
repres = list(self.dbcon.find({
"type": "representation",
"parent": {"$in": version_ids}
}))
self.log.debug(
"Collected representations to remove ({})".format(len(repres))
)
dir_paths = {}
file_paths_by_dir = collections.defaultdict(list)
for repre in repres:
file_path, seq_path = self.path_from_representation(repre)
if file_path is None:
self.log.warning((
"Could not format path for represenation \"{}\""
).format(str(repre)))
continue
dir_path = os.path.dirname(file_path)
dir_id = None
for _dir_id, _dir_path in dir_paths.items():
if _dir_path == dir_path:
dir_id = _dir_id
break
if dir_id is None:
dir_id = uuid.uuid4()
dir_paths[dir_id] = dir_path
file_paths_by_dir[dir_id].append([file_path, seq_path])
dir_ids_to_pop = []
for dir_id, dir_path in dir_paths.items():
if os.path.exists(dir_path):
continue
dir_ids_to_pop.append(dir_id)
# Pop dirs from both dictionaries
for dir_id in dir_ids_to_pop:
dir_paths.pop(dir_id)
paths = file_paths_by_dir.pop(dir_id)
# TODO report of missing directories?
paths_msg = ", ".join([
"'{}'".format(path[0].replace("\\", "/")) for path in paths
])
self.log.warning((
"Folder does not exist. Deleting it's files skipped: {}"
).format(paths_msg))
if force_to_remove:
self.delete_whole_dir_paths(dir_paths.values())
else:
self.delete_only_repre_files(dir_paths, file_paths_by_dir)
mongo_changes_bulk = []
for version in versions:
orig_version_tags = version["data"].get("tags") or []
version_tags = [tag for tag in orig_version_tags]
if "deleted" not in version_tags:
version_tags.append("deleted")
if version_tags == orig_version_tags:
continue
update_query = {"_id": version["_id"]}
update_data = {"$set": {"data.tags": version_tags}}
mongo_changes_bulk.append(UpdateOne(update_query, update_data))
if mongo_changes_bulk:
self.dbcon.bulk_write(mongo_changes_bulk)
self.dbcon.uninstall()
# Set attribute `is_published` to `False` on ftrack AssetVersions
for subset_id, _versions in versions_by_parent.items():
subset_name = None
for subset in subsets:
if subset["_id"] == subset_id:
subset_name = subset["name"]
break
if subset_name is None:
self.log.warning(
"Subset with ID `{}` was not found.".format(str(subset_id))
)
continue
ftrack_asset = ftrack_assets_by_name.get(subset_name)
if not ftrack_asset:
self.log.warning((
"Could not find Ftrack asset with name `{}`"
).format(subset_name))
continue
version_numbers = [int(ver["name"]) for ver in _versions]
for version in ftrack_asset["versions"]:
if int(version["version"]) in version_numbers:
version["is_published"] = False
try:
session.commit()
except Exception:
msg = (
"Could not set `is_published` attribute to `False`"
" for selected AssetVersions."
)
self.log.warning(msg, exc_info=True)
return {
"success": False,
"message": msg
}
return True
def delete_whole_dir_paths(self, dir_paths):
for dir_path in dir_paths:
# Delete all files and folders in dir path
for root, dirs, files in os.walk(dir_path, topdown=False):
for name in files:
os.remove(os.path.join(root, name))
for name in dirs:
os.rmdir(os.path.join(root, name))
# Delete the folder itself and its parent folders if they are empty
while True:
if not os.path.exists(dir_path):
dir_path = os.path.dirname(dir_path)
continue
if len(os.listdir(dir_path)) != 0:
break
os.rmdir(dir_path)
def delete_only_repre_files(self, dir_paths, file_paths):
for dir_id, dir_path in dir_paths.items():
dir_files = os.listdir(dir_path)
collections, remainders = clique.assemble(dir_files)
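# A sketch of what clique gives us here (assumed file names): for
# dir_files like ["a.1001.exr", "a.1002.exr", "b.mov"], clique.assemble
# returns one collection covering the "a" frame range and leaves "b.mov"
# in `remainders`, so frame sequences and single files can be handled
# separately below.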
for file_path, seq_path in file_paths[dir_id]:
file_path_base = os.path.split(file_path)[1]
# Just remove file if `frame` key was not in context or
# filled path is in remainders (single file sequence)
if not seq_path or file_path_base in remainders:
if not os.path.exists(file_path):
self.log.warning(
"File was not found: {}".format(file_path)
)
continue
os.remove(file_path)
self.log.debug("Removed file: {}".format(file_path))
remainders.remove(file_path_base)
continue
seq_path_base = os.path.split(seq_path)[1]
head, tail = seq_path_base.split(self.sequence_splitter)
final_col = None
for collection in collections:
if head != collection.head or tail != collection.tail:
continue
final_col = collection
break
if final_col is not None:
# Fill full path to head
final_col.head = os.path.join(dir_path, final_col.head)
for _file_path in final_col:
if os.path.exists(_file_path):
os.remove(_file_path)
_seq_path = final_col.format("{head}{padding}{tail}")
self.log.debug("Removed files: {}".format(_seq_path))
collections.remove(final_col)
elif os.path.exists(file_path):
os.remove(file_path)
self.log.debug("Removed file: {}".format(file_path))
else:
self.log.warning(
"File was not found: {}".format(file_path)
)
# Delete as much as possible parent folders
for dir_path in dir_paths.values():
while True:
if not os.path.exists(dir_path):
dir_path = os.path.dirname(dir_path)
continue
if len(os.listdir(dir_path)) != 0:
break
self.log.debug("Removed folder: {}".format(dir_path))
os.rmdir(dir_path)
def path_from_representation(self, representation):
try:
template = representation["data"]["template"]
except KeyError:
return (None, None)
root = os.environ["AVALON_PROJECTS"]
if not root:
return (None, None)
sequence_path = None
try:
context = representation["context"]
context["root"] = root
path = avalon.pipeline.format_template_with_optional_keys(
context, template
)
if "frame" in context:
context["frame"] = self.sequence_splitter
sequence_path = os.path.normpath(
avalon.pipeline.format_template_with_optional_keys(
context, template
)
)
except KeyError:
# Template references unavailable data
return (None, None)
return (os.path.normpath(path), sequence_path)
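# Illustrative behaviour (assumed template): for a template ending in
# "{subset}.{frame}.exr", the first fill yields a concrete frame path,
# while the second replaces the frame value with "__sequence_splitter__"
# so delete_only_repre_files can split the file name into head/tail
# around the frame number and match it against clique collections.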
def register(session, plugins_presets={}):
'''Register plugin. Called when used as a plugin.'''
DeleteOldVersions(session, plugins_presets).register()

View file

@ -0,0 +1,350 @@
import os
import requests
import errno
import json
from bson.objectid import ObjectId
from pype.ftrack import BaseAction
from pype.ftrack.lib import (
get_project_from_entity,
get_avalon_entities_for_assetversion
)
from pypeapp import Anatomy
from pype.ftrack.lib.io_nonsingleton import DbConnector
class StoreThumbnailsToAvalon(BaseAction):
# Action identifier
identifier = "store.thubmnail.to.avalon"
# Action label
label = "Pype Admin"
# Action variant
variant = "- Store Thumbnails to avalon"
# Action description
description = 'Store AssetVersion thumbnails to avalon'
# roles that are allowed to register this action
role_list = ["Pypeclub", "Administrator", "Project Manager"]
icon = '{}/ftrack/action_icons/PypeAdmin.svg'.format(
os.environ.get('PYPE_STATICS_SERVER', '')
)
thumbnail_key = "AVALON_THUMBNAIL_ROOT"
db_con = DbConnector()
def discover(self, session, entities, event):
for entity in entities:
if entity.entity_type.lower() == "assetversion":
return True
return False
def launch(self, session, entities, event):
# DEBUG LINE
# root_path = r"C:\Users\jakub.trllo\Desktop\Tests\ftrack_thumbnails"
user = session.query(
"User where username is '{0}'".format(session.api_user)
).one()
action_job = session.create("Job", {
"user": user,
"status": "running",
"data": json.dumps({
"description": "Storing thumbnails to avalon."
})
})
session.commit()
thumbnail_roots = os.environ.get(self.thumbnail_key)
if not thumbnail_roots:
msg = "`{}` environment is not set".format(self.thumbnail_key)
action_job["status"] = "failed"
session.commit()
self.log.warning(msg)
return {
"success": False,
"message": msg
}
existing_thumbnail_root = None
for path in thumbnail_roots.split(os.pathsep):
if os.path.exists(path):
existing_thumbnail_root = path
break
if existing_thumbnail_root is None:
msg = (
"Can't access paths, set in `{}` ({})"
).format(self.thumbnail_key, thumbnail_roots)
action_job["status"] = "failed"
session.commit()
self.log.warning(msg)
return {
"success": False,
"message": msg
}
project = get_project_from_entity(entities[0])
project_name = project["full_name"]
anatomy = Anatomy(project_name)
if "publish" not in anatomy.templates:
msg = "Anatomy does not have set publish key!"
action_job["status"] = "failed"
session.commit()
self.log.warning(msg)
return {
"success": False,
"message": msg
}
if "thumbnail" not in anatomy.templates["publish"]:
msg = (
"There is not set \"thumbnail\""
" template in Antomy for project \"{}\""
).format(project_name)
action_job["status"] = "failed"
session.commit()
self.log.warning(msg)
return {
"success": False,
"message": msg
}
example_template_data = {
"_id": "ID",
"thumbnail_root": "THUBMNAIL_ROOT",
"thumbnail_type": "THUMBNAIL_TYPE",
"ext": ".EXT",
"project": {
"name": "PROJECT_NAME",
"code": "PROJECT_CODE"
},
"asset": "ASSET_NAME",
"subset": "SUBSET_NAME",
"version": "VERSION_NAME",
"hierarchy": "HIERARCHY"
}
tmp_filled = anatomy.format_all(example_template_data)
thumbnail_result = tmp_filled["publish"]["thumbnail"]
if not thumbnail_result.solved:
missing_keys = thumbnail_result.missing_keys
invalid_types = thumbnail_result.invalid_types
submsg = ""
if missing_keys:
submsg += "Missing keys: {}".format(", ".join(
["\"{}\"".format(key) for key in missing_keys]
))
if invalid_types:
items = []
for key, value in invalid_types.items():
items.append("{}{}".format(str(key), str(value)))
submsg += "Invalid types: {}".format(", ".join(items))
msg = (
"Thumbnail Anatomy template expects more keys than action"
" can offer. {}"
).format(submsg)
action_job["status"] = "failed"
session.commit()
self.log.warning(msg)
return {
"success": False,
"message": msg
}
thumbnail_template = anatomy.templates["publish"]["thumbnail"]
self.db_con.install()
for entity in entities:
# Skip if entity is not AssetVersion (should never happen, but..)
if entity.entity_type.lower() != "assetversion":
continue
# Skip if AssetVersion doesn't have a thumbnail
thumbnail_ent = entity["thumbnail"]
if thumbnail_ent is None:
self.log.debug((
"Skipping. AssetVersion don't "
"have set thumbnail. {}"
).format(entity["id"]))
continue
avalon_ents_result = get_avalon_entities_for_assetversion(
entity, self.db_con
)
version_full_path = (
"Asset: \"{project_name}/{asset_path}\""
" | Subset: \"{subset_name}\""
" | Version: \"{version_name}\""
).format(**avalon_ents_result)
version = avalon_ents_result["version"]
if not version:
self.log.warning((
"AssetVersion does not have version in avalon. {}"
).format(version_full_path))
continue
thumbnail_id = version["data"].get("thumbnail_id")
if thumbnail_id:
self.log.info((
"AssetVersion skipped, already has thubmanil set. {}"
).format(version_full_path))
continue
# Get thumbnail extension
file_ext = thumbnail_ent["file_type"]
if not file_ext.startswith("."):
file_ext = ".{}".format(file_ext)
avalon_project = avalon_ents_result["project"]
avalon_asset = avalon_ents_result["asset"]
hierarchy = ""
parents = avalon_asset["data"].get("parents") or []
if parents:
hierarchy = "/".join(parents)
# Prepare anatomy template fill data
# 1. Create new id for thumbnail entity
thumbnail_id = ObjectId()
template_data = {
"_id": str(thumbnail_id),
"thumbnail_root": existing_thumbnail_root,
"thumbnail_type": "thumbnail",
"ext": file_ext,
"project": {
"name": avalon_project["name"],
"code": avalon_project["data"].get("code")
},
"asset": avalon_ents_result["asset_name"],
"subset": avalon_ents_result["subset_name"],
"version": avalon_ents_result["version_name"],
"hierarchy": hierarchy
}
anatomy_filled = anatomy.format(template_data)
thumbnail_path = anatomy_filled["publish"]["thumbnail"]
thumbnail_path = thumbnail_path.replace("..", ".")
thumbnail_path = os.path.normpath(thumbnail_path)
downloaded = False
for loc in (thumbnail_ent.get("component_locations") or []):
res_id = loc.get("resource_identifier")
if not res_id:
continue
thumbnail_url = self.get_thumbnail_url(res_id)
if self.download_file(thumbnail_url, thumbnail_path):
downloaded = True
break
if not downloaded:
self.log.warning(
"Could not download thumbnail for {}".format(
version_full_path
)
)
continue
# Clean template data from keys that are dynamic
template_data.pop("_id")
template_data.pop("thumbnail_root")
thumbnail_entity = {
"_id": thumbnail_id,
"type": "thumbnail",
"schema": "pype:thumbnail-1.0",
"data": {
"template": thumbnail_template,
"template_data": template_data
}
}
# Create thumbnail entity
self.db_con.insert_one(thumbnail_entity)
self.log.debug(
"Creating entity in database {}".format(str(thumbnail_entity))
)
# Set thumbnail id for version
self.db_con.update_one(
{"_id": version["_id"]},
{"$set": {"data.thumbnail_id": thumbnail_id}}
)
self.db_con.update_one(
{"_id": avalon_asset["_id"]},
{"$set": {"data.thumbnail_id": thumbnail_id}}
)
action_job["status"] = "done"
session.commit()
return True
def get_thumbnail_url(self, resource_identifier, size=None):
# TODO use ftrack_api method instead (find a way to use it)
url_string = (
u'{url}/component/thumbnail?id={id}&username={username}'
u'&apiKey={apiKey}'
)
url = url_string.format(
url=self.session.server_url,
id=resource_identifier,
username=self.session.api_user,
apiKey=self.session.api_key
)
if size:
url += u'&size={0}'.format(size)
return url
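# Illustrative result (hypothetical server/user/key values):
# https://studio.ftrackapp.com/component/thumbnail?id=<resource-id>
# &username=john&apiKey=<key>&size=300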
def download_file(self, source_url, dst_file_path):
dir_path = os.path.dirname(dst_file_path)
try:
os.makedirs(dir_path)
except OSError as exc:
if exc.errno != errno.EEXIST:
self.log.warning(
"Could not create folder: \"{}\"".format(dir_path)
)
return False
self.log.debug(
"Downloading file \"{}\" -> \"{}\"".format(
source_url, dst_file_path
)
)
file_open = open(dst_file_path, "wb")
try:
file_open.write(requests.get(source_url).content)
except Exception:
self.log.warning(
"Download of image `{}` failed.".format(source_url)
)
return False
finally:
file_open.close()
return True
def register(session, plugins_presets={}):
StoreThumbnailsToAvalon(session, plugins_presets).register()

View file

@ -0,0 +1,188 @@
from pype.ftrack import BaseEvent
class FirstVersionStatus(BaseEvent):
# WARNING Priority MUST be higher
# than handler in `event_version_to_task_statuses.py`
priority = 200
keys_enum = ["task", "task_type"]
# This should be set with presets
task_status_map = []
# EXAMPLE of `task_status_map`
__example_status_map__ = [{
# `key` specifies where to look for the name (one of `keys_enum`)
# By default is set to "task"
"key": "task",
# specification of the name
"name": "compositing",
# Status to set to the asset version
"status": "Blocking"
}]
def register(self, *args, **kwargs):
result = super(FirstVersionStatus, self).register(*args, **kwargs)
valid_task_status_map = []
for item in self.task_status_map:
key = (item.get("key") or "task").lower()
name = (item.get("name") or "").lower()
status = (item.get("status") or "").lower()
if not (key and name and status):
self.log.warning((
"Invalid item in Task -> Status mapping. {}"
).format(str(item)))
continue
if key not in self.keys_enum:
expected_msg = ""
last_key_idx = len(self.keys_enum) - 1
for idx, enum_key in enumerate(self.keys_enum):
if idx == 0:
joining_part = "`{}`"
elif idx == last_key_idx:
joining_part = " or `{}`"
else:
joining_part = ", `{}`"
expected_msg += joining_part.format(enum_key)
self.log.warning((
"Invalid key `{}`. Expected: {}."
).format(key, expected_msg))
continue
valid_task_status_map.append({
"key": key,
"name": name,
"status": status
})
self.task_status_map = valid_task_status_map
if not self.task_status_map:
self.log.warning((
"Event handler `{}` don't have set presets."
).format(self.__class__.__name__))
return result
def launch(self, session, event):
"""Set task's status for first created Asset Version."""
if not self.task_status_map:
return
entities_info = self.filter_event_ents(event)
if not entities_info:
return
entity_ids = []
for entity_info in entities_info:
entity_ids.append(entity_info["entityId"])
joined_entity_ids = ",".join(
["\"{}\"".format(entity_id) for entity_id in entity_ids]
)
asset_versions = session.query(
"AssetVersion where id in ({})".format(joined_entity_ids)
).all()
asset_version_statuses = None
project_schema = None
for asset_version in asset_versions:
task_entity = asset_version["task"]
found_item = None
for item in self.task_status_map:
if (
item["key"] == "task" and
task_entity["name"].lower() != item["name"]
):
continue
elif (
item["key"] == "task_type" and
task_entity["type"]["name"].lower() != item["name"]
):
continue
found_item = item
break
if not found_item:
continue
if project_schema is None:
project_schema = task_entity["project"]["project_schema"]
# Get all available statuses for Task
if asset_version_statuses is None:
statuses = project_schema.get_statuses("AssetVersion")
# map lowered status name with it's object
asset_version_statuses = {
status["name"].lower(): status for status in statuses
}
ent_path = "/".join(
[ent["name"] for ent in task_entity["link"]] +
[
str(asset_version["asset"]["name"]),
str(asset_version["version"])
]
)
new_status = asset_version_statuses.get(found_item["status"])
if not new_status:
self.log.warning(
"AssetVersion doesn't have status `{}`.".format(found_item["status"])
)
continue
try:
asset_version["status"] = new_status
session.commit()
self.log.debug("[ {} ] Status updated to [ {} ]".format(
ent_path, new_status['name']
))
except Exception:
session.rollback()
self.log.warning(
"[ {} ] Status couldn't be set.".format(ent_path),
exc_info=True
)
def filter_event_ents(self, event):
filtered_ents = []
for entity in event["data"].get("entities", []):
# Care only about add actions
if entity["action"] != "add":
continue
# Filter AssetVersions
if entity["entityType"] != "assetversion":
continue
entity_changes = entity.get("changes") or {}
# Check if version of Asset Version is `1`
version_num = entity_changes.get("version", {}).get("new")
if version_num != 1:
continue
# Skip if AssetVersion doesn't have a task
task_id = entity_changes.get("taskid", {}).get("new")
if not task_id:
continue
filtered_ents.append(entity)
return filtered_ents
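# Illustrative entity payload (hypothetical ids) that passes this filter:
# {
#     "action": "add",
#     "entityType": "assetversion",
#     "entityId": "<asset-version-id>",
#     "changes": {
#         "version": {"new": 1, "old": None},
#         "taskid": {"new": "<task-id>", "old": None}
#     }
# }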
def register(session, plugins_presets):
'''Register plugin. Called when used as a plugin.'''
FirstVersionStatus(session, plugins_presets).register()

View file

@ -3,6 +3,7 @@ import collections
import copy
import queue
import time
import datetime
import atexit
import traceback
@ -47,9 +48,39 @@ class SyncToAvalonEvent(BaseEvent):
def __init__(self, session, plugins_presets={}):
'''Expects a ftrack_api.Session instance'''
# Debug settings
# - time expiration in seconds
self.debug_print_time_expiration = 5 * 60
# - store current time
self.debug_print_time = datetime.datetime.now()
# - store synchronize entity types to be able to use
# only entityTypes in interest instead of filtering by ignored
self.debug_sync_types = collections.defaultdict(list)
# Set processing session to not use global
self.set_process_session(session)
super().__init__(session, plugins_presets)
def debug_logs(self):
"""This is debug method for printing small debugs messages. """
now_datetime = datetime.datetime.now()
delta = now_datetime - self.debug_print_time
if delta.total_seconds() < self.debug_print_time_expiration:
return
self.debug_print_time = now_datetime
known_types_items = []
for entityType, entity_type in self.debug_sync_types.items():
ent_types_msg = ", ".join(entity_type)
known_types_items.append(
"<{}> ({})".format(entityType, ent_types_msg)
)
known_entityTypes = ", ".join(known_types_items)
self.log.debug(
"DEBUG MESSAGE: Known types {}".format(known_entityTypes)
)
@property
def cur_project(self):
if self._cur_project is None:
@ -482,6 +513,9 @@ class SyncToAvalonEvent(BaseEvent):
if not entity_type or entity_type in self.ignore_ent_types:
continue
if entity_type not in self.debug_sync_types[entityType]:
self.debug_sync_types[entityType].append(entity_type)
action = ent_info["action"]
ftrack_id = ent_info["entityId"]
if isinstance(ftrack_id, list):
@ -571,8 +605,7 @@ class SyncToAvalonEvent(BaseEvent):
if auto_sync is not True:
return True
debug_msg = ""
debug_msg += "Updated: {}".format(len(updated))
debug_msg = "Updated: {}".format(len(updated))
debug_action_map = {
"add": "Created",
"remove": "Removed",
@ -632,6 +665,8 @@ class SyncToAvalonEvent(BaseEvent):
self.ftrack_added = entities_by_action["add"]
self.ftrack_updated = updated
self.debug_logs()
self.log.debug("Synchronization begins")
try:
time_1 = time.time()
@ -1569,7 +1604,7 @@ class SyncToAvalonEvent(BaseEvent):
try:
# Commit changes of mongo_id to empty string
self.process_session.commit()
self.log.debug("Commititng unsetting")
self.log.debug("Committing unsetting")
except Exception:
self.process_session.rollback()
# TODO logging

View file

@ -4,9 +4,13 @@ import signal
import datetime
import subprocess
import socket
import json
import platform
import argparse
import getpass
import atexit
import time
import uuid
import ftrack_api
from pype.ftrack.lib import credentials
@ -63,10 +67,19 @@ def validate_credentials(url, user, api):
)
session.close()
except Exception as e:
print(
'ERROR: Can\'t log into Ftrack with used credentials:'
' Ftrack server: "{}" // Username: {} // API key: {}'
).format(url, user, api)
print("Can't log into Ftrack with used credentials:")
ftrack_cred = {
"Ftrack server": str(url),
"Username": str(user),
"API key": str(api)
}
item_lens = [len(key) + 1 for key in ftrack_cred.keys()]
justify_len = max(*item_lens)
for key, value in ftrack_cred.items():
print("{} {}".format(
(key + ":").ljust(justify_len, " "),
value
))
return False
print('DEBUG: Credentials Username: "{}", API key: "{}" are valid.'.format(
@ -175,6 +188,7 @@ def main_loop(ftrack_url):
otherwise thread will be killed.
"""
os.environ["FTRACK_EVENT_SUB_ID"] = str(uuid.uuid1())
# Get mongo hostname and port for testing mongo connection
mongo_list = ftrack_events_mongo_settings()
mongo_hostname = mongo_list[0]
@ -202,6 +216,13 @@ def main_loop(ftrack_url):
processor_last_failed = datetime.datetime.now()
processor_failed_count = 0
statuser_name = "StorerThread"
statuser_port = 10021
statuser_path = "{}/sub_event_status.py".format(file_path)
statuser_thread = None
statuser_last_failed = datetime.datetime.now()
statuser_failed_count = 0
ftrack_accessible = False
mongo_accessible = False
@ -210,7 +231,7 @@ def main_loop(ftrack_url):
# stop threads on exit
# TODO check if works and args have thread objects!
def on_exit(processor_thread, storer_thread):
def on_exit(processor_thread, storer_thread, statuser_thread):
if processor_thread is not None:
processor_thread.stop()
processor_thread.join()
@ -221,9 +242,27 @@ def main_loop(ftrack_url):
storer_thread.join()
storer_thread = None
if statuser_thread is not None:
statuser_thread.stop()
statuser_thread.join()
statuser_thread = None
atexit.register(
on_exit, processor_thread=processor_thread, storer_thread=storer_thread
on_exit,
processor_thread=processor_thread,
storer_thread=storer_thread,
statuser_thread=statuser_thread
)
system_name, pc_name = platform.uname()[:2]
host_name = socket.gethostname()
main_info = {
"created_at": datetime.datetime.now().strftime("%Y.%m.%d %H:%M:%S"),
"Username": getpass.getuser(),
"Host Name": host_name,
"Host IP": socket.gethostbyname(host_name)
}
main_info_str = json.dumps(main_info)
# Main loop
while True:
# Check if accessible Ftrack and Mongo url
@ -261,6 +300,52 @@ def main_loop(ftrack_url):
printed_ftrack_error = False
printed_mongo_error = False
# ====== STATUSER =======
if statuser_thread is None:
if statuser_failed_count < max_fail_count:
statuser_thread = socket_thread.StatusSocketThread(
statuser_name, statuser_port, statuser_path,
[main_info_str]
)
statuser_thread.start()
elif statuser_failed_count == max_fail_count:
print((
"Statuser failed {}times in row"
" I'll try to run again {}s later"
).format(str(max_fail_count), str(wait_time_after_max_fail)))
statuser_failed_count += 1
elif ((
datetime.datetime.now() - statuser_last_failed
).seconds > wait_time_after_max_fail):
statuser_failed_count = 0
# If thread failed test Ftrack and Mongo connection
elif not statuser_thread.isAlive():
statuser_thread.join()
statuser_thread = None
ftrack_accessible = False
mongo_accessible = False
_statuser_last_failed = datetime.datetime.now()
delta_time = (
_statuser_last_failed - statuser_last_failed
).seconds
if delta_time < min_fail_seconds:
statuser_failed_count += 1
else:
statuser_failed_count = 0
statuser_last_failed = _statuser_last_failed
elif statuser_thread.stop_subprocess:
print("Main process was stopped by action")
on_exit(processor_thread, storer_thread, statuser_thread)
os.kill(os.getpid(), signal.SIGTERM)
return 1
# ====== STORER =======
# Run backup thread which does not require mongo to work
if storer_thread is None:
if storer_failed_count < max_fail_count:
@ -268,6 +353,7 @@ def main_loop(ftrack_url):
storer_name, storer_port, storer_path
)
storer_thread.start()
elif storer_failed_count == max_fail_count:
print((
"Storer failed {}times I'll try to run again {}s later"
@ -295,6 +381,7 @@ def main_loop(ftrack_url):
storer_failed_count = 0
storer_last_failed = _storer_last_failed
# ====== PROCESSOR =======
if processor_thread is None:
if processor_failed_count < max_fail_count:
processor_thread = socket_thread.SocketThread(
@ -336,6 +423,10 @@ def main_loop(ftrack_url):
processor_failed_count = 0
processor_last_failed = _processor_last_failed
if statuser_thread is not None:
statuser_thread.set_process("storer", storer_thread)
statuser_thread.set_process("processor", processor_thread)
time.sleep(1)
@ -446,9 +537,9 @@ def main(argv):
event_paths = kwargs.ftrackeventpaths
if not kwargs.noloadcred:
cred = credentials._get_credentials(True)
cred = credentials.get_credentials(ftrack_url)
username = cred.get('username')
api_key = cred.get('apiKey')
api_key = cred.get('api_key')
if kwargs.ftrackuser:
username = kwargs.ftrackuser
@ -482,7 +573,7 @@ def main(argv):
return 1
if kwargs.storecred:
credentials._save_credentials(username, api_key, True)
credentials.save_credentials(username, api_key, ftrack_url)
# Set Ftrack environments
os.environ["FTRACK_SERVER"] = ftrack_url

View file

@ -100,9 +100,9 @@ class FtrackServer:
log.warning(msg, exc_info=e)
if len(register_functions_dict) < 1:
raise Exception((
"There are no events with register function."
" Registered paths: \"{}\""
log.warning((
"There are no events with `register` function"
" in registered paths: \"{}\""
).format("| ".join(paths)))
# Load presets for setting plugins
@ -122,7 +122,7 @@ class FtrackServer:
else:
register(self.session, plugins_presets=plugins_presets)
if function_counter%7 == 0:
if function_counter % 7 == 0:
time.sleep(0.1)
function_counter += 1
except Exception as exc:

View file

@ -28,6 +28,10 @@ from pypeapp import Logger
from pype.ftrack.lib.custom_db_connector import DbConnector
TOPIC_STATUS_SERVER = "pype.event.server.status"
TOPIC_STATUS_SERVER_RESULT = "pype.event.server.status.result"
def ftrack_events_mongo_settings():
host = None
port = None
@ -123,20 +127,59 @@ def check_ftrack_url(url, log_errors=True):
return url
class StorerEventHub(ftrack_api.event.hub.EventHub):
class SocketBaseEventHub(ftrack_api.event.hub.EventHub):
hearbeat_msg = b"hearbeat"
heartbeat_callbacks = []
def __init__(self, *args, **kwargs):
self.sock = kwargs.pop("sock")
super(StorerEventHub, self).__init__(*args, **kwargs)
super(SocketBaseEventHub, self).__init__(*args, **kwargs)
def _handle_packet(self, code, packet_identifier, path, data):
"""Override `_handle_packet` which extend heartbeat"""
code_name = self._code_name_mapping[code]
if code_name == "heartbeat":
# Reply with heartbeat.
self.sock.sendall(b"storer")
return self._send_packet(self._code_name_mapping['heartbeat'])
for callback in self.heartbeat_callbacks:
callback()
elif code_name == "connect":
self.sock.sendall(self.heartbeat_msg)
return self._send_packet(self._code_name_mapping["heartbeat"])
return super(SocketBaseEventHub, self)._handle_packet(
code, packet_identifier, path, data
)
class StatusEventHub(SocketBaseEventHub):
def _handle_packet(self, code, packet_identifier, path, data):
"""Override `_handle_packet` which extend heartbeat"""
code_name = self._code_name_mapping[code]
if code_name == "connect":
event = ftrack_api.event.base.Event(
topic="pype.status.started",
data={},
source={
"id": self.id,
"user": {"username": self._api_user}
}
)
self._event_queue.put(event)
return super(StatusEventHub, self)._handle_packet(
code, packet_identifier, path, data
)
class StorerEventHub(SocketBaseEventHub):
hearbeat_msg = b"storer"
def _handle_packet(self, code, packet_identifier, path, data):
"""Override `_handle_packet` which extend heartbeat"""
code_name = self._code_name_mapping[code]
if code_name == "connect":
event = ftrack_api.event.base.Event(
topic="pype.storer.started",
data={},
@ -152,7 +195,9 @@ class StorerEventHub(ftrack_api.event.hub.EventHub):
)
class ProcessEventHub(ftrack_api.event.hub.EventHub):
class ProcessEventHub(SocketBaseEventHub):
hearbeat_msg = b"processor"
url, database, table_name = get_ftrack_event_mongo_info()
is_table_created = False
@ -164,7 +209,6 @@ class ProcessEventHub(ftrack_api.event.hub.EventHub):
database_name=self.database,
table_name=self.table_name
)
self.sock = kwargs.pop("sock")
super(ProcessEventHub, self).__init__(*args, **kwargs)
def prepare_dbcon(self):
@ -260,42 +304,10 @@ class ProcessEventHub(ftrack_api.event.hub.EventHub):
code_name = self._code_name_mapping[code]
if code_name == "event":
return
if code_name == "heartbeat":
self.sock.sendall(b"processor")
return self._send_packet(self._code_name_mapping["heartbeat"])
return super()._handle_packet(code, packet_identifier, path, data)
class UserEventHub(ftrack_api.event.hub.EventHub):
def __init__(self, *args, **kwargs):
self.sock = kwargs.pop("sock")
super(UserEventHub, self).__init__(*args, **kwargs)
def _handle_packet(self, code, packet_identifier, path, data):
"""Override `_handle_packet` which extend heartbeat"""
code_name = self._code_name_mapping[code]
if code_name == "heartbeat":
# Reply with heartbeat.
self.sock.sendall(b"hearbeat")
return self._send_packet(self._code_name_mapping['heartbeat'])
elif code_name == "connect":
event = ftrack_api.event.base.Event(
topic="pype.storer.started",
data={},
source={
"id": self.id,
"user": {"username": self._api_user}
}
)
self._event_queue.put(event)
return super(UserEventHub, self)._handle_packet(
code, packet_identifier, path, data
)
class SocketSession(ftrack_api.session.Session):
'''An isolated session for interaction with an ftrack server.'''
def __init__(

View file

@ -3,6 +3,7 @@ import sys
import time
import socket
import threading
import traceback
import subprocess
from pypeapp import Logger
@ -12,13 +13,15 @@ class SocketThread(threading.Thread):
MAX_TIMEOUT = 35
def __init__(self, name, port, filepath):
def __init__(self, name, port, filepath, additional_args=[]):
super(SocketThread, self).__init__()
self.log = Logger().get_logger("SocketThread", "Event Thread")
self.log = Logger().get_logger(self.__class__.__name__)
self.setName(name)
self.name = name
self.port = port
self.filepath = filepath
self.additional_args = additional_args
self.sock = None
self.subproc = None
self.connection = None
@ -53,7 +56,13 @@ class SocketThread(threading.Thread):
)
self.subproc = subprocess.Popen(
[sys.executable, self.filepath, "-port", str(self.port)]
[
sys.executable,
self.filepath,
*self.additional_args,
str(self.port)
],
stdin=subprocess.PIPE
)
# Listen for incoming connections
@ -127,3 +136,52 @@ class SocketThread(threading.Thread):
if data == b"MongoError":
self.mongo_error = True
connection.sendall(data)
class StatusSocketThread(SocketThread):
process_name_mapping = {
b"RestartS": "storer",
b"RestartP": "processor",
b"RestartM": "main"
}
def __init__(self, *args, **kwargs):
self.process_threads = {}
self.stop_subprocess = False
super(StatusSocketThread, self).__init__(*args, **kwargs)
def set_process(self, process_name, thread):
try:
if not self.subproc:
self.process_threads[process_name] = None
return
if (
process_name in self.process_threads and
self.process_threads[process_name] == thread
):
return
self.process_threads[process_name] = thread
self.subproc.stdin.write(
str.encode("reset:{}\r\n".format(process_name))
)
self.subproc.stdin.flush()
except Exception:
print("Could not set thread in StatusSocketThread")
traceback.print_exception(*sys.exc_info())
def _handle_data(self, connection, data):
if not data:
return
process_name = self.process_name_mapping.get(data)
if process_name:
if process_name == "main":
self.stop_subprocess = True
else:
subp = self.process_threads.get(process_name)
if subp:
subp.stop()
connection.sendall(data)
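# Protocol sketch (grounded in the mapping above): the status subprocess
# writes b"RestartS", b"RestartP" or b"RestartM" to this socket; storer
# and processor threads are stopped here and restarted by main_loop,
# while "main" only sets stop_subprocess so main_loop can shut the whole
# service down.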

View file

@ -1,13 +1,59 @@
import os
import sys
import signal
import socket
import datetime
from ftrack_server import FtrackServer
from pype.ftrack.ftrack_server.lib import SocketSession, ProcessEventHub
from pype.ftrack.ftrack_server.lib import (
SocketSession, ProcessEventHub, TOPIC_STATUS_SERVER
)
import ftrack_api
from pypeapp import Logger
log = Logger().get_logger("Event processor")
subprocess_started = datetime.datetime.now()
class SessionFactory:
session = None
def send_status(event):
subprocess_id = event["data"].get("subprocess_id")
if not subprocess_id:
return
if subprocess_id != os.environ["FTRACK_EVENT_SUB_ID"]:
return
session = SessionFactory.session
if not session:
return
new_event_data = {
"subprocess_id": subprocess_id,
"source": "processor",
"status_info": {
"created_at": subprocess_started.strftime("%Y.%m.%d %H:%M:%S")
}
}
new_event = ftrack_api.event.base.Event(
topic="pype.event.server.status.result",
data=new_event_data
)
session.event_hub.publish(new_event)
def register(session):
'''Register the status handler, subscribing to the server status topic.'''
session.event_hub.subscribe(
"topic={}".format(TOPIC_STATUS_SERVER), send_status
)
def main(args):
port = int(args[-1])
@ -24,6 +70,9 @@ def main(args):
session = SocketSession(
auto_connect_event_hub=True, sock=sock, Eventhub=ProcessEventHub
)
register(session)
SessionFactory.session = session
server = FtrackServer("event")
log.debug("Launched Ftrack Event processor")
server.run_server(session)

View file

@ -0,0 +1,436 @@
import os
import sys
import json
import threading
import signal
import socket
import datetime
import ftrack_api
from ftrack_server import FtrackServer
from pype.ftrack.ftrack_server.lib import (
SocketSession, StatusEventHub,
TOPIC_STATUS_SERVER, TOPIC_STATUS_SERVER_RESULT
)
from pypeapp import Logger, config
log = Logger().get_logger("Event storer")
action_identifier = (
"event.server.status" + os.environ["FTRACK_EVENT_SUB_ID"]
)
host_ip = socket.gethostbyname(socket.gethostname())
action_data = {
"label": "Pype Admin",
"variant": "- Event server Status ({})".format(host_ip),
"description": "Get Infromation about event server",
"actionIdentifier": action_identifier,
"icon": "{}/ftrack/action_icons/PypeAdmin.svg".format(
os.environ.get(
"PYPE_STATICS_SERVER",
"http://localhost:{}".format(
config.get_presets().get("services", {}).get(
"rest_api", {}
).get("default_port", 8021)
)
)
)
}
class ObjectFactory:
session = None
status_factory = None
checker_thread = None
last_trigger = None
class Status:
default_item = {
"type": "label",
"value": "Process info is not available at this moment."
}
def __init__(self, name, label, parent):
self.name = name
self.label = label or name
self.parent = parent
self.info = None
self.last_update = None
def update(self, info):
self.last_update = datetime.datetime.now()
self.info = info
def get_delta_string(self, delta):
days, hours, minutes = (
delta.days, delta.seconds // 3600, delta.seconds // 60 % 60
)
delta_items = [
"{}d".format(days),
"{}h".format(hours),
"{}m".format(minutes)
]
# Strip only leading zero units so e.g. "1d 0h 5m" keeps its days
while delta_items and delta_items[0].startswith("0"):
delta_items.pop(0)
delta_items.append("{}s".format(delta.seconds % 60))
return " ".join(delta_items)
def get_items(self):
items = []
last_update = "N/A"
if self.last_update:
delta = datetime.datetime.now() - self.last_update
last_update = "{} ago".format(
self.get_delta_string(delta)
)
last_update = "Updated: {}".format(last_update)
items.append({
"type": "label",
"value": "#{}".format(self.label)
})
items.append({
"type": "label",
"value": "##{}".format(last_update)
})
if not self.info:
if self.info is None:
trigger_info_get()
items.append(self.default_item)
return items
info = {}
for key, value in self.info.items():
if key not in ["created_at:", "created_at"]:
info[key] = value
continue
datetime_value = datetime.datetime.strptime(
value, "%Y.%m.%d %H:%M:%S"
)
delta = datetime.datetime.now() - datetime_value
running_for = self.get_delta_string(delta)
info["Started at"] = "{} [running: {}]".format(value, running_for)
for key, value in info.items():
items.append({
"type": "label",
"value": "<b>{}:</b> {}".format(key, value)
})
return items
class StatusFactory:
note_item = {
"type": "label",
"value": (
"<i>HINT: To refresh data uncheck"
" all checkboxes and hit `Submit` button.</i>"
)
}
splitter_item = {
"type": "label",
"value": "---"
}
def __init__(self, statuses={}):
self.statuses = []
for status in statuses.items():
self.create_status(*status)
def __getitem__(self, key):
return self.get(key)
def get(self, key, default=None):
for status in self.statuses:
if status.name == key:
return status
return default
def is_filled(self):
for status in self.statuses:
if status.info is None:
return False
return True
def create_status(self, name, label):
new_status = Status(name, label, self)
self.statuses.append(new_status)
def process_event_result(self, event):
subprocess_id = event["data"].get("subprocess_id")
if subprocess_id != os.environ["FTRACK_EVENT_SUB_ID"]:
return
source = event["data"]["source"]
data = event["data"]["status_info"]
self.update_status_info(source, data)
def update_status_info(self, process_name, info):
for status in self.statuses:
if status.name == process_name:
status.update(info)
break
def bool_items(self):
items = []
items.append({
"type": "label",
"value": "#Restart process"
})
items.append({
"type": "label",
"value": (
"<i><b>WARNING:</b> Main process may shut down when checked"
" if does not run as a service!</i>"
)
})
name_labels = {}
for status in self.statuses:
name_labels[status.name] = status.label
for name, label in name_labels.items():
items.append({
"type": "boolean",
"value": False,
"label": label,
"name": name
})
return items
def items(self):
items = []
items.append(self.note_item)
items.extend(self.bool_items())
for status in self.statuses:
items.append(self.splitter_item)
items.extend(status.get_items())
return items
def server_activity_validate_user(event):
"""Validate user permissions to show server info."""
session = ObjectFactory.session
username = event["source"].get("user", {}).get("username")
if not username:
return False
user_ent = session.query(
"User where username = \"{}\"".format(username)
).first()
if not user_ent:
return False
role_list = ["Pypeclub", "Administrator"]
for role in user_ent["user_security_roles"]:
if role["security_role"]["name"] in role_list:
return True
return False
def server_activity_discover(event):
"""Discover action in actions menu conditions."""
session = ObjectFactory.session
if session is None:
return
if not server_activity_validate_user(event):
return
return {"items": [action_data]}
def server_activity(event):
session = ObjectFactory.session
if session is None:
msg = "Session is not set. Can't trigger Reset action."
log.warning(msg)
return {
"success": False,
"message": msg
}
if not server_activity_validate_user(event):
return {
"success": False,
"message": "You don't have permissions to see Event server status!"
}
values = event["data"].get("values") or {}
is_checked = False
for value in values.values():
if value:
is_checked = True
break
if not is_checked:
return {
"items": ObjectFactory.status_factory.items(),
"title": "Server current status"
}
session = ObjectFactory.session
if values["main"]:
session.event_hub.sock.sendall(b"RestartM")
return
if values["storer"]:
session.event_hub.sock.sendall(b"RestartS")
if values["processor"]:
session.event_hub.sock.sendall(b"RestartP")
def trigger_info_get():
if ObjectFactory.last_trigger:
delta = datetime.datetime.now() - ObjectFactory.last_trigger
if delta.total_seconds() < 5:
return
ObjectFactory.last_trigger = datetime.datetime.now()
session = ObjectFactory.session
session.event_hub.publish(
ftrack_api.event.base.Event(
topic=TOPIC_STATUS_SERVER,
data={"subprocess_id": os.environ["FTRACK_EVENT_SUB_ID"]}
),
on_error="ignore"
)
def on_start(event):
session = ObjectFactory.session
if session is None:
log.warning("Session is not set. Can't trigger status info get.")
return True
source_id = event.get("source", {}).get("id")
if not source_id or source_id != session.event_hub.id:
return
trigger_info_get()
def register(session):
'''Registers the event, subscribing the discover and launch topics.'''
session.event_hub.subscribe(
"topic=ftrack.action.discover",
server_activity_discover
)
session.event_hub.subscribe("topic=pype.status.started", on_start)
status_launch_subscription = (
"topic=ftrack.action.launch and data.actionIdentifier={}"
).format(action_identifier)
session.event_hub.subscribe(
status_launch_subscription,
server_activity
)
session.event_hub.subscribe(
"topic={}".format(TOPIC_STATUS_SERVER_RESULT),
ObjectFactory.status_factory.process_event_result
)
def heartbeat():
if ObjectFactory.status_factory.is_filled():
return
trigger_info_get()
def main(args):
port = int(args[-1])
server_info = json.loads(args[-2])
# Create a TCP/IP socket
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# Connect the socket to the port where the server is listening
server_address = ("localhost", port)
log.debug("Statuser connected to {} port {}".format(*server_address))
sock.connect(server_address)
sock.sendall(b"CreatedStatus")
# store socket connection object
ObjectFactory.sock = sock
ObjectFactory.status_factory["main"].update(server_info)
_returncode = 0
try:
session = SocketSession(
auto_connect_event_hub=True, sock=sock, Eventhub=StatusEventHub
)
ObjectFactory.session = session
session.event_hub.heartbeat_callbacks.append(heartbeat)
register(session)
server = FtrackServer("event")
log.debug("Launched Ftrack Event statuser")
server.run_server(session, load_files=False)
except Exception:
_returncode = 1
log.error("ServerInfo subprocess crashed", exc_info=True)
finally:
log.debug("Ending. Closing socket.")
sock.close()
return _returncode
class OutputChecker(threading.Thread):
read_input = True
def run(self):
while self.read_input:
for line in sys.stdin:
line = line.rstrip().lower()
if not line.startswith("reset:"):
continue
process_name = line.replace("reset:", "")
ObjectFactory.status_factory.update_status_info(
process_name, None
)
def stop(self):
self.read_input = False
if __name__ == "__main__":
# Register interupt signal
def signal_handler(sig, frame):
print("You pressed Ctrl+C. Process ended.")
ObjectFactory.checker_thread.stop()
sys.exit(0)
signal.signal(signal.SIGINT, signal_handler)
signal.signal(signal.SIGTERM, signal_handler)
status_names = {
"main": "Main process",
"storer": "Event Storer",
"processor": "Event Processor"
}
ObjectFactory.status_factory = StatusFactory(status_names)
checker_thread = OutputChecker()
ObjectFactory.checker_thread = checker_thread
checker_thread.start()
sys.exit(main(sys.argv))

View file

@ -8,14 +8,15 @@ import pymongo
import ftrack_api
from ftrack_server import FtrackServer
from pype.ftrack.ftrack_server.lib import (
SocketSession, StorerEventHub,
get_ftrack_event_mongo_info,
SocketSession,
StorerEventHub,
TOPIC_STATUS_SERVER, TOPIC_STATUS_SERVER_RESULT
)
from pype.ftrack.lib.custom_db_connector import DbConnector
from pypeapp import Logger
log = Logger().get_logger("Event storer")
subprocess_started = datetime.datetime.now()
class SessionFactory:
@ -138,11 +139,42 @@ def trigger_sync(event):
)
def send_status(event):
session = SessionFactory.session
if not session:
return
subprocess_id = event["data"].get("subprocess_id")
if not subprocess_id:
return
if subprocess_id != os.environ["FTRACK_EVENT_SUB_ID"]:
return
new_event_data = {
"subprocess_id": os.environ["FTRACK_EVENT_SUB_ID"],
"source": "storer",
"status_info": {
"created_at": subprocess_started.strftime("%Y.%m.%d %H:%M:%S")
}
}
new_event = ftrack_api.event.base.Event(
topic=TOPIC_STATUS_SERVER_RESULT,
data=new_event_data
)
session.event_hub.publish(new_event)
def register(session):
'''Register event handlers, install the database and subscribe to storer topics.'''
install_db()
session.event_hub.subscribe("topic=*", launch)
session.event_hub.subscribe("topic=pype.storer.started", trigger_sync)
session.event_hub.subscribe(
"topic={}".format(TOPIC_STATUS_SERVER), send_status
)
def main(args):

View file

@ -5,7 +5,7 @@ import socket
import traceback
from ftrack_server import FtrackServer
from pype.ftrack.ftrack_server.lib import SocketSession, UserEventHub
from pype.ftrack.ftrack_server.lib import SocketSession, SocketBaseEventHub
from pypeapp import Logger
@ -28,7 +28,7 @@ def main(args):
try:
session = SocketSession(
auto_connect_event_hub=True, sock=sock, Eventhub=UserEventHub
auto_connect_event_hub=True, sock=sock, Eventhub=SocketBaseEventHub
)
server = FtrackServer("action")
log.debug("Launched User Ftrack Server")

View file

@ -1,6 +1,11 @@
from . import avalon_sync
from .credentials import *
from . import credentials
from .ftrack_app_handler import *
from .ftrack_event_handler import *
from .ftrack_action_handler import *
from .ftrack_base_handler import *
from .lib import (
get_project_from_entity,
get_avalon_entities_for_assetversion
)

View file

@ -2067,9 +2067,10 @@ class SyncEntitiesFactory:
# different hierarchy - can't recreate entity
continue
_vis_parent = str(deleted_entity["data"]["visualParent"])
_vis_parent = deleted_entity["data"]["visualParent"]
if _vis_parent is None:
_vis_parent = self.avalon_project_id
_vis_parent = str(_vis_parent)
ftrack_parent_id = self.avalon_ftrack_mapper[_vis_parent]
self.create_ftrack_ent_from_avalon_ent(
deleted_entity, ftrack_parent_id

View file

@ -2,85 +2,140 @@ import os
import json
import ftrack_api
import appdirs
import getpass
try:
from urllib.parse import urlparse
except ImportError:
from urlparse import urlparse
config_path = os.path.normpath(appdirs.user_data_dir('pype-app', 'pype'))
action_file_name = 'ftrack_cred.json'
event_file_name = 'ftrack_event_cred.json'
action_fpath = os.path.join(config_path, action_file_name)
event_fpath = os.path.join(config_path, event_file_name)
folders = set([os.path.dirname(action_fpath), os.path.dirname(event_fpath)])
CONFIG_PATH = os.path.normpath(appdirs.user_data_dir("pype-app", "pype"))
CREDENTIALS_FILE_NAME = "ftrack_cred.json"
CREDENTIALS_PATH = os.path.join(CONFIG_PATH, CREDENTIALS_FILE_NAME)
CREDENTIALS_FOLDER = os.path.dirname(CREDENTIALS_PATH)
for folder in folders:
if not os.path.isdir(folder):
os.makedirs(folder)
if not os.path.isdir(CREDENTIALS_FOLDER):
os.makedirs(CREDENTIALS_FOLDER)
USER_GETTER = None
def _get_credentials(event=False):
if event:
fpath = event_fpath
else:
fpath = action_fpath
def get_ftrack_hostname(ftrack_server=None):
if not ftrack_server:
ftrack_server = os.environ["FTRACK_SERVER"]
if "//" not in ftrack_server:
ftrack_server = "//" + ftrack_server
return urlparse(ftrack_server).hostname
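# Illustrative: urlparse needs a netloc, so a bare "studio.ftrackapp.com"
# gets a "//" prefix first; both "https://studio.ftrackapp.com" and the
# bare form then resolve to hostname "studio.ftrackapp.com".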
def get_user():
if USER_GETTER:
return USER_GETTER()
return getpass.getuser()
def get_credentials(ftrack_server=None, user=None):
credentials = {}
try:
file = open(fpath, 'r')
credentials = json.load(file)
except Exception:
file = open(fpath, 'w')
if not os.path.exists(CREDENTIALS_PATH):
with open(CREDENTIALS_PATH, "w") as file:
file.write(json.dumps(credentials))
file.close()
return credentials
file.close()
with open(CREDENTIALS_PATH, "r") as file:
content = file.read()
hostname = get_ftrack_hostname(ftrack_server)
if not user:
user = get_user()
content_json = json.loads(content or "{}")
credentials = content_json.get(hostname, {}).get(user) or {}
return credentials
def _save_credentials(username, apiKey, event=False, auto_connect=None):
data = {
'username': username,
'apiKey': apiKey
def save_credentials(ft_user, ft_api_key, ftrack_server=None, user=None):
hostname = get_ftrack_hostname(ftrack_server)
if not user:
user = get_user()
with open(CREDENTIALS_PATH, "r") as file:
content = file.read()
content_json = json.loads(content or "{}")
if hostname not in content_json:
content_json[hostname] = {}
content_json[hostname][user] = {
"username": ft_user,
"api_key": ft_api_key
}
if event:
fpath = event_fpath
if auto_connect is None:
cred = _get_credentials(True)
auto_connect = cred.get('auto_connect', False)
data['auto_connect'] = auto_connect
else:
fpath = action_fpath
# Deprecated keys
if "username" in content_json:
content_json.pop("username")
if "apiKey" in content_json:
content_json.pop("apiKey")
file = open(fpath, 'w')
file.write(json.dumps(data))
file.close()
with open(CREDENTIALS_PATH, "w") as file:
file.write(json.dumps(content_json, indent=4))
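# Illustrative file content after saving (hypothetical values):
# {
#     "studio.ftrackapp.com": {
#         "john": {
#             "username": "john.doe",
#             "api_key": "xxxx-xxxx-xxxx"
#         }
#     }
# }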
def _clear_credentials(event=False):
if event:
fpath = event_fpath
else:
fpath = action_fpath
open(fpath, 'w').close()
_set_env(None, None)
def clear_credentials(ft_user=None, ftrack_server=None, user=None):
if not ft_user:
ft_user = os.environ.get("FTRACK_API_USER")
if not ft_user:
return
hostname = get_ftrack_hostname(ftrack_server)
if not user:
user = get_user()
with open(CREDENTIALS_PATH, "r") as file:
content = file.read()
content_json = json.loads(content or "{}")
if hostname not in content_json:
content_json[hostname] = {}
content_json[hostname].pop(user, None)
with open(CREDENTIALS_PATH, "w") as file:
file.write(json.dumps(content_json))
def _set_env(username, apiKey):
if not username:
username = ''
if not apiKey:
apiKey = ''
os.environ['FTRACK_API_USER'] = username
os.environ['FTRACK_API_KEY'] = apiKey
def set_env(ft_user=None, ft_api_key=None):
os.environ["FTRACK_API_USER"] = ft_user or ""
os.environ["FTRACK_API_KEY"] = ft_api_key or ""
def _check_credentials(username=None, apiKey=None):
def get_env_credentials():
return (
os.environ.get("FTRACK_API_USER"),
os.environ.get("FTRACK_API_KEY")
)
if username and apiKey:
_set_env(username, apiKey)
def check_credentials(ft_user, ft_api_key, ftrack_server=None):
if not ftrack_server:
ftrack_server = os.environ["FTRACK_SERVER"]
if not ft_user or not ft_api_key:
return False
try:
session = ftrack_api.Session()
session = ftrack_api.Session(
server_url=ftrack_server,
api_key=ft_api_key,
api_user=ft_user
)
session.close()
except Exception as e:
except Exception:
return False
return True
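# Minimal sign-in flow sketch using only the helpers above (assumes
# FTRACK_SERVER is set; mirrors the FtrackModule.validate logic further below):
# cred = get_credentials()
# if check_credentials(cred.get("username"), cred.get("api_key")):
#     set_env(cred["username"], cred["api_key"])
# else:
#     pass  # ask the user to sign in, then call save_credentials()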

View file

@ -193,6 +193,8 @@ class AppAction(BaseHandler):
if parents:
hierarchy = os.path.join(*parents)
os.environ["AVALON_HIERARCHY"] = hierarchy
application = avalonlib.get_application(os.environ["AVALON_APP_NAME"])
data = {

View file

@ -49,7 +49,7 @@ class BaseHandler(object):
).format(
str(type(session)),
str(ftrack_api.session.Session),
str(session_processor.ProcessSession)
str(SocketSession)
))
self._session = session

135
pype/ftrack/lib/lib.py Normal file
View file

@ -0,0 +1,135 @@
from bson.objectid import ObjectId
from .avalon_sync import CustAttrIdKey
import avalon.io
def get_project_from_entity(entity):
# TODO add more entities
ent_type_lowered = entity.entity_type.lower()
if ent_type_lowered == "project":
return entity
elif ent_type_lowered == "assetversion":
return entity["asset"]["parent"]["project"]
elif "project" in entity:
return entity["project"]
return None
def get_avalon_entities_for_assetversion(asset_version, db_con=None):
output = {
"success": True,
"message": None,
"project": None,
"project_name": None,
"asset": None,
"asset_name": None,
"asset_path": None,
"subset": None,
"subset_name": None,
"version": None,
"version_name": None,
"representations": None
}
if db_con is None:
db_con = avalon.io
db_con.install()
ft_asset = asset_version["asset"]
subset_name = ft_asset["name"]
version = asset_version["version"]
parent = ft_asset["parent"]
ent_path = "/".join(
[ent["name"] for ent in parent["link"]]
)
project = get_project_from_entity(asset_version)
project_name = project["full_name"]
output["project_name"] = project_name
output["asset_name"] = parent["name"]
output["asset_path"] = ent_path
output["subset_name"] = subset_name
output["version_name"] = version
db_con.Session["AVALON_PROJECT"] = project_name
avalon_project = db_con.find_one({"type": "project"})
output["project"] = avalon_project
if not avalon_project:
output["success"] = False
output["message"] = "Project not synchronized to avalon `{}`".format(
project_name
)
return output
asset_ent = None
asset_mongo_id = parent["custom_attributes"].get(CustAttrIdKey)
if asset_mongo_id:
try:
asset_mongo_id = ObjectId(asset_mongo_id)
asset_ent = db_con.find_one({
"type": "asset",
"_id": asset_mongo_id
})
except Exception:
pass
if not asset_ent:
asset_ent = db_con.find_one({
"type": "asset",
"data.ftrackId": parent["id"]
})
output["asset"] = asset_ent
if not asset_ent:
output["success"] = False
output["message"] = "Not synchronized entity to avalon `{}`".format(
ent_path
)
return output
asset_mongo_id = asset_ent["_id"]
subset_ent = db_con.find_one({
"type": "subset",
"parent": asset_mongo_id,
"name": subset_name
})
output["subset"] = subset_ent
if not subset_ent:
output["success"] = False
output["message"] = (
"Subset `{}` does not exist under Asset `{}`"
).format(subset_name, ent_path)
return output
version_ent = db_con.find_one({
"type": "version",
"name": version,
"parent": subset_ent["_id"]
})
output["version"] = version_ent
if not version_ent:
output["success"] = False
output["message"] = (
"Version `{}` does not exist under Subset `{}` | Asset `{}`"
).format(version, subset_name, ent_path)
return output
repre_ents = list(db_con.find({
"type": "representation",
"parent": version_ent["_id"]
}))
output["representations"] = repre_ents
return output
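# Usage sketch (assumes `asset_version` is an ftrack AssetVersion entity):
# result = get_avalon_entities_for_assetversion(asset_version)
# if not result["success"]:
#     raise RuntimeError(result["message"])
# repres = result["representations"]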

View file

@ -34,29 +34,28 @@ class FtrackModule:
def validate(self):
validation = False
cred = credentials._get_credentials()
try:
if 'username' in cred and 'apiKey' in cred:
validation = credentials._check_credentials(
cred['username'],
cred['apiKey']
)
if validation is False:
self.show_login_widget()
else:
self.show_login_widget()
except Exception as e:
log.error("We are unable to connect to Ftrack: {0}".format(e))
validation = credentials._check_credentials()
if validation is True:
cred = credentials.get_credentials()
ft_user = cred.get("username")
ft_api_key = cred.get("api_key")
validation = credentials.check_credentials(ft_user, ft_api_key)
if validation:
credentials.set_env(ft_user, ft_api_key)
log.info("Connected to Ftrack successfully")
self.loginChange()
else:
log.warning("Please sign in to Ftrack")
self.bool_logged = False
self.set_menu_visibility()
return validation
if not validation and ft_user and ft_api_key:
log.warning(
"Current Ftrack credentials are not valid. {}: {} - {}".format(
str(os.environ.get("FTRACK_SERVER")), ft_user, ft_api_key
)
)
log.info("Please sign in to Ftrack")
self.bool_logged = False
self.show_login_widget()
self.set_menu_visibility()
return validation
@ -67,7 +66,7 @@ class FtrackModule:
self.start_action_server()
def logout(self):
credentials._clear_credentials()
credentials.clear_credentials()
self.stop_action_server()
log.info("Logged out of Ftrack")
@ -307,11 +306,23 @@ class FtrackModule:
except Exception as e:
log.error("During Killing Timer event server: {0}".format(e))
def changed_user(self):
self.stop_action_server()
credentials.set_env()
self.validate()
def process_modules(self, modules):
if 'TimersManager' in modules:
self.timer_manager = modules['TimersManager']
self.timer_manager.add_module(self)
if "UserModule" in modules:
credentials.USER_GETTER = modules["UserModule"].get_user
modules["UserModule"].register_callback_on_user_change(
self.changed_user
)
def start_timer_manager(self, data):
if self.thread_timer is not None:
self.thread_timer.ftrack_start_timer(data)
@ -336,7 +347,7 @@ class FtrackEventsThread(QtCore.QThread):
def __init__(self, parent):
super(FtrackEventsThread, self).__init__()
cred = credentials._get_credentials()
cred = credentials.get_credentials()
self.username = cred['username']
self.user = None
self.last_task = None

View file

@ -204,11 +204,11 @@ class Login_Dialog_ui(QtWidgets.QWidget):
self.setError("{0} {1}".format(msg, " and ".join(missing)))
return
verification = credentials._check_credentials(username, apiKey)
verification = credentials.check_credentials(username, apiKey)
if verification:
credentials._save_credentials(username, apiKey, self.is_event)
credentials._set_env(username, apiKey)
credentials.save_credentials(username, apiKey, self.is_event)
credentials.set_env(username, apiKey)
if self.parent is not None:
self.parent.loginChange()
self._close_widget()
@ -304,11 +304,11 @@ class Login_Dialog_ui(QtWidgets.QWidget):
self._login_server_thread.start(url)
return
verification = credentials._check_credentials(username, apiKey)
verification = credentials.check_credentials(username, apiKey)
if verification is True:
credentials._save_credentials(username, apiKey, self.is_event)
credentials._set_env(username, apiKey)
credentials.save_credentials(username, apiKey, self.is_event)
credentials.set_env(username, apiKey)
if self.parent is not None:
self.parent.loginChange()
self._close_widget()

View file

@ -13,6 +13,62 @@ import avalon
log = logging.getLogger(__name__)
def get_paths_from_environ(env_key, return_first=False):
"""Return existing paths from specific envirnment variable.
:param env_key: Environment key where should look for paths.
:type env_key: str
:param return_first: Return first path on `True`, list of all on `False`.
:type return_first: boolean
Difference when none of paths exists:
- when `return_first` is set to `False` then function returns empty list.
- when `return_first` is set to `True` then function returns `None`.
"""
existing_paths = []
paths = os.environ.get(env_key) or ""
path_items = paths.split(os.pathsep)
for path in path_items:
# Skip empty string
if not path:
continue
# Normalize path
path = os.path.normpath(path)
# Check if path exists
if os.path.exists(path):
# Return path if `return_first` is set to True
if return_first:
return path
# Store path
existing_paths.append(path)
# Return None if none of paths exists
if return_first:
return None
# Return all existing paths from environment variable
return existing_paths
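# Illustrative usage (the "PYPE_HOOKS" variable name is hypothetical): with
# PYPE_HOOKS holding "/a" + os.pathsep + "/b" and only "/b" existing,
# get_paths_from_environ("PYPE_HOOKS") returns ["/b"] and
# get_paths_from_environ("PYPE_HOOKS", return_first=True) returns "/b".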
def get_ffmpeg_tool_path(tool="ffmpeg"):
"""Find path to ffmpeg tool in FFMPEG_PATH paths.
Function looks for the tool in paths set in the FFMPEG_PATH environment
variable. If the tool exists, its full path is returned.
Returns the tool name itself when the tool was not found there (FFmpeg may
still be reachable through the PATH environment variable).
"""
dir_paths = get_paths_from_environ("FFMPEG_PATH")
for dir_path in dir_paths:
for file_name in os.listdir(dir_path):
base, ext = os.path.splitext(file_name)
if base.lower() == tool.lower():
# Return the matched file name so a possible extension is kept
return os.path.join(dir_path, file_name)
return tool
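# Example: with FFMPEG_PATH pointing to a folder that contains "ffprobe.exe",
# get_ffmpeg_tool_path("ffprobe") returns the full path to that file; with no
# match it returns just "ffprobe" and resolution is left to PATH.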
# Underscored name to avoid shadowing the standard library `subprocess` module.
def _subprocess(*args, **kwargs):
"""Convenience method for getting output errors for subprocess."""

View file

@ -8,7 +8,6 @@ from avalon import api as avalon, pipeline, maya
from avalon.maya.pipeline import IS_HEADLESS
from avalon.tools import workfiles
from pyblish import api as pyblish
from pypeapp import config
from ..lib import (
any_outdated
@ -156,6 +155,12 @@ def on_open(_):
from avalon.vendor.Qt import QtWidgets
from ..widgets import popup
cmds.evalDeferred(
"from pype.maya import lib;lib.remove_render_layer_observer()")
cmds.evalDeferred(
"from pype.maya import lib;lib.add_render_layer_observer()")
cmds.evalDeferred(
"from pype.maya import lib;lib.add_render_layer_change_observer()")
# # Update current task for the current scene
# update_task_from_path(cmds.file(query=True, sceneName=True))
@ -194,6 +199,12 @@ def on_new(_):
"""Set project resolution and fps when create a new file"""
avalon.logger.info("Running callback on new..")
with maya.suspended_refresh():
cmds.evalDeferred(
"from pype.maya import lib;lib.remove_render_layer_observer()")
cmds.evalDeferred(
"from pype.maya import lib;lib.add_render_layer_observer()")
cmds.evalDeferred(
"from pype.maya import lib;lib.add_render_layer_change_observer()")
lib.set_context_settings()
@ -218,3 +229,10 @@ def on_task_changed(*args):
# Run
maya.pipeline._on_task_changed()
with maya.suspended_refresh():
lib.set_context_settings()
lib.update_content_on_context_change()
lib.show_message("Context was changed",
("Context was changed to {}".format(
avalon.Session["AVALON_ASSET"])))

View file

@ -2176,18 +2176,29 @@ def load_capture_preset(path=None, data=None):
4: 'nolights'}
for key in preset[id]:
if key == 'high_quality':
temp_options2['multiSampleEnable'] = True
temp_options2['multiSampleCount'] = 8
temp_options2['textureMaxResolution'] = 1024
temp_options2['enableTextureMaxRes'] = True
if preset[id][key] == True:
temp_options2['multiSampleEnable'] = True
temp_options2['multiSampleCount'] = 4
temp_options2['textureMaxResolution'] = 1024
temp_options2['enableTextureMaxRes'] = True
temp_options2['textureMaxResMode'] = 1
else:
temp_options2['multiSampleEnable'] = False
temp_options2['multiSampleCount'] = 4
temp_options2['textureMaxResolution'] = 512
temp_options2['enableTextureMaxRes'] = True
temp_options2['textureMaxResMode'] = 0
if key == 'ssaoEnable':
if preset[id][key] == True:
temp_options2['ssaoEnable'] = True
else:
temp_options2['ssaoEnable'] = False
if key == 'alphaCut':
temp_options2['transparencyAlgorithm'] = 5
temp_options2['transparencyQuality'] = 1
if key == 'ssaoEnable':
temp_options2['ssaoEnable'] = True
if key == 'headsUpDisplay':
temp_options['headsUpDisplay'] = True
@ -2388,15 +2399,19 @@ class shelf():
if not item.get('command'):
item['command'] = self._null
if item['type'] == 'button':
self.addButon(item['name'], command=item['command'])
self.addButon(item['name'],
command=item['command'],
icon=item['icon'])
if item['type'] == 'menuItem':
self.addMenuItem(item['parent'],
item['name'],
command=item['command'])
command=item['command'],
icon=item['icon'])
if item['type'] == 'subMenu':
self.addMenuItem(item['parent'],
item['name'],
command=item['command'])
command=item['command'],
icon=item['icon'])
def addButon(self, label, icon="commandButton.png",
command=_null, doubleCommand=_null):
@ -2406,7 +2421,8 @@ class shelf():
'''
cmds.setParent(self.name)
if icon:
icon = self.iconPath + icon
icon = os.path.join(self.iconPath, icon)
print(icon)
cmds.shelfButton(width=37, height=37, image=icon, label=label,
command=command, dcc=doubleCommand,
imageOverlayLabel=label, olb=self.labelBackground,
@ -2418,7 +2434,8 @@ class shelf():
double click command and image.
'''
if icon:
icon = self.iconPath + icon
icon = os.path.join(self.iconPath, icon)
print(icon)
return cmds.menuItem(p=parent, label=label, c=command, i=icon or "")
def addSubMenu(self, parent, label, icon=None):
@ -2427,7 +2444,8 @@ class shelf():
the specified parent popup menu.
'''
if icon:
icon = self.iconPath + icon
icon = os.path.join(self.iconPath, icon)
print(icon)
return cmds.menuItem(p=parent, label=label, i=icon, subMenu=1)
def _cleanOldShelf(self):
@ -2441,3 +2459,177 @@ class shelf():
cmds.deleteUI(each)
else:
cmds.shelfLayout(self.name, p="ShelfLayout")
def _get_render_instance():
objectset = cmds.ls("*.id", long=True, type="objectSet",
recursive=True, objectsOnly=True)
for objset in objectset:
if not cmds.attributeQuery("id", node=objset, exists=True):
continue
id_attr = "{}.id".format(objset)
if cmds.getAttr(id_attr) != "pyblish.avalon.instance":
continue
has_family = cmds.attributeQuery("family",
node=objset,
exists=True)
if not has_family:
continue
if cmds.getAttr("{}.family".format(objset)) == 'rendering':
return objset
return None
renderItemObserverList = []
class RenderSetupListObserver:
def listItemAdded(self, item):
print("--- adding ...")
self._add_render_layer(item)
def listItemRemoved(self, item):
print("--- removing ...")
self._remove_render_layer(item.name())
def _add_render_layer(self, item):
render_set = _get_render_instance()
layer_name = item.name()
if not render_set:
return
members = cmds.sets(render_set, query=True) or []
if not "LAYER_{}".format(layer_name) in members:
print(" - creating set for {}".format(layer_name))
set = cmds.sets(n="LAYER_{}".format(layer_name), empty=True)
cmds.sets(set, forceElement=render_set)
rio = RenderSetupItemObserver(item)
print("- adding observer for {}".format(item.name()))
item.addItemObserver(rio.itemChanged)
renderItemObserverList.append(rio)
def _remove_render_layer(self, layer_name):
render_set = _get_render_instance()
if not render_set:
return
members = cmds.sets(render_set, query=True)
if "LAYER_{}".format(layer_name) in members:
print(" - removing set for {}".format(layer_name))
cmds.delete("LAYER_{}".format(layer_name))
class RenderSetupItemObserver():
def __init__(self, item):
self.item = item
self.original_name = item.name()
def itemChanged(self, *args, **kwargs):
if self.item.name() == self.original_name:
return
render_set = _get_render_instance()
if not render_set:
return
members = cmds.sets(render_set, query=True)
if "LAYER_{}".format(self.original_name) in members:
print(" <> renaming {} to {}".format(self.original_name,
self.item.name()))
cmds.rename("LAYER_{}".format(self.original_name),
"LAYER_{}".format(self.item.name()))
self.original_name = self.item.name()
renderListObserver = RenderSetupListObserver()
def add_render_layer_change_observer():
import maya.app.renderSetup.model.renderSetup as renderSetup
rs = renderSetup.instance()
render_set = _get_render_instance()
if not render_set:
return
members = cmds.sets(render_set, query=True)
layers = rs.getRenderLayers()
for layer in layers:
if "LAYER_{}".format(layer.name()) in members:
rio = RenderSetupItemObserver(layer)
print("- adding observer for {}".format(layer.name()))
layer.addItemObserver(rio.itemChanged)
renderItemObserverList.append(rio)
def add_render_layer_observer():
import maya.app.renderSetup.model.renderSetup as renderSetup
print("> adding renderSetup observer ...")
rs = renderSetup.instance()
rs.addListObserver(renderListObserver)
def remove_render_layer_observer():
import maya.app.renderSetup.model.renderSetup as renderSetup
print("< removing renderSetup observer ...")
rs = renderSetup.instance()
try:
rs.removeListObserver(renderListObserver)
except ValueError:
# no observer set yet
pass
def update_content_on_context_change():
"""
This will update scene content to match new asset on context change
"""
scene_sets = cmds.listSets(allSets=True)
new_asset = api.Session["AVALON_ASSET"]
new_data = lib.get_asset()["data"]
for s in scene_sets:
try:
if cmds.getAttr("{}.id".format(s)) == "pyblish.avalon.instance":
attr = cmds.listAttr(s)
print(s)
if "asset" in attr:
print(" - setting asset to: [ {} ]".format(new_asset))
cmds.setAttr("{}.asset".format(s),
new_asset, type="string")
if "frameStart" in attr:
cmds.setAttr("{}.frameStart".format(s),
new_data["frameStart"])
if "frameEnd" in attr:
cmds.setAttr("{}.frameEnd".format(s),
new_data["frameEnd"],)
except ValueError:
pass
def show_message(title, msg):
from avalon.vendor.Qt import QtWidgets
from ..widgets import message_window
# Find maya main window
top_level_widgets = {w.objectName(): w for w in
QtWidgets.QApplication.topLevelWidgets()}
parent = top_level_widgets.get("MayaWindow", None)
if parent is not None:
message_window.message(title=title, message=msg, parent=parent)

View file

@ -432,7 +432,7 @@ def add_deadline_tab(node):
node.addKnob(nuke.Tab_Knob("Deadline"))
knob = nuke.Int_Knob("deadlineChunkSize", "Chunk Size")
knob.setValue(1)
knob.setValue(0)
node.addKnob(knob)
knob = nuke.Int_Knob("deadlinePriority", "Priority")

View file

@ -5,13 +5,6 @@ from pypeapp import Logger
from avalon.api import Session
from hiero.ui import findMenuAction
# this way we secure compatibility between nuke 10 and 11
try:
from PySide.QtGui import *
except Exception:
from PySide2.QtGui import *
from PySide2.QtWidgets import *
from .tags import add_tags_from_presets
from .lib import (
@ -50,14 +43,8 @@ def install():
"""
# here is the best place to add menu
from avalon.tools import (
creator,
publish,
cbloader,
cbsceneinventory,
contextmanager,
libraryloader
)
from avalon.tools import publish
from avalon.vendor.Qt import QtGui
menu_name = os.environ['AVALON_LABEL']
@ -67,94 +54,53 @@ def install():
self._change_context_menu = context_label
# Grab Hiero's MenuBar
M = hiero.ui.menuBar()
try:
check_made_menu = findMenuAction(menu_name)
except Exception:
pass
check_made_menu = None
if not check_made_menu:
menu = M.addMenu(menu_name)
# Grab Hiero's MenuBar
menu = hiero.ui.menuBar().addMenu(menu_name)
else:
menu = check_made_menu.menu()
actions = [
{
'parent': context_label,
'action': QAction('Set Context', None),
'function': contextmanager.show,
'icon': QIcon('icons:Position.png')
},
"separator",
{
'action': QAction("Work Files...", None),
'function': set_workfiles,
'icon': QIcon('icons:Position.png')
},
{
'action': QAction('Create Default Tags..', None),
'function': add_tags_from_presets,
'icon': QIcon('icons:Position.png')
},
"separator",
# {
# 'action': QAction('Create...', None),
# 'function': creator.show,
# 'icon': QIcon('icons:ColorAdd.png')
# },
# {
# 'action': QAction('Load...', None),
# 'function': cbloader.show,
# 'icon': QIcon('icons:CopyRectangle.png')
# },
{
'action': QAction('Publish...', None),
'function': publish.show,
'icon': QIcon('icons:Output.png')
},
# {
# 'action': QAction('Manage...', None),
# 'function': cbsceneinventory.show,
# 'icon': QIcon('icons:ModifyMetaData.png')
# },
{
'action': QAction('Library...', None),
'function': libraryloader.show,
'icon': QIcon('icons:ColorAdd.png')
},
"separator",
{
'action': QAction('Reload pipeline...', None),
'function': reload_config,
'icon': QIcon('icons:ColorAdd.png')
}]
context_label_action = menu.addAction(context_label)
context_label_action.setEnabled(False)
# Create menu items
for a in actions:
add_to_menu = menu
if isinstance(a, dict):
# create action
for k in a.keys():
if 'parent' in k:
submenus = [sm for sm in a[k].split('/')]
submenu = None
for sm in submenus:
if submenu:
submenu.addMenu(sm)
else:
submenu = menu.addMenu(sm)
add_to_menu = submenu
if 'action' in k:
action = a[k]
elif 'function' in k:
action.triggered.connect(a[k])
elif 'icon' in k:
action.setIcon(a[k])
menu.addSeparator()
# add action to menu
add_to_menu.addAction(action)
hiero.ui.registerAction(action)
elif isinstance(a, str):
add_to_menu.addSeparator()
workfiles_action = menu.addAction("Work Files...")
workfiles_action.setIcon(QtGui.QIcon("icons:Position.png"))
workfiles_action.triggered.connect(set_workfiles)
default_tags_action = menu.addAction("Create Default Tags...")
default_tags_action.setIcon(QtGui.QIcon("icons:Position.png"))
default_tags_action.triggered.connect(add_tags_from_presets)
menu.addSeparator()
publish_action = menu.addAction("Publish...")
publish_action.setIcon(QtGui.QIcon("icons:Output.png"))
publish_action.triggered.connect(
lambda *args: publish.show(hiero.ui.mainWindow())
)
menu.addSeparator()
reload_action = menu.addAction("Reload pipeline...")
reload_action.setIcon(QtGui.QIcon("icons:ColorAdd.png"))
reload_action.triggered.connect(reload_config)
# Is this required?
hiero.ui.registerAction(context_label_action)
hiero.ui.registerAction(workfiles_action)
hiero.ui.registerAction(default_tags_action)
hiero.ui.registerAction(publish_action)
hiero.ui.registerAction(reload_action)
self.context_label_action = context_label_action
self.workfile_actions = workfiles_action
self.default_tags_action = default_tags_action
self.publish_action = publish_action
self.reload_action = reload_action

View file

@ -73,5 +73,5 @@ def current_file():
return normalised
def work_root():
return os.path.normpath(api.Session["AVALON_WORKDIR"]).replace("\\", "/")
def work_root(session):
return os.path.normpath(session["AVALON_WORKDIR"]).replace("\\", "/")

View file

@ -35,7 +35,18 @@ class CollectFtrackApi(pyblish.api.ContextPlugin):
# Find project entity
project_query = 'Project where full_name is "{0}"'.format(project_name)
self.log.debug("Project query: < {0} >".format(project_query))
project_entity = session.query(project_query).one()
project_entity = list(session.query(project_query).all())
if len(project_entity) == 0:
raise AssertionError(
"Project \"{0}\" not found in Ftrack.".format(project_name)
)
# QUESTION Is it possible for this to happen?
elif len(project_entity) > 1:
raise AssertionError((
"Found more than one project with name \"{0}\" in Ftrack."
).format(project_name))
project_entity = project_entity[0]
self.log.debug("Project found: {0}".format(project_entity))
# Find asset entity
@ -44,7 +55,25 @@ class CollectFtrackApi(pyblish.api.ContextPlugin):
' and name is "{1}"'
).format(project_entity["id"], asset_name)
self.log.debug("Asset entity query: < {0} >".format(entity_query))
asset_entity = session.query(entity_query).one()
asset_entities = []
for entity in session.query(entity_query).all():
# Skip tasks
if entity.entity_type.lower() != "task":
asset_entities.append(entity)
if len(asset_entities) == 0:
raise AssertionError((
"Entity with name \"{0}\" not found"
" in Ftrack project \"{1}\"."
).format(asset_name, project_name))
elif len(asset_entities) > 1:
raise AssertionError((
"Found more than one entity with name \"{0}\""
" in Ftrack project \"{1}\"."
).format(asset_name, project_name))
asset_entity = asset_entities[0]
self.log.debug("Asset found: {0}".format(asset_entity))
# Find task entity if task is set
@ -53,8 +82,15 @@ class CollectFtrackApi(pyblish.api.ContextPlugin):
'Task where name is "{0}" and parent_id is "{1}"'
).format(task_name, asset_entity["id"])
self.log.debug("Task entity query: < {0} >".format(task_query))
task_entity = session.query(task_query).one()
self.log.debug("Task entity found: {0}".format(task_entity))
task_entity = session.query(task_query).first()
if not task_entity:
self.log.warning(
"Task entity with name \"{0}\" was not found.".format(
task_name
)
)
else:
self.log.debug("Task entity found: {0}".format(task_entity))
else:
task_entity = None

View file

@ -12,13 +12,32 @@ class IntegrateFtrackNote(pyblish.api.InstancePlugin):
families = ["ftrack"]
optional = True
# Can be set in presets:
# - Allows only `intent` and `comment` keys
note_with_intent_template = "{intent}: {comment}"
# - note label must exist in Ftrack
note_labels = []
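# Hypothetical preset override (illustrative values for the two attributes
# documented above; label names must exist in Ftrack):
# note_with_intent_template = "[{intent}] {comment}"
# note_labels = ["For Client"]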
def process(self, instance):
comment = (instance.context.data.get("comment") or "").strip()
if not comment:
self.log.info("Comment is not set.")
return
self.log.debug("Comment is set to {}".format(comment))
self.log.debug("Comment is set to `{}`".format(comment))
intent = instance.context.data.get("intent")
if intent:
msg = "Intent is set to `{}` and was added to comment.".format(
intent
)
comment = self.note_with_intent_template.format(**{
"intent": intent,
"comment": comment
})
else:
msg = "Intent is not set."
self.log.debug(msg)
asset_versions_key = "ftrackIntegratedAssetVersions"
asset_versions = instance.data.get(asset_versions_key)
@ -37,8 +56,22 @@ class IntegrateFtrackNote(pyblish.api.InstancePlugin):
)
)
labels = []
if self.note_labels:
all_labels = session.query("NoteLabel").all()
labels_by_low_name = {lab["name"].lower(): lab for lab in all_labels}
for _label in self.note_labels:
label = labels_by_low_name.get(_label.lower())
if not label:
self.log.warning(
"Note Label `{}` was not found.".format(_label)
)
continue
labels.append(label)
for asset_version in asset_versions:
asset_version.create_note(comment, author=user)
asset_version.create_note(comment, author=user, labels=labels)
try:
session.commit()

View file

@ -1,60 +0,0 @@
import os
import subprocess
import pyblish.api
CREATE_NO_WINDOW = 0x08000000
def deadline_command(cmd):
# Find Deadline
path = os.environ.get("DEADLINE_PATH", None)
assert path is not None, "Variable 'DEADLINE_PATH' must be set"
executable = os.path.join(path, "deadlinecommand")
if os.name == "nt":
executable += ".exe"
assert os.path.exists(
executable), "Deadline executable not found at %s" % executable
assert cmd, "Must have a command"
query = (executable, cmd)
process = subprocess.Popen(query, stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=True,
creationflags=CREATE_NO_WINDOW)
out, err = process.communicate()
return out
class CollectDeadlineUser(pyblish.api.ContextPlugin):
"""Retrieve the local active Deadline user"""
order = pyblish.api.CollectorOrder + 0.499
label = "Deadline User"
hosts = ['maya', 'fusion', 'nuke']
families = [
"renderlayer",
"saver.deadline",
"imagesequence"
]
def process(self, context):
"""Inject the current working file"""
user = None
try:
user = deadline_command("GetCurrentUserName").strip()
except:
self.log.warning("Deadline command seems not to be working")
if not user:
self.log.warning("No Deadline user found. "
"Do you have Deadline installed?")
return
self.log.info("Found Deadline user: {}".format(user))
context.data['deadlineUser'] = user

View file

@ -1,127 +0,0 @@
import os
import json
import re
import pyblish.api
import clique
class CollectJSON(pyblish.api.ContextPlugin):
""" Collecting the json files in current directory. """
label = "JSON"
order = pyblish.api.CollectorOrder
hosts = ['maya']
def version_get(self, string, prefix):
""" Extract version information from filenames. Code from Foundry"s
nukescripts.version_get()
"""
regex = r"[/_.]{}\d+".format(prefix)
matches = re.findall(regex, string, re.IGNORECASE)
if not len(matches):
msg = "No '_{}#' found in '{}'".format(prefix, string)
raise ValueError(msg)
return matches[-1:][0][1], re.search(r"\d+", matches[-1:][0]).group()
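# Example: version_get("shot010_v003.0001.exr", "v") returns ("v", "003") --
# the last "[/_.]v<digits>" occurrence in the filename wins.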
def process(self, context):
current_file = context.data.get("currentFile", '')
# Skip if current file is not a directory
if not os.path.isdir(current_file):
return
# Traverse directory and collect collections from json files.
instances = []
for root, dirs, files in os.walk(current_file):
for f in files:
if f.endswith(".json"):
with open(os.path.join(root, f)) as json_data:
for data in json.load(json_data):
instances.append(data)
# Validate instance based on supported families.
valid_families = ["img", "cache", "scene", "mov"]
valid_data = []
for data in instances:
families = data.get("families", []) + [data["family"]]
family_type = list(set(families) & set(valid_families))
if family_type:
valid_data.append(data)
# Create existing output instance.
scanned_dirs = []
files = []
collections = []
for data in valid_data:
if "collection" not in data.keys():
continue
if data["collection"] is None:
continue
instance_collection = clique.parse(data["collection"])
try:
version = self.version_get(
os.path.basename(instance_collection.format()), "v"
)[1]
except KeyError:
# Ignore any output that is not versioned
continue
# Getting collections of all previous versions and current version
for count in range(1, int(version) + 1):
# Generate collection
version_string = "v" + str(count).zfill(len(version))
head = instance_collection.head.replace(
"v" + version, version_string
)
collection = clique.Collection(
head=head.replace("\\", "/"),
padding=instance_collection.padding,
tail=instance_collection.tail
)
collection.version = count
# Scan collection directory
scan_dir = os.path.dirname(collection.head)
if scan_dir not in scanned_dirs and os.path.exists(scan_dir):
for f in os.listdir(scan_dir):
file_path = os.path.join(scan_dir, f)
files.append(file_path.replace("\\", "/"))
scanned_dirs.append(scan_dir)
# Match files to collection and add
for f in files:
if collection.match(f):
collection.add(f)
# Skip if no files were found in the collection
if not list(collection):
continue
# Skip existing collections
if collection in collections:
continue
instance = context.create_instance(name=data["name"])
version = self.version_get(
os.path.basename(collection.format()), "v"
)[1]
basename = os.path.basename(collection.format())
instance.data["label"] = "{0} - {1}".format(
data["name"], basename
)
families = data["families"] + [data["family"]]
family = list(set(valid_families) & set(families))[0]
instance.data["family"] = family
instance.data["families"] = ["output"]
instance.data["collection"] = collection
instance.data["version"] = int(version)
instance.data["publish"] = False
collections.append(collection)

View file

@ -1,88 +0,0 @@
import os
import re
import copy
from avalon import io
from pprint import pprint
import pyblish.api
from avalon import api
texture_extensions = ['.tif', '.tiff', '.jpg', '.jpeg', '.tx', '.png', '.tga',
'.psd', '.dpx', '.hdr', '.hdri', '.exr', '.sxr', '.psb']
class CollectTextures(pyblish.api.ContextPlugin):
"""
Gather all texture files in the working directory, traversing the whole structure.
"""
order = pyblish.api.CollectorOrder
targets = ["texture"]
label = "Textures"
hosts = ["shell"]
def process(self, context):
if os.environ.get("PYPE_PUBLISH_PATHS"):
paths = os.environ["PYPE_PUBLISH_PATHS"].split(os.pathsep)
else:
cwd = context.get("workspaceDir", os.getcwd())
paths = [cwd]
textures = []
for path in paths:
for dir, subdir, files in os.walk(path):
textures.extend(
os.path.join(dir, x) for x in files
if os.path.splitext(x)[1].lower() in texture_extensions)
self.log.info("Got {} texture files.".format(len(textures)))
if len(textures) < 1:
raise RuntimeError("no textures found.")
asset_name = os.environ.get("AVALON_ASSET")
family = 'texture'
subset = 'Main'
project = io.find_one({'type': 'project'})
asset = io.find_one({
'type': 'asset',
'name': asset_name
})
context.data['project'] = project
context.data['asset'] = asset
for tex in textures:
self.log.info("Processing: {}".format(tex))
name, ext = os.path.splitext(tex)
simple_name = os.path.splitext(os.path.basename(tex))[0]
instance = context.create_instance(simple_name)
instance.data.update({
"subset": subset,
"asset": asset_name,
"label": simple_name,
"name": simple_name,
"family": family,
"families": [family, 'ftrack'],
})
instance.data['destination_list'] = list()
instance.data['representations'] = list()
instance.data['source'] = 'pype command'
texture_data = {}
texture_data['anatomy_template'] = 'texture'
texture_data["ext"] = ext
texture_data["label"] = simple_name
texture_data["name"] = "texture"
texture_data["stagingDir"] = os.path.dirname(tex)
texture_data["files"] = os.path.basename(tex)
texture_data["thumbnail"] = False
texture_data["preview"] = False
instance.data["representations"].append(texture_data)
self.log.info("collected instance: {}".format(instance.data))
self.log.info("All collected.")

View file

@ -1,51 +0,0 @@
import os
import json
import datetime
import time
import pyblish.api
import clique
class ExtractJSON(pyblish.api.ContextPlugin):
""" Extract all instances to a serialized json file. """
order = pyblish.api.IntegratorOrder
label = "JSON"
hosts = ['maya']
def process(self, context):
workspace = os.path.join(
os.path.dirname(context.data["currentFile"]), "workspace",
"instances")
if not os.path.exists(workspace):
os.makedirs(workspace)
output_data = []
for instance in context:
self.log.debug(instance['data'])
data = {}
for key, value in instance.data.iteritems():
if isinstance(value, clique.Collection):
value = value.format()
try:
json.dumps(value)
data[key] = value
except KeyError:
msg = "\"{0}\"".format(value)
msg += " in instance.data[\"{0}\"]".format(key)
msg += " could not be serialized."
self.log.debug(msg)
output_data.append(data)
timestamp = datetime.datetime.fromtimestamp(
time.time()).strftime("%Y%m%d-%H%M%S")
filename = timestamp + "_instances.json"
with open(os.path.join(workspace, filename), "w") as outfile:
outfile.write(json.dumps(output_data, indent=4, sort_keys=True))

View file

@ -1,86 +0,0 @@
import os
import pyblish.api
import subprocess
import clique
class ExtractQuicktimeEXR(pyblish.api.InstancePlugin):
"""Resolve any dependency issies
This plug-in resolves any paths which, if not updated might break
the published file.
The order of families is important, when working with lookdev you want to
first publish the texture, update the texture paths in the nodes and then
publish the shading network. Same goes for file dependent assets.
"""
label = "Extract Quicktime"
order = pyblish.api.ExtractorOrder
families = ["imagesequence", "render", "write", "source"]
hosts = ["shell"]
def process(self, instance):
# fps = instance.data.get("fps")
# start = instance.data.get("startFrame")
# stagingdir = os.path.normpath(instance.data.get("stagingDir"))
#
# collected_frames = os.listdir(stagingdir)
# collections, remainder = clique.assemble(collected_frames)
#
# full_input_path = os.path.join(
# stagingdir, collections[0].format('{head}{padding}{tail}')
# )
# self.log.info("input {}".format(full_input_path))
#
# filename = collections[0].format('{head}')
# if not filename.endswith('.'):
# filename += "."
# movFile = filename + "mov"
# full_output_path = os.path.join(stagingdir, movFile)
#
# self.log.info("output {}".format(full_output_path))
#
# config_data = instance.context.data['output_repre_config']
#
# proj_name = os.environ.get('AVALON_PROJECT', '__default__')
# profile = config_data.get(proj_name, config_data['__default__'])
#
# input_args = []
# # overrides output file
# input_args.append("-y")
# # preset's input data
# input_args.extend(profile.get('input', []))
# # necessary input data
# input_args.append("-start_number {}".format(start))
# input_args.append("-i {}".format(full_input_path))
# input_args.append("-framerate {}".format(fps))
#
# output_args = []
# # preset's output data
# output_args.extend(profile.get('output', []))
# # output filename
# output_args.append(full_output_path)
# mov_args = [
# "ffmpeg",
# " ".join(input_args),
# " ".join(output_args)
# ]
# subprocess_mov = " ".join(mov_args)
# sub_proc = subprocess.Popen(subprocess_mov)
# sub_proc.wait()
#
# if not os.path.isfile(full_output_path):
# raise("Quicktime wasn't created succesfully")
#
# if "representations" not in instance.data:
# instance.data["representations"] = []
#
# representation = {
# 'name': 'mov',
# 'ext': 'mov',
# 'files': movFile,
# "stagingDir": stagingdir,
# "preview": True
# }
# instance.data["representations"].append(representation)

View file

@ -1,153 +0,0 @@
import os
import subprocess
import pyblish.api
import filelink
class ExtractTranscode(pyblish.api.InstancePlugin):
"""Extracts review movie from image sequence.
Offset to get images to transcode from.
"""
order = pyblish.api.ExtractorOrder + 0.1
label = "Transcode"
optional = True
families = ["review"]
def find_previous_index(self, index, indexes):
"""Finds the closest previous value in a list from a value."""
data = []
for i in indexes:
if i >= index:
continue
data.append(index - i)
return indexes[data.index(min(data))]
def process(self, instance):
if "collection" in instance.data.keys():
self.process_image(instance)
if "output_path" in instance.data.keys():
self.process_movie(instance)
def process_image(self, instance):
collection = instance.data.get("collection", [])
if not list(collection):
msg = "Skipping \"{0}\" because no frames was found."
self.log.warning(msg.format(instance.data["name"]))
return
# Temporary fill the missing frames.
missing = collection.holes()
if not collection.is_contiguous():
pattern = collection.format("{head}{padding}{tail}")
for index in missing.indexes:
dst = pattern % index
src_index = self.find_previous_index(
index, list(collection.indexes)
)
src = pattern % src_index
filelink.create(src, dst)
# Generate args.
# Has to be yuv420p for compatibility with older players and smooth
# playback. This does come with a sacrifice of more visible banding
# issues.
# -crf 18 is visually lossless.
args = [
"ffmpeg", "-y",
"-start_number", str(min(collection.indexes)),
"-framerate", str(instance.context.data["framerate"]),
"-i", collection.format("{head}{padding}{tail}"),
"-pix_fmt", "yuv420p",
"-crf", "18",
"-timecode", "00:00:00:01",
"-vframes",
str(max(collection.indexes) - min(collection.indexes) + 1),
"-vf",
"scale=trunc(iw/2)*2:trunc(ih/2)*2",
]
if instance.data.get("baked_colorspace_movie"):
args = [
"ffmpeg", "-y",
"-i", instance.data["baked_colorspace_movie"],
"-pix_fmt", "yuv420p",
"-crf", "18",
"-timecode", "00:00:00:01",
]
args.append(collection.format("{head}.mov"))
self.log.debug("Executing args: {0}".format(args))
# Can't use subprocess.check_output because Houdini doesn't like that.
p = subprocess.Popen(
args,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
stdin=subprocess.PIPE,
cwd=os.path.dirname(args[-1])
)
output = p.communicate()[0]
# Remove temporary frame fillers
for f in missing:
os.remove(f)
if p.returncode != 0:
raise ValueError(output)
self.log.debug(output)
def process_movie(self, instance):
# Generate args.
# Has to be yuv420p for compatibility with older players and smooth
# playback. This does come with a sacrifice of more visible banding
# issues.
args = [
"ffmpeg", "-y",
"-i", instance.data["output_path"],
"-pix_fmt", "yuv420p",
"-crf", "18",
"-timecode", "00:00:00:01",
]
if instance.data.get("baked_colorspace_movie"):
args = [
"ffmpeg", "-y",
"-i", instance.data["baked_colorspace_movie"],
"-pix_fmt", "yuv420p",
"-crf", "18",
"-timecode", "00:00:00:01",
]
split = os.path.splitext(instance.data["output_path"])
args.append(split[0] + "_review.mov")
self.log.debug("Executing args: {0}".format(args))
# Can't use subprocess.check_output because Houdini doesn't like that.
p = subprocess.Popen(
args,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
stdin=subprocess.PIPE,
cwd=os.path.dirname(args[-1])
)
output = p.communicate()[0]
if p.returncode != 0:
raise ValueError(output)
self.log.debug(output)

View file

@ -1,10 +1,24 @@
"""
"""Collect Anatomy and global anatomy data.
Requires:
None
session -> AVALON_TASK
projectEntity, assetEntity -> collect_avalon_entities *(pyblish.api.CollectorOrder)
username -> collect_pype_user *(pyblish.api.CollectorOrder + 0.001)
datetimeData -> collect_datetime_data *(pyblish.api.CollectorOrder)
Optional:
comment -> collect_comment *(pyblish.api.CollectorOrder)
intent -> collected in pyblish-lite
Provides:
context -> anatomy (pypeapp.Anatomy)
context -> anatomyData
"""
import os
import json
from avalon import api, lib
from pypeapp import Anatomy
import pyblish.api
@ -12,9 +26,52 @@ import pyblish.api
class CollectAnatomy(pyblish.api.ContextPlugin):
"""Collect Anatomy into Context"""
order = pyblish.api.CollectorOrder
order = pyblish.api.CollectorOrder + 0.002
label = "Collect Anatomy"
def process(self, context):
context.data['anatomy'] = Anatomy()
self.log.info("Anatomy templates collected...")
root_path = api.registered_root()
task_name = api.Session["AVALON_TASK"]
project_entity = context.data["projectEntity"]
asset_entity = context.data["assetEntity"]
project_name = project_entity["name"]
context.data["anatomy"] = Anatomy(project_name)
self.log.info(
"Anatomy object collected for project \"{}\".".format(project_name)
)
hierarchy_items = asset_entity["data"]["parents"]
hierarchy = ""
if hierarchy_items:
hierarchy = os.path.join(*hierarchy_items)
context_data = {
"root": root_path,
"project": {
"name": project_name,
"code": project_entity["data"].get("code")
},
"asset": asset_entity["name"],
"hierarchy": hierarchy.replace("\\", "/"),
"task": task_name,
"username": context.data["user"]
}
avalon_app_name = os.environ.get("AVALON_APP_NAME")
if avalon_app_name:
application_def = lib.get_application(avalon_app_name)
app_dir = application_def.get("application_dir")
if app_dir:
context_data["app"] = app_dir
datetime_data = context.data.get("datetimeData") or {}
context_data.update(datetime_data)
context.data["anatomyData"] = context_data
self.log.info("Global anatomy Data collected")
self.log.debug(json.dumps(context_data, indent=4))

View file

@ -0,0 +1,47 @@
"""Collect Anatomy and global anatomy data.
Requires:
session -> AVALON_PROJECT, AVALON_ASSET
Provides:
context -> projectEntity - project entity from database
context -> assetEntity - asset entity from database
"""
from avalon import io, api
import pyblish.api
class CollectAvalonEntities(pyblish.api.ContextPlugin):
"""Collect Anatomy into Context"""
order = pyblish.api.CollectorOrder
label = "Collect Avalon Entities"
def process(self, context):
io.install()
project_name = api.Session["AVALON_PROJECT"]
asset_name = api.Session["AVALON_ASSET"]
project_entity = io.find_one({
"type": "project",
"name": project_name
})
assert project_entity, (
"Project '{0}' was not found."
).format(project_name)
self.log.debug("Collected Project entity \"{}\"".format(project_entity))
asset_entity = io.find_one({
"type": "asset",
"name": asset_name,
"parent": project_entity["_id"]
})
assert asset_entity, (
"No asset found by the name '{0}' in project '{1}'"
).format(asset_name, project_name)
self.log.debug("Collected Asset entity \"{}\"".format(asset_entity))
context.data["projectEntity"] = project_entity
context.data["assetEntity"] = asset_entity

View file

@ -1,450 +0,0 @@
"""
Requires:
environment -> PYPE_PUBLISH_PATHS
context -> workspaceDir
Provides:
context -> user (str)
instance -> new instance
"""
import os
import re
import copy
import json
import pyblish.api
from avalon import api
def collect(root,
regex=None,
exclude_regex=None,
frame_start=None,
frame_end=None):
"""Collect sequence collections in root"""
from avalon.vendor import clique
files = list()
for filename in os.listdir(root):
# Must have extension
ext = os.path.splitext(filename)[1]
if not ext:
continue
# Only files
if not os.path.isfile(os.path.join(root, filename)):
continue
# Include and exclude regex
if regex and not re.search(regex, filename):
continue
if exclude_regex and re.search(exclude_regex, filename):
continue
files.append(filename)
# Match collections
# Support filenames like: projectX_shot01_0010.tiff with this regex
pattern = r"(?P<index>(?P<padding>0*)\d+)\.\D+\d?$"
collections, remainder = clique.assemble(files,
patterns=[pattern],
minimum_items=1)
# Exclude any frames outside start and end frame.
for collection in collections:
for index in list(collection.indexes):
if frame_start is not None and index < frame_start:
collection.indexes.discard(index)
continue
if frame_end is not None and index > frame_end:
collection.indexes.discard(index)
continue
# Keep only collections that have at least a single frame
collections = [c for c in collections if c.indexes]
return collections, remainder
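# Usage sketch (illustrative arguments): gather sequences such as
# "projectX_shot01_1001.tiff" from a render folder, clamped to a shot range.
# collections, remainder = collect(
#     root="/renders/shot01",
#     regex=r"\.tiff$",
#     frame_start=1001,
#     frame_end=1100,
# )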
class CollectRenderedFrames(pyblish.api.ContextPlugin):
"""Gather file sequences from working directory
When "FILESEQUENCE" environment variable is set these paths (folders or
.json files) are parsed for image sequences. Otherwise the current
working directory is searched for file sequences.
The json configuration may have the optional keys:
asset (str): The asset to publish to. If not provided fall back to
api.Session["AVALON_ASSET"]
subset (str): The subset to publish to. If not provided the sequence's
head (up to frame number) will be used.
frame_start (int): The start frame for the sequence
frame_end (int): The end frame for the sequence
root (str): The path to collect from (can be relative to the .json)
regex (str): A regex for the sequence filename
exclude_regex (str): A regex for filename to exclude from collection
metadata (dict): Custom metadata for instance.data["metadata"]
"""
order = pyblish.api.CollectorOrder - 0.0001
targets = ["filesequence"]
label = "RenderedFrames"
def process(self, context):
pixel_aspect = 1
resolution_width = 1920
resolution_height = 1080
lut_path = None
slate_frame = None
families_data = None
baked_mov_path = None
subset = None
version = None
frame_start = 0
frame_end = 0
if os.environ.get("PYPE_PUBLISH_PATHS"):
paths = os.environ["PYPE_PUBLISH_PATHS"].split(os.pathsep)
self.log.info("Collecting paths: {}".format(paths))
else:
cwd = context.get("workspaceDir", os.getcwd())
paths = [cwd]
for path in paths:
self.log.info("Loading: {}".format(path))
if path.endswith(".json"):
# Search using .json configuration
with open(path, "r") as f:
try:
data = json.load(f)
except Exception as exc:
self.log.error(
"Error loading json: "
"{} - Exception: {}".format(path, exc)
)
raise
cwd = os.path.dirname(path)
root_override = data.get("root")
frame_start = int(data.get("frameStart"))
frame_end = int(data.get("frameEnd"))
subset = data.get("subset")
if root_override:
if os.path.isabs(root_override):
root = root_override
else:
root = os.path.join(cwd, root_override)
else:
root = cwd
if data.get("ftrack"):
f = data.get("ftrack")
os.environ["FTRACK_API_USER"] = f["FTRACK_API_USER"]
os.environ["FTRACK_API_KEY"] = f["FTRACK_API_KEY"]
os.environ["FTRACK_SERVER"] = f["FTRACK_SERVER"]
metadata = data.get("metadata")
if metadata:
session = metadata.get("session")
if session:
self.log.info("setting session using metadata")
api.Session.update(session)
os.environ.update(session)
instance = metadata.get("instance")
if instance:
pixel_aspect = instance.get("pixelAspect", 1)
resolution_width = instance.get("resolutionWidth", 1920)
resolution_height = instance.get("resolutionHeight", 1080)
lut_path = instance.get("lutPath", None)
baked_mov_path = instance.get("bakeRenderPath")
families_data = instance.get("families")
slate_frame = instance.get("slateFrame")
version = instance.get("version")
else:
# Search in directory
data = dict()
root = path
self.log.info("Collecting: {}".format(root))
regex = data.get("regex")
if baked_mov_path:
regex = "^{}.*$".format(subset)
if regex:
self.log.info("Using regex: {}".format(regex))
if "slate" in families_data:
frame_start -= 1
collections, remainder = collect(
root=root,
regex=regex,
exclude_regex=data.get("exclude_regex"),
frame_start=frame_start,
frame_end=frame_end,
)
self.log.info("Found collections: {}".format(collections))
self.log.info("Found remainder: {}".format(remainder))
fps = data.get("fps", 25)
# adding publish comment and intent to context
context.data["comment"] = data.get("comment", "")
context.data["intent"] = data.get("intent", "")
if data.get("user"):
context.data["user"] = data["user"]
if data.get("version"):
version = data.get("version")
# Get family from the data
families = data.get("families", ["render"])
if "ftrack" not in families:
families.append("ftrack")
if families_data and "render2d" in families_data:
families.append("render2d")
if families_data and "slate" in families_data:
families.append("slate")
families.append("slate.farm")
if data.get("attachTo"):
# we need to attach found collections to existing
# subset version as review representation.
for attach in data.get("attachTo"):
self.log.info(
"Attaching render {}:v{}".format(
attach["subset"], attach["version"]))
instance = context.create_instance(
attach["subset"])
instance.data.update(
{
"name": attach["subset"],
"version": attach["version"],
"family": 'review',
"families": ['review', 'ftrack'],
"asset": data.get(
"asset", api.Session["AVALON_ASSET"]),
"stagingDir": root,
"frameStart": frame_start,
"frameEnd": frame_end,
"fps": fps,
"source": data.get("source", ""),
"pixelAspect": pixel_aspect,
"resolutionWidth": resolution_width,
"resolutionHeight": resolution_height
})
if "representations" not in instance.data:
instance.data["representations"] = []
for collection in collections:
self.log.info(
" - adding representation: {}".format(
str(collection))
)
ext = collection.tail.lstrip(".")
representation = {
"name": ext,
"ext": "{}".format(ext),
"files": list(collection),
"stagingDir": root,
"anatomy_template": "render",
"fps": fps,
"tags": ["review"],
}
instance.data["representations"].append(
representation)
elif subset:
# if we have a subset - add all collections and the known
# remainder as representations
# take out review family if mov path
# this will make imagesequence none review
if baked_mov_path:
self.log.info(
"Baked mov is available {}".format(
baked_mov_path))
families.append("review")
if session['AVALON_APP'] == "maya":
families.append("review")
self.log.info(
"Adding representations to subset {}".format(
subset))
instance = context.create_instance(subset)
data = copy.deepcopy(data)
instance.data.update(
{
"name": subset,
"family": families[0],
"families": list(families),
"subset": subset,
"asset": data.get(
"asset", api.Session["AVALON_ASSET"]),
"stagingDir": root,
"frameStart": frame_start,
"frameEnd": frame_end,
"fps": fps,
"source": data.get("source", ""),
"pixelAspect": pixel_aspect,
"resolutionWidth": resolution_width,
"resolutionHeight": resolution_height,
"slateFrame": slate_frame,
"version": version
}
)
if "representations" not in instance.data:
instance.data["representations"] = []
for collection in collections:
self.log.info(" - {}".format(str(collection)))
ext = collection.tail.lstrip(".")
if "slate" in instance.data["families"]:
frame_start += 1
representation = {
"name": ext,
"ext": "{}".format(ext),
"files": list(collection),
"frameStart": frame_start,
"frameEnd": frame_end,
"stagingDir": root,
"anatomy_template": "render",
"fps": fps,
"tags": ["review"] if not baked_mov_path else ["thumb-nuke"],
}
instance.data["representations"].append(
representation)
# filter out only relevant mov in case baked available
self.log.debug("__ remainder {}".format(remainder))
if baked_mov_path:
remainder = [r for r in remainder
if r in baked_mov_path]
self.log.debug("__ remainder {}".format(remainder))
# process remainders
for rem in remainder:
# add only known types to representation
if rem.split(".")[-1] in ['mov', 'jpg', 'mp4']:
self.log.info(" . {}".format(rem))
if "slate" in instance.data["families"]:
frame_start += 1
tags = ["review"]
if baked_mov_path:
tags.append("delete")
representation = {
"name": rem.split(".")[-1],
"ext": "{}".format(rem.split(".")[-1]),
"files": rem,
"stagingDir": root,
"frameStart": frame_start,
"anatomy_template": "render",
"fps": fps,
"tags": tags
}
instance.data["representations"].append(
representation)
else:
# we have no subset so we take every collection and create one
# from it
for collection in collections:
instance = context.create_instance(str(collection))
self.log.info("Creating subset from: %s" % str(collection))
# Ensure each instance gets a unique reference to the data
data = copy.deepcopy(data)
# If no subset provided, get it from collection's head
subset = data.get("subset", collection.head.rstrip("_. "))
# If no start or end frame provided, get it from collection
indices = list(collection.indexes)
start = int(data.get("frameStart", indices[0]))
end = int(data.get("frameEnd", indices[-1]))
ext = list(collection)[0].split(".")[-1]
if "review" not in families:
families.append("review")
instance.data.update(
{
"name": str(collection),
"family": families[0], # backwards compatibility
"families": list(families),
"subset": subset,
"asset": data.get(
"asset", api.Session["AVALON_ASSET"]),
"stagingDir": root,
"frameStart": start,
"frameEnd": end,
"fps": fps,
"source": data.get("source", ""),
"pixelAspect": pixel_aspect,
"resolutionWidth": resolution_width,
"resolutionHeight": resolution_height,
"version": version
}
)
if lut_path:
instance.data.update({"lutPath": lut_path})
instance.append(collection)
instance.context.data["fps"] = fps
if "representations" not in instance.data:
instance.data["representations"] = []
representation = {
"name": ext,
"ext": "{}".format(ext),
"files": list(collection),
"frameStart": start,
"frameEnd": end,
"stagingDir": root,
"anatomy_template": "render",
"fps": fps,
"tags": ["review"],
}
instance.data["representations"].append(representation)
# temporary ... allow only beauty on ftrack
if session['AVALON_APP'] == "maya":
AOV_filter = ['beauty']
for aov in AOV_filter:
if aov not in instance.data['subset']:
instance.data['families'].remove('review')
instance.data['families'].remove('ftrack')
representation["tags"].remove('review')
self.log.debug(
"__ representations {}".format(
instance.data["representations"]))
self.log.debug(
"__ instance.data {}".format(instance.data))

View file

@ -0,0 +1,127 @@
"""
Requires:
context -> anatomyData
context -> projectEntity
context -> assetEntity
instance -> asset
instance -> subset
instance -> family
Optional:
instance -> version
instance -> resolutionWidth
instance -> resolutionHeight
instance -> fps
Provides:
instance -> projectEntity
instance -> assetEntity
instance -> anatomyData
instance -> version
instance -> latestVersion
"""
import copy
import json
from avalon import io
import pyblish.api
class CollectInstanceAnatomyData(pyblish.api.InstancePlugin):
"""Fill templates with data needed for publish"""
order = pyblish.api.CollectorOrder + 0.49
label = "Collect instance anatomy data"
def process(self, instance):
# get all the stuff from the database
anatomy_data = copy.deepcopy(instance.context.data["anatomyData"])
project_entity = instance.context.data["projectEntity"]
context_asset_entity = instance.context.data["assetEntity"]
asset_name = instance.data["asset"]
# Check if asset name is the same as what is in context
# - they may be different, e.g. in NukeStudio
if context_asset_entity["name"] == asset_name:
asset_entity = context_asset_entity
else:
asset_entity = io.find_one({
"type": "asset",
"name": asset_name,
"parent": project_entity["_id"]
})
subset_name = instance.data["subset"]
version_number = instance.data.get("version")
latest_version = None
if asset_entity:
subset_entity = io.find_one({
"type": "subset",
"name": subset_name,
"parent": asset_entity["_id"]
})
if subset_entity is None:
self.log.debug("Subset entity does not exist yet.")
else:
version_entity = io.find_one(
{
"type": "version",
"parent": subset_entity["_id"]
},
sort=[("name", -1)]
)
if version_entity:
latest_version = version_entity["name"]
# If version is not specified for instance or context
if version_number is None:
# TODO we should be able to change default version by studio
# preferences (like start with version number `0`)
version_number = 1
# use latest version (+1) if already any exist
if latest_version is not None:
version_number += int(latest_version)
anatomy_updates = {
"asset": asset_name,
"family": instance.data["family"],
"subset": subset_name,
"version": version_number
}
task_name = instance.data.get("task")
if task_name:
anatomy_updates["task"] = task_name
# Version is not taken from the context since it may be instance specific
anatomy_data.update(anatomy_updates)
resolution_width = instance.data.get("resolutionWidth")
if resolution_width:
anatomy_data["resolution_width"] = resolution_width
resolution_height = instance.data.get("resolutionHeight")
if resolution_height:
anatomy_data["resolution_height"] = resolution_height
pixel_aspect = instance.data.get("pixelAspect")
if pixel_aspect:
anatomy_data["pixel_aspect"] = float("{:0.2f}".format(pixel_aspect))
fps = instance.data.get("fps")
if fps:
anatomy_data["fps"] = float("{:0.2f}".format(fps))
instance.data["projectEntity"] = project_entity
instance.data["assetEntity"] = asset_entity
instance.data["anatomyData"] = anatomy_data
instance.data["latestVersion"] = latest_version
# TODO should the version number be set here?
instance.data["version"] = version_number
self.log.info("Instance anatomy Data collected")
self.log.debug(json.dumps(anatomy_data, indent=4))
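For illustration, the anatomy data assembled by this collector could end up looking like the sketch below for a hypothetical instance; all names and values are examples, not taken from a real project.

anatomy_data = {
    "project": {"name": "myProject", "code": "mp"},  # inherited from context anatomyData
    "asset": "sh010",
    "family": "render",
    "subset": "renderMain",
    "task": "compositing",
    "version": 3,  # latest existing version (2) + 1
    "resolution_width": 1920,
    "resolution_height": 1080,
    "fps": 25.0,
}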

View file

@ -1,24 +0,0 @@
"""
Requires:
None
Provides:
context -> projectData
"""
import pyblish.api
import pype.api as pype
class CollectProjectData(pyblish.api.ContextPlugin):
"""Collecting project data from avalon db"""
label = "Collect Project Data"
order = pyblish.api.CollectorOrder - 0.1
hosts = ["nukestudio"]
def process(self, context):
# get project data from avalon db
context.data["projectData"] = pype.get_project()["data"]
return

View file

@ -0,0 +1,94 @@
import os
import json
import pyblish.api
from avalon import api
from pypeapp import PypeLauncher
class CollectRenderedFiles(pyblish.api.ContextPlugin):
"""
This collector tries to find json files in the provided
`PYPE_PUBLISH_DATA` paths. Those files _MUST_ share the same context.
"""
order = pyblish.api.CollectorOrder - 0.0001
targets = ["filesequence"]
label = "Collect rendered frames"
_context = None
def _load_json(self, path):
assert os.path.isfile(path), ("path to json file doesn't exist")
data = None
with open(path, "r") as json_file:
try:
data = json.load(json_file)
except Exception as exc:
self.log.error(
"Error loading json: "
"{} - Exception: {}".format(path, exc)
)
return data
def _process_path(self, data):
# validate basic necessary data
data_err = "invalid json file - missing data"
required = ["asset", "user", "intent", "comment",
"job", "instances", "session", "version"]
assert all(elem in data.keys() for elem in required), data_err
# set context by first json file
ctx = self._context.data
ctx["asset"] = ctx.get("asset") or data.get("asset")
ctx["intent"] = ctx.get("intent") or data.get("intent")
ctx["comment"] = ctx.get("comment") or data.get("comment")
ctx["user"] = ctx.get("user") or data.get("user")
ctx["version"] = ctx.get("version") or data.get("version")
# basic sanity check to see if we are working in same context
# if some other json file has different context, bail out.
ctx_err = "inconsistent contexts in json files - %s"
assert ctx.get("asset") == data.get("asset"), ctx_err % "asset"
assert ctx.get("intent") == data.get("intent"), ctx_err % "intent"
assert ctx.get("comment") == data.get("comment"), ctx_err % "comment"
assert ctx.get("user") == data.get("user"), ctx_err % "user"
assert ctx.get("version") == data.get("version"), ctx_err % "version"
# ftrack credentials are passed as environment variables by Deadline
# to publish job, but Muster doesn't pass them.
if data.get("ftrack") and not os.environ.get("FTRACK_API_USER"):
ftrack = data.get("ftrack")
os.environ["FTRACK_API_USER"] = ftrack["FTRACK_API_USER"]
os.environ["FTRACK_API_KEY"] = ftrack["FTRACK_API_KEY"]
os.environ["FTRACK_SERVER"] = ftrack["FTRACK_SERVER"]
# now we can just add instances from json file and we are done
for instance in data.get("instances"):
self.log.info(" - processing instance for {}".format(
instance.get("subset")))
i = self._context.create_instance(instance.get("subset"))
self.log.info("remapping paths ...")
i.data["representations"] = [PypeLauncher().path_remapper(
data=r) for r in instance.get("representations")]
i.data.update(instance)
def process(self, context):
self._context = context
assert os.environ.get("PYPE_PUBLISH_DATA"), (
"Missing `PYPE_PUBLISH_DATA`")
paths = os.environ["PYPE_PUBLISH_DATA"].split(os.pathsep)
session_set = False
for path in paths:
data = self._load_json(path)
assert data, "failed to load json file"
if not session_set:
self.log.info("Setting session using data from file")
api.Session.update(data.get("session"))
os.environ.update(data.get("session"))
session_set = True
self._process_path(data)
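Based on the required keys validated in `_process_path`, one of those json files is expected to look roughly like this sketch; all values are illustrative placeholders, and the "ftrack" credential block is optional:

{
    "asset": "sh010",
    "user": "artist",
    "intent": "WIP",
    "comment": "first pass",
    "job": {},
    "version": 3,
    "session": {"AVALON_PROJECT": "myProject", "AVALON_ASSET": "sh010"},
    "instances": [
        {"subset": "renderMain", "representations": []}
    ]
}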

View file

@ -0,0 +1,60 @@
"""
Requires:
context -> anatomy
context -> anatomyData
Provides:
instance -> publishDir
instance -> resourcesDir
"""
import os
import copy
import pyblish.api
from avalon import api
class CollectResourcesPath(pyblish.api.InstancePlugin):
"""Generate directory path where the files and resources will be stored"""
label = "Collect Resources Path"
order = pyblish.api.CollectorOrder + 0.495
def process(self, instance):
anatomy = instance.context.data["anatomy"]
template_data = copy.deepcopy(instance.data["anatomyData"])
# This is for cases of Deprecated anatomy without `folder`
# TODO remove when all clients have solved this issue
template_data.update({
"frame": "FRAME_TEMP",
"representation": "TEMP"
})
anatomy_filled = anatomy.format(template_data)
if "folder" in anatomy.templates["publish"]:
publish_folder = anatomy_filled["publish"]["folder"]
else:
# solve deprecated situation when `folder` key is not underneath
# `publish` anatomy
project_name = api.Session["AVALON_PROJECT"]
self.log.warning((
"Deprecation warning: Anatomy does not have set `folder`"
" key underneath `publish` (in global of for project `{}`)."
).format(project_name))
file_path = anatomy_filled["publish"]["path"]
# Directory
publish_folder = os.path.dirname(file_path)
publish_folder = os.path.normpath(publish_folder)
resources_folder = os.path.join(publish_folder, "resources")
instance.data["publishDir"] = publish_folder
instance.data["resourcesDir"] = resources_folder
self.log.debug("publishDir: \"{}\"".format(publish_folder))
self.log.debug("resourcesDir: \"{}\"".format(resources_folder))

View file

@ -21,7 +21,7 @@ class CollectSceneVersion(pyblish.api.ContextPlugin):
if '<shell>' in filename:
return
rootVersion = pype.get_version_from_path(filename)
rootVersion = int(pype.get_version_from_path(filename))
context.data['version'] = rootVersion
self.log.info("{}".format(type(rootVersion)))
self.log.info('Scene Version: %s' % context.data.get('version'))
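`pype.get_version_from_path` parses the version token out of the scene file name and may return it as a string, hence the newly added `int()` cast. A minimal sketch of such a parser, assuming a `v###` naming convention (an illustration, not pype's actual implementation):

import re

def get_version_from_path(path):
    # "sh010_compositing_v017.nk" -> "017" (a string, so callers must cast)
    match = re.search(r"v(\d+)", path)
    return match.group(1) if match else None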

View file

@ -1,119 +0,0 @@
"""
Requires:
session -> AVALON_PROJECT
context -> anatomy (pypeapp.Anatomy)
instance -> subset
instance -> asset
instance -> family
Provides:
instance -> template
instance -> assumedTemplateData
instance -> assumedDestination
"""
import os
from avalon import io, api
import pyblish.api
class CollectTemplates(pyblish.api.InstancePlugin):
"""Fill templates with data needed for publish"""
order = pyblish.api.CollectorOrder + 0.1
label = "Collect and fill Templates"
hosts = ["maya", "nuke", "standalonepublisher"]
def process(self, instance):
# get all the stuff from the database
subset_name = instance.data["subset"]
asset_name = instance.data["asset"]
project_name = api.Session["AVALON_PROJECT"]
project = io.find_one(
{
"type": "project",
"name": project_name
},
projection={"config": True, "data": True}
)
template = project["config"]["template"]["publish"]
anatomy = instance.context.data['anatomy']
asset = io.find_one({
"type": "asset",
"name": asset_name,
"parent": project["_id"]
})
assert asset, ("No asset found by the name '{}' "
"in project '{}'".format(asset_name, project_name))
silo = asset.get('silo')
subset = io.find_one({
"type": "subset",
"name": subset_name,
"parent": asset["_id"]
})
# assume there is no version yet, we start at `1`
version = None
version_number = 1
if subset is not None:
version = io.find_one(
{
"type": "version",
"parent": subset["_id"]
},
sort=[("name", -1)]
)
# if there is a subset there ought to be version
if version is not None:
version_number += int(version["name"])
hierarchy = asset['data']['parents']
if hierarchy:
# hierarchy = os.path.sep.join(hierarchy)
hierarchy = os.path.join(*hierarchy)
else:
hierarchy = ""
template_data = {"root": api.Session["AVALON_PROJECTS"],
"project": {"name": project_name,
"code": project['data']['code']},
"silo": silo,
"family": instance.data['family'],
"asset": asset_name,
"subset": subset_name,
"version": version_number,
"hierarchy": hierarchy.replace("\\", "/"),
"representation": "TEMP"}
# Add datetime data to template data
datetime_data = instance.context.data.get("datetimeData") or {}
template_data.update(datetime_data)
resolution_width = instance.data.get("resolutionWidth")
resolution_height = instance.data.get("resolutionHeight")
fps = instance.data.get("fps")
if resolution_width:
template_data["resolution_width"] = resolution_width
if resolution_height:
template_data["resolution_height"] = resolution_height
if fps:
template_data["fps"] = fps
instance.data["template"] = template
instance.data["assumedTemplateData"] = template_data
# We take the parent folder of representation 'filepath'
instance.data["assumedDestination"] = os.path.dirname(
(anatomy.format(template_data))["publish"]["path"]
)
self.log.info("Assumed Destination has been created...")
self.log.debug("__ assumedTemplateData: `{}`".format(instance.data["assumedTemplateData"]))
self.log.debug("__ template: `{}`".format(instance.data["template"]))

View file

@ -26,35 +26,26 @@ class ExtractBurnin(pype.api.Extractor):
if "representations" not in instance.data:
raise RuntimeError("Burnin needs an already created mov to work on.")
version = instance.context.data.get(
'version', instance.data.get('version'))
version = instance.data.get(
'version', instance.context.data.get('version'))
frame_start = int(instance.data.get("frameStart") or 0)
frame_end = int(instance.data.get("frameEnd") or 1)
duration = frame_end - frame_start + 1
prep_data = copy.deepcopy(instance.data["anatomyData"])
if "slate.farm" in instance.data["families"]:
frame_start += 1
duration -= 1
prep_data = {
"username": instance.context.data['user'],
"asset": os.environ['AVALON_ASSET'],
"task": os.environ['AVALON_TASK'],
prep_data.update({
"frame_start": frame_start,
"frame_end": frame_end,
"duration": duration,
"version": int(version),
"comment": instance.context.data.get("comment", ""),
"intent": instance.context.data.get("intent", "")
}
# Add datetime data to preparation data
datetime_data = instance.context.data.get("datetimeData") or {}
prep_data.update(datetime_data)
# Update data with template data
template_data = instance.data.get("assumedTemplateData") or {}
prep_data.update(template_data)
})
# get anatomy project
anatomy = instance.context.data['anatomy']
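A quick worked example of the frame math above: with frameStart=1001 and frameEnd=1100 the duration is 1100 - 1001 + 1 = 100 frames; when the instance carries the "slate.farm" family the first frame holds the slate, so frame_start shifts to 1002 and the duration drops to 99.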

View file

@ -28,29 +28,33 @@ class ExtractHierarchyToAvalon(pyblish.api.ContextPlugin):
entity_type = entity_data["entity_type"]
data = {}
data["inputs"] = entity_data.get("inputs", [])
data["entityType"] = entity_type
# Custom attributes.
for k, val in entity_data.get("custom_attributes", {}).items():
data[k] = val
# Tasks.
tasks = entity_data.get("tasks", [])
if tasks:
data["tasks"] = tasks
parents = []
visualParent = None
# do not store project"s id as visualParent (silo asset)
if self.project is not None:
if self.project["_id"] != parent["_id"]:
visualParent = parent["_id"]
parents.extend(parent.get("data", {}).get("parents", []))
parents.append(parent["name"])
data["visualParent"] = visualParent
data["parents"] = parents
if entity_type.lower() != "project":
data["inputs"] = entity_data.get("inputs", [])
# Tasks.
tasks = entity_data.get("tasks", [])
if tasks:
data["tasks"] = tasks
parents = []
visualParent = None
# do not store project"s id as visualParent (silo asset)
if self.project is not None:
if self.project["_id"] != parent["_id"]:
visualParent = parent["_id"]
parents.extend(
parent.get("data", {}).get("parents", [])
)
parents.append(parent["name"])
data["visualParent"] = visualParent
data["parents"] = parents
update_data = True
# Process project
if entity_type.lower() == "project":
entity = io.find_one({"type": "project"})
@ -58,8 +62,9 @@ class ExtractHierarchyToAvalon(pyblish.api.ContextPlugin):
assert (entity is not None), "Did not find project in DB"
# get data from already existing project
for key, value in entity.get("data", {}).items():
data[key] = value
cur_entity_data = entity.get("data") or {}
cur_entity_data.update(data)
data = cur_entity_data
self.project = entity
# Raise error if project or parent are not set
@ -70,16 +75,63 @@ class ExtractHierarchyToAvalon(pyblish.api.ContextPlugin):
# Else process asset
else:
entity = io.find_one({"type": "asset", "name": name})
# Create entity if doesn"t exist
if entity is None:
entity = self.create_avalon_asset(name, data)
if entity:
# Do not override data, only update
cur_entity_data = entity.get("data") or {}
cur_entity_data.update(data)
data = cur_entity_data
else:
# Skip updating data
update_data = False
# Update entity data with input data
io.update_many({"_id": entity["_id"]}, {"$set": {"data": data}})
archived_entities = io.find({
"type": "archived_asset",
"name": name
})
unarchive_entity = None
for archived_entity in archived_entities:
archived_parents = (
archived_entity
.get("data", {})
.get("parents")
)
if data["parents"] == archived_parents:
unarchive_entity = archived_entity
break
if unarchive_entity is None:
# Create entity if doesn"t exist
entity = self.create_avalon_asset(name, data)
else:
# Unarchive if entity was archived
entity = self.unarchive_entity(unarchive_entity, data)
if update_data:
# Update entity data with input data
io.update_many(
{"_id": entity["_id"]},
{"$set": {"data": data}}
)
if "childs" in entity_data:
self.import_to_avalon(entity_data["childs"], entity)
def unarchive_entity(self, entity, data):
# Unarchived asset should not use the same data
new_entity = {
"_id": entity["_id"],
"schema": "avalon-core:asset-3.0",
"name": entity["name"],
"parent": self.project["_id"],
"type": "asset",
"data": data
}
io.replace_one(
{"_id": entity["_id"]},
new_entity
)
return new_entity
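# Note: the archived document keeps its original _id and unarchive_entity
# reuses it when rebuilding the asset, so versions and representations
# that still point at the old asset id keep working after the restore.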
def create_avalon_asset(self, name, data):
item = {
"schema": "avalon-core:asset-3.0",

View file

@ -1,20 +1,12 @@
import os
import pyblish.api
import clique
import pype.api
import pype.lib
class ExtractJpegEXR(pyblish.api.InstancePlugin):
"""Resolve any dependency issues
This plug-in resolves any paths which, if not updated might break
the published file.
The order of families is important, when working with lookdev you want to
first publish the texture, update the texture paths in the nodes and then
publish the shading network. Same goes for file dependent assets.
"""
"""Create jpg thumbnail from sequence using ffmpeg"""
label = "Extract Jpeg EXR"
hosts = ["shell"]
@ -23,11 +15,6 @@ class ExtractJpegEXR(pyblish.api.InstancePlugin):
enabled = False
def process(self, instance):
start = instance.data.get("frameStart")
stagingdir = os.path.normpath(instance.data.get("stagingDir"))
collected_frames = os.listdir(stagingdir)
collections, remainder = clique.assemble(collected_frames)
self.log.info("subset {}".format(instance.data['subset']))
if 'crypto' in instance.data['subset']:
@ -40,64 +27,70 @@ class ExtractJpegEXR(pyblish.api.InstancePlugin):
representations_new = representations[:]
for repre in representations:
tags = repre.get("tags", [])
self.log.debug(repre)
if 'review' in repre['tags'] or "thumb-nuke" in repre['tags']:
if not isinstance(repre['files'], list):
return
valid = 'review' in tags or "thumb-nuke" in tags
if not valid:
continue
input_file = repre['files'][0]
if not isinstance(repre['files'], list):
continue
# input_file = (
# collections[0].format('{head}{padding}{tail}') % start
# )
full_input_path = os.path.join(stagingdir, input_file)
self.log.info("input {}".format(full_input_path))
stagingdir = os.path.normpath(repre.get("stagingDir"))
input_file = repre['files'][0]
filename = os.path.splitext(input_file)[0]
if not filename.endswith('.'):
filename += "."
jpeg_file = filename + "jpg"
full_output_path = os.path.join(stagingdir, jpeg_file)
# input_file = (
# collections[0].format('{head}{padding}{tail}') % start
# )
full_input_path = os.path.join(stagingdir, input_file)
self.log.info("input {}".format(full_input_path))
self.log.info("output {}".format(full_output_path))
filename = os.path.splitext(input_file)[0]
if not filename.endswith('.'):
filename += "."
jpeg_file = filename + "jpg"
full_output_path = os.path.join(stagingdir, jpeg_file)
config_data = instance.context.data['output_repre_config']
self.log.info("output {}".format(full_output_path))
proj_name = os.environ.get('AVALON_PROJECT', '__default__')
profile = config_data.get(proj_name, config_data['__default__'])
config_data = instance.context.data['output_repre_config']
jpeg_items = []
jpeg_items.append(
os.path.join(os.environ.get("FFMPEG_PATH"), "ffmpeg"))
# override file if already exists
jpeg_items.append("-y")
# use same input args like with mov
jpeg_items.extend(profile.get('input', []))
# input file
jpeg_items.append("-i {}".format(full_input_path))
# output file
jpeg_items.append(full_output_path)
proj_name = os.environ.get('AVALON_PROJECT', '__default__')
profile = config_data.get(proj_name, config_data['__default__'])
subprocess_jpeg = " ".join(jpeg_items)
ffmpeg_path = pype.lib.get_ffmpeg_tool_path("ffmpeg")
# run subprocess
self.log.debug("{}".format(subprocess_jpeg))
pype.api.subprocess(subprocess_jpeg)
jpeg_items = []
jpeg_items.append(ffmpeg_path)
# override file if already exists
jpeg_items.append("-y")
# use same input args like with mov
jpeg_items.extend(profile.get('input', []))
# input file
jpeg_items.append("-i {}".format(full_input_path))
# output file
jpeg_items.append(full_output_path)
if "representations" not in instance.data:
instance.data["representations"] = []
subprocess_jpeg = " ".join(jpeg_items)
representation = {
'name': 'thumbnail',
'ext': 'jpg',
'files': jpeg_file,
"stagingDir": stagingdir,
"thumbnail": True,
"tags": ['thumbnail']
}
# run subprocess
self.log.debug("{}".format(subprocess_jpeg))
pype.api.subprocess(subprocess_jpeg)
# adding representation
self.log.debug("Adding: {}".format(representation))
representations_new.append(representation)
if "representations" not in instance.data:
instance.data["representations"] = []
representation = {
'name': 'thumbnail',
'ext': 'jpg',
'files': jpeg_file,
"stagingDir": stagingdir,
"thumbnail": True,
"tags": ['thumbnail']
}
# adding representation
self.log.debug("Adding: {}".format(representation))
representations_new.append(representation)
instance.data["representations"] = representations_new

View file

@ -2,6 +2,7 @@ import os
import pyblish.api
import clique
import pype.api
import pype.lib
class ExtractReview(pyblish.api.InstancePlugin):
@ -40,323 +41,333 @@ class ExtractReview(pyblish.api.InstancePlugin):
# get representation and loop them
representations = inst_data["representations"]
ffmpeg_path = pype.lib.get_ffmpeg_tool_path("ffmpeg")
# filter out mov and img sequences
representations_new = representations[:]
for repre in representations:
if repre['ext'] in self.ext_filter:
tags = repre.get("tags", [])
if "thumbnail" in tags:
continue
self.log.info("Try repre: {}".format(repre))
if "review" in tags:
staging_dir = repre["stagingDir"]
# iterating preset output profiles
for name, profile in output_profiles.items():
repre_new = repre.copy()
ext = profile.get("ext", None)
p_tags = profile.get('tags', [])
self.log.info("p_tags: `{}`".format(p_tags))
# adding control for presets to be sequence
# or single file
is_sequence = ("sequence" in p_tags) and (ext in (
"png", "jpg", "jpeg"))
self.log.debug("Profile name: {}".format(name))
if not ext:
ext = "mov"
self.log.warning(
str("`ext` attribute not in output "
"profile. Setting to default ext: `mov`"))
self.log.debug(
"instance.families: {}".format(
instance.data['families']))
self.log.debug(
"profile.families: {}".format(profile['families']))
if any(item in instance.data['families'] for item in profile['families']):
if isinstance(repre["files"], list):
collections, remainder = clique.assemble(
repre["files"])
full_input_path = os.path.join(
staging_dir, collections[0].format(
'{head}{padding}{tail}')
)
filename = collections[0].format('{head}')
if filename.endswith('.'):
filename = filename[:-1]
else:
full_input_path = os.path.join(
staging_dir, repre["files"])
filename = repre["files"].split(".")[0]
repr_file = filename + "_{0}.{1}".format(name, ext)
full_output_path = os.path.join(
staging_dir, repr_file)
if is_sequence:
filename_base = filename + "_{0}".format(name)
repr_file = filename_base + ".%08d.{0}".format(
ext)
repre_new["sequence_file"] = repr_file
full_output_path = os.path.join(
staging_dir, filename_base, repr_file)
self.log.info("input {}".format(full_input_path))
self.log.info("output {}".format(full_output_path))
new_tags = [x for x in tags if x != "delete"]
# add families
[instance.data["families"].append(t)
for t in p_tags
if t not in instance.data["families"]]
# add to
[new_tags.append(t) for t in p_tags
if t not in new_tags]
self.log.info("new_tags: `{}`".format(new_tags))
input_args = []
# overrides output file
input_args.append("-y")
# preset's input data
input_args.extend(profile.get('input', []))
# necessary input data
# adds start arg only if image sequence
if isinstance(repre["files"], list):
input_args.append(
"-start_number {0} -framerate {1}".format(
start_frame, fps))
input_args.append("-i {}".format(full_input_path))
for audio in instance.data.get("audio", []):
offset_frames = (
instance.data.get("startFrameReview") -
audio["offset"]
)
offset_seconds = offset_frames / fps
if offset_seconds > 0:
input_args.append("-ss")
else:
input_args.append("-itsoffset")
input_args.append(str(abs(offset_seconds)))
input_args.extend(
["-i", audio["filename"]]
)
# Need to merge audio if there are more
# than 1 input.
if len(instance.data["audio"]) > 1:
input_args.extend(
[
"-filter_complex",
"amerge",
"-ac",
"2"
]
)
output_args = []
codec_args = profile.get('codec', [])
output_args.extend(codec_args)
# preset's output data
output_args.extend(profile.get('output', []))
# defining image ratios
resolution_ratio = float(resolution_width / (
resolution_height * pixel_aspect))
delivery_ratio = float(to_width) / float(to_height)
self.log.debug(resolution_ratio)
self.log.debug(delivery_ratio)
# get scale factor
scale_factor = to_height / (
resolution_height * pixel_aspect)
self.log.debug(scale_factor)
# letter_box
lb = profile.get('letter_box', 0)
if lb != 0:
ffmpeg_width = to_width
ffmpeg_height = to_height
if "reformat" not in p_tags:
lb /= pixel_aspect
if resolution_ratio != delivery_ratio:
ffmpeg_width = resolution_width
ffmpeg_height = int(
resolution_height * pixel_aspect)
else:
if resolution_ratio != delivery_ratio:
lb /= scale_factor
else:
lb /= pixel_aspect
output_args.append(str(
"-filter:v scale={0}x{1}:flags=lanczos,"
"setsar=1,drawbox=0:0:iw:"
"round((ih-(iw*(1/{2})))/2):t=fill:"
"c=black,drawbox=0:ih-round((ih-(iw*("
"1/{2})))/2):iw:round((ih-(iw*(1/{2})))"
"/2):t=fill:c=black").format(
ffmpeg_width, ffmpeg_height, lb))
# In case audio is longer than video.
output_args.append("-shortest")
# output filename
output_args.append(full_output_path)
self.log.debug(
"__ pixel_aspect: `{}`".format(pixel_aspect))
self.log.debug(
"__ resolution_width: `{}`".format(
resolution_width))
self.log.debug(
"__ resolution_height: `{}`".format(
resolution_height))
# scaling non-square pixels and 1920 width
if "reformat" in p_tags:
if resolution_ratio < delivery_ratio:
self.log.debug("lower then delivery")
width_scale = int(to_width * scale_factor)
width_half_pad = int((
to_width - width_scale)/2)
height_scale = to_height
height_half_pad = 0
else:
self.log.debug("heigher then delivery")
width_scale = to_width
width_half_pad = 0
scale_factor = float(to_width) / float(
resolution_width)
self.log.debug(scale_factor)
height_scale = int(
resolution_height * scale_factor)
height_half_pad = int(
(to_height - height_scale)/2)
self.log.debug(
"__ width_scale: `{}`".format(width_scale))
self.log.debug(
"__ width_half_pad: `{}`".format(
width_half_pad))
self.log.debug(
"__ height_scale: `{}`".format(
height_scale))
self.log.debug(
"__ height_half_pad: `{}`".format(
height_half_pad))
scaling_arg = str(
"scale={0}x{1}:flags=lanczos,"
"pad={2}:{3}:{4}:{5}:black,setsar=1"
).format(width_scale, height_scale,
to_width, to_height,
width_half_pad,
height_half_pad
)
vf_back = self.add_video_filter_args(
output_args, scaling_arg)
# add it to output_args
output_args.insert(0, vf_back)
# baking lut file application
lut_path = instance.data.get("lutPath")
if lut_path and ("bake-lut" in p_tags):
# removing gamma info as it is all baked into the lut
gamma = next((g for g in input_args
if "-gamma" in g), None)
if gamma:
input_args.remove(gamma)
# create lut argument
lut_arg = "lut3d=file='{}'".format(
lut_path.replace(
"\\", "/").replace(":/", "\\:/")
)
lut_arg += ",colormatrix=bt601:bt709"
vf_back = self.add_video_filter_args(
output_args, lut_arg)
# add it to output_args
output_args.insert(0, vf_back)
self.log.info("Added Lut to ffmpeg command")
self.log.debug(
"_ output_args: `{}`".format(output_args))
if is_sequence:
stg_dir = os.path.dirname(full_output_path)
if not os.path.exists(stg_dir):
self.log.debug(
"creating dir: {}".format(stg_dir))
os.mkdir(stg_dir)
mov_args = [
os.path.join(
os.environ.get(
"FFMPEG_PATH",
""), "ffmpeg"),
" ".join(input_args),
" ".join(output_args)
]
subprcs_cmd = " ".join(mov_args)
# run subprocess
self.log.debug("Executing: {}".format(subprcs_cmd))
output = pype.api.subprocess(subprcs_cmd)
self.log.debug("Output: {}".format(output))
# create representation data
repre_new.update({
'name': name,
'ext': ext,
'files': repr_file,
"tags": new_tags,
"outputName": name,
"codec": codec_args,
"_profile": profile,
"resolutionHeight": resolution_height,
"resolutionWidth": resolution_width,
})
if is_sequence:
repre_new.update({
"stagingDir": stg_dir,
"files": os.listdir(stg_dir)
})
if repre_new.get('preview'):
repre_new.pop("preview")
if repre_new.get('thumbnail'):
repre_new.pop("thumbnail")
# adding representation
self.log.debug("Adding: {}".format(repre_new))
representations_new.append(repre_new)
else:
continue
else:
if repre['ext'] not in self.ext_filter:
continue
tags = repre.get("tags", [])
if "thumbnail" in tags:
continue
self.log.info("Try repre: {}".format(repre))
if "review" not in tags:
continue
staging_dir = repre["stagingDir"]
# iterating preset output profiles
for name, profile in output_profiles.items():
repre_new = repre.copy()
ext = profile.get("ext", None)
p_tags = profile.get('tags', [])
self.log.info("p_tags: `{}`".format(p_tags))
# adding control for presets to be sequence
# or single file
is_sequence = ("sequence" in p_tags) and (ext in (
"png", "jpg", "jpeg"))
self.log.debug("Profile name: {}".format(name))
if not ext:
ext = "mov"
self.log.warning(
str("`ext` attribute not in output "
"profile. Setting to default ext: `mov`"))
self.log.debug(
"instance.families: {}".format(
instance.data['families']))
self.log.debug(
"profile.families: {}".format(profile['families']))
profile_family_check = False
for _family in profile['families']:
if _family in instance.data['families']:
profile_family_check = True
break
if not profile_family_check:
continue
if isinstance(repre["files"], list):
collections, remainder = clique.assemble(
repre["files"])
full_input_path = os.path.join(
staging_dir, collections[0].format(
'{head}{padding}{tail}')
)
filename = collections[0].format('{head}')
if filename.endswith('.'):
filename = filename[:-1]
else:
full_input_path = os.path.join(
staging_dir, repre["files"])
filename = repre["files"].split(".")[0]
repr_file = filename + "_{0}.{1}".format(name, ext)
full_output_path = os.path.join(
staging_dir, repr_file)
if is_sequence:
filename_base = filename + "_{0}".format(name)
repr_file = filename_base + ".%08d.{0}".format(
ext)
repre_new["sequence_file"] = repr_file
full_output_path = os.path.join(
staging_dir, filename_base, repr_file)
self.log.info("input {}".format(full_input_path))
self.log.info("output {}".format(full_output_path))
new_tags = [x for x in tags if x != "delete"]
# add families
[instance.data["families"].append(t)
for t in p_tags
if t not in instance.data["families"]]
# add to
[new_tags.append(t) for t in p_tags
if t not in new_tags]
self.log.info("new_tags: `{}`".format(new_tags))
input_args = []
# overrides output file
input_args.append("-y")
# preset's input data
input_args.extend(profile.get('input', []))
# necessary input data
# adds start arg only if image sequence
if isinstance(repre["files"], list):
if start_frame != repre.get("detectedStart", start_frame):
start_frame = repre.get("detectedStart")
input_args.append(
"-start_number {0} -framerate {1}".format(
start_frame, fps))
input_args.append("-i {}".format(full_input_path))
for audio in instance.data.get("audio", []):
offset_frames = (
instance.data.get("startFrameReview") -
audio["offset"]
)
offset_seconds = offset_frames / fps
if offset_seconds > 0:
input_args.append("-ss")
else:
input_args.append("-itsoffset")
input_args.append(str(abs(offset_seconds)))
input_args.extend(
["-i", audio["filename"]]
)
# Need to merge audio if there are more
# than 1 input.
if len(instance.data["audio"]) > 1:
input_args.extend(
[
"-filter_complex",
"amerge",
"-ac",
"2"
]
)
output_args = []
codec_args = profile.get('codec', [])
output_args.extend(codec_args)
# preset's output data
output_args.extend(profile.get('output', []))
# defining image ratios
resolution_ratio = float(resolution_width / (
resolution_height * pixel_aspect))
delivery_ratio = float(to_width) / float(to_height)
self.log.debug(resolution_ratio)
self.log.debug(delivery_ratio)
# get scale factor
scale_factor = to_height / (
resolution_height * pixel_aspect)
self.log.debug(scale_factor)
# letter_box
lb = profile.get('letter_box', 0)
if lb != 0:
ffmpeg_width = to_width
ffmpeg_height = to_height
if "reformat" not in p_tags:
lb /= pixel_aspect
if resolution_ratio != delivery_ratio:
ffmpeg_width = resolution_width
ffmpeg_height = int(
resolution_height * pixel_aspect)
else:
if resolution_ratio != delivery_ratio:
lb /= scale_factor
else:
lb /= pixel_aspect
output_args.append(str(
"-filter:v scale={0}x{1}:flags=lanczos,"
"setsar=1,drawbox=0:0:iw:"
"round((ih-(iw*(1/{2})))/2):t=fill:"
"c=black,drawbox=0:ih-round((ih-(iw*("
"1/{2})))/2):iw:round((ih-(iw*(1/{2})))"
"/2):t=fill:c=black").format(
ffmpeg_width, ffmpeg_height, lb))
# In case audio is longer than video.
output_args.append("-shortest")
# output filename
output_args.append(full_output_path)
self.log.debug(
"__ pixel_aspect: `{}`".format(pixel_aspect))
self.log.debug(
"__ resolution_width: `{}`".format(
resolution_width))
self.log.debug(
"__ resolution_height: `{}`".format(
resolution_height))
# scaling non-square pixels and 1920 width
if "reformat" in p_tags:
if resolution_ratio < delivery_ratio:
self.log.debug("lower then delivery")
width_scale = int(to_width * scale_factor)
width_half_pad = int((
to_width - width_scale)/2)
height_scale = to_height
height_half_pad = 0
else:
self.log.debug("heigher then delivery")
width_scale = to_width
width_half_pad = 0
scale_factor = float(to_width) / float(
resolution_width)
self.log.debug(scale_factor)
height_scale = int(
resolution_height * scale_factor)
height_half_pad = int(
(to_height - height_scale)/2)
self.log.debug(
"__ width_scale: `{}`".format(width_scale))
self.log.debug(
"__ width_half_pad: `{}`".format(
width_half_pad))
self.log.debug(
"__ height_scale: `{}`".format(
height_scale))
self.log.debug(
"__ height_half_pad: `{}`".format(
height_half_pad))
scaling_arg = str(
"scale={0}x{1}:flags=lanczos,"
"pad={2}:{3}:{4}:{5}:black,setsar=1"
).format(width_scale, height_scale,
to_width, to_height,
width_half_pad,
height_half_pad
)
vf_back = self.add_video_filter_args(
output_args, scaling_arg)
# add it to output_args
output_args.insert(0, vf_back)
# baking lut file application
lut_path = instance.data.get("lutPath")
if lut_path and ("bake-lut" in p_tags):
# removing gamma info as it is all baked into the lut
gamma = next((g for g in input_args
if "-gamma" in g), None)
if gamma:
input_args.remove(gamma)
# create lut argument
lut_arg = "lut3d=file='{}'".format(
lut_path.replace(
"\\", "/").replace(":/", "\\:/")
)
lut_arg += ",colormatrix=bt601:bt709"
vf_back = self.add_video_filter_args(
output_args, lut_arg)
# add it to output_args
output_args.insert(0, vf_back)
self.log.info("Added Lut to ffmpeg command")
self.log.debug(
"_ output_args: `{}`".format(output_args))
if is_sequence:
stg_dir = os.path.dirname(full_output_path)
if not os.path.exists(stg_dir):
self.log.debug(
"creating dir: {}".format(stg_dir))
os.mkdir(stg_dir)
mov_args = [
ffmpeg_path,
" ".join(input_args),
" ".join(output_args)
]
subprcs_cmd = " ".join(mov_args)
# run subprocess
self.log.debug("Executing: {}".format(subprcs_cmd))
output = pype.api.subprocess(subprcs_cmd)
self.log.debug("Output: {}".format(output))
# create representation data
repre_new.update({
'name': name,
'ext': ext,
'files': repr_file,
"tags": new_tags,
"outputName": name,
"codec": codec_args,
"_profile": profile,
"resolutionHeight": resolution_height,
"resolutionWidth": resolution_width,
})
if is_sequence:
repre_new.update({
"stagingDir": stg_dir,
"files": os.listdir(stg_dir)
})
if repre_new.get('preview'):
repre_new.pop("preview")
if repre_new.get('thumbnail'):
repre_new.pop("thumbnail")
# adding representation
self.log.debug("Adding: {}".format(repre_new))
representations_new.append(repre_new)
# iterate over a copy so removals do not skip elements
for repre in list(representations_new):
if "delete" in repre.get("tags", []):
representations_new.remove(repre)
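A worked example of the reformat branch above, using illustrative numbers:

# source 2048x858 at pixel_aspect 1.0, delivery 1920x1080:
#   resolution_ratio = 2048 / (858 * 1.0) = 2.387
#   delivery_ratio = 1920 / 1080 = 1.778  -> source is wider than delivery
#   width_scale = 1920, width_half_pad = 0
#   scale_factor = 1920 / 2048 = 0.9375
#   height_scale = int(858 * 0.9375) = 804
#   height_half_pad = int((1080 - 804) / 2) = 138
#   scaling_arg -> "scale=1920x804:flags=lanczos,pad=1920:1080:0:138:black,setsar=1"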

View file

@ -1,5 +1,6 @@
import os
import pype.api
import pype.lib
import pyblish
@ -21,7 +22,7 @@ class ExtractReviewSlate(pype.api.Extractor):
suffix = "_slate"
slate_path = inst_data.get("slateFrame")
ffmpeg_path = os.path.join(os.environ.get("FFMPEG_PATH", ""), "ffmpeg")
ffmpeg_path = pype.lib.get_ffmpeg_tool_path("ffmpeg")
to_width = 1920
to_height = 1080
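`pype.lib.get_ffmpeg_tool_path` centralizes the lookup that both extractors previously did inline via `FFMPEG_PATH`. A rough sketch of what such a helper can look like (an assumption for illustration; pype's real implementation may differ):

import os

def get_ffmpeg_tool_path(tool="ffmpeg"):
    # assumption: fall back to the bare tool name so PATH resolution still works
    ffmpeg_dir = os.environ.get("FFMPEG_PATH", "")
    return os.path.join(ffmpeg_dir, tool) if ffmpeg_dir else tool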

View file

@ -1,417 +0,0 @@
import os
import logging
import shutil
import errno
import pyblish.api
from avalon import api, io
from avalon.vendor import filelink
log = logging.getLogger(__name__)
class IntegrateAsset(pyblish.api.InstancePlugin):
"""Resolve any dependency issies
This plug-in resolves any paths which, if not updated might break
the published file.
The order of families is important, when working with lookdev you want to
first publish the texture, update the texture paths in the nodes and then
publish the shading network. Same goes for file dependent assets.
"""
label = "Integrate Asset"
order = pyblish.api.IntegratorOrder
families = []
exclude_families = ["clip"]
def process(self, instance):
if [ef for ef in self.exclude_families
if instance.data["family"] in ef]:
return
self.register(instance)
self.log.info("Integrating Asset in to the database ...")
if instance.data.get('transfer', True):
self.integrate(instance)
def register(self, instance):
# Required environment variables
PROJECT = api.Session["AVALON_PROJECT"]
ASSET = instance.data.get("asset") or api.Session["AVALON_ASSET"]
LOCATION = api.Session["AVALON_LOCATION"]
context = instance.context
# Atomicity
#
# Guarantee atomic publishes - each asset contains
# an identical set of members.
# __
# / o
# / \
# | o |
# \ /
# o __/
#
assert all(result["success"] for result in context.data["results"]), (
"Atomicity not held, aborting.")
# Assemble
#
# |
# v
# ---> <----
# ^
# |
#
stagingdir = instance.data.get("stagingDir")
assert stagingdir, ("Incomplete instance \"%s\": "
"Missing reference to staging area." % instance)
# extra check if stagingDir actually exists and is available
self.log.debug("Establishing staging directory @ %s" % stagingdir)
# Ensure at least one file is set up for transfer in staging dir.
files = instance.data.get("files", [])
assert files, "Instance has no files to transfer"
assert isinstance(files, (list, tuple)), (
"Instance 'files' must be a list, got: {0}".format(files)
)
project = io.find_one({"type": "project"})
asset = io.find_one({
"type": "asset",
"name": ASSET,
"parent": project["_id"]
})
assert all([project, asset]), ("Could not find current project or "
"asset '%s'" % ASSET)
subset = self.get_subset(asset, instance)
# get next version
latest_version = io.find_one(
{
"type": "version",
"parent": subset["_id"]
},
{"name": True},
sort=[("name", -1)]
)
next_version = 1
if latest_version is not None:
next_version += latest_version["name"]
self.log.info("Verifying version from assumed destination")
assumed_data = instance.data["assumedTemplateData"]
assumed_version = assumed_data["version"]
if assumed_version != next_version:
raise AttributeError("Assumed version 'v{0:03d}' does not match "
"next version in database "
"('v{1:03d}')".format(assumed_version,
next_version))
self.log.debug("Next version: v{0:03d}".format(next_version))
version_data = self.create_version_data(context, instance)
version = self.create_version(subset=subset,
version_number=next_version,
locations=[LOCATION],
data=version_data)
self.log.debug("Creating version ...")
version_id = io.insert_one(version).inserted_id
# Write to disk
# _
# | |
# _| |_
# ____\ /
# |\ \ / \
# \ \ v \
# \ \________.
# \|________|
#
root = api.registered_root()
hierarchy = ""
parents = io.find_one({
"type": 'asset',
"name": ASSET
})['data']['parents']
if parents and len(parents) > 0:
# hierarchy = os.path.sep.join(hierarchy)
hierarchy = os.path.join(*parents)
template_data = {"root": root,
"project": {"name": PROJECT,
"code": project['data']['code']},
"silo": asset['silo'],
"asset": ASSET,
"family": instance.data['family'],
"subset": subset["name"],
"version": int(version["name"]),
"hierarchy": hierarchy}
# template_publish = project["config"]["template"]["publish"]
anatomy = instance.context.data['anatomy']
# Find the representations to transfer amongst the files
# Each should be a single representation (as such, a single extension)
representations = []
destination_list = []
if 'transfers' not in instance.data:
instance.data['transfers'] = []
for files in instance.data["files"]:
# Collection
# _______
# |______|\
# | |\|
# | ||
# | ||
# | ||
# |_______|
#
if isinstance(files, list):
collection = files
# Assert that each member has identical suffix
_, ext = os.path.splitext(collection[0])
assert all(ext == os.path.splitext(name)[1]
for name in collection), (
"Files had varying suffixes, this is a bug"
)
assert not any(os.path.isabs(name) for name in collection)
template_data["representation"] = ext[1:]
for fname in collection:
src = os.path.join(stagingdir, fname)
anatomy_filled = anatomy.format(template_data)
dst = anatomy_filled["publish"]["path"]
instance.data["transfers"].append([src, dst])
template = anatomy.templates["publish"]["path"]
else:
# Single file
# _______
# | |\
# | |
# | |
# | |
# |_______|
#
fname = files
assert not os.path.isabs(fname), (
"Given file name is a full path"
)
_, ext = os.path.splitext(fname)
template_data["representation"] = ext[1:]
src = os.path.join(stagingdir, fname)
anatomy_filled = anatomy.format(template_data)
dst = anatomy_filled["publish"]["path"]
instance.data["transfers"].append([src, dst])
template = anatomy.templates["publish"]["path"]
representation = {
"schema": "pype:representation-2.0",
"type": "representation",
"parent": version_id,
"name": ext[1:],
"data": {'path': dst, 'template': template},
"dependencies": instance.data.get("dependencies", "").split(),
# Imprint shortcut to context
# for performance reasons.
"context": {
"root": root,
"project": {"name": PROJECT,
"code": project['data']['code']},
'task': api.Session["AVALON_TASK"],
"silo": asset['silo'],
"asset": ASSET,
"family": instance.data['family'],
"subset": subset["name"],
"version": version["name"],
"hierarchy": hierarchy,
"representation": ext[1:]
}
}
destination_list.append(dst)
instance.data['destination_list'] = destination_list
representations.append(representation)
self.log.info("Registering {} items".format(len(representations)))
io.insert_many(representations)
def integrate(self, instance):
"""Move the files
Through `instance.data["transfers"]`
Args:
instance: the instance to integrate
"""
transfers = instance.data.get("transfers", list())
for src, dest in transfers:
self.log.info("Copying file .. {} -> {}".format(src, dest))
self.copy_file(src, dest)
# Produce hardlinked copies
# Note: hardlink can only be produced between two files on the same
# server/disk and editing one of the two will edit both files at once.
# As such it is recommended to only make hardlinks between static files
# to ensure publishes remain safe and non-edited.
hardlinks = instance.data.get("hardlinks", list())
for src, dest in hardlinks:
self.log.info("Hardlinking file .. {} -> {}".format(src, dest))
self.hardlink_file(src, dest)
def copy_file(self, src, dst):
""" Copy given source to destination
Arguments:
src (str): the source file which needs to be copied
dst (str): the destination of the source file
Returns:
None
"""
dirname = os.path.dirname(dst)
try:
os.makedirs(dirname)
except OSError as e:
if e.errno == errno.EEXIST:
pass
else:
self.log.critical("An unexpected error occurred.")
raise
shutil.copy(src, dst)
def hardlink_file(self, src, dst):
dirname = os.path.dirname(dst)
try:
os.makedirs(dirname)
except OSError as e:
if e.errno == errno.EEXIST:
pass
else:
self.log.critical("An unexpected error occurred.")
raise
filelink.create(src, dst, filelink.HARDLINK)
def get_subset(self, asset, instance):
subset = io.find_one({
"type": "subset",
"parent": asset["_id"],
"name": instance.data["subset"]
})
if subset is None:
subset_name = instance.data["subset"]
self.log.info("Subset '%s' not found, creating.." % subset_name)
_id = io.insert_one({
"schema": "avalon-core:subset-2.0",
"type": "subset",
"name": subset_name,
"data": {},
"parent": asset["_id"]
}).inserted_id
subset = io.find_one({"_id": _id})
return subset
def create_version(self, subset, version_number, locations, data=None):
""" Copy given source to destination
Args:
subset (dict): the registered subset of the asset
version_number (int): the version number
locations (list): the currently registered locations
Returns:
dict: collection of data to create a version
"""
# Imprint currently registered location
version_locations = [location for location in locations if
location is not None]
return {"schema": "avalon-core:version-2.0",
"type": "version",
"parent": subset["_id"],
"name": version_number,
"locations": version_locations,
"data": data}
def create_version_data(self, context, instance):
"""Create the data collection for the version
Args:
context: the current context
instance: the current instance being published
Returns:
dict: the required information with instance.data as key
"""
families = []
current_families = instance.data.get("families", list())
instance_family = instance.data.get("family", None)
if instance_family is not None:
families.append(instance_family)
families += current_families
self.log.debug("Registered root: {}".format(api.registered_root()))
# create relative source path for DB
try:
source = instance.data['source']
except KeyError:
source = context.data["currentFile"]
relative_path = os.path.relpath(source, api.registered_root())
source = os.path.join("{root}", relative_path).replace("\\", "/")
self.log.debug("Source: {}".format(source))
version_data = {"families": families,
"time": context.data["time"],
"author": context.data["user"],
"source": source,
"comment": context.data.get("comment"),
"machine": context.data.get("machine"),
"fps": context.data.get("fps")}
# Include optional data if present in instance.data
optionals = [
"frameStart", "frameEnd", "step", "handles", "sourceHashes"
]
for key in optionals:
if key in instance.data:
version_data[key] = instance.data[key]
return version_data

View file

@ -1,147 +0,0 @@
import pyblish.api
import os
from avalon import io, api
class IntegrateAssumedDestination(pyblish.api.InstancePlugin):
"""Generate the assumed destination path where the file will be stored"""
label = "Integrate Assumed Destination"
order = pyblish.api.IntegratorOrder - 0.05
families = ["clip", "projectfile", "plate"]
def process(self, instance):
anatomy = instance.context.data['anatomy']
self.create_destination_template(instance, anatomy)
template_data = instance.data["assumedTemplateData"]
# self.log.info(anatomy.templates)
anatomy_filled = anatomy.format(template_data)
# self.log.info(anatomy_filled)
mock_template = anatomy_filled["publish"]["path"]
# For now assume resources end up in a "resources" folder in the
# published folder
mock_destination = os.path.join(os.path.dirname(mock_template),
"resources")
# Clean the path
mock_destination = os.path.abspath(
os.path.normpath(mock_destination)).replace("\\", "/")
# Define resource destination and transfers
resources = instance.data.get("resources", list())
transfers = instance.data.get("transfers", list())
for resource in resources:
# Add destination to the resource
source_filename = os.path.basename(
resource["source"]).replace("\\", "/")
destination = os.path.join(mock_destination, source_filename)
# Force forward slashes to fix issue with software unable
# to work correctly with backslashes in specific scenarios
# (e.g. escape characters in PLN-151 V-Ray UDIM)
destination = destination.replace("\\", "/")
resource['destination'] = destination
# Collect transfers for the individual files of the resource
# e.g. all individual files of a cache or UDIM textures.
files = resource['files']
for fsrc in files:
fname = os.path.basename(fsrc)
fdest = os.path.join(
mock_destination, fname).replace("\\", "/")
transfers.append([fsrc, fdest])
instance.data["resources"] = resources
instance.data["transfers"] = transfers
def create_destination_template(self, instance, anatomy):
"""Create a filepath based on the current data available
Example template:
{root}/{project}/{asset}/publish/{subset}/v{version:0>3}/
{subset}.{representation}
Args:
instance: the instance to publish
Returns:
file path (str)
"""
# get all the stuff from the database
subset_name = instance.data["subset"]
self.log.info(subset_name)
asset_name = instance.data["asset"]
project_name = api.Session["AVALON_PROJECT"]
a_template = anatomy.templates
project = io.find_one(
{"type": "project", "name": project_name},
projection={"config": True, "data": True}
)
template = a_template['publish']['path']
# anatomy = instance.context.data['anatomy']
asset = io.find_one({
"type": "asset",
"name": asset_name,
"parent": project["_id"]
})
assert asset, ("No asset found by the name '{}' "
"in project '{}'".format(asset_name, project_name))
subset = io.find_one({
"type": "subset",
"name": subset_name,
"parent": asset["_id"]
})
# assume there is no version yet, we start at `1`
version = None
version_number = 1
if subset is not None:
version = io.find_one(
{
"type": "version",
"parent": subset["_id"]
},
sort=[("name", -1)]
)
# if there is a subset there ought to be version
if version is not None:
version_number += version["name"]
if instance.data.get('version'):
version_number = int(instance.data.get('version'))
padding = int(a_template['render']['padding'])
hierarchy = asset['data']['parents']
if hierarchy:
# hierarchy = os.path.sep.join(hierarchy)
hierarchy = "/".join(hierarchy)
template_data = {"root": api.Session["AVALON_PROJECTS"],
"project": {"name": project_name,
"code": project['data']['code']},
"family": instance.data['family'],
"asset": asset_name,
"subset": subset_name,
"frame": ('#' * padding),
"version": version_number,
"hierarchy": hierarchy,
"representation": "TEMP"}
instance.data["assumedTemplateData"] = template_data
self.log.info(template_data)
instance.data["template"] = template

View file

@ -2,8 +2,11 @@ import os
from os.path import getsize
import logging
import sys
import copy
import clique
import errno
from pymongo import DeleteOne, InsertOne
import pyblish.api
from avalon import api, io
from avalon.vendor import filelink
@ -100,144 +103,148 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
def register(self, instance):
# Required environment variables
PROJECT = api.Session["AVALON_PROJECT"]
ASSET = instance.data.get("asset") or api.Session["AVALON_ASSET"]
TASK = instance.data.get("task") or api.Session["AVALON_TASK"]
LOCATION = api.Session["AVALON_LOCATION"]
anatomy_data = instance.data["anatomyData"]
io.install()
context = instance.context
# Atomicity
#
# Guarantee atomic publishes - each asset contains
# an identical set of members.
# __
# / o
# / \
# | o |
# \ /
# o __/
#
# for result in context.data["results"]:
# if not result["success"]:
# self.log.debug(result)
# exc_type, exc_value, exc_traceback = result["error_info"]
# extracted_traceback = traceback.extract_tb(exc_traceback)[-1]
# self.log.debug(
# "Error at line {}: \"{}\"".format(
# extracted_traceback[1], result["error"]
# )
# )
# assert all(result["success"] for result in context.data["results"]),(
# "Atomicity not held, aborting.")
# Assemble
#
# |
# v
# ---> <----
# ^
# |
#
project_entity = instance.data["projectEntity"]
context_asset_name = context.data["assetEntity"]["name"]
asset_name = instance.data["asset"]
asset_entity = instance.data.get("assetEntity")
if not asset_entity or asset_entity["name"] != context_asset_name:
asset_entity = io.find_one({
"type": "asset",
"name": asset_name,
"parent": project_entity["_id"]
})
assert asset_entity, (
"No asset found by the name \"{0}\" in project \"{1}\""
).format(asset_name, project_entity["name"])
instance.data["assetEntity"] = asset_entity
# update anatomy data with asset specific keys
# - name should already been set
hierarchy = ""
parents = asset_entity["data"]["parents"]
if parents:
hierarchy = "/".join(parents)
anatomy_data["hierarchy"] = hierarchy
task_name = instance.data.get("task")
if task_name:
anatomy_data["task"] = task_name
stagingdir = instance.data.get("stagingDir")
if not stagingdir:
self.log.info('''{} is missing reference to staging
directory Will try to get it from
representation'''.format(instance))
self.log.info((
"{0} is missing reference to staging directory."
" Will try to get it from representation."
).format(instance))
# extra check if stagingDir actually exists and is available
self.log.debug("Establishing staging directory @ %s" % stagingdir)
else:
self.log.debug(
"Establishing staging directory @ {0}".format(stagingdir)
)
# Ensure at least one file is set up for transfer in staging dir.
repres = instance.data.get("representations", None)
repres = instance.data.get("representations")
assert repres, "Instance has no files to transfer"
assert isinstance(repres, (list, tuple)), (
"Instance 'files' must be a list, got: {0}".format(repres)
"Instance 'files' must be a list, got: {0} {1}".format(
str(type(repres)), str(repres)
)
)
# FIXME: io is not initialized at this point for shell host
io.install()
project = io.find_one({"type": "project"})
subset = self.get_subset(asset_entity, instance)
asset = io.find_one({
"type": "asset",
"name": ASSET,
"parent": project["_id"]
})
assert all([project, asset]), ("Could not find current project or "
"asset '%s'" % ASSET)
subset = self.get_subset(asset, instance)
# get next version
latest_version = io.find_one(
{
"type": "version",
"parent": subset["_id"]
},
{"name": True},
sort=[("name", -1)]
)
next_version = 1
if latest_version is not None:
next_version += latest_version["name"]
if instance.data.get('version'):
next_version = int(instance.data.get('version'))
self.log.debug("Next version: v{0:03d}".format(next_version))
version_number = instance.data["version"]
self.log.debug("Next version: v{}".format(version_number))
version_data = self.create_version_data(context, instance)
version_data_instance = instance.data.get('versionData')
if version_data_instance:
version_data.update(version_data_instance)
version = self.create_version(subset=subset,
version_number=next_version,
locations=[LOCATION],
data=version_data)
# TODO rename method from `create_version` to
# `prepare_version` or similar...
version = self.create_version(
subset=subset,
version_number=version_number,
data=version_data
)
self.log.debug("Creating version ...")
new_repre_names_low = [_repre["name"].lower() for _repre in repres]
existing_version = io.find_one({
'type': 'version',
'parent': subset["_id"],
'name': next_version
'name': version_number
})
if existing_version is None:
version_id = io.insert_one(version).inserted_id
else:
# Check if instance have set `append` mode which cause that
# only replicated representations are set to archive
append_repres = instance.data.get("append", False)
# Update version data
# TODO query by _id and
io.update_many({
'type': 'version',
'parent': subset["_id"],
'name': next_version
}, {'$set': version}
)
'name': version_number
}, {
'$set': version
})
version_id = existing_version['_id']
# Find representations of existing version and archive them
current_repres = list(io.find({
"type": "representation",
"parent": version_id
}))
bulk_writes = []
for repre in current_repres:
if append_repres:
# archive only duplicated representations
if repre["name"].lower() not in new_repre_names_low:
continue
# Representation must change type,
# `_id` must be stored to other key and replaced with new
# - that is because new representations should have same ID
repre_id = repre["_id"]
bulk_writes.append(DeleteOne({"_id": repre_id}))
repre["orig_id"] = repre_id
repre["_id"] = io.ObjectId()
repre["type"] = "archived_representation"
bulk_writes.append(InsertOne(repre))
# bulk updates
if bulk_writes:
io._database[io.Session["AVALON_PROJECT"]].bulk_write(
bulk_writes
)
existing_repres = list(io.find({
"parent": version_id,
"type": "archived_representation"
}))
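# Net effect of the archiving above: an existing representation such as
# {"_id": X, "name": "exr", ...} is re-inserted as
# {"_id": Y, "orig_id": X, "type": "archived_representation", ...}.
# New representations created below reuse X as their _id (see the
# repre_id lookup), and the archived copies are deleted once the new
# representations are assembled.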
instance.data['version'] = version['name']
# Write to disk
# _
# | |
# _| |_
# ____\ /
# |\ \ / \
# \ \ v \
# \ \________.
# \|________|
#
root = api.registered_root()
hierarchy = ""
parents = io.find_one({
"type": 'asset',
"name": ASSET
})['data']['parents']
if parents and len(parents) > 0:
# hierarchy = os.path.sep.join(hierarchy)
hierarchy = os.path.join(*parents)
intent = context.data.get("intent")
if intent is not None:
anatomy_data["intent"] = intent
anatomy = instance.context.data['anatomy']
@ -250,31 +257,10 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
instance.data['transfers'] = []
for idx, repre in enumerate(instance.data["representations"]):
# Collection
# _______
# |______|\
# | |\|
# | ||
# | ||
# | ||
# |_______|
#
# create template data for Anatomy
template_data = {"root": root,
"project": {"name": PROJECT,
"code": project['data']['code']},
"silo": asset.get('silo'),
"task": TASK,
"asset": ASSET,
"family": instance.data['family'],
"subset": subset["name"],
"version": int(version["name"]),
"hierarchy": hierarchy}
# Add datetime data to template data
datetime_data = context.data.get("datetimeData") or {}
template_data.update(datetime_data)
template_data = copy.deepcopy(anatomy_data)
if intent is not None:
template_data["intent"] = intent
resolution_width = repre.get("resolutionWidth")
resolution_height = repre.get("resolutionHeight")
@ -292,6 +278,9 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
stagingdir = repre['stagingDir']
if repre.get('anatomy_template'):
template_name = repre['anatomy_template']
if repre.get("outputName"):
template_data["output"] = repre['outputName']
template = os.path.normpath(
anatomy.templates[template_name]["path"])
@ -322,7 +311,6 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
template_filled = anatomy_filled[template_name]["path"]
if repre_context is None:
repre_context = template_filled.used_values
test_dest_files.append(
os.path.normpath(template_filled)
)
@ -338,16 +326,21 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
index_frame_start = None
if repre.get("frameStart"):
frame_start_padding = anatomy.templates["render"]["padding"]
frame_start_padding = (
anatomy.templates["render"]["padding"]
)
index_frame_start = int(repre.get("frameStart"))
# exception for slate workflow
if "slate" in instance.data["families"]:
if index_frame_start and "slate" in instance.data["families"]:
index_frame_start -= 1
dst_padding_exp = src_padding_exp
dst_start_frame = None
for i in src_collection.indexes:
# TODO 1.) do not count padding in each index iteration
# 2.) do not count dst_padding from src_padding before
# index_frame_start check
src_padding = src_padding_exp % i
src_file_name = "{0}{1}{2}".format(
@ -375,7 +368,6 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
if not dst_start_frame:
dst_start_frame = dst_padding
dst = "{0}{1}{2}".format(
dst_head,
dst_start_frame,
@ -399,9 +391,6 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
template_data["representation"] = repre['ext']
if repre.get("outputName"):
template_data["output"] = repre['outputName']
src = os.path.join(stagingdir, fname)
anatomy_filled = anatomy.format(template_data)
template_filled = anatomy_filled[template_name]["path"]
@ -419,8 +408,21 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
continue
repre_context[key] = template_data[key]
# Use previous representation's id if there are any
repre_id = None
repre_name_low = repre["name"].lower()
for _repre in existing_repres:
# NOTE should we check lowered names?
if repre_name_low == _repre["name"]:
repre_id = _repre["orig_id"]
break
# Create new id if existing representations do not match
if repre_id is None:
repre_id = io.ObjectId()
representation = {
"_id": io.ObjectId(),
"_id": repre_id,
"schema": "pype:representation-2.0",
"type": "representation",
"parent": version_id,
@ -437,7 +439,9 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
representation["context"]["output"] = repre['outputName']
if sequence_repre and repre.get("frameStart"):
representation['context']['frame'] = src_padding_exp % int(repre.get("frameStart"))
representation['context']['frame'] = (
dst_padding_exp % int(repre.get("frameStart"))
)
self.log.debug("__ representation: {}".format(representation))
destination_list.append(dst)
@ -446,6 +450,13 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
representations.append(representation)
self.log.debug("__ representations: {}".format(representations))
# Remove old representations if there are any (before insertion of new)
if existing_repres:
repre_ids_to_remove = []
for repre in existing_repres:
repre_ids_to_remove.append(repre["_id"])
io.delete_many({"_id": {"$in": repre_ids_to_remove}})
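# Hedged sketch of the id reuse above (names hypothetical): if an
# earlier publish produced representation "exr" with _id X, the
# archived copy keeps X in "orig_id"; the fresh document is inserted
# with the same _id X after the stale one is deleted, so anything
# still pointing at X keeps resolving.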
self.log.debug("__ representations: {}".format(representations))
for rep in instance.data["representations"]:
self.log.debug("__ represNAME: {}".format(rep['name']))
@ -511,7 +522,8 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
"""
src = self.unc_convert(src)
dst = self.unc_convert(dst)
src = os.path.normpath(src)
dst = os.path.normpath(dst)
self.log.debug("Copying file .. {} -> {}".format(src, dst))
dirname = os.path.dirname(dst)
try:
@ -547,14 +559,14 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
filelink.create(src, dst, filelink.HARDLINK)
def get_subset(self, asset, instance):
subset_name = instance.data["subset"]
subset = io.find_one({
"type": "subset",
"parent": asset["_id"],
"name": instance.data["subset"]
"name": subset_name
})
if subset is None:
subset_name = instance.data["subset"]
self.log.info("Subset '%s' not found, creating.." % subset_name)
self.log.debug("families. %s" % instance.data.get('families'))
self.log.debug(
@ -583,26 +595,21 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
return subset
def create_version(self, subset, version_number, locations, data=None):
def create_version(self, subset, version_number, data=None):
""" Copy given source to destination
Args:
subset (dict): the registered subset of the asset
version_number (int): the version number
locations (list): the currently registered locations
Returns:
dict: collection of data to create a version
"""
# Imprint currently registered location
version_locations = [location for location in locations if
location is not None]
return {"schema": "pype:version-3.0",
"type": "version",
"parent": subset["_id"],
"name": version_number,
"locations": version_locations,
"data": data}
def create_version_data(self, context, instance):
@ -645,6 +652,10 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
"fps": context.data.get(
"fps", instance.data.get("fps"))}
intent = context.data.get("intent")
if intent is not None:
version_data["intent"] = intent
# Include optional data if present in
optionals = [
"frameStart", "frameEnd", "step", "handles",

View file

@ -1,423 +0,0 @@
import os
import logging
import shutil
import clique
import errno
import pyblish.api
from avalon import api, io
log = logging.getLogger(__name__)
class IntegrateFrames(pyblish.api.InstancePlugin):
"""Resolve any dependency issies
This plug-in resolves any paths which, if not updated might break
the published file.
The order of families is important, when working with lookdev you want to
first publish the texture, update the texture paths in the nodes and then
publish the shading network. Same goes for file dependent assets.
"""
label = "Integrate Frames"
order = pyblish.api.IntegratorOrder
families = ["imagesequence"]
family_targets = [".frames", ".local", ".review", "imagesequence", "render", "source"]
exclude_families = ["clip"]
def process(self, instance):
if [ef for ef in self.exclude_families
if instance.data["family"] in ef]:
return
families = [f for f in instance.data["families"]
for search in self.family_targets
if search in f]
if not families:
return
self.register(instance)
# self.log.info("Integrating Asset in to the database ...")
# self.log.info("instance.data: {}".format(instance.data))
if instance.data.get('transfer', True):
self.integrate(instance)
def register(self, instance):
# Required environment variables
PROJECT = api.Session["AVALON_PROJECT"]
ASSET = instance.data.get("asset") or api.Session["AVALON_ASSET"]
LOCATION = api.Session["AVALON_LOCATION"]
context = instance.context
# Atomicity
#
# Guarantee atomic publishes - each asset contains
# an identical set of members.
# __
# / o
# / \
# | o |
# \ /
# o __/
#
assert all(result["success"] for result in context.data["results"]), (
"Atomicity not held, aborting.")
# Assemble
#
# |
# v
# ---> <----
# ^
# |
#
stagingdir = instance.data.get("stagingDir")
assert stagingdir, ("Incomplete instance \"%s\": "
"Missing reference to staging area." % instance)
# extra check if stagingDir actually exists and is available
self.log.debug("Establishing staging directory @ %s" % stagingdir)
project = io.find_one({"type": "project"})
asset = io.find_one({
"type": "asset",
"name": ASSET,
"parent": project["_id"]
})
assert all([project, asset]), ("Could not find current project or "
"asset '%s'" % ASSET)
subset = self.get_subset(asset, instance)
# get next version
latest_version = io.find_one(
{
"type": "version",
"parent": subset["_id"]
},
{"name": True},
sort=[("name", -1)]
)
next_version = 1
if latest_version is not None:
next_version += latest_version["name"]
self.log.info("Verifying version from assumed destination")
assumed_data = instance.data["assumedTemplateData"]
assumed_version = assumed_data["version"]
if assumed_version != next_version:
raise AttributeError("Assumed version 'v{0:03d}' does not match"
"next version in database "
"('v{1:03d}')".format(assumed_version,
next_version))
if instance.data.get('version'):
next_version = int(instance.data.get('version'))
self.log.debug("Next version: v{0:03d}".format(next_version))
version_data = self.create_version_data(context, instance)
version = self.create_version(subset=subset,
version_number=next_version,
locations=[LOCATION],
data=version_data)
self.log.debug("Creating version ...")
version_id = io.insert_one(version).inserted_id
# Write to disk
# _
# | |
# _| |_
# ____\ /
# |\ \ / \
# \ \ v \
# \ \________.
# \|________|
#
root = api.registered_root()
hierarchy = ""
parents = io.find_one({"type": 'asset', "name": ASSET})[
'data']['parents']
if parents and len(parents) > 0:
# hierarchy = os.path.sep.join(hierarchy)
hierarchy = os.path.join(*parents)
template_data = {"root": root,
"project": {"name": PROJECT,
"code": project['data']['code']},
"silo": asset.get('silo'),
"task": api.Session["AVALON_TASK"],
"asset": ASSET,
"family": instance.data['family'],
"subset": subset["name"],
"version": int(version["name"]),
"hierarchy": hierarchy}
# template_publish = project["config"]["template"]["publish"]
anatomy = instance.context.data['anatomy']
# Find the representations to transfer amongst the files
# Each should be a single representation (as such, a single extension)
representations = []
destination_list = []
if 'transfers' not in instance.data:
instance.data['transfers'] = []
for files in instance.data["files"]:
# Collection
# _______
# |______|\
# | |\|
# | ||
# | ||
# | ||
# |_______|
#
if isinstance(files, list):
src_collections, remainder = clique.assemble(files)
src_collection = src_collections[0]
# Assert that each member has identical suffix
src_head = src_collection.format("{head}")
src_tail = ext = src_collection.format("{tail}")
test_dest_files = list()
for i in [1, 2]:
template_data["representation"] = src_tail[1:]
template_data["frame"] = src_collection.format(
"{padding}") % i
anatomy_filled = anatomy.format(template_data)
test_dest_files.append(anatomy_filled["render"]["path"])
dst_collections, remainder = clique.assemble(test_dest_files)
dst_collection = dst_collections[0]
dst_head = dst_collection.format("{head}")
dst_tail = dst_collection.format("{tail}")
for i in src_collection.indexes:
src_padding = src_collection.format("{padding}") % i
src_file_name = "{0}{1}{2}".format(
src_head, src_padding, src_tail)
dst_padding = dst_collection.format("{padding}") % i
dst = "{0}{1}{2}".format(dst_head, dst_padding, dst_tail)
src = os.path.join(stagingdir, src_file_name)
instance.data["transfers"].append([src, dst])
else:
# Single file
# _______
# | |\
# | |
# | |
# | |
# |_______|
#
template_data.pop("frame", None)
fname = files
self.log.info("fname: {}".format(fname))
assert not os.path.isabs(fname), (
"Given file name is a full path"
)
_, ext = os.path.splitext(fname)
template_data["representation"] = ext[1:]
src = os.path.join(stagingdir, fname)
anatomy_filled = anatomy.format(template_data)
dst = anatomy_filled["render"]["path"]
instance.data["transfers"].append([src, dst])
if ext[1:] not in ["jpeg", "jpg", "mov", "mp4", "wav"]:
template_data["frame"] = "#" * int(anatomy_filled["render"]["padding"])
anatomy_filled = anatomy.format(template_data)
path_to_save = anatomy_filled["render"]["path"]
template = anatomy.templates["render"]["path"]
self.log.debug("path_to_save: {}".format(path_to_save))
representation = {
"schema": "pype:representation-2.0",
"type": "representation",
"parent": version_id,
"name": ext[1:],
"data": {'path': path_to_save, 'template': template},
"dependencies": instance.data.get("dependencies", "").split(),
# Imprint shortcut to context
# for performance reasons.
"context": {
"root": root,
"project": {
"name": PROJECT,
"code": project['data']['code']
},
"task": api.Session["AVALON_TASK"],
"silo": asset['silo'],
"asset": ASSET,
"family": instance.data['family'],
"subset": subset["name"],
"version": int(version["name"]),
"hierarchy": hierarchy,
"representation": ext[1:]
}
}
destination_list.append(dst)
instance.data['destination_list'] = destination_list
representations.append(representation)
self.log.info("Registering {} items".format(len(representations)))
io.insert_many(representations)
def integrate(self, instance):
"""Move the files
Through `instance.data["transfers"]`
Args:
instance: the instance to integrate
"""
transfers = instance.data["transfers"]
for src, dest in transfers:
src = os.path.normpath(src)
dest = os.path.normpath(dest)
if src in dest:
continue
self.log.info("Copying file .. {} -> {}".format(src, dest))
self.copy_file(src, dest)
def copy_file(self, src, dst):
""" Copy given source to destination
Arguments:
src (str): the source file which needs to be copied
dst (str): the destination of the source file
Returns:
None
"""
dirname = os.path.dirname(dst)
try:
os.makedirs(dirname)
except OSError as e:
if e.errno == errno.EEXIST:
pass
else:
self.log.critical("An unexpected error occurred.")
raise
shutil.copy(src, dst)
def get_subset(self, asset, instance):
subset = io.find_one({
"type": "subset",
"parent": asset["_id"],
"name": instance.data["subset"]
})
if subset is None:
subset_name = instance.data["subset"]
self.log.info("Subset '%s' not found, creating.." % subset_name)
_id = io.insert_one({
"schema": "pype:subset-2.0",
"type": "subset",
"name": subset_name,
"data": {},
"parent": asset["_id"]
}).inserted_id
subset = io.find_one({"_id": _id})
return subset
def create_version(self, subset, version_number, locations, data=None):
""" Copy given source to destination
Args:
subset (dict): the registered subset of the asset
version_number (int): the version number
locations (list): the currently registered locations
Returns:
dict: collection of data to create a version
"""
# Imprint currently registered location
version_locations = [location for location in locations if
location is not None]
return {"schema": "pype:version-2.0",
"type": "version",
"parent": subset["_id"],
"name": version_number,
"locations": version_locations,
"data": data}
def create_version_data(self, context, instance):
"""Create the data collection for the version
Args:
context: the current context
instance: the current instance being published
Returns:
dict: the required information with instance.data as key
"""
families = []
current_families = instance.data.get("families", list())
instance_family = instance.data.get("family", None)
if instance_family is not None:
families.append(instance_family)
families += current_families
try:
source = instance.data['source']
except KeyError:
source = context.data["currentFile"]
relative_path = os.path.relpath(source, api.registered_root())
source = os.path.join("{root}", relative_path).replace("\\", "/")
version_data = {"families": families,
"time": context.data["time"],
"author": context.data["user"],
"source": source,
"comment": context.data.get("comment")}
# Include optional data if present in
optionals = ["frameStart", "frameEnd", "step",
"handles", "colorspace", "fps", "outputDir"]
for key in optionals:
if key in instance.data:
version_data[key] = instance.data.get(key, None)
return version_data

View file

@ -0,0 +1,49 @@
import os
import pyblish.api
class IntegrateResourcesPath(pyblish.api.InstancePlugin):
"""Generate directory path where the files and resources will be stored"""
label = "Integrate Resources Path"
order = pyblish.api.IntegratorOrder - 0.05
families = ["clip", "projectfile", "plate"]
def process(self, instance):
resources = instance.data.get("resources") or []
transfers = instance.data.get("transfers") or []
if not resources and not transfers:
self.log.debug(
"Instance does not have `resources` and `transfers`"
)
return
resources_folder = instance.data["resourcesDir"]
# Define resource destination and transfers
for resource in resources:
# Add destination to the resource
source_filename = os.path.basename(
resource["source"]).replace("\\", "/")
destination = os.path.join(resources_folder, source_filename)
# Force forward slashes to fix issue with software unable
# to work correctly with backslashes in specific scenarios
# (e.g. escape characters in PLN-151 V-Ray UDIM)
destination = destination.replace("\\", "/")
resource['destination'] = destination
# Collect transfers for the individual files of the resource
# e.g. all individual files of a cache or UDIM textures.
files = resource['files']
for fsrc in files:
fname = os.path.basename(fsrc)
fdest = os.path.join(
resources_folder, fname
).replace("\\", "/")
transfers.append([fsrc, fdest])
instance.data["resources"] = resources
instance.data["transfers"] = transfers

View file

@ -137,3 +137,12 @@ class IntegrateThumbnails(pyblish.api.InstancePlugin):
self.log.debug("Setting thumbnail for version \"{}\" <{}>".format(
version["name"], str(version["_id"])
))
asset_entity = instance.data["assetEntity"]
io.update_many(
{"_id": asset_entity["_id"]},
{"$set": {"data.thumbnail_id": thumbnail_id}}
)
self.log.debug("Setting thumbnail for asset \"{}\" <{}>".format(
asset_entity["name"], str(version["_id"])
))

View file

@ -1,7 +1,7 @@
import os
import json
import re
import logging
from copy import copy, deepcopy
from avalon import api, io
from avalon.vendor import requests, clique
@ -14,16 +14,15 @@ def _get_script():
try:
from pype.scripts import publish_filesequence
except Exception:
raise RuntimeError("Expected module 'publish_deadline'"
"to be available")
assert False, "Expected module 'publish_deadline'to be available"
module_path = publish_filesequence.__file__
if module_path.endswith(".pyc"):
module_path = module_path[:-len(".pyc")] + ".py"
module_path = module_path[: -len(".pyc")] + ".py"
module_path = os.path.normpath(module_path)
mount_root = os.path.normpath(os.environ['PYPE_STUDIO_CORE_MOUNT'])
network_root = os.path.normpath(os.environ['PYPE_STUDIO_CORE_PATH'])
mount_root = os.path.normpath(os.environ["PYPE_STUDIO_CORE_MOUNT"])
network_root = os.path.normpath(os.environ["PYPE_STUDIO_CORE_PATH"])
module_path = module_path.replace(mount_root, network_root)
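# Hypothetical example of the remap above: with
# PYPE_STUDIO_CORE_MOUNT="P:/core" and
# PYPE_STUDIO_CORE_PATH="//server/core", the module path
# "P:/core/pype/scripts/publish_filesequence.py" becomes
# "//server/core/pype/scripts/publish_filesequence.py", reachable
# from render nodes that lack the drive mapping.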
@ -34,39 +33,29 @@ def _get_script():
def get_latest_version(asset_name, subset_name, family):
# Get asset
asset_name = io.find_one(
{
"type": "asset",
"name": asset_name
},
projection={"name": True}
{"type": "asset", "name": asset_name}, projection={"name": True}
)
subset = io.find_one(
{
"type": "subset",
"name": subset_name,
"parent": asset_name["_id"]
},
projection={"_id": True, "name": True}
{"type": "subset", "name": subset_name, "parent": asset_name["_id"]},
projection={"_id": True, "name": True},
)
# Check if subsets actually exists (pre-run check)
assert subset, "No subsets found, please publish with `extendFrames` off"
# Get version
version_projection = {"name": True,
"data.startFrame": True,
"data.endFrame": True,
"parent": True}
version_projection = {
"name": True,
"data.startFrame": True,
"data.endFrame": True,
"parent": True,
}
version = io.find_one(
{
"type": "version",
"parent": subset["_id"],
"data.families": family
},
{"type": "version", "parent": subset["_id"], "data.families": family},
projection=version_projection,
sort=[("name", -1)]
sort=[("name", -1)],
)
assert version, "No version found, this is a bug"
@ -87,8 +76,12 @@ def get_resources(version, extension=None):
directory = api.get_representation_path(representation)
print("Source: ", directory)
resources = sorted([os.path.normpath(os.path.join(directory, fname))
for fname in os.listdir(directory)])
resources = sorted(
[
os.path.normpath(os.path.join(directory, fname))
for fname in os.listdir(directory)
]
)
return resources
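# A minimal usage sketch (asset/subset names hypothetical):
#   version = get_latest_version("sh010", "renderCompositingMain",
#                                "render")
#   frames = get_resources(version, extension="exr")
# `frames` is then the sorted list of absolute paths to the frames of
# the latest published version.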
@ -138,8 +131,7 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
- publishJobState (str, Optional): "Active" or "Suspended"
This defaults to "Suspended"
This requires a "frameStart" and "frameEnd" to be present in instance.data
or in context.data.
- expectedFiles (list or dict): explained below
"""
@ -149,23 +141,39 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
hosts = ["fusion", "maya", "nuke"]
families = [
"render.farm",
"renderlayer",
"imagesequence"
]
families = ["render.farm", "renderlayer", "imagesequence"]
aov_filter = {"maya": ["beauty"]}
enviro_filter = [
"PATH",
"PYTHONPATH",
"FTRACK_API_USER",
"FTRACK_API_KEY",
"FTRACK_SERVER",
"PYPE_ROOT",
"PYPE_METADATA_FILE",
"PYPE_STUDIO_PROJECTS_PATH",
"PYPE_STUDIO_PROJECTS_MOUNT"
]
"PATH",
"PYTHONPATH",
"FTRACK_API_USER",
"FTRACK_API_KEY",
"FTRACK_SERVER",
"PYPE_ROOT",
"PYPE_METADATA_FILE",
"PYPE_STUDIO_PROJECTS_PATH",
"PYPE_STUDIO_PROJECTS_MOUNT",
]
# pool used to do the publishing job
deadline_pool = ""
# regex for finding frame number in string
R_FRAME_NUMBER = re.compile(r'.+\.(?P<frame>[0-9]+)\..+')
# mapping of instance properties to be transferred to the new instance
# for every specified family
instance_transfer = {
"slate": ["slateFrame"],
"review": ["lutPath"],
"render.farm": ["bakeScriptPath", "bakeRenderPath",
"bakeWriteNodeName", "version"]
}
# list of family names to transfer to new family if present
families_transfer = ["render3d", "render2d", "ftrack", "slate"]
def _submit_deadline_post_job(self, instance, job):
"""
@ -176,8 +184,7 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
data = instance.data.copy()
subset = data["subset"]
job_name = "{batch} - {subset} [publish image sequence]".format(
batch=job["Props"]["Name"],
subset=subset
batch=job["Props"]["Name"], subset=subset
)
metadata_filename = "{}_metadata.json".format(subset)
@ -185,11 +192,10 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
metadata_path = os.path.join(output_dir, metadata_filename)
metadata_path = os.path.normpath(metadata_path)
mount_root = os.path.normpath(os.environ['PYPE_STUDIO_PROJECTS_MOUNT'])
network_root = os.path.normpath(
os.environ['PYPE_STUDIO_PROJECTS_PATH'])
mount_root = os.path.normpath(os.environ["PYPE_STUDIO_PROJECTS_MOUNT"])
network_root = os.environ["PYPE_STUDIO_PROJECTS_PATH"]
metadata_path = metadata_path.replace(mount_root, network_root)
metadata_path = os.path.normpath(metadata_path)
# Generate the payload for Deadline submission
payload = {
@ -197,21 +203,21 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
"Plugin": "Python",
"BatchName": job["Props"]["Batch"],
"Name": job_name,
"JobType": "Normal",
"JobDependency0": job["_id"],
"UserName": job["Props"]["User"],
"Comment": instance.context.data.get("comment", ""),
"Priority": job["Props"]["Pri"]
"Priority": job["Props"]["Pri"],
"Pool": self.deadline_pool,
"OutputDirectory0": output_dir
},
"PluginInfo": {
"Version": "3.6",
"ScriptFile": _get_script(),
"Arguments": "",
"SingleFrameOnly": "True"
"SingleFrameOnly": "True",
},
# Mandatory for Deadline, may be empty
"AuxFiles": []
"AuxFiles": [],
}
# Transfer the environment from the original job to this dependent
@ -221,30 +227,268 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
environment["PYPE_METADATA_FILE"] = metadata_path
i = 0
for index, key in enumerate(environment):
self.log.info("KEY: {}".format(key))
self.log.info("FILTER: {}".format(self.enviro_filter))
if key.upper() in self.enviro_filter:
payload["JobInfo"].update({
"EnvironmentKeyValue%d" % i: "{key}={value}".format(
key=key,
value=environment[key]
)
})
payload["JobInfo"].update(
{
"EnvironmentKeyValue%d"
% i: "{key}={value}".format(
key=key, value=environment[key]
)
}
)
i += 1
# Avoid copied pools and remove secondary pool
payload["JobInfo"]["Pool"] = "none"
payload["JobInfo"].pop("SecondaryPool", None)
self.log.info("Submitting..")
self.log.info(json.dumps(payload, indent=4, sort_keys=True))
self.log.info("Submitting Deadline job ...")
# self.log.info(json.dumps(payload, indent=4, sort_keys=True))
url = "{}/api/jobs".format(self.DEADLINE_REST_URL)
response = requests.post(url, json=payload)
if not response.ok:
raise Exception(response.text)
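# A hedged sketch of the exchange above: the payload is POSTed to
# "{DEADLINE_REST_URL}/api/jobs"; Deadline answers 2xx with the
# created job document on success, anything else surfaces here as an
# Exception carrying the response text.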
def _copy_extend_frames(self, instance, representation):
"""
This will copy all existing frames from subset's latest version back
to render directory and rename them to what renderer is expecting.
:param instance: instance to get required data from
:type instance: pyblish.plugin.Instance
"""
import speedcopy
self.log.info("Preparing to copy ...")
start = instance.data.get("startFrame")
end = instance.data.get("endFrame")
# get latest version of subset
# this will stop if subset wasn't published yet
version = get_latest_version(
instance.data.get("asset"),
instance.data.get("subset"), "render")
# get its files based on extension
subset_resources = get_resources(version, representation.get("ext"))
r_col, _ = clique.assemble(subset_resources)
# if override remove all frames we are expecting to be rendered
# so we'll copy only those missing from current render
if instance.data.get("overrideExistingFrame"):
for frame in range(start, end+1):
if frame not in r_col.indexes:
continue
r_col.indexes.remove(frame)
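# A hedged sketch of clique.assemble (file names hypothetical):
#   cols, rem = clique.assemble(["b.1001.exr", "b.1002.exr"])
# gives one Collection "b.%04d.exr [1001-1002]" and an empty
# remainder; `indexes` is its mutable set of frame numbers, which is
# why frames can be removed from r_col above.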
# now we need to translate published names from the representation
# back. This is tricky; right now we just use the same naming
# and only switch frame numbers
resource_files = []
r_filename = os.path.basename(
representation.get("files")[0]) # first file
op = re.search(self.R_FRAME_NUMBER, r_filename)
assert op is not None, "padding string wasn't found"
pre = r_filename[:op.start("frame")]
post = r_filename[op.end("frame"):]
for frame in list(r_col):
fn = re.search(self.R_FRAME_NUMBER, frame)
# every file in the collection must carry a parsable frame number
assert fn is not None, "padding string wasn't found"
# list of tuples (source, destination)
resource_files.append(
(frame,
os.path.join(representation.get("stagingDir"),
"{}{}{}".format(pre,
fn.group("frame"),
post)))
)
# test if destination dir exists and create it if not
output_dir = os.path.dirname(representation.get("files")[0])
if not os.path.isdir(output_dir):
os.makedirs(output_dir)
# copy files
for source in resource_files:
speedcopy.copy(source[0], source[1])
self.log.info(" > {}".format(source[1]))
self.log.info(
"Finished copying %i files" % len(resource_files))
def _create_instances_for_aov(self, instance_data, exp_files):
"""
This will create new instance for every aov it can detect in expected
files list.
:param instance_data: skeleton data for instance (those needed) later
by collector
:type instance_data: pyblish.plugin.Instance
:param exp_files: list of expected files divided by aovs
:type exp_files: list
:returns: list of instances
:rtype: list(publish.plugin.Instance)
"""
task = os.environ["AVALON_TASK"]
subset = instance_data["subset"]
instances = []
# go through aovs in expected files
for aov, files in exp_files[0].items():
cols, rem = clique.assemble(files)
# we shouldn't have any remainders
if rem:
self.log.warning(
"skipping unexpected files found "
"in sequence: {}".format(rem))
# but we really expect only one collection, nothing else makes sense
assert len(cols) == 1, "only one image sequence type is expected"
# create subset name `familyTaskSubset_AOV`
subset_name = 'render{}{}{}{}_{}'.format(
task[0].upper(), task[1:],
subset[0].upper(), subset[1:],
aov)
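# hypothetical example of the name built above: task "compositing",
# subset "Main", aov "beauty" -> "renderCompositingMain_beauty"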
staging = os.path.dirname(list(cols[0])[0])
start = int(instance_data.get("frameStart"))
end = int(instance_data.get("frameEnd"))
self.log.info("Creating data for: {}".format(subset_name))
app = os.environ.get("AVALON_APP", "")
preview = False
if app in self.aov_filter.keys():
if aov in self.aov_filter[app]:
preview = True
new_instance = copy(instance_data)
new_instance["subset"] = subset_name
ext = cols[0].tail.lstrip(".")
# create representation
rep = {
"name": ext,
"ext": ext,
"files": [os.path.basename(f) for f in list(cols[0])],
"frameStart": start,
"frameEnd": end,
# If expectedFiles are absolute, we need only filenames
"stagingDir": staging,
"anatomy_template": "render",
"fps": new_instance.get("fps"),
"tags": ["review"] if preview else []
}
self._solve_families(new_instance, preview)
new_instance["representations"] = [rep]
# if extending frames from existing version, copy files from there
# into our destination directory
if new_instance.get("extendFrames", False):
self._copy_extend_frames(new_instance, rep)
instances.append(new_instance)
return instances
def _get_representations(self, instance, exp_files):
"""
This will return representations of expected files if they are not
in a hierarchy of AOVs. There should be only one sequence of files in
most cases, but if not, we create a representation for each of them.
:param instance: instance for which we are setting representations
:type instance: pyblish.plugin.Instance
:param exp_files: list of expected files
:type exp_files: list
:returns: list of representations
:rtype: list(dict)
"""
representations = []
start = int(instance.get("frameStart"))
end = int(instance.get("frameEnd"))
cols, rem = clique.assemble(exp_files)
bake_render_path = instance.get("bakeRenderPath")
# create representation for every collected sequence
for c in cols:
ext = c.tail.lstrip(".")
preview = False
# if filtered aov name is found in filename, toggle it for
# preview video rendering
for app in self.aov_filter:
if os.environ.get("AVALON_APP", "") == app:
for aov in self.aov_filter[app]:
if re.match(
r".+(?:\.|_)({})(?:\.|_).*".format(aov),
list(c)[0]
):
preview = True
break
break
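# Hypothetical match for the filter above: with aov "beauty", a
# file "shot010_beauty.0001.exr" satisfies
# r".+(?:\.|_)(beauty)(?:\.|_).*" and turns `preview` on.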
if bake_render_path:
preview = False
rep = {
"name": ext,
"ext": ext,
"files": [os.path.basename(f) for f in list(c)],
"frameStart": start,
"frameEnd": end,
# If expectedFiles are absolute, we need only filenames
"stagingDir": os.path.dirname(list(c)[0]),
"anatomy_template": "render",
"fps": instance.get("fps"),
"tags": ["review", "preview"] if preview else [],
}
representations.append(rep)
self._solve_families(instance, preview)
# add remainders as representations
for r in rem:
ext = r.split(".")[-1]
rep = {
"name": ext,
"ext": ext,
"files": os.path.basename(r),
"stagingDir": os.path.dirname(r),
"anatomy_template": "publish",
}
if bake_render_path and r in bake_render_path:
rep.update({
"fps": instance.get("fps"),
"anatomy_template": "render",
"tags": ["review", "delete"]
})
# solve families with `preview` attributes
self._solve_families(instance, True)
representations.append(rep)
return representations
def _solve_families(self, instance, preview=False):
families = instance.get("families")
# if we have one representation with preview tag
# flag whole instance for review and for ftrack
if preview:
if "ftrack" not in families:
if os.environ.get("FTRACK_SERVER"):
families.append("ftrack")
if "review" not in families:
families.append("review")
instance["families"] = families
def process(self, instance):
"""
Detect type of renderfarm submission and create and post a dependent job
@ -254,212 +498,278 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
:param instance: Instance data
:type instance: dict
"""
# Get a submission job
data = instance.data.copy()
context = instance.context
self.context = context
if hasattr(instance, "_log"):
data['_log'] = instance._log
render_job = data.pop("deadlineSubmissionJob", None)
submission_type = "deadline"
if not render_job:
# No deadline job. Try Muster: musterSubmissionJob
render_job = data.pop("musterSubmissionJob", None)
submission_type = "muster"
if not render_job:
raise RuntimeError("Can't continue without valid Deadline "
"or Muster submission prior to this "
"plug-in.")
assert render_job, (
"Can't continue without valid Deadline "
"or Muster submission prior to this "
"plug-in."
)
if submission_type == "deadline":
self.DEADLINE_REST_URL = os.environ.get("DEADLINE_REST_URL",
"http://localhost:8082")
self.DEADLINE_REST_URL = os.environ.get(
"DEADLINE_REST_URL", "http://localhost:8082"
)
assert self.DEADLINE_REST_URL, "Requires DEADLINE_REST_URL"
self._submit_deadline_post_job(instance, render_job)
asset = data.get("asset") or api.Session["AVALON_ASSET"]
subset = data["subset"]
subset = data.get("subset")
# Get start/end frame from instance, if not available get from context
context = instance.context
start = instance.data.get("frameStart")
if start is None:
start = context.data["frameStart"]
end = instance.data.get("frameEnd")
if end is None:
end = context.data["frameEnd"]
# Add in regex for sequence filename
# This assumes the output files start with subset name and ends with
# a file extension. The "ext" key includes the dot with the extension.
if "ext" in instance.data:
ext = r"\." + re.escape(instance.data["ext"])
else:
ext = r"\.\D+"
handle_start = instance.data.get("handleStart")
if handle_start is None:
handle_start = context.data["handleStart"]
regex = r"^{subset}.*\d+{ext}$".format(subset=re.escape(subset),
ext=ext)
handle_end = instance.data.get("handleEnd")
if handle_end is None:
handle_end = context.data["handleEnd"]
fps = instance.data.get("fps")
if fps is None:
fps = context.data["fps"]
if data.get("extendFrames", False):
start, end = self._extend_frames(
asset,
subset,
start,
end,
data["overrideExistingFrame"])
try:
source = data['source']
source = data["source"]
except KeyError:
source = context.data["currentFile"]
source = source.replace(os.getenv("PYPE_STUDIO_PROJECTS_MOUNT"),
api.registered_root())
source = source.replace(
os.getenv("PYPE_STUDIO_PROJECTS_MOUNT"), api.registered_root()
)
relative_path = os.path.relpath(source, api.registered_root())
source = os.path.join("{root}", relative_path).replace("\\", "/")
# find subsets and version to attach render to
attach_to = instance.data.get("attachTo")
attach_subset_versions = []
if attach_to:
for subset in attach_to:
for instance in context:
if instance.data["subset"] != subset["subset"]:
continue
attach_subset_versions.append(
{"version": instance.data["version"],
"subset": subset["subset"],
"family": subset["family"]})
families = ["render"]
# Write metadata for publish job
metadata = {
instance_skeleton_data = {
"family": "render",
"subset": subset,
"families": families,
"asset": asset,
"frameStart": start,
"frameEnd": end,
"handleStart": handle_start,
"handleEnd": handle_end,
"fps": fps,
"source": source,
"extendFrames": data.get("extendFrames"),
"overrideExistingFrame": data.get("overrideExistingFrame"),
"pixelAspect": data.get("pixelAspect", 1),
"resolutionWidth": data.get("resolutionWidth", 1920),
"resolutionHeight": data.get("resolutionHeight", 1080),
}
# transfer specific families from original instance to new render
for item in self.families_transfer:
if item in instance.data.get("families", []):
instance_skeleton_data["families"] += [item]
# transfer specific properties from original instance based on
# mapping dictionary `instance_transfer`
for key, values in self.instance_transfer.items():
if key in instance.data.get("families", []):
for v in values:
instance_skeleton_data[v] = instance.data.get(v)
# look into instance data for representations which have the
# tag `publish_on_farm` and include them
for r in instance.data.get("representations", []):
if "publish_on_farm" in r.get("tags"):
# create representations attribute if not there
if "representations" not in instance_skeleton_data.keys():
instance_skeleton_data["representations"] = []
instance_skeleton_data["representations"].append(r)
instances = None
assert data.get("expectedFiles"), ("Submission from old Pype version"
" - missing expectedFiles")
"""
if the items of `expectedFiles` are dictionaries, we handle them as
a list of AOVs, creating an instance from every one of them.
Example:
--------
expectedFiles = [
{
"beauty": [
"foo_v01.0001.exr",
"foo_v01.0002.exr"
],
"Z": [
"boo_v01.0001.exr",
"boo_v01.0002.exr"
]
}
]
This will create instances for `beauty` and `Z` subset
adding those files to their respective representations.
If we've got only a list of files, we collect all file sequences.
More than one probably doesn't make sense, but we'll handle it
like creating one instance with multiple representations.
Example:
--------
expectedFiles = [
"foo_v01.0001.exr",
"foo_v01.0002.exr",
"xxx_v01.0001.exr",
"xxx_v01.0002.exr"
]
This will result in one instance with two representations:
`foo` and `xxx`
"""
self.log.info(data.get("expectedFiles"))
if isinstance(data.get("expectedFiles")[0], dict):
# we cannot attach AOVs to other subsets as we consider every
# AOV a subset of its own.
if len(data.get("attachTo")) > 0:
assert len(data.get("expectedFiles")[0].keys()) == 1, (
"attaching multiple AOVs or renderable cameras to "
"subset is not supported")
# create instances for every AOV we found in expected files.
# note: this is done for every AOV and every render camera (if
# there are multiple renderable cameras in scene)
instances = self._create_instances_for_aov(
instance_skeleton_data,
data.get("expectedFiles"))
self.log.info("got {} instance{}".format(
len(instances),
"s" if len(instances) > 1 else ""))
else:
representations = self._get_representations(
instance_skeleton_data,
data.get("expectedFiles")
)
if "representations" not in instance_skeleton_data.keys():
instance_skeleton_data["representations"] = []
# add representation
instance_skeleton_data["representations"] += representations
instances = [instance_skeleton_data]
# if we are attaching to other subsets, create a copy of existing
# instances, change data to match that subset and replace
# existing instances with the modified data
if instance.data.get("attachTo"):
self.log.info("Attaching render to subset:")
new_instances = []
for at in instance.data.get("attachTo"):
for i in instances:
# deep copy so the shared "families" list is not mutated across
# attached instances
new_i = deepcopy(i)
new_i["version"] = at.get("version")
new_i["subset"] = at.get("subset")
new_i["append"] = True
new_i["families"].append(at.get("family"))
new_instances.append(new_i)
self.log.info(" - {} / v{}".format(
at.get("subset"), at.get("version")))
instances = new_instances
# publish job file
publish_job = {
"asset": asset,
"regex": regex,
"frameStart": start,
"frameEnd": end,
"fps": context.data.get("fps", None),
"families": ["render"],
"source": source,
"user": context.data["user"],
"version": context.data["version"],
"version": context.data["version"], # this is workfile version
"intent": context.data.get("intent"),
"comment": context.data.get("comment"),
# Optional metadata (for debugging)
"metadata": {
"instance": data,
"job": render_job,
"session": api.Session.copy()
}
"job": render_job,
"session": api.Session.copy(),
"instances": instances
}
if api.Session["AVALON_APP"] == "nuke":
metadata['subset'] = subset
# pass Ftrack credentials in case of Muster
if submission_type == "muster":
ftrack = {
"FTRACK_API_USER": os.environ.get("FTRACK_API_USER"),
"FTRACK_API_KEY": os.environ.get("FTRACK_API_KEY"),
"FTRACK_SERVER": os.environ.get("FTRACK_SERVER")
"FTRACK_SERVER": os.environ.get("FTRACK_SERVER"),
}
metadata.update({"ftrack": ftrack})
publish_job.update({"ftrack": ftrack})
# Ensure output dir exists
output_dir = instance.data["outputDir"]
if not os.path.isdir(output_dir):
os.makedirs(output_dir)
if data.get("extendFrames", False):
family = "render"
override = data["overrideExistingFrame"]
# override = data.get("overrideExistingFrame", False)
out_file = render_job.get("OutFile")
if not out_file:
raise RuntimeError("OutFile not found in render job!")
extension = os.path.splitext(out_file[0])[1]
_ext = extension[1:]
# Frame comparison
prev_start = None
prev_end = None
resource_range = range(int(start), int(end)+1)
# Gather all the subset files (one subset per render pass!)
subset_names = [data["subset"]]
subset_names.extend(data.get("renderPasses", []))
resources = []
for subset_name in subset_names:
version = get_latest_version(asset_name=data["asset"],
subset_name=subset_name,
family=family)
# Set prev start / end frames for comparison
if not prev_start and not prev_end:
prev_start = version["data"]["frameStart"]
prev_end = version["data"]["frameEnd"]
subset_resources = get_resources(version, _ext)
resource_files = get_resource_files(subset_resources,
resource_range,
override)
resources.extend(resource_files)
updated_start = min(start, prev_start)
updated_end = max(end, prev_end)
# Update metadata and instance start / end frame
self.log.info("Updating start / end frame : "
"{} - {}".format(updated_start, updated_end))
# TODO : Improve logic to get new frame range for the
# publish job (publish_filesequence.py)
# The current approach is not following Pyblish logic
# which is based
# on Collect / Validate / Extract.
# ---- Collect Plugins ---
# Collect Extend Frames - Only run if extendFrames is toggled
# # # Store in instance:
# # # Previous rendered files per subset based on frames
# # # --> Add to instance.data[resources]
# # # Update publish frame range
# ---- Validate Plugins ---
# Validate Extend Frames
# # # Check if instance has the requirements to extend frames
# There might have been some things which can be added to the list
# Please do so when fixing this.
# Start frame
metadata["frameStart"] = updated_start
metadata["metadata"]["instance"]["frameStart"] = updated_start
# End frame
metadata["frameEnd"] = updated_end
metadata["metadata"]["instance"]["frameEnd"] = updated_end
metadata_filename = "{}_metadata.json".format(subset)
metadata_path = os.path.join(output_dir, metadata_filename)
# convert log messages if they are `LogRecord` to their
# string format to allow serializing as JSON later on.
rendered_logs = []
for log in metadata["metadata"]["instance"].get("_log", []):
if isinstance(log, logging.LogRecord):
rendered_logs.append(log.getMessage())
else:
rendered_logs.append(log)
metadata["metadata"]["instance"]["_log"] = rendered_logs
self.log.info("Writing json file: {}".format(metadata_path))
with open(metadata_path, "w") as f:
json.dump(metadata, f, indent=4, sort_keys=True)
json.dump(publish_job, f, indent=4, sort_keys=True)
# Copy files from previous render if extendFrame is True
if data.get("extendFrames", False):
def _extend_frames(self, asset, subset, start, end, override):
"""
This will get the latest version of the asset and update the frame
range based on minimum and maximum values
"""
self.log.info("Preparing to copy ..")
import shutil
# Frame comparison
prev_start = None
prev_end = None
dest_path = data["outputDir"]
for source in resources:
src_file = os.path.basename(source)
dest = os.path.join(dest_path, src_file)
shutil.copy(source, dest)
version = get_latest_version(
asset_name=asset,
subset_name=subset,
family='render'
)
self.log.info("Finished copying %i files" % len(resources))
# Set prev start / end frames for comparison
if not prev_start and not prev_end:
prev_start = version["data"]["frameStart"]
prev_end = version["data"]["frameEnd"]
updated_start = min(start, prev_start)
updated_end = max(end, prev_end)
self.log.info(
"Updating start / end frame : "
"{} - {}".format(updated_start, updated_end)
)
return updated_start, updated_end
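# Hypothetical example: if the previous version covered frames
# 1001-1050 and this submission renders 1040-1060, the published
# range becomes 1001-1060.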

View file

@ -1,13 +1,14 @@
import pyblish.api
import os
import subprocess
import pype.lib
try:
import os.errno as errno
except ImportError:
import errno
class ValidateFfmpegInstallef(pyblish.api.Validator):
class ValidateFFmpegInstalled(pyblish.api.Validator):
"""Validate availability of ffmpeg tool in PATH"""
order = pyblish.api.ValidatorOrder
@ -27,10 +28,8 @@ class ValidateFfmpegInstallef(pyblish.api.Validator):
return True
def process(self, instance):
self.log.info("ffmpeg path: `{}`".format(
os.environ.get("FFMPEG_PATH", "")))
if self.is_tool(
os.path.join(
os.environ.get("FFMPEG_PATH", ""), "ffmpeg")) is False:
ffmpeg_path = pype.lib.get_ffmpeg_tool_path("ffmpeg")
self.log.info("ffmpeg path: `{}`".format(ffmpeg_path))
if self.is_tool(ffmpeg_path) is False:
self.log.error("ffmpeg not found in PATH")
raise RuntimeError('ffmpeg not installed.')

View file

@ -1,43 +0,0 @@
import pyblish.api
import os
class ValidateTemplates(pyblish.api.ContextPlugin):
"""Check if all templates were filled"""
label = "Validate Templates"
order = pyblish.api.ValidatorOrder - 0.1
hosts = ["maya", "houdini", "nuke"]
def process(self, context):
anatomy = context.data["anatomy"]
if not anatomy:
raise RuntimeError("Did not find anatomy")
else:
data = {
"root": os.environ["PYPE_STUDIO_PROJECTS_PATH"],
"project": {"name": "D001_projectsx",
"code": "prjX"},
"ext": "exr",
"version": 3,
"task": "animation",
"asset": "sh001",
"app": "maya",
"hierarchy": "ep101/sq01/sh010"}
anatomy_filled = anatomy.format(data)
self.log.info(anatomy_filled)
data = {"root": os.environ["PYPE_STUDIO_PROJECTS_PATH"],
"project": {"name": "D001_projectsy",
"code": "prjY"},
"ext": "abc",
"version": 1,
"task": "lookdev",
"asset": "bob",
"app": "maya",
"hierarchy": "ep101/sq01/bob"}
anatomy_filled = context.data["anatomy"].format(data)
self.log.info(anatomy_filled["work"]["folder"])

View file

@ -0,0 +1,25 @@
import pyblish.api
class ValidateVersion(pyblish.api.InstancePlugin):
"""Validate instance version.
Pype does not allow overwriting previously published versions.
"""
order = pyblish.api.ValidatorOrder
label = "Validate Version"
hosts = ["nuke", "maya", "blender"]
def process(self, instance):
version = instance.data.get("version")
latest_version = instance.data.get("latestVersion")
if latest_version is not None:
msg = ("Version `{0}` that you are"
" trying to publish, already"
" exists in the"
" database.").format(
version, latest_version)
assert (int(version) > int(latest_version)), msg

View file

@ -2,43 +2,108 @@ import os
import json
import appdirs
import requests
from maya import cmds
import maya.app.renderSetup.model.renderSetup as renderSetup
import pype.maya.lib as lib
import avalon.maya
class CreateRenderGlobals(avalon.maya.Creator):
class CreateRender(avalon.maya.Creator):
"""Create render layer for export"""
label = "Render Globals"
family = "renderglobals"
icon = "gears"
defaults = ['Main']
label = "Render"
family = "rendering"
icon = "eye"
defaults = ["Main"]
_token = None
_user = None
_password = None
# renderSetup instance
_rs = None
_image_prefix_nodes = {
'mentalray': 'defaultRenderGlobals.imageFilePrefix',
'vray': 'vraySettings.fileNamePrefix',
'arnold': 'defaultRenderGlobals.imageFilePrefix',
'renderman': 'defaultRenderGlobals.imageFilePrefix',
'redshift': 'defaultRenderGlobals.imageFilePrefix'
}
_image_prefixes = {
'mentalray': 'maya/<Scene>/<RenderLayer>/<RenderLayer>_<RenderPass>',
'vray': '"maya/<scene>/<Layer>/<Layer>',
'arnold': 'maya/<Scene>/<RenderLayer>/<RenderLayer>_<RenderPass>',
'renderman': 'maya/<Scene>/<layer>/<layer>_<aov>',
'redshift': 'maya/<Scene>/<RenderLayer>/<RenderLayer>_<RenderPass>'
}
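# Hedged illustration (scene/layer names hypothetical): for scene
# "shot010_v001" and layer "Main", the arnold prefix above makes the
# renderer write images under
# "maya/shot010_v001/Main/Main_<RenderPass>", with <RenderPass>
# resolved per AOV at render time.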
def __init__(self, *args, **kwargs):
super(CreateRenderGlobals, self).__init__(*args, **kwargs)
super(CreateRender, self).__init__(*args, **kwargs)
# We won't be publishing this one
self.data["id"] = "avalon.renderglobals"
def process(self):
exists = cmds.ls(self.name)
if exists:
return cmds.warning("%s already exists." % exists[0])
use_selection = self.options.get("useSelection")
with lib.undo_chunk():
self._create_render_settings()
instance = super(CreateRender, self).process()
cmds.setAttr("{}.machineList".format(instance), lock=True)
self._rs = renderSetup.instance()
layers = self._rs.getRenderLayers()
if use_selection:
print(">>> processing existing layers")
sets = []
for layer in layers:
print(" - creating set for {}".format(layer.name()))
render_set = cmds.sets(n="LAYER_{}".format(layer.name()))
sets.append(render_set)
cmds.sets(sets, forceElement=instance)
# if no render layers are present, create default one with
# asterix selector
if not layers:
rl = self._rs.createRenderLayer('Main')
cl = rl.createCollection("defaultCollection")
cl.getSelector().setPattern('*')
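# A hedged sketch of the fallback above: it yields a render layer
# "Main" whose single collection selects every node via the "*"
# pattern, equivalent to creating the layer manually in the Render
# Setup window.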
renderer = cmds.getAttr(
'defaultRenderGlobals.currentRenderer').lower()
# handle various renderman names
if renderer.startswith('renderman'):
renderer = 'renderman'
cmds.setAttr(self._image_prefix_nodes[renderer],
self._image_prefixes[renderer],
type="string")
def _create_render_settings(self):
# get pools
pools = []
deadline_url = os.environ.get('DEADLINE_REST_URL', None)
muster_url = os.environ.get('MUSTER_REST_URL', None)
deadline_url = os.environ.get("DEADLINE_REST_URL", None)
muster_url = os.environ.get("MUSTER_REST_URL", None)
if deadline_url and muster_url:
self.log.error("Both Deadline and Muster are enabled. "
"Cannot support both.")
self.log.error(
"Both Deadline and Muster are enabled. " "Cannot support both."
)
raise RuntimeError("Both Deadline and Muster are enabled")
if deadline_url is None:
self.log.warning("Deadline REST API url not found.")
else:
argument = "{}/api/pools?NamesOnly=true".format(deadline_url)
response = self._requests_get(argument)
try:
response = self._requests_get(argument)
except requests.exceptions.ConnectionError as e:
msg = 'Cannot connect to deadline web service'
self.log.error(msg)
raise RuntimeError('{} - {}'.format(msg, e))
if not response.ok:
self.log.warning("No pools retrieved")
else:
@ -57,8 +122,8 @@ class CreateRenderGlobals(avalon.maya.Creator):
try:
pools = self._get_muster_pools()
except requests.exceptions.HTTPError as e:
if e.startswith('401'):
self.log.warning('access token expired')
if e.startswith("401"):
self.log.warning("access token expired")
self._show_login()
raise RuntimeError("Access token expired")
except requests.exceptions.ConnectionError:
@ -66,20 +131,15 @@ class CreateRenderGlobals(avalon.maya.Creator):
raise RuntimeError("Cannot connect to {}".format(muster_url))
pool_names = []
for pool in pools:
self.log.info(" - pool: {}".format(pool['name']))
pool_names.append(pool['name'])
self.log.info(" - pool: {}".format(pool["name"]))
pool_names.append(pool["name"])
self.data["primaryPool"] = pool_names
# We don't need subset or asset attributes
# self.data.pop("subset", None)
# self.data.pop("asset", None)
# self.data.pop("active", None)
self.data["suspendPublishJob"] = False
self.data["extendFrames"] = False
self.data["overrideExistingFrame"] = True
self.data["useLegacyRenderLayers"] = True
# self.data["useLegacyRenderLayers"] = True
self.data["priority"] = 50
self.data["framesPerTask"] = 1
self.data["whitelist"] = False
@ -88,20 +148,6 @@ class CreateRenderGlobals(avalon.maya.Creator):
self.options = {"useSelection": False} # Force no content
def process(self):
exists = cmds.ls(self.name)
assert len(exists) <= 1, (
"More than one renderglobal exists, this is a bug"
)
if exists:
return cmds.warning("%s already exists." % exists[0])
with lib.undo_chunk():
super(CreateRenderGlobals, self).process()
cmds.setAttr("{}.machineList".format(self.name), lock=True)
def _load_credentials(self):
"""
Load Muster credentials from file and set `MUSTER_USER`,
@ -111,14 +157,12 @@ class CreateRenderGlobals(avalon.maya.Creator):
Show login dialog if access token is invalid or missing.
"""
app_dir = os.path.normpath(
appdirs.user_data_dir('pype-app', 'pype')
)
file_name = 'muster_cred.json'
app_dir = os.path.normpath(appdirs.user_data_dir("pype-app", "pype"))
file_name = "muster_cred.json"
fpath = os.path.join(app_dir, file_name)
file = open(fpath, 'r')
file = open(fpath, "r")
muster_json = json.load(file)
self._token = muster_json.get('token', None)
self._token = muster_json.get("token", None)
if not self._token:
self._show_login()
raise RuntimeError("Invalid access token for Muster")
@ -131,26 +175,25 @@ class CreateRenderGlobals(avalon.maya.Creator):
"""
Get render pools from muster
"""
params = {
'authToken': self._token
}
api_entry = '/api/pools/list'
response = self._requests_get(
self.MUSTER_REST_URL + api_entry, params=params)
params = {"authToken": self._token}
api_entry = "/api/pools/list"
response = self._requests_get(self.MUSTER_REST_URL + api_entry,
params=params)
if response.status_code != 200:
if response.status_code == 401:
self.log.warning('Authentication token expired.')
self.log.warning("Authentication token expired.")
self._show_login()
else:
self.log.error(
'Cannot get pools from Muster: {}'.format(
response.status_code))
raise Exception('Cannot get pools from Muster')
("Cannot get pools from "
"Muster: {}").format(response.status_code)
)
raise Exception("Cannot get pools from Muster")
try:
pools = response.json()['ResponseData']['pools']
pools = response.json()["ResponseData"]["pools"]
except ValueError as e:
self.log.error('Invalid response from Muster server {}'.format(e))
raise Exception('Invalid response from Muster server')
self.log.error("Invalid response from Muster server {}".format(e))
raise Exception("Invalid response from Muster server")
return pools
@ -162,8 +205,8 @@ class CreateRenderGlobals(avalon.maya.Creator):
self.log.debug(api_url)
login_response = self._requests_post(api_url, timeout=1)
if login_response.status_code != 200:
self.log.error('Cannot show login form to Muster')
raise Exception('Cannot show login form to Muster')
self.log.error("Cannot show login form to Muster")
raise Exception("Cannot show login form to Muster")
def _requests_post(self, *args, **kwargs):
""" Wrapper for requests, disabling SSL certificate validation if
@ -175,8 +218,10 @@ class CreateRenderGlobals(avalon.maya.Creator):
WARNING: disabling SSL certificate validation defeats one line
of defense SSL provides and is not recommended.
"""
if 'verify' not in kwargs:
kwargs['verify'] = False if os.getenv("PYPE_DONT_VERIFY_SSL", True) else True # noqa
if "verify" not in kwargs:
kwargs["verify"] = (
False if os.getenv("PYPE_DONT_VERIFY_SSL", True) else True
) # noqa
return requests.post(*args, **kwargs)
def _requests_get(self, *args, **kwargs):
@ -189,6 +234,8 @@ class CreateRenderGlobals(avalon.maya.Creator):
WARNING: disabling SSL certificate validation defeats one line
of defense SSL provides and is not recommended.
"""
if 'verify' not in kwargs:
kwargs['verify'] = False if os.getenv("PYPE_DONT_VERIFY_SSL", True) else True # noqa
if "verify" not in kwargs:
kwargs["verify"] = (
False if os.getenv("PYPE_DONT_VERIFY_SSL", True) else True
) # noqa
return requests.get(*args, **kwargs)

View file

@ -0,0 +1,909 @@
"""
This collector will go through render layers in maya and prepare all data
needed to create instances and their representations for submission and
publishing on farm.
Requires:
instance -> families
instance -> setMembers
context -> currentFile
context -> workspaceDir
context -> user
session -> AVALON_ASSET
Optional:
Provides:
instance -> label
instance -> subset
instance -> attachTo
instance -> setMembers
instance -> publish
instance -> frameStart
instance -> frameEnd
instance -> byFrameStep
instance -> renderer
instance -> family
instance -> families
instance -> asset
instance -> time
instance -> author
instance -> source
instance -> expectedFiles
instance -> resolutionWidth
instance -> resolutionHeight
instance -> pixelAspect
"""
import re
import os
import types
import six
from abc import ABCMeta, abstractmethod
from maya import cmds
import maya.app.renderSetup.model.renderSetup as renderSetup
import pyblish.api
from avalon import maya, api
import pype.maya.lib as lib
R_SINGLE_FRAME = re.compile(r'^(-?)\d+$')
R_FRAME_RANGE = re.compile(r'^(?P<sf>(-?)\d+)-(?P<ef>(-?)\d+)$')
R_FRAME_NUMBER = re.compile(r'.+\.(?P<frame>[0-9]+)\..+')
R_LAYER_TOKEN = re.compile(
r'.*%l.*|.*<layer>.*|.*<renderlayer>.*', re.IGNORECASE)
R_AOV_TOKEN = re.compile(r'.*%a.*|.*<aov>.*|.*<renderpass>.*', re.IGNORECASE)
R_SUBSTITUTE_AOV_TOKEN = re.compile(r'%a|<aov>|<renderpass>', re.IGNORECASE)
R_REMOVE_AOV_TOKEN = re.compile(r'_%a|_<aov>|_<renderpass>', re.IGNORECASE)
# to remove unused renderman tokens
R_CLEAN_FRAME_TOKEN = re.compile(r'\.?<f\d>\.?', re.IGNORECASE)
R_CLEAN_EXT_TOKEN = re.compile(r'\.?<ext>\.?', re.IGNORECASE)
R_SUBSTITUTE_LAYER_TOKEN = re.compile(
r'%l|<layer>|<renderlayer>', re.IGNORECASE)
R_SUBSTITUTE_CAMERA_TOKEN = re.compile(r'%c|<camera>', re.IGNORECASE)
R_SUBSTITUTE_SCENE_TOKEN = re.compile(r'%s|<scene>', re.IGNORECASE)
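# Hypothetical examples of the patterns above:
#   R_FRAME_NUMBER matches "beauty.0001.exr" with frame == "0001"
#   R_LAYER_TOKEN matches any prefix containing "%l", "<Layer>" or
#   "<RenderLayer>" (case-insensitive)
#   R_AOV_TOKEN does the same for "%a", "<AOV>" and "<RenderPass>"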
RENDERER_NAMES = {
'mentalray': 'MentalRay',
'vray': 'V-Ray',
'arnold': 'Arnold',
'renderman': 'Renderman',
'redshift': 'Redshift'
}
# not sure about the renderman image prefix
ImagePrefixes = {
'mentalray': 'defaultRenderGlobals.imageFilePrefix',
'vray': 'vraySettings.fileNamePrefix',
'arnold': 'defaultRenderGlobals.imageFilePrefix',
'renderman': 'rmanGlobals.imageFileFormat',
'redshift': 'defaultRenderGlobals.imageFilePrefix'
}
class CollectMayaRender(pyblish.api.ContextPlugin):
"""Gather all publishable render layers from renderSetup"""
order = pyblish.api.CollectorOrder + 0.01
hosts = ["maya"]
label = "Collect Render Layers"
def process(self, context):
render_instance = None
for instance in context:
if 'rendering' in instance.data['families']:
render_instance = instance
render_instance.data["remove"] = True
# make sure workfile instance publishing is enabled
if 'workfile' in instance.data['families']:
instance.data["publish"] = True
if not render_instance:
self.log.info("No render instance found, skipping render "
"layer collection.")
return
render_globals = render_instance
collected_render_layers = render_instance.data['setMembers']
filepath = context.data["currentFile"].replace("\\", "/")
asset = api.Session["AVALON_ASSET"]
workspace = context.data["workspaceDir"]
self._rs = renderSetup.instance()
maya_render_layers = {l.name(): l for l in self._rs.getRenderLayers()}
self.maya_layers = maya_render_layers
for layer in collected_render_layers:
# every layer in set should start with `LAYER_` prefix
try:
expected_layer_name = re.search(r"^LAYER_(.*)", layer).group(1)
except (AttributeError, IndexError):
msg = "Invalid layer name in set [ {} ]".format(layer)
self.log.warning(msg)
continue
self.log.info("processing %s" % layer)
# check if layer is part of renderSetup
if expected_layer_name not in maya_render_layers:
msg = ("Render layer [ {} ] is not in "
"Render Setup".format(expected_layer_name))
self.log.warning(msg)
continue
# check if layer is renderable
if not maya_render_layers[expected_layer_name].isRenderable():
msg = ("Render layer [ {} ] is not "
"renderable".format(expected_layer_name))
self.log.warning(msg)
continue
# test if there are sets (subsets) to attach render to
sets = cmds.sets(layer, query=True) or []
attachTo = []
if sets:
for s in sets:
attachTo.append({
"version": None, # we need integrator to get version
"subset": s,
"family": cmds.getAttr("{}.family".format(s))
})
self.log.info(" -> attach render to: {}".format(s))
layer_name = "rs_{}".format(expected_layer_name)
# collect all frames we are expecting to be rendered
renderer = cmds.getAttr(
'defaultRenderGlobals.currentRenderer').lower()
# handle various renderman names
if renderer.startswith('renderman'):
renderer = 'renderman'
# return all expected files for all cameras and aovs in given
# frame range
exp_files = ExpectedFiles().get(renderer, layer_name)
assert exp_files, ("no file names were generated, this is bug")
# if we want to attach render to subset, check if we have AOVs
# in expectedFiles. If so, raise error as we cannot attach an AOV
# (considered to be a subset of its own) to another subset
if attachTo:
assert len(exp_files[0].keys()) == 1, (
"attaching multiple AOVs or renderable cameras to "
"subset is not supported")
# append full path
full_exp_files = []
aov_dict = {}
# we either get AOVs or just list of files. List of files can
# mean two things - there are no AOVs enabled or multipass EXR
# is produced. In either case we treat those as `beauty`.
if isinstance(exp_files[0], dict):
for aov, files in exp_files[0].items():
full_paths = []
for ef in files:
full_path = os.path.join(workspace, "renders", ef)
full_path = full_path.replace("\\", "/")
full_paths.append(full_path)
aov_dict[aov] = full_paths
else:
full_paths = []
for ef in exp_files:
full_path = os.path.join(workspace, "renders", ef)
full_path = full_path.replace("\\", "/")
full_paths.append(full_path)
aov_dict["beauty"] = full_paths
full_exp_files.append(aov_dict)
self.log.info(full_exp_files)
self.log.info("collecting layer: {}".format(layer_name))
# Get layer specific settings, might be overrides
data = {
"subset": expected_layer_name,
"attachTo": attachTo,
"setMembers": layer_name,
"publish": True,
"frameStart": int(self.get_render_attribute("startFrame",
layer=layer_name)),
"frameEnd": int(self.get_render_attribute("endFrame",
layer=layer_name)),
"byFrameStep": int(
self.get_render_attribute("byFrameStep",
layer=layer_name)),
"renderer": self.get_render_attribute("currentRenderer",
layer=layer_name),
"handleStart": context.data["assetEntity"]['data']['handleStart'],
"handleEnd": context.data["assetEntity"]['data']['handleEnd'],
# instance subset
"family": "renderlayer",
"families": ["renderlayer"],
"asset": asset,
"time": api.time(),
"author": context.data["user"],
# Add source to allow tracing back to the scene from
# which it was submitted originally
"source": filepath,
"expectedFiles": full_exp_files,
"resolutionWidth": cmds.getAttr("defaultResolution.width"),
"resolutionHeight": cmds.getAttr("defaultResolution.height"),
"pixelAspect": cmds.getAttr("defaultResolution.height")
}
# Apply each user defined attribute as data
for attr in cmds.listAttr(layer, userDefined=True) or list():
try:
value = cmds.getAttr("{}.{}".format(layer, attr))
except Exception:
# Some attributes cannot be read directly,
# such as mesh and color attributes. These
# are considered non-essential to this
# particular publishing pipeline.
value = None
data[attr] = value
# Include (optional) global settings
# Get global overrides and translate to Deadline values
overrides = self.parse_options(str(render_globals))
data.update(**overrides)
# Define nice label
label = "{0} ({1})".format(expected_layer_name, data["asset"])
label += " [{0}-{1}]".format(int(data["frameStart"]),
int(data["frameEnd"]))
instance = context.create_instance(expected_layer_name)
instance.data["label"] = label
instance.data.update(data)
def parse_options(self, render_globals):
"""Get all overrides with a value, skip those without
Here's the kicker. These globals override defaults in the submission
integrator, but an empty value means no overriding is made.
Otherwise, Frames would override the default frames set under globals.
Args:
render_globals (str): collection of render globals
Returns:
dict: only overrides with values
"""
attributes = maya.read(render_globals)
options = {"renderGlobals": {}}
options["renderGlobals"]["Priority"] = attributes["priority"]
# Check for specific pools
pool_a, pool_b = self._discover_pools(attributes)
options["renderGlobals"].update({"Pool": pool_a})
if pool_b:
options["renderGlobals"].update({"SecondaryPool": pool_b})
# Machine list
machine_list = attributes["machineList"]
if machine_list:
key = "Whitelist" if attributes["whitelist"] else "Blacklist"
options['renderGlobals'][key] = machine_list
# Suspend publish job
state = "Suspended" if attributes["suspendPublishJob"] else "Active"
options["publishJobState"] = state
chunksize = attributes.get("framesPerTask", 1)
options["renderGlobals"]["ChunkSize"] = chunksize
# Override frames should be False if extendFrames is False. This is
# to ensure it doesn't go off doing crazy unpredictable things
override_frames = False
extend_frames = attributes.get("extendFrames", False)
if extend_frames:
override_frames = attributes.get("overrideExistingFrame", False)
options["extendFrames"] = extend_frames
options["overrideExistingFrame"] = override_frames
maya_render_plugin = "MayaBatch"
if not attributes.get("useMayaBatch", True):
maya_render_plugin = "MayaCmd"
options["mayaRenderPlugin"] = maya_render_plugin
return options
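# Illustrative shape of the returned options with hypothetical
# attribute values (a sketch, not authoritative output):
#   {
#       "renderGlobals": {"Priority": 50, "Pool": "farm", "ChunkSize": 1},
#       "publishJobState": "Active",
#       "extendFrames": False,
#       "overrideExistingFrame": False,
#       "mayaRenderPlugin": "MayaBatch",
#   }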
def _discover_pools(self, attributes):
pool_a = None
# Check for specific pools
pool_b = []
if "primaryPool" in attributes:
pool_a = attributes["primaryPool"]
if "secondaryPool" in attributes:
pool_b = attributes["secondaryPool"]
else:
# Backwards compatibility
pool_str = attributes.get("pools", None)
if pool_str:
pool_a, pool_b = pool_str.split(";")
# Ensure empty entry token is caught
if pool_b == "-":
pool_b = None
return pool_a, pool_b
def _get_overrides(self, layer):
rset = self.maya_layers[layer].renderSettingsCollectionInstance()
return rset.getOverrides()
def get_render_attribute(self, attr, layer):
return lib.get_attr_in_layer("defaultRenderGlobals.{}".format(attr),
layer=layer)
class ExpectedFiles:
def get(self, renderer, layer):
if renderer.lower() == 'arnold':
return ExpectedFilesArnold(layer).get_files()
elif renderer.lower() == 'vray':
return ExpectedFilesVray(layer).get_files()
elif renderer.lower() == 'redshift':
return ExpectedFilesRedshift(layer).get_files()
elif renderer.lower() == 'mentalray':
return ExpectedFilesMentalray(layer).get_files()
elif renderer.lower() == 'renderman':
return ExpectedFilesRenderman(layer).get_files()
else:
raise UnsupportedRendererException(
"unsupported {}".format(renderer))
@six.add_metaclass(ABCMeta)
class AExpectedFiles:
renderer = None
layer = None
def __init__(self, layer):
self.layer = layer
@abstractmethod
def get_aovs(self):
pass
def get_renderer_prefix(self):
try:
file_prefix = cmds.getAttr(ImagePrefixes[self.renderer])
except KeyError:
raise UnsupportedRendererException(
"Unsupported renderer {}".format(self.renderer))
return file_prefix
def _get_layer_data(self):
# 1 - get scene name
scene_dir, scene_basename = os.path.split(cmds.file(q=True, loc=True))
scene_name, _ = os.path.splitext(scene_basename)
# 2 - detect renderer
renderer = self.renderer
# 3 - image prefix
file_prefix = self.get_renderer_prefix()
if not file_prefix:
raise RuntimeError("Image prefix not set")
default_ext = cmds.getAttr('defaultRenderGlobals.imfPluginKey')
# 4 - get renderable cameras
# if we have <camera> token in prefix path we'll expect output for
# every renderable camera in layer.
renderable_cameras = self.get_renderable_cameras()
# 5 - get AOVs
enabled_aovs = self.get_aovs()
layer_name = self.layer
if self.layer.startswith("rs_"):
layer_name = self.layer[3:]
start_frame = int(self.get_render_attribute('startFrame'))
end_frame = int(self.get_render_attribute('endFrame'))
frame_step = int(self.get_render_attribute('byFrameStep'))
padding = int(self.get_render_attribute('extensionPadding'))
scene_data = {
"frameStart": start_frame,
"frameEnd": end_frame,
"frameStep": frame_step,
"padding": padding,
"cameras": renderable_cameras,
"sceneName": scene_name,
"layerName": layer_name,
"renderer": renderer,
"defaultExt": default_ext,
"filePrefix": file_prefix,
"enabledAOVs": enabled_aovs
}
return scene_data
def _generate_single_file_sequence(self, layer_data):
expected_files = []
for cam in layer_data["cameras"]:
# reset to the untouched prefix for every camera so substitutions
# made for the previous camera don't leak into the next one
file_prefix = layer_data["filePrefix"]
mappings = (
(R_SUBSTITUTE_SCENE_TOKEN, layer_data["sceneName"]),
(R_SUBSTITUTE_LAYER_TOKEN, layer_data["layerName"]),
(R_SUBSTITUTE_CAMERA_TOKEN, cam),
# this is required to remove unfilled aov token, for example
# in Redshift
(R_REMOVE_AOV_TOKEN, ""),
(R_CLEAN_FRAME_TOKEN, ""),
(R_CLEAN_EXT_TOKEN, "")
)
for regex, value in mappings:
file_prefix = re.sub(regex, value, file_prefix)
for frame in range(
int(layer_data["frameStart"]),
int(layer_data["frameEnd"]) + 1,
int(layer_data["frameStep"])):
expected_files.append(
'{}.{}.{}'.format(file_prefix,
str(frame).rjust(
layer_data["padding"], "0"),
layer_data["defaultExt"]))
return expected_files
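# Illustrative expansion with hypothetical values: a filePrefix of
# "maya/<Scene>/<RenderLayer>/<RenderLayer>", scene "shot010_v01",
# layer "beauty", frames 1-3, padding 4 and default extension "exr"
# would yield:
#   maya/shot010_v01/beauty/beauty.0001.exr
#   maya/shot010_v01/beauty/beauty.0002.exr
#   maya/shot010_v01/beauty/beauty.0003.exr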
def _generate_aov_file_sequences(self, layer_data):
expected_files = []
aov_file_list = {}
file_prefix = layer_data["filePrefix"]
for aov in layer_data["enabledAOVs"]:
for cam in layer_data["cameras"]:
mappings = (
(R_SUBSTITUTE_SCENE_TOKEN, layer_data["sceneName"]),
(R_SUBSTITUTE_LAYER_TOKEN, layer_data["layerName"]),
(R_SUBSTITUTE_CAMERA_TOKEN, cam),
(R_SUBSTITUTE_AOV_TOKEN, aov[0]),
(R_CLEAN_FRAME_TOKEN, ""),
(R_CLEAN_EXT_TOKEN, "")
)
for regex, value in mappings:
file_prefix = re.sub(regex, value, file_prefix)
aov_files = []
for frame in range(
int(layer_data["frameStart"]),
int(layer_data["frameEnd"]) + 1,
int(layer_data["frameStep"])):
aov_files.append(
'{}.{}.{}'.format(
file_prefix,
str(frame).rjust(layer_data["padding"], "0"),
aov[1]))
# if we have more than one renderable camera, append the
# camera name to the AOV to allow per-camera AOVs.
aov_name = aov[0]
if len(layer_data["cameras"]) > 1:
aov_name = "{}_{}".format(aov[0], cam)
aov_file_list[aov_name] = aov_files
file_prefix = layer_data["filePrefix"]
expected_files.append(aov_file_list)
return expected_files
def get_files(self):
"""
Return the list of expected files.
It translates render token strings ('<RenderPass>', etc.) to
their values. This task is tricky, as every renderer deals with
this differently. It depends on the `get_aovs()` abstract method
implemented for every supported renderer.
"""
layer_data = self._get_layer_data()
expected_files = []
if layer_data.get("enabledAOVs"):
expected_files = self._generate_aov_file_sequences(layer_data)
else:
expected_files = self._generate_single_file_sequence(layer_data)
return expected_files
def get_renderable_cameras(self):
cam_parents = [cmds.listRelatives(x, ap=True)[-1]
for x in cmds.ls(cameras=True)]
renderable_cameras = []
for cam in cam_parents:
renderable = False
if self.maya_is_true(cmds.getAttr('{}.renderable'.format(cam))):
renderable = True
for override in self.get_layer_overrides(
'{}.renderable'.format(cam), self.layer):
renderable = self.maya_is_true(override)
if renderable:
renderable_cameras.append(cam)
return renderable_cameras
def maya_is_true(self, attr_val):
"""
Whether a Maya attr evaluates to True.
When querying an attribute value from an ambiguous object the
Maya API will return a list of values, which needs special
handling to evaluate correctly.
"""
if isinstance(attr_val, bool):
return attr_val
elif isinstance(attr_val, (list, types.GeneratorType)):
return any(attr_val)
else:
return bool(attr_val)
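# For illustration: maya_is_true(True) -> True,
# maya_is_true([0, 1]) -> True (any truthy member is enough),
# maya_is_true(None) -> False.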
def get_layer_overrides(self, attr, layer):
connections = cmds.listConnections(attr, plugs=True)
if connections:
for connection in connections:
if connection:
node_name = connection.split('.')[0]
if cmds.nodeType(node_name) == 'renderLayer':
attr_name = '%s.value' % '.'.join(
connection.split('.')[:-1])
if node_name == layer:
yield cmds.getAttr(attr_name)
def get_render_attribute(self, attr):
return lib.get_attr_in_layer("defaultRenderGlobals.{}".format(attr),
layer=self.layer)
class ExpectedFilesArnold(AExpectedFiles):
# Arnold AOV driver extension mapping
# Is there a better way?
aiDriverExtension = {
'jpeg': 'jpg',
'exr': 'exr',
'deepexr': 'exr',
'png': 'png',
'tiff': 'tif',
'mtoa_shaders': 'ass', # TODO: research what those last two should be
'maya': ''
}
def __init__(self, layer):
super(ExpectedFilesArnold, self).__init__(layer)
self.renderer = 'arnold'
def get_aovs(self):
enabled_aovs = []
try:
if not (cmds.getAttr('defaultArnoldRenderOptions.aovMode')
and not cmds.getAttr('defaultArnoldDriver.mergeAOVs')):
# AOVs are merged in a multi-channel file
return enabled_aovs
except ValueError:
# this occurs when the Render Settings window was not opened yet.
# In that case there are no Arnold options created, so the query
# for AOVs will fail. We terminate here as no AOVs are specified
# then. This state will most probably fail later on some
# validator anyway.
return enabled_aovs
# AOVs are set to be rendered separately. We should expect
# <RenderPass> token in path.
ai_aovs = [n for n in cmds.ls(type='aiAOV')]
for aov in ai_aovs:
enabled = self.maya_is_true(
cmds.getAttr('{}.enabled'.format(aov)))
ai_driver = cmds.listConnections(
'{}.outputs'.format(aov))[0]
ai_translator = cmds.getAttr(
'{}.aiTranslator'.format(ai_driver))
try:
aov_ext = self.aiDriverExtension[ai_translator]
except KeyError:
msg = ('Unrecognized arnold '
'driver format for AOV - {}').format(
cmds.getAttr('{}.name'.format(aov))
)
raise AOVError(msg)
for override in self.get_layer_overrides(
'{}.enabled'.format(aov), self.layer):
enabled = self.maya_is_true(override)
if enabled:
# If aov RGBA is selected, arnold will translate it to `beauty`
aov_name = cmds.getAttr('%s.name' % aov)
if aov_name == 'RGBA':
aov_name = 'beauty'
enabled_aovs.append(
(
aov_name,
aov_ext
)
)
# Append 'beauty' as this is Arnold's
# default. If <RenderPass> token is specified and no AOVs are
# defined, this will be used.
enabled_aovs.append(
(
u'beauty',
cmds.getAttr('defaultRenderGlobals.imfPluginKey')
)
)
return enabled_aovs
class ExpectedFilesVray(AExpectedFiles):
# V-ray file extension mapping
# 5 - exr
# 6 - multichannel exr
# 13 - deep exr
def __init__(self, layer):
super(ExpectedFilesVray, self).__init__(layer)
self.renderer = 'vray'
def get_renderer_prefix(self):
prefix = super(ExpectedFilesVray, self).get_renderer_prefix()
prefix = "{}_<aov>".format(prefix)
return prefix
def get_files(self):
expected_files = super(ExpectedFilesVray, self).get_files()
# we need to add one sequence for plain beauty if AOVs are enabled,
# as V-Ray outputs beauty without 'beauty' in the file name.
layer_data = self._get_layer_data()
if layer_data.get("enabledAOVs"):
expected_files[0][u"beauty"] = self._generate_single_file_sequence(layer_data) # noqa: E501
return expected_files
def get_aovs(self):
enabled_aovs = []
try:
# really? do we set it in vray just by selecting multichannel exr?
if cmds.getAttr(
"vraySettings.imageFormatStr") == "exr (multichannel)":
# AOVs are merged in a multi-channel file
return enabled_aovs
except ValueError:
# this occurs when the Render Settings window was not opened yet.
# In that case the V-Ray settings node is not created, so the
# query for AOVs will fail. We terminate here as no AOVs are
# specified then. This state will most probably fail later on
# some validator anyway.
return enabled_aovs
default_ext = cmds.getAttr('vraySettings.imageFormatStr')
if default_ext == "exr (multichannel)" or default_ext == "exr (deep)":
default_ext = "exr"
vr_aovs = [n for n in cmds.ls(
type=["VRayRenderElement", "VRayRenderElementSet"])]
# todo: find out how to detect multichannel exr for vray
for aov in vr_aovs:
enabled = self.maya_is_true(
cmds.getAttr('{}.enabled'.format(aov)))
for override in self.get_layer_overrides(
'{}.enabled'.format(aov), 'rs_{}'.format(self.layer)):
enabled = self.maya_is_true(override)
if enabled:
# todo: find how vray set format for AOVs
enabled_aovs.append(
(
self._get_vray_aov_name(aov),
default_ext)
)
return enabled_aovs
def _get_vray_aov_name(self, node):
# Get render element pass type
vray_node_attr = next(attr for attr in cmds.listAttr(node)
if attr.startswith("vray_name"))
pass_type = vray_node_attr.rsplit("_", 1)[-1]
# Support V-Ray extratex explicit name (if set by user)
if pass_type == "extratex":
explicit_attr = "{}.vray_explicit_name_extratex".format(node)
explicit_name = cmds.getAttr(explicit_attr)
if explicit_name:
return explicit_name
# Node type is in the attribute name, but we need to check the value
# of the attribute as it can be changed
return cmds.getAttr("{}.{}".format(node, vray_node_attr))
class ExpectedFilesRedshift(AExpectedFiles):
# mapping redshift extension dropdown values to strings
ext_mapping = ['iff', 'exr', 'tif', 'png', 'tga', 'jpg']
def __init__(self, layer):
super(ExpectedFilesRedshift, self).__init__(layer)
self.renderer = 'redshift'
def get_renderer_prefix(self):
prefix = super(ExpectedFilesRedshift, self).get_renderer_prefix()
prefix = "{}_<aov>".format(prefix)
return prefix
def get_files(self):
expected_files = super(ExpectedFilesRedshift, self).get_files()
# we need to add one sequence for plain beauty if AOVs are enabled,
# as Redshift outputs beauty without 'beauty' in the file name.
layer_data = self._get_layer_data()
if layer_data.get("enabledAOVs"):
expected_files[0][u"beauty"] = self._generate_single_file_sequence(layer_data) # noqa: E501
return expected_files
def get_aovs(self):
enabled_aovs = []
try:
if self.maya_is_true(
cmds.getAttr("redshiftOptions.exrForceMultilayer")):
# AOVs are merged in a multi-channel file
return enabled_aovs
except ValueError:
# this occurs when the Render Settings window was not opened yet.
# In that case the Redshift options node is not created, so the
# query for AOVs will fail. We terminate here as no AOVs are
# specified then. This state will most probably fail later on
# some validator anyway.
return enabled_aovs
default_ext = self.ext_mapping[
cmds.getAttr('redshiftOptions.imageFormat')
]
rs_aovs = [n for n in cmds.ls(type='RedshiftAOV')]
# todo: find out how to detect multichannel exr for redshift
for aov in rs_aovs:
enabled = self.maya_is_true(
cmds.getAttr('{}.enabled'.format(aov)))
for override in self.get_layer_overrides(
'{}.enabled'.format(aov), self.layer):
enabled = self.maya_is_true(override)
if enabled:
enabled_aovs.append(
(
cmds.getAttr('%s.name' % aov),
default_ext
)
)
return enabled_aovs
class ExpectedFilesRenderman(AExpectedFiles):
def __init__(self, layer):
super(ExpectedFilesRenderman, self).__init__(layer)
self.renderer = 'renderman'
def get_aovs(self):
enabled_aovs = []
default_ext = "exr"
displays = cmds.listConnections("rmanGlobals.displays") or []
for aov in displays:
aov_name = str(aov)
if aov_name == "rmanDefaultDisplay":
aov_name = "beauty"
enabled = self.maya_is_true(
cmds.getAttr("{}.enable".format(aov)))
for override in self.get_layer_overrides(
'{}.enable'.format(aov), self.layer):
enabled = self.maya_is_true(override)
if enabled:
enabled_aovs.append(
(
aov_name,
default_ext
)
)
return enabled_aovs
def get_files(self):
"""
In Renderman we hack it by prepending a path. This path would
normally be translated from `rmanGlobals.imageOutputDir`. We skip
this and hardcode the prepended path we expect. There is no place
for the user to mess around with these settings anyway and it is
enforced in the render settings validator.
"""
layer_data = self._get_layer_data()
new_aovs = {}
expected_files = super(ExpectedFilesRenderman, self).get_files()
# we always get beauty
for aov, files in expected_files[0].items():
new_files = []
for file in files:
new_file = "{}/{}/{}".format(layer_data["sceneName"],
layer_data["layerName"],
file)
new_files.append(new_file)
new_aovs[aov] = new_files
return [new_aovs]
class ExpectedFilesMentalray(AExpectedFiles):
def __init__(self, layer):
raise UnimplementedRendererException('Mentalray not implemented')
def get_aovs(self):
return []
class AOVError(Exception):
pass
class UnsupportedRendererException(Exception):
pass
class UnimplementedRendererException(Exception):
pass

View file

@ -17,7 +17,7 @@ class CollectRenderableCamera(pyblish.api.InstancePlugin):
def process(self, instance):
layer = instance.data["setMembers"]
self.log.info("layer: {}".format(layer))
cameras = cmds.ls(type="camera", long=True)
renderable = [c for c in cameras if
lib.get_attr_in_layer("%s.renderable" % c, layer=layer)]

View file

@ -1,201 +0,0 @@
from maya import cmds
import pyblish.api
from avalon import maya, api
import pype.maya.lib as lib
class CollectMayaRenderlayers(pyblish.api.ContextPlugin):
"""Gather instances by active render layers"""
order = pyblish.api.CollectorOrder + 0.01
hosts = ["maya"]
label = "Render Layers"
def process(self, context):
asset = api.Session["AVALON_ASSET"]
filepath = context.data["currentFile"].replace("\\", "/")
# Get render globals node
try:
render_globals = cmds.ls("renderglobalsMain")[0]
for instance in context:
self.log.debug(instance.name)
if instance.data['family'] == 'workfile':
instance.data['publish'] = True
except IndexError:
self.log.info("Skipping renderlayer collection, no "
"renderGlobalsDefault found..")
return
# Get all valid renderlayers
# This is how Maya populates the renderlayer display
rlm_attribute = "renderLayerManager.renderLayerId"
connected_layers = cmds.listConnections(rlm_attribute) or []
valid_layers = set(connected_layers)
# Get all renderlayers and check their state
renderlayers = [i for i in cmds.ls(type="renderLayer") if
cmds.getAttr("{}.renderable".format(i)) and not
cmds.referenceQuery(i, isNodeReferenced=True)]
# Sort by displayOrder
def sort_by_display_order(layer):
return cmds.getAttr("%s.displayOrder" % layer)
renderlayers = sorted(renderlayers, key=sort_by_display_order)
for layer in renderlayers:
# Check if layer is in valid (linked) layers
if layer not in valid_layers:
self.log.warning("%s is invalid, skipping" % layer)
continue
if layer.endswith("defaultRenderLayer"):
continue
else:
# Remove Maya render setup prefix `rs_`
layername = layer.split("rs_", 1)[-1]
# Get layer specific settings, might be overrides
data = {
"subset": layername,
"setMembers": layer,
"publish": True,
"frameStart": self.get_render_attribute("startFrame",
layer=layer),
"frameEnd": self.get_render_attribute("endFrame",
layer=layer),
"byFrameStep": self.get_render_attribute("byFrameStep",
layer=layer),
"renderer": self.get_render_attribute("currentRenderer",
layer=layer),
# instance subset
"family": "Render Layers",
"families": ["renderlayer"],
"asset": asset,
"time": api.time(),
"author": context.data["user"],
# Add source to allow tracing back to the scene from
# which was submitted originally
"source": filepath
}
# Apply each user defined attribute as data
for attr in cmds.listAttr(layer, userDefined=True) or list():
try:
value = cmds.getAttr("{}.{}".format(layer, attr))
except Exception:
# Some attributes cannot be read directly,
# such as mesh and color attributes. These
# are considered non-essential to this
# particular publishing pipeline.
value = None
data[attr] = value
# Include (optional) global settings
# TODO(marcus): Take into account layer overrides
# Get global overrides and translate to Deadline values
overrides = self.parse_options(render_globals)
data.update(**overrides)
# Define nice label
label = "{0} ({1})".format(layername, data["asset"])
label += " [{0}-{1}]".format(int(data["frameStart"]),
int(data["frameEnd"]))
instance = context.create_instance(layername)
instance.data["label"] = label
instance.data.update(data)
def get_render_attribute(self, attr, layer):
return lib.get_attr_in_layer("defaultRenderGlobals.{}".format(attr),
layer=layer)
def parse_options(self, render_globals):
"""Get all overrides with a value, skip those without
Here's the kicker. These globals override defaults in the submission
integrator, but an empty value means no overriding is made.
Otherwise, Frames would override the default frames set under globals.
Args:
render_globals (str): collection of render globals
Returns:
dict: only overrides with values
"""
attributes = maya.read(render_globals)
options = {"renderGlobals": {}}
options["renderGlobals"]["Priority"] = attributes["priority"]
# Check for specific pools
pool_a, pool_b = self._discover_pools(attributes)
options["renderGlobals"].update({"Pool": pool_a})
if pool_b:
options["renderGlobals"].update({"SecondaryPool": pool_b})
legacy = attributes["useLegacyRenderLayers"]
options["renderGlobals"]["UseLegacyRenderLayers"] = legacy
# Machine list
machine_list = attributes["machineList"]
if machine_list:
key = "Whitelist" if attributes["whitelist"] else "Blacklist"
options['renderGlobals'][key] = machine_list
# Suspend publish job
state = "Suspended" if attributes["suspendPublishJob"] else "Active"
options["publishJobState"] = state
chunksize = attributes.get("framesPerTask", 1)
options["renderGlobals"]["ChunkSize"] = chunksize
# Override frames should be False if extendFrames is False. This is
# to ensure it doesn't go off doing crazy unpredictable things
override_frames = False
extend_frames = attributes.get("extendFrames", False)
if extend_frames:
override_frames = attributes.get("overrideExistingFrame", False)
options["extendFrames"] = extend_frames
options["overrideExistingFrame"] = override_frames
maya_render_plugin = "MayaBatch"
if not attributes.get("useMayaBatch", True):
maya_render_plugin = "MayaCmd"
options["mayaRenderPlugin"] = maya_render_plugin
return options
def _discover_pools(self, attributes):
pool_a = None
pool_b = None
# Check for specific pools
pool_b = []
if "primaryPool" in attributes:
pool_a = attributes["primaryPool"]
if "secondaryPool" in attributes:
pool_b = attributes["secondaryPool"]
else:
# Backwards compatibility
pool_str = attributes.get("pools", None)
if pool_str:
pool_a, pool_b = pool_str.split(";")
# Ensure empty entry token is caught
if pool_b == "-":
pool_b = None
return pool_a, pool_b

View file

@ -35,7 +35,7 @@ class CollectMayaScene(pyblish.api.ContextPlugin):
"subset": subset,
"asset": os.getenv("AVALON_ASSET", None),
"label": subset,
"publish": False,
"publish": True,
"family": 'workfile',
"families": ['workfile'],
"setMembers": [current_file]

View file

@ -0,0 +1,28 @@
import pyblish
class DetermineFutureVersion(pyblish.api.InstancePlugin):
"""
Determine the version of the subset the render should be attached to.
"""
label = "Determine Subset Version"
order = pyblish.api.IntegratorOrder
hosts = ["maya"]
families = ["renderlayer"]
def process(self, instance):
context = instance.context
attach_to_subsets = [s["subset"] for s in instance.data['attachTo']]
if not attach_to_subsets:
return
for i in context:
if i.data["subset"] in attach_to_subsets:
# this will get the corresponding subset in the attachTo list
# so we can set the version there
sub = next(item for item in instance.data['attachTo'] if item["subset"] == i.data["subset"]) # noqa: E501
sub["version"] = i.data.get("version", 1)
self.log.info("render will be attached to {} v{}".format(
sub["subset"], sub["version"]
))
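# Illustrative shape of instance.data["attachTo"] as produced by the
# render layer collector (hypothetical values):
#   [{"subset": "modelMain", "family": "model", "version": None}]
# After this plugin runs, "version" holds the version number of the
# matching subset found in the publish context.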

View file

@ -1,6 +1,7 @@
import os
import sys
import json
import copy
import tempfile
import contextlib
import subprocess
@ -330,10 +331,9 @@ class ExtractLook(pype.api.Extractor):
maya_path))
def resource_destination(self, instance, filepath, do_maketx):
anatomy = instance.context.data["anatomy"]
self.create_destination_template(instance, anatomy)
resources_dir = instance.data["resourcesDir"]
# Compute destination location
basename, ext = os.path.splitext(os.path.basename(filepath))
@ -343,7 +343,7 @@ class ExtractLook(pype.api.Extractor):
ext = ".tx"
return os.path.join(
instance.data["assumedDestination"], "resources", basename + ext
resources_dir, basename + ext
)
def _process_texture(self, filepath, do_maketx, staging, linearise, force):
@ -407,97 +407,3 @@ class ExtractLook(pype.api.Extractor):
return converted, COPY, texture_hash
return filepath, COPY, texture_hash
def create_destination_template(self, instance, anatomy):
"""Create a filepath based on the current data available
Example template:
{root}/{project}/{silo}/{asset}/publish/{subset}/v{version:0>3}/
{subset}.{representation}
Args:
instance: the instance to publish
Returns:
file path (str)
"""
# get all the stuff from the database
subset_name = instance.data["subset"]
self.log.info(subset_name)
asset_name = instance.data["asset"]
project_name = api.Session["AVALON_PROJECT"]
a_template = anatomy.templates
project = io.find_one(
{
"type": "project",
"name": project_name
},
projection={"config": True, "data": True}
)
template = a_template["publish"]["path"]
# anatomy = instance.context.data['anatomy']
asset = io.find_one({
"type": "asset",
"name": asset_name,
"parent": project["_id"]
})
assert asset, ("No asset found by the name '{}' "
"in project '{}'").format(asset_name, project_name)
silo = asset.get("silo")
subset = io.find_one({
"type": "subset",
"name": subset_name,
"parent": asset["_id"]
})
# assume there is no version yet, we start at `1`
version = None
version_number = 1
if subset is not None:
version = io.find_one(
{
"type": "version",
"parent": subset["_id"]
},
sort=[("name", -1)]
)
# if there is a subset there ought to be version
if version is not None:
version_number += version["name"]
if instance.data.get("version"):
version_number = int(instance.data.get("version"))
padding = int(a_template["render"]["padding"])
hierarchy = asset["data"]["parents"]
if hierarchy:
# hierarchy = os.path.sep.join(hierarchy)
hierarchy = "/".join(hierarchy)
template_data = {
"root": api.Session["AVALON_PROJECTS"],
"project": {"name": project_name, "code": project["data"]["code"]},
"silo": silo,
"family": instance.data["family"],
"asset": asset_name,
"subset": subset_name,
"frame": ("#" * padding),
"version": version_number,
"hierarchy": hierarchy,
"representation": "TEMP",
}
instance.data["assumedTemplateData"] = template_data
self.log.info(template_data)
instance.data["template"] = template
# We take the parent folder of representation 'filepath'
instance.data["assumedDestination"] = os.path.dirname(
anatomy.format(template_data)["publish"]["path"]
)

View file

@ -110,15 +110,7 @@ class ExtractYetiRig(pype.api.Extractor):
self.log.info("Writing metadata file")
# Create assumed destination folder for imageSearchPath
assumed_temp_data = instance.data["assumedTemplateData"]
template = instance.data["template"]
template_formatted = template.format(**assumed_temp_data)
destination_folder = os.path.dirname(template_formatted)
image_search_path = os.path.join(destination_folder, "resources")
image_search_path = os.path.normpath(image_search_path)
image_search_path = resources_dir = instance.data["resourcesDir"]
settings = instance.data.get("rigsettings", None)
if settings:

View file

@ -1,6 +1,7 @@
import os
import json
import getpass
import clique
from maya import cmds
@ -117,6 +118,8 @@ class MayaSubmitDeadline(pyblish.api.InstancePlugin):
else:
optional = True
use_published = True
def process(self, instance):
DEADLINE_REST_URL = os.environ.get("DEADLINE_REST_URL",
@ -125,21 +128,66 @@ class MayaSubmitDeadline(pyblish.api.InstancePlugin):
context = instance.context
workspace = context.data["workspaceDir"]
anatomy = context.data['anatomy']
filepath = None
if self.use_published:
for i in context:
if "workfile" in i.data["families"]:
assert i.data["publish"] is True, (
"Workfile (scene) must be published along")
template_data = i.data.get("anatomyData")
rep = i.data.get("representations")[0].get("name")
template_data["representation"] = rep
template_data["ext"] = rep
template_data["comment"] = None
anatomy_filled = anatomy.format(template_data)
template_filled = anatomy_filled["publish"]["path"]
filepath = os.path.normpath(template_filled)
self.log.info("Using published scene for render {}".format(
filepath))
# now we need to switch the scene name in the expected files,
# because the <scene> token will point to the published scene
# file, which may differ from the current one
new_scene = os.path.splitext(
os.path.basename(filepath))[0]
orig_scene = os.path.splitext(
os.path.basename(context.data["currentFile"]))[0]
exp = instance.data.get("expectedFiles")
if isinstance(exp[0], dict):
# we have aovs and we need to iterate over them
new_exp = {}
for aov, files in exp[0].items():
replaced_files = []
for f in files:
replaced_files.append(
f.replace(orig_scene, new_scene)
)
new_exp[aov] = replaced_files
instance.data["expectedFiles"] = [new_exp]
else:
new_exp = []
for f in exp:
new_exp.append(
f.replace(orig_scene, new_scene)
)
instance.data["expectedFiles"] = [new_exp]
self.log.info("Scene name was switched {} -> {}".format(
orig_scene, new_scene
))
allInstances = []
for result in context.data["results"]:
if (result["instance"] is not None and
result["instance"] not in allInstances):
allInstances.append(result["instance"])
for inst in allInstances:
print(inst)
if inst.data['family'] == 'scene':
filepath = inst.data['destination_list'][0]
# fallback if nothing was set
if not filepath:
self.log.warning("Falling back to workfile")
filepath = context.data["currentFile"]
self.log.debug(filepath)
@ -150,8 +198,8 @@ class MayaSubmitDeadline(pyblish.api.InstancePlugin):
dirname = os.path.join(workspace, "renders")
renderlayer = instance.data['setMembers'] # rs_beauty
renderlayer_name = instance.data['subset'] # beauty
renderlayer_globals = instance.data["renderGlobals"]
legacy_layers = renderlayer_globals["UseLegacyRenderLayers"]
# renderlayer_globals = instance.data["renderGlobals"]
# legacy_layers = renderlayer_globals["UseLegacyRenderLayers"]
deadline_user = context.data.get("deadlineUser", getpass.getuser())
jobname = "%s - %s" % (filename, instance.name)
@ -195,7 +243,8 @@ class MayaSubmitDeadline(pyblish.api.InstancePlugin):
# Optional, enable double-click to preview rendered
# frames from Deadline Monitor
"OutputFilename0": output_filename_0.replace("\\", "/"),
"OutputDirectory0": os.path.dirname(output_filename_0),
"OutputFilename0": output_filename_0.replace("\\", "/")
},
"PluginInfo": {
# Input
@ -211,9 +260,6 @@ class MayaSubmitDeadline(pyblish.api.InstancePlugin):
# Only render layers are considered renderable in this pipeline
"UsingRenderLayers": True,
# Use legacy Render Layer system
"UseLegacyRenderLayers": legacy_layers,
# Render only this layer
"RenderLayer": renderlayer,
@ -228,6 +274,26 @@ class MayaSubmitDeadline(pyblish.api.InstancePlugin):
"AuxFiles": []
}
exp = instance.data.get("expectedFiles")
OutputFilenames = {}
expIndex = 0
if isinstance(exp[0], dict):
# we have aovs and we need to iterate over them
for aov, files in exp[0].items():
col = clique.assemble(files)[0][0]
outputFile = col.format('{head}{padding}{tail}')
payload['JobInfo']['OutputFilename' + str(expIndex)] = outputFile
OutputFilenames[expIndex] = outputFile
expIndex += 1
else:
col = clique.assemble(exp)[0][0]
outputFile = col.format('{head}{padding}{tail}')
payload['JobInfo']['OutputFilename' + str(expIndex)] = outputFile
# OutputFilenames[expIndex] = outputFile
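# A hedged illustration of the clique calls above: assemble() groups a
# frame sequence into collections, and format() renders a single padded
# pattern that Deadline can use as an OutputFilename entry:
#   import clique
#   files = ["beauty.0001.exr", "beauty.0002.exr", "beauty.0003.exr"]
#   col = clique.assemble(files)[0][0]
#   col.format('{head}{padding}{tail}')  # -> 'beauty.%04d.exr'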
# We need those to pass them to pype for it to set correct context
keys = [
"FTRACK_API_KEY",

View file

@ -0,0 +1,97 @@
import os
import types
import maya.cmds as cmds
import pyblish.api
import pype.api
import pype.maya.action
class ValidateAssRelativePaths(pyblish.api.InstancePlugin):
"""Ensure exporting ass file has set relative texture paths"""
order = pype.api.ValidateContentsOrder
hosts = ['maya']
families = ['ass']
label = "ASS has relative texture paths"
actions = [pype.api.RepairAction]
def process(self, instance):
# we cannot query this until the user opens render settings, as
# `defaultArnoldRenderOptions` doesn't exist yet
try:
absolute_texture = cmds.getAttr(
"defaultArnoldRenderOptions.absolute_texture_paths")
absolute_procedural = cmds.getAttr(
"defaultArnoldRenderOptions.absolute_procedural_paths")
texture_search_path = cmds.getAttr(
"defaultArnoldRenderOptions.tspath"
)
procedural_search_path = cmds.getAttr(
"defaultArnoldRenderOptions.pspath"
)
except ValueError:
assert False, ("Can not validate, render setting were not opened "
"yet so Arnold setting cannot be validate")
scene_dir, scene_basename = os.path.split(cmds.file(q=True, loc=True))
scene_name, _ = os.path.splitext(scene_basename)
project_root = "{}{}{}".format(
os.environ.get("AVALON_PROJECTS"),
os.path.sep,
os.environ.get("AVALON_PROJECT")
)
assert self.maya_is_true(absolute_texture) is not True, \
"Texture paths are set to be absolute"
assert self.maya_is_true(absolute_procedural) is not True, \
"Procedural paths are set to be absolute"
texture_search_path = texture_search_path.replace("\\", "/")
procedural_search_path = procedural_search_path.replace("\\", "/")
project_root = project_root.replace("\\", "/")
assert project_root in texture_search_path, \
("Project root is not in texture_search_path")
assert project_root in procedural_search_path, \
("Project root is not in procedural_search_path")
@classmethod
def repair(cls, instance):
texture_search_path = cmds.getAttr(
"defaultArnoldRenderOptions.tspath"
)
procedural_search_path = cmds.getAttr(
"defaultArnoldRenderOptions.pspath"
)
project_root = "{}{}{}".format(
os.environ.get("AVALON_PROJECTS"),
os.path.sep,
os.environ.get("AVALON_PROJECT"),
).replace("\\", "/")
cmds.setAttr("defaultArnoldRenderOptions.tspath",
project_root + os.pathsep + texture_search_path,
type="string")
cmds.setAttr("defaultArnoldRenderOptions.pspath",
project_root + os.pathsep + procedural_search_path,
type="string")
cmds.setAttr("defaultArnoldRenderOptions.absolute_procedural_paths",
False)
cmds.setAttr("defaultArnoldRenderOptions.absolute_texture_paths",
False)
def maya_is_true(self, attr_val):
"""
Whether a Maya attr evaluates to True.
When querying an attribute value from an ambiguous object the
Maya API will return a list of values, which needs special
handling to evaluate correctly.
"""
if isinstance(attr_val, bool):
return attr_val
elif isinstance(attr_val, (list, types.GeneratorType)):
return any(attr_val)
else:
return bool(attr_val)

View file

@ -1,17 +1,26 @@
import re
import pyblish.api
import pype.api
import pype.maya.action
from maya import cmds
ImagePrefixes = {
'mentalray': 'defaultRenderGlobals.imageFilePrefix',
'vray': 'vraySettings.fileNamePrefix',
'arnold': 'defaultRenderGlobals.imageFilePrefix',
'renderman': 'defaultRenderGlobals.imageFilePrefix',
'redshift': 'defaultRenderGlobals.imageFilePrefix'
}
class ValidateRenderSingleCamera(pyblish.api.InstancePlugin):
"""Only one camera may be renderable in a layer.
Currently the pipeline supports only a single camera per layer.
This is because when multiple cameras are rendered the output files
automatically get different names because the <Camera> render token
is not in the output path. As such the output files conflict with how
our pipeline expects the output.
"""Validate renderable camera count for layer and <Camera> token.
The pipeline supports multiple renderable cameras per layer, but the
image prefix must then contain the <Camera> token.
"""
order = pype.api.ValidateContentsOrder
@ -21,6 +30,8 @@ class ValidateRenderSingleCamera(pyblish.api.InstancePlugin):
"vrayscene"]
actions = [pype.maya.action.SelectInvalidAction]
R_CAMERA_TOKEN = re.compile(r'%c|<camera>', re.IGNORECASE)
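# R_CAMERA_TOKEN matches '%c' or '<Camera>' case-insensitively, e.g.
# (illustrative prefix):
#   re.search(R_CAMERA_TOKEN, "maya/<Scene>/<RenderLayer>/<RenderLayer>_<Camera>")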
def process(self, instance):
"""Process all the cameras in the instance"""
invalid = self.get_invalid(instance)
@ -31,8 +42,17 @@ class ValidateRenderSingleCamera(pyblish.api.InstancePlugin):
def get_invalid(cls, instance):
cameras = instance.data.get("cameras", [])
renderer = cmds.getAttr('defaultRenderGlobals.currentRenderer').lower()
# handle various renderman names
if renderer.startswith('renderman'):
renderer = 'renderman'
file_prefix = cmds.getAttr(ImagePrefixes[renderer])
if len(cameras) > 1:
if re.search(cls.R_CAMERA_TOKEN, file_prefix):
# if there is <Camera> token in prefix and we have more then
# 1 camera, all is ok.
return
cls.log.error("Multiple renderable cameras found for %s: %s " %
(instance.data["setMembers"], cameras))
return [instance.data["setMembers"]] + cameras

View file

@ -1,4 +1,5 @@
import os
import re
from maya import cmds, mel
import pymel.core as pm
@ -11,9 +12,13 @@ import pype.maya.lib as lib
class ValidateRenderSettings(pyblish.api.InstancePlugin):
"""Validates the global render settings
* File Name Prefix must be as followed:
* vray: maya/<Scene>/<Layer>/<Layer>
* default: maya/<Scene>/<RenderLayer>/<RenderLayer>_<RenderPass>
* File Name Prefix must start with: `maya/<Scene>`
all other tokens are customizable, but sane values are:
`maya/<Scene>/<RenderLayer>/<RenderLayer>_<RenderPass>`
The <Camera> token is also supported, useful for multiple renderable
cameras per render layer.
* Frame Padding must be:
* default: 4
@ -35,16 +40,47 @@ class ValidateRenderSettings(pyblish.api.InstancePlugin):
families = ["renderlayer"]
actions = [pype.api.RepairAction]
ImagePrefixes = {
'mentalray': 'defaultRenderGlobals.imageFilePrefix',
'vray': 'vraySettings.fileNamePrefix',
'arnold': 'defaultRenderGlobals.imageFilePrefix',
'renderman': 'rmanGlobals.imageFileFormat',
'redshift': 'defaultRenderGlobals.imageFilePrefix'
}
ImagePrefixTokens = {
'arnold': 'maya/<Scene>/<RenderLayer>/<RenderLayer>_<RenderPass>',
'redshift': 'maya/<Scene>/<RenderLayer>/<RenderLayer>',
'vray': 'maya/<scene>/<Layer>/<Layer>',
'renderman': '<layer>_<aov>.<f4>.<ext>'
}
# WARNING: There is bug? in renderman, translating <scene> token
# to something left behind mayas default image prefix. So instead
# `SceneName_v01` it translates to:
# `SceneName_v01/<RenderLayer>/<RenderLayers_<RenderPass>` that means
# for example:
# `SceneName_v01/Main/Main_<RenderPass>`. Possible solution is to define
# custom token like <scene_name> to point to determined scene name.
RendermanDirPrefix = "<ws>/renders/maya/<scene>/<layer>"
R_AOV_TOKEN = re.compile(
r'%a|<aov>|<renderpass>', re.IGNORECASE)
R_LAYER_TOKEN = re.compile(
r'%l|<layer>|<renderlayer>', re.IGNORECASE)
R_CAMERA_TOKEN = re.compile(r'%c|<camera>', re.IGNORECASE)
R_SCENE_TOKEN = re.compile(r'%s|<scene>', re.IGNORECASE)
DEFAULT_PADDING = 4
RENDERER_PREFIX = {"vray": "maya/<scene>/<Layer>/<Layer>"}
VRAY_PREFIX = "maya/<scene>/<Layer>/<Layer>"
DEFAULT_PREFIX = "maya/<Scene>/<RenderLayer>/<RenderLayer>_<RenderPass>"
def process(self, instance):
invalid = self.get_invalid(instance)
if invalid:
raise ValueError("Invalid render settings found for '%s'!"
% instance.name)
assert invalid is False, ("Invalid render settings "
"found for '{}'!".format(instance.name))
@classmethod
def get_invalid(cls, instance):
@ -53,10 +89,11 @@ class ValidateRenderSettings(pyblish.api.InstancePlugin):
renderer = instance.data['renderer']
layer = instance.data['setMembers']
cameras = instance.data.get("cameras", [])
# Get the node attributes for current renderer
attrs = lib.RENDER_ATTRS.get(renderer, lib.RENDER_ATTRS['default'])
prefix = lib.get_attr_in_layer("{node}.{prefix}".format(**attrs),
prefix = lib.get_attr_in_layer(cls.ImagePrefixes[renderer],
layer=layer)
padding = lib.get_attr_in_layer("{node}.{padding}".format(**attrs),
layer=layer)
@ -68,12 +105,63 @@ class ValidateRenderSettings(pyblish.api.InstancePlugin):
cls.log.error("Animation needs to be enabled. Use the same "
"frame for start and end to render single frame")
fname_prefix = cls.get_prefix(renderer)
if prefix != fname_prefix:
if not prefix.lower().startswith("maya/<scene>"):
invalid = True
cls.log.error("Wrong file name prefix: %s (expected: %s)"
% (prefix, fname_prefix))
cls.log.error("Wrong image prefix [ {} ] - "
"doesn't start with: 'maya/<scene>'".format(prefix))
if not re.search(cls.R_LAYER_TOKEN, prefix):
invalid = True
cls.log.error("Wrong image prefix [ {} ] - "
"doesn't have: '<renderlayer>' or "
"'<layer>' token".format(prefix))
if len(cameras) > 1:
if not re.search(cls.R_CAMERA_TOKEN, prefix):
invalid = True
cls.log.error("Wrong image prefix [ {} ] - "
"doesn't have: '<camera>' token".format(prefix))
# renderer specific checks
if renderer == "vray":
# no vray checks implemented yet
pass
elif renderer == "redshift":
# no redshift check implemented yet
pass
elif renderer == "renderman":
file_prefix = cmds.getAttr("rmanGlobals.imageFileFormat")
dir_prefix = cmds.getAttr("rmanGlobals.imageOutputDir")
if file_prefix.lower() != cls.ImagePrefixTokens[renderer].lower():
invalid = True
cls.log.error("Wrong image prefix [ {} ]".format(file_prefix))
if dir_prefix.lower() != cls.RendermanDirPrefix.lower():
invalid = True
cls.log.error("Wrong directory prefix [ {} ]".format(
dir_prefix))
else:
multichannel = cmds.getAttr("defaultArnoldDriver.mergeAOVs")
if multichannel:
if re.search(cls.R_AOV_TOKEN, prefix):
invalid = True
cls.log.error("Wrong image prefix [ {} ] - "
"You can't use '<renderpass>' token "
"with merge AOVs turned on".format(prefix))
else:
if not re.search(cls.R_AOV_TOKEN, prefix):
invalid = True
cls.log.error("Wrong image prefix [ {} ] - "
"doesn't have: '<renderpass>' or "
"token".format(prefix))
# prefix check
if prefix.lower() != cls.ImagePrefixTokens[renderer].lower():
cls.log.warning("warning: prefix differs from "
"recommended {}".format(
cls.ImagePrefixTokens[renderer]))
if padding != cls.DEFAULT_PADDING:
invalid = True
@ -82,21 +170,6 @@ class ValidateRenderSettings(pyblish.api.InstancePlugin):
return invalid
@classmethod
def get_prefix(cls, renderer):
prefix = cls.RENDERER_PREFIX.get(renderer, cls.DEFAULT_PREFIX)
# maya.cmds and pymel.core return only default project directory and
# not the current one but only default.
output_path = os.path.join(
mel.eval("workspace -q -rd;"), pm.workspace.fileRules["images"]
)
# Workfile paths can be configured to have host name in file path.
# In this case we want to avoid duplicate folder names.
if "maya" in output_path.lower():
prefix = prefix.replace("maya/", "")
return prefix
@classmethod
def repair(cls, instance):
@ -108,14 +181,23 @@ class ValidateRenderSettings(pyblish.api.InstancePlugin):
render_attrs = lib.RENDER_ATTRS.get(renderer, default)
# Repair prefix
node = render_attrs["node"]
prefix_attr = render_attrs["prefix"]
if renderer != "renderman":
node = render_attrs["node"]
prefix_attr = render_attrs["prefix"]
fname_prefix = cls.get_prefix(renderer)
cmds.setAttr("{}.{}".format(node, prefix_attr),
fname_prefix, type="string")
fname_prefix = cls.ImagePrefixTokens[renderer]
cmds.setAttr("{}.{}".format(node, prefix_attr),
fname_prefix, type="string")
# Repair padding
padding_attr = render_attrs["padding"]
cmds.setAttr("{}.{}".format(node, padding_attr),
cls.DEFAULT_PADDING)
# Repair padding
padding_attr = render_attrs["padding"]
cmds.setAttr("{}.{}".format(node, padding_attr),
cls.DEFAULT_PADDING)
else:
# renderman handles stuff differently
cmds.setAttr("rmanGlobals.imageFileFormat",
cls.ImagePrefixTokens[renderer],
type="string")
cmds.setAttr("rmanGlobals.imageOutputDir",
cls.RendermanDirPrefix,
type="string")

View file

@ -1 +0,0 @@
# usually used for mattepainting

View file

@ -1,46 +0,0 @@
import pyblish.api
@pyblish.api.log
class CollectRenderTarget(pyblish.api.InstancePlugin):
"""Collect families for all instances"""
order = pyblish.api.CollectorOrder + 0.2
label = "Collect Render Target"
hosts = ["nuke", "nukeassist"]
families = ['write']
def process(self, instance):
node = instance[0]
self.log.info('processing {}'.format(node))
families = []
if instance.data.get('families'):
families += instance.data['families']
# set for ftrack to accept
# instance.data["families"] = ["ftrack"]
if node["render"].value():
# dealing with local/farm rendering
if node["render_farm"].value():
families.append("render.farm")
else:
families.append("render.local")
else:
families.append("render.frames")
# to ignore staging dir op in integrate
instance.data['transfer'] = False
families.append('ftrack')
instance.data["families"] = families
# Sort/grouped by family (preserving local index)
instance.context[:] = sorted(instance.context, key=self.sort_by_family)
def sort_by_family(self, instance):
"""Sort by family"""
return instance.data.get("families", instance.data.get("family"))

View file

@ -1,147 +0,0 @@
import os
import json
import getpass
from avalon import api
from avalon.vendor import requests
import pyblish.api
class NukeSubmitDeadline(pyblish.api.InstancePlugin):
# TODO: rewrite docstring to nuke
"""Submit current Comp to Deadline
Renders are submitted to a Deadline Web Service as
supplied via the environment variable DEADLINE_REST_URL
"""
label = "Submit to Deadline"
order = pyblish.api.IntegratorOrder
hosts = ["nuke"]
families = ["write", "render.deadline"]
def process(self, instance):
context = instance.context
key = "__hasRun{}".format(self.__class__.__name__)
if context.data.get(key, False):
return
else:
context.data[key] = True
DEADLINE_REST_URL = api.Session.get("DEADLINE_REST_URL",
"http://localhost:8082")
assert DEADLINE_REST_URL, "Requires DEADLINE_REST_URL"
# Collect all saver instances in context that are to be rendered
write_instances = []
for instance in context[:]:
if not self.families[0] in instance.data.get("families"):
# Allow only saver family instances
continue
if not instance.data.get("publish", True):
# Skip inactive instances
continue
self.log.debug(instance.data["name"])
write_instances.append(instance)
if not write_instances:
raise RuntimeError("No instances found for Deadline submittion")
hostVersion = int(context.data["hostVersion"])
filepath = context.data["currentFile"]
filename = os.path.basename(filepath)
comment = context.data.get("comment", "")
deadline_user = context.data.get("deadlineUser", getpass.getuser())
# Documentation for keys available at:
# https://docs.thinkboxsoftware.com
# /products/deadline/8.0/1_User%20Manual/manual
# /manual-submission.html#job-info-file-options
payload = {
"JobInfo": {
# Top-level group name
"BatchName": filename,
# Job name, as seen in Monitor
"Name": filename,
# User, as seen in Monitor
"UserName": deadline_user,
# Use a default submission pool for Nuke
"Pool": "nuke",
"Plugin": "Nuke",
"Frames": "{start}-{end}".format(
start=int(instance.data["frameStart"]),
end=int(instance.data["frameEnd"])
),
"Comment": comment,
},
"PluginInfo": {
# Input
"FlowFile": filepath,
# Mandatory for Deadline
"Version": str(hostVersion),
# Render in high quality
"HighQuality": True,
# Whether saver output should be checked after rendering
# is complete
"CheckOutput": True,
# Proxy: higher numbers smaller images for faster test renders
# 1 = no proxy quality
"Proxy": 1,
},
# Mandatory for Deadline, may be empty
"AuxFiles": []
}
# Enable going to rendered frames from Deadline Monitor
for index, instance in enumerate(write_instances):
path = instance.data["path"]
folder, filename = os.path.split(path)
payload["JobInfo"]["OutputDirectory%d" % index] = folder
payload["JobInfo"]["OutputFilename%d" % index] = filename
# Include critical variables with submission
keys = [
# TODO: This won't work if the slaves don't have access to
# these paths, such as if slaves are running Linux and the
# submitter is on Windows.
"PYTHONPATH",
"NUKE_PATH"
# "OFX_PLUGIN_PATH",
]
environment = dict({key: os.environ[key] for key in keys
if key in os.environ}, **api.Session)
payload["JobInfo"].update({
"EnvironmentKeyValue%d" % index: "{key}={value}".format(
key=key,
value=environment[key]
) for index, key in enumerate(environment)
})
self.log.info("Submitting..")
self.log.info(json.dumps(payload, indent=4, sort_keys=True))
# E.g. http://192.168.0.1:8082/api/jobs
url = "{}/api/jobs".format(DEADLINE_REST_URL)
response = requests.post(url, json=payload)
if not response.ok:
raise Exception(response.text)
# Store the response for dependent job submission plug-ins
for instance in write_instances:
instance.data["deadlineSubmissionJob"] = response.json()

View file

@ -1,24 +0,0 @@
import pyblish.api
class IncrementTestPlugin(pyblish.api.ContextPlugin):
"""Increment current script version."""
order = pyblish.api.CollectorOrder + 0.5
label = "Test Plugin"
hosts = ['nuke']
def process(self, context):
instances = context[:]
prerender_check = list()
families_check = list()
for instance in instances:
if ("prerender" in str(instance)):
prerender_check.append(instance)
if instance.data.get("families", None):
families_check.append(True)
if len(prerender_check) != len(families_check):
self.log.info(prerender_check)
self.log.info(families_check)

View file

@ -1,68 +0,0 @@
import nuke
import os
import pyblish.api
from avalon import io
# TODO: add repair function
@pyblish.api.log
class ValidateSettingsNuke(pyblish.api.Validator):
""" Validates settings """
families = ['scene']
hosts = ['nuke']
optional = True
label = 'Settings'
def process(self, instance):
asset = io.find_one({"name": os.environ['AVALON_ASSET']})
try:
avalon_resolution = asset["data"].get("resolution", '')
avalon_pixel_aspect = asset["data"].get("pixelAspect", '')
avalon_fps = asset["data"].get("fps", '')
avalon_first = asset["data"].get("frameStart", '')
avalon_last = asset["data"].get("frameEnd", '')
avalon_crop = asset["data"].get("crop", '')
except KeyError:
print(
"No resolution information found for \"{0}\".".format(
asset["name"]
)
)
return
# validating first frame
local_first = nuke.root()['first_frame'].value()
msg = 'First frame is incorrect.'
msg += '\n\nLocal first: %s' % local_first
msg += '\n\nOnline first: %s' % avalon_first
assert local_first == avalon_first, msg
# validating last frame
local_last = nuke.root()['last_frame'].value()
msg = 'Last frame is incorrect.'
msg += '\n\nLocal last: %s' % local_last
msg += '\n\nOnline last: %s' % avalon_last
assert local_last == avalon_last, msg
# validating fps
local_fps = nuke.root()['fps'].value()
msg = 'FPS is incorrect.'
msg += '\n\nLocal fps: %s' % local_fps
msg += '\n\nOnline fps: %s' % avalon_fps
assert local_fps == avalon_fps, msg
# validating resolution width
local_width = nuke.root().format().width()
msg = 'Width is incorrect.'
msg += '\n\nLocal width: %s' % local_width
msg += '\n\nOnline width: %s' % avalon_resolution[0]
assert local_width == avalon_resolution[0], msg
# validating resolution width
local_height = nuke.root().format().height()
msg = 'Height is incorrect.'
msg += '\n\nLocal height: %s' % local_height
msg += '\n\nOnline height: %s' % avalon_resolution[1]
assert local_height == avalon_resolution[1], msg

View file

@ -1,33 +0,0 @@
import nuke
import pyblish.api
class RepairNukeProxyModeAction(pyblish.api.Action):
label = "Repair"
icon = "wrench"
on = "failed"
def process(self, context, plugin):
nuke.root()["proxy"].setValue(0)
class ValidateNukeProxyMode(pyblish.api.ContextPlugin):
"""Validates against having proxy mode on."""
order = pyblish.api.ValidatorOrder
optional = True
label = "Proxy Mode"
actions = [RepairNukeProxyModeAction]
hosts = ["nuke", "nukeassist"]
# targets = ["default", "process"]
def process(self, context):
msg = (
"Proxy mode is not supported. Please disable Proxy Mode in the "
"Project settings."
)
assert not nuke.root()["proxy"].getValue(), msg

View file

@ -112,6 +112,7 @@ class LoadMov(api.Loader):
)
version = context['version']
version_data = version.get("data", {})
repr_id = context["representation"]["_id"]
orig_first = version_data.get("frameStart")
orig_last = version_data.get("frameEnd")
@ -120,12 +121,16 @@ class LoadMov(api.Loader):
first = orig_first - diff
last = orig_last - diff
handle_start = version_data.get("handleStart")
handle_end = version_data.get("handleEnd")
handle_start = version_data.get("handleStart", 0)
handle_end = version_data.get("handleEnd", 0)
colorspace = version_data.get("colorspace")
repr_cont = context["representation"]["context"]
self.log.debug(
"Representation id `{}` ".format(repr_id))
context["representation"]["_id"]
# create handles offset (only to last, because of mov)
last += handle_start + handle_end
# offset should be with handles so it match orig frame range
@ -138,7 +143,6 @@ class LoadMov(api.Loader):
file = self.fname
if not file:
repr_id = context["representation"]["_id"]
self.log.warning(
"Representation id `{}` is failing to load".format(repr_id))
return

View file

@ -86,8 +86,11 @@ class LoadSequence(api.Loader):
version = context['version']
version_data = version.get("data", {})
repr_id = context["representation"]["_id"]
self.log.info("version_data: {}\n".format(version_data))
self.log.debug(
"Representation id `{}` ".format(repr_id))
self.first_frame = int(nuke.root()["first_frame"].getValue())
self.handle_start = version_data.get("handleStart", 0)

View file

@ -1,22 +0,0 @@
import os
import pype.api as pype
import pyblish.api
class CollectScriptVersion(pyblish.api.ContextPlugin):
"""Collect Script Version."""
order = pyblish.api.CollectorOrder
label = "Collect Script Version"
hosts = [
"nuke",
"nukeassist"
]
def process(self, context):
file_path = context.data["currentFile"]
base_name = os.path.basename(file_path)
# get version string
version = pype.get_version_from_path(base_name)
context.data['version'] = version

View file

@ -56,8 +56,7 @@ class CollectNukeWrites(pyblish.api.InstancePlugin):
self.log.debug('output dir: {}'.format(output_dir))
# get version to instance for integration
instance.data['version'] = instance.context.data.get(
"version", pype.get_version_from_path(nuke.root().name()))
instance.data['version'] = instance.context.data["version"]
self.log.debug('Write Version: %s' % instance.data('version'))
@ -113,16 +112,7 @@ class CollectNukeWrites(pyblish.api.InstancePlugin):
# Add version data to instance
version_data = {
"handles": handle_start,
"handleStart": handle_start,
"handleEnd": handle_end,
"frameStart": first_frame + handle_start,
"frameEnd": last_frame - handle_end,
"version": int(instance.data['version']),
"colorspace": node["colorspace"].value(),
"families": ["render"],
"subset": instance.data["subset"],
"fps": instance.context.data["fps"]
}
instance.data["family"] = "write"
@ -142,7 +132,8 @@ class CollectNukeWrites(pyblish.api.InstancePlugin):
"outputDir": output_dir,
"ext": ext,
"label": label,
"handles": handles,
"handleStart": handle_start,
"handleEnd": handle_end,
"frameStart": first_frame,
"frameEnd": last_frame,
"outputType": output_type,

View file

@ -116,7 +116,7 @@ class ExtractThumbnail(pype.api.Extractor):
write_node["raw"].setValue(1)
write_node.setInput(0, previous_node)
temporary_nodes.append(write_node)
tags = ["thumbnail"]
tags = ["thumbnail", "publish_on_farm"]
# retime for
first_frame = int(last_frame) / 2

View file

@ -5,7 +5,6 @@ import getpass
from avalon import api
from avalon.vendor import requests
import re
import pyblish.api
@ -23,6 +22,11 @@ class NukeSubmitDeadline(pyblish.api.InstancePlugin):
families = ["render.farm"]
optional = True
deadline_priority = 50
deadline_pool = ""
deadline_pool_secondary = ""
deadline_chunk_size = 1
def process(self, instance):
node = instance[0]
@ -55,7 +59,9 @@ class NukeSubmitDeadline(pyblish.api.InstancePlugin):
)
# Store output dir for unified publisher (filesequence)
instance.data["deadlineSubmissionJob"] = response.json()
instance.data["publishJobState"] = "Active"
instance.data["outputDir"] = os.path.dirname(
render_path).replace("\\", "/")
instance.data["publishJobState"] = "Suspended"
if instance.data.get("bakeScriptPath"):
render_path = instance.data.get("bakeRenderPath")
@ -87,6 +93,8 @@ class NukeSubmitDeadline(pyblish.api.InstancePlugin):
script_name = os.path.basename(script_path)
jobname = "%s - %s" % (script_name, instance.name)
output_filename_0 = self.preview_fname(render_path)
if not responce_data:
responce_data = {}
@ -96,6 +104,15 @@ class NukeSubmitDeadline(pyblish.api.InstancePlugin):
except OSError:
pass
# define chunk and priority
chunk_size = instance.data.get("deadlineChunkSize")
if chunk_size == 0:
chunk_size = self.deadline_chunk_size
priority = instance.data.get("deadlinePriority")
if priority != 50:
priority = self.deadline_priority
payload = {
"JobInfo": {
# Top-level group name
@ -107,10 +124,11 @@ class NukeSubmitDeadline(pyblish.api.InstancePlugin):
# Arbitrary username, for visualisation in Monitor
"UserName": self._deadline_user,
"Priority": instance.data["deadlinePriority"],
"Priority": priority,
"ChunkSize": chunk_size,
"Pool": "2d",
"SecondaryPool": "2d",
"Pool": self.deadline_pool,
"SecondaryPool": self.deadline_pool_secondary,
"Plugin": "Nuke",
"Frames": "{start}-{end}".format(
@ -119,6 +137,10 @@ class NukeSubmitDeadline(pyblish.api.InstancePlugin):
),
"Comment": self._comment,
# Optional, enable double-click to preview rendered
# frames from Deadline Monitor
"OutputFilename0": output_filename_0.replace("\\", "/")
},
"PluginInfo": {
# Input
@ -220,6 +242,10 @@ class NukeSubmitDeadline(pyblish.api.InstancePlugin):
self.log.info("Submitting..")
self.log.info(json.dumps(payload, indent=4, sort_keys=True))
# adding expected files to instance.data
self.expected_files(instance, render_path)
self.log.debug("__ expectedFiles: `{}`".format(
instance.data["expectedFiles"]))
response = requests.post(self.deadline_url, json=payload)
if not response.ok:
@ -240,3 +266,51 @@ class NukeSubmitDeadline(pyblish.api.InstancePlugin):
"%f=%d was rounded off to nearest integer"
% (value, int(value))
)
def preview_fname(self, path):
"""Return output file path with #### for padding.
Deadline requires the path to be formatted with # in place of numbers.
For example `/path/to/render.####.png`
Args:
path (str): path to rendered images
Returns:
str
"""
self.log.debug("_ path: `{}`".format(path))
if "%" in path:
search_results = re.search(r"(%0)(\d)(d.)", path).groups()
self.log.debug("_ search_results: `{}`".format(search_results))
return int(search_results[1])
if "#" in path:
self.log.debug("_ path: `{}`".format(path))
return path
else:
return path
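A quick sketch of the intended conversion (hypothetical paths):

    # "/renders/beauty.%04d.exr" -> "/renders/beauty.####.exr"
    # "/renders/beauty.####.exr" -> unchanged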
def expected_files(self, instance, path):
""" Create expected files in instance data
"""
if not instance.data.get("expectedFiles"):
instance.data["expectedFiles"] = list()
dir = os.path.dirname(path)
file = os.path.basename(path)
if "#" in file:
pparts = file.split("#")
padding = "%0{}d".format(len(pparts) - 1)
file = pparts[0] + padding + pparts[-1]
if "%" not in file:
instance.data["expectedFiles"].append(path)
return
for i in range(self._frame_start, (self._frame_end + 1)):
instance.data["expectedFiles"].append(
os.path.join(dir, (file % i)).replace("\\", "/"))
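For example, with a hypothetical 1001-1003 frame range the expansion works like this:

    import os

    # standalone sketch of the same expansion logic
    path = "/renders/beauty.####.exr"
    file_name = os.path.basename(path)            # "beauty.####.exr"
    pparts = file_name.split("#")
    padding = "%0{}d".format(len(pparts) - 1)     # "%04d"
    file_name = pparts[0] + padding + pparts[-1]  # "beauty.%04d.exr"
    expected = [
        os.path.join(os.path.dirname(path), file_name % i)
        for i in range(1001, 1003 + 1)
    ]
    # ['/renders/beauty.1001.exr', '/renders/beauty.1002.exr',
    #  '/renders/beauty.1003.exr']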

View file

@ -1,5 +1,5 @@
from pyblish import api
import os
class CollectAudio(api.InstancePlugin):
"""Collect audio from tags.
@ -12,7 +12,7 @@ class CollectAudio(api.InstancePlugin):
"""
# Run just before CollectSubsets
order = api.CollectorOrder + 0.1025
order = api.CollectorOrder + 0.1021
label = "Collect Audio"
hosts = ["nukestudio"]
families = ["clip"]
@ -21,8 +21,10 @@ class CollectAudio(api.InstancePlugin):
# Exclude non-tagged instances.
tagged = False
for tag in instance.data["tags"]:
family = dict(tag["metadata"]).get("tag.family", "")
tag_data = dict(tag["metadata"])
family = tag_data.get("tag.family", "")
if family.lower() == "audio":
subset = tag_data.get("tag.subset", "Main")
tagged = True
if not tagged:
@ -40,14 +42,14 @@ class CollectAudio(api.InstancePlugin):
data["family"] = "audio"
data["families"] = ["ftrack"]
subset = ""
for tag in instance.data["tags"]:
tag_data = dict(tag["metadata"])
if "tag.subset" in tag_data:
subset = tag_data["tag.subset"]
data["subset"] = "audio" + subset.title()
data["source"] = data["sourcePath"]
data["label"] = "{} - {} - ({})".format(
data['asset'], data["subset"], os.path.splitext(data["sourcePath"])[
1]
)
self.log.debug("Creating instance with data: {}".format(data))
instance.context.create_instance(**data)
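For context, tag metadata on a NukeStudio clip is a flat string mapping; a hypothetical tag that would trigger this collector:

    # hypothetical tag metadata as the collector sees it
    tag_data = {"tag.family": "audio", "tag.subset": "Main"}
    subset = "audio" + tag_data.get("tag.subset", "Main").title()  # "audioMain"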

View file

@ -1,7 +1,7 @@
import os
from pyblish import api
import hiero
import nuke
class CollectClips(api.ContextPlugin):
@ -17,7 +17,7 @@ class CollectClips(api.ContextPlugin):
self.log.debug("Created `assetsShared` in context")
context.data["assetsShared"] = dict()
projectdata = context.data["projectData"]
projectdata = context.data["projectEntity"]["data"]
version = context.data.get("version", "001")
sequence = context.data.get("activeSequence")
selection = context.data.get("selection")
@ -48,7 +48,9 @@ class CollectClips(api.ContextPlugin):
track = item.parent()
source = item.source().mediaSource()
source_path = source.firstpath()
effects = [f for f in item.linkedItems() if f.isEnabled()]
effects = [f for f in item.linkedItems()
if f.isEnabled()
if isinstance(f, hiero.core.EffectTrackItem)]
# If source is *.nk its a comp effect and we need to fetch the
# write node output. This should be improved by parsing the script

View file

@ -55,8 +55,6 @@ class CollectClipHandles(api.ContextPlugin):
# debug printing
self.log.debug("_ s_asset_data: `{}`".format(
s_asset_data))
self.log.debug("_ instance.data[handles]: `{}`".format(
instance.data["handles"]))
self.log.debug("_ instance.data[handleStart]: `{}`".format(
instance.data["handleStart"]))
self.log.debug("_ instance.data[handleEnd]: `{}`".format(

View file

@ -42,6 +42,7 @@ class CollectHierarchyInstance(pyblish.api.ContextPlugin):
width = int(sequence.format().width())
height = int(sequence.format().height())
pixel_aspect = sequence.format().pixelAspect()
fps = context.data["fps"]
# build data for inner nukestudio project property
data = {
@ -161,9 +162,10 @@ class CollectHierarchyInstance(pyblish.api.ContextPlugin):
"asset": asset,
"hierarchy": hierarchy,
"parents": parents,
"width": width,
"height": height,
"resolutionWidth": width,
"resolutionHeight": height,
"pixelAspect": pixel_aspect,
"fps": fps,
"tasks": instance.data["tasks"]
})
@ -223,9 +225,12 @@ class CollectHierarchyContext(pyblish.api.ContextPlugin):
instance.data["parents"] = s_asset_data["parents"]
instance.data["hierarchy"] = s_asset_data["hierarchy"]
instance.data["tasks"] = s_asset_data["tasks"]
instance.data["width"] = s_asset_data["width"]
instance.data["height"] = s_asset_data["height"]
instance.data["resolutionWidth"] = s_asset_data[
"resolutionWidth"]
instance.data["resolutionHeight"] = s_asset_data[
"resolutionHeight"]
instance.data["pixelAspect"] = s_asset_data["pixelAspect"]
instance.data["fps"] = s_asset_data["fps"]
# adding frame start if any on instance
start_frame = s_asset_data.get("startingFrame")
@ -275,8 +280,8 @@ class CollectHierarchyContext(pyblish.api.ContextPlugin):
# adding SourceResolution if Tag was present
if instance.data.get("main"):
in_info['custom_attributes'].update({
"resolutionWidth": instance.data["width"],
"resolutionHeight": instance.data["height"],
"resolutionWidth": instance.data["resolutionWidth"],
"resolutionHeight": instance.data["resolutionHeight"],
"pixelAspect": instance.data["pixelAspect"]
})

View file

@ -14,7 +14,7 @@ class CollectPlates(api.InstancePlugin):
"""
# Run just before CollectSubsets
order = api.CollectorOrder + 0.1025
order = api.CollectorOrder + 0.1021
label = "Collect Plates"
hosts = ["nukestudio"]
families = ["clip"]
@ -23,8 +23,10 @@ class CollectPlates(api.InstancePlugin):
# Exclude non-tagged instances.
tagged = False
for tag in instance.data["tags"]:
family = dict(tag["metadata"]).get("tag.family", "")
tag_data = dict(tag["metadata"])
family = tag_data.get("tag.family", "")
if family.lower() == "plate":
subset = tag_data.get("tag.subset", "Main")
tagged = True
break
@ -34,29 +36,27 @@ class CollectPlates(api.InstancePlugin):
"\"plate\"".format(instance)
)
return
self.log.debug("__ subset: `{}`".format(instance.data["subset"]))
# if "audio" in instance.data["subset"]:
# return
# Collect data.
data = {}
for key, value in instance.data.iteritems():
data[key] = value
self.log.debug("__ family: `{}`".format(family))
self.log.debug("__ subset: `{}`".format(subset))
data["family"] = family.lower()
data["families"] = ["ftrack"] + instance.data["families"][1:]
data["source"] = data["sourcePath"]
subset = ""
for tag in instance.data["tags"]:
tag_data = dict(tag["metadata"])
if "tag.subset" in tag_data:
subset = tag_data["tag.subset"]
data["subset"] = data["family"] + subset.title()
data["subset"] = family + subset.title()
data["name"] = data["subset"] + "_" + data["asset"]
data["label"] = "{} - {} - ({})".format(
data['asset'], data["subset"], os.path.splitext(data["sourcePath"])[
1]
)
data['asset'], data["subset"], os.path.splitext(
data["sourcePath"])[1])
if "review" in instance.data["families"]:
data["label"] += " - review"
@ -83,7 +83,7 @@ class CollectPlates(api.InstancePlugin):
class CollectPlatesData(api.InstancePlugin):
"""Collect plates"""
order = api.CollectorOrder + 0.495
order = api.CollectorOrder + 0.48
label = "Collect Plates Data"
hosts = ["nukestudio"]
families = ["plate"]
@ -126,7 +126,7 @@ class CollectPlatesData(api.InstancePlugin):
transfer_data = [
"handleStart", "handleEnd", "sourceIn", "sourceOut", "frameStart",
"frameEnd", "sourceInH", "sourceOutH", "clipIn", "clipOut",
"clipInH", "clipOutH", "asset", "track", "version", "width", "height", "pixelAspect"
"clipInH", "clipOutH", "asset", "track", "version", "resolutionWidth", "resolutionHeight", "pixelAspect", "fps"
]
# pass data to version
@ -146,6 +146,7 @@ class CollectPlatesData(api.InstancePlugin):
head, padding = os.path.splitext(basename)
ext = ext[1:]
padding = padding[1:]
self.log.debug("_ padding: `{}`".format(padding))
# head, padding, ext = source_file.split('.')
source_first_frame = int(padding)
padding = len(padding)

View file

@ -16,7 +16,7 @@ class CollectReviews(api.InstancePlugin):
order = api.CollectorOrder + 0.1022
label = "Collect Reviews"
hosts = ["nukestudio"]
families = ["clip"]
families = ["plate"]
def process(self, instance):
# Exclude non-tagged instances.

View file

@ -10,8 +10,6 @@ class ExtractAudioFile(pype.api.Extractor):
hosts = ["nukestudio"]
families = ["clip", "audio"]
match = api.Intersection
optional = True
active = False
def process(self, instance):
import os

View file

@ -2,6 +2,7 @@
import os
import json
import re
import copy
import pyblish.api
import tempfile
from avalon import io, api
@ -75,9 +76,11 @@ class ExtractVideoTracksLuts(pyblish.api.InstancePlugin):
)
data["source"] = data["sourcePath"]
# WARNING instance should not be created in Extractor!
# create new instance
instance = instance.context.create_instance(**data)
# TODO replace line below with `instance.data["resourcesDir"]`
# when instance is created during collection part
dst_dir = self.resource_destination_dir(instance)
# change paths in effects to files
@ -144,103 +147,114 @@ class ExtractVideoTracksLuts(pyblish.api.InstancePlugin):
return (v, dst)
def resource_destination_dir(self, instance):
anatomy = instance.context.data['anatomy']
self.create_destination_template(instance, anatomy)
# WARNING this is from `collect_instance_anatomy_data.py`
anatomy_data = copy.deepcopy(instance.context.data["anatomyData"])
project_entity = instance.context.data["projectEntity"]
context_asset_entity = instance.context.data["assetEntity"]
return os.path.join(
instance.data["assumedDestination"],
"resources"
)
def create_destination_template(self, instance, anatomy):
"""Create a filepath based on the current data available
Example template:
{root}/{project}/{silo}/{asset}/publish/{subset}/v{version:0>3}/
{subset}.{representation}
Args:
instance: the instance to publish
Returns:
file path (str)
"""
# get all the stuff from the database
subset_name = instance.data["subset"]
self.log.info(subset_name)
asset_name = instance.data["asset"]
project_name = api.Session["AVALON_PROJECT"]
a_template = anatomy.templates
if context_asset_entity["name"] == asset_name:
asset_entity = context_asset_entity
project = io.find_one(
{
"type": "project",
"name": project_name
},
projection={"config": True, "data": True}
)
else:
asset_entity = io.find_one({
"type": "asset",
"name": asset_name,
"parent": project_entity["_id"]
})
template = a_template['publish']['path']
# anatomy = instance.context.data['anatomy']
subset_name = instance.data["subset"]
version_number = instance.data.get("version")
latest_version = None
asset = io.find_one({
"type": "asset",
"name": asset_name,
"parent": project["_id"]
if asset_entity:
subset_entity = io.find_one({
"type": "subset",
"name": subset_name,
"parent": asset_entity["_id"]
})
if subset_entity is None:
self.log.debug("Subset entity does not exist yet.")
else:
version_entity = io.find_one(
{
"type": "version",
"parent": subset_entity["_id"]
},
sort=[("name", -1)]
)
if version_entity:
latest_version = version_entity["name"]
if version_number is None:
version_number = 1
if latest_version is not None:
version_number += int(latest_version)
anatomy_data.update({
"asset": asset_name,
"family": instance.data["family"],
"subset": subset_name,
"version": version_number,
"hierarchy": instance.data["hierarchy"]
})
assert asset, ("No asset found by the name '{}' "
"in project '{}'".format(asset_name, project_name))
silo = asset.get('silo')
resolution_width = instance.data.get("resolutionWidth")
if resolution_width:
anatomy_data["resolution_width"] = resolution_width
subset = io.find_one({
"type": "subset",
"name": subset_name,
"parent": asset["_id"]
resolution_height = instance.data.get("resolutionHeight")
if resolution_height:
anatomy_data["resolution_height"] = resolution_height
pixel_aspect = instance.data.get("pixelAspect")
if pixel_aspect:
anatomy_data["pixel_aspect"] = float("{:0.2f}".format(pixel_aspect))
fps = instance.data.get("fps")
if fps:
anatomy_data["fps"] = float("{:0.2f}".format(fps))
instance.data["projectEntity"] = project_entity
instance.data["assetEntity"] = asset_entity
instance.data["anatomyData"] = anatomy_data
instance.data["latestVersion"] = latest_version
instance.data["version"] = version_number
# WARNING this is from `collect_resources_path.py`
anatomy = instance.context.data["anatomy"]
template_data = copy.deepcopy(instance.data["anatomyData"])
# This is for cases of Deprecated anatomy without `folder`
# TODO remove when all clients have solved this issue
template_data.update({
"frame": "FRAME_TEMP",
"representation": "TEMP"
})
# assume there is no version yet, we start at `1`
version = None
version_number = 1
if subset is not None:
version = io.find_one(
{
"type": "version",
"parent": subset["_id"]
},
sort=[("name", -1)]
)
anatomy_filled = anatomy.format(template_data)
# if there is a subset there ought to be version
if version is not None:
version_number += version["name"]
if "folder" in anatomy.templates["publish"]:
publish_folder = anatomy_filled["publish"]["folder"]
else:
# handle the deprecated situation where `folder` key is not underneath
# `publish` anatomy
project_name = api.Session["AVALON_PROJECT"]
self.log.warning((
"Deprecation warning: Anatomy does not have set `folder`"
" key underneath `publish` (in global of for project `{}`)."
).format(project_name))
if instance.data.get('version'):
version_number = int(instance.data.get('version'))
file_path = anatomy_filled["publish"]["path"]
# Directory
publish_folder = os.path.dirname(file_path)
padding = int(a_template['render']['padding'])
publish_folder = os.path.normpath(publish_folder)
resources_folder = os.path.join(publish_folder, "resources")
hierarchy = asset['data']['parents']
if hierarchy:
# hierarchy = os.path.sep.join(hierarchy)
hierarchy = "/".join(hierarchy)
instance.data["publishDir"] = publish_folder
instance.data["resourcesDir"] = resources_folder
template_data = {"root": api.Session["AVALON_PROJECTS"],
"project": {"name": project_name,
"code": project['data']['code']},
"silo": silo,
"family": instance.data['family'],
"asset": asset_name,
"subset": subset_name,
"frame": ('#' * padding),
"version": version_number,
"hierarchy": hierarchy,
"representation": "TEMP"}
instance.data["assumedTemplateData"] = template_data
self.log.info(template_data)
instance.data["template"] = template
# We take the parent folder of representation 'filepath'
instance.data["assumedDestination"] = os.path.dirname(
anatomy.format(template_data)["publish"]["path"]
)
return resources_folder

View file

@ -4,6 +4,7 @@ import tempfile
import pyblish.api
import clique
import pype.api
import pype.lib
class ExtractReviewSP(pyblish.api.InstancePlugin):
@ -148,12 +149,7 @@ class ExtractReviewSP(pyblish.api.InstancePlugin):
# output filename
output_args.append(full_output_path)
ffmpeg_path = os.getenv("FFMPEG_PATH", "")
if ffmpeg_path:
ffmpeg_path += "/ffmpeg"
else:
ffmpeg_path = "ffmpeg"
ffmpeg_path = pype.lib.get_ffmpeg_tool_path("ffmpeg")
mov_args = [
ffmpeg_path,
" ".join(input_args),

View file

@ -3,6 +3,7 @@ import tempfile
import subprocess
import pyblish.api
import pype.api
import pype.lib
class ExtractThumbnailSP(pyblish.api.InstancePlugin):
@ -73,11 +74,7 @@ class ExtractThumbnailSP(pyblish.api.InstancePlugin):
config_data.get("__default__", {})
)
ffmpeg_path = os.getenv("FFMPEG_PATH", "")
if ffmpeg_path:
ffmpeg_path += "/ffmpeg"
else:
ffmpeg_path = "ffmpeg"
ffmpeg_path = pype.lib.get_ffmpeg_tool_path("ffmpeg")
jpeg_items = []
jpeg_items.append(ffmpeg_path)
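For illustration, `pype.lib.get_ffmpeg_tool_path` centralises the lookup both extractors previously duplicated; a minimal sketch of what such a helper might do, not the actual implementation:

    import os

    def get_ffmpeg_tool_path(tool="ffmpeg"):
        # hypothetical fallback chain: FFMPEG_PATH directory, then PATH
        ffmpeg_dir = os.environ.get("FFMPEG_PATH", "")
        if ffmpeg_dir and os.path.isdir(ffmpeg_dir):
            return os.path.join(ffmpeg_dir, tool)
        return tool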

View file

@ -1,31 +1,42 @@
import os
import datetime
import sys
import re
import subprocess
import json
import opentimelineio_contrib.adapters.ffmpeg_burnins as ffmpeg_burnins
from pypeapp.lib import config
from pype import api as pype
from subprocess import Popen, PIPE
# FFmpeg in PATH is required
from pypeapp import Logger
import pype.lib
log = Logger().get_logger("BurninWrapper", "burninwrap")
log = pype.Logger().get_logger("BurninWrapper", "burninwrap")
ffmpeg_path = pype.lib.get_ffmpeg_tool_path("ffmpeg")
ffprobe_path = pype.lib.get_ffmpeg_tool_path("ffprobe")
ffmpeg_path = os.environ.get("FFMPEG_PATH")
if ffmpeg_path and os.path.exists(ffmpeg_path):
# add separator "/" or "\" to be prepared for next part
ffmpeg_path += os.path.sep
else:
ffmpeg_path = ""
FFMPEG = (
'{} -loglevel panic -i %(input)s %(filters)s %(args)s%(output)s'
).format(os.path.normpath(ffmpeg_path + "ffmpeg"))
).format(ffmpeg_path)
FFPROBE = (
'{} -v quiet -print_format json -show_format -show_streams %(source)s'
).format(os.path.normpath(ffmpeg_path + "ffprobe"))
).format(ffprobe_path)
DRAWTEXT = (
"drawtext=text=\\'%(text)s\\':x=%(x)s:y=%(y)s:fontcolor="
"%(color)s@%(opacity).1f:fontsize=%(size)d:fontfile='%(font)s'"
)
TIMECODE = (
"drawtext=timecode=\\'%(timecode)s\\':text=\\'%(text)s\\'"
":timecode_rate=%(fps).2f:x=%(x)s:y=%(y)s:fontcolor="
"%(color)s@%(opacity).1f:fontsize=%(size)d:fontfile='%(font)s'"
)
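For illustration, the FFMPEG template above expands with hypothetical values like so:

    # FFMPEG is a plain %-format string after .format(ffmpeg_path)
    FFMPEG % {
        "input": "input.mov",
        "filters": '-vf "drawtext=..."',  # assembled from DRAWTEXT/TIMECODE
        "args": "-codec copy ",
        "output": "output.mov",
    }
    # -> ffmpeg -loglevel panic -i input.mov -vf "drawtext=..." -codec copy output.mov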
MISSING_KEY_VALUE = "N/A"
CURRENT_FRAME_KEY = "{current_frame}"
CURRENT_FRAME_SPLITTER = "_-_CURRENT_FRAME_-_"
TIME_CODE_KEY = "{timecode}"
def _streams(source):
@ -120,82 +131,69 @@ class ModifiedBurnins(ffmpeg_burnins.Burnins):
if options_init:
self.options_init.update(options_init)
def add_text(self, text, align, options=None):
def add_text(
self, text, align, frame_start=None, frame_end=None, options=None
):
"""
Adding static text to a filter.
:param str text: text to apply to the drawtext
:param enum align: alignment, must use provided enum flags
:param int frame_start: starting frame for the burnin's current frame
:param dict options: recommended to use TextOptions
"""
if not options:
options = ffmpeg_burnins.TextOptions(**self.options_init)
self._add_burnin(text, align, options, ffmpeg_burnins.DRAWTEXT)
def add_datetime(self, date_format, align, options=None):
"""
Adding date text to a filter. Using pythons datetime module.
options = options.copy()
if frame_start:
options["frame_offset"] = frame_start
:param str date_format: format of date (e.g. `%d.%m.%Y`)
:param enum align: alignment, must use provided enum flags
:param dict options: recommended to use TextOptions
"""
if not options:
options = ffmpeg_burnins.TextOptions(**self.options_init)
today = datetime.datetime.today()
text = today.strftime(date_format)
self._add_burnin(text, align, options, ffmpeg_burnins.DRAWTEXT)
# `frame_end` is only for measurement of text position
if frame_end:
options["frame_end"] = frame_end
def add_frame_numbers(
self, align, options=None, start_frame=None, text=None
self._add_burnin(text, align, options, DRAWTEXT)
def add_timecode(
self, align, frame_start=None, frame_end=None, frame_start_tc=None,
text=None, options=None
):
"""
Convenience method to create the frame number expression.
:param enum align: alignment, must use provided enum flags
:param dict options: recommended to use FrameNumberOptions
"""
if not options:
options = ffmpeg_burnins.FrameNumberOptions(**self.options_init)
if start_frame:
options['frame_offset'] = start_frame
expr = r'%%{eif\:n+%d\:d}' % options['frame_offset']
_text = str(int(self.end_frame + options['frame_offset']))
if text and isinstance(text, str):
text = r"{}".format(text)
expr = text.replace("{current_frame}", expr)
text = text.replace("{current_frame}", _text)
options['expression'] = expr
self._add_burnin(text, align, options, ffmpeg_burnins.DRAWTEXT)
def add_timecode(self, align, options=None, start_frame=None):
"""
Convenience method to create the frame number expression.
:param enum align: alignment, must use provided enum flags
:param int frame_start: starting frame for the burnin's current frame
:param int frame_start_tc: starting frame for burnins timecode
:param str text: text that will be before timecode
:param dict options: recommended to use TimeCodeOptions
"""
if not options:
options = ffmpeg_burnins.TimeCodeOptions(**self.options_init)
if start_frame:
options['frame_offset'] = start_frame
timecode = ffmpeg_burnins._frames_to_timecode(
options['frame_offset'],
options = options.copy()
if frame_start:
options["frame_offset"] = frame_start
# `frame_end` is only for measurement of text position
if frame_end:
options["frame_end"] = frame_end
if not frame_start_tc:
frame_start_tc = options["frame_offset"]
if not text:
text = ""
if not options.get("fps"):
options["fps"] = self.frame_rate
options["timecode"] = ffmpeg_burnins._frames_to_timecode(
frame_start_tc,
self.frame_rate
)
options = options.copy()
if not options.get('fps'):
options['fps'] = self.frame_rate
self._add_burnin(
timecode.replace(':', r'\:'),
align,
options,
ffmpeg_burnins.TIMECODE
)
self._add_burnin(text, align, options, TIMECODE)
def _add_burnin(self, text, align, options, draw):
"""
@ -204,14 +202,43 @@ class ModifiedBurnins(ffmpeg_burnins.Burnins):
:param enum align: alignment, must use provided enum flags
:param dict options:
"""
final_text = text
text_for_size = text
if CURRENT_FRAME_SPLITTER in text:
frame_start = options["frame_offset"]
frame_end = options.get("frame_end", frame_start)
if not frame_start:
replacement_final = replacement_size = str(MISSING_KEY_VALUE)
else:
replacement_final = "\\'{}\\'".format(
r'%%{eif\:n+%d\:d}' % frame_start
)
replacement_size = str(frame_end)
final_text = final_text.replace(
CURRENT_FRAME_SPLITTER, replacement_final
)
text_for_size = text_for_size.replace(
CURRENT_FRAME_SPLITTER, replacement_size
)
resolution = self.resolution
data = {
'text': options.get('expression') or text,
'text': (
final_text
.replace(",", r"\,")
.replace(':', r'\:')
),
'color': options['font_color'],
'size': options['font_size']
}
timecode_text = options.get("timecode") or ""
text_for_size += timecode_text
data.update(options)
data.update(ffmpeg_burnins._drawtext(align, resolution, text, options))
data.update(
ffmpeg_burnins._drawtext(align, resolution, text_for_size, options)
)
if 'font' in data and ffmpeg_burnins._is_windows():
data['font'] = data['font'].replace(os.sep, r'\\' + os.sep)
data['font'] = data['font'].replace(':', r'\:')
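A worked sketch of the splitter replacement with hypothetical frame values:

    # the final text carries an ffmpeg eif expression; the sizing text uses
    # the widest literal frame number so the box is measured at its maximum
    frame_start, frame_end = 1001, 1100
    replacement_final = "\\'{}\\'".format(r'%%{eif\:n+%d\:d}' % frame_start)
    replacement_size = str(frame_end)  # "1100"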
@ -264,10 +291,14 @@ class ModifiedBurnins(ffmpeg_burnins.Burnins):
is_sequence = "%" in output
command = self.command(output=output,
args=args,
overwrite=overwrite)
proc = Popen(command, shell=True)
command = self.command(
output=output,
args=args,
overwrite=overwrite
)
print(command)
proc = subprocess.Popen(command, shell=True)
proc.communicate()
if proc.returncode != 0:
raise RuntimeError("Failed to render '%s': %s'"
@ -295,15 +326,13 @@ def example(input_path, output_path):
burnin.add_text('My Text', ModifiedBurnins.TOP_CENTERED)
# Datetime
burnin.add_text('%d-%m-%y', ModifiedBurnins.TOP_RIGHT)
# Frame number
burnin.add_frame_numbers(ModifiedBurnins.TOP_RIGHT, start_frame=start_frame)
# Timecode
burnin.add_timecode(ModifiedBurnins.TOP_LEFT, start_frame=start_frame)
# Start render (overwrite output file if exist)
burnin.render(output_path, overwrite=True)
def burnins_from_data(input_path, codec_data, output_path, data, overwrite=True):
def burnins_from_data(
input_path, output_path, data, codec_data=None, overwrite=True
):
'''
This method adds burnins to a video/image file based on preset settings.
Extension of output MUST be same as input. (mov -> mov, avi -> avi,...)
@ -327,47 +356,35 @@ def burnins_from_data(input_path, codec_data, output_path, data, overwrite=True)
- each key of "burnins" represents Alignment, there are 6 possibilities:
TOP_LEFT TOP_CENTERED TOP_RIGHT
BOTTOM_LEFT BOTTOM_CENTERED BOTTOM_RIGHT
- value for each key is dict which should contain "function" which says
what kind of burnin is that:
"text", "timecode" or "frame_numbers"
- "text" key with content is also required when "text" function is used
- value must be string with text you want to burn-in
- text may contain specific formatting keys (explained below)
Requirement of *data* keys is based on presets.
- "start_frame" - is required when "timecode" or "frame_numbers" function is used
- "start_frame_tc" - when "timecode" should start with different frame
- "frame_start" - is required when "timecode" or "current_frame" ins keys
- "frame_start_tc" - when "timecode" should start with different frame
- *keys for static text*
EXAMPLE:
preset = {
"options": {*OPTIONS FOR LOOK*},
"burnins": {
"TOP_LEFT": {
"function": "text",
"text": "static_text"
},
"TOP_RIGHT": {
"function": "text",
"text": "{shot}"
},
"BOTTOM_LEFT": {
"function": "timecode"
},
"BOTTOM_RIGHT": {
"function": "frame_numbers"
}
"TOP_LEFT": "static_text",
"TOP_RIGHT": "{shot}",
"BOTTOM_LEFT": "TC: {timecode}",
"BOTTOM_RIGHT": "{frame_start}{current_frame}"
}
}
For this preset we'll need at least this data:
data = {
"start_frame": 1001,
"frame_start": 1001,
"shot": "sh0010"
}
When the timecode should start from 1, the data needs:
data = {
"start_frame": 1001,
"start_frame_tc": 1,
"frame_start": 1001,
"frame_start_tc": 1,
"shot": "sh0010"
}
'''
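A hypothetical call matching the new signature:

    burnins_from_data(
        "/path/to/input.mov",
        "/path/to/output.mov",
        {"frame_start": 1001, "frame_start_tc": 1, "shot": "sh0010"},
        codec_data=["-codec", "prores_ks"],  # optional extra ffmpeg args
    )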
@ -377,104 +394,102 @@ def burnins_from_data(input_path, codec_data, output_path, data, overwrite=True)
burnin = ModifiedBurnins(input_path, options_init=options_init)
frame_start = data.get("frame_start")
frame_end = data.get("frame_end")
frame_start_tc = data.get('frame_start_tc', frame_start)
stream = burnin._streams[0]
if "resolution_width" not in data:
data["resolution_width"] = stream.get("width", "Unknown")
data["resolution_width"] = stream.get("width", MISSING_KEY_VALUE)
if "resolution_height" not in data:
data["resolution_height"] = stream.get("height", "Unknown")
data["resolution_height"] = stream.get("height", MISSING_KEY_VALUE)
if "fps" not in data:
data["fps"] = get_fps(stream.get("r_frame_rate", "0/0"))
for align_text, preset in presets.get('burnins', {}).items():
# Check frame start and add expression if is available
if frame_start is not None:
data[CURRENT_FRAME_KEY[1:-1]] = CURRENT_FRAME_SPLITTER
if frame_start_tc is not None:
data[TIME_CODE_KEY[1:-1]] = TIME_CODE_KEY
for align_text, value in presets.get('burnins', {}).items():
if not value:
continue
if isinstance(value, (dict, list, tuple)):
raise TypeError((
"Expected string or number type."
" Got: {} - \"{}\""
" (Make sure you have new burnin presets)."
).format(str(type(value)), str(value)))
has_timecode = TIME_CODE_KEY in value
align = None
if align_text == 'TOP_LEFT':
align_text = align_text.strip().lower()
if align_text == "top_left":
align = ModifiedBurnins.TOP_LEFT
elif align_text == 'TOP_CENTERED':
elif align_text == "top_centered":
align = ModifiedBurnins.TOP_CENTERED
elif align_text == 'TOP_RIGHT':
elif align_text == "top_right":
align = ModifiedBurnins.TOP_RIGHT
elif align_text == 'BOTTOM_LEFT':
elif align_text == "bottom_left":
align = ModifiedBurnins.BOTTOM_LEFT
elif align_text == 'BOTTOM_CENTERED':
elif align_text == "bottom_centered":
align = ModifiedBurnins.BOTTOM_CENTERED
elif align_text == 'BOTTOM_RIGHT':
elif align_text == "bottom_right":
align = ModifiedBurnins.BOTTOM_RIGHT
bi_func = preset.get('function')
if not bi_func:
log.error(
'Missing function for burnin!'
'Burnins are not created!'
# Replace with missing key value if frame_start_tc is not set
if frame_start_tc is None and has_timecode:
has_timecode = False
log.warning(
"`frame_start` and `frame_start_tc`"
" are not set in entered data."
)
return
value = value.replace(TIME_CODE_KEY, MISSING_KEY_VALUE)
if (
bi_func in ['frame_numbers', 'timecode'] and
frame_start is None
):
log.error(
'start_frame is not set in entered data!'
'Burnins are not created!'
)
return
key_pattern = re.compile(r"(\{.*?[^{0]*\})")
if bi_func == 'frame_numbers':
current_frame_identifier = "{current_frame}"
text = preset.get('text') or current_frame_identifier
missing_keys = []
for group in key_pattern.findall(value):
try:
group.format(**data)
except (TypeError, KeyError):
missing_keys.append(group)
if current_frame_identifier not in text:
log.warning((
'Text for Frame numbers don\'t have '
'`{current_frame}` key in text!'
))
missing_keys = list(set(missing_keys))
for key in missing_keys:
value = value.replace(key, MISSING_KEY_VALUE)
text_items = []
split_items = text.split(current_frame_identifier)
for item in split_items:
text_items.append(item.format(**data))
# Handle timecode differently
if has_timecode:
args = [align, frame_start, frame_end, frame_start_tc]
if not value.startswith(TIME_CODE_KEY):
value_items = value.split(TIME_CODE_KEY)
text = value_items[0].format(**data)
args.append(text)
text = "{current_frame}".join(text_items)
burnin.add_timecode(*args)
continue
burnin.add_frame_numbers(align, start_frame=frame_start, text=text)
text = value.format(**data)
burnin.add_text(text, align, frame_start, frame_end)
elif bi_func == 'timecode':
burnin.add_timecode(align, start_frame=frame_start_tc)
elif bi_func == 'text':
if not preset.get('text'):
log.error('Text is not set for text function burnin!')
return
text = preset['text'].format(**data)
burnin.add_text(text, align)
elif bi_func == "datetime":
date_format = preset["format"]
burnin.add_datetime(date_format, align)
else:
log.error(
'Unknown function for burnins {}'.format(bi_func)
)
return
codec_args = ''
if codec_data is not []:
codec_args = ""
if codec_data:
codec_args = " ".join(codec_data)
burnin.render(output_path, args=codec_args, overwrite=overwrite, **data)
if __name__ == '__main__':
import sys
import json
data = json.loads(sys.argv[-1])
in_data = json.loads(sys.argv[-1])
burnins_from_data(
data['input'],
data['codec'],
data['output'],
data['burnin_data']
in_data['input'],
in_data['output'],
in_data['burnin_data'],
in_data['codec']
)
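So a hypothetical command-line invocation passes a single JSON argument (script name illustrative):

    python otio_burnin.py '{"input": "in.mov", "output": "out.mov", "burnin_data": {"frame_start": 1001, "shot": "sh0010"}, "codec": []}'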

View file

@ -6,6 +6,7 @@ import argparse
import logging
import subprocess
import platform
import json
try:
from shutil import which
@ -24,6 +25,18 @@ log.setLevel(logging.DEBUG)
error_format = "Failed {plugin.__name__}: {error} -- {error.traceback}"
def _load_json(path):
assert os.path.isfile(path), ("path to json file doesn't exist")
data = None
with open(path, "r") as json_file:
try:
data = json.load(json_file)
except Exception as exc:
log.error(
"Error loading json: "
"{} - Exception: {}".format(path, exc)
)
return data
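For context, the metadata file read below is expected to carry at least a `session` mapping; a hypothetical minimal payload:

    # hypothetical contents of PYPE_METADATA_FILE (JSON on disk):
    # {"session": {"AVALON_PROJECT": "my_project"}}
    data = _load_json("/tmp/pype_metadata.json")
    print(data["session"]["AVALON_PROJECT"])  # "my_project"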
def __main__():
parser = argparse.ArgumentParser()
@ -77,6 +90,12 @@ def __main__():
paths = kwargs.paths or [os.environ.get("PYPE_METADATA_FILE")] or [os.getcwd()] # noqa
for path in paths:
data = _load_json(path)
log.info("Setting session using data from file")
os.environ["AVALON_PROJECT"] = data["session"]["AVALON_PROJECT"]
break
args = [
os.path.join(pype_root, pype_command),
"publish",

View file

@ -1,3 +1,5 @@
PUBLISH_PATHS = []
from .standalonepublish_module import StandAlonePublishModule
from .app import (
show,

View file

@ -5,14 +5,14 @@ import tempfile
import random
import string
from avalon import io
from avalon import api as avalon
from avalon import io, api
from avalon.tools import publish as av_publish
import pype
from pypeapp import execute
import pyblish.api
from . import PUBLISH_PATHS
def set_context(project, asset, task, app):
@ -31,7 +31,6 @@ def set_context(project, asset, task, app):
os.environ["AVALON_TASK"] = task
io.Session["AVALON_TASK"] = task
io.install()
av_project = io.find_one({'type': 'project'})
@ -76,7 +75,7 @@ def avalon_api_publish(data, gui=True):
io.install()
# Create hash name folder in temp
chars = "".join( [random.choice(string.ascii_letters) for i in range(15)] )
chars = "".join([random.choice(string.ascii_letters) for i in range(15)])
staging_dir = tempfile.mkdtemp(chars)
# create also json and fill with data
@ -105,8 +104,27 @@ def avalon_api_publish(data, gui=True):
def cli_publish(data, gui=True):
io.install()
pyblish.api.deregister_all_plugins()
# Registers Global pyblish plugins
pype.install()
# Registers Standalone pyblish plugins
for path in PUBLISH_PATHS:
pyblish.api.register_plugin_path(path)
project_plugins_paths = os.environ.get("PYPE_PROJECT_PLUGINS")
project_name = os.environ["AVALON_PROJECT"]
if project_plugins_paths and project_name:
for path in project_plugins_paths.split(os.pathsep):
if not path:
continue
plugin_path = os.path.join(path, project_name, "plugins")
if os.path.exists(plugin_path):
pyblish.api.register_plugin_path(plugin_path)
api.register_plugin_path(api.Loader, plugin_path)
api.register_plugin_path(api.Creator, plugin_path)
# Create hash name folder in temp
chars = "".join( [random.choice(string.ascii_letters) for i in range(15)] )
chars = "".join([random.choice(string.ascii_letters) for i in range(15)])
staging_dir = tempfile.mkdtemp(chars)
# create json for return data

Some files were not shown because too many files have changed in this diff.