Merge branch 'develop' into feature/PYPE-95-nks-load-subset-to-timeline

This commit is contained in:
Jakub Jezek 2020-02-20 17:02:07 +01:00
commit 28b4b1e16e
No known key found for this signature in database
GPG key ID: C4B96E101D2A47F3
50 changed files with 3326 additions and 2236 deletions

View file

@ -0,0 +1,484 @@
import os
import collections
import uuid
import clique
from pymongo import UpdateOne
from pype.ftrack import BaseAction
from pype.ftrack.lib.io_nonsingleton import DbConnector
import avalon.pipeline
class DeleteOldVersions(BaseAction):
identifier = "delete.old.versions"
label = "Pype Admin"
variant = "- Delete old versions"
description = (
"Delete files from older publishes so project can be"
" archived with only lates versions."
)
role_list = ["Pypeclub", "Project Manager", "Administrator"]
icon = '{}/ftrack/action_icons/PypeAdmin.svg'.format(
os.environ.get('PYPE_STATICS_SERVER', '')
)
dbcon = DbConnector()
inteface_title = "Choose your preferences"
splitter_item = {"type": "label", "value": "---"}
sequence_splitter = "__sequence_splitter__"
def discover(self, session, entities, event):
''' Validation '''
selection = event["data"].get("selection") or []
for entity in selection:
entity_type = (entity.get("entityType") or "").lower()
if entity_type == "assetversion":
return True
return False
def interface(self, session, entities, event):
items = []
root = os.environ.get("AVALON_PROJECTS")
if not root:
msg = "Root path to projects is not set."
items.append({
"type": "label",
"value": "<i><b>ERROR:</b> {}</i>".format(msg)
})
self.show_interface(
items=items, title=self.interface_title, event=event
)
return {
"success": False,
"message": msg
}
if not os.path.exists(root):
msg = "Root path does not exists \"{}\".".format(str(root))
items.append({
"type": "label",
"value": "<i><b>ERROR:</b> {}</i>".format(msg)
})
self.show_interface(
items=items, title=self.interface_title, event=event
)
return {
"success": False,
"message": msg
}
values = event["data"].get("values")
if values:
versions_count = int(values["last_versions_count"])
if versions_count >= 1:
return
items.append({
"type": "label",
"value": (
"# You have to keep at least 1 version!"
)
})
items.append({
"type": "label",
"value": (
"<i><b>WARNING:</b> This will remove published files of older"
" versions from disk so we don't recommend use"
" this action on \"live\" project.</i>"
)
})
items.append(self.splitter_item)
# How many versions to keep
items.append({
"type": "label",
"value": "## Choose how many versions you want to keep:"
})
items.append({
"type": "label",
"value": (
"<i><b>NOTE:</b> We do recommend to keep 2 versions.</i>"
)
})
items.append({
"type": "number",
"name": "last_versions_count",
"label": "Versions",
"value": 2
})
items.append(self.splitter_item)
items.append({
"type": "label",
"value": (
"## Remove publish folder even if there"
" are other than published files:"
)
})
items.append({
"type": "label",
"value": (
"<i><b>WARNING:</b> This may remove more than you want.</i>"
)
})
items.append({
"type": "boolean",
"name": "force_delete_publish_folder",
"label": "Are You sure?",
"value": False
})
return {
"items": items,
"title": self.inteface_title
}
def launch(self, session, entities, event):
values = event["data"].get("values")
if not values:
return
versions_count = int(values["last_versions_count"])
force_to_remove = values["force_delete_publish_folder"]
_val1 = "OFF"
if force_to_remove:
_val1 = "ON"
_val3 = "s"
if versions_count == 1:
_val3 = ""
self.log.debug((
"Process started. Force-delete of publish folder is set to [{0}]"
" and the {1} latest version{2} will be kept."
).format(_val1, versions_count, _val3))
self.dbcon.install()
project = None
avalon_asset_names = []
asset_versions_by_parent_id = collections.defaultdict(list)
subset_names_by_asset_name = collections.defaultdict(list)
for entity in entities:
parent_ent = entity["asset"]["parent"]
parent_ftrack_id = parent_ent["id"]
parent_name = parent_ent["name"]
if parent_name not in avalon_asset_names:
avalon_asset_names.append(parent_name)
# Group asset versions by parent entity
asset_versions_by_parent_id[parent_ftrack_id].append(entity)
# Get project
if project is None:
project = parent_ent["project"]
# Collect subset names per asset
subset_name = entity["asset"]["name"]
subset_names_by_asset_name[parent_name].append(subset_name)
# Set Mongo collection
project_name = project["full_name"]
self.dbcon.Session["AVALON_PROJECT"] = project_name
self.log.debug("Project is set to {}".format(project_name))
# Get Assets from avalon database
assets = list(self.dbcon.find({
"type": "asset",
"name": {"$in": avalon_asset_names}
}))
asset_id_to_name_map = {
asset["_id"]: asset["name"] for asset in assets
}
asset_ids = list(asset_id_to_name_map.keys())
self.log.debug("Collected assets ({})".format(len(asset_ids)))
# Get Subsets
subsets = list(self.dbcon.find({
"type": "subset",
"parent": {"$in": asset_ids}
}))
subsets_by_id = {}
subset_ids = []
for subset in subsets:
asset_id = subset["parent"]
asset_name = asset_id_to_name_map[asset_id]
available_subsets = subset_names_by_asset_name[asset_name]
if subset["name"] not in available_subsets:
continue
subset_ids.append(subset["_id"])
subsets_by_id[subset["_id"]] = subset
self.log.debug("Collected subsets ({})".format(len(subset_ids)))
# Get Versions
versions = list(self.dbcon.find({
"type": "version",
"parent": {"$in": subset_ids}
}))
versions_by_parent = collections.defaultdict(list)
for ent in versions:
versions_by_parent[ent["parent"]].append(ent)
def sort_func(ent):
return int(ent["name"])
last_versions_by_parent = collections.defaultdict(list)
all_last_versions = []
for parent_id, _versions in versions_by_parent.items():
for idx, version in enumerate(
sorted(_versions, key=sort_func, reverse=True)
):
if idx >= versions_count:
break
last_versions_by_parent[parent_id].append(version)
all_last_versions.append(version)
self.log.debug("Collected versions ({})".format(len(versions)))
# Filter latest versions
for version in all_last_versions:
versions.remove(version)
# Filter already deleted versions
versions_to_pop = []
for version in versions:
version_tags = version["data"].get("tags")
if version_tags and "deleted" in version_tags:
versions_to_pop.append(version)
for version in versions_to_pop:
subset = subsets_by_id[version["parent"]]
asset_id = subset["parent"]
asset_name = asset_id_to_name_map[asset_id]
msg = "Asset: \"{}\" | Subset: \"{}\" | Version: \"{}\"".format(
asset_name, subset["name"], version["name"]
)
self.log.warning((
"Skipping version. Already tagged as `deleted`. < {} >"
).format(msg))
versions.remove(version)
version_ids = [ent["_id"] for ent in versions]
self.log.debug(
"Filtered versions to delete ({})".format(len(version_ids))
)
if not version_ids:
msg = "Skipping processing. Nothing to delete."
self.log.debug(msg)
return {
"success": True,
"message": msg
}
repres = list(self.dbcon.find({
"type": "representation",
"parent": {"$in": version_ids}
}))
self.log.debug(
"Collected representations to remove ({})".format(len(repres))
)
dir_paths = {}
file_paths_by_dir = collections.defaultdict(list)
for repre in repres:
file_path, seq_path = self.path_from_representation(repre)
if file_path is None:
self.log.warning((
"Could not format path for represenation \"{}\""
).format(str(repre)))
continue
dir_path = os.path.dirname(file_path)
dir_id = None
for _dir_id, _dir_path in dir_paths.items():
if _dir_path == dir_path:
dir_id = _dir_id
break
if dir_id is None:
dir_id = uuid.uuid4()
dir_paths[dir_id] = dir_path
file_paths_by_dir[dir_id].append([file_path, seq_path])
dir_ids_to_pop = []
for dir_id, dir_path in dir_paths.items():
if os.path.exists(dir_path):
continue
dir_ids_to_pop.append(dir_id)
# Pop dirs from both dictionaries
for dir_id in dir_ids_to_pop:
dir_paths.pop(dir_id)
paths = file_paths_by_dir.pop(dir_id)
# TODO report of missing directories?
paths_msg = ", ".join([
"'{}'".format(path[0].replace("\\", "/")) for path in paths
])
self.log.warning((
"Folder does not exist. Deleting it's files skipped: {}"
).format(paths_msg))
if force_to_remove:
self.delete_whole_dir_paths(dir_paths.values())
else:
self.delete_only_repre_files(dir_paths, file_paths_by_dir)
mongo_changes_bulk = []
for version in versions:
orig_version_tags = version["data"].get("tags") or []
version_tags = [tag for tag in orig_version_tags]
if "deleted" not in version_tags:
version_tags.append("deleted")
if version_tags == orig_version_tags:
continue
update_query = {"_id": version["_id"]}
update_data = {"$set": {"data.tags": version_tags}}
mongo_changes_bulk.append(UpdateOne(update_query, update_data))
if mongo_changes_bulk:
self.dbcon.bulk_write(mongo_changes_bulk)
self.dbcon.uninstall()
return True
def delete_whole_dir_paths(self, dir_paths):
for dir_path in dir_paths:
# Delete all files and folders in dir path
for root, dirs, files in os.walk(dir_path, topdown=False):
for name in files:
os.remove(os.path.join(root, name))
for name in dirs:
os.rmdir(os.path.join(root, name))
# Delete the folder and its parent folders if they are empty
while True:
if not os.path.exists(dir_path):
dir_path = os.path.dirname(dir_path)
continue
if len(os.listdir(dir_path)) != 0:
break
os.rmdir(dir_path)
def delete_only_repre_files(self, dir_paths, file_paths):
for dir_id, dir_path in dir_paths.items():
dir_files = os.listdir(dir_path)
collections, remainders = clique.assemble(dir_files)
for file_path, seq_path in file_paths[dir_id]:
file_path_base = os.path.split(file_path)[1]
# Just remove file if `frame` key was not in context or
# filled path is in remainders (single file sequence)
if not seq_path or file_path_base in remainders:
if not os.path.exists(file_path):
self.log.warning(
"File was not found: {}".format(file_path)
)
continue
os.remove(file_path)
self.log.debug("Removed file: {}".format(file_path))
remainders.remove(file_path_base)
continue
seq_path_base = os.path.split(seq_path)[1]
head, tail = seq_path_base.split(self.sequence_splitter)
final_col = None
for collection in collections:
if head != collection.head or tail != collection.tail:
continue
final_col = collection
break
if final_col is not None:
# Fill full path to head
final_col.head = os.path.join(dir_path, final_col.head)
for _file_path in final_col:
if os.path.exists(_file_path):
os.remove(_file_path)
_seq_path = final_col.format("{head}{padding}{tail}")
self.log.debug("Removed files: {}".format(_seq_path))
collections.remove(final_col)
elif os.path.exists(file_path):
os.remove(file_path)
self.log.debug("Removed file: {}".format(file_path))
else:
self.log.warning(
"File was not found: {}".format(file_path)
)
# Delete as many parent folders as possible
for dir_path in dir_paths.values():
while True:
if not os.path.exists(dir_path):
dir_path = os.path.dirname(dir_path)
continue
if len(os.listdir(dir_path)) != 0:
break
self.log.debug("Removed folder: {}".format(dir_path))
os.rmdir(dir_path)
def path_from_representation(self, representation):
try:
template = representation["data"]["template"]
except KeyError:
return (None, None)
root = os.environ["AVALON_PROJECTS"]
if not root:
return (None, None)
sequence_path = None
try:
context = representation["context"]
context["root"] = root
path = avalon.pipeline.format_template_with_optional_keys(
context, template
)
if "frame" in context:
context["frame"] = self.sequence_splitter
sequence_path = os.path.normpath(
avalon.pipeline.format_template_with_optional_keys(
context, template
)
)
except KeyError:
# Template references unavailable data
return (None, None)
return (os.path.normpath(path), sequence_path)
def register(session, plugins_presets={}):
'''Register plugin. Called when used as a plugin.'''
DeleteOldVersions(session, plugins_presets).register()
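`delete_only_repre_files` above leans on `clique.assemble` to split a directory listing into frame sequences (`collections`) and loose files (`remainders`), then matches a representation's sequence head/tail against them. A minimal sketch of that grouping, with illustrative file names:

import clique

# One zero-padded frame sequence and one loose file
dir_files = [
    "render.0001.exr", "render.0002.exr", "render.0003.exr", "preview.mov"
]
collections, remainders = clique.assemble(dir_files)

for collection in collections:
    # head/tail are what the action compares against the repre path
    print(collection.head, collection.tail)  # "render." ".exr"
    for item in collection:
        print(item)  # each member file name
print(remainders)  # ["preview.mov"]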

View file

@ -0,0 +1,350 @@
import os
import requests
import errno
import json
from bson.objectid import ObjectId
from pype.ftrack import BaseAction
from pype.ftrack.lib import (
get_project_from_entity,
get_avalon_entities_for_assetversion
)
from pypeapp import Anatomy
from pype.ftrack.lib.io_nonsingleton import DbConnector
class StoreThumbnailsToAvalon(BaseAction):
# Action identifier
identifier = "store.thubmnail.to.avalon"
# Action label
label = "Pype Admin"
# Action variant
variant = "- Store Thumbnails to avalon"
# Action description
description = "Store AssetVersion thumbnails to avalon"
# roles that are allowed to register this action
role_list = ["Pypeclub", "Administrator", "Project Manager"]
icon = '{}/ftrack/action_icons/PypeAdmin.svg'.format(
os.environ.get('PYPE_STATICS_SERVER', '')
)
thumbnail_key = "AVALON_THUMBNAIL_ROOT"
db_con = DbConnector()
def discover(self, session, entities, event):
for entity in entities:
if entity.entity_type.lower() == "assetversion":
return True
return False
def launch(self, session, entities, event):
# DEBUG LINE
# root_path = r"C:\Users\jakub.trllo\Desktop\Tests\ftrack_thumbnails"
user = session.query(
"User where username is '{0}'".format(session.api_user)
).one()
action_job = session.create("Job", {
"user": user,
"status": "running",
"data": json.dumps({
"description": "Storing thumbnails to avalon."
})
})
session.commit()
thumbnail_roots = os.environ.get(self.thumbnail_key)
if not thumbnail_roots:
msg = "`{}` environment is not set".format(self.thumbnail_key)
action_job["status"] = "failed"
session.commit()
self.log.warning(msg)
return {
"success": False,
"message": msg
}
existing_thumbnail_root = None
for path in thumbnail_roots.split(os.pathsep):
if os.path.exists(path):
existing_thumbnail_root = path
break
if existing_thumbnail_root is None:
msg = (
"Can't access paths, set in `{}` ({})"
).format(self.thumbnail_key, thumbnail_roots)
action_job["status"] = "failed"
session.commit()
self.log.warning(msg)
return {
"success": False,
"message": msg
}
project = get_project_from_entity(entities[0])
project_name = project["full_name"]
anatomy = Anatomy(project_name)
if "publish" not in anatomy.templates:
msg = "Anatomy does not have set publish key!"
action_job["status"] = "failed"
session.commit()
self.log.warning(msg)
return {
"success": False,
"message": msg
}
if "thumbnail" not in anatomy.templates["publish"]:
msg = (
"There is not set \"thumbnail\""
" template in Antomy for project \"{}\""
).format(project_name)
action_job["status"] = "failed"
session.commit()
self.log.warning(msg)
return {
"success": False,
"message": msg
}
example_template_data = {
"_id": "ID",
"thumbnail_root": "THUBMNAIL_ROOT",
"thumbnail_type": "THUMBNAIL_TYPE",
"ext": ".EXT",
"project": {
"name": "PROJECT_NAME",
"code": "PROJECT_CODE"
},
"asset": "ASSET_NAME",
"subset": "SUBSET_NAME",
"version": "VERSION_NAME",
"hierarchy": "HIERARCHY"
}
tmp_filled = anatomy.format_all(example_template_data)
thumbnail_result = tmp_filled["publish"]["thumbnail"]
if not thumbnail_result.solved:
missing_keys = thumbnail_result.missing_keys
invalid_types = thumbnail_result.invalid_types
submsg = ""
if missing_keys:
submsg += "Missing keys: {}".format(", ".join(
["\"{}\"".format(key) for key in missing_keys]
))
if invalid_types:
items = []
for key, value in invalid_types.items():
items.append("{}{}".format(str(key), str(value)))
submsg += "Invalid types: {}".format(", ".join(items))
msg = (
"Thumbnail Anatomy template expects more keys than action"
" can offer. {}"
).format(submsg)
action_job["status"] = "failed"
session.commit()
self.log.warning(msg)
return {
"success": False,
"message": msg
}
thumbnail_template = anatomy.templates["publish"]["thumbnail"]
self.db_con.install()
for entity in entities:
# Skip if entity is not AssetVersion (should never happen, but..)
if entity.entity_type.lower() != "assetversion":
continue
# Skip if AssetVersion doesn't have a thumbnail
thumbnail_ent = entity["thumbnail"]
if thumbnail_ent is None:
self.log.debug((
"Skipping. AssetVersion don't "
"have set thumbnail. {}"
).format(entity["id"]))
continue
avalon_ents_result = get_avalon_entities_for_assetversion(
entity, self.db_con
)
version_full_path = (
"Asset: \"{project_name}/{asset_path}\""
" | Subset: \"{subset_name}\""
" | Version: \"{version_name}\""
).format(**avalon_ents_result)
version = avalon_ents_result["version"]
if not version:
self.log.warning((
"AssetVersion does not have version in avalon. {}"
).format(version_full_path))
continue
thumbnail_id = version["data"].get("thumbnail_id")
if thumbnail_id:
self.log.info((
"AssetVersion skipped, already has thubmanil set. {}"
).format(version_full_path))
continue
# Get thumbnail extension
file_ext = thumbnail_ent["file_type"]
if not file_ext.startswith("."):
file_ext = ".{}".format(file_ext)
avalon_project = avalon_ents_result["project"]
avalon_asset = avalon_ents_result["asset"]
hierarchy = ""
parents = avalon_asset["data"].get("parents") or []
if parents:
hierarchy = "/".join(parents)
# Prepare anatomy template fill data
# 1. Create new id for thumbnail entity
thumbnail_id = ObjectId()
template_data = {
"_id": str(thumbnail_id),
"thumbnail_root": existing_thumbnail_root,
"thumbnail_type": "thumbnail",
"ext": file_ext,
"project": {
"name": avalon_project["name"],
"code": avalon_project["data"].get("code")
},
"asset": avalon_ents_result["asset_name"],
"subset": avalon_ents_result["subset_name"],
"version": avalon_ents_result["version_name"],
"hierarchy": hierarchy
}
anatomy_filled = anatomy.format(template_data)
thumbnail_path = anatomy_filled["publish"]["thumbnail"]
thumbnail_path = thumbnail_path.replace("..", ".")
thumbnail_path = os.path.normpath(thumbnail_path)
downloaded = False
for loc in (thumbnail_ent.get("component_locations") or []):
res_id = loc.get("resource_identifier")
if not res_id:
continue
thumbnail_url = self.get_thumbnail_url(res_id)
if self.download_file(thumbnail_url, thumbnail_path):
downloaded = True
break
if not downloaded:
self.log.warning(
"Could not download thumbnail for {}".format(
version_full_path
)
)
continue
# Clean template data from keys that are dynamic
template_data.pop("_id")
template_data.pop("thumbnail_root")
thumbnail_entity = {
"_id": thumbnail_id,
"type": "thumbnail",
"schema": "pype:thumbnail-1.0",
"data": {
"template": thumbnail_template,
"template_data": template_data
}
}
# Create thumbnail entity
self.db_con.insert_one(thumbnail_entity)
self.log.debug(
"Creating entity in database {}".format(str(thumbnail_entity))
)
# Set thumbnail id for version
self.db_con.update_one(
{"_id": version["_id"]},
{"$set": {"data.thumbnail_id": thumbnail_id}}
)
self.db_con.update_one(
{"_id": avalon_asset["_id"]},
{"$set": {"data.thumbnail_id": thumbnail_id}}
)
action_job["status"] = "done"
session.commit()
return True
def get_thumbnail_url(self, resource_identifier, size=None):
# TODO use an ftrack_api method instead (find a way to use it)
url_string = (
u'{url}/component/thumbnail?id={id}&username={username}'
u'&apiKey={apiKey}'
)
url = url_string.format(
url=self.session.server_url,
id=resource_identifier,
username=self.session.api_user,
apiKey=self.session.api_key
)
if size:
url += u'&size={0}'.format(size)
return url
def download_file(self, source_url, dst_file_path):
dir_path = os.path.dirname(dst_file_path)
try:
os.makedirs(dir_path)
except OSError as exc:
if exc.errno != errno.EEXIST:
self.log.warning(
"Could not create folder: \"{}\"".format(dir_path)
)
return False
self.log.debug(
"Downloading file \"{}\" -> \"{}\"".format(
source_url, dst_file_path
)
)
file_open = open(dst_file_path, "wb")
try:
file_open.write(requests.get(source_url).content)
except Exception:
self.log.warning(
"Download of image `{}` failed.".format(source_url)
)
return False
finally:
file_open.close()
return True
def register(session, plugins_presets={}):
StoreThumbnailsToAvalon(session, plugins_presets).register()
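The `anatomy.format` call above fills a key-based path template with the prepared data. The template string below is an assumption for illustration only (each project defines its own "thumbnail" template in Anatomy presets); it just shows how the prepared keys resolve into a path:

# Hypothetical "thumbnail" template; real projects define theirs in Anatomy
template = (
    "{thumbnail_root}/{project[name]}/{hierarchy}/{asset}"
    "/{subset}/v{version}/{thumbnail_type}{ext}"
)
template_data = {
    "thumbnail_root": "/mnt/thumbnails",  # existing_thumbnail_root
    "thumbnail_type": "thumbnail",
    "ext": ".jpg",
    "project": {"name": "myproject", "code": "mp"},
    "asset": "sh010",
    "subset": "renderMain",
    "version": 3,
    "hierarchy": "shots/sq01",
}
print(template.format(**template_data))
# /mnt/thumbnails/myproject/shots/sq01/sh010/renderMain/v3/thumbnail.jpg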

View file

@ -0,0 +1,188 @@
from pype.ftrack import BaseEvent
class FirstVersionStatus(BaseEvent):
# WARNING Priority MUST be higher
# than handler in `event_version_to_task_statuses.py`
priority = 200
keys_enum = ["task", "task_type"]
# This should be set with presets
task_status_map = []
# EXAMPLE of `task_status_map`
__example_status_map__ = [{
# `key` specify where to look for name (is enumerator of `keys_enum`)
# By default is set to "task"
"key": "task",
# specification of the name
"name": "compositing",
# Status to set to the asset version
"status": "Blocking"
}]
def register(self, *args, **kwargs):
result = super(FirstVersionStatus, self).register(*args, **kwargs)
valid_task_status_map = []
for item in self.task_status_map:
key = (item.get("key") or "task").lower()
name = (item.get("name") or "").lower()
status = (item.get("status") or "").lower()
if not (key and name and status):
self.log.warning((
"Invalid item in Task -> Status mapping. {}"
).format(str(item)))
continue
if key not in self.keys_enum:
expected_msg = ""
last_key_idx = len(self.keys_enum) - 1
for idx, enum_key in enumerate(self.keys_enum):
if idx == 0:
joining_part = "`{}`"
elif idx == last_key_idx:
joining_part = "or `{}`"
else:
joining_part = ", `{}`"
expected_msg += joining_part.format(enum_key)
self.log.warning((
"Invalid key `{}`. Expected: {}."
).format(key, expected_msg))
continue
valid_task_status_map.append({
"key": key,
"name": name,
"status": status
})
self.task_status_map = valid_task_status_map
if not self.task_status_map:
self.log.warning((
"Event handler `{}` don't have set presets."
).format(self.__class__.__name__))
return result
def launch(self, session, event):
"""Set task's status for first created Asset Version."""
if not self.task_status_map:
return
entities_info = self.filter_event_ents(event)
if not entities_info:
return
entity_ids = []
for entity_info in entities_info:
entity_ids.append(entity_info["entityId"])
joined_entity_ids = ",".join(
["\"{}\"".format(entity_id) for entity_id in entity_ids]
)
asset_versions = session.query(
"AssetVersion where id in ({})".format(joined_entity_ids)
).all()
asset_version_statuses = None
project_schema = None
for asset_version in asset_versions:
task_entity = asset_version["task"]
found_item = None
for item in self.task_status_map:
if (
item["key"] == "task" and
task_entity["name"].lower() != item["name"]
):
continue
elif (
item["key"] == "task_type" and
task_entity["type"]["name"].lower() != item["name"]
):
continue
found_item = item
break
if not found_item:
continue
if project_schema is None:
project_schema = task_entity["project"]["project_schema"]
# Get all available statuses for Task
if asset_version_statuses is None:
statuses = project_schema.get_statuses("AssetVersion")
# map lowered status name with it's object
asset_version_statuses = {
status["name"].lower(): status for status in statuses
}
ent_path = "/".join(
[ent["name"] for ent in task_entity["link"]] +
[
str(asset_version["asset"]["name"]),
str(asset_version["version"])
]
)
new_status = asset_version_statuses.get(found_item["status"])
if not new_status:
self.log.warning(
"AssetVersion doesn't have status `{}`.".format(
found_item["status"]
)
)
continue
try:
asset_version["status"] = new_status
session.commit()
self.log.debug("[ {} ] Status updated to [ {} ]".format(
ent_path, new_status['name']
))
except Exception:
session.rollback()
self.log.warning(
"[ {} ] Status couldn't be set.".format(ent_path),
exc_info=True
)
def filter_event_ents(self, event):
filtered_ents = []
for entity in event["data"].get("entities", []):
# Care only about add actions
if entity["action"] != "add":
continue
# Filter AssetVersions
if entity["entityType"] != "assetversion":
continue
entity_changes = entity.get("changes") or {}
# Check if version of Asset Version is `1`
version_num = entity_changes.get("version", {}).get("new")
if version_num != 1:
continue
# Skip if AssetVersion doesn't have a task
task_id = entity_changes.get("taskid", {}).get("new")
if not task_id:
continue
filtered_ents.append(entity)
return filtered_ents
def register(session, plugins_presets):
'''Register plugin. Called when used as a plugin.'''
FirstVersionStatus(session, plugins_presets).register()
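Since `task_status_map` is filled from presets, here is a hedged example of a preset value following the `__example_status_map__` shape above; the task, type, and status names are illustrative and must exist in the project schema:

# Illustrative preset value for FirstVersionStatus.task_status_map
task_status_map = [
    # First version published from a task named "compositing"
    {"key": "task", "name": "compositing", "status": "Blocking"},
    # First version published from any task of type "Animation"
    {"key": "task_type", "name": "Animation", "status": "Pending Review"},
]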

View file

@ -3,6 +3,7 @@ import collections
import copy
import queue
import time
import datetime
import atexit
import traceback
@ -28,7 +29,7 @@ class SyncToAvalonEvent(BaseEvent):
ignore_entTypes = [
"socialfeed", "socialnotification", "note",
"assetversion", "job", "user", "reviewsessionobject", "timer",
"timelog", "auth_userrole", "appointment"
"timelog", "auth_userrole", "appointment", "notelabellink"
]
ignore_ent_types = ["Milestone"]
ignore_keys = ["statusid", "thumbid"]
@ -51,9 +52,39 @@ class SyncToAvalonEvent(BaseEvent):
def __init__(self, session, plugins_presets={}):
'''Expects a ftrack_api.Session instance'''
# Debug settings
# - time expiration in seconds
self.debug_print_time_expiration = 5 * 60
# - store current time
self.debug_print_time = datetime.datetime.now()
# - store synchronize entity types to be able to use
# only entityTypes in interest instead of filtering by ignored
self.debug_sync_types = collections.defaultdict(list)
# Set processing session to not use global
self.set_process_session(session)
super().__init__(session, plugins_presets)
def debug_logs(self):
"""This is debug method for printing small debugs messages. """
now_datetime = datetime.datetime.now()
delta = now_datetime - self.debug_print_time
if delta.total_seconds() < self.debug_print_time_expiration:
return
self.debug_print_time = now_datetime
known_types_items = []
for entityType, entity_type in self.debug_sync_types.items():
ent_types_msg = ", ".join(entity_type)
known_types_items.append(
"<{}> ({})".format(entityType, ent_types_msg)
)
known_entityTypes = ", ".join(known_types_items)
self.log.debug(
"DEBUG MESSAGE: Known types {}".format(known_entityTypes)
)
@property
def cur_project(self):
if self._cur_project is None:
@ -484,6 +515,9 @@ class SyncToAvalonEvent(BaseEvent):
if not entity_type or entity_type in self.ignore_ent_types:
continue
if entity_type not in self.debug_sync_types[entityType]:
self.debug_sync_types[entityType].append(entity_type)
action = ent_info["action"]
ftrack_id = ent_info["entityId"]
if isinstance(ftrack_id, list):
@ -573,8 +607,7 @@ class SyncToAvalonEvent(BaseEvent):
if auto_sync is not True:
return True
debug_msg = ""
debug_msg += "Updated: {}".format(len(updated))
debug_msg = "Updated: {}".format(len(updated))
debug_action_map = {
"add": "Created",
"remove": "Removed",
@ -634,6 +667,8 @@ class SyncToAvalonEvent(BaseEvent):
self.ftrack_added = entities_by_action["add"]
self.ftrack_updated = updated
self.debug_logs()
self.log.debug("Synchronization begins")
try:
time_1 = time.time()
@ -1545,6 +1580,14 @@ class SyncToAvalonEvent(BaseEvent):
entity_type_conf_ids[entity_type] = configuration_id
break
if not configuration_id:
self.log.warning(
"BUG REPORT: Missing configuration for `{} < {} >`".format(
entity_type, ent_info["entityType"]
)
)
continue
_entity_key = collections.OrderedDict({
"configuration_id": configuration_id,
"entity_id": ftrack_id
@ -1563,7 +1606,7 @@ class SyncToAvalonEvent(BaseEvent):
try:
# Commit changes of mongo_id to empty string
self.process_session.commit()
self.log.debug("Commititng unsetting")
self.log.debug("Committing unsetting")
except Exception:
self.process_session.rollback()
# TODO logging
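The new `debug_logs` above is a simple throttle: it returns early unless `debug_print_time_expiration` seconds have passed since the last print, then resets the timestamp. The same pattern in isolation, as a runnable sketch:

import datetime

class ThrottledLog:
    """Emit at most one message per `expiration` seconds (as in debug_logs)."""

    def __init__(self, expiration=5 * 60):
        self.expiration = expiration
        self.last_print = datetime.datetime.now()

    def maybe_print(self, message):
        now = datetime.datetime.now()
        if (now - self.last_print).total_seconds() < self.expiration:
            return
        self.last_print = now
        print(message)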

View file

@ -4,9 +4,13 @@ import signal
import datetime
import subprocess
import socket
import json
import platform
import argparse
import getpass
import atexit
import time
import uuid
import ftrack_api
from pype.ftrack.lib import credentials
@ -63,10 +67,19 @@ def validate_credentials(url, user, api):
)
session.close()
except Exception as e:
print(
'ERROR: Can\'t log into Ftrack with used credentials:'
' Ftrack server: "{}" // Username: {} // API key: {}'
).format(url, user, api)
print("Can't log into Ftrack with used credentials:")
ftrack_cred = {
"Ftrack server": str(url),
"Username": str(user),
"API key": str(api)
}
item_lens = [len(key) + 1 for key in ftrack_cred.keys()]
justify_len = max(*item_lens)
for key, value in ftrack_cred.items():
print("{} {}".format(
(key + ":").ljust(justify_len, " "),
value
))
return False
print('DEBUG: Credentials Username: "{}", API key: "{}" are valid.'.format(
@ -175,6 +188,7 @@ def main_loop(ftrack_url):
otherwise thread will be killed.
"""
os.environ["FTRACK_EVENT_SUB_ID"] = str(uuid.uuid1())
# Get mongo hostname and port for testing mongo connection
mongo_list = ftrack_events_mongo_settings()
mongo_hostname = mongo_list[0]
@ -202,6 +216,13 @@ def main_loop(ftrack_url):
processor_last_failed = datetime.datetime.now()
processor_failed_count = 0
statuser_name = "StorerThread"
statuser_port = 10021
statuser_path = "{}/sub_event_status.py".format(file_path)
statuser_thread = None
statuser_last_failed = datetime.datetime.now()
statuser_failed_count = 0
ftrack_accessible = False
mongo_accessible = False
@ -210,7 +231,7 @@ def main_loop(ftrack_url):
# stop threads on exit
# TODO check if works and args have thread objects!
def on_exit(processor_thread, storer_thread):
def on_exit(processor_thread, storer_thread, statuser_thread):
if processor_thread is not None:
processor_thread.stop()
processor_thread.join()
@ -221,9 +242,27 @@ def main_loop(ftrack_url):
storer_thread.join()
storer_thread = None
if statuser_thread is not None:
statuser_thread.stop()
statuser_thread.join()
statuser_thread = None
atexit.register(
on_exit, processor_thread=processor_thread, storer_thread=storer_thread
on_exit,
processor_thread=processor_thread,
storer_thread=storer_thread,
statuser_thread=statuser_thread
)
system_name, pc_name = platform.uname()[:2]
host_name = socket.gethostname()
main_info = {
"created_at": datetime.datetime.now().strftime("%Y.%m.%d %H:%M:%S"),
"Username": getpass.getuser(),
"Host Name": host_name,
"Host IP": socket.gethostbyname(host_name)
}
main_info_str = json.dumps(main_info)
# Main loop
while True:
# Check if accessible Ftrack and Mongo url
@ -261,6 +300,52 @@ def main_loop(ftrack_url):
printed_ftrack_error = False
printed_mongo_error = False
# ====== STATUSER =======
if statuser_thread is None:
if statuser_failed_count < max_fail_count:
statuser_thread = socket_thread.StatusSocketThread(
statuser_name, statuser_port, statuser_path,
[main_info_str]
)
statuser_thread.start()
elif statuser_failed_count == max_fail_count:
print((
"Statuser failed {}times in row"
" I'll try to run again {}s later"
).format(str(max_fail_count), str(wait_time_after_max_fail)))
statuser_failed_count += 1
elif ((
datetime.datetime.now() - statuser_last_failed
).seconds > wait_time_after_max_fail):
statuser_failed_count = 0
# If thread failed test Ftrack and Mongo connection
elif not statuser_thread.isAlive():
statuser_thread.join()
statuser_thread = None
ftrack_accessible = False
mongo_accessible = False
_statuser_last_failed = datetime.datetime.now()
delta_time = (
_statuser_last_failed - statuser_last_failed
).seconds
if delta_time < min_fail_seconds:
statuser_failed_count += 1
else:
statuser_failed_count = 0
statuser_last_failed = _statuser_last_failed
elif statuser_thread.stop_subprocess:
print("Main process was stopped by action")
on_exit(processor_thread, storer_thread, statuser_thread)
os.kill(os.getpid(), signal.SIGTERM)
return 1
# ====== STORER =======
# Run backup thread which does not require mongo to work
if storer_thread is None:
if storer_failed_count < max_fail_count:
@ -268,6 +353,7 @@ def main_loop(ftrack_url):
storer_name, storer_port, storer_path
)
storer_thread.start()
elif storer_failed_count == max_fail_count:
print((
"Storer failed {}times I'll try to run again {}s later"
@ -295,6 +381,7 @@ def main_loop(ftrack_url):
storer_failed_count = 0
storer_last_failed = _storer_last_failed
# ====== PROCESSOR =======
if processor_thread is None:
if processor_failed_count < max_fail_count:
processor_thread = socket_thread.SocketThread(
@ -336,6 +423,10 @@ def main_loop(ftrack_url):
processor_failed_count = 0
processor_last_failed = _processor_last_failed
if statuser_thread is not None:
statuser_thread.set_process("storer", storer_thread)
statuser_thread.set_process("processor", processor_thread)
time.sleep(1)
@ -446,9 +537,9 @@ def main(argv):
event_paths = kwargs.ftrackeventpaths
if not kwargs.noloadcred:
cred = credentials._get_credentials(True)
cred = credentials.get_credentials(ftrack_url)
username = cred.get('username')
api_key = cred.get('apiKey')
api_key = cred.get('api_key')
if kwargs.ftrackuser:
username = kwargs.ftrackuser
@ -482,7 +573,7 @@ def main(argv):
return 1
if kwargs.storecred:
credentials._save_credentials(username, api_key, True)
credentials.save_credentials(username, api_key, ftrack_url)
# Set Ftrack environments
os.environ["FTRACK_SERVER"] = ftrack_url

View file

@ -100,9 +100,9 @@ class FtrackServer:
log.warning(msg, exc_info=e)
if len(register_functions_dict) < 1:
raise Exception((
"There are no events with register function."
" Registered paths: \"{}\""
log.warning((
"There are no events with `register` function"
" in registered paths: \"{}\""
).format("| ".join(paths)))
# Load presets for setting plugins
@ -122,7 +122,7 @@ class FtrackServer:
else:
register(self.session, plugins_presets=plugins_presets)
if function_counter%7 == 0:
if function_counter % 7 == 0:
time.sleep(0.1)
function_counter += 1
except Exception as exc:

View file

@ -28,6 +28,10 @@ from pypeapp import Logger
from pype.ftrack.lib.custom_db_connector import DbConnector
TOPIC_STATUS_SERVER = "pype.event.server.status"
TOPIC_STATUS_SERVER_RESULT = "pype.event.server.status.result"
def ftrack_events_mongo_settings():
host = None
port = None
@ -123,20 +127,59 @@ def check_ftrack_url(url, log_errors=True):
return url
class StorerEventHub(ftrack_api.event.hub.EventHub):
class SocketBaseEventHub(ftrack_api.event.hub.EventHub):
hearbeat_msg = b"hearbeat"
heartbeat_callbacks = []
def __init__(self, *args, **kwargs):
self.sock = kwargs.pop("sock")
super(StorerEventHub, self).__init__(*args, **kwargs)
super(SocketBaseEventHub, self).__init__(*args, **kwargs)
def _handle_packet(self, code, packet_identifier, path, data):
"""Override `_handle_packet` which extend heartbeat"""
code_name = self._code_name_mapping[code]
if code_name == "heartbeat":
# Reply with heartbeat.
self.sock.sendall(b"storer")
return self._send_packet(self._code_name_mapping['heartbeat'])
for callback in self.heartbeat_callbacks:
callback()
elif code_name == "connect":
self.sock.sendall(self.heartbeat_msg)
return self._send_packet(self._code_name_mapping["heartbeat"])
return super(SocketBaseEventHub, self)._handle_packet(
code, packet_identifier, path, data
)
class StatusEventHub(SocketBaseEventHub):
def _handle_packet(self, code, packet_identifier, path, data):
"""Override `_handle_packet` which extend heartbeat"""
code_name = self._code_name_mapping[code]
if code_name == "connect":
event = ftrack_api.event.base.Event(
topic="pype.status.started",
data={},
source={
"id": self.id,
"user": {"username": self._api_user}
}
)
self._event_queue.put(event)
return super(StatusEventHub, self)._handle_packet(
code, packet_identifier, path, data
)
class StorerEventHub(SocketBaseEventHub):
hearbeat_msg = b"storer"
def _handle_packet(self, code, packet_identifier, path, data):
"""Override `_handle_packet` which extend heartbeat"""
code_name = self._code_name_mapping[code]
if code_name == "connect":
event = ftrack_api.event.base.Event(
topic="pype.storer.started",
data={},
@ -152,7 +195,9 @@ class StorerEventHub(ftrack_api.event.hub.EventHub):
)
class ProcessEventHub(ftrack_api.event.hub.EventHub):
class ProcessEventHub(SocketBaseEventHub):
hearbeat_msg = b"processor"
url, database, table_name = get_ftrack_event_mongo_info()
is_table_created = False
@ -164,7 +209,6 @@ class ProcessEventHub(ftrack_api.event.hub.EventHub):
database_name=self.database,
table_name=self.table_name
)
self.sock = kwargs.pop("sock")
super(ProcessEventHub, self).__init__(*args, **kwargs)
def prepare_dbcon(self):
@ -260,42 +304,10 @@ class ProcessEventHub(ftrack_api.event.hub.EventHub):
code_name = self._code_name_mapping[code]
if code_name == "event":
return
if code_name == "heartbeat":
self.sock.sendall(b"processor")
return self._send_packet(self._code_name_mapping["heartbeat"])
return super()._handle_packet(code, packet_identifier, path, data)
class UserEventHub(ftrack_api.event.hub.EventHub):
def __init__(self, *args, **kwargs):
self.sock = kwargs.pop("sock")
super(UserEventHub, self).__init__(*args, **kwargs)
def _handle_packet(self, code, packet_identifier, path, data):
"""Override `_handle_packet` which extend heartbeat"""
code_name = self._code_name_mapping[code]
if code_name == "heartbeat":
# Reply with heartbeat.
self.sock.sendall(b"hearbeat")
return self._send_packet(self._code_name_mapping['heartbeat'])
elif code_name == "connect":
event = ftrack_api.event.base.Event(
topic="pype.storer.started",
data={},
source={
"id": self.id,
"user": {"username": self._api_user}
}
)
self._event_queue.put(event)
return super(UserEventHub, self)._handle_packet(
code, packet_identifier, path, data
)
class SocketSession(ftrack_api.session.Session):
'''An isolated session for interaction with an ftrack server.'''
def __init__(

View file

@ -3,6 +3,7 @@ import sys
import time
import socket
import threading
import traceback
import subprocess
from pypeapp import Logger
@ -12,13 +13,15 @@ class SocketThread(threading.Thread):
MAX_TIMEOUT = 35
def __init__(self, name, port, filepath):
def __init__(self, name, port, filepath, additional_args=[]):
super(SocketThread, self).__init__()
self.log = Logger().get_logger("SocketThread", "Event Thread")
self.log = Logger().get_logger(self.__class__.__name__)
self.setName(name)
self.name = name
self.port = port
self.filepath = filepath
self.additional_args = additional_args
self.sock = None
self.subproc = None
self.connection = None
@ -53,7 +56,13 @@ class SocketThread(threading.Thread):
)
self.subproc = subprocess.Popen(
[sys.executable, self.filepath, "-port", str(self.port)]
[
sys.executable,
self.filepath,
*self.additional_args,
str(self.port)
],
stdin=subprocess.PIPE
)
# Listen for incoming connections
@ -127,3 +136,52 @@ class SocketThread(threading.Thread):
if data == b"MongoError":
self.mongo_error = True
connection.sendall(data)
class StatusSocketThread(SocketThread):
process_name_mapping = {
b"RestartS": "storer",
b"RestartP": "processor",
b"RestartM": "main"
}
def __init__(self, *args, **kwargs):
self.process_threads = {}
self.stop_subprocess = False
super(StatusSocketThread, self).__init__(*args, **kwargs)
def set_process(self, process_name, thread):
try:
if not self.subproc:
self.process_threads[process_name] = None
return
if (
process_name in self.process_threads and
self.process_threads[process_name] == thread
):
return
self.process_threads[process_name] = thread
self.subproc.stdin.write(
str.encode("reset:{}\r\n".format(process_name))
)
self.subproc.stdin.flush()
except Exception:
print("Could not set thread in StatusSocketThread")
traceback.print_exception(*sys.exc_info())
def _handle_data(self, connection, data):
if not data:
return
process_name = self.process_name_mapping.get(data)
if process_name:
if process_name == "main":
self.stop_subprocess = True
else:
subp = self.process_threads.get(process_name)
if subp:
subp.stop()
connection.sendall(data)
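`set_process` above and the statuser's `OutputChecker` (in `sub_event_status.py` below) communicate over the subprocess's stdin with a plain `reset:<process_name>` line protocol. A runnable sketch of both ends, using an inline stand-in for the child:

import subprocess
import sys

# Child stand-in: parses one line the way OutputChecker.run does
child_code = (
    "import sys\n"
    "line = sys.stdin.readline().rstrip().lower()\n"
    "if line.startswith('reset:'):\n"
    "    print('clear cached status for:', line.replace('reset:', ''))\n"
)
child = subprocess.Popen([sys.executable, "-c", child_code], stdin=subprocess.PIPE)

# Parent side, as in StatusSocketThread.set_process
child.stdin.write(str.encode("reset:processor\r\n"))
child.stdin.flush()
child.stdin.close()
child.wait()  # child prints: clear cached status for: processor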

View file

@ -1,13 +1,59 @@
import os
import sys
import signal
import socket
import datetime
from ftrack_server import FtrackServer
from pype.ftrack.ftrack_server.lib import SocketSession, ProcessEventHub
from pype.ftrack.ftrack_server.lib import (
SocketSession, ProcessEventHub, TOPIC_STATUS_SERVER
)
import ftrack_api
from pypeapp import Logger
log = Logger().get_logger("Event processor")
subprocess_started = datetime.datetime.now()
class SessionFactory:
session = None
def send_status(event):
subprocess_id = event["data"].get("subprocess_id")
if not subprocess_id:
return
if subprocess_id != os.environ["FTRACK_EVENT_SUB_ID"]:
return
session = SessionFactory.session
if not session:
return
new_event_data = {
"subprocess_id": subprocess_id,
"source": "processor",
"status_info": {
"created_at": subprocess_started.strftime("%Y.%m.%d %H:%M:%S")
}
}
new_event = ftrack_api.event.base.Event(
topic="pype.event.server.status.result",
data=new_event_data
)
session.event_hub.publish(new_event)
def register(session):
'''Registers the status request topic subscription.'''
session.event_hub.subscribe(
"topic={}".format(TOPIC_STATUS_SERVER), send_status
)
def main(args):
port = int(args[-1])
@ -24,6 +70,9 @@ def main(args):
session = SocketSession(
auto_connect_event_hub=True, sock=sock, Eventhub=ProcessEventHub
)
register(session)
SessionFactory.session = session
server = FtrackServer("event")
log.debug("Launched Ftrack Event processor")
server.run_server(session)

View file

@ -0,0 +1,436 @@
import os
import sys
import json
import threading
import signal
import socket
import datetime
import ftrack_api
from ftrack_server import FtrackServer
from pype.ftrack.ftrack_server.lib import (
SocketSession, StatusEventHub,
TOPIC_STATUS_SERVER, TOPIC_STATUS_SERVER_RESULT
)
from pypeapp import Logger, config
log = Logger().get_logger("Event storer")
action_identifier = (
"event.server.status" + os.environ["FTRACK_EVENT_SUB_ID"]
)
host_ip = socket.gethostbyname(socket.gethostname())
action_data = {
"label": "Pype Admin",
"variant": "- Event server Status ({})".format(host_ip),
"description": "Get Infromation about event server",
"actionIdentifier": action_identifier,
"icon": "{}/ftrack/action_icons/PypeAdmin.svg".format(
os.environ.get(
"PYPE_STATICS_SERVER",
"http://localhost:{}".format(
config.get_presets().get("services", {}).get(
"rest_api", {}
).get("default_port", 8021)
)
)
)
}
class ObjectFactory:
session = None
status_factory = None
checker_thread = None
last_trigger = None
class Status:
default_item = {
"type": "label",
"value": "Process info is not available at this moment."
}
def __init__(self, name, label, parent):
self.name = name
self.label = label or name
self.parent = parent
self.info = None
self.last_update = None
def update(self, info):
self.last_update = datetime.datetime.now()
self.info = info
def get_delta_string(self, delta):
days = delta.days
hours = delta.seconds // 3600
minutes = delta.seconds // 60 % 60
delta_items = []
if days:
delta_items.append("{}d".format(days))
if days or hours:
delta_items.append("{}h".format(hours))
if days or hours or minutes:
delta_items.append("{}m".format(minutes))
delta_items.append("{}s".format(delta.seconds % 60))
return " ".join(delta_items)
def get_items(self):
items = []
last_update = "N/A"
if self.last_update:
delta = datetime.datetime.now() - self.last_update
last_update = "{} ago".format(
self.get_delta_string(delta)
)
last_update = "Updated: {}".format(last_update)
items.append({
"type": "label",
"value": "#{}".format(self.label)
})
items.append({
"type": "label",
"value": "##{}".format(last_update)
})
if not self.info:
if self.info is None:
trigger_info_get()
items.append(self.default_item)
return items
info = {}
for key, value in self.info.items():
if key not in ["created_at:", "created_at"]:
info[key] = value
continue
datetime_value = datetime.datetime.strptime(
value, "%Y.%m.%d %H:%M:%S"
)
delta = datetime.datetime.now() - datetime_value
running_for = self.get_delta_string(delta)
info["Started at"] = "{} [running: {}]".format(value, running_for)
for key, value in info.items():
items.append({
"type": "label",
"value": "<b>{}:</b> {}".format(key, value)
})
return items
class StatusFactory:
note_item = {
"type": "label",
"value": (
"<i>HINT: To refresh data uncheck"
" all checkboxes and hit `Submit` button.</i>"
)
}
splitter_item = {
"type": "label",
"value": "---"
}
def __init__(self, statuses={}):
self.statuses = []
for status in statuses.items():
self.create_status(*status)
def __getitem__(self, key):
return self.get(key)
def get(self, key, default=None):
for status in self.statuses:
if status.name == key:
return status
return default
def is_filled(self):
for status in self.statuses:
if status.info is None:
return False
return True
def create_status(self, name, label):
new_status = Status(name, label, self)
self.statuses.append(new_status)
def process_event_result(self, event):
subprocess_id = event["data"].get("subprocess_id")
if subprocess_id != os.environ["FTRACK_EVENT_SUB_ID"]:
return
source = event["data"]["source"]
data = event["data"]["status_info"]
self.update_status_info(source, data)
def update_status_info(self, process_name, info):
for status in self.statuses:
if status.name == process_name:
status.update(info)
break
def bool_items(self):
items = []
items.append({
"type": "label",
"value": "#Restart process"
})
items.append({
"type": "label",
"value": (
"<i><b>WARNING:</b> Main process may shut down when checked"
" if does not run as a service!</i>"
)
})
name_labels = {}
for status in self.statuses:
name_labels[status.name] = status.label
for name, label in name_labels.items():
items.append({
"type": "boolean",
"value": False,
"label": label,
"name": name
})
return items
def items(self):
items = []
items.append(self.note_item)
items.extend(self.bool_items())
for status in self.statuses:
items.append(self.splitter_item)
items.extend(status.get_items())
return items
def server_activity_validate_user(event):
"""Validate user permissions to show server info."""
session = ObjectFactory.session
username = event["source"].get("user", {}).get("username")
if not username:
return False
user_ent = session.query(
"User where username = \"{}\"".format(username)
).first()
if not user_ent:
return False
role_list = ["Pypeclub", "Administrator"]
for role in user_ent["user_security_roles"]:
if role["security_role"]["name"] in role_list:
return True
return False
def server_activity_discover(event):
"""Discover action in actions menu conditions."""
session = ObjectFactory.session
if session is None:
return
if not server_activity_validate_user(event):
return
return {"items": [action_data]}
def server_activity(event):
session = ObjectFactory.session
if session is None:
msg = "Session is not set. Can't trigger Reset action."
log.warning(msg)
return {
"success": False,
"message": msg
}
if not server_activity_validate_user(event):
return {
"success": False,
"message": "You don't have permissions to see Event server status!"
}
values = event["data"].get("values") or {}
is_checked = False
for value in values.values():
if value:
is_checked = True
break
if not is_checked:
return {
"items": ObjectFactory.status_factory.items(),
"title": "Server current status"
}
session = ObjectFactory.session
if values["main"]:
session.event_hub.sock.sendall(b"RestartM")
return
if values["storer"]:
session.event_hub.sock.sendall(b"RestartS")
if values["processor"]:
session.event_hub.sock.sendall(b"RestartP")
def trigger_info_get():
if ObjectFactory.last_trigger:
delta = datetime.datetime.now() - ObjectFactory.last_trigger
if delta.seconds < 5:
return
session = ObjectFactory.session
session.event_hub.publish(
ftrack_api.event.base.Event(
topic=TOPIC_STATUS_SERVER,
data={"subprocess_id": os.environ["FTRACK_EVENT_SUB_ID"]}
),
on_error="ignore"
)
def on_start(event):
session = ObjectFactory.session
if session is None:
log.warning("Session is not set. Can't request status info.")
return True
source_id = event.get("source", {}).get("id")
if not source_id or source_id != session.event_hub.id:
return
trigger_info_get()
def register(session):
'''Registers the event, subscribing the discover and launch topics.'''
session.event_hub.subscribe(
"topic=ftrack.action.discover",
server_activity_discover
)
session.event_hub.subscribe("topic=pype.status.started", on_start)
status_launch_subscription = (
"topic=ftrack.action.launch and data.actionIdentifier={}"
).format(action_identifier)
session.event_hub.subscribe(
status_launch_subscription,
server_activity
)
session.event_hub.subscribe(
"topic={}".format(TOPIC_STATUS_SERVER_RESULT),
ObjectFactory.status_factory.process_event_result
)
def heartbeat():
if ObjectFactory.status_factory.is_filled():
return
trigger_info_get()
def main(args):
port = int(args[-1])
server_info = json.loads(args[-2])
# Create a TCP/IP socket
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# Connect the socket to the port where the server is listening
server_address = ("localhost", port)
log.debug("Statuser connected to {} port {}".format(*server_address))
sock.connect(server_address)
sock.sendall(b"CreatedStatus")
# store socket connection object
ObjectFactory.sock = sock
status_names = {
"main": "Main process",
"storer": "Event Storer",
"processor": "Event Processor"
}
ObjectFactory.status_factory = StatusFactory(status_names)
ObjectFactory.status_factory["main"].update(server_info)
_returncode = 0
try:
session = SocketSession(
auto_connect_event_hub=True, sock=sock, Eventhub=StatusEventHub
)
ObjectFactory.session = session
session.event_hub.heartbeat_callbacks.append(heartbeat)
register(session)
server = FtrackServer("event")
log.debug("Launched Ftrack Event statuser")
server.run_server(session, load_files=False)
except Exception:
_returncode = 1
log.error("ServerInfo subprocess crashed", exc_info=True)
finally:
log.debug("Ending. Closing socket.")
sock.close()
return _returncode
class OutputChecker(threading.Thread):
read_input = True
def run(self):
while self.read_input:
for line in sys.stdin:
line = line.rstrip().lower()
if not line.startswith("reset:"):
continue
process_name = line.replace("reset:", "")
ObjectFactory.status_factory.update_status_info(
process_name, None
)
def stop(self):
self.read_input = False
if __name__ == "__main__":
# Register interrupt signal
def signal_handler(sig, frame):
print("You pressed Ctrl+C. Process ended.")
ObjectFactory.checker_thread.stop()
sys.exit(0)
signal.signal(signal.SIGINT, signal_handler)
signal.signal(signal.SIGTERM, signal_handler)
checker_thread = OutputChecker()
ObjectFactory.checker_thread = checker_thread
checker_thread.start()
sys.exit(main(sys.argv))
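For reference, the status collection is a request/reply pair over the ftrack event hub: `trigger_info_get` above publishes a request on `TOPIC_STATUS_SERVER`, and the storer and processor answer on `TOPIC_STATUS_SERVER_RESULT` (see `send_status` in their modules), which `StatusFactory.process_event_result` consumes. The payload shapes, with an illustrative subprocess id:

# Request published by the statuser (trigger_info_get)
request_data = {"subprocess_id": "SUB-ID"}  # os.environ["FTRACK_EVENT_SUB_ID"]

# Reply published by each service (send_status); "source" selects the Status
# object in StatusFactory and "created_at" feeds the "running for" display
reply_data = {
    "subprocess_id": "SUB-ID",  # must match the request to be accepted
    "source": "processor",      # or "storer"
    "status_info": {"created_at": "2020.02.20 17:02:07"},
}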

View file

@ -8,14 +8,15 @@ import pymongo
import ftrack_api
from ftrack_server import FtrackServer
from pype.ftrack.ftrack_server.lib import (
SocketSession, StorerEventHub,
get_ftrack_event_mongo_info,
SocketSession,
StorerEventHub,
TOPIC_STATUS_SERVER, TOPIC_STATUS_SERVER_RESULT
)
from pype.ftrack.lib.custom_db_connector import DbConnector
from pypeapp import Logger
log = Logger().get_logger("Event storer")
subprocess_started = datetime.datetime.now()
class SessionFactory:
@ -138,11 +139,42 @@ def trigger_sync(event):
)
def send_status(event):
session = SessionFactory.session
if not session:
return
subprocess_id = event["data"].get("subprocess_id")
if not subprocess_id:
return
if subprocess_id != os.environ["FTRACK_EVENT_SUB_ID"]:
return
new_event_data = {
"subprocess_id": os.environ["FTRACK_EVENT_SUB_ID"],
"source": "storer",
"status_info": {
"created_at": subprocess_started.strftime("%Y.%m.%d %H:%M:%S")
}
}
new_event = ftrack_api.event.base.Event(
topic=TOPIC_STATUS_SERVER_RESULT,
data=new_event_data
)
session.event_hub.publish(new_event)
def register(session):
'''Registers the event, subscribing the discover and launch topics.'''
install_db()
session.event_hub.subscribe("topic=*", launch)
session.event_hub.subscribe("topic=pype.storer.started", trigger_sync)
session.event_hub.subscribe(
"topic={}".format(TOPIC_STATUS_SERVER), send_status
)
def main(args):

View file

@ -5,7 +5,7 @@ import socket
import traceback
from ftrack_server import FtrackServer
from pype.ftrack.ftrack_server.lib import SocketSession, UserEventHub
from pype.ftrack.ftrack_server.lib import SocketSession, SocketBaseEventHub
from pypeapp import Logger
@ -28,7 +28,7 @@ def main(args):
try:
session = SocketSession(
auto_connect_event_hub=True, sock=sock, Eventhub=UserEventHub
auto_connect_event_hub=True, sock=sock, Eventhub=SocketBaseEventHub
)
server = FtrackServer("action")
log.debug("Launched User Ftrack Server")

View file

@ -1,6 +1,11 @@
from . import avalon_sync
from .credentials import *
from . import credentials
from .ftrack_app_handler import *
from .ftrack_event_handler import *
from .ftrack_action_handler import *
from .ftrack_base_handler import *
from .lib import (
get_project_from_entity,
get_avalon_entities_for_assetversion
)

View file

@ -2,85 +2,140 @@ import os
import json
import ftrack_api
import appdirs
import getpass
try:
from urllib.parse import urlparse
except ImportError:
from urlparse import urlparse
config_path = os.path.normpath(appdirs.user_data_dir('pype-app', 'pype'))
action_file_name = 'ftrack_cred.json'
event_file_name = 'ftrack_event_cred.json'
action_fpath = os.path.join(config_path, action_file_name)
event_fpath = os.path.join(config_path, event_file_name)
folders = set([os.path.dirname(action_fpath), os.path.dirname(event_fpath)])
CONFIG_PATH = os.path.normpath(appdirs.user_data_dir("pype-app", "pype"))
CREDENTIALS_FILE_NAME = "ftrack_cred.json"
CREDENTIALS_PATH = os.path.join(CONFIG_PATH, CREDENTIALS_FILE_NAME)
CREDENTIALS_FOLDER = os.path.dirname(CREDENTIALS_PATH)
for folder in folders:
if not os.path.isdir(folder):
os.makedirs(folder)
if not os.path.isdir(CREDENTIALS_FOLDER):
os.makedirs(CREDENTIALS_FOLDER)
USER_GETTER = None
def _get_credentials(event=False):
if event:
fpath = event_fpath
else:
fpath = action_fpath
def get_ftrack_hostname(ftrack_server=None):
if not ftrack_server:
ftrack_server = os.environ["FTRACK_SERVER"]
if "//" not in ftrack_server:
ftrack_server = "//" + ftrack_server
return urlparse(ftrack_server).hostname
def get_user():
if USER_GETTER:
return USER_GETTER()
return getpass.getuser()
def get_credentials(ftrack_server=None, user=None):
credentials = {}
try:
file = open(fpath, 'r')
credentials = json.load(file)
except Exception:
file = open(fpath, 'w')
if not os.path.exists(CREDENTIALS_PATH):
with open(CREDENTIALS_PATH, "w") as file:
file.write(json.dumps(credentials))
file.close()
return credentials
file.close()
with open(CREDENTIALS_PATH, "r") as file:
content = file.read()
hostname = get_ftrack_hostname(ftrack_server)
if not user:
user = get_user()
content_json = json.loads(content or "{}")
credentials = content_json.get(hostname, {}).get(user) or {}
return credentials
def _save_credentials(username, apiKey, event=False, auto_connect=None):
data = {
'username': username,
'apiKey': apiKey
def save_credentials(ft_user, ft_api_key, ftrack_server=None, user=None):
hostname = get_ftrack_hostname(ftrack_server)
if not user:
user = get_user()
with open(CREDENTIALS_PATH, "r") as file:
content = file.read()
content_json = json.loads(content or "{}")
if hostname not in content_json:
content_json[hostname] = {}
content_json[hostname][user] = {
"username": ft_user,
"api_key": ft_api_key
}
if event:
fpath = event_fpath
if auto_connect is None:
cred = _get_credentials(True)
auto_connect = cred.get('auto_connect', False)
data['auto_connect'] = auto_connect
else:
fpath = action_fpath
# Deprecated keys
if "username" in content_json:
content_json.pop("username")
if "apiKey" in content_json:
content_json.pop("apiKey")
file = open(fpath, 'w')
file.write(json.dumps(data))
file.close()
with open(CREDENTIALS_PATH, "w") as file:
file.write(json.dumps(content_json, indent=4))
def _clear_credentials(event=False):
if event:
fpath = event_fpath
else:
fpath = action_fpath
open(fpath, 'w').close()
_set_env(None, None)
def clear_credentials(ft_user=None, ftrack_server=None, user=None):
if not ft_user:
ft_user = os.environ.get("FTRACK_API_USER")
if not ft_user:
return
hostname = get_ftrack_hostname(ftrack_server)
if not user:
user = get_user()
with open(CREDENTIALS_PATH, "r") as file:
content = file.read()
content_json = json.loads(content or "{}")
if hostname not in content_json:
content_json[hostname] = {}
content_json[hostname].pop(user, None)
with open(CREDENTIALS_PATH, "w") as file:
file.write(json.dumps(content_json))
def _set_env(username, apiKey):
if not username:
username = ''
if not apiKey:
apiKey = ''
os.environ['FTRACK_API_USER'] = username
os.environ['FTRACK_API_KEY'] = apiKey
def set_env(ft_user=None, ft_api_key=None):
os.environ["FTRACK_API_USER"] = ft_user or ""
os.environ["FTRACK_API_KEY"] = ft_api_key or ""
def _check_credentials(username=None, apiKey=None):
def get_env_credentials():
return (
os.environ.get("FTRACK_API_USER"),
os.environ.get("FTRACK_API_KEY")
)
if username and apiKey:
_set_env(username, apiKey)
def check_credentials(ft_user, ft_api_key, ftrack_server=None):
if not ftrack_server:
ftrack_server = os.environ["FTRACK_SERVER"]
if not ft_user or not ft_api_key:
return False
try:
session = ftrack_api.Session()
session = ftrack_api.Session(
server_url=ftrack_server,
api_key=ft_api_key,
api_user=ft_user
)
session.close()
except Exception as e:
except Exception:
return False
return True
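After this rewrite the single `ftrack_cred.json` is keyed first by ftrack server hostname and then by local username, replacing the old flat `username`/`apiKey` file and the separate event credentials file. Roughly what `save_credentials` writes, with illustrative values:

# Shape of ftrack_cred.json written by save_credentials()
content_json = {
    "studio.ftrackapp.com": {      # get_ftrack_hostname(ftrack_server)
        "local.user": {            # get_user() -> local username
            "username": "jane.doe",        # ftrack account
            "api_key": "0000-0000-0000"    # ftrack API key
        }
    }
}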

View file

@ -49,7 +49,7 @@ class BaseHandler(object):
).format(
str(type(session)),
str(ftrack_api.session.Session),
str(session_processor.ProcessSession)
str(SocketSession)
))
self._session = session

pype/ftrack/lib/lib.py Normal file
View file

@ -0,0 +1,135 @@
from bson.objectid import ObjectId
from .avalon_sync import CustAttrIdKey
import avalon.io
def get_project_from_entity(entity):
# TODO add more entities
ent_type_lowered = entity.entity_type.lower()
if ent_type_lowered == "project":
return entity
elif ent_type_lowered == "assetversion":
return entity["asset"]["parent"]["project"]
elif "project" in entity:
return entity["project"]
return None
def get_avalon_entities_for_assetversion(asset_version, db_con=None):
output = {
"success": True,
"message": None,
"project": None,
"project_name": None,
"asset": None,
"asset_name": None,
"asset_path": None,
"subset": None,
"subset_name": None,
"version": None,
"version_name": None,
"representations": None
}
if db_con is None:
db_con = avalon.io
db_con.install()
ft_asset = asset_version["asset"]
subset_name = ft_asset["name"]
version = asset_version["version"]
parent = ft_asset["parent"]
ent_path = "/".join(
[ent["name"] for ent in parent["link"]]
)
project = get_project_from_entity(asset_version)
project_name = project["full_name"]
output["project_name"] = project_name
output["asset_name"] = parent["name"]
output["asset_path"] = ent_path
output["subset_name"] = subset_name
output["version_name"] = version
db_con.Session["AVALON_PROJECT"] = project_name
avalon_project = db_con.find_one({"type": "project"})
output["project"] = avalon_project
if not avalon_project:
output["success"] = False
output["message"] = "Project not synchronized to avalon `{}`".format(
project_name
)
return output
asset_ent = None
asset_mongo_id = parent["custom_attributes"].get(CustAttrIdKey)
if asset_mongo_id:
try:
asset_mongo_id = ObjectId(asset_mongo_id)
asset_ent = db_con.find_one({
"type": "asset",
"_id": asset_mongo_id
})
except Exception:
pass
if not asset_ent:
asset_ent = db_con.find_one({
"type": "asset",
"data.ftrackId": parent["id"]
})
output["asset"] = asset_ent
if not asset_ent:
output["success"] = False
output["message"] = "Not synchronized entity to avalon `{}`".format(
ent_path
)
return output
asset_mongo_id = asset_ent["_id"]
subset_ent = db_con.find_one({
"type": "subset",
"parent": asset_mongo_id,
"name": subset_name
})
output["subset"] = subset_ent
if not subset_ent:
output["success"] = False
output["message"] = (
"Subset `{}` does not exist under Asset `{}`"
).format(subset_name, ent_path)
return output
version_ent = db_con.find_one({
"type": "version",
"name": version,
"parent": subset_ent["_id"]
})
output["version"] = version_ent
if not version_ent:
output["success"] = False
output["message"] = (
"Version `{}` does not exist under Subset `{}` | Asset `{}`"
).format(version, subset_name, ent_path)
return output
repre_ents = list(db_con.find({
"type": "representation",
"parent": version_ent["_id"]
}))
output["representations"] = repre_ents
return output
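A hedged usage sketch for the helper above; the session setup and `version_id` are assumed, and any entity key in the returned dict may be None on failure:

session = ftrack_api.Session()
asset_version = session.query(
    'AssetVersion where id is "{}"'.format(version_id)
).one()
entities = get_avalon_entities_for_assetversion(asset_version)
if not entities["success"]:
    raise RuntimeError(entities["message"])
for repre in entities["representations"]:
    print(repre["name"])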


@ -34,29 +34,28 @@ class FtrackModule:
def validate(self):
validation = False
        cred = credentials.get_credentials()
        ft_user = cred.get("username")
        ft_api_key = cred.get("api_key")
        validation = credentials.check_credentials(ft_user, ft_api_key)
        if validation:
            credentials.set_env(ft_user, ft_api_key)
            log.info("Connected to Ftrack successfully")
            self.loginChange()
            return validation

        if not validation and ft_user and ft_api_key:
            log.warning(
                "Current Ftrack credentials are not valid. {}: {} - {}".format(
                    str(os.environ.get("FTRACK_SERVER")), ft_user, ft_api_key
                )
            )

        log.info("Please sign in to Ftrack")
        self.bool_logged = False
        self.show_login_widget()
        self.set_menu_visibility()

        return validation
@ -67,7 +66,7 @@ class FtrackModule:
self.start_action_server()
def logout(self):
        credentials.clear_credentials()
self.stop_action_server()
log.info("Logged out of Ftrack")
@ -307,11 +306,23 @@ class FtrackModule:
except Exception as e:
log.error("During Killing Timer event server: {0}".format(e))
def changed_user(self):
self.stop_action_server()
credentials.set_env()
self.validate()
def process_modules(self, modules):
if 'TimersManager' in modules:
self.timer_manager = modules['TimersManager']
self.timer_manager.add_module(self)
if "UserModule" in modules:
credentials.USER_GETTER = modules["UserModule"].get_user
modules["UserModule"].register_callback_on_user_change(
self.changed_user
)
def start_timer_manager(self, data):
if self.thread_timer is not None:
self.thread_timer.ftrack_start_timer(data)
@ -336,7 +347,7 @@ class FtrackEventsThread(QtCore.QThread):
def __init__(self, parent):
super(FtrackEventsThread, self).__init__()
        cred = credentials.get_credentials()
self.username = cred['username']
self.user = None
self.last_task = None


@ -204,11 +204,11 @@ class Login_Dialog_ui(QtWidgets.QWidget):
self.setError("{0} {1}".format(msg, " and ".join(missing)))
return
        verification = credentials.check_credentials(username, apiKey)
if verification:
credentials.save_credentials(username, apiKey, self.is_event)
credentials.set_env(username, apiKey)
if self.parent is not None:
self.parent.loginChange()
self._close_widget()
@ -304,11 +304,11 @@ class Login_Dialog_ui(QtWidgets.QWidget):
self._login_server_thread.start(url)
return
        verification = credentials.check_credentials(username, apiKey)
if verification is True:
credentials.save_credentials(username, apiKey, self.is_event)
credentials.set_env(username, apiKey)
if self.parent is not None:
self.parent.loginChange()
self._close_widget()


@ -2176,18 +2176,29 @@ def load_capture_preset(path=None, data=None):
4: 'nolights'}
for key in preset[id]:
if key == 'high_quality':
if preset[id][key] == True:
temp_options2['multiSampleEnable'] = True
temp_options2['multiSampleCount'] = 4
temp_options2['textureMaxResolution'] = 1024
temp_options2['enableTextureMaxRes'] = True
temp_options2['textureMaxResMode'] = 1
else:
temp_options2['multiSampleEnable'] = False
temp_options2['multiSampleCount'] = 4
temp_options2['textureMaxResolution'] = 512
temp_options2['enableTextureMaxRes'] = True
temp_options2['textureMaxResMode'] = 0
if key == 'ssaoEnable':
if preset[id][key] == True:
temp_options2['ssaoEnable'] = True
else:
temp_options2['ssaoEnable'] = False
if key == 'alphaCut':
temp_options2['transparencyAlgorithm'] = 5
temp_options2['transparencyQuality'] = 1
if key == 'headsUpDisplay':
temp_options['headsUpDisplay'] = True


@ -12,13 +12,32 @@ class IntegrateFtrackNote(pyblish.api.InstancePlugin):
families = ["ftrack"]
optional = True
# Can be set in presets:
# - Allows only `intent` and `comment` keys
note_with_intent_template = "{intent}: {comment}"
# - note label must exist in Ftrack
note_labels = []
def process(self, instance):
comment = (instance.context.data.get("comment") or "").strip()
if not comment:
self.log.info("Comment is not set.")
return
self.log.debug("Comment is set to {}".format(comment))
self.log.debug("Comment is set to `{}`".format(comment))
intent = instance.context.data.get("intent")
if intent:
msg = "Intent is set to `{}` and was added to comment.".format(
intent
)
comment = self.note_with_intent_template.format(**{
"intent": intent,
"comment": comment
})
else:
msg = "Intent is not set."
self.log.debug(msg)
asset_versions_key = "ftrackIntegratedAssetVersions"
asset_versions = instance.data.get(asset_versions_key)
@ -37,8 +56,22 @@ class IntegrateFtrackNote(pyblish.api.InstancePlugin):
)
)
labels = []
if self.note_labels:
all_labels = session.query("NoteLabel").all()
labels_by_low_name = {lab["name"].lower(): lab for lab in all_labels}
for _label in self.note_labels:
label = labels_by_low_name.get(_label.lower())
if not label:
self.log.warning(
"Note Label `{}` was not found.".format(_label)
)
continue
labels.append(label)
for asset_version in asset_versions:
            asset_version.create_note(comment, author=user, labels=labels)
try:
session.commit()


@ -1,10 +1,24 @@
"""
"""Collect Anatomy and global anatomy data.
Requires:
None
session -> AVALON_TASK
projectEntity, assetEntity -> collect_avalon_entities *(pyblish.api.CollectorOrder)
username -> collect_pype_user *(pyblish.api.CollectorOrder + 0.001)
datetimeData -> collect_datetime_data *(pyblish.api.CollectorOrder)
Optional:
comment -> collect_comment *(pyblish.api.CollectorOrder)
intent -> collected in pyblish-lite
Provides:
context -> anatomy (pypeapp.Anatomy)
context -> anatomyData
"""
import os
import json
from avalon import api, lib
from pypeapp import Anatomy
import pyblish.api
@ -12,9 +26,52 @@ import pyblish.api
class CollectAnatomy(pyblish.api.ContextPlugin):
"""Collect Anatomy into Context"""
    order = pyblish.api.CollectorOrder + 0.002
label = "Collect Anatomy"
def process(self, context):
root_path = api.registered_root()
task_name = api.Session["AVALON_TASK"]
project_entity = context.data["projectEntity"]
asset_entity = context.data["assetEntity"]
project_name = project_entity["name"]
context.data["anatomy"] = Anatomy(project_name)
self.log.info(
"Anatomy object collected for project \"{}\".".format(project_name)
)
hierarchy_items = asset_entity["data"]["parents"]
hierarchy = ""
if hierarchy_items:
hierarchy = os.path.join(*hierarchy_items)
context_data = {
"root": root_path,
"project": {
"name": project_name,
"code": project_entity["data"].get("code")
},
"asset": asset_entity["name"],
"hierarchy": hierarchy.replace("\\", "/"),
"task": task_name,
"username": context.data["user"]
}
avalon_app_name = os.environ.get("AVALON_APP_NAME")
if avalon_app_name:
application_def = lib.get_application(avalon_app_name)
app_dir = application_def.get("application_dir")
if app_dir:
context_data["app"] = app_dir
datetime_data = context.data.get("datetimeData") or {}
context_data.update(datetime_data)
context.data["anatomyData"] = context_data
self.log.info("Global anatomy Data collected")
self.log.debug(json.dumps(context_data, indent=4))
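For orientation, the collected `anatomyData` might look like this (all values invented; the datetime keys merged in from `datetimeData` are omitted):

{
    "root": "/projects",
    "project": {"name": "jtest", "code": "jt"},
    "asset": "sh010",
    "hierarchy": "shots/sq01",
    "task": "compositing",
    "username": "jane.doe",
    "app": "nuke"
}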


@ -0,0 +1,46 @@
"""Collect Anatomy and global anatomy data.
Requires:
session -> AVALON_PROJECT, AVALON_ASSET
Provides:
context -> projectEntity - project entity from database
context -> assetEntity - asset entity from database
"""
from avalon import io, api
import pyblish.api
class CollectAvalonEntities(pyblish.api.ContextPlugin):
"""Collect Anatomy into Context"""
order = pyblish.api.CollectorOrder
label = "Collect Avalon Entities"
def process(self, context):
project_name = api.Session["AVALON_PROJECT"]
asset_name = api.Session["AVALON_ASSET"]
project_entity = io.find_one({
"type": "project",
"name": project_name
})
assert project_entity, (
"Project '{0}' was not found."
).format(project_name)
self.log.debug("Collected Project entity \"{}\"".format(project_entity))
asset_entity = io.find_one({
"type": "asset",
"name": asset_name,
"parent": project_entity["_id"]
})
assert asset_entity, (
"No asset found by the name '{0}' in project '{1}'"
).format(asset_name, project_name)
self.log.debug("Collected Asset entity \"{}\"".format(asset_entity))
context.data["projectEntity"] = project_entity
context.data["assetEntity"] = asset_entity


@ -0,0 +1,123 @@
"""
Requires:
context -> anatomyData
context -> projectEntity
context -> assetEntity
instance -> asset
instance -> subset
instance -> family
Optional:
instance -> version
instance -> resolutionWidth
instance -> resolutionHeight
instance -> fps
Provides:
instance -> projectEntity
instance -> assetEntity
instance -> anatomyData
instance -> version
instance -> latestVersion
"""
import copy
import json
from avalon import io
import pyblish.api
class CollectInstanceAnatomyData(pyblish.api.InstancePlugin):
"""Fill templates with data needed for publish"""
order = pyblish.api.CollectorOrder + 0.49
label = "Collect instance anatomy data"
def process(self, instance):
# get all the stuff from the database
anatomy_data = copy.deepcopy(instance.context.data["anatomyData"])
project_entity = instance.context.data["projectEntity"]
context_asset_entity = instance.context.data["assetEntity"]
asset_name = instance.data["asset"]
# Check if asset name is the same as what is in context
# - they may be different, e.g. in NukeStudio
if context_asset_entity["name"] == asset_name:
asset_entity = context_asset_entity
else:
asset_entity = io.find_one({
"type": "asset",
"name": asset_name,
"parent": project_entity["_id"]
})
subset_name = instance.data["subset"]
version_number = instance.data.get("version")
latest_version = None
if asset_entity:
subset_entity = io.find_one({
"type": "subset",
"name": subset_name,
"parent": asset_entity["_id"]
})
if subset_entity is None:
self.log.debug("Subset entity does not exist yet.")
else:
version_entity = io.find_one(
{
"type": "version",
"parent": subset_entity["_id"]
},
sort=[("name", -1)]
)
if version_entity:
latest_version = version_entity["name"]
# If version is not specified for instance or context
if version_number is None:
# TODO we should be able to change default version by studio
# preferences (like start with version number `0`)
version_number = 1
# use latest version (+1) if already any exist
if latest_version is not None:
version_number += int(latest_version)
anatomy_updates = {
"asset": asset_name,
"family": instance.data["family"],
"subset": subset_name,
"version": version_number
}
task_name = instance.data.get("task")
if task_name:
anatomy_updates["task"] = task_name
        # Version is applied here per instance since it may differ from context
anatomy_data.update(anatomy_updates)
resolution_width = instance.data.get("resolutionWidth")
if resolution_width:
anatomy_data["resolution_width"] = resolution_width
resolution_height = instance.data.get("resolutionHeight")
if resolution_height:
anatomy_data["resolution_height"] = resolution_height
        fps = instance.data.get("fps")
        if fps:
            anatomy_data["fps"] = fps
instance.data["projectEntity"] = project_entity
instance.data["assetEntity"] = asset_entity
instance.data["anatomyData"] = anatomy_data
instance.data["latestVersion"] = latest_version
        # TODO should the version number be set here?
instance.data["version"] = version_number
self.log.info("Instance anatomy Data collected")
self.log.debug(json.dumps(anatomy_data, indent=4))
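The version rule above, isolated as a sketch (the function name is illustrative, not part of the plugin):

def resolve_version(explicit_version, latest_version):
    # an explicit version set on the instance wins
    if explicit_version is not None:
        return int(explicit_version)
    # otherwise continue after the latest published version, or start at 1
    if latest_version is None:
        return 1
    return int(latest_version) + 1

resolve_version(None, None)  # 1  (first publish)
resolve_version(None, 3)     # 4  (next after latest)
resolve_version(12, 3)       # 12 (explicit instance version wins)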


@ -1,24 +0,0 @@
"""
Requires:
None
Provides:
context -> projectData
"""
import pyblish.api
import pype.api as pype
class CollectProjectData(pyblish.api.ContextPlugin):
"""Collecting project data from avalon db"""
label = "Collect Project Data"
order = pyblish.api.CollectorOrder - 0.1
hosts = ["nukestudio"]
def process(self, context):
# get project data from avalon db
context.data["projectData"] = pype.get_project()["data"]
return


@ -0,0 +1,60 @@
"""
Requires:
context -> anatomy
context -> anatomyData
Provides:
instance -> publishDir
instance -> resourcesDir
"""
import os
import copy
import pyblish.api
from avalon import api
class CollectResourcesPath(pyblish.api.InstancePlugin):
"""Generate directory path where the files and resources will be stored"""
label = "Collect Resources Path"
order = pyblish.api.CollectorOrder + 0.495
def process(self, instance):
anatomy = instance.context.data["anatomy"]
template_data = copy.deepcopy(instance.data["anatomyData"])
# This is for cases of Deprecated anatomy without `folder`
# TODO remove when all clients have solved this issue
template_data.update({
"frame": "FRAME_TEMP",
"representation": "TEMP"
})
anatomy_filled = anatomy.format(template_data)
if "folder" in anatomy.templates["publish"]:
publish_folder = anatomy_filled["publish"]["folder"]
else:
# solve deprecated situation when `folder` key is not underneath
# `publish` anatomy
project_name = api.Session["AVALON_PROJECT"]
self.log.warning((
"Deprecation warning: Anatomy does not have set `folder`"
" key underneath `publish` (in global of for project `{}`)."
).format(project_name))
file_path = anatomy_filled["publish"]["path"]
# Directory
publish_folder = os.path.dirname(file_path)
publish_folder = os.path.normpath(publish_folder)
resources_folder = os.path.join(publish_folder, "resources")
instance.data["publishDir"] = publish_folder
instance.data["resourcesDir"] = resources_folder
self.log.debug("publishDir: \"{}\"".format(publish_folder))
self.log.debug("resourcesDir: \"{}\"".format(resources_folder))


@ -1,119 +0,0 @@
"""
Requires:
session -> AVALON_PROJECT
context -> anatomy (pypeapp.Anatomy)
instance -> subset
instance -> asset
instance -> family
Provides:
instance -> template
instance -> assumedTemplateData
instance -> assumedDestination
"""
import os
from avalon import io, api
import pyblish.api
class CollectTemplates(pyblish.api.InstancePlugin):
"""Fill templates with data needed for publish"""
order = pyblish.api.CollectorOrder + 0.1
label = "Collect and fill Templates"
hosts = ["maya", "nuke", "standalonepublisher"]
def process(self, instance):
# get all the stuff from the database
subset_name = instance.data["subset"]
asset_name = instance.data["asset"]
project_name = api.Session["AVALON_PROJECT"]
project = io.find_one(
{
"type": "project",
"name": project_name
},
projection={"config": True, "data": True}
)
template = project["config"]["template"]["publish"]
anatomy = instance.context.data['anatomy']
asset = io.find_one({
"type": "asset",
"name": asset_name,
"parent": project["_id"]
})
assert asset, ("No asset found by the name '{}' "
"in project '{}'".format(asset_name, project_name))
silo = asset.get('silo')
subset = io.find_one({
"type": "subset",
"name": subset_name,
"parent": asset["_id"]
})
# assume there is no version yet, we start at `1`
version = None
version_number = 1
if subset is not None:
version = io.find_one(
{
"type": "version",
"parent": subset["_id"]
},
sort=[("name", -1)]
)
# if there is a subset there ought to be version
if version is not None:
version_number += int(version["name"])
hierarchy = asset['data']['parents']
if hierarchy:
# hierarchy = os.path.sep.join(hierarchy)
hierarchy = os.path.join(*hierarchy)
else:
hierarchy = ""
template_data = {"root": api.Session["AVALON_PROJECTS"],
"project": {"name": project_name,
"code": project['data']['code']},
"silo": silo,
"family": instance.data['family'],
"asset": asset_name,
"subset": subset_name,
"version": version_number,
"hierarchy": hierarchy.replace("\\", "/"),
"representation": "TEMP"}
# Add datetime data to template data
datetime_data = instance.context.data.get("datetimeData") or {}
template_data.update(datetime_data)
resolution_width = instance.data.get("resolutionWidth")
resolution_height = instance.data.get("resolutionHeight")
fps = instance.data.get("fps")
if resolution_width:
template_data["resolution_width"] = resolution_width
if resolution_width:
template_data["resolution_height"] = resolution_height
if resolution_width:
template_data["fps"] = fps
instance.data["template"] = template
instance.data["assumedTemplateData"] = template_data
# We take the parent folder of representation 'filepath'
instance.data["assumedDestination"] = os.path.dirname(
(anatomy.format(template_data))["publish"]["path"]
)
self.log.info("Assumed Destination has been created...")
self.log.debug("__ assumedTemplateData: `{}`".format(instance.data["assumedTemplateData"]))
self.log.debug("__ template: `{}`".format(instance.data["template"]))


@ -32,29 +32,31 @@ class ExtractBurnin(pype.api.Extractor):
frame_end = int(instance.data.get("frameEnd") or 1)
duration = frame_end - frame_start + 1
if "slate.farm" in instance.data["families"]:
frame_start += 1
duration -= 1
prep_data = copy.deepcopy(instance.data["anatomyData"])
prep_data.update({
"frame_start": frame_start,
"frame_end": frame_end,
"duration": duration,
"version": int(version),
"comment": instance.context.data.get("comment", ""),
"intent": instance.context.data.get("intent", "")
        })
slate_frame_start = frame_start
slate_frame_end = frame_end
slate_duration = duration
# exception for slate workflow
if "slate" in instance.data["families"]:
slate_frame_start = frame_start - 1
slate_frame_end = frame_end
slate_duration = slate_frame_end - slate_frame_start + 1
prep_data.update({
"slate_frame_start": slate_frame_start,
"slate_frame_end": slate_frame_end,
"slate_duration": slate_duration
})
# get anatomy project
anatomy = instance.context.data['anatomy']
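Worked numbers for the slate handling above (the frame range is invented):

frame_start, frame_end = 1001, 1100
duration = frame_end - frame_start + 1              # 100
# "slate" family: one slate frame is prepended
slate_frame_start = frame_start - 1                 # 1000
slate_duration = frame_end - slate_frame_start + 1  # 101
# "slate.farm": the slate frame already exists on the farm, so it is
# skipped instead (frame_start += 1 -> 1002, duration -= 1 -> 99)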


@ -41,63 +41,66 @@ class ExtractJpegEXR(pyblish.api.InstancePlugin):
for repre in representations:
self.log.debug(repre)
            valid = 'review' in repre['tags'] or "thumb-nuke" in repre['tags']
            if not valid:
                continue

            if not isinstance(repre['files'], list):
                continue

            input_file = repre['files'][0]

            # input_file = (
            #     collections[0].format('{head}{padding}{tail}') % start
            # )
            full_input_path = os.path.join(stagingdir, input_file)
            self.log.info("input {}".format(full_input_path))

            filename = os.path.splitext(input_file)[0]
            if not filename.endswith('.'):
                filename += "."
            jpeg_file = filename + "jpg"
            full_output_path = os.path.join(stagingdir, jpeg_file)

            self.log.info("output {}".format(full_output_path))

            config_data = instance.context.data['output_repre_config']

            proj_name = os.environ.get('AVALON_PROJECT', '__default__')
            profile = config_data.get(proj_name, config_data['__default__'])

            jpeg_items = []
            jpeg_items.append(
                os.path.join(os.environ.get("FFMPEG_PATH"), "ffmpeg"))
            # override file if already exists
            jpeg_items.append("-y")
            # use same input args like with mov
            jpeg_items.extend(profile.get('input', []))
            # input file
            jpeg_items.append("-i {}".format(full_input_path))
            # output file
            jpeg_items.append(full_output_path)

            subprocess_jpeg = " ".join(jpeg_items)

            # run subprocess
            self.log.debug("{}".format(subprocess_jpeg))
            pype.api.subprocess(subprocess_jpeg)

            if "representations" not in instance.data:
                instance.data["representations"] = []

            representation = {
                'name': 'thumbnail',
                'ext': 'jpg',
                'files': jpeg_file,
                "stagingDir": stagingdir,
                "thumbnail": True,
                "tags": ['thumbnail']
            }

            # adding representation
            self.log.debug("Adding: {}".format(representation))
            representations_new.append(representation)
instance.data["representations"] = representations_new


@ -43,320 +43,328 @@ class ExtractReview(pyblish.api.InstancePlugin):
# filter out mov and img sequences
representations_new = representations[:]
for repre in representations:
if repre['ext'] not in self.ext_filter:
continue
tags = repre.get("tags", [])
if "thumbnail" in tags:
continue
self.log.info("Try repre: {}".format(repre))
if "review" not in tags:
continue
staging_dir = repre["stagingDir"]
# iterating preset output profiles
for name, profile in output_profiles.items():
repre_new = repre.copy()
ext = profile.get("ext", None)
p_tags = profile.get('tags', [])
self.log.info("p_tags: `{}`".format(p_tags))
# adding control for presets to be sequence
# or single file
is_sequence = ("sequence" in p_tags) and (ext in (
"png", "jpg", "jpeg"))
self.log.debug("Profile name: {}".format(name))
if not ext:
ext = "mov"
self.log.warning(
str("`ext` attribute not in output "
"profile. Setting to default ext: `mov`"))
self.log.debug(
"instance.families: {}".format(
instance.data['families']))
self.log.debug(
"profile.families: {}".format(profile['families']))
profile_family_check = False
for _family in profile['families']:
if _family in instance.data['families']:
profile_family_check = True
break
if not profile_family_check:
continue
if isinstance(repre["files"], list):
collections, remainder = clique.assemble(
repre["files"])
full_input_path = os.path.join(
staging_dir, collections[0].format(
'{head}{padding}{tail}')
)
filename = collections[0].format('{head}')
if filename.endswith('.'):
filename = filename[:-1]
else:
full_input_path = os.path.join(
staging_dir, repre["files"])
filename = repre["files"].split(".")[0]
repr_file = filename + "_{0}.{1}".format(name, ext)
full_output_path = os.path.join(
staging_dir, repr_file)
if is_sequence:
filename_base = filename + "_{0}".format(name)
repr_file = filename_base + ".%08d.{0}".format(
ext)
repre_new["sequence_file"] = repr_file
full_output_path = os.path.join(
staging_dir, filename_base, repr_file)
self.log.info("input {}".format(full_input_path))
self.log.info("output {}".format(full_output_path))
new_tags = [x for x in tags if x != "delete"]
# add families
[instance.data["families"].append(t)
for t in p_tags
if t not in instance.data["families"]]
# add to
[new_tags.append(t) for t in p_tags
if t not in new_tags]
self.log.info("new_tags: `{}`".format(new_tags))
input_args = []
# overrides output file
input_args.append("-y")
# preset's input data
input_args.extend(profile.get('input', []))
# necessary input data
# adds start arg only if image sequence
if isinstance(repre["files"], list):
input_args.append(
"-start_number {0} -framerate {1}".format(
start_frame, fps))
input_args.append("-i {}".format(full_input_path))
for audio in instance.data.get("audio", []):
offset_frames = (
instance.data.get("startFrameReview") -
audio["offset"]
)
offset_seconds = offset_frames / fps
if offset_seconds > 0:
input_args.append("-ss")
else:
input_args.append("-itsoffset")
input_args.append(str(abs(offset_seconds)))
input_args.extend(
["-i", audio["filename"]]
)
# Need to merge audio if there are more
# than 1 input.
if len(instance.data["audio"]) > 1:
input_args.extend(
[
"-filter_complex",
"amerge",
"-ac",
"2"
]
)
output_args = []
codec_args = profile.get('codec', [])
output_args.extend(codec_args)
# preset's output data
output_args.extend(profile.get('output', []))
# defining image ratios
resolution_ratio = float(resolution_width / (
resolution_height * pixel_aspect))
delivery_ratio = float(to_width) / float(to_height)
self.log.debug(resolution_ratio)
self.log.debug(delivery_ratio)
# get scale factor
scale_factor = to_height / (
resolution_height * pixel_aspect)
self.log.debug(scale_factor)
# letter_box
lb = profile.get('letter_box', 0)
if lb != 0:
                    ffmpeg_width = to_width
                    ffmpeg_height = to_height
                    if "reformat" not in p_tags:
                        lb /= pixel_aspect
                        if resolution_ratio != delivery_ratio:
                            ffmpeg_width = resolution_width
                            ffmpeg_height = int(
                                resolution_height * pixel_aspect)
                    else:
                        if resolution_ratio != delivery_ratio:
                            lb /= scale_factor
                        else:
                            lb /= pixel_aspect

                    output_args.append(str(
                        "-filter:v scale={0}x{1}:flags=lanczos,"
                        "setsar=1,drawbox=0:0:iw:"
                        "round((ih-(iw*(1/{2})))/2):t=fill:"
                        "c=black,drawbox=0:ih-round((ih-(iw*("
                        "1/{2})))/2):iw:round((ih-(iw*(1/{2})))"
                        "/2):t=fill:c=black").format(
                            ffmpeg_width, ffmpeg_height, lb))
# In case audio is longer than video.
output_args.append("-shortest")
# output filename
output_args.append(full_output_path)
self.log.debug(
"__ pixel_aspect: `{}`".format(pixel_aspect))
self.log.debug(
"__ resolution_width: `{}`".format(
resolution_width))
self.log.debug(
"__ resolution_height: `{}`".format(
resolution_height))
                # scaling non-square pixels and 1920 width
if "reformat" in p_tags:
if resolution_ratio < delivery_ratio:
self.log.debug("lower then delivery")
width_scale = int(to_width * scale_factor)
width_half_pad = int((
to_width - width_scale)/2)
height_scale = to_height
height_half_pad = 0
else:
self.log.debug("heigher then delivery")
width_scale = to_width
width_half_pad = 0
scale_factor = float(to_width) / float(
resolution_width)
self.log.debug(scale_factor)
height_scale = int(
resolution_height * scale_factor)
height_half_pad = int(
(to_height - height_scale)/2)
self.log.debug(
"__ width_scale: `{}`".format(width_scale))
self.log.debug(
"__ width_half_pad: `{}`".format(
width_half_pad))
self.log.debug(
"__ height_scale: `{}`".format(
height_scale))
self.log.debug(
"__ height_half_pad: `{}`".format(
height_half_pad))
scaling_arg = str(
"scale={0}x{1}:flags=lanczos,"
"pad={2}:{3}:{4}:{5}:black,setsar=1"
).format(width_scale, height_scale,
to_width, to_height,
width_half_pad,
height_half_pad
)
vf_back = self.add_video_filter_args(
output_args, scaling_arg)
# add it to output_args
output_args.insert(0, vf_back)
# baking lut file application
lut_path = instance.data.get("lutPath")
if lut_path and ("bake-lut" in p_tags):
                    # removing gamma info as it is all baked in the lut
gamma = next((g for g in input_args
if "-gamma" in g), None)
if gamma:
input_args.remove(gamma)
# create lut argument
lut_arg = "lut3d=file='{}'".format(
lut_path.replace(
"\\", "/").replace(":/", "\\:/")
)
lut_arg += ",colormatrix=bt601:bt709"
vf_back = self.add_video_filter_args(
output_args, lut_arg)
# add it to output_args
output_args.insert(0, vf_back)
self.log.info("Added Lut to ffmpeg command")
self.log.debug(
"_ output_args: `{}`".format(output_args))
if is_sequence:
stg_dir = os.path.dirname(full_output_path)
if not os.path.exists(stg_dir):
self.log.debug(
"creating dir: {}".format(stg_dir))
os.mkdir(stg_dir)
mov_args = [
os.path.join(
os.environ.get(
"FFMPEG_PATH",
""), "ffmpeg"),
" ".join(input_args),
" ".join(output_args)
]
subprcs_cmd = " ".join(mov_args)
# run subprocess
self.log.debug("Executing: {}".format(subprcs_cmd))
output = pype.api.subprocess(subprcs_cmd)
self.log.debug("Output: {}".format(output))
# create representation data
repre_new.update({
'name': name,
'ext': ext,
'files': repr_file,
"tags": new_tags,
"outputName": name,
"codec": codec_args,
"_profile": profile,
"resolutionHeight": resolution_height,
"resolutionWidth": resolution_width,
})
if is_sequence:
repre_new.update({
"stagingDir": stg_dir,
"files": os.listdir(stg_dir)
})
if repre_new.get('preview'):
repre_new.pop("preview")
if repre_new.get('thumbnail'):
repre_new.pop("thumbnail")
# adding representation
self.log.debug("Adding: {}".format(repre_new))
representations_new.append(repre_new)
        representations_new = [
            repre for repre in representations_new
            if "delete" not in repre.get("tags", [])
        ]
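Worked numbers for the "reformat" scaling branch above (source 2048x858, square pixels, 1920x1080 delivery; all values invented):

resolution_ratio = 2048 / (858 * 1.0)    # ~2.39, wider than delivery
delivery_ratio = 1920.0 / 1080.0         # ~1.78
scale_factor = 1920.0 / 2048.0           # 0.9375 (width-driven branch)
height_scale = int(858 * 0.9375)         # 804
height_half_pad = int((1080 - 804) / 2)  # 138
# resulting filter:
# scale=1920x804:flags=lanczos,pad=1920:1080:0:138:black,setsar=1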


@ -1,417 +0,0 @@
import os
import logging
import shutil
import errno
import pyblish.api
from avalon import api, io
from avalon.vendor import filelink
log = logging.getLogger(__name__)
class IntegrateAsset(pyblish.api.InstancePlugin):
"""Resolve any dependency issies
This plug-in resolves any paths which, if not updated might break
the published file.
The order of families is important, when working with lookdev you want to
first publish the texture, update the texture paths in the nodes and then
publish the shading network. Same goes for file dependent assets.
"""
label = "Integrate Asset"
order = pyblish.api.IntegratorOrder
families = []
exclude_families = ["clip"]
def process(self, instance):
if [ef for ef in self.exclude_families
if instance.data["family"] in ef]:
return
self.register(instance)
self.log.info("Integrating Asset in to the database ...")
if instance.data.get('transfer', True):
self.integrate(instance)
def register(self, instance):
# Required environment variables
PROJECT = api.Session["AVALON_PROJECT"]
ASSET = instance.data.get("asset") or api.Session["AVALON_ASSET"]
LOCATION = api.Session["AVALON_LOCATION"]
context = instance.context
# Atomicity
#
# Guarantee atomic publishes - each asset contains
# an identical set of members.
# __
# / o
# / \
# | o |
# \ /
# o __/
#
assert all(result["success"] for result in context.data["results"]), (
"Atomicity not held, aborting.")
# Assemble
#
# |
# v
# ---> <----
# ^
# |
#
stagingdir = instance.data.get("stagingDir")
assert stagingdir, ("Incomplete instance \"%s\": "
"Missing reference to staging area." % instance)
# extra check if stagingDir actually exists and is available
self.log.debug("Establishing staging directory @ %s" % stagingdir)
# Ensure at least one file is set up for transfer in staging dir.
files = instance.data.get("files", [])
assert files, "Instance has no files to transfer"
assert isinstance(files, (list, tuple)), (
"Instance 'files' must be a list, got: {0}".format(files)
)
project = io.find_one({"type": "project"})
asset = io.find_one({
"type": "asset",
"name": ASSET,
"parent": project["_id"]
})
assert all([project, asset]), ("Could not find current project or "
"asset '%s'" % ASSET)
subset = self.get_subset(asset, instance)
# get next version
latest_version = io.find_one(
{
"type": "version",
"parent": subset["_id"]
},
{"name": True},
sort=[("name", -1)]
)
next_version = 1
if latest_version is not None:
next_version += latest_version["name"]
self.log.info("Verifying version from assumed destination")
assumed_data = instance.data["assumedTemplateData"]
assumed_version = assumed_data["version"]
if assumed_version != next_version:
raise AttributeError("Assumed version 'v{0:03d}' does not match"
"next version in database "
"('v{1:03d}')".format(assumed_version,
next_version))
self.log.debug("Next version: v{0:03d}".format(next_version))
version_data = self.create_version_data(context, instance)
version = self.create_version(subset=subset,
version_number=next_version,
locations=[LOCATION],
data=version_data)
self.log.debug("Creating version ...")
version_id = io.insert_one(version).inserted_id
# Write to disk
# _
# | |
# _| |_
# ____\ /
# |\ \ / \
# \ \ v \
# \ \________.
# \|________|
#
root = api.registered_root()
hierarchy = ""
parents = io.find_one({
"type": 'asset',
"name": ASSET
})['data']['parents']
if parents and len(parents) > 0:
# hierarchy = os.path.sep.join(hierarchy)
hierarchy = os.path.join(*parents)
template_data = {"root": root,
"project": {"name": PROJECT,
"code": project['data']['code']},
"silo": asset['silo'],
"asset": ASSET,
"family": instance.data['family'],
"subset": subset["name"],
"version": int(version["name"]),
"hierarchy": hierarchy}
# template_publish = project["config"]["template"]["publish"]
anatomy = instance.context.data['anatomy']
# Find the representations to transfer amongst the files
# Each should be a single representation (as such, a single extension)
representations = []
destination_list = []
if 'transfers' not in instance.data:
instance.data['transfers'] = []
for files in instance.data["files"]:
# Collection
# _______
# |______|\
# | |\|
# | ||
# | ||
# | ||
# |_______|
#
if isinstance(files, list):
collection = files
# Assert that each member has identical suffix
_, ext = os.path.splitext(collection[0])
assert all(ext == os.path.splitext(name)[1]
for name in collection), (
"Files had varying suffixes, this is a bug"
)
assert not any(os.path.isabs(name) for name in collection)
template_data["representation"] = ext[1:]
for fname in collection:
src = os.path.join(stagingdir, fname)
anatomy_filled = anatomy.format(template_data)
dst = anatomy_filled["publish"]["path"]
instance.data["transfers"].append([src, dst])
template = anatomy.templates["publish"]["path"]
else:
# Single file
# _______
# | |\
# | |
# | |
# | |
# |_______|
#
fname = files
assert not os.path.isabs(fname), (
"Given file name is a full path"
)
_, ext = os.path.splitext(fname)
template_data["representation"] = ext[1:]
src = os.path.join(stagingdir, fname)
anatomy_filled = anatomy.format(template_data)
dst = anatomy_filled["publish"]["path"]
instance.data["transfers"].append([src, dst])
template = anatomy.templates["publish"]["path"]
representation = {
"schema": "pype:representation-2.0",
"type": "representation",
"parent": version_id,
"name": ext[1:],
"data": {'path': dst, 'template': template},
"dependencies": instance.data.get("dependencies", "").split(),
# Imprint shortcut to context
# for performance reasons.
"context": {
"root": root,
"project": {"name": PROJECT,
"code": project['data']['code']},
'task': api.Session["AVALON_TASK"],
"silo": asset['silo'],
"asset": ASSET,
"family": instance.data['family'],
"subset": subset["name"],
"version": version["name"],
"hierarchy": hierarchy,
"representation": ext[1:]
}
}
destination_list.append(dst)
instance.data['destination_list'] = destination_list
representations.append(representation)
self.log.info("Registering {} items".format(len(representations)))
io.insert_many(representations)
def integrate(self, instance):
"""Move the files
Through `instance.data["transfers"]`
Args:
instance: the instance to integrate
"""
transfers = instance.data.get("transfers", list())
for src, dest in transfers:
self.log.info("Copying file .. {} -> {}".format(src, dest))
self.copy_file(src, dest)
# Produce hardlinked copies
# Note: hardlink can only be produced between two files on the same
# server/disk and editing one of the two will edit both files at once.
# As such it is recommended to only make hardlinks between static files
# to ensure publishes remain safe and non-edited.
hardlinks = instance.data.get("hardlinks", list())
for src, dest in hardlinks:
self.log.info("Hardlinking file .. {} -> {}".format(src, dest))
self.hardlink_file(src, dest)
def copy_file(self, src, dst):
""" Copy given source to destination
Arguments:
src (str): the source file which needs to be copied
            dst (str): the destination of the source file
Returns:
None
"""
dirname = os.path.dirname(dst)
try:
os.makedirs(dirname)
except OSError as e:
if e.errno == errno.EEXIST:
pass
else:
self.log.critical("An unexpected error occurred.")
raise
shutil.copy(src, dst)
def hardlink_file(self, src, dst):
dirname = os.path.dirname(dst)
try:
os.makedirs(dirname)
except OSError as e:
if e.errno == errno.EEXIST:
pass
else:
self.log.critical("An unexpected error occurred.")
raise
filelink.create(src, dst, filelink.HARDLINK)
def get_subset(self, asset, instance):
subset = io.find_one({
"type": "subset",
"parent": asset["_id"],
"name": instance.data["subset"]
})
if subset is None:
subset_name = instance.data["subset"]
self.log.info("Subset '%s' not found, creating.." % subset_name)
_id = io.insert_one({
"schema": "avalon-core:subset-2.0",
"type": "subset",
"name": subset_name,
"data": {},
"parent": asset["_id"]
}).inserted_id
subset = io.find_one({"_id": _id})
return subset
def create_version(self, subset, version_number, locations, data=None):
""" Copy given source to destination
Args:
subset (dict): the registered subset of the asset
version_number (int): the version number
locations (list): the currently registered locations
Returns:
dict: collection of data to create a version
"""
# Imprint currently registered location
version_locations = [location for location in locations if
location is not None]
return {"schema": "avalon-core:version-2.0",
"type": "version",
"parent": subset["_id"],
"name": version_number,
"locations": version_locations,
"data": data}
def create_version_data(self, context, instance):
"""Create the data collection for the version
Args:
context: the current context
instance: the current instance being published
Returns:
dict: the required information with instance.data as key
"""
families = []
current_families = instance.data.get("families", list())
instance_family = instance.data.get("family", None)
if instance_family is not None:
families.append(instance_family)
families += current_families
self.log.debug("Registered root: {}".format(api.registered_root()))
# create relative source path for DB
try:
source = instance.data['source']
except KeyError:
source = context.data["currentFile"]
relative_path = os.path.relpath(source, api.registered_root())
source = os.path.join("{root}", relative_path).replace("\\", "/")
self.log.debug("Source: {}".format(source))
version_data = {"families": families,
"time": context.data["time"],
"author": context.data["user"],
"source": source,
"comment": context.data.get("comment"),
"machine": context.data.get("machine"),
"fps": context.data.get("fps")}
        # Include optional data if present in the instance
optionals = [
"frameStart", "frameEnd", "step", "handles", "sourceHashes"
]
for key in optionals:
if key in instance.data:
version_data[key] = instance.data[key]
return version_data


@ -1,147 +0,0 @@
import pyblish.api
import os
from avalon import io, api
class IntegrateAssumedDestination(pyblish.api.InstancePlugin):
"""Generate the assumed destination path where the file will be stored"""
label = "Integrate Assumed Destination"
order = pyblish.api.IntegratorOrder - 0.05
families = ["clip", "projectfile", "plate"]
def process(self, instance):
anatomy = instance.context.data['anatomy']
self.create_destination_template(instance, anatomy)
template_data = instance.data["assumedTemplateData"]
# self.log.info(anatomy.templates)
anatomy_filled = anatomy.format(template_data)
# self.log.info(anatomy_filled)
mock_template = anatomy_filled["publish"]["path"]
# For now assume resources end up in a "resources" folder in the
# published folder
mock_destination = os.path.join(os.path.dirname(mock_template),
"resources")
# Clean the path
mock_destination = os.path.abspath(
os.path.normpath(mock_destination)).replace("\\", "/")
# Define resource destination and transfers
resources = instance.data.get("resources", list())
transfers = instance.data.get("transfers", list())
for resource in resources:
# Add destination to the resource
source_filename = os.path.basename(
resource["source"]).replace("\\", "/")
destination = os.path.join(mock_destination, source_filename)
# Force forward slashes to fix issue with software unable
# to work correctly with backslashes in specific scenarios
# (e.g. escape characters in PLN-151 V-Ray UDIM)
destination = destination.replace("\\", "/")
resource['destination'] = destination
# Collect transfers for the individual files of the resource
# e.g. all individual files of a cache or UDIM textures.
files = resource['files']
for fsrc in files:
fname = os.path.basename(fsrc)
fdest = os.path.join(
mock_destination, fname).replace("\\", "/")
transfers.append([fsrc, fdest])
instance.data["resources"] = resources
instance.data["transfers"] = transfers
def create_destination_template(self, instance, anatomy):
"""Create a filepath based on the current data available
Example template:
{root}/{project}/{asset}/publish/{subset}/v{version:0>3}/
{subset}.{representation}
Args:
instance: the instance to publish
Returns:
file path (str)
"""
# get all the stuff from the database
subset_name = instance.data["subset"]
self.log.info(subset_name)
asset_name = instance.data["asset"]
project_name = api.Session["AVALON_PROJECT"]
a_template = anatomy.templates
project = io.find_one(
{"type": "project", "name": project_name},
projection={"config": True, "data": True}
)
template = a_template['publish']['path']
# anatomy = instance.context.data['anatomy']
asset = io.find_one({
"type": "asset",
"name": asset_name,
"parent": project["_id"]
})
assert asset, ("No asset found by the name '{}' "
"in project '{}'".format(asset_name, project_name))
subset = io.find_one({
"type": "subset",
"name": subset_name,
"parent": asset["_id"]
})
# assume there is no version yet, we start at `1`
version = None
version_number = 1
if subset is not None:
version = io.find_one(
{
"type": "version",
"parent": subset["_id"]
},
sort=[("name", -1)]
)
# if there is a subset there ought to be version
if version is not None:
version_number += version["name"]
if instance.data.get('version'):
version_number = int(instance.data.get('version'))
padding = int(a_template['render']['padding'])
hierarchy = asset['data']['parents']
if hierarchy:
# hierarchy = os.path.sep.join(hierarchy)
hierarchy = "/".join(hierarchy)
template_data = {"root": api.Session["AVALON_PROJECTS"],
"project": {"name": project_name,
"code": project['data']['code']},
"family": instance.data['family'],
"asset": asset_name,
"subset": subset_name,
"frame": ('#' * padding),
"version": version_number,
"hierarchy": hierarchy,
"representation": "TEMP"}
instance.data["assumedTemplateData"] = template_data
self.log.info(template_data)
instance.data["template"] = template


@ -2,8 +2,11 @@ import os
from os.path import getsize
import logging
import sys
import copy
import clique
import errno
from pymongo import DeleteOne, InsertOne
import pyblish.api
from avalon import api, io
from avalon.vendor import filelink
@ -100,144 +103,148 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
def register(self, instance):
anatomy_data = instance.data["anatomyData"]
io.install()
context = instance.context
# Atomicity
#
# Guarantee atomic publishes - each asset contains
# an identical set of members.
# __
# / o
# / \
# | o |
# \ /
# o __/
#
# for result in context.data["results"]:
# if not result["success"]:
# self.log.debug(result)
# exc_type, exc_value, exc_traceback = result["error_info"]
# extracted_traceback = traceback.extract_tb(exc_traceback)[-1]
# self.log.debug(
# "Error at line {}: \"{}\"".format(
# extracted_traceback[1], result["error"]
# )
# )
# assert all(result["success"] for result in context.data["results"]),(
# "Atomicity not held, aborting.")
# Assemble
#
# |
# v
# ---> <----
# ^
# |
#
project_entity = instance.data["projectEntity"]
context_asset_name = context.data["assetEntity"]["name"]
asset_name = instance.data["asset"]
asset_entity = instance.data.get("assetEntity")
if not asset_entity or asset_entity["name"] != context_asset_name:
asset_entity = io.find_one({
"type": "asset",
"name": asset_name,
"parent": project_entity["_id"]
})
assert asset_entity, (
"No asset found by the name \"{0}\" in project \"{1}\""
).format(asset_name, project_entity["name"])
instance.data["assetEntity"] = asset_entity
# update anatomy data with asset specific keys
        # - name should already be set
hierarchy = ""
parents = asset_entity["data"]["parents"]
if parents:
hierarchy = "/".join(parents)
anatomy_data["hierarchy"] = hierarchy
task_name = instance.data.get("task")
if task_name:
anatomy_data["task"] = task_name
stagingdir = instance.data.get("stagingDir")
if not stagingdir:
self.log.info((
"{0} is missing reference to staging directory."
" Will try to get it from representation."
).format(instance))
else:
self.log.debug(
"Establishing staging directory @ {0}".format(stagingdir)
)
# Ensure at least one file is set up for transfer in staging dir.
repres = instance.data.get("representations", None)
repres = instance.data.get("representations")
assert repres, "Instance has no files to transfer"
assert isinstance(repres, (list, tuple)), (
"Instance 'files' must be a list, got: {0}".format(repres)
"Instance 'files' must be a list, got: {0} {1}".format(
str(type(repres)), str(repres)
)
)
subset = self.get_subset(asset_entity, instance)
version_number = instance.data["version"]
self.log.debug("Next version: v{}".format(version_number))
version_data = self.create_version_data(context, instance)
version_data_instance = instance.data.get('versionData')
if version_data_instance:
version_data.update(version_data_instance)
version = self.create_version(subset=subset,
version_number=next_version,
locations=[LOCATION],
data=version_data)
# TODO rename method from `create_version` to
# `prepare_version` or similar...
version = self.create_version(
subset=subset,
version_number=version_number,
data=version_data
)
self.log.debug("Creating version ...")
new_repre_names_low = [_repre["name"].lower() for _repre in repres]
existing_version = io.find_one({
'type': 'version',
'parent': subset["_id"],
'name': next_version
'name': version_number
})
if existing_version is None:
version_id = io.insert_one(version).inserted_id
else:
# Check if instance has `append` mode set, which causes only
# duplicated representations to be archived
append_repres = instance.data.get("append", False)
# Update version data
# TODO query by _id and
io.update_many({
'type': 'version',
'parent': subset["_id"],
'name': next_version
}, {'$set': version}
)
'name': version_number
}, {
'$set': version
})
version_id = existing_version['_id']
# Find representations of existing version and archive them
current_repres = list(io.find({
"type": "representation",
"parent": version_id
}))
bulk_writes = []
for repre in current_repres:
if append_repres:
# archive only duplicated representations
if repre["name"].lower() not in new_repre_names_low:
continue
# Representation must change type,
# `_id` must be stored under another key and replaced with a new one
# - that is because new representations should reuse the same ID
repre_id = repre["_id"]
bulk_writes.append(DeleteOne({"_id": repre_id}))
repre["orig_id"] = repre_id
repre["_id"] = io.ObjectId()
repre["type"] = "archived_representation"
bulk_writes.append(InsertOne(repre))
# bulk updates
if bulk_writes:
io._database[io.Session["AVALON_PROJECT"]].bulk_write(
bulk_writes
)
existing_repres = list(io.find({
"parent": version_id,
"type": "archived_representation"
}))
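A minimal sketch of the archive-on-republish pattern above, assuming a pymongo collection `coll`; live representations are deleted and re-inserted as `archived_representation`, keeping their original id in `orig_id` so a re-publish can reuse it:

from bson.objectid import ObjectId
from pymongo import DeleteOne, InsertOne

def archive_representations(coll, version_id):
    bulk_writes = []
    for repre in coll.find({"type": "representation", "parent": version_id}):
        repre_id = repre["_id"]
        # Drop the live document...
        bulk_writes.append(DeleteOne({"_id": repre_id}))
        # ...and re-insert it under a fresh id as an archived copy,
        # keeping the original id for the replacing representation.
        repre["orig_id"] = repre_id
        repre["_id"] = ObjectId()
        repre["type"] = "archived_representation"
        bulk_writes.append(InsertOne(repre))
    if bulk_writes:
        coll.bulk_write(bulk_writes)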
instance.data['version'] = version['name']
# Write to disk
# _
# | |
# _| |_
# ____\ /
# |\ \ / \
# \ \ v \
# \ \________.
# \|________|
#
root = api.registered_root()
hierarchy = ""
parents = io.find_one({
"type": 'asset',
"name": ASSET
})['data']['parents']
if parents and len(parents) > 0:
# hierarchy = os.path.sep.join(hierarchy)
hierarchy = os.path.join(*parents)
intent = context.data.get("intent")
if intent is not None:
anatomy_data["intent"] = intent
anatomy = instance.context.data['anatomy']
@ -250,31 +257,10 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
instance.data['transfers'] = []
for idx, repre in enumerate(instance.data["representations"]):
# Collection
# _______
# |______|\
# | |\|
# | ||
# | ||
# | ||
# |_______|
#
# create template data for Anatomy
template_data = {"root": root,
"project": {"name": PROJECT,
"code": project['data']['code']},
"silo": asset.get('silo'),
"task": TASK,
"asset": ASSET,
"family": instance.data['family'],
"subset": subset["name"],
"version": int(version["name"]),
"hierarchy": hierarchy}
# Add datetime data to template data
datetime_data = context.data.get("datetimeData") or {}
template_data.update(datetime_data)
template_data = copy.deepcopy(anatomy_data)
if intent is not None:
template_data["intent"] = intent
resolution_width = repre.get("resolutionWidth")
resolution_height = repre.get("resolutionHeight")
@ -292,6 +278,7 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
stagingdir = repre['stagingDir']
if repre.get('anatomy_template'):
template_name = repre['anatomy_template']
template = os.path.normpath(
anatomy.templates[template_name]["path"])
@ -322,7 +309,6 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
template_filled = anatomy_filled[template_name]["path"]
if repre_context is None:
repre_context = template_filled.used_values
test_dest_files.append(
os.path.normpath(template_filled)
)
@ -338,16 +324,21 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
index_frame_start = None
if repre.get("frameStart"):
frame_start_padding = anatomy.templates["render"]["padding"]
frame_start_padding = (
anatomy.templates["render"]["padding"]
)
index_frame_start = int(repre.get("frameStart"))
# exception for slate workflow
if "slate" in instance.data["families"]:
if index_frame_start and "slate" in instance.data["families"]:
index_frame_start -= 1
dst_padding_exp = src_padding_exp
dst_start_frame = None
for i in src_collection.indexes:
# TODO 1.) do not count padding in each index iteration
# 2.) do not count dst_padding from src_padding before
# index_frame_start check
src_padding = src_padding_exp % i
src_file_name = "{0}{1}{2}".format(
@ -375,7 +366,6 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
if not dst_start_frame:
dst_start_frame = dst_padding
dst = "{0}{1}{2}".format(
dst_head,
dst_start_frame,
@ -419,8 +409,21 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
continue
repre_context[key] = template_data[key]
# Use previous representation's id if there are any
repre_id = None
repre_name_low = repre["name"].lower()
for _repre in existing_repres:
# NOTE should we check lowered names?
if repre_name_low == _repre["name"]:
repre_id = _repre["orig_id"]
break
# Create a new id if no existing representation matches
if repre_id is None:
repre_id = io.ObjectId()
representation = {
"_id": io.ObjectId(),
"_id": repre_id,
"schema": "pype:representation-2.0",
"type": "representation",
"parent": version_id,
@ -437,7 +440,9 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
representation["context"]["output"] = repre['outputName']
if sequence_repre and repre.get("frameStart"):
representation['context']['frame'] = src_padding_exp % int(repre.get("frameStart"))
representation['context']['frame'] = (
src_padding_exp % int(repre.get("frameStart"))
)
self.log.debug("__ representation: {}".format(representation))
destination_list.append(dst)
@ -446,6 +451,13 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
representations.append(representation)
self.log.debug("__ representations: {}".format(representations))
# Remove old representations if there are any (before insertion of new)
if existing_repres:
repre_ids_to_remove = []
for repre in existing_repres:
repre_ids_to_remove.append(repre["_id"])
io.delete_many({"_id": {"$in": repre_ids_to_remove}})
self.log.debug("__ representations: {}".format(representations))
for rep in instance.data["representations"]:
self.log.debug("__ represNAME: {}".format(rep['name']))
@ -547,14 +559,14 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
filelink.create(src, dst, filelink.HARDLINK)
def get_subset(self, asset, instance):
subset_name = instance.data["subset"]
subset = io.find_one({
"type": "subset",
"parent": asset["_id"],
"name": instance.data["subset"]
"name": subset_name
})
if subset is None:
subset_name = instance.data["subset"]
self.log.info("Subset '%s' not found, creating.." % subset_name)
self.log.debug("families. %s" % instance.data.get('families'))
self.log.debug(
@ -583,26 +595,21 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
return subset
def create_version(self, subset, version_number, locations, data=None):
def create_version(self, subset, version_number, data=None):
""" Copy given source to destination
Args:
subset (dict): the registered subset of the asset
version_number (int): the version number
locations (list): the currently registered locations
Returns:
dict: collection of data to create a version
"""
# Imprint currently registered location
version_locations = [location for location in locations if
location is not None]
return {"schema": "pype:version-3.0",
"type": "version",
"parent": subset["_id"],
"name": version_number,
"locations": version_locations,
"data": data}
def create_version_data(self, context, instance):
@ -645,6 +652,10 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
"fps": context.data.get(
"fps", instance.data.get("fps"))}
intent = context.data.get("intent")
if intent is not None:
version_data["intent"] = intent
# Include optional data if present in instance.data
optionals = [
"frameStart", "frameEnd", "step", "handles",
View file
@ -1,423 +0,0 @@
import os
import logging
import shutil
import clique
import errno
import pyblish.api
from avalon import api, io
log = logging.getLogger(__name__)
class IntegrateFrames(pyblish.api.InstancePlugin):
"""Resolve any dependency issies
This plug-in resolves any paths which, if not updated might break
the published file.
The order of families is important, when working with lookdev you want to
first publish the texture, update the texture paths in the nodes and then
publish the shading network. Same goes for file dependent assets.
"""
label = "Integrate Frames"
order = pyblish.api.IntegratorOrder
families = ["imagesequence"]
family_targets = [".frames", ".local", ".review", "imagesequence", "render", "source"]
exclude_families = ["clip"]
def process(self, instance):
if [ef for ef in self.exclude_families
if instance.data["family"] in ef]:
return
families = [f for f in instance.data["families"]
for search in self.family_targets
if search in f]
if not families:
return
self.register(instance)
# self.log.info("Integrating Asset in to the database ...")
# self.log.info("instance.data: {}".format(instance.data))
if instance.data.get('transfer', True):
self.integrate(instance)
def register(self, instance):
# Required environment variables
PROJECT = api.Session["AVALON_PROJECT"]
ASSET = instance.data.get("asset") or api.Session["AVALON_ASSET"]
LOCATION = api.Session["AVALON_LOCATION"]
context = instance.context
# Atomicity
#
# Guarantee atomic publishes - each asset contains
# an identical set of members.
# __
# / o
# / \
# | o |
# \ /
# o __/
#
assert all(result["success"] for result in context.data["results"]), (
"Atomicity not held, aborting.")
# Assemble
#
# |
# v
# ---> <----
# ^
# |
#
stagingdir = instance.data.get("stagingDir")
assert stagingdir, ("Incomplete instance \"%s\": "
"Missing reference to staging area." % instance)
# extra check if stagingDir actually exists and is available
self.log.debug("Establishing staging directory @ %s" % stagingdir)
project = io.find_one({"type": "project"})
asset = io.find_one({
"type": "asset",
"name": ASSET,
"parent": project["_id"]
})
assert all([project, asset]), ("Could not find current project or "
"asset '%s'" % ASSET)
subset = self.get_subset(asset, instance)
# get next version
latest_version = io.find_one(
{
"type": "version",
"parent": subset["_id"]
},
{"name": True},
sort=[("name", -1)]
)
next_version = 1
if latest_version is not None:
next_version += latest_version["name"]
self.log.info("Verifying version from assumed destination")
assumed_data = instance.data["assumedTemplateData"]
assumed_version = assumed_data["version"]
if assumed_version != next_version:
raise AttributeError("Assumed version 'v{0:03d}' does not match"
"next version in database "
"('v{1:03d}')".format(assumed_version,
next_version))
if instance.data.get('version'):
next_version = int(instance.data.get('version'))
self.log.debug("Next version: v{0:03d}".format(next_version))
version_data = self.create_version_data(context, instance)
version = self.create_version(subset=subset,
version_number=next_version,
locations=[LOCATION],
data=version_data)
self.log.debug("Creating version ...")
version_id = io.insert_one(version).inserted_id
# Write to disk
# _
# | |
# _| |_
# ____\ /
# |\ \ / \
# \ \ v \
# \ \________.
# \|________|
#
root = api.registered_root()
hierarchy = ""
parents = io.find_one({"type": 'asset', "name": ASSET})[
'data']['parents']
if parents and len(parents) > 0:
# hierarchy = os.path.sep.join(hierarchy)
hierarchy = os.path.join(*parents)
template_data = {"root": root,
"project": {"name": PROJECT,
"code": project['data']['code']},
"silo": asset.get('silo'),
"task": api.Session["AVALON_TASK"],
"asset": ASSET,
"family": instance.data['family'],
"subset": subset["name"],
"version": int(version["name"]),
"hierarchy": hierarchy}
# template_publish = project["config"]["template"]["publish"]
anatomy = instance.context.data['anatomy']
# Find the representations to transfer amongst the files
# Each should be a single representation (as such, a single extension)
representations = []
destination_list = []
if 'transfers' not in instance.data:
instance.data['transfers'] = []
for files in instance.data["files"]:
# Collection
# _______
# |______|\
# | |\|
# | ||
# | ||
# | ||
# |_______|
#
if isinstance(files, list):
src_collections, remainder = clique.assemble(files)
src_collection = src_collections[0]
# Assert that each member has identical suffix
src_head = src_collection.format("{head}")
src_tail = ext = src_collection.format("{tail}")
test_dest_files = list()
for i in [1, 2]:
template_data["representation"] = src_tail[1:]
template_data["frame"] = src_collection.format(
"{padding}") % i
anatomy_filled = anatomy.format(template_data)
test_dest_files.append(anatomy_filled["render"]["path"])
dst_collections, remainder = clique.assemble(test_dest_files)
dst_collection = dst_collections[0]
dst_head = dst_collection.format("{head}")
dst_tail = dst_collection.format("{tail}")
for i in src_collection.indexes:
src_padding = src_collection.format("{padding}") % i
src_file_name = "{0}{1}{2}".format(
src_head, src_padding, src_tail)
dst_padding = dst_collection.format("{padding}") % i
dst = "{0}{1}{2}".format(dst_head, dst_padding, dst_tail)
src = os.path.join(stagingdir, src_file_name)
instance.data["transfers"].append([src, dst])
else:
# Single file
# _______
# | |\
# | |
# | |
# | |
# |_______|
#
template_data.pop("frame", None)
fname = files
self.log.info("fname: {}".format(fname))
assert not os.path.isabs(fname), (
"Given file name is a full path"
)
_, ext = os.path.splitext(fname)
template_data["representation"] = ext[1:]
src = os.path.join(stagingdir, fname)
anatomy_filled = anatomy.format(template_data)
dst = anatomy_filled["render"]["path"]
instance.data["transfers"].append([src, dst])
if ext[1:] not in ["jpeg", "jpg", "mov", "mp4", "wav"]:
template_data["frame"] = "#" * int(anatomy_filled["render"]["padding"])
anatomy_filled = anatomy.format(template_data)
path_to_save = anatomy_filled["render"]["path"]
template = anatomy.templates["render"]["path"]
self.log.debug("path_to_save: {}".format(path_to_save))
representation = {
"schema": "pype:representation-2.0",
"type": "representation",
"parent": version_id,
"name": ext[1:],
"data": {'path': path_to_save, 'template': template},
"dependencies": instance.data.get("dependencies", "").split(),
# Imprint shortcut to context
# for performance reasons.
"context": {
"root": root,
"project": {
"name": PROJECT,
"code": project['data']['code']
},
"task": api.Session["AVALON_TASK"],
"silo": asset['silo'],
"asset": ASSET,
"family": instance.data['family'],
"subset": subset["name"],
"version": int(version["name"]),
"hierarchy": hierarchy,
"representation": ext[1:]
}
}
destination_list.append(dst)
instance.data['destination_list'] = destination_list
representations.append(representation)
self.log.info("Registering {} items".format(len(representations)))
io.insert_many(representations)
def integrate(self, instance):
"""Move the files
Through `instance.data["transfers"]`
Args:
instance: the instance to integrate
"""
transfers = instance.data["transfers"]
for src, dest in transfers:
src = os.path.normpath(src)
dest = os.path.normpath(dest)
if src in dest:
continue
self.log.info("Copying file .. {} -> {}".format(src, dest))
self.copy_file(src, dest)
def copy_file(self, src, dst):
""" Copy given source to destination
Arguments:
src (str): the source file which needs to be copied
dst (str): the destination of the source file
Returns:
None
"""
dirname = os.path.dirname(dst)
try:
os.makedirs(dirname)
except OSError as e:
if e.errno == errno.EEXIST:
pass
else:
self.log.critical("An unexpected error occurred.")
raise
shutil.copy(src, dst)
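On Python 3 the errno check above collapses into `exist_ok`; a sketch, not part of the original plugin:

import os
import shutil

def copy_file_py3(src, dst):
    # exist_ok=True swallows only the "directory already exists" case;
    # any other OSError still propagates.
    os.makedirs(os.path.dirname(dst), exist_ok=True)
    shutil.copy(src, dst)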
def get_subset(self, asset, instance):
subset = io.find_one({
"type": "subset",
"parent": asset["_id"],
"name": instance.data["subset"]
})
if subset is None:
subset_name = instance.data["subset"]
self.log.info("Subset '%s' not found, creating.." % subset_name)
_id = io.insert_one({
"schema": "pype:subset-2.0",
"type": "subset",
"name": subset_name,
"data": {},
"parent": asset["_id"]
}).inserted_id
subset = io.find_one({"_id": _id})
return subset
def create_version(self, subset, version_number, locations, data=None):
""" Copy given source to destination
Args:
subset (dict): the registered subset of the asset
version_number (int): the version number
locations (list): the currently registered locations
Returns:
dict: collection of data to create a version
"""
# Imprint currently registered location
version_locations = [location for location in locations if
location is not None]
return {"schema": "pype:version-2.0",
"type": "version",
"parent": subset["_id"],
"name": version_number,
"locations": version_locations,
"data": data}
def create_version_data(self, context, instance):
"""Create the data collection for the version
Args:
context: the current context
instance: the current instance being published
Returns:
dict: the required information with instance.data as key
"""
families = []
current_families = instance.data.get("families", list())
instance_family = instance.data.get("family", None)
if instance_family is not None:
families.append(instance_family)
families += current_families
try:
source = instance.data['source']
except KeyError:
source = context.data["currentFile"]
relative_path = os.path.relpath(source, api.registered_root())
source = os.path.join("{root}", relative_path).replace("\\", "/")
version_data = {"families": families,
"time": context.data["time"],
"author": context.data["user"],
"source": source,
"comment": context.data.get("comment")}
# Include optional data if present in instance.data
optionals = ["frameStart", "frameEnd", "step",
"handles", "colorspace", "fps", "outputDir"]
for key in optionals:
if key in instance.data:
version_data[key] = instance.data.get(key, None)
return version_data
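For context on the clique calls used in the removed plugin above, a short sketch with made-up file names showing how a file list collapses into head/padding/tail:

import clique

files = ["render.1001.exr", "render.1002.exr", "render.1003.exr"]
collections, remainder = clique.assemble(files)
col = collections[0]

print(col.format("{head}"))            # "render."
print(col.format("{tail}"))            # ".exr"
print(col.format("{padding}"))         # "%04d"
print(col.format("{padding}") % 1002)  # "1002"
print(sorted(col.indexes))             # [1001, 1002, 1003]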
View file
@ -0,0 +1,49 @@
import os
import pyblish.api
class IntegrateResourcesPath(pyblish.api.InstancePlugin):
"""Generate directory path where the files and resources will be stored"""
label = "Integrate Resources Path"
order = pyblish.api.IntegratorOrder - 0.05
families = ["clip", "projectfile", "plate"]
def process(self, instance):
resources = instance.data.get("resources") or []
transfers = instance.data.get("transfers") or []
if not resources and not transfers:
self.log.debug(
"Instance does not have `resources` and `transfers`"
)
return
resources_folder = instance.data["resourcesDir"]
# Define resource destination and transfers
for resource in resources:
# Add destination to the resource
source_filename = os.path.basename(
resource["source"]).replace("\\", "/")
destination = os.path.join(resources_folder, source_filename)
# Force forward slashes to fix issue with software unable
# to work correctly with backslashes in specific scenarios
# (e.g. escape characters in PLN-151 V-Ray UDIM)
destination = destination.replace("\\", "/")
resource['destination'] = destination
# Collect transfers for the individual files of the resource
# e.g. all individual files of a cache or UDIM textures.
files = resource['files']
for fsrc in files:
fname = os.path.basename(fsrc)
fdest = os.path.join(
resources_folder, fname
).replace("\\", "/")
transfers.append([fsrc, fdest])
instance.data["resources"] = resources
instance.data["transfers"] = transfers
View file
@ -137,3 +137,12 @@ class IntegrateThumbnails(pyblish.api.InstancePlugin):
self.log.debug("Setting thumbnail for version \"{}\" <{}>".format(
version["name"], str(version["_id"])
))
asset_entity = instance.data["assetEntity"]
io.update_many(
{"_id": asset_entity["_id"]},
{"$set": {"data.thumbnail_id": thumbnail_id}}
)
self.log.debug("Setting thumbnail for asset \"{}\" <{}>".format(
asset_entity["name"], str(version["_id"])
))
View file
@ -166,6 +166,8 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
"PYPE_STUDIO_PROJECTS_PATH",
"PYPE_STUDIO_PROJECTS_MOUNT"
]
deadline_pool = ""
def _submit_deadline_post_job(self, instance, job):
"""
@ -201,7 +203,8 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
"JobDependency0": job["_id"],
"UserName": job["Props"]["User"],
"Comment": instance.context.data.get("comment", ""),
"Priority": job["Props"]["Pri"]
"Priority": job["Props"]["Pri"],
"Pool": self.deadline_pool
},
"PluginInfo": {
"Version": "3.6",
View file
@ -1,43 +0,0 @@
import pyblish.api
import os
class ValidateTemplates(pyblish.api.ContextPlugin):
"""Check if all templates were filled"""
label = "Validate Templates"
order = pyblish.api.ValidatorOrder - 0.1
hosts = ["maya", "houdini", "nuke"]
def process(self, context):
anatomy = context.data["anatomy"]
if not anatomy:
raise RuntimeError("Did not find anatomy")
else:
data = {
"root": os.environ["PYPE_STUDIO_PROJECTS_PATH"],
"project": {"name": "D001_projectsx",
"code": "prjX"},
"ext": "exr",
"version": 3,
"task": "animation",
"asset": "sh001",
"app": "maya",
"hierarchy": "ep101/sq01/sh010"}
anatomy_filled = anatomy.format(data)
self.log.info(anatomy_filled)
data = {"root": os.environ["PYPE_STUDIO_PROJECTS_PATH"],
"project": {"name": "D001_projectsy",
"code": "prjY"},
"ext": "abc",
"version": 1,
"task": "lookdev",
"asset": "bob",
"app": "maya",
"hierarchy": "ep101/sq01/bob"}
anatomy_filled = context.data["anatomy"].format(data)
self.log.info(anatomy_filled["work"]["folder"])
View file
@ -35,7 +35,7 @@ class CollectMayaScene(pyblish.api.ContextPlugin):
"subset": subset,
"asset": os.getenv("AVALON_ASSET", None),
"label": subset,
"publish": False,
"publish": True,
"family": 'workfile',
"families": ['workfile'],
"setMembers": [current_file]
View file
@ -1,6 +1,7 @@
import os
import sys
import json
import copy
import tempfile
import contextlib
import subprocess
@ -330,10 +331,9 @@ class ExtractLook(pype.api.Extractor):
maya_path))
def resource_destination(self, instance, filepath, do_maketx):
anatomy = instance.context.data["anatomy"]
self.create_destination_template(instance, anatomy)
resources_dir = instance.data["resourcesDir"]
# Compute destination location
basename, ext = os.path.splitext(os.path.basename(filepath))
@ -343,7 +343,7 @@ class ExtractLook(pype.api.Extractor):
ext = ".tx"
return os.path.join(
instance.data["assumedDestination"], "resources", basename + ext
resources_dir, basename + ext
)
def _process_texture(self, filepath, do_maketx, staging, linearise, force):
@ -407,97 +407,3 @@ class ExtractLook(pype.api.Extractor):
return converted, COPY, texture_hash
return filepath, COPY, texture_hash
def create_destination_template(self, instance, anatomy):
"""Create a filepath based on the current data available
Example template:
{root}/{project}/{silo}/{asset}/publish/{subset}/v{version:0>3}/
{subset}.{representation}
Args:
instance: the instance to publish
Returns:
file path (str)
"""
# get all the stuff from the database
subset_name = instance.data["subset"]
self.log.info(subset_name)
asset_name = instance.data["asset"]
project_name = api.Session["AVALON_PROJECT"]
a_template = anatomy.templates
project = io.find_one(
{
"type": "project",
"name": project_name
},
projection={"config": True, "data": True}
)
template = a_template["publish"]["path"]
# anatomy = instance.context.data['anatomy']
asset = io.find_one({
"type": "asset",
"name": asset_name,
"parent": project["_id"]
})
assert asset, ("No asset found by the name '{}' "
"in project '{}'").format(asset_name, project_name)
silo = asset.get("silo")
subset = io.find_one({
"type": "subset",
"name": subset_name,
"parent": asset["_id"]
})
# assume there is no version yet, we start at `1`
version = None
version_number = 1
if subset is not None:
version = io.find_one(
{
"type": "version",
"parent": subset["_id"]
},
sort=[("name", -1)]
)
# if there is a subset there ought to be version
if version is not None:
version_number += version["name"]
if instance.data.get("version"):
version_number = int(instance.data.get("version"))
padding = int(a_template["render"]["padding"])
hierarchy = asset["data"]["parents"]
if hierarchy:
# hierarchy = os.path.sep.join(hierarchy)
hierarchy = "/".join(hierarchy)
template_data = {
"root": api.Session["AVALON_PROJECTS"],
"project": {"name": project_name, "code": project["data"]["code"]},
"silo": silo,
"family": instance.data["family"],
"asset": asset_name,
"subset": subset_name,
"frame": ("#" * padding),
"version": version_number,
"hierarchy": hierarchy,
"representation": "TEMP",
}
instance.data["assumedTemplateData"] = template_data
self.log.info(template_data)
instance.data["template"] = template
# We take the parent folder of representation 'filepath'
instance.data["assumedDestination"] = os.path.dirname(
anatomy.format(template_data)["publish"]["path"]
)
View file
@ -110,15 +110,7 @@ class ExtractYetiRig(pype.api.Extractor):
self.log.info("Writing metadata file")
# Create assumed destination folder for imageSearchPath
assumed_temp_data = instance.data["assumedTemplateData"]
template = instance.data["template"]
template_formatted = template.format(**assumed_temp_data)
destination_folder = os.path.dirname(template_formatted)
image_search_path = os.path.join(destination_folder, "resources")
image_search_path = os.path.normpath(image_search_path)
image_search_path = resources_dir = instance.data["resourcesDir"]
settings = instance.data.get("rigsettings", None)
if settings:
View file
@ -55,9 +55,9 @@ class CollectNukeWrites(pyblish.api.InstancePlugin):
output_dir = os.path.dirname(path)
self.log.debug('output dir: {}'.format(output_dir))
# get version to instance for integration
instance.data['version'] = instance.context.data.get(
"version", pype.get_version_from_path(nuke.root().name()))
# # get version to instance for integration
# instance.data['version'] = instance.context.data.get(
# "version", pype.get_version_from_path(nuke.root().name()))
self.log.debug('Write Version: %s' % instance.data('version'))
@ -113,16 +113,7 @@ class CollectNukeWrites(pyblish.api.InstancePlugin):
# Add version data to instance
version_data = {
"handles": handle_start,
"handleStart": handle_start,
"handleEnd": handle_end,
"frameStart": first_frame + handle_start,
"frameEnd": last_frame - handle_end,
"version": int(instance.data['version']),
"colorspace": node["colorspace"].value(),
"families": ["render"],
"subset": instance.data["subset"],
"fps": instance.context.data["fps"]
}
instance.data["family"] = "write"
View file
@ -1,5 +1,5 @@
from pyblish import api
import os
class CollectAudio(api.InstancePlugin):
"""Collect audio from tags.
@ -12,7 +12,7 @@ class CollectAudio(api.InstancePlugin):
"""
# Run just before CollectSubsets
order = api.CollectorOrder + 0.1025
order = api.CollectorOrder + 0.1021
label = "Collect Audio"
hosts = ["nukestudio"]
families = ["clip"]
@ -21,8 +21,10 @@ class CollectAudio(api.InstancePlugin):
# Exclude non-tagged instances.
tagged = False
for tag in instance.data["tags"]:
family = dict(tag["metadata"]).get("tag.family", "")
tag_data = dict(tag["metadata"])
family = tag_data.get("tag.family", "")
if family.lower() == "audio":
subset = tag_data.get("tag.subset", "Main")
tagged = True
if not tagged:
@ -40,14 +42,14 @@ class CollectAudio(api.InstancePlugin):
data["family"] = "audio"
data["families"] = ["ftrack"]
subset = ""
for tag in instance.data["tags"]:
tag_data = dict(tag["metadata"])
if "tag.subset" in tag_data:
subset = tag_data["tag.subset"]
data["subset"] = "audio" + subset.title()
data["source"] = data["sourcePath"]
data["label"] = "{} - {} - ({})".format(
data['asset'], data["subset"], os.path.splitext(data["sourcePath"])[
1]
)
self.log.debug("Creating instance with data: {}".format(data))
instance.context.create_instance(**data)
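Illustration of how a tag's metadata becomes the audio subset name (tag values hypothetical):

tag_data = {"tag.family": "audio", "tag.subset": "dialogue"}

family = tag_data.get("tag.family", "")
subset = tag_data.get("tag.subset", "Main")
if family.lower() == "audio":
    subset_name = "audio" + subset.title()  # -> "audioDialogue"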
View file
@ -1,7 +1,7 @@
import os
from pyblish import api
import hiero
import nuke
class CollectClips(api.ContextPlugin):
@ -17,7 +17,7 @@ class CollectClips(api.ContextPlugin):
self.log.debug("Created `assetsShared` in context")
context.data["assetsShared"] = dict()
projectdata = context.data["projectData"]
projectdata = context.data["projectEntity"]["data"]
version = context.data.get("version", "001")
sequence = context.data.get("activeSequence")
selection = context.data.get("selection")
@ -48,7 +48,9 @@ class CollectClips(api.ContextPlugin):
track = item.parent()
source = item.source().mediaSource()
source_path = source.firstpath()
effects = [f for f in item.linkedItems() if f.isEnabled()]
effects = [f for f in item.linkedItems()
if f.isEnabled()
if isinstance(f, hiero.core.EffectTrackItem)]
# If source is *.nk its a comp effect and we need to fetch the
# write node output. This should be improved by parsing the script
View file
@ -14,7 +14,7 @@ class CollectPlates(api.InstancePlugin):
"""
# Run just before CollectSubsets
order = api.CollectorOrder + 0.1025
order = api.CollectorOrder + 0.1021
label = "Collect Plates"
hosts = ["nukestudio"]
families = ["clip"]
@ -23,8 +23,10 @@ class CollectPlates(api.InstancePlugin):
# Exclude non-tagged instances.
tagged = False
for tag in instance.data["tags"]:
family = dict(tag["metadata"]).get("tag.family", "")
tag_data = dict(tag["metadata"])
family = tag_data.get("tag.family", "")
if family.lower() == "plate":
subset = tag_data.get("tag.subset", "Main")
tagged = True
break
@ -34,29 +36,27 @@ class CollectPlates(api.InstancePlugin):
"\"plate\"".format(instance)
)
return
self.log.debug("__ subset: `{}`".format(instance.data["subset"]))
# if "audio" in instance.data["subset"]:
# return
# Collect data.
data = {}
for key, value in instance.data.iteritems():
data[key] = value
self.log.debug("__ family: `{}`".format(family))
self.log.debug("__ subset: `{}`".format(subset))
data["family"] = family.lower()
data["families"] = ["ftrack"] + instance.data["families"][1:]
data["source"] = data["sourcePath"]
subset = ""
for tag in instance.data["tags"]:
tag_data = dict(tag["metadata"])
if "tag.subset" in tag_data:
subset = tag_data["tag.subset"]
data["subset"] = data["family"] + subset.title()
data["subset"] = family + subset.title()
data["name"] = data["subset"] + "_" + data["asset"]
data["label"] = "{} - {} - ({})".format(
data['asset'], data["subset"], os.path.splitext(data["sourcePath"])[
1]
)
data['asset'], data["subset"], os.path.splitext(
data["sourcePath"])[1])
if "review" in instance.data["families"]:
data["label"] += " - review"
@ -146,6 +146,7 @@ class CollectPlatesData(api.InstancePlugin):
head, padding = os.path.splitext(basename)
ext = ext[1:]
padding = padding[1:]
self.log.debug("_ padding: `{}`".format(padding))
# head, padding, ext = source_file.split('.')
source_first_frame = int(padding)
padding = len(padding)
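A worked example of the padding math above, assuming a hypothetical source file name:

import os

basename, ext = os.path.splitext("plate.1001.exr")  # "plate.1001", ".exr"
head, padding = os.path.splitext(basename)          # "plate", ".1001"
ext = ext[1:]                       # "exr"
padding = padding[1:]               # "1001"
source_first_frame = int(padding)   # 1001
padding = len(padding)              # 4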
View file
@ -16,7 +16,7 @@ class CollectReviews(api.InstancePlugin):
order = api.CollectorOrder + 0.1022
label = "Collect Reviews"
hosts = ["nukestudio"]
families = ["clip"]
families = ["plate"]
def process(self, instance):
# Exclude non-tagged instances.
View file
@ -10,8 +10,6 @@ class ExtractAudioFile(pype.api.Extractor):
hosts = ["nukestudio"]
families = ["clip", "audio"]
match = api.Intersection
optional = True
active = False
def process(self, instance):
import os
View file
@ -2,6 +2,7 @@
import os
import json
import re
import copy
import pyblish.api
import tempfile
from avalon import io, api
@ -75,9 +76,11 @@ class ExtractVideoTracksLuts(pyblish.api.InstancePlugin):
)
data["source"] = data["sourcePath"]
# WARNING instance should not be created in Extractor!
# create new instance
instance = instance.context.create_instance(**data)
# TODO replace line below with `instance.data["resourcesDir"]`
# when instance is created during collection part
dst_dir = self.resource_destination_dir(instance)
# change paths in effects to files
@ -144,103 +147,109 @@ class ExtractVideoTracksLuts(pyblish.api.InstancePlugin):
return (v, dst)
def resource_destination_dir(self, instance):
anatomy = instance.context.data['anatomy']
self.create_destination_template(instance, anatomy)
# WARNING this is from `collect_instance_anatomy_data.py`
anatomy_data = copy.deepcopy(instance.context.data["anatomyData"])
project_entity = instance.context.data["projectEntity"]
context_asset_entity = instance.context.data["assetEntity"]
return os.path.join(
instance.data["assumedDestination"],
"resources"
)
def create_destination_template(self, instance, anatomy):
"""Create a filepath based on the current data available
Example template:
{root}/{project}/{silo}/{asset}/publish/{subset}/v{version:0>3}/
{subset}.{representation}
Args:
instance: the instance to publish
Returns:
file path (str)
"""
# get all the stuff from the database
subset_name = instance.data["subset"]
self.log.info(subset_name)
asset_name = instance.data["asset"]
project_name = api.Session["AVALON_PROJECT"]
a_template = anatomy.templates
if context_asset_entity["name"] == asset_name:
asset_entity = context_asset_entity
project = io.find_one(
{
"type": "project",
"name": project_name
},
projection={"config": True, "data": True}
)
else:
asset_entity = io.find_one({
"type": "asset",
"name": asset_name,
"parent": project_entity["_id"]
})
template = a_template['publish']['path']
# anatomy = instance.context.data['anatomy']
subset_name = instance.data["subset"]
version_number = instance.data.get("version")
latest_version = None
asset = io.find_one({
"type": "asset",
"name": asset_name,
"parent": project["_id"]
if asset_entity:
subset_entity = io.find_one({
"type": "subset",
"name": subset_name,
"parent": asset_entity["_id"]
})
if subset_entity is None:
self.log.debug("Subset entity does not exist yet.")
else:
version_entity = io.find_one(
{
"type": "version",
"parent": subset_entity["_id"]
},
sort=[("name", -1)]
)
if version_entity:
latest_version = version_entity["name"]
if version_number is None:
version_number = 1
if latest_version is not None:
version_number += int(latest_version)
anatomy_data.update({
"asset": asset_name,
"family": instance.data["family"],
"subset": subset_name,
"version": version_number
})
assert asset, ("No asset found by the name '{}' "
"in project '{}'".format(asset_name, project_name))
silo = asset.get('silo')
resolution_width = instance.data.get("resolutionWidth")
if resolution_width:
anatomy_data["resolution_width"] = resolution_width
subset = io.find_one({
"type": "subset",
"name": subset_name,
"parent": asset["_id"]
resolution_height = instance.data.get("resolutionHeight")
if resolution_height:
anatomy_data["resolution_height"] = resolution_height
fps = instance.data.get("fps")
if fps:
anatomy_data["fps"] = fps
instance.data["projectEntity"] = project_entity
instance.data["assetEntity"] = asset_entity
instance.data["anatomyData"] = anatomy_data
instance.data["latestVersion"] = latest_version
instance.data["version"] = version_number
# WARNING this is from `collect_resources_path.py`
anatomy = instance.context.data["anatomy"]
template_data = copy.deepcopy(instance.data["anatomyData"])
# This is for cases of Deprecated anatomy without `folder`
# TODO remove when all clients have solved this issue
template_data.update({
"frame": "FRAME_TEMP",
"representation": "TEMP"
})
# assume there is no version yet, we start at `1`
version = None
version_number = 1
if subset is not None:
version = io.find_one(
{
"type": "version",
"parent": subset["_id"]
},
sort=[("name", -1)]
)
anatomy_filled = anatomy.format(template_data)
# if there is a subset there ought to be version
if version is not None:
version_number += version["name"]
if "folder" in anatomy.templates["publish"]:
publish_folder = anatomy_filled["publish"]["folder"]
else:
# solve deprecated situation when `folder` key is not underneath
# `publish` anatomy
project_name = api.Session["AVALON_PROJECT"]
self.log.warning((
"Deprecation warning: Anatomy does not have set `folder`"
" key underneath `publish` (in global of for project `{}`)."
).format(project_name))
if instance.data.get('version'):
version_number = int(instance.data.get('version'))
file_path = anatomy_filled["publish"]["path"]
# Directory
publish_folder = os.path.dirname(file_path)
padding = int(a_template['render']['padding'])
publish_folder = os.path.normpath(publish_folder)
resources_folder = os.path.join(publish_folder, "resources")
hierarchy = asset['data']['parents']
if hierarchy:
# hierarchy = os.path.sep.join(hierarchy)
hierarchy = "/".join(hierarchy)
instance.data["publishDir"] = publish_folder
instance.data["resourcesDir"] = resources_folder
template_data = {"root": api.Session["AVALON_PROJECTS"],
"project": {"name": project_name,
"code": project['data']['code']},
"silo": silo,
"family": instance.data['family'],
"asset": asset_name,
"subset": subset_name,
"frame": ('#' * padding),
"version": version_number,
"hierarchy": hierarchy,
"representation": "TEMP"}
instance.data["assumedTemplateData"] = template_data
self.log.info(template_data)
instance.data["template"] = template
# We take the parent folder of representation 'filepath'
instance.data["assumedDestination"] = os.path.dirname(
anatomy.format(template_data)["publish"]["path"]
)
return resources_folder
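Once the anatomy template is filled, the tail of the method is plain path math (paths hypothetical):

import os

file_path = "/projects/prj/sh0010/publish/plate/plateMain/v003/file.mov"
publish_folder = os.path.normpath(os.path.dirname(file_path))
resources_folder = os.path.join(publish_folder, "resources")
# -> "/projects/prj/sh0010/publish/plate/plateMain/v003/resources"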
View file
@ -1,4 +1,6 @@
import os
import sys
import re
import datetime
import subprocess
import json
@ -27,6 +29,20 @@ FFPROBE = (
'{} -v quiet -print_format json -show_format -show_streams %(source)s'
).format(os.path.normpath(ffmpeg_path + "ffprobe"))
DRAWTEXT = (
"drawtext=text=\\'%(text)s\\':x=%(x)s:y=%(y)s:fontcolor="
"%(color)s@%(opacity).1f:fontsize=%(size)d:fontfile='%(font)s'"
)
TIMECODE = (
"drawtext=timecode=\\'%(timecode)s\\':text=\\'%(text)s\\'"
":timecode_rate=%(fps).2f:x=%(x)s:y=%(y)s:fontcolor="
"%(color)s@%(opacity).1f:fontsize=%(size)d:fontfile='%(font)s'"
)
MISSING_KEY_VALUE = "N/A"
CURRENT_FRAME_KEY = "{current_frame}"
TIME_CODE_KEY = "{timecode}"
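Filling the DRAWTEXT template yields a plain ffmpeg drawtext filter string; a quick sketch with made-up values:

filter_str = DRAWTEXT % {
    "text": "sh0010",
    "x": 10,
    "y": 10,
    "color": "white",
    "opacity": 1.0,
    "size": 24,
    "font": "/fonts/arial.ttf",
}
# drawtext=text=\'sh0010\':x=10:y=10:fontcolor=white@1.0
#     :fontsize=24:fontfile='/fonts/arial.ttf'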
def _streams(source):
"""Reimplemented from otio burnins to be able use full path to ffprobe
@ -120,82 +136,59 @@ class ModifiedBurnins(ffmpeg_burnins.Burnins):
if options_init:
self.options_init.update(options_init)
def add_text(self, text, align, options=None):
def add_text(self, text, align, frame_start=None, options=None):
"""
Adding static text to a filter.
:param str text: text to apply to the drawtext
:param enum align: alignment, must use provided enum flags
:param int frame_start: starting frame for burnins current frame
:param dict options: recommended to use TextOptions
"""
if not options:
options = ffmpeg_burnins.TextOptions(**self.options_init)
self._add_burnin(text, align, options, ffmpeg_burnins.DRAWTEXT)
def add_datetime(self, date_format, align, options=None):
"""
Adding date text to a filter. Using pythons datetime module.
options = options.copy()
if frame_start:
options["frame_offset"] = frame_start
:param str date_format: format of date (e.g. `%d.%m.%Y`)
:param enum align: alignment, must use provided enum flags
:param dict options: recommended to use TextOptions
"""
if not options:
options = ffmpeg_burnins.TextOptions(**self.options_init)
today = datetime.datetime.today()
text = today.strftime(date_format)
self._add_burnin(text, align, options, ffmpeg_burnins.DRAWTEXT)
self._add_burnin(text, align, options, DRAWTEXT)
def add_frame_numbers(
self, align, options=None, start_frame=None, text=None
def add_timecode(
self, align, frame_start=None, frame_start_tc=None, text=None,
options=None
):
"""
Convenience method to create the frame number expression.
:param enum align: alignment, must use provided enum flags
:param dict options: recommended to use FrameNumberOptions
"""
if not options:
options = ffmpeg_burnins.FrameNumberOptions(**self.options_init)
if start_frame:
options['frame_offset'] = start_frame
expr = r'%%{eif\:n+%d\:d}' % options['frame_offset']
_text = str(int(self.end_frame + options['frame_offset']))
if text and isinstance(text, str):
text = r"{}".format(text)
expr = text.replace("{current_frame}", expr)
text = text.replace("{current_frame}", _text)
options['expression'] = expr
self._add_burnin(text, align, options, ffmpeg_burnins.DRAWTEXT)
def add_timecode(self, align, options=None, start_frame=None):
"""
Convenience method to create the frame number expression.
:param enum align: alignment, must use provided enum flags
:param int frame_start: starting frame for burnins current frame
:param int frame_start_tc: starting frame for burnins timecode
:param str text: text that will be before timecode
:param dict options: recommended to use TimeCodeOptions
"""
if not options:
options = ffmpeg_burnins.TimeCodeOptions(**self.options_init)
if start_frame:
options['frame_offset'] = start_frame
timecode = ffmpeg_burnins._frames_to_timecode(
options['frame_offset'],
options = options.copy()
if frame_start:
options["frame_offset"] = frame_start
if not frame_start_tc:
frame_start_tc = options["frame_offset"]
if not text:
text = ""
if not options.get("fps"):
options["fps"] = self.frame_rate
options["timecode"] = ffmpeg_burnins._frames_to_timecode(
frame_start_tc,
self.frame_rate
)
options = options.copy()
if not options.get('fps'):
options['fps'] = self.frame_rate
self._add_burnin(
timecode.replace(':', r'\:'),
align,
options,
ffmpeg_burnins.TIMECODE
)
self._add_burnin(text, align, options, TIMECODE)
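A typical call under the new signature (values illustrative, `burnin` being a ModifiedBurnins instance):

# Burn a timecode that starts at 00:00:00:01 while the media itself
# starts at frame 1001; "TC: " is a static prefix before the timecode.
burnin.add_timecode(
    ModifiedBurnins.TOP_LEFT,
    frame_start=1001,
    frame_start_tc=1,
    text="TC: ",
)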
def _add_burnin(self, text, align, options, draw):
"""
@ -206,12 +199,20 @@ class ModifiedBurnins(ffmpeg_burnins.Burnins):
"""
resolution = self.resolution
data = {
'text': options.get('expression') or text,
'text': (
text
.replace(",", r"\,")
.replace(':', r'\:')
),
'color': options['font_color'],
'size': options['font_size']
}
timecode_text = options.get("timecode") or ""
text_for_size = text + timecode_text
data.update(options)
data.update(ffmpeg_burnins._drawtext(align, resolution, text, options))
data.update(
ffmpeg_burnins._drawtext(align, resolution, text_for_size, options)
)
if 'font' in data and ffmpeg_burnins._is_windows():
data['font'] = data['font'].replace(os.sep, r'\\' + os.sep)
data['font'] = data['font'].replace(':', r'\:')
@ -264,9 +265,13 @@ class ModifiedBurnins(ffmpeg_burnins.Burnins):
is_sequence = "%" in output
command = self.command(output=output,
args=args,
overwrite=overwrite)
command = self.command(
output=output,
args=args,
overwrite=overwrite
)
print(command)
proc = Popen(command, shell=True)
proc.communicate()
if proc.returncode != 0:
@ -295,15 +300,13 @@ def example(input_path, output_path):
burnin.add_text('My Text', ModifiedBurnins.TOP_CENTERED)
# Datetime
burnin.add_text('%d-%m-%y', ModifiedBurnins.TOP_RIGHT)
# Frame number
burnin.add_frame_numbers(ModifiedBurnins.TOP_RIGHT, start_frame=start_frame)
# Timecode
burnin.add_timecode(ModifiedBurnins.TOP_LEFT, start_frame=start_frame)
# Start render (overwrite output file if exist)
burnin.render(output_path, overwrite=True)
def burnins_from_data(input_path, codec_data, output_path, data, overwrite=True):
def burnins_from_data(
input_path, output_path, data, codec_data=None, overwrite=True
):
'''
This method adds burnins to video/image file based on presets setting.
Extension of output MUST be same as input. (mov -> mov, avi -> avi,...)
@ -327,47 +330,35 @@ def burnins_from_data(input_path, codec_data, output_path, data, overwrite=True)
- each key of "burnins" represents Alignment, there are 6 possibilities:
TOP_LEFT TOP_CENTERED TOP_RIGHT
BOTTOM_LEFT BOTTOM_CENTERED BOTTOM_RIGHT
- value for each key is dict which should contain "function" which says
what kind of burnin is that:
"text", "timecode" or "frame_numbers"
- "text" key with content is also required when "text" function is used
- value must be string with text you want to burn-in
- text may contain specific formatting keys (explained below)
Requirement of *data* keys is based on presets.
- "start_frame" - is required when "timecode" or "frame_numbers" function is used
- "start_frame_tc" - when "timecode" should start with different frame
- "frame_start" - is required when "timecode" or "current_frame" ins keys
- "frame_start_tc" - when "timecode" should start with different frame
- *keys for static text*
EXAMPLE:
preset = {
"options": {*OPTIONS FOR LOOK*},
"burnins": {
"TOP_LEFT": {
"function": "text",
"text": "static_text"
},
"TOP_RIGHT": {
"function": "text",
"text": "{shot}"
},
"BOTTOM_LEFT": {
"function": "timecode"
},
"BOTTOM_RIGHT": {
"function": "frame_numbers"
}
"TOP_LEFT": "static_text",
"TOP_RIGHT": "{shot}",
"BOTTOM_LEFT": "TC: {timecode}",
"BOTTOM_RIGHT": "{frame_start}{current_frame}"
}
}
For this preset we'll need at least this data:
data = {
"start_frame": 1001,
"frame_start": 1001,
"shot": "sh0010"
}
When Timecode should start from 1 then data need:
data = {
"start_frame": 1001,
"start_frame_tc": 1,
"frame_start": 1001,
"frame_start_tc": 1,
"shot": "sh0010"
}
'''
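Putting the docstring together, a minimal call could look like this (paths and codec arguments hypothetical):

burnins_from_data(
    "/tmp/input.mov",
    "/tmp/output.mov",
    {
        "frame_start": 1001,
        "frame_start_tc": 1,
        "shot": "sh0010",
    },
    codec_data=["-codec:v", "prores_ks"],
    overwrite=True,
)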
@ -381,100 +372,90 @@ def burnins_from_data(input_path, codec_data, output_path, data, overwrite=True)
stream = burnin._streams[0]
if "resolution_width" not in data:
data["resolution_width"] = stream.get("width", "Unknown")
data["resolution_width"] = stream.get("width", MISSING_KEY_VALUE)
if "resolution_height" not in data:
data["resolution_height"] = stream.get("height", "Unknown")
data["resolution_height"] = stream.get("height", MISSING_KEY_VALUE)
if "fps" not in data:
data["fps"] = get_fps(stream.get("r_frame_rate", "0/0"))
for align_text, preset in presets.get('burnins', {}).items():
# Check frame start and add expression if is available
if frame_start is not None:
data[CURRENT_FRAME_KEY[1:-1]] = r'%%{eif\:n+%d\:d}' % frame_start
if frame_start_tc is not None:
data[TIME_CODE_KEY[1:-1]] = TIME_CODE_KEY
for align_text, value in presets.get('burnins', {}).items():
if not value:
continue
has_timecode = TIME_CODE_KEY in value
align = None
if align_text == 'TOP_LEFT':
align_text = align_text.strip().lower()
if align_text == "top_left":
align = ModifiedBurnins.TOP_LEFT
elif align_text == 'TOP_CENTERED':
elif align_text == "top_centered":
align = ModifiedBurnins.TOP_CENTERED
elif align_text == 'TOP_RIGHT':
elif align_text == "top_right":
align = ModifiedBurnins.TOP_RIGHT
elif align_text == 'BOTTOM_LEFT':
elif align_text == "bottom_left":
align = ModifiedBurnins.BOTTOM_LEFT
elif align_text == 'BOTTOM_CENTERED':
elif align_text == "bottom_centered":
align = ModifiedBurnins.BOTTOM_CENTERED
elif align_text == 'BOTTOM_RIGHT':
elif align_text == "bottom_right":
align = ModifiedBurnins.BOTTOM_RIGHT
bi_func = preset.get('function')
if not bi_func:
log.error(
'Missing function for burnin!'
'Burnins are not created!'
# Replace with missing key value if frame_start_tc is not set
if frame_start_tc is None and has_timecode:
has_timecode = False
log.warning(
"`frame_start` and `frame_start_tc`"
" are not set in entered data."
)
return
value = value.replace(TIME_CODE_KEY, MISSING_KEY_VALUE)
if (
bi_func in ['frame_numbers', 'timecode'] and
frame_start is None
):
log.error(
'start_frame is not set in entered data!'
'Burnins are not created!'
)
return
key_pattern = re.compile(r"(\{.*?[^{0]*\})")
if bi_func == 'frame_numbers':
current_frame_identifier = "{current_frame}"
text = preset.get('text') or current_frame_identifier
missing_keys = []
for group in key_pattern.findall(value):
try:
group.format(**data)
except (TypeError, KeyError):
missing_keys.append(group)
if current_frame_identifier not in text:
log.warning((
'Text for frame numbers doesn\'t have '
'`{current_frame}` key in text!'
))
missing_keys = list(set(missing_keys))
for key in missing_keys:
value = value.replace(key, MISSING_KEY_VALUE)
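How the key pattern and the N/A fallback behave on a sample template (sample data made up):

import re

key_pattern = re.compile(r"(\{.*?[^{0]*\})")
value = "{shot} frame {current_frame}"
data = {"shot": "sh0010"}  # no frame_start, so {current_frame} is missing

missing_keys = []
for group in key_pattern.findall(value):
    try:
        group.format(**data)
    except (TypeError, KeyError):
        missing_keys.append(group)
for key in set(missing_keys):
    value = value.replace(key, "N/A")

print(value.format(**data))  # "sh0010 frame N/A"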
text_items = []
split_items = text.split(current_frame_identifier)
for item in split_items:
text_items.append(item.format(**data))
# Handle timecode differently
if has_timecode:
args = [align, frame_start, frame_start_tc]
if not value.startswith(TIME_CODE_KEY):
value_items = value.split(TIME_CODE_KEY)
text = value_items[0].format(**data)
args.append(text)
text = "{current_frame}".join(text_items)
burnin.add_timecode(*args)
continue
burnin.add_frame_numbers(align, start_frame=frame_start, text=text)
text = value.format(**data)
burnin.add_text(text, align, frame_start)
elif bi_func == 'timecode':
burnin.add_timecode(align, start_frame=frame_start_tc)
elif bi_func == 'text':
if not preset.get('text'):
log.error('Text is not set for text function burnin!')
return
text = preset['text'].format(**data)
burnin.add_text(text, align)
elif bi_func == "datetime":
date_format = preset["format"]
burnin.add_datetime(date_format, align)
else:
log.error(
'Unknown function for burnins {}'.format(bi_func)
)
return
codec_args = ''
if codec_data is not []:
codec_args = ""
if codec_data:
codec_args = " ".join(codec_data)
burnin.render(output_path, args=codec_args, overwrite=overwrite, **data)
if __name__ == '__main__':
import sys
import json
data = json.loads(sys.argv[-1])
in_data = json.loads(sys.argv[-1])
burnins_from_data(
data['input'],
data['codec'],
data['output'],
data['burnin_data']
in_data['input'],
in_data['output'],
in_data['burnin_data'],
in_data['codec']
)
View file
@ -19,8 +19,8 @@ class UserModule:
log = pype.Logger().get_logger("UserModule", "user")
def __init__(self, main_parent=None, parent=None):
self._callbacks_on_user_change = []
self.cred = {}
self.cred_path = os.path.normpath(os.path.join(
self.cred_folder_path, self.cred_filename
))
@ -28,6 +28,9 @@ class UserModule:
self.load_credentials()
def register_callback_on_user_change(self, callback):
self._callbacks_on_user_change.append(callback)
def tray_start(self):
"""Store credentials to env and preset them to widget"""
username = ""
@ -37,6 +40,9 @@ class UserModule:
os.environ[self.env_name] = username
self.widget_login.set_user(username)
def get_user(self):
return self.cred.get("username") or getpass.getuser()
def process_modules(self, modules):
""" Gives ability to connect with imported modules from TrayManager.
@ -95,6 +101,17 @@ class UserModule:
))
return self.save_credentials(getpass.getuser())
def change_credentials(self, username):
self.save_credentials(username)
for callback in self._callbacks_on_user_change:
try:
callback()
except Exception:
self.log.warning(
"Failed to execute callback \"{}\".".format(str(callback)),
exc_info=True
)
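Sketch of how another module might hook the new callback (consumer hypothetical):

def on_user_change():
    print("User changed, refreshing session data...")

# `user_module` is the UserModule instance owned by the tray.
user_module.register_callback_on_user_change(on_user_change)
# change_credentials("jakub") then saves the name and fires the callback.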
def save_credentials(self, username):
"""Save credentials to JSON file, env and widget"""
if username is None:
View file
@ -77,7 +77,7 @@ class UserWidget(QtWidgets.QWidget):
def click_save(self):
# all what should happen - validations and saving into appsdir
username = self.input_username.text()
self.module.save_credentials(username)
self.module.change_credentials(username)
self._close_widget()
def closeEvent(self, event):