Merge branch 'develop' into draft/ffmpeg_with_multiple_paths

This commit is contained in:
iLLiCiTiT 2020-02-17 17:33:57 +01:00
commit 71c71057ec
49 changed files with 2135 additions and 1999 deletions

View file

@ -0,0 +1,345 @@
import os
import requests
import errno
import json
from bson.objectid import ObjectId
from pype.ftrack import BaseAction
from pype.ftrack.lib import (
get_project_from_entity,
get_avalon_entities_for_assetversion
)
from pypeapp import Anatomy
from pype.ftrack.lib.io_nonsingleton import DbConnector
class StoreThumbnailsToAvalon(BaseAction):
# Action identifier
identifier = "store.thubmnail.to.avalon"
# Action label
label = "Pype Admin"
# Action variant
variant = "- Store Thumbnails to avalon"
# Action description
description = 'Test action'
# roles that are allowed to register this action
role_list = ["Pypeclub", "Administrator", "Project Manager"]
icon = '{}/ftrack/action_icons/PypeAdmin.svg'.format(
os.environ.get('PYPE_STATICS_SERVER', '')
)
thumbnail_key = "AVALON_THUMBNAIL_ROOT"
db_con = DbConnector()
def discover(self, session, entities, event):
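# Show the action only when the selection contains at least one AssetVersion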
for entity in entities:
if entity.entity_type.lower() == "assetversion":
return True
return False
def launch(self, session, entities, event):
# DEBUG LINE
# root_path = r"C:\Users\jakub.trllo\Desktop\Tests\ftrack_thumbnails"
user = session.query(
"User where username is '{0}'".format(session.api_user)
).one()
action_job = session.create("Job", {
"user": user,
"status": "running",
"data": json.dumps({
"description": "Storing thumbnails to avalon."
})
})
session.commit()
thumbnail_roots = os.environ.get(self.thumbnail_key)
if not thumbnail_roots:
msg = "`{}` environment is not set".format(self.thumbnail_key)
action_job["status"] = "failed"
session.commit()
self.log.warning(msg)
return {
"success": False,
"message": msg
}
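# Pick the first configured thumbnail root that exists on this machine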
existing_thumbnail_root = None
for path in thumbnail_roots.split(os.pathsep):
if os.path.exists(path):
existing_thumbnail_root = path
break
if existing_thumbnail_root is None:
msg = (
"Can't access any of the paths set in `{}` ({})"
).format(self.thumbnail_key, thumbnail_roots)
action_job["status"] = "failed"
session.commit()
self.log.warning(msg)
return {
"success": False,
"message": msg
}
project = get_project_from_entity(entities[0])
project_name = project["full_name"]
anatomy = Anatomy(project_name)
if "publish" not in anatomy.templates:
msg = "Anatomy does not have set publish key!"
action_job["status"] = "failed"
session.commit()
self.log.warning(msg)
return {
"success": False,
"message": msg
}
if "thumbnail" not in anatomy.templates["publish"]:
msg = (
"The \"thumbnail\" template is not set"
" in Anatomy for project \"{}\""
).format(project_name)
action_job["status"] = "failed"
session.commit()
self.log.warning(msg)
return {
"success": False,
"message": msg
}
example_template_data = {
"_id": "ID",
"thumbnail_root": "THUBMNAIL_ROOT",
"thumbnail_type": "THUMBNAIL_TYPE",
"ext": ".EXT",
"project": {
"name": "PROJECT_NAME",
"code": "PROJECT_CODE"
},
"asset": "ASSET_NAME",
"subset": "SUBSET_NAME",
"version": "VERSION_NAME",
"hierarchy": "HIERARCHY"
}
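# Fill the templates with placeholder values to verify the thumbnail template is solvable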
tmp_filled = anatomy.format_all(example_template_data)
thumbnail_result = tmp_filled["publish"]["thumbnail"]
if not thumbnail_result.solved:
missing_keys = thumbnail_result.missing_keys
invalid_types = thumbnail_result.invalid_types
submsg = ""
if missing_keys:
submsg += "Missing keys: {}".format(", ".join(
["\"{}\"".format(key) for key in missing_keys]
))
if invalid_types:
items = []
for key, value in invalid_types.items():
items.append("{}{}".format(str(key), str(value)))
submsg += "Invalid types: {}".format(", ".join(items))
msg = (
"Thumbnail Anatomy template expects more keys than action"
" can offer. {}"
).format(submsg)
action_job["status"] = "failed"
session.commit()
self.log.warning(msg)
return {
"success": False,
"message": msg
}
thumbnail_template = anatomy.templates["publish"]["thumbnail"]
self.db_con.install()
for entity in entities:
# Skip if entity is not AssetVersion (should never happen, but..)
if entity.entity_type.lower() != "assetversion":
continue
# Skip if AssetVersion doesn't have a thumbnail
thumbnail_ent = entity["thumbnail"]
if thumbnail_ent is None:
self.log.debug((
"Skipping. AssetVersion doesn't"
" have a thumbnail set. {}"
).format(entity["id"]))
continue
avalon_ents_result = get_avalon_entities_for_assetversion(
entity, self.db_con
)
version_full_path = (
"Asset: \"{project_name}/{asset_path}\""
" | Subset: \"{subset_name}\""
" | Version: \"{version_name}\""
).format(**avalon_ents_result)
version = avalon_ents_result["version"]
if not version:
self.log.warning((
"AssetVersion does not have a version in avalon. {}"
).format(version_full_path))
continue
thumbnail_id = version["data"].get("thumbnail_id")
if thumbnail_id:
self.log.info((
"AssetVersion skipped, already has a thumbnail set. {}"
).format(version_full_path))
continue
# Get thumbnail extension
file_ext = thumbnail_ent["file_type"]
if not file_ext.startswith("."):
file_ext = ".{}".format(file_ext)
avalon_project = avalon_ents_result["project"]
avalon_asset = avalon_ents_result["asset"]
hierarchy = ""
parents = avalon_asset["data"].get("parents") or []
if parents:
hierarchy = "/".join(parents)
# Prepare anatomy template fill data
# 1. Create new id for thumbnail entity
thumbnail_id = ObjectId()
template_data = {
"_id": str(thumbnail_id),
"thumbnail_root": existing_thumbnail_root,
"thumbnail_type": "thumbnail",
"ext": file_ext,
"project": {
"name": avalon_project["name"],
"code": avalon_project["data"].get("code")
},
"asset": avalon_ents_result["asset_name"],
"subset": avalon_ents_result["subset_name"],
"version": avalon_ents_result["version_name"],
"hierarchy": hierarchy
}
anatomy_filled = anatomy.format(template_data)
thumbnail_path = anatomy_filled["publish"]["thumbnail"]
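# Collapse a possible double dot ("ext" already contains a leading dot)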
thumbnail_path = thumbnail_path.replace("..", ".")
thumbnail_path = os.path.normpath(thumbnail_path)
downloaded = False
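# Try each component location until a download succeeds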
for loc in (thumbnail_ent.get("component_locations") or []):
res_id = loc.get("resource_identifier")
if not res_id:
continue
thumbnail_url = self.get_thumbnail_url(res_id)
if self.download_file(thumbnail_url, thumbnail_path):
downloaded = True
break
if not downloaded:
self.log.warning(
"Could not download thumbnail for {}".format(
version_full_path
)
)
continue
# Clean template data from keys that are dynamic
template_data.pop("_id")
template_data.pop("thumbnail_root")
thumbnail_entity = {
"_id": thumbnail_id,
"type": "thumbnail",
"schema": "pype:thumbnail-1.0",
"data": {
"template": thumbnail_template,
"template_data": template_data
}
}
# Create thumbnail entity
self.log.debug(
"Creating entity in database {}".format(str(thumbnail_entity))
)
self.db_con.insert_one(thumbnail_entity)
# Set thumbnail id for version
self.db_con.update_one(
{"_id": version["_id"]},
{"$set": {"data.thumbnail_id": thumbnail_id}}
)
action_job["status"] = "done"
session.commit()
return True
def get_thumbnail_url(self, resource_identifier, size=None):
# TODO: use an ftrack_api method instead (find a way to use it)
url_string = (
u'{url}/component/thumbnail?id={id}&username={username}'
u'&apiKey={apiKey}'
)
url = url_string.format(
url=self.session.server_url,
id=resource_identifier,
username=self.session.api_user,
apiKey=self.session.api_key
)
if size:
url += u'&size={0}'.format(size)
return url
def download_file(self, source_url, dst_file_path):
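# Make sure the destination folder exists before writing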
dir_path = os.path.dirname(dst_file_path)
try:
os.makedirs(dir_path)
except OSError as exc:
if exc.errno != errno.EEXIST:
self.log.warning(
"Could not create folder: \"{}\"".format(dir_path)
)
return False
self.log.debug(
"Downloading file \"{}\" -> \"{}\"".format(
source_url, dst_file_path
)
)
try:
response = requests.get(source_url)
response.raise_for_status()
with open(dst_file_path, "wb") as file_stream:
file_stream.write(response.content)
except Exception:
self.log.warning(
"Download of image `{}` failed.".format(source_url)
)
return False
return True
def register(session, plugins_presets={}):
StoreThumbnailsToAvalon(session, plugins_presets).register()

View file

@ -3,6 +3,7 @@ import collections
import copy
import queue
import time
import datetime
import atexit
import traceback
@ -28,7 +29,7 @@ class SyncToAvalonEvent(BaseEvent):
ignore_entTypes = [
"socialfeed", "socialnotification", "note",
"assetversion", "job", "user", "reviewsessionobject", "timer",
"timelog", "auth_userrole", "appointment"
"timelog", "auth_userrole", "appointment", "notelabellink"
]
ignore_ent_types = ["Milestone"]
ignore_keys = ["statusid", "thumbid"]
@ -51,9 +52,39 @@ class SyncToAvalonEvent(BaseEvent):
def __init__(self, session, plugins_presets={}):
'''Expects a ftrack_api.Session instance'''
# Debug settings
# - time expiration in seconds
self.debug_print_time_expiration = 5 * 60
# - store current time
self.debug_print_time = datetime.datetime.now()
# - store synchronize entity types to be able to use
# only entityTypes in interest instead of filtering by ignored
self.debug_sync_types = collections.defaultdict(list)
# Set processing session to not use global
self.set_process_session(session)
super().__init__(session, plugins_presets)
def debug_logs(self):
"""This is debug method for printing small debugs messages. """
now_datetime = datetime.datetime.now()
delta = now_datetime - self.debug_print_time
if delta.total_seconds() < self.debug_print_time_expiration:
return
self.debug_print_time = now_datetime
known_types_items = []
for entityType, entity_type in self.debug_sync_types.items():
ent_types_msg = ", ".join(entity_type)
known_types_items.append(
"<{}> ({})".format(entityType, ent_types_msg)
)
known_entityTypes = ", ".join(known_types_items)
self.log.debug(
"DEBUG MESSAGE: Known types {}".format(known_entityTypes)
)
@property
def cur_project(self):
if self._cur_project is None:
@ -484,6 +515,9 @@ class SyncToAvalonEvent(BaseEvent):
if not entity_type or entity_type in self.ignore_ent_types:
continue
if entity_type not in self.debug_sync_types[entityType]:
self.debug_sync_types[entityType].append(entity_type)
action = ent_info["action"]
ftrack_id = ent_info["entityId"]
if isinstance(ftrack_id, list):
@ -573,8 +607,7 @@ class SyncToAvalonEvent(BaseEvent):
if auto_sync is not True:
return True
debug_msg = ""
debug_msg += "Updated: {}".format(len(updated))
debug_msg = "Updated: {}".format(len(updated))
debug_action_map = {
"add": "Created",
"remove": "Removed",
@ -634,6 +667,8 @@ class SyncToAvalonEvent(BaseEvent):
self.ftrack_added = entities_by_action["add"]
self.ftrack_updated = updated
self.debug_logs()
self.log.debug("Synchronization begins")
try:
time_1 = time.time()
@ -1545,6 +1580,14 @@ class SyncToAvalonEvent(BaseEvent):
entity_type_conf_ids[entity_type] = configuration_id
break
if not configuration_id:
self.log.warning(
"BUG REPORT: Missing configuration for `{} < {} >`".format(
entity_type, ent_info["entityType"]
)
)
continue
_entity_key = collections.OrderedDict({
"configuration_id": configuration_id,
"entity_id": ftrack_id
@ -1563,7 +1606,7 @@ class SyncToAvalonEvent(BaseEvent):
try:
# Commit changes of mongo_id to empty string
self.process_session.commit()
self.log.debug("Commititng unsetting")
self.log.debug("Committing unsetting")
except Exception:
self.process_session.rollback()
# TODO logging
@ -1643,7 +1686,7 @@ class SyncToAvalonEvent(BaseEvent):
new_name, "task", schema_patterns=self.regex_schemas
)
if not passed_regex:
self.regex_failed.append(ent_infos["entityId"])
self.regex_failed.append(ent_info["entityId"])
continue
if new_name not in self.task_changes_by_avalon_id[mongo_id]:

View file

@ -4,9 +4,13 @@ import signal
import datetime
import subprocess
import socket
import json
import platform
import argparse
import getpass
import atexit
import time
import uuid
import ftrack_api
from pype.ftrack.lib import credentials
@ -63,10 +67,19 @@ def validate_credentials(url, user, api):
)
session.close()
except Exception as e:
print(
'ERROR: Can\'t log into Ftrack with used credentials:'
' Ftrack server: "{}" // Username: {} // API key: {}'
).format(url, user, api)
print("Can't log into Ftrack with used credentials:")
ftrack_cred = {
"Ftrack server": str(url),
"Username": str(user),
"API key": str(api)
}
item_lens = [len(key) + 1 for key in ftrack_cred.keys()]
justify_len = max(item_lens)
for key, value in ftrack_cred.items():
print("{} {}".format(
(key + ":").ljust(justify_len, " "),
value
))
return False
print('DEBUG: Credentials Username: "{}", API key: "{}" are valid.'.format(
@ -175,6 +188,7 @@ def main_loop(ftrack_url):
otherwise thread will be killed.
"""
os.environ["FTRACK_EVENT_SUB_ID"] = str(uuid.uuid1())
# Get mongo hostname and port for testing mongo connection
mongo_list = ftrack_events_mongo_settings()
mongo_hostname = mongo_list[0]
@ -202,6 +216,13 @@ def main_loop(ftrack_url):
processor_last_failed = datetime.datetime.now()
processor_failed_count = 0
statuser_name = "StorerThread"
statuser_port = 10021
statuser_path = "{}/sub_event_status.py".format(file_path)
statuser_thread = None
statuser_last_failed = datetime.datetime.now()
statuser_failed_count = 0
ftrack_accessible = False
mongo_accessible = False
@ -210,7 +231,7 @@ def main_loop(ftrack_url):
# stop threads on exit
# TODO check if works and args have thread objects!
def on_exit(processor_thread, storer_thread):
def on_exit(processor_thread, storer_thread, statuser_thread):
if processor_thread is not None:
processor_thread.stop()
processor_thread.join()
@ -221,9 +242,27 @@ def main_loop(ftrack_url):
storer_thread.join()
storer_thread = None
if statuser_thread is not None:
statuser_thread.stop()
statuser_thread.join()
statuser_thread = None
atexit.register(
on_exit, processor_thread=processor_thread, storer_thread=storer_thread
on_exit,
processor_thread=processor_thread,
storer_thread=storer_thread,
statuser_thread=statuser_thread
)
system_name, pc_name = platform.uname()[:2]
host_name = socket.gethostname()
main_info = {
"created_at": datetime.datetime.now().strftime("%Y.%m.%d %H:%M:%S"),
"Username": getpass.getuser(),
"Host Name": host_name,
"Host IP": socket.gethostbyname(host_name)
}
main_info_str = json.dumps(main_info)
# Main loop
while True:
# Check if accessible Ftrack and Mongo url
@ -261,6 +300,52 @@ def main_loop(ftrack_url):
printed_ftrack_error = False
printed_mongo_error = False
# ====== STATUSER =======
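# Statuser runs "sub_event_status.py" and serves the event server status action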
if statuser_thread is None:
if statuser_failed_count < max_fail_count:
statuser_thread = socket_thread.StatusSocketThread(
statuser_name, statuser_port, statuser_path,
[main_info_str]
)
statuser_thread.start()
elif statuser_failed_count == max_fail_count:
print((
"Statuser failed {} times in a row."
" Will try to run again in {}s."
).format(str(max_fail_count), str(wait_time_after_max_fail)))
statuser_failed_count += 1
elif ((
datetime.datetime.now() - statuser_last_failed
).seconds > wait_time_after_max_fail):
statuser_failed_count = 0
# If the thread failed, test Ftrack and Mongo connections
elif not statuser_thread.isAlive():
statuser_thread.join()
statuser_thread = None
ftrack_accessible = False
mongo_accessible = False
_statuser_last_failed = datetime.datetime.now()
delta_time = (
_statuser_last_failed - statuser_last_failed
).seconds
if delta_time < min_fail_seconds:
statuser_failed_count += 1
else:
statuser_failed_count = 0
statuser_last_failed = _statuser_last_failed
elif statuser_thread.stop_subprocess:
print("Main process was stopped by action")
on_exit(processor_thread, storer_thread, statuser_thread)
os.kill(os.getpid(), signal.SIGTERM)
return 1
# ====== STORER =======
# Run backup thread which does not require mongo to work
if storer_thread is None:
if storer_failed_count < max_fail_count:
@ -268,6 +353,7 @@ def main_loop(ftrack_url):
storer_name, storer_port, storer_path
)
storer_thread.start()
elif storer_failed_count == max_fail_count:
print((
"Storer failed {}times I'll try to run again {}s later"
@ -295,6 +381,7 @@ def main_loop(ftrack_url):
storer_failed_count = 0
storer_last_failed = _storer_last_failed
# ====== PROCESSOR =======
if processor_thread is None:
if processor_failed_count < max_fail_count:
processor_thread = socket_thread.SocketThread(
@ -336,6 +423,10 @@ def main_loop(ftrack_url):
processor_failed_count = 0
processor_last_failed = _processor_last_failed
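# Give the statuser references to the current threads so it can report their status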
if statuser_thread is not None:
statuser_thread.set_process("storer", storer_thread)
statuser_thread.set_process("processor", processor_thread)
time.sleep(1)
@ -446,9 +537,9 @@ def main(argv):
event_paths = kwargs.ftrackeventpaths
if not kwargs.noloadcred:
cred = credentials._get_credentials(True)
cred = credentials.get_credentials(ftrack_url)
username = cred.get('username')
api_key = cred.get('apiKey')
api_key = cred.get('api_key')
if kwargs.ftrackuser:
username = kwargs.ftrackuser
@ -482,7 +573,7 @@ def main(argv):
return 1
if kwargs.storecred:
credentials._save_credentials(username, api_key, True)
credentials.save_credentials(username, api_key, ftrack_url)
# Set Ftrack environments
os.environ["FTRACK_SERVER"] = ftrack_url

View file

@ -100,9 +100,9 @@ class FtrackServer:
log.warning(msg, exc_info=e)
if len(register_functions_dict) < 1:
raise Exception((
"There are no events with register function."
" Registered paths: \"{}\""
log.warning((
"There are no events with `register` function"
" in registered paths: \"{}\""
).format("| ".join(paths)))
# Load presets for setting plugins
@ -122,7 +122,7 @@ class FtrackServer:
else:
register(self.session, plugins_presets=plugins_presets)
if function_counter%7 == 0:
if function_counter % 7 == 0:
time.sleep(0.1)
function_counter += 1
except Exception as exc:

View file

@ -28,6 +28,10 @@ from pypeapp import Logger
from pype.ftrack.lib.custom_db_connector import DbConnector
TOPIC_STATUS_SERVER = "pype.event.server.status"
TOPIC_STATUS_SERVER_RESULT = "pype.event.server.status.result"
def ftrack_events_mongo_settings():
host = None
port = None
@ -123,20 +127,59 @@ def check_ftrack_url(url, log_errors=True):
return url
class StorerEventHub(ftrack_api.event.hub.EventHub):
class SocketBaseEventHub(ftrack_api.event.hub.EventHub):
hearbeat_msg = b"hearbeat"
heartbeat_callbacks = []
def __init__(self, *args, **kwargs):
self.sock = kwargs.pop("sock")
super(StorerEventHub, self).__init__(*args, **kwargs)
super(SocketBaseEventHub, self).__init__(*args, **kwargs)
def _handle_packet(self, code, packet_identifier, path, data):
"""Override `_handle_packet` which extend heartbeat"""
code_name = self._code_name_mapping[code]
if code_name == "heartbeat":
# Reply with heartbeat.
self.sock.sendall(b"storer")
return self._send_packet(self._code_name_mapping['heartbeat'])
for callback in self.heartbeat_callbacks:
callback()
elif code_name == "connect":
self.sock.sendall(self.heartbeat_msg)
return self._send_packet(self._code_name_mapping["heartbeat"])
return super(SocketBaseEventHub, self)._handle_packet(
code, packet_identifier, path, data
)
class StatusEventHub(SocketBaseEventHub):
def _handle_packet(self, code, packet_identifier, path, data):
"""Override `_handle_packet` which extend heartbeat"""
code_name = self._code_name_mapping[code]
if code_name == "connect":
event = ftrack_api.event.base.Event(
topic="pype.status.started",
data={},
source={
"id": self.id,
"user": {"username": self._api_user}
}
)
self._event_queue.put(event)
return super(StatusEventHub, self)._handle_packet(
code, packet_identifier, path, data
)
class StorerEventHub(SocketBaseEventHub):
hearbeat_msg = b"storer"
def _handle_packet(self, code, packet_identifier, path, data):
"""Override `_handle_packet` which extend heartbeat"""
code_name = self._code_name_mapping[code]
if code_name == "connect":
event = ftrack_api.event.base.Event(
topic="pype.storer.started",
data={},
@ -152,7 +195,9 @@ class StorerEventHub(ftrack_api.event.hub.EventHub):
)
class ProcessEventHub(ftrack_api.event.hub.EventHub):
class ProcessEventHub(SocketBaseEventHub):
hearbeat_msg = b"processor"
url, database, table_name = get_ftrack_event_mongo_info()
is_table_created = False
@ -164,7 +209,6 @@ class ProcessEventHub(ftrack_api.event.hub.EventHub):
database_name=self.database,
table_name=self.table_name
)
self.sock = kwargs.pop("sock")
super(ProcessEventHub, self).__init__(*args, **kwargs)
def prepare_dbcon(self):
@ -260,42 +304,10 @@ class ProcessEventHub(ftrack_api.event.hub.EventHub):
code_name = self._code_name_mapping[code]
if code_name == "event":
return
if code_name == "heartbeat":
self.sock.sendall(b"processor")
return self._send_packet(self._code_name_mapping["heartbeat"])
return super()._handle_packet(code, packet_identifier, path, data)
class UserEventHub(ftrack_api.event.hub.EventHub):
def __init__(self, *args, **kwargs):
self.sock = kwargs.pop("sock")
super(UserEventHub, self).__init__(*args, **kwargs)
def _handle_packet(self, code, packet_identifier, path, data):
"""Override `_handle_packet` which extend heartbeat"""
code_name = self._code_name_mapping[code]
if code_name == "heartbeat":
# Reply with heartbeat.
self.sock.sendall(b"hearbeat")
return self._send_packet(self._code_name_mapping['heartbeat'])
elif code_name == "connect":
event = ftrack_api.event.base.Event(
topic="pype.storer.started",
data={},
source={
"id": self.id,
"user": {"username": self._api_user}
}
)
self._event_queue.put(event)
return super(UserEventHub, self)._handle_packet(
code, packet_identifier, path, data
)
class SocketSession(ftrack_api.session.Session):
'''An isolated session for interaction with an ftrack server.'''
def __init__(

View file

@ -3,6 +3,7 @@ import sys
import time
import socket
import threading
import traceback
import subprocess
from pypeapp import Logger
@ -12,13 +13,15 @@ class SocketThread(threading.Thread):
MAX_TIMEOUT = 35
def __init__(self, name, port, filepath):
def __init__(self, name, port, filepath, additional_args=None):
super(SocketThread, self).__init__()
self.log = Logger().get_logger("SocketThread", "Event Thread")
self.log = Logger().get_logger(self.__class__.__name__)
self.setName(name)
self.name = name
self.port = port
self.filepath = filepath
self.additional_args = additional_args or []
self.sock = None
self.subproc = None
self.connection = None
@ -53,7 +56,13 @@ class SocketThread(threading.Thread):
)
self.subproc = subprocess.Popen(
[sys.executable, self.filepath, "-port", str(self.port)]
[
sys.executable,
self.filepath,
*self.additional_args,
str(self.port)
],
stdin=subprocess.PIPE
)
# Listen for incoming connections
@ -127,3 +136,52 @@ class SocketThread(threading.Thread):
if data == b"MongoError":
self.mongo_error = True
connection.sendall(data)
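# Socket thread for the status subprocess; translates restart requests sent over the socket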
class StatusSocketThread(SocketThread):
process_name_mapping = {
b"RestartS": "storer",
b"RestartP": "processor",
b"RestartM": "main"
}
def __init__(self, *args, **kwargs):
self.process_threads = {}
self.stop_subprocess = False
super(StatusSocketThread, self).__init__(*args, **kwargs)
def set_process(self, process_name, thread):
try:
if not self.subproc:
self.process_threads[process_name] = None
return
if (
process_name in self.process_threads and
self.process_threads[process_name] == thread
):
return
self.process_threads[process_name] = thread
self.subproc.stdin.write(
str.encode("reset:{}\r\n".format(process_name))
)
self.subproc.stdin.flush()
except Exception:
print("Could not set thread in StatusSocketThread")
traceback.print_exception(*sys.exc_info())
def _handle_data(self, connection, data):
if not data:
return
process_name = self.process_name_mapping.get(data)
if process_name:
if process_name == "main":
self.stop_subprocess = True
else:
subp = self.process_threads.get(process_name)
if subp:
subp.stop()
connection.sendall(data)

View file

@ -1,13 +1,59 @@
import os
import sys
import signal
import socket
import datetime
from ftrack_server import FtrackServer
from pype.ftrack.ftrack_server.lib import SocketSession, ProcessEventHub
from pype.ftrack.ftrack_server.lib import (
SocketSession, ProcessEventHub, TOPIC_STATUS_SERVER
)
import ftrack_api
from pypeapp import Logger
log = Logger().get_logger("Event processor")
subprocess_started = datetime.datetime.now()
class SessionFactory:
session = None
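# Publish info about this processor subprocess when the status action requests it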
def send_status(event):
subprocess_id = event["data"].get("subprocess_id")
if not subprocess_id:
return
if subprocess_id != os.environ["FTRACK_EVENT_SUB_ID"]:
return
session = SessionFactory.session
if not session:
return
new_event_data = {
"subprocess_id": subprocess_id,
"source": "processor",
"status_info": {
"created_at": subprocess_started.strftime("%Y.%m.%d %H:%M:%S")
}
}
new_event = ftrack_api.event.base.Event(
topic="pype.event.server.status.result",
data=new_event_data
)
session.event_hub.publish(new_event)
def register(session):
'''Registers the event, subscribing the discover and launch topics.'''
session.event_hub.subscribe(
"topic={}".format(TOPIC_STATUS_SERVER), send_status
)
def main(args):
port = int(args[-1])
@ -24,6 +70,9 @@ def main(args):
session = SocketSession(
auto_connect_event_hub=True, sock=sock, Eventhub=ProcessEventHub
)
register(session)
SessionFactory.session = session
server = FtrackServer("event")
log.debug("Launched Ftrack Event processor")
server.run_server(session)

View file

@ -0,0 +1,436 @@
import os
import sys
import json
import threading
import signal
import socket
import datetime
import ftrack_api
from ftrack_server import FtrackServer
from pype.ftrack.ftrack_server.lib import (
SocketSession, StatusEventHub,
TOPIC_STATUS_SERVER, TOPIC_STATUS_SERVER_RESULT
)
from pypeapp import Logger, config
log = Logger().get_logger("Event storer")
action_identifier = (
"event.server.status" + os.environ["FTRACK_EVENT_SUB_ID"]
)
host_ip = socket.gethostbyname(socket.gethostname())
action_data = {
"label": "Pype Admin",
"variant": "- Event server Status ({})".format(host_ip),
"description": "Get Infromation about event server",
"actionIdentifier": action_identifier,
"icon": "{}/ftrack/action_icons/PypeAdmin.svg".format(
os.environ.get(
"PYPE_STATICS_SERVER",
"http://localhost:{}".format(
config.get_presets().get("services", {}).get(
"rest_api", {}
).get("default_port", 8021)
)
)
)
}
class ObjectFactory:
session = None
status_factory = None
checker_thread = None
last_trigger = None
class Status:
default_item = {
"type": "label",
"value": "Process info is not available at this moment."
}
def __init__(self, name, label, parent):
self.name = name
self.label = label or name
self.parent = parent
self.info = None
self.last_update = None
def update(self, info):
self.last_update = datetime.datetime.now()
self.info = info
def get_delta_string(self, delta):
days = delta.days
hours = delta.seconds // 3600
minutes = delta.seconds // 60 % 60
# Skip leading zero units (a unit is shown if it or any larger unit is nonzero)
delta_items = []
if days:
delta_items.append("{}d".format(days))
if days or hours:
delta_items.append("{}h".format(hours))
if days or hours or minutes:
delta_items.append("{}m".format(minutes))
delta_items.append("{}s".format(delta.seconds % 60))
return " ".join(delta_items)
def get_items(self):
items = []
last_update = "N/A"
if self.last_update:
delta = datetime.datetime.now() - self.last_update
last_update = "{} ago".format(
self.get_delta_string(delta)
)
last_update = "Updated: {}".format(last_update)
items.append({
"type": "label",
"value": "#{}".format(self.label)
})
items.append({
"type": "label",
"value": "##{}".format(last_update)
})
if not self.info:
if self.info is None:
trigger_info_get()
items.append(self.default_item)
return items
info = {}
for key, value in self.info.items():
if key not in ["created_at:", "created_at"]:
info[key] = value
continue
datetime_value = datetime.datetime.strptime(
value, "%Y.%m.%d %H:%M:%S"
)
delta = datetime.datetime.now() - datetime_value
running_for = self.get_delta_string(delta)
info["Started at"] = "{} [running: {}]".format(value, running_for)
for key, value in info.items():
items.append({
"type": "label",
"value": "<b>{}:</b> {}".format(key, value)
})
return items
class StatusFactory:
note_item = {
"type": "label",
"value": (
"<i>HINT: To refresh data uncheck"
" all checkboxes and hit `Submit` button.</i>"
)
}
splitter_item = {
"type": "label",
"value": "---"
}
def __init__(self, statuses=None):
self.statuses = []
for status in (statuses or {}).items():
self.create_status(*status)
def __getitem__(self, key):
return self.get(key)
def get(self, key, default=None):
for status in self.statuses:
if status.name == key:
return status
return default
def is_filled(self):
for status in self.statuses:
if status.info is None:
return False
return True
def create_status(self, name, label):
new_status = Status(name, label, self)
self.statuses.append(new_status)
def process_event_result(self, event):
subprocess_id = event["data"].get("subprocess_id")
if subprocess_id != os.environ["FTRACK_EVENT_SUB_ID"]:
return
source = event["data"]["source"]
data = event["data"]["status_info"]
self.update_status_info(source, data)
def update_status_info(self, process_name, info):
for status in self.statuses:
if status.name == process_name:
status.update(info)
break
def bool_items(self):
items = []
items.append({
"type": "label",
"value": "#Restart process"
})
items.append({
"type": "label",
"value": (
"<i><b>WARNING:</b> Main process may shut down when checked"
" if does not run as a service!</i>"
)
})
name_labels = {}
for status in self.statuses:
name_labels[status.name] = status.label
for name, label in name_labels.items():
items.append({
"type": "boolean",
"value": False,
"label": label,
"name": name
})
return items
def items(self):
items = []
items.append(self.note_item)
items.extend(self.bool_items())
for status in self.statuses:
items.append(self.splitter_item)
items.extend(status.get_items())
return items
def server_activity_validate_user(event):
"""Validate user permissions to show server info."""
session = ObjectFactory.session
username = event["source"].get("user", {}).get("username")
if not username:
return False
user_ent = session.query(
"User where username = \"{}\"".format(username)
).first()
if not user_ent:
return False
role_list = ["Pypeclub", "Administrator"]
for role in user_ent["user_security_roles"]:
if role["security_role"]["name"] in role_list:
return True
return False
def server_activity_discover(event):
"""Discover action in actions menu conditions."""
session = ObjectFactory.session
if session is None:
return
if not server_activity_validate_user(event):
return
return {"items": [action_data]}
def server_activity(event):
session = ObjectFactory.session
if session is None:
msg = "Session is not set. Can't trigger Reset action."
log.warning(msg)
return {
"success": False,
"message": msg
}
if not server_activity_validate_user(event):
return {
"success": False,
"message": "You don't have permissions to see Event server status!"
}
values = event["data"].get("values") or {}
is_checked = False
for value in values.values():
if value:
is_checked = True
break
if not is_checked:
return {
"items": ObjectFactory.status_factory.items(),
"title": "Server current status"
}
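# Any checked box is a restart request for the matching process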
session = ObjectFactory.session
if values["main"]:
session.event_hub.sock.sendall(b"RestartM")
return
if values["storer"]:
session.event_hub.sock.sendall(b"RestartS")
if values["processor"]:
session.event_hub.sock.sendall(b"RestartP")
def trigger_info_get():
if ObjectFactory.last_trigger:
delta = datetime.datetime.now() - ObjectFactory.last_trigger
if delta.seconds < 5:
return
session = ObjectFactory.session
session.event_hub.publish(
ftrack_api.event.base.Event(
topic=TOPIC_STATUS_SERVER,
data={"subprocess_id": os.environ["FTRACK_EVENT_SUB_ID"]}
),
on_error="ignore"
)
ObjectFactory.last_trigger = datetime.datetime.now()
def on_start(event):
session = ObjectFactory.session
if session is None:
log.warning("Session is not set. Can't trigger status info request.")
return True
source_id = event.get("source", {}).get("id")
if not source_id or source_id != session.event_hub.id:
return
trigger_info_get()
def register(session):
'''Registers the event, subscribing the discover and launch topics.'''
session.event_hub.subscribe(
"topic=ftrack.action.discover",
server_activity_discover
)
session.event_hub.subscribe("topic=pype.status.started", on_start)
status_launch_subscription = (
"topic=ftrack.action.launch and data.actionIdentifier={}"
).format(action_identifier)
session.event_hub.subscribe(
status_launch_subscription,
server_activity
)
session.event_hub.subscribe(
"topic={}".format(TOPIC_STATUS_SERVER_RESULT),
ObjectFactory.status_factory.process_event_result
)
def heartbeat():
if ObjectFactory.status_factory.is_filled():
return
trigger_info_get()
def main(args):
port = int(args[-1])
server_info = json.loads(args[-2])
# Create a TCP/IP socket
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# Connect the socket to the port where the server is listening
server_address = ("localhost", port)
log.debug("Statuser connected to {} port {}".format(*server_address))
sock.connect(server_address)
sock.sendall(b"CreatedStatus")
# store socket connection object
ObjectFactory.sock = sock
status_names = {
"main": "Main process",
"storer": "Event Storer",
"processor": "Event Processor"
}
ObjectFactory.status_factory = StatusFactory(status_names)
ObjectFactory.status_factory["main"].update(server_info)
_returncode = 0
try:
session = SocketSession(
auto_connect_event_hub=True, sock=sock, Eventhub=StatusEventHub
)
ObjectFactory.session = session
session.event_hub.heartbeat_callbacks.append(heartbeat)
register(session)
server = FtrackServer("event")
log.debug("Launched Ftrack Event statuser")
server.run_server(session, load_files=False)
except Exception:
_returncode = 1
log.error("ServerInfo subprocess crashed", exc_info=True)
finally:
log.debug("Ending. Closing socket.")
sock.close()
return _returncode
class OutputChecker(threading.Thread):
read_input = True
def run(self):
while self.read_input:
for line in sys.stdin:
line = line.rstrip().lower()
if not line.startswith("reset:"):
continue
process_name = line.replace("reset:", "")
ObjectFactory.status_factory.update_status_info(
process_name, None
)
def stop(self):
self.read_input = False
if __name__ == "__main__":
# Register interrupt signal
def signal_handler(sig, frame):
print("You pressed Ctrl+C. Process ended.")
ObjectFactory.checker_thread.stop()
sys.exit(0)
signal.signal(signal.SIGINT, signal_handler)
signal.signal(signal.SIGTERM, signal_handler)
checker_thread = OutputChecker()
ObjectFactory.checker_thread = checker_thread
checker_thread.start()
sys.exit(main(sys.argv))

View file

@ -8,14 +8,15 @@ import pymongo
import ftrack_api
from ftrack_server import FtrackServer
from pype.ftrack.ftrack_server.lib import (
SocketSession, StorerEventHub,
get_ftrack_event_mongo_info,
SocketSession,
StorerEventHub,
TOPIC_STATUS_SERVER, TOPIC_STATUS_SERVER_RESULT
)
from pype.ftrack.lib.custom_db_connector import DbConnector
from pypeapp import Logger
log = Logger().get_logger("Event storer")
subprocess_started = datetime.datetime.now()
class SessionFactory:
@ -138,11 +139,42 @@ def trigger_sync(event):
)
def send_status(event):
session = SessionFactory.session
if not session:
return
subprocess_id = event["data"].get("subprocess_id")
if not subprocess_id:
return
if subprocess_id != os.environ["FTRACK_EVENT_SUB_ID"]:
return
new_event_data = {
"subprocess_id": os.environ["FTRACK_EVENT_SUB_ID"],
"source": "storer",
"status_info": {
"created_at": subprocess_started.strftime("%Y.%m.%d %H:%M:%S")
}
}
new_event = ftrack_api.event.base.Event(
topic=TOPIC_STATUS_SERVER_RESULT,
data=new_event_data
)
session.event_hub.publish(new_event)
def register(session):
'''Registers the event, subscribing the discover and launch topics.'''
install_db()
session.event_hub.subscribe("topic=*", launch)
session.event_hub.subscribe("topic=pype.storer.started", trigger_sync)
session.event_hub.subscribe(
"topic={}".format(TOPIC_STATUS_SERVER), send_status
)
def main(args):

View file

@ -5,7 +5,7 @@ import socket
import traceback
from ftrack_server import FtrackServer
from pype.ftrack.ftrack_server.lib import SocketSession, UserEventHub
from pype.ftrack.ftrack_server.lib import SocketSession, SocketBaseEventHub
from pypeapp import Logger
@ -28,7 +28,7 @@ def main(args):
try:
session = SocketSession(
auto_connect_event_hub=True, sock=sock, Eventhub=UserEventHub
auto_connect_event_hub=True, sock=sock, Eventhub=SocketBaseEventHub
)
server = FtrackServer("action")
log.debug("Launched User Ftrack Server")

View file

@ -1,6 +1,11 @@
from . import avalon_sync
from .credentials import *
from . import credentials
from .ftrack_app_handler import *
from .ftrack_event_handler import *
from .ftrack_action_handler import *
from .ftrack_base_handler import *
from .lib import (
get_project_from_entity,
get_avalon_entities_for_assetversion
)

View file

@ -2,85 +2,140 @@ import os
import json
import ftrack_api
import appdirs
import getpass
try:
from urllib.parse import urlparse
except ImportError:
from urlparse import urlparse
config_path = os.path.normpath(appdirs.user_data_dir('pype-app', 'pype'))
action_file_name = 'ftrack_cred.json'
event_file_name = 'ftrack_event_cred.json'
action_fpath = os.path.join(config_path, action_file_name)
event_fpath = os.path.join(config_path, event_file_name)
folders = set([os.path.dirname(action_fpath), os.path.dirname(event_fpath)])
CONFIG_PATH = os.path.normpath(appdirs.user_data_dir("pype-app", "pype"))
CREDENTIALS_FILE_NAME = "ftrack_cred.json"
CREDENTIALS_PATH = os.path.join(CONFIG_PATH, CREDENTIALS_FILE_NAME)
CREDENTIALS_FOLDER = os.path.dirname(CREDENTIALS_PATH)
for folder in folders:
if not os.path.isdir(folder):
os.makedirs(folder)
if not os.path.isdir(CREDENTIALS_FOLDER):
os.makedirs(CREDENTIALS_FOLDER)
USER_GETTER = None
def _get_credentials(event=False):
if event:
fpath = event_fpath
else:
fpath = action_fpath
def get_ftrack_hostname(ftrack_server=None):
if not ftrack_server:
ftrack_server = os.environ["FTRACK_SERVER"]
if "//" not in ftrack_server:
ftrack_server = "//" + ftrack_server
return urlparse(ftrack_server).hostname
def get_user():
if USER_GETTER:
return USER_GETTER()
return getpass.getuser()
def get_credentials(ftrack_server=None, user=None):
credentials = {}
try:
file = open(fpath, 'r')
credentials = json.load(file)
except Exception:
file = open(fpath, 'w')
if not os.path.exists(CREDENTIALS_PATH):
with open(CREDENTIALS_PATH, "w") as file:
file.write(json.dumps(credentials))
file.close()
return credentials
file.close()
with open(CREDENTIALS_PATH, "r") as file:
content = file.read()
hostname = get_ftrack_hostname(ftrack_server)
if not user:
user = get_user()
content_json = json.loads(content or "{}")
credentials = content_json.get(hostname, {}).get(user) or {}
return credentials
def _save_credentials(username, apiKey, event=False, auto_connect=None):
data = {
'username': username,
'apiKey': apiKey
def save_credentials(ft_user, ft_api_key, ftrack_server=None, user=None):
hostname = get_ftrack_hostname(ftrack_server)
if not user:
user = get_user()
with open(CREDENTIALS_PATH, "r") as file:
content = file.read()
content_json = json.loads(content or "{}")
if hostname not in content_json:
content_json[hostname] = {}
content_json[hostname][user] = {
"username": ft_user,
"api_key": ft_api_key
}
if event:
fpath = event_fpath
if auto_connect is None:
cred = _get_credentials(True)
auto_connect = cred.get('auto_connect', False)
data['auto_connect'] = auto_connect
else:
fpath = action_fpath
# Deprecated keys
if "username" in content_json:
content_json.pop("username")
if "apiKey" in content_json:
content_json.pop("apiKey")
file = open(fpath, 'w')
file.write(json.dumps(data))
file.close()
with open(CREDENTIALS_PATH, "w") as file:
file.write(json.dumps(content_json, indent=4))
def _clear_credentials(event=False):
if event:
fpath = event_fpath
else:
fpath = action_fpath
open(fpath, 'w').close()
_set_env(None, None)
def clear_credentials(ft_user=None, ftrack_server=None, user=None):
if not ft_user:
ft_user = os.environ.get("FTRACK_API_USER")
if not ft_user:
return
hostname = get_ftrack_hostname(ftrack_server)
if not user:
user = get_user()
with open(CREDENTIALS_PATH, "r") as file:
content = file.read()
content_json = json.loads(content or "{}")
if hostname not in content_json:
content_json[hostname] = {}
content_json[hostname].pop(user, None)
with open(CREDENTIALS_PATH, "w") as file:
file.write(json.dumps(content_json))
def _set_env(username, apiKey):
if not username:
username = ''
if not apiKey:
apiKey = ''
os.environ['FTRACK_API_USER'] = username
os.environ['FTRACK_API_KEY'] = apiKey
def set_env(ft_user=None, ft_api_key=None):
os.environ["FTRACK_API_USER"] = ft_user or ""
os.environ["FTRACK_API_KEY"] = ft_api_key or ""
def _check_credentials(username=None, apiKey=None):
def get_env_credentials():
return (
os.environ.get("FTRACK_API_USER"),
os.environ.get("FTRACK_API_KEY")
)
if username and apiKey:
_set_env(username, apiKey)
def check_credentials(ft_user, ft_api_key, ftrack_server=None):
if not ftrack_server:
ftrack_server = os.environ["FTRACK_SERVER"]
if not ft_user or not ft_api_key:
return False
try:
session = ftrack_api.Session()
session = ftrack_api.Session(
server_url=ftrack_server,
api_key=ft_api_key,
api_user=ft_user
)
session.close()
except Exception as e:
except Exception:
return False
return True

View file

@ -49,7 +49,7 @@ class BaseHandler(object):
).format(
str(type(session)),
str(ftrack_api.session.Session),
str(session_processor.ProcessSession)
str(SocketSession)
))
self._session = session

pype/ftrack/lib/lib.py Normal file
View file

@ -0,0 +1,135 @@
from bson.objectid import ObjectId
from .avalon_sync import CustAttrIdKey
import avalon.io
def get_project_from_entity(entity):
# TODO add more entities
ent_type_lowered = entity.entity_type.lower()
if ent_type_lowered == "project":
return entity
elif ent_type_lowered == "assetversion":
return entity["asset"]["parent"]["project"]
elif "project" in entity:
return entity["project"]
return None
def get_avalon_entities_for_assetversion(asset_version, db_con=None):
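"""Query avalon entities related to an ftrack AssetVersion."""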
output = {
"success": True,
"message": None,
"project": None,
"project_name": None,
"asset": None,
"asset_name": None,
"asset_path": None,
"subset": None,
"subset_name": None,
"version": None,
"version_name": None,
"representations": None
}
if db_con is None:
db_con = avalon.io
db_con.install()
ft_asset = asset_version["asset"]
subset_name = ft_asset["name"]
version = asset_version["version"]
parent = ft_asset["parent"]
ent_path = "/".join(
[ent["name"] for ent in parent["link"]]
)
project = get_project_from_entity(asset_version)
project_name = project["full_name"]
output["project_name"] = project_name
output["asset_name"] = parent["name"]
output["asset_path"] = ent_path
output["subset_name"] = subset_name
output["version_name"] = version
db_con.Session["AVALON_PROJECT"] = project_name
avalon_project = db_con.find_one({"type": "project"})
output["project"] = avalon_project
if not avalon_project:
output["success"] = False
output["message"] = "Project not synchronized to avalon `{}`".format(
project_name
)
return output
asset_ent = None
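# Prefer lookup by the stored mongo id custom attribute, fall back to the ftrack id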
asset_mongo_id = parent["custom_attributes"].get(CustAttrIdKey)
if asset_mongo_id:
try:
asset_mongo_id = ObjectId(asset_mongo_id)
asset_ent = db_con.find_one({
"type": "asset",
"_id": asset_mongo_id
})
except Exception:
pass
if not asset_ent:
asset_ent = db_con.find_one({
"type": "asset",
"data.ftrackId": parent["id"]
})
output["asset"] = asset_ent
if not asset_ent:
output["success"] = False
output["message"] = "Not synchronized entity to avalon `{}`".format(
ent_path
)
return output
asset_mongo_id = asset_ent["_id"]
subset_ent = db_con.find_one({
"type": "subset",
"parent": asset_mongo_id,
"name": subset_name
})
output["subset"] = subset_ent
if not subset_ent:
output["success"] = False
output["message"] = (
"Subset `{}` does not exist under Asset `{}`"
).format(subset_name, ent_path)
return output
version_ent = db_con.find_one({
"type": "version",
"name": version,
"parent": subset_ent["_id"]
})
output["version"] = version_ent
if not version_ent:
output["success"] = False
output["message"] = (
"Version `{}` does not exist under Subset `{}` | Asset `{}`"
).format(version, subset_name, ent_path)
return output
repre_ents = list(db_con.find({
"type": "representation",
"parent": version_ent["_id"]
}))
output["representations"] = repre_ents
return output

View file

@ -34,29 +34,28 @@ class FtrackModule:
def validate(self):
validation = False
cred = credentials._get_credentials()
try:
if 'username' in cred and 'apiKey' in cred:
validation = credentials._check_credentials(
cred['username'],
cred['apiKey']
)
if validation is False:
self.show_login_widget()
else:
self.show_login_widget()
except Exception as e:
log.error("We are unable to connect to Ftrack: {0}".format(e))
validation = credentials._check_credentials()
if validation is True:
cred = credentials.get_credentials()
ft_user = cred.get("username")
ft_api_key = cred.get("api_key")
validation = credentials.check_credentials(ft_user, ft_api_key)
if validation:
credentials.set_env(ft_user, ft_api_key)
log.info("Connected to Ftrack successfully")
self.loginChange()
else:
log.warning("Please sign in to Ftrack")
self.bool_logged = False
self.set_menu_visibility()
return validation
if not validation and ft_user and ft_api_key:
log.warning(
"Current Ftrack credentials are not valid. {}: {} - {}".format(
str(os.environ.get("FTRACK_SERVER")), ft_user, ft_api_key
)
)
log.info("Please sign in to Ftrack")
self.bool_logged = False
self.show_login_widget()
self.set_menu_visibility()
return validation
@ -67,7 +66,7 @@ class FtrackModule:
self.start_action_server()
def logout(self):
credentials._clear_credentials()
credentials.clear_credentials()
self.stop_action_server()
log.info("Logged out of Ftrack")
@ -307,11 +306,23 @@ class FtrackModule:
except Exception as e:
log.error("During Killing Timer event server: {0}".format(e))
def changed_user(self):
self.stop_action_server()
credentials.set_env()
self.validate()
def process_modules(self, modules):
if 'TimersManager' in modules:
self.timer_manager = modules['TimersManager']
self.timer_manager.add_module(self)
if "UserModule" in modules:
credentials.USER_GETTER = modules["UserModule"].get_user
modules["UserModule"].register_callback_on_user_change(
self.changed_user
)
def start_timer_manager(self, data):
if self.thread_timer is not None:
self.thread_timer.ftrack_start_timer(data)
@ -336,7 +347,7 @@ class FtrackEventsThread(QtCore.QThread):
def __init__(self, parent):
super(FtrackEventsThread, self).__init__()
cred = credentials._get_credentials()
cred = credentials.get_credentials()
self.username = cred['username']
self.user = None
self.last_task = None

View file

@ -204,11 +204,11 @@ class Login_Dialog_ui(QtWidgets.QWidget):
self.setError("{0} {1}".format(msg, " and ".join(missing)))
return
verification = credentials._check_credentials(username, apiKey)
verification = credentials.check_credentials(username, apiKey)
if verification:
credentials._save_credentials(username, apiKey, self.is_event)
credentials._set_env(username, apiKey)
credentials.save_credentials(username, apiKey)
credentials.set_env(username, apiKey)
if self.parent is not None:
self.parent.loginChange()
self._close_widget()
@ -304,11 +304,11 @@ class Login_Dialog_ui(QtWidgets.QWidget):
self._login_server_thread.start(url)
return
verification = credentials._check_credentials(username, apiKey)
verification = credentials.check_credentials(username, apiKey)
if verification is True:
credentials._save_credentials(username, apiKey, self.is_event)
credentials._set_env(username, apiKey)
credentials.save_credentials(username, apiKey)
credentials.set_env(username, apiKey)
if self.parent is not None:
self.parent.loginChange()
self._close_widget()

View file

@ -417,23 +417,7 @@ def _get_host_name():
def get_asset(asset_name=None):
entity_data_keys_from_project_when_miss = [
"frameStart", "frameEnd", "handleStart", "handleEnd", "fps",
"resolutionWidth", "resolutionHeight"
]
entity_keys_from_project_when_miss = []
alternatives = {
"handleStart": "handles",
"handleEnd": "handles"
}
defaults = {
"handleStart": 0,
"handleEnd": 0
}
""" Returning asset document from database """
if not asset_name:
asset_name = avalon.api.Session["AVALON_ASSET"]
@ -441,57 +425,10 @@ def get_asset(asset_name=None):
"name": asset_name,
"type": "asset"
})
if not asset_document:
raise TypeError("Entity \"{}\" was not found in DB".format(asset_name))
project_document = io.find_one({"type": "project"})
for key in entity_data_keys_from_project_when_miss:
if asset_document["data"].get(key):
continue
value = project_document["data"].get(key)
if value is not None or key not in alternatives:
asset_document["data"][key] = value
continue
alt_key = alternatives[key]
value = asset_document["data"].get(alt_key)
if value is not None:
asset_document["data"][key] = value
continue
value = project_document["data"].get(alt_key)
if value:
asset_document["data"][key] = value
continue
if key in defaults:
asset_document["data"][key] = defaults[key]
for key in entity_keys_from_project_when_miss:
if asset_document.get(key):
continue
value = project_document.get(key)
if value is not None or key not in alternatives:
asset_document[key] = value
continue
alt_key = alternatives[key]
value = asset_document.get(alt_key)
if value:
asset_document[key] = value
continue
value = project_document.get(alt_key)
if value:
asset_document[key] = value
continue
if key in defaults:
asset_document[key] = defaults[key]
return asset_document

View file

@ -196,7 +196,7 @@ def format_anatomy(data):
"root": api.Session["AVALON_PROJECTS"],
"subset": data["avalon"]["subset"],
"asset": data["avalon"]["asset"],
"task": api.Session["AVALON_TASK"].lower(),
"task": api.Session["AVALON_TASK"],
"family": data["avalon"]["family"],
"project": {"name": project_document["name"],
"code": project_document["data"].get("code", '')},
@ -519,11 +519,6 @@ class WorkfileSettings(object):
self.data = kwargs
def get_nodes(self, nodes=None, nodes_filter=None):
# filter out only dictionaries for node creation
#
# print("\n\n")
# pprint(self._nodes)
#
if not isinstance(nodes, list) and not isinstance(nodes_filter, list):
return [n for n in nuke.allNodes()]
@ -791,6 +786,8 @@ class WorkfileSettings(object):
return
data = self._asset_entity["data"]
log.debug("__ asset data: `{}`".format(data))
missing_cols = []
check_cols = ["fps", "frameStart", "frameEnd",
"handleStart", "handleEnd"]
@ -1070,7 +1067,7 @@ class BuildWorkfile(WorkfileSettings):
"project": {"name": self._project["name"],
"code": self._project["data"].get("code", '')},
"asset": self._asset or os.environ["AVALON_ASSET"],
"task": kwargs.get("task") or api.Session["AVALON_TASK"].lower(),
"task": kwargs.get("task") or api.Session["AVALON_TASK"],
"hierarchy": kwargs.get("hierarchy") or pype.get_hierarchy(),
"version": kwargs.get("version", {}).get("name", 1),
"user": getpass.getuser(),

View file

@ -18,7 +18,7 @@ def open(filepath):
class Openfile(api.Loader):
"""Open Image Sequence with system default"""
families = ["write"]
families = ["render2d"]
representations = ["*"]
label = "Open"

View file

@ -1,10 +1,24 @@
"""
"""Collect Anatomy and global anatomy data.
Requires:
None
session -> AVALON_TASK
projectEntity, assetEntity -> collect_avalon_entities *(pyblish.api.CollectorOrder)
username -> collect_pype_user *(pyblish.api.CollectorOrder + 0.001)
datetimeData -> collect_datetime_data *(pyblish.api.CollectorOrder)
Optional:
comment -> collect_comment *(pyblish.api.CollectorOrder)
intent -> collected in pyblish-lite
Provides:
context -> anatomy (pypeapp.Anatomy)
context -> anatomyData
"""
import os
import json
from avalon import io, api, lib
from pypeapp import Anatomy
import pyblish.api
@ -12,9 +26,52 @@ import pyblish.api
class CollectAnatomy(pyblish.api.ContextPlugin):
"""Collect Anatomy into Context"""
order = pyblish.api.CollectorOrder
order = pyblish.api.CollectorOrder + 0.002
label = "Collect Anatomy"
def process(self, context):
context.data['anatomy'] = Anatomy()
self.log.info("Anatomy templates collected...")
root_path = api.registered_root()
task_name = api.Session["AVALON_TASK"]
project_entity = context.data["projectEntity"]
asset_entity = context.data["assetEntity"]
project_name = project_entity["name"]
context.data["anatomy"] = Anatomy(project_name)
self.log.info(
"Anatomy object collected for project \"{}\".".format(project_name)
)
hierarchy_items = asset_entity["data"]["parents"]
hierarchy = ""
if hierarchy_items:
hierarchy = os.path.join(*hierarchy_items)
context_data = {
"root": root_path,
"project": {
"name": project_name,
"code": project_entity["data"].get("code")
},
"asset": asset_entity["name"],
"hierarchy": hierarchy.replace("\\", "/"),
"task": task_name,
"username": context.data["user"]
}
avalon_app_name = os.environ.get("AVALON_APP_NAME")
if avalon_app_name:
application_def = lib.get_application(avalon_app_name)
app_dir = application_def.get("application_dir")
if app_dir:
context_data["app"] = app_dir
datetime_data = context.data.get("datetimeData") or {}
context_data.update(datetime_data)
context.data["anatomyData"] = context_data
self.log.info("Global anatomy Data collected")
self.log.debug(json.dumps(context_data, indent=4))

View file

@ -0,0 +1,46 @@
"""Collect Anatomy and global anatomy data.
Requires:
session -> AVALON_PROJECT, AVALON_ASSET
Provides:
context -> projectEntity - project entity from database
context -> assetEntity - asset entity from database
"""
from avalon import io, api
import pyblish.api
class CollectAvalonEntities(pyblish.api.ContextPlugin):
"""Collect Anatomy into Context"""
order = pyblish.api.CollectorOrder
label = "Collect Avalon Entities"
def process(self, context):
project_name = api.Session["AVALON_PROJECT"]
asset_name = api.Session["AVALON_ASSET"]
project_entity = io.find_one({
"type": "project",
"name": project_name
})
assert project_entity, (
"Project '{0}' was not found."
).format(project_name)
self.log.debug("Collected Project entity \"{}\"".format(project_entity))
asset_entity = io.find_one({
"type": "asset",
"name": asset_name,
"parent": project_entity["_id"]
})
assert asset_entity, (
"No asset found by the name '{0}' in project '{1}'"
).format(asset_name, project_name)
self.log.debug("Collected Asset entity \"{}\"".format(asset_entity))
context.data["projectEntity"] = project_entity
context.data["assetEntity"] = asset_entity

View file

@ -211,12 +211,10 @@ class CollectRenderedFrames(pyblish.api.ContextPlugin):
# Get family from the data
families = data.get("families", ["render"])
if "render" not in families:
families.append("render")
if "ftrack" not in families:
families.append("ftrack")
if "write" in instance_family:
families.append("write")
if families_data and "render2d" in families_data:
families.append("render2d")
if families_data and "slate" in families_data:
families.append("slate")
@ -334,7 +332,7 @@ class CollectRenderedFrames(pyblish.api.ContextPlugin):
"stagingDir": root,
"anatomy_template": "render",
"fps": fps,
"tags": ["review"] if not baked_mov_path else [],
"tags": ["review"] if not baked_mov_path else ["thumb-nuke"],
}
instance.data["representations"].append(
representation)
@ -388,8 +386,8 @@ class CollectRenderedFrames(pyblish.api.ContextPlugin):
# If no start or end frame provided, get it from collection
indices = list(collection.indexes)
start = data.get("frameStart", indices[0])
end = data.get("frameEnd", indices[-1])
start = int(data.get("frameStart", indices[0]))
end = int(data.get("frameEnd", indices[-1]))
ext = list(collection)[0].split(".")[-1]

View file

@ -0,0 +1,124 @@
"""
Requires:
context -> anatomyData
context -> projectEntity
context -> assetEntity
instance -> asset
instance -> subset
instance -> family
Optional:
instance -> version
instance -> resolutionWidth
instance -> resolutionHeight
instance -> fps
Provides:
instance -> projectEntity
instance -> assetEntity
instance -> anatomyData
instance -> version
instance -> latestVersion
"""
import copy
import json
from avalon import io
import pyblish.api
class CollectInstanceAnatomyData(pyblish.api.InstancePlugin):
"""Fill templates with data needed for publish"""
order = pyblish.api.CollectorOrder + 0.49
label = "Collect instance anatomy data"
hosts = ["maya", "nuke", "standalonepublisher"]
def process(self, instance):
# get all the stuff from the database
anatomy_data = copy.deepcopy(instance.context.data["anatomyData"])
project_entity = instance.context.data["projectEntity"]
context_asset_entity = instance.context.data["assetEntity"]
asset_name = instance.data["asset"]
# Check if asset name is the same as what is in context
# - they may be different, e.g. in NukeStudio
if context_asset_entity["name"] == asset_name:
asset_entity = context_asset_entity
else:
asset_entity = io.find_one({
"type": "asset",
"name": asset_name,
"parent": project_entity["_id"]
})
subset_name = instance.data["subset"]
version_number = instance.data.get("version")
latest_version = None
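# Find the highest existing version of the subset, if any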
if asset_entity:
subset_entity = io.find_one({
"type": "subset",
"name": subset_name,
"parent": asset_entity["_id"]
})
if subset_entity is None:
self.log.debug("Subset entity does not exist yet.")
else:
version_entity = io.find_one(
{
"type": "version",
"parent": subset_entity["_id"]
},
sort=[("name", -1)]
)
if version_entity:
latest_version = version_entity["name"]
# If version is not specified for instance or context
if version_number is None:
# TODO we should be able to change default version by studio
# preferences (like start with version number `0`)
version_number = 1
# use latest version (+1) if already any exist
if latest_version is not None:
version_number += int(latest_version)
anatomy_updates = {
"asset": asset_name,
"family": instance.data["family"],
"subset": subset_name,
"version": version_number
}
task_name = instance.data.get("task")
if task_name:
anatomy_updates["task"] = task_name
# Version should not be collected in context data since it may differ per instance
anatomy_data.update(anatomy_updates)
resolution_width = instance.data.get("resolutionWidth")
if resolution_width:
anatomy_data["resolution_width"] = resolution_width
resolution_height = instance.data.get("resolutionHeight")
if resolution_height:
anatomy_data["resolution_height"] = resolution_height
fps = instance.data.get("fps")
if fps:
anatomy_data["fps"] = fps
instance.data["projectEntity"] = project_entity
instance.data["assetEntity"] = asset_entity
instance.data["anatomyData"] = anatomy_data
instance.data["latestVersion"] = latest_version
# TODO should be version number set here?
instance.data["version"] = version_number
self.log.info("Instance anatomy data collected.")
self.log.debug(json.dumps(anatomy_data, indent=4))
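
The version resolution above can be read as a small pure function; a sketch under the assumption that `latest_version` is the highest existing version name, or None when no version exists yet:

def resolve_version_number(latest_version, requested=None):
    # An explicit version on the instance always wins.
    if requested is not None:
        return int(requested)
    # Otherwise start at 1, or continue from the latest existing version.
    version_number = 1
    if latest_version is not None:
        version_number += int(latest_version)
    return version_number

assert resolve_version_number(None) == 1
assert resolve_version_number(3) == 4
assert resolve_version_number(3, requested=2) == 2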

View file

@ -1,24 +0,0 @@
"""
Requires:
None
Provides:
context -> projectData
"""
import pyblish.api
import pype.api as pype
class CollectProjectData(pyblish.api.ContextPlugin):
"""Collecting project data from avalon db"""
label = "Collect Project Data"
order = pyblish.api.CollectorOrder - 0.1
hosts = ["nukestudio"]
def process(self, context):
# get project data from avalon db
context.data["projectData"] = pype.get_project()["data"]
return

View file

@ -0,0 +1,60 @@
"""
Requires:
context -> anatomy
context -> anatomyData
Provides:
instance -> publishDir
instance -> resourcesDir
"""
import os
import copy
import pyblish.api
from avalon import api
class CollectResourcesPath(pyblish.api.InstancePlugin):
"""Generate directory path where the files and resources will be stored"""
label = "Collect Resources Path"
order = pyblish.api.CollectorOrder + 0.495
def process(self, instance):
anatomy = instance.context.data["anatomy"]
template_data = copy.deepcopy(instance.data["anatomyData"])
# This is for cases of Deprecated anatomy without `folder`
# TODO remove when all clients have solved this issue
template_data.update({
"frame": "FRAME_TEMP",
"representation": "TEMP"
})
anatomy_filled = anatomy.format(template_data)
if "folder" in anatomy.templates["publish"]:
publish_folder = anatomy_filled["publish"]["folder"]
else:
# handle the deprecated situation when `folder` key is not underneath
# `publish` anatomy
project_name = api.Session["AVALON_PROJECT"]
self.log.warning((
"Deprecation warning: Anatomy does not have `folder` key set"
" underneath `publish` (in global or for project `{}`)."
).format(project_name))
file_path = anatomy_filled["publish"]["path"]
# Directory
publish_folder = os.path.dirname(file_path)
publish_folder = os.path.normpath(publish_folder)
resources_folder = os.path.join(publish_folder, "resources")
instance.data["publishDir"] = publish_folder
instance.data["resourcesDir"] = resources_folder
self.log.debug("publishDir: \"{}\"".format(publish_folder))
self.log.debug("resourcesDir: \"{}\"".format(resources_folder))
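
A condensed sketch of the `folder` fallback above, with a hypothetical filled-anatomy dict standing in for the real `Anatomy` result:

import os

# Hypothetical filled-anatomy result; "folder" may be missing in
# deprecated project anatomies, so fall back to dirname of "path".
anatomy_filled = {
    "publish": {
        "path": "/projects/demo/sh010/publish/render/v001/render.exr",
    }
}
publish = anatomy_filled["publish"]
publish_folder = publish.get("folder") or os.path.dirname(publish["path"])
publish_folder = os.path.normpath(publish_folder)
resources_folder = os.path.join(publish_folder, "resources")
print(publish_folder, resources_folder)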

View file

@ -1,119 +0,0 @@
"""
Requires:
session -> AVALON_PROJECT
context -> anatomy (pypeapp.Anatomy)
instance -> subset
instance -> asset
instance -> family
Provides:
instance -> template
instance -> assumedTemplateData
instance -> assumedDestination
"""
import os
from avalon import io, api
import pyblish.api
class CollectTemplates(pyblish.api.InstancePlugin):
"""Fill templates with data needed for publish"""
order = pyblish.api.CollectorOrder + 0.1
label = "Collect and fill Templates"
hosts = ["maya", "nuke", "standalonepublisher"]
def process(self, instance):
# get all the stuff from the database
subset_name = instance.data["subset"]
asset_name = instance.data["asset"]
project_name = api.Session["AVALON_PROJECT"]
project = io.find_one(
{
"type": "project",
"name": project_name
},
projection={"config": True, "data": True}
)
template = project["config"]["template"]["publish"]
anatomy = instance.context.data['anatomy']
asset = io.find_one({
"type": "asset",
"name": asset_name,
"parent": project["_id"]
})
assert asset, ("No asset found by the name '{}' "
"in project '{}'".format(asset_name, project_name))
silo = asset.get('silo')
subset = io.find_one({
"type": "subset",
"name": subset_name,
"parent": asset["_id"]
})
# assume there is no version yet, we start at `1`
version = None
version_number = 1
if subset is not None:
version = io.find_one(
{
"type": "version",
"parent": subset["_id"]
},
sort=[("name", -1)]
)
# if there is a subset there ought to be a version
if version is not None:
version_number += int(version["name"])
hierarchy = asset['data']['parents']
if hierarchy:
# hierarchy = os.path.sep.join(hierarchy)
hierarchy = os.path.join(*hierarchy)
else:
hierarchy = ""
template_data = {"root": api.Session["AVALON_PROJECTS"],
"project": {"name": project_name,
"code": project['data']['code']},
"silo": silo,
"family": instance.data['family'],
"asset": asset_name,
"subset": subset_name,
"version": version_number,
"hierarchy": hierarchy.replace("\\", "/"),
"representation": "TEMP"}
# Add datetime data to template data
datetime_data = instance.context.data.get("datetimeData") or {}
template_data.update(datetime_data)
resolution_width = instance.data.get("resolutionWidth")
resolution_height = instance.data.get("resolutionHeight")
fps = instance.data.get("fps")
if resolution_width:
template_data["resolution_width"] = resolution_width
if resolution_height:
template_data["resolution_height"] = resolution_height
if fps:
template_data["fps"] = fps
instance.data["template"] = template
instance.data["assumedTemplateData"] = template_data
# We take the parent folder of representation 'filepath'
instance.data["assumedDestination"] = os.path.dirname(
(anatomy.format(template_data))["publish"]["path"]
)
self.log.info("Assumed Destination has been created...")
self.log.debug("__ assumedTemplateData: `{}`".format(instance.data["assumedTemplateData"]))
self.log.debug("__ template: `{}`".format(instance.data["template"]))
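
The hierarchy handling above boils down to joining the asset's parents into a forward-slash fragment; a minimal sketch:

import os

parents = ["ep101", "sq01", "sh010"]  # illustrative asset parents
hierarchy = os.path.join(*parents).replace("\\", "/") if parents else ""
assert hierarchy == "ep101/sq01/sh010"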

View file

@ -32,21 +32,15 @@ class ExtractBurnin(pype.api.Extractor):
frame_end = int(instance.data.get("frameEnd") or 1)
duration = frame_end - frame_start + 1
prep_data = {
"username": instance.context.data['user'],
"asset": os.environ['AVALON_ASSET'],
"task": os.environ['AVALON_TASK'],
prep_data = copy.deepcopy(instance.data["anatomyData"])
prep_data.update({
"frame_start": frame_start,
"frame_end": frame_end,
"duration": duration,
"version": int(version),
"comment": instance.context.data.get("comment", ""),
"intent": instance.context.data.get("intent", "")
}
# Add datetime data to preparation data
datetime_data = instance.context.data.get("datetimeData") or {}
prep_data.update(datetime_data)
})
slate_frame_start = frame_start
slate_frame_end = frame_end
@ -64,10 +58,6 @@ class ExtractBurnin(pype.api.Extractor):
"slate_duration": slate_duration
})
# Update data with template data
template_data = instance.data.get("assumedTemplateData") or {}
prep_data.update(template_data)
# get anatomy project
anatomy = instance.context.data['anatomy']

View file

@ -20,7 +20,7 @@ class ExtractJpegEXR(pyblish.api.InstancePlugin):
label = "Extract Jpeg EXR"
hosts = ["shell"]
order = pyblish.api.ExtractorOrder
families = ["imagesequence", "render", "write", "source"]
families = ["imagesequence", "render", "render2d", "source"]
enabled = False
def process(self, instance):
@ -42,8 +42,12 @@ class ExtractJpegEXR(pyblish.api.InstancePlugin):
for repre in representations:
self.log.debug(repre)
if 'review' not in repre['tags']:
return
valid = 'review' in repre['tags'] or "thumb-nuke" in repre['tags']
if not valid:
continue
if not isinstance(repre['files'], list):
continue
input_file = repre['files'][0]
@ -69,7 +73,8 @@ class ExtractJpegEXR(pyblish.api.InstancePlugin):
ffmpeg_path = pype.lib.get_ffmpeg_tool_path("ffmpeg")
jpeg_items = []
jpeg_items.append(ffmpeg_path)
jpeg_items.append(
os.path.join(os.environ.get("FFMPEG_PATH"), "ffmpeg"))
# override file if already exists
jpeg_items.append("-y")
# use the same input args as with mov
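
The command assembly continues past this hunk; for orientation, a minimal sketch of the overall pattern with illustrative paths and a reduced flag set (not the plugin's exact argument list):

import subprocess

ffmpeg_path = "ffmpeg"          # assumed resolvable on PATH here
input_file = "render.1001.exr"  # first file of the representation
output_file = "thumbnail.jpg"

jpeg_items = [
    ffmpeg_path,
    "-y",              # overwrite the output if it already exists
    "-i", input_file,
    "-frames:v", "1",  # a single frame is enough for a thumbnail
    output_file,
]
subprocess.run(jpeg_items, check=True)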

View file

@ -1,417 +0,0 @@
import os
import logging
import shutil
import errno
import pyblish.api
from avalon import api, io
from avalon.vendor import filelink
log = logging.getLogger(__name__)
class IntegrateAsset(pyblish.api.InstancePlugin):
"""Resolve any dependency issues
This plug-in resolves any paths which, if not updated might break
the published file.
The order of families is important, when working with lookdev you want to
first publish the texture, update the texture paths in the nodes and then
publish the shading network. Same goes for file dependent assets.
"""
label = "Integrate Asset"
order = pyblish.api.IntegratorOrder
families = []
exclude_families = ["clip"]
def process(self, instance):
if [ef for ef in self.exclude_families
if instance.data["family"] in ef]:
return
self.register(instance)
self.log.info("Integrating Asset in to the database ...")
if instance.data.get('transfer', True):
self.integrate(instance)
def register(self, instance):
# Required environment variables
PROJECT = api.Session["AVALON_PROJECT"]
ASSET = instance.data.get("asset") or api.Session["AVALON_ASSET"]
LOCATION = api.Session["AVALON_LOCATION"]
context = instance.context
# Atomicity
#
# Guarantee atomic publishes - each asset contains
# an identical set of members.
# __
# / o
# / \
# | o |
# \ /
# o __/
#
assert all(result["success"] for result in context.data["results"]), (
"Atomicity not held, aborting.")
# Assemble
#
# |
# v
# ---> <----
# ^
# |
#
stagingdir = instance.data.get("stagingDir")
assert stagingdir, ("Incomplete instance \"%s\": "
"Missing reference to staging area." % instance)
# extra check if stagingDir actually exists and is available
self.log.debug("Establishing staging directory @ %s" % stagingdir)
# Ensure at least one file is set up for transfer in staging dir.
files = instance.data.get("files", [])
assert files, "Instance has no files to transfer"
assert isinstance(files, (list, tuple)), (
"Instance 'files' must be a list, got: {0}".format(files)
)
project = io.find_one({"type": "project"})
asset = io.find_one({
"type": "asset",
"name": ASSET,
"parent": project["_id"]
})
assert all([project, asset]), ("Could not find current project or "
"asset '%s'" % ASSET)
subset = self.get_subset(asset, instance)
# get next version
latest_version = io.find_one(
{
"type": "version",
"parent": subset["_id"]
},
{"name": True},
sort=[("name", -1)]
)
next_version = 1
if latest_version is not None:
next_version += latest_version["name"]
self.log.info("Verifying version from assumed destination")
assumed_data = instance.data["assumedTemplateData"]
assumed_version = assumed_data["version"]
if assumed_version != next_version:
raise AttributeError("Assumed version 'v{0:03d}' does not match "
"next version in database "
"('v{1:03d}')".format(assumed_version,
next_version))
self.log.debug("Next version: v{0:03d}".format(next_version))
version_data = self.create_version_data(context, instance)
version = self.create_version(subset=subset,
version_number=next_version,
locations=[LOCATION],
data=version_data)
self.log.debug("Creating version ...")
version_id = io.insert_one(version).inserted_id
# Write to disk
# _
# | |
# _| |_
# ____\ /
# |\ \ / \
# \ \ v \
# \ \________.
# \|________|
#
root = api.registered_root()
hierarchy = ""
parents = io.find_one({
"type": 'asset',
"name": ASSET
})['data']['parents']
if parents and len(parents) > 0:
# hierarchy = os.path.sep.join(hierarchy)
hierarchy = os.path.join(*parents)
template_data = {"root": root,
"project": {"name": PROJECT,
"code": project['data']['code']},
"silo": asset['silo'],
"asset": ASSET,
"family": instance.data['family'],
"subset": subset["name"],
"version": int(version["name"]),
"hierarchy": hierarchy}
# template_publish = project["config"]["template"]["publish"]
anatomy = instance.context.data['anatomy']
# Find the representations to transfer amongst the files
# Each should be a single representation (as such, a single extension)
representations = []
destination_list = []
if 'transfers' not in instance.data:
instance.data['transfers'] = []
for files in instance.data["files"]:
# Collection
# _______
# |______|\
# | |\|
# | ||
# | ||
# | ||
# |_______|
#
if isinstance(files, list):
collection = files
# Assert that each member has identical suffix
_, ext = os.path.splitext(collection[0])
assert all(ext == os.path.splitext(name)[1]
for name in collection), (
"Files had varying suffixes, this is a bug"
)
assert not any(os.path.isabs(name) for name in collection)
template_data["representation"] = ext[1:]
for fname in collection:
src = os.path.join(stagingdir, fname)
anatomy_filled = anatomy.format(template_data)
dst = anatomy_filled["publish"]["path"]
instance.data["transfers"].append([src, dst])
template = anatomy.templates["publish"]["path"]
else:
# Single file
# _______
# | |\
# | |
# | |
# | |
# |_______|
#
fname = files
assert not os.path.isabs(fname), (
"Given file name is a full path"
)
_, ext = os.path.splitext(fname)
template_data["representation"] = ext[1:]
src = os.path.join(stagingdir, fname)
anatomy_filled = anatomy.format(template_data)
dst = anatomy_filled["publish"]["path"]
instance.data["transfers"].append([src, dst])
template = anatomy.templates["publish"]["path"]
representation = {
"schema": "pype:representation-2.0",
"type": "representation",
"parent": version_id,
"name": ext[1:],
"data": {'path': dst, 'template': template},
"dependencies": instance.data.get("dependencies", "").split(),
# Imprint shortcut to context
# for performance reasons.
"context": {
"root": root,
"project": {"name": PROJECT,
"code": project['data']['code']},
'task': api.Session["AVALON_TASK"],
"silo": asset['silo'],
"asset": ASSET,
"family": instance.data['family'],
"subset": subset["name"],
"version": version["name"],
"hierarchy": hierarchy,
"representation": ext[1:]
}
}
destination_list.append(dst)
instance.data['destination_list'] = destination_list
representations.append(representation)
self.log.info("Registering {} items".format(len(representations)))
io.insert_many(representations)
def integrate(self, instance):
"""Move the files
Through `instance.data["transfers"]`
Args:
instance: the instance to integrate
"""
transfers = instance.data.get("transfers", list())
for src, dest in transfers:
self.log.info("Copying file .. {} -> {}".format(src, dest))
self.copy_file(src, dest)
# Produce hardlinked copies
# Note: hardlink can only be produced between two files on the same
# server/disk and editing one of the two will edit both files at once.
# As such it is recommended to only make hardlinks between static files
# to ensure publishes remain safe and non-edited.
hardlinks = instance.data.get("hardlinks", list())
for src, dest in hardlinks:
self.log.info("Hardlinking file .. {} -> {}".format(src, dest))
self.hardlink_file(src, dest)
def copy_file(self, src, dst):
""" Copy given source to destination
Arguments:
src (str): the source file which needs to be copied
dst (str): the destination of the source file
Returns:
None
"""
dirname = os.path.dirname(dst)
try:
os.makedirs(dirname)
except OSError as e:
if e.errno == errno.EEXIST:
pass
else:
self.log.critical("An unexpected error occurred.")
raise
shutil.copy(src, dst)
def hardlink_file(self, src, dst):
dirname = os.path.dirname(dst)
try:
os.makedirs(dirname)
except OSError as e:
if e.errno == errno.EEXIST:
pass
else:
self.log.critical("An unexpected error occurred.")
raise
filelink.create(src, dst, filelink.HARDLINK)
def get_subset(self, asset, instance):
subset = io.find_one({
"type": "subset",
"parent": asset["_id"],
"name": instance.data["subset"]
})
if subset is None:
subset_name = instance.data["subset"]
self.log.info("Subset '%s' not found, creating.." % subset_name)
_id = io.insert_one({
"schema": "avalon-core:subset-2.0",
"type": "subset",
"name": subset_name,
"data": {},
"parent": asset["_id"]
}).inserted_id
subset = io.find_one({"_id": _id})
return subset
def create_version(self, subset, version_number, locations, data=None):
""" Copy given source to destination
Args:
subset (dict): the registered subset of the asset
version_number (int): the version number
locations (list): the currently registered locations
Returns:
dict: collection of data to create a version
"""
# Imprint currently registered location
version_locations = [location for location in locations if
location is not None]
return {"schema": "avalon-core:version-2.0",
"type": "version",
"parent": subset["_id"],
"name": version_number,
"locations": version_locations,
"data": data}
def create_version_data(self, context, instance):
"""Create the data collection for the version
Args:
context: the current context
instance: the current instance being published
Returns:
dict: the required information with instance.data as key
"""
families = []
current_families = instance.data.get("families", list())
instance_family = instance.data.get("family", None)
if instance_family is not None:
families.append(instance_family)
families += current_families
self.log.debug("Registered root: {}".format(api.registered_root()))
# create relative source path for DB
try:
source = instance.data['source']
except KeyError:
source = context.data["currentFile"]
relative_path = os.path.relpath(source, api.registered_root())
source = os.path.join("{root}", relative_path).replace("\\", "/")
self.log.debug("Source: {}".format(source))
version_data = {"families": families,
"time": context.data["time"],
"author": context.data["user"],
"source": source,
"comment": context.data.get("comment"),
"machine": context.data.get("machine"),
"fps": context.data.get("fps")}
# Include optional data if present in
optionals = [
"frameStart", "frameEnd", "step", "handles", "sourceHashes"
]
for key in optionals:
if key in instance.data:
version_data[key] = instance.data[key]
return version_data
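
The hardlink caveat noted in `integrate()` above (same disk only, and edits propagate to every linked path) suggests a link-or-copy fallback; a sketch using only the standard library rather than the `filelink` helper:

import os
import shutil

def link_or_copy(src, dst):
    """Hardlink when possible, otherwise fall back to a plain copy."""
    dirname = os.path.dirname(dst)
    if not os.path.isdir(dirname):
        os.makedirs(dirname)
    try:
        os.link(src, dst)  # hardlinks require the same filesystem
    except OSError:
        shutil.copy(src, dst)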

View file

@ -1,147 +0,0 @@
import pyblish.api
import os
from avalon import io, api
class IntegrateAssumedDestination(pyblish.api.InstancePlugin):
"""Generate the assumed destination path where the file will be stored"""
label = "Integrate Assumed Destination"
order = pyblish.api.IntegratorOrder - 0.05
families = ["clip", "projectfile", "plate"]
def process(self, instance):
anatomy = instance.context.data['anatomy']
self.create_destination_template(instance, anatomy)
template_data = instance.data["assumedTemplateData"]
# self.log.info(anatomy.templates)
anatomy_filled = anatomy.format(template_data)
# self.log.info(anatomy_filled)
mock_template = anatomy_filled["publish"]["path"]
# For now assume resources end up in a "resources" folder in the
# published folder
mock_destination = os.path.join(os.path.dirname(mock_template),
"resources")
# Clean the path
mock_destination = os.path.abspath(
os.path.normpath(mock_destination)).replace("\\", "/")
# Define resource destination and transfers
resources = instance.data.get("resources", list())
transfers = instance.data.get("transfers", list())
for resource in resources:
# Add destination to the resource
source_filename = os.path.basename(
resource["source"]).replace("\\", "/")
destination = os.path.join(mock_destination, source_filename)
# Force forward slashes to fix issue with software unable
# to work correctly with backslashes in specific scenarios
# (e.g. escape characters in PLN-151 V-Ray UDIM)
destination = destination.replace("\\", "/")
resource['destination'] = destination
# Collect transfers for the individual files of the resource
# e.g. all individual files of a cache or UDIM textures.
files = resource['files']
for fsrc in files:
fname = os.path.basename(fsrc)
fdest = os.path.join(
mock_destination, fname).replace("\\", "/")
transfers.append([fsrc, fdest])
instance.data["resources"] = resources
instance.data["transfers"] = transfers
def create_destination_template(self, instance, anatomy):
"""Create a filepath based on the current data available
Example template:
{root}/{project}/{asset}/publish/{subset}/v{version:0>3}/
{subset}.{representation}
Args:
instance: the instance to publish
Returns:
file path (str)
"""
# get all the stuff from the database
subset_name = instance.data["subset"]
self.log.info(subset_name)
asset_name = instance.data["asset"]
project_name = api.Session["AVALON_PROJECT"]
a_template = anatomy.templates
project = io.find_one(
{"type": "project", "name": project_name},
projection={"config": True, "data": True}
)
template = a_template['publish']['path']
# anatomy = instance.context.data['anatomy']
asset = io.find_one({
"type": "asset",
"name": asset_name,
"parent": project["_id"]
})
assert asset, ("No asset found by the name '{}' "
"in project '{}'".format(asset_name, project_name))
subset = io.find_one({
"type": "subset",
"name": subset_name,
"parent": asset["_id"]
})
# assume there is no version yet, we start at `1`
version = None
version_number = 1
if subset is not None:
version = io.find_one(
{
"type": "version",
"parent": subset["_id"]
},
sort=[("name", -1)]
)
# if there is a subset there ought to be a version
if version is not None:
version_number += version["name"]
if instance.data.get('version'):
version_number = int(instance.data.get('version'))
padding = int(a_template['render']['padding'])
hierarchy = asset['data']['parents']
if hierarchy:
# hierarchy = os.path.sep.join(hierarchy)
hierarchy = "/".join(hierarchy)
template_data = {"root": api.Session["AVALON_PROJECTS"],
"project": {"name": project_name,
"code": project['data']['code']},
"family": instance.data['family'],
"asset": asset_name,
"subset": subset_name,
"frame": ('#' * padding),
"version": version_number,
"hierarchy": hierarchy,
"representation": "TEMP"}
instance.data["assumedTemplateData"] = template_data
self.log.info(template_data)
instance.data["template"] = template

View file

@ -2,6 +2,7 @@ import os
from os.path import getsize
import logging
import sys
import copy
import clique
import errno
import pyblish.api
@ -100,144 +101,104 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
def register(self, instance):
# Required environment variables
PROJECT = api.Session["AVALON_PROJECT"]
ASSET = instance.data.get("asset") or api.Session["AVALON_ASSET"]
TASK = instance.data.get("task") or api.Session["AVALON_TASK"]
LOCATION = api.Session["AVALON_LOCATION"]
anatomy_data = instance.data["anatomyData"]
io.install()
context = instance.context
# Atomicity
#
# Guarantee atomic publishes - each asset contains
# an identical set of members.
# __
# / o
# / \
# | o |
# \ /
# o __/
#
# for result in context.data["results"]:
# if not result["success"]:
# self.log.debug(result)
# exc_type, exc_value, exc_traceback = result["error_info"]
# extracted_traceback = traceback.extract_tb(exc_traceback)[-1]
# self.log.debug(
# "Error at line {}: \"{}\"".format(
# extracted_traceback[1], result["error"]
# )
# )
# assert all(result["success"] for result in context.data["results"]),(
# "Atomicity not held, aborting.")
# Assemble
#
# |
# v
# ---> <----
# ^
# |
#
project_entity = instance.data["projectEntity"]
asset_name = instance.data["asset"]
asset_entity = instance.data.get("assetEntity")
if not asset_entity:
asset_entity = io.find_one({
"type": "asset",
"name": asset_name,
"parent": project_entity["_id"]
})
assert asset_entity, (
"No asset found by the name \"{0}\" in project \"{1}\""
).format(asset_name, project_entity["name"])
instance.data["assetEntity"] = asset_entity
# update anatomy data with asset specific keys
# - name should already been set
hierarchy = ""
parents = asset_entity["data"]["parents"]
if parents:
hierarchy = "/".join(parents)
anatomy_data["hierarchy"] = hierarchy
task_name = instance.data.get("task")
if task_name:
anatomy_data["task"] = task_name
stagingdir = instance.data.get("stagingDir")
if not stagingdir:
self.log.info('''{} is missing reference to staging
directory Will try to get it from
representation'''.format(instance))
self.log.info((
"{0} is missing reference to staging directory."
" Will try to get it from representation."
).format(instance))
# extra check if stagingDir actually exists and is available
self.log.debug("Establishing staging directory @ %s" % stagingdir)
else:
self.log.debug(
"Establishing staging directory @ {0}".format(stagingdir)
)
# Ensure at least one file is set up for transfer in staging dir.
repres = instance.data.get("representations", None)
repres = instance.data.get("representations")
assert repres, "Instance has no files to transfer"
assert isinstance(repres, (list, tuple)), (
"Instance 'files' must be a list, got: {0}".format(repres)
"Instance 'files' must be a list, got: {0} {1}".format(
str(type(repres)), str(repres)
)
)
# FIXME: io is not initialized at this point for shell host
io.install()
project = io.find_one({"type": "project"})
subset = self.get_subset(asset_entity, instance)
asset = io.find_one({
"type": "asset",
"name": ASSET,
"parent": project["_id"]
})
assert all([project, asset]), ("Could not find current project or "
"asset '%s'" % ASSET)
subset = self.get_subset(asset, instance)
# get next version
latest_version = io.find_one(
{
"type": "version",
"parent": subset["_id"]
},
{"name": True},
sort=[("name", -1)]
)
next_version = 1
if latest_version is not None:
next_version += latest_version["name"]
if instance.data.get('version'):
next_version = int(instance.data.get('version'))
self.log.debug("Next version: v{0:03d}".format(next_version))
version_number = instance.data["version"]
self.log.debug("Next version: v{}".format(version_number))
version_data = self.create_version_data(context, instance)
version_data_instance = instance.data.get('versionData')
if version_data_instance:
version_data.update(version_data_instance)
version = self.create_version(subset=subset,
version_number=next_version,
locations=[LOCATION],
data=version_data)
# TODO rename method from `create_version` to
# `prepare_version` or similar...
version = self.create_version(
subset=subset,
version_number=version_number,
data=version_data
)
self.log.debug("Creating version ...")
existing_version = io.find_one({
'type': 'version',
'parent': subset["_id"],
'name': next_version
'name': version_number
})
if existing_version is None:
version_id = io.insert_one(version).inserted_id
else:
# TODO query by _id and
# remove old version and representations but keep their ids
io.update_many({
'type': 'version',
'parent': subset["_id"],
'name': next_version
'name': version_number
}, {'$set': version}
)
version_id = existing_version['_id']
instance.data['version'] = version['name']
# Write to disk
# _
# | |
# _| |_
# ____\ /
# |\ \ / \
# \ \ v \
# \ \________.
# \|________|
#
root = api.registered_root()
hierarchy = ""
parents = io.find_one({
"type": 'asset',
"name": ASSET
})['data']['parents']
if parents and len(parents) > 0:
# hierarchy = os.path.sep.join(hierarchy)
hierarchy = os.path.join(*parents)
intent = context.data.get("intent")
if intent is not None:
anatomy_data["intent"] = intent
anatomy = instance.context.data['anatomy']
@ -250,31 +211,10 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
instance.data['transfers'] = []
for idx, repre in enumerate(instance.data["representations"]):
# Collection
# _______
# |______|\
# | |\|
# | ||
# | ||
# | ||
# |_______|
#
# create template data for Anatomy
template_data = {"root": root,
"project": {"name": PROJECT,
"code": project['data']['code']},
"silo": asset.get('silo'),
"task": TASK,
"asset": ASSET,
"family": instance.data['family'],
"subset": subset["name"],
"version": int(version["name"]),
"hierarchy": hierarchy}
# Add datetime data to template data
datetime_data = context.data.get("datetimeData") or {}
template_data.update(datetime_data)
template_data = copy.deepcopy(anatomy_data)
if intent is not None:
template_data["intent"] = intent
resolution_width = repre.get("resolutionWidth")
resolution_height = repre.get("resolutionHeight")
@ -292,6 +232,7 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
stagingdir = repre['stagingDir']
if repre.get('anatomy_template'):
template_name = repre['anatomy_template']
template = os.path.normpath(
anatomy.templates[template_name]["path"])
@ -322,7 +263,6 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
template_filled = anatomy_filled[template_name]["path"]
if repre_context is None:
repre_context = template_filled.used_values
test_dest_files.append(
os.path.normpath(template_filled)
)
@ -342,12 +282,15 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
index_frame_start = int(repre.get("frameStart"))
# exception for slate workflow
if "slate" in instance.data["families"]:
if index_frame_start and "slate" in instance.data["families"]:
index_frame_start -= 1
dst_padding_exp = src_padding_exp
dst_start_frame = None
for i in src_collection.indexes:
# TODO 1.) do not count padding in each index iteration
# 2.) do not count dst_padding from src_padding before
# index_frame_start check
src_padding = src_padding_exp % i
src_file_name = "{0}{1}{2}".format(
@ -375,7 +318,6 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
if not dst_start_frame:
dst_start_frame = dst_padding
dst = "{0}{1}{2}".format(
dst_head,
dst_start_frame,
@ -547,14 +489,14 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
filelink.create(src, dst, filelink.HARDLINK)
def get_subset(self, asset, instance):
subset_name = instance.data["subset"]
subset = io.find_one({
"type": "subset",
"parent": asset["_id"],
"name": instance.data["subset"]
"name": subset_name
})
if subset is None:
subset_name = instance.data["subset"]
self.log.info("Subset '%s' not found, creating.." % subset_name)
self.log.debug("families. %s" % instance.data.get('families'))
self.log.debug(
@ -583,26 +525,21 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
return subset
def create_version(self, subset, version_number, locations, data=None):
def create_version(self, subset, version_number, data=None):
""" Copy given source to destination
Args:
subset (dict): the registered subset of the asset
version_number (int): the version number
locations (list): the currently registered locations
Returns:
dict: collection of data to create a version
"""
# Imprint currently registered location
version_locations = [location for location in locations if
location is not None]
return {"schema": "pype:version-3.0",
"type": "version",
"parent": subset["_id"],
"name": version_number,
"locations": version_locations,
"data": data}
def create_version_data(self, context, instance):
@ -645,6 +582,10 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
"fps": context.data.get(
"fps", instance.data.get("fps"))}
intent = context.data.get("intent")
if intent is not None:
version_data["intent"] = intent
# Include optional data if present in
optionals = [
"frameStart", "frameEnd", "step", "handles",

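For orientation, a minimal sketch of the destination renumbering adjusted in the hunks above, including the slate offset (names and values are illustrative):

# Source frames are renumbered so the destination starts at the
# published first frame; a slate shifts the start one frame earlier.
src_indexes = [1001, 1002, 1003]
frame_start = 1001
has_slate = True

dst_start = frame_start - 1 if has_slate else frame_start
padding_exp = "%04d"
for offset, src_index in enumerate(sorted(src_indexes)):
    src_name = "render.%s.exr" % (padding_exp % src_index)
    dst_name = "render.%s.exr" % (padding_exp % (dst_start + offset))
    print(src_name, "->", dst_name)
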
View file

@ -1,423 +0,0 @@
import os
import logging
import shutil
import clique
import errno
import pyblish.api
from avalon import api, io
log = logging.getLogger(__name__)
class IntegrateFrames(pyblish.api.InstancePlugin):
"""Resolve any dependency issues
This plug-in resolves any paths which, if not updated might break
the published file.
The order of families is important, when working with lookdev you want to
first publish the texture, update the texture paths in the nodes and then
publish the shading network. Same goes for file dependent assets.
"""
label = "Integrate Frames"
order = pyblish.api.IntegratorOrder
families = ["imagesequence"]
family_targets = [".frames", ".local", ".review", "imagesequence", "render", "source"]
exclude_families = ["clip"]
def process(self, instance):
if [ef for ef in self.exclude_families
if instance.data["family"] in ef]:
return
families = [f for f in instance.data["families"]
for search in self.family_targets
if search in f]
if not families:
return
self.register(instance)
# self.log.info("Integrating Asset in to the database ...")
# self.log.info("instance.data: {}".format(instance.data))
if instance.data.get('transfer', True):
self.integrate(instance)
def register(self, instance):
# Required environment variables
PROJECT = api.Session["AVALON_PROJECT"]
ASSET = instance.data.get("asset") or api.Session["AVALON_ASSET"]
LOCATION = api.Session["AVALON_LOCATION"]
context = instance.context
# Atomicity
#
# Guarantee atomic publishes - each asset contains
# an identical set of members.
# __
# / o
# / \
# | o |
# \ /
# o __/
#
assert all(result["success"] for result in context.data["results"]), (
"Atomicity not held, aborting.")
# Assemble
#
# |
# v
# ---> <----
# ^
# |
#
stagingdir = instance.data.get("stagingDir")
assert stagingdir, ("Incomplete instance \"%s\": "
"Missing reference to staging area." % instance)
# extra check if stagingDir actually exists and is available
self.log.debug("Establishing staging directory @ %s" % stagingdir)
project = io.find_one({"type": "project"})
asset = io.find_one({
"type": "asset",
"name": ASSET,
"parent": project["_id"]
})
assert all([project, asset]), ("Could not find current project or "
"asset '%s'" % ASSET)
subset = self.get_subset(asset, instance)
# get next version
latest_version = io.find_one(
{
"type": "version",
"parent": subset["_id"]
},
{"name": True},
sort=[("name", -1)]
)
next_version = 1
if latest_version is not None:
next_version += latest_version["name"]
self.log.info("Verifying version from assumed destination")
assumed_data = instance.data["assumedTemplateData"]
assumed_version = assumed_data["version"]
if assumed_version != next_version:
raise AttributeError("Assumed version 'v{0:03d}' does not match "
"next version in database "
"('v{1:03d}')".format(assumed_version,
next_version))
if instance.data.get('version'):
next_version = int(instance.data.get('version'))
self.log.debug("Next version: v{0:03d}".format(next_version))
version_data = self.create_version_data(context, instance)
version = self.create_version(subset=subset,
version_number=next_version,
locations=[LOCATION],
data=version_data)
self.log.debug("Creating version ...")
version_id = io.insert_one(version).inserted_id
# Write to disk
# _
# | |
# _| |_
# ____\ /
# |\ \ / \
# \ \ v \
# \ \________.
# \|________|
#
root = api.registered_root()
hierarchy = ""
parents = io.find_one({"type": 'asset', "name": ASSET})[
'data']['parents']
if parents and len(parents) > 0:
# hierarchy = os.path.sep.join(hierarchy)
hierarchy = os.path.join(*parents)
template_data = {"root": root,
"project": {"name": PROJECT,
"code": project['data']['code']},
"silo": asset.get('silo'),
"task": api.Session["AVALON_TASK"],
"asset": ASSET,
"family": instance.data['family'],
"subset": subset["name"],
"version": int(version["name"]),
"hierarchy": hierarchy}
# template_publish = project["config"]["template"]["publish"]
anatomy = instance.context.data['anatomy']
# Find the representations to transfer amongst the files
# Each should be a single representation (as such, a single extension)
representations = []
destination_list = []
if 'transfers' not in instance.data:
instance.data['transfers'] = []
for files in instance.data["files"]:
# Collection
# _______
# |______|\
# | |\|
# | ||
# | ||
# | ||
# |_______|
#
if isinstance(files, list):
src_collections, remainder = clique.assemble(files)
src_collection = src_collections[0]
# Assert that each member has identical suffix
src_head = src_collection.format("{head}")
src_tail = ext = src_collection.format("{tail}")
test_dest_files = list()
for i in [1, 2]:
template_data["representation"] = src_tail[1:]
template_data["frame"] = src_collection.format(
"{padding}") % i
anatomy_filled = anatomy.format(template_data)
test_dest_files.append(anatomy_filled["render"]["path"])
dst_collections, remainder = clique.assemble(test_dest_files)
dst_collection = dst_collections[0]
dst_head = dst_collection.format("{head}")
dst_tail = dst_collection.format("{tail}")
for i in src_collection.indexes:
src_padding = src_collection.format("{padding}") % i
src_file_name = "{0}{1}{2}".format(
src_head, src_padding, src_tail)
dst_padding = dst_collection.format("{padding}") % i
dst = "{0}{1}{2}".format(dst_head, dst_padding, dst_tail)
src = os.path.join(stagingdir, src_file_name)
instance.data["transfers"].append([src, dst])
else:
# Single file
# _______
# | |\
# | |
# | |
# | |
# |_______|
#
template_data.pop("frame", None)
fname = files
self.log.info("fname: {}".format(fname))
assert not os.path.isabs(fname), (
"Given file name is a full path"
)
_, ext = os.path.splitext(fname)
template_data["representation"] = ext[1:]
src = os.path.join(stagingdir, fname)
anatomy_filled = anatomy.format(template_data)
dst = anatomy_filled["render"]["path"]
instance.data["transfers"].append([src, dst])
if ext[1:] not in ["jpeg", "jpg", "mov", "mp4", "wav"]:
template_data["frame"] = "#" * int(anatomy_filled["render"]["padding"])
anatomy_filled = anatomy.format(template_data)
path_to_save = anatomy_filled["render"]["path"]
template = anatomy.templates["render"]["path"]
self.log.debug("path_to_save: {}".format(path_to_save))
representation = {
"schema": "pype:representation-2.0",
"type": "representation",
"parent": version_id,
"name": ext[1:],
"data": {'path': path_to_save, 'template': template},
"dependencies": instance.data.get("dependencies", "").split(),
# Imprint shortcut to context
# for performance reasons.
"context": {
"root": root,
"project": {
"name": PROJECT,
"code": project['data']['code']
},
"task": api.Session["AVALON_TASK"],
"silo": asset['silo'],
"asset": ASSET,
"family": instance.data['family'],
"subset": subset["name"],
"version": int(version["name"]),
"hierarchy": hierarchy,
"representation": ext[1:]
}
}
destination_list.append(dst)
instance.data['destination_list'] = destination_list
representations.append(representation)
self.log.info("Registering {} items".format(len(representations)))
io.insert_many(representations)
def integrate(self, instance):
"""Move the files
Through `instance.data["transfers"]`
Args:
instance: the instance to integrate
"""
transfers = instance.data["transfers"]
for src, dest in transfers:
src = os.path.normpath(src)
dest = os.path.normpath(dest)
if src in dest:
continue
self.log.info("Copying file .. {} -> {}".format(src, dest))
self.copy_file(src, dest)
def copy_file(self, src, dst):
""" Copy given source to destination
Arguments:
src (str): the source file which needs to be copied
dst (str): the destination of the source file
Returns:
None
"""
dirname = os.path.dirname(dst)
try:
os.makedirs(dirname)
except OSError as e:
if e.errno == errno.EEXIST:
pass
else:
self.log.critical("An unexpected error occurred.")
raise
shutil.copy(src, dst)
def get_subset(self, asset, instance):
subset = io.find_one({
"type": "subset",
"parent": asset["_id"],
"name": instance.data["subset"]
})
if subset is None:
subset_name = instance.data["subset"]
self.log.info("Subset '%s' not found, creating.." % subset_name)
_id = io.insert_one({
"schema": "pype:subset-2.0",
"type": "subset",
"name": subset_name,
"data": {},
"parent": asset["_id"]
}).inserted_id
subset = io.find_one({"_id": _id})
return subset
def create_version(self, subset, version_number, locations, data=None):
""" Copy given source to destination
Args:
subset (dict): the registered subset of the asset
version_number (int): the version number
locations (list): the currently registered locations
Returns:
dict: collection of data to create a version
"""
# Imprint currently registered location
version_locations = [location for location in locations if
location is not None]
return {"schema": "pype:version-2.0",
"type": "version",
"parent": subset["_id"],
"name": version_number,
"locations": version_locations,
"data": data}
def create_version_data(self, context, instance):
"""Create the data collection for the version
Args:
context: the current context
instance: the current instance being published
Returns:
dict: the required information with instance.data as key
"""
families = []
current_families = instance.data.get("families", list())
instance_family = instance.data.get("family", None)
if instance_family is not None:
families.append(instance_family)
families += current_families
try:
source = instance.data['source']
except KeyError:
source = context.data["currentFile"]
relative_path = os.path.relpath(source, api.registered_root())
source = os.path.join("{root}", relative_path).replace("\\", "/")
version_data = {"families": families,
"time": context.data["time"],
"author": context.data["user"],
"source": source,
"comment": context.data.get("comment")}
# Include optional data if present in
optionals = ["frameStart", "frameEnd", "step",
"handles", "colorspace", "fps", "outputDir"]
for key in optionals:
if key in instance.data:
version_data[key] = instance.data.get(key, None)
return version_data

View file

@ -0,0 +1,49 @@
import os
import pyblish.api
class IntegrateResourcesPath(pyblish.api.InstancePlugin):
"""Generate directory path where the files and resources will be stored"""
label = "Integrate Resources Path"
order = pyblish.api.IntegratorOrder - 0.05
families = ["clip", "projectfile", "plate"]
def process(self, instance):
resources = instance.data.get("resources") or []
transfers = instance.data.get("transfers") or []
if not resources and not transfers:
self.log.debug(
"Instance does not have `resources` and `transfers`"
)
return
resources_folder = instance.data["resourcesDir"]
# Define resource destination and transfers
for resource in resources:
# Add destination to the resource
source_filename = os.path.basename(
resource["source"]).replace("\\", "/")
destination = os.path.join(resources_folder, source_filename)
# Force forward slashes to fix issue with software unable
# to work correctly with backslashes in specific scenarios
# (e.g. escape characters in PLN-151 V-Ray UDIM)
destination = destination.replace("\\", "/")
resource['destination'] = destination
# Collect transfers for the individual files of the resource
# e.g. all individual files of a cache or UDIM textures.
files = resource['files']
for fsrc in files:
fname = os.path.basename(fsrc)
fdest = os.path.join(
resources_folder, fname
).replace("\\", "/")
transfers.append([fsrc, fdest])
instance.data["resources"] = resources
instance.data["transfers"] = transfers
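
A condensed sketch of the destination and transfer mapping above, with illustrative paths:

import os

resources_folder = "/publish/sh010/v001/resources"  # illustrative
resources = [
    {
        "source": "/work/textures/wood.tx",
        "files": ["/work/textures/wood.tx"],
    },
]
transfers = []
for resource in resources:
    source_filename = os.path.basename(resource["source"])
    resource["destination"] = os.path.join(
        resources_folder, source_filename
    ).replace("\\", "/")
    for fsrc in resource["files"]:
        fdest = os.path.join(
            resources_folder, os.path.basename(fsrc)
        ).replace("\\", "/")
        transfers.append([fsrc, fdest])
print(transfers)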

View file

@ -166,6 +166,8 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
"PYPE_STUDIO_PROJECTS_PATH",
"PYPE_STUDIO_PROJECTS_MOUNT"
]
deadline_pool = ""
def _submit_deadline_post_job(self, instance, job):
"""
@ -201,7 +203,8 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
"JobDependency0": job["_id"],
"UserName": job["Props"]["User"],
"Comment": instance.context.data.get("comment", ""),
"Priority": job["Props"]["Pri"]
"Priority": job["Props"]["Pri"],
"Pool": self.deadline_pool
},
"PluginInfo": {
"Version": "3.6",

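For context, a sketch of the submission payload fragment this hunk extends; values are illustrative and `Pool` is filled from the new `deadline_pool` class attribute:

payload = {
    "JobInfo": {
        "JobDependency0": "parent-job-id",  # illustrative
        "UserName": "artist",
        "Comment": "",
        "Priority": 50,
        "Pool": "",  # taken from the plugin's `deadline_pool` attribute
    },
    "PluginInfo": {
        "Version": "3.6",
    },
}
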
View file

@ -1,43 +0,0 @@
import pyblish.api
import os
class ValidateTemplates(pyblish.api.ContextPlugin):
"""Check if all templates were filled"""
label = "Validate Templates"
order = pyblish.api.ValidatorOrder - 0.1
hosts = ["maya", "houdini", "nuke"]
def process(self, context):
anatomy = context.data["anatomy"]
if not anatomy:
raise RuntimeError("Did not find anatomy")
else:
data = {
"root": os.environ["PYPE_STUDIO_PROJECTS_PATH"],
"project": {"name": "D001_projectsx",
"code": "prjX"},
"ext": "exr",
"version": 3,
"task": "animation",
"asset": "sh001",
"app": "maya",
"hierarchy": "ep101/sq01/sh010"}
anatomy_filled = anatomy.format(data)
self.log.info(anatomy_filled)
data = {"root": os.environ["PYPE_STUDIO_PROJECTS_PATH"],
"project": {"name": "D001_projectsy",
"code": "prjY"},
"ext": "abc",
"version": 1,
"task": "lookdev",
"asset": "bob",
"app": "maya",
"hierarchy": "ep101/sq01/bob"}
anatomy_filled = context.data["anatomy"].format(data)
self.log.info(anatomy_filled["work"]["folder"])

View file

@ -35,7 +35,7 @@ class CollectMayaScene(pyblish.api.ContextPlugin):
"subset": subset,
"asset": os.getenv("AVALON_ASSET", None),
"label": subset,
"publish": False,
"publish": True,
"family": 'workfile',
"families": ['workfile'],
"setMembers": [current_file]

View file

@ -1,6 +1,7 @@
import os
import sys
import json
import copy
import tempfile
import contextlib
import subprocess
@ -330,10 +331,9 @@ class ExtractLook(pype.api.Extractor):
maya_path))
def resource_destination(self, instance, filepath, do_maketx):
anatomy = instance.context.data["anatomy"]
self.create_destination_template(instance, anatomy)
resources_dir = instance.data["resourcesDir"]
# Compute destination location
basename, ext = os.path.splitext(os.path.basename(filepath))
@ -343,7 +343,7 @@ class ExtractLook(pype.api.Extractor):
ext = ".tx"
return os.path.join(
instance.data["assumedDestination"], "resources", basename + ext
resources_dir, basename + ext
)
def _process_texture(self, filepath, do_maketx, staging, linearise, force):
@ -407,97 +407,3 @@ class ExtractLook(pype.api.Extractor):
return converted, COPY, texture_hash
return filepath, COPY, texture_hash
def create_destination_template(self, instance, anatomy):
"""Create a filepath based on the current data available
Example template:
{root}/{project}/{silo}/{asset}/publish/{subset}/v{version:0>3}/
{subset}.{representation}
Args:
instance: the instance to publish
Returns:
file path (str)
"""
# get all the stuff from the database
subset_name = instance.data["subset"]
self.log.info(subset_name)
asset_name = instance.data["asset"]
project_name = api.Session["AVALON_PROJECT"]
a_template = anatomy.templates
project = io.find_one(
{
"type": "project",
"name": project_name
},
projection={"config": True, "data": True}
)
template = a_template["publish"]["path"]
# anatomy = instance.context.data['anatomy']
asset = io.find_one({
"type": "asset",
"name": asset_name,
"parent": project["_id"]
})
assert asset, ("No asset found by the name '{}' "
"in project '{}'").format(asset_name, project_name)
silo = asset.get("silo")
subset = io.find_one({
"type": "subset",
"name": subset_name,
"parent": asset["_id"]
})
# assume there is no version yet, we start at `1`
version = None
version_number = 1
if subset is not None:
version = io.find_one(
{
"type": "version",
"parent": subset["_id"]
},
sort=[("name", -1)]
)
# if there is a subset there ought to be a version
if version is not None:
version_number += version["name"]
if instance.data.get("version"):
version_number = int(instance.data.get("version"))
padding = int(a_template["render"]["padding"])
hierarchy = asset["data"]["parents"]
if hierarchy:
# hierarchy = os.path.sep.join(hierarchy)
hierarchy = "/".join(hierarchy)
template_data = {
"root": api.Session["AVALON_PROJECTS"],
"project": {"name": project_name, "code": project["data"]["code"]},
"silo": silo,
"family": instance.data["family"],
"asset": asset_name,
"subset": subset_name,
"frame": ("#" * padding),
"version": version_number,
"hierarchy": hierarchy,
"representation": "TEMP",
}
instance.data["assumedTemplateData"] = template_data
self.log.info(template_data)
instance.data["template"] = template
# We take the parent folder of representation 'filepath'
instance.data["assumedDestination"] = os.path.dirname(
anatomy.format(template_data)["publish"]["path"]
)
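
The resulting destination logic reduces to a small pure function; a sketch with illustrative paths:

import os

def resource_destination(resources_dir, filepath, do_maketx):
    basename, ext = os.path.splitext(os.path.basename(filepath))
    if do_maketx:
        ext = ".tx"  # maketx converts the texture, so swap the extension
    return os.path.join(resources_dir, basename + ext)

print(resource_destination("/pub/resources", "/src/wood.exr", True))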

View file

@ -110,15 +110,7 @@ class ExtractYetiRig(pype.api.Extractor):
self.log.info("Writing metadata file")
# Create assumed destination folder for imageSearchPath
assumed_temp_data = instance.data["assumedTemplateData"]
template = instance.data["template"]
template_formatted = template.format(**assumed_temp_data)
destination_folder = os.path.dirname(template_formatted)
image_search_path = os.path.join(destination_folder, "resources")
image_search_path = os.path.normpath(image_search_path)
image_search_path = resources_dir = instance.data["resourcesDir"]
settings = instance.data.get("rigsettings", None)
if settings:

View file

@ -2,6 +2,7 @@ from avalon.nuke.pipeline import Creator
from avalon.nuke import lib as anlib
import nuke
class CreateBackdrop(Creator):
"""Add Publishable Backdrop"""
@ -35,8 +36,8 @@ class CreateBackdrop(Creator):
return instance
else:
msg = "Please select nodes you "
"wish to add to a container"
msg = str("Please select nodes you "
"wish to add to a container")
self.log.error(msg)
nuke.message(msg)
return
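
The `str(...)` wrapper works because its parentheses let the adjacent string literals concatenate; without them, only the first literal is assigned and the second line is a discarded expression statement. A minimal demonstration:

msg = "Please select nodes you "
"wish to add to a container"  # a bare expression, silently discarded
assert msg == "Please select nodes you "

msg = ("Please select nodes you "
       "wish to add to a container")  # adjacent literals concatenate
assert msg == "Please select nodes you wish to add to a container"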

View file

@ -240,77 +240,6 @@ class LoadBackdropNodes(api.Loader):
return update_container(GN, data_imprint)
def connect_active_viewer(self, group_node):
"""
Finds the active Viewer, places the node under it,
and adds the group's name into the Input Process of the viewer
Arguments:
group_node (nuke node): nuke group node object
"""
group_node_name = group_node["name"].value()
viewer = [n for n in nuke.allNodes() if "Viewer1" in n["name"].value()]
if len(viewer) > 0:
viewer = viewer[0]
else:
if not (len(nodes) < 2):
msg = "Please create Viewer node before you "
"run this action again"
self.log.error(msg)
nuke.message(msg)
return None
# get coordinates of Viewer1
xpos = viewer["xpos"].value()
ypos = viewer["ypos"].value()
ypos += 150
viewer["ypos"].setValue(ypos)
# set coordinates to group node
group_node["xpos"].setValue(xpos)
group_node["ypos"].setValue(ypos + 50)
# add group node name to Viewer Input Process
viewer["input_process_node"].setValue(group_node_name)
# put backdrop under
pnlib.create_backdrop(label="Input Process", layer=2,
nodes=[viewer, group_node], color="0x7c7faaff")
return True
def get_item(self, data, trackIndex, subTrackIndex):
return {key: val for key, val in data.items()
if subTrackIndex == val["subTrackIndex"]
if trackIndex == val["trackIndex"]}
def byteify(self, input):
"""
Converts unicode strings to strings.
It goes through the whole dictionary recursively.
Arguments:
input (dict/str): input
Returns:
dict: with fixed values and keys
"""
if isinstance(input, dict):
return {self.byteify(key): self.byteify(value)
for key, value in input.iteritems()}
elif isinstance(input, list):
return [self.byteify(element) for element in input]
elif isinstance(input, unicode):
return input.encode('utf-8')
else:
return input
def switch(self, container, representation):
self.update(container, representation)

View file

@ -176,8 +176,8 @@ class LoadGizmoInputProcess(api.Loader):
if len(viewer) > 0:
viewer = viewer[0]
else:
msg = "Please create Viewer node before you "
"run this action again"
msg = str("Please create Viewer node before you "
"run this action again")
self.log.error(msg)
nuke.message(msg)
return None

View file

@ -276,8 +276,8 @@ class LoadLutsInputProcess(api.Loader):
if len(viewer) > 0:
viewer = viewer[0]
else:
msg = "Please create Viewer node before you "
"run this action again"
msg = str("Please create Viewer node before you "
"run this action again")
self.log.error(msg)
nuke.message(msg)
return None

View file

@ -58,7 +58,11 @@ class CollectBackdrops(pyblish.api.InstancePlugin):
last_frame = int(nuke.root()["last_frame"].getValue())
# get version
version = pype.get_version_from_path(nuke.root().name())
version = instance.context.data.get('version')
if not version:
raise RuntimeError("Script name does not contain a version.")
instance.data['version'] = version
# Add version data to instance

View file

@ -52,9 +52,9 @@ class CollectNukeWrites(pyblish.api.InstancePlugin):
output_dir = os.path.dirname(path)
self.log.debug('output dir: {}'.format(output_dir))
# get version to instance for integration
instance.data['version'] = instance.context.data.get(
"version", pype.get_version_from_path(nuke.root().name()))
# # get version to instance for integration
# instance.data['version'] = instance.context.data.get(
# "version", pype.get_version_from_path(nuke.root().name()))
self.log.debug('Write Version: %s' % instance.data('version'))
@ -92,16 +92,7 @@ class CollectNukeWrites(pyblish.api.InstancePlugin):
# Add version data to instance
version_data = {
"handles": handle_start,
"handleStart": handle_start,
"handleEnd": handle_end,
"frameStart": first_frame + handle_start,
"frameEnd": last_frame - handle_end,
"version": int(instance.data['version']),
"colorspace": node["colorspace"].value(),
"families": ["render"],
"subset": instance.data["subset"],
"fps": instance.context.data["fps"]
}
instance.data["family"] = "write"

View file

@ -17,7 +17,7 @@ class CollectClips(api.ContextPlugin):
self.log.debug("Created `assetsShared` in context")
context.data["assetsShared"] = dict()
projectdata = context.data["projectData"]
projectdata = context.data["projectEntity"]["data"]
version = context.data.get("version", "001")
sequence = context.data.get("activeSequence")
selection = context.data.get("selection")

View file

@ -2,6 +2,7 @@
import os
import json
import re
import copy
import pyblish.api
import tempfile
from avalon import io, api
@ -75,9 +76,11 @@ class ExtractVideoTracksLuts(pyblish.api.InstancePlugin):
)
data["source"] = data["sourcePath"]
# WARNING instance should not be created in Extractor!
# create new instance
instance = instance.context.create_instance(**data)
# TODO replace line below with `instance.data["resourcesDir"]`
# when instance is created during collection part
dst_dir = self.resource_destination_dir(instance)
# change paths in effects to files
@ -144,103 +147,109 @@ class ExtractVideoTracksLuts(pyblish.api.InstancePlugin):
return (v, dst)
def resource_destination_dir(self, instance):
anatomy = instance.context.data['anatomy']
self.create_destination_template(instance, anatomy)
# WARNING this is from `collect_instance_anatomy_data.py`
anatomy_data = copy.deepcopy(instance.context.data["anatomyData"])
project_entity = instance.context.data["projectEntity"]
context_asset_entity = instance.context.data["assetEntity"]
return os.path.join(
instance.data["assumedDestination"],
"resources"
)
def create_destination_template(self, instance, anatomy):
"""Create a filepath based on the current data available
Example template:
{root}/{project}/{silo}/{asset}/publish/{subset}/v{version:0>3}/
{subset}.{representation}
Args:
instance: the instance to publish
Returns:
file path (str)
"""
# get all the stuff from the database
subset_name = instance.data["subset"]
self.log.info(subset_name)
asset_name = instance.data["asset"]
project_name = api.Session["AVALON_PROJECT"]
a_template = anatomy.templates
if context_asset_entity["name"] == asset_name:
asset_entity = context_asset_entity
project = io.find_one(
{
"type": "project",
"name": project_name
},
projection={"config": True, "data": True}
)
else:
asset_entity = io.find_one({
"type": "asset",
"name": asset_name,
"parent": project_entity["_id"]
})
template = a_template['publish']['path']
# anatomy = instance.context.data['anatomy']
subset_name = instance.data["subset"]
version_number = instance.data.get("version")
latest_version = None
asset = io.find_one({
"type": "asset",
"name": asset_name,
"parent": project["_id"]
if asset_entity:
subset_entity = io.find_one({
"type": "subset",
"name": subset_name,
"parent": asset_entity["_id"]
})
if subset_entity is None:
self.log.debug("Subset entity does not exist yet.")
else:
version_entity = io.find_one(
{
"type": "version",
"parent": subset_entity["_id"]
},
sort=[("name", -1)]
)
if version_entity:
latest_version = version_entity["name"]
if version_number is None:
version_number = 1
if latest_version is not None:
version_number += int(latest_version)
anatomy_data.update({
"asset": asset_name,
"family": instance.data["family"],
"subset": subset_name,
"version": version_number
})
assert asset, ("No asset found by the name '{}' "
"in project '{}'".format(asset_name, project_name))
silo = asset.get('silo')
resolution_width = instance.data.get("resolutionWidth")
if resolution_width:
anatomy_data["resolution_width"] = resolution_width
subset = io.find_one({
"type": "subset",
"name": subset_name,
"parent": asset["_id"]
resolution_height = instance.data.get("resolutionHeight")
if resolution_height:
anatomy_data["resolution_height"] = resolution_height
fps = instance.data.get("fps")
if fps:
anatomy_data["fps"] = fps
instance.data["projectEntity"] = project_entity
instance.data["assetEntity"] = asset_entity
instance.data["anatomyData"] = anatomy_data
instance.data["latestVersion"] = latest_version
instance.data["version"] = version_number
# WARNING this is from `collect_resources_path.py`
anatomy = instance.context.data["anatomy"]
template_data = copy.deepcopy(instance.data["anatomyData"])
# This is for cases of Deprecated anatomy without `folder`
# TODO remove when all clients have solved this issue
template_data.update({
"frame": "FRAME_TEMP",
"representation": "TEMP"
})
# assume there is no version yet, we start at `1`
version = None
version_number = 1
if subset is not None:
version = io.find_one(
{
"type": "version",
"parent": subset["_id"]
},
sort=[("name", -1)]
)
anatomy_filled = anatomy.format(template_data)
# if there is a subset there ought to be a version
if version is not None:
version_number += version["name"]
if "folder" in anatomy.templates["publish"]:
publish_folder = anatomy_filled["publish"]["folder"]
else:
# solve deprecated situation when `folder` key is not underneath
# `publish` anatomy
project_name = api.Session["AVALON_PROJECT"]
self.log.warning((
"Deprecation warning: Anatomy does not have set `folder`"
" key underneath `publish` (in global of for project `{}`)."
).format(project_name))
if instance.data.get('version'):
version_number = int(instance.data.get('version'))
file_path = anatomy_filled["publish"]["path"]
# Directory
publish_folder = os.path.dirname(file_path)
padding = int(a_template['render']['padding'])
publish_folder = os.path.normpath(publish_folder)
resources_folder = os.path.join(publish_folder, "resources")
hierarchy = asset['data']['parents']
if hierarchy:
# hierarchy = os.path.sep.join(hierarchy)
hierarchy = "/".join(hierarchy)
instance.data["publishDir"] = publish_folder
instance.data["resourcesDir"] = resources_folder
template_data = {"root": api.Session["AVALON_PROJECTS"],
"project": {"name": project_name,
"code": project['data']['code']},
"silo": silo,
"family": instance.data['family'],
"asset": asset_name,
"subset": subset_name,
"frame": ('#' * padding),
"version": version_number,
"hierarchy": hierarchy,
"representation": "TEMP"}
instance.data["assumedTemplateData"] = template_data
self.log.info(template_data)
instance.data["template"] = template
# We take the parent folder of representation 'filepath'
instance.data["assumedDestination"] = os.path.dirname(
anatomy.format(template_data)["publish"]["path"]
)
return resources_folder
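For orientation, here is a minimal standalone sketch of the version lookup the new collector code performs; `io` is assumed to be an Avalon-style MongoDB wrapper and all names here are illustrative, not part of the commit:

def resolve_next_version(io, asset_id, subset_name):
    # Find the subset under the asset; it may not exist yet
    subset_entity = io.find_one({
        "type": "subset",
        "name": subset_name,
        "parent": asset_id
    })
    latest_version = None
    if subset_entity is not None:
        # Highest existing version document, sorted descending by name
        version_entity = io.find_one(
            {"type": "version", "parent": subset_entity["_id"]},
            sort=[("name", -1)]
        )
        if version_entity:
            latest_version = version_entity["name"]

    # Start at 1 and add the latest existing version on top
    version_number = 1
    if latest_version is not None:
        version_number += int(latest_version)
    return version_number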

View file

@ -1,4 +1,6 @@
import os
import sys
import re
import datetime
import subprocess
import json
@ -24,6 +26,20 @@ FFPROBE = (
'{} -v quiet -print_format json -show_format -show_streams %(source)s'
).format(ffprobe_path)
DRAWTEXT = (
"drawtext=text=\\'%(text)s\\':x=%(x)s:y=%(y)s:fontcolor="
"%(color)s@%(opacity).1f:fontsize=%(size)d:fontfile='%(font)s'"
)
TIMECODE = (
"drawtext=timecode=\\'%(timecode)s\\':text=\\'%(text)s\\'"
":timecode_rate=%(fps).2f:x=%(x)s:y=%(y)s:fontcolor="
"%(color)s@%(opacity).1f:fontsize=%(size)d:fontfile='%(font)s'"
)
MISSING_KEY_VALUE = "N/A"
CURRENT_FRAME_KEY = "{current_frame}"
TIME_CODE_KEY = "{timecode}"
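# Illustrative only: with hypothetical values substituted, the DRAWTEXT
# template above expands to an ffmpeg filter similar to:
#   drawtext=text=\'sh0010\':x=10:y=10:fontcolor=white@1.0:fontsize=24:fontfile='arial.ttf'
# TIMECODE additionally feeds ffmpeg's running timecode counter through
# `timecode` and `timecode_rate`.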
def _streams(source):
"""Reimplemented from otio burnins to be able use full path to ffprobe
@ -117,82 +133,59 @@ class ModifiedBurnins(ffmpeg_burnins.Burnins):
if options_init:
self.options_init.update(options_init)
    def add_text(self, text, align, options=None):
        """
        Adding static text to a filter.

        :param str text: text to apply to the drawtext
        :param enum align: alignment, must use provided enum flags
        :param dict options: recommended to use TextOptions
        """
        if not options:
            options = ffmpeg_burnins.TextOptions(**self.options_init)
        self._add_burnin(text, align, options, ffmpeg_burnins.DRAWTEXT)

    def add_datetime(self, date_format, align, options=None):
        """
        Adding date text to a filter. Using Python's datetime module.

        :param str date_format: format of date (e.g. `%d.%m.%Y`)
        :param enum align: alignment, must use provided enum flags
        :param dict options: recommended to use TextOptions
        """
        if not options:
            options = ffmpeg_burnins.TextOptions(**self.options_init)
        today = datetime.datetime.today()
        text = today.strftime(date_format)
        self._add_burnin(text, align, options, ffmpeg_burnins.DRAWTEXT)

    def add_text(self, text, align, frame_start=None, options=None):
        """
        Adding static text to a filter.

        :param str text: text to apply to the drawtext
        :param enum align: alignment, must use provided enum flags
        :param int frame_start: starting frame for burnins current frame
        :param dict options: recommended to use TextOptions
        """
        if not options:
            options = ffmpeg_burnins.TextOptions(**self.options_init)
        options = options.copy()
        if frame_start:
            options["frame_offset"] = frame_start
        self._add_burnin(text, align, options, DRAWTEXT)
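    # Usage sketch (illustrative; the constructor argument is assumed):
    #   burnin = ModifiedBurnins("/path/to/input.mov")
    #   burnin.add_text("sh0010", ModifiedBurnins.TOP_LEFT, frame_start=1001)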
    def add_frame_numbers(
        self, align, options=None, start_frame=None, text=None
    ):
        """
        Convenience method to create the frame number expression.

        :param enum align: alignment, must use provided enum flags
        :param dict options: recommended to use FrameNumberOptions
        """
        if not options:
            options = ffmpeg_burnins.FrameNumberOptions(**self.options_init)
        if start_frame:
            options['frame_offset'] = start_frame

        expr = r'%%{eif\:n+%d\:d}' % options['frame_offset']
        _text = str(int(self.end_frame + options['frame_offset']))
        if text and isinstance(text, str):
            text = r"{}".format(text)
            expr = text.replace("{current_frame}", expr)
            text = text.replace("{current_frame}", _text)

        options['expression'] = expr
        self._add_burnin(text, align, options, ffmpeg_burnins.DRAWTEXT)

    def add_timecode(self, align, options=None, start_frame=None):
        """
        Convenience method to create the timecode expression.

        :param enum align: alignment, must use provided enum flags
        :param dict options: recommended to use TimeCodeOptions
        """
        if not options:
            options = ffmpeg_burnins.TimeCodeOptions(**self.options_init)
        if start_frame:
            options['frame_offset'] = start_frame

        timecode = ffmpeg_burnins._frames_to_timecode(
            options['frame_offset'],
            self.frame_rate
        )
        options = options.copy()
        if not options.get('fps'):
            options['fps'] = self.frame_rate

        self._add_burnin(
            timecode.replace(':', r'\:'),
            align,
            options,
            ffmpeg_burnins.TIMECODE
        )

    def add_timecode(
        self, align, frame_start=None, frame_start_tc=None, text=None,
        options=None
    ):
        """
        Convenience method to create the timecode expression.

        :param enum align: alignment, must use provided enum flags
        :param int frame_start: starting frame for burnins current frame
        :param int frame_start_tc: starting frame for burnins timecode
        :param str text: text that will be before timecode
        :param dict options: recommended to use TimeCodeOptions
        """
        if not options:
            options = ffmpeg_burnins.TimeCodeOptions(**self.options_init)
        options = options.copy()
        if frame_start:
            options["frame_offset"] = frame_start

        if not frame_start_tc:
            frame_start_tc = options["frame_offset"]

        if not text:
            text = ""

        if not options.get("fps"):
            options["fps"] = self.frame_rate

        options["timecode"] = ffmpeg_burnins._frames_to_timecode(
            frame_start_tc,
            self.frame_rate
        )
        self._add_burnin(text, align, options, TIMECODE)
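    # Usage sketch (illustrative): burn in a timecode counting from frame
    # 1001 while the displayed timecode starts at frame 1, with a label:
    #   burnin.add_timecode(ModifiedBurnins.BOTTOM_LEFT, frame_start=1001,
    #                       frame_start_tc=1, text="TC: ")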
def _add_burnin(self, text, align, options, draw):
"""
@ -203,12 +196,20 @@ class ModifiedBurnins(ffmpeg_burnins.Burnins):
"""
resolution = self.resolution
        data = {
            'text': options.get('expression') or text,
            'color': options['font_color'],
            'size': options['font_size']
        }
        data.update(options)
        data.update(ffmpeg_burnins._drawtext(align, resolution, text, options))

        data = {
            'text': (
                text
                .replace(",", r"\,")
                .replace(':', r'\:')
            ),
            'color': options['font_color'],
            'size': options['font_size']
        }
        timecode_text = options.get("timecode") or ""
        text_for_size = text + timecode_text

        data.update(options)
        data.update(
            ffmpeg_burnins._drawtext(align, resolution, text_for_size, options)
        )
if 'font' in data and ffmpeg_burnins._is_windows():
data['font'] = data['font'].replace(os.sep, r'\\' + os.sep)
data['font'] = data['font'].replace(':', r'\:')
@ -261,9 +262,13 @@ class ModifiedBurnins(ffmpeg_burnins.Burnins):
is_sequence = "%" in output
        command = self.command(output=output,
                               args=args,
                               overwrite=overwrite)

        command = self.command(
            output=output,
            args=args,
            overwrite=overwrite
        )
print(command)
proc = subprocess.Popen(command, shell=True)
proc.communicate()
if proc.returncode != 0:
@ -292,15 +297,13 @@ def example(input_path, output_path):
burnin.add_text('My Text', ModifiedBurnins.TOP_CENTERED)
# Datetime
burnin.add_text('%d-%m-%y', ModifiedBurnins.TOP_RIGHT)
# Frame number
burnin.add_frame_numbers(ModifiedBurnins.TOP_RIGHT, start_frame=start_frame)
# Timecode
burnin.add_timecode(ModifiedBurnins.TOP_LEFT, start_frame=start_frame)
# Start render (overwrite output file if exist)
burnin.render(output_path, overwrite=True)
def burnins_from_data(input_path, codec_data, output_path, data, overwrite=True):
def burnins_from_data(
input_path, output_path, data, codec_data=None, overwrite=True
):
'''
    This method adds burnins to a video/image file based on preset settings.
Extension of output MUST be same as input. (mov -> mov, avi -> avi,...)
@ -324,47 +327,35 @@ def burnins_from_data(input_path, codec_data, output_path, data, overwrite=True)
- each key of "burnins" represents Alignment, there are 6 possibilities:
TOP_LEFT TOP_CENTERED TOP_RIGHT
BOTTOM_LEFT BOTTOM_CENTERED BOTTOM_RIGHT
- value for each key is dict which should contain "function" which says
what kind of burnin is that:
"text", "timecode" or "frame_numbers"
- "text" key with content is also required when "text" function is used
- value must be string with text you want to burn-in
        - text may contain specific formatting keys (explained below)
Requirement of *data* keys is based on presets.
- "start_frame" - is required when "timecode" or "frame_numbers" function is used
- "start_frame_tc" - when "timecode" should start with different frame
- "frame_start" - is required when "timecode" or "current_frame" ins keys
- "frame_start_tc" - when "timecode" should start with different frame
- *keys for static text*
    EXAMPLE:
        preset = {
            "options": {*OPTIONS FOR LOOK*},
            "burnins": {
                "TOP_LEFT": {
                    "function": "text",
                    "text": "static_text"
                },
                "TOP_RIGHT": {
                    "function": "text",
                    "text": "{shot}"
                },
                "BOTTOM_LEFT": {
                    "function": "timecode"
                },
                "BOTTOM_RIGHT": {
                    "function": "frame_numbers"
                }
            }
        }

        preset = {
            "options": {*OPTIONS FOR LOOK*},
            "burnins": {
                "TOP_LEFT": "static_text",
                "TOP_RIGHT": "{shot}",
                "BOTTOM_LEFT": "TC: {timecode}",
                "BOTTOM_RIGHT": "{frame_start}{current_frame}"
            }
        }
For this preset we'll need at least this data:
data = {
"start_frame": 1001,
"frame_start": 1001,
"shot": "sh0010"
}
    When the timecode should start from 1, the data needs:
data = {
"start_frame": 1001,
"start_frame_tc": 1,
"frame_start": 1001,
"frame_start_tc": 1,
"shot": "sh0010"
}
'''
@ -378,100 +369,90 @@ def burnins_from_data(input_path, codec_data, output_path, data, overwrite=True)
stream = burnin._streams[0]
if "resolution_width" not in data:
data["resolution_width"] = stream.get("width", "Unknown")
data["resolution_width"] = stream.get("width", MISSING_KEY_VALUE)
if "resolution_height" not in data:
data["resolution_height"] = stream.get("height", "Unknown")
data["resolution_height"] = stream.get("height", MISSING_KEY_VALUE)
if "fps" not in data:
data["fps"] = get_fps(stream.get("r_frame_rate", "0/0"))
    for align_text, preset in presets.get('burnins', {}).items():
        align = None
        if align_text == 'TOP_LEFT':
            align = ModifiedBurnins.TOP_LEFT
        elif align_text == 'TOP_CENTERED':
            align = ModifiedBurnins.TOP_CENTERED
        elif align_text == 'TOP_RIGHT':
            align = ModifiedBurnins.TOP_RIGHT
        elif align_text == 'BOTTOM_LEFT':
            align = ModifiedBurnins.BOTTOM_LEFT
        elif align_text == 'BOTTOM_CENTERED':
            align = ModifiedBurnins.BOTTOM_CENTERED
        elif align_text == 'BOTTOM_RIGHT':
            align = ModifiedBurnins.BOTTOM_RIGHT

        bi_func = preset.get('function')
        if not bi_func:
            log.error(
                'Missing function for burnin!'
                'Burnins are not created!'
            )
            return

        if (
            bi_func in ['frame_numbers', 'timecode'] and
            frame_start is None
        ):
            log.error(
                'start_frame is not set in entered data!'
                'Burnins are not created!'
            )
            return

        if bi_func == 'frame_numbers':
            current_frame_identifier = "{current_frame}"
            text = preset.get('text') or current_frame_identifier

            if current_frame_identifier not in text:
                log.warning((
                    'Text for Frame numbers don\'t have '
                    '`{current_frame}` key in text!'
                ))

            text_items = []
            split_items = text.split(current_frame_identifier)
            for item in split_items:
                text_items.append(item.format(**data))

            text = "{current_frame}".join(text_items)

            burnin.add_frame_numbers(align, start_frame=frame_start, text=text)

        elif bi_func == 'timecode':
            burnin.add_timecode(align, start_frame=frame_start_tc)

        elif bi_func == 'text':
            if not preset.get('text'):
                log.error('Text is not set for text function burnin!')
                return
            text = preset['text'].format(**data)
            burnin.add_text(text, align)

        elif bi_func == "datetime":
            date_format = preset["format"]
            burnin.add_datetime(date_format, align)

        else:
            log.error(
                'Unknown function for burnins {}'.format(bi_func)
            )
            return

    codec_args = ''
    if codec_data is not []:
        codec_args = " ".join(codec_data)

    # Check frame start and add expression if is available
    if frame_start is not None:
        data[CURRENT_FRAME_KEY[1:-1]] = r'%%{eif\:n+%d\:d}' % frame_start

    if frame_start_tc is not None:
        data[TIME_CODE_KEY[1:-1]] = TIME_CODE_KEY

    for align_text, value in presets.get('burnins', {}).items():
        if not value:
            continue

        has_timecode = TIME_CODE_KEY in value

        align = None
        align_text = align_text.strip().lower()
        if align_text == "top_left":
            align = ModifiedBurnins.TOP_LEFT
        elif align_text == "top_centered":
            align = ModifiedBurnins.TOP_CENTERED
        elif align_text == "top_right":
            align = ModifiedBurnins.TOP_RIGHT
        elif align_text == "bottom_left":
            align = ModifiedBurnins.BOTTOM_LEFT
        elif align_text == "bottom_centered":
            align = ModifiedBurnins.BOTTOM_CENTERED
        elif align_text == "bottom_right":
            align = ModifiedBurnins.BOTTOM_RIGHT

        # Replace with missing key value if frame_start_tc is not set
        if frame_start_tc is None and has_timecode:
            has_timecode = False
            log.warning(
                "`frame_start` and `frame_start_tc`"
                " are not set in entered data."
            )
            value = value.replace(TIME_CODE_KEY, MISSING_KEY_VALUE)

        key_pattern = re.compile(r"(\{.*?[^{0]*\})")

        missing_keys = []
        for group in key_pattern.findall(value):
            try:
                group.format(**data)
            except (TypeError, KeyError):
                missing_keys.append(group)

        missing_keys = list(set(missing_keys))
        for key in missing_keys:
            value = value.replace(key, MISSING_KEY_VALUE)

        # Handle timecode differently
        if has_timecode:
            args = [align, frame_start, frame_start_tc]
            if not value.startswith(TIME_CODE_KEY):
                value_items = value.split(TIME_CODE_KEY)
                text = value_items[0].format(**data)
                args.append(text)

            burnin.add_timecode(*args)
            continue

        text = value.format(**data)
        burnin.add_text(text, align, frame_start)

    codec_args = ""
    if codec_data:
        codec_args = " ".join(codec_data)
burnin.render(output_path, args=codec_args, overwrite=overwrite, **data)
if __name__ == '__main__':
import sys
import json
    data = json.loads(sys.argv[-1])
    burnins_from_data(
        data['input'],
        data['codec'],
        data['output'],
        data['burnin_data']
    )

    in_data = json.loads(sys.argv[-1])
    burnins_from_data(
        in_data['input'],
        in_data['output'],
        in_data['burnin_data'],
        in_data['codec']
    )
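    # Example payload (illustrative values; the keys match the reads above):
    #   {"input": "/path/in.mov", "output": "/path/out.mov",
    #    "burnin_data": {"frame_start": 1001, "shot": "sh0010"},
    #    "codec": []}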

View file

@ -19,8 +19,8 @@ class UserModule:
log = pype.Logger().get_logger("UserModule", "user")
def __init__(self, main_parent=None, parent=None):
self._callbacks_on_user_change = []
self.cred = {}
self.cred_path = os.path.normpath(os.path.join(
self.cred_folder_path, self.cred_filename
))
@ -28,6 +28,9 @@ class UserModule:
self.load_credentials()
def register_callback_on_user_change(self, callback):
self._callbacks_on_user_change.append(callback)
def tray_start(self):
"""Store credentials to env and preset them to widget"""
username = ""
@ -37,6 +40,9 @@ class UserModule:
os.environ[self.env_name] = username
self.widget_login.set_user(username)
def get_user(self):
return self.cred.get("username") or getpass.getuser()
def process_modules(self, modules):
""" Gives ability to connect with imported modules from TrayManager.
@ -95,6 +101,17 @@ class UserModule:
))
return self.save_credentials(getpass.getuser())
def change_credentials(self, username):
self.save_credentials(username)
for callback in self._callbacks_on_user_change:
try:
callback()
except Exception:
self.log.warning(
"Failed to execute callback \"{}\".".format(str(callback)),
exc_info=True
)
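    # Usage sketch (illustrative): another module can subscribe to user
    # changes, e.g.
    #   user_module.register_callback_on_user_change(on_user_change)
    # where `on_user_change` is any zero-argument callable.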
def save_credentials(self, username):
"""Save credentials to JSON file, env and widget"""
if username is None:

View file

@ -77,7 +77,7 @@ class UserWidget(QtWidgets.QWidget):
def click_save(self):
# all what should happen - validations and saving into appsdir
username = self.input_username.text()
self.module.save_credentials(username)
self.module.change_credentials(username)
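        # Note (illustrative): change_credentials, unlike save_credentials,
        # also runs the callbacks registered through
        # register_callback_on_user_change, so other modules are notified.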
self._close_widget()
def closeEvent(self, event):