Merge branch '2.x/develop' into feature/180-Store_task_types

Petr Kalis 2020-10-02 13:22:34 +02:00 committed by GitHub
commit 8e59fe67ff
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
288 changed files with 12871 additions and 498 deletions

View file

@ -1,3 +1,7 @@
from .settings import (
system_settings,
project_settings
)
from pypeapp import (
Logger,
Anatomy,
@ -49,6 +53,9 @@ from .lib import (
from .lib import _subprocess as subprocess
__all__ = [
"system_settings",
"project_settings",
"Logger",
"Anatomy",
"project_overrides_dir_path",

View file

@ -1,5 +1,6 @@
import os
import sys
from uuid import uuid4
from avalon import api, io, harmony
from avalon.vendor import Qt
@ -8,8 +9,11 @@ import pyblish.api
from pype import lib
signature = str(uuid4())
def set_scene_settings(settings):
func = """function func(args)
func = """function %s_func(args)
{
if (args[0]["fps"])
{
@ -18,12 +22,7 @@ def set_scene_settings(settings):
if (args[0]["frameStart"] && args[0]["frameEnd"])
{
var duration = args[0]["frameEnd"] - args[0]["frameStart"] + 1
if (frame.numberOf() > duration)
{
frame.remove(
duration, frame.numberOf() - duration
);
}
if (frame.numberOf() < duration)
{
frame.insert(
@ -41,8 +40,8 @@ def set_scene_settings(settings):
)
}
}
func
"""
%s_func
""" % (signature, signature)
harmony.send({"function": func, "args": [settings]})
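A minimal sketch of the naming pattern introduced above, assuming only the stdlib; build_js_func and payload are illustrative, and send() would stand in for avalon.harmony.send (assumption). The per-import uuid keeps the JavaScript function name unique, so two modules that both ship a generic "func" can no longer collide in Harmony's global scope. Dashes are replaced because a raw uuid is not a valid JavaScript identifier.

from uuid import uuid4

signature = str(uuid4()).replace("-", "_")  # dashes are invalid in JS names

def build_js_func(body):
    # Wrap arbitrary JS statements in a uniquely named function and
    # leave the name as the final expression, ready to be sent.
    return """function %s_func(args)
{
%s
}
%s_func
""" % (signature, body, signature)

payload = {"function": build_js_func("// body goes here"), "args": [{}]}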
@ -112,15 +111,15 @@ def check_inventory():
outdated_containers.append(container)
# Colour nodes.
func = """function func(args){
func = """function %s_func(args){
for( var i =0; i <= args[0].length - 1; ++i)
{
var red_color = new ColorRGBA(255, 0, 0, 255);
node.setColor(args[0][i], red_color);
}
}
func
"""
%s_func
""" % (signature, signature)
outdated_nodes = []
for container in outdated_containers:
if container["loader"] == "ImageSequenceLoader":
@ -149,7 +148,7 @@ def application_launch():
def export_template(backdrops, nodes, filepath):
func = """function func(args)
func = """function %s_func(args)
{
var temp_node = node.add("Top", "temp_note", "NOTE", 0, 0, 0);
@ -184,8 +183,8 @@ def export_template(backdrops, nodes, filepath):
Action.perform("onActionUpToParent()", "Node View");
node.deleteNode(template_group, true, true);
}
func
"""
%s_func
""" % (signature, signature)
harmony.send({
"function": func,
"args": [
@ -226,12 +225,15 @@ def install():
def on_pyblish_instance_toggled(instance, old_value, new_value):
"""Toggle node enabling on instance toggles."""
func = """function func(args)
func = """function %s_func(args)
{
node.setEnable(args[0], args[1])
}
func
"""
harmony.send(
{"function": func, "args": [instance[0], new_value]}
)
%s_func
""" % (signature, signature)
try:
harmony.send(
{"function": func, "args": [instance[0], new_value]}
)
except IndexError:
print(f"Instance '{instance}' is missing node")

View file

@ -174,6 +174,25 @@ class ReferenceLoader(api.Loader):
assert os.path.exists(path), "%s does not exist." % path
# Need to save alembic settings and reapply, because referencing resets
# them to incoming data.
alembic_attrs = ["speed", "offset", "cycleType"]
alembic_data = {}
if representation["name"] == "abc":
alembic_nodes = cmds.ls(
"{}:*".format(members[0].split(":")[0]), type="AlembicNode"
)
if alembic_nodes:
for attr in alembic_attrs:
node_attr = "{}.{}".format(alembic_nodes[0], attr)
alembic_data[attr] = cmds.getAttr(node_attr)
else:
cmds.warning(
"No alembic nodes found in {}".format(
cmds.ls("{}:*".format(members[0].split(":")[0]))
)
)
try:
content = cmds.file(path,
loadReference=reference_node,
@ -195,6 +214,16 @@ class ReferenceLoader(api.Loader):
self.log.warning("Ignoring file read error:\n%s", exc)
# Reapply alembic settings.
if representation["name"] == "abc":
alembic_nodes = cmds.ls(
"{}:*".format(members[0].split(":")[0]), type="AlembicNode"
)
if alembic_nodes:
for attr in alembic_attrs:
value = alembic_data[attr]
cmds.setAttr("{}.{}".format(alembic_nodes[0], attr), value)
# Fix PLN-40 for older containers created with Avalon that had the
# `.verticesOnlySet` set to True.
if cmds.getAttr("{}.verticesOnlySet".format(node)):
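The two alembic hunks above amount to a save-and-reapply pattern: capture attribute values before an operation that silently resets them, then restore the captured values afterwards. A generic sketch, assuming maya.cmds is available; node, attrs and operation are illustrative:

from maya import cmds

def preserve_attrs(node, attrs, operation):
    # Capture current values, run the destructive operation, restore.
    saved = {
        attr: cmds.getAttr("{}.{}".format(node, attr))
        for attr in attrs
    }
    operation()
    for attr, value in saved.items():
        cmds.setAttr("{}.{}".format(node, attr), value)

# e.g. preserve_attrs(alembic_node, ["speed", "offset", "cycleType"],
#                     reload_reference)  # both names hypothetical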

View file

@ -19,7 +19,7 @@ from abc import ABCMeta, abstractmethod
from avalon import io, pipeline
import six
import avalon.api
from .api import config, Anatomy
from .api import config, Anatomy, Logger
log = logging.getLogger(__name__)
@ -1622,7 +1622,7 @@ class ApplicationAction(avalon.api.Action):
parsed application `.toml` this can launch the application.
"""
_log = None
config = None
group = None
variant = None
@ -1632,6 +1632,12 @@ class ApplicationAction(avalon.api.Action):
"AVALON_TASK"
)
@property
def log(self):
if self._log is None:
self._log = Logger().get_logger(self.__class__.__name__)
return self._log
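The lazily created logger above boils down to this pattern; the sketch uses the stdlib logging module in place of pype's Logger (an assumption, the real call is Logger().get_logger(...)):

import logging

class SomeAction:
    _log = None

    @property
    def log(self):
        # Build the logger on first access only, then cache it, so
        # actions that never log never pay for logger setup.
        if self._log is None:
            self._log = logging.getLogger(self.__class__.__name__)
        return self._log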
def is_compatible(self, session):
for key in self.required_session_keys:
if key not in session:
@ -1644,6 +1650,165 @@ class ApplicationAction(avalon.api.Action):
project_name = session["AVALON_PROJECT"]
asset_name = session["AVALON_ASSET"]
task_name = session["AVALON_TASK"]
return launch_application(
launch_application(
project_name, asset_name, task_name, self.name
)
self._ftrack_after_launch_procedure(
project_name, asset_name, task_name
)
def _ftrack_after_launch_procedure(
self, project_name, asset_name, task_name
):
# TODO move to launch hook
required_keys = ("FTRACK_SERVER", "FTRACK_API_USER", "FTRACK_API_KEY")
for key in required_keys:
if not os.environ.get(key):
self.log.debug((
"Missing required environment \"{}\""
" for Ftrack after launch procedure."
).format(key))
return
try:
import ftrack_api
session = ftrack_api.Session(auto_connect_event_hub=True)
self.log.debug("Ftrack session created")
except Exception:
self.log.warning("Couldn't create Ftrack session")
return
try:
entity = self._find_ftrack_task_entity(
session, project_name, asset_name, task_name
)
self._ftrack_status_change(session, entity, project_name)
self._start_timer(session, entity, ftrack_api)
except Exception:
self.log.warning(
"Couldn't finish Ftrack procedure.", exc_info=True
)
return
finally:
session.close()
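The overall shape of _ftrack_after_launch_procedure is: bail out early when the integration is not configured, and guarantee the session is closed whatever happens. A condensed, runnable sketch; DummySession stands in for ftrack_api.Session:

import os

REQUIRED_KEYS = ("FTRACK_SERVER", "FTRACK_API_USER", "FTRACK_API_KEY")

class DummySession:
    # Stand-in for ftrack_api.Session, only to make the sketch runnable.
    def close(self):
        print("session closed")

def after_launch_procedure():
    for key in REQUIRED_KEYS:
        if not os.environ.get(key):
            return  # integration not configured; skip quietly
    session = DummySession()
    try:
        pass  # status change and timer start would happen here
    finally:
        session.close()  # always release the connection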
def _find_ftrack_task_entity(
self, session, project_name, asset_name, task_name
):
project_entity = session.query(
"Project where full_name is \"{}\"".format(project_name)
).first()
if not project_entity:
self.log.warning(
"Couldn't find project \"{}\" in Ftrack.".format(project_name)
)
return
potential_task_entities = session.query((
"TypedContext where parent.name is \"{}\" and project_id is \"{}\""
).format(asset_name, project_entity["id"])).all()
filtered_entities = []
for _entity in potential_task_entities:
if (
_entity.entity_type.lower() == "task"
and _entity["name"] == task_name
):
filtered_entities.append(_entity)
if not filtered_entities:
self.log.warning((
"Couldn't find task \"{}\" under parent \"{}\" in Ftrack."
).format(task_name, asset_name))
return
if len(filtered_entities) > 1:
self.log.warning((
"Found more than one task \"{}\""
" under parent \"{}\" in Ftrack."
).format(task_name, asset_name))
return
return filtered_entities[0]
def _ftrack_status_change(self, session, entity, project_name):
presets = config.get_presets(project_name)["ftrack"]["ftrack_config"]
statuses = presets.get("status_update")
if not statuses:
return
actual_status = entity["status"]["name"].lower()
already_tested = set()
ent_path = "/".join(
[ent["name"] for ent in entity["link"]]
)
while True:
next_status_name = None
for key, value in statuses.items():
if key in already_tested:
continue
if actual_status in value or "_any_" in value:
if key != "_ignore_":
next_status_name = key
already_tested.add(key)
break
already_tested.add(key)
if next_status_name is None:
break
try:
query = "Status where name is \"{}\"".format(
next_status_name
)
status = session.query(query).one()
entity["status"] = status
session.commit()
self.log.debug("Changing status to \"{}\" <{}>".format(
next_status_name, ent_path
))
break
except Exception:
session.rollback()
msg = (
"Status \"{}\" in presets wasn't found"
" on Ftrack entity type \"{}\""
).format(next_status_name, entity.entity_type)
self.log.warning(msg)
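A worked example of the loop above, with made-up presets: the mapping goes from a target status name to the list of current statuses it applies from, "_any_" acts as a wildcard, and "_ignore_" consumes a match without changing anything.

statuses = {
    "In Progress": ["ready", "not ready"],  # made-up preset values
    "_ignore_": ["omitted"],
}
actual_status = "ready"
already_tested = set()

while True:
    next_status_name = None
    for key, value in statuses.items():
        if key in already_tested:
            continue
        if actual_status in value or "_any_" in value:
            if key != "_ignore_":
                next_status_name = key
            already_tested.add(key)
            break
        already_tested.add(key)
    if next_status_name is None:
        break
    # the real code queries the Status entity and commits here; a
    # failed commit rolls back and the while loop tries the next key
    print("would switch status to", next_status_name)
    break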
def _start_timer(self, session, entity, _ftrack_api):
self.log.debug("Triggering timer start.")
user_entity = session.query("User where username is \"{}\"".format(
os.environ["FTRACK_API_USER"]
)).first()
if not user_entity:
self.log.warning(
"Couldn't find user with username \"{}\" in Ftrack".format(
os.environ["FTRACK_API_USER"]
)
)
return
source = {
"user": {
"id": user_entity["id"],
"username": user_entity["username"]
}
}
event_data = {
"actionIdentifier": "start.timer",
"selection": [{"entityId": entity["id"], "entityType": "task"}]
}
session.event_hub.publish(
_ftrack_api.event.base.Event(
topic="ftrack.action.launch",
data=event_data,
source=source
),
on_error="ignore"
)
self.log.debug("Timer start triggered successfully.")

View file

@ -1,16 +1,27 @@
from Qt import QtWidgets
from avalon.tools import libraryloader
from pype.api import Logger
from pype.tools.launcher import LauncherWindow, actions
class AvalonApps:
def __init__(self, main_parent=None, parent=None):
self.log = Logger().get_logger(__name__)
self.main_parent = main_parent
self.tray_init(main_parent, parent)
def tray_init(self, main_parent, parent):
from avalon.tools.libraryloader import app
from avalon import style
from pype.tools.launcher import LauncherWindow, actions
self.parent = parent
self.main_parent = main_parent
self.app_launcher = LauncherWindow()
self.libraryloader = app.Window(
icon=self.parent.icon,
show_projects=True,
show_libraries=True
)
self.libraryloader.setStyleSheet(style.load_stylesheet())
# actions.register_default_actions()
actions.register_config_actions()
@ -23,6 +34,7 @@ class AvalonApps:
# Definition of Tray menu
def tray_menu(self, parent_menu=None):
from Qt import QtWidgets
# Actions
if parent_menu is None:
if self.parent is None:
@ -52,9 +64,11 @@ class AvalonApps:
self.app_launcher.activateWindow()
def show_library_loader(self):
libraryloader.show(
parent=self.main_parent,
icon=self.parent.icon,
show_projects=True,
show_libraries=True
)
self.libraryloader.show()
# Raise and activate the window
# for MacOS
self.libraryloader.raise_()
# for Windows
self.libraryloader.activateWindow()
self.libraryloader.refresh()

View file

@ -1,9 +1,8 @@
import os
import threading
import time
from pype.api import Logger
from avalon import style
from Qt import QtWidgets
from .widgets import ClockifySettings, MessageWidget
from .clockify_api import ClockifyAPI
from .constants import CLOCKIFY_FTRACK_USER_PATH
@ -17,11 +16,21 @@ class ClockifyModule:
os.environ["CLOCKIFY_WORKSPACE"] = self.workspace_name
self.timer_manager = None
self.MessageWidgetClass = None
self.clockapi = ClockifyAPI(master_parent=self)
self.log = Logger().get_logger(self.__class__.__name__, "PypeTray")
self.tray_init(main_parent, parent)
def tray_init(self, main_parent, parent):
from .widgets import ClockifySettings, MessageWidget
self.MessageWidgetClass = MessageWidget
self.main_parent = main_parent
self.parent = parent
self.clockapi = ClockifyAPI(master_parent=self)
self.message_widget = None
self.widget_settings = ClockifySettings(main_parent, self)
self.widget_settings_required = None
@ -57,11 +66,10 @@ class ClockifyModule:
)
if 'AvalonApps' in modules:
from launcher import lib
actions_path = os.path.sep.join([
actions_path = os.path.join(
os.path.dirname(__file__),
'launcher_actions'
])
)
current = os.environ.get('AVALON_ACTIONS', '')
if current:
current += os.pathsep
@ -78,12 +86,12 @@ class ClockifyModule:
self.stop_timer()
def timer_started(self, data):
if hasattr(self, 'timer_manager'):
if self.timer_manager:
self.timer_manager.start_timers(data)
def timer_stopped(self):
self.bool_timer_run = False
if hasattr(self, 'timer_manager'):
if self.timer_manager:
self.timer_manager.stop_timers()
def start_timer_check(self):
@ -102,7 +110,7 @@ class ClockifyModule:
self.thread_timer_check = None
def check_running(self):
import time
while self.bool_thread_check_running is True:
bool_timer_run = False
if self.clockapi.get_in_progress() is not None:
@ -156,15 +164,14 @@ class ClockifyModule:
self.timer_stopped()
def signed_in(self):
if hasattr(self, 'timer_manager'):
if not self.timer_manager:
return
if not self.timer_manager:
return
if not self.timer_manager.last_task:
return
if not self.timer_manager.last_task:
return
if self.timer_manager.is_running:
self.start_timer_manager(self.timer_manager.last_task)
if self.timer_manager.is_running:
self.start_timer_manager(self.timer_manager.last_task)
def start_timer(self, input_data):
# If api key is not entered then skip
@ -197,11 +204,14 @@ class ClockifyModule:
"<br><br>Please inform your Project Manager."
).format(project_name, str(self.clockapi.workspace_name))
self.message_widget = MessageWidget(
self.main_parent, msg, "Clockify - Info Message"
)
self.message_widget.closed.connect(self.on_message_widget_close)
self.message_widget.show()
if self.MessageWidgetClass:
self.message_widget = self.MessageWidgetClass(
self.main_parent, msg, "Clockify - Info Message"
)
self.message_widget.closed.connect(
self.on_message_widget_close
)
self.message_widget.show()
return
@ -227,31 +237,29 @@ class ClockifyModule:
# Definition of Tray menu
def tray_menu(self, parent_menu):
# Menu for Tray App
self.menu = QtWidgets.QMenu('Clockify', parent_menu)
self.menu.setProperty('submenu', 'on')
self.menu.setStyleSheet(style.load_stylesheet())
from Qt import QtWidgets
menu = QtWidgets.QMenu("Clockify", parent_menu)
menu.setProperty("submenu", "on")
# Actions
self.aShowSettings = QtWidgets.QAction(
"Settings", self.menu
)
self.aStopTimer = QtWidgets.QAction(
"Stop timer", self.menu
)
action_show_settings = QtWidgets.QAction("Settings", menu)
action_stop_timer = QtWidgets.QAction("Stop timer", menu)
self.menu.addAction(self.aShowSettings)
self.menu.addAction(self.aStopTimer)
menu.addAction(action_show_settings)
menu.addAction(action_stop_timer)
self.aShowSettings.triggered.connect(self.show_settings)
self.aStopTimer.triggered.connect(self.stop_timer)
action_show_settings.triggered.connect(self.show_settings)
action_stop_timer.triggered.connect(self.stop_timer)
self.action_stop_timer = action_stop_timer
self.set_menu_visibility()
parent_menu.addMenu(self.menu)
parent_menu.addMenu(menu)
def show_settings(self):
self.widget_settings.input_api_key.setText(self.clockapi.get_api_key())
self.widget_settings.show()
def set_menu_visibility(self):
self.aStopTimer.setVisible(self.bool_timer_run)
self.action_stop_timer.setVisible(self.bool_timer_run)

View file

@ -1,2 +1,12 @@
from .lib import *
from . import ftrack_server
from .ftrack_server import FtrackServer, check_ftrack_url
from .lib import BaseHandler, BaseEvent, BaseAction
__all__ = (
"ftrack_server",
"FtrackServer",
"check_ftrack_url",
"BaseHandler",
"BaseEvent",
"BaseAction"
)

View file

@ -717,6 +717,9 @@ class SyncToAvalonEvent(BaseEvent):
if not self.ftrack_removed:
return
ent_infos = self.ftrack_removed
self.log.debug(
"Processing removed entities: {}".format(str(ent_infos))
)
removable_ids = []
recreate_ents = []
removed_names = []
@ -878,8 +881,9 @@ class SyncToAvalonEvent(BaseEvent):
self.process_session.commit()
found_idx = None
for idx, _entity in enumerate(self._avalon_ents):
if _entity["_id"] == avalon_entity["_id"]:
proj_doc, asset_docs = self._avalon_ents
for idx, asset_doc in enumerate(asset_docs):
if asset_doc["_id"] == avalon_entity["_id"]:
found_idx = idx
break
@ -894,7 +898,8 @@ class SyncToAvalonEvent(BaseEvent):
new_entity_id
)
# Update cached entities
self._avalon_ents[found_idx] = avalon_entity
asset_docs[found_idx] = avalon_entity
self._avalon_ents = proj_doc, asset_docs
if self._avalon_ents_by_id is not None:
mongo_id = avalon_entity["_id"]
@ -1258,6 +1263,10 @@ class SyncToAvalonEvent(BaseEvent):
if not ent_infos:
return
self.log.debug(
"Processing renamed entities: {}".format(str(ent_infos))
)
renamed_tasks = {}
not_found = {}
changeable_queue = queue.Queue()
@ -1453,6 +1462,10 @@ class SyncToAvalonEvent(BaseEvent):
if not ent_infos:
return
self.log.debug(
"Processing added entities: {}".format(str(ent_infos))
)
cust_attrs, hier_attrs = self.avalon_cust_attrs
entity_type_conf_ids = {}
# Skip if it already exists in avalon db or task entities
@ -1729,6 +1742,10 @@ class SyncToAvalonEvent(BaseEvent):
if not self.ftrack_moved:
return
self.log.debug(
"Processing moved entities: {}".format(str(self.ftrack_moved))
)
ftrack_moved = {k: v for k, v in sorted(
self.ftrack_moved.items(),
key=(lambda line: len(
@ -1859,6 +1876,10 @@ class SyncToAvalonEvent(BaseEvent):
if not self.ftrack_updated:
return
self.log.debug(
"Processing updated entities: {}".format(str(self.ftrack_updated))
)
ent_infos = self.ftrack_updated
ftrack_mongo_mapping = {}
not_found_ids = []

View file

@ -1,2 +1,8 @@
from .ftrack_server import FtrackServer
from .lib import check_ftrack_url
__all__ = (
"FtrackServer",
"check_ftrack_url"
)

View file

@ -16,9 +16,9 @@ import pymongo
from pype.api import decompose_url
class NotActiveTable(Exception):
class NotActiveCollection(Exception):
def __init__(self, *args, **kwargs):
msg = "Active table is not set. (This is bug)"
msg = "Active collection is not set. (This is bug)"
if not (args or kwargs):
args = [msg]
super().__init__(*args, **kwargs)
@ -40,12 +40,12 @@ def auto_reconnect(func):
return decorated
def check_active_table(func):
def check_active_collection(func):
"""Check if CustomDbConnector has active collection."""
@functools.wraps(func)
def decorated(obj, *args, **kwargs):
if not obj.active_table:
raise NotActiveTable()
if not obj.active_collection:
raise NotActiveCollection()
return func(obj, *args, **kwargs)
return decorated
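Usage sketch for the renamed decorator, with a dummy connector standing in for CustomDbConnector (check_active_collection and NotActiveCollection as defined above):

class DummyConnector:
    active_collection = None

    @check_active_collection
    def find_one(self, filter):
        return {"matched": filter}

conn = DummyConnector()
try:
    conn.find_one({})            # raises NotActiveCollection
except NotActiveCollection:
    pass
conn.active_collection = "events"
print(conn.find_one({"id": 1}))  # works once a collection is set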
@ -55,7 +55,7 @@ class CustomDbConnector:
timeout = int(os.environ["AVALON_TIMEOUT"])
def __init__(
self, uri, database_name, port=None, table_name=None
self, uri, database_name, port=None, collection_name=None
):
self._mongo_client = None
self._sentry_client = None
@ -76,10 +76,10 @@ class CustomDbConnector:
self._port = port
self._database_name = database_name
self.active_table = table_name
self.active_collection = collection_name
def __getitem__(self, key):
# gives direct access to collection without setting `active_table`
# gives direct access to collection without setting `active_collection`
return self._database[key]
def __getattribute__(self, attr):
@ -88,9 +88,11 @@ class CustomDbConnector:
try:
return super(CustomDbConnector, self).__getattribute__(attr)
except AttributeError:
if self.active_table is None:
raise NotActiveTable()
return self._database[self.active_table].__getattribute__(attr)
if self.active_collection is None:
raise NotActiveCollection()
return self._database[self.active_collection].__getattribute__(
attr
)
def install(self):
"""Establish a persistent connection to the database"""
@ -146,46 +148,30 @@ class CustomDbConnector:
self._is_installed = False
atexit.unregister(self.uninstall)
def create_table(self, name, **options):
if self.exist_table(name):
def collection_exists(self, collection_name):
return collection_name in self.collections()
def create_collection(self, name, **options):
if self.collection_exists(name):
return
return self._database.create_collection(name, **options)
def exist_table(self, table_name):
return table_name in self.tables()
def create_table(self, name, **options):
if self.exist_table(name):
return
return self._database.create_collection(name, **options)
def exist_table(self, table_name):
return table_name in self.tables()
def tables(self):
"""List available tables
Returns:
list of table names
"""
collection_names = self.collections()
for table_name in collection_names:
if table_name in ("system.indexes",):
continue
yield table_name
@auto_reconnect
def collections(self):
return self._database.collection_names()
for col_name in self._database.collection_names():
if col_name not in ("system.indexes",):
yield col_name
@check_active_table
@check_active_collection
@auto_reconnect
def insert_one(self, item, **options):
assert isinstance(item, dict), "item must be of type <dict>"
return self._database[self.active_table].insert_one(item, **options)
return self._database[self.active_collection].insert_one(
item, **options
)
@check_active_table
@check_active_collection
@auto_reconnect
def insert_many(self, items, ordered=True, **options):
# check if all items are valid
@ -194,72 +180,74 @@ class CustomDbConnector:
assert isinstance(item, dict), "`item` must be of type <dict>"
options["ordered"] = ordered
return self._database[self.active_table].insert_many(items, **options)
return self._database[self.active_collection].insert_many(
items, **options
)
@check_active_table
@check_active_collection
@auto_reconnect
def find(self, filter, projection=None, sort=None, **options):
options["sort"] = sort
return self._database[self.active_table].find(
return self._database[self.active_collection].find(
filter, projection, **options
)
@check_active_table
@check_active_collection
@auto_reconnect
def find_one(self, filter, projection=None, sort=None, **options):
assert isinstance(filter, dict), "filter must be <dict>"
options["sort"] = sort
return self._database[self.active_table].find_one(
return self._database[self.active_collection].find_one(
filter,
projection,
**options
)
@check_active_table
@check_active_collection
@auto_reconnect
def replace_one(self, filter, replacement, **options):
return self._database[self.active_table].replace_one(
return self._database[self.active_collection].replace_one(
filter, replacement, **options
)
@check_active_table
@check_active_collection
@auto_reconnect
def update_one(self, filter, update, **options):
return self._database[self.active_table].update_one(
return self._database[self.active_collection].update_one(
filter, update, **options
)
@check_active_table
@check_active_collection
@auto_reconnect
def update_many(self, filter, update, **options):
return self._database[self.active_table].update_many(
return self._database[self.active_collection].update_many(
filter, update, **options
)
@check_active_table
@check_active_collection
@auto_reconnect
def distinct(self, **options):
return self._database[self.active_table].distinct(**options)
return self._database[self.active_collection].distinct(**options)
@check_active_table
@check_active_collection
@auto_reconnect
def drop_collection(self, name_or_collection, **options):
return self._database[self.active_table].drop(
return self._database[self.active_collection].drop(
name_or_collection, **options
)
@check_active_table
@check_active_collection
@auto_reconnect
def delete_one(self, filter, collation=None, **options):
options["collation"] = collation
return self._database[self.active_table].delete_one(
return self._database[self.active_collection].delete_one(
filter, **options
)
@check_active_table
@check_active_collection
@auto_reconnect
def delete_many(self, filter, collation=None, **options):
options["collation"] = collation
return self._database[self.active_table].delete_many(
return self._database[self.active_collection].delete_many(
filter, **options
)

View file

@ -26,7 +26,7 @@ from pype.api import (
compose_url
)
from pype.modules.ftrack.lib.custom_db_connector import CustomDbConnector
from .custom_db_connector import CustomDbConnector
TOPIC_STATUS_SERVER = "pype.event.server.status"
@ -153,9 +153,9 @@ class StorerEventHub(SocketBaseEventHub):
class ProcessEventHub(SocketBaseEventHub):
hearbeat_msg = b"processor"
uri, port, database, table_name = get_ftrack_event_mongo_info()
uri, port, database, collection_name = get_ftrack_event_mongo_info()
is_table_created = False
is_collection_created = False
pypelog = Logger().get_logger("Session Processor")
def __init__(self, *args, **kwargs):
@ -163,7 +163,7 @@ class ProcessEventHub(SocketBaseEventHub):
self.uri,
self.database,
self.port,
self.table_name
self.collection_name
)
super(ProcessEventHub, self).__init__(*args, **kwargs)
@ -184,7 +184,7 @@ class ProcessEventHub(SocketBaseEventHub):
"Error with Mongo access, probably permissions."
"Check if exist database with name \"{}\""
" and collection \"{}\" inside."
).format(self.database, self.table_name))
).format(self.database, self.collection_name))
self.sock.sendall(b"MongoError")
sys.exit(0)
@ -205,10 +205,16 @@ class ProcessEventHub(SocketBaseEventHub):
else:
try:
self._handle(event)
mongo_id = event["data"].get("_event_mongo_id")
if mongo_id is None:
continue
self.dbcon.update_one(
{"id": event["id"]},
{"_id": mongo_id},
{"$set": {"pype_data.is_processed": True}}
)
except pymongo.errors.AutoReconnect:
self.pypelog.error((
"Mongo server \"{}\" is not responding, exiting."
@ -244,6 +250,7 @@ class ProcessEventHub(SocketBaseEventHub):
}
try:
event = ftrack_api.event.base.Event(**new_event_data)
event["data"]["_event_mongo_id"] = event_data["_id"]
except Exception:
self.logger.exception(L(
'Failed to convert payload into event: {0}',
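The intent of the change above, sketched with plain dicts (all values made up): the Mongo _id of the stored event document rides along inside the ftrack event payload, so after processing, the document can be flagged by its unique _id rather than by the ftrack event id.

stored = {
    "_id": "5f63c0ffee",  # made-up ObjectId
    "event_data": {"topic": "x", "data": {}},
}

event = dict(stored["event_data"])  # stand-in for an ftrack_api Event
event["data"]["_event_mongo_id"] = stored["_id"]

# ... later, after _handle(event) succeeded:
mongo_id = event["data"].get("_event_mongo_id")
if mongo_id is not None:
    filter_doc = {"_id": mongo_id}
    update_doc = {"$set": {"pype_data.is_processed": True}}
    print(filter_doc, update_doc)  # what dbcon.update_one receives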

View file

@ -12,7 +12,7 @@ from pype.modules.ftrack.ftrack_server.lib import (
SocketSession, StatusEventHub,
TOPIC_STATUS_SERVER, TOPIC_STATUS_SERVER_RESULT
)
from pype.api import Logger, config
from pype.api import Logger
log = Logger().get_logger("Event storer")
action_identifier = (
@ -23,17 +23,7 @@ action_data = {
"label": "Pype Admin",
"variant": "- Event server Status ({})".format(host_ip),
"description": "Get Infromation about event server",
"actionIdentifier": action_identifier,
"icon": "{}/ftrack/action_icons/PypeAdmin.svg".format(
os.environ.get(
"PYPE_STATICS_SERVER",
"http://localhost:{}".format(
config.get_presets().get("services", {}).get(
"rest_api", {}
).get("default_port", 8021)
)
)
)
"actionIdentifier": action_identifier
}

View file

@ -12,7 +12,9 @@ from pype.modules.ftrack.ftrack_server.lib import (
get_ftrack_event_mongo_info,
TOPIC_STATUS_SERVER, TOPIC_STATUS_SERVER_RESULT
)
from pype.modules.ftrack.lib.custom_db_connector import CustomDbConnector
from pype.modules.ftrack.ftrack_server.custom_db_connector import (
CustomDbConnector
)
from pype.api import Logger
log = Logger().get_logger("Event storer")
@ -23,8 +25,8 @@ class SessionFactory:
session = None
uri, port, database, table_name = get_ftrack_event_mongo_info()
dbcon = CustomDbConnector(uri, database, port, table_name)
uri, port, database, collection_name = get_ftrack_event_mongo_info()
dbcon = CustomDbConnector(uri, database, port, collection_name)
# ignore_topics = ["ftrack.meta.connected"]
ignore_topics = []
@ -200,7 +202,7 @@ def main(args):
"Error with Mongo access, probably permissions."
"Check if exist database with name \"{}\""
" and collection \"{}\" inside."
).format(database, table_name))
).format(database, collection_name))
sock.sendall(b"MongoError")
finally:

View file

@ -1150,7 +1150,7 @@ class SyncEntitiesFactory:
continue
ent_path_items = [ent["name"] for ent in entity["link"]]
parents = ent_path_items[1:len(ent_path_items)-1:]
parents = ent_path_items[1:len(ent_path_items) - 1:]
hierarchy = ""
if len(parents) > 0:
hierarchy = os.path.sep.join(parents)
@ -1269,7 +1269,7 @@ class SyncEntitiesFactory:
if not is_right and not else_match_better:
entity = entity_dict["entity"]
ent_path_items = [ent["name"] for ent in entity["link"]]
parents = ent_path_items[1:len(ent_path_items)-1:]
parents = ent_path_items[1:len(ent_path_items) - 1:]
av_parents = av_ent_by_mongo_id["data"]["parents"]
if av_parents == parents:
is_right = True
@ -2272,6 +2272,7 @@ class SyncEntitiesFactory:
"name": _name,
"parent": parent_entity
})
self.session.commit()
final_entity = {}
for k, v in av_entity.items():

View file

@ -2,7 +2,7 @@ import functools
import time
from pype.api import Logger
import ftrack_api
from pype.modules.ftrack.ftrack_server.lib import SocketSession
from pype.modules.ftrack import ftrack_server
class MissingPermision(Exception):
@ -41,7 +41,7 @@ class BaseHandler(object):
self.log = Logger().get_logger(self.__class__.__name__)
if not(
isinstance(session, ftrack_api.session.Session) or
isinstance(session, SocketSession)
isinstance(session, ftrack_server.lib.SocketSession)
):
raise Exception((
"Session object entered with args is instance of \"{}\""
@ -49,7 +49,7 @@ class BaseHandler(object):
).format(
str(type(session)),
str(ftrack_api.session.Session),
str(SocketSession)
str(ftrack_server.lib.SocketSession)
))
self._session = session

View file

@ -1,7 +1,7 @@
import os
import requests
from avalon import style
from pype.modules.ftrack import credentials
from pype.modules.ftrack.lib import credentials
from . import login_tools
from pype.api import resources
from Qt import QtCore, QtGui, QtWidgets
@ -238,6 +238,8 @@ class CredentialsDialog(QtWidgets.QDialog):
# If there is an existing server thread running we need to stop it.
if self._login_server_thread:
if self._login_server_thread.isAlive():
self._login_server_thread.stop()
self._login_server_thread.join()
self._login_server_thread = None

View file

@ -61,12 +61,17 @@ class LoginServerThread(threading.Thread):
def __init__(self, url, callback):
self.url = url
self.callback = callback
self._server = None
super(LoginServerThread, self).__init__()
def _handle_login(self, api_user, api_key):
'''Login to server with *api_user* and *api_key*.'''
self.callback(api_user, api_key)
def stop(self):
if self._server:
self._server.server_close()
def run(self):
'''Listen for events.'''
self._server = HTTPServer(

View file

@ -1,6 +1,4 @@
from Qt import QtWidgets
from pype.api import Logger
from ..gui.app import LogsWindow
class LoggingModule:
@ -8,7 +6,13 @@ class LoggingModule:
self.parent = parent
self.log = Logger().get_logger(self.__class__.__name__, "logging")
self.window = None
self.tray_init(main_parent, parent)
def tray_init(self, main_parent, parent):
try:
from .gui.app import LogsWindow
self.window = LogsWindow()
self.tray_menu = self._tray_menu
except Exception:
@ -18,12 +22,12 @@ class LoggingModule:
# Definition of Tray menu
def _tray_menu(self, parent_menu):
from Qt import QtWidgets
# Menu for Tray App
menu = QtWidgets.QMenu('Logging', parent_menu)
# menu.setProperty('submenu', 'on')
show_action = QtWidgets.QAction("Show Logs", menu)
show_action.triggered.connect(self.on_show_logs)
show_action.triggered.connect(self._show_logs_gui)
menu.addAction(show_action)
parent_menu.addMenu(menu)
@ -34,5 +38,6 @@ class LoggingModule:
def process_modules(self, modules):
return
def on_show_logs(self):
self.window.show()
def _show_logs_gui(self):
if self.window:
self.window.show()

View file

@ -1,10 +1,7 @@
import appdirs
from avalon import style
from Qt import QtWidgets
import os
import json
from .widget_login import MusterLogin
from avalon.vendor import requests
import appdirs
import requests
class MusterModule:
@ -21,6 +18,11 @@ class MusterModule:
self.cred_path = os.path.join(
self.cred_folder_path, self.cred_filename
)
self.tray_init(main_parent, parent)
def tray_init(self, main_parent, parent):
from .widget_login import MusterLogin
self.main_parent = main_parent
self.parent = parent
self.widget_login = MusterLogin(main_parent, self)
@ -38,10 +40,6 @@ class MusterModule:
pass
def process_modules(self, modules):
def api_callback():
self.aShowLogin.trigger()
if "RestApiServer" in modules:
def api_show_login():
self.aShowLogin.trigger()
@ -51,13 +49,12 @@ class MusterModule:
# Definition of Tray menu
def tray_menu(self, parent):
"""
Add **change credentials** option to tray menu.
"""
"""Add **change credentials** option to tray menu."""
from Qt import QtWidgets
# Menu for Tray App
self.menu = QtWidgets.QMenu('Muster', parent)
self.menu.setProperty('submenu', 'on')
self.menu.setStyleSheet(style.load_stylesheet())
# Actions
self.aShowLogin = QtWidgets.QAction(
@ -91,9 +88,9 @@ class MusterModule:
if not MUSTER_REST_URL:
raise AttributeError("Muster REST API url not set")
params = {
'username': username,
'password': password
}
'username': username,
'password': password
}
api_entry = '/api/login'
response = self._requests_post(
MUSTER_REST_URL + api_entry, params=params)

View file

@ -1,6 +1,6 @@
import os
import socket
from Qt import QtCore
import threading
from socketserver import ThreadingMixIn
from http.server import HTTPServer
@ -155,14 +155,15 @@ class RestApiServer:
def is_running(self):
return self.rest_api_thread.is_running
def tray_exit(self):
self.stop()
def stop(self):
self.rest_api_thread.is_running = False
def thread_stopped(self):
self._is_running = False
self.rest_api_thread.stop()
self.rest_api_thread.join()
class RestApiThread(QtCore.QThread):
class RestApiThread(threading.Thread):
""" Listener for REST requests.
It is possible to register callbacks for url paths.
@ -174,6 +175,12 @@ class RestApiThread(QtCore.QThread):
self.is_running = False
self.module = module
self.port = port
self.httpd = None
def stop(self):
self.is_running = False
if self.httpd:
self.httpd.server_close()
def run(self):
self.is_running = True
@ -185,12 +192,14 @@ class RestApiThread(QtCore.QThread):
)
with ThreadingSimpleServer(("", self.port), Handler) as httpd:
self.httpd = httpd
while self.is_running:
httpd.handle_request()
except Exception:
log.warning(
"Rest Api Server service has failed", exc_info=True
)
self.httpd = None
self.is_running = False
self.module.thread_stopped()
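The reworked thread relies on one subtlety: handle_request() blocks until a request arrives, so clearing is_running alone would not end the loop; closing the socket from another thread unblocks it. A self-contained sketch with the stdlib server:

import threading
from http.server import BaseHTTPRequestHandler, HTTPServer

class StoppableServerThread(threading.Thread):
    def __init__(self, port=0):
        super().__init__()
        self.is_running = False
        self.httpd = None
        self.port = port

    def run(self):
        self.is_running = True
        try:
            with HTTPServer(("", self.port), BaseHTTPRequestHandler) as httpd:
                self.httpd = httpd
                while self.is_running:
                    httpd.handle_request()  # blocks for one request
        except Exception:
            pass  # raised when stop() closes the socket underneath us
        self.httpd = None
        self.is_running = False

    def stop(self):
        self.is_running = False
        if self.httpd:
            self.httpd.server_close()  # unblocks handle_request()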

View file

@ -2,7 +2,6 @@ import os
import sys
import subprocess
import pype
from pype import tools
class StandAlonePublishModule:
@ -30,6 +29,7 @@ class StandAlonePublishModule:
))
def show(self):
from pype import tools
standalone_publisher_tool_path = os.path.join(
os.path.dirname(tools.__file__),
"standalonepublish"

View file

@ -1,5 +1,4 @@
from .timers_manager import TimersManager
from .widget_user_idle import WidgetUserIdle
CLASS_DEFINIION = TimersManager

View file

@ -1,21 +1,7 @@
from .widget_user_idle import WidgetUserIdle, SignalHandler
from pype.api import Logger, config
from pype.api import Logger
class Singleton(type):
""" Signleton implementation
"""
_instances = {}
def __call__(cls, *args, **kwargs):
if cls not in cls._instances:
cls._instances[cls] = super(
Singleton, cls
).__call__(*args, **kwargs)
return cls._instances[cls]
class TimersManager(metaclass=Singleton):
class TimersManager:
""" Handles about Timers.
Should be able to start/stop all timers at once.
@ -41,7 +27,13 @@ class TimersManager(metaclass=Singleton):
self.idle_man = None
self.signal_handler = None
self.tray_init(tray_widget, main_widget)
def tray_init(self, tray_widget, main_widget):
from .widget_user_idle import WidgetUserIdle, SignalHandler
self.widget_user_idle = WidgetUserIdle(self, tray_widget)
self.signal_handler = SignalHandler(self)
def set_signal_times(self):
try:
@ -119,7 +111,6 @@ class TimersManager(metaclass=Singleton):
"""
if 'IdleManager' in modules:
self.signal_handler = SignalHandler(self)
if self.set_signal_times() is True:
self.register_to_idle_manager(modules['IdleManager'])

View file

@ -3,8 +3,6 @@ import json
import getpass
import appdirs
from Qt import QtWidgets
from .widget_user import UserWidget
from pype.api import Logger
@ -24,6 +22,12 @@ class UserModule:
self.cred_path = os.path.normpath(os.path.join(
self.cred_folder_path, self.cred_filename
))
self.widget_login = None
self.tray_init(main_parent, parent)
def tray_init(self, main_parent=None, parent=None):
from .widget_user import UserWidget
self.widget_login = UserWidget(self)
self.load_credentials()
@ -66,6 +70,7 @@ class UserModule:
# Definition of Tray menu
def tray_menu(self, parent_menu):
from Qt import QtWidgets
"""Add menu or action to Tray(or parent)'s menu"""
action = QtWidgets.QAction("Username", parent_menu)
action.triggered.connect(self.show_widget)
@ -121,7 +126,8 @@ class UserModule:
self.cred = {"username": username}
os.environ[self.env_name] = username
self.widget_login.set_user(username)
if self.widget_login:
self.widget_login.set_user(username)
try:
file = open(self.cred_path, "w")
file.write(json.dumps(self.cred))

View file

@ -0,0 +1,64 @@
from pype.api import Logger
from wsrpc_aiohttp import WebSocketRoute
import functools
import avalon.photoshop as photoshop
log = Logger().get_logger("WebsocketServer")
class Photoshop(WebSocketRoute):
"""
One route, mimicking an external application (like Harmony, etc.).
All functions can be called from the client.
The 'do_notify' function calls a function on the client, mimicking a
notification after a long-running job on the server or similar.
"""
instance = None
def init(self, **kwargs):
# Python __init__ must return "self".
# This method might return anything.
log.debug("someone called Photoshop route")
self.instance = self
return kwargs
# server functions
async def ping(self):
log.debug("someone called Photoshop route ping")
# This method calls function on the client side
# client functions
async def read(self):
log.debug("photoshop.read client calls server server calls "
"Photo client")
return await self.socket.call('Photoshop.read')
# panel routes for tools
async def creator_route(self):
self._tool_route("creator")
async def workfiles_route(self):
self._tool_route("workfiles")
async def loader_route(self):
self._tool_route("loader")
async def publish_route(self):
self._tool_route("publish")
async def sceneinventory_route(self):
self._tool_route("sceneinventory")
async def projectmanager_route(self):
self._tool_route("projectmanager")
def _tool_route(self, tool_name):
"""The address accessed when clicking on the buttons."""
partial_method = functools.partial(photoshop.show, tool_name)
photoshop.execute_in_main_thread(partial_method)
# Required return statement.
return "nothing"

View file

@ -0,0 +1,283 @@
from pype.modules.websocket_server import WebSocketServer
"""
Stub handling connection from server to client.
Used anywhere the solution calls client methods.
"""
import json
from collections import namedtuple
class PhotoshopServerStub():
"""
Stub for calling function on client (Photoshop js) side.
Expects that client is already connected (started when avalon menu
is opened).
'self.websocketserver.call' is used as async wrapper
"""
def __init__(self):
self.websocketserver = WebSocketServer.get_instance()
self.client = self.websocketserver.get_client()
def open(self, path):
"""
Open file located at 'path' (local).
:param path: <string> file path locally
:return: None
"""
self.websocketserver.call(self.client.call
('Photoshop.open', path=path)
)
def read(self, layer, layers_meta=None):
"""
Parses layer metadata from Headline field of active document
:param layer: <namedTuple Layer("id":XX, "name":"YYY")
:param layers_meta: full list from Headline (for performance in loops)
:return:
"""
if layers_meta is None:
layers_meta = self.get_layers_metadata()
return layers_meta.get(str(layer.id))
def imprint(self, layer, data, all_layers=None, layers_meta=None):
"""
Save layer metadata to Headline field of active document
:param layer: <namedTuple> Layer("id": XXX, "name":'YYY')
:param data: <string> json representation for single layer
:param all_layers: <list of namedTuples> - for performance, could be
injected for usage in loop, if not, single call will be
triggered
:param layers_meta: <string> json representation from Headline
(for performance - provide only if imprint is in
loop - value should be same)
:return: None
"""
if not layers_meta:
layers_meta = self.get_layers_metadata()
# json.dumps writes integer values in a dictionary to string, so
# anticipating it here.
if str(layer.id) in layers_meta and layers_meta[str(layer.id)]:
layers_meta[str(layer.id)].update(data)
else:
layers_meta[str(layer.id)] = data
# Ensure only valid ids are stored.
if not all_layers:
all_layers = self.get_layers()
layer_ids = [layer.id for layer in all_layers]
cleaned_data = {}
for id in layers_meta:
if int(id) in layer_ids:
cleaned_data[id] = layers_meta[id]
payload = json.dumps(cleaned_data, indent=4)
self.websocketserver.call(self.client.call
('Photoshop.imprint', payload=payload)
)
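A worked example of the merge-and-clean logic in imprint, with made-up ids: metadata keys are layer ids serialized as strings, existing data for a layer is updated rather than replaced, and ids no longer present in the document are dropped.

import json

layers_meta = {"1": {"family": "image"}, "99": {"family": "stale"}}
data = {"subset": "imageMain"}
layer_id = 1
existing_ids = [1, 2]  # ids currently present in the document

key = str(layer_id)
if key in layers_meta and layers_meta[key]:
    layers_meta[key].update(data)
else:
    layers_meta[key] = data

cleaned = {i: v for i, v in layers_meta.items() if int(i) in existing_ids}
print(json.dumps(cleaned, indent=4))  # "99" dropped, "1" merged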
def get_layers(self):
"""
Returns JSON document with all(?) layers in active document.
:return: <list of namedtuples>
Format of tuple: { 'id':'123',
'name': 'My Layer 1',
'type': 'GUIDE'|'FG'|'BG'|'OBJ'
'visible': 'true'|'false'
"""
res = self.websocketserver.call(self.client.call
('Photoshop.get_layers'))
return self._to_records(res)
def get_layers_in_layers(self, layers):
"""
Return all layers that belong to layers (might be groups).
:param layers: <list of namedTuples>
:return: <list of namedTuples>
"""
all_layers = self.get_layers()
ret = []
parent_ids = set([lay.id for lay in layers])
for layer in all_layers:
parents = set(layer.parents)
if len(parent_ids & parents) > 0:
ret.append(layer)
if layer.id in parent_ids:
ret.append(layer)
return ret
def create_group(self, name):
"""
Create new group (eg. LayerSet)
:return: <namedTuple Layer("id":XX, "name":"YYY")>
"""
ret = self.websocketserver.call(self.client.call
('Photoshop.create_group',
name=name))
# create group on PS is asynchronous, returns only id
layer = {"id": ret, "name": name, "group": True}
return namedtuple('Layer', layer.keys())(*layer.values())
def group_selected_layers(self, name):
"""
Group selected layers into new LayerSet (eg. group)
:return: <json representation of Layer>
"""
res = self.websocketserver.call(self.client.call
('Photoshop.group_selected_layers',
name=name)
)
return self._to_records(res)
def get_selected_layers(self):
"""
Get a list of actually selected layers
:return: <list of Layer('id':XX, 'name':"YYY")>
"""
res = self.websocketserver.call(self.client.call
('Photoshop.get_selected_layers'))
return self._to_records(res)
def select_layers(self, layers):
"""
Select specified layers in Photoshop
:param layers: <list of Layer('id':XX, 'name':"YYY")>
:return: None
"""
layer_ids = [layer.id for layer in layers]
self.websocketserver.call(self.client.call
('Photoshop.get_layers',
layers=layer_ids)
)
def get_active_document_full_name(self):
"""
Returns full name with path of active document via ws call
:return: <string> full path with name
"""
res = self.websocketserver.call(
self.client.call('Photoshop.get_active_document_full_name'))
return res
def get_active_document_name(self):
"""
Returns just a name of active document via ws call
:return: <string> file name
"""
res = self.websocketserver.call(self.client.call
('Photoshop.get_active_document_name'))
return res
def is_saved(self):
"""
Returns true if no changes in active document
:return: <boolean>
"""
return self.websocketserver.call(self.client.call
('Photoshop.is_saved'))
def save(self):
"""
Saves active document
:return: None
"""
self.websocketserver.call(self.client.call
('Photoshop.save'))
def saveAs(self, image_path, ext, as_copy):
"""
Saves active document to psd (copy) or png or jpg
:param image_path: <string> full local path
:param ext: <string psd|jpg|png>
:param as_copy: <boolean>
:return: None
"""
self.websocketserver.call(self.client.call
('Photoshop.saveAs',
image_path=image_path,
ext=ext,
as_copy=as_copy))
def set_visible(self, layer_id, visibility):
"""
Set layer with 'layer_id' to 'visibility'
:param layer_id: <int>
:param visibility: <true - set visible, false - hide>
:return: None
"""
self.websocketserver.call(self.client.call
('Photoshop.set_visible',
layer_id=layer_id,
visibility=visibility))
def get_layers_metadata(self):
"""
Reads layers metadata from Headline from active document in PS.
(Headline accessible by File > File Info)
:return: <string> - json documents
"""
layers_data = {}
res = self.websocketserver.call(self.client.call('Photoshop.read'))
try:
layers_data = json.loads(res)
except json.decoder.JSONDecodeError:
pass
return layers_data
def import_smart_object(self, path):
"""
Import the file at `path` as a smart object to active document.
Args:
path (str): File path to import.
"""
res = self.websocketserver.call(self.client.call
('Photoshop.import_smart_object',
path=path))
return self._to_records(res).pop()
def replace_smart_object(self, layer, path):
"""
Replace the smart object `layer` with file at `path`
Args:
layer (namedTuple): Layer("id":XX, "name":"YY"..).
path (str): File to import.
"""
self.websocketserver.call(self.client.call
('Photoshop.replace_smart_object',
layer=layer,
path=path))
def close(self):
self.client.close()
def _to_records(self, res):
"""
Converts string json representation into list of named tuples for
dot notation access to work.
:return: <list of named tuples>
:param res: <string> - json representation
"""
try:
layers_data = json.loads(res)
except json.decoder.JSONDecodeError:
raise ValueError("Received broken JSON {}".format(res))
ret = []
# convert to namedtuple to use dot notation
if isinstance(layers_data, dict):  # TODO refactor
layers_data = [layers_data]
for d in layers_data:
ret.append(namedtuple('Layer', d.keys())(*d.values()))
return ret
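Usage sketch for _to_records: the client answers with a JSON string, and converting each dict into a namedtuple gives the rest of the stub dot access (layer.id, layer.name):

import json
from collections import namedtuple

res = '[{"id": 1, "name": "Layer 1"}, {"id": 2, "name": "Layer 2"}]'
records = [
    namedtuple("Layer", d.keys())(*d.values())
    for d in json.loads(res)
]
print(records[0].name)  # -> Layer 1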

View file

@ -1,4 +1,4 @@
from pype.api import config, Logger
from pype.api import Logger
import threading
from aiohttp import web
@ -9,6 +9,7 @@ import os
import sys
import pyclbr
import importlib
import urllib
log = Logger().get_logger("WebsocketServer")
@ -19,24 +20,24 @@ class WebSocketServer():
Uses class in external_app_1.py to mimic implementation for single
external application.
'test_client' folder contains two test implementations of client
WIP
"""
_instance = None
def __init__(self):
self.qaction = None
self.failed_icon = None
self._is_running = False
default_port = 8099
WebSocketServer._instance = self
self.client = None
self.handlers = {}
try:
self.presets = config.get_presets()["services"]["websocket_server"]
except Exception:
self.presets = {"default_port": default_port, "exclude_ports": []}
log.debug((
"There are not set presets for WebsocketServer."
" Using defaults \"{}\""
).format(str(self.presets)))
port = None
websocket_url = os.getenv("WEBSOCKET_URL")
if websocket_url:
parsed = urllib.parse.urlparse(websocket_url)
port = parsed.port
if not port:
port = 8098 # fallback
self.app = web.Application()
@ -48,7 +49,7 @@ class WebSocketServer():
directories_with_routes = ['hosts']
self.add_routes_for_directories(directories_with_routes)
self.websocket_thread = WebsocketServerThread(self, default_port)
self.websocket_thread = WebsocketServerThread(self, port)
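Behavior sketch of the port resolution added above: the port comes from the WEBSOCKET_URL environment variable when it parses to one, with 8098 as the hard-coded fallback.

import urllib.parse

for url in ("ws://localhost:8099", "ws://localhost", None):
    port = None
    if url:
        port = urllib.parse.urlparse(url).port
    if not port:
        port = 8098  # fallback
    print(url, "->", port)  # 8099, then 8098 twice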
def add_routes_for_directories(self, directories_with_routes):
""" Loops through selected directories to find all modules and
@ -78,6 +79,33 @@ class WebSocketServer():
WebSocketAsync.add_route(class_name, cls)
sys.path.pop()
def call(self, func):
log.debug("websocket.call {}".format(func))
future = asyncio.run_coroutine_threadsafe(func,
self.websocket_thread.loop)
result = future.result()
return result
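What call() does, reduced to a runnable sketch: schedule a coroutine on the server thread's loop from a foreign thread, then block on the resulting concurrent.futures.Future:

import asyncio
import threading

loop = asyncio.new_event_loop()
threading.Thread(target=loop.run_forever, daemon=True).start()

async def answer():
    return 42

future = asyncio.run_coroutine_threadsafe(answer(), loop)
print(future.result())  # blocks until the coroutine finishes -> 42
loop.call_soon_threadsafe(loop.stop)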
def get_client(self):
"""
Return first connected client to WebSocket
TODO implement selection by Route
:return: <WebSocketAsync> client
"""
clients = WebSocketAsync.get_clients()
client = None
if len(clients) > 0:
key = list(clients.keys())[0]
client = clients.get(key)
return client
@staticmethod
def get_instance():
if WebSocketServer._instance is None:
WebSocketServer()
return WebSocketServer._instance
def tray_start(self):
self.websocket_thread.start()
@ -124,6 +152,7 @@ class WebsocketServerThread(threading.Thread):
self.loop = None
self.runner = None
self.site = None
self.tasks = []
def run(self):
self.is_running = True
@ -169,6 +198,12 @@ class WebsocketServerThread(threading.Thread):
periodically.
"""
while self.is_running:
while self.tasks:
task = self.tasks.pop(0)
log.debug("waiting for task {}".format(task))
await task
log.debug("returned value {}".format(task.result))
await asyncio.sleep(0.5)
log.debug("Starting shutdown")

View file

@ -97,6 +97,7 @@ class IntegrateFtrackApi(pyblish.api.InstancePlugin):
except Exception:
tp, value, tb = sys.exc_info()
session.rollback()
session._configure_locations()
six.reraise(tp, value, tb)
def process(self, instance):
@ -178,6 +179,7 @@ class IntegrateFtrackApi(pyblish.api.InstancePlugin):
except Exception:
tp, value, tb = sys.exc_info()
session.rollback()
session._configure_locations()
six.reraise(tp, value, tb)
# Adding metadata
@ -228,6 +230,7 @@ class IntegrateFtrackApi(pyblish.api.InstancePlugin):
except Exception:
tp, value, tb = sys.exc_info()
session.rollback()
session._configure_locations()
six.reraise(tp, value, tb)
# Adding metadata
@ -242,6 +245,7 @@ class IntegrateFtrackApi(pyblish.api.InstancePlugin):
session.commit()
except Exception:
session.rollback()
session._configure_locations()
self.log.warning((
"Comment was not possible to set for AssetVersion"
"\"{0}\". Can't set it's value to: \"{1}\""
@ -258,6 +262,7 @@ class IntegrateFtrackApi(pyblish.api.InstancePlugin):
continue
except Exception:
session.rollback()
session._configure_locations()
self.log.warning((
"Custom Attrubute \"{0}\""
@ -272,6 +277,7 @@ class IntegrateFtrackApi(pyblish.api.InstancePlugin):
except Exception:
tp, value, tb = sys.exc_info()
session.rollback()
session._configure_locations()
six.reraise(tp, value, tb)
# Component
@ -316,6 +322,7 @@ class IntegrateFtrackApi(pyblish.api.InstancePlugin):
except Exception:
tp, value, tb = sys.exc_info()
session.rollback()
session._configure_locations()
six.reraise(tp, value, tb)
# Reset members in memory
@ -432,6 +439,7 @@ class IntegrateFtrackApi(pyblish.api.InstancePlugin):
except Exception:
tp, value, tb = sys.exc_info()
session.rollback()
session._configure_locations()
six.reraise(tp, value, tb)
if assetversion_entity not in used_asset_versions:

View file

@ -145,4 +145,5 @@ class IntegrateFtrackNote(pyblish.api.InstancePlugin):
except Exception:
tp, value, tb = sys.exc_info()
session.rollback()
session._configure_locations()
six.reraise(tp, value, tb)

View file

@ -40,9 +40,11 @@ class IntegrateHierarchyToFtrack(pyblish.api.ContextPlugin):
def process(self, context):
self.context = context
if "hierarchyContext" not in context.data:
if "hierarchyContext" not in self.context.data:
return
hierarchy_context = self.context.data["hierarchyContext"]
self.session = self.context.data["ftrackSession"]
project_name = self.context.data["projectEntity"]["name"]
query = 'Project where full_name is "{}"'.format(project_name)
@ -55,7 +57,7 @@ class IntegrateHierarchyToFtrack(pyblish.api.ContextPlugin):
self.ft_project = None
input_data = context.data["hierarchyContext"]
input_data = hierarchy_context
# temporarily disable ftrack project's auto-syncing
if auto_sync_state:
@ -128,6 +130,7 @@ class IntegrateHierarchyToFtrack(pyblish.api.ContextPlugin):
except Exception:
tp, value, tb = sys.exc_info()
self.session.rollback()
self.session._configure_locations()
six.reraise(tp, value, tb)
# TASKS
@ -156,6 +159,7 @@ class IntegrateHierarchyToFtrack(pyblish.api.ContextPlugin):
except Exception:
tp, value, tb = sys.exc_info()
self.session.rollback()
self.session._configure_locations()
six.reraise(tp, value, tb)
# Incoming links.
@ -165,8 +169,31 @@ class IntegrateHierarchyToFtrack(pyblish.api.ContextPlugin):
except Exception:
tp, value, tb = sys.exc_info()
self.session.rollback()
self.session._configure_locations()
six.reraise(tp, value, tb)
# Create notes.
user = self.session.query(
"User where username is \"{}\"".format(self.session.api_user)
).first()
if user:
for comment in entity_data.get("comments", []):
entity.create_note(comment, user)
else:
self.log.warning(
"Was not able to query current User {}".format(
self.session.api_user
)
)
try:
self.session.commit()
except Exception:
tp, value, tb = sys.exc_info()
self.session.rollback()
self.session._configure_locations()
six.reraise(tp, value, tb)
# Import children.
if 'childs' in entity_data:
self.import_to_ftrack(
entity_data['childs'], entity)
@ -180,6 +207,7 @@ class IntegrateHierarchyToFtrack(pyblish.api.ContextPlugin):
except Exception:
tp, value, tb = sys.exc_info()
self.session.rollback()
self.session._configure_locations()
six.reraise(tp, value, tb)
# Create new links.
@ -221,6 +249,7 @@ class IntegrateHierarchyToFtrack(pyblish.api.ContextPlugin):
except Exception:
tp, value, tb = sys.exc_info()
self.session.rollback()
self.session._configure_locations()
six.reraise(tp, value, tb)
return task
@ -235,6 +264,7 @@ class IntegrateHierarchyToFtrack(pyblish.api.ContextPlugin):
except Exception:
tp, value, tb = sys.exc_info()
self.session.rollback()
self.session._configure_locations()
six.reraise(tp, value, tb)
return entity
@ -249,7 +279,8 @@ class IntegrateHierarchyToFtrack(pyblish.api.ContextPlugin):
except Exception:
tp, value, tb = sys.exc_info()
self.session.rollback()
raise
self.session._configure_locations()
six.reraise(tp, value, tb)
def auto_sync_on(self, project):
@ -262,4 +293,5 @@ class IntegrateHierarchyToFtrack(pyblish.api.ContextPlugin):
except Exception:
tp, value, tb = sys.exc_info()
self.session.rollback()
raise
self.session._configure_locations()
six.reraise(tp, value, tb)

View file

@ -20,8 +20,8 @@ class CopyFile(api.Loader):
def copy_file_to_clipboard(path):
from avalon.vendor.Qt import QtCore, QtWidgets
app = QtWidgets.QApplication.instance()
assert app, "Must have running QApplication instance"
clipboard = QtWidgets.QApplication.clipboard()
assert clipboard, "Must have running QApplication instance"
# Build mime data for clipboard
data = QtCore.QMimeData()
@ -29,5 +29,4 @@ class CopyFile(api.Loader):
data.setUrls([url])
# Set to Clipboard
clipboard = app.clipboard()
clipboard.setMimeData(data)

View file

@ -19,11 +19,10 @@ class CopyFilePath(api.Loader):
@staticmethod
def copy_path_to_clipboard(path):
from avalon.vendor.Qt import QtCore, QtWidgets
from avalon.vendor.Qt import QtWidgets
app = QtWidgets.QApplication.instance()
assert app, "Must have running QApplication instance"
clipboard = QtWidgets.QApplication.clipboard()
assert clipboard, "Must have running QApplication instance"
# Set to Clipboard
clipboard = app.clipboard()
clipboard.setText(os.path.normpath(path))

View file

@ -23,123 +23,256 @@ Provides:
import copy
import json
import collections
from avalon import io
import pyblish.api
class CollectAnatomyInstanceData(pyblish.api.InstancePlugin):
"""Collect Instance specific Anatomy data."""
class CollectAnatomyInstanceData(pyblish.api.ContextPlugin):
"""Collect Instance specific Anatomy data.
Plugin runs for all instances on the context, even inactive ones.
"""
order = pyblish.api.CollectorOrder + 0.49
label = "Collect Anatomy Instance data"
def process(self, instance):
# get all the stuff from the database
anatomy_data = copy.deepcopy(instance.context.data["anatomyData"])
project_entity = instance.context.data["projectEntity"]
context_asset_entity = instance.context.data["assetEntity"]
instance_asset_entity = instance.data.get("assetEntity")
def process(self, context):
self.log.info("Collecting anatomy data for all instances.")
asset_name = instance.data["asset"]
self.fill_missing_asset_docs(context)
self.fill_latest_versions(context)
self.fill_anatomy_data(context)
# There is a possibility that assetEntity on instance is already set
# which can happen in standalone publisher
if (
instance_asset_entity
and instance_asset_entity["name"] == asset_name
):
asset_entity = instance_asset_entity
self.log.info("Anatomy Data collection finished.")
# Check if asset name is the same as what is in context
# - they may be different, e.g. in NukeStudio
elif context_asset_entity["name"] == asset_name:
asset_entity = context_asset_entity
def fill_missing_asset_docs(self, context):
self.log.debug("Qeurying asset documents for instances.")
else:
asset_entity = io.find_one({
"type": "asset",
"name": asset_name,
"parent": project_entity["_id"]
})
context_asset_doc = context.data["assetEntity"]
subset_name = instance.data["subset"]
version_number = instance.data.get("version")
latest_version = None
instances_with_missing_asset_doc = collections.defaultdict(list)
for instance in context:
instance_asset_doc = instance.data.get("assetEntity")
_asset_name = instance.data["asset"]
if asset_entity:
subset_entity = io.find_one({
"type": "subset",
"name": subset_name,
"parent": asset_entity["_id"]
})
# There is a possibility that assetEntity on instance is already set
# which can happen in standalone publisher
if (
instance_asset_doc
and instance_asset_doc["name"] == _asset_name
):
continue
# Check if asset name is the same as what is in context
# - they may be different, e.g. in NukeStudio
if context_asset_doc["name"] == _asset_name:
instance.data["assetEntity"] = context_asset_doc
if subset_entity is None:
self.log.debug("Subset entity does not exist yet.")
else:
version_entity = io.find_one(
{
"type": "version",
"parent": subset_entity["_id"]
},
sort=[("name", -1)]
)
if version_entity:
latest_version = version_entity["name"]
instances_with_missing_asset_doc[_asset_name].append(instance)
# If version is not specified for instance or context
if version_number is None:
# TODO we should be able to change default version by studio
# preferences (like start with version number `0`)
version_number = 1
# use latest version (+1) if already any exist
if latest_version is not None:
version_number += int(latest_version)
if not instances_with_missing_asset_doc:
self.log.debug("All instances already had right asset document.")
return
anatomy_updates = {
"asset": asset_name,
"family": instance.data["family"],
"subset": subset_name,
"version": version_number
asset_names = list(instances_with_missing_asset_doc.keys())
self.log.debug("Querying asset documents with names: {}".format(
", ".join(["\"{}\"".format(name) for name in asset_names])
))
asset_docs = io.find({
"type": "asset",
"name": {"$in": asset_names}
})
asset_docs_by_name = {
asset_doc["name"]: asset_doc
for asset_doc in asset_docs
}
if (
asset_entity
and asset_entity["_id"] != context_asset_entity["_id"]
):
parents = asset_entity["data"].get("parents") or list()
anatomy_updates["hierarchy"] = "/".join(parents)
task_name = instance.data.get("task")
if task_name:
anatomy_updates["task"] = task_name
not_found_asset_names = []
for asset_name, instances in instances_with_missing_asset_doc.items():
asset_doc = asset_docs_by_name.get(asset_name)
if not asset_doc:
not_found_asset_names.append(asset_name)
continue
# Version should not be collected here since it may be instance specific
anatomy_data.update(anatomy_updates)
for _instance in instances:
_instance.data["assetEntity"] = asset_doc
resolution_width = instance.data.get("resolutionWidth")
if resolution_width:
anatomy_data["resolution_width"] = resolution_width
if not_found_asset_names:
joined_asset_names = ", ".join(
["\"{}\"".format(name) for name in not_found_asset_names]
)
self.log.warning((
"Could not find asset documents with names: {}."
).format(joined_asset_names))
resolution_height = instance.data.get("resolutionHeight")
if resolution_height:
anatomy_data["resolution_height"] = resolution_height
def fill_latest_versions(self, context):
"""Try to find latest version for each instance's subset.
pixel_aspect = instance.data.get("pixelAspect")
if pixel_aspect:
anatomy_data["pixel_aspect"] = float("{:0.2f}".format(
float(pixel_aspect)))
Key "latestVersion" is always set to latest version or `None`.
fps = instance.data.get("fps")
if fps:
anatomy_data["fps"] = float("{:0.2f}".format(
float(fps)))
Args:
context (pyblish.Context)
instance.data["projectEntity"] = project_entity
instance.data["assetEntity"] = asset_entity
instance.data["anatomyData"] = anatomy_data
instance.data["latestVersion"] = latest_version
# TODO should be version number set here?
instance.data["version"] = version_number
Returns:
None
self.log.info("Instance anatomy Data collected")
self.log.debug(json.dumps(anatomy_data, indent=4))
"""
self.log.debug("Qeurying latest versions for instances.")
hierarchy = {}
subset_names = set()
asset_ids = set()
for instance in context:
# Make sure `"latestVersion"` key is set
latest_version = instance.data.get("latestVersion")
instance.data["latestVersion"] = latest_version
# Skip instances without "assetEntity"
asset_doc = instance.data.get("assetEntity")
if not asset_doc:
continue
# Store asset ids and subset names for queries
asset_id = asset_doc["_id"]
subset_name = instance.data["subset"]
asset_ids.add(asset_id)
subset_names.add(subset_name)
# Prepare instance hierarchy for faster filling of latest versions
if asset_id not in hierarchy:
hierarchy[asset_id] = {}
if subset_name not in hierarchy[asset_id]:
hierarchy[asset_id][subset_name] = []
hierarchy[asset_id][subset_name].append(instance)
subset_docs = list(io.find({
"type": "subset",
"parent": {"$in": list(asset_ids)},
"name": {"$in": list(subset_names)}
}))
subset_ids = [
subset_doc["_id"]
for subset_doc in subset_docs
]
last_version_by_subset_id = self._query_last_versions(subset_ids)
for subset_doc in subset_docs:
subset_id = subset_doc["_id"]
last_version = last_version_by_subset_id.get(subset_id)
if last_version is None:
continue
asset_id = subset_doc["parent"]
subset_name = subset_doc["name"]
_instances = hierarchy[asset_id][subset_name]
for _instance in _instances:
_instance.data["latestVersion"] = last_version
def _query_last_versions(self, subset_ids):
"""Retrieve all latest versions for entered subset_ids.
Args:
subset_ids (list): List of subset ids with type `ObjectId`.
Returns:
dict: Key is subset id and value is last version name.
"""
_pipeline = [
# Find all versions of those subsets
{"$match": {
"type": "version",
"parent": {"$in": subset_ids}
}},
# Sort all versions together
{"$sort": {"name": 1}},
# Group them by "parent", but only take the last
{"$group": {
"_id": "$parent",
"_version_id": {"$last": "$_id"},
"name": {"$last": "$name"}
}}
]
last_version_by_subset_id = {}
for doc in io.aggregate(_pipeline):
subset_id = doc["_id"]
last_version_by_subset_id[subset_id] = doc["name"]
return last_version_by_subset_id
def fill_anatomy_data(self, context):
self.log.debug("Storing anatomy data to instance data.")
project_doc = context.data["projectEntity"]
context_asset_doc = context.data["assetEntity"]
for instance in context:
version_number = instance.data.get("version")
# If version is not specified for instance or context
if version_number is None:
# TODO we should be able to change default version by studio
# preferences (like start with version number `0`)
version_number = 1
# use latest version (+1) if any already exist
latest_version = instance.data["latestVersion"]
if latest_version is not None:
version_number += int(latest_version)
anatomy_updates = {
"asset": instance.data["asset"],
"family": instance.data["family"],
"subset": instance.data["subset"],
"version": version_number
}
# Hierarchy
asset_doc = instance.data.get("assetEntity")
if asset_doc and asset_doc["_id"] != context_asset_doc["_id"]:
parents = asset_doc["data"].get("parents") or list()
anatomy_updates["hierarchy"] = "/".join(parents)
# Task
task_name = instance.data.get("task")
if task_name:
anatomy_updates["task"] = task_name
# Additional data
resolution_width = instance.data.get("resolutionWidth")
if resolution_width:
anatomy_updates["resolution_width"] = resolution_width
resolution_height = instance.data.get("resolutionHeight")
if resolution_height:
anatomy_updates["resolution_height"] = resolution_height
pixel_aspect = instance.data.get("pixelAspect")
if pixel_aspect:
anatomy_updates["pixel_aspect"] = float(
"{:0.2f}".format(float(pixel_aspect))
)
fps = instance.data.get("fps")
if fps:
anatomy_updates["fps"] = float("{:0.2f}".format(float(fps)))
anatomy_data = copy.deepcopy(context.data["anatomyData"])
anatomy_data.update(anatomy_updates)
# Store anatomy data
instance.data["projectEntity"] = project_doc
instance.data["anatomyData"] = anatomy_data
instance.data["version"] = version_number
# Log collected data
instance_name = instance.data["name"]
instance_label = instance.data.get("label")
if instance_label:
instance_name += "({})".format(instance_label)
self.log.debug("Anatomy data for instance {}: {}".format(
instance_name,
json.dumps(anatomy_data, indent=4)
))

View file

@ -195,11 +195,14 @@ class ExtractBurnin(pype.api.Extractor):
if "delete" in new_repre["tags"]:
new_repre["tags"].remove("delete")
# Update name and outputName to be able to have multiple outputs
# Join previous "outputName" with filename suffix
new_name = "_".join([new_repre["outputName"], filename_suffix])
new_repre["name"] = new_name
new_repre["outputName"] = new_name
if len(repre_burnin_defs.keys()) > 1:
# Update name and outputName to allow
# multiple outputs when there are multiple burnin presets
# Join previous "outputName" with filename suffix
new_name = "_".join(
[new_repre["outputName"], filename_suffix])
new_repre["name"] = new_name
new_repre["outputName"] = new_name
# Prepare paths and files for process.
self.input_output_paths(new_repre, temp_data, filename_suffix)

View file

@ -1,6 +1,6 @@
import pyblish.api
from avalon import io
from copy import deepcopy
class ExtractHierarchyToAvalon(pyblish.api.ContextPlugin):
"""Create entities in Avalon based on collected data."""
@ -10,14 +10,35 @@ class ExtractHierarchyToAvalon(pyblish.api.ContextPlugin):
families = ["clip", "shot"]
def process(self, context):
# processing starts here
if "hierarchyContext" not in context.data:
self.log.info("skipping IntegrateHierarchyToAvalon")
return
hierarchy_context = deepcopy(context.data["hierarchyContext"])
if not io.Session:
io.install()
input_data = context.data["hierarchyContext"]
active_assets = []
# filter only the active publishing instances
for instance in context:
if instance.data.get("publish") is False:
continue
if not instance.data.get("asset"):
continue
active_assets.append(instance.data["asset"])
# remove duplicates from the list
self.active_assets = list(set(active_assets))
self.log.debug("__ self.active_assets: {}".format(self.active_assets))
hierarchy_context = self._get_assets(hierarchy_context)
self.log.debug("__ hierarchy_context: {}".format(hierarchy_context))
input_data = context.data["hierarchyContext"] = hierarchy_context
self.project = None
self.import_to_avalon(input_data)
@ -151,3 +172,24 @@ class ExtractHierarchyToAvalon(pyblish.api.ContextPlugin):
entity_id = io.insert_one(item).inserted_id
return io.find_one({"_id": entity_id})
def _get_assets(self, input_dict):
""" Returns only asset dictionary.
Usually the last part of deep dictionary which
is not having any children
"""
input_dict_copy = deepcopy(input_dict)
for key in input_dict.keys():
self.log.debug("__ key: {}".format(key))
# check if child key is available
if input_dict[key].get("childs"):
# loop deeper
input_dict_copy[key]["childs"] = self._get_assets(
input_dict[key]["childs"])
else:
# filter out unwanted assets
if key not in self.active_assets:
input_dict_copy.pop(key, None)
return input_dict_copy
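To illustrate `_get_assets`, a hypothetical `hierarchyContext` (names invented) filtered when only "sh010" is an active asset:

input:   {"ep01": {"childs": {"sh010": {...}, "sh020": {...}}}}
active:  ["sh010"]
result:  {"ep01": {"childs": {"sh010": {...}}}}

Branches with "childs" are kept and recursed into; leaf keys missing from `self.active_assets` are popped from the copy.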

View file

@ -81,6 +81,11 @@ class ExtractJpegEXR(pyblish.api.InstancePlugin):
jpeg_items.append("-i {}".format(full_input_path))
# output arguments from presets
jpeg_items.extend(ffmpeg_args.get("output") or [])
# If it's a movie file, we just want one frame.
if repre["ext"] == "mov":
jpeg_items.append("-vframes 1")
# output file
jpeg_items.append(full_output_path)
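With the extra flag, the assembled command for a mov representation would look roughly like `ffmpeg -gamma 2.2 -i /path/to/review.mov -vframes 1 /path/to/review_thumb.jpg` (paths and preset args illustrative). `-vframes 1` stops encoding after a single output frame, so the extractor writes one still instead of a jpeg sequence.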

View file

@ -633,6 +633,26 @@ class ExtractReview(pyblish.api.InstancePlugin):
input_width = int(input_data["width"])
input_height = int(input_data["height"])
# Make sure input width and height are not odd numbers
input_width_is_odd = bool(input_width % 2 != 0)
input_height_is_odd = bool(input_height % 2 != 0)
if input_width_is_odd or input_height_is_odd:
# Add padding to input and make sure this filter comes first
filters.append("pad=width=ceil(iw/2)*2:height=ceil(ih/2)*2")
# Update input width and height since the first filter changes them
if input_width_is_odd:
self.log.info((
"Converting input width from odd to even number. {} -> {}"
).format(input_width, input_width + 1))
input_width += 1
if input_height_is_odd:
self.log.info((
"Converting input height from odd to even number. {} -> {}"
).format(input_height, input_height + 1))
input_height += 1
self.log.debug("pixel_aspect: `{}`".format(pixel_aspect))
self.log.debug("input_width: `{}`".format(input_width))
self.log.debug("input_height: `{}`".format(input_height))
@ -654,6 +674,22 @@ class ExtractReview(pyblish.api.InstancePlugin):
output_width = int(output_width)
output_height = int(output_height)
# Make sure output width and height are not odd numbers
# When this can happen:
# - if output definition has width and height set to an odd number
# - `instance.data` contains width or height with an odd number
if output_width % 2 != 0:
self.log.warning((
"Converting output width from odd to even number. {} -> {}"
).format(output_width, output_width + 1))
output_width += 1
if output_height % 2 != 0:
self.log.warning((
"Converting output height from odd to even number. {} -> {}"
).format(output_height, output_height + 1))
output_height += 1
self.log.debug(
"Output resolution is {}x{}".format(output_width, output_height)
)
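The ffmpeg filter `pad=width=ceil(iw/2)*2:height=ceil(ih/2)*2` and the in-Python adjustments perform the same rounding; a minimal sketch of that arithmetic:

import math

def next_even(value):
    # Mirrors ceil(iw/2)*2: odd values round up by one, even values pass through.
    return int(math.ceil(value / 2.0) * 2)

assert next_even(1919) == 1920
assert next_even(1920) == 1920

Even dimensions matter because yuv420p chroma subsampling (used by the h264 output preset later in this diff) requires both sides to be divisible by two.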

View file

@ -682,6 +682,14 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
instance.data.get('subsetGroup')}}
)
# Update families on subset.
families = [instance.data["family"]]
families.extend(instance.data.get("families", []))
io.update_many(
{"type": "subset", "_id": io.ObjectId(subset["_id"])},
{"$set": {"data.families": families}}
)
return subset
def create_version(self, subset, version_number, data=None):

View file

@ -718,7 +718,8 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
"resolutionWidth": data.get("resolutionWidth", 1920),
"resolutionHeight": data.get("resolutionHeight", 1080),
"multipartExr": data.get("multipartExr", False),
"jobBatchName": data.get("jobBatchName", "")
"jobBatchName": data.get("jobBatchName", ""),
"review": data.get("review", True)
}
if "prerender" in instance.data["families"]:

View file

@ -0,0 +1,31 @@
import pyblish.api
import os
class ValidateIntent(pyblish.api.ContextPlugin):
"""Validate intent of the publish.
It is required to fill in the intent of this publish. Check the log
for more details.
"""
order = pyblish.api.ValidatorOrder
label = "Validate Intent"
# TODO: this should be off by default and only activated via config
tasks = ["animation"]
hosts = ["harmony"]
if os.environ.get("AVALON_TASK") not in tasks:
active = False
def process(self, context):
msg = (
"Please make sure that you select the intent of this publish."
)
intent = context.data.get("intent")
self.log.debug(intent)
assert intent, msg
intent_value = intent.get("value")
assert intent_value, msg

View file

@ -10,7 +10,7 @@ class ValidateVersion(pyblish.api.InstancePlugin):
order = pyblish.api.ValidatorOrder
label = "Validate Version"
hosts = ["nuke", "maya", "blender"]
hosts = ["nuke", "maya", "blender", "standalonepublisher"]
def process(self, instance):
version = instance.data.get("version")

View file

@ -1,7 +1,7 @@
from maya import cmds, mel
import pymel.core as pc
from avalon import api
from avalon import api, io
from avalon.maya.pipeline import containerise
from avalon.maya import lib
@ -58,6 +58,13 @@ class AudioLoader(api.Loader):
type="string"
)
# Set frame range.
version = io.find_one({"_id": representation["parent"]})
subset = io.find_one({"_id": version["parent"]})
asset = io.find_one({"_id": subset["parent"]})
audio_node.sourceStart.set(1 - asset["data"]["frameStart"])
audio_node.sourceEnd.set(asset["data"]["frameEnd"])
def switch(self, container, representation):
self.update(container, representation)
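As a worked example of the frame-range arithmetic above (asset values invented): with frameStart 1001 and frameEnd 1100, the loader sets sourceStart to 1 - 1001 = -1000 and sourceEnd to 1100, offsetting the clip so its audio lines up with the asset's frame range rather than starting at frame 1.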

View file

@ -1,7 +1,7 @@
import pymel.core as pc
import maya.cmds as cmds
from avalon import api
from avalon import api, io
from avalon.maya.pipeline import containerise
from avalon.maya import lib
from Qt import QtWidgets
@ -12,7 +12,7 @@ class ImagePlaneLoader(api.Loader):
families = ["plate", "render"]
label = "Create imagePlane on selected camera."
representations = ["mov", "exr", "preview"]
representations = ["mov", "exr", "preview", "png"]
icon = "image"
color = "orange"
@ -29,6 +29,8 @@ class ImagePlaneLoader(api.Loader):
# Getting camera from selection.
selection = pc.ls(selection=True)
camera = None
if len(selection) > 1:
QtWidgets.QMessageBox.critical(
None,
@ -39,25 +41,29 @@ class ImagePlaneLoader(api.Loader):
return
if len(selection) < 1:
QtWidgets.QMessageBox.critical(
result = QtWidgets.QMessageBox.critical(
None,
"Error!",
"No camera selected.",
QtWidgets.QMessageBox.Ok
"No camera selected. Do you want to create a camera?",
QtWidgets.QMessageBox.Ok,
QtWidgets.QMessageBox.Cancel
)
return
relatives = pc.listRelatives(selection[0], shapes=True)
if not pc.ls(relatives, type="camera"):
QtWidgets.QMessageBox.critical(
None,
"Error!",
"Selected node is not a camera.",
QtWidgets.QMessageBox.Ok
)
return
camera = selection[0]
if result == QtWidgets.QMessageBox.Ok:
camera = pc.createNode("camera")
else:
return
else:
relatives = pc.listRelatives(selection[0], shapes=True)
if pc.ls(relatives, type="camera"):
camera = selection[0]
else:
QtWidgets.QMessageBox.critical(
None,
"Error!",
"Selected node is not a camera.",
QtWidgets.QMessageBox.Ok
)
return
try:
camera.displayResolution.set(1)
@ -81,6 +87,7 @@ class ImagePlaneLoader(api.Loader):
image_plane_shape.frameOffset.set(1 - start_frame)
image_plane_shape.frameIn.set(start_frame)
image_plane_shape.frameOut.set(end_frame)
image_plane_shape.frameCache.set(end_frame)
image_plane_shape.useFrameExtension.set(1)
movie_representations = ["mov", "preview"]
@ -140,6 +147,17 @@ class ImagePlaneLoader(api.Loader):
type="string"
)
# Set frame range.
version = io.find_one({"_id": representation["parent"]})
subset = io.find_one({"_id": version["parent"]})
asset = io.find_one({"_id": subset["parent"]})
start_frame = asset["data"]["frameStart"]
end_frame = asset["data"]["frameEnd"]
image_plane_shape.frameOffset.set(1 - start_frame)
image_plane_shape.frameIn.set(start_frame)
image_plane_shape.frameOut.set(end_frame)
image_plane_shape.frameCache.set(end_frame)
def switch(self, container, representation):
self.update(container, representation)

View file

@ -101,7 +101,7 @@ class ExtractCameraMayaScene(pype.api.Extractor):
self.log.info(
"Using {} as scene type".format(self.scene_type))
break
except AttributeError:
except KeyError:
# no preset found
pass
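The `AttributeError` to `KeyError` change across these extractors matches dict-style access to presets; a minimal sketch of the failure mode, with a hypothetical presets dict:

presets = {"maya": {"ExtractMayaSceneRaw": {}}}

try:
    # Missing dict keys raise KeyError, which AttributeError never caught.
    scene_type = presets["maya"]["ExtractMayaSceneRaw"]["scene_type"]
except KeyError:
    scene_type = "mayaAscii"  # fall back to the default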

View file

@ -33,7 +33,7 @@ class ExtractMayaSceneRaw(pype.api.Extractor):
self.log.info(
"Using {} as scene type".format(self.scene_type))
break
except AttributeError:
except KeyError:
# no preset found
pass
# Define extract output file path

View file

@ -41,7 +41,7 @@ class ExtractModel(pype.api.Extractor):
self.log.info(
"Using {} as scene type".format(self.scene_type))
break
except AttributeError:
except KeyError:
# no preset found
pass
# Define extract output file path

View file

@ -111,7 +111,7 @@ class ExtractYetiRig(pype.api.Extractor):
self.log.info(
"Using {} as scene type".format(self.scene_type))
break
except AttributeError:
except KeyError:
# no preset found
pass
yeti_nodes = cmds.ls(instance, type="pgYetiMaya")

View file

@ -15,10 +15,12 @@ class ExtractThumbnail(pype.api.Extractor):
order = pyblish.api.ExtractorOrder + 0.01
label = "Extract Thumbnail"
families = ["review", "render.farm"]
families = ["review"]
hosts = ["nuke"]
def process(self, instance):
if "render.farm" in instance.data["families"]:
return
with anlib.maintained_selection():
self.log.debug("instance: {}".format(instance))

View file

@ -273,8 +273,6 @@ class CollectHierarchyContext(pyblish.api.ContextPlugin):
instance.data["clipOut"] -
instance.data["clipIn"])
self.log.debug(
"__ instance.data[parents]: {}".format(
instance.data["parents"]
@ -319,6 +317,7 @@ class CollectHierarchyContext(pyblish.api.ContextPlugin):
})
in_info['tasks'] = instance.data['tasks']
in_info["comments"] = instance.data.get("comments", [])
parents = instance.data.get('parents', [])
self.log.debug("__ in_info: {}".format(in_info))

View file

@ -17,7 +17,7 @@ class CollectClipTagComments(api.InstancePlugin):
for tag in instance.data["tags"]:
if tag["name"].lower() == "comment":
instance.data["comments"].append(
tag.metadata().dict()["tag.note"]
tag["metadata"]["tag.note"]
)
# Find tags on the source clip.

View file

@ -76,7 +76,7 @@ class ExtractReviewCutUpVideo(pype.api.Extractor):
# check if audio stream is in input video file
ffprob_cmd = (
"{ffprobe_path} -i {full_input_path} -show_streams "
"{ffprobe_path} -i \"{full_input_path}\" -show_streams "
"-select_streams a -loglevel error"
).format(**locals())
self.log.debug("ffprob_cmd: {}".format(ffprob_cmd))
@ -106,7 +106,7 @@ class ExtractReviewCutUpVideo(pype.api.Extractor):
# try to get video native resolution data
try:
resolution_output = pype.api.subprocess((
"{ffprobe_path} -i {full_input_path} -v error "
"{ffprobe_path} -i \"{full_input_path}\" -v error "
"-select_streams v:0 -show_entries "
"stream=width,height -of csv=s=x:p=0"
).format(**locals()))
@ -193,7 +193,7 @@ class ExtractReviewCutUpVideo(pype.api.Extractor):
# append ffmpeg input video clip
input_args.append("-ss {:0.2f}".format(start_sec))
input_args.append("-t {:0.2f}".format(duration_sec))
input_args.append("-i {}".format(full_input_path))
input_args.append("-i \"{}\"".format(full_input_path))
# add copy audio video codec if only shortening clip
if ("_cut-bigger" in tags) and (not empty_add):
@ -203,8 +203,7 @@ class ExtractReviewCutUpVideo(pype.api.Extractor):
output_args.append("-intra")
# output filename
output_args.append("-y")
output_args.append(full_output_path)
output_args.append("-y \"{}\"".format(full_output_path))
mov_args = [
ffmpeg_path,

View file

@ -1,5 +1,6 @@
from avalon import api, photoshop
from avalon import api
from avalon.vendor import Qt
from avalon import photoshop
class CreateImage(api.Creator):
@ -13,11 +14,12 @@ class CreateImage(api.Creator):
groups = []
layers = []
create_group = False
group_constant = photoshop.get_com_objects().constants().psLayerSet
stub = photoshop.stub()
if (self.options or {}).get("useSelection"):
multiple_instances = False
selection = photoshop.get_selected_layers()
selection = stub.get_selected_layers()
self.log.info("selection {}".format(selection))
if len(selection) > 1:
# Ask user whether to create one image or image per selected
# item.
@ -40,19 +42,18 @@ class CreateImage(api.Creator):
if multiple_instances:
for item in selection:
if item.LayerType == group_constant:
if item.group:
groups.append(item)
else:
layers.append(item)
else:
group = photoshop.group_selected_layers()
group.Name = self.name
group = stub.group_selected_layers(self.name)
groups.append(group)
elif len(selection) == 1:
# One selected item. Use group if its a LayerSet (group), else
# create a new group.
if selection[0].LayerType == group_constant:
if selection[0].group:
groups.append(selection[0])
else:
layers.append(selection[0])
@ -63,16 +64,14 @@ class CreateImage(api.Creator):
create_group = True
if create_group:
group = photoshop.app().ActiveDocument.LayerSets.Add()
group.Name = self.name
group = stub.create_group(self.name)
groups.append(group)
for layer in layers:
photoshop.select_layers([layer])
group = photoshop.group_selected_layers()
group.Name = layer.Name
stub.select_layers([layer])
group = stub.group_selected_layers(layer.name)
groups.append(group)
for group in groups:
self.data.update({"subset": "image" + group.Name})
photoshop.imprint(group, self.data)
self.data.update({"subset": "image" + group.name})
stub.imprint(group, self.data)
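All of the direct COM calls in this creator now go through one `photoshop.stub()` object. A rough sketch of the pattern as used above (method names taken from this diff, exact behavior assumed):

from avalon import photoshop

stub = photoshop.stub()

# Group the artist's selection and tag the group with instance metadata.
selection = stub.get_selected_layers()
group = stub.group_selected_layers("imageMain")
stub.imprint(group, {"id": "pyblish.avalon.instance"})

Centralizing the calls in a stub keeps plugins free of COM constants and makes the remote-call surface explicit.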

View file

@ -1,5 +1,7 @@
from avalon import api, photoshop
stub = photoshop.stub()
class ImageLoader(api.Loader):
"""Load images
@ -12,7 +14,7 @@ class ImageLoader(api.Loader):
def load(self, context, name=None, namespace=None, data=None):
with photoshop.maintained_selection():
layer = photoshop.import_smart_object(self.fname)
layer = stub.import_smart_object(self.fname)
self[:] = [layer]
@ -28,11 +30,11 @@ class ImageLoader(api.Loader):
layer = container.pop("layer")
with photoshop.maintained_selection():
photoshop.replace_smart_object(
stub.replace_smart_object(
layer, api.get_representation_path(representation)
)
photoshop.imprint(
stub.imprint(
layer, {"representation": str(representation["_id"])}
)

View file

@ -1,6 +1,7 @@
import os
import pyblish.api
from avalon import photoshop
@ -13,5 +14,5 @@ class CollectCurrentFile(pyblish.api.ContextPlugin):
def process(self, context):
context.data["currentFile"] = os.path.normpath(
photoshop.app().ActiveDocument.FullName
photoshop.stub().get_active_document_full_name()
).replace("\\", "/")

View file

@ -1,9 +1,9 @@
import pythoncom
from avalon import photoshop
import pyblish.api
from avalon import photoshop
class CollectInstances(pyblish.api.ContextPlugin):
"""Gather instances by LayerSet and file metadata
@ -27,8 +27,11 @@ class CollectInstances(pyblish.api.ContextPlugin):
# can be.
pythoncom.CoInitialize()
for layer in photoshop.get_layers_in_document():
layer_data = photoshop.read(layer)
stub = photoshop.stub()
layers = stub.get_layers()
layers_meta = stub.get_layers_metadata()
for layer in layers:
layer_data = stub.read(layer, layers_meta)
# Skip layers without metadata.
if layer_data is None:
@ -38,18 +41,19 @@ class CollectInstances(pyblish.api.ContextPlugin):
if "container" in layer_data["id"]:
continue
child_layers = [*layer.Layers]
if not child_layers:
self.log.info("%s skipped, it was empty." % layer.Name)
continue
# child_layers = [*layer.Layers]
# self.log.debug("child_layers {}".format(child_layers))
# if not child_layers:
# self.log.info("%s skipped, it was empty." % layer.Name)
# continue
instance = context.create_instance(layer.Name)
instance = context.create_instance(layer.name)
instance.append(layer)
instance.data.update(layer_data)
instance.data["families"] = self.families_mapping[
layer_data["family"]
]
instance.data["publish"] = layer.Visible
instance.data["publish"] = layer.visible
# Produce diagnostic message for any graphical
# user interface interested in visualising it.

View file

@ -21,35 +21,37 @@ class ExtractImage(pype.api.Extractor):
self.log.info("Outputting image to {}".format(staging_dir))
# Perform extraction
stub = photoshop.stub()
files = {}
with photoshop.maintained_selection():
self.log.info("Extracting %s" % str(list(instance)))
with photoshop.maintained_visibility():
# Hide all other layers.
extract_ids = [
x.id for x in photoshop.get_layers_in_layers([instance[0]])
]
for layer in photoshop.get_layers_in_document():
if layer.id not in extract_ids:
layer.Visible = False
extract_ids = set(ll.id for ll in stub.get_layers_in_layers([instance[0]]))
save_options = {}
for layer in stub.get_layers():
# limit unnecessary calls to client
if layer.visible and layer.id not in extract_ids:
stub.set_visible(layer.id, False)
if not layer.visible and layer.id in extract_ids:
stub.set_visible(layer.id, True)
save_options = []
if "png" in self.formats:
save_options["png"] = photoshop.com_objects.PNGSaveOptions()
save_options.append('png')
if "jpg" in self.formats:
save_options["jpg"] = photoshop.com_objects.JPEGSaveOptions()
save_options.append('jpg')
file_basename = os.path.splitext(
photoshop.app().ActiveDocument.Name
stub.get_active_document_name()
)[0]
for extension, save_option in save_options.items():
for extension in save_options:
_filename = "{}.{}".format(file_basename, extension)
files[extension] = _filename
full_filename = os.path.join(staging_dir, _filename)
photoshop.app().ActiveDocument.SaveAs(
full_filename, save_option, True
)
stub.saveAs(full_filename, extension, True)
representations = []
for extension, filename in files.items():

View file

@ -13,10 +13,11 @@ class ExtractReview(pype.api.Extractor):
families = ["review"]
def process(self, instance):
staging_dir = self.staging_dir(instance)
self.log.info("Outputting image to {}".format(staging_dir))
stub = photoshop.stub()
layers = []
for image_instance in instance.context:
if image_instance.data["family"] != "image":
@ -25,25 +26,22 @@ class ExtractReview(pype.api.Extractor):
# Perform extraction
output_image = "{}.jpg".format(
os.path.splitext(photoshop.app().ActiveDocument.Name)[0]
os.path.splitext(stub.get_active_document_name())[0]
)
output_image_path = os.path.join(staging_dir, output_image)
with photoshop.maintained_visibility():
# Hide all other layers.
extract_ids = [
x.id for x in photoshop.get_layers_in_layers(layers)
]
for layer in photoshop.get_layers_in_document():
if layer.id in extract_ids:
layer.Visible = True
else:
layer.Visible = False
extract_ids = set(ll.id for ll in stub.get_layers_in_layers(layers))
self.log.info("extract_ids {}".format(extract_ids))
for layer in stub.get_layers():
# limit unnecessary calls to client
if layer.visible and layer.id not in extract_ids:
stub.set_visible(layer.id, False)
if not layer.visible and layer.id in extract_ids:
stub.set_visible(layer.id, True)
photoshop.app().ActiveDocument.SaveAs(
output_image_path,
photoshop.com_objects.JPEGSaveOptions(),
True
)
stub.saveAs(output_image_path, 'jpg', True)
ffmpeg_path = pype.lib.get_ffmpeg_tool_path("ffmpeg")
@ -66,8 +64,6 @@ class ExtractReview(pype.api.Extractor):
]
output = pype.lib._subprocess(args)
self.log.debug(output)
instance.data["representations"].append({
"name": "thumbnail",
"ext": "jpg",
@ -75,7 +71,6 @@ class ExtractReview(pype.api.Extractor):
"stagingDir": staging_dir,
"tags": ["thumbnail"]
})
# Generate mov.
mov_path = os.path.join(staging_dir, "review.mov")
args = [
@ -86,9 +81,7 @@ class ExtractReview(pype.api.Extractor):
mov_path
]
output = pype.lib._subprocess(args)
self.log.debug(output)
instance.data["representations"].append({
"name": "mov",
"ext": "mov",

View file

@ -11,4 +11,4 @@ class ExtractSaveScene(pype.api.Extractor):
families = ["workfile"]
def process(self, instance):
photoshop.app().ActiveDocument.Save()
photoshop.stub().save()

View file

@ -1,6 +1,7 @@
import pyblish.api
from pype.action import get_errored_plugins_from_data
from pype.lib import version_up
from avalon import photoshop
@ -24,6 +25,6 @@ class IncrementWorkfile(pyblish.api.InstancePlugin):
)
scene_path = version_up(instance.context.data["currentFile"])
photoshop.app().ActiveDocument.SaveAs(scene_path)
photoshop.stub().saveAs(scene_path, 'psd', True)
self.log.info("Incremented workfile to: {}".format(scene_path))

View file

@ -23,11 +23,12 @@ class ValidateInstanceAssetRepair(pyblish.api.Action):
# Apply pyblish.logic to get the instances for the plug-in
instances = pyblish.api.instances_by_plugin(failed, plugin)
stub = photoshop.stub()
for instance in instances:
data = photoshop.read(instance[0])
data = stub.read(instance[0])
data["asset"] = os.environ["AVALON_ASSET"]
photoshop.imprint(instance[0], data)
stub.imprint(instance[0], data)
class ValidateInstanceAsset(pyblish.api.InstancePlugin):

View file

@ -21,13 +21,14 @@ class ValidateNamingRepair(pyblish.api.Action):
# Apply pyblish.logic to get the instances for the plug-in
instances = pyblish.api.instances_by_plugin(failed, plugin)
stub = photoshop.stub()
for instance in instances:
self.log.info("validate_naming instance {}".format(instance))
name = instance.data["name"].replace(" ", "_")
instance[0].Name = name
data = photoshop.read(instance[0])
data = stub.read(instance[0])
data["subset"] = "image" + name
photoshop.imprint(instance[0], data)
stub.imprint(instance[0], data)
return True

View file

@ -17,13 +17,13 @@ class CollectClipInstances(pyblish.api.InstancePlugin):
subsets = {
"referenceMain": {
"family": "review",
"families": ["review", "ftrack"],
"families": ["clip", "ftrack"],
# "ftrackFamily": "review",
"extension": ".mp4"
},
"audioMain": {
"family": "audio",
"families": ["ftrack"],
"families": ["clip", "ftrack"],
# "ftrackFamily": "audio",
"extension": ".wav",
# "version": 1

View file

@ -123,7 +123,7 @@ class CollectContextDataSAPublish(pyblish.api.ContextPlugin):
"label": subset,
"name": subset,
"family": in_data["family"],
"version": in_data.get("version", 1),
# "version": in_data.get("version", 1),
"frameStart": in_data.get("representations", [None])[0].get(
"frameStart", None
),

View file

@ -32,7 +32,7 @@ class CollectEditorial(pyblish.api.InstancePlugin):
actions = []
# presets
extensions = [".mov"]
extensions = [".mov", ".mp4"]
def process(self, instance):
# remove context test attribute

View file

@ -0,0 +1,29 @@
"""
Requires:
Nothing
Provides:
Instance
"""
import pyblish.api
from pprint import pformat
class CollectInstanceData(pyblish.api.InstancePlugin):
"""
Collector filling instance `fps` from the asset entity data
for Standalone Publisher instances.
"""
label = "Collect instance data"
order = pyblish.api.CollectorOrder + 0.49
families = ["render", "plate"]
hosts = ["standalonepublisher"]
def process(self, instance):
fps = instance.data["assetEntity"]["data"]["fps"]
instance.data.update({
"fps": fps
})
self.log.debug(f"instance.data: {pformat(instance.data)}")

View file

@ -9,7 +9,7 @@ class CollectPsdInstances(pyblish.api.InstancePlugin):
"""
label = "Collect Psd Instances"
order = pyblish.api.CollectorOrder + 0.492
order = pyblish.api.CollectorOrder + 0.489
hosts = ["standalonepublisher"]
families = ["background_batch"]
@ -34,8 +34,6 @@ class CollectPsdInstances(pyblish.api.InstancePlugin):
context = instance.context
asset_data = instance.data["assetEntity"]
asset_name = instance.data["asset"]
anatomy_data = instance.data["anatomyData"]
for subset_name, subset_data in self.subsets.items():
instance_name = f"{asset_name}_{subset_name}"
task = subset_data.get("task", "background")
@ -55,16 +53,8 @@ class CollectPsdInstances(pyblish.api.InstancePlugin):
new_instance.data["label"] = f"{instance_name}"
new_instance.data["subset"] = subset_name
new_instance.data["task"] = task
# fix anatomy data
anatomy_data_new = copy.deepcopy(anatomy_data)
# updating hierarchy data
anatomy_data_new.update({
"asset": asset_data["name"],
"task": task,
"subset": subset_name
})
new_instance.data["anatomyData"] = anatomy_data_new
if subset_name in self.unchecked_by_default:
new_instance.data["publish"] = False

View file

@ -10,7 +10,7 @@ class ExtractShotData(pype.api.Extractor):
label = "Extract Shot Data"
hosts = ["standalonepublisher"]
families = ["review", "audio"]
families = ["clip"]
# presets

View file

@ -64,6 +64,7 @@ class ExtractThumbnailSP(pyblish.api.InstancePlugin):
else:
# Convert to jpeg if not yet
full_input_path = os.path.join(thumbnail_repre["stagingDir"], file)
full_input_path = '"{}"'.format(full_input_path)
self.log.info("input {}".format(full_input_path))
full_thumbnail_path = tempfile.mkstemp(suffix=".jpg")[1]

View file

@ -1,5 +1,3 @@
import os
import pyblish.api
import pype.api
@ -9,10 +7,14 @@ class ValidateEditorialResources(pyblish.api.InstancePlugin):
label = "Validate Editorial Resources"
hosts = ["standalonepublisher"]
families = ["audio", "review"]
families = ["clip"]
order = pype.api.ValidateContentsOrder
def process(self, instance):
self.log.debug(
f"Instance: {instance}, Families: "
f"{[instance.data['family']] + instance.data['families']}")
check_file = instance.data["editorialVideoPath"]
msg = f"Missing \"{check_file}\"."
assert check_file, msg

Binary file not shown.

Binary file not shown.

View file

@ -15,7 +15,7 @@ ffprobe_path = pype.lib.get_ffmpeg_tool_path("ffprobe")
FFMPEG = (
'{} -loglevel panic -i %(input)s %(filters)s %(args)s%(output)s'
'{} -loglevel panic -i "%(input)s" %(filters)s %(args)s%(output)s'
).format(ffmpeg_path)
FFPROBE = (

View file

@ -0,0 +1,9 @@
from .lib import (
system_settings,
project_settings
)
__all__ = (
"system_settings",
"project_settings"
)

View file

@ -0,0 +1,42 @@
{
"nuke": {
"root": {
"colorManagement": "Nuke",
"OCIO_config": "nuke-default",
"defaultViewerLUT": "Nuke Root LUTs",
"monitorLut": "sRGB",
"int8Lut": "sRGB",
"int16Lut": "sRGB",
"logLut": "Cineon",
"floatLut": "linear"
},
"viewer": {
"viewerProcess": "sRGB"
},
"write": {
"render": {
"colorspace": "linear"
},
"prerender": {
"colorspace": "linear"
},
"still": {
"colorspace": "sRGB"
}
},
"read": {
"[^-a-zA-Z0-9]beauty[^-a-zA-Z0-9]": "linear",
"[^-a-zA-Z0-9](P|N|Z|crypto)[^-a-zA-Z0-9]": "linear",
"[^-a-zA-Z0-9](plateRef)[^-a-zA-Z0-9]": "sRGB"
}
},
"maya": {
},
"houdini": {
},
"resolve": {
}
}
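The keys under "read" are regular expressions matched against footage names to pick a colorspace; a minimal sketch of such a lookup (helper invented for illustration):

import re

read_rules = {
    r"[^-a-zA-Z0-9]beauty[^-a-zA-Z0-9]": "linear",
    r"[^-a-zA-Z0-9](plateRef)[^-a-zA-Z0-9]": "sRGB"
}

def colorspace_for(filename):
    for pattern, colorspace in read_rules.items():
        if re.search(pattern, filename):
            return colorspace
    return None

print(colorspace_for("shot010_beauty.exr"))  # -> linear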

View file

@ -0,0 +1,55 @@
{
"nuke": {
"nodes": {
"connected": true,
"modifymetadata": {
"_id": "connect_metadata",
"_previous": "ENDING",
"metadata.set.pype_studio_name": "{PYPE_STUDIO_NAME}",
"metadata.set.avalon_project_name": "{AVALON_PROJECT}",
"metadata.set.avalon_project_code": "{PYPE_STUDIO_CODE}",
"metadata.set.avalon_asset_name": "{AVALON_ASSET}"
},
"crop": {
"_id": "connect_crop",
"_previous": "connect_metadata",
"box": [
"{metadata.crop.x}",
"{metadata.crop.y}",
"{metadata.crop.right}",
"{metadata.crop.top}"
]
},
"write": {
"render": {
"_id": "output_write",
"_previous": "connect_crop",
"file_type": "exr",
"datatype": "16 bit half",
"compression": "Zip (1 scanline)",
"autocrop": true,
"tile_color": "0xff0000ff",
"channels": "rgb"
},
"prerender": {
"_id": "output_write",
"_previous": "connect_crop",
"file_type": "exr",
"datatype": "16 bit half",
"compression": "Zip (1 scanline)",
"autocrop": false,
"tile_color": "0xc9892aff",
"channels": "rgba"
},
"still": {
"_previous": "connect_crop",
"channels": "rgba",
"file_type": "tiff",
"datatype": "16 bit",
"compression": "LZW",
"tile_color": "0x4145afff"
}
}
}
}
}

View file

@ -0,0 +1,5 @@
{
"windows": "C:/projects",
"linux": "/mnt/share/projects",
"darwin": "/Volumes/path"
}

View file

@ -0,0 +1,30 @@
{
"version_padding": 3,
"version": "v{version:0>{@version_padding}}",
"frame_padding": 4,
"frame": "{frame:0>{@frame_padding}}",
"work": {
"folder": "{root}/{project[name]}/{hierarchy}/{asset}/work/{task}",
"file": "{project[code]}_{asset}_{task}_{@version}<_{comment}>.{ext}",
"path": "{@folder}/{@file}"
},
"render": {
"folder": "{root}/{project[name]}/{hierarchy}/{asset}/publish/render/{subset}/{@version}",
"file": "{project[code]}_{asset}_{subset}_{@version}<_{output}><.{@frame}>.{representation}",
"path": "{@folder}/{@file}"
},
"texture": {
"path": "{root}/{project[name]}/{hierarchy}/{asset}/publish/{family}/{subset}"
},
"publish": {
"folder": "{root}/{project[name]}/{hierarchy}/{asset}/publish/{family}/{subset}/{@version}",
"file": "{project[code]}_{asset}_{subset}_{@version}<_{output}><.{@frame}>.{representation}",
"path": "{@folder}/{@file}",
"thumbnail": "{thumbnail_root}/{project[name]}/{_id}_{thumbnail_type}{ext}"
},
"master": {
"folder": "{root}/{project[name]}/{hierarchy}/{asset}/publish/{family}/{subset}/master",
"file": "{project[code]}_{asset}_{subset}_master<_{output}><.{frame}>.{representation}",
"path": "{@folder}/{@file}"
}
}
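Keys prefixed with `@` reference sibling templates in this file, so "{@version}" expands to "v{version:0>{@version_padding}}" before the remaining fields are filled. A toy resolver showing the idea (not Pype's actual Anatomy implementation):

templates = {
    "version_padding": 3,
    "version": "v{version:0>{@version_padding}}"
}

def resolve(template):
    # Inline "{@key}" references, then let str.format fill the rest.
    while "{@" in template:
        start = template.index("{@")
        end = template.index("}", start)
        key = template[start + 2:end]
        template = template[:start] + str(templates[key]) + template[end + 1:]
    return template

print(resolve(templates["version"]).format(version=5))  # -> v005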

View file

@ -0,0 +1,16 @@
{
"sync_to_avalon": {
"statuses_name_change": ["not ready", "ready"]
},
"status_update": {
"_ignore_": ["in progress", "ommited", "on hold"],
"Ready": ["not ready"],
"In Progress" : ["_any_"]
},
"status_version_to_task": {
"__description__": "Status `from` (key) must be lowered!",
"in progress": "in progress",
"approved": "approved"
}
}
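Read one way, the mapping says which statuses a new status may be applied from: keys are the target status, values list allowed source statuses, with "_any_" as a wildcard and "_ignore_" skipped entirely. A hypothetical sketch of that reading (not the event handler's actual code):

status_update = {
    "_ignore_": ["in progress", "omitted", "on hold"],
    "Ready": ["not ready"],
    "In Progress": ["_any_"]
}

def next_status(current):
    if current in status_update["_ignore_"]:
        return None
    for target, sources in status_update.items():
        if target == "_ignore_":
            continue
        if current in sources or "_any_" in sources:
            return target
    return None

print(next_status("not ready"))  # -> Ready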

View file

@ -0,0 +1,165 @@
[{
"label": "FPS",
"key": "fps",
"type": "number",
"is_hierarchical": true,
"group": "avalon",
"write_security_role": ["ALL"],
"read_security_role": ["ALL"],
"default": null,
"config": {
"isdecimal": true
}
}, {
"label": "Applications",
"key": "applications",
"type": "enumerator",
"entity_type": "show",
"group": "avalon",
"config": {
"multiselect": true,
"data": [
{"blender_2.80": "Blender 2.80"},
{"blender_2.81": "Blender 2.81"},
{"blender_2.82": "Blender 2.82"},
{"blender_2.83": "Blender 2.83"},
{"celaction_local": "CelAction2D Local"},
{"maya_2017": "Maya 2017"},
{"maya_2018": "Maya 2018"},
{"maya_2019": "Maya 2019"},
{"nuke_10.0": "Nuke 10.0"},
{"nuke_11.2": "Nuke 11.2"},
{"nuke_11.3": "Nuke 11.3"},
{"nuke_12.0": "Nuke 12.0"},
{"nukex_10.0": "NukeX 10.0"},
{"nukex_11.2": "NukeX 11.2"},
{"nukex_11.3": "NukeX 11.3"},
{"nukex_12.0": "NukeX 12.0"},
{"nukestudio_10.0": "NukeStudio 10.0"},
{"nukestudio_11.2": "NukeStudio 11.2"},
{"nukestudio_11.3": "NukeStudio 11.3"},
{"nukestudio_12.0": "NukeStudio 12.0"},
{"harmony_17": "Harmony 17"},
{"houdini_16.5": "Houdini 16.5"},
{"houdini_17": "Houdini 17"},
{"houdini_18": "Houdini 18"},
{"photoshop_2020": "Photoshop 2020"},
{"python_3": "Python 3"},
{"python_2": "Python 2"},
{"premiere_2019": "Premiere Pro 2019"},
{"premiere_2020": "Premiere Pro 2020"},
{"resolve_16": "BM DaVinci Resolve 16"}
]
}
}, {
"label": "Avalon auto-sync",
"key": "avalon_auto_sync",
"type": "boolean",
"entity_type": "show",
"group": "avalon",
"write_security_role": ["API", "Administrator"],
"read_security_role": ["API", "Administrator"]
}, {
"label": "Intent",
"key": "intent",
"type": "enumerator",
"entity_type": "assetversion",
"group": "avalon",
"config": {
"multiselect": false,
"data": [
{"test": "Test"},
{"wip": "WIP"},
{"final": "Final"}
]
}
}, {
"label": "Library Project",
"key": "library_project",
"type": "boolean",
"entity_type": "show",
"group": "avalon",
"write_security_role": ["API", "Administrator"],
"read_security_role": ["API", "Administrator"]
}, {
"label": "Clip in",
"key": "clipIn",
"type": "number",
"is_hierarchical": true,
"group": "avalon",
"default": null
}, {
"label": "Clip out",
"key": "clipOut",
"type": "number",
"is_hierarchical": true,
"group": "avalon",
"default": null
}, {
"label": "Frame start",
"key": "frameStart",
"type": "number",
"is_hierarchical": true,
"group": "avalon",
"default": null
}, {
"label": "Frame end",
"key": "frameEnd",
"type": "number",
"is_hierarchical": true,
"group": "avalon",
"default": null
}, {
"label": "Tools",
"key": "tools_env",
"type": "enumerator",
"is_hierarchical": true,
"group": "avalon",
"config": {
"multiselect": true,
"data": [
{"mtoa_3.0.1": "mtoa_3.0.1"},
{"mtoa_3.1.1": "mtoa_3.1.1"},
{"mtoa_3.2.0": "mtoa_3.2.0"},
{"yeti_2.1.2": "yeti_2.1"}
]
}
}, {
"label": "Resolution Width",
"key": "resolutionWidth",
"type": "number",
"is_hierarchical": true,
"group": "avalon",
"default": null
}, {
"label": "Resolution Height",
"key": "resolutionHeight",
"type": "number",
"is_hierarchical": true,
"group": "avalon",
"default": null
}, {
"label": "Pixel aspect",
"key": "pixelAspect",
"type": "number",
"is_hierarchical": true,
"group": "avalon",
"config": {
"isdecimal": true
}
}, {
"label": "Frame handles start",
"key": "handleStart",
"type": "number",
"is_hierarchical": true,
"group": "avalon",
"default": null
}, {
"label": "Frame handles end",
"key": "handleEnd",
"type": "number",
"is_hierarchical": true,
"group": "avalon",
"default": null
}
]

View file

@ -0,0 +1,5 @@
{
"server_url": "",
"api_key": "",
"api_user": ""
}

View file

@ -0,0 +1,5 @@
{
"TestAction": {
"ignore_me": true
}
}

View file

@ -0,0 +1,18 @@
{
"fps": 25,
"frameStart": 1001,
"frameEnd": 1100,
"clipIn": 1001,
"clipOut": 1100,
"handleStart": 10,
"handleEnd": 10,
"resolutionHeight": 1080,
"resolutionWidth": 1920,
"pixelAspect": 1.0,
"applications": [
"maya_2019", "nuke_11.3", "nukex_11.3", "nukestudio_11.3", "deadline"
],
"tools_env": [],
"avalon_auto_sync": true
}

View file

@ -0,0 +1,8 @@
{
"Model": ["model"],
"Render Globals": ["light", "render"],
"Layout": ["layout"],
"Set Dress": ["setdress"],
"Look": ["look"],
"Rig": ["rigging"]
}

View file

@ -0,0 +1,22 @@
{
"__project_root__": {
"prod" : {},
"resources" : {
"footage": {
"plates": {},
"offline": {}
},
"audio": {},
"art_dept": {}
},
"editorial" : {},
"assets[ftrack.Library]": {
"characters[ftrack]": {},
"locations[ftrack]": {}
},
"shots[ftrack.Sequence]": {
"scripts": {},
"editorial[ftrack.Folder]": {}
}
}
}

View file

@ -0,0 +1,8 @@
{
"compositing": ["nuke", "ae"],
"modeling": ["maya", "app2"],
"lookdev": ["substance"],
"animation": [],
"lighting": [],
"rigging": []
}

View file

@ -0,0 +1,7 @@
{
"last_workfile_on_startup": [
{
"enabled": false
}
]
}

View file

@ -0,0 +1,108 @@
{
"Codec": {
"compression": "jpg",
"format": "image",
"quality": 95
},
"Display Options": {
"background": [
0.7137254901960784,
0.7137254901960784,
0.7137254901960784
],
"backgroundBottom": [
0.7137254901960784,
0.7137254901960784,
0.7137254901960784
],
"backgroundTop": [
0.7137254901960784,
0.7137254901960784,
0.7137254901960784
],
"override_display": true
},
"Generic": {
"isolate_view": true,
"off_screen": true
},
"IO": {
"name": "",
"open_finished": false,
"raw_frame_numbers": false,
"recent_playblasts": [],
"save_file": false
},
"PanZoom": {
"pan_zoom": true
},
"Renderer": {
"rendererName": "vp2Renderer"
},
"Resolution": {
"height": 1080,
"mode": "Custom",
"percent": 1.0,
"width": 1920
},
"Time Range": {
"end_frame": 25,
"frame": "",
"start_frame": 0,
"time": "Time Slider"
},
"Viewport Options": {
"cameras": false,
"clipGhosts": false,
"controlVertices": false,
"deformers": false,
"dimensions": false,
"displayLights": 0,
"dynamicConstraints": false,
"dynamics": false,
"fluids": false,
"follicles": false,
"gpuCacheDisplayFilter": false,
"greasePencils": false,
"grid": false,
"hairSystems": false,
"handles": false,
"high_quality": true,
"hud": false,
"hulls": false,
"ikHandles": false,
"imagePlane": false,
"joints": false,
"lights": false,
"locators": false,
"manipulators": false,
"motionTrails": false,
"nCloths": false,
"nParticles": false,
"nRigids": false,
"nurbsCurves": false,
"nurbsSurfaces": false,
"override_viewport_options": true,
"particleInstancers": false,
"pivots": false,
"planes": false,
"pluginShapes": false,
"polymeshes": true,
"shadows": false,
"strokes": false,
"subdivSurfaces": false,
"textures": false,
"twoSidedLighting": true
},
"Camera Options": {
"displayGateMask": false,
"displayResolution": false,
"displayFilmGate": false,
"displayFieldChart": false,
"displaySafeAction": false,
"displaySafeTitle": false,
"displayFilmPivot": false,
"displayFilmOrigin": false,
"overscan": 1.0
}
}

View file

@ -0,0 +1,19 @@
{
"3delight": 41,
"arnold": 46,
"arnold_sf": 57,
"gelato": 30,
"harware": 3,
"krakatoa": 51,
"file_layers": 7,
"mentalray": 2,
"mentalray_sf": 6,
"redshift": 55,
"renderman": 29,
"software": 1,
"software_sf": 5,
"turtle": 10,
"vector": 4,
"vray": 37,
"ffmpeg": 48
}

View file

@ -0,0 +1,11 @@
{
"ExtractCelactionDeadline": {
"enabled": true,
"deadline_department": "",
"deadline_priority": 50,
"deadline_pool": "",
"deadline_pool_secondary": "",
"deadline_group": "",
"deadline_chunk_size": 10
}
}

View file

@ -0,0 +1 @@
{}

View file

@ -0,0 +1,7 @@
{
"IntegrateFtrackNote": {
"enabled": false,
"note_with_intent_template": "{intent}: {comment}",
"note_labels": []
}
}

View file

@ -0,0 +1 @@
{}

View file

@ -0,0 +1,98 @@
{
"IntegrateMasterVersion": {
"enabled": false
},
"ExtractJpegEXR": {
"enabled": true,
"ffmpeg_args": {
"input": [
"-gamma 2.2"
],
"output": []
}
},
"ExtractReview": {
"enabled": true,
"profiles": [
{
"families": [],
"hosts": [],
"outputs": {
"h264": {
"ext": "mp4",
"tags": [
"burnin",
"ftrackreview"
],
"ffmpeg_args": {
"video_filters": [],
"audio_filters": [],
"input": [
"-gamma 2.2"
],
"output": [
"-pix_fmt yuv420p",
"-crf 18",
"-intra"
]
},
"filter": {
"families": [
"render",
"review",
"ftrack"
]
}
}
}
}
]
},
"ExtractBurnin": {
"enabled": false,
"options": {
"font_size": 42,
"opacity": 1,
"bg_opacity": 0,
"x_offset": 5,
"y_offset": 5,
"bg_padding": 5
},
"fields": {},
"profiles": [
{
"burnins": {
"burnin": {
"TOP_LEFT": "{yy}-{mm}-{dd}",
"TOP_RIGHT": "{anatomy[version]}",
"TOP_CENTERED": "",
"BOTTOM_RIGHT": "{frame_start}-{current_frame}-{frame_end}",
"BOTTOM_CENTERED": "{asset}",
"BOTTOM_LEFT": "{username}"
}
}
}
]
},
"IntegrateAssetNew": {
"template_name_profiles": {
"publish": {
"families": [],
"tasks": []
},
"render": {
"families": [
"review",
"render",
"prerender"
]
}
}
},
"ProcessSubmittedJobOnFarm": {
"enabled": false,
"deadline_department": "",
"deadline_pool": "",
"deadline_group": ""
}
}

Some files were not shown because too many files have changed in this diff.