Mirror of https://github.com/ynput/ayon-core.git (synced 2025-12-25 05:14:40 +01:00)

Merge branch '2.x/develop' into feature/686-standalonepublisher-editorial-from-image-sequences

Commit 9cb288a9ab: 61 changed files with 3660 additions and 2708 deletions
@ -1,20 +0,0 @@
pype.aport package
==================

.. automodule:: pype.aport
    :members:
    :undoc-members:
    :show-inheritance:

Submodules
----------

pype.aport.api module
---------------------

.. automodule:: pype.aport.api
    :members:
    :undoc-members:
    :show-inheritance:
@ -11,7 +11,6 @@ Subpackages

.. toctree::

    pype.aport
    pype.avalon_apps
    pype.clockify
    pype.ftrack
@ -39,13 +39,9 @@ from .action import (
from .lib import (
    version_up,
    get_asset,
    get_project,
    get_hierarchy,
    get_subsets,
    get_version_from_path,
    get_last_version_from_path,
    modified_environ,
    add_tool_to_environment,
    source_hash,
    get_latest_version
)
@ -88,14 +84,10 @@ __all__ = [

    # get contextual data
    "version_up",
    "get_project",
    "get_hierarchy",
    "get_asset",
    "get_subsets",
    "get_version_from_path",
    "get_last_version_from_path",
    "modified_environ",
    "add_tool_to_environment",
    "source_hash",

    "subprocess",
@ -2,7 +2,7 @@ import sys

from avalon.vendor.Qt import QtGui
import avalon.fusion

from avalon import io

self = sys.modules[__name__]
self._project = None
@ -59,3 +59,84 @@ def get_additional_data(container):
|
|||
return {"color": QtGui.QColor.fromRgbF(tile_color["R"],
|
||||
tile_color["G"],
|
||||
tile_color["B"])}
|
||||
|
||||
|
||||
def switch_item(container,
|
||||
asset_name=None,
|
||||
subset_name=None,
|
||||
representation_name=None):
|
||||
"""Switch container asset, subset or representation of a container by name.
|
||||
|
||||
It'll always switch to the latest version - of course a different
|
||||
approach could be implemented.
|
||||
|
||||
Args:
|
||||
container (dict): data of the item to switch with
|
||||
asset_name (str): name of the asset
|
||||
subset_name (str): name of the subset
|
||||
representation_name (str): name of the representation
|
||||
|
||||
Returns:
|
||||
dict
|
||||
|
||||
"""
|
||||
|
||||
if all(not x for x in [asset_name, subset_name, representation_name]):
|
||||
raise ValueError("Must have at least one change provided to switch.")
|
||||
|
||||
# Collect any of current asset, subset and representation if not provided
|
||||
# so we can use the original name from those.
|
||||
if any(not x for x in [asset_name, subset_name, representation_name]):
|
||||
_id = io.ObjectId(container["representation"])
|
||||
representation = io.find_one({"type": "representation", "_id": _id})
|
||||
version, subset, asset, project = io.parenthood(representation)
|
||||
|
||||
if asset_name is None:
|
||||
asset_name = asset["name"]
|
||||
|
||||
if subset_name is None:
|
||||
subset_name = subset["name"]
|
||||
|
||||
if representation_name is None:
|
||||
representation_name = representation["name"]
|
||||
|
||||
# Find the new one
|
||||
asset = io.find_one({
|
||||
"name": asset_name,
|
||||
"type": "asset"
|
||||
})
|
||||
assert asset, ("Could not find asset in the database with the name "
|
||||
"'%s'" % asset_name)
|
||||
|
||||
subset = io.find_one({
|
||||
"name": subset_name,
|
||||
"type": "subset",
|
||||
"parent": asset["_id"]
|
||||
})
|
||||
assert subset, ("Could not find subset in the database with the name "
|
||||
"'%s'" % subset_name)
|
||||
|
||||
version = io.find_one(
|
||||
{
|
||||
"type": "version",
|
||||
"parent": subset["_id"]
|
||||
},
|
||||
sort=[('name', -1)]
|
||||
)
|
||||
|
||||
assert version, "Could not find a version for {}.{}".format(
|
||||
asset_name, subset_name
|
||||
)
|
||||
|
||||
representation = io.find_one({
|
||||
"name": representation_name,
|
||||
"type": "representation",
|
||||
"parent": version["_id"]}
|
||||
)
|
||||
|
||||
assert representation, ("Could not find representation in the database "
|
||||
"with the name '%s'" % representation_name)
|
||||
|
||||
avalon.api.switch(container, representation)
|
||||
|
||||
return representation
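A minimal usage sketch (the `avalon.fusion.ls()` call and the asset name below are illustrative assumptions, not part of this change):

    import avalon.fusion
    # Re-target every loaded container to another asset; current subset and
    # representation names are kept and the latest version is picked.
    for container in avalon.fusion.ls():
        switch_item(container, asset_name="hero_02")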
|
||||
|
|
|
|||
|
|
@ -234,7 +234,7 @@ def switch(asset_name, filepath=None, new=True):
|
|||
representations = []
|
||||
for container in containers:
|
||||
try:
|
||||
representation = pype.switch_item(
|
||||
representation = fusion_lib.switch_item(
|
||||
container,
|
||||
asset_name=asset_name)
|
||||
representations.append(representation)
|
||||
|
|
|
|||
|
|
@ -4,6 +4,7 @@ import sys
|
|||
import hiero
|
||||
import pyblish.api
|
||||
import avalon.api as avalon
|
||||
import avalon.io
|
||||
from avalon.vendor.Qt import (QtWidgets, QtGui)
|
||||
import pype.api as pype
|
||||
from pype.api import Logger, Anatomy
|
||||
|
|
@ -58,7 +59,8 @@ def sync_avalon_data_to_workfile():
|
|||
project.setProjectRoot(active_project_root)
|
||||
|
||||
# get project data from avalon db
|
||||
project_data = pype.get_project()["data"]
|
||||
project_doc = avalon.io.find_one({"type": "project"})
|
||||
project_data = project_doc["data"]
|
||||
|
||||
log.debug("project_data: {}".format(project_data))
|
||||
|
||||
|
|
|
|||
|
|
@ -378,14 +378,8 @@ class AExpectedFiles:
|
|||
renderable = False
|
||||
if self.maya_is_true(cmds.getAttr("{}.renderable".format(cam))):
|
||||
renderable = True
|
||||
|
||||
for override in self.get_layer_overrides(
|
||||
"{}.renderable".format(cam), self.layer
|
||||
):
|
||||
renderable = self.maya_is_true(override)
|
||||
|
||||
if renderable:
|
||||
renderable_cameras.append(cam)
|
||||
|
||||
return renderable_cameras
|
||||
|
||||
def maya_is_true(self, attr_val):
|
||||
|
|
|
|||
|
|
@ -8,6 +8,7 @@ import math
|
|||
import bson
|
||||
import json
|
||||
import logging
|
||||
import itertools
|
||||
import contextlib
|
||||
from collections import OrderedDict, defaultdict
|
||||
from math import ceil
|
||||
|
|
@ -122,6 +123,12 @@ def float_round(num, places=0, direction=ceil):
|
|||
return direction(num * (10**places)) / float(10**places)
|
||||
|
||||
|
||||
def pairwise(iterable):
|
||||
"""s -> (s0,s1), (s2,s3), (s4, s5), ..."""
|
||||
a = iter(iterable)
|
||||
return itertools.izip(a, a)
|
||||
|
||||
|
||||
def unique(name):
|
||||
assert isinstance(name, string_types), "`name` must be string"
|
||||
|
||||
|
|
@ -419,12 +426,12 @@ def empty_sets(sets, force=False):
|
|||
plugs=True,
|
||||
connections=True) or []
|
||||
original_connections.extend(connections)
|
||||
for dest, src in lib.pairwise(connections):
|
||||
for dest, src in pairwise(connections):
|
||||
cmds.disconnectAttr(src, dest)
|
||||
yield
|
||||
finally:
|
||||
|
||||
for dest, src in lib.pairwise(original_connections):
|
||||
for dest, src in pairwise(original_connections):
|
||||
cmds.connectAttr(src, dest)
|
||||
|
||||
# Restore original members
|
||||
|
|
@ -1857,8 +1864,8 @@ def set_context_settings():
|
|||
"""
|
||||
|
||||
# Todo (Wijnand): apply renderer and resolution of project
|
||||
|
||||
project_data = lib.get_project()["data"]
|
||||
project_doc = io.find_one({"type": "project"})
|
||||
project_data = project_doc["data"]
|
||||
asset_data = lib.get_asset()["data"]
|
||||
|
||||
# Set project fps
|
||||
|
|
|
|||
|
|
@ -195,7 +195,7 @@ def format_anatomy(data):
|
|||
if not version:
|
||||
file = script_name()
|
||||
data["version"] = pype.get_version_from_path(file)
|
||||
project_document = pype.get_project()
|
||||
project_document = io.find_one({"type": "project"})
|
||||
data.update({
|
||||
"subset": data["avalon"]["subset"],
|
||||
"asset": data["avalon"]["asset"],
|
||||
|
|
|
|||
|
|
@ -2,6 +2,7 @@ import os
|
|||
import logging
|
||||
|
||||
from avalon.tvpaint.communication_server import register_localization_file
|
||||
from avalon.tvpaint import pipeline
|
||||
import avalon.api
|
||||
import pyblish.api
|
||||
from pype import PLUGINS_DIR
|
||||
|
|
@ -13,6 +14,23 @@ LOAD_PATH = os.path.join(PLUGINS_DIR, "tvpaint", "load")
|
|||
CREATE_PATH = os.path.join(PLUGINS_DIR, "tvpaint", "create")
|
||||
|
||||
|
||||
def on_instance_toggle(instance, old_value, new_value):
|
||||
instance_id = instance.data["uuid"]
|
||||
found_idx = None
|
||||
current_instances = pipeline.list_instances()
|
||||
for idx, workfile_instance in enumerate(current_instances):
|
||||
if workfile_instance["uuid"] == instance_id:
|
||||
found_idx = idx
|
||||
break
|
||||
|
||||
if found_idx is None:
|
||||
return
|
||||
|
||||
if "active" in current_instances[found_idx]:
|
||||
current_instances[found_idx]["active"] = new_value
|
||||
pipeline._write_instances(current_instances)
|
||||
|
||||
|
||||
def install():
|
||||
log.info("Pype - Installing TVPaint integration")
|
||||
current_dir = os.path.dirname(os.path.abspath(__file__))
|
||||
|
|
@ -23,6 +41,12 @@ def install():
|
|||
avalon.api.register_plugin_path(avalon.api.Loader, LOAD_PATH)
|
||||
avalon.api.register_plugin_path(avalon.api.Creator, CREATE_PATH)
|
||||
|
||||
registered_callbacks = (
|
||||
pyblish.api.registered_callbacks().get("instanceToggled") or []
|
||||
)
|
||||
if on_instance_toggle not in registered_callbacks:
|
||||
pyblish.api.register_callback("instanceToggled", on_instance_toggle)
|
||||
|
||||
|
||||
def uninstall():
|
||||
log.info("Pype - Uninstalling TVPaint integration")
|
||||
|
|
|
|||
1915  pype/lib.py (file diff suppressed because it is too large)
71    pype/lib/__init__.py (new file)
|
|
@ -0,0 +1,71 @@
|
|||
# -*- coding: utf-8 -*-
|
||||
"""Pype lib module."""
|
||||
|
||||
from .deprecated import (
|
||||
get_avalon_database,
|
||||
set_io_database
|
||||
)
|
||||
|
||||
from .avalon_context import (
|
||||
is_latest,
|
||||
any_outdated,
|
||||
get_asset,
|
||||
get_hierarchy,
|
||||
get_linked_assets,
|
||||
get_latest_version,
|
||||
BuildWorkfile
|
||||
)
|
||||
|
||||
from .hooks import PypeHook, execute_hook
|
||||
|
||||
from .applications import (
|
||||
ApplicationLaunchFailed,
|
||||
launch_application,
|
||||
ApplicationAction,
|
||||
_subprocess
|
||||
)
|
||||
|
||||
from .plugin_tools import filter_pyblish_plugins, source_hash
|
||||
|
||||
from .path_tools import (
|
||||
version_up,
|
||||
get_version_from_path,
|
||||
get_last_version_from_path,
|
||||
get_paths_from_environ,
|
||||
get_ffmpeg_tool_path
|
||||
)
|
||||
|
||||
from .ffmpeg_utils import ffprobe_streams
|
||||
|
||||
__all__ = [
|
||||
"get_avalon_database",
|
||||
"set_io_database",
|
||||
|
||||
"is_latest",
|
||||
"any_outdated",
|
||||
"get_asset",
|
||||
"get_hierarchy",
|
||||
"get_linked_assets",
|
||||
"get_latest_version",
|
||||
"BuildWorkfile",
|
||||
|
||||
"PypeHook",
|
||||
"execute_hook",
|
||||
|
||||
"ApplicationLaunchFailed",
|
||||
"launch_application",
|
||||
"ApplicationAction",
|
||||
|
||||
"filter_pyblish_plugins",
|
||||
|
||||
"version_up",
|
||||
"get_version_from_path",
|
||||
"get_last_version_from_path",
|
||||
"get_paths_from_environ",
|
||||
"get_ffmpeg_tool_path",
|
||||
|
||||
"ffprobe_streams",
|
||||
|
||||
"source_hash",
|
||||
"_subprocess"
|
||||
]
|
||||
457  pype/lib/applications.py (new file)
|
|
@ -0,0 +1,457 @@
|
|||
import os
|
||||
import sys
|
||||
import getpass
|
||||
import copy
|
||||
import platform
|
||||
import logging
|
||||
import subprocess
|
||||
|
||||
import acre
|
||||
|
||||
import avalon.lib
|
||||
|
||||
from ..api import Anatomy, Logger, config
|
||||
from .hooks import execute_hook
|
||||
from .deprecated import get_avalon_database
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class ApplicationLaunchFailed(Exception):
|
||||
pass
|
||||
|
||||
|
||||
def launch_application(project_name, asset_name, task_name, app_name):
|
||||
"""Launch host application with filling required environments.
|
||||
|
||||
TODO(iLLiCiT): This should be split into more parts.
|
||||
"""
|
||||
# `get_avalon_database` is replaced in Pype 3 by `AvalonMongoDB`
|
||||
database = get_avalon_database()
|
||||
project_document = database[project_name].find_one({"type": "project"})
|
||||
asset_document = database[project_name].find_one({
|
||||
"type": "asset",
|
||||
"name": asset_name
|
||||
})
|
||||
|
||||
asset_doc_parents = asset_document["data"].get("parents")
|
||||
hierarchy = "/".join(asset_doc_parents)
|
||||
|
||||
app_def = avalon.lib.get_application(app_name)
|
||||
app_label = app_def.get("ftrack_label", app_def.get("label", app_name))
|
||||
|
||||
host_name = app_def["application_dir"]
|
||||
# Workfile data collection may be special function?
|
||||
data = {
|
||||
"project": {
|
||||
"name": project_document["name"],
|
||||
"code": project_document["data"].get("code")
|
||||
},
|
||||
"task": task_name,
|
||||
"asset": asset_name,
|
||||
"app": host_name,
|
||||
"hierarchy": hierarchy
|
||||
}
|
||||
|
||||
try:
|
||||
anatomy = Anatomy(project_name)
|
||||
anatomy_filled = anatomy.format(data)
|
||||
workdir = os.path.normpath(anatomy_filled["work"]["folder"])
|
||||
|
||||
except Exception as exc:
|
||||
raise ApplicationLaunchFailed(
|
||||
"Error in anatomy.format: {}".format(str(exc))
|
||||
)
|
||||
|
||||
try:
|
||||
os.makedirs(workdir)
|
||||
except FileExistsError:
|
||||
pass
|
||||
|
||||
last_workfile_path = None
|
||||
extensions = avalon.api.HOST_WORKFILE_EXTENSIONS.get(host_name)
|
||||
if extensions:
|
||||
# Find last workfile
|
||||
file_template = anatomy.templates["work"]["file"]
|
||||
data.update({
|
||||
"version": 1,
|
||||
"user": os.environ.get("PYPE_USERNAME") or getpass.getuser(),
|
||||
"ext": extensions[0]
|
||||
})
|
||||
|
||||
last_workfile_path = avalon.api.last_workfile(
|
||||
workdir, file_template, data, extensions, True
|
||||
)
|
||||
|
||||
# set environments for Avalon
|
||||
prep_env = copy.deepcopy(os.environ)
|
||||
prep_env.update({
|
||||
"AVALON_PROJECT": project_name,
|
||||
"AVALON_ASSET": asset_name,
|
||||
"AVALON_TASK": task_name,
|
||||
"AVALON_APP": host_name,
|
||||
"AVALON_APP_NAME": app_name,
|
||||
"AVALON_HIERARCHY": hierarchy,
|
||||
"AVALON_WORKDIR": workdir
|
||||
})
|
||||
|
||||
start_last_workfile = avalon.api.should_start_last_workfile(
|
||||
project_name, host_name, task_name
|
||||
)
|
||||
# Store boolean as "0"(False) or "1"(True)
|
||||
prep_env["AVALON_OPEN_LAST_WORKFILE"] = (
|
||||
str(int(bool(start_last_workfile)))
|
||||
)
|
||||
|
||||
if (
|
||||
start_last_workfile
|
||||
and last_workfile_path
|
||||
and os.path.exists(last_workfile_path)
|
||||
):
|
||||
prep_env["AVALON_LAST_WORKFILE"] = last_workfile_path
|
||||
|
||||
prep_env.update(anatomy.roots_obj.root_environments())
|
||||
|
||||
# collect all the 'environment' attributes from parents
|
||||
tools_attr = [prep_env["AVALON_APP"], prep_env["AVALON_APP_NAME"]]
|
||||
tools_env = asset_document["data"].get("tools_env") or []
|
||||
tools_attr.extend(tools_env)
|
||||
|
||||
tools_env = acre.get_tools(tools_attr)
|
||||
env = acre.compute(tools_env)
|
||||
env = acre.merge(env, current_env=dict(prep_env))
|
||||
|
||||
# Get path to execute
|
||||
st_temp_path = os.environ["PYPE_CONFIG"]
|
||||
os_plat = platform.system().lower()
|
||||
|
||||
# Path to folder with launchers
|
||||
path = os.path.join(st_temp_path, "launchers", os_plat)
|
||||
|
||||
# Full path to executable launcher
|
||||
execfile = None
|
||||
|
||||
launch_hook = app_def.get("launch_hook")
|
||||
if launch_hook:
|
||||
log.info("launching hook: {}".format(launch_hook))
|
||||
ret_val = execute_hook(launch_hook, env=env)
|
||||
if not ret_val:
|
||||
raise ApplicationLaunchFailed(
|
||||
"Hook didn't finish successfully {}".format(app_label)
|
||||
)
|
||||
|
||||
if sys.platform == "win32":
|
||||
for ext in os.environ["PATHEXT"].split(os.pathsep):
|
||||
fpath = os.path.join(path.strip('"'), app_def["executable"] + ext)
|
||||
if os.path.isfile(fpath) and os.access(fpath, os.X_OK):
|
||||
execfile = fpath
|
||||
break
|
||||
|
||||
# Run the application if an executable was found
|
||||
if execfile is None:
|
||||
raise ApplicationLaunchFailed(
|
||||
"We didn't find launcher for {}".format(app_label)
|
||||
)
|
||||
|
||||
popen = avalon.lib.launch(
|
||||
executable=execfile, args=[], environment=env
|
||||
)
|
||||
|
||||
elif (
|
||||
sys.platform.startswith("linux")
|
||||
or sys.platform.startswith("darwin")
|
||||
):
|
||||
execfile = os.path.join(path.strip('"'), app_def["executable"])
|
||||
# Run the application if an executable was found
|
||||
if execfile is None:
|
||||
raise ApplicationLaunchFailed(
|
||||
"We didn't find launcher for {}".format(app_label)
|
||||
)
|
||||
|
||||
if not os.path.isfile(execfile):
|
||||
raise ApplicationLaunchFailed(
|
||||
"Launcher doesn't exist - {}".format(execfile)
|
||||
)
|
||||
|
||||
try:
|
||||
fp = open(execfile)
|
||||
except PermissionError as perm_exc:
|
||||
raise ApplicationLaunchFailed(
|
||||
"Access denied on launcher {} - {}".format(execfile, perm_exc)
|
||||
)
|
||||
|
||||
fp.close()
|
||||
# check executable permission
|
||||
if not os.access(execfile, os.X_OK):
|
||||
raise ApplicationLaunchFailed(
|
||||
"No executable permission - {}".format(execfile)
|
||||
)
|
||||
|
||||
popen = avalon.lib.launch( # noqa: F841
|
||||
"/usr/bin/env", args=["bash", execfile], environment=env
|
||||
)
|
||||
return popen
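A hypothetical call, assuming the project, asset and task exist in the Avalon database and "maya_2020" matches an application definition returned by `avalon.lib.get_application`:

    try:
        launch_application("MyProject", "sh010", "animation", "maya_2020")
    except ApplicationLaunchFailed as error:
        log.error(error)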
|
||||
|
||||
|
||||
class ApplicationAction(avalon.api.Action):
|
||||
"""Default application launcher
|
||||
|
||||
This is a convenience application Action that when "config" refers to a
|
||||
parsed application `.toml` this can launch the application.
|
||||
|
||||
"""
|
||||
_log = None
|
||||
config = None
|
||||
group = None
|
||||
variant = None
|
||||
required_session_keys = (
|
||||
"AVALON_PROJECT",
|
||||
"AVALON_ASSET",
|
||||
"AVALON_TASK"
|
||||
)
|
||||
|
||||
@property
|
||||
def log(self):
|
||||
if self._log is None:
|
||||
self._log = Logger().get_logger(self.__class__.__name__)
|
||||
return self._log
|
||||
|
||||
def is_compatible(self, session):
|
||||
for key in self.required_session_keys:
|
||||
if key not in session:
|
||||
return False
|
||||
return True
|
||||
|
||||
def process(self, session, **kwargs):
|
||||
"""Process the full Application action"""
|
||||
|
||||
project_name = session["AVALON_PROJECT"]
|
||||
asset_name = session["AVALON_ASSET"]
|
||||
task_name = session["AVALON_TASK"]
|
||||
launch_application(
|
||||
project_name, asset_name, task_name, self.name
|
||||
)
|
||||
|
||||
self._ftrack_after_launch_procedure(
|
||||
project_name, asset_name, task_name
|
||||
)
|
||||
|
||||
def _ftrack_after_launch_procedure(
|
||||
self, project_name, asset_name, task_name
|
||||
):
|
||||
# TODO move to launch hook
|
||||
required_keys = ("FTRACK_SERVER", "FTRACK_API_USER", "FTRACK_API_KEY")
|
||||
for key in required_keys:
|
||||
if not os.environ.get(key):
|
||||
self.log.debug((
|
||||
"Missing required environment \"{}\""
|
||||
" for Ftrack after launch procedure."
|
||||
).format(key))
|
||||
return
|
||||
|
||||
try:
|
||||
import ftrack_api
|
||||
session = ftrack_api.Session(auto_connect_event_hub=True)
|
||||
self.log.debug("Ftrack session created")
|
||||
except Exception:
|
||||
self.log.warning("Couldn't create Ftrack session")
|
||||
return
|
||||
|
||||
try:
|
||||
entity = self._find_ftrack_task_entity(
|
||||
session, project_name, asset_name, task_name
|
||||
)
|
||||
self._ftrack_status_change(session, entity, project_name)
|
||||
self._start_timer(session, entity, ftrack_api)
|
||||
except Exception:
|
||||
self.log.warning(
|
||||
"Couldn't finish Ftrack procedure.", exc_info=True
|
||||
)
|
||||
return
|
||||
|
||||
finally:
|
||||
session.close()
|
||||
|
||||
def _find_ftrack_task_entity(
|
||||
self, session, project_name, asset_name, task_name
|
||||
):
|
||||
project_entity = session.query(
|
||||
"Project where full_name is \"{}\"".format(project_name)
|
||||
).first()
|
||||
if not project_entity:
|
||||
self.log.warning(
|
||||
"Couldn't find project \"{}\" in Ftrack.".format(project_name)
|
||||
)
|
||||
return
|
||||
|
||||
potential_task_entities = session.query((
|
||||
"TypedContext where parent.name is \"{}\" and project_id is \"{}\""
|
||||
).format(asset_name, project_entity["id"])).all()
|
||||
filtered_entities = []
|
||||
for _entity in potential_task_entities:
|
||||
if (
|
||||
_entity.entity_type.lower() == "task"
|
||||
and _entity["name"] == task_name
|
||||
):
|
||||
filtered_entities.append(_entity)
|
||||
|
||||
if not filtered_entities:
|
||||
self.log.warning((
|
||||
"Couldn't find task \"{}\" under parent \"{}\" in Ftrack."
|
||||
).format(task_name, asset_name))
|
||||
return
|
||||
|
||||
if len(filtered_entities) > 1:
|
||||
self.log.warning((
|
||||
"Found more than one task \"{}\""
|
||||
" under parent \"{}\" in Ftrack."
|
||||
).format(task_name, asset_name))
|
||||
return
|
||||
|
||||
return filtered_entities[0]
|
||||
|
||||
def _ftrack_status_change(self, session, entity, project_name):
|
||||
presets = config.get_presets(project_name)["ftrack"]["ftrack_config"]
|
||||
statuses = presets.get("status_update")
|
||||
if not statuses:
|
||||
return
|
||||
|
||||
actual_status = entity["status"]["name"].lower()
|
||||
already_tested = set()
|
||||
ent_path = "/".join(
|
||||
[ent["name"] for ent in entity["link"]]
|
||||
)
|
||||
while True:
|
||||
next_status_name = None
|
||||
for key, value in statuses.items():
|
||||
if key in already_tested:
|
||||
continue
|
||||
if actual_status in value or "_any_" in value:
|
||||
if key != "_ignore_":
|
||||
next_status_name = key
|
||||
already_tested.add(key)
|
||||
break
|
||||
already_tested.add(key)
|
||||
|
||||
if next_status_name is None:
|
||||
break
|
||||
|
||||
try:
|
||||
query = "Status where name is \"{}\"".format(
|
||||
next_status_name
|
||||
)
|
||||
status = session.query(query).one()
|
||||
|
||||
entity["status"] = status
|
||||
session.commit()
|
||||
self.log.debug("Changing status to \"{}\" <{}>".format(
|
||||
next_status_name, ent_path
|
||||
))
|
||||
break
|
||||
|
||||
except Exception:
|
||||
session.rollback()
|
||||
msg = (
|
||||
"Status \"{}\" in presets wasn't found"
|
||||
" on Ftrack entity type \"{}\""
|
||||
).format(next_status_name, entity.entity_type)
|
||||
self.log.warning(msg)
|
||||
|
||||
def _start_timer(self, session, entity, _ftrack_api):
|
||||
self.log.debug("Triggering timer start.")
|
||||
|
||||
user_entity = session.query("User where username is \"{}\"".format(
|
||||
os.environ["FTRACK_API_USER"]
|
||||
)).first()
|
||||
if not user_entity:
|
||||
self.log.warning(
|
||||
"Couldn't find user with username \"{}\" in Ftrack".format(
|
||||
os.environ["FTRACK_API_USER"]
|
||||
)
|
||||
)
|
||||
return
|
||||
|
||||
source = {
|
||||
"user": {
|
||||
"id": user_entity["id"],
|
||||
"username": user_entity["username"]
|
||||
}
|
||||
}
|
||||
event_data = {
|
||||
"actionIdentifier": "start.timer",
|
||||
"selection": [{"entityId": entity["id"], "entityType": "task"}]
|
||||
}
|
||||
session.event_hub.publish(
|
||||
_ftrack_api.event.base.Event(
|
||||
topic="ftrack.action.launch",
|
||||
data=event_data,
|
||||
source=source
|
||||
),
|
||||
on_error="ignore"
|
||||
)
|
||||
self.log.debug("Timer start triggered successfully.")
|
||||
|
||||
|
||||
# Special naming case for subprocess since it's a built-in module.
|
||||
def _subprocess(*args, **kwargs):
|
||||
"""Convenience method for getting output errors for subprocess.
|
||||
|
||||
Entered arguments and keyword arguments are passed to subprocess Popen.
|
||||
|
||||
Args:
|
||||
*args: Variable length argument list passed to Popen.
|
||||
**kwargs: Arbitrary keyword arguments passed to Popen. It is possible to
pass a `logging.Logger` object under "logger" to use a logger
different from lib's default.
|
||||
|
||||
Returns:
|
||||
str: Full output of subprocess concatenated stdout and stderr.
|
||||
|
||||
Raises:
|
||||
RuntimeError: Exception is raised if process finished with nonzero
|
||||
return code.
|
||||
"""
|
||||
|
||||
# Get environments from kwargs or use current process environments if they
# were not passed.
|
||||
env = kwargs.get("env") or os.environ
|
||||
# Make sure environment contains only strings
|
||||
filtered_env = {k: str(v) for k, v in env.items()}
|
||||
|
||||
# Use lib's logger if was not passed with kwargs.
|
||||
logger = kwargs.pop("logger", log)
|
||||
|
||||
# set overrides
|
||||
kwargs['stdout'] = kwargs.get('stdout', subprocess.PIPE)
|
||||
kwargs['stderr'] = kwargs.get('stderr', subprocess.PIPE)
|
||||
kwargs['stdin'] = kwargs.get('stdin', subprocess.PIPE)
|
||||
kwargs['env'] = filtered_env
|
||||
|
||||
proc = subprocess.Popen(*args, **kwargs)
|
||||
|
||||
full_output = ""
|
||||
_stdout, _stderr = proc.communicate()
|
||||
if _stdout:
|
||||
_stdout = _stdout.decode("utf-8")
|
||||
full_output += _stdout
|
||||
logger.debug(_stdout)
|
||||
|
||||
if _stderr:
|
||||
_stderr = _stderr.decode("utf-8")
|
||||
# Add additional line break if output already contains stdout
|
||||
if full_output:
|
||||
full_output += "\n"
|
||||
full_output += _stderr
|
||||
logger.warning(_stderr)
|
||||
|
||||
if proc.returncode != 0:
|
||||
exc_msg = "Executing arguments was not successful: \"{}\"".format(args)
|
||||
if _stdout:
|
||||
exc_msg += "\n\nOutput:\n{}".format(_stdout)
|
||||
|
||||
if _stderr:
|
||||
exc_msg += "Error:\n{}".format(_stderr)
|
||||
|
||||
raise RuntimeError(exc_msg)
|
||||
|
||||
return full_output
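A usage sketch; the command and file paths are illustrative only:

    args = ["ffmpeg", "-i", "input.mov", "output.mp4"]
    # Raises RuntimeError with the collected stdout/stderr on a nonzero return code.
    output = _subprocess(args)
    log.debug(output)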
|
||||
870  pype/lib/avalon_context.py (new file)
|
|
@ -0,0 +1,870 @@
|
|||
import os
|
||||
import json
|
||||
import re
|
||||
import logging
|
||||
import collections
|
||||
|
||||
from avalon import io, pipeline
|
||||
from ..api import config
|
||||
import avalon.api
|
||||
|
||||
log = logging.getLogger("AvalonContext")
|
||||
|
||||
|
||||
def is_latest(representation):
|
||||
"""Return whether the representation is from latest version
|
||||
|
||||
Args:
|
||||
representation (dict): The representation document from the database.
|
||||
|
||||
Returns:
|
||||
bool: Whether the representation is of latest version.
|
||||
|
||||
"""
|
||||
|
||||
version = io.find_one({"_id": representation['parent']})
|
||||
if version["type"] == "master_version":
|
||||
return True
|
||||
|
||||
# Get highest version under the parent
|
||||
highest_version = io.find_one({
|
||||
"type": "version",
|
||||
"parent": version["parent"]
|
||||
}, sort=[("name", -1)], projection={"name": True})
|
||||
|
||||
if version['name'] == highest_version['name']:
|
||||
return True
|
||||
else:
|
||||
return False
|
||||
|
||||
|
||||
def any_outdated():
|
||||
"""Return whether the current scene has any outdated content"""
|
||||
|
||||
checked = set()
|
||||
host = avalon.api.registered_host()
|
||||
for container in host.ls():
|
||||
representation = container['representation']
|
||||
if representation in checked:
|
||||
continue
|
||||
|
||||
representation_doc = io.find_one(
|
||||
{
|
||||
"_id": io.ObjectId(representation),
|
||||
"type": "representation"
|
||||
},
|
||||
projection={"parent": True}
|
||||
)
|
||||
if representation_doc and not is_latest(representation_doc):
|
||||
return True
|
||||
elif not representation_doc:
|
||||
log.debug("Container '{objectName}' has an invalid "
|
||||
"representation, it is missing in the "
|
||||
"database".format(**container))
|
||||
|
||||
checked.add(representation)
|
||||
return False
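For example, a host integration could warn the artist right after a workfile is opened (a sketch, assuming a host is already registered):

    if any_outdated():
        log.warning("Scene contains outdated loaded content.")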
|
||||
|
||||
|
||||
def get_asset(asset_name=None):
|
||||
""" Returning asset document from database by its name.
|
||||
|
||||
Doesn't count with duplicities on asset names!
|
||||
|
||||
Args:
|
||||
asset_name (str)
|
||||
|
||||
Returns:
|
||||
(MongoDB document)
|
||||
"""
|
||||
if not asset_name:
|
||||
asset_name = avalon.api.Session["AVALON_ASSET"]
|
||||
|
||||
asset_document = io.find_one({
|
||||
"name": asset_name,
|
||||
"type": "asset"
|
||||
})
|
||||
|
||||
if not asset_document:
|
||||
raise TypeError("Entity \"{}\" was not found in DB".format(asset_name))
|
||||
|
||||
return asset_document
|
||||
|
||||
|
||||
def get_hierarchy(asset_name=None):
|
||||
"""
|
||||
Obtain asset hierarchy path string from mongo db
|
||||
|
||||
Args:
|
||||
asset_name (str)
|
||||
|
||||
Returns:
|
||||
(string): asset hierarchy path
|
||||
|
||||
"""
|
||||
if not asset_name:
|
||||
asset_name = io.Session.get("AVALON_ASSET", os.environ["AVALON_ASSET"])
|
||||
|
||||
asset_entity = io.find_one({
|
||||
"type": 'asset',
|
||||
"name": asset_name
|
||||
})
|
||||
|
||||
not_set = "PARENTS_NOT_SET"
|
||||
entity_parents = asset_entity.get("data", {}).get("parents", not_set)
|
||||
|
||||
# If entity already have parents then just return joined
|
||||
if entity_parents != not_set:
|
||||
return "/".join(entity_parents)
|
||||
|
||||
# Else query parents through visualParents and store result to entity
|
||||
hierarchy_items = []
|
||||
entity = asset_entity
|
||||
while True:
|
||||
parent_id = entity.get("data", {}).get("visualParent")
|
||||
if not parent_id:
|
||||
break
|
||||
entity = io.find_one({"_id": parent_id})
|
||||
hierarchy_items.append(entity["name"])
|
||||
|
||||
# Add parents to entity data for next query
|
||||
entity_data = asset_entity.get("data", {})
|
||||
entity_data["parents"] = hierarchy_items
|
||||
io.update_many(
|
||||
{"_id": asset_entity["_id"]},
|
||||
{"$set": {"data": entity_data}}
|
||||
)
|
||||
|
||||
return "/".join(hierarchy_items)
|
||||
|
||||
|
||||
def get_linked_assets(asset_entity):
|
||||
"""Return linked assets for `asset_entity` from DB
|
||||
|
||||
Args:
|
||||
asset_entity (dict): asset document from DB
|
||||
|
||||
Returns:
|
||||
(list) of MongoDB documents
|
||||
"""
|
||||
inputs = asset_entity["data"].get("inputs", [])
|
||||
inputs = [io.find_one({"_id": x}) for x in inputs]
|
||||
return inputs
|
||||
|
||||
|
||||
def get_latest_version(asset_name, subset_name, dbcon=None, project_name=None):
|
||||
"""Retrieve latest version from `asset_name`, and `subset_name`.
|
||||
|
||||
Do not use this if you want to query more than 5 latest versions, as this
method makes 3 MongoDB queries per call. For those cases it is better to
use a more efficient approach, e.g. aggregations.
|
||||
|
||||
Args:
|
||||
asset_name (str): Name of asset.
|
||||
subset_name (str): Name of subset.
|
||||
dbcon (avalon.mongodb.AvalonMongoDB, optional): Avalon Mongo connection
|
||||
with Session.
|
||||
project_name (str, optional): Find latest version in specific project.
|
||||
|
||||
Returns:
|
||||
None: If asset, subset or version were not found.
|
||||
dict: Last version document for the entered asset and subset.
|
||||
"""
|
||||
|
||||
if not dbcon:
|
||||
log.debug("Using `avalon.io` for query.")
|
||||
dbcon = io
|
||||
# Make sure is installed
|
||||
io.install()
|
||||
|
||||
if project_name and project_name != dbcon.Session.get("AVALON_PROJECT"):
|
||||
# `avalon.io` has only `_database` attribute
|
||||
# but `AvalonMongoDB` has `database`
|
||||
database = getattr(dbcon, "database", dbcon._database)
|
||||
collection = database[project_name]
|
||||
else:
|
||||
project_name = dbcon.Session.get("AVALON_PROJECT")
|
||||
collection = dbcon
|
||||
|
||||
log.debug((
|
||||
"Getting latest version for Project: \"{}\" Asset: \"{}\""
|
||||
" and Subset: \"{}\""
|
||||
).format(project_name, asset_name, subset_name))
|
||||
|
||||
# Query asset document id by asset name
|
||||
asset_doc = collection.find_one(
|
||||
{"type": "asset", "name": asset_name},
|
||||
{"_id": True}
|
||||
)
|
||||
if not asset_doc:
|
||||
log.info(
|
||||
"Asset \"{}\" was not found in Database.".format(asset_name)
|
||||
)
|
||||
return None
|
||||
|
||||
subset_doc = collection.find_one(
|
||||
{"type": "subset", "name": subset_name, "parent": asset_doc["_id"]},
|
||||
{"_id": True}
|
||||
)
|
||||
if not subset_doc:
|
||||
log.info(
|
||||
"Subset \"{}\" was not found in Database.".format(subset_name)
|
||||
)
|
||||
return None
|
||||
|
||||
version_doc = collection.find_one(
|
||||
{"type": "version", "parent": subset_doc["_id"]},
|
||||
sort=[("name", -1)],
|
||||
)
|
||||
if not version_doc:
|
||||
log.info(
|
||||
"Subset \"{}\" does not have any version yet.".format(subset_name)
|
||||
)
|
||||
return None
|
||||
return version_doc
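Usage sketch with illustrative asset and subset names:

    version_doc = get_latest_version("sh010", "renderCompositingMain")
    if version_doc:
        log.info("Latest version: {}".format(version_doc["name"]))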
|
||||
|
||||
|
||||
class BuildWorkfile:
|
||||
"""Wrapper for build workfile process.
|
||||
|
||||
Load representations for current context by build presets. Build presets
|
||||
are host related, since each host has its own loaders.
|
||||
"""
|
||||
|
||||
log = logging.getLogger("BuildWorkfile")
|
||||
|
||||
@staticmethod
|
||||
def map_subsets_by_family(subsets):
|
||||
subsets_by_family = collections.defaultdict(list)
|
||||
for subset in subsets:
|
||||
family = subset["data"].get("family")
|
||||
if not family:
|
||||
families = subset["data"].get("families")
|
||||
if not families:
|
||||
continue
|
||||
family = families[0]
|
||||
|
||||
subsets_by_family[family].append(subset)
|
||||
return subsets_by_family
|
||||
|
||||
def process(self):
|
||||
"""Main method of this wrapper.
|
||||
|
||||
Building of the workfile is triggered here; post-processing of loaded
containers can be implemented if necessary.
|
||||
"""
|
||||
containers = self.build_workfile()
|
||||
|
||||
return containers
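This is typically triggered from a host menu action once the Avalon session context (project, asset, task) is set, e.g.:

    loaded_containers = BuildWorkfile().process()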
|
||||
|
||||
def build_workfile(self):
|
||||
"""Prepares and load containers into workfile.
|
||||
|
||||
Loads latest versions of current and linked assets to workfile by logic
|
||||
stored in Workfile profiles from presets. Profiles are set by host,
|
||||
filtered by current task name and used by families.
|
||||
|
||||
Each family can specify representation names and loaders for
|
||||
representations and first available and successful loaded
|
||||
representation is returned as container.
|
||||
|
||||
At the end you'll get list of loaded containers per each asset.
|
||||
|
||||
loaded_containers [{
|
||||
"asset_entity": <AssetEntity1>,
|
||||
"containers": [<Container1>, <Container2>, ...]
|
||||
}, {
|
||||
"asset_entity": <AssetEntity2>,
|
||||
"containers": [<Container3>, ...]
|
||||
}, {
|
||||
...
|
||||
}]
|
||||
"""
|
||||
# Get current asset name and entity
|
||||
current_asset_name = io.Session["AVALON_ASSET"]
|
||||
current_asset_entity = io.find_one({
|
||||
"type": "asset",
|
||||
"name": current_asset_name
|
||||
})
|
||||
|
||||
# Skip if asset was not found
|
||||
if not current_asset_entity:
|
||||
print("Asset entity with name `{}` was not found".format(
|
||||
current_asset_name
|
||||
))
|
||||
return
|
||||
|
||||
# Prepare available loaders
|
||||
loaders_by_name = {}
|
||||
for loader in avalon.api.discover(avalon.api.Loader):
|
||||
loader_name = loader.__name__
|
||||
if loader_name in loaders_by_name:
|
||||
raise KeyError(
|
||||
"Duplicated loader name {0}!".format(loader_name)
|
||||
)
|
||||
loaders_by_name[loader_name] = loader
|
||||
|
||||
# Skip if there are no registered loaders
|
||||
if not loaders_by_name:
|
||||
self.log.warning("There are no registered loaders.")
|
||||
return
|
||||
|
||||
# Get current task name
|
||||
current_task_name = io.Session["AVALON_TASK"]
|
||||
|
||||
# Load workfile presets for task
|
||||
self.build_presets = self.get_build_presets(current_task_name)
|
||||
|
||||
# Skip if there are no presets for the task
|
||||
if not self.build_presets:
|
||||
self.log.warning(
|
||||
"Current task `{}` does not have any loading preset.".format(
|
||||
current_task_name
|
||||
)
|
||||
)
|
||||
return
|
||||
|
||||
# Get presets for loading current asset
|
||||
current_context_profiles = self.build_presets.get("current_context")
|
||||
# Get presets for loading linked assets
|
||||
link_context_profiles = self.build_presets.get("linked_assets")
|
||||
# Skip if both are missing
|
||||
if not current_context_profiles and not link_context_profiles:
|
||||
self.log.warning(
|
||||
"Current task `{}` has empty loading preset.".format(
|
||||
current_task_name
|
||||
)
|
||||
)
|
||||
return
|
||||
|
||||
elif not current_context_profiles:
|
||||
self.log.warning((
|
||||
"Current task `{}` doesn't have any loading"
|
||||
" preset for it's context."
|
||||
).format(current_task_name))
|
||||
|
||||
elif not link_context_profiles:
|
||||
self.log.warning((
|
||||
"Current task `{}` doesn't have any"
|
||||
"loading preset for it's linked assets."
|
||||
).format(current_task_name))
|
||||
|
||||
# Prepare assets to process by workfile presets
|
||||
assets = []
|
||||
current_asset_id = None
|
||||
if current_context_profiles:
|
||||
# Add current asset entity if preset has current context set
|
||||
assets.append(current_asset_entity)
|
||||
current_asset_id = current_asset_entity["_id"]
|
||||
|
||||
if link_context_profiles:
|
||||
# Find and append linked assets if preset has set linked mapping
|
||||
link_assets = get_linked_assets(current_asset_entity)
|
||||
if link_assets:
|
||||
assets.extend(link_assets)
|
||||
|
||||
# Skip if there are no assets. This can happen if only linked mapping
|
||||
# is set and there are no links for this asset.
|
||||
if not assets:
|
||||
self.log.warning(
|
||||
"Asset does not have linked assets. Nothing to process."
|
||||
)
|
||||
return
|
||||
|
||||
# Prepare entities from database for assets
|
||||
prepared_entities = self._collect_last_version_repres(assets)
|
||||
|
||||
# Load containers by prepared entities and presets
|
||||
loaded_containers = []
|
||||
# - Current asset containers
|
||||
if current_asset_id and current_asset_id in prepared_entities:
|
||||
current_context_data = prepared_entities.pop(current_asset_id)
|
||||
loaded_data = self.load_containers_by_asset_data(
|
||||
current_context_data, current_context_profiles, loaders_by_name
|
||||
)
|
||||
if loaded_data:
|
||||
loaded_containers.append(loaded_data)
|
||||
|
||||
# - Linked assets container
|
||||
for linked_asset_data in prepared_entities.values():
|
||||
loaded_data = self.load_containers_by_asset_data(
|
||||
linked_asset_data, link_context_profiles, loaders_by_name
|
||||
)
|
||||
if loaded_data:
|
||||
loaded_containers.append(loaded_data)
|
||||
|
||||
# Return list of loaded containers
|
||||
return loaded_containers
|
||||
|
||||
def get_build_presets(self, task_name):
|
||||
""" Returns presets to build workfile for task name.
|
||||
|
||||
Presets are loaded for current project set in
|
||||
io.Session["AVALON_PROJECT"], filtered by registered host
|
||||
and entered task name.
|
||||
|
||||
Args:
|
||||
task_name (str): Task name used for filtering build presets.
|
||||
|
||||
Returns:
|
||||
(dict): preset per entered task name
|
||||
"""
|
||||
host_name = avalon.api.registered_host().__name__.rsplit(".", 1)[-1]
|
||||
presets = config.get_presets(io.Session["AVALON_PROJECT"])
|
||||
# Get presets for host
|
||||
build_presets = (
|
||||
presets["plugins"]
|
||||
.get(host_name, {})
|
||||
.get("workfile_build")
|
||||
)
|
||||
if not build_presets:
|
||||
return
|
||||
|
||||
task_name_low = task_name.lower()
|
||||
per_task_preset = None
|
||||
for preset in build_presets:
|
||||
preset_tasks = preset.get("tasks") or []
|
||||
preset_tasks_low = [task.lower() for task in preset_tasks]
|
||||
if task_name_low in preset_tasks_low:
|
||||
per_task_preset = preset
|
||||
break
|
||||
|
||||
return per_task_preset
|
||||
|
||||
def _filter_build_profiles(self, build_profiles, loaders_by_name):
|
||||
""" Filter build profiles by loaders and prepare process data.
|
||||
|
||||
Valid profile must have "loaders", "families" and "repre_names" keys
|
||||
with valid values.
|
||||
- "loaders" expects list of strings representing possible loaders.
|
||||
- "families" expects list of strings for filtering
|
||||
by main subset family.
|
||||
- "repre_names" expects list of strings for filtering by
|
||||
representation name.
|
||||
|
||||
Lowered "families" and "repre_names" are prepared for each profile with
|
||||
all required keys.
|
||||
|
||||
Args:
|
||||
build_profiles (dict): Profiles for building workfile.
|
||||
loaders_by_name (dict): Available loaders per name.
|
||||
|
||||
Returns:
|
||||
(list): Filtered and prepared profiles.
|
||||
"""
|
||||
valid_profiles = []
|
||||
for profile in build_profiles:
|
||||
# Check loaders
|
||||
profile_loaders = profile.get("loaders")
|
||||
if not profile_loaders:
|
||||
self.log.warning((
|
||||
"Build profile has missing loaders configuration: {0}"
|
||||
).format(json.dumps(profile, indent=4)))
|
||||
continue
|
||||
|
||||
# Check if any loader is available
|
||||
loaders_match = False
|
||||
for loader_name in profile_loaders:
|
||||
if loader_name in loaders_by_name:
|
||||
loaders_match = True
|
||||
break
|
||||
|
||||
if not loaders_match:
|
||||
self.log.warning((
|
||||
"All loaders from Build profile are not available: {0}"
|
||||
).format(json.dumps(profile, indent=4)))
|
||||
continue
|
||||
|
||||
# Check families
|
||||
profile_families = profile.get("families")
|
||||
if not profile_families:
|
||||
self.log.warning((
|
||||
"Build profile is missing families configuration: {0}"
|
||||
).format(json.dumps(profile, indent=4)))
|
||||
continue
|
||||
|
||||
# Check representation names
|
||||
profile_repre_names = profile.get("repre_names")
|
||||
if not profile_repre_names:
|
||||
self.log.warning((
|
||||
"Build profile is missing"
|
||||
" representation names filtering: {0}"
|
||||
).format(json.dumps(profile, indent=4)))
|
||||
continue
|
||||
|
||||
# Prepare lowered families and representation names
|
||||
profile["families_lowered"] = [
|
||||
fam.lower() for fam in profile_families
|
||||
]
|
||||
profile["repre_names_lowered"] = [
|
||||
name.lower() for name in profile_repre_names
|
||||
]
|
||||
|
||||
valid_profiles.append(profile)
|
||||
|
||||
return valid_profiles
|
||||
|
||||
def _prepare_profile_for_subsets(self, subsets, profiles):
|
||||
"""Select profile for each subset byt it's data.
|
||||
|
||||
Profiles are filtered for each subset individually.
|
||||
Profile is filtered by subset's family, optionally by name regex and
|
||||
representation names set in profile.
|
||||
It is possible to not find matching profile for subset, in that case
|
||||
subset is skipped and it is possible that none of subsets have
|
||||
matching profile.
|
||||
|
||||
Args:
|
||||
subsets (list): Subset documents.
|
||||
profiles (dict): Build profiles.
|
||||
|
||||
Returns:
|
||||
(dict) Profile by subset's id.
|
||||
"""
|
||||
# Prepare subsets
|
||||
subsets_by_family = self.map_subsets_by_family(subsets)
|
||||
|
||||
profiles_per_subset_id = {}
|
||||
for family, subsets in subsets_by_family.items():
|
||||
family_low = family.lower()
|
||||
for profile in profiles:
|
||||
# Skip profile if does not contain family
|
||||
if family_low not in profile["families_lowered"]:
|
||||
continue
|
||||
|
||||
# Precompile name filters as regexes
|
||||
profile_regexes = profile.get("subset_name_filters")
|
||||
if profile_regexes:
|
||||
_profile_regexes = []
|
||||
for regex in profile_regexes:
|
||||
_profile_regexes.append(re.compile(regex))
|
||||
profile_regexes = _profile_regexes
|
||||
|
||||
# TODO prepare regex compilation
|
||||
for subset in subsets:
|
||||
# Verify regex filtering (optional)
|
||||
if profile_regexes:
|
||||
valid = False
|
||||
for pattern in profile_regexes:
|
||||
if re.match(pattern, subset["name"]):
|
||||
valid = True
|
||||
break
|
||||
|
||||
if not valid:
|
||||
continue
|
||||
|
||||
profiles_per_subset_id[subset["_id"]] = profile
|
||||
|
||||
# break profiles loop on finding the first matching profile
|
||||
break
|
||||
return profiles_per_subset_id
|
||||
|
||||
def load_containers_by_asset_data(
|
||||
self, asset_entity_data, build_profiles, loaders_by_name
|
||||
):
|
||||
"""Load containers for entered asset entity by Build profiles.
|
||||
|
||||
Args:
|
||||
asset_entity_data (dict): Prepared data with subsets, last version
|
||||
and representations for specific asset.
|
||||
build_profiles (dict): Build profiles.
|
||||
loaders_by_name (dict): Available loaders per name.
|
||||
|
||||
Returns:
|
||||
(dict) Output contains asset document and loaded containers.
|
||||
"""
|
||||
|
||||
# Make sure all data are not empty
|
||||
if not asset_entity_data or not build_profiles or not loaders_by_name:
|
||||
return
|
||||
|
||||
asset_entity = asset_entity_data["asset_entity"]
|
||||
|
||||
valid_profiles = self._filter_build_profiles(
|
||||
build_profiles, loaders_by_name
|
||||
)
|
||||
if not valid_profiles:
|
||||
self.log.warning(
|
||||
"There are not valid Workfile profiles. Skipping process."
|
||||
)
|
||||
return
|
||||
|
||||
self.log.debug("Valid Workfile profiles: {}".format(valid_profiles))
|
||||
|
||||
subsets_by_id = {}
|
||||
version_by_subset_id = {}
|
||||
repres_by_version_id = {}
|
||||
for subset_id, in_data in asset_entity_data["subsets"].items():
|
||||
subset_entity = in_data["subset_entity"]
|
||||
subsets_by_id[subset_entity["_id"]] = subset_entity
|
||||
|
||||
version_data = in_data["version"]
|
||||
version_entity = version_data["version_entity"]
|
||||
version_by_subset_id[subset_id] = version_entity
|
||||
repres_by_version_id[version_entity["_id"]] = (
|
||||
version_data["repres"]
|
||||
)
|
||||
|
||||
if not subsets_by_id:
|
||||
self.log.warning("There are not subsets for asset {0}".format(
|
||||
asset_entity["name"]
|
||||
))
|
||||
return
|
||||
|
||||
profiles_per_subset_id = self._prepare_profile_for_subsets(
|
||||
subsets_by_id.values(), valid_profiles
|
||||
)
|
||||
if not profiles_per_subset_id:
|
||||
self.log.warning("There are not valid subsets.")
|
||||
return
|
||||
|
||||
valid_repres_by_subset_id = collections.defaultdict(list)
|
||||
for subset_id, profile in profiles_per_subset_id.items():
|
||||
profile_repre_names = profile["repre_names_lowered"]
|
||||
|
||||
version_entity = version_by_subset_id[subset_id]
|
||||
version_id = version_entity["_id"]
|
||||
repres = repres_by_version_id[version_id]
|
||||
for repre in repres:
|
||||
repre_name_low = repre["name"].lower()
|
||||
if repre_name_low in profile_repre_names:
|
||||
valid_repres_by_subset_id[subset_id].append(repre)
|
||||
|
||||
# DEBUG message
|
||||
msg = "Valid representations for Asset: `{}`".format(
|
||||
asset_entity["name"]
|
||||
)
|
||||
for subset_id, repres in valid_repres_by_subset_id.items():
|
||||
subset = subsets_by_id[subset_id]
|
||||
msg += "\n# Subset Name/ID: `{}`/{}".format(
|
||||
subset["name"], subset_id
|
||||
)
|
||||
for repre in repres:
|
||||
msg += "\n## Repre name: `{}`".format(repre["name"])
|
||||
|
||||
self.log.debug(msg)
|
||||
|
||||
containers = self._load_containers(
|
||||
valid_repres_by_subset_id, subsets_by_id,
|
||||
profiles_per_subset_id, loaders_by_name
|
||||
)
|
||||
|
||||
return {
|
||||
"asset_entity": asset_entity,
|
||||
"containers": containers
|
||||
}
|
||||
|
||||
def _load_containers(
|
||||
self, repres_by_subset_id, subsets_by_id,
|
||||
profiles_per_subset_id, loaders_by_name
|
||||
):
|
||||
"""Real load by collected data happens here.
|
||||
|
||||
Loading of representations per subset happens here. Each subset loads at
most one representation and loading is tried in a specific order.
Representation names defined in the configuration are tried one by one.
If the subset has a representation matching the name, each loader is
tried until one succeeds. If none of them succeeds, the next
representation name is tried.
The subset process loop ends when a representation is loaded or
all matching representations were already tried.
|
||||
|
||||
Args:
|
||||
repres_by_subset_id (dict): Available representations mapped
|
||||
by their parent (subset) id.
|
||||
subsets_by_id (dict): Subset documents mapped by their id.
|
||||
profiles_per_subset_id (dict): Build profiles mapped by subset id.
|
||||
loaders_by_name (dict): Available loaders per name.
|
||||
|
||||
Returns:
|
||||
(list) Objects of loaded containers.
|
||||
"""
|
||||
loaded_containers = []
|
||||
|
||||
# Get subset id order from build presets.
|
||||
build_presets = self.build_presets.get("current_context", [])
|
||||
build_presets += self.build_presets.get("linked_assets", [])
|
||||
subset_ids_ordered = []
|
||||
for preset in build_presets:
|
||||
for preset_family in preset["families"]:
|
||||
for id, subset in subsets_by_id.items():
|
||||
if preset_family not in subset["data"].get("families", []):
|
||||
continue
|
||||
|
||||
subset_ids_ordered.append(id)
|
||||
|
||||
# Order representations from subsets.
|
||||
print("repres_by_subset_id", repres_by_subset_id)
|
||||
representations_ordered = []
|
||||
representations = []
|
||||
for id in subset_ids_ordered:
|
||||
for subset_id, repres in repres_by_subset_id.items():
|
||||
if repres in representations:
|
||||
continue
|
||||
|
||||
if id == subset_id:
|
||||
representations_ordered.append((subset_id, repres))
|
||||
representations.append(repres)
|
||||
|
||||
print("representations", representations)
|
||||
|
||||
# Load ordered representations.
|
||||
for subset_id, repres in representations_ordered:
|
||||
subset_name = subsets_by_id[subset_id]["name"]
|
||||
|
||||
profile = profiles_per_subset_id[subset_id]
|
||||
loaders_last_idx = len(profile["loaders"]) - 1
|
||||
repre_names_last_idx = len(profile["repre_names_lowered"]) - 1
|
||||
|
||||
repre_by_low_name = {
|
||||
repre["name"].lower(): repre for repre in repres
|
||||
}
|
||||
|
||||
is_loaded = False
|
||||
for repre_name_idx, profile_repre_name in enumerate(
|
||||
profile["repre_names_lowered"]
|
||||
):
|
||||
# Break iteration if representation was already loaded
|
||||
if is_loaded:
|
||||
break
|
||||
|
||||
repre = repre_by_low_name.get(profile_repre_name)
|
||||
if not repre:
|
||||
continue
|
||||
|
||||
for loader_idx, loader_name in enumerate(profile["loaders"]):
|
||||
if is_loaded:
|
||||
break
|
||||
|
||||
loader = loaders_by_name.get(loader_name)
|
||||
if not loader:
|
||||
continue
|
||||
try:
|
||||
container = avalon.api.load(
|
||||
loader,
|
||||
repre["_id"],
|
||||
name=subset_name
|
||||
)
|
||||
loaded_containers.append(container)
|
||||
is_loaded = True
|
||||
|
||||
except Exception as exc:
|
||||
if exc == pipeline.IncompatibleLoaderError:
|
||||
self.log.info((
|
||||
"Loader `{}` is not compatible with"
|
||||
" representation `{}`"
|
||||
).format(loader_name, repre["name"]))
|
||||
|
||||
else:
|
||||
self.log.error(
|
||||
"Unexpected error happened during loading",
|
||||
exc_info=True
|
||||
)
|
||||
|
||||
msg = "Loading failed."
|
||||
if loader_idx < loaders_last_idx:
|
||||
msg += " Trying next loader."
|
||||
elif repre_name_idx < repre_names_last_idx:
|
||||
msg += (
|
||||
" Loading of subset `{}` was not successful."
|
||||
).format(subset_name)
|
||||
else:
|
||||
msg += " Trying next representation."
|
||||
self.log.info(msg)
|
||||
|
||||
return loaded_containers
|
||||
|
||||
def _collect_last_version_repres(self, asset_entities):
|
||||
"""Collect subsets, versions and representations for asset_entities.
|
||||
|
||||
Args:
|
||||
asset_entities (list): Asset entities for which want to find data
|
||||
|
||||
Returns:
|
||||
(dict): collected entities
|
||||
|
||||
Example output:
|
||||
```
|
||||
{
|
||||
{Asset ID}: {
|
||||
"asset_entity": <AssetEntity>,
|
||||
"subsets": {
|
||||
{Subset ID}: {
|
||||
"subset_entity": <SubsetEntity>,
|
||||
"version": {
|
||||
"version_entity": <VersionEntity>,
|
||||
"repres": [
|
||||
<RepreEntity1>, <RepreEntity2>, ...
|
||||
]
|
||||
}
|
||||
},
|
||||
...
|
||||
}
|
||||
},
|
||||
...
|
||||
}
|
||||
output[asset_id]["subsets"][subset_id]["version"]["repres"]
|
||||
```
|
||||
"""
|
||||
|
||||
if not asset_entities:
|
||||
return {}
|
||||
|
||||
asset_entity_by_ids = {asset["_id"]: asset for asset in asset_entities}
|
||||
|
||||
subsets = list(io.find({
|
||||
"type": "subset",
|
||||
"parent": {"$in": asset_entity_by_ids.keys()}
|
||||
}))
|
||||
subset_entity_by_ids = {subset["_id"]: subset for subset in subsets}
|
||||
|
||||
sorted_versions = list(io.find({
|
||||
"type": "version",
|
||||
"parent": {"$in": subset_entity_by_ids.keys()}
|
||||
}).sort("name", -1))
|
||||
|
||||
subset_id_with_latest_version = []
|
||||
last_versions_by_id = {}
|
||||
for version in sorted_versions:
|
||||
subset_id = version["parent"]
|
||||
if subset_id in subset_id_with_latest_version:
|
||||
continue
|
||||
subset_id_with_latest_version.append(subset_id)
|
||||
last_versions_by_id[version["_id"]] = version
|
||||
|
||||
repres = io.find({
|
||||
"type": "representation",
|
||||
"parent": {"$in": last_versions_by_id.keys()}
|
||||
})
|
||||
|
||||
output = {}
|
||||
for repre in repres:
|
||||
version_id = repre["parent"]
|
||||
version = last_versions_by_id[version_id]
|
||||
|
||||
subset_id = version["parent"]
|
||||
subset = subset_entity_by_ids[subset_id]
|
||||
|
||||
asset_id = subset["parent"]
|
||||
asset = asset_entity_by_ids[asset_id]
|
||||
|
||||
if asset_id not in output:
|
||||
output[asset_id] = {
|
||||
"asset_entity": asset,
|
||||
"subsets": {}
|
||||
}
|
||||
|
||||
if subset_id not in output[asset_id]["subsets"]:
|
||||
output[asset_id]["subsets"][subset_id] = {
|
||||
"subset_entity": subset,
|
||||
"version": {
|
||||
"version_entity": version,
|
||||
"repres": []
|
||||
}
|
||||
}
|
||||
|
||||
output[asset_id]["subsets"][subset_id]["version"]["repres"].append(
|
||||
repre
|
||||
)
|
||||
|
||||
return output
|
||||
26  pype/lib/deprecated.py (new file)
|
|
@ -0,0 +1,26 @@
|
|||
import os
|
||||
|
||||
from avalon import io
|
||||
|
||||
|
||||
def get_avalon_database():
|
||||
"""Mongo database used in avalon's io.
|
||||
|
||||
* Function is not used in pype 3.0, where it was replaced by
AvalonMongoDB.
|
||||
"""
|
||||
if io._database is None:
|
||||
set_io_database()
|
||||
return io._database
|
||||
|
||||
|
||||
def set_io_database():
|
||||
"""Set avalon's io context with environemnts.
|
||||
|
||||
* Function is not used in pype 3.0, where it was replaced by
AvalonMongoDB.
|
||||
"""
|
||||
required_keys = ["AVALON_PROJECT", "AVALON_ASSET", "AVALON_SILO"]
|
||||
for key in required_keys:
|
||||
os.environ[key] = os.environ.get(key, "")
|
||||
io.install()
|
||||
46  pype/lib/ffmpeg_utils.py (new file)
|
|
@ -0,0 +1,46 @@
|
|||
import logging
|
||||
import json
|
||||
import subprocess
|
||||
|
||||
from . import get_ffmpeg_tool_path
|
||||
|
||||
log = logging.getLogger("FFmpeg utils")
|
||||
|
||||
|
||||
def ffprobe_streams(path_to_file, logger=None):
|
||||
"""Load streams from entered filepath via ffprobe.
|
||||
|
||||
Args:
|
||||
path_to_file (str): absolute path
|
||||
logger (logging.Logger): injected logger; the module logger is used if not passed
|
||||
|
||||
"""
|
||||
if not logger:
|
||||
logger = log
|
||||
logger.info(
|
||||
"Getting information about input \"{}\".".format(path_to_file)
|
||||
)
|
||||
args = [
|
||||
"\"{}\"".format(get_ffmpeg_tool_path("ffprobe")),
|
||||
"-v quiet",
|
||||
"-print_format json",
|
||||
"-show_format",
|
||||
"-show_streams",
|
||||
"\"{}\"".format(path_to_file)
|
||||
]
|
||||
command = " ".join(args)
|
||||
logger.debug("FFprobe command: \"{}\"".format(command))
|
||||
popen = subprocess.Popen(
|
||||
command,
|
||||
shell=True,
|
||||
stdout=subprocess.PIPE,
|
||||
stderr=subprocess.PIPE
|
||||
)
|
||||
|
||||
popen_stdout, popen_stderr = popen.communicate()
|
||||
if popen_stdout:
|
||||
logger.debug("ffprobe stdout: {}".format(popen_stdout))
|
||||
|
||||
if popen_stderr:
|
||||
logger.debug("ffprobe stderr: {}".format(popen_stderr))
|
||||
return json.loads(popen_stdout)["streams"]
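Usage sketch; the file path is illustrative:

    streams = ffprobe_streams("/path/to/review.mov")
    width = streams[0].get("width")
    height = streams[0].get("height")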
|
||||
71  pype/lib/hooks.py (new file)
|
|
@ -0,0 +1,71 @@
|
|||
# -*- coding: utf-8 -*-
|
||||
"""Package containing code for handling hooks."""
|
||||
import os
|
||||
import sys
|
||||
import types
|
||||
import logging
|
||||
from abc import ABCMeta, abstractmethod
|
||||
|
||||
import six
|
||||
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
|
||||
@six.add_metaclass(ABCMeta)
|
||||
class PypeHook:
|
||||
"""Abstract class from all hooks should inherit."""
|
||||
|
||||
def __init__(self):
|
||||
"""Constructor."""
|
||||
pass
|
||||
|
||||
@abstractmethod
|
||||
def execute(self, *args, **kwargs):
|
||||
"""Abstract execute method."""
|
||||
pass
|
||||
|
||||
|
||||
def execute_hook(hook, *args, **kwargs):
|
||||
"""Execute hook with arguments.
|
||||
|
||||
This will load hook file, instantiate class and call
|
||||
:meth:`PypeHook.execute` method on it. Hook must be in a form::
|
||||
|
||||
$PYPE_SETUP_PATH/repos/pype/path/to/hook.py/HookClass
|
||||
|
||||
This will load `hook.py`, instantiate HookClass and then call
|
||||
`execute(*args, **kwargs)`
|
||||
|
||||
Args:
|
||||
hook (str): path to hook class.
|
||||
|
||||
"""
|
||||
class_name = hook.split("/")[-1]
|
||||
|
||||
abspath = os.path.join(os.getenv('PYPE_SETUP_PATH'),
|
||||
'repos', 'pype', *hook.split("/")[:-1])
|
||||
|
||||
mod_name, mod_ext = os.path.splitext(os.path.basename(abspath))
|
||||
|
||||
if not mod_ext == ".py":
|
||||
return False
|
||||
|
||||
module = types.ModuleType(mod_name)
|
||||
module.__file__ = abspath
|
||||
|
||||
try:
|
||||
with open(abspath) as f:
|
||||
six.exec_(f.read(), module.__dict__)
|
||||
|
||||
sys.modules[abspath] = module
|
||||
|
||||
except Exception as exp:
|
||||
log.exception("loading hook failed: {}".format(exp),
|
||||
exc_info=True)
|
||||
return False
|
||||
|
||||
obj = getattr(module, class_name)
|
||||
hook_obj = obj()
|
||||
ret_val = hook_obj.execute(*args, **kwargs)
|
||||
return ret_val
|
||||
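# Illustrative sketch (not part of the commit): a concrete hook and how it is
# used. The class and hook path are hypothetical; execute_hook joins the path
# with $PYPE_SETUP_PATH/repos/pype before importing the file.
from pype.lib.hooks import PypeHook, execute_hook


class ExampleHook(PypeHook):
    """Hypothetical hook used only for this example."""

    def execute(self, *args, **kwargs):
        print("hook called with", args, kwargs)
        return True


# Direct call works because execute() is the only abstract method.
ExampleHook().execute("launched", app="maya")

# Through the loader the same class would be addressed by a path string, e.g.
# execute_hook("hooks/example_hook.py/ExampleHook", "launched")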
181
pype/lib/path_tools.py
Normal file
|
|
@ -0,0 +1,181 @@
|
|||
import os
|
||||
import re
|
||||
import logging
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def get_paths_from_environ(env_key, return_first=False):
|
||||
"""Return existing paths from specific envirnment variable.
|
||||
|
||||
Args:
|
||||
env_key (str): Environment variable in which to look for paths.
return_first (bool): Return only the first existing path when True.
|
||||
|
||||
Returns:
|
||||
(str or list): First existing path when `return_first` is True, otherwise list of all existing paths.
|
||||
|
||||
|
||||
Difference when none of paths exists:
|
||||
- when `return_first` is set to `False` then function returns empty list.
|
||||
- when `return_first` is set to `True` then function returns `None`.
|
||||
"""
|
||||
existing_paths = []
|
||||
paths = os.environ.get(env_key) or ""
|
||||
path_items = paths.split(os.pathsep)
|
||||
for path in path_items:
|
||||
# Skip empty string
|
||||
if not path:
|
||||
continue
|
||||
# Normalize path
|
||||
path = os.path.normpath(path)
|
||||
# Check if path exists
|
||||
if os.path.exists(path):
|
||||
# Return path if `return_first` is set to True
|
||||
if return_first:
|
||||
return path
|
||||
# Store path
|
||||
existing_paths.append(path)
|
||||
|
||||
# Return None if none of paths exists
|
||||
if return_first:
|
||||
return None
|
||||
# Return all existing paths from environment variable
|
||||
return existing_paths
|
||||
|
||||
|
||||
def get_ffmpeg_tool_path(tool="ffmpeg"):
|
||||
"""Find path to ffmpeg tool in FFMPEG_PATH paths.
|
||||
|
||||
Function looks for tool in paths set in FFMPEG_PATH environment. If tool
|
||||
exists then returns its full path.
|
||||
|
||||
Args:
|
||||
tool (string): tool name
|
||||
|
||||
Returns:
|
||||
(str): full path to the tool, or only the tool name when the path
|
||||
was not found (FFmpeg may then be resolved from the PATH environment variable).
|
||||
"""
|
||||
dir_paths = get_paths_from_environ("FFMPEG_PATH")
|
||||
for dir_path in dir_paths:
|
||||
for file_name in os.listdir(dir_path):
|
||||
base, _ext = os.path.splitext(file_name)
|
||||
if base.lower() == tool.lower():
|
||||
return os.path.join(dir_path, tool)
|
||||
return tool
|
||||
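# --- usage sketch (not part of the commit) -----------------------------------
# get_paths_from_environ() and get_ffmpeg_tool_path() only read environment
# variables, so they can be exercised like this; the directories are assumptions.
#
#   os.environ["FFMPEG_PATH"] = os.pathsep.join([
#       "/opt/ffmpeg/bin",            # hypothetical install location
#       "/usr/local/ffmpeg/bin",
#   ])
#   print(get_paths_from_environ("FFMPEG_PATH"))         # all existing dirs
#   print(get_paths_from_environ("FFMPEG_PATH", True))   # first existing dir
#   print(get_ffmpeg_tool_path("ffprobe"))               # full path or "ffprobe"
# ------------------------------------------------------------------------------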
|
||||
|
||||
def _rreplace(s, a, b, n=1):
|
||||
"""Replace a with b in string s from right side n times."""
|
||||
return b.join(s.rsplit(a, n))
|
||||
|
||||
|
||||
def version_up(filepath):
|
||||
"""Version up filepath to a new non-existing version.
|
||||
|
||||
Parses for a version identifier like `_v001` or `.v001`.
|
||||
When no version is present, `_v001` is appended as a suffix.
|
||||
|
||||
Args:
|
||||
filepath (str): full url
|
||||
|
||||
Returns:
|
||||
(str): filepath with increased version number
|
||||
|
||||
"""
|
||||
dirname = os.path.dirname(filepath)
|
||||
basename, ext = os.path.splitext(os.path.basename(filepath))
|
||||
|
||||
regex = r"[._]v\d+"
|
||||
matches = re.findall(regex, str(basename), re.IGNORECASE)
|
||||
if not matches:
|
||||
log.info("Creating version...")
|
||||
new_label = "_v{version:03d}".format(version=1)
|
||||
new_basename = "{}{}".format(basename, new_label)
|
||||
else:
|
||||
label = matches[-1]
|
||||
version = re.search(r"\d+", label).group()
|
||||
padding = len(version)
|
||||
|
||||
new_version = int(version) + 1
|
||||
new_version = '{version:0{padding}d}'.format(version=new_version,
|
||||
padding=padding)
|
||||
new_label = label.replace(version, new_version, 1)
|
||||
new_basename = _rreplace(basename, label, new_label)
|
||||
|
||||
if not new_basename.endswith(new_label):
|
||||
index = (new_basename.find(new_label))
|
||||
index += len(new_label)
|
||||
new_basename = new_basename[:index]
|
||||
|
||||
new_filename = "{}{}".format(new_basename, ext)
|
||||
new_filename = os.path.join(dirname, new_filename)
|
||||
new_filename = os.path.normpath(new_filename)
|
||||
|
||||
if new_filename == filepath:
|
||||
raise RuntimeError("Created path is the same as current file,"
|
||||
"this is a bug")
|
||||
|
||||
for file in os.listdir(dirname):
|
||||
if file.endswith(ext) and file.startswith(new_basename):
|
||||
log.info("Skipping existing version %s" % new_label)
|
||||
return version_up(new_filename)
|
||||
|
||||
log.info("New version %s" % new_label)
|
||||
return new_filename
|
||||
|
||||
|
||||
def get_version_from_path(file):
|
||||
"""Find version number in file path string.
|
||||
|
||||
Args:
|
||||
file (string): file path
|
||||
|
||||
Returns:
|
||||
v: version number in string ('001')
|
||||
|
||||
"""
|
||||
pattern = re.compile(r"[\._]v([0-9]+)", re.IGNORECASE)
|
||||
try:
|
||||
return pattern.findall(file)[0]
|
||||
except IndexError:
|
||||
log.error(
|
||||
"templates:get_version_from_workfile:"
|
||||
"`{}` missing version string."
|
||||
"Example `v004`".format(file)
|
||||
)
|
||||
|
||||
|
||||
def get_last_version_from_path(path_dir, filter):
|
||||
"""Find last version of given directory content.
|
||||
|
||||
Args:
|
||||
path_dir (string): directory path
|
||||
filter (list): list of strings used as file name filter
|
||||
|
||||
Returns:
|
||||
string: file name with last version
|
||||
|
||||
Example:
|
||||
last_version_file = get_last_version_from_path(
|
||||
"/project/shots/shot01/work", ["shot01", "compositing", "nk"])
|
||||
"""
|
||||
assert os.path.isdir(path_dir), "`path_dir` argument needs to be directory"
|
||||
assert isinstance(filter, list) and (
|
||||
len(filter) != 0), "`filter` argument needs to be list and not empty"
|
||||
|
||||
filtred_files = list()
|
||||
|
||||
# form regex for filtering
|
||||
patern = r".*".join(filter)
|
||||
|
||||
for file in os.listdir(path_dir):
|
||||
if not re.findall(patern, file):
|
||||
continue
|
||||
filtred_files.append(file)
|
||||
|
||||
if filtred_files:
|
||||
filtred_files.sort()
|
||||
return filtred_files[-1]
|
||||
|
||||
return None
|
||||
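# Usage sketch for the version helpers above (illustration only; the paths are
# hypothetical and must exist on disk for version_up / get_last_version_from_path).
from pype.lib.path_tools import (
    version_up,
    get_version_from_path,
    get_last_version_from_path,
)

print(get_version_from_path("/work/shot01_compositing_v004.nk"))   # -> "004"

# version_up() parses the "_v###" token and returns the next free file name,
# e.g. ".../shot01_compositing_v005.nk" when v005 does not exist yet.
print(version_up("/work/shot01_compositing_v004.nk"))

# Newest file in a directory matching all filter tokens (sorted by name).
print(get_last_version_from_path("/work", ["shot01", "compositing", "nk"]))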
80
pype/lib/plugin_tools.py
Normal file
|
|
@ -0,0 +1,80 @@
|
|||
# -*- coding: utf-8 -*-
|
||||
"""Avalon/Pyblish plugin tools."""
|
||||
import os
|
||||
import inspect
|
||||
import logging
|
||||
|
||||
from ..api import config
|
||||
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def filter_pyblish_plugins(plugins):
|
||||
"""Filter pyblish plugins by presets.
|
||||
|
||||
This serves as a plugin filter / modifier for pyblish. It will load plugin
|
||||
definitions from presets and filter those needed to be excluded.
|
||||
|
||||
Args:
|
||||
plugins (dict): Dictionary of plugins produced by :mod:`pyblish-base`
|
||||
`discover()` method.
|
||||
|
||||
"""
|
||||
from pyblish import api
|
||||
|
||||
host = api.current_host()
|
||||
|
||||
presets = config.get_presets().get('plugins', {})
|
||||
|
||||
# iterate over plugins
|
||||
for plugin in plugins[:]:
|
||||
# skip if there are no presets to process
|
||||
if not presets:
|
||||
continue
|
||||
|
||||
file = os.path.normpath(inspect.getsourcefile(plugin))
|
||||
file = os.path.normpath(file)
|
||||
|
||||
# host determined from path
|
||||
host_from_file = file.split(os.path.sep)[-3:-2][0]
|
||||
plugin_kind = file.split(os.path.sep)[-2:-1][0]
|
||||
|
||||
try:
|
||||
config_data = presets[host]["publish"][plugin.__name__]
|
||||
except KeyError:
|
||||
try:
|
||||
config_data = presets[host_from_file][plugin_kind][plugin.__name__] # noqa: E501
|
||||
except KeyError:
|
||||
continue
|
||||
|
||||
for option, value in config_data.items():
|
||||
if option == "enabled" and value is False:
|
||||
log.info('removing plugin {}'.format(plugin.__name__))
|
||||
plugins.remove(plugin)
|
||||
else:
|
||||
log.info('setting {}:{} on plugin {}'.format(
|
||||
option, value, plugin.__name__))
|
||||
|
||||
setattr(plugin, option, value)
|
||||
|
||||
|
||||
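# --- presets shape expected by filter_pyblish_plugins (illustration only) -----
# The lookup is presets[host]["publish"][<plugin class name>], with a fallback
# to the host and plugin kind parsed from the plugin's file path. A hypothetical
# preset that disables one validator and tweaks another would look like:
#
#   presets = {
#       "maya": {
#           "publish": {
#               "ValidateMeshUVs": {"enabled": False},
#               "ExtractPlayblast": {"optional": True, "active": True},
#           }
#       }
#   }
#
# "enabled": False removes the plugin from the discovered list; every other
# key/value pair is set as an attribute on the plugin class.
# ------------------------------------------------------------------------------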
def source_hash(filepath, *args):
|
||||
"""Generate simple identifier for a source file.
|
||||
This is used to identify whether a source file has previously been
|
||||
processed into the pipeline, e.g. a texture.
|
||||
The hash is based on source filepath, modification time and file size.
|
||||
This is only used to identify whether a specific source file was already
|
||||
published before from the same location with the same modification date.
|
||||
We opt to do it this way as opposed to the Avalanche C4 hash as this is much
|
||||
faster and predictable enough for all our production use cases.
|
||||
Args:
|
||||
filepath (str): The source file path.
|
||||
You can specify additional arguments in the function
|
||||
to allow for specific 'processing' values to be included.
|
||||
"""
|
||||
# We replace dots with comma because . cannot be a key in a pymongo dict.
|
||||
file_name = os.path.basename(filepath)
|
||||
time = str(os.path.getmtime(filepath))
|
||||
size = str(os.path.getsize(filepath))
|
||||
return "|".join([file_name, time, size] + list(args)).replace(".", ",")
|
||||
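# Usage sketch for source_hash (illustration only; the texture path and the
# extra "processing" argument are hypothetical).
from pype.lib.plugin_tools import source_hash

identifier = source_hash("/assets/textures/wood_diffuse.exr", "linear")
# e.g. "wood_diffuse,exr|1592403324,0|1048576|linear" - dots become commas so
# the value can be used as a key in a mongo document.
print(identifier)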
|
|
@ -1,4 +1,6 @@
|
|||
import collections
|
||||
import datetime
|
||||
|
||||
import ftrack_api
|
||||
from pype.modules.ftrack import BaseEvent
|
||||
|
||||
|
|
@ -10,17 +12,24 @@ class PushFrameValuesToTaskEvent(BaseEvent):
|
|||
cust_attrs_query = (
|
||||
"select id, key, object_type_id, is_hierarchical, default"
|
||||
" from CustomAttributeConfiguration"
|
||||
" where key in ({}) and object_type_id in ({})"
|
||||
" where key in ({}) and"
|
||||
" (object_type_id in ({}) or is_hierarchical is true)"
|
||||
)
|
||||
|
||||
cust_attr_query = (
|
||||
"select value, entity_id from ContextCustomAttributeValue "
|
||||
"where entity_id in ({}) and configuration_id in ({})"
|
||||
)
|
||||
|
||||
interest_entity_types = {"Shot"}
|
||||
interest_attributes = {"frameStart", "frameEnd"}
|
||||
interest_attr_mapping = {
|
||||
"frameStart": "fstart",
|
||||
"frameEnd": "fend"
|
||||
}
|
||||
_cached_task_object_id = None
|
||||
_cached_interest_object_ids = None
|
||||
_cached_user_id = None
|
||||
_cached_changes = []
|
||||
_max_delta = 30
|
||||
|
||||
# Configurable (lists)
|
||||
interest_entity_types = {"Shot"}
|
||||
interest_attributes = {"frameStart", "frameEnd"}
|
||||
|
||||
@staticmethod
|
||||
def join_keys(keys):
|
||||
|
|
@ -49,8 +58,18 @@ class PushFrameValuesToTaskEvent(BaseEvent):
|
|||
)
|
||||
return cls._cached_interest_object_ids
|
||||
|
||||
def session_user_id(self, session):
|
||||
if self._cached_user_id is None:
|
||||
user = session.query(
|
||||
"User where username is \"{}\"".format(session.api_user)
|
||||
).one()
|
||||
self._cached_user_id = user["id"]
|
||||
return self._cached_user_id
|
||||
|
||||
def launch(self, session, event):
|
||||
interesting_data = self.extract_interesting_data(session, event)
|
||||
interesting_data, changed_keys_by_object_id = (
|
||||
self.extract_interesting_data(session, event)
|
||||
)
|
||||
if not interesting_data:
|
||||
return
|
||||
|
||||
|
|
@ -66,92 +85,165 @@ class PushFrameValuesToTaskEvent(BaseEvent):
|
|||
if entity_id not in entities_by_id:
|
||||
interesting_data.pop(entity_id)
|
||||
|
||||
task_entities = self.get_task_entities(session, interesting_data)
|
||||
attrs_by_obj_id, hier_attrs = self.attrs_configurations(session)
|
||||
|
||||
task_object_id = self.task_object_id(session)
|
||||
task_attrs = attrs_by_obj_id.get(task_object_id)
|
||||
# Skip keys that are not both in hierachical and type specific
|
||||
for object_id, keys in changed_keys_by_object_id.items():
|
||||
object_id_attrs = attrs_by_obj_id.get(object_id)
|
||||
for key in keys:
|
||||
if key not in hier_attrs:
|
||||
attrs_by_obj_id[object_id].pop(key)
|
||||
continue
|
||||
|
||||
if (
|
||||
(not object_id_attrs or key not in object_id_attrs)
|
||||
and (not task_attrs or key not in task_attrs)
|
||||
):
|
||||
hier_attrs.pop(key)
|
||||
|
||||
# Clean up empty values
|
||||
for key, value in tuple(attrs_by_obj_id.items()):
|
||||
if not value:
|
||||
attrs_by_obj_id.pop(key)
|
||||
|
||||
attrs_by_obj_id = self.attrs_configurations(session)
|
||||
if not attrs_by_obj_id:
|
||||
self.log.warning((
|
||||
"There is not created Custom Attributes {}"
|
||||
" for \"Task\" entity type."
|
||||
).format(self.join_keys(self.interest_attributes)))
|
||||
"There is not created Custom Attributes {} "
|
||||
" for entity types: {}"
|
||||
).format(
|
||||
self.join_keys(self.interest_attributes),
|
||||
self.join_keys(self.interest_entity_types)
|
||||
))
|
||||
return
|
||||
|
||||
task_entities_by_parent_id = collections.defaultdict(list)
|
||||
# Prepare task entities
|
||||
task_entities = []
|
||||
# If task entity does not contain changed attribute then skip
|
||||
if task_attrs:
|
||||
task_entities = self.get_task_entities(session, interesting_data)
|
||||
|
||||
task_entities_by_id = {}
|
||||
parent_id_by_task_id = {}
|
||||
for task_entity in task_entities:
|
||||
task_entities_by_parent_id[task_entity["parent_id"]].append(
|
||||
task_entity
|
||||
)
|
||||
task_entities_by_id[task_entity["id"]] = task_entity
|
||||
parent_id_by_task_id[task_entity["id"]] = task_entity["parent_id"]
|
||||
|
||||
missing_keys_by_object_name = collections.defaultdict(set)
|
||||
for parent_id, values in interesting_data.items():
|
||||
entities = task_entities_by_parent_id.get(parent_id) or []
|
||||
entities.append(entities_by_id[parent_id])
|
||||
changed_keys = set()
|
||||
for keys in changed_keys_by_object_id.values():
|
||||
changed_keys |= set(keys)
|
||||
|
||||
for hier_key, value in values.items():
|
||||
changed_ids = []
|
||||
for entity in entities:
|
||||
key = self.interest_attr_mapping[hier_key]
|
||||
entity_attrs_mapping = (
|
||||
attrs_by_obj_id.get(entity["object_type_id"])
|
||||
attr_id_to_key = {}
|
||||
for attr_confs in attrs_by_obj_id.values():
|
||||
for key in changed_keys:
|
||||
custom_attr_id = attr_confs.get(key)
|
||||
if custom_attr_id:
|
||||
attr_id_to_key[custom_attr_id] = key
|
||||
|
||||
for key in changed_keys:
|
||||
custom_attr_id = hier_attrs.get(key)
|
||||
if custom_attr_id:
|
||||
attr_id_to_key[custom_attr_id] = key
|
||||
|
||||
entity_ids = (
|
||||
set(interesting_data.keys()) | set(task_entities_by_id.keys())
|
||||
)
|
||||
attr_ids = set(attr_id_to_key.keys())
|
||||
|
||||
current_values_by_id = self.current_values(
|
||||
session, attr_ids, entity_ids, task_entities_by_id, hier_attrs
|
||||
)
|
||||
|
||||
for entity_id, current_values in current_values_by_id.items():
|
||||
parent_id = parent_id_by_task_id.get(entity_id)
|
||||
if not parent_id:
|
||||
parent_id = entity_id
|
||||
values = interesting_data[parent_id]
|
||||
|
||||
for attr_id, old_value in current_values.items():
|
||||
attr_key = attr_id_to_key.get(attr_id)
|
||||
if not attr_key:
|
||||
continue
|
||||
|
||||
# Convert new value from string
|
||||
new_value = values.get(attr_key)
|
||||
if new_value is not None and old_value is not None:
|
||||
try:
|
||||
new_value = type(old_value)(new_value)
|
||||
except Exception:
|
||||
self.log.warning((
|
||||
"Couldn't convert from {} to {}."
|
||||
" Skipping update values."
|
||||
).format(type(new_value), type(old_value)))
|
||||
if new_value == old_value:
|
||||
continue
|
||||
|
||||
entity_key = collections.OrderedDict({
|
||||
"configuration_id": attr_id,
|
||||
"entity_id": entity_id
|
||||
})
|
||||
self._cached_changes.append({
|
||||
"attr_key": attr_key,
|
||||
"entity_id": entity_id,
|
||||
"value": new_value,
|
||||
"time": datetime.datetime.now()
|
||||
})
|
||||
if new_value is None:
|
||||
op = ftrack_api.operation.DeleteEntityOperation(
|
||||
"CustomAttributeValue",
|
||||
entity_key
|
||||
)
|
||||
else:
|
||||
op = ftrack_api.operation.UpdateEntityOperation(
|
||||
"ContextCustomAttributeValue",
|
||||
entity_key,
|
||||
"value",
|
||||
ftrack_api.symbol.NOT_SET,
|
||||
new_value
|
||||
)
|
||||
if not entity_attrs_mapping:
|
||||
missing_keys_by_object_name[entity.entity_type].add(
|
||||
key
|
||||
)
|
||||
continue
|
||||
|
||||
configuration_id = entity_attrs_mapping.get(key)
|
||||
if not configuration_id:
|
||||
missing_keys_by_object_name[entity.entity_type].add(
|
||||
key
|
||||
)
|
||||
continue
|
||||
|
||||
changed_ids.append(entity["id"])
|
||||
entity_key = collections.OrderedDict({
|
||||
"configuration_id": configuration_id,
|
||||
"entity_id": entity["id"]
|
||||
})
|
||||
if value is None:
|
||||
op = ftrack_api.operation.DeleteEntityOperation(
|
||||
"CustomAttributeValue",
|
||||
entity_key
|
||||
)
|
||||
else:
|
||||
op = ftrack_api.operation.UpdateEntityOperation(
|
||||
"ContextCustomAttributeValue",
|
||||
entity_key,
|
||||
"value",
|
||||
ftrack_api.symbol.NOT_SET,
|
||||
value
|
||||
)
|
||||
|
||||
session.recorded_operations.push(op)
|
||||
session.recorded_operations.push(op)
|
||||
self.log.info((
|
||||
"Changing Custom Attribute \"{}\" to value"
|
||||
" \"{}\" on entities: {}"
|
||||
).format(key, value, self.join_keys(changed_ids)))
|
||||
try:
|
||||
session.commit()
|
||||
except Exception:
|
||||
session.rollback()
|
||||
self.log.warning(
|
||||
"Changing of values failed.",
|
||||
exc_info=True
|
||||
)
|
||||
if not missing_keys_by_object_name:
|
||||
return
|
||||
" \"{}\" on entity: {}"
|
||||
).format(attr_key, new_value, entity_id))
|
||||
try:
|
||||
session.commit()
|
||||
except Exception:
|
||||
session.rollback()
|
||||
self.log.warning("Changing of values failed.", exc_info=True)
|
||||
|
||||
msg_items = []
|
||||
for object_name, missing_keys in missing_keys_by_object_name.items():
|
||||
msg_items.append(
|
||||
"{}: ({})".format(object_name, self.join_keys(missing_keys))
|
||||
def current_values(
|
||||
self, session, attr_ids, entity_ids, task_entities_by_id, hier_attrs
|
||||
):
|
||||
current_values_by_id = {}
|
||||
if not attr_ids or not entity_ids:
|
||||
return current_values_by_id
|
||||
joined_conf_ids = self.join_keys(attr_ids)
|
||||
joined_entity_ids = self.join_keys(entity_ids)
|
||||
|
||||
call_expr = [{
|
||||
"action": "query",
|
||||
"expression": self.cust_attr_query.format(
|
||||
joined_entity_ids, joined_conf_ids
|
||||
)
|
||||
}]
|
||||
if hasattr(session, "call"):
|
||||
[values] = session.call(call_expr)
|
||||
else:
|
||||
[values] = session._call(call_expr)
|
||||
|
||||
self.log.warning((
|
||||
"Missing Custom Attribute configuration"
|
||||
" per specific object types: {}"
|
||||
).format(", ".join(msg_items)))
|
||||
for item in values["data"]:
|
||||
entity_id = item["entity_id"]
|
||||
attr_id = item["configuration_id"]
|
||||
if entity_id in task_entities_by_id and attr_id in hier_attrs:
|
||||
continue
|
||||
|
||||
if entity_id not in current_values_by_id:
|
||||
current_values_by_id[entity_id] = {}
|
||||
current_values_by_id[entity_id][attr_id] = item["value"]
|
||||
return current_values_by_id
|
||||
|
||||
def extract_interesting_data(self, session, event):
|
||||
# Filter if event contain relevant data
|
||||
|
|
@ -159,7 +251,18 @@ class PushFrameValuesToTaskEvent(BaseEvent):
|
|||
if not entities_info:
|
||||
return
|
||||
|
||||
# for key, value in event["data"].items():
|
||||
# self.log.info("{}: {}".format(key, value))
|
||||
session_user_id = self.session_user_id(session)
|
||||
user_data = event["data"].get("user")
|
||||
changed_by_session = False
|
||||
if user_data and user_data.get("userid") == session_user_id:
|
||||
changed_by_session = True
|
||||
|
||||
current_time = datetime.datetime.now()
|
||||
|
||||
interesting_data = {}
|
||||
changed_keys_by_object_id = {}
|
||||
for entity_info in entities_info:
|
||||
# Care only about tasks
|
||||
if entity_info.get("entityType") != "task":
|
||||
|
|
@ -176,16 +279,47 @@ class PushFrameValuesToTaskEvent(BaseEvent):
|
|||
if key in changes:
|
||||
entity_changes[key] = changes[key]["new"]
|
||||
|
||||
entity_id = entity_info["entityId"]
|
||||
if changed_by_session:
|
||||
for key, new_value in tuple(entity_changes.items()):
|
||||
for cached in tuple(self._cached_changes):
|
||||
if (
|
||||
cached["entity_id"] != entity_id
|
||||
or cached["attr_key"] != key
|
||||
):
|
||||
continue
|
||||
|
||||
cached_value = cached["value"]
|
||||
try:
|
||||
new_value = type(cached_value)(new_value)
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
if cached_value == new_value:
|
||||
self._cached_changes.remove(cached)
|
||||
entity_changes.pop(key)
|
||||
break
|
||||
|
||||
delta = (current_time - cached["time"]).seconds
|
||||
if delta > self._max_delta:
|
||||
self._cached_changes.remove(cached)
|
||||
|
||||
if not entity_changes:
|
||||
continue
|
||||
|
||||
# Do not care about "Task" entity_type
|
||||
task_object_id = self.task_object_id(session)
|
||||
if entity_info.get("objectTypeId") == task_object_id:
|
||||
object_id = entity_info.get("objectTypeId")
|
||||
if not object_id or object_id == task_object_id:
|
||||
continue
|
||||
|
||||
interesting_data[entity_info["entityId"]] = entity_changes
|
||||
return interesting_data
|
||||
interesting_data[entity_id] = entity_changes
|
||||
if object_id not in changed_keys_by_object_id:
|
||||
changed_keys_by_object_id[object_id] = set()
|
||||
|
||||
changed_keys_by_object_id[object_id] |= set(entity_changes.keys())
|
||||
|
||||
return interesting_data, changed_keys_by_object_id
|
||||
|
||||
def get_entities(self, session, interesting_data):
|
||||
entities = session.query(
|
||||
|
|
@ -213,17 +347,21 @@ class PushFrameValuesToTaskEvent(BaseEvent):
|
|||
object_ids.append(self.task_object_id(session))
|
||||
|
||||
attrs = session.query(self.cust_attrs_query.format(
|
||||
self.join_keys(self.interest_attr_mapping.values()),
|
||||
self.join_keys(self.interest_attributes),
|
||||
self.join_keys(object_ids)
|
||||
)).all()
|
||||
|
||||
output = {}
|
||||
hierarchical = {}
|
||||
for attr in attrs:
|
||||
if attr["is_hierarchical"]:
|
||||
hiearchical[attr["key"]] = attr["id"]
|
||||
continue
|
||||
obj_id = attr["object_type_id"]
|
||||
if obj_id not in output:
|
||||
output[obj_id] = {}
|
||||
output[obj_id][attr["key"]] = attr["id"]
|
||||
return output
|
||||
return output, hierarchical
|
||||
|
||||
|
||||
def register(session, plugins_presets):
|
||||
|
|
|
|||
|
|
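# Sketch of the self-change filtering the event handler above introduces
# (illustration only; is_own_change is a hypothetical helper, not part of the
# commit). Values written by this session are remembered in _cached_changes and
# the same value coming back as an ftrack event within _max_delta seconds is
# dropped, so the handler does not react to its own updates.
import datetime

_cached_changes = []   # filled when the handler commits a value
_max_delta = 30        # seconds


def is_own_change(entity_id, attr_key, new_value, now=None):
    """Return True when the event only echoes a value this session just wrote."""
    now = now or datetime.datetime.now()
    for cached in tuple(_cached_changes):
        if (now - cached["time"]).seconds > _max_delta:
            _cached_changes.remove(cached)
            continue
        if (
            cached["entity_id"] == entity_id
            and cached["attr_key"] == attr_key
            and cached["value"] == new_value
        ):
            _cached_changes.remove(cached)
            return True
    return False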
@ -1,7 +1,9 @@
|
|||
import pyblish.api
|
||||
import os
|
||||
import collections
|
||||
|
||||
import pyblish.api
|
||||
from avalon import io
|
||||
|
||||
import pype.api as pype
|
||||
from pprint import pformat
|
||||
|
||||
|
||||
|
|
@ -12,12 +14,13 @@ class AppendCelactionAudio(pyblish.api.ContextPlugin):
|
|||
|
||||
def process(self, context):
|
||||
self.log.info('Collecting Audio Data')
|
||||
asset_entity = context.data["assetEntity"]
|
||||
asset_doc = context.data["assetEntity"]
|
||||
|
||||
# get all available representations
|
||||
subsets = pype.get_subsets(asset_entity["name"],
|
||||
representations=["audio", "wav"]
|
||||
)
|
||||
subsets = self.get_subsets(
|
||||
asset_doc,
|
||||
representations=["audio", "wav"]
|
||||
)
|
||||
self.log.info(f"subsets is: {pformat(subsets)}")
|
||||
|
||||
if not subsets.get("audioMain"):
|
||||
|
|
@ -39,3 +42,85 @@ class AppendCelactionAudio(pyblish.api.ContextPlugin):
|
|||
'audio_file: {}, has been added to context'.format(audio_file))
|
||||
else:
|
||||
self.log.warning("Couldn't find any audio file on Ftrack.")
|
||||
|
||||
def get_subsets(self, asset_doc, representations):
|
||||
"""
|
||||
Query subsets with filter on name.
|
||||
|
||||
The method will return all found subsets with their latest version
|
||||
and representations. Representations
|
||||
can be filtered.
|
||||
|
||||
Arguments:
|
||||
asset_doc (dict): Asset (shot) mongo document
|
||||
representations (list): list for all representations
|
||||
|
||||
Returns:
|
||||
dict: subsets with version and representations in keys
|
||||
"""
|
||||
|
||||
# Query all subsets for asset
|
||||
subset_docs = io.find({
|
||||
"type": "subset",
|
||||
"parent": asset_doc["_id"]
|
||||
})
|
||||
# Collect all subset ids
|
||||
subset_ids = [
|
||||
subset_doc["_id"]
|
||||
for subset_doc in subset_docs
|
||||
]
|
||||
|
||||
# Check if we found anything
|
||||
assert subset_ids, (
|
||||
"No subsets found. Check correct filter. "
|
||||
"Try this for start `r'.*'`: asset: `{}`"
|
||||
).format(asset_doc["name"])
|
||||
|
||||
# Last version aggregation
|
||||
pipeline = [
|
||||
# Find all versions of those subsets
|
||||
{"$match": {
|
||||
"type": "version",
|
||||
"parent": {"$in": subset_ids}
|
||||
}},
|
||||
# Sorting versions all together
|
||||
{"$sort": {"name": 1}},
|
||||
# Group them by "parent", but only take the last
|
||||
{"$group": {
|
||||
"_id": "$parent",
|
||||
"_version_id": {"$last": "$_id"},
|
||||
"name": {"$last": "$name"}
|
||||
}}
|
||||
]
|
||||
last_versions_by_subset_id = dict()
|
||||
for doc in io.aggregate(pipeline):
|
||||
doc["parent"] = doc["_id"]
|
||||
doc["_id"] = doc.pop("_version_id")
|
||||
last_versions_by_subset_id[doc["parent"]] = doc
|
||||
|
||||
version_docs_by_id = {}
|
||||
for version_doc in last_versions_by_subset_id.values():
|
||||
version_docs_by_id[version_doc["_id"]] = version_doc
|
||||
|
||||
repre_docs = io.find({
|
||||
"type": "representation",
|
||||
"parent": {"$in": list(version_docs_by_id.keys())},
|
||||
"name": {"$in": representations}
|
||||
})
|
||||
repre_docs_by_version_id = collections.defaultdict(list)
|
||||
for repre_doc in repre_docs:
|
||||
version_id = repre_doc["parent"]
|
||||
repre_docs_by_version_id[version_id].append(repre_doc)
|
||||
|
||||
output_dict = {}
|
||||
for version_id, repre_docs in repre_docs_by_version_id.items():
|
||||
version_doc = version_docs_by_id[version_id]
|
||||
subset_id = version_doc["parent"]
|
||||
subset_doc = last_versions_by_subset_id[subset_id]
|
||||
# Store queried docs by subset name
|
||||
output_dict[subset_doc["name"]] = {
|
||||
"representations": repre_docs,
|
||||
"version": version_doc
|
||||
}
|
||||
|
||||
return output_dict
|
||||
|
|
|
|||
|
|
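# Sketch of the "last version" aggregation used by get_subsets() above
# (illustration only; the subset ids are placeholders). Versions are sorted by
# name and $last inside the $group stage keeps only the newest one per subset.
from avalon import io

subset_ids = []  # would be filled from the "subset" documents of the asset
pipeline = [
    {"$match": {"type": "version", "parent": {"$in": subset_ids}}},
    {"$sort": {"name": 1}},
    {"$group": {
        "_id": "$parent",
        "_version_id": {"$last": "$_id"},
        "name": {"$last": "$name"}
    }}
]
for doc in io.aggregate(pipeline):
    print("subset", doc["_id"], "last version", doc["name"])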
@ -2,6 +2,7 @@ import os
|
|||
import re
|
||||
import json
|
||||
import copy
|
||||
import tempfile
|
||||
|
||||
import pype.api
|
||||
import pyblish
|
||||
|
|
@ -26,7 +27,7 @@ class ExtractBurnin(pype.api.Extractor):
|
|||
"hiero",
|
||||
"premiere",
|
||||
"standalonepublisher",
|
||||
"harmony"
|
||||
"harmony",
|
||||
"fusion"
|
||||
]
|
||||
optional = True
|
||||
|
|
@ -227,12 +228,30 @@ class ExtractBurnin(pype.api.Extractor):
|
|||
# Dump data to string
|
||||
dumped_script_data = json.dumps(script_data)
|
||||
|
||||
# Store dumped json to temporary file
|
||||
temporary_json_file = tempfile.NamedTemporaryFile(
|
||||
mode="w", suffix=".json", delete=False
|
||||
)
|
||||
temporary_json_file.write(dumped_script_data)
|
||||
temporary_json_file.close()
|
||||
temporary_json_filepath = temporary_json_file.name.replace(
|
||||
"\\", "/"
|
||||
)
|
||||
|
||||
# Prepare subprocess arguments
|
||||
args = [executable, scriptpath, dumped_script_data]
|
||||
self.log.debug("Executing: {}".format(args))
|
||||
args = [
|
||||
"\"{}\"".format(executable),
|
||||
"\"{}\"".format(scriptpath),
|
||||
"\"{}\"".format(temporary_json_filepath)
|
||||
]
|
||||
subprcs_cmd = " ".join(args)
|
||||
self.log.debug("Executing: {}".format(subprcs_cmd))
|
||||
|
||||
# Run burnin script
|
||||
pype.api.subprocess(args, shell=True, logger=self.log)
|
||||
pype.api.subprocess(subprcs_cmd, shell=True, logger=self.log)
|
||||
|
||||
# Remove the temporary json
|
||||
os.remove(temporary_json_filepath)
|
||||
|
||||
for filepath in temp_data["full_input_paths"]:
|
||||
filepath = filepath.replace("\\", "/")
|
||||
|
|
|
|||
|
|
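# Sketch of the burnin hand-off the hunk above switches to (illustration only):
# the script data is written to a temporary json file and only the quoted file
# path is passed on the command line, which avoids quoting problems with long
# json strings. "python" and "burnin_script.py" stand in for the configured
# executable and script path.
import json
import os
import subprocess
import tempfile

script_data = {"burnin": "example"}   # placeholder payload

with tempfile.NamedTemporaryFile(
    mode="w", suffix=".json", delete=False
) as temp_file:
    json.dump(script_data, temp_file)
json_path = temp_file.name.replace("\\", "/")

command = " ".join([
    "\"{}\"".format("python"),
    "\"{}\"".format("burnin_script.py"),
    "\"{}\"".format(json_path),
])
subprocess.call(command, shell=True)
os.remove(json_path)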
@ -30,7 +30,8 @@ class ExtractReview(pyblish.api.InstancePlugin):
|
|||
"premiere",
|
||||
"harmony",
|
||||
"standalonepublisher",
|
||||
"fusion"
|
||||
"fusion",
|
||||
"tvpaint"
|
||||
]
|
||||
|
||||
# Supported extensions
|
||||
|
|
|
|||
|
|
@ -151,6 +151,7 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
|
|||
deadline_pool_secondary = ""
|
||||
deadline_group = ""
|
||||
deadline_chunk_size = 1
|
||||
deadline_priority = None
|
||||
|
||||
# regex for finding frame number in string
|
||||
R_FRAME_NUMBER = re.compile(r'.+\.(?P<frame>[0-9]+)\..+')
|
||||
|
|
@ -902,7 +903,11 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
|
|||
render_job["Props"]["User"] = context.data.get(
|
||||
"deadlineUser", getpass.getuser())
|
||||
# Priority is now not handled at all
|
||||
render_job["Props"]["Pri"] = instance.data.get("priority")
|
||||
|
||||
if self.deadline_priority:
|
||||
render_job["Props"]["Pri"] = self.deadline_priority
|
||||
else:
|
||||
render_job["Props"]["Pri"] = instance.data.get("priority")
|
||||
|
||||
render_job["Props"]["Env"] = {
|
||||
"FTRACK_API_USER": os.environ.get("FTRACK_API_USER"),
|
||||
|
|
@ -1033,8 +1038,8 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
|
|||
|
||||
anatomy_filled = anatomy.format(template_data)
|
||||
|
||||
if "folder" in anatomy.templates["publish"]:
|
||||
publish_folder = anatomy_filled["publish"]["folder"]
|
||||
if "folder" in anatomy.templates["render"]:
|
||||
publish_folder = anatomy_filled["render"]["folder"]
|
||||
else:
|
||||
# solve deprecated situation when `folder` key is not underneath
|
||||
# `publish` anatomy
|
||||
|
|
@ -1044,7 +1049,7 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
|
|||
" key underneath `publish` (in global of for project `{}`)."
|
||||
).format(project_name))
|
||||
|
||||
file_path = anatomy_filled["publish"]["path"]
|
||||
file_path = anatomy_filled["render"]["path"]
|
||||
# Directory
|
||||
publish_folder = os.path.dirname(file_path)
|
||||
|
||||
|
|
|
|||
|
|
@ -29,6 +29,6 @@ class ValidateFFmpegInstalled(pyblish.api.ContextPlugin):
|
|||
def process(self, context):
|
||||
ffmpeg_path = pype.lib.get_ffmpeg_tool_path("ffmpeg")
|
||||
self.log.info("ffmpeg path: `{}`".format(ffmpeg_path))
|
||||
if self.is_tool("\"{}\"".format(ffmpeg_path)) is False:
|
||||
if self.is_tool("{}".format(ffmpeg_path)) is False:
|
||||
self.log.error("ffmpeg not found in PATH")
|
||||
raise RuntimeError('ffmpeg not installed.')
|
||||
|
|
|
|||
|
|
@ -90,7 +90,7 @@ class ExtractRender(pyblish.api.InstancePlugin):
|
|||
thumbnail_path = os.path.join(path, "thumbnail.png")
|
||||
ffmpeg_path = pype.lib.get_ffmpeg_tool_path("ffmpeg")
|
||||
args = [
|
||||
"\"{}\"".format(ffmpeg_path), "-y",
|
||||
"{}".format(ffmpeg_path), "-y",
|
||||
"-i", os.path.join(path, list(collections[0])[0]),
|
||||
"-vf", "scale=300:-1",
|
||||
"-vframes", "1",
|
||||
|
|
|
|||
|
|
@ -1,61 +0,0 @@
|
|||
import os
|
||||
import acre
|
||||
|
||||
from avalon import api, lib
|
||||
import pype.api as pype
|
||||
from pype.aport import lib as aportlib
|
||||
|
||||
log = pype.Logger().get_logger(__name__, "aport")
|
||||
|
||||
|
||||
class Aport(api.Action):
|
||||
|
||||
name = "aport"
|
||||
label = "Aport - Avalon's Server"
|
||||
icon = "retweet"
|
||||
order = 996
|
||||
|
||||
def is_compatible(self, session):
|
||||
"""Return whether the action is compatible with the session"""
|
||||
if "AVALON_TASK" in session:
|
||||
return True
|
||||
return False
|
||||
|
||||
def process(self, session, **kwargs):
|
||||
"""Implement the behavior for when the action is triggered
|
||||
|
||||
Args:
|
||||
session (dict): environment dictionary
|
||||
|
||||
Returns:
|
||||
Popen instance of newly spawned process
|
||||
|
||||
"""
|
||||
|
||||
with pype.modified_environ(**session):
|
||||
# Get executable by name
|
||||
print(self.name)
|
||||
app = lib.get_application(self.name)
|
||||
executable = lib.which(app["executable"])
|
||||
|
||||
# Run as server
|
||||
arguments = []
|
||||
|
||||
tools_env = acre.get_tools([self.name])
|
||||
env = acre.compute(tools_env)
|
||||
env = acre.merge(env, current_env=dict(os.environ))
|
||||
|
||||
if not env.get('AVALON_WORKDIR', None):
|
||||
os.environ["AVALON_WORKDIR"] = aportlib.get_workdir_template()
|
||||
|
||||
env.update(dict(os.environ))
|
||||
|
||||
try:
|
||||
lib.launch(
|
||||
executable=executable,
|
||||
args=arguments,
|
||||
environment=env
|
||||
)
|
||||
except Exception as e:
|
||||
log.error(e)
|
||||
return
|
||||
|
|
@ -1,83 +0,0 @@
|
|||
import os
|
||||
import acre
|
||||
|
||||
from avalon import api, lib, io
|
||||
import pype.api as pype
|
||||
|
||||
|
||||
class PremierePro(api.Action):
|
||||
|
||||
name = "premiere_2019"
|
||||
label = "Premiere Pro"
|
||||
icon = "premiere_icon"
|
||||
order = 996
|
||||
|
||||
def is_compatible(self, session):
|
||||
"""Return whether the action is compatible with the session"""
|
||||
if "AVALON_TASK" in session:
|
||||
return True
|
||||
return False
|
||||
|
||||
def process(self, session, **kwargs):
|
||||
"""Implement the behavior for when the action is triggered
|
||||
|
||||
Args:
|
||||
session (dict): environment dictionary
|
||||
|
||||
Returns:
|
||||
Popen instance of newly spawned process
|
||||
|
||||
"""
|
||||
|
||||
with pype.modified_environ(**session):
|
||||
# Get executable by name
|
||||
app = lib.get_application(self.name)
|
||||
executable = lib.which(app["executable"])
|
||||
|
||||
# Run as server
|
||||
arguments = []
|
||||
|
||||
tools_env = acre.get_tools([self.name])
|
||||
env = acre.compute(tools_env)
|
||||
env = acre.merge(env, current_env=dict(os.environ))
|
||||
|
||||
if not env.get('AVALON_WORKDIR', None):
|
||||
project_name = env.get("AVALON_PROJECT")
|
||||
anatomy = pype.Anatomy(project_name)
|
||||
os.environ['AVALON_PROJECT'] = project_name
|
||||
io.Session['AVALON_PROJECT'] = project_name
|
||||
|
||||
task_name = os.environ.get(
|
||||
"AVALON_TASK", io.Session["AVALON_TASK"]
|
||||
)
|
||||
asset_name = os.environ.get(
|
||||
"AVALON_ASSET", io.Session["AVALON_ASSET"]
|
||||
)
|
||||
application = lib.get_application(
|
||||
os.environ["AVALON_APP_NAME"]
|
||||
)
|
||||
|
||||
project_doc = io.find_one({"type": "project"})
|
||||
data = {
|
||||
"task": task_name,
|
||||
"asset": asset_name,
|
||||
"project": {
|
||||
"name": project_doc["name"],
|
||||
"code": project_doc["data"].get("code", '')
|
||||
},
|
||||
"hierarchy": pype.get_hierarchy(),
|
||||
"app": application["application_dir"]
|
||||
}
|
||||
anatomy_filled = anatomy.format(data)
|
||||
workdir = anatomy_filled["work"]["folder"]
|
||||
|
||||
os.environ["AVALON_WORKDIR"] = workdir
|
||||
|
||||
env.update(dict(os.environ))
|
||||
|
||||
lib.launch(
|
||||
executable=executable,
|
||||
args=arguments,
|
||||
environment=env
|
||||
)
|
||||
return
|
||||
|
|
@ -20,7 +20,8 @@ class CollectFtrackFamilies(pyblish.api.InstancePlugin):
|
|||
"model",
|
||||
"animation",
|
||||
"look",
|
||||
"rig"
|
||||
"rig",
|
||||
"camera"
|
||||
]
|
||||
|
||||
def process(self, instance):
|
||||
|
|
|
|||
|
|
@ -18,7 +18,12 @@ class CollectRemoveMarked(pyblish.api.ContextPlugin):
|
|||
|
||||
def process(self, context):
|
||||
|
||||
self.log.debug(context)
|
||||
# make ftrack publishable
|
||||
instances_to_remove = []
|
||||
for instance in context:
|
||||
if instance.data.get('remove'):
|
||||
context.remove(instance)
|
||||
instances_to_remove.append(instance)
|
||||
|
||||
for instance in instances_to_remove:
|
||||
context.remove(instance)
|
||||
|
|
|
|||
|
|
@ -254,6 +254,10 @@ class CollectMayaRender(pyblish.api.ContextPlugin):
|
|||
if self.sync_workfile_version:
|
||||
data["version"] = context.data["version"]
|
||||
|
||||
for instance in context:
|
||||
if instance.data['family'] == "workfile":
|
||||
instance.data["version"] = context.data["version"]
|
||||
|
||||
# Apply each user defined attribute as data
|
||||
for attr in cmds.listAttr(layer, userDefined=True) or list():
|
||||
try:
|
||||
|
|
|
|||
|
|
@ -43,33 +43,38 @@ class CollectReview(pyblish.api.InstancePlugin):
|
|||
i = 0
|
||||
for inst in instance.context:
|
||||
|
||||
self.log.debug('processing {}'.format(inst))
|
||||
self.log.debug('processing2 {}'.format(instance.context[i]))
|
||||
self.log.debug('filtering {}'.format(inst))
|
||||
data = instance.context[i].data
|
||||
|
||||
if inst.name == reviewable_subset[0]:
|
||||
if data.get('families'):
|
||||
data['families'].append('review')
|
||||
else:
|
||||
data['families'] = ['review']
|
||||
self.log.debug('adding review family to {}'.format(reviewable_subset))
|
||||
data['review_camera'] = camera
|
||||
# data["publish"] = False
|
||||
data['frameStartFtrack'] = instance.data["frameStartHandle"]
|
||||
data['frameEndFtrack'] = instance.data["frameEndHandle"]
|
||||
data['frameStartHandle'] = instance.data["frameStartHandle"]
|
||||
data['frameEndHandle'] = instance.data["frameEndHandle"]
|
||||
data["frameStart"] = instance.data["frameStart"]
|
||||
data["frameEnd"] = instance.data["frameEnd"]
|
||||
data['handles'] = instance.data.get('handles', None)
|
||||
data['step'] = instance.data['step']
|
||||
data['fps'] = instance.data['fps']
|
||||
data["isolate"] = instance.data["isolate"]
|
||||
cmds.setAttr(str(instance) + '.active', 1)
|
||||
self.log.debug('data {}'.format(instance.context[i].data))
|
||||
instance.context[i].data.update(data)
|
||||
instance.data['remove'] = True
|
||||
i += 1
|
||||
if inst.name != reviewable_subset[0]:
|
||||
self.log.debug('subset name does not match {}'.format(
|
||||
reviewable_subset[0]))
|
||||
i += 1
|
||||
continue
|
||||
|
||||
if data.get('families'):
|
||||
data['families'].append('review')
|
||||
else:
|
||||
data['families'] = ['review']
|
||||
self.log.debug('adding review family to {}'.format(
|
||||
reviewable_subset))
|
||||
data['review_camera'] = camera
|
||||
# data["publish"] = False
|
||||
data['frameStartFtrack'] = instance.data["frameStartHandle"]
|
||||
data['frameEndFtrack'] = instance.data["frameEndHandle"]
|
||||
data['frameStartHandle'] = instance.data["frameStartHandle"]
|
||||
data['frameEndHandle'] = instance.data["frameEndHandle"]
|
||||
data["frameStart"] = instance.data["frameStart"]
|
||||
data["frameEnd"] = instance.data["frameEnd"]
|
||||
data['handles'] = instance.data.get('handles', None)
|
||||
data['step'] = instance.data['step']
|
||||
data['fps'] = instance.data['fps']
|
||||
data["isolate"] = instance.data["isolate"]
|
||||
cmds.setAttr(str(instance) + '.active', 1)
|
||||
self.log.debug('data {}'.format(instance.context[i].data))
|
||||
instance.context[i].data.update(data)
|
||||
instance.data['remove'] = True
|
||||
self.log.debug('instance data {}'.format(instance.data))
|
||||
else:
|
||||
if self.legacy:
|
||||
instance.data['subset'] = task + 'Review'
|
||||
|
|
@ -82,8 +87,10 @@ class CollectReview(pyblish.api.InstancePlugin):
|
|||
instance.data['subset'] = subset
|
||||
|
||||
instance.data['review_camera'] = camera
|
||||
instance.data['frameStartFtrack'] = instance.data["frameStartHandle"]
|
||||
instance.data['frameEndFtrack'] = instance.data["frameEndHandle"]
|
||||
instance.data['frameStartFtrack'] = \
|
||||
instance.data["frameStartHandle"]
|
||||
instance.data['frameEndFtrack'] = \
|
||||
instance.data["frameEndHandle"]
|
||||
|
||||
# make ftrack publishable
|
||||
instance.data["families"] = ['ftrack']
|
||||
|
|
|
|||
|
|
@ -6,7 +6,6 @@ from maya import cmds
|
|||
import pyblish.api
|
||||
|
||||
from pype.hosts.maya import lib
|
||||
from pype.lib import pairwise
|
||||
|
||||
|
||||
SETTINGS = {"renderDensity",
|
||||
|
|
@ -78,7 +77,7 @@ class CollectYetiRig(pyblish.api.InstancePlugin):
|
|||
connections = cmds.ls(connections, long=True) # Ensure long names
|
||||
|
||||
inputs = []
|
||||
for dest, src in pairwise(connections):
|
||||
for dest, src in lib.pairwise(connections):
|
||||
source_node, source_attr = src.split(".", 1)
|
||||
dest_node, dest_attr = dest.split(".", 1)
|
||||
|
||||
|
|
@ -119,7 +118,7 @@ class CollectYetiRig(pyblish.api.InstancePlugin):
|
|||
texture_filenames = []
|
||||
if image_search_paths:
|
||||
|
||||
|
||||
|
||||
# TODO: Somehow this uses OS environment path separator, `:` vs `;`
|
||||
# Later on check whether this is pipeline OS cross-compatible.
|
||||
image_search_paths = [p for p in
|
||||
|
|
@ -127,7 +126,7 @@ class CollectYetiRig(pyblish.api.InstancePlugin):
|
|||
|
||||
# find all ${TOKEN} tokens and replace them with $TOKEN env. variable
|
||||
image_search_paths = self._replace_tokens(image_search_paths)
|
||||
|
||||
|
||||
# List all related textures
|
||||
texture_filenames = cmds.pgYetiCommand(node, listTextures=True)
|
||||
self.log.info("Found %i texture(s)" % len(texture_filenames))
|
||||
|
|
|
|||
|
|
@ -26,7 +26,15 @@ class ExtractCameraAlembic(pype.api.Extractor):
|
|||
# get settings
|
||||
framerange = [instance.data.get("frameStart", 1),
|
||||
instance.data.get("frameEnd", 1)]
|
||||
handles = instance.data.get("handles", 0)
|
||||
handle_start = instance.data.get("handleStart", 0)
|
||||
handle_end = instance.data.get("handleEnd", 0)
|
||||
|
||||
# TODO: deprecated attribute "handles"
|
||||
|
||||
if handle_start is None:
|
||||
handle_start = instance.data.get("handles", 0)
|
||||
handle_end = instance.data.get("handles", 0)
|
||||
|
||||
step = instance.data.get("step", 1.0)
|
||||
bake_to_worldspace = instance.data("bakeToWorldSpace", True)
|
||||
|
||||
|
|
@ -55,8 +63,10 @@ class ExtractCameraAlembic(pype.api.Extractor):
|
|||
|
||||
job_str = ' -selection -dataFormat "ogawa" '
|
||||
job_str += ' -attrPrefix cb'
|
||||
job_str += ' -frameRange {0} {1} '.format(framerange[0] - handles,
|
||||
framerange[1] + handles)
|
||||
job_str += ' -frameRange {0} {1} '.format(framerange[0]
|
||||
- handle_start,
|
||||
framerange[1]
|
||||
+ handle_end)
|
||||
job_str += ' -step {0} '.format(step)
|
||||
|
||||
if bake_to_worldspace:
|
||||
|
|
|
|||
|
|
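# Sketch of the handle handling the hunk above introduces (illustration only):
# separate handleStart/handleEnd values are preferred and the deprecated single
# "handles" value is only used as a fallback when handleStart is explicitly None.
instance_data = {"frameStart": 1001, "frameEnd": 1050, "handles": 5}  # example

handle_start = instance_data.get("handleStart", 0)
handle_end = instance_data.get("handleEnd", 0)
if handle_start is None:
    handle_start = instance_data.get("handles", 0)
    handle_end = instance_data.get("handles", 0)

frame_range = (
    instance_data.get("frameStart", 1) - handle_start,
    instance_data.get("frameEnd", 1) + handle_end,
)
print(frame_range)   # -> (1001, 1050) because handleStart/handleEnd default to 0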
@ -1,12 +1,12 @@
|
|||
# -*- coding: utf-8 -*-
|
||||
"""Extract camera as Maya Scene."""
|
||||
import os
|
||||
import itertools
|
||||
|
||||
from maya import cmds
|
||||
|
||||
import avalon.maya
|
||||
import pype.api
|
||||
from pype.lib import grouper
|
||||
from pype.hosts.maya import lib
|
||||
|
||||
|
||||
|
|
@ -36,6 +36,17 @@ def massage_ma_file(path):
|
|||
f.close()
|
||||
|
||||
|
||||
def grouper(iterable, n, fillvalue=None):
|
||||
"""Collect data into fixed-length chunks or blocks.
|
||||
|
||||
Examples:
|
||||
grouper('ABCDEFG', 3, 'x') --> ABC DEF Gxx
|
||||
|
||||
"""
|
||||
args = [iter(iterable)] * n
|
||||
return itertools.izip_longest(fillvalue=fillvalue, *args)
|
||||
|
||||
|
||||
def unlock(plug):
|
||||
"""Unlocks attribute and disconnects inputs for a plug.
|
||||
|
||||
|
|
@ -107,7 +118,18 @@ class ExtractCameraMayaScene(pype.api.Extractor):
|
|||
|
||||
framerange = [instance.data.get("frameStart", 1),
|
||||
instance.data.get("frameEnd", 1)]
|
||||
handles = instance.data.get("handles", 0)
|
||||
handle_start = instance.data.get("handleStart", 0)
|
||||
handle_end = instance.data.get("handleEnd", 0)
|
||||
|
||||
# TODO: deprecated attribute "handles"
|
||||
|
||||
if handle_start is None:
|
||||
handle_start = instance.data.get("handles", 0)
|
||||
handle_end = instance.data.get("handles", 0)
|
||||
|
||||
range_with_handles = [framerange[0] - handle_start,
|
||||
framerange[1] + handle_end]
|
||||
|
||||
step = instance.data.get("step", 1.0)
|
||||
bake_to_worldspace = instance.data("bakeToWorldSpace", True)
|
||||
|
||||
|
|
@ -121,9 +143,6 @@ class ExtractCameraMayaScene(pype.api.Extractor):
|
|||
cameras = cmds.ls(members, leaf=True, shapes=True, long=True,
|
||||
dag=True, type="camera")
|
||||
|
||||
range_with_handles = [framerange[0] - handles,
|
||||
framerange[1] + handles]
|
||||
|
||||
# validate required settings
|
||||
assert len(cameras) == 1, "Single camera must be found in extraction"
|
||||
assert isinstance(step, float), "Step must be a float value"
|
||||
|
|
|
|||
|
|
@ -41,7 +41,7 @@ def preserve_trim(node):
|
|||
"{}".format(script_start))
|
||||
|
||||
|
||||
def loader_shift(node, frame, relative=True):
|
||||
def loader_shift(node, frame, relative=False):
|
||||
"""Shift global in time by i preserving duration
|
||||
|
||||
This moves the loader by i frames preserving global duration. When relative
|
||||
|
|
@ -61,11 +61,12 @@ def loader_shift(node, frame, relative=True):
|
|||
script_start = nuke.root()["first_frame"].value()
|
||||
|
||||
if relative:
|
||||
node['frame_mode'].setValue("start at")
|
||||
node['frame'].setValue(str(script_start))
|
||||
else:
|
||||
node['frame_mode'].setValue("start at")
|
||||
node['frame'].setValue(str(frame))
|
||||
|
||||
return int(script_start)
|
||||
|
||||
|
||||
class LoadSequence(api.Loader):
|
||||
"""Load image sequence into Nuke"""
|
||||
|
|
@ -73,10 +74,10 @@ class LoadSequence(api.Loader):
|
|||
families = ["render2d", "source", "plate", "render", "prerender", "review"]
|
||||
representations = ["exr", "dpx", "jpg", "jpeg", "png"]
|
||||
|
||||
label = "Load sequence"
|
||||
order = -10
|
||||
icon = "code-fork"
|
||||
color = "orange"
|
||||
label = "Load Image Sequence"
|
||||
order = -20
|
||||
icon = "file-video-o"
|
||||
color = "white"
|
||||
|
||||
def load(self, context, name, namespace, data):
|
||||
from avalon.nuke import (
|
||||
|
|
|
|||
|
|
@ -11,7 +11,7 @@ class ImageLoader(api.Loader):
|
|||
Stores the imported asset in a container named after the asset.
|
||||
"""
|
||||
|
||||
families = ["image"]
|
||||
families = ["image", "render"]
|
||||
representations = ["*"]
|
||||
|
||||
def load(self, context, name=None, namespace=None, data=None):
|
||||
|
|
|
|||
|
|
@ -54,7 +54,7 @@ class ExtractReview(pype.api.Extractor):
|
|||
# Generate thumbnail.
|
||||
thumbnail_path = os.path.join(staging_dir, "thumbnail.jpg")
|
||||
args = [
|
||||
"\"{}\"".format(ffmpeg_path), "-y",
|
||||
"{}".format(ffmpeg_path), "-y",
|
||||
"-i", output_image_path,
|
||||
"-vf", "scale=300:-1",
|
||||
"-vframes", "1",
|
||||
|
|
|
|||
|
|
@ -18,7 +18,7 @@ class CollectInstanceData(pyblish.api.InstancePlugin):
|
|||
|
||||
label = "Collect instance data"
|
||||
order = pyblish.api.CollectorOrder + 0.49
|
||||
families = ["render", "plate"]
|
||||
families = ["render", "plate", "review"]
|
||||
hosts = ["standalonepublisher"]
|
||||
|
||||
def process(self, instance):
|
||||
|
|
|
|||
|
|
@ -57,8 +57,9 @@ class ExtractThumbnailSP(pyblish.api.InstancePlugin):
|
|||
|
||||
elif is_jpeg:
|
||||
# use first frame as thumbnail if is sequence of jpegs
|
||||
staging_dir = thumbnail_repre.get("stagingDir")
|
||||
full_thumbnail_path = os.path.join(staging_dir, file)
|
||||
full_thumbnail_path = os.path.join(
|
||||
thumbnail_repre["stagingDir"], file
|
||||
)
|
||||
self.log.info(
|
||||
"For thumbnail is used file: {}".format(full_thumbnail_path)
|
||||
)
|
||||
|
|
|
|||
150
pype/plugins/tvpaint/create/create_render_layer.py
Normal file
|
|
@ -0,0 +1,150 @@
|
|||
from avalon.tvpaint import pipeline, lib
|
||||
|
||||
|
||||
class CreateRenderlayer(pipeline.Creator):
|
||||
"""Mark layer group as one instance."""
|
||||
name = "render_layer"
|
||||
label = "RenderLayer"
|
||||
family = "renderLayer"
|
||||
icon = "cube"
|
||||
defaults = ["Main"]
|
||||
|
||||
rename_group = True
|
||||
|
||||
subset_template = "{family}_{name}"
|
||||
rename_script_template = (
|
||||
"tv_layercolor \"setcolor\""
|
||||
" {clip_id} {group_id} {r} {g} {b} \"{name}\""
|
||||
)
|
||||
|
||||
def process(self):
|
||||
self.log.debug("Query data from workfile.")
|
||||
instances = pipeline.list_instances()
|
||||
layers_data = lib.layers_data()
|
||||
|
||||
self.log.debug("Checking for selection groups.")
|
||||
# Collect group ids from selection
|
||||
group_ids = set()
|
||||
for layer in layers_data:
|
||||
if layer["selected"]:
|
||||
group_ids.add(layer["group_id"])
|
||||
|
||||
# Raise if there is no selection
|
||||
if not group_ids:
|
||||
raise AssertionError("Nothing is selected.")
|
||||
|
||||
# This creator should run only on one group
|
||||
if len(group_ids) > 1:
|
||||
raise AssertionError("More than one group is in selection.")
|
||||
|
||||
group_id = tuple(group_ids)[0]
|
||||
# If group id is `0` it is `default` group which is invalid
|
||||
if group_id == 0:
|
||||
raise AssertionError(
|
||||
"Selection is not in group. Can't mark selection as Beauty."
|
||||
)
|
||||
|
||||
self.log.debug(f"Selected group id is \"{group_id}\".")
|
||||
self.data["group_id"] = group_id
|
||||
|
||||
family = self.data["family"]
|
||||
# Extract entered name
|
||||
name = self.data["subset"][len(family):]
|
||||
self.log.info(f"Extracted name from subset name \"{name}\".")
|
||||
self.data["name"] = name
|
||||
|
||||
# Change subset name by template
|
||||
subset_name = self.subset_template.format(**{
|
||||
"family": self.family,
|
||||
"name": name
|
||||
})
|
||||
self.log.info(f"New subset name \"{subset_name}\".")
|
||||
self.data["subset"] = subset_name
|
||||
|
||||
# Check for instances of same group
|
||||
existing_instance = None
|
||||
existing_instance_idx = None
|
||||
# Check if subset name is not already taken
|
||||
same_subset_instance = None
|
||||
same_subset_instance_idx = None
|
||||
for idx, instance in enumerate(instances):
|
||||
if instance["family"] == family:
|
||||
if instance["group_id"] == group_id:
|
||||
existing_instance = instance
|
||||
existing_instance_idx = idx
|
||||
elif instance["subset"] == subset_name:
|
||||
same_subset_instance = instance
|
||||
same_subset_instance_idx = idx
|
||||
|
||||
if (
|
||||
same_subset_instance_idx is not None
|
||||
and existing_instance_idx is not None
|
||||
):
|
||||
break
|
||||
|
||||
if same_subset_instance_idx is not None:
|
||||
if self._ask_user_subset_override(same_subset_instance):
|
||||
instances.pop(same_subset_instance_idx)
|
||||
else:
|
||||
return
|
||||
|
||||
if existing_instance is not None:
|
||||
self.log.info(
|
||||
f"Beauty instance for group id {group_id} already exists"
|
||||
", overriding"
|
||||
)
|
||||
instances[existing_instance_idx] = self.data
|
||||
else:
|
||||
instances.append(self.data)
|
||||
|
||||
self.write_instances(instances)
|
||||
|
||||
if not self.rename_group:
|
||||
self.log.info("Group rename function is turned off. Skipping")
|
||||
return
|
||||
|
||||
self.log.debug("Querying groups data from workfile.")
|
||||
groups_data = lib.groups_data()
|
||||
|
||||
self.log.debug("Changing name of the group.")
|
||||
selected_group = None
|
||||
for group_data in groups_data:
|
||||
if group_data["group_id"] == group_id:
|
||||
selected_group = group_data
|
||||
|
||||
# Rename TVPaint group (keep color same)
|
||||
# - groups can't contain spaces
|
||||
new_group_name = name.replace(" ", "_")
|
||||
rename_script = self.rename_script_template.format(
|
||||
clip_id=selected_group["clip_id"],
|
||||
group_id=selected_group["group_id"],
|
||||
r=selected_group["red"],
|
||||
g=selected_group["green"],
|
||||
b=selected_group["blue"],
|
||||
name=new_group_name
|
||||
)
|
||||
lib.execute_george_through_file(rename_script)
|
||||
|
||||
self.log.info(
|
||||
f"Name of group with index {group_id}"
|
||||
f" was changed to \"{new_group_name}\"."
|
||||
)
|
||||
|
||||
def _ask_user_subset_override(self, instance):
|
||||
from Qt.QtWidgets import QMessageBox
|
||||
|
||||
title = "Subset \"{}\" already exist".format(instance["subset"])
|
||||
text = (
|
||||
"Instance with subset name \"{}\" already exists."
|
||||
"\n\nDo you want to override existing?"
|
||||
).format(instance["subset"])
|
||||
|
||||
dialog = QMessageBox()
|
||||
dialog.setWindowTitle(title)
|
||||
dialog.setText(text)
|
||||
dialog.setStandardButtons(QMessageBox.Yes | QMessageBox.No)
|
||||
dialog.setDefaultButton(QMessageBox.Yes)
|
||||
dialog.exec_()
|
||||
if dialog.result() == QMessageBox.Yes:
|
||||
return True
|
||||
return False
|
||||
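# Sketch of the naming scheme CreateRenderlayer applies (illustration only; the
# entered subset and the clip/group/color values are placeholders). The family
# prefix is stripped from the entered subset, re-applied through subset_template
# and the TVPaint group is renamed to the same, space-free name.
family = "renderLayer"
entered_subset = "renderLayerCharacter"         # hypothetical user input

name = entered_subset[len(family):]             # -> "Character"
subset_name = "{family}_{name}".format(family=family, name=name)
print(subset_name)                              # -> "renderLayer_Character"

group_name = name.replace(" ", "_")             # TVPaint groups cannot hold spaces
rename_script = (
    "tv_layercolor \"setcolor\""
    " {clip_id} {group_id} {r} {g} {b} \"{name}\""
).format(clip_id=1, group_id=2, r=255, g=0, b=0, name=group_name)
print(rename_script)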
105
pype/plugins/tvpaint/create/create_render_pass.py
Normal file
|
|
@ -0,0 +1,105 @@
|
|||
from avalon.tvpaint import pipeline, lib
|
||||
|
||||
|
||||
class CreateRenderPass(pipeline.Creator):
|
||||
"""Render pass is combination of one or more layers from same group.
|
||||
|
||||
Requirement to create Render Pass is to have already created beauty
|
||||
instance. Beauty instance is used as base for subset name.
|
||||
"""
|
||||
name = "render_pass"
|
||||
label = "RenderPass"
|
||||
family = "renderPass"
|
||||
icon = "cube"
|
||||
defaults = ["Main"]
|
||||
|
||||
subset_template = "{family}_{render_layer}_{pass}"
|
||||
|
||||
def process(self):
|
||||
self.log.debug("Query data from workfile.")
|
||||
instances = pipeline.list_instances()
|
||||
layers_data = lib.layers_data()
|
||||
|
||||
self.log.debug("Checking selection.")
|
||||
# Get all selected layers and their group ids
|
||||
group_ids = set()
|
||||
selected_layers = []
|
||||
for layer in layers_data:
|
||||
if layer["selected"]:
|
||||
selected_layers.append(layer)
|
||||
group_ids.add(layer["group_id"])
|
||||
|
||||
# Raise if nothing is selected
|
||||
if not selected_layers:
|
||||
raise AssertionError("Nothing is selected.")
|
||||
|
||||
# Raise if layers from multiple groups are selected
|
||||
if len(group_ids) != 1:
|
||||
raise AssertionError("More than one group is in selection.")
|
||||
|
||||
group_id = tuple(group_ids)[0]
|
||||
self.log.debug(f"Selected group id is \"{group_id}\".")
|
||||
|
||||
# Find beauty instance for selected layers
|
||||
beauty_instance = None
|
||||
for instance in instances:
|
||||
if (
|
||||
instance["family"] == "renderLayer"
|
||||
and instance["group_id"] == group_id
|
||||
):
|
||||
beauty_instance = instance
|
||||
break
|
||||
|
||||
# Beauty is required for this creator so raise if was not found
|
||||
if beauty_instance is None:
|
||||
raise AssertionError("Beauty pass does not exist yet.")
|
||||
|
||||
render_layer = beauty_instance["name"]
|
||||
|
||||
# Extract entered name
|
||||
family = self.data["family"]
|
||||
name = self.data["subset"]
|
||||
# Is this the right way to get the name?
|
||||
name = name[len(family):]
|
||||
self.log.info(f"Extracted name from subset name \"{name}\".")
|
||||
|
||||
self.data["group_id"] = group_id
|
||||
self.data["pass"] = name
|
||||
self.data["render_layer"] = render_layer
|
||||
|
||||
# Collect selected layer ids to be stored into instance
|
||||
layer_ids = [layer["layer_id"] for layer in selected_layers]
|
||||
self.data["layer_ids"] = layer_ids
|
||||
|
||||
# Replace `beauty` in beauty's subset name with entered name
|
||||
subset_name = self.subset_template.format(**{
|
||||
"family": family,
|
||||
"render_layer": render_layer,
|
||||
"pass": name
|
||||
})
|
||||
self.data["subset"] = subset_name
|
||||
self.log.info(f"New subset name is \"{subset_name}\".")
|
||||
|
||||
# Check if same instance already exists
|
||||
existing_instance = None
|
||||
existing_instance_idx = None
|
||||
for idx, instance in enumerate(instances):
|
||||
if (
|
||||
instance["family"] == family
|
||||
and instance["group_id"] == group_id
|
||||
and instance["pass"] == name
|
||||
):
|
||||
existing_instance = instance
|
||||
existing_instance_idx = idx
|
||||
break
|
||||
|
||||
if existing_instance is not None:
|
||||
self.log.info(
|
||||
f"Render pass instance for group id {group_id}"
|
||||
f" and name \"{name}\" already exists, overriding."
|
||||
)
|
||||
instances[existing_instance_idx] = self.data
|
||||
else:
|
||||
instances.append(self.data)
|
||||
|
||||
self.write_instances(instances)
|
||||
18
pype/plugins/tvpaint/create/create_review.py
Normal file
|
|
@ -0,0 +1,18 @@
|
|||
from avalon.tvpaint import pipeline
|
||||
|
||||
|
||||
class CreateReview(pipeline.Creator):
|
||||
"""Review for global review of all layers."""
|
||||
name = "review"
|
||||
label = "Review"
|
||||
family = "review"
|
||||
icon = "cube"
|
||||
defaults = ["Main"]
|
||||
|
||||
def process(self):
|
||||
instances = pipeline.list_instances()
|
||||
for instance in instances:
|
||||
if instance["family"] == self.family:
|
||||
self.log.info("Review family is already Created.")
|
||||
return
|
||||
super(CreateReview, self).process()
|
||||
|
|
@ -1,9 +1,8 @@
|
|||
from avalon import api
|
||||
from avalon.vendor import qargparse
|
||||
from avalon.tvpaint import CommunicatorWrapper
|
||||
from avalon.tvpaint import lib, pipeline
|
||||
|
||||
|
||||
class ImportImage(api.Loader):
|
||||
class ImportImage(pipeline.Loader):
|
||||
"""Load image or image sequence to TVPaint as new layer."""
|
||||
|
||||
families = ["render", "image", "background", "plate"]
|
||||
|
|
@ -80,4 +79,4 @@ class ImportImage(api.Loader):
|
|||
layer_name,
|
||||
load_options_str
|
||||
)
|
||||
return CommunicatorWrapper.execute_george_through_file(george_script)
|
||||
return lib.execute_george_through_file(george_script)
|
||||
|
|
|
|||
244
pype/plugins/tvpaint/load/load_reference_image.py
Normal file
244
pype/plugins/tvpaint/load/load_reference_image.py
Normal file
|
|
@ -0,0 +1,244 @@
|
|||
from avalon.pipeline import get_representation_context
|
||||
from avalon.vendor import qargparse
|
||||
from avalon.tvpaint import lib, pipeline
|
||||
|
||||
|
||||
class LoadImage(pipeline.Loader):
|
||||
"""Load image or image sequence to TVPaint as new layer."""
|
||||
|
||||
families = ["render", "image", "background", "plate"]
|
||||
representations = ["*"]
|
||||
|
||||
label = "Load Image"
|
||||
order = 1
|
||||
icon = "image"
|
||||
color = "white"
|
||||
|
||||
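# The three placeholders are filled in load(): file path, layer name and the
|
||||
# load option flags; the PARSE result (the new layer id) feeds the rename call.
|
||||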
import_script = (
|
||||
"filepath = \"{}\"\n"
|
||||
"layer_name = \"{}\"\n"
|
||||
"tv_loadsequence filepath {}PARSE layer_id\n"
|
||||
"tv_layerrename layer_id layer_name"
|
||||
)
|
||||
|
||||
defaults = {
|
||||
"stretch": True,
|
||||
"timestretch": True,
|
||||
"preload": True
|
||||
}
|
||||
|
||||
options = [
|
||||
qargparse.Boolean(
|
||||
"stretch",
|
||||
label="Stretch to project size",
|
||||
default=True,
|
||||
help="Stretch loaded image/s to project resolution?"
|
||||
),
|
||||
qargparse.Boolean(
|
||||
"timestretch",
|
||||
label="Stretch to timeline length",
|
||||
default=True,
|
||||
help="Clip loaded image/s to timeline length?"
|
||||
),
|
||||
qargparse.Boolean(
|
||||
"preload",
|
||||
label="Preload loaded image/s",
|
||||
default=True,
|
||||
help="Preload image/s?"
|
||||
)
|
||||
]
|
||||
|
||||
def load(self, context, name, namespace, options):
|
||||
stretch = options.get("stretch", self.defaults["stretch"])
|
||||
timestretch = options.get("timestretch", self.defaults["timestretch"])
|
||||
preload = options.get("preload", self.defaults["preload"])
|
||||
|
||||
load_options = []
|
||||
if stretch:
|
||||
load_options.append("\"STRETCH\"")
|
||||
if timestretch:
|
||||
load_options.append("\"TIMESTRETCH\"")
|
||||
if preload:
|
||||
load_options.append("\"PRELOAD\"")
|
||||
|
||||
load_options_str = ""
|
||||
for load_option in load_options:
|
||||
load_options_str += (load_option + " ")
|
||||
|
||||
# Prepare layer name
|
||||
asset_name = context["asset"]["name"]
|
||||
subset_name = context["subset"]["name"]
|
||||
layer_name = self.get_unique_layer_name(asset_name, subset_name)
|
||||
|
||||
# Fill import script with filename and layer name
|
||||
# - filename must not contain backslashes
|
||||
george_script = self.import_script.format(
|
||||
self.fname.replace("\\", "/"),
|
||||
layer_name,
|
||||
load_options_str
|
||||
)
|
||||
|
||||
lib.execute_george_through_file(george_script)
|
||||
|
||||
loaded_layer = None
|
||||
layers = lib.layers_data()
|
||||
for layer in layers:
|
||||
if layer["name"] == layer_name:
|
||||
loaded_layer = layer
|
||||
break
|
||||
|
||||
if loaded_layer is None:
|
||||
raise AssertionError(
|
||||
"Loading probably failed during execution of george script."
|
||||
)
|
||||
|
||||
layer_ids = [loaded_layer["layer_id"]]
|
||||
namespace = namespace or layer_name
|
||||
return pipeline.containerise(
|
||||
name=name,
|
||||
namespace=namespace,
|
||||
layer_ids=layer_ids,
|
||||
context=context,
|
||||
loader=self.__class__.__name__
|
||||
)
|
||||
|
||||
def _remove_layers(self, layer_ids, layers=None):
|
||||
if not layer_ids:
|
||||
return
|
||||
|
||||
if layers is None:
|
||||
layers = lib.layers_data()
|
||||
|
||||
available_ids = set(layer["layer_id"] for layer in layers)
|
||||
layer_ids_to_remove = []
|
||||
|
||||
for layer_id in layer_ids:
|
||||
if layer_id in available_ids:
|
||||
layer_ids_to_remove.append(layer_id)
|
||||
|
||||
if not layer_ids_to_remove:
|
||||
return
|
||||
|
||||
george_script_lines = []
|
||||
for layer_id in layer_ids_to_remove:
|
||||
line = "tv_layerkill {}".format(layer_id)
|
||||
george_script_lines.append(line)
|
||||
george_script = "\n".join(george_script_lines)
|
||||
lib.execute_george_through_file(george_script)
|
||||
|
||||
def remove(self, container):
|
||||
layer_ids = self.layer_ids_from_container(container)
|
||||
self._remove_layers(layer_ids)
|
||||
|
||||
current_containers = pipeline.ls()
|
||||
pop_idx = None
|
||||
for idx, cur_con in enumerate(current_containers):
|
||||
if cur_con["objectName"] == container["objectName"]:
|
||||
pop_idx = idx
|
||||
break
|
||||
|
||||
if pop_idx is None:
|
||||
self.log.warning(
|
||||
"Didn't found container in workfile containers. {}".format(
|
||||
container
|
||||
)
|
||||
)
|
||||
return
|
||||
|
||||
current_containers.pop(pop_idx)
|
||||
pipeline.write_workfile_metadata(
|
||||
pipeline.SECTION_NAME_CONTAINERS, current_containers
|
||||
)
|
||||
|
||||
def switch(self, container, representation):
|
||||
self.update(container, representation)
|
||||
|
||||
def update(self, container, representation):
|
||||
"""Replace container with different version.
|
||||
|
||||
New layers are loaded first. Then the data from the old layers is
|
||||
applied to the new layers. When that is done the old layers are
|
||||
removed.
|
||||
"""
|
||||
# Create new containers first
|
||||
context = get_representation_context(representation)
|
||||
name = container["name"]
|
||||
namespace = container["namespace"]
|
||||
new_container = self.load(context, name, namespace, {})
|
||||
new_layer_ids = self.layer_ids_from_container(new_container)
|
||||
|
||||
# Get layer ids from previous container
|
||||
old_layer_ids = self.layer_ids_from_container(container)
|
||||
|
||||
layers = lib.layers_data()
|
||||
layers_by_id = {
|
||||
layer["layer_id"]: layer
|
||||
for layer in layers
|
||||
}
|
||||
|
||||
old_layers = []
|
||||
new_layers = []
|
||||
for layer_id in old_layer_ids:
|
||||
layer = layers_by_id.get(layer_id)
|
||||
if layer:
|
||||
old_layers.append(layer)
|
||||
|
||||
for layer_id in new_layer_ids:
|
||||
layer = layers_by_id.get(layer_id)
|
||||
if layer:
|
||||
new_layers.append(layer)
|
||||
|
||||
# Prepare a few values
|
||||
new_start_position = None
|
||||
new_group_id = None
|
||||
for layer in old_layers:
|
||||
position = layer["position"]
|
||||
group_id = layer["group_id"]
|
||||
if new_start_position is None:
|
||||
new_start_position = position
|
||||
elif new_start_position > position:
|
||||
new_start_position = position
|
||||
|
||||
if new_group_id is None:
|
||||
new_group_id = group_id
|
||||
elif new_group_id < 0:
|
||||
continue
|
||||
elif new_group_id != group_id:
|
||||
new_group_id = -1
|
||||
|
||||
george_script_lines = []
|
||||
# Put new layers into the same group as the previous container layers
|
||||
# - applied only when all old layers were under the same group
|
||||
if new_group_id is not None and new_group_id > 0:
|
||||
for layer in new_layers:
|
||||
line = "tv_layercolor \"set\" {} {}".format(
|
||||
layer["layer_id"], new_group_id
|
||||
)
|
||||
george_script_lines.append(line)
|
||||
|
||||
# Rename the new layer to the old layer's name
|
||||
# - only if both old and new container have exactly one layer
|
||||
if len(old_layers) == 1 and len(new_layers) == 1:
|
||||
layer_name = old_layers[0]["name"]
|
||||
george_script_lines.append(
|
||||
"tv_layerrename {} \"{}\"".format(
|
||||
new_layers[0]["layer_id"], layer_name
|
||||
)
|
||||
)
|
||||
|
||||
# Change position of the new layer
|
||||
# - this must be done before removing the old layers
|
||||
if len(new_layers) == 1 and new_start_position is not None:
|
||||
new_layer = new_layers[0]
|
||||
george_script_lines.extend([
|
||||
"tv_layerset {}".format(new_layer["layer_id"]),
|
||||
"tv_layermove {}".format(new_start_position)
|
||||
])
|
||||
|
||||
# Execute george scripts if there are any
|
||||
if george_script_lines:
|
||||
george_script = "\n".join(george_script_lines)
|
||||
lib.execute_george_through_file(george_script)
|
||||
|
||||
# Remove old container
|
||||
self.remove(container)
|
||||
172
pype/plugins/tvpaint/publish/collect_instances.py
Normal file
172
pype/plugins/tvpaint/publish/collect_instances.py
Normal file
|
|
@ -0,0 +1,172 @@
|
|||
import json
|
||||
import copy
|
||||
import pyblish.api
|
||||
from avalon import io
|
||||
|
||||
|
||||
class CollectInstances(pyblish.api.ContextPlugin):
|
||||
label = "Collect Instances"
|
||||
order = pyblish.api.CollectorOrder - 1
|
||||
hosts = ["tvpaint"]
|
||||
|
||||
def process(self, context):
|
||||
workfile_instances = context.data["workfileInstances"]
|
||||
|
||||
self.log.debug("Collected ({}) instances:\n{}".format(
|
||||
len(workfile_instances),
|
||||
json.dumps(workfile_instances, indent=4)
|
||||
))
|
||||
|
||||
for instance_data in workfile_instances:
|
||||
instance_data["fps"] = context.data["fps"]
|
||||
|
||||
# Store workfile instance data to instance data
|
||||
instance_data["originData"] = copy.deepcopy(instance_data)
|
||||
# Global instance data modifications
|
||||
# Fill families
|
||||
family = instance_data["family"]
|
||||
# Add `review` family for thumbnail integration
|
||||
instance_data["families"] = [family, "review"]
|
||||
|
||||
# Instance name
|
||||
subset_name = instance_data["subset"]
|
||||
name = instance_data.get("name", subset_name)
|
||||
instance_data["name"] = name
|
||||
|
||||
active = instance_data.get("active", True)
|
||||
instance_data["active"] = active
|
||||
instance_data["publish"] = active
|
||||
# Add representations key
|
||||
instance_data["representations"] = []
|
||||
|
||||
# Different instance creation based on family
|
||||
instance = None
|
||||
if family == "review":
|
||||
# Change subset name
|
||||
task_name = io.Session["AVALON_TASK"]
|
||||
new_subset_name = "{}{}".format(family, task_name.capitalize())
|
||||
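# E.g. a "compositing" task (hypothetical name) results in "reviewCompositing".
|
||||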
instance_data["subset"] = new_subset_name
|
||||
|
||||
instance = context.create_instance(**instance_data)
|
||||
instance.data["layers"] = context.data["layersData"]
|
||||
# Add ftrack family
|
||||
instance.data["families"].append("ftrack")
|
||||
|
||||
elif family == "renderLayer":
|
||||
instance = self.create_render_layer_instance(
|
||||
context, instance_data
|
||||
)
|
||||
elif family == "renderPass":
|
||||
instance = self.create_render_pass_instance(
|
||||
context, instance_data
|
||||
)
|
||||
else:
|
||||
raise AssertionError(
|
||||
"Instance with unknown family \"{}\": {}".format(
|
||||
family, instance_data
|
||||
)
|
||||
)
|
||||
|
||||
frame_start = context.data["frameStart"]
|
||||
frame_end = frame_start
|
||||
for layer in instance.data["layers"]:
|
||||
_frame_end = layer["frame_end"]
|
||||
if _frame_end > frame_end:
|
||||
frame_end = _frame_end
|
||||
|
||||
instance.data["frameStart"] = frame_start
|
||||
instance.data["frameEnd"] = frame_end
|
||||
|
||||
self.log.debug("Created instance: {}\n{}".format(
|
||||
instance, json.dumps(instance.data, indent=4)
|
||||
))
|
||||
|
||||
def create_render_layer_instance(self, context, instance_data):
|
||||
name = instance_data["name"]
|
||||
# Change label
|
||||
subset_name = instance_data["subset"]
|
||||
instance_data["label"] = "{}_Beauty".format(name)
|
||||
|
||||
# Change subset name
|
||||
# Final family of an instance will be `render`
|
||||
new_family = "render"
|
||||
task_name = io.Session["AVALON_TASK"]
|
||||
new_subset_name = "{}{}_{}_Beauty".format(
|
||||
new_family, task_name.capitalize(), name
|
||||
)
|
||||
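# E.g. task "compositing" and beauty instance "L010" (both hypothetical)
|
||||
# result in "renderCompositing_L010_Beauty".
|
||||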
instance_data["subset"] = new_subset_name
|
||||
self.log.debug("Changed subset name \"{}\"->\"{}\"".format(
|
||||
subset_name, new_subset_name
|
||||
))
|
||||
|
||||
# Get all layers in the render layer's group
|
||||
layers_data = context.data["layersData"]
|
||||
group_id = instance_data["group_id"]
|
||||
group_layers = []
|
||||
for layer in layers_data:
|
||||
if layer["group_id"] == group_id and layer["visible"]:
|
||||
group_layers.append(layer)
|
||||
|
||||
if not group_layers:
|
||||
# Should this be handled here?
|
||||
self.log.warning((
|
||||
f"Group with id {group_id} does not contain any layers."
|
||||
f" Instance \"{name}\" not created."
|
||||
))
|
||||
return None
|
||||
|
||||
instance_data["layers"] = group_layers
|
||||
|
||||
# Add ftrack family
|
||||
instance_data["families"].append("ftrack")
|
||||
|
||||
return context.create_instance(**instance_data)
|
||||
|
||||
def create_render_pass_instance(self, context, instance_data):
|
||||
pass_name = instance_data["pass"]
|
||||
self.log.info(
|
||||
"Creating render pass instance. \"{}\"".format(pass_name)
|
||||
)
|
||||
# Change label
|
||||
render_layer = instance_data["render_layer"]
|
||||
instance_data["label"] = "{}_{}".format(render_layer, pass_name)
|
||||
|
||||
# Change subset name
|
||||
# Final family of an instance will be `render`
|
||||
new_family = "render"
|
||||
old_subset_name = instance_data["subset"]
|
||||
task_name = io.Session["AVALON_TASK"]
|
||||
new_subset_name = "{}{}_{}_{}".format(
|
||||
new_family, task_name.capitalize(), render_layer, pass_name
|
||||
)
|
||||
instance_data["subset"] = new_subset_name
|
||||
self.log.debug("Changed subset name \"{}\"->\"{}\"".format(
|
||||
old_subset_name, new_subset_name
|
||||
))
|
||||
|
||||
layers_data = context.data["layersData"]
|
||||
layers_by_id = {
|
||||
layer["layer_id"]: layer
|
||||
for layer in layers_data
|
||||
}
|
||||
|
||||
layer_ids = instance_data["layer_ids"]
|
||||
render_pass_layers = []
|
||||
for layer_id in layer_ids:
|
||||
layer = layers_by_id.get(layer_id)
|
||||
if not layer:
|
||||
self.log.warning(f"Layer with id {layer_id} was not found.")
|
||||
continue
|
||||
|
||||
render_pass_layers.append(layer)
|
||||
|
||||
if not render_pass_layers:
|
||||
name = instance_data["name"]
|
||||
self.log.warning(
|
||||
f"None of the layers from the RenderPass \"{name}\""
|
||||
" exist anymore. Instance not created."
|
||||
)
|
||||
return None
|
||||
|
||||
instance_data["layers"] = render_pass_layers
|
||||
return context.create_instance(**instance_data)
|
||||
66
pype/plugins/tvpaint/publish/collect_workfile_data.py
Normal file
66
pype/plugins/tvpaint/publish/collect_workfile_data.py
Normal file
|
|
@ -0,0 +1,66 @@
|
|||
import json
|
||||
|
||||
import pyblish.api
|
||||
from avalon.tvpaint import pipeline, lib
|
||||
|
||||
|
||||
class CollectWorkfileData(pyblish.api.ContextPlugin):
|
||||
label = "Collect Workfile Data"
|
||||
order = pyblish.api.CollectorOrder - 1.01
|
||||
hosts = ["tvpaint"]
|
||||
|
||||
def process(self, context):
|
||||
self.log.info("Collecting instance data from workfile")
|
||||
instance_data = pipeline.list_instances()
|
||||
self.log.debug(
|
||||
"Instance data:\"{}".format(json.dumps(instance_data, indent=4))
|
||||
)
|
||||
context.data["workfileInstances"] = instance_data
|
||||
|
||||
self.log.info("Collecting layers data from workfile")
|
||||
layers_data = lib.layers_data()
|
||||
self.log.debug(
|
||||
"Layers data:\"{}".format(json.dumps(layers_data, indent=4))
|
||||
)
|
||||
context.data["layersData"] = layers_data
|
||||
|
||||
self.log.info("Collecting groups data from workfile")
|
||||
group_data = lib.groups_data()
|
||||
self.log.debug(
|
||||
"Group data:\"{}".format(json.dumps(group_data, indent=4))
|
||||
)
|
||||
context.data["groupsData"] = group_data
|
||||
|
||||
self.log.info("Collecting scene data from workfile")
|
||||
workfile_info_parts = lib.execute_george("tv_projectinfo").split(" ")
|
||||
|
||||
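# Output order: <path> <width> <height> <pixel aspect> <fps> <field order>
|
||||
# <start frame> - parsed from the end so spaces in the path stay intact.
|
||||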
frame_start = int(workfile_info_parts.pop(-1))
|
||||
field_order = workfile_info_parts.pop(-1)
|
||||
frame_rate = float(workfile_info_parts.pop(-1))
|
||||
pixel_aspect = float(workfile_info_parts.pop(-1))
|
||||
height = int(workfile_info_parts.pop(-1))
|
||||
width = int(workfile_info_parts.pop(-1))
|
||||
workfile_path = " ".join(workfile_info_parts).replace("\"", "")
|
||||
|
||||
# TODO This is not a proper way of getting the last frame
|
||||
# - but a better one is not known yet
|
||||
last_frame = frame_start
|
||||
for layer in layers_data:
|
||||
frame_end = layer["frame_end"]
|
||||
if frame_end > last_frame:
|
||||
last_frame = frame_end
|
||||
|
||||
scene_data = {
|
||||
"currentFile": workfile_path,
|
||||
"sceneWidth": width,
|
||||
"sceneHeight": height,
|
||||
"pixelAspect": pixel_apsect,
|
||||
"frameStart": frame_start,
|
||||
"frameEnd": last_frame,
|
||||
"fps": frame_rate,
|
||||
"fieldOrder": field_order
|
||||
}
|
||||
self.log.debug(
|
||||
"Scene data: {}".format(json.dumps(scene_data, indent=4))
|
||||
)
|
||||
context.data.update(scene_data)
|
||||
352
pype/plugins/tvpaint/publish/extract_sequence.py
Normal file
352
pype/plugins/tvpaint/publish/extract_sequence.py
Normal file
|
|
@ -0,0 +1,352 @@
|
|||
import os
|
||||
import shutil
|
||||
import tempfile
|
||||
|
||||
import pyblish.api
|
||||
from avalon.tvpaint import lib
|
||||
|
||||
|
||||
class ExtractSequence(pyblish.api.Extractor):
|
||||
label = "Extract Sequence"
|
||||
hosts = ["tvpaint"]
|
||||
families = ["review", "renderPass", "renderLayer"]
|
||||
|
||||
save_mode_to_ext = {
|
||||
"avi": ".avi",
|
||||
"bmp": ".bmp",
|
||||
"cin": ".cin",
|
||||
"deep": ".dip",
|
||||
"dps": ".dps",
|
||||
"dpx": ".dpx",
|
||||
"flc": ".fli",
|
||||
"gif": ".gif",
|
||||
"ilbm": ".iff",
|
||||
"jpeg": ".jpg",
|
||||
"pcx": ".pcx",
|
||||
"png": ".png",
|
||||
"psd": ".psd",
|
||||
"qt": ".qt",
|
||||
"rtv": ".rtv",
|
||||
"sun": ".ras",
|
||||
"tiff": ".tiff",
|
||||
"tga": ".tga",
|
||||
"vpb": ".vpb"
|
||||
}
|
||||
sequential_save_mode = {
|
||||
"bmp",
|
||||
"dpx",
|
||||
"ilbm",
|
||||
"jpeg",
|
||||
"png",
|
||||
"sun",
|
||||
"tiff",
|
||||
"tga"
|
||||
}
|
||||
|
||||
default_save_mode = "\"PNG\""
|
||||
save_mode_for_family = {
|
||||
"review": "\"PNG\"",
|
||||
"renderPass": "\"PNG\"",
|
||||
"renderLayer": "\"PNG\"",
|
||||
}
|
||||
|
||||
def process(self, instance):
|
||||
self.log.info(
|
||||
"* Processing instance \"{}\"".format(instance.data["label"])
|
||||
)
|
||||
|
||||
# Get all layers and filter out not visible
|
||||
layers = instance.data["layers"]
|
||||
filtered_layers = [
|
||||
layer
|
||||
for layer in layers
|
||||
if layer["visible"]
|
||||
]
|
||||
layer_ids = [str(layer["layer_id"]) for layer in filtered_layers]
|
||||
if not layer_ids:
|
||||
self.log.info(
|
||||
f"None of the layers from the instance"
|
||||
" are visible. Extraction skipped."
|
||||
)
|
||||
return
|
||||
|
||||
self.log.debug(
|
||||
"Instance has {} layers with ids: {}".format(
|
||||
len(layer_ids), ", ".join(layer_ids)
|
||||
)
|
||||
)
|
||||
# This is a plugin attribute cleanup method
|
||||
self._prepare_save_modes()
|
||||
|
||||
family_lowered = instance.data["family"].lower()
|
||||
save_mode = self.save_mode_for_family.get(
|
||||
family_lowered, self.default_save_mode
|
||||
)
|
||||
save_mode_type = self._get_save_mode_type(save_mode)
|
||||
|
||||
if save_mode_type not in self.sequential_save_mode:
|
||||
raise AssertionError((
|
||||
"Plugin can export only sequential frame output"
|
||||
" but save mode for family \"{}\" is not for sequence > {} <"
|
||||
).format(instance.data["family"], save_mode))
|
||||
|
||||
frame_start = instance.data["frameStart"]
|
||||
frame_end = instance.data["frameEnd"]
|
||||
|
||||
filename_template = self._get_filename_template(
|
||||
save_mode_type, save_mode, frame_end
|
||||
)
|
||||
ext = os.path.splitext(filename_template)[1].replace(".", "")
|
||||
|
||||
self.log.debug(
|
||||
"Using save mode > {} < and file template \"{}\"".format(
|
||||
save_mode, filename_template
|
||||
)
|
||||
)
|
||||
|
||||
# Save to staging dir
|
||||
output_dir = instance.data.get("stagingDir")
|
||||
if not output_dir:
|
||||
# Create temp folder if staging dir is not set
|
||||
output_dir = tempfile.mkdtemp().replace("\\", "/")
|
||||
instance.data["stagingDir"] = output_dir
|
||||
|
||||
self.log.debug(
|
||||
"Files will be rendered to folder: {}".format(output_dir)
|
||||
)
|
||||
|
||||
thumbnail_filename = "thumbnail"
|
||||
|
||||
# Render output
|
||||
output_files_by_frame = self.render(
|
||||
save_mode, filename_template, output_dir,
|
||||
filtered_layers, frame_start, frame_end, thumbnail_filename
|
||||
)
|
||||
thumbnail_fullpath = output_files_by_frame.pop(
|
||||
thumbnail_filename, None
|
||||
)
|
||||
|
||||
# Fill gaps in sequence
|
||||
self.fill_missing_frames(
|
||||
output_files_by_frame,
|
||||
frame_start,
|
||||
frame_end,
|
||||
filename_template
|
||||
)
|
||||
|
||||
# Fill tags and new families
|
||||
tags = []
|
||||
if family_lowered in ("review", "renderlayer"):
|
||||
# Add `ftrackreview` tag
|
||||
tags.append("ftrackreview")
|
||||
|
||||
repre_files = [
|
||||
os.path.basename(filepath)
|
||||
for filepath in output_files_by_frame.values()
|
||||
]
|
||||
new_repre = {
|
||||
"name": ext,
|
||||
"ext": ext,
|
||||
"files": repre_files,
|
||||
"stagingDir": output_dir,
|
||||
"frameStart": frame_start,
|
||||
"frameEnd": frame_end,
|
||||
"tags": tags
|
||||
}
|
||||
self.log.debug("Creating new representation: {}".format(new_repre))
|
||||
|
||||
instance.data["representations"].append(new_repre)
|
||||
|
||||
if family_lowered in ("renderpass", "renderlayer"):
|
||||
# Change family to render
|
||||
instance.data["family"] = "render"
|
||||
|
||||
if not thumbnail_fullpath:
|
||||
return
|
||||
|
||||
# Create thumbnail representation
|
||||
thumbnail_repre = {
|
||||
"name": "thumbnail",
|
||||
"ext": ext,
|
||||
"files": os.path.basename(thumbnail_fullpath),
|
||||
"stagingDir": output_dir,
|
||||
"tags": ["thumbnail"]
|
||||
}
|
||||
instance.data["representations"].append(thumbnail_repre)
|
||||
|
||||
def _prepare_save_modes(self):
|
||||
"""Lower family names in keys and skip empty values."""
|
||||
new_specifications = {}
|
||||
for key, value in self.save_mode_for_family.items():
|
||||
if value:
|
||||
new_specifications[key.lower()] = value
|
||||
else:
|
||||
self.log.warning((
|
||||
"Save mode for family \"{}\" has empty value."
|
||||
" The family will use default save mode: > {} <."
|
||||
).format(key, self.default_save_mode))
|
||||
self.save_mode_for_family = new_specifications
|
||||
|
||||
def _get_save_mode_type(self, save_mode):
|
||||
"""Extract type of save mode.
|
||||
|
||||
Helps to define the output file extension.
|
||||
"""
|
||||
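# E.g. '"PNG"' becomes "png"; anything after the first space (additional
|
||||
# save mode arguments) is dropped.
|
||||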
save_mode_type = (
|
||||
save_mode.lower()
|
||||
.split(" ")[0]
|
||||
.replace("\"", "")
|
||||
)
|
||||
self.log.debug("Save mode type is \"{}\"".format(save_mode_type))
|
||||
return save_mode_type
|
||||
|
||||
def _get_filename_template(self, save_mode_type, save_mode, frame_end):
|
||||
"""Get filetemplate for rendered files.
|
||||
|
||||
This is simple template contains `{frame}{ext}` for sequential outputs
|
||||
and `single_file{ext}` for single file output. Output is rendered to
|
||||
temporary folder so filename should not matter as integrator change
|
||||
them.
|
||||
"""
|
||||
ext = self.save_mode_to_ext.get(save_mode_type)
|
||||
if ext is None:
|
||||
raise AssertionError((
|
||||
"Couldn't find file extension for TVPaint's save mode: > {} <"
|
||||
).format(save_mode))
|
||||
|
||||
frame_padding = 4
|
||||
frame_end_str_len = len(str(frame_end))
|
||||
if frame_end_str_len > frame_padding:
|
||||
frame_padding = frame_end_str_len
|
||||
|
||||
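# E.g. frame_end 1000 with PNG save mode results in "{frame:0>4}.png".
|
||||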
return "{{frame:0>{}}}".format(frame_padding) + ext
|
||||
|
||||
def render(
|
||||
self, save_mode, filename_template, output_dir, layers,
|
||||
first_frame, last_frame, thumbnail_filename
|
||||
):
|
||||
""" Export images from TVPaint.
|
||||
|
||||
Args:
|
||||
save_mode (str): Argument for `tv_savemode` george script function.
|
||||
More about save mode in documentation.
|
||||
filename_template (str): Filename template of an output. Template
|
||||
should already contain extension. Template may contain only
|
||||
keyword argument `{frame}` or index argument (for same value).
|
||||
Extension in template must match `save_mode`.
|
||||
layers (list): List of layers to be exported.
|
||||
first_frame (int): Starting frame from which export will begin.
|
||||
last_frame (int): On which frame export will end.
|
||||
|
||||
Returns:
|
||||
dict: Mapping frame to output filepath.
|
||||
"""
|
||||
|
||||
# Add save mode arguments to function
|
||||
save_mode = "tv_SaveMode {}".format(save_mode)
|
||||
|
||||
# Map layers by position
|
||||
layers_by_position = {
|
||||
layer["position"]: layer
|
||||
for layer in layers
|
||||
}
|
||||
|
||||
# Sort layer positions in reverse order
|
||||
sorted_positions = list(reversed(sorted(layers_by_position.keys())))
|
||||
if not sorted_positions:
|
||||
return
|
||||
|
||||
# Create temporary layer
|
||||
new_layer_id = lib.execute_george("tv_layercreate _tmp_layer")
|
||||
|
||||
# Merge layers to temp layer
|
||||
george_script_lines = []
|
||||
# Set the new temporary layer as current
|
||||
george_script_lines.append("tv_layerset {}".format(new_layer_id))
|
||||
for position in sorted_positions:
|
||||
layer = layers_by_position[position]
|
||||
george_script_lines.append(
|
||||
"tv_layermerge {}".format(layer["layer_id"])
|
||||
)
|
||||
|
||||
lib.execute_george_through_file("\n".join(george_script_lines))
|
||||
|
||||
# Frames with keyframe
|
||||
exposure_frames = lib.get_exposure_frames(
|
||||
new_layer_id, first_frame, last_frame
|
||||
)
|
||||
|
||||
# TODO what if there are no exposure frames?
|
||||
# - this forces the first frame to always be exported
|
||||
if first_frame not in exposure_frames:
|
||||
exposure_frames.insert(0, first_frame)
|
||||
|
||||
# Restart george script lines
|
||||
george_script_lines = []
|
||||
george_script_lines.append(save_mode)
|
||||
|
||||
all_output_files = {}
|
||||
for frame in exposure_frames:
|
||||
filename = filename_template.format(frame, frame=frame)
|
||||
dst_path = "/".join([output_dir, filename])
|
||||
all_output_files[frame] = os.path.normpath(dst_path)
|
||||
|
||||
# Go to frame
|
||||
george_script_lines.append("tv_layerImage {}".format(frame))
|
||||
# Store image to output
|
||||
george_script_lines.append("tv_saveimage \"{}\"".format(dst_path))
|
||||
|
||||
# Export thumbnail
|
||||
if thumbnail_filename:
|
||||
basename, ext = os.path.splitext(thumbnail_filename)
|
||||
if not ext:
|
||||
ext = ".png"
|
||||
thumbnail_fullpath = "/".join([output_dir, basename + ext])
|
||||
all_output_files[thumbnail_filename] = thumbnail_fullpath
|
||||
# Force save mode to png for thumbnail
|
||||
george_script_lines.append("tv_SaveMode \"PNG\"")
|
||||
# Go to frame
|
||||
george_script_lines.append("tv_layerImage {}".format(first_frame))
|
||||
# Store image to output
|
||||
george_script_lines.append(
|
||||
"tv_saveimage \"{}\"".format(thumbnail_fullpath)
|
||||
)
|
||||
|
||||
# Delete temporary layer
|
||||
george_script_lines.append("tv_layerkill {}".format(new_layer_id))
|
||||
|
||||
lib.execute_george_through_file("\n".join(george_script_lines))
|
||||
|
||||
return all_output_files
|
||||
|
||||
def fill_missing_frames(
|
||||
self, filepaths_by_frame, first_frame, last_frame, filename_template
|
||||
):
|
||||
"""Fill not rendered frames with previous frame.
|
||||
|
||||
Extractor is rendering only frames with keyframes (exposure frames) to
|
||||
get output faster which means there may be gaps between frames.
|
||||
This function fill the missing frames.
|
||||
"""
|
||||
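# E.g. rendered frames {1: "0001.png", 4: "0004.png"} with range 1-5: frames
|
||||
# 2 and 3 become copies of 0001.png and frame 5 a copy of 0004.png.
|
||||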
output_dir = None
|
||||
previous_frame_filepath = None
|
||||
for frame in range(first_frame, last_frame + 1):
|
||||
if frame in filepaths_by_frame:
|
||||
previous_frame_filepath = filepaths_by_frame[frame]
|
||||
continue
|
||||
|
||||
elif previous_frame_filepath is None:
|
||||
self.log.warning(
|
||||
"No frames to fill. Seems like nothing was exported."
|
||||
)
|
||||
break
|
||||
|
||||
if output_dir is None:
|
||||
output_dir = os.path.dirname(previous_frame_filepath)
|
||||
|
||||
filename = filename_template.format(frame=frame)
|
||||
space_filepath = os.path.normpath(
|
||||
os.path.join(output_dir, filename)
|
||||
)
|
||||
filepaths_by_frame[frame] = space_filepath
|
||||
shutil.copy(previous_frame_filepath, space_filepath)
|
||||
76
pype/plugins/tvpaint/publish/validate_frame_range.py
Normal file
76
pype/plugins/tvpaint/publish/validate_frame_range.py
Normal file
|
|
@ -0,0 +1,76 @@
|
|||
import collections
|
||||
import pyblish.api
|
||||
|
||||
|
||||
class ValidateLayersGroup(pyblish.api.InstancePlugin):
|
||||
"""Validate group ids of renderPass layers.
|
||||
|
||||
Validates that all layers are in the same group as they were during creation.
|
||||
"""
|
||||
|
||||
label = "Validate Layers Group"
|
||||
order = pyblish.api.ValidatorOrder
|
||||
families = ["renderPass"]
|
||||
|
||||
def process(self, instance):
|
||||
# Prepare layers
|
||||
layers_data = instance.context.data["layersData"]
|
||||
layers_by_id = {
|
||||
layer["layer_id"]: layer
|
||||
for layer in layers_data
|
||||
}
|
||||
|
||||
# Expected group id for instance layers
|
||||
group_id = instance.data["group_id"]
|
||||
# Layers ids of an instance
|
||||
layer_ids = instance.data["layer_ids"]
|
||||
# Check if all layers from render pass are in right group
|
||||
invalid_layers_by_group_id = collections.defaultdict(list)
|
||||
for layer_id in layer_ids:
|
||||
layer = layers_by_id.get(layer_id)
|
||||
_group_id = layer["group_id"]
|
||||
if _group_id != group_id:
|
||||
invalid_layers_by_group_id[_group_id].append(layer)
|
||||
|
||||
# Everything is OK and skip exception
|
||||
if not invalid_layers_by_group_id:
|
||||
return
|
||||
|
||||
# Exception message preparations
|
||||
groups_data = instance.context.data["groupsData"]
|
||||
groups_by_id = {
|
||||
group["group_id"]: group
|
||||
for group in groups_data
|
||||
}
|
||||
correct_group = groups_by_id[group_id]
|
||||
|
||||
per_group_msgs = []
|
||||
for _group_id, layers in invalid_layers_by_group_id.items():
|
||||
_group = groups_by_id[_group_id]
|
||||
layers_msgs = []
|
||||
for layer in layers:
|
||||
layers_msgs.append(
|
||||
"\"{}\" (id: {})".format(layer["name"], layer["layer_id"])
|
||||
)
|
||||
per_group_msgs.append(
|
||||
"Group \"{}\" (id: {}) < {} >".format(
|
||||
_group["name"],
|
||||
_group["group_id"],
|
||||
", ".join(layers_msgs)
|
||||
)
|
||||
)
|
||||
|
||||
# Raise an error
|
||||
raise AssertionError((
|
||||
# Short message
|
||||
"Layers in wrong group."
|
||||
# Description what's wrong
|
||||
" Layers from render pass \"{}\" must be in group {} (id: {})."
|
||||
# Detailed message
|
||||
" Layers in wrong group: {}"
|
||||
).format(
|
||||
instance.data["label"],
|
||||
correct_group["name"],
|
||||
correct_group["group_id"],
|
||||
" | ".join(per_group_msgs)
|
||||
))
|
||||
|
|
@ -191,7 +191,7 @@ def switch(asset_name, filepath=None, new=True):
|
|||
representations = []
|
||||
for container in containers:
|
||||
try:
|
||||
representation = pype.switch_item(container,
|
||||
representation = fusion_lib.switch_item(container,
|
||||
asset_name=asset_name)
|
||||
representations.append(representation)
|
||||
except Exception as e:
|
||||
|
|
|
|||
|
|
@ -213,9 +213,7 @@ class ModifiedBurnins(ffmpeg_burnins.Burnins):
|
|||
if frame_start is None:
|
||||
replacement_final = replacement_size = str(MISSING_KEY_VALUE)
|
||||
else:
|
||||
replacement_final = "\\'{}\\'".format(
|
||||
r'%%{eif\:n+%d\:d}' % frame_start
|
||||
)
|
||||
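# ffmpeg drawtext expansion: %{eif:n+<start>:d} prints the current frame
|
||||
# index n offset by frame_start, formatted as a decimal integer.
|
||||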
replacement_final = "%{eif:n+" + str(frame_start) + ":d}"
|
||||
replacement_size = str(frame_end)
|
||||
|
||||
final_text = final_text.replace(
|
||||
|
|
@ -328,11 +326,13 @@ class ModifiedBurnins(ffmpeg_burnins.Burnins):
|
|||
|
||||
_stdout, _stderr = proc.communicate()
|
||||
if _stdout:
|
||||
print(_stdout.decode("utf-8"))
|
||||
for line in _stdout.split(b"\r\n"):
|
||||
print(line.decode("utf-8"))
|
||||
|
||||
# This will probably never happen as ffmpeg uses stdout
|
||||
if _stderr:
|
||||
print(_stderr.decode("utf-8"))
|
||||
for line in _stderr.split(b"\r\n"):
|
||||
print(line.decode("utf-8"))
|
||||
|
||||
if proc.returncode != 0:
|
||||
raise RuntimeError(
|
||||
|
|
@ -578,7 +578,10 @@ def burnins_from_data(
|
|||
|
||||
if __name__ == "__main__":
|
||||
print("* Burnin script started")
|
||||
in_data = json.loads(sys.argv[-1])
|
||||
in_data_json_path = sys.argv[-1]
|
||||
with open(in_data_json_path, "r") as file_stream:
|
||||
in_data = json.load(file_stream)
|
||||
|
||||
burnins_from_data(
|
||||
in_data["input"],
|
||||
in_data["output"],
|
||||
|
|
|
|||
4
pype/tests/README.md
Normal file
4
pype/tests/README.md
Normal file
|
|
@ -0,0 +1,4 @@
|
|||
Tests for Pype
|
||||
--------------
|
||||
Trigger them with:
|
||||
`pype test --pype`
|
||||
39
pype/tests/test_lib_restructuralization.py
Normal file
39
pype/tests/test_lib_restructuralization.py
Normal file
|
|
@ -0,0 +1,39 @@
|
|||
# Tests for backward compatibility after restructuring lib.py into the lib library
|
||||
# Contains simple imports that should still work
|
||||
|
||||
|
||||
def test_backward_compatibility(printer):
|
||||
printer("Test if imports still work")
|
||||
try:
|
||||
from pype.lib import filter_pyblish_plugins
|
||||
from pype.lib import execute_hook
|
||||
from pype.lib import PypeHook
|
||||
|
||||
from pype.lib import get_latest_version
|
||||
from pype.lib import ApplicationLaunchFailed
|
||||
from pype.lib import launch_application
|
||||
from pype.lib import ApplicationAction
|
||||
from pype.lib import get_avalon_database
|
||||
from pype.lib import set_io_database
|
||||
|
||||
from pype.lib import get_ffmpeg_tool_path
|
||||
from pype.lib import get_last_version_from_path
|
||||
from pype.lib import get_paths_from_environ
|
||||
from pype.lib import get_version_from_path
|
||||
from pype.lib import version_up
|
||||
|
||||
from pype.lib import is_latest
|
||||
from pype.lib import any_outdated
|
||||
from pype.lib import get_asset
|
||||
from pype.lib import get_hierarchy
|
||||
from pype.lib import get_linked_assets
|
||||
from pype.lib import get_latest_version
|
||||
from pype.lib import ffprobe_streams
|
||||
|
||||
from pype.hosts.fusion.lib import switch_item
|
||||
|
||||
from pype.lib import source_hash
|
||||
from pype.lib import _subprocess
|
||||
|
||||
except ImportError as e:
|
||||
raise
|
||||
|
|
@ -222,10 +222,6 @@ QToolButton {
|
|||
background: #444;
|
||||
}
|
||||
|
||||
#Header #ArtistTab {
|
||||
background-image: url("img/tab-home.png");
|
||||
}
|
||||
|
||||
#Header #TerminalTab {
|
||||
background-image: url("img/tab-terminal.png");
|
||||
}
|
||||
|
|
|
|||
|
|
@ -522,168 +522,6 @@ class PluginDelegate(QtWidgets.QStyledItemDelegate):
|
|||
return QtCore.QSize(option.rect.width(), 20)
|
||||
|
||||
|
||||
class ArtistDelegate(QtWidgets.QStyledItemDelegate):
|
||||
"""Delegate used on Artist page"""
|
||||
|
||||
def paint(self, painter, option, index):
|
||||
"""Paint checkbox and text
|
||||
|
||||
_______________________________________________
|
||||
| | label | duration |arrow|
|
||||
|toggle |_____________________| | to |
|
||||
| | families | |persp|
|
||||
|_______|_____________________|___________|_____|
|
||||
|
||||
"""
|
||||
|
||||
# Layout
|
||||
spacing = 10
|
||||
|
||||
body_rect = QtCore.QRectF(option.rect).adjusted(2, 2, -8, -2)
|
||||
content_rect = body_rect.adjusted(5, 5, -5, -5)
|
||||
|
||||
perspective_rect = QtCore.QRectF(body_rect)
|
||||
perspective_rect.setWidth(35)
|
||||
perspective_rect.setHeight(35)
|
||||
perspective_rect.translate(
|
||||
content_rect.width() - (perspective_rect.width() / 2) + 10,
|
||||
(content_rect.height() / 2) - (perspective_rect.height() / 2)
|
||||
)
|
||||
|
||||
toggle_rect = QtCore.QRectF(body_rect)
|
||||
toggle_rect.setWidth(7)
|
||||
toggle_rect.adjust(1, 1, 0, -1)
|
||||
|
||||
icon_rect = QtCore.QRectF(content_rect)
|
||||
icon_rect.translate(toggle_rect.width() + spacing, 3)
|
||||
icon_rect.setWidth(35)
|
||||
icon_rect.setHeight(35)
|
||||
|
||||
duration_rect = QtCore.QRectF(content_rect)
|
||||
duration_rect.translate(content_rect.width() - 50, 0)
|
||||
|
||||
# Colors
|
||||
check_color = colors["idle"]
|
||||
|
||||
publish_states = index.data(Roles.PublishFlagsRole)
|
||||
if publish_states is None:
|
||||
return
|
||||
if publish_states & InstanceStates.InProgress:
|
||||
check_color = colors["active"]
|
||||
|
||||
elif publish_states & InstanceStates.HasError:
|
||||
check_color = colors["error"]
|
||||
|
||||
elif publish_states & InstanceStates.HasWarning:
|
||||
check_color = colors["warning"]
|
||||
|
||||
elif publish_states & InstanceStates.HasFinished:
|
||||
check_color = colors["ok"]
|
||||
|
||||
elif not index.data(Roles.IsEnabledRole):
|
||||
check_color = colors["inactive"]
|
||||
|
||||
perspective_icon = icons["angle-right"]
|
||||
|
||||
if not index.data(QtCore.Qt.CheckStateRole):
|
||||
font_color = colors["inactive"]
|
||||
else:
|
||||
font_color = colors["idle"]
|
||||
|
||||
if (
|
||||
option.state
|
||||
& (
|
||||
QtWidgets.QStyle.State_MouseOver
|
||||
or QtWidgets.QStyle.State_Selected
|
||||
)
|
||||
):
|
||||
perspective_color = colors["idle"]
|
||||
else:
|
||||
perspective_color = colors["inactive"]
|
||||
# Maintain reference to state, so we can restore it once we're done
|
||||
painter.save()
|
||||
|
||||
# Draw background
|
||||
painter.fillRect(body_rect, colors["hover"])
|
||||
|
||||
# Draw icon
|
||||
icon = index.data(QtCore.Qt.DecorationRole)
|
||||
|
||||
painter.setFont(fonts["largeAwesome"])
|
||||
painter.setPen(QtGui.QPen(font_color))
|
||||
painter.drawText(icon_rect, icon)
|
||||
|
||||
# Draw label
|
||||
painter.setFont(fonts["h3"])
|
||||
label_rect = QtCore.QRectF(content_rect)
|
||||
label_x_offset = icon_rect.width() + spacing
|
||||
label_rect.translate(
|
||||
label_x_offset,
|
||||
0
|
||||
)
|
||||
metrics = painter.fontMetrics()
|
||||
label_rect.setHeight(metrics.lineSpacing())
|
||||
label_rect.setWidth(
|
||||
content_rect.width()
|
||||
- label_x_offset
|
||||
- perspective_rect.width()
|
||||
)
|
||||
# Elide label
|
||||
label = index.data(QtCore.Qt.DisplayRole)
|
||||
label = metrics.elidedText(
|
||||
label, QtCore.Qt.ElideRight, label_rect.width()
|
||||
)
|
||||
painter.drawText(label_rect, label)
|
||||
|
||||
# Draw families
|
||||
painter.setFont(fonts["h5"])
|
||||
painter.setPen(QtGui.QPen(colors["inactive"]))
|
||||
|
||||
families = ", ".join(index.data(Roles.FamiliesRole))
|
||||
families = painter.fontMetrics().elidedText(
|
||||
families, QtCore.Qt.ElideRight, label_rect.width()
|
||||
)
|
||||
|
||||
families_rect = QtCore.QRectF(label_rect)
|
||||
families_rect.translate(0, label_rect.height() + spacing)
|
||||
|
||||
painter.drawText(families_rect, families)
|
||||
|
||||
painter.setFont(fonts["largeAwesome"])
|
||||
painter.setPen(QtGui.QPen(perspective_color))
|
||||
painter.drawText(perspective_rect, perspective_icon)
|
||||
|
||||
# Draw checkbox
|
||||
pen = QtGui.QPen(check_color, 1)
|
||||
painter.setPen(pen)
|
||||
|
||||
if index.data(Roles.IsOptionalRole):
|
||||
painter.drawRect(toggle_rect)
|
||||
|
||||
if index.data(QtCore.Qt.CheckStateRole):
|
||||
painter.fillRect(toggle_rect, check_color)
|
||||
|
||||
elif (
|
||||
index.data(QtCore.Qt.CheckStateRole)
|
||||
):
|
||||
painter.fillRect(toggle_rect, check_color)
|
||||
|
||||
if option.state & QtWidgets.QStyle.State_MouseOver:
|
||||
painter.fillRect(body_rect, colors["hover"])
|
||||
|
||||
if option.state & QtWidgets.QStyle.State_Selected:
|
||||
painter.fillRect(body_rect, colors["selected"])
|
||||
|
||||
painter.setPen(colors["outline"])
|
||||
painter.drawRect(body_rect)
|
||||
|
||||
# Ok, we're done, tidy up.
|
||||
painter.restore()
|
||||
|
||||
def sizeHint(self, option, index):
|
||||
return QtCore.QSize(option.rect.width(), 80)
|
||||
|
||||
|
||||
class TerminalItem(QtWidgets.QStyledItemDelegate):
|
||||
"""Delegate used exclusively for the Terminal"""
|
||||
|
||||
|
|
|
|||
Binary file not shown.
|
Before Width: | Height: | Size: 313 B |
|
|
@ -717,15 +717,18 @@ class InstanceModel(QtGui.QStandardItemModel):
|
|||
|
||||
def append(self, instance):
|
||||
new_item = InstanceItem(instance)
|
||||
families = new_item.data(Roles.FamiliesRole)
|
||||
group_item = self.group_items.get(families[0])
|
||||
if not group_item:
|
||||
group_item = GroupItem(families[0])
|
||||
self.appendRow(group_item)
|
||||
self.group_items[families[0]] = group_item
|
||||
self.group_created.emit(group_item.index())
|
||||
if new_item.is_context:
|
||||
self.appendRow(new_item)
|
||||
else:
|
||||
families = new_item.data(Roles.FamiliesRole)
|
||||
group_item = self.group_items.get(families[0])
|
||||
if not group_item:
|
||||
group_item = GroupItem(families[0])
|
||||
self.appendRow(group_item)
|
||||
self.group_items[families[0]] = group_item
|
||||
self.group_created.emit(group_item.index())
|
||||
|
||||
group_item.appendRow(new_item)
|
||||
group_item.appendRow(new_item)
|
||||
instance_id = instance.id
|
||||
self.instance_items[instance_id] = new_item
|
||||
|
||||
|
|
@ -842,162 +845,20 @@ class InstanceModel(QtGui.QStandardItemModel):
|
|||
)
|
||||
|
||||
|
||||
class ArtistProxy(QtCore.QAbstractProxyModel):
|
||||
class InstanceSortProxy(QtCore.QSortFilterProxyModel):
|
||||
def __init__(self, *args, **kwargs):
|
||||
self.mapping_from = []
|
||||
self.mapping_to = []
|
||||
super(ArtistProxy, self).__init__(*args, **kwargs)
|
||||
super(InstanceSortProxy, self).__init__(*args, **kwargs)
|
||||
# Do not care about lower/upper case
|
||||
self.setSortCaseSensitivity(QtCore.Qt.CaseInsensitive)
|
||||
|
||||
def on_rows_inserted(self, parent_index, from_row, to_row):
|
||||
if not parent_index.isValid():
|
||||
return
|
||||
|
||||
parent_row = parent_index.row()
|
||||
if parent_row >= len(self.mapping_from):
|
||||
self.mapping_from.append(list())
|
||||
|
||||
new_from = None
|
||||
new_to = None
|
||||
for row_num in range(from_row, to_row + 1):
|
||||
new_row = len(self.mapping_to)
|
||||
new_to = new_row
|
||||
if new_from is None:
|
||||
new_from = new_row
|
||||
|
||||
self.mapping_from[parent_row].insert(row_num, new_row)
|
||||
self.mapping_to.insert(new_row, [parent_row, row_num])
|
||||
|
||||
self.rowsInserted.emit(self.parent(), new_from, new_to + 1)
|
||||
|
||||
def _remove_rows(self, parent_row, from_row, to_row):
|
||||
increment_num = self.mapping_from[parent_row][from_row]
|
||||
|
||||
to_end_index = len(self.mapping_from[parent_row]) - 1
|
||||
for _idx in range(0, parent_row):
|
||||
to_end_index += len(self.mapping_from[_idx])
|
||||
|
||||
removed_rows = 0
|
||||
_emit_last = None
|
||||
for row_num in reversed(range(from_row, to_row + 1)):
|
||||
row = self.mapping_from[parent_row].pop(row_num)
|
||||
_emit_last = row
|
||||
removed_rows += 1
|
||||
|
||||
_emit_first = int(increment_num)
|
||||
mapping_from_len = len(self.mapping_from)
|
||||
mapping_from_parent_len = len(self.mapping_from[parent_row])
|
||||
if parent_row < mapping_from_len:
|
||||
for idx in range(from_row, mapping_from_parent_len):
|
||||
self.mapping_from[parent_row][idx] = increment_num
|
||||
increment_num += 1
|
||||
|
||||
if parent_row < mapping_from_len - 1:
|
||||
for idx_i in range(parent_row + 1, mapping_from_len):
|
||||
sub_values = self.mapping_from[idx_i]
|
||||
if not sub_values:
|
||||
continue
|
||||
|
||||
for idx_j in range(0, len(sub_values)):
|
||||
self.mapping_from[idx_i][idx_j] = increment_num
|
||||
increment_num += 1
|
||||
|
||||
for idx in range(removed_rows):
|
||||
self.mapping_to.pop(to_end_index - idx)
|
||||
|
||||
return (_emit_first, _emit_last)
|
||||
|
||||
def on_rows_removed(self, parent_index, from_row, to_row):
|
||||
if parent_index.isValid():
|
||||
parent_row = parent_index.row()
|
||||
_emit_first, _emit_last = self._remove_rows(
|
||||
parent_row, from_row, to_row
|
||||
)
|
||||
self.rowsRemoved.emit(self.parent(), _emit_first, _emit_last)
|
||||
|
||||
else:
|
||||
removed_rows = False
|
||||
emit_first = None
|
||||
emit_last = None
|
||||
for row_num in reversed(range(from_row, to_row + 1)):
|
||||
remaining_rows = self.mapping_from[row_num]
|
||||
if remaining_rows:
|
||||
removed_rows = True
|
||||
_emit_first, _emit_last = self._remove_rows(
|
||||
row_num, 0, len(remaining_rows) - 1
|
||||
)
|
||||
if emit_first is None:
|
||||
emit_first = _emit_first
|
||||
emit_last = _emit_last
|
||||
|
||||
self.mapping_from.pop(row_num)
|
||||
|
||||
diff = to_row - from_row + 1
|
||||
mapping_to_len = len(self.mapping_to)
|
||||
if from_row < mapping_to_len:
|
||||
for idx in range(from_row, mapping_to_len):
|
||||
self.mapping_to[idx][0] -= diff
|
||||
|
||||
if removed_rows:
|
||||
self.rowsRemoved.emit(self.parent(), emit_first, emit_last)
|
||||
|
||||
def on_reset(self):
|
||||
self.modelReset.emit()
|
||||
self.mapping_from = []
|
||||
self.mapping_to = []
|
||||
|
||||
def setSourceModel(self, source_model):
|
||||
super(ArtistProxy, self).setSourceModel(source_model)
|
||||
source_model.rowsInserted.connect(self.on_rows_inserted)
|
||||
source_model.rowsRemoved.connect(self.on_rows_removed)
|
||||
source_model.modelReset.connect(self.on_reset)
|
||||
source_model.dataChanged.connect(self.on_data_changed)
|
||||
|
||||
def on_data_changed(self, from_index, to_index, roles=None):
|
||||
proxy_from_index = self.mapFromSource(from_index)
|
||||
if from_index == to_index:
|
||||
proxy_to_index = proxy_from_index
|
||||
else:
|
||||
proxy_to_index = self.mapFromSource(to_index)
|
||||
|
||||
args = [proxy_from_index, proxy_to_index]
|
||||
if Qt.__binding__ not in ("PyQt4", "PySide"):
|
||||
args.append(roles or [])
|
||||
self.dataChanged.emit(*args)
|
||||
|
||||
def columnCount(self, parent=QtCore.QModelIndex()):
|
||||
# This is not right for global proxy, but in this case it is enough
|
||||
return self.sourceModel().columnCount()
|
||||
|
||||
def rowCount(self, parent=QtCore.QModelIndex()):
|
||||
if parent.isValid():
|
||||
return 0
|
||||
return len(self.mapping_to)
|
||||
|
||||
def mapFromSource(self, index):
|
||||
if not index.isValid():
|
||||
return QtCore.QModelIndex()
|
||||
|
||||
parent_index = index.parent()
|
||||
if not parent_index.isValid():
|
||||
return QtCore.QModelIndex()
|
||||
|
||||
parent_idx = self.mapping_from[parent_index.row()]
|
||||
my_row = parent_idx[index.row()]
|
||||
return self.index(my_row, index.column())
|
||||
|
||||
def mapToSource(self, index):
|
||||
if not index.isValid() or index.row() > len(self.mapping_to):
|
||||
return self.sourceModel().index(index.row(), index.column())
|
||||
|
||||
parent_row, item_row = self.mapping_to[index.row()]
|
||||
parent_index = self.sourceModel().index(parent_row, 0)
|
||||
return self.sourceModel().index(item_row, 0, parent_index)
|
||||
|
||||
def index(self, row, column, parent=QtCore.QModelIndex()):
|
||||
return self.createIndex(row, column, QtCore.QModelIndex())
|
||||
|
||||
def parent(self, index=None):
|
||||
return QtCore.QModelIndex()
|
||||
def lessThan(self, x_index, y_index):
|
||||
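# Non-group items (e.g. the context item) always sort before group items;
|
||||
# otherwise fall back to the default case-insensitive comparison.
|
||||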
x_type = x_index.data(Roles.TypeRole)
|
||||
y_type = y_index.data(Roles.TypeRole)
|
||||
if x_type != y_type:
|
||||
if x_type == GroupType:
|
||||
return False
|
||||
return True
|
||||
return super(InstanceSortProxy, self).lessThan(x_index, y_index)
|
||||
|
||||
|
||||
class TerminalDetailItem(QtGui.QStandardItem):
|
||||
|
|
|
|||
|
|
@ -8,7 +8,7 @@ UseLabel = True
|
|||
|
||||
# Customize which tab to start on. Possible choices are: "artist", "overview"
|
||||
# and "terminal".
|
||||
InitialTab = "artist"
|
||||
InitialTab = "overview"
|
||||
|
||||
# Customize the window size.
|
||||
WindowSize = (430, 600)
|
||||
|
|
|
|||
|
|
@ -11,61 +11,6 @@ def _import_widgets():
|
|||
from . import widgets
|
||||
|
||||
|
||||
class ArtistView(QtWidgets.QListView):
|
||||
# An item is requesting to be toggled, with optional forced-state
|
||||
toggled = QtCore.Signal(QtCore.QModelIndex, object)
|
||||
show_perspective = QtCore.Signal(QtCore.QModelIndex)
|
||||
|
||||
def __init__(self, parent=None):
|
||||
super(ArtistView, self).__init__(parent)
|
||||
|
||||
self.horizontalScrollBar().hide()
|
||||
self.viewport().setAttribute(QtCore.Qt.WA_Hover, True)
|
||||
self.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
|
||||
self.setSelectionMode(QtWidgets.QAbstractItemView.ExtendedSelection)
|
||||
self.setResizeMode(QtWidgets.QListView.Adjust)
|
||||
self.setVerticalScrollMode(QtWidgets.QListView.ScrollPerPixel)
|
||||
|
||||
def event(self, event):
|
||||
if not event.type() == QtCore.QEvent.KeyPress:
|
||||
return super(ArtistView, self).event(event)
|
||||
|
||||
elif event.key() == QtCore.Qt.Key_Space:
|
||||
for index in self.selectionModel().selectedIndexes():
|
||||
self.toggled.emit(index, None)
|
||||
|
||||
return True
|
||||
|
||||
elif event.key() == QtCore.Qt.Key_Backspace:
|
||||
for index in self.selectionModel().selectedIndexes():
|
||||
self.toggled.emit(index, False)
|
||||
|
||||
return True
|
||||
|
||||
elif event.key() == QtCore.Qt.Key_Return:
|
||||
for index in self.selectionModel().selectedIndexes():
|
||||
self.toggled.emit(index, True)
|
||||
|
||||
return True
|
||||
|
||||
return super(ArtistView, self).event(event)
|
||||
|
||||
def focusOutEvent(self, event):
|
||||
self.selectionModel().clear()
|
||||
|
||||
def mouseReleaseEvent(self, event):
|
||||
if event.button() == QtCore.Qt.LeftButton:
|
||||
indexes = self.selectionModel().selectedIndexes()
|
||||
if len(indexes) <= 1 and event.pos().x() < 20:
|
||||
for index in indexes:
|
||||
self.toggled.emit(index, None)
|
||||
if len(indexes) == 1 and event.pos().x() > self.width() - 40:
|
||||
for index in indexes:
|
||||
self.show_perspective.emit(index)
|
||||
|
||||
return super(ArtistView, self).mouseReleaseEvent(event)
|
||||
|
||||
|
||||
class OverviewView(QtWidgets.QTreeView):
|
||||
# An item is requesting to be toggled, with optional forced-state
|
||||
toggled = QtCore.Signal(QtCore.QModelIndex, object)
|
||||
|
|
@ -160,6 +105,8 @@ class PluginView(OverviewView):
|
|||
class InstanceView(OverviewView):
|
||||
def __init__(self, *args, **kwargs):
|
||||
super(InstanceView, self).__init__(*args, **kwargs)
|
||||
self.setSortingEnabled(True)
|
||||
self.sortByColumn(0, QtCore.Qt.AscendingOrder)
|
||||
self.viewport().setMouseTracking(True)
|
||||
self._pressed_group_index = None
|
||||
self._pressed_expander = None
|
||||
|
|
|
|||
|
|
@ -97,7 +97,6 @@ class Window(QtWidgets.QDialog):
|
|||
header_widget = QtWidgets.QWidget(parent=main_widget)
|
||||
|
||||
header_tab_widget = QtWidgets.QWidget(header_widget)
|
||||
header_tab_artist = QtWidgets.QRadioButton(header_tab_widget)
|
||||
header_tab_overview = QtWidgets.QRadioButton(header_tab_widget)
|
||||
header_tab_terminal = QtWidgets.QRadioButton(header_tab_widget)
|
||||
header_spacer = QtWidgets.QWidget(header_tab_widget)
|
||||
|
|
@ -125,7 +124,6 @@ class Window(QtWidgets.QDialog):
|
|||
layout_tab = QtWidgets.QHBoxLayout(header_tab_widget)
|
||||
layout_tab.setContentsMargins(0, 0, 0, 0)
|
||||
layout_tab.setSpacing(0)
|
||||
layout_tab.addWidget(header_tab_artist, 0)
|
||||
layout_tab.addWidget(header_tab_overview, 0)
|
||||
layout_tab.addWidget(header_tab_terminal, 0)
|
||||
layout_tab.addWidget(button_suspend_logs_widget, 0)
|
||||
|
|
@ -141,27 +139,6 @@ class Window(QtWidgets.QDialog):
|
|||
|
||||
header_widget.setLayout(layout)
|
||||
|
||||
# Artist Page
|
||||
instance_model = model.InstanceModel(controller)
|
||||
|
||||
artist_page = QtWidgets.QWidget()
|
||||
|
||||
artist_view = view.ArtistView()
|
||||
artist_view.show_perspective.connect(self.toggle_perspective_widget)
|
||||
artist_proxy = model.ArtistProxy()
|
||||
artist_proxy.setSourceModel(instance_model)
|
||||
artist_view.setModel(artist_proxy)
|
||||
|
||||
artist_delegate = delegate.ArtistDelegate()
|
||||
artist_view.setItemDelegate(artist_delegate)
|
||||
|
||||
layout = QtWidgets.QVBoxLayout(artist_page)
|
||||
layout.addWidget(artist_view)
|
||||
layout.setContentsMargins(5, 5, 5, 5)
|
||||
layout.setSpacing(0)
|
||||
|
||||
artist_page.setLayout(layout)
|
||||
|
||||
# Overview Page
|
||||
# TODO add parent
|
||||
overview_page = QtWidgets.QWidget()
|
||||
|
|
@ -172,8 +149,12 @@ class Window(QtWidgets.QDialog):
|
|||
overview_instance_delegate = delegate.InstanceDelegate(
|
||||
parent=overview_instance_view
|
||||
)
|
||||
instance_model = model.InstanceModel(controller)
|
||||
instance_sort_proxy = model.InstanceSortProxy()
|
||||
instance_sort_proxy.setSourceModel(instance_model)
|
||||
|
||||
overview_instance_view.setItemDelegate(overview_instance_delegate)
|
||||
overview_instance_view.setModel(instance_model)
|
||||
overview_instance_view.setModel(instance_sort_proxy)
|
||||
|
||||
overview_plugin_view = view.PluginView(
|
||||
animated=settings.Animated, parent=overview_page
|
||||
|
|
@ -223,7 +204,6 @@ class Window(QtWidgets.QDialog):
|
|||
body_widget = QtWidgets.QWidget(main_widget)
|
||||
layout = QtWidgets.QHBoxLayout(body_widget)
|
||||
layout.setContentsMargins(5, 5, 5, 1)
|
||||
layout.addWidget(artist_page)
|
||||
layout.addWidget(overview_page)
|
||||
layout.addWidget(terminal_page)
|
||||
|
||||
|
|
@ -361,12 +341,10 @@ class Window(QtWidgets.QDialog):
|
|||
"Footer": footer_widget,
|
||||
|
||||
# Pages
|
||||
"Artist": artist_page,
|
||||
"Overview": overview_page,
|
||||
"Terminal": terminal_page,
|
||||
|
||||
# Tabs
|
||||
"ArtistTab": header_tab_artist,
|
||||
"OverviewTab": header_tab_overview,
|
||||
"TerminalTab": header_tab_terminal,
|
||||
|
||||
|
|
@ -399,7 +377,6 @@ class Window(QtWidgets.QDialog):
|
|||
pages_widget,
|
||||
header_widget,
|
||||
body_widget,
|
||||
artist_page,
|
||||
comment_box,
|
||||
overview_page,
|
||||
terminal_page,
|
||||
|
|
@ -415,9 +392,6 @@ class Window(QtWidgets.QDialog):
|
|||
_widget.setAttribute(QtCore.Qt.WA_StyledBackground)
|
||||
|
||||
# Signals
|
||||
header_tab_artist.toggled.connect(
|
||||
lambda: self.on_tab_changed("artist")
|
||||
)
|
||||
header_tab_overview.toggled.connect(
|
||||
lambda: self.on_tab_changed("overview")
|
||||
)
|
||||
|
|
@ -450,7 +424,6 @@ class Window(QtWidgets.QDialog):
|
|||
QtCore.Qt.DirectConnection
|
||||
)
|
||||
|
||||
artist_view.toggled.connect(self.on_instance_toggle)
|
||||
overview_instance_view.toggled.connect(self.on_instance_toggle)
|
||||
overview_plugin_view.toggled.connect(self.on_plugin_toggle)
|
||||
|
||||
|
|
@ -466,9 +439,7 @@ class Window(QtWidgets.QDialog):
|
|||
self.on_plugin_action_menu_requested
|
||||
)
|
||||
|
||||
instance_model.group_created.connect(
|
||||
overview_instance_view.expand
|
||||
)
|
||||
instance_model.group_created.connect(self.on_instance_group_created)
|
||||
|
||||
self.main_widget = main_widget
|
||||
|
||||
|
|
@ -490,9 +461,7 @@ class Window(QtWidgets.QDialog):
|
|||
self.plugin_model = plugin_model
|
||||
self.plugin_proxy = plugin_proxy
|
||||
self.instance_model = instance_model
|
||||
|
||||
self.artist_proxy = artist_proxy
|
||||
self.artist_view = artist_view
|
||||
self.instance_sort_proxy = instance_sort_proxy
|
||||
|
||||
self.presets_button = presets_button
|
||||
|
||||
|
|
@ -510,17 +479,15 @@ class Window(QtWidgets.QDialog):
|
|||
self.perspective_widget = perspective_widget
|
||||
|
||||
self.tabs = {
|
||||
"artist": header_tab_artist,
|
||||
"overview": header_tab_overview,
|
||||
"terminal": header_tab_terminal
|
||||
}
|
||||
self.pages = (
|
||||
("artist", artist_page),
|
||||
("overview", overview_page),
|
||||
("terminal", terminal_page)
|
||||
)
|
||||
|
||||
current_page = settings.InitialTab or "artist"
|
||||
current_page = settings.InitialTab or "overview"
|
||||
self.comment_main_widget.setVisible(
|
||||
not current_page == "terminal"
|
||||
)
|
||||
|
|
@ -620,6 +587,10 @@ class Window(QtWidgets.QDialog):
|
|||
|
||||
self.update_compatibility()
|
||||
|
||||
def on_instance_group_created(self, index):
|
||||
_index = self.instance_sort_proxy.mapFromSource(index)
|
||||
self.overview_instance_view.expand(_index)
|
||||
|
||||
def on_plugin_toggle(self, index, state=None):
|
||||
"""An item is requesting to be toggled"""
|
||||
if not index.data(Roles.IsOptionalRole):
|
||||
|
|
@ -1016,11 +987,14 @@ class Window(QtWidgets.QDialog):
|
|||
|
||||
def on_passed_group(self, order):
|
||||
for group_item in self.instance_model.group_items.values():
|
||||
if self.overview_instance_view.isExpanded(group_item.index()):
|
||||
group_index = self.instance_sort_proxy.mapFromSource(
|
||||
group_item.index()
|
||||
)
|
||||
if self.overview_instance_view.isExpanded(group_index):
|
||||
continue
|
||||
|
||||
if group_item.publish_states & GroupStates.HasError:
|
||||
self.overview_instance_view.expand(group_item.index())
|
||||
self.overview_instance_view.expand(group_index)
|
||||
|
||||
for group_item in self.plugin_model.group_items.values():
|
||||
# TODO check only plugins from the group
|
||||
|
|
@ -1030,19 +1004,16 @@ class Window(QtWidgets.QDialog):
|
|||
if order != group_item.order:
|
||||
continue
|
||||
|
||||
group_index = self.plugin_proxy.mapFromSource(group_item.index())
|
||||
if group_item.publish_states & GroupStates.HasError:
|
||||
self.overview_plugin_view.expand(
|
||||
self.plugin_proxy.mapFromSource(group_item.index())
|
||||
)
|
||||
self.overview_plugin_view.expand(group_index)
|
||||
continue
|
||||
|
||||
group_item.setData(
|
||||
{GroupStates.HasFinished: True},
|
||||
Roles.PublishFlagsRole
|
||||
)
|
||||
self.overview_plugin_view.collapse(
|
||||
self.plugin_proxy.mapFromSource(group_item.index())
|
||||
)
|
||||
self.overview_plugin_view.collapse(group_index)
|
||||
|
||||
def on_was_stopped(self):
|
||||
errored = self.controller.errored
|
||||
|
|
@ -1122,11 +1093,6 @@ class Window(QtWidgets.QDialog):
|
|||
for instance_id in existing_ids:
|
||||
self.instance_model.remove(instance_id)
|
||||
|
||||
if result.get("error"):
|
||||
# Toggle from artist to overview tab on error
|
||||
if self.tabs["artist"].isChecked():
|
||||
self.tabs["overview"].toggle()
|
||||
|
||||
result["records"] = self.terminal_model.prepare_records(
|
||||
result,
|
||||
self._suspend_logs
|
||||
|
|
@ -1274,7 +1240,6 @@ class Window(QtWidgets.QDialog):
|
|||
self.terminal_proxy.deleteLater()
|
||||
self.plugin_proxy.deleteLater()
|
||||
|
||||
self.artist_view.setModel(None)
|
||||
self.overview_instance_view.setModel(None)
|
||||
self.overview_plugin_view.setModel(None)
|
||||
self.terminal_view.setModel(None)
|
||||
|
|
|
|||
|
|
@ -1 +1 @@
|
|||
__version__ = "2.13.4"
|
||||
__version__ = "2.13.6"
|
||||
|
|
|
|||