Mirror of https://github.com/ynput/ayon-core.git (synced 2025-12-24 21:04:40 +01:00)

Commit 6051eac744: Merge remote-tracking branch 'origin/develop' into 3.0/build-improvements

219 changed files with 8008 additions and 13299 deletions
.gitignore (vendored, 6 changes)

@@ -71,6 +71,10 @@ package-lock.json
pype/premiere/ppro/js/debug.log

# Idea
# IDEA
######
.idea/

# VScode files
.vscode/
.env
.gitmodules (vendored, 6 changes)

@@ -15,3 +15,9 @@
	path = repos/acre
	url = git@github.com:antirotor/acre.git
	branch = fix/unformatted-tokens
[submodule "pype/modules/ftrack/python2_vendor/ftrack-python-api"]
	path = pype/modules/ftrack/python2_vendor/ftrack-python-api
	url = https://bitbucket.org/ftrack/ftrack-python-api.git
[submodule "pype/modules/ftrack/python2_vendor/arrow"]
	path = pype/modules/ftrack/python2_vendor/arrow
	url = git@github.com:arrow-py/arrow.git
pype.py (22 changes)

@@ -218,7 +218,7 @@ def boot():
def get_info() -> list:
    """Print additional information to console."""
    from pype.lib.mongo import get_default_components
    from pype.lib.log import LOG_DATABASE_NAME, LOG_COLLECTION_NAME
    from pype.lib.log import PypeLogger

    components = get_default_components()

@@ -242,14 +242,18 @@ def get_info() -> list:
    infos.append(("Using Muster at",
                  os.environ.get("MUSTER_REST_URL")))

    if components["host"]:
        infos.append(("Logging to MongoDB", components["host"]))
        infos.append((" - port", components["port"] or "<N/A>"))
        infos.append((" - database", LOG_DATABASE_NAME))
        infos.append((" - collection", LOG_COLLECTION_NAME))
        infos.append((" - user", components["username"] or "<N/A>"))
        if components["auth_db"]:
            infos.append((" - auth source", components["auth_db"]))
    # Reinitialize
    PypeLogger.initialize()

    log_components = PypeLogger.log_mongo_url_components
    if log_components["host"]:
        infos.append(("Logging to MongoDB", log_components["host"]))
        infos.append((" - port", log_components["port"] or "<N/A>"))
        infos.append((" - database", PypeLogger.log_database_name))
        infos.append((" - collection", PypeLogger.log_collection_name))
        infos.append((" - user", log_components["username"] or "<N/A>"))
        if log_components["auth_db"]:
            infos.append((" - auth source", log_components["auth_db"]))

    maximum = max([len(i[0]) for i in infos])
    formatted = []
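For reference, a minimal sketch of how the `(label, value)` pairs collected above can be printed as an aligned table; the padding arithmetic mirrors the `maximum` computation in the hunk (sample data hypothetical):

    infos = [
        ("Logging to MongoDB", "localhost"),
        (" - port", "27017"),
    ]
    # pad each label to the widest one so the values line up in a single column
    maximum = max(len(label) for label, _ in infos)
    for label, value in infos:
        print("{}:{}{}".format(label, " " * (maximum - len(label) + 1), value))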
pype/api.py (25 changes)

@@ -9,7 +9,15 @@ from .lib import (
    PypeLogger,
    Anatomy,
    config,
    execute
    execute,
    run_subprocess,
    version_up,
    get_asset,
    get_hierarchy,
    get_version_from_path,
    get_last_version_from_path,
    source_hash,
    get_latest_version
)

from .lib.mongo import (

@@ -37,19 +45,6 @@ from .action import (
    RepairContextAction
)

from .lib import (
    version_up,
    get_asset,
    get_hierarchy,
    get_version_from_path,
    get_last_version_from_path,
    source_hash,
    get_latest_version
)

# Special naming case for subprocess since it's a built-in module.
from .lib import _subprocess as subprocess

# for backward compatibility with Pype 2
Logger = PypeLogger

@@ -94,6 +89,6 @@ __all__ = [
    "get_last_version_from_path",
    "source_hash",

    "subprocess",
    "run_subprocess",
    "get_latest_version"
]
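The `subprocess` alias is dropped from the public API in favour of `run_subprocess`. A usage sketch, assuming `run_subprocess` forwards an argument list to Python's subprocess machinery the way the old alias did (the command is hypothetical):

    from pype.api import run_subprocess

    # run an external tool; output is returned and logged through pype's logger
    output = run_subprocess(["ffmpeg", "-version"])
    print(output)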
@@ -11,7 +11,9 @@ from pype.api import (
from pype.lib import (
    env_value_to_bool,
    PreLaunchHook,
    ApplicationLaunchFailed
    ApplicationLaunchFailed,
    get_workdir_data,
    get_workdir_with_workdir_data,
)

import acre

@@ -140,17 +142,15 @@ class GlobalHostDataHook(PreLaunchHook):
            )
            return

        workdir_data = self._prepare_workdir_data(
            project_doc, asset_doc, task_name
        workdir_data = get_workdir_data(
            project_doc, asset_doc, task_name, self.host_name
        )
        self.data["workdir_data"] = workdir_data

        hierarchy = workdir_data["hierarchy"]
        anatomy = self.data["anatomy"]

        try:
            anatomy_filled = anatomy.format(workdir_data)
            workdir = os.path.normpath(anatomy_filled["work"]["folder"])
            workdir = get_workdir_with_workdir_data(workdir_data, anatomy)
            if not os.path.exists(workdir):
                self.log.debug(
                    "Creating workdir folder: \"{}\"".format(workdir)

@@ -168,7 +168,6 @@ class GlobalHostDataHook(PreLaunchHook):
            "AVALON_TASK": task_name,
            "AVALON_APP": self.host_name,
            "AVALON_APP_NAME": self.app_name,
            "AVALON_HIERARCHY": hierarchy,
            "AVALON_WORKDIR": workdir
        }
        self.log.debug(

@@ -180,21 +179,6 @@ class GlobalHostDataHook(PreLaunchHook):

        self.prepare_last_workfile(workdir)

    def _prepare_workdir_data(self, project_doc, asset_doc, task_name):
        hierarchy = "/".join(asset_doc["data"]["parents"])

        data = {
            "project": {
                "name": project_doc["name"],
                "code": project_doc["data"].get("code")
            },
            "task": task_name,
            "asset": asset_doc["name"],
            "app": self.host_name,
            "hierarchy": hierarchy
        }
        return data

    def prepare_last_workfile(self, workdir):
        """Last workfile workflow preparation.
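For reference, the removed `_prepare_workdir_data` documents the shape of the mapping the new `get_workdir_data` helper is expected to return; the same keys feed `get_workdir_with_workdir_data` (values hypothetical):

    workdir_data = {
        "project": {"name": "demo_project", "code": "demo"},
        "task": "compositing",
        "asset": "sh010",
        "app": "nuke",  # host name, now passed to get_workdir_data explicitly
        "hierarchy": "shots/sq01",
    }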
@@ -11,7 +11,7 @@ class LaunchWithWindowsShell(PreLaunchHook):
    """

    order = 10
    app_groups = ["nuke", "nukex", "hiero", "nukestudio"]
    app_groups = ["resolve", "nuke", "nukex", "hiero", "nukestudio"]
    platforms = ["windows"]

    def execute(self):
@@ -14,8 +14,10 @@ class ResolvePrelaunch(PreLaunchHook):
    app_groups = ["resolve"]

    def execute(self):
        # TODO: add OTIO installation from `pype/requirements.py`
        # making sure python 3.6 is installed at the provided path
        py36_dir = os.path.normpath(self.env.get("PYTHON36_RESOLVE", ""))
        py36_dir = os.path.normpath(
            self.launch_context.env.get("PYTHON36_RESOLVE", ""))
        assert os.path.isdir(py36_dir), (
            "Python 3.6 is not installed at the provided folder path. Either "
            "make sure the `environments\resolve.json` is having correctly "

@@ -23,11 +25,10 @@ class ResolvePrelaunch(PreLaunchHook):
            f"in given path. \nPYTHON36_RESOLVE: `{py36_dir}`"
        )
        self.log.info(f"Path to Resolve Python folder: `{py36_dir}`...")
        self.env["PYTHON36_RESOLVE"] = py36_dir

        # setting utility scripts dir for scripts syncing
        us_dir = os.path.normpath(
            self.env.get("RESOLVE_UTILITY_SCRIPTS_DIR", "")
            self.launch_context.env.get("RESOLVE_UTILITY_SCRIPTS_DIR", "")
        )
        assert os.path.isdir(us_dir), (
            "Resolve utility script dir does not exist. Either make sure "

@@ -38,8 +39,9 @@ class ResolvePrelaunch(PreLaunchHook):
        self.log.debug(f"-- us_dir: `{us_dir}`")

        # correctly format path for pre python script
        pre_py_sc = os.path.normpath(self.env.get("PRE_PYTHON_SCRIPT", ""))
        self.env["PRE_PYTHON_SCRIPT"] = pre_py_sc
        pre_py_sc = os.path.normpath(
            self.launch_context.env.get("PRE_PYTHON_SCRIPT", ""))
        self.launch_context.env["PRE_PYTHON_SCRIPT"] = pre_py_sc
        self.log.debug(f"-- pre_py_sc: `{pre_py_sc}`...")
        try:
            __import__("pype.hosts.resolve")

@@ -55,4 +57,4 @@ class ResolvePrelaunch(PreLaunchHook):
        # Resolve Setup integration
        importlib.reload(utils)
        self.log.debug(f"-- utils.__file__: `{utils.__file__}`")
        utils.setup(self.env)
        utils.setup(self.launch_context.env)
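The recurring change in this hook is that the environment is read and written through `self.launch_context.env` rather than the removed `self.env` shortcut. A minimal sketch of the pattern, assuming the `PreLaunchHook` base class imported from `pype.lib` as in the earlier hunks:

    from pype.lib import PreLaunchHook


    class ExamplePrelaunch(PreLaunchHook):
        app_groups = ["resolve"]

        def execute(self):
            # read from the launch context's environment mapping...
            py36_dir = self.launch_context.env.get("PYTHON36_RESOLVE", "")
            # ...and write back through the same mapping
            self.launch_context.env["PYTHON36_RESOLVE"] = py36_dir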
@@ -9,7 +9,7 @@ from pyblish import api as pyblish
from pype.api import Logger
from pype import PLUGINS_DIR

log = Logger().get_logger(__name__, "fusion")
log = Logger().get_logger(__name__)


AVALON_CONFIG = os.environ["AVALON_CONFIG"]
@@ -9,7 +9,7 @@ import shutil

from pype.api import Logger

log = Logger().get_logger(__name__, "fusion")
log = Logger().get_logger(__name__)


def _sync_utility_scripts(env=None):
@@ -31,7 +31,7 @@ __all__ = [
]

# get logger
log = Logger().get_logger(__name__, "hiero")
log = Logger().get_logger(__name__)


''' Creating all important host related variables '''
@@ -4,7 +4,7 @@ from pype.api import Logger
from .lib import sync_avalon_data_to_workfile, launch_workfiles_app
from .tags import add_tags_from_presets

log = Logger().get_logger(__name__, "hiero")
log = Logger().get_logger(__name__)


def startupCompleted(event):
@@ -9,7 +9,7 @@ from avalon.vendor.Qt import (QtWidgets, QtGui)
import pype.api as pype
from pype.api import Logger, Anatomy

log = Logger().get_logger(__name__, "hiero")
log = Logger().get_logger(__name__)

cached_process = None
@@ -12,7 +12,7 @@ from .lib import (
    set_workfiles
)

log = Logger().get_logger(__name__, "hiero")
log = Logger().get_logger(__name__)

self = sys.modules[__name__]
self._change_context_menu = None
@@ -8,7 +8,7 @@ from pprint import pformat
from pype.api import Logger
from avalon import io

log = Logger().get_logger(__name__, "hiero")
log = Logger().get_logger(__name__)


def tag_data():
@@ -4,7 +4,7 @@ from avalon import api
from pype.api import Logger


log = Logger().get_logger(__name__, "hiero")
log = Logger().get_logger(__name__)


def file_extensions():
@@ -32,6 +32,9 @@ Attributes:
    ImagePrefixes (dict): Mapping between renderers and their respective
        image prefix attribute names.

Todo:
    Determine `multipart` from render instance.

"""

import types

@@ -94,6 +97,10 @@ class ExpectedFiles:

    multipart = False

    def __init__(self, render_instance):
        """Constructor."""
        self._render_instance = render_instance

    def get(self, renderer, layer):
        """Get expected files for given renderer and render layer.

@@ -114,15 +121,20 @@ class ExpectedFiles:
        renderSetup.instance().switchToLayerUsingLegacyName(layer)

        if renderer.lower() == "arnold":
            return self._get_files(ExpectedFilesArnold(layer))
            return self._get_files(ExpectedFilesArnold(layer,
                                                       self._render_instance))
        elif renderer.lower() == "vray":
            return self._get_files(ExpectedFilesVray(layer))
            return self._get_files(ExpectedFilesVray(
                layer, self._render_instance))
        elif renderer.lower() == "redshift":
            return self._get_files(ExpectedFilesRedshift(layer))
            return self._get_files(ExpectedFilesRedshift(
                layer, self._render_instance))
        elif renderer.lower() == "mentalray":
            return self._get_files(ExpectedFilesMentalray(layer))
            return self._get_files(ExpectedFilesMentalray(
                layer, self._render_instance))
        elif renderer.lower() == "renderman":
            return self._get_files(ExpectedFilesRenderman(layer))
            return self._get_files(ExpectedFilesRenderman(
                layer, self._render_instance))
        else:
            raise UnsupportedRendererException(
                "unsupported {}".format(renderer)

@@ -149,9 +161,10 @@ class AExpectedFiles:
    layer = None
    multipart = False

    def __init__(self, layer):
    def __init__(self, layer, render_instance):
        """Constructor."""
        self.layer = layer
        self.render_instance = render_instance

    @abstractmethod
    def get_aovs(self):

@@ -460,9 +473,9 @@ class ExpectedFilesArnold(AExpectedFiles):
        "maya": "",
    }

    def __init__(self, layer):
    def __init__(self, layer, render_instance):
        """Constructor."""
        super(ExpectedFilesArnold, self).__init__(layer)
        super(ExpectedFilesArnold, self).__init__(layer, render_instance)
        self.renderer = "arnold"

    def get_aovs(self):

@@ -531,9 +544,9 @@ class ExpectedFilesArnold(AExpectedFiles):
class ExpectedFilesVray(AExpectedFiles):
    """Expected files for V-Ray renderer."""

    def __init__(self, layer):
    def __init__(self, layer, render_instance):
        """Constructor."""
        super(ExpectedFilesVray, self).__init__(layer)
        super(ExpectedFilesVray, self).__init__(layer, render_instance)
        self.renderer = "vray"

    def get_renderer_prefix(self):

@@ -614,24 +627,25 @@ class ExpectedFilesVray(AExpectedFiles):
        if default_ext == "exr (multichannel)" or default_ext == "exr (deep)":
            default_ext = "exr"

        # add beauty as default
        enabled_aovs.append(
            (u"beauty", default_ext)
        )

        if not self.maya_is_true(
                cmds.getAttr("vraySettings.relements_enableall")
        ):
            return enabled_aovs
        # handle aovs from references
        use_ref_aovs = self.render_instance.data.get(
            "vrayUseReferencedAovs", False) or False

        # filter all namespace prefixed AOVs - they are pulled in from
        # references and are not rendered.
        vr_aovs = [
            n
            for n in cmds.ls(
                type=["VRayRenderElement", "VRayRenderElementSet"]
            )
            if len(n.split(":")) == 1
        ]
        # this will list all AOVs no matter if they come from
        # a reference or not.
        vr_aovs = cmds.ls(
            type=["VRayRenderElement", "VRayRenderElementSet"]) or []
        if not use_ref_aovs:
            ref_aovs = cmds.ls(
                type=["VRayRenderElement", "VRayRenderElementSet"],
                referencedNodes=True) or []
            # get difference
            vr_aovs = list(set(vr_aovs) - set(ref_aovs))

        for aov in vr_aovs:
            enabled = self.maya_is_true(cmds.getAttr("{}.enabled".format(aov)))

@@ -703,9 +717,9 @@ class ExpectedFilesRedshift(AExpectedFiles):

    ext_mapping = ["iff", "exr", "tif", "png", "tga", "jpg"]

    def __init__(self, layer):
    def __init__(self, layer, render_instance):
        """Constructor."""
        super(ExpectedFilesRedshift, self).__init__(layer)
        super(ExpectedFilesRedshift, self).__init__(layer, render_instance)
        self.renderer = "redshift"

    def get_renderer_prefix(self):

@@ -822,9 +836,9 @@ class ExpectedFilesRenderman(AExpectedFiles):
    This is very rudimentary and needs more love and testing.
    """

    def __init__(self, layer):
    def __init__(self, layer, render_instance):
        """Constructor."""
        super(ExpectedFilesRenderman, self).__init__(layer)
        super(ExpectedFilesRenderman, self).__init__(layer, render_instance)
        self.renderer = "renderman"

    def get_aovs(self):

@@ -887,7 +901,7 @@ class ExpectedFilesRenderman(AExpectedFiles):
class ExpectedFilesMentalray(AExpectedFiles):
    """Skeleton unimplemented class for Mentalray renderer."""

    def __init__(self, layer):
    def __init__(self, layer, render_instance):
        """Constructor.

        Raises:
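The common thread in these hunks: every `ExpectedFiles*` class now receives the publish `render_instance`, so per-instance options (such as `vrayUseReferencedAovs` above) can steer which AOVs are expected. A condensed sketch of the pattern, assuming a pyblish-style instance carrying a `data` dict:

    class ExpectedFilesExample(object):
        def __init__(self, layer, render_instance):
            self.layer = layer
            # keep the publish instance around so option lookups stay per-render
            self.render_instance = render_instance

        def use_referenced_aovs(self):
            # instance.data carries creator/publish options, e.g. the V-Ray flag
            return bool(self.render_instance.data.get("vrayUseReferencedAovs"))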
@ -13,12 +13,14 @@ self._menu = os.environ.get('PYPE_STUDIO_NAME') or "Pype"
|
|||
log = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def _get_menu():
|
||||
def _get_menu(menu_name=None):
|
||||
"""Return the menu instance if it currently exists in Maya"""
|
||||
|
||||
if menu_name is None:
|
||||
menu_name = self._menu
|
||||
widgets = dict((
|
||||
w.objectName(), w) for w in QtWidgets.QApplication.allWidgets())
|
||||
menu = widgets.get(self._menu)
|
||||
menu = widgets.get(menu_name)
|
||||
return menu
|
||||
|
||||
|
||||
|
|
@ -40,10 +42,51 @@ def deferred():
|
|||
command=lambda *args: mayalookassigner.show()
|
||||
)
|
||||
|
||||
def modify_workfiles():
|
||||
from pype.tools import workfiles
|
||||
|
||||
def launch_workfiles_app(*_args, **_kwargs):
|
||||
workfiles.show(
|
||||
os.path.join(
|
||||
cmds.workspace(query=True, rootDirectory=True),
|
||||
cmds.workspace(fileRuleEntry="scene")
|
||||
),
|
||||
parent=pipeline._parent
|
||||
)
|
||||
|
||||
# Find the pipeline menu
|
||||
top_menu = _get_menu(pipeline._menu)
|
||||
|
||||
# Try to find workfile tool action in the menu
|
||||
workfile_action = None
|
||||
for action in top_menu.actions():
|
||||
if action.text() == "Work Files":
|
||||
workfile_action = action
|
||||
break
|
||||
|
||||
# Add at the top of menu if "Work Files" action was not found
|
||||
after_action = ""
|
||||
if workfile_action:
|
||||
# Use action's object name for `insertAfter` argument
|
||||
after_action = workfile_action.objectName()
|
||||
|
||||
# Insert action to menu
|
||||
cmds.menuItem(
|
||||
"Work Files",
|
||||
parent=pipeline._menu,
|
||||
command=launch_workfiles_app,
|
||||
insertAfter=after_action
|
||||
)
|
||||
|
||||
# Remove replaced action
|
||||
if workfile_action:
|
||||
top_menu.removeAction(workfile_action)
|
||||
|
||||
log.info("Attempting to install scripts menu..")
|
||||
|
||||
add_build_workfiles_item()
|
||||
add_look_assigner_item()
|
||||
modify_workfiles()
|
||||
|
||||
try:
|
||||
import scriptsmenu.launchformaya as launchformaya
|
||||
|
|
|
|||
|
|
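A short usage sketch of the widened `_get_menu` helper: it looks widgets up by Qt object name, so any menu registered under a known object name can now be fetched, not only the default one (the custom name below is hypothetical):

    menu = _get_menu()                         # default: self._menu
    other_menu = _get_menu("customToolsMenu")  # any other registered menu
    if other_menu is not None:
        print(other_menu.objectName())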
@@ -15,7 +15,7 @@ from . import lib

self = sys.modules[__name__]
self.workfiles_launched = False
log = Logger().get_logger(__name__, "nuke")
log = Logger().get_logger(__name__)

AVALON_CONFIG = os.getenv("AVALON_CONFIG", "pype")
@@ -20,7 +20,7 @@ from .presets import (

from .utils import set_context_favorites

log = pype.Logger().get_logger(__name__, "nuke")
log = pype.Logger().get_logger(__name__)

self = sys.modules[__name__]
self._project = None
@@ -1,17 +1,37 @@
import os
import nuke
from avalon.api import Session

from pype.hosts.nuke import lib
from ...lib import BuildWorkfile
from pype.api import Logger
from pype.tools import workfiles

log = Logger().get_logger(__name__, "nuke")
log = Logger().get_logger(__name__)


def install():
    menubar = nuke.menu("Nuke")
    menu = menubar.findItem(Session["AVALON_LABEL"])
    workfile_settings = lib.WorkfileSettings

    # replace workfiles tool from avalon core with pype's
    name = "Work Files..."
    rm_item = [
        (i, item) for i, item in enumerate(menu.items()) if name in item.name()
    ][0]

    log.debug("Changing Item: {}".format(rm_item))

    menu.removeItem(rm_item[1].name())
    menu.addCommand(
        name,
        lambda: workfiles.show(
            os.environ["AVALON_WORKDIR"]
        ),
        index=(rm_item[0])
    )

    # replace reset resolution from avalon core to pype's
    name = "Reset Resolution"
    new_name = "Set Resolution"
@@ -1,7 +1,7 @@
from pype.api import Anatomy, config, Logger
import nuke

log = Logger().get_logger(__name__, "nuke")
log = Logger().get_logger(__name__)


def get_anatomy(**kwarg):
@@ -18,7 +18,7 @@ __all__ = [
    "ls"
]

log = Logger().get_logger(__name__, "premiere")
log = Logger().get_logger(__name__)


def install():
@@ -10,7 +10,7 @@ from pype.widgets.message_window import message
from pype import PLUGINS_DIR
from pype.api import Logger

log = Logger().get_logger(__name__, "premiere")
log = Logger().get_logger(__name__)

self = sys.modules[__name__]
self._has_been_setup = False
@@ -14,20 +14,32 @@ from .pipeline import (
)

from .lib import (
    publish_clip_color,
    get_project_manager,
    get_current_project,
    get_current_sequence,
    get_video_track_names,
    get_current_track_items,
    get_track_item_pype_tag,
    set_track_item_pype_tag,
    imprint,
    set_publish_attribute,
    get_publish_attribute,
    create_current_sequence_media_bin,
    create_compound_clip,
    swap_clips,
    get_pype_clip_metadata,
    set_project_manager_to_folder_name
    set_project_manager_to_folder_name,
    get_reformated_path,
    get_otio_clip_instance_data
)

from .menu import launch_pype_menu

from .plugin import Creator
from .plugin import (
    Creator,
    PublishClip
)

from .workio import (
    open_file,

@@ -57,21 +69,31 @@ __all__ = [
    "get_resolve_module",

    # lib
    "publish_clip_color",
    "get_project_manager",
    "get_current_project",
    "get_current_sequence",
    "get_video_track_names",
    "get_current_track_items",
    "get_track_item_pype_tag",
    "set_track_item_pype_tag",
    "imprint",
    "set_publish_attribute",
    "get_publish_attribute",
    "create_current_sequence_media_bin",
    "create_compound_clip",
    "swap_clips",
    "get_pype_clip_metadata",
    "set_project_manager_to_folder_name",
    "get_reformated_path",
    "get_otio_clip_instance_data",

    # menu
    "launch_pype_menu",

    # plugin
    "Creator",
    "PublishClip",

    # workio
    "open_file",
@@ -1,31 +1,47 @@
import sys
import json
import re
from opentimelineio import opentime
from pprint import pformat
import pype

from .otio import davinci_export as otio_export

from pype.api import Logger

log = Logger().get_logger(__name__, "resolve")
log = Logger().get_logger(__name__)

self = sys.modules[__name__]
self.pm = None
self.project_manager = None

# Pype sequential rename variables
self.rename_index = 0
self.rename_add = 0
self.pype_metadata_key = "VFX Notes"

self.publish_clip_color = "Pink"
self.pype_marker_workflow = True

# Pype compound clip workflow variable
self.pype_tag_name = "VFX Notes"

# Pype marker workflow variables
self.pype_marker_name = "PYPEDATA"
self.pype_marker_duration = 1
self.pype_marker_color = "Mint"
self.temp_marker_frame = None


def get_project_manager():
    from . import bmdvr
    if not self.pm:
        self.pm = bmdvr.GetProjectManager()
    return self.pm
    if not self.project_manager:
        self.project_manager = bmdvr.GetProjectManager()
    return self.project_manager


def get_current_project():
    # initialize project manager
    get_project_manager()

    return self.pm.GetCurrentProject()
    return self.project_manager.GetCurrentProject()


def get_current_sequence():

@@ -35,6 +51,22 @@ def get_current_sequence():
    return project.GetCurrentTimeline()


def get_video_track_names():
    tracks = list()
    track_type = "video"
    sequence = get_current_sequence()

    # get all tracks count filtered by track type
    selected_track_count = sequence.GetTrackCount(track_type)

    # loop all tracks and get items
    for track_index in range(1, (int(selected_track_count) + 1)):
        track_name = sequence.GetTrackName("video", track_index)
        tracks.append(track_name)

    return tracks


def get_current_track_items(
        filter=False,
        track_type=None,

@@ -77,13 +109,168 @@ def get_current_track_items(
            if filter is True:
                if selecting_color in ti_color:
                    selected_clips.append(data)
                    # ti.ClearClipColor()
            else:
                selected_clips.append(data)

    return selected_clips


def get_track_item_pype_tag(track_item):
    """
    Get pype track item tag created by creator or loader plugin.

    Attributes:
        trackItem (resolve.TimelineItem): resolve api object

    Returns:
        dict: pype tag data (hierarchy, original clip attributes)
    """
    return_tag = None

    if self.pype_marker_workflow:
        return_tag = get_pype_marker(track_item)
    else:
        media_pool_item = track_item.GetMediaPoolItem()

        # get all tags from track item
        _tags = media_pool_item.GetMetadata()
        if not _tags:
            return None
        for key, data in _tags.items():
            # return only correct tag defined by global name
            if key in self.pype_tag_name:
                return_tag = json.loads(data)

    return return_tag


def set_track_item_pype_tag(track_item, data=None):
    """
    Set pype track item tag to input track_item.

    Attributes:
        trackItem (resolve.TimelineItem): resolve api object

    Returns:
        dict: json loaded data
    """
    data = data or dict()

    # get available pype tag if any
    tag_data = get_track_item_pype_tag(track_item)

    if self.pype_marker_workflow:
        # delete tag as it is not updatable
        if tag_data:
            delete_pype_marker(track_item)

        tag_data.update(data)
        set_pype_marker(track_item, tag_data)
    else:
        if tag_data:
            media_pool_item = track_item.GetMediaPoolItem()
            # if a pype tag is available then update it with the input data
            tag_data.update(data)
            media_pool_item.SetMetadata(
                self.pype_tag_name, json.dumps(tag_data))
        else:
            tag_data = data
            # if no tag exists yet, add one to the input track item
            track_item.SetMetadata(self.pype_tag_name, json.dumps(tag_data))

    return tag_data


def imprint(track_item, data=None):
    """
    Adding `Avalon data` into a track item tag.

    Also including publish attribute into tag.

    Arguments:
        track_item (hiero.core.TrackItem): hiero track item object
        data (dict): Any data which needs to be imprinted

    Examples:
        data = {
            'asset': 'sq020sh0280',
            'family': 'render',
            'subset': 'subsetMain'
        }
    """
    data = data or {}

    set_track_item_pype_tag(track_item, data)

    # add publish attribute
    set_publish_attribute(track_item, True)


def set_publish_attribute(track_item, value):
    """ Set Publish attribute in input Tag object

    Attribute:
        tag (hiero.core.Tag): a tag object
        value (bool): True or False
    """
    tag_data = get_track_item_pype_tag(track_item)
    tag_data["publish"] = value
    # set data to the publish attribute
    set_track_item_pype_tag(track_item, tag_data)


def get_publish_attribute(track_item):
    """ Get Publish attribute from input Tag object

    Attribute:
        tag (hiero.core.Tag): a tag object
        value (bool): True or False
    """
    tag_data = get_track_item_pype_tag(track_item)
    return tag_data["publish"]


def set_pype_marker(track_item, tag_data):
    source_start = track_item.GetLeftOffset()
    item_duration = track_item.GetDuration()
    frame = int(source_start + (item_duration / 2))

    # marker attributes
    frameId = (frame / 10) * 10
    color = self.pype_marker_color
    name = self.pype_marker_name
    note = json.dumps(tag_data)
    duration = (self.pype_marker_duration / 10) * 10

    track_item.AddMarker(
        frameId,
        color,
        name,
        note,
        duration
    )


def get_pype_marker(track_item):
    track_item_markers = track_item.GetMarkers()
    for marker_frame in track_item_markers:
        note = track_item_markers[marker_frame]["note"]
        color = track_item_markers[marker_frame]["color"]
        name = track_item_markers[marker_frame]["name"]
        print(f"_ marker data: {marker_frame} | {name} | {color} | {note}")
        if name == self.pype_marker_name and color == self.pype_marker_color:
            self.temp_marker_frame = marker_frame
            return json.loads(note)

    return dict()


def delete_pype_marker(track_item):
    track_item.DeleteMarkerAtFrame(self.temp_marker_frame)
    self.temp_marker_frame = None


def create_current_sequence_media_bin(sequence):
    seq_name = sequence.GetName()
    media_pool = get_current_project().GetMediaPool()

@@ -178,7 +365,7 @@ def get_name_with_data(clip_data, presets):
    })


def create_compound_clip(clip_data, folder, rename=False, **kwargs):
def create_compound_clip(clip_data, name, folder):
    """
    Convert timeline object into nested timeline object

@@ -186,8 +373,7 @@
        clip_data (dict): timeline item object packed into dict
            with project, timeline (sequence)
        folder (resolve.MediaPool.Folder): media pool folder object,
        rename (bool)[optional]: renaming in sequence or not
        kwargs (optional): additional data needed for rename=True (presets)
        name (str): name for compound clip

    Returns:
        resolve.MediaPoolItem: media pool item with compound clip timeline(cct)

@@ -199,34 +385,12 @@

    # get details of objects
    clip_item = clip["item"]
    track = clip_data["track"]

    mp = project.GetMediaPool()

    # get clip attributes
    clip_attributes = get_clip_attributes(clip_item)
    print(f"_ clip_attributes: {pformat(clip_attributes)}")

    if rename:
        presets = kwargs.get("presets")
        if presets:
            name, data = get_name_with_data(clip_data, presets)
            # add hierarchy data to clip attributes
            clip_attributes.update(data)
        else:
            name = "{:0>3}_{:0>4}".format(
                int(track["index"]), int(clip["index"]))
    else:
        # build name
        clip_name_split = clip_item.GetName().split(".")
        name = "_".join([
            track["name"],
            str(track["index"]),
            clip_name_split[0],
            str(clip["index"])]
        )

    # get metadata
    mp_item = clip_item.GetMediaPoolItem()
    mp_props = mp_item.GetClipProperty()

@@ -283,9 +447,9 @@
    project.SetCurrentTimeline(sq_origin)

    # Add collected metadata and attributes to the compound clip:
    if mp_item.GetMetadata(self.pype_metadata_key):
        clip_attributes[self.pype_metadata_key] = mp_item.GetMetadata(
            self.pype_metadata_key)[self.pype_metadata_key]
    if mp_item.GetMetadata(self.pype_tag_name):
        clip_attributes[self.pype_tag_name] = mp_item.GetMetadata(
            self.pype_tag_name)[self.pype_tag_name]

    # stringify
    clip_attributes = json.dumps(clip_attributes)

@@ -295,7 +459,7 @@
        cct.SetMetadata(k, v)

    # add metadata to cct
    cct.SetMetadata(self.pype_metadata_key, clip_attributes)
    cct.SetMetadata(self.pype_tag_name, clip_attributes)

    # reset start timecode of the compound clip
    cct.SetClipProperty("Start TC", mp_props["Start TC"])

@@ -314,7 +478,7 @@ def swap_clips(from_clip, to_clip, to_clip_name, to_in_frame, to_out_frame):
    It will add take and activate it to the frame range which is inputted

    Args:
        from_clip (resolve.mediaPoolItem)
        from_clip (resolve.TimelineItem)
        to_clip (resolve.mediaPoolItem)
        to_clip_name (str): name of to_clip
        to_in_frame (float): cut in frame, usually `GetLeftOffset()`

@@ -373,7 +537,7 @@ def get_pype_clip_metadata(clip):
    mp_item = clip.GetMediaPoolItem()
    metadata = mp_item.GetMetadata()

    return metadata.get(self.pype_metadata_key)
    return metadata.get(self.pype_tag_name)


def get_clip_attributes(clip):

@@ -424,16 +588,16 @@ def set_project_manager_to_folder_name(folder_name):
    set_folder = False

    # go back to root folder
    if self.pm.GotoRootFolder():
    if self.project_manager.GotoRootFolder():
        log.info(f"Testing existing folder: {folder_name}")
        folders = convert_resolve_list_type(
            self.pm.GetFoldersInCurrentFolder())
            self.project_manager.GetFoldersInCurrentFolder())
        log.info(f"Testing existing folders: {folders}")
        # get the first available folder object
        # with the same name as in `folder_name` else return False
        if next((f for f in folders if f in folder_name), False):
            log.info(f"Found existing folder: {folder_name}")
            set_folder = self.pm.OpenFolder(folder_name)
            set_folder = self.project_manager.OpenFolder(folder_name)

    if set_folder:
        return True

@@ -441,11 +605,11 @@
    # if folder by name is not existent then create one
    # go back to root folder
    log.info(f"Folder `{folder_name}` not found and will be created")
    if self.pm.GotoRootFolder():
    if self.project_manager.GotoRootFolder():
        try:
            # create folder by given name
            self.pm.CreateFolder(folder_name)
            self.pm.OpenFolder(folder_name)
            self.project_manager.CreateFolder(folder_name)
            self.project_manager.OpenFolder(folder_name)
            return True
        except NameError as e:
            log.error((f"Folder with name `{folder_name}` cannot be created!"

@@ -462,3 +626,80 @@ def convert_resolve_list_type(resolve_list):
        "Input argument should be dict() type")

    return [resolve_list[i] for i in sorted(resolve_list.keys())]


def get_reformated_path(path, padded=True):
    """
    Return fixed python expression path

    Args:
        path (str): path url or simple file name

    Returns:
        type: string with reformated path

    Example:
        get_reformated_path("plate.[0001-1008].exr") > plate.%04d.exr

    """
    num_pattern = "(\\[\\d+\\-\\d+\\])"
    padding_pattern = "(\\d+)(?=-)"
    if "[" in path:
        padding = len(re.findall(padding_pattern, path).pop())
        if padded:
            path = re.sub(num_pattern, f"%0{padding}d", path)
        else:
            path = re.sub(num_pattern, "%d", path)
    return path


def create_otio_time_range_from_track_item_data(track_item_data):
    track_item = track_item_data["clip"]["item"]
    project = track_item_data["project"]
    timeline = track_item_data["sequence"]
    timeline_start = timeline.GetStartFrame()

    frame_start = int(track_item.GetStart() - timeline_start)
    frame_duration = int(track_item.GetDuration())
    fps = project.GetSetting("timelineFrameRate")

    return otio_export.create_otio_time_range(
        frame_start, frame_duration, fps)


def get_otio_clip_instance_data(otio_timeline, track_item_data):
    """
    Return otio objects for timeline, track and clip

    Args:
        track_item_data (dict): track_item_data from list returned by
            resolve.get_current_track_items()
        otio_timeline (otio.schema.Timeline): otio object

    Returns:
        dict: otio clip object

    """

    track_item = track_item_data["clip"]["item"]
    track_name = track_item_data["track"]["name"]
    timeline_range = create_otio_time_range_from_track_item_data(
        track_item_data)

    for otio_clip in otio_timeline.each_clip():
        # compare against the clip's parent track name, not the name itself
        clip_track_name = otio_clip.parent().name
        parent_range = otio_clip.range_in_parent()
        if track_name not in clip_track_name:
            continue
        if otio_clip.name not in track_item.GetName():
            continue
        if pype.lib.is_overlapping_otio_ranges(
                parent_range, timeline_range, strict=True):

            # add pypedata marker to otio_clip metadata
            for marker in otio_clip.markers:
                if self.pype_marker_name in marker.name:
                    otio_clip.metadata.update(marker.metadata)
            return {"otioClip": otio_clip}

    return None
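A quick, Resolve-free demonstration of the sequence-path regexes used by `get_reformated_path` above:

    import re

    num_pattern = "(\\[\\d+\\-\\d+\\])"   # matches the "[0001-1008]" range token
    padding_pattern = "(\\d+)(?=-)"       # digits of the first frame, before "-"

    path = "plate.[0001-1008].exr"
    padding = len(re.findall(padding_pattern, path).pop())     # -> 4
    print(re.sub(num_pattern, "%0{}d".format(padding), path))  # plate.%04d.exr
    print(re.sub(num_pattern, "%d", path))                     # plate.%d.exr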
@@ -4,6 +4,17 @@ QWidget {
    font-size: 13px;
}

QComboBox {
    border: 1px solid #090909;
    background-color: #201f1f;
    color: #ffffff;
}

QComboBox QAbstractItemView
{
    color: white;
}

QPushButton {
    border: 1px solid #090909;
    background-color: #201f1f;
pype/hosts/resolve/otio/davinci_export.py (new file, 324 lines)

@@ -0,0 +1,324 @@
""" compatibility OpenTimelineIO 0.12.0 and older
"""

import os
import re
import sys
import json
import opentimelineio as otio
from . import utils
import clique

self = sys.modules[__name__]
self.track_types = {
    "video": otio.schema.TrackKind.Video,
    "audio": otio.schema.TrackKind.Audio
}
self.project_fps = None


def create_otio_rational_time(frame, fps):
    return otio.opentime.RationalTime(
        float(frame),
        float(fps)
    )


def create_otio_time_range(start_frame, frame_duration, fps):
    return otio.opentime.TimeRange(
        start_time=create_otio_rational_time(start_frame, fps),
        duration=create_otio_rational_time(frame_duration, fps)
    )


def create_otio_reference(media_pool_item):
    metadata = _get_metadata_media_pool_item(media_pool_item)
    mp_clip_property = media_pool_item.GetClipProperty()
    path = mp_clip_property["File Path"]
    reformat_path = utils.get_reformated_path(path, padded=True)
    padding = utils.get_padding_from_path(path)

    if padding:
        metadata.update({
            "isSequence": True,
            "padding": padding
        })

    # get clip property regarding to type
    mp_clip_property = media_pool_item.GetClipProperty()
    fps = float(mp_clip_property["FPS"])
    if mp_clip_property["Type"] == "Video":
        frame_start = int(mp_clip_property["Start"])
        frame_duration = int(mp_clip_property["Frames"])
    else:
        audio_duration = str(mp_clip_property["Duration"])
        frame_start = 0
        frame_duration = int(utils.timecode_to_frames(
            audio_duration, float(fps)))

    otio_ex_ref_item = None

    if padding:
        # if it is a file sequence try to create `ImageSequenceReference`;
        # the OTIO might not be compatible, so return nothing and do it the old way
        try:
            dirname, filename = os.path.split(path)
            collection = clique.parse(filename, '{head}[{ranges}]{tail}')
            padding_num = len(re.findall("(\\d+)(?=-)", filename).pop())
            otio_ex_ref_item = otio.schema.ImageSequenceReference(
                target_url_base=dirname + os.sep,
                name_prefix=collection.format("{head}"),
                name_suffix=collection.format("{tail}"),
                start_frame=frame_start,
                frame_zero_padding=padding_num,
                rate=fps,
                available_range=create_otio_time_range(
                    frame_start,
                    frame_duration,
                    fps
                )
            )
        except AttributeError:
            pass

    if not otio_ex_ref_item:
        # in case of old OTIO or a video file create `ExternalReference`
        otio_ex_ref_item = otio.schema.ExternalReference(
            target_url=reformat_path,
            available_range=create_otio_time_range(
                frame_start,
                frame_duration,
                fps
            )
        )

    # add metadata to otio item
    add_otio_metadata(otio_ex_ref_item, media_pool_item, **metadata)

    return otio_ex_ref_item


def create_otio_markers(track_item, fps):
    track_item_markers = track_item.GetMarkers()
    markers = []
    for marker_frame in track_item_markers:
        note = track_item_markers[marker_frame]["note"]
        if "{" in note and "}" in note:
            metadata = json.loads(note)
        else:
            metadata = {"note": note}
        markers.append(
            otio.schema.Marker(
                name=track_item_markers[marker_frame]["name"],
                marked_range=create_otio_time_range(
                    marker_frame,
                    track_item_markers[marker_frame]["duration"],
                    fps
                ),
                color=track_item_markers[marker_frame]["color"].upper(),
                metadata=metadata
            )
        )
    return markers


def create_otio_clip(track_item):
    media_pool_item = track_item.GetMediaPoolItem()
    mp_clip_property = media_pool_item.GetClipProperty()

    if not self.project_fps:
        fps = mp_clip_property["FPS"]
    else:
        fps = self.project_fps

    name = track_item.GetName()

    media_reference = create_otio_reference(media_pool_item)
    source_range = create_otio_time_range(
        int(track_item.GetLeftOffset()),
        int(track_item.GetDuration()),
        fps
    )

    if mp_clip_property["Type"] == "Audio":
        return_clips = list()
        audio_chanels = mp_clip_property["Audio Ch"]
        for channel in range(0, int(audio_chanels)):
            clip = otio.schema.Clip(
                name=f"{name}_{channel}",
                source_range=source_range,
                media_reference=media_reference
            )
            for marker in create_otio_markers(track_item, fps):
                clip.markers.append(marker)
            return_clips.append(clip)
        return return_clips
    else:
        clip = otio.schema.Clip(
            name=name,
            source_range=source_range,
            media_reference=media_reference
        )
        for marker in create_otio_markers(track_item, fps):
            clip.markers.append(marker)

        return clip


def create_otio_gap(gap_start, clip_start, tl_start_frame, fps):
    return otio.schema.Gap(
        source_range=create_otio_time_range(
            gap_start,
            (clip_start - tl_start_frame) - gap_start,
            fps
        )
    )


def _create_otio_timeline(project, timeline, fps):
    metadata = _get_timeline_metadata(project, timeline)
    start_time = create_otio_rational_time(
        timeline.GetStartFrame(), fps)
    otio_timeline = otio.schema.Timeline(
        name=timeline.GetName(),
        global_start_time=start_time,
        metadata=metadata
    )
    return otio_timeline


def _get_timeline_metadata(project, timeline):
    media_pool = project.GetMediaPool()
    root_folder = media_pool.GetRootFolder()
    ls_folder = root_folder.GetClipList()
    timeline = project.GetCurrentTimeline()
    timeline_name = timeline.GetName()
    for tl in ls_folder:
        if tl.GetName() not in timeline_name:
            continue
        return _get_metadata_media_pool_item(tl)


def _get_metadata_media_pool_item(media_pool_item):
    data = dict()
    data.update({k: v for k, v in media_pool_item.GetMetadata().items()})
    property = media_pool_item.GetClipProperty() or {}
    for name, value in property.items():
        if "Resolution" in name and "" != value:
            width, height = value.split("x")
            data.update({
                "width": int(width),
                "height": int(height)
            })
        if "PAR" in name and "" != value:
            try:
                data.update({"pixelAspect": float(value)})
            except ValueError:
                if "Square" in value:
                    data.update({"pixelAspect": float(1)})
                else:
                    data.update({"pixelAspect": float(1)})

    return data


def create_otio_track(track_type, track_name):
    return otio.schema.Track(
        name=track_name,
        kind=self.track_types[track_type]
    )


def add_otio_gap(clip_start, otio_track, track_item, timeline):
    # if there is a gap between track start and clip start
    if clip_start > otio_track.available_range().duration.value:
        # create a gap and add it to the track
        otio_track.append(
            create_otio_gap(
                otio_track.available_range().duration.value,
                track_item.GetStart(),
                timeline.GetStartFrame(),
                self.project_fps
            )
        )


def add_otio_metadata(otio_item, media_pool_item, **kwargs):
    mp_metadata = media_pool_item.GetMetadata()
    # add additional metadata from kwargs
    if kwargs:
        mp_metadata.update(kwargs)

    # add metadata to otio item metadata
    for key, value in mp_metadata.items():
        otio_item.metadata.update({key: value})


def create_otio_timeline(resolve_project):

    # get current timeline
    self.project_fps = resolve_project.GetSetting("timelineFrameRate")
    timeline = resolve_project.GetCurrentTimeline()

    # convert timeline to otio
    otio_timeline = _create_otio_timeline(
        resolve_project, timeline, self.project_fps)

    # loop all defined track types
    for track_type in list(self.track_types.keys()):
        # get total track count
        track_count = timeline.GetTrackCount(track_type)

        # loop all tracks by track indexes
        for track_index in range(1, int(track_count) + 1):
            # get current track name
            track_name = timeline.GetTrackName(track_type, track_index)

            # convert track to otio
            otio_track = create_otio_track(
                track_type, track_name)

            # get all track items in current track
            current_track_items = timeline.GetItemListInTrack(
                track_type, track_index)

            # loop available track items in current track items
            for track_item in current_track_items:
                # skip offline track items
                if track_item.GetMediaPoolItem() is None:
                    continue

                # calculate real clip start
                clip_start = track_item.GetStart() - timeline.GetStartFrame()

                add_otio_gap(
                    clip_start, otio_track, track_item, timeline)

                # create otio clip and add it to track
                otio_clip = create_otio_clip(track_item)

                if not isinstance(otio_clip, list):
                    otio_track.append(otio_clip)
                else:
                    for index, clip in enumerate(otio_clip):
                        if index == 0:
                            otio_track.append(clip)
                        else:
                            # add previous otio track to timeline
                            otio_timeline.tracks.append(otio_track)
                            # convert track to otio
                            otio_track = create_otio_track(
                                track_type, track_name)
                            add_otio_gap(
                                clip_start, otio_track,
                                track_item, timeline)
                            otio_track.append(clip)

            # add track to otio timeline
            otio_timeline.tracks.append(otio_track)

    return otio_timeline


def write_to_file(otio_timeline, path):
    otio.adapters.write_to_file(otio_timeline, path)
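A minimal, Resolve-free sketch of the OTIO structure this exporter emits, using only the public opentimelineio API (names and values hypothetical):

    import opentimelineio as otio

    fps = 24.0
    timeline = otio.schema.Timeline(name="demo_timeline")
    track = otio.schema.Track(name="Video 1", kind=otio.schema.TrackKind.Video)

    # a clip whose media points at a reformatted sequence path, e.g. plate.%04d.exr
    clip = otio.schema.Clip(
        name="plate",
        media_reference=otio.schema.ExternalReference(target_url="plate.%04d.exr"),
        source_range=otio.opentime.TimeRange(
            start_time=otio.opentime.RationalTime(0, fps),
            duration=otio.opentime.RationalTime(48, fps),
        ),
    )
    track.append(clip)
    timeline.tracks.append(track)
    otio.adapters.write_to_file(timeline, "demo.otio")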
pype/hosts/resolve/otio/davinci_import.py (new file, 108 lines)

@@ -0,0 +1,108 @@
import sys
import json
import DaVinciResolveScript
import opentimelineio as otio


self = sys.modules[__name__]
self.resolve = DaVinciResolveScript.scriptapp('Resolve')
self.fusion = DaVinciResolveScript.scriptapp('Fusion')
self.project_manager = self.resolve.GetProjectManager()
self.current_project = self.project_manager.GetCurrentProject()
self.media_pool = self.current_project.GetMediaPool()
self.track_types = {
    "video": otio.schema.TrackKind.Video,
    "audio": otio.schema.TrackKind.Audio
}
self.project_fps = None


def build_timeline(otio_timeline):
    # TODO: build timeline in mediapool `otioImport` folder
    # TODO: loop otio tracks and build them in the new timeline
    for clip in otio_timeline.each_clip():
        # TODO: create track item
        print(clip.name)
        print(clip.parent().name)
        print(clip.range_in_parent())


def _build_track(otio_track):
    # TODO: _build_track
    pass


def _build_media_pool_item(otio_media_reference):
    # TODO: _build_media_pool_item
    pass


def _build_track_item(otio_clip):
    # TODO: _build_track_item
    pass


def _build_gap(otio_clip):
    # TODO: _build_gap
    pass


def _build_marker(track_item, otio_marker):
    frame_start = otio_marker.marked_range.start_time.value
    frame_duration = otio_marker.marked_range.duration.value

    # marker attributes
    frameId = (frame_start / 10) * 10
    color = otio_marker.color
    name = otio_marker.name
    note = otio_marker.metadata.get("note") or json.dumps(otio_marker.metadata)
    duration = (frame_duration / 10) * 10

    track_item.AddMarker(
        frameId,
        color,
        name,
        note,
        duration
    )


def _build_media_pool_folder(name):
    """
    Returns folder with input name and sets it as current folder.

    It will create a new media bin if none is found in the root media bin

    Args:
        name (str): name of bin

    Returns:
        resolve.api.MediaPool.Folder: description

    """

    root_folder = self.media_pool.GetRootFolder()
    sub_folders = root_folder.GetSubFolderList()
    testing_names = list()

    for subfolder in sub_folders:
        subf_name = subfolder.GetName()
        if name in subf_name:
            testing_names.append(subfolder)
        else:
            testing_names.append(False)

    matching = next((f for f in testing_names if f is not False), None)

    if not matching:
        new_folder = self.media_pool.AddSubFolder(root_folder, name)
        self.media_pool.SetCurrentFolder(new_folder)
    else:
        self.media_pool.SetCurrentFolder(matching)

    return self.media_pool.GetCurrentFolder()


def read_from_file(otio_file):
    otio_timeline = otio.adapters.read_from_file(otio_file)
    build_timeline(otio_timeline)
pype/hosts/resolve/otio/utils.py (new file, 63 lines)

@@ -0,0 +1,63 @@
import re
import opentimelineio as otio


def timecode_to_frames(timecode, framerate):
    # use the passed framerate (the original draft hardcoded 24 here)
    rt = otio.opentime.from_timecode(timecode, framerate)
    return int(otio.opentime.to_frames(rt))


def frames_to_timecode(frames, framerate):
    rt = otio.opentime.from_frames(frames, framerate)
    return otio.opentime.to_timecode(rt)


def frames_to_secons(frames, framerate):
    rt = otio.opentime.from_frames(frames, framerate)
    return otio.opentime.to_seconds(rt)


def get_reformated_path(path, padded=True):
    """
    Return fixed python expression path

    Args:
        path (str): path url or simple file name

    Returns:
        type: string with reformated path

    Example:
        get_reformated_path("plate.[0001-1008].exr") > plate.%04d.exr

    """
    num_pattern = "(\\[\\d+\\-\\d+\\])"
    padding_pattern = "(\\d+)(?=-)"
    if "[" in path:
        padding = len(re.findall(padding_pattern, path).pop())
        if padded:
            path = re.sub(num_pattern, f"%0{padding}d", path)
        else:
            path = re.sub(num_pattern, "%d", path)
    return path


def get_padding_from_path(path):
    """
    Return padding number from DaVinci Resolve sequence path style

    Args:
        path (str): path url or simple file name

    Returns:
        int: padding number

    Example:
        get_padding_from_path("plate.[0001-1008].exr") > 4

    """
    padding_pattern = "(\\d+)(?=-)"
    if "[" in path:
        return len(re.findall(padding_pattern, path).pop())

    return None
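A small usage sketch of these helpers (framerate and paths hypothetical; note that `timecode_to_frames` honours the passed framerate):

    from pype.hosts.resolve.otio import utils

    print(utils.timecode_to_frames("00:00:01:00", 25.0))          # 25
    print(utils.frames_to_timecode(50, 25.0))                     # 00:00:02:00
    print(utils.get_padding_from_path("plate.[0001-1008].exr"))   # 4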
@ -3,13 +3,17 @@ Basic avalon integration
|
|||
"""
|
||||
import os
|
||||
import contextlib
|
||||
from collections import OrderedDict
|
||||
from avalon.tools import workfiles
|
||||
from avalon import api as avalon
|
||||
from avalon import schema
|
||||
from avalon.pipeline import AVALON_CONTAINER_ID
|
||||
from pyblish import api as pyblish
|
||||
import pype
|
||||
from pype.api import Logger
|
||||
from . import lib
|
||||
|
||||
log = Logger().get_logger(__name__, "resolve")
|
||||
log = Logger().get_logger(__name__)
|
||||
|
||||
AVALON_CONFIG = os.environ["AVALON_CONFIG"]
|
||||
|
||||
|
|
@ -57,6 +61,9 @@ def install():
|
|||
avalon.register_plugin_path(avalon.Creator, CREATE_PATH)
|
||||
avalon.register_plugin_path(avalon.InventoryAction, INVENTORY_PATH)
|
||||
|
||||
# register callback for switching publishable
|
||||
pyblish.register_callback("instanceToggled", on_pyblish_instance_toggled)
|
||||
|
||||
get_resolve_module()
|
||||
|
||||
|
||||
|
|
@ -79,30 +86,50 @@ def uninstall():
|
|||
avalon.deregister_plugin_path(avalon.Creator, CREATE_PATH)
|
||||
avalon.deregister_plugin_path(avalon.InventoryAction, INVENTORY_PATH)
|
||||
|
||||
# register callback for switching publishable
|
||||
pyblish.deregister_callback("instanceToggled", on_pyblish_instance_toggled)
|
||||
|
||||
def containerise(obj,
|
||||
|
||||
def containerise(track_item,
|
||||
name,
|
||||
namespace,
|
||||
context,
|
||||
loader=None,
|
||||
data=None):
|
||||
"""Bundle Resolve's object into an assembly and imprint it with metadata
|
||||
"""Bundle Hiero's object into an assembly and imprint it with metadata
|
||||
|
||||
Containerisation enables a tracking of version, author and origin
|
||||
for loaded assets.
|
||||
|
||||
Arguments:
|
||||
obj (obj): Resolve's object to imprint as container
|
||||
track_item (hiero.core.TrackItem): object to imprint as container
|
||||
name (str): Name of resulting assembly
|
||||
namespace (str): Namespace under which to host container
|
||||
context (dict): Asset information
|
||||
loader (str, optional): Name of node used to produce this container.
|
||||
|
||||
Returns:
|
||||
obj (obj): containerised object
|
||||
track_item (hiero.core.TrackItem): containerised object
|
||||
|
||||
"""
|
||||
pass
|
||||
|
||||
data_imprint = OrderedDict({
|
||||
"schema": "avalon-core:container-2.0",
|
||||
"id": AVALON_CONTAINER_ID,
|
||||
"name": str(name),
|
||||
"namespace": str(namespace),
|
||||
"loader": str(loader),
|
||||
"representation": str(context["representation"]["_id"]),
|
||||
})
|
||||
|
||||
if data:
|
||||
for k, v in data.items():
|
||||
data_imprint.update({k: v})
|
||||
|
||||
print("_ data_imprint: {}".format(data_imprint))
|
||||
lib.set_track_item_pype_tag(track_item, data_imprint)
|
||||
|
||||
return track_item
|
||||
|
||||
|
||||
def ls():
|
||||
|
|
@@ -115,20 +142,77 @@ def ls():
    See the `container.json` schema for details on how it should look,
    and the Maya equivalent, which is in `avalon.maya.pipeline`
    """
    pass

    # get all track items from current timeline
    all_track_items = lib.get_current_track_items(filter=False)

    for track_item_data in all_track_items:
        track_item = track_item_data["clip"]["item"]
        container = parse_container(track_item)
        if container:
            yield container


def parse_container(container):
    """Return the container node's full container data.
def parse_container(track_item, validate=True):
    """Return container data from track_item's pype tag.

    Args:
        container (str): A container node name.
        track_item (hiero.core.TrackItem): A containerised track item.
        validate (bool)[optional]: validate against the avalon schema

    Returns:
        dict: The container schema data for this container node.
        dict: The container schema data for input containerized track item.

    """
    pass
    # convert tag metadata to normal keys names
    data = lib.get_track_item_pype_tag(track_item)

    if validate and data and data.get("schema"):
        schema.validate(data)

    if not isinstance(data, dict):
        return

    # If not all required data return the empty container
    required = ['schema', 'id', 'name',
                'namespace', 'loader', 'representation']

    if not all(key in data for key in required):
        return

    container = {key: data[key] for key in required}

    container["objectName"] = track_item.name()

    # Store reference to the node object
    container["_track_item"] = track_item

    return container


def update_container(track_item, data=None):
    """Update container data to input track_item's pype tag.

    Args:
        track_item (hiero.core.TrackItem): A containerised track item.
        data (dict)[optional]: dictionary with data to be updated

    Returns:
        bool: True if container was updated correctly

    """
    data = data or dict()

    container = lib.get_track_item_pype_tag(track_item)

    for _key, _value in container.items():
        try:
            container[_key] = data[_key]
        except KeyError:
            pass

    log.info("Updating container: `{}`".format(track_item))
    return bool(lib.set_track_item_pype_tag(track_item, container))


def launch_workfiles_app(*args):

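Editor's note: taken together, `ls()` and `parse_container()` give loaders an iterable of tagged track items whose keys are the `required` list above; a hedged usage sketch:

```python
# Iterate containerised track items in the current timeline.
# update_container() only overwrites keys already present in the tag.
for container in ls():
    print(container["name"], container["representation"])
    track_item = container["_track_item"]
    update_container(track_item, {"representation": container["representation"]})
```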
@@ -163,3 +247,18 @@ def reset_selection():
    """Deselect all selected nodes
    """
    pass


def on_pyblish_instance_toggled(instance, old_value, new_value):
    """Toggle node passthrough states on instance toggles."""

    log.info("instance toggle: {}, old_value: {}, new_value:{} ".format(
        instance, old_value, new_value))

    from pype.hosts.resolve import (
        set_publish_attribute
    )

    # Whether instances should be passthrough based on new value
    track_item = instance.data["item"]
    set_publish_attribute(track_item, new_value)

@@ -2,7 +2,7 @@ import re
from avalon import api
from pype.hosts import resolve
from avalon.vendor import qargparse
from pype.api import config
from . import lib

from Qt import QtWidgets, QtCore


@@ -12,7 +12,7 @@ class CreatorWidget(QtWidgets.QDialog):
    # output items
    items = dict()

    def __init__(self, name, info, presets, parent=None):
    def __init__(self, name, info, ui_inputs, parent=None):
        super(CreatorWidget, self).__init__(parent)

        self.setObjectName(name)

@@ -25,6 +25,7 @@ class CreatorWidget(QtWidgets.QDialog):
            | QtCore.Qt.WindowStaysOnTopHint
        )
        self.setWindowTitle(name or "Pype Creator Input")
        self.resize(500, 700)

        # Where inputs and labels are set
        self.content_widget = [QtWidgets.QWidget(self)]

@@ -35,14 +36,25 @@ class CreatorWidget(QtWidgets.QDialog):
        # first add widget tag line
        top_layout.addWidget(QtWidgets.QLabel(info))

        top_layout.addWidget(Spacer(5, self))

        # main dynamic layout
        self.content_widget.append(QtWidgets.QWidget(self))
        content_layout = QtWidgets.QFormLayout(self.content_widget[-1])
        self.scroll_area = QtWidgets.QScrollArea(self, widgetResizable=True)
        self.scroll_area.setVerticalScrollBarPolicy(
            QtCore.Qt.ScrollBarAsNeeded)
        self.scroll_area.setVerticalScrollBarPolicy(
            QtCore.Qt.ScrollBarAlwaysOn)
        self.scroll_area.setHorizontalScrollBarPolicy(
            QtCore.Qt.ScrollBarAlwaysOff)
        self.scroll_area.setWidgetResizable(True)

        self.content_widget.append(self.scroll_area)

        scroll_widget = QtWidgets.QWidget(self)
        in_scroll_area = QtWidgets.QVBoxLayout(scroll_widget)
        self.content_layout = [in_scroll_area]

        # add preset data into input widget layout
        self.items = self.add_presets_to_layout(content_layout, presets)
        self.items = self.populate_widgets(ui_inputs)
        self.scroll_area.setWidget(scroll_widget)

        # Confirmation buttons
        btns_widget = QtWidgets.QWidget(self)

@@ -79,20 +91,33 @@ class CreatorWidget(QtWidgets.QDialog):
        self.result = None
        self.close()

    def value(self, data):
    def value(self, data, new_data=None):
        new_data = new_data or dict()
        for k, v in data.items():
            if isinstance(v, dict):
                print(f"nested: {k}")
                data[k] = self.value(v)
            elif getattr(v, "value", None):
                print(f"normal int: {k}")
                result = v.value()
                data[k] = result()
            else:
                print(f"normal text: {k}")
                result = v.text()
                data[k] = result()
        return data
            new_data[k] = {
                "target": None,
                "value": None
            }
            if v["type"] == "dict":
                new_data[k]["target"] = v["target"]
                new_data[k]["value"] = self.value(v["value"])
            if v["type"] == "section":
                new_data.pop(k)
                new_data = self.value(v["value"], new_data)
            elif getattr(v["value"], "currentText", None):
                new_data[k]["target"] = v["target"]
                new_data[k]["value"] = v["value"].currentText()
            elif getattr(v["value"], "isChecked", None):
                new_data[k]["target"] = v["target"]
                new_data[k]["value"] = v["value"].isChecked()
            elif getattr(v["value"], "value", None):
                new_data[k]["target"] = v["target"]
                new_data[k]["value"] = v["value"].value()
            elif getattr(v["value"], "text", None):
                new_data[k]["target"] = v["target"]
                new_data[k]["value"] = v["value"].text()

        return new_data

    def camel_case_split(self, text):
        matches = re.finditer(

@@ -124,41 +149,115 @@ class CreatorWidget(QtWidgets.QDialog):
        for func, val in kwargs.items():
            if getattr(item, func):
                func_attr = getattr(item, func)
                func_attr(val)
                if isinstance(val, tuple):
                    func_attr(*val)
                else:
                    func_attr(val)

        # add to layout
        layout.addRow(label, item)

        return item

    def add_presets_to_layout(self, content_layout, data):
    def populate_widgets(self, data, content_layout=None):
        """
        Populate widget from input dict.

        Each plugin has its own set of widget rows defined in dictionary
        each row values should have following keys: `type`, `target`,
        `label`, `order`, `value` and optionally also `toolTip`.

        Args:
            data (dict): widget rows or organized groups defined
                by types `dict` or `section`
            content_layout (QtWidgets.QFormLayout)[optional]: used when nesting

        Returns:
            dict: redefined data dict updated with created widgets

        """

        content_layout = content_layout or self.content_layout[-1]
        # fix order of process by defined order value
        ordered_keys = list(data.keys())
        for k, v in data.items():
            if isinstance(v, dict):
                try:
                    # try removing a key from index which should
                    # be filled with new
                    ordered_keys.pop(v["order"])
                except IndexError:
                    pass
                # add key into correct order
                ordered_keys.insert(v["order"], k)

        # process ordered
        for k in ordered_keys:
            v = data[k]
            tool_tip = v.get("toolTip", "")
            if v["type"] == "dict":
                # adding spacer between sections
                self.content_widget.append(QtWidgets.QWidget(self))
                devider = QtWidgets.QVBoxLayout(self.content_widget[-1])
                devider.addWidget(Spacer(5, self))
                devider.setObjectName("Devider")
                self.content_layout.append(QtWidgets.QWidget(self))
                content_layout.addWidget(self.content_layout[-1])
                self.content_layout[-1].setObjectName("sectionHeadline")

                headline = QtWidgets.QVBoxLayout(self.content_layout[-1])
                headline.addWidget(Spacer(20, self))
                headline.addWidget(QtWidgets.QLabel(v["label"]))

                # adding nested layout with label
                self.content_widget.append(QtWidgets.QWidget(self))
                self.content_layout.append(QtWidgets.QWidget(self))
                self.content_layout[-1].setObjectName("sectionContent")

                nested_content_layout = QtWidgets.QFormLayout(
                    self.content_widget[-1])
                    self.content_layout[-1])
                nested_content_layout.setObjectName("NestedContentLayout")
                content_layout.addWidget(self.content_layout[-1])

                # add nested key as label
                self.create_row(nested_content_layout, "QLabel", k)
                data[k] = self.add_presets_to_layout(nested_content_layout, v)
            elif isinstance(v, str):
                print(f"layout.str: {k}")
                print(f"content_layout: {content_layout}")
                data[k] = self.create_row(
                    content_layout, "QLineEdit", k, setText=v)
            elif isinstance(v, int):
                print(f"layout.int: {k}")
                print(f"content_layout: {content_layout}")
                data[k] = self.create_row(
                    content_layout, "QSpinBox", k, setValue=v)
                data[k]["value"] = self.populate_widgets(
                    v["value"], nested_content_layout)

            if v["type"] == "section":
                # adding spacer between sections
                self.content_layout.append(QtWidgets.QWidget(self))
                content_layout.addWidget(self.content_layout[-1])
                self.content_layout[-1].setObjectName("sectionHeadline")

                headline = QtWidgets.QVBoxLayout(self.content_layout[-1])
                headline.addWidget(Spacer(20, self))
                headline.addWidget(QtWidgets.QLabel(v["label"]))

                # adding nested layout with label
                self.content_layout.append(QtWidgets.QWidget(self))
                self.content_layout[-1].setObjectName("sectionContent")

                nested_content_layout = QtWidgets.QFormLayout(
                    self.content_layout[-1])
                nested_content_layout.setObjectName("NestedContentLayout")
                content_layout.addWidget(self.content_layout[-1])

                # add nested key as label
                data[k]["value"] = self.populate_widgets(
                    v["value"], nested_content_layout)

            elif v["type"] == "QLineEdit":
                data[k]["value"] = self.create_row(
                    content_layout, "QLineEdit", v["label"],
                    setText=v["value"], setToolTip=tool_tip)
            elif v["type"] == "QComboBox":
                data[k]["value"] = self.create_row(
                    content_layout, "QComboBox", v["label"],
                    addItems=v["value"], setToolTip=tool_tip)
            elif v["type"] == "QCheckBox":
                data[k]["value"] = self.create_row(
                    content_layout, "QCheckBox", v["label"],
                    setChecked=v["value"], setToolTip=tool_tip)
            elif v["type"] == "QSpinBox":
                data[k]["value"] = self.create_row(
                    content_layout, "QSpinBox", v["label"],
                    setRange=(0, 99999),
                    setValue=v["value"],
                    setToolTip=tool_tip)
        return data

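Editor's note: the row dicts consumed by `populate_widgets()` (and later read back by `value()`) follow the `type`/`target`/`label`/`order`/`value` shape named in the docstring; a minimal sketch of such an input, with illustrative key names:

```python
# Hypothetical ui_inputs definition a creator plugin might pass in.
ui_inputs = {
    "clipName": {
        "type": "QLineEdit",
        "target": "ui",
        "label": "Clip Name Template",
        "order": 0,
        "value": "shot_{_trackIndex_:0>3}_{_clipIndex_:0>4}",
        "toolTip": "Template for renaming clips"
    },
    "vSyncOn": {
        "type": "QCheckBox",
        "target": "tag",
        "label": "Enable Vertical Sync",
        "order": 1,
        "value": True
    }
}
# After the dialog closes, CreatorWidget.value() returns, per key:
# {"clipName": {"target": "ui", "value": "..."},
#  "vSyncOn": {"target": "tag", "value": True}}
```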
@@ -179,20 +278,6 @@ class Spacer(QtWidgets.QWidget):
        self.setLayout(layout)


def get_reference_node_parents(ref):
    """Return all parent reference nodes of reference node

    Args:
        ref (str): reference node.

    Returns:
        list: The upstream parent reference nodes.

    """
    parents = []
    return parents


class SequenceLoader(api.Loader):
    """A basic SequenceLoader for Resolve

@@ -258,8 +343,12 @@ class Creator(api.Creator):

    def __init__(self, *args, **kwargs):
        super(Creator, self).__init__(*args, **kwargs)
        self.presets = config.get_presets()['plugins']["resolve"][
            "create"].get(self.__class__.__name__, {})
        from pype.api import get_current_project_settings
        resolve_p_settings = get_current_project_settings().get("resolve")
        self.presets = dict()
        if resolve_p_settings:
            self.presets = resolve_p_settings["create"].get(
                self.__class__.__name__, {})

        # adding basic current context resolve objects
        self.project = resolve.get_current_project()

@@ -271,3 +360,310 @@ class Creator(api.Creator):
        self.selected = resolve.get_current_track_items(filter=False)

        self.widget = CreatorWidget


class PublishClip:
    """
    Convert a track item to publishable instance

    Args:
        track_item (hiero.core.TrackItem): hiero track item object
        kwargs (optional): additional data needed for rename=True (presets)

    Returns:
        hiero.core.TrackItem: hiero track item object with pype tag
    """
    vertical_clip_match = dict()
    tag_data = dict()
    types = {
        "shot": "shot",
        "folder": "folder",
        "episode": "episode",
        "sequence": "sequence",
        "track": "sequence",
    }

    # parents search pattern
    parents_search_patern = r"\{([a-z]*?)\}"

    # default templates for non-ui use
    rename_default = False
    hierarchy_default = "{_folder_}/{_sequence_}/{_track_}"
    clip_name_default = "shot_{_trackIndex_:0>3}_{_clipIndex_:0>4}"
    subset_name_default = "<track_name>"
    review_track_default = "< none >"
    subset_family_default = "plate"
    count_from_default = 10
    count_steps_default = 10
    vertical_sync_default = False
    driving_layer_default = ""

    def __init__(self, cls, track_item_data, **kwargs):
        # populate input cls attribute onto self.[attr]
        self.__dict__.update(cls.__dict__)

        # get main parent objects
        self.track_item_data = track_item_data
        self.track_item = track_item_data["clip"]["item"]
        sequence_name = track_item_data["sequence"].GetName()
        self.sequence_name = str(sequence_name).replace(" ", "_")

        # track item (clip) main attributes
        self.ti_name = self.track_item.GetName()
        self.ti_index = int(track_item_data["clip"]["index"])

        # get track name and index
        track_name = track_item_data["track"]["name"]
        self.track_name = str(track_name).replace(" ", "_")
        self.track_index = int(track_item_data["track"]["index"])

        # adding tag.family into tag
        if kwargs.get("avalon"):
            self.tag_data.update(kwargs["avalon"])

        # adding ui inputs if any
        self.ui_inputs = kwargs.get("ui_inputs", {})

        # adding media pool folder if any
        self.mp_folder = kwargs.get("mp_folder")

        # populate default data before we get other attributes
        self._populate_track_item_default_data()

        # use all populated default data to create all important attributes
        self._populate_attributes()

        # create parents with correct types
        self._create_parents()

    def convert(self):
        # solve track item data and add them to tag data
        self._convert_to_tag_data()

        # if track name is in review track name and also if driving track name
        # is not in review track name: skip tag creation
        if (self.track_name in self.review_layer) and (
                self.driving_layer not in self.review_layer):
            return

        # deal with clip name
        new_name = self.tag_data.pop("newClipName")

        if self.rename:
            self.tag_data["asset"] = new_name
        else:
            self.tag_data["asset"] = self.ti_name

        if not lib.pype_marker_workflow:
            # create compound clip workflow
            lib.create_compound_clip(
                self.track_item_data,
                self.tag_data["asset"],
                self.mp_folder
            )

            # add track_item_data selection to tag
            self.tag_data.update({
                "track_data": self.track_item_data["track"]
            })

        # create pype tag on track_item and add data
        lib.imprint(self.track_item, self.tag_data)

        return self.track_item

    def _populate_track_item_default_data(self):
        """ Populate default formatting data from track item. """

        self.track_item_default_data = {
            "_folder_": "shots",
            "_sequence_": self.sequence_name,
            "_track_": self.track_name,
            "_clip_": self.ti_name,
            "_trackIndex_": self.track_index,
            "_clipIndex_": self.ti_index
        }

    def _populate_attributes(self):
        """ Populate main object attributes. """
        # track item frame range and parent track name for vertical sync check
        self.clip_in = int(self.track_item.GetStart())
        self.clip_out = int(self.track_item.GetEnd())

        # define ui inputs if non gui mode was used
        self.shot_num = self.ti_index
        print(
            "____ self.shot_num: {}".format(self.shot_num))

        # ui_inputs data or default values if gui was not used
        self.rename = self.ui_inputs.get(
            "clipRename", {}).get("value") or self.rename_default
        self.clip_name = self.ui_inputs.get(
            "clipName", {}).get("value") or self.clip_name_default
        self.hierarchy = self.ui_inputs.get(
            "hierarchy", {}).get("value") or self.hierarchy_default
        self.hierarchy_data = self.ui_inputs.get(
            "hierarchyData", {}).get("value") or \
            self.track_item_default_data.copy()
        self.count_from = self.ui_inputs.get(
            "countFrom", {}).get("value") or self.count_from_default
        self.count_steps = self.ui_inputs.get(
            "countSteps", {}).get("value") or self.count_steps_default
        self.subset_name = self.ui_inputs.get(
            "subsetName", {}).get("value") or self.subset_name_default
        self.subset_family = self.ui_inputs.get(
            "subsetFamily", {}).get("value") or self.subset_family_default
        self.vertical_sync = self.ui_inputs.get(
            "vSyncOn", {}).get("value") or self.vertical_sync_default
        self.driving_layer = self.ui_inputs.get(
            "vSyncTrack", {}).get("value") or self.driving_layer_default
        self.review_track = self.ui_inputs.get(
            "reviewTrack", {}).get("value") or self.review_track_default

        # build subset name from layer name
        if self.subset_name == "<track_name>":
            self.subset_name = self.track_name

        # create subset for publishing
        self.subset = self.subset_family + self.subset_name.capitalize()

    def _replace_hash_to_expression(self, name, text):
        """ Replace hash with number in correct padding. """
        _spl = text.split("#")
        _len = (len(_spl) - 1)
        _repl = "{{{0}:0>{1}}}".format(name, _len)
        new_text = text.replace(("#" * _len), _repl)
        return new_text

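Editor's note: `_replace_hash_to_expression()` turns a run of hashes into a zero-padded format field; a quick check of that conversion:

```python
name, text = "shot", "shot####"
_len = len(text.split("#")) - 1
_repl = "{{{0}:0>{1}}}".format(name, _len)   # -> "{shot:0>4}"
new_text = text.replace("#" * _len, _repl)   # -> "shot{shot:0>4}"
assert new_text.format(shot=30) == "shot0030"
```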
    def _convert_to_tag_data(self):
        """ Convert internal data to tag data.

        Populating the tag data into internal variable self.tag_data
        """
        # define vertical sync attributes
        master_layer = True
        self.review_layer = ""
        if self.vertical_sync:
            # check if track name is not in driving layer
            if self.track_name not in self.driving_layer:
                # if it is not then define vertical sync as None
                master_layer = False

        # increasing steps by index of rename iteration
        self.count_steps *= self.rename_index

        hierarchy_formating_data = dict()
        _data = self.track_item_default_data.copy()
        if self.ui_inputs:
            # adding tag metadata from ui
            for _k, _v in self.ui_inputs.items():
                if _v["target"] == "tag":
                    self.tag_data[_k] = _v["value"]

            # driving layer is set as positive match
            if master_layer or self.vertical_sync:
                # mark review layer
                if self.review_track and (
                        self.review_track not in self.review_track_default):
                    # if review layer is defined and not the same as default
                    self.review_layer = self.review_track
                # shot num calculate
                if self.rename_index == 0:
                    self.shot_num = self.count_from
                else:
                    self.shot_num = self.count_from + self.count_steps

            # clip name sequence number
            _data.update({"shot": self.shot_num})

            # solve # in text to pythonic expression
            for _k, _v in self.hierarchy_data.items():
                if "#" not in _v["value"]:
                    continue
                self.hierarchy_data[
                    _k]["value"] = self._replace_hash_to_expression(
                        _k, _v["value"])

            # fill up pythonic expressions in hierarchy data
            for k, _v in self.hierarchy_data.items():
                hierarchy_formating_data[k] = _v["value"].format(**_data)
        else:
            # if no gui mode then just pass default data
            hierarchy_formating_data = self.hierarchy_data

        tag_hierarchy_data = self._solve_tag_hierarchy_data(
            hierarchy_formating_data
        )

        tag_hierarchy_data.update({"masterLayer": True})
        if master_layer and self.vertical_sync:
            # tag_hierarchy_data.update({"masterLayer": True})
            self.vertical_clip_match.update({
                (self.clip_in, self.clip_out): tag_hierarchy_data
            })

        if not master_layer and self.vertical_sync:
            # driving layer is set as negative match
            for (_in, _out), master_data in self.vertical_clip_match.items():
                master_data.update({"masterLayer": False})
                if _in == self.clip_in and _out == self.clip_out:
                    data_subset = master_data["subset"]
                    # add track index in case duplicity of names in master data
                    if self.subset in data_subset:
                        master_data["subset"] = self.subset + str(
                            self.track_index)
                    # in case track name and subset name is the same then add
                    if self.subset_name == self.track_name:
                        master_data["subset"] = self.subset
                    # assign data to return hierarchy data to tag
                    tag_hierarchy_data = master_data

        # add data to return data dict
        self.tag_data.update(tag_hierarchy_data)

        if master_layer and self.review_layer:
            self.tag_data.update({"reviewTrack": self.review_layer})

    def _solve_tag_hierarchy_data(self, hierarchy_formating_data):
        """ Solve tag data from hierarchy data and templates. """
        # fill up clip name and hierarchy keys
        hierarchy_filled = self.hierarchy.format(**hierarchy_formating_data)
        clip_name_filled = self.clip_name.format(**hierarchy_formating_data)

        return {
            "newClipName": clip_name_filled,
            "hierarchy": hierarchy_filled,
            "parents": self.parents,
            "hierarchyData": hierarchy_formating_data,
            "subset": self.subset,
            "family": self.subset_family,
            "families": ["clip"]
        }

    def _convert_to_entity(self, key):
        """ Converting input key to key with type. """
        # convert to entity type
        entity_type = self.types.get(key, None)

        assert entity_type, "Missing entity type for `{}`".format(
            key
        )

        return {
            "entity_type": entity_type,
            "entity_name": self.hierarchy_data[key]["value"].format(
                **self.track_item_default_data
            )
        }

    def _create_parents(self):
        """ Create parents and return it in list. """
        self.parents = list()

        patern = re.compile(self.parents_search_patern)
        par_split = [patern.findall(t).pop()
                     for t in self.hierarchy.split("/")]

        for key in par_split:
            parent = self._convert_to_entity(key)
            self.parents.append(parent)

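Editor's note: `_create_parents()` relies on `parents_search_patern` pulling one key out of every segment of the hierarchy template; note the character class has no underscore, so it only matches plain lowercase keys. An illustrative check:

```python
import re

pattern = re.compile(r"\{([a-z]*?)\}")
hierarchy = "{folder}/{sequence}/{shot}"  # illustrative template
keys = [pattern.findall(token).pop() for token in hierarchy.split("/")]
assert keys == ["folder", "sequence", "shot"]
```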
@@ -3,7 +3,7 @@ import time
from pype.hosts.resolve.utils import get_resolve_module
from pype.api import Logger

log = Logger().get_logger(__name__, "resolve")
log = Logger().get_logger(__name__)

wait_delay = 2.5
wait = 0.00

134 pype/hosts/resolve/todo-rendering.py Normal file
@@ -0,0 +1,134 @@
#!/usr/bin/env python
# TODO: convert this script to be usable with PYPE
"""
Example DaVinci Resolve script:
Load a still from DRX file, apply the still to all clips in all timelines.
Set render format and codec, add render jobs for all timelines, render
to specified path and wait for rendering completion.
Once render is complete, delete all jobs
"""
# cloned from: https://github.com/survos/transcribe/blob/fe3cf51eb95b82dabcf21fbe5f89bfb3d8bb6ce2/python/3_grade_and_render_all_timelines.py  # noqa

from python_get_resolve import GetResolve
import sys
import time


def AddTimelineToRender(project, timeline, presetName,
                        targetDirectory, renderFormat, renderCodec):
    project.SetCurrentTimeline(timeline)
    project.LoadRenderPreset(presetName)

    if not project.SetCurrentRenderFormatAndCodec(renderFormat, renderCodec):
        return False

    project.SetRenderSettings(
        {"SelectAllFrames": 1, "TargetDir": targetDirectory})
    return project.AddRenderJob()


def RenderAllTimelines(resolve, presetName, targetDirectory,
                       renderFormat, renderCodec):
    projectManager = resolve.GetProjectManager()
    project = projectManager.GetCurrentProject()
    if not project:
        return False

    resolve.OpenPage("Deliver")
    timelineCount = project.GetTimelineCount()

    for index in range(0, int(timelineCount)):
        if not AddTimelineToRender(
                project,
                project.GetTimelineByIndex(index + 1),
                presetName,
                targetDirectory,
                renderFormat,
                renderCodec):
            return False
    return project.StartRendering()


def IsRenderingInProgress(resolve):
    projectManager = resolve.GetProjectManager()
    project = projectManager.GetCurrentProject()
    if not project:
        return False

    return project.IsRenderingInProgress()


def WaitForRenderingCompletion(resolve):
    while IsRenderingInProgress(resolve):
        time.sleep(1)
    return


def ApplyDRXToAllTimelineClips(timeline, path, gradeMode=0):
    trackCount = timeline.GetTrackCount("video")

    clips = {}
    for index in range(1, int(trackCount) + 1):
        clips.update(timeline.GetItemsInTrack("video", index))
    return timeline.ApplyGradeFromDRX(path, int(gradeMode), clips)


def ApplyDRXToAllTimelines(resolve, path, gradeMode=0):
    projectManager = resolve.GetProjectManager()
    project = projectManager.GetCurrentProject()
    if not project:
        return False
    timelineCount = project.GetTimelineCount()

    for index in range(0, int(timelineCount)):
        timeline = project.GetTimelineByIndex(index + 1)
        project.SetCurrentTimeline(timeline)
        if not ApplyDRXToAllTimelineClips(timeline, path, gradeMode):
            return False
    return True


def DeleteAllRenderJobs(resolve):
    projectManager = resolve.GetProjectManager()
    project = projectManager.GetCurrentProject()
    project.DeleteAllRenderJobs()
    return


# Inputs:
# - DRX file to import grade still and apply it for clips
# - grade mode (0, 1 or 2)
# - preset name for rendering
# - render path
# - render format
# - render codec
if len(sys.argv) < 7:
    print(
        "input parameters for scripts are [drx file path] [grade mode] "
        "[render preset name] [render path] [render format] [render codec]")
    sys.exit()

drxPath = sys.argv[1]
gradeMode = sys.argv[2]
renderPresetName = sys.argv[3]
renderPath = sys.argv[4]
renderFormat = sys.argv[5]
renderCodec = sys.argv[6]

# Get currently open project
resolve = GetResolve()

if not ApplyDRXToAllTimelines(resolve, drxPath, gradeMode):
    print("Unable to apply a still from drx file to all timelines")
    sys.exit()

if not RenderAllTimelines(resolve, renderPresetName, renderPath,
                          renderFormat, renderCodec):
    print("Unable to set all timelines for rendering")
    sys.exit()

WaitForRenderingCompletion(resolve)

DeleteAllRenderJobs(resolve)

print("Rendering is completed.")
84 pype/hosts/resolve/utility_scripts/OTIO_export.py Normal file
@@ -0,0 +1,84 @@
#!/usr/bin/env python
import os
from pype.hosts.resolve.otio import davinci_export as otio_export

resolve = bmd.scriptapp("Resolve")  # noqa
fu = resolve.Fusion()

ui = fu.UIManager
disp = bmd.UIDispatcher(fu.UIManager)  # noqa


title_font = ui.Font({"PixelSize": 18})
dlg = disp.AddWindow(
    {
        "WindowTitle": "Export OTIO",
        "ID": "OTIOwin",
        "Geometry": [250, 250, 250, 100],
        "Spacing": 0,
        "Margin": 10
    },
    [
        ui.VGroup(
            {
                "Spacing": 2
            },
            [
                ui.Button(
                    {
                        "ID": "exportfilebttn",
                        "Text": "Select Destination",
                        "Weight": 1.25,
                        "ToolTip": "Choose where to save the otio",
                        "Flat": False
                    }
                ),
                ui.VGap(),
                ui.Button(
                    {
                        "ID": "exportbttn",
                        "Text": "Export",
                        "Weight": 2,
                        "ToolTip": "Export the current timeline",
                        "Flat": False
                    }
                )
            ]
        )
    ]
)

itm = dlg.GetItems()


def _close_window(event):
    disp.ExitLoop()


def _export_button(event):
    pm = resolve.GetProjectManager()
    project = pm.GetCurrentProject()
    fps = project.GetSetting("timelineFrameRate")
    timeline = project.GetCurrentTimeline()
    otio_timeline = otio_export.create_otio_timeline(timeline, fps)
    otio_path = os.path.join(
        itm["exportfilebttn"].Text,
        timeline.GetName() + ".otio")
    print(otio_path)
    otio_export.write_to_file(
        otio_timeline,
        otio_path)
    _close_window(None)


def _export_file_pressed(event):
    selectedPath = fu.RequestDir(os.path.expanduser("~/Documents"))
    itm["exportfilebttn"].Text = selectedPath


dlg.On.OTIOwin.Close = _close_window
dlg.On.exportfilebttn.Clicked = _export_file_pressed
dlg.On.exportbttn.Clicked = _export_button
dlg.Show()
disp.RunLoop()
dlg.Hide()
72 pype/hosts/resolve/utility_scripts/OTIO_import.py Normal file
@@ -0,0 +1,72 @@
#!/usr/bin/env python
import os
from pype.hosts.resolve.otio import davinci_import as otio_import

resolve = bmd.scriptapp("Resolve")  # noqa
fu = resolve.Fusion()
ui = fu.UIManager
disp = bmd.UIDispatcher(fu.UIManager)  # noqa


title_font = ui.Font({"PixelSize": 18})
dlg = disp.AddWindow(
    {
        "WindowTitle": "Import OTIO",
        "ID": "OTIOwin",
        "Geometry": [250, 250, 250, 100],
        "Spacing": 0,
        "Margin": 10
    },
    [
        ui.VGroup(
            {
                "Spacing": 2
            },
            [
                ui.Button(
                    {
                        "ID": "importOTIOfileButton",
                        "Text": "Select OTIO File Path",
                        "Weight": 1.25,
                        "ToolTip": "Choose otio file to import from",
                        "Flat": False
                    }
                ),
                ui.VGap(),
                ui.Button(
                    {
                        "ID": "importButton",
                        "Text": "Import",
                        "Weight": 2,
                        "ToolTip": "Import otio to new timeline",
                        "Flat": False
                    }
                )
            ]
        )
    ]
)

itm = dlg.GetItems()


def _close_window(event):
    disp.ExitLoop()


def _import_button(event):
    otio_import.read_from_file(itm["importOTIOfileButton"].Text)
    _close_window(None)


def _import_file_pressed(event):
    selected_path = fu.RequestFile(os.path.expanduser("~/Documents"))
    itm["importOTIOfileButton"].Text = selected_path


dlg.On.OTIOwin.Close = _close_window
dlg.On.importOTIOfileButton.Clicked = _import_file_pressed
dlg.On.importButton.Clicked = _import_button
dlg.Show()
disp.RunLoop()
dlg.Hide()
16 pype/hosts/resolve/utility_scripts/PYPE_sync_util_scripts.py Normal file
@@ -0,0 +1,16 @@
#!/usr/bin/env python
import os
import sys
import pype


def main(env):
    import pype.hosts.resolve as bmdvr
    # Registers pype's Global pyblish plugins
    pype.install()
    bmdvr.setup(env)


if __name__ == "__main__":
    result = main(os.environ)
    sys.exit(not bool(result))
22 pype/hosts/resolve/utility_scripts/resolve_dev_scriping.py Normal file
@@ -0,0 +1,22 @@
#!/usr/bin/env python


def main():
    import pype.hosts.resolve as bmdvr
    bmdvr.utils.get_resolve_module()

    tracks = list()
    track_type = "video"
    sequence = bmdvr.get_current_sequence()

    # get all tracks count filtered by track type
    selected_track_count = sequence.GetTrackCount(track_type)

    # loop all tracks and get items
    for track_index in range(1, (int(selected_track_count) + 1)):
        track_name = sequence.GetTrackName("video", track_index)
        tracks.append(track_name)


if __name__ == "__main__":
    main()

@@ -1,19 +1,24 @@
#! python3
import sys
from pype.api import Logger
import DaVinciResolveScript as bmdvr


log = Logger().get_logger(__name__)


def main():
    import pype.hosts.resolve as bmdvr
    bm = bmdvr.utils.get_resolve_module()
    log.info(f"blackmagicmodule: {bm}")


    print(f"_>> bmdvr.scriptapp(Resolve): {bmdvr.scriptapp('Resolve')}")
    resolve = bmdvr.scriptapp('Resolve')
    print(f"resolve: {resolve}")
    project_manager = resolve.GetProjectManager()
    project = project_manager.GetCurrentProject()
    media_pool = project.GetMediaPool()
    root_folder = media_pool.GetRootFolder()
    ls_folder = root_folder.GetClipList()
    timeline = project.GetCurrentTimeline()
    timeline_name = timeline.GetName()
    for tl in ls_folder:
        if tl.GetName() not in timeline_name:
            continue
        print(tl.GetName())
        print(tl.GetMetadata())
        print(tl.GetClipProperty())


if __name__ == "__main__":

@@ -9,7 +9,7 @@ import os
import shutil

from pype.api import Logger
log = Logger().get_logger(__name__, "resolve")
log = Logger().get_logger(__name__)


def get_resolve_module():

@@ -9,7 +9,7 @@ from . import (
)


log = Logger().get_logger(__name__, "resolve")
log = Logger().get_logger(__name__)

exported_projet_ext = ".drp"

@@ -1,7 +1,7 @@
from pype.lib import (
    PreLaunchHook,
    ApplicationLaunchFailed,
    _subprocess
    run_subprocess
)


@@ -25,7 +25,7 @@ class PreInstallPyWin(PreLaunchHook):
        return

        try:
            output = _subprocess(
            output = run_subprocess(
                ["pip", "install", "pywin32==227"]
            )
            self.log.debug("Pip install pywin32 output:\n{}'".format(output))

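Editor's note: `run_subprocess` is the renamed `_subprocess` helper (its old body is shown further down in this diff); per its docstring it returns the combined stdout/stderr text and raises `RuntimeError` on a non-zero return code. A hedged usage sketch:

```python
from pype.lib import run_subprocess

# Returns the captured output on success, raises RuntimeError otherwise.
output = run_subprocess(["pip", "--version"])
print(output)
```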
@@ -2,7 +2,10 @@
"""Pype module API."""

from .terminal import Terminal
from .execute import execute
from .execute import (
    execute,
    run_subprocess
)
from .log import PypeLogger, timeit
from .mongo import (
    decompose_url,

@@ -10,17 +13,13 @@ from .mongo import (
    get_default_components,
    PypeMongoConnection
)
from .anatomy import Anatomy

from .config import (
    get_datetime_data,
    load_json,
    collect_json_from_path,
    get_presets,
    get_init_presets,
    update_dict
from .anatomy import (
    merge_dict,
    Anatomy
)

from .config import get_datetime_data

from .env_tools import (
    env_value_to_bool,
    get_paths_from_environ

@@ -39,6 +38,15 @@ from .avalon_context import (
    get_hierarchy,
    get_linked_assets,
    get_latest_version,

    get_workdir_data,
    get_workdir,
    get_workdir_with_workdir_data,

    create_workfile_doc,
    save_workfile_data_to_doc,
    get_workfile_doc,

    BuildWorkfile
)


@@ -48,15 +56,18 @@ from .applications import (
    ApplicationNotFound,
    ApplicationManager,
    PreLaunchHook,
    PostLaunchHook,
    _subprocess
    PostLaunchHook
)

from .plugin_tools import (
    filter_pyblish_plugins,
    source_hash,
    get_unique_layer_name,
    get_background_layers
    get_background_layers,
    oiio_supported,
    decompress,
    get_decompress_dir,
    should_decompress
)

from .user_settings import (

@@ -76,9 +87,23 @@ from .ffmpeg_utils import (
    ffprobe_streams
)

from .editorial import (
    is_overlapping_otio_ranges,
    otio_range_to_frame_range,
    otio_range_with_handles,
    convert_to_padded_path,
    trim_media_range,
    range_from_frames,
    frames_to_secons,
    make_sequence_collection
)

terminal = Terminal

__all__ = [
    "execute",
    "run_subprocess",

    "env_value_to_bool",
    "get_paths_from_environ",


@@ -92,10 +117,16 @@ __all__ = [
    "get_hierarchy",
    "get_linked_assets",
    "get_latest_version",
    "BuildWorkfile",

    "PypeHook",
    "execute_hook",
    "get_workdir_data",
    "get_workdir",
    "get_workdir_with_workdir_data",

    "create_workfile_doc",
    "save_workfile_data_to_doc",
    "get_workfile_doc",

    "BuildWorkfile",

    "ApplicationLaunchFailed",
    "ApplictionExecutableNotFound",


@@ -108,6 +139,10 @@ __all__ = [
    "source_hash",
    "get_unique_layer_name",
    "get_background_layers",
    "oiio_supported",
    "decompress",
    "get_decompress_dir",
    "should_decompress",

    "version_up",
    "get_version_from_path",


@@ -116,17 +151,13 @@ __all__ = [
    "ffprobe_streams",
    "get_ffmpeg_tool_path",

    "_subprocess",

    "terminal",

    "merge_dict",
    "Anatomy",

    "get_datetime_data",
    "load_json",
    "collect_json_from_path",
    "get_presets",
    "get_init_presets",
    "update_dict",
    "execute",

    "PypeLogger",
    "decompose_url",
    "compose_url",


@@ -136,5 +167,14 @@ __all__ = [
    "IniSettingRegistry",
    "JSONSettingRegistry",
    "PypeSettingsRegistry",
    "timeit"
    "timeit",

    "is_overlapping_otio_ranges",
    "otio_range_with_handles",
    "convert_to_padded_path",
    "otio_range_to_frame_range",
    "trim_media_range",
    "range_from_frames",
    "frames_to_secons",
    "make_sequence_collection"
]

@@ -9,7 +9,6 @@ from pype.settings.lib import (
    get_default_anatomy_settings,
    get_anatomy_settings
)
from . import config
from .log import PypeLogger

log = PypeLogger().get_logger(__name__)

@@ -20,6 +19,32 @@ except NameError:
    StringType = str


def merge_dict(main_dict, enhance_dict):
    """Merges dictionaries by keys.

    Function calls itself if the value on a key is again a dictionary.

    Args:
        main_dict (dict): First dict to merge second one into.
        enhance_dict (dict): Second dict to be merged.

    Returns:
        dict: Merged result.

    .. note:: does not override the whole value on the first found key
        but only the value differences from enhance_dict

    """
    for key, value in enhance_dict.items():
        if key not in main_dict:
            main_dict[key] = value
        elif isinstance(value, dict) and isinstance(main_dict[key], dict):
            main_dict[key] = merge_dict(main_dict[key], value)
        else:
            main_dict[key] = value
    return main_dict


class ProjectNotSet(Exception):
    """Exception raised when Anatomy is created without a project name."""

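Editor's note: a quick check of the recursive merge semantics described in the note above:

```python
main = {"a": 1, "nested": {"x": 1, "y": 2}}
enhance = {"nested": {"y": 20, "z": 30}, "b": 2}

result = merge_dict(main, enhance)
# Only differing/new keys of the nested dict are applied:
assert result == {"a": 1, "nested": {"x": 1, "y": 20, "z": 30}, "b": 2}
```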
@@ -395,9 +420,7 @@ class TemplatesDict(dict):
            if key in invalid_types:
                continue
            _invalid_types[key] = val
        invalid_types = config.update_dict(
            invalid_types, _invalid_types
        )
        invalid_types = merge_dict(invalid_types, _invalid_types)
        return invalid_types

    @property

@@ -405,7 +428,7 @@ class TemplatesDict(dict):
        """Return used values for all children templates."""
        used_values = {}
        for value in self.values():
            used_values = config.update_dict(used_values, value.used_values)
            used_values = merge_dict(used_values, value.used_values)
        return used_values

    def get_solved(self):

@@ -840,7 +863,7 @@ class Templates:

        root_key = "{" + root_key + "}"

        roots_dict = config.update_dict(
        roots_dict = merge_dict(
            roots_dict,
            self._keys_to_dicts(used_root_keys, root_key)
        )

@@ -1,5 +1,4 @@
import os
import copy
import platform
import inspect
import subprocess

@@ -16,8 +15,6 @@ from .python_module_tools import (
    classes_from_module
)

log = PypeLogger().get_logger(__name__)


class ApplicationNotFound(Exception):
    """Application was not found in ApplicationManager by name."""

@@ -67,71 +64,6 @@ class ApplicationLaunchFailed(Exception):
    pass


# Special naming case for subprocess since its a built-in method.
def _subprocess(*args, **kwargs):
    """Convenience method for getting output errors for subprocess.

    Entered arguments and keyword arguments are passed to subprocess Popen.

    Args:
        *args: Variable length argument list passed to Popen.
        **kwargs : Arbitrary keyword arguments passed to Popen. It is possible
            to pass a `logging.Logger` object under "logger" if you want to
            use a different one than lib's logger.

    Returns:
        str: Full output of subprocess concatenated stdout and stderr.

    Raises:
        RuntimeError: Exception is raised if process finished with nonzero
            return code.
    """

    # Get environments from kwarg or use current process environments if they
    # were not passed.
    env = kwargs.get("env") or os.environ
    # Make sure environment contains only strings
    filtered_env = {k: str(v) for k, v in env.items()}

    # Use lib's logger if was not passed with kwargs.
    logger = kwargs.pop("logger", log)

    # set overrides
    kwargs['stdout'] = kwargs.get('stdout', subprocess.PIPE)
    kwargs['stderr'] = kwargs.get('stderr', subprocess.PIPE)
    kwargs['stdin'] = kwargs.get('stdin', subprocess.PIPE)
    kwargs['env'] = filtered_env

    proc = subprocess.Popen(*args, **kwargs)

    full_output = ""
    _stdout, _stderr = proc.communicate()
    if _stdout:
        _stdout = _stdout.decode("utf-8")
        full_output += _stdout
        logger.debug(_stdout)

    if _stderr:
        _stderr = _stderr.decode("utf-8")
        # Add additional line break if output already contains stdout
        if full_output:
            full_output += "\n"
        full_output += _stderr
        logger.warning(_stderr)

    if proc.returncode != 0:
        exc_msg = "Executing arguments was not successful: \"{}\"".format(args)
        if _stdout:
            exc_msg += "\n\nOutput:\n{}".format(_stdout)

        if _stderr:
            exc_msg += "Error:\n{}".format(_stderr)

        raise RuntimeError(exc_msg)

    return full_output


class ApplicationManager:
    def __init__(self):
        self.log = PypeLogger().get_logger(self.__class__.__name__)

@@ -531,15 +463,23 @@ class ApplicationLaunchContext:
        self.launch_args = executable.as_args()

        # Handle launch environments
        passed_env = self.data.pop("env", None)
        if passed_env is None:
        env = self.data.pop("env", None)
        if env is not None and not isinstance(env, dict):
            self.log.warning((
                "Passed `env` kwarg has invalid type: {}. Expected: `dict`."
                " Using `os.environ` instead."
            ).format(str(type(env))))
            env = None

        if env is None:
            env = os.environ
        else:
            env = passed_env

        # subprocess.Popen keyword arguments
        self.kwargs = {
            "env": copy.deepcopy(env)
            "env": {
                key: str(value)
                for key, value in env.items()
            }
        }

        if platform.system().lower() == "windows":

@@ -580,7 +520,6 @@ class ApplicationLaunchContext:
        paths = []

        # TODO load additional studio paths from settings
        # TODO add paths based on used modules (like `ftrack`)
        import pype
        pype_dir = os.path.dirname(os.path.abspath(pype.__file__))


@@ -610,6 +549,13 @@ class ApplicationLaunchContext:
            and path not in paths
        ):
            paths.append(path)

        # Load modules paths
        from pype.modules import ModulesManager

        manager = ModulesManager()
        paths.extend(manager.collect_launch_hook_paths())

        return paths

    def discover_launch_hooks(self, force=False):

@@ -1,11 +1,13 @@
import os
import json
import re
import copy
import logging
import collections
import functools

from pype.settings import get_project_settings
from .anatomy import Anatomy

# avalon module is not imported at the top
# - may not be in path at the time of pype.lib initialization

@@ -246,6 +248,229 @@ def get_latest_version(asset_name, subset_name, dbcon=None, project_name=None):
    return version_doc


def get_workdir_data(project_doc, asset_doc, task_name, host_name):
    """Prepare data for workdir template filling from entered information.

    Args:
        project_doc (dict): Mongo document of project from MongoDB.
        asset_doc (dict): Mongo document of asset from MongoDB.
        task_name (str): Task name for which workdir data are prepared.
        host_name (str): Host which is used for workdir. This is required
            because workdir template may contain `{app}` key.

    Returns:
        dict: Data prepared for filling workdir template.
    """
    hierarchy = "/".join(asset_doc["data"]["parents"])

    data = {
        "project": {
            "name": project_doc["name"],
            "code": project_doc["data"].get("code")
        },
        "task": task_name,
        "asset": asset_doc["name"],
        "app": host_name,
        "hierarchy": hierarchy
    }
    return data


def get_workdir_with_workdir_data(
    workdir_data, anatomy=None, project_name=None, template_key=None
):
    """Fill workdir path from entered data and project's anatomy.

    It is possible to pass only project's name instead of project's anatomy,
    but one of them **must** be entered. It is preferred to enter anatomy if
    it is available, as initialization of a new Anatomy object may be time
    consuming.

    Args:
        workdir_data (dict): Data to fill workdir template.
        anatomy (Anatomy): Anatomy object for specific project. Optional if
            `project_name` is entered.
        project_name (str): Project's name. Optional if `anatomy` is entered,
            otherwise an Anatomy object is created using the project name.
        template_key (str): Key of work templates in anatomy templates. By
            default is set to `"work"`.

    Returns:
        TemplateResult: Workdir path.

    Raises:
        ValueError: When both `anatomy` and `project_name` are set to None.
    """
    if not anatomy and not project_name:
        raise ValueError((
            "Missing required arguments: one of `project_name` or `anatomy`"
            " must be entered."
        ))

    if not anatomy:
        anatomy = Anatomy(project_name)

    if not template_key:
        template_key = "work"

    anatomy_filled = anatomy.format(workdir_data)
    # Output is TemplateResult object which contains useful data
    return anatomy_filled[template_key]["folder"]


def get_workdir(
    project_doc,
    asset_doc,
    task_name,
    host_name,
    anatomy=None,
    template_key=None
):
    """Fill workdir path from entered data and project's anatomy.

    Args:
        project_doc (dict): Mongo document of project from MongoDB.
        asset_doc (dict): Mongo document of asset from MongoDB.
        task_name (str): Task name for which workdir data are prepared.
        host_name (str): Host which is used for workdir. This is required
            because workdir template may contain `{app}` key. In `Session`
            is stored under `AVALON_APP` key.
        anatomy (Anatomy): Optional argument. Anatomy object is created using
            project name from `project_doc`. It is preferred to pass this
            argument as initialization of a new Anatomy object may be time
            consuming.
        template_key (str): Key of work templates in anatomy templates. Default
            value is defined in `get_workdir_with_workdir_data`.

    Returns:
        TemplateResult: Workdir path.
    """
    if not anatomy:
        anatomy = Anatomy(project_doc["name"])

    workdir_data = get_workdir_data(
        project_doc, asset_doc, task_name, host_name
    )
    # Output is TemplateResult object which contains useful data
    return get_workdir_with_workdir_data(workdir_data, anatomy, template_key)


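Editor's note: how the two helpers compose, with hypothetical documents standing in for real MongoDB records:

```python
project_doc = {"name": "demo", "data": {"code": "dm"}}
asset_doc = {"name": "sh010", "data": {"parents": ["shots", "sq01"]}}

workdir_data = get_workdir_data(project_doc, asset_doc, "compositing", "resolve")
# {"project": {"name": "demo", "code": "dm"}, "task": "compositing",
#  "asset": "sh010", "app": "resolve", "hierarchy": "shots/sq01"}

# Anatomy is created from the project name when not passed in.
workdir = get_workdir_with_workdir_data(workdir_data, project_name="demo")
```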
@with_avalon
def get_workfile_doc(asset_id, task_name, filename, dbcon=None):
    """Return workfile document for entered context.

    Do not use this method to get more than one document. In that case use a
    custom query, as this will return documents from database one by one.

    Args:
        asset_id (ObjectId): Mongo ID of an asset under which workfile belongs.
        task_name (str): Name of task under which the workfile belongs.
        filename (str): Name of a workfile.
        dbcon (AvalonMongoDB): Optionally enter avalon AvalonMongoDB object and
            `avalon.io` is used if not entered.

    Returns:
        dict: Workfile document or None.
    """
    # Use avalon.io if dbcon is not entered
    if not dbcon:
        dbcon = avalon.io

    return dbcon.find_one({
        "type": "workfile",
        "parent": asset_id,
        "task_name": task_name,
        "filename": filename
    })


@with_avalon
def create_workfile_doc(asset_doc, task_name, filename, workdir, dbcon=None):
    """Create or replace workfile document in mongo.

    Do not use this method to update data. This method will remove all
    additional data from existing document.

    Args:
        asset_doc (dict): Document of asset under which workfile belongs.
        task_name (str): Name of task the workfile is related to.
        filename (str): Filename of workfile.
        workdir (str): Path to directory where `filename` is located.
        dbcon (AvalonMongoDB): Optionally enter avalon AvalonMongoDB object and
            `avalon.io` is used if not entered.
    """
    # Use avalon.io if dbcon is not entered
    if not dbcon:
        dbcon = avalon.io

    # Filter of workfile document
    doc_filter = {
        "type": "workfile",
        "parent": asset_doc["_id"],
        "task_name": task_name,
        "filename": filename
    }
    # Document data are copy of filter
    doc_data = copy.deepcopy(doc_filter)

    # Prepare project for workdir data
    project_doc = dbcon.find_one({"type": "project"})
    workdir_data = get_workdir_data(
        project_doc, asset_doc, task_name, dbcon.Session["AVALON_APP"]
    )
    # Prepare anatomy
    anatomy = Anatomy(project_doc["name"])
    # Get workdir path (result is anatomy.TemplateResult)
    template_workdir = get_workdir_with_workdir_data(workdir_data, anatomy)
    template_workdir_path = str(template_workdir).replace("\\", "/")

    # Replace slashes in workdir path where workfile is located
    mod_workdir = workdir.replace("\\", "/")

    # Replace workdir from templates with rootless workdir
    rootles_workdir = mod_workdir.replace(
        template_workdir_path,
        template_workdir.rootless.replace("\\", "/")
    )

    doc_data["schema"] = "pype:workfile-1.0"
    doc_data["files"] = ["/".join([rootles_workdir, filename])]
    doc_data["data"] = {}

    dbcon.replace_one(
        doc_filter,
        doc_data,
        upsert=True
    )


@with_avalon
def save_workfile_data_to_doc(workfile_doc, data, dbcon=None):
    if not workfile_doc:
        # TODO add log message
        return

    if not data:
        return

    # Use avalon.io if dbcon is not entered
    if not dbcon:
        dbcon = avalon.io

    # Convert data to mongo modification keys/values
    # - this is a naive implementation which does not expect nested
    #   dictionaries
    set_data = {}
    for key, value in data.items():
        new_key = "data.{}".format(key)
        set_data[new_key] = value

    # Update workfile document with data
    dbcon.update_one(
        {"_id": workfile_doc["_id"]},
        {"$set": set_data}
    )


class BuildWorkfile:
    """Wrapper for build workfile process.

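Editor's note: the naive key conversion in `save_workfile_data_to_doc` flattens one level into dotted Mongo paths; for example:

```python
data = {"start_frame": 1001, "comment": "wip"}
set_data = {"data.{}".format(key): value for key, value in data.items()}
assert set_data == {"data.start_frame": 1001, "data.comment": "wip"}
```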
@@ -1,11 +1,6 @@
# -*- coding: utf-8 -*-
"""Get configuration data."""
import os
import json
import datetime
from .log import PypeLogger

log = PypeLogger().get_logger(__name__)


def get_datetime_data(datetime_obj=None):

@ -79,233 +74,3 @@ def get_datetime_data(datetime_obj=None):
|
|||
"S": str(int(seconds)),
|
||||
"SS": str(seconds),
|
||||
}
|
||||
|
||||
|
||||
 def load_json(fpath, first_run=False):
     """Load JSON data.

     Args:
         fpath (str): Path to JSON file.
         first_run (bool): Flag to run checks if file is loaded for the
             first time.

     Returns:
         dict: Parsed JSON object.

     """
     # Load json data
     with open(fpath, "r") as opened_file:
         lines = opened_file.read().splitlines()

     # Prepare json string
     standard_json = ""
     for line in lines:
         # Remove all whitespace on both sides
         line = line.strip()

         # Skip blank lines
         if len(line) == 0:
             continue

         standard_json += line

     # Check if it has extra commas
     extra_comma = False
     if ",]" in standard_json or ",}" in standard_json:
         extra_comma = True
     standard_json = standard_json.replace(",]", "]")
     standard_json = standard_json.replace(",}", "}")

     if extra_comma and first_run:
         log.error("Extra comma in json file: \"{}\"".format(fpath))

     # Return empty dict if file is empty
     if standard_json == "":
         if first_run:
             log.error("Empty json file: \"{}\"".format(fpath))
         return {}

     # Try to parse string
     try:
         return json.loads(standard_json)

     except json.decoder.JSONDecodeError:
         # Return empty dict if it is not the first time a decode error
         # happened
         if not first_run:
             return {}

         # Reproduce the exact same exception, but the traceback contains
         # better information about the position of the error in the
         # loaded json
         try:
             with open(fpath, "r") as opened_file:
                 json.load(opened_file)

         except json.decoder.JSONDecodeError:
             log.warning(
                 "File has invalid json format \"{}\"".format(fpath),
                 exc_info=True
             )

         return {}
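A quick usage sketch of the tolerant loader above (illustrative only, not part of this commit): a preset file with a trailing comma still parses, and `first_run=True` logs the problem. The file content is hypothetical.

```python
import tempfile

# Hypothetical preset content with a trailing comma before "}"
with tempfile.NamedTemporaryFile(
    "w", suffix=".json", delete=False
) as tmp_file:
    tmp_file.write('{"colorspace": "default",}')
    fpath = tmp_file.name

data = load_json(fpath, first_run=True)  # logs "Extra comma in json file"
assert data == {"colorspace": "default"}
```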
 def collect_json_from_path(input_path, first_run=False):
     """Collect JSON files from a path.

     Iterates through all subfolders and JSON files in `input_path`.

     Args:
         input_path (str): Path from which JSONs will be collected.
         first_run (bool): Flag to run checks if file is loaded for the
             first time.

     Returns:
         dict: Collected JSONs.

     Examples:

         Imagine path::
             `{input_path}/path/to/file.json`

         >>> collect_json_from_path(input_path)
         {'path':
             {'to':
                 {'file': {JSON}
                 }
             }
         }

     """
     output = None
     if os.path.isdir(input_path):
         output = {}
         for file in os.listdir(input_path):
             full_path = os.path.sep.join([input_path, file])
             if os.path.isdir(full_path):
                 loaded = collect_json_from_path(full_path, first_run)
                 if loaded:
                     output[file] = loaded
             else:
                 basename, ext = os.path.splitext(os.path.basename(file))
                 if ext == '.json':
                     output[basename] = load_json(full_path, first_run)
     else:
         basename, ext = os.path.splitext(os.path.basename(input_path))
         if ext == '.json':
             output = load_json(input_path, first_run)

     return output
 def get_presets(project=None, first_run=False):
     """Load preset files with usage of ``collect_json_from_path``.

     Default preset path is set to: `{PYPE_CONFIG}/presets`
     Project preset path is set to: `{PYPE_PROJECT_CONFIGS}/project_name`

     Environment variable `PYPE_CONFIG` is required;
     `PYPE_PROJECT_CONFIGS` only if you want to use overrides per project.

     Args:
         project (str): Project name.
         first_run (bool): Flag to run checks if file is loaded for the
             first time.

     Returns:
         None: If default path does not exist.
         default presets (dict): If project_name is not set or
             if project's presets folder does not exist.
         project presets (dict): If project_name is set and includes
             override data.

     """
     # config_path should be set from environments?
     config_path = os.path.normpath(os.environ['PYPE_CONFIG'])
     preset_items = [config_path, 'presets']
     config_path = os.path.sep.join(preset_items)
     if not os.path.isdir(config_path):
         log.error('Preset path was not found: "{}"'.format(config_path))
         return None
     default_data = collect_json_from_path(config_path, first_run)

     if not project:
         project = os.environ.get('AVALON_PROJECT', None)

     if not project:
         return default_data

     project_configs_path = os.environ.get('PYPE_PROJECT_CONFIGS')
     if not project_configs_path:
         return default_data

     project_configs_path = os.path.normpath(project_configs_path)
     project_config_items = [project_configs_path, project, 'presets']
     project_config_path = os.path.sep.join(project_config_items)

     if not os.path.isdir(project_config_path):
         log.warning('Preset path for project {} not found: "{}"'.format(
             project, project_config_path
         ))
         return default_data
     project_data = collect_json_from_path(project_config_path, first_run)

     return update_dict(default_data, project_data)
 def get_init_presets(project=None):
     """Load content of presets.

     Like :func:`get_presets` but also evaluates the `init.json`
     pointer to default presets.

     Args:
         project (str): Project name.

     Returns:
         None: If default path does not exist.
         default presets (dict): If project_name is not set or if project's
             presets folder does not exist.
         project presets (dict): If project_name is set and includes
             override data.
     """
     presets = get_presets(project)

     try:
         # Check if it is in the project's custom directory
         # `{PYPE_PROJECT_CONFIGS}/[PROJECT_NAME]/init.json`
         # init.json defines preset names to be used
         p_init = presets["init"]
         presets["colorspace"] = presets["colorspace"][p_init["colorspace"]]
         presets["dataflow"] = presets["dataflow"][p_init["dataflow"]]
     except KeyError:
         log.warning("No project's custom preset available...")
         presets["colorspace"] = presets["colorspace"]["default"]
         presets["dataflow"] = presets["dataflow"]["default"]
         log.info(("Presets `colorspace` and `dataflow` "
                   "loaded from `default`..."))

     return presets
 def update_dict(main_dict, enhance_dict):
     """Merge dictionaries by keys.

     The function calls itself if the value on a key is again a dictionary.

     Args:
         main_dict (dict): First dict to merge the second one into.
         enhance_dict (dict): Second dict to be merged.

     Returns:
         dict: Merged result.

     .. note:: Does not override the whole value on the first found key,
         only the values that differ in enhance_dict.

     """
     for key, value in enhance_dict.items():
         if key not in main_dict:
             main_dict[key] = value
         elif isinstance(value, dict) and isinstance(main_dict[key], dict):
             main_dict[key] = update_dict(main_dict[key], value)
         else:
             main_dict[key] = value
     return main_dict
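A minimal merge sketch (illustrative only, not part of this commit); the preset keys are hypothetical:

```python
# Hypothetical default presets and per-project overrides
defaults = {"ftrack": {"timeout": 30, "url": "https://studio"}, "muster": {}}
overrides = {"ftrack": {"timeout": 60}}

merged = update_dict(defaults, overrides)
# Only the differing key is replaced; sibling values survive the merge
assert merged["ftrack"] == {"timeout": 60, "url": "https://studio"}
assert "muster" in merged
```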

pype/lib/editorial.py (new file, 160 lines)
@@ -0,0 +1,160 @@
 import os
 import re
 import clique
 from opentimelineio import opentime
 from opentimelineio.opentime import (
     to_frames, RationalTime, TimeRange)


 def otio_range_to_frame_range(otio_range):
     start = to_frames(
         otio_range.start_time, otio_range.start_time.rate)
     end = start + to_frames(
         otio_range.duration, otio_range.duration.rate) - 1
     return start, end


 def otio_range_with_handles(otio_range, instance):
     handle_start = instance.data["handleStart"]
     handle_end = instance.data["handleEnd"]
     handles_duration = handle_start + handle_end
     fps = float(otio_range.start_time.rate)
     start = to_frames(otio_range.start_time, fps)
     duration = to_frames(otio_range.duration, fps)

     return TimeRange(
         start_time=RationalTime((start - handle_start), fps),
         duration=RationalTime((duration + handles_duration), fps)
     )


 def is_overlapping_otio_ranges(test_otio_range, main_otio_range, strict=False):
     test_start, test_end = otio_range_to_frame_range(test_otio_range)
     main_start, main_end = otio_range_to_frame_range(main_otio_range)
     covering_exp = bool(
         (test_start <= main_start) and (test_end >= main_end)
     )
     inside_exp = bool(
         (test_start >= main_start) and (test_end <= main_end)
     )
     overlaying_right_exp = bool(
         (test_start <= main_end) and (test_end >= main_end)
     )
     overlaying_left_exp = bool(
         (test_end >= main_start) and (test_start <= main_start)
     )

     if not strict:
         return any((
             covering_exp,
             inside_exp,
             overlaying_right_exp,
             overlaying_left_exp
         ))
     else:
         return covering_exp


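A small sketch of the overlap test (illustrative, not part of this commit), using `range_from_frames` defined further below:

```python
# Two 25 fps ranges built with range_from_frames (defined further below)
clip = range_from_frames(1001, 50, 25.0)    # frames 1001-1050
probe = range_from_frames(1040, 30, 25.0)   # frames 1040-1069

assert is_overlapping_otio_ranges(probe, clip)  # right-side overlap counts
# strict mode requires the test range to fully cover the main range
assert not is_overlapping_otio_ranges(probe, clip, strict=True)
```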
 def convert_to_padded_path(path, padding):
     """
     Return correct padding in sequence string.

     Args:
         path (str): path url or simple file name
         padding (int): number of padding digits

     Returns:
         str: string with reformatted path

     Example:
         convert_to_padded_path("plate.%d.exr", 4) -> "plate.%04d.exr"

     """
     if "%d" in path:
         path = re.sub("%d", "%0{padding}d".format(padding=padding), path)
     return path


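Two quick checks (illustrative, not part of this commit):

```python
# Only a bare "%d" placeholder is rewritten
assert convert_to_padded_path("plate.%d.exr", 4) == "plate.%04d.exr"
# An already padded pattern passes through unchanged
assert convert_to_padded_path("plate.%04d.exr", 4) == "plate.%04d.exr"
```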
 def trim_media_range(media_range, source_range):
     """
     Trim input media range with clip source range.

     Args:
         media_range (otio.opentime.TimeRange): available range of media
         source_range (otio.opentime.TimeRange): clip required range

     Returns:
         otio.opentime.TimeRange: trimmed media range

     """
     rw_media_start = RationalTime(
         media_range.start_time.value + source_range.start_time.value,
         media_range.start_time.rate
     )
     rw_media_duration = RationalTime(
         source_range.duration.value,
         media_range.duration.rate
     )
     return TimeRange(
         rw_media_start, rw_media_duration)


 def range_from_frames(start, duration, fps):
     """
     Returns otio time range.

     Args:
         start (int): frame start
         duration (int): frame duration
         fps (float): frame rate

     Returns:
         otio.opentime.TimeRange: created range

     """
     return TimeRange(
         RationalTime(start, fps),
         RationalTime(duration, fps)
     )


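A combined sketch of `range_from_frames` and `trim_media_range` above (illustrative, not part of this commit):

```python
# 100 frames of media at 25 fps; the clip uses 24 frames starting 10 in
media = range_from_frames(0, 100, 25.0)
source = range_from_frames(10, 24, 25.0)

trimmed = trim_media_range(media, source)
assert trimmed.start_time.value == 10  # media start shifted by source start
assert trimmed.duration.value == 24    # duration taken from source range
```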
 def frames_to_secons(frames, framerate):
     """
     Return seconds.

     Args:
         frames (int): frame
         framerate (float): frame rate

     Returns:
         float: second value

     """
     rt = opentime.from_frames(frames, framerate)
     return opentime.to_seconds(rt)


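A one-line sanity check (illustrative, not part of this commit):

```python
# 50 frames at 25 fps is exactly two seconds
assert frames_to_secons(50, 25.0) == 2.0
```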
 def make_sequence_collection(path, otio_range, metadata):
     """
     Make a collection from a path, an otio range and otio metadata.

     Args:
         path (str): path to image sequence with `%d`
         otio_range (otio.opentime.TimeRange): range to be used
         metadata (dict): data where the padding value can be found

     Returns:
         tuple: dir_path (str): path to sequence, collection object

     """
     if "%" not in path:
         return None
     file_name = os.path.basename(path)
     dir_path = os.path.dirname(path)
     head = file_name.split("%")[0]
     tail = os.path.splitext(file_name)[-1]
     first, last = otio_range_to_frame_range(otio_range)
     collection = clique.Collection(
         head=head, tail=tail, padding=metadata["padding"])
     collection.indexes.update([i for i in range(first, (last + 1))])
     return dir_path, collection
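A usage sketch (illustrative, not part of this commit); the sequence path is hypothetical:

```python
seq_range = range_from_frames(1001, 3, 25.0)
dir_path, collection = make_sequence_collection(
    "/renders/shot010/plate.%04d.exr",  # hypothetical sequence path
    seq_range,
    {"padding": 4}
)
# dir_path -> "/renders/shot010"
# collection holds members plate.1001.exr, plate.1002.exr, plate.1003.exr
```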

@@ -69,42 +69,67 @@ def execute(args,
     return popen.returncode


-def _subprocess(*args, **kwargs):
+def run_subprocess(*args, **kwargs):
     """Convenience method for getting output errors for subprocess.

-    .. seealso:: :mod:`subprocess`
+    Output is logged when the process finishes.
+
+    Entered arguments and keyword arguments are passed to subprocess Popen.
+
+    Args:
+        *args: Variable length argument list passed to Popen.
+        **kwargs: Arbitrary keyword arguments passed to Popen. It is
+            possible to pass a `logging.Logger` object under "logger" if
+            you want to use a different logger than lib's.
+
+    Returns:
+        str: Full output of subprocess concatenated from stdout and stderr.
+
+    Raises:
+        RuntimeError: Exception is raised if process finished with nonzero
+            return code.
     """
-    # make sure environment contains only strings
-    if not kwargs.get("env"):
-        filtered_env = {k: str(v) for k, v in os.environ.items()}
-    else:
-        filtered_env = {k: str(v) for k, v in kwargs.get("env").items()}
+    # Get environments from kwarg or use current process environments if
+    # they were not passed.
+    env = kwargs.get("env") or os.environ
+    # Make sure environment contains only strings
+    filtered_env = {k: str(v) for k, v in env.items()}
+
+    # Use lib's logger if none was passed with kwargs.
+    logger = kwargs.pop("logger", log)

     # set overrides
     kwargs['stdout'] = kwargs.get('stdout', subprocess.PIPE)
-    kwargs['stderr'] = kwargs.get('stderr', subprocess.STDOUT)
+    kwargs['stderr'] = kwargs.get('stderr', subprocess.PIPE)
     kwargs['stdin'] = kwargs.get('stdin', subprocess.PIPE)
     kwargs['env'] = filtered_env

     proc = subprocess.Popen(*args, **kwargs)

-    output, error = proc.communicate()
+    full_output = ""
+    _stdout, _stderr = proc.communicate()
+    if _stdout:
+        _stdout = _stdout.decode("utf-8")
+        full_output += _stdout
+        logger.debug(_stdout)

-    if output:
-        output = output.decode("utf-8")
-        output += "\n"
-        for line in output.strip().split("\n"):
-            log.info(line)
-
-    if error:
-        error = error.decode("utf-8")
-        error += "\n"
-        for line in error.strip().split("\n"):
-            log.error(line)
+    if _stderr:
+        _stderr = _stderr.decode("utf-8")
+        # Add an additional line break if output already contains stdout
+        if full_output:
+            full_output += "\n"
+        full_output += _stderr
+        logger.warning(_stderr)

     if proc.returncode != 0:
-        raise ValueError(
-            "\"{}\" was not successful:\nOutput: {}\nError: {}".format(
-                args, output, error))
-    return output
+        exc_msg = "Executing arguments was not successful: \"{}\"".format(
+            args)
+        if _stdout:
+            exc_msg += "\n\nOutput:\n{}".format(_stdout)
+
+        if _stderr:
+            exc_msg += "Error:\n{}".format(_stderr)
+
+        raise RuntimeError(exc_msg)
+
+    return full_output
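A usage sketch of the renamed helper (illustrative, not part of this commit):

```python
import logging
import sys

example_log = logging.getLogger("example")
output = run_subprocess(
    [sys.executable, "-c", "print('hello')"],
    logger=example_log
)
assert "hello" in output  # combined stdout/stderr is returned
# A nonzero return code would raise RuntimeError instead
```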

pype/lib/log.py (445 lines)
@@ -21,99 +21,24 @@ import socket
 import sys
 import time
 import traceback
-from logging.handlers import TimedRotatingFileHandler
+import threading
+import copy

 from . import Terminal
 from .mongo import (
     MongoEnvNotSet,
     decompose_url,
     compose_url,
-    get_default_components
+    PypeMongoConnection
 )

 try:
+    import log4mongo
     from log4mongo.handlers import MongoHandler
-    from bson.objectid import ObjectId
-    MONGO_PROCESS_ID = ObjectId()
 except ImportError:
-    _mongo_logging = False
-else:
-    _mongo_logging = True
+    log4mongo = None
+    MongoHandler = type("NOT_SET", (), {})

-try:
-    unicode
-    _unicode = True
-except NameError:
-    _unicode = False
-
-
-PYPE_DEBUG = int(os.getenv("PYPE_DEBUG", "0"))
-LOG_DATABASE_NAME = os.environ.get("PYPE_LOG_MONGO_DB") or "pype"
-LOG_COLLECTION_NAME = os.environ.get("PYPE_LOG_MONGO_COL") or "logs"
-
-system_name, pc_name = platform.uname()[:2]
-host_name = socket.gethostname()
-try:
-    ip = socket.gethostbyname(host_name)
-except socket.gaierror:
-    ip = "127.0.0.1"
-
-# Get process name
-if len(sys.argv) > 0 and os.path.basename(sys.argv[0]) == "tray.py":
-    process_name = "Tray"
-else:
-    try:
-        import psutil
-        process = psutil.Process(os.getpid())
-        process_name = process.name()
-
-    except ImportError:
-        process_name = os.environ.get("AVALON_APP_NAME")
-        if not process_name:
-            process_name = os.path.basename(sys.executable)
-
-
-def _log_mongo_components():
-    mongo_url = os.environ.get("PYPE_LOG_MONGO_URL")
-    if mongo_url is not None:
-        components = decompose_url(mongo_url)
-    else:
-        components = get_default_components()
-    return components
-
-
-def _bootstrap_mongo_log(components=None):
-    """
-    This will check if database and collection for logging exist on server.
-    """
-    import pymongo
-
-    if components is None:
-        components = _log_mongo_components()
-
-    if not components["host"]:
-        # fail silently
-        return
-
-    timeout = int(os.environ.get("AVALON_TIMEOUT", 1000))
-    kwargs = {
-        "host": compose_url(**components),
-        "serverSelectionTimeoutMS": timeout
-    }
-
-    port = components.get("port")
-    if port is not None:
-        kwargs["port"] = int(port)
-    client = pymongo.MongoClient(**kwargs)
-    logdb = client[LOG_DATABASE_NAME]
-
-    collist = logdb.list_collection_names()
-    if LOG_COLLECTION_NAME not in collist:
-        logdb.create_collection(
-            LOG_COLLECTION_NAME, capped=True, max=5000, size=1073741824
-        )
-    return logdb
+# Check for `unicode` in builtins
+USE_UNICODE = hasattr(__builtins__, "unicode")


 class PypeStreamHandler(logging.StreamHandler):
@@ -148,7 +73,8 @@ class PypeStreamHandler(logging.StreamHandler):
             msg = Terminal.log(msg)
             stream = self.stream
             fs = "%s\n"
-            if not _unicode:  # if no unicode support...
+            # if no unicode support...
+            if not USE_UNICODE:
                 stream.write(fs % msg)
             else:
                 try:
@@ -225,23 +151,18 @@ class PypeMongoFormatter(logging.Formatter):
             'fileName': record.pathname,
             'module': record.module,
             'method': record.funcName,
-            'lineNumber': record.lineno,
-            'process_id': MONGO_PROCESS_ID,
-            'hostname': host_name,
-            'hostip': ip,
-            'username': getpass.getuser(),
-            'system_name': system_name,
-            'process_name': process_name
+            'lineNumber': record.lineno
         }
+        document.update(PypeLogger.get_process_data())

         # Standard document decorated with exception info
         if record.exc_info is not None:
-            document.update({
-                'exception': {
-                    'message': str(record.exc_info[1]),
-                    'code': 0,
-                    'stackTrace': self.formatException(record.exc_info)
-                }
-            })
+            document['exception'] = {
+                'message': str(record.exc_info[1]),
+                'code': 0,
+                'stackTrace': self.formatException(record.exc_info)
+            }

         # Standard document decorated with extra contextual information
         if len(self.DEFAULT_PROPERTIES) != len(record.__dict__):
             contextual_extra = set(record.__dict__).difference(
@@ -253,9 +174,6 @@ class PypeMongoFormatter(logging.Formatter):


 class PypeLogger:

-    PYPE_DEBUG = 0
-
     DFT = '%(levelname)s >>> { %(name)s }: [ %(message)s ] '
     DBG = " - { %(name)s }: [ %(message)s ] "
     INF = ">>> [ %(message)s ] "
@@ -271,55 +189,97 @@ class PypeLogger:
         logging.CRITICAL: CRI,
     }

-    def __init__(self):
-        self.PYPE_DEBUG = int(os.environ.get("PYPE_DEBUG", "0"))
+    # Is static class initialized
+    bootstraped = False
+    initialized = False
+    _init_lock = threading.Lock()

-    @staticmethod
-    def get_file_path(host='pype'):
+    # Defines if mongo logging should be used
+    use_mongo_logging = None
+    mongo_process_id = None

-        ts = time.time()
-        log_name = datetime.datetime.fromtimestamp(ts).strftime(
-            '%Y-%m-%d'  # '%Y-%m-%d_%H-%M-%S'
-        )
+    # Information about mongo url
+    log_mongo_url = None
+    log_mongo_url_components = None
+    log_database_name = None
+    log_collection_name = None

-        logger_file_root = os.path.join(
-            os.path.expanduser("~"),
-            ".pype-setup"
-        )
+    # PYPE_DEBUG
+    pype_debug = 0

-        logger_file_path = os.path.join(
-            logger_file_root,
-            "{}-{}.{}".format(host, log_name, 'log')
-        )
+    # Data same for all record documents
+    process_data = None
+    # Cached process name or ability to set different process name
+    _process_name = None

-        if not os.path.exists(logger_file_root):
-            os.mkdir(logger_file_root)
+    @classmethod
+    def get_logger(cls, name=None, _host=None):
+        if not cls.initialized:
+            cls.initialize()

-        return logger_file_path
+        logger = logging.getLogger(name or "__main__")

-    def _get_file_handler(self, host):
-        logger_file_path = PypeLogger.get_file_path(host)
+        if cls.pype_debug > 1:
+            logger.setLevel(logging.DEBUG)
+        else:
+            logger.setLevel(logging.INFO)

-        formatter = PypeFormatter(self.FORMAT_FILE)
+        add_mongo_handler = cls.use_mongo_logging
+        add_console_handler = True

-        file_handler = TimedRotatingFileHandler(
-            logger_file_path,
-            when='midnight'
-        )
-        file_handler.set_name("PypeFileHandler")
-        file_handler.setFormatter(formatter)
-        return file_handler
+        for handler in logger.handlers:
+            if isinstance(handler, MongoHandler):
+                add_mongo_handler = False
+            elif isinstance(handler, PypeStreamHandler):
+                add_console_handler = False

-    def _get_mongo_handler(self):
-        components = _log_mongo_components()
-        # Check existence of mongo connection before creating Mongo handler
-        if log4mongo.handlers._connection is None:
-            _bootstrap_mongo_log(components)
+        if add_console_handler:
+            logger.addHandler(cls._get_console_handler())
+
+        if add_mongo_handler:
+            try:
+                handler = cls._get_mongo_handler()
+                if handler:
+                    logger.addHandler(handler)
+
+            except MongoEnvNotSet:
+                # Skip if mongo environments are not set yet
+                cls.use_mongo_logging = False
+
+            except Exception:
+                lines = traceback.format_exception(*sys.exc_info())
+                for line in lines:
+                    if line.endswith("\n"):
+                        line = line[:-1]
+                    Terminal.echo(line)
+                cls.use_mongo_logging = False
+
+        # Do not propagate logs to root logger
+        logger.propagate = False
+
+        if _host is not None:
+            # Warn about deprecated argument
+            # TODO remove backwards compatibility of host argument which is
+            #  not used for more than a year
+            logger.warning(
+                "Logger \"{}\" is using argument `host` on `get_logger`"
+                " which is deprecated. Please remove as backwards"
+                " compatibility will be removed soon."
+            )
+        return logger
+
+    @classmethod
+    def _get_mongo_handler(cls):
+        cls.bootstrap_mongo_log()
+
+        if not cls.use_mongo_logging:
+            return

+        components = cls.log_mongo_url_components
         kwargs = {
-            "host": compose_url(**components),
-            "database_name": LOG_DATABASE_NAME,
-            "collection": LOG_COLLECTION_NAME,
+            "host": cls.log_mongo_url,
+            "database_name": cls.log_database_name,
+            "collection": cls.log_collection_name,
             "username": components["username"],
             "password": components["password"],
             "capped": True,
@@ -332,56 +292,193 @@ class PypeLogger:

         return MongoHandler(**kwargs)

-    def _get_console_handler(self):
-
-        formatter = PypeFormatter(self.FORMAT_FILE)
+    @classmethod
+    def _get_console_handler(cls):
+        formatter = PypeFormatter(cls.FORMAT_FILE)
         console_handler = PypeStreamHandler()

         console_handler.set_name("PypeStreamHandler")
         console_handler.setFormatter(formatter)
         return console_handler

-    def get_logger(self, name=None, host=None):
-        logger = logging.getLogger(name or '__main__')
-
-        if self.PYPE_DEBUG > 1:
-            logger.setLevel(logging.DEBUG)
+    @classmethod
+    def initialize(cls):
+        # TODO update already created loggers on re-initialization
+        if not cls._init_lock.locked():
+            with cls._init_lock:
+                cls._initialize()
         else:
-            logger.setLevel(logging.INFO)
+            # If lock is locked wait until it is finished
+            while cls._init_lock.locked():
+                time.sleep(0.1)

-        global _mongo_logging
-        add_mongo_handler = _mongo_logging
-        add_console_handler = True
+    @classmethod
+    def _initialize(cls):
+        # Change initialization state to prevent runtime changes
+        # if it is executed during runtime
+        cls.initialized = False

-        for handler in logger.handlers:
-            if isinstance(handler, MongoHandler):
-                add_mongo_handler = False
-            elif isinstance(handler, PypeStreamHandler):
-                add_console_handler = False
-
-        if add_console_handler:
-            logger.addHandler(self._get_console_handler())
-
-        if add_mongo_handler:
+        # Define if logging to mongo should be used
+        use_mongo_logging = bool(log4mongo is not None)
+        # Set mongo id for process (ONLY ONCE)
+        if use_mongo_logging and cls.mongo_process_id is None:
             try:
-                logger.addHandler(self._get_mongo_handler())
-
-            except MongoEnvNotSet:
-                # Skip if mongo environments are not set yet
-                _mongo_logging = False
-
+                from bson.objectid import ObjectId
             except Exception:
                 lines = traceback.format_exception(*sys.exc_info())
                 for line in lines:
                     if line.endswith("\n"):
                         line = line[:-1]
                     Terminal.echo(line)
-                _mongo_logging = False
+                use_mongo_logging = False

-        # Do not propagate logs to root logger
-        logger.propagate = False
+            # Check if mongo id was passed with environments and pop it
+            # - This is for subprocesses that are part of another process,
+            #   e.g. the Ftrack event server has 3 other subprocesses that
+            #   should use the same mongo id
+            if use_mongo_logging:
+                mongo_id = os.environ.pop("PYPE_PROCESS_MONGO_ID", None)
+                if not mongo_id:
+                    # Create new object id
+                    mongo_id = ObjectId()
+                else:
+                    # Convert string to ObjectId object
+                    mongo_id = ObjectId(mongo_id)
+                cls.mongo_process_id = mongo_id

-        return logger
+        # Store result to class definition
+        cls.use_mongo_logging = use_mongo_logging
+
+        # Define if is in PYPE_DEBUG mode
+        cls.pype_debug = int(os.getenv("PYPE_DEBUG") or "0")
+
+        # Mongo URL where logs will be stored
+        cls.log_mongo_url = (
+            os.environ.get("PYPE_LOG_MONGO_URL")
+            or os.environ.get("PYPE_MONGO")
+        )
+        if not cls.log_mongo_url:
+            cls.use_mongo_logging = False
+        else:
+            # Decompose url
+            cls.log_mongo_url_components = decompose_url(cls.log_mongo_url)
+
+        # Database name in Mongo
+        cls.log_database_name = (
+            os.environ.get("PYPE_LOG_MONGO_DB") or "pype"
+        )
+        # Collection name under database in Mongo
+        cls.log_collection_name = (
+            os.environ.get("PYPE_LOG_MONGO_COL") or "logs"
+        )
+
+        # Mark as initialized
+        cls.initialized = True
+
+    @classmethod
+    def get_process_data(cls):
+        """Data about the current process, same for all records.
+
+        Process data are used for each record sent to the mongo database.
+        """
+        if cls.process_data is not None:
+            return copy.deepcopy(cls.process_data)
+
+        if not cls.initialized:
+            cls.initialize()
+
+        host_name = socket.gethostname()
+        try:
+            host_ip = socket.gethostbyname(host_name)
+        except socket.gaierror:
+            host_ip = "127.0.0.1"
+
+        process_name = cls.get_process_name()
+
+        cls.process_data = {
+            "process_id": cls.mongo_process_id,
+            "hostname": host_name,
+            "hostip": host_ip,
+            "username": getpass.getuser(),
+            "system_name": platform.system(),
+            "process_name": process_name
+        }
+        return copy.deepcopy(cls.process_data)
+
+    @classmethod
+    def set_process_name(cls, process_name):
+        """Set process name for mongo logs."""
+        # Just change the attribute
+        cls._process_name = process_name
+        # Update process data if they are already set
+        if cls.process_data is not None:
+            cls.process_data["process_name"] = process_name
+
+    @classmethod
+    def get_process_name(cls):
+        """Process name that acts like a "label" of a process.
+
+        Pype's logging can be used from Pype itself or from hosts. Even in
+        Pype it is good to know if logs come from the Pype tray or from
+        Pype's event server. This should help to identify that information.
+        """
+        if cls._process_name is not None:
+            return cls._process_name
+
+        # Get process name
+        process_name = os.environ.get("AVALON_APP_NAME")
+        if not process_name:
+            try:
+                import psutil
+                process = psutil.Process(os.getpid())
+                process_name = process.name()
+
+            except ImportError:
+                pass
+
+        if not process_name:
+            process_name = os.path.basename(sys.executable)
+
+        cls._process_name = process_name
+        return cls._process_name
+
+    @classmethod
+    def bootstrap_mongo_log(cls):
+        """Prepare mongo logging."""
+        if cls.bootstraped:
+            return
+
+        if not cls.initialized:
+            cls.initialize()
+
+        if not cls.use_mongo_logging:
+            return
+
+        client = log4mongo.handlers._connection
+        if not client:
+            client = cls.get_log_mongo_connection()
+            # Set the client inside log4mongo handlers to not create
+            # another mongo db connection.
+            log4mongo.handlers._connection = client
+
+        logdb = client[cls.log_database_name]
+
+        collist = logdb.list_collection_names()
+        if cls.log_collection_name not in collist:
+            logdb.create_collection(
+                cls.log_collection_name,
+                capped=True,
+                max=5000,
+                size=1073741824
+            )
+        cls.bootstraped = True
+
+    @classmethod
+    def get_log_mongo_connection(cls):
+        """Mongo connection that allows to get to the log collection.
+
+        This is implemented to prevent multiple connections to mongo from
+        the same process.
+        """
+        if not cls.initialized:
+            cls.initialize()
+
+        return PypeMongoConnection.get_mongo_client(cls.log_mongo_url)
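A usage sketch of the refactored static logger (illustrative, not part of this commit):

```python
from pype.lib.log import PypeLogger

# Label this process before the first record is sent to Mongo
PypeLogger.set_process_name("Example process")

log = PypeLogger.get_logger(__name__)
log.info("Goes to console; also to MongoDB when PYPE_LOG_MONGO_URL is set.")
```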


 def timeit(method):
@@ -5,6 +5,9 @@ import inspect
 import logging
 import re
 import json
+import tempfile

+from .execute import run_subprocess

 from pype.settings import get_project_settings
@@ -134,3 +137,115 @@ def get_background_layers(file_url):
                       layer.get("filename")).
                       replace("\\", "/"))
     return layers


+def oiio_supported():
+    """
+    Checks if oiiotool is configured for this platform.
+
+    Expects a full path to the executable.
+
+    'should_decompress' will throw an exception if it is configured
+    but not present or not working.
+    Returns:
+        (bool)
+    """
+    oiio_path = os.getenv("PYPE_OIIO_PATH", "")
+    if not oiio_path or not os.path.exists(oiio_path):
+        log.debug("OIIOTool is not configured or not present at {}".
+                  format(oiio_path))
+        return False
+
+    return True
+
+
+def decompress(target_dir, file_url,
+               input_frame_start=None, input_frame_end=None, log=None):
+    """
+    Decompresses DWAA 'file_url' .exr to 'target_dir'.
+
+    Creates uncompressed files in 'target_dir'; they need to be cleaned.
+
+    File url could be for a single file or for a sequence; in that case
+    %0Xd will be a placeholder for the frame number AND input_frame*
+    will be filled.
+    In that case a single oiio command with '--frames' will be triggered
+    for all frames; this should be faster than looping and running
+    sequentially.
+
+    Args:
+        target_dir (str): extended from stagingDir
+        file_url (str): full url to source file (with or without %0Xd)
+        input_frame_start (int) (optional): first frame
+        input_frame_end (int) (optional): last frame
+        log (Logger) (optional): pype logger
+    """
+    is_sequence = input_frame_start is not None and \
+        input_frame_end is not None and \
+        (int(input_frame_end) > int(input_frame_start))
+
+    oiio_cmd = []
+    oiio_cmd.append(os.getenv("PYPE_OIIO_PATH"))
+
+    oiio_cmd.append("--compression none")
+
+    base_file_name = os.path.basename(file_url)
+    oiio_cmd.append(file_url)
+
+    if is_sequence:
+        oiio_cmd.append("--frames {}-{}".format(input_frame_start,
+                                                input_frame_end))
+
+    oiio_cmd.append("-o")
+    oiio_cmd.append(os.path.join(target_dir, base_file_name))
+
+    subprocess_exr = " ".join(oiio_cmd)
+
+    if not log:
+        log = logging.getLogger(__name__)
+
+    log.debug("Decompressing {}".format(subprocess_exr))
+    run_subprocess(
+        subprocess_exr, shell=True, logger=log
+    )
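A decompression sketch (illustrative, not part of this commit); the source sequence path is hypothetical and `get_decompress_dir` is the helper defined just below:

```python
# Hypothetical sequence rendered with DWAA compression
target_dir = get_decompress_dir()  # helper defined just below
decompress(
    target_dir,
    "/renders/shot010/beauty.%04d.exr",
    input_frame_start=1001,
    input_frame_end=1050,
)
# ... read the uncompressed frames from target_dir, then clean it up
```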
+def get_decompress_dir():
+    """
+    Creates a temporary folder for decompressing.
+    It is local; in case of farm it is 'local' to the farm machine.
+
+    Should be much faster, but needs to be cleaned up later.
+    """
+    return os.path.normpath(
+        tempfile.mkdtemp(prefix="pyblish_tmp_")
+    )
+
+
+def should_decompress(file_url):
+    """
+    Tests that 'file_url' is compressed with DWAA.
+
+    Uses 'oiio_supported' to check that the OIIO tool is available for
+    this platform.
+
+    Shouldn't throw an exception as oiiotool is guarded by a check
+    function. Currently implemented this way as there is no support for
+    Mac and Linux. In the future it should be more strict and throw an
+    exception on misconfiguration.
+
+    Args:
+        file_url (str): path to rendered file (in a sequence it would be
+            the first file; if that is compressed it is expected that the
+            whole sequence will be too)
+    Returns:
+        (bool): 'file_url' is DWAA compressed and should be decompressed
+            and we can decompress (oiiotool supported)
+    """
+    if oiio_supported():
+        output = run_subprocess([
+            os.getenv("PYPE_OIIO_PATH"),
+            "--info", "-v", file_url])
+        return "compression: \"dwaa\"" in output or \
+            "compression: \"dwab\"" in output
+
+    return False
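Putting the three helpers together (illustrative, not part of this commit; all paths are hypothetical):

```python
import shutil

first_frame = "/renders/shot010/beauty.1001.exr"  # hypothetical path
if should_decompress(first_frame):
    decompress_dir = get_decompress_dir()
    decompress(
        decompress_dir,
        "/renders/shot010/beauty.%04d.exr",
        input_frame_start=1001,
        input_frame_end=1050,
    )
    # ... consume frames from decompress_dir instead of the source ...
    shutil.rmtree(decompress_dir)
```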
@@ -11,6 +11,8 @@
 # ..---===[[ PyP3 Setup ]]===---...
 #
 import re
+import time
+import threading


 class Terminal:
@@ -24,6 +26,8 @@ class Terminal:

     # Is Terminal initialized
     _initialized = False
+    # Thread lock for initialization to avoid race conditions
+    _init_lock = threading.Lock()
     # Use colorized output
     use_colors = True
     # Output message replacements mapping - set on initialization
@@ -39,16 +43,17 @@ class Terminal:
         Then tries to import the python module that does the colors magic
         and creates its terminal object. Colorized output is not used if
         import of the python module or terminal object creation fails.
-        """
-        # Mark that Terminal's initialization was already triggered
-        Terminal._initialized = True
-
-        from . import env_value_to_bool
+
+        Set `_initialized` attribute to `True` when done.
+        """
+
+        from pype.lib import env_value_to_bool
         use_colors = env_value_to_bool(
             "PYPE_LOG_NO_COLORS", default=Terminal.use_colors
         )
         if not use_colors:
             Terminal.use_colors = use_colors
+            Terminal._initialized = True
             return

         try:
@@ -59,10 +64,11 @@ class Terminal:
         except Exception:
             # Do not use colors if crashed
             Terminal.use_colors = False
-            Terminal.echo(
+            print(
                 "Module `blessed` failed on import or terminal creation."
                 " Pype terminal won't use colors."
             )
+            Terminal._initialized = True
             return

         # shortcuts for blessed codes
@@ -117,6 +123,8 @@ class Terminal:
         Terminal._Y = _Y
         Terminal._W = _W

+        Terminal._initialized = True
+
     @staticmethod
     def _multiple_replace(text, adict):
         """Replace multiple tokens defined in dict.
@@ -169,8 +177,18 @@ class Terminal:

         """
         T = Terminal
+        # Initialize if not yet initialized and use a thread lock to
+        # avoid race condition issues
         if not T._initialized:
-            T._initialize()
+            # Check if the lock is already locked to be sure `_initialize`
+            # is not executed multiple times
+            if not T._init_lock.locked():
+                with T._init_lock:
+                    T._initialize()
+            else:
+                # If lock is locked wait until it is finished
+                while T._init_lock.locked():
+                    time.sleep(0.1)

         # if we don't want colors, just print raw message
         if not T.use_colors:
@@ -5,6 +5,7 @@ from .base import (
     ITrayAction,
     ITrayService,
     IPluginPaths,
+    ILaunchHookPaths,
     ModulesManager,
     TrayModulesManager
 )
|
@@ -32,8 +33,9 @@ from .ftrack import (
     IFtrackEventHandlerPaths
 )
 from .clockify import ClockifyModule
 from .logging import LoggingModule
+from .log_viewer import LogViewModule
 from .muster import MusterModule
 from .deadline import DeadlineModule
 from .standalonepublish_action import StandAlonePublishAction
 from .websocket_server import WebsocketModule
 from .sync_server import SyncServer
@@ -45,6 +47,7 @@ __all__ = (
"ITrayAction",
|
||||
"ITrayService",
|
||||
"IPluginPaths",
|
||||
"ILaunchHookPaths",
|
||||
"ModulesManager",
|
||||
"TrayModulesManager",
|
||||
|
||||
|
|
@@ -70,8 +73,9 @@ __all__ = (

     "ClockifyModule",
     "IdleManager",
     "LoggingModule",
+    "LogViewModule",
     "MusterModule",
     "DeadlineModule",
     "StandAlonePublishAction",

     "WebsocketModule",
@@ -1,7 +1,9 @@
 # -*- coding: utf-8 -*-
 """Base class for Pype Modules."""
+import time
 import inspect
 import logging
+import collections
 from uuid import uuid4
 from abc import ABCMeta, abstractmethod
 import six
@@ -84,6 +86,19 @@ class IPluginPaths:
         pass


+@six.add_metaclass(ABCMeta)
+class ILaunchHookPaths:
+    """Module has launch hook paths to return.
+
+    Expected result is a list of paths.
+    ["path/to/launch_hooks_dir"]
+    """
+
+    @abstractmethod
+    def get_launch_hook_paths(self):
+        pass

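A minimal module sketch implementing the new interface (illustrative, not part of this commit; the module name and hooks directory are hypothetical):

```python
import os

class ExampleModule(PypeModule, ILaunchHookPaths):
    """Hypothetical module exposing a launch hooks directory."""
    name = "example"

    def initialize(self, modules_settings):
        self.enabled = True

    def connect_with_modules(self, enabled_modules):
        return

    def get_launch_hook_paths(self):
        current_dir = os.path.dirname(os.path.abspath(__file__))
        return [os.path.join(current_dir, "launch_hooks")]
```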

 @six.add_metaclass(ABCMeta)
 class ITrayModule:
     """Module has special procedures when used in Pype Tray.
@@ -255,12 +270,17 @@ class ITrayService(ITrayModule):


 class ModulesManager:
+    # Helper attributes for report
+    _report_total_key = "Total"

     def __init__(self):
         self.log = logging.getLogger(self.__class__.__name__)

         self.modules = []
         self.modules_by_id = {}
         self.modules_by_name = {}
+        # For report of time consumption
+        self._report = {}

         self.initialize_modules()
         self.connect_modules()
@@ -270,6 +290,11 @@ class ModulesManager:
         self.log.debug("*** Pype modules initialization.")
         # Prepare settings for modules
         modules_settings = get_system_settings()["modules"]

+        report = {}
+        time_start = time.time()
+        prev_start_time = time_start
+
         # Go through globals in `pype.modules`
         for name in dir(pype.modules):
             modules_item = getattr(pype.modules, name, None)
@@ -308,17 +333,28 @@ class ModulesManager:
enabled_str = " "
|
||||
self.log.debug("[{}] {}".format(enabled_str, name))
|
||||
|
||||
now = time.time()
|
||||
report[module.__class__.__name__] = now - prev_start_time
|
||||
prev_start_time = now
|
||||
|
||||
except Exception:
|
||||
self.log.warning(
|
||||
"Initialization of module {} failed.".format(name),
|
||||
exc_info=True
|
||||
)
|
||||
|
||||
if self._report is not None:
|
||||
report[self._report_total_key] = time.time() - time_start
|
||||
self._report["Initialization"] = report
|
||||
|
||||
def connect_modules(self):
|
||||
"""Trigger connection with other enabled modules.
|
||||
|
||||
Modules should handle their interfaces in `connect_with_modules`.
|
||||
"""
|
||||
report = {}
|
||||
time_start = time.time()
|
||||
prev_start_time = time_start
|
||||
enabled_modules = self.get_enabled_modules()
|
||||
self.log.debug("Has {} enabled modules.".format(len(enabled_modules)))
|
||||
for module in enabled_modules:
|
||||
|
|
@@ -330,6 +366,14 @@ class ModulesManager:
                     exc_info=True
                 )

+            now = time.time()
+            report[module.__class__.__name__] = now - prev_start_time
+            prev_start_time = now
+
+        if self._report is not None:
+            report[self._report_total_key] = time.time() - time_start
+            self._report["Connect modules"] = report
+
     def get_enabled_modules(self):
         """Enabled modules initialized by the manager.

@@ -421,6 +465,156 @@ class ModulesManager:
             ).format(expected_keys, " | ".join(msg_items)))
         return output

+    def collect_launch_hook_paths(self):
+        """Helper to collect hook paths from modules inheriting
+        ILaunchHookPaths.
+
+        Returns:
+            list: Paths to launch hook directories.
+        """
+        str_type = type("")
+        expected_types = (list, tuple, set)
+
+        output = []
+        for module in self.get_enabled_modules():
+            # Skip modules that do not inherit from `ILaunchHookPaths`
+            if not isinstance(module, ILaunchHookPaths):
+                continue
+
+            hook_paths = module.get_launch_hook_paths()
+            if not hook_paths:
+                continue
+
+            # Convert string to list
+            if isinstance(hook_paths, str_type):
+                hook_paths = [hook_paths]
+
+            # Skip invalid types
+            if not isinstance(hook_paths, expected_types):
+                self.log.warning((
+                    "Result of `get_launch_hook_paths`"
+                    " has invalid type {}. Expected {}"
+                ).format(type(hook_paths), expected_types))
+                continue
+
+            output.extend(hook_paths)
+        return output

+    def print_report(self):
+        """Print out a report of time spent on module initialization parts.
+
+        Reporting is not automated; it must be implemented for each
+        initialization part separately. Reports must be stored in the
+        `_report` attribute. Print is skipped if `_report` is empty.
+
+        Attribute `_report` is a dictionary where the key is a "label"
+        describing the processed part and the value is a dictionary where
+        the key is a module's class name and the value is the time delta
+        of its processing.
+
+        It is a good idea to add the total time delta of a processed part
+        under the key defined in attribute `_report_total_key`. By default
+        it has the value `"Total"`, but please use the attribute.
+
+        ```javascript
+        {
+            "Initialization": {
+                "FtrackModule": 0.003,
+                ...
+                "Total": 1.003,
+            },
+            ...
+        }
+        ```
+        """
+        if not self._report:
+            return
+
+        available_col_names = set()
+        for module_names in self._report.values():
+            available_col_names |= set(module_names.keys())
+
+        # Prepare ordered dictionary for columns
+        cols = collections.OrderedDict()
+        # Add module names to the first column
+        cols["Module name"] = list(sorted(
+            module.__class__.__name__
+            for module in self.modules
+            if module.__class__.__name__ in available_col_names
+        ))
+        # Add total key (as last module)
+        cols["Module name"].append(self._report_total_key)
+
+        # Add columns from report
+        for label in self._report.keys():
+            cols[label] = []
+
+        total_module_times = {}
+        for module_name in cols["Module name"]:
+            total_module_times[module_name] = 0
+
+        for label, reported in self._report.items():
+            for module_name in cols["Module name"]:
+                col_time = reported.get(module_name)
+                if col_time is None:
+                    cols[label].append("N/A")
+                    continue
+                cols[label].append("{:.3f}".format(col_time))
+                total_module_times[module_name] += col_time
+
+        # Add a total column that sums the row
+        cols[self._report_total_key] = []
+        for module_name in cols["Module name"]:
+            cols[self._report_total_key].append(
+                "{:.3f}".format(total_module_times[module_name])
+            )
+
+        # Prepare column widths and total row count
+        # - column width is set by the longest value in the column
+        col_widths = {}
+        total_rows = None
+        for key, values in cols.items():
+            if total_rows is None:
+                total_rows = 1 + len(values)
+            max_width = len(key)
+            for value in values:
+                value_length = len(value)
+                if value_length > max_width:
+                    max_width = value_length
+            col_widths[key] = max_width
+
+        rows = []
+        for _idx in range(total_rows):
+            rows.append([])
+
+        for key, values in cols.items():
+            width = col_widths[key]
+            idx = 0
+            rows[idx].append(key.ljust(width))
+            for value in values:
+                idx += 1
+                rows[idx].append(value.ljust(width))
+
+        filler_parts = []
+        for width in col_widths.values():
+            filler_parts.append(width * "-")
+        filler = "+".join(filler_parts)
+
+        formatted_rows = [filler]
+        last_row_idx = len(rows) - 1
+        for idx, row in enumerate(rows):
+            # Add filler before last row
+            if idx == last_row_idx:
+                formatted_rows.append(filler)
+
+            formatted_rows.append("|".join(row))
+
+            # Add filler after first row
+            if idx == 0:
+                formatted_rows.append(filler)
+
+        # Join rows with newline char and add a new line at the end
+        output = "\n".join(formatted_rows) + "\n"
+        print(output)


 class TrayModulesManager(ModulesManager):
     # Define order of modules in menu
@@ -442,6 +636,7 @@ class TrayModulesManager(ModulesManager):
         self.modules = []
         self.modules_by_id = {}
         self.modules_by_name = {}
+        self._report = {}

     def initialize(self, tray_menu):
         self.initialize_modules()
@@ -457,6 +652,9 @@ class TrayModulesManager(ModulesManager):
         return output

     def tray_init(self):
+        report = {}
+        time_start = time.time()
+        prev_start_time = time_start
         for module in self.get_enabled_tray_modules():
             try:
                 module.tray_init()
@@ -469,6 +667,14 @@ class TrayModulesManager(ModulesManager):
                     exc_info=True
                 )

+            now = time.time()
+            report[module.__class__.__name__] = now - prev_start_time
+            prev_start_time = now
+
+        if self._report is not None:
+            report[self._report_total_key] = time.time() - time_start
+            self._report["Tray init"] = report
+
     def tray_menu(self, tray_menu):
         ordered_modules = []
         enabled_by_name = {
@@ -482,6 +688,9 @@ class TrayModulesManager(ModulesManager):
             ordered_modules.append(module_by_name)
         ordered_modules.extend(enabled_by_name.values())

+        report = {}
+        time_start = time.time()
+        prev_start_time = time_start
         for module in ordered_modules:
             if not module.tray_initialized:
                 continue
@@ -497,8 +706,18 @@ class TrayModulesManager(ModulesManager):
                     ),
                     exc_info=True
                 )
+            now = time.time()
+            report[module.__class__.__name__] = now - prev_start_time
+            prev_start_time = now
+
+        if self._report is not None:
+            report[self._report_total_key] = time.time() - time_start
+            self._report["Tray menu"] = report

     def start_modules(self):
+        report = {}
+        time_start = time.time()
+        prev_start_time = time_start
         for module in self.get_enabled_tray_modules():
             if not module.tray_initialized:
                 if isinstance(module, ITrayService):
@@ -514,6 +733,13 @@ class TrayModulesManager(ModulesManager):
                     ),
                     exc_info=True
                 )
+            now = time.time()
+            report[module.__class__.__name__] = now - prev_start_time
+            prev_start_time = now
+
+        if self._report is not None:
+            report[self._report_total_key] = time.time() - time_start
+            self._report["Modules start"] = report

     def on_exit(self):
         for module in self.get_enabled_tray_modules():
@@ -3,7 +3,7 @@ from pype.api import Logger
 from pype.modules.clockify.clockify_api import ClockifyAPI


-log = Logger().get_logger(__name__, "clockify_start")
+log = Logger().get_logger(__name__)


 class ClockifyStart(api.Action):
@@ -1,7 +1,7 @@
 from avalon import api, io
 from pype.modules.clockify.clockify_api import ClockifyAPI
 from pype.api import Logger
-log = Logger().get_logger(__name__, "clockify_sync")
+log = Logger().get_logger(__name__)


 class ClockifySync(api.Action):
@@ -1,7 +1,9 @@
-from .. import PypeModule
+import os
+from pype.modules import (
+    PypeModule, IPluginPaths)


-class DeadlineModule(PypeModule):
+class DeadlineModule(PypeModule, IPluginPaths):
     name = "deadline"

     def initialize(self, modules_settings):
@@ -18,3 +20,10 @@ class DeadlineModule(PypeModule):

     def connect_with_modules(self, *_a, **_kw):
         return
+
+    def get_plugin_paths(self):
+        """Deadline plugin paths."""
+        current_dir = os.path.dirname(os.path.abspath(__file__))
+        return {
+            "publish": [os.path.join(current_dir, "plugins", "publish")]
+        }
@@ -10,7 +10,7 @@ class ValidateDeadlineConnection(pyblish.api.ContextPlugin):

     label = "Validate Deadline Web Service"
     order = pyblish.api.ValidatorOrder
-    hosts = ["maya"]
+    hosts = ["maya", "nuke"]
     families = ["renderlayer"]

     def process(self, context):
@@ -1,6 +1,7 @@
 from .ftrack_module import (
     FtrackModule,
-    IFtrackEventHandlerPaths
+    IFtrackEventHandlerPaths,
+    FTRACK_MODULE_DIR
 )
 from . import ftrack_server
 from .ftrack_server import FtrackServer, check_ftrack_url
@@ -9,6 +10,7 @@ from .lib import BaseHandler, BaseEvent, BaseAction, ServerAction
 __all__ = (
     "FtrackModule",
     "IFtrackEventHandlerPaths",
+    "FTRACK_MODULE_DIR",

     "ftrack_server",
     "FtrackServer",
@@ -9,7 +9,6 @@ class CleanHierarchicalAttrsAction(BaseAction):
label = "Pype Admin"
|
||||
variant = "- Clean hierarchical custom attributes"
|
||||
description = "Unset empty hierarchical attribute values."
|
||||
role_list = ["Pypeclub", "Administrator", "Project Manager"]
|
||||
icon = statics_icon("ftrack", "action_icons", "PypeAdmin.svg")
|
||||
|
||||
all_project_entities_query = (
|
||||
|
|
@@ -20,12 +19,17 @@ class CleanHierarchicalAttrsAction(BaseAction):
"select value, entity_id from CustomAttributeValue "
|
||||
"where entity_id in ({}) and configuration_id is \"{}\""
|
||||
)
|
||||
settings_key = "clean_hierarchical_attr"
|
||||
|
||||
def discover(self, session, entities, event):
|
||||
"""Show only on project entity."""
|
||||
if len(entities) == 1 and entities[0].entity_type.lower() == "project":
|
||||
return True
|
||||
return False
|
||||
if (
|
||||
len(entities) != 1
|
||||
or entities[0].entity_type.lower() != "project"
|
||||
):
|
||||
return False
|
||||
|
||||
return self.valid_roles(session, entities, event)
|
||||
|
||||
def launch(self, session, entities, event):
|
||||
project = entities[0]
|
||||
@@ -94,8 +94,8 @@ Example:
"avalon_auto_sync": {
|
||||
"label": "Avalon auto-sync",
|
||||
"type": "boolean",
|
||||
"write_security_role": ["API", "Administrator"],
|
||||
"read_security_role": ["API", "Administrator"]
|
||||
"write_security_roles": ["API", "Administrator"],
|
||||
"read_security_roles": ["API", "Administrator"]
|
||||
}
|
||||
},
|
||||
"is_hierarchical": {
|
||||
|
|
@@ -131,13 +131,16 @@ class CustomAttributes(BaseAction):
     variant = '- Create/Update Avalon Attributes'
     #: Action description.
     description = 'Creates Avalon/Mongo ID for double check'
-    #: roles that are allowed to register this action
-    role_list = ['Pypeclub', 'Administrator']
     icon = statics_icon("ftrack", "action_icons", "PypeAdmin.svg")
+    settings_key = "create_update_attributes"

     required_keys = ("key", "label", "type")

-    presetable_keys = ("default", "write_security_role", "read_security_role")
+    presetable_keys = (
+        "default",
+        "write_security_roles",
+        "read_security_roles"
+    )
     hierarchical_key = "is_hierarchical"

     type_posibilities = (
@@ -150,7 +153,7 @@ class CustomAttributes(BaseAction):
         Validation
         - action is only for Administrators
         '''
-        return True
+        return self.valid_roles(session, entities, event)

     def launch(self, session, entities, event):
         # JOB SETTINGS
@@ -212,17 +215,17 @@ class CustomAttributes(BaseAction):
         self.groups = {}

         self.ftrack_settings = get_system_settings()["modules"]["ftrack"]
-        self.attrs_presets = self.prepare_attribute_pressets()
+        self.attrs_settings = self.prepare_attribute_settings()

-    def prepare_attribute_pressets(self):
+    def prepare_attribute_settings(self):
         output = {}
-        attr_presets = self.ftrack_settings["custom_attributes"]
-        for entity_type, preset in attr_presets.items():
+        attr_settings = self.ftrack_settings["custom_attributes"]
+        for entity_type, attr_data in attr_settings.items():
             # Lower entity type
             entity_type = entity_type.lower()
             # Just store if entity type is not "task"
             if entity_type != "task":
-                output[entity_type] = preset
+                output[entity_type] = attr_data
                 continue

             # Prepare empty dictionary for entity type if not set yet
@@ -230,7 +233,7 @@ class CustomAttributes(BaseAction):
                 output[entity_type] = {}

             # Store presets per lowered object type
-            for obj_type, _preset in preset.items():
+            for obj_type, _preset in attr_data.items():
                 output[entity_type][obj_type.lower()] = _preset

         return output
@@ -267,14 +270,11 @@ class CustomAttributes(BaseAction):

     def create_hierarchical_mongo_attr(self, session, event):
-        # Set security roles for attribute
-        default_role_list = ("API", "Administrator", "Pypeclub")
         data = {
             "key": CUST_ATTR_ID_KEY,
             "label": "Avalon/Mongo ID",
             "type": "text",
             "default": "",
-            "write_security_roles": default_role_list,
-            "read_security_roles": default_role_list,
             "group": CUST_ATTR_GROUP,
             "is_hierarchical": True,
             "config": {"markdown": False}
@@ -497,21 +497,20 @@ class CustomAttributes(BaseAction):
        else:
            entity_key = attr_data["entity_type"]

        entity_presets = self.attrs_presets.get(entity_key) or {}
        entity_settings = self.attrs_settings.get(entity_key) or {}
        if entity_key.lower() == "task":
            object_type = attr_data["object_type"]
            entity_presets = entity_presets.get(object_type.lower()) or {}
            entity_settings = entity_settings.get(object_type.lower()) or {}

        key_presets = entity_presets.get(attr_key) or {}

        for key, value in key_presets.items():
        key_settings = entity_settings.get(attr_key) or {}
        for key, value in key_settings.items():
            if key in self.presetable_keys and value:
                output[key] = value
        return output

    def process_attr_data(self, cust_attr_data, event):
        attr_presets = self.presets_for_attr_data(cust_attr_data)
        cust_attr_data.update(attr_presets)
        attr_settings = self.presets_for_attr_data(cust_attr_data)
        cust_attr_data.update(attr_settings)

        try:
            data = {}
@@ -779,9 +778,9 @@ class CustomAttributes(BaseAction):
            roles_read = attr["read_security_roles"]
        if "write_security_roles" in attr:
            roles_write = attr["write_security_roles"]
        output['read_security_roles'] = self.get_security_roles(roles_read)
        output['write_security_roles'] = self.get_security_roles(roles_write)

        output["read_security_roles"] = self.get_security_roles(roles_read)
        output["write_security_roles"] = self.get_security_roles(roles_write)
        return output

    def get_entity_type(self, attr):
@@ -18,8 +18,8 @@ class DeleteAssetSubset(BaseAction):
    #: Action description.
    description = "Removes from Avalon with all children and asset from Ftrack"
    icon = statics_icon("ftrack", "action_icons", "DeleteAsset.svg")
    #: roles that are allowed to register this action
    role_list = ["Pypeclub", "Administrator", "Project Manager"]

    settings_key = "delete_asset_subset"
    #: Db connection
    dbcon = AvalonMongoDB()
@@ -32,17 +32,21 @@ class DeleteAssetSubset(BaseAction):
        """ Validation """
        task_ids = []
        for ent_info in event["data"]["selection"]:
            entType = ent_info.get("entityType", "")
            if entType == "task":
            if ent_info.get("entityType") == "task":
                task_ids.append(ent_info["entityId"])

        is_valid = False
        for entity in entities:
            ftrack_id = entity["id"]
            if ftrack_id not in task_ids:
                continue
            if entity.entity_type.lower() != "task":
                return True
        return False
            if (
                entity["id"] in task_ids
                and entity.entity_type.lower() != "task"
            ):
                is_valid = True
                break

        if is_valid:
            is_valid = self.valid_roles(session, entities, event)
        return is_valid

    def _launch(self, event):
        try:
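A recurring change in this commit replaces a hardcoded `return True` in `discover()` with `self.valid_roles(session, entities, event)`. The body of `valid_roles` is not part of this diff; a rough sketch of what such a check presumably does, based on how `role_list` is declared on every action (the entity and attribute names here are assumptions):
```
def valid_roles(self, session, entities, event):
    # Hypothetical sketch: collect the user's security role names and
    # accept only when at least one of them is listed in `role_list`.
    user_id = event["source"]["user"]["id"]
    user_roles = session.query(
        'UserSecurityRole where user_id is "{}"'.format(user_id)
    ).all()
    role_names = {role["security_role"]["name"] for role in user_roles}
    return bool(role_names.intersection(self.role_list))
```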
@@ -21,7 +21,6 @@ class DeleteOldVersions(BaseAction):
        "Delete files from older publishes so project can be"
        " archived with only latest versions."
    )
    role_list = ["Pypeclub", "Project Manager", "Administrator"]
    icon = statics_icon("ftrack", "action_icons", "PypeAdmin.svg")

    dbcon = AvalonMongoDB()
@@ -31,13 +30,16 @@ class DeleteOldVersions(BaseAction):
    sequence_splitter = "__sequence_splitter__"

    def discover(self, session, entities, event):
        ''' Validation '''
        selection = event["data"].get("selection") or []
        for entity in selection:
            entity_type = (entity.get("entityType") or "").lower()
            if entity_type == "assetversion":
                return True
        return False
        """ Validation. """
        is_valid = False
        for entity in entities:
            if entity.entity_type.lower() == "assetversion":
                is_valid = True
                break

        if is_valid:
            is_valid = self.valid_roles(session, entities, event)
        return is_valid

    def interface(self, session, entities, event):
        # TODO Add roots existence validation
@@ -23,6 +23,7 @@ class Delivery(BaseAction):
    description = "Deliver data to client"
    role_list = ["Pypeclub", "Administrator", "Project manager"]
    icon = statics_icon("ftrack", "action_icons", "Delivery.svg")
    settings_key = "delivery_action"

    def __init__(self, *args, **kwargs):
        self.db_con = AvalonMongoDB()
@@ -30,11 +31,15 @@ class Delivery(BaseAction):
        super(Delivery, self).__init__(*args, **kwargs)

    def discover(self, session, entities, event):
        is_valid = False
        for entity in entities:
            if entity.entity_type.lower() == "assetversion":
                return True
                is_valid = True
                break

        return False
        if is_valid:
            is_valid = self.valid_roles(session, entities, event)
        return is_valid

    def interface(self, session, entities, event):
        if event["data"].get("values", {}):
@@ -13,13 +13,12 @@ class JobKiller(BaseAction):
    #: Action description.
    description = 'Killing selected running jobs'
    #: roles that are allowed to register this action
    role_list = ['Pypeclub', 'Administrator']
    icon = statics_icon("ftrack", "action_icons", "PypeAdmin.svg")
    settings_key = "job_killer"

    def discover(self, session, entities, event):
        ''' Validation '''

        return True
        return self.valid_roles(session, entities, event)

    def interface(self, session, entities, event):
        if not event['data'].get('values', {}):
@@ -16,22 +16,23 @@ class PrepareProject(BaseAction):
    #: Action description.
    description = 'Set basic attributes on the project'
    #: roles that are allowed to register this action
    role_list = ["Pypeclub", "Administrator", "Project manager"]
    icon = statics_icon("ftrack", "action_icons", "PrepareProject.svg")

    settings_key = "prepare_project"

    # Key to store info about triggering create folder structure
    create_project_structure_key = "create_folder_structure"
    item_splitter = {'type': 'label', 'value': '---'}

    def discover(self, session, entities, event):
        ''' Validation '''
        if len(entities) != 1:
        if (
            len(entities) != 1
            or entities[0].entity_type.lower() != "project"
        ):
            return False

        if entities[0].entity_type.lower() != "project":
            return False

        return True
        return self.valid_roles(session, entities, event)

    def interface(self, session, entities, event):
        if event['data'].get('values', {}):
@@ -15,7 +15,6 @@ class SeedDebugProject(BaseAction):
    #: priority
    priority = 100
    #: roles that are allowed to register this action
    role_list = ["Pypeclub"]
    icon = statics_icon("ftrack", "action_icons", "SeedProject.svg")

    # Asset names which will be created in `Assets` entity
@@ -58,9 +57,12 @@ class SeedDebugProject(BaseAction):
    existing_projects = None
    new_project_item = "< New Project >"
    current_project_item = "< Current Project >"
    settings_key = "seed_project"

    def discover(self, session, entities, event):
        ''' Validation '''
        if not self.valid_roles(session, entities, event):
            return False
        return True

    def interface(self, session, entities, event):
@@ -21,8 +21,8 @@ class StoreThumbnailsToAvalon(BaseAction):
    # Action description
    description = 'Test action'
    # roles that are allowed to register this action
    role_list = ["Pypeclub", "Administrator", "Project Manager"]
    icon = statics_icon("ftrack", "action_icons", "PypeAdmin.svg")
    settings_key = "store_thubmnail_to_avalon"

    thumbnail_key = "AVALON_THUMBNAIL_ROOT"
@@ -31,10 +31,15 @@ class StoreThumbnailsToAvalon(BaseAction):
        super(StoreThumbnailsToAvalon, self).__init__(*args, **kwargs)

    def discover(self, session, entities, event):
        is_valid = False
        for entity in entities:
            if entity.entity_type.lower() == "assetversion":
                return True
        return False
                is_valid = True
                break

        if is_valid:
            is_valid = self.valid_roles(session, entities, event)
        return is_valid

    def launch(self, session, entities, event):
        user = session.query(
@@ -41,20 +41,26 @@ class SyncToAvalonLocal(BaseAction):
    #: priority
    priority = 200
    #: roles that are allowed to register this action
    role_list = ["Pypeclub"]
    icon = statics_icon("ftrack", "action_icons", "PypeAdmin.svg")

    settings_key = "sync_to_avalon_local"

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.entities_factory = SyncEntitiesFactory(self.log, self.session)

    def discover(self, session, entities, event):
        ''' Validation '''
        """ Validate selection. """
        is_valid = False
        for ent in event["data"]["selection"]:
            # Ignore entities that are not tasks or projects
            if ent["entityType"].lower() in ["show", "task"]:
                return True
        return False
                is_valid = True
                break

        if is_valid:
            is_valid = self.valid_roles(session, entities, event)
        return is_valid

    def launch(self, session, in_entities, event):
        time_start = time.time()
@@ -15,11 +15,9 @@ class ThumbToChildren(BaseAction):
    icon = statics_icon("ftrack", "action_icons", "Thumbnail.svg")

    def discover(self, session, entities, event):
        ''' Validation '''

        if (len(entities) != 1 or entities[0].entity_type in ['Project']):
        """Show only on project."""
        if (len(entities) != 1 or entities[0].entity_type in ["Project"]):
            return False

        return True

    def launch(self, session, entities, event):
@@ -59,18 +59,22 @@ class PushHierValuesToNonHier(ServerAction):
    )

    # configurable
    interest_entity_types = ["Shot"]
    interest_attributes = ["frameStart", "frameEnd"]
    role_list = ["Pypeclub", "Administrator", "Project Manager"]
    settings_key = "sync_hier_entity_attributes"
    settings_enabled_key = "action_enabled"

    def discover(self, session, entities, event):
        """ Validation """
        # Check if selection is valid
        is_valid = False
        for ent in event["data"]["selection"]:
            # Ignore entities that are not tasks or projects
            if ent["entityType"].lower() in ("task", "show"):
                return True
        return False
                is_valid = True
                break

        if is_valid:
            is_valid = self.valid_roles(session, entities, event)
        return is_valid

    def launch(self, session, entities, event):
        self.log.debug("{}: Creating job".format(self.label))
@@ -88,7 +92,7 @@ class PushHierValuesToNonHier(ServerAction):
        session.commit()

        try:
            result = self.propagate_values(session, entities)
            result = self.propagate_values(session, event, entities)
            job["status"] = "done"
            session.commit()
@@ -111,9 +115,9 @@ class PushHierValuesToNonHier(ServerAction):
            job["status"] = "failed"
            session.commit()

    def attrs_configurations(self, session, object_ids):
    def attrs_configurations(self, session, object_ids, interest_attributes):
        attrs = session.query(self.cust_attrs_query.format(
            self.join_query_keys(self.interest_attributes),
            self.join_query_keys(interest_attributes),
            self.join_query_keys(object_ids)
        )).all()
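`join_query_keys` wraps each key in quotes and joins them with commas so they can sit inside an `in (...)` clause of an ftrack query. The `join_keys` helper removed elsewhere in this commit shows the same idea; a sketch of the presumed implementation:
```
@staticmethod
def join_query_keys(keys):
    # ["a", "b"] -> '"a", "b"' for use inside `where ... in (...)`.
    return ", ".join('"{}"'.format(key) for key in keys)
```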
@@ -129,7 +133,14 @@ class PushHierValuesToNonHier(ServerAction):
            output[obj_id].append(attr)
        return output, hiearchical

    def propagate_values(self, session, selected_entities):
    def propagate_values(self, session, event, selected_entities):
        ftrack_settings = self.get_ftrack_settings(
            session, event, selected_entities
        )
        action_settings = (
            ftrack_settings[self.settings_frack_subkey][self.settings_key]
        )

        project_entity = self.get_project_from_entity(selected_entities[0])
        selected_ids = [entity["id"] for entity in selected_entities]
@@ -138,7 +149,7 @@ class PushHierValuesToNonHier(ServerAction):
        ))
        interest_entity_types = tuple(
            ent_type.lower()
            for ent_type in self.interest_entity_types
            for ent_type in action_settings["interest_entity_types"]
        )
        all_object_types = session.query("ObjectType").all()
        object_types_by_low_name = {
@@ -158,9 +169,10 @@ class PushHierValuesToNonHier(ServerAction):
            for obj_type in destination_object_types
        )

        interest_attributes = action_settings["interest_attributes"]
        # Find custom attributes definitions
        attrs_by_obj_id, hier_attrs = self.attrs_configurations(
            session, destination_object_type_ids
            session, destination_object_type_ids, interest_attributes
        )
        # Filter destination object types if they have any object specific
        # custom attribute
@@ -1,19 +1,79 @@
import operator
import collections
from pype.modules.ftrack import BaseEvent


class NextTaskUpdate(BaseEvent):
    def filter_entities_info(self, session, event):
    """Change status on the following Task.

    The handler cares about changes of status id on Task entities. When the
    new status has state "Done", it tries to find the following task and
    change its status. The expectation is that the following task should be
    marked as "Ready to work on".

    By default, all tasks with the same task type must have state "Done"
    before any changes happen. Once they do, statuses are changed on all
    tasks with the next task type.

    # Enable
    The handler is settings driven and can be turned on/off with the
    "enabled" key.
    ```
    "enabled": True
    ```

    # Status mappings
    Mappings of new statuses must be set:
    ```
    "mapping": {
        # From -> To
        "Not Ready": "Ready",
        ...
    }
    ```

    If current status name is not found then status change is skipped.

    # Ignored statuses
    These status names are skipped as if they were in the "Done" state. The
    best example is the status "Omitted", which in most cases represents a
    "Blocked" state but will never change.
    ```
    "ignored_statuses": [
        "Omitted",
        ...
    ]
    ```

    # Change statuses sorted by task type and by name
    Changes the behaviour of task type batching. Statuses are not checked
    and set in batches of tasks per task type but one by one. Tasks are
    sorted by task type and then by name; if all previous tasks are "Done",
    the following task's status is changed.
    ```
    "name_sorting": True
    ```
    """
    settings_key = "next_task_update"

    def launch(self, session, event):
        '''Propagates status from version to task when changed'''

        filtered_entities_info = self.filter_entities_info(event)
        if not filtered_entities_info:
            return

        for project_id, entities_info in filtered_entities_info.items():
            self.process_by_project(session, event, project_id, entities_info)

    def filter_entities_info(self, event):
        # Filter if event contains relevant data
        entities_info = event["data"].get("entities")
        if not entities_info:
            return

        first_filtered_entities = []
        filtered_entities_info = collections.defaultdict(list)
        for entity_info in entities_info:
            # Care only about tasks
            if entity_info.get("entityType") != "task":
            # Care only about Task `entity_type`
            if entity_info.get("entity_type") != "Task":
                continue

            # Care only about changes of status
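Putting the keys documented in the docstring above together, a complete settings block for this handler might look like the following sketch (the key names come from the docstring; the concrete values are illustrative):
```
# Illustrative "next_task_update" settings combining the documented keys.
{
    "enabled": True,
    "mapping": {
        "Not Ready": "Ready"
    },
    "ignored_statuses": ["Omitted"],
    "name_sorting": False
}
```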
@@ -25,204 +85,353 @@ class NextTaskUpdate(BaseEvent):
            ):
                continue

            first_filtered_entities.append(entity_info)
            project_id = None
            for parent_info in reversed(entity_info["parents"]):
                if parent_info["entityType"] == "show":
                    project_id = parent_info["entityId"]
                    break

        if not first_filtered_entities:
            return first_filtered_entities
            if project_id:
                filtered_entities_info[project_id].append(entity_info)
        return filtered_entities_info

        status_ids = [
            entity_info["changes"]["statusid"]["new"]
            for entity_info in first_filtered_entities
        ]
        statuses_by_id = self.get_statuses_by_id(
            session, status_ids=status_ids
    def process_by_project(self, session, event, project_id, _entities_info):
        project_name = self.get_project_name_from_event(
            session, event, project_id
        )
        # Make sure `entity_type` is "Task"
        task_object_type = session.query(
            "select id, name from ObjectType where name is \"Task\""
        ).one()

        # Care only about tasks having status with state `Done`
        filtered_entities = []
        for entity_info in first_filtered_entities:
            if entity_info["objectTypeId"] != task_object_type["id"]:
                continue
            status_id = entity_info["changes"]["statusid"]["new"]
            status_entity = statuses_by_id[status_id]
            if status_entity["state"]["name"].lower() == "done":
                filtered_entities.append(entity_info)

        return filtered_entities

    def get_parents_by_id(self, session, entities_info):
        parent_ids = [
            "\"{}\"".format(entity_info["parentId"])
            for entity_info in entities_info
        ]
        parent_entities = session.query(
            "TypedContext where id in ({})".format(", ".join(parent_ids))
        ).all()

        return {
            entity["id"]: entity
            for entity in parent_entities
        }

    def get_tasks_by_id(self, session, parent_ids):
        joined_parent_ids = ",".join([
            "\"{}\"".format(parent_id)
            for parent_id in parent_ids
        ])
        task_entities = session.query(
            "Task where parent_id in ({})".format(joined_parent_ids)
        ).all()

        return {
            entity["id"]: entity
            for entity in task_entities
        }

    def get_statuses_by_id(self, session, task_entities=None, status_ids=None):
        if task_entities is None and status_ids is None:
            return {}

        if status_ids is None:
            status_ids = []
            for task_entity in task_entities:
                status_ids.append(task_entity["status_id"])

        if not status_ids:
            return {}

        status_entities = session.query(
            "Status where id in ({})".format(", ".join(status_ids))
        ).all()

        return {
            entity["id"]: entity
            for entity in status_entities
        }

    def get_sorted_task_types(self, session):
        data = {
            _type: _type.get("sort")
            for _type in session.query("Type").all()
            if _type.get("sort") is not None
        }

        return [
            item[0]
            for item in sorted(data.items(), key=operator.itemgetter(1))
        ]

    def launch(self, session, event):
        '''Propagates status from version to task when changed'''

        entities_info = self.filter_entities_info(session, event)
        if not entities_info:
            return

        parents_by_id = self.get_parents_by_id(session, entities_info)
        tasks_by_id = self.get_tasks_by_id(
            session, tuple(parents_by_id.keys())
        # Load settings
        project_settings = self.get_project_settings_from_event(
            event, project_name
        )

        tasks_to_parent_id = collections.defaultdict(list)
        for task_entity in tasks_by_id.values():
            tasks_to_parent_id[task_entity["parent_id"]].append(task_entity)

        statuses_by_id = self.get_statuses_by_id(session, tasks_by_id.values())

        next_status_name = "Ready"
        next_status = session.query(
            "Status where name is \"{}\"".format(next_status_name)
        ).first()
        if not next_status:
            self.log.warning("Couldn't find status with name \"{}\"".format(
                next_status_name
        # Load status mapping from presets
        event_settings = (
            project_settings["ftrack"]["events"][self.settings_key]
        )
        if not event_settings["enabled"]:
            self.log.debug("Project \"{}\" has disabled {}.".format(
                project_name, self.__class__.__name__
            ))
            return

        statuses = session.query("Status").all()

        entities_info = self.filter_by_status_state(_entities_info, statuses)
        if not entities_info:
            return

        parent_ids = set()
        event_task_ids_by_parent_id = collections.defaultdict(list)
        for entity_info in entities_info:
            parent_id = entity_info["parentId"]
            task_id = entity_info["entityId"]
            task_entity = tasks_by_id[task_id]
            entity_id = entity_info["entityId"]
            parent_ids.add(parent_id)
            event_task_ids_by_parent_id[parent_id].append(entity_id)

            all_same_type_taks_done = True
            for parents_task in tasks_to_parent_id[parent_id]:
                if (
                    parents_task["id"] == task_id
                    or parents_task["type_id"] != task_entity["type_id"]
                ):
                    continue
        # From now it doesn't matter what was in event data
        task_entities = session.query(
            (
                "select id, type_id, status_id, parent_id, link from Task"
                " where parent_id in ({})"
            ).format(self.join_query_keys(parent_ids))
        ).all()

                parents_task_status = statuses_by_id[parents_task["status_id"]]
                low_status_name = parents_task_status["name"].lower()
                # Skip if task's status name "Omitted"
                if low_status_name == "omitted":
                    continue
        tasks_by_parent_id = collections.defaultdict(list)
        for task_entity in task_entities:
            tasks_by_parent_id[task_entity["parent_id"]].append(task_entity)

                low_state_name = parents_task_status["state"]["name"].lower()
                if low_state_name != "done":
                    all_same_type_taks_done = False
                    break
        project_entity = session.get("Project", project_id)
        self.set_next_task_statuses(
            session,
            tasks_by_parent_id,
            event_task_ids_by_parent_id,
            statuses,
            project_entity,
            event_settings
        )

            if not all_same_type_taks_done:
                continue
    def filter_by_status_state(self, entities_info, statuses):
        statuses_by_id = {
            status["id"]: status
            for status in statuses
        }

            # Prepare all task types
            sorted_task_types = self.get_sorted_task_types(session)
            sorted_task_types_len = len(sorted_task_types)
        # Care only about tasks having status with state `Done`
        filtered_entities_info = []
        for entity_info in entities_info:
            status_id = entity_info["changes"]["statusid"]["new"]
            status_entity = statuses_by_id[status_id]
            if status_entity["state"]["name"].lower() == "done":
                filtered_entities_info.append(entity_info)
        return filtered_entities_info

            from_idx = None
            for idx, task_type in enumerate(sorted_task_types):
                if task_type["id"] == task_entity["type_id"]:
                    from_idx = idx + 1
                    break
    def set_next_task_statuses(
        self,
        session,
        tasks_by_parent_id,
        event_task_ids_by_parent_id,
        statuses,
        project_entity,
        event_settings
    ):
        statuses_by_id = {
            status["id"]: status
            for status in statuses
        }

            # Current task type is last in order
            if from_idx is None or from_idx >= sorted_task_types_len:
                continue
        # Lower ignored statuses
        ignored_statuses = set(
            status_name.lower()
            for status_name in event_settings["ignored_statuses"]
        )
        # Lower both key and value of mapped statuses
        mapping = {
            status_from.lower(): status_to.lower()
            for status_from, status_to in event_settings["mapping"].items()
        }
        # Should use name sorting or not
        name_sorting = event_settings["name_sorting"]

            next_task_type_id = None
            next_task_type_tasks = []
            for idx in range(from_idx, sorted_task_types_len):
                next_task_type = sorted_task_types[idx]
                for parents_task in tasks_to_parent_id[parent_id]:
                    if next_task_type_id is None:
                        if parents_task["type_id"] != next_task_type["id"]:
                            continue
                        next_task_type_id = next_task_type["id"]
        # Collect task type ids from changed entities
        task_type_ids = set()
        for task_entities in tasks_by_parent_id.values():
            for task_entity in task_entities:
                task_type_ids.add(task_entity["type_id"])

                    if parents_task["type_id"] == next_task_type_id:
                        next_task_type_tasks.append(parents_task)
        statusese_by_obj_id = self.statuses_for_tasks(
            task_type_ids, project_entity
        )

                if next_task_type_id is not None:
                    break
        sorted_task_type_ids = self.get_sorted_task_type_ids(session)

            for next_task_entity in next_task_type_tasks:
                if next_task_entity["status"]["name"].lower() != "not ready":
                    continue
        for parent_id, _task_entities in tasks_by_parent_id.items():
            task_entities_by_type_id = collections.defaultdict(list)
            for _task_entity in _task_entities:
                type_id = _task_entity["type_id"]
                task_entities_by_type_id[type_id].append(_task_entity)

                ent_path = "/".join(
                    [ent["name"] for ent in next_task_entity["link"]]
            event_ids = set(event_task_ids_by_parent_id[parent_id])
            if name_sorting:
                # Sort entities by name
                self.sort_by_name_task_entities_by_type(
                    task_entities_by_type_id
                )
                try:
                    next_task_entity["status"] = next_status
                    session.commit()
                    self.log.info(
                        "\"{}\" updated status to \"{}\"".format(
                            ent_path, next_status_name
                        )
                # Sort entities by type id
                sorted_task_entities = []
                for type_id in sorted_task_type_ids:
                    task_entities = task_entities_by_type_id.get(type_id)
                    if task_entities:
                        sorted_task_entities.extend(task_entities)

                next_tasks = self.next_tasks_with_name_sorting(
                    sorted_task_entities,
                    event_ids,
                    statuses_by_id,
                    ignored_statuses
                )

            else:
                next_tasks = self.next_tasks_with_type_sorting(
                    task_entities_by_type_id,
                    sorted_task_type_ids,
                    event_ids,
                    statuses_by_id,
                    ignored_statuses
                )

            for task_entity in next_tasks:
                if task_entity["status"]["state"]["name"].lower() == "done":
                    continue

                task_status = statuses_by_id[task_entity["status_id"]]
                old_status_name = task_status["name"].lower()
                if old_status_name in ignored_statuses:
                    continue

                new_task_name = mapping.get(old_status_name)
                if not new_task_name:
                    self.log.debug(
                        "Didn't find mapping for status \"{}\".".format(
                            task_status["name"]
                        )
                except Exception:
                    session.rollback()
                    self.log.warning(
                        "\"{}\" status couldn't be set to \"{}\"".format(
                            ent_path, next_status_name
                        ),
                        exc_info=True
                    )
                    continue

                ent_path = "/".join(
                    [ent["name"] for ent in task_entity["link"]]
                )
                type_id = task_entity["type_id"]
                new_status = statusese_by_obj_id[type_id].get(new_task_name)
                if new_status is None:
                    self.log.warning((
                        "\"{}\" does not have available status name \"{}\""
                    ).format(ent_path, new_task_name))
                    continue

                try:
                    task_entity["status_id"] = new_status["id"]
                    session.commit()
                    self.log.info(
                        "\"{}\" updated status to \"{}\"".format(
                            ent_path, new_status["name"]
                        )
                    )
                except Exception:
                    session.rollback()
                    self.log.warning(
                        "\"{}\" status couldn't be set to \"{}\"".format(
                            ent_path, new_status["name"]
                        ),
                        exc_info=True
                    )

    def next_tasks_with_name_sorting(
        self,
        sorted_task_entities,
        event_ids,
        statuses_by_id,
        ignored_statuses,
    ):
        # Pre sort task entities by name
        use_next_task = False
        next_tasks = []
        for task_entity in sorted_task_entities:
            if task_entity["id"] in event_ids:
                event_ids.remove(task_entity["id"])
                use_next_task = True
                continue

            if not use_next_task:
                continue

            task_status = statuses_by_id[task_entity["status_id"]]
            low_status_name = task_status["name"].lower()
            if low_status_name in ignored_statuses:
                continue

            next_tasks.append(task_entity)
            use_next_task = False
            if not event_ids:
                break

        return next_tasks

    def check_statuses_done(
        self, task_entities, ignored_statuses, statuses_by_id
    ):
        all_are_done = True
        for task_entity in task_entities:
            task_status = statuses_by_id[task_entity["status_id"]]
            low_status_name = task_status["name"].lower()
            if low_status_name in ignored_statuses:
                continue

            low_state_name = task_status["state"]["name"].lower()
            if low_state_name != "done":
                all_are_done = False
                break
        return all_are_done

    def next_tasks_with_type_sorting(
        self,
        task_entities_by_type_id,
        sorted_task_type_ids,
        event_ids,
        statuses_by_id,
        ignored_statuses
    ):
        # `use_next_task` is used only if `name_sorting` is enabled!
        next_tasks = []
        use_next_tasks = False
        for type_id in sorted_task_type_ids:
            if type_id not in task_entities_by_type_id:
                continue

            task_entities = task_entities_by_type_id[type_id]

            # Check if any task was in event
            event_id_in_tasks = False
            for task_entity in task_entities:
                task_id = task_entity["id"]
                if task_id in event_ids:
                    event_ids.remove(task_id)
                    event_id_in_tasks = True

            if use_next_tasks:
                # Check if next tasks are not done already
                all_in_type_done = self.check_statuses_done(
                    task_entities, ignored_statuses, statuses_by_id
                )
                if all_in_type_done:
                    continue

                next_tasks.extend(task_entities)
                use_next_tasks = False
                if not event_ids:
                    break

            if not event_id_in_tasks:
                continue

            all_in_type_done = self.check_statuses_done(
                task_entities, ignored_statuses, statuses_by_id
            )
            use_next_tasks = all_in_type_done
            if all_in_type_done:
                continue

            if not event_ids:
                break

            use_next_tasks = False

        return next_tasks

    def statuses_for_tasks(self, task_type_ids, project_entity):
        project_schema = project_entity["project_schema"]
        output = {}
        for task_type_id in task_type_ids:
            statuses = project_schema.get_statuses("Task", task_type_id)
            output[task_type_id] = {
                status["name"].lower(): status
                for status in statuses
            }

        return output

    def get_sorted_task_type_ids(self, session):
        types_by_order = collections.defaultdict(list)
        for _type in session.query("Type").all():
            sort_oder = _type.get("sort")
            if sort_oder is not None:
                types_by_order[sort_oder].append(_type["id"])

        types = []
        for sort_oder in sorted(types_by_order.keys()):
            types.extend(types_by_order[sort_oder])
        return types

    @staticmethod
    def sort_by_name_task_entities_by_type(task_entities_by_type_id):
        _task_entities_by_type_id = {}
        for type_id, task_entities in task_entities_by_type_id.items():
            # Store tasks by name
            task_entities_by_name = {}
            for task_entity in task_entities:
                task_name = task_entity["name"]
                task_entities_by_name[task_name] = task_entity

            # Store task entities by sorted names
            sorted_task_entities = []
            for task_name in sorted(task_entities_by_name.keys()):
                task_entity = task_entities_by_name[task_name]
                sorted_task_entities.append(task_entity)
            # Store result to temp dictionary
            _task_entities_by_type_id[type_id] = sorted_task_entities

        # Override values in source object
        for type_id, value in _task_entities_by_type_id.items():
            task_entities_by_type_id[type_id] = value


def register(session):
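`get_sorted_task_type_ids` above orders task type ids by their numeric `sort` attribute, grouping ids that share the same order value. A self-contained toy run of that logic with plain dicts in place of ftrack entities:
```
import collections

types = [
    {"id": "t-anim", "sort": 20.0},
    {"id": "t-model", "sort": 10.0},
    {"id": "t-comp", "sort": 30.0},
]
types_by_order = collections.defaultdict(list)
for _type in types:
    sort_order = _type.get("sort")
    if sort_order is not None:
        types_by_order[sort_order].append(_type["id"])

sorted_ids = []
for sort_order in sorted(types_by_order.keys()):
    sorted_ids.extend(types_by_order[sort_order])
print(sorted_ids)  # ['t-model', 't-anim', 't-comp']
```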
@@ -7,8 +7,6 @@ from pype.modules.ftrack import BaseEvent


class PushFrameValuesToTaskEvent(BaseEvent):
    # Ignore event handler by default
    ignore_me = True

    cust_attrs_query = (
        "select id, key, object_type_id, is_hierarchical, default"
        " from CustomAttributeConfiguration"
@@ -27,36 +25,7 @@ class PushFrameValuesToTaskEvent(BaseEvent):
    _cached_changes = []
    _max_delta = 30

    # Configurable (lists)
    interest_entity_types = {"Shot"}
    interest_attributes = {"frameStart", "frameEnd"}

    @staticmethod
    def join_keys(keys):
        return ",".join(["\"{}\"".format(key) for key in keys])

    @classmethod
    def task_object_id(cls, session):
        if cls._cached_task_object_id is None:
            task_object_type = session.query(
                "ObjectType where name is \"Task\""
            ).one()
            cls._cached_task_object_id = task_object_type["id"]
        return cls._cached_task_object_id

    @classmethod
    def interest_object_ids(cls, session):
        if cls._cached_interest_object_ids is None:
            object_types = session.query(
                "ObjectType where name in ({})".format(
                    cls.join_keys(cls.interest_entity_types)
                )
            ).all()
            cls._cached_interest_object_ids = tuple(
                object_type["id"]
                for object_type in object_types
            )
        return cls._cached_interest_object_ids
    settings_key = "sync_hier_entity_attributes"

    def session_user_id(self, session):
        if self._cached_user_id is None:
@@ -67,30 +36,146 @@ class PushFrameValuesToTaskEvent(BaseEvent):
        return self._cached_user_id

    def launch(self, session, event):
        interesting_data, changed_keys_by_object_id = (
            self.extract_interesting_data(session, event)
        filtered_entities_info = self.filter_entities_info(event)
        if not filtered_entities_info:
            return

        for project_id, entities_info in filtered_entities_info.items():
            self.process_by_project(session, event, project_id, entities_info)

    def filter_entities_info(self, event):
        # Filter if event contains relevant data
        entities_info = event["data"].get("entities")
        if not entities_info:
            return

        entities_info_by_project_id = {}
        for entity_info in entities_info:
            # Care only about tasks
            if entity_info.get("entityType") != "task":
                continue

            # Skip `Task` entity type
            if entity_info["entity_type"].lower() == "task":
                continue

            # Care only about changes of status
            changes = entity_info.get("changes")
            if not changes:
                continue

            # Get project id from entity info
            project_id = None
            for parent_item in reversed(entity_info["parents"]):
                if parent_item["entityType"] == "show":
                    project_id = parent_item["entityId"]
                    break

            if project_id is None:
                continue

            if project_id not in entities_info_by_project_id:
                entities_info_by_project_id[project_id] = []
            entities_info_by_project_id[project_id].append(entity_info)

        return entities_info_by_project_id

    def process_by_project(self, session, event, project_id, entities_info):
        project_name = self.get_project_name_from_event(
            session, event, project_id
        )
        # Load settings
        project_settings = self.get_project_settings_from_event(
            event, project_name
        )
        # Load status mapping from presets
        event_settings = (
            project_settings
            ["ftrack"]
            ["events"]
            ["sync_hier_entity_attributes"]
        )
        # Skip if event is not enabled
        if not event_settings["enabled"]:
            self.log.debug("Project \"{}\" has disabled {}".format(
                project_name, self.__class__.__name__
            ))
            return

        interest_attributes = event_settings["interest_attributes"]
        if not interest_attributes:
            self.log.info((
                "Project \"{}\" does not have filled 'interest_attributes',"
                " skipping."
            ))
            return
        interest_entity_types = event_settings["interest_entity_types"]
        if not interest_entity_types:
            self.log.info((
                "Project \"{}\" does not have filled 'interest_entity_types',"
                " skipping."
            ))
            return

        # Filter entities info with changes
        interesting_data, changed_keys_by_object_id = self.filter_changes(
            session, event, entities_info, interest_attributes
        )
        if not interesting_data:
            return

        entities = self.get_entities(session, interesting_data)
        # Prepare object types
        object_types = session.query("select id, name from ObjectType").all()
        object_types_by_name = {}
        for object_type in object_types:
            name_low = object_type["name"].lower()
            object_types_by_name[name_low] = object_type

        # Prepare task object id
        task_object_id = object_types_by_name["task"]["id"]

        # Collect object type ids based on settings
        interest_object_ids = []
        for entity_type in interest_entity_types:
            _entity_type = entity_type.lower()
            object_type = object_types_by_name.get(_entity_type)
            if not object_type:
                self.log.warning("Couldn't find object type \"{}\"".format(
                    entity_type
                ))

            interest_object_ids.append(object_type["id"])

        # Query entities by filtered data and object ids
        entities = self.get_entities(
            session, interesting_data, interest_object_ids
        )
        if not entities:
            return

        entities_by_id = {
            entity["id"]: entity
        # Pop not found entities from interesting data
        entity_ids = set(
            entity["id"]
            for entity in entities
        }
        )
        for entity_id in tuple(interesting_data.keys()):
            if entity_id not in entities_by_id:
            if entity_id not in entity_ids:
                interesting_data.pop(entity_id)

        attrs_by_obj_id, hier_attrs = self.attrs_configurations(session)
        # Add task object type to list
        attr_obj_ids = list(interest_object_ids)
        attr_obj_ids.append(task_object_id)

        attrs_by_obj_id, hier_attrs = self.attrs_configurations(
            session, attr_obj_ids, interest_attributes
        )

        task_object_id = self.task_object_id(session)
        task_attrs = attrs_by_obj_id.get(task_object_id)

        changed_keys = set()
        # Skip keys that are not both in hierarchical and type specific
        for object_id, keys in changed_keys_by_object_id.items():
            changed_keys |= set(keys)
            object_id_attrs = attrs_by_obj_id.get(object_id)
            for key in keys:
                if key not in hier_attrs:
@@ -113,8 +198,8 @@ class PushFrameValuesToTaskEvent(BaseEvent):
                "There is not created Custom Attributes {} "
                " for entity types: {}"
            ).format(
                self.join_keys(self.interest_attributes),
                self.join_keys(self.interest_entity_types)
                self.join_query_keys(interest_attributes),
                self.join_query_keys(interest_entity_types)
            ))
            return
@@ -124,16 +209,24 @@ class PushFrameValuesToTaskEvent(BaseEvent):
        if task_attrs:
            task_entities = self.get_task_entities(session, interesting_data)

        task_entities_by_id = {}
        task_entity_ids = set()
        parent_id_by_task_id = {}
        for task_entity in task_entities:
            task_entities_by_id[task_entity["id"]] = task_entity
            parent_id_by_task_id[task_entity["id"]] = task_entity["parent_id"]
            task_id = task_entity["id"]
            task_entity_ids.add(task_id)
            parent_id_by_task_id[task_id] = task_entity["parent_id"]

        changed_keys = set()
        for keys in changed_keys_by_object_id.values():
            changed_keys |= set(keys)
        self.finalize(
            session, interesting_data,
            changed_keys, attrs_by_obj_id, hier_attrs,
            task_entity_ids, parent_id_by_task_id
        )

    def finalize(
        self, session, interesting_data,
        changed_keys, attrs_by_obj_id, hier_attrs,
        task_entity_ids, parent_id_by_task_id
    ):
        attr_id_to_key = {}
        for attr_confs in attrs_by_obj_id.values():
            for key in changed_keys:
@@ -147,12 +240,12 @@ class PushFrameValuesToTaskEvent(BaseEvent):
                attr_id_to_key[custom_attr_id] = key

        entity_ids = (
            set(interesting_data.keys()) | set(task_entities_by_id.keys())
            set(interesting_data.keys()) | task_entity_ids
        )
        attr_ids = set(attr_id_to_key.keys())

        current_values_by_id = self.current_values(
            session, attr_ids, entity_ids, task_entities_by_id, hier_attrs
            session, attr_ids, entity_ids, task_entity_ids, hier_attrs
        )

        for entity_id, current_values in current_values_by_id.items():
@@ -214,45 +307,9 @@ class PushFrameValuesToTaskEvent(BaseEvent):
            session.rollback()
            self.log.warning("Changing of values failed.", exc_info=True)

    def current_values(
        self, session, attr_ids, entity_ids, task_entities_by_id, hier_attrs
    def filter_changes(
        self, session, event, entities_info, interest_attributes
    ):
        current_values_by_id = {}
        if not attr_ids or not entity_ids:
            return current_values_by_id
        joined_conf_ids = self.join_keys(attr_ids)
        joined_entity_ids = self.join_keys(entity_ids)

        call_expr = [{
            "action": "query",
            "expression": self.cust_attr_query.format(
                joined_entity_ids, joined_conf_ids
            )
        }]
        if hasattr(session, "call"):
            [values] = session.call(call_expr)
        else:
            [values] = session._call(call_expr)

        for item in values["data"]:
            entity_id = item["entity_id"]
            attr_id = item["configuration_id"]
            if entity_id in task_entities_by_id and attr_id in hier_attrs:
                continue

            if entity_id not in current_values_by_id:
                current_values_by_id[entity_id] = {}
            current_values_by_id[entity_id][attr_id] = item["value"]
        return current_values_by_id

    def extract_interesting_data(self, session, event):
        # Filter if event contains relevant data
        entities_info = event["data"].get("entities")
        if not entities_info:
            return

        # for key, value in event["data"].items():
        #     self.log.info("{}: {}".format(key, value))
        session_user_id = self.session_user_id(session)
        user_data = event["data"].get("user")
        changed_by_session = False
@@ -264,18 +321,10 @@ class PushFrameValuesToTaskEvent(BaseEvent):
        interesting_data = {}
        changed_keys_by_object_id = {}
        for entity_info in entities_info:
            # Care only about tasks
            if entity_info.get("entityType") != "task":
                continue

            # Care only about changes of status
            changes = entity_info.get("changes") or {}
            if not changes:
                continue

            # Care only about changes of specific keys
            entity_changes = {}
            for key in self.interest_attributes:
            changes = entity_info["changes"]
            for key in interest_attributes:
                if key in changes:
                    entity_changes[key] = changes[key]["new"]
@@ -307,48 +356,66 @@ class PushFrameValuesToTaskEvent(BaseEvent):
            if not entity_changes:
                continue

            # Do not care about "Task" entity_type
            task_object_id = self.task_object_id(session)
            object_id = entity_info.get("objectTypeId")
            if not object_id or object_id == task_object_id:
                continue

            entity_id = entity_info["entityId"]
            object_id = entity_info["objectTypeId"]
            interesting_data[entity_id] = entity_changes
            if object_id not in changed_keys_by_object_id:
                changed_keys_by_object_id[object_id] = set()

            changed_keys_by_object_id[object_id] |= set(entity_changes.keys())

        return interesting_data, changed_keys_by_object_id

    def get_entities(self, session, interesting_data):
        entities = session.query(
            "TypedContext where id in ({})".format(
                self.join_keys(interesting_data.keys())
            )
        ).all()
    def current_values(
        self, session, attr_ids, entity_ids, task_entity_ids, hier_attrs
    ):
        current_values_by_id = {}
        if not attr_ids or not entity_ids:
            return current_values_by_id
        joined_conf_ids = self.join_query_keys(attr_ids)
        joined_entity_ids = self.join_query_keys(entity_ids)

        output = []
        interest_object_ids = self.interest_object_ids(session)
        for entity in entities:
            if entity["object_type_id"] in interest_object_ids:
                output.append(entity)
        return output
        call_expr = [{
            "action": "query",
            "expression": self.cust_attr_query.format(
                joined_entity_ids, joined_conf_ids
            )
        }]
        if hasattr(session, "call"):
            [values] = session.call(call_expr)
        else:
            [values] = session._call(call_expr)

        for item in values["data"]:
            entity_id = item["entity_id"]
            attr_id = item["configuration_id"]
            if entity_id in task_entity_ids and attr_id in hier_attrs:
                continue

            if entity_id not in current_values_by_id:
                current_values_by_id[entity_id] = {}
            current_values_by_id[entity_id][attr_id] = item["value"]
        return current_values_by_id

    def get_entities(self, session, interesting_data, interest_object_ids):
        return session.query((
            "select id from TypedContext"
            " where id in ({}) and object_type_id in ({})"
        ).format(
            self.join_query_keys(interesting_data.keys()),
            self.join_query_keys(interest_object_ids)
        )).all()

    def get_task_entities(self, session, interesting_data):
        return session.query(
            "Task where parent_id in ({})".format(
                self.join_keys(interesting_data.keys())
            "select id, parent_id from Task where parent_id in ({})".format(
                self.join_query_keys(interesting_data.keys())
            )
        ).all()

    def attrs_configurations(self, session):
        object_ids = list(self.interest_object_ids(session))
        object_ids.append(self.task_object_id(session))

    def attrs_configurations(self, session, object_ids, interest_attributes):
        attrs = session.query(self.cust_attrs_query.format(
            self.join_keys(self.interest_attributes),
            self.join_keys(object_ids)
            self.join_query_keys(interest_attributes),
            self.join_query_keys(object_ids)
        )).all()

        output = {}
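`current_values` reads attribute values through a raw call expression instead of ORM objects, which keeps the round trip to a single request. A sketch of the shape of that exchange (the exact `cust_attr_query` template is defined elsewhere in the class, so the expression text here is an assumption; the ids are placeholders):
```
# Hypothetical payload mirroring the `call_expr` built above.
call_expr = [{
    "action": "query",
    "expression": (
        'select value, entity_id, configuration_id'
        ' from CustomAttributeValue'
        ' where entity_id in ("<entity-id>")'
        ' and configuration_id in ("<configuration-id>")'
    )
}]
# Newer ftrack_api sessions expose `call`, older ones only `_call`.
[values] = session.call(call_expr)
for item in values["data"]:
    print(item["entity_id"], item["configuration_id"], item["value"])
```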
@@ -56,17 +56,16 @@ class TaskStatusToParent(BaseEvent):
        return filtered_entity_info

    def process_by_project(self, session, event, project_id, entities_info):
        # Get project entity
        project_entity = self.get_project_entity_from_event(
        # Get project name
        project_name = self.get_project_name_from_event(
            session, event, project_id
        )
        # Load settings
        project_settings = self.get_settings_for_project(
            session, event, project_entity=project_entity
        project_settings = self.get_project_settings_from_event(
            event, project_name
        )

        # Prepare loaded settings and check if can be processed
        project_name = project_entity["full_name"]
        result = self.prepare_settings(project_settings, project_name)
        if not result:
            return

@@ -133,6 +132,7 @@ class TaskStatusToParent(BaseEvent):
            obj_id = object_type["id"]
            object_type_name_by_id[obj_id] = types_mapping[mapping_name]

        project_entity = session.get("Project", project_id)
        project_schema = project_entity["project_schema"]
        available_statuses_by_obj_id = {}
        for obj_id in obj_ids:
@@ -99,14 +99,14 @@ class TaskToVersionStatus(BaseEvent):
        if not entities_info:
            return

        project_entity = self.get_project_entity_from_event(
        project_name = self.get_project_name_from_event(
            session, event, project_id
        )
        project_settings = self.get_settings_for_project(
            session, event, project_entity=project_entity
        # Load settings
        project_settings = self.get_project_settings_from_event(
            event, project_name
        )

        project_name = project_entity["full_name"]
        event_settings = (
            project_settings["ftrack"]["events"][self.settings_key]
        )

@@ -171,6 +171,7 @@ class TaskToVersionStatus(BaseEvent):
        }

        # Final process of changing statuses
        project_entity = session.get("Project", project_id)
        av_statuses_by_low_name, av_statuses_by_id = (
            self.get_asset_version_statuses(project_entity)
        )
@@ -19,14 +19,14 @@ class ThumbnailEvents(BaseEvent):
    def process_project_entities(
        self, session, event, project_id, entities_info
    ):
        project_entity = self.get_project_entity_from_event(
        project_name = self.get_project_name_from_event(
            session, event, project_id
        )
        project_settings = self.get_settings_for_project(
            session, event, project_entity=project_entity
        # Load settings
        project_settings = self.get_project_settings_from_event(
            event, project_name
        )

        project_name = project_entity["full_name"]
        event_settings = (
            project_settings
            ["ftrack"]
@@ -47,15 +47,14 @@ class VersionToTaskStatus(BaseEvent):

    def process_by_project(self, session, event, project_id, entities_info):
        # Check for project data if event is enabled for event handler
        status_mapping = None
        project_entity = self.get_project_entity_from_event(
        project_name = self.get_project_name_from_event(
            session, event, project_id
        )
        project_settings = self.get_settings_for_project(
            session, event, project_entity=project_entity
        # Load settings
        project_settings = self.get_project_settings_from_event(
            event, project_name
        )

        project_name = project_entity["full_name"]
        # Load status mapping from presets
        event_settings = (
            project_settings["ftrack"]["events"]["status_version_to_task"]

@@ -147,7 +146,7 @@ class VersionToTaskStatus(BaseEvent):

        # Query statuses
        statusese_by_obj_id = self.statuses_for_tasks(
            session, task_entities, project_entity
            session, task_entities, project_id
        )
        # Prepare status names by their ids
        status_name_by_id = {

@@ -224,11 +223,12 @@ class VersionToTaskStatus(BaseEvent):
                exc_info=True
            )

    def statuses_for_tasks(self, session, task_entities, project_entity):
    def statuses_for_tasks(self, session, task_entities, project_id):
        task_type_ids = set()
        for task_entity in task_entities:
            task_type_ids.add(task_entity["type_id"])

        project_entity = session.get("Project", project_id)
        project_schema = project_entity["project_schema"]
        output = {}
        for task_type_id in task_type_ids:
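`statuses_for_tasks` now resolves the project entity from its id and asks the project schema which statuses a task of a given type may take. A minimal usage sketch of that lookup (the ids are placeholders):
```
project_entity = session.get("Project", "<project-id>")
project_schema = project_entity["project_schema"]
statuses = project_schema.get_statuses("Task", "<task-type-id>")
statuses_by_low_name = {
    status["name"].lower(): status
    for status in statuses
}
```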
@@ -3,9 +3,16 @@ from abc import ABCMeta, abstractmethod
import six
import pype
from pype.modules import (
    PypeModule, ITrayModule, IPluginPaths, ITimersManager, IUserModule
    PypeModule,
    ITrayModule,
    IPluginPaths,
    ITimersManager,
    IUserModule,
    ILaunchHookPaths
)

FTRACK_MODULE_DIR = os.path.dirname(os.path.abspath(__file__))


@six.add_metaclass(ABCMeta)
class IFtrackEventHandlerPaths:
@@ -19,7 +26,12 @@ class IFtrackEventHandlerPaths:


class FtrackModule(
    PypeModule, ITrayModule, IPluginPaths, ITimersManager, IUserModule
    PypeModule,
    ITrayModule,
    IPluginPaths,
    ITimersManager,
    IUserModule,
    ILaunchHookPaths
):
    name = "ftrack"

@@ -54,6 +66,10 @@ class FtrackModule(
            "publish": [os.path.join(pype.PLUGINS_DIR, "ftrack", "publish")]
        }

    def get_launch_hook_paths(self):
        """Implementation of `ILaunchHookPaths`."""
        return os.path.join(FTRACK_MODULE_DIR, "launch_hooks")

    def connect_with_modules(self, enabled_modules):
        for module in enabled_modules:
            if not isinstance(module, IFtrackEventHandlerPaths):
@@ -55,6 +55,8 @@ class SocketThread(threading.Thread):
            "Running Socket thread on {}:{}".format(*server_address)
        )

        env = os.environ.copy()
        env["PYPE_PROCESS_MONGO_ID"] = str(Logger.mongo_process_id)
        self.subproc = subprocess.Popen(
            [
                sys.executable,

@@ -62,6 +64,7 @@ class SocketThread(threading.Thread):
                *self.additional_args,
                str(self.port)
            ],
            env=env,
            stdin=subprocess.PIPE
        )
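The parent hands its Mongo process id to the spawned server through the subprocess environment. On the child side the handoff would be picked up roughly like this (a sketch, not code from this commit):
```
import os

# Hypothetical child-side counterpart of the env handoff above.
mongo_process_id = os.environ.get("PYPE_PROCESS_MONGO_ID")
if mongo_process_id:
    # Attach this process's logging to the parent's log record.
    print("Logging under parent process id:", mongo_process_id)
```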
@@ -51,6 +51,8 @@ def main(args):


if __name__ == "__main__":
    Logger.set_process_name("Ftrack User server")

    # Register interrupt signal
    def signal_handler(sig, frame):
        log.info(
pype/modules/ftrack/launch_hooks/pre_python2_vendor.py (new file, 40 lines)
@@ -0,0 +1,40 @@
import os
from pype.lib import PreLaunchHook
from pype.modules.ftrack import FTRACK_MODULE_DIR


class PrePython2Support(PreLaunchHook):
    """Add ftrack Python API module for Python 2 to PYTHONPATH.

    Path to vendor modules is added to the beginning of PYTHONPATH.
    """
    # More granular filtering will be needed in the future
    app_groups = ["maya", "nuke", "nukex", "hiero", "nukestudio"]

    def execute(self):
        # Prepare vendor dir path
        python_2_vendor = os.path.join(FTRACK_MODULE_DIR, "python2_vendor")

        # Add Python 2 modules
        python_paths = [
            # `python-ftrack-api`
            os.path.join(python_2_vendor, "ftrack-python-api", "source"),
            # `arrow`
            os.path.join(python_2_vendor, "arrow"),
            # `builtins` from `python-future`
            # - `python-future` is a strict Python 2 module that causes
            #   crashes of Python 3 scripts executed through pype
            #   (burnin script etc.)
            os.path.join(python_2_vendor, "builtins"),
            # `backports.functools_lru_cache`
            os.path.join(
                python_2_vendor, "backports.functools_lru_cache"
            )
        ]

        # Load PYTHONPATH from current launch context
        python_path = self.launch_context.env.get("PYTHONPATH")
        if python_path:
            python_paths.append(python_path)

        # Set new PYTHONPATH to launch context environments
        self.launch_context.env["PYTHONPATH"] = os.pathsep.join(python_paths)

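Because the existing PYTHONPATH is appended after the vendor directories, a Python 2 interpreter inside the launched host resolves the vendored packages first. A minimal illustration of the join order (paths are invented):

import os

python_paths = ["/vendor/ftrack-python-api/source", "/vendor/arrow"]
existing = os.environ.get("PYTHONPATH")
if existing:
    python_paths.append(existing)

# Vendored paths end up leftmost, so they win module resolution.
print(os.pathsep.join(python_paths))
# e.g. "/vendor/ftrack-python-api/source:/vendor/arrow:/usr/lib/site-packages"
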
@@ -284,7 +284,7 @@ class SyncEntitiesFactory:
        " from Project where full_name is \"{}\""
    )
    entities_query = (
        "select id, name, parent_id, link"
        "select id, name, type_id, parent_id, link"
        " from TypedContext where project_id is \"{}\""
    )
    ignore_custom_attr_key = "avalon_ignore_sync"

@@ -399,11 +399,6 @@ class SyncEntitiesFactory:
                "message": "Synchronization failed"
            }

        # Find all entities in project
        all_project_entities = self.session.query(
            self.entities_query.format(ft_project_id)
        ).all()

        # Store entities by `id` and `parent_id`
        entities_dict = collections.defaultdict(lambda: {
            "children": list(),
@@ -417,6 +412,15 @@ class SyncEntitiesFactory:
            "tasks": {}
        })

        # Find all entities in project
        all_project_entities = self.session.query(
            self.entities_query.format(ft_project_id)
        ).all()
        task_types = self.session.query("select id, name from Type").all()
        task_type_names_by_id = {
            task_type["id"]: task_type["name"]
            for task_type in task_types
        }
        for entity in all_project_entities:
            parent_id = entity["parent_id"]
            entity_type = entity.entity_type

@@ -426,7 +430,8 @@ class SyncEntitiesFactory:

            elif entity_type_low == "task":
                # enrich task info with additional metadata
                task = {"type": entity["type"]["name"]}
                task_type_name = task_type_names_by_id[entity["type_id"]]
                task = {"type": task_type_name}
                entities_dict[parent_id]["tasks"][entity["name"]] = task
                continue

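The change swaps per-task access to `entity["type"]["name"]`, which can lazily fetch the relation for every task, for one up-front query plus an in-memory map. The core of the pattern, assuming a connected `ftrack_api.Session` named `session`:

# One query fetches every task type; the dict gives O(1) name lookups.
task_types = session.query("select id, name from Type").all()
task_type_names_by_id = {
    task_type["id"]: task_type["name"]
    for task_type in task_types
}

# Later, per entity, no extra server round-trip is needed:
task_type_name = task_type_names_by_id[entity["type_id"]]
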
@@ -2,15 +2,11 @@
    "show": {
        "avalon_auto_sync": {
            "label": "Avalon auto-sync",
            "type": "boolean",
            "write_security_role": ["API", "Administrator"],
            "read_security_role": ["API", "Administrator"]
            "type": "boolean"
        },
        "library_project": {
            "label": "Library Project",
            "type": "boolean",
            "write_security_role": ["API", "Administrator"],
            "read_security_role": ["API", "Administrator"]
            "type": "boolean"
        }
    },
    "is_hierarchical": {

@@ -29,6 +29,9 @@ class BaseAction(BaseHandler):
    icon = None
    type = 'Action'

    settings_frack_subkey = "user_handlers"
    settings_enabled_key = "enabled"

    def __init__(self, session):
        '''Expects a ftrack_api.Session instance'''
        if self.label is None:
@@ -67,6 +70,9 @@ class BaseAction(BaseHandler):

    def _discover(self, event):
        entities = self._translate_event(event)
        if not entities:
            return

        accepts = self.discover(self.session, entities, event)
        if not accepts:
            return
@@ -146,21 +152,18 @@ class BaseAction(BaseHandler):

    def _launch(self, event):
        entities = self._translate_event(event)
        if not entities:
            return

        preactions_launched = self._handle_preactions(self.session, event)
        if preactions_launched is False:
            return

        interface = self._interface(
            self.session, entities, event
        )

        interface = self._interface(self.session, entities, event)
        if interface:
            return interface

        response = self.launch(
            self.session, entities, event
        )
        response = self.launch(self.session, entities, event)

        return self._handle_result(response)

@@ -196,50 +199,29 @@ class BaseAction(BaseHandler):

        return result

    @staticmethod
    def roles_check(settings_roles, user_roles, default=True):
        """Compare roles from settings and user's roles.

class ServerAction(BaseAction):
    """Action class meant to be used on event server.
        Args:
            settings_roles(list): List of role names from settings.
            user_roles(list): User's lowered role names.
            default(bool): If `settings_roles` is empty list.

    Unlike `BaseAction`, roles are not checked on register but on discover.
    For the same reason register is modified to not filter topics by username.
    """
        Returns:
            bool: `True` if user has at least one role from settings or
                default if `settings_roles` is empty.
        """
        if not settings_roles:
            return default

    def __init__(self, *args, **kwargs):
        if not self.role_list:
            self.role_list = set()
        else:
            self.role_list = set(
                role_name.lower()
                for role_name in self.role_list
            )
        super(ServerAction, self).__init__(*args, **kwargs)

    def _register_role_check(self):
        # Skip register role check.
        return

    def _discover(self, event):
        """Check user discover availability."""
        if not self._check_user_discover(event):
            return
        return super(ServerAction, self)._discover(event)

    def _check_user_discover(self, event):
        """Should be action discovered by user trying to show actions."""
        if not self.role_list:
            return True

        user_entity = self._get_user_entity(event)
        if not user_entity:
            return False

        for role in user_entity["user_security_roles"]:
            lowered_role = role["security_role"]["name"].lower()
            if lowered_role in self.role_list:
        for role_name in settings_roles:
            if role_name.lower() in user_roles:
                return True
        return False

    def _get_user_entity(self, event):
    @classmethod
    def get_user_entity_from_event(cls, session, event):
        """Query user entity from event."""
        not_set = object()

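A quick illustration of the new `roles_check` semantics (role names invented): settings roles are compared case-insensitively against the user's already-lowered roles, and an empty settings list falls back to `default`:

BaseAction.roles_check([], ["administrator"])                   # True (default)
BaseAction.roles_check([], ["administrator"], default=False)    # False
BaseAction.roles_check(["Administrator"], ["administrator"])    # True
BaseAction.roles_check(["Project Manager"], ["administrator"])  # False
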
@@ -251,17 +233,91 @@ class ServerAction(BaseAction):
        user_id = user_info.get("id")
        username = user_info.get("username")
        if user_id:
            user_entity = self.session.query(
            user_entity = session.query(
                "User where id is {}".format(user_id)
            ).first()
        if not user_entity and username:
            user_entity = self.session.query(
            user_entity = session.query(
                "User where username is {}".format(username)
            ).first()
        event["data"]["user_entity"] = user_entity

        return user_entity

    @classmethod
    def get_user_roles_from_event(cls, session, event):
        """Query user roles from event."""
        not_set = object()

        user_roles = event["data"].get("user_roles", not_set)
        if user_roles is not_set:
            user_roles = []
            user_entity = cls.get_user_entity_from_event(session, event)
            for role in user_entity["user_security_roles"]:
                user_roles.append(role["security_role"]["name"].lower())
            event["data"]["user_roles"] = user_roles
        return user_roles

    def get_project_name_from_event(self, session, event, entities):
        """Load or query and fill project name from/to event data.

        Project name is stored to event data so repeated calls within one
        event do not have to query it again.

        Args:
            session (ftrack_api.Session): Current session.
            event (ftrack_api.Event): Processed event by session.
            entities (list): Ftrack entities of selection.
        """

        # Try to get project name from event
        project_name = event["data"].get("project_name")
        if not project_name:
            project_entity = self.get_project_from_entity(
                entities[0], session
            )
            project_name = project_entity["full_name"]

        event["data"]["project_name"] = project_name
        return project_name

    def get_ftrack_settings(self, session, event, entities):
        project_name = self.get_project_name_from_event(
            session, event, entities
        )
        project_settings = self.get_project_settings_from_event(
            event, project_name
        )
        return project_settings["ftrack"]

    def valid_roles(self, session, entities, event):
        """Validate user roles by settings.

        Method requires the `settings_key` attribute to be set.
        """
        ftrack_settings = self.get_ftrack_settings(session, event, entities)
        settings = (
            ftrack_settings[self.settings_frack_subkey][self.settings_key]
        )
        if self.settings_enabled_key:
            if not settings.get(self.settings_enabled_key, True):
                return False

        user_role_list = self.get_user_roles_from_event(session, event)
        if not self.roles_check(settings.get("role_list"), user_role_list):
            return False
        return True


class ServerAction(BaseAction):
    """Action class meant to be used on event server.

    Unlike `BaseAction`, roles are not checked on register but on discover.
    For the same reason register is modified to not filter topics by username.
    """

    settings_frack_subkey = "events"

    def register(self):
        """Register subscription to Ftrack event hub."""
        self.session.event_hub.subscribe(

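All of these helpers share one caching idiom: the first call stores its result on `event["data"]`, so every later handler processing the same event reuses it instead of re-querying the server. A stripped-down sketch of the idiom (names illustrative; `get_user_entity_from_event` stands in for the classmethod above):

def get_user_roles_cached(session, event):
    not_set = object()  # sentinel: an empty cached list still counts as a hit

    user_roles = event["data"].get("user_roles", not_set)
    if user_roles is not_set:
        user_roles = []
        user_entity = get_user_entity_from_event(session, event)
        for role in user_entity["user_security_roles"]:
            user_roles.append(role["security_role"]["name"].lower())
        event["data"]["user_roles"] = user_roles
    return user_roles

Note the `object()` sentinel: unlike a plain `None` check, it lets an empty result be cached and reused too.
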
@@ -37,7 +37,6 @@ class BaseHandler(object):
    type = 'No-type'
    ignore_me = False
    preactions = []
    role_list = []

    @staticmethod
    def join_query_keys(keys):
@@ -142,28 +141,7 @@ class BaseHandler(object):
    def reset_session(self):
        self.session.reset()

    def _register_role_check(self):
        if not self.role_list or not isinstance(self.role_list, (list, tuple)):
            return

        user_entity = self.session.query(
            "User where username is \"{}\"".format(self.session.api_user)
        ).one()
        available = False
        lowercase_rolelist = [
            role_name.lower()
            for role_name in self.role_list
        ]
        for role in user_entity["user_security_roles"]:
            if role["security_role"]["name"].lower() in lowercase_rolelist:
                available = True
                break
        if available is False:
            raise MissingPermision

    def _preregister(self):
        self._register_role_check()

        # Custom validations
        result = self.preregister()
        if result is None:
@@ -550,7 +528,7 @@ class BaseHandler(object):
            "Publishing event: {}"
        ).format(str(event.__dict__)))

    def get_project_from_entity(self, entity):
    def get_project_from_entity(self, entity, session=None):
        low_entity_type = entity.entity_type.lower()
        if low_entity_type == "project":
            return entity
@@ -571,72 +549,32 @@ class BaseHandler(object):
                return parent["project"]

        project_data = entity["link"][0]
        return self.session.query(

        if session is None:
            session = self.session
        return session.query(
            "Project where id is {}".format(project_data["id"])
        ).one()

    def get_project_entity_from_event(self, session, event, project_id):
        """Load or query and fill project entity from/to event data.

        Project data are stored by ftrack id because in most cases it is
        easier to access project id than project name.

        Args:
            session (ftrack_api.Session): Current session.
            event (ftrack_api.Event): Processed event by session.
            project_id (str): Ftrack project id.
        """
        if not project_id:
            raise ValueError(
                "Entered `project_id` is not valid. {} ({})".format(
                    str(project_id), str(type(project_id))
                )
            )
        # Try to get project entity from event
        project_entities = event["data"].get("project_entities")
        if not project_entities:
            project_entities = {}
            event["data"]["project_entities"] = project_entities

        project_entity = project_entities.get(project_id)
        if not project_entity:
            # Get project entity from task and store to event
            project_entity = session.get("Project", project_id)
            event["data"]["project_entities"][project_id] = project_entity
        return project_entity

    def get_settings_for_project(
        self, session, event, project_id=None, project_entity=None
    ):
    def get_project_settings_from_event(self, event, project_name):
        """Load or fill pype's project settings from event data.

        Project data are stored by ftrack id because in most cases it is
        easier to access project id than project name.

        Args:
            session (ftrack_api.Session): Current session.
            event (ftrack_api.Event): Processed event by session.
            project_id (str): Ftrack project id. Must be entered if
                project_entity is not.
            project_entity (ftrack_api.Entity): Project entity. Must be entered
                if project_id is not.
            project_entity (ftrack_api.Entity): Project entity.
        """
        if not project_entity:
            project_entity = self.get_project_entity_from_event(
                session, event, project_id
            )

        project_name = project_entity["full_name"]

        project_settings_by_id = event["data"].get("project_settings")
        if not project_settings_by_id:
            project_settings_by_id = {}
            event["data"]["project_settings"] = project_settings_by_id

        project_settings = project_settings_by_id.get(project_id)
        project_settings = project_settings_by_id.get(project_name)
        if not project_settings:
            project_settings = get_project_settings(project_name)
            event["data"]["project_settings"][project_id] = project_settings
            event["data"]["project_settings"][project_name] = project_settings
        return project_settings

    @staticmethod
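The `session=None` default added to `get_project_from_entity` is a small but handy pattern: the helper keeps defaulting to the handler's own session while classmethods or external callers can inject their own. A usage sketch (`handler`, `entity` and `other_session` are illustrative):

# Default: resolves against the handler's own session.
project = handler.get_project_from_entity(entity)

# Explicit session, e.g. from a classmethod that has no handler state:
project = handler.get_project_from_entity(entity, session=other_session)
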
@@ -46,3 +46,34 @@ class BaseEvent(BaseHandler):
            session,
            ignore=['socialfeed', 'socialnotification']
        )

    def get_project_name_from_event(self, session, event, project_id):
        """Load or query and fill project name from/to event data.

        Project data are stored by ftrack id because in most cases it is
        easier to access project id than project name.

        Args:
            session (ftrack_api.Session): Current session.
            event (ftrack_api.Event): Processed event by session.
            project_id (str): Ftrack project id.
        """
        if not project_id:
            raise ValueError(
                "Entered `project_id` is not valid. {} ({})".format(
                    str(project_id), str(type(project_id))
                )
            )
        # Try to get project name from event
        project_data = event["data"].get("project_data")
        if not project_data:
            project_data = {}
            event["data"]["project_data"] = project_data

        project_name = project_data.get(project_id)
        if not project_name:
            # Query project entity and store its name to event
            project_entity = session.get("Project", project_id)
            project_name = project_entity["full_name"]
            event["data"]["project_data"][project_id] = project_name
        return project_name

1 pype/modules/ftrack/python2_vendor/arrow Submodule
@@ -0,0 +1 @@
Subproject commit b746fedf7286c3755a46f07ab72f4c414cd41fc0

@@ -0,0 +1 @@
Subproject commit d277f474ab016e7b53479c36af87cb861d0cc53e

@@ -13,7 +13,7 @@ from . import login_dialog
from pype.api import Logger, resources


log = Logger().get_logger("FtrackModule", "ftrack")
log = Logger().get_logger("FtrackModule")


class FtrackTrayWrapper:

Some files were not shown because too many files have changed in this diff.