Merge branch 'develop' into feature/get_rid_of_subprocess

Milan Kolar 2021-01-08 22:44:19 +01:00
commit 5f98463e40
179 changed files with 6949 additions and 12757 deletions

.gitignore (6 changes)

@@ -71,6 +71,10 @@ package-lock.json
pype/premiere/ppro/js/debug.log
# Idea
# IDEA
######
.idea/
# VScode files
.vscode/
.env

.gitmodules (6 changes)

@@ -15,3 +15,9 @@
path = repos/acre
url = git@github.com:antirotor/acre.git
branch = fix/unformatted-tokens
[submodule "pype/modules/ftrack/python2_vendor/ftrack-python-api"]
path = pype/modules/ftrack/python2_vendor/ftrack-python-api
url = https://bitbucket.org/ftrack/ftrack-python-api.git
[submodule "pype/modules/ftrack/python2_vendor/arrow"]
path = pype/modules/ftrack/python2_vendor/arrow
url = git@github.com:arrow-py/arrow.git

pype.py (22 changes)

@@ -218,7 +218,7 @@ def boot():
def get_info() -> list:
"""Print additional information to console."""
from pype.lib.mongo import get_default_components
from pype.lib.log import LOG_DATABASE_NAME, LOG_COLLECTION_NAME
from pype.lib.log import PypeLogger
components = get_default_components()
@@ -242,14 +242,18 @@
infos.append(("Using Muster at",
os.environ.get("MUSTER_REST_URL")))
if components["host"]:
infos.append(("Logging to MongoDB", components["host"]))
infos.append((" - port", components["port"] or "<N/A>"))
infos.append((" - database", LOG_DATABASE_NAME))
infos.append((" - collection", LOG_COLLECTION_NAME))
infos.append((" - user", components["username"] or "<N/A>"))
if components["auth_db"]:
infos.append((" - auth source", components["auth_db"]))
# Reinitialize
PypeLogger.initialize()
log_components = PypeLogger.log_mongo_url_components
if log_components["host"]:
infos.append(("Logging to MongoDB", log_components["host"]))
infos.append((" - port", log_components["port"] or "<N/A>"))
infos.append((" - database", PypeLogger.log_database_name))
infos.append((" - collection", PypeLogger.log_collection_name))
infos.append((" - user", log_components["username"] or "<N/A>"))
if log_components["auth_db"]:
infos.append((" - auth source", log_components["auth_db"]))
maximum = max([len(i[0]) for i in infos])
formatted = []
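
The hunk above swaps the module-level log constants for PypeLogger attributes; the `maximum` line then pads every label to the longest one before printing. A minimal standalone sketch of that alignment step, with made-up values:

infos = [
    ("Using Muster at", "http://muster:9891"),
    ("Logging to MongoDB", "localhost"),
]
# pad each label so all values start in the same column
maximum = max(len(i[0]) for i in infos)
formatted = []
for label, value in infos:
    formatted.append("{}:{}{}".format(label, " " * (maximum - len(label) + 1), value))
print("\n".join(formatted))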


@@ -11,7 +11,9 @@ from pype.api import (
from pype.lib import (
env_value_to_bool,
PreLaunchHook,
ApplicationLaunchFailed
ApplicationLaunchFailed,
get_workdir_data,
get_workdir_with_workdir_data,
)
import acre
@@ -140,17 +142,15 @@ class GlobalHostDataHook(PreLaunchHook):
)
return
workdir_data = self._prepare_workdir_data(
project_doc, asset_doc, task_name
workdir_data = get_workdir_data(
project_doc, asset_doc, task_name, self.host_name
)
self.data["workdir_data"] = workdir_data
hierarchy = workdir_data["hierarchy"]
anatomy = self.data["anatomy"]
try:
anatomy_filled = anatomy.format(workdir_data)
workdir = os.path.normpath(anatomy_filled["work"]["folder"])
workdir = get_workdir_with_workdir_data(workdir_data, anatomy)
if not os.path.exists(workdir):
self.log.debug(
"Creating workdir folder: \"{}\"".format(workdir)
@@ -168,7 +168,6 @@ class GlobalHostDataHook(PreLaunchHook):
"AVALON_TASK": task_name,
"AVALON_APP": self.host_name,
"AVALON_APP_NAME": self.app_name,
"AVALON_HIERARCHY": hierarchy,
"AVALON_WORKDIR": workdir
}
self.log.debug(
@@ -180,21 +179,6 @@ class GlobalHostDataHook(PreLaunchHook):
self.prepare_last_workfile(workdir)
def _prepare_workdir_data(self, project_doc, asset_doc, task_name):
hierarchy = "/".join(asset_doc["data"]["parents"])
data = {
"project": {
"name": project_doc["name"],
"code": project_doc["data"].get("code")
},
"task": task_name,
"asset": asset_doc["name"],
"app": self.host_name,
"hierarchy": hierarchy
}
return data
def prepare_last_workfile(self, workdir):
"""last workfile workflow preparation.

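The hook now delegates workdir construction to the shared helpers imported at the top of the file. A sketch of the resulting flow, assuming get_workdir_data returns the same mapping the removed _prepare_workdir_data built by hand and get_workdir_with_workdir_data wraps the old anatomy.format() + normpath steps:

import os
from pype.lib import get_workdir_data, get_workdir_with_workdir_data

def resolve_workdir(project_doc, asset_doc, task_name, host_name, anatomy):
    # same mapping _prepare_workdir_data used to assemble manually
    workdir_data = get_workdir_data(project_doc, asset_doc, task_name, host_name)
    # replaces anatomy.format(workdir_data)["work"]["folder"] + os.path.normpath
    workdir = get_workdir_with_workdir_data(workdir_data, anatomy)
    if not os.path.exists(workdir):
        os.makedirs(workdir)
    return workdir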

@@ -11,7 +11,7 @@ class LaunchWithWindowsShell(PreLaunchHook):
"""
order = 10
app_groups = ["nuke", "nukex", "hiero", "nukestudio"]
app_groups = ["resolve", "nuke", "nukex", "hiero", "nukestudio"]
platforms = ["windows"]
def execute(self):


@@ -14,8 +14,10 @@ class ResolvePrelaunch(PreLaunchHook):
app_groups = ["resolve"]
def execute(self):
# TODO: add OTIO installation from `pype/requirements.py`
# making sure python 3.6 is installed at provided path
py36_dir = os.path.normpath(self.env.get("PYTHON36_RESOLVE", ""))
py36_dir = os.path.normpath(
self.launch_context.env.get("PYTHON36_RESOLVE", ""))
assert os.path.isdir(py36_dir), (
"Python 3.6 is not installed at the provided folder path. Either "
"make sure the `environments\resolve.json` is having correctly "
@@ -23,11 +25,10 @@ class ResolvePrelaunch(PreLaunchHook):
f"in given path. \nPYTHON36_RESOLVE: `{py36_dir}`"
)
self.log.info(f"Path to Resolve Python folder: `{py36_dir}`...")
self.env["PYTHON36_RESOLVE"] = py36_dir
# setting utility scripts dir for scripts syncing
us_dir = os.path.normpath(
self.env.get("RESOLVE_UTILITY_SCRIPTS_DIR", "")
self.launch_context.env.get("RESOLVE_UTILITY_SCRIPTS_DIR", "")
)
assert os.path.isdir(us_dir), (
"Resolve utility script dir does not exists. Either make sure "
@@ -38,8 +39,9 @@ class ResolvePrelaunch(PreLaunchHook):
self.log.debug(f"-- us_dir: `{us_dir}`")
# correctly format path for pre python script
pre_py_sc = os.path.normpath(self.env.get("PRE_PYTHON_SCRIPT", ""))
self.env["PRE_PYTHON_SCRIPT"] = pre_py_sc
pre_py_sc = os.path.normpath(
self.launch_context.env.get("PRE_PYTHON_SCRIPT", ""))
self.launch_context.env["PRE_PYTHON_SCRIPT"] = pre_py_sc
self.log.debug(f"-- pre_py_sc: `{pre_py_sc}`...")
try:
__import__("pype.hosts.resolve")
@@ -55,4 +57,4 @@ class ResolvePrelaunch(PreLaunchHook):
# Resolve Setup integration
importlib.reload(utils)
self.log.debug(f"-- utils.__file__: `{utils.__file__}`")
utils.setup(self.env)
utils.setup(self.launch_context.env)
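
Every change in this hook replaces direct self.env access with self.launch_context.env; a minimal hook sketch under that pattern, reusing only names that appear in the hunks above:

import os
from pype.lib import PreLaunchHook

class ExamplePrelaunch(PreLaunchHook):
    app_groups = ["resolve"]

    def execute(self):
        # read from the launch context environment instead of self.env
        py36_dir = os.path.normpath(
            self.launch_context.env.get("PYTHON36_RESOLVE", ""))
        # write modifications back the same way
        self.launch_context.env["PYTHON36_RESOLVE"] = py36_dir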


@@ -9,7 +9,7 @@ from pyblish import api as pyblish
from pype.api import Logger
from pype import PLUGINS_DIR
log = Logger().get_logger(__name__, "fusion")
log = Logger().get_logger(__name__)
AVALON_CONFIG = os.environ["AVALON_CONFIG"]


@@ -9,7 +9,7 @@ import shutil
from pype.api import Logger
log = Logger().get_logger(__name__, "fusion")
log = Logger().get_logger(__name__)
def _sync_utility_scripts(env=None):


@@ -31,7 +31,7 @@ __all__ = [
]
# get logger
log = Logger().get_logger(__name__, "hiero")
log = Logger().get_logger(__name__)
''' Creating all important host related variables '''


@@ -4,7 +4,7 @@ from pype.api import Logger
from .lib import sync_avalon_data_to_workfile, launch_workfiles_app
from .tags import add_tags_from_presets
log = Logger().get_logger(__name__, "hiero")
log = Logger().get_logger(__name__)
def startupCompleted(event):


@@ -9,7 +9,7 @@ from avalon.vendor.Qt import (QtWidgets, QtGui)
import pype.api as pype
from pype.api import Logger, Anatomy
log = Logger().get_logger(__name__, "hiero")
log = Logger().get_logger(__name__)
cached_process = None


@@ -12,7 +12,7 @@ from .lib import (
set_workfiles
)
log = Logger().get_logger(__name__, "hiero")
log = Logger().get_logger(__name__)
self = sys.modules[__name__]
self._change_context_menu = None


@@ -8,7 +8,7 @@ from pprint import pformat
from pype.api import Logger
from avalon import io
log = Logger().get_logger(__name__, "hiero")
log = Logger().get_logger(__name__)
def tag_data():


@@ -4,7 +4,7 @@ from avalon import api
from pype.api import Logger
log = Logger().get_logger(__name__, "hiero")
log = Logger().get_logger(__name__)
def file_extensions():


@@ -13,12 +13,14 @@ self._menu = os.environ.get('PYPE_STUDIO_NAME') or "Pype"
log = logging.getLogger(__name__)
def _get_menu():
def _get_menu(menu_name=None):
"""Return the menu instance if it currently exists in Maya"""
if menu_name is None:
menu_name = self._menu
widgets = dict((
w.objectName(), w) for w in QtWidgets.QApplication.allWidgets())
menu = widgets.get(self._menu)
menu = widgets.get(menu_name)
return menu
@@ -40,10 +42,51 @@ def deferred():
command=lambda *args: mayalookassigner.show()
)
def modify_workfiles():
from pype.tools import workfiles
def launch_workfiles_app(*_args, **_kwargs):
workfiles.show(
os.path.join(
cmds.workspace(query=True, rootDirectory=True),
cmds.workspace(fileRuleEntry="scene")
),
parent=pipeline._parent
)
# Find the pipeline menu
top_menu = _get_menu(pipeline._menu)
# Try to find workfile tool action in the menu
workfile_action = None
for action in top_menu.actions():
if action.text() == "Work Files":
workfile_action = action
break
# Add at the top of menu if "Work Files" action was not found
after_action = ""
if workfile_action:
# Use action's object name for `insertAfter` argument
after_action = workfile_action.objectName()
# Insert action to menu
cmds.menuItem(
"Work Files",
parent=pipeline._menu,
command=launch_workfiles_app,
insertAfter=after_action
)
# Remove replaced action
if workfile_action:
top_menu.removeAction(workfile_action)
log.info("Attempting to install scripts menu..")
add_build_workfiles_item()
add_look_assigner_item()
modify_workfiles()
try:
import scriptsmenu.launchformaya as launchformaya

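Both _get_menu() and modify_workfiles() resolve menus by Qt objectName; shown standalone below (the Qt binding import is an assumption, based on the avalon.vendor.Qt imports elsewhere in this commit):

from avalon.vendor.Qt import QtWidgets

def find_menu(menu_name):
    # map every live widget by objectName, exactly as _get_menu() does above
    widgets = dict((
        w.objectName(), w) for w in QtWidgets.QApplication.allWidgets())
    return widgets.get(menu_name)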

@@ -15,7 +15,7 @@ from . import lib
self = sys.modules[__name__]
self.workfiles_launched = False
log = Logger().get_logger(__name__, "nuke")
log = Logger().get_logger(__name__)
AVALON_CONFIG = os.getenv("AVALON_CONFIG", "pype")


@@ -20,7 +20,7 @@ from .presets import (
from .utils import set_context_favorites
log = pype.Logger().get_logger(__name__, "nuke")
log = pype.Logger().get_logger(__name__)
self = sys.modules[__name__]
self._project = None


@@ -1,17 +1,37 @@
import os
import nuke
from avalon.api import Session
from pype.hosts.nuke import lib
from ...lib import BuildWorkfile
from pype.api import Logger
from pype.tools import workfiles
log = Logger().get_logger(__name__, "nuke")
log = Logger().get_logger(__name__)
def install():
menubar = nuke.menu("Nuke")
menu = menubar.findItem(Session["AVALON_LABEL"])
workfile_settings = lib.WorkfileSettings
# replace workfiles tool from avalon core with pype's
name = "Work Files..."
rm_item = [
(i, item) for i, item in enumerate(menu.items()) if name in item.name()
][0]
log.debug("Changing Item: {}".format(rm_item))
menu.removeItem(rm_item[1].name())
menu.addCommand(
name,
lambda: workfiles.show(
os.environ["AVALON_WORKDIR"]
),
index=(rm_item[0])
)
# replace reset resolution from avalon core to pype's
name = "Reset Resolution"
new_name = "Set Resolution"


@@ -1,7 +1,7 @@
from pype.api import Anatomy, config, Logger
import nuke
log = Logger().get_logger(__name__, "nuke")
log = Logger().get_logger(__name__)
def get_anatomy(**kwarg):


@@ -18,7 +18,7 @@ __all__ = [
"ls"
]
log = Logger().get_logger(__name__, "premiere")
log = Logger().get_logger(__name__)
def install():


@@ -10,7 +10,7 @@ from pype.widgets.message_window import message
from pype import PLUGINS_DIR
from pype.api import Logger
log = Logger().get_logger(__name__, "premiere")
log = Logger().get_logger(__name__)
self = sys.modules[__name__]
self._has_been_setup = False


@@ -14,20 +14,32 @@ from .pipeline import (
)
from .lib import (
publish_clip_color,
get_project_manager,
get_current_project,
get_current_sequence,
get_video_track_names,
get_current_track_items,
get_track_item_pype_tag,
set_track_item_pype_tag,
imprint,
set_publish_attribute,
get_publish_attribute,
create_current_sequence_media_bin,
create_compound_clip,
swap_clips,
get_pype_clip_metadata,
set_project_manager_to_folder_name
set_project_manager_to_folder_name,
get_reformated_path,
get_otio_clip_instance_data
)
from .menu import launch_pype_menu
from .plugin import Creator
from .plugin import (
Creator,
PublishClip
)
from .workio import (
open_file,
@@ -57,21 +69,31 @@ __all__ = [
"get_resolve_module",
# lib
"publish_clip_color",
"get_project_manager",
"get_current_project",
"get_current_sequence",
"get_video_track_names",
"get_current_track_items",
"get_track_item_pype_tag",
"set_track_item_pype_tag",
"imprint",
"set_publish_attribute",
"get_publish_attribute",
"create_current_sequence_media_bin",
"create_compound_clip",
"swap_clips",
"get_pype_clip_metadata",
"set_project_manager_to_folder_name",
"get_reformated_path",
"get_otio_clip_instance_data",
# menu
"launch_pype_menu",
# plugin
"Creator",
"PublishClip",
# workio
"open_file",

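A hedged usage sketch of the expanded public API; this only runs inside a Resolve session with pype installed, and mirrors the calls made by the Creator plugin later in this diff:

from pype.hosts.resolve import (
    get_current_track_items,
    get_track_item_pype_tag,
)

# collect timeline items and read back any pype tag stored on them
for item_data in get_current_track_items(filter=False):
    track_item = item_data["clip"]["item"]
    print(track_item.GetName(), get_track_item_pype_tag(track_item))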

@@ -1,31 +1,47 @@
import sys
import json
import re
from opentimelineio import opentime
from pprint import pformat
import pype
from .otio import davinci_export as otio_export
from pype.api import Logger
log = Logger().get_logger(__name__, "resolve")
log = Logger().get_logger(__name__)
self = sys.modules[__name__]
self.pm = None
self.project_manager = None
# Pype sequential rename variables
self.rename_index = 0
self.rename_add = 0
self.pype_metadata_key = "VFX Notes"
self.publish_clip_color = "Pink"
self.pype_marker_workflow = True
# Pype compound clip workflow variable
self.pype_tag_name = "VFX Notes"
# Pype marker workflow variables
self.pype_marker_name = "PYPEDATA"
self.pype_marker_duration = 1
self.pype_marker_color = "Mint"
self.temp_marker_frame = None
def get_project_manager():
from . import bmdvr
if not self.pm:
self.pm = bmdvr.GetProjectManager()
return self.pm
if not self.project_manager:
self.project_manager = bmdvr.GetProjectManager()
return self.project_manager
def get_current_project():
# initialize project manager
get_project_manager()
return self.pm.GetCurrentProject()
return self.project_manager.GetCurrentProject()
def get_current_sequence():
@@ -35,6 +51,22 @@ def get_current_sequence():
return project.GetCurrentTimeline()
def get_video_track_names():
tracks = list()
track_type = "video"
sequence = get_current_sequence()
# get the count of tracks filtered by track type
selected_track_count = sequence.GetTrackCount(track_type)
# loop all tracks and collect their names
for track_index in range(1, (int(selected_track_count) + 1)):
track_name = sequence.GetTrackName(track_type, track_index)
tracks.append(track_name)
return tracks
def get_current_track_items(
filter=False,
track_type=None,
@@ -77,13 +109,168 @@ def get_current_track_items(
if filter is True:
if selecting_color in ti_color:
selected_clips.append(data)
# ti.ClearClipColor()
else:
selected_clips.append(data)
return selected_clips
def get_track_item_pype_tag(track_item):
"""
Get pype track item tag created by creator or loader plugin.
Args:
track_item (resolve.TimelineItem): resolve api object
Returns:
dict: pype tag data
"""
return_tag = None
if self.pype_marker_workflow:
return_tag = get_pype_marker(track_item)
else:
media_pool_item = track_item.GetMediaPoolItem()
# get all tags from track item
_tags = media_pool_item.GetMetadata()
if not _tags:
return None
for key, data in _tags.items():
# return only correct tag defined by global name
if key in self.pype_tag_name:
return_tag = json.loads(data)
return return_tag
def set_track_item_pype_tag(track_item, data=None):
"""
Set pype track item tag to input track_item.
Args:
track_item (resolve.TimelineItem): resolve api object
data (dict): data to be stored into the pype tag
Returns:
dict: stored tag data
"""
data = data or dict()
# get available pype tag if any
tag_data = get_track_item_pype_tag(track_item)
if self.pype_marker_workflow:
# delete tag as it is not updatable
if tag_data:
delete_pype_marker(track_item)
tag_data.update(data)
set_pype_marker(track_item, tag_data)
else:
if tag_data:
media_pool_item = track_item.GetMediaPoolItem()
# if no tag exists yet, create one
tag_data.update(data)
media_pool_item.SetMetadata(
self.pype_tag_name, json.dumps(tag_data))
else:
tag_data = data
# if pype tag available then update with input data
# add it to the input track item
track_item.SetMetadata(self.pype_tag_name, json.dumps(tag_data))
return tag_data
def imprint(track_item, data=None):
"""
Adding `Avalon data` into a resolve track item tag.
Also including publish attribute into tag.
Arguments:
track_item (resolve.TimelineItem): resolve track item object
data (dict): Any data which needs to be imprinted
Examples:
data = {
'asset': 'sq020sh0280',
'family': 'render',
'subset': 'subsetMain'
}
"""
data = data or {}
set_track_item_pype_tag(track_item, data)
# add publish attribute
set_publish_attribute(track_item, True)
def set_publish_attribute(track_item, value):
""" Set Publish attribute in input Tag object
Attribute:
tag (hiero.core.Tag): a tag object
value (bool): True or False
"""
tag_data = get_track_item_pype_tag(track_item)
tag_data["publish"] = value
# set data to the publish attribute
set_track_item_pype_tag(track_item, tag_data)
def get_publish_attribute(track_item):
""" Get Publish attribute from input Tag object
Attribute:
tag (hiero.core.Tag): a tag object
value (bool): True or False
"""
tag_data = get_track_item_pype_tag(track_item)
return tag_data["publish"]
def set_pype_marker(track_item, tag_data):
source_start = track_item.GetLeftOffset()
item_duration = track_item.GetDuration()
frame = int(source_start + (item_duration / 2))
# marker attributes
frameId = (frame / 10) * 10
color = self.pype_marker_color
name = self.pype_marker_name
note = json.dumps(tag_data)
duration = (self.pype_marker_duration / 10) * 10
track_item.AddMarker(
frameId,
color,
name,
note,
duration
)
def get_pype_marker(track_item):
track_item_markers = track_item.GetMarkers()
for marker_frame in track_item_markers:
note = track_item_markers[marker_frame]["note"]
color = track_item_markers[marker_frame]["color"]
name = track_item_markers[marker_frame]["name"]
print(f"_ marker data: {marker_frame} | {name} | {color} | {note}")
if name == self.pype_marker_name and color == self.pype_marker_color:
self.temp_marker_frame = marker_frame
return json.loads(note)
return dict()
def delete_pype_marker(track_item):
track_item.DeleteMarkerAtFrame(self.temp_marker_frame)
self.temp_marker_frame = None
def create_current_sequence_media_bin(sequence):
seq_name = sequence.GetName()
media_pool = get_current_project().GetMediaPool()
@@ -178,7 +365,7 @@ def get_name_with_data(clip_data, presets):
})
def create_compound_clip(clip_data, folder, rename=False, **kwargs):
def create_compound_clip(clip_data, name, folder):
"""
Convert timeline object into nested timeline object
@@ -186,8 +373,7 @@ def create_compound_clip(clip_data, folder, rename=False, **kwargs):
clip_data (dict): timeline item object packed into dict
with project, timeline (sequence)
folder (resolve.MediaPool.Folder): media pool folder object,
rename (bool)[optional]: renaming in sequence or not
kwargs (optional): additional data needed for rename=True (presets)
name (str): name for compound clip
Returns:
resolve.MediaPoolItem: media pool item with compound clip timeline(cct)
@@ -199,34 +385,12 @@ def create_compound_clip(clip_data, folder, rename=False, **kwargs):
# get details of objects
clip_item = clip["item"]
track = clip_data["track"]
mp = project.GetMediaPool()
# get clip attributes
clip_attributes = get_clip_attributes(clip_item)
print(f"_ clip_attributes: {pformat(clip_attributes)}")
if rename:
presets = kwargs.get("presets")
if presets:
name, data = get_name_with_data(clip_data, presets)
# add hierarchy data to clip attributes
clip_attributes.update(data)
else:
name = "{:0>3}_{:0>4}".format(
int(track["index"]), int(clip["index"]))
else:
# build name
clip_name_split = clip_item.GetName().split(".")
name = "_".join([
track["name"],
str(track["index"]),
clip_name_split[0],
str(clip["index"])]
)
# get metadata
mp_item = clip_item.GetMediaPoolItem()
mp_props = mp_item.GetClipProperty()
@@ -283,9 +447,9 @@ def create_compound_clip(clip_data, folder, rename=False, **kwargs):
project.SetCurrentTimeline(sq_origin)
# Add collected metadata and attributes to the compound clip:
if mp_item.GetMetadata(self.pype_metadata_key):
clip_attributes[self.pype_metadata_key] = mp_item.GetMetadata(
self.pype_metadata_key)[self.pype_metadata_key]
if mp_item.GetMetadata(self.pype_tag_name):
clip_attributes[self.pype_tag_name] = mp_item.GetMetadata(
self.pype_tag_name)[self.pype_tag_name]
# stringify
clip_attributes = json.dumps(clip_attributes)
@@ -295,7 +459,7 @@ def create_compound_clip(clip_data, folder, rename=False, **kwargs):
cct.SetMetadata(k, v)
# add metadata to cct
cct.SetMetadata(self.pype_metadata_key, clip_attributes)
cct.SetMetadata(self.pype_tag_name, clip_attributes)
# reset start timecode of the compound clip
cct.SetClipProperty("Start TC", mp_props["Start TC"])
@@ -314,7 +478,7 @@ def swap_clips(from_clip, to_clip, to_clip_name, to_in_frame, to_out_frame):
It will add take and activate it to the frame range which is inputted
Args:
from_clip (resolve.mediaPoolItem)
from_clip (resolve.TimelineItem)
to_clip (resolve.mediaPoolItem)
to_clip_name (str): name of to_clip
to_in_frame (float): cut in frame, usually `GetLeftOffset()`
@@ -373,7 +537,7 @@ def get_pype_clip_metadata(clip):
mp_item = clip.GetMediaPoolItem()
metadata = mp_item.GetMetadata()
return metadata.get(self.pype_metadata_key)
return metadata.get(self.pype_tag_name)
def get_clip_attributes(clip):
@@ -424,16 +588,16 @@ def set_project_manager_to_folder_name(folder_name):
set_folder = False
# go back to root folder
if self.pm.GotoRootFolder():
if self.project_manager.GotoRootFolder():
log.info(f"Testing existing folder: {folder_name}")
folders = convert_resolve_list_type(
self.pm.GetFoldersInCurrentFolder())
self.project_manager.GetFoldersInCurrentFolder())
log.info(f"Testing existing folders: {folders}")
# get the first available folder object
# with the same name as in `folder_name` else return False
if next((f for f in folders if f in folder_name), False):
log.info(f"Found existing folder: {folder_name}")
set_folder = self.pm.OpenFolder(folder_name)
set_folder = self.project_manager.OpenFolder(folder_name)
if set_folder:
return True
@@ -441,11 +605,11 @@ def set_project_manager_to_folder_name(folder_name):
# if folder by name is not existent then create one
# go back to root folder
log.info(f"Folder `{folder_name}` not found and will be created")
if self.pm.GotoRootFolder():
if self.project_manager.GotoRootFolder():
try:
# create folder by given name
self.pm.CreateFolder(folder_name)
self.pm.OpenFolder(folder_name)
self.project_manager.CreateFolder(folder_name)
self.project_manager.OpenFolder(folder_name)
return True
except NameError as e:
log.error((f"Folder with name `{folder_name}` cannot be created!"
@@ -462,3 +626,80 @@ def convert_resolve_list_type(resolve_list):
"Input argument should be dict() type")
return [resolve_list[i] for i in sorted(resolve_list.keys())]
def get_reformated_path(path, padded=True):
"""
Return path with the frame-range token replaced by a printf-style expression
Args:
path (str): path url or simple file name
Returns:
str: string with reformatted path
Example:
get_reformated_path("plate.[0001-1008].exr") > plate.%04d.exr
"""
num_pattern = "(\\[\\d+\\-\\d+\\])"
padding_pattern = "(\\d+)(?=-)"
if "[" in path:
padding = len(re.findall(padding_pattern, path).pop())
if padded:
path = re.sub(num_pattern, f"%0{padding}d", path)
else:
path = re.sub(num_pattern, "%d", path)
return path
def create_otio_time_range_from_track_item_data(track_item_data):
track_item = track_item_data["clip"]["item"]
project = track_item_data["project"]
timeline = track_item_data["sequence"]
timeline_start = timeline.GetStartFrame()
frame_start = int(track_item.GetStart() - timeline_start)
frame_duration = int(track_item.GetDuration())
fps = project.GetSetting("timelineFrameRate")
return otio_export.create_otio_time_range(
frame_start, frame_duration, fps)
def get_otio_clip_instance_data(otio_timeline, track_item_data):
"""
Return otio objects for timeline, track and clip
Args:
track_item_data (dict): track_item_data from list returned by
resolve.get_current_track_items()
otio_timeline (otio.schema.Timeline): otio object
Returns:
dict: otio clip object
"""
track_item = track_item_data["clip"]["item"]
track_name = track_item_data["track"]["name"]
timeline_range = create_otio_time_range_from_track_item_data(
track_item_data)
for otio_clip in otio_timeline.each_clip():
# keep the otio track name in its own variable so it does not shadow
# the track name taken from track_item_data above
otio_track_name = otio_clip.parent().name
parent_range = otio_clip.range_in_parent()
if track_name not in otio_track_name:
continue
if otio_clip.name not in track_item.GetName():
continue
if pype.lib.is_overlapping_otio_ranges(
parent_range, timeline_range, strict=True):
# add pypedata marker to otio_clip metadata
for marker in otio_clip.markers:
if self.pype_marker_name in marker.name:
otio_clip.metadata.update(marker.metadata)
return {"otioClip": otio_clip}
return None

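The marker workflow above stores the whole tag payload as JSON in a marker note; the roundtrip in isolation:

import json

tag_data = {"asset": "sq020sh0280", "family": "render", "subset": "subsetMain"}
note = json.dumps(tag_data)   # what set_pype_marker() writes into the note
restored = json.loads(note)   # what get_pype_marker() returns
assert restored == tag_data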
View file

@@ -4,6 +4,17 @@ QWidget {
font-size: 13px;
}
QComboBox {
border: 1px solid #090909;
background-color: #201f1f;
color: #ffffff;
}
QComboBox QAbstractItemView
{
color: white;
}
QPushButton {
border: 1px solid #090909;
background-color: #201f1f;


@@ -0,0 +1,324 @@
""" compatibility OpenTimelineIO 0.12.0 and older
"""
import os
import re
import sys
import json
import opentimelineio as otio
from . import utils
import clique
self = sys.modules[__name__]
self.track_types = {
"video": otio.schema.TrackKind.Video,
"audio": otio.schema.TrackKind.Audio
}
self.project_fps = None
def create_otio_rational_time(frame, fps):
return otio.opentime.RationalTime(
float(frame),
float(fps)
)
def create_otio_time_range(start_frame, frame_duration, fps):
return otio.opentime.TimeRange(
start_time=create_otio_rational_time(start_frame, fps),
duration=create_otio_rational_time(frame_duration, fps)
)
def create_otio_reference(media_pool_item):
metadata = _get_metadata_media_pool_item(media_pool_item)
mp_clip_property = media_pool_item.GetClipProperty()
path = mp_clip_property["File Path"]
reformat_path = utils.get_reformated_path(path, padded=True)
padding = utils.get_padding_from_path(path)
if padding:
metadata.update({
"isSequence": True,
"padding": padding
})
# get clip properties according to type
mp_clip_property = media_pool_item.GetClipProperty()
fps = float(mp_clip_property["FPS"])
if mp_clip_property["Type"] == "Video":
frame_start = int(mp_clip_property["Start"])
frame_duration = int(mp_clip_property["Frames"])
else:
audio_duration = str(mp_clip_property["Duration"])
frame_start = 0
frame_duration = int(utils.timecode_to_frames(
audio_duration, float(fps)))
otio_ex_ref_item = None
if padding:
# if it is file sequence try to create `ImageSequenceReference`
# the OTIO might not be compatible so return nothing and do it old way
try:
dirname, filename = os.path.split(path)
collection = clique.parse(filename, '{head}[{ranges}]{tail}')
padding_num = len(re.findall("(\\d+)(?=-)", filename).pop())
otio_ex_ref_item = otio.schema.ImageSequenceReference(
target_url_base=dirname + os.sep,
name_prefix=collection.format("{head}"),
name_suffix=collection.format("{tail}"),
start_frame=frame_start,
frame_zero_padding=padding_num,
rate=fps,
available_range=create_otio_time_range(
frame_start,
frame_duration,
fps
)
)
except AttributeError:
pass
if not otio_ex_ref_item:
# in case old OTIO or video file create `ExternalReference`
otio_ex_ref_item = otio.schema.ExternalReference(
target_url=reformat_path,
available_range=create_otio_time_range(
frame_start,
frame_duration,
fps
)
)
# add metadata to otio item
add_otio_metadata(otio_ex_ref_item, media_pool_item, **metadata)
return otio_ex_ref_item
def create_otio_markers(track_item, fps):
track_item_markers = track_item.GetMarkers()
markers = []
for marker_frame in track_item_markers:
note = track_item_markers[marker_frame]["note"]
if "{" in note and "}" in note:
metadata = json.loads(note)
else:
metadata = {"note": note}
markers.append(
otio.schema.Marker(
name=track_item_markers[marker_frame]["name"],
marked_range=create_otio_time_range(
marker_frame,
track_item_markers[marker_frame]["duration"],
fps
),
color=track_item_markers[marker_frame]["color"].upper(),
metadata=metadata
)
)
return markers
def create_otio_clip(track_item):
media_pool_item = track_item.GetMediaPoolItem()
mp_clip_property = media_pool_item.GetClipProperty()
if not self.project_fps:
fps = mp_clip_property["FPS"]
else:
fps = self.project_fps
name = track_item.GetName()
media_reference = create_otio_reference(media_pool_item)
source_range = create_otio_time_range(
int(track_item.GetLeftOffset()),
int(track_item.GetDuration()),
fps
)
if mp_clip_property["Type"] == "Audio":
return_clips = list()
audio_channels = mp_clip_property["Audio Ch"]
for channel in range(0, int(audio_channels)):
clip = otio.schema.Clip(
name=f"{name}_{channel}",
source_range=source_range,
media_reference=media_reference
)
for marker in create_otio_markers(track_item, fps):
clip.markers.append(marker)
return_clips.append(clip)
return return_clips
else:
clip = otio.schema.Clip(
name=name,
source_range=source_range,
media_reference=media_reference
)
for marker in create_otio_markers(track_item, fps):
clip.markers.append(marker)
return clip
def create_otio_gap(gap_start, clip_start, tl_start_frame, fps):
return otio.schema.Gap(
source_range=create_otio_time_range(
gap_start,
(clip_start - tl_start_frame) - gap_start,
fps
)
)
def _create_otio_timeline(project, timeline, fps):
metadata = _get_timeline_metadata(project, timeline)
start_time = create_otio_rational_time(
timeline.GetStartFrame(), fps)
otio_timeline = otio.schema.Timeline(
name=timeline.GetName(),
global_start_time=start_time,
metadata=metadata
)
return otio_timeline
def _get_timeline_metadata(project, timeline):
media_pool = project.GetMediaPool()
root_folder = media_pool.GetRootFolder()
ls_folder = root_folder.GetClipList()
timeline = project.GetCurrentTimeline()
timeline_name = timeline.GetName()
for tl in ls_folder:
if tl.GetName() not in timeline_name:
continue
return _get_metadata_media_pool_item(tl)
def _get_metadata_media_pool_item(media_pool_item):
data = dict()
data.update({k: v for k, v in media_pool_item.GetMetadata().items()})
clip_property = media_pool_item.GetClipProperty() or {}
for name, value in clip_property.items():
if "Resolution" in name and "" != value:
width, height = value.split("x")
data.update({
"width": int(width),
"height": int(height)
})
if "PAR" in name and "" != value:
try:
data.update({"pixelAspect": float(value)})
except ValueError:
if "Square" in value:
data.update({"pixelAspect": float(1)})
else:
data.update({"pixelAspect": float(1)})
return data
def create_otio_track(track_type, track_name):
return otio.schema.Track(
name=track_name,
kind=self.track_types[track_type]
)
def add_otio_gap(clip_start, otio_track, track_item, timeline):
# if gap between track start and clip start
if clip_start > otio_track.available_range().duration.value:
# create gap and add it to track
otio_track.append(
create_otio_gap(
otio_track.available_range().duration.value,
track_item.GetStart(),
timeline.GetStartFrame(),
self.project_fps
)
)
def add_otio_metadata(otio_item, media_pool_item, **kwargs):
mp_metadata = media_pool_item.GetMetadata()
# add additional metadata from kwargs
if kwargs:
mp_metadata.update(kwargs)
# add metadata to otio item metadata
for key, value in mp_metadata.items():
otio_item.metadata.update({key: value})
def create_otio_timeline(resolve_project):
# get current timeline
self.project_fps = resolve_project.GetSetting("timelineFrameRate")
timeline = resolve_project.GetCurrentTimeline()
# convert timeline to otio
otio_timeline = _create_otio_timeline(
resolve_project, timeline, self.project_fps)
# loop all defined track types
for track_type in list(self.track_types.keys()):
# get total track count
track_count = timeline.GetTrackCount(track_type)
# loop all tracks by track indexes
for track_index in range(1, int(track_count) + 1):
# get current track name
track_name = timeline.GetTrackName(track_type, track_index)
# convert track to otio
otio_track = create_otio_track(
track_type, track_name)
# get all track items in current track
current_track_items = timeline.GetItemListInTrack(
track_type, track_index)
# loop available track items in current track items
for track_item in current_track_items:
# skip offline track items
if track_item.GetMediaPoolItem() is None:
continue
# calculate real clip start
clip_start = track_item.GetStart() - timeline.GetStartFrame()
add_otio_gap(
clip_start, otio_track, track_item, timeline)
# create otio clip and add it to track
otio_clip = create_otio_clip(track_item)
if not isinstance(otio_clip, list):
otio_track.append(otio_clip)
else:
for index, clip in enumerate(otio_clip):
if index == 0:
otio_track.append(clip)
else:
# add previous otio track to timeline
otio_timeline.tracks.append(otio_track)
# convert track to otio
otio_track = create_otio_track(
track_type, track_name)
add_otio_gap(
clip_start, otio_track,
track_item, timeline)
otio_track.append(clip)
# add track to otio timeline
otio_timeline.tracks.append(otio_track)
return otio_timeline
def write_to_file(otio_timeline, path):
otio.adapters.write_to_file(otio_timeline, path)
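
A hedged end-to-end use of this exporter; it only runs inside a Resolve scripting session, and the bmdvr handle is assumed to be exposed the same way lib.py above imports it:

from pype.hosts.resolve import bmdvr  # assumed Resolve scripting app handle
from pype.hosts.resolve.otio import davinci_export

resolve_project = bmdvr.GetProjectManager().GetCurrentProject()
otio_timeline = davinci_export.create_otio_timeline(resolve_project)
davinci_export.write_to_file(otio_timeline, "/tmp/current_timeline.otio")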


@@ -0,0 +1,108 @@
import sys
import json
import DaVinciResolveScript
import opentimelineio as otio
self = sys.modules[__name__]
self.resolve = DaVinciResolveScript.scriptapp('Resolve')
self.fusion = DaVinciResolveScript.scriptapp('Fusion')
self.project_manager = self.resolve.GetProjectManager()
self.current_project = self.project_manager.GetCurrentProject()
self.media_pool = self.current_project.GetMediaPool()
self.track_types = {
"video": otio.schema.TrackKind.Video,
"audio": otio.schema.TrackKind.Audio
}
self.project_fps = None
def build_timeline(otio_timeline):
# TODO: build timeline in mediapool `otioImport` folder
# TODO: loop otio tracks and build them in the new timeline
for clip in otio_timeline.each_clip():
# TODO: create track item
print(clip.name)
print(clip.parent().name)
print(clip.range_in_parent())
def _build_track(otio_track):
# TODO: _build_track
pass
def _build_media_pool_item(otio_media_reference):
# TODO: _build_media_pool_item
pass
def _build_track_item(otio_clip):
# TODO: _build_track_item
pass
def _build_gap(otio_clip):
# TODO: _build_gap
pass
def _build_marker(track_item, otio_marker):
frame_start = otio_marker.marked_range.start_time.value
frame_duration = otio_marker.marked_range.duration.value
# marker attributes
frameId = (frame_start / 10) * 10
color = otio_marker.color
name = otio_marker.name
note = otio_marker.metadata.get("note") or json.dumps(otio_marker.metadata)
duration = (frame_duration / 10) * 10
track_item.AddMarker(
frameId,
color,
name,
note,
duration
)
def _build_media_pool_folder(name):
"""
Returns folder with input name and sets it as current folder.
It will create a new media bin if none is found in the root media bin
Args:
name (str): name of bin
Returns:
resolve.api.MediaPool.Folder: media pool folder object
"""
root_folder = self.media_pool.GetRootFolder()
sub_folders = root_folder.GetSubFolderList()
testing_names = list()
for subfolder in sub_folders:
subf_name = subfolder.GetName()
if name in subf_name:
testing_names.append(subfolder)
else:
testing_names.append(False)
matching = next((f for f in testing_names if f is not False), None)
if not matching:
new_folder = self.media_pool.AddSubFolder(root_folder, name)
self.media_pool.SetCurrentFolder(new_folder)
else:
self.media_pool.SetCurrentFolder(matching)
return self.media_pool.GetCurrentFolder()
def read_from_file(otio_file):
otio_timeline = otio.adapters.read_from_file(otio_file)
build_timeline(otio_timeline)
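
Only the media-bin helper has a body so far; a sketch of how read_from_file() could grow once the TODO stubs are filled in, with the bin name taken from the TODO in build_timeline() above:

# ensure the target bin exists and is current before building anything
import_folder = _build_media_pool_folder("otioImport")
otio_timeline = otio.adapters.read_from_file("/tmp/current_timeline.otio")
for clip in otio_timeline.each_clip():
    # a future _build_track_item(clip) call would go here
    print(clip.name, clip.range_in_parent())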


@@ -0,0 +1,63 @@
import re
import opentimelineio as otio
def timecode_to_frames(timecode, framerate):
rt = otio.opentime.from_timecode(timecode, framerate)
return int(otio.opentime.to_frames(rt))
def frames_to_timecode(frames, framerate):
rt = otio.opentime.from_frames(frames, framerate)
return otio.opentime.to_timecode(rt)
def frames_to_secons(frames, framerate):
rt = otio.opentime.from_frames(frames, framerate)
return otio.opentime.to_seconds(rt)
def get_reformated_path(path, padded=True):
"""
Return path with the frame-range token replaced by a printf-style expression
Args:
path (str): path url or simple file name
Returns:
str: string with reformatted path
Example:
get_reformated_path("plate.[0001-1008].exr") > plate.%04d.exr
"""
num_pattern = "(\\[\\d+\\-\\d+\\])"
padding_pattern = "(\\d+)(?=-)"
if "[" in path:
padding = len(re.findall(padding_pattern, path).pop())
if padded:
path = re.sub(num_pattern, f"%0{padding}d", path)
else:
path = re.sub(num_pattern, "%d", path)
return path
def get_padding_from_path(path):
"""
Return padding number from DaVinci Resolve sequence path style
Args:
path (str): path url or simple file name
Returns:
int: padding number
Example:
get_padding_from_path("plate.[0001-1008].exr") > 4
"""
padding_pattern = "(\\d+)(?=-)"
if "[" in path:
return len(re.findall(padding_pattern, path).pop())
return None
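
Quick sanity checks for the helpers above; plain Python, no Resolve required:

assert get_padding_from_path("plate.[0001-1008].exr") == 4
assert get_padding_from_path("plate.exr") is None
assert get_reformated_path("plate.[0001-1008].exr") == "plate.%04d.exr"
assert get_reformated_path("plate.[0001-1008].exr", padded=False) == "plate.%d.exr"
print(timecode_to_frames("00:00:01:00", 24))  # 24 frames at 24 fps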


@@ -3,13 +3,17 @@ Basic avalon integration
"""
import os
import contextlib
from collections import OrderedDict
from avalon.tools import workfiles
from avalon import api as avalon
from avalon import schema
from avalon.pipeline import AVALON_CONTAINER_ID
from pyblish import api as pyblish
import pype
from pype.api import Logger
from . import lib
log = Logger().get_logger(__name__, "resolve")
log = Logger().get_logger(__name__)
AVALON_CONFIG = os.environ["AVALON_CONFIG"]
@@ -57,6 +61,9 @@ def install():
avalon.register_plugin_path(avalon.Creator, CREATE_PATH)
avalon.register_plugin_path(avalon.InventoryAction, INVENTORY_PATH)
# register callback for switching publishable
pyblish.register_callback("instanceToggled", on_pyblish_instance_toggled)
get_resolve_module()
@@ -79,30 +86,50 @@ def uninstall():
avalon.deregister_plugin_path(avalon.Creator, CREATE_PATH)
avalon.deregister_plugin_path(avalon.InventoryAction, INVENTORY_PATH)
# deregister callback for switching publishable
pyblish.deregister_callback("instanceToggled", on_pyblish_instance_toggled)
def containerise(obj,
def containerise(track_item,
name,
namespace,
context,
loader=None,
data=None):
"""Bundle Resolve's object into an assembly and imprint it with metadata
"""Bundle Hiero's object into an assembly and imprint it with metadata
Containerisation enables a tracking of version, author and origin
for loaded assets.
Arguments:
obj (obj): Resolve's object to imprint as container
track_item (resolve.TimelineItem): object to imprint as container
name (str): Name of resulting assembly
namespace (str): Namespace under which to host container
context (dict): Asset information
loader (str, optional): Name of node used to produce this container.
Returns:
obj (obj): containerised object
track_item (resolve.TimelineItem): containerised object
"""
pass
data_imprint = OrderedDict({
"schema": "avalon-core:container-2.0",
"id": AVALON_CONTAINER_ID,
"name": str(name),
"namespace": str(namespace),
"loader": str(loader),
"representation": str(context["representation"]["_id"]),
})
if data:
for k, v in data.items():
data_imprint.update({k: v})
print("_ data_imprint: {}".format(data_imprint))
lib.set_track_item_pype_tag(track_item, data_imprint)
return track_item
def ls():
@@ -115,20 +142,77 @@ def ls():
See the `container.json` schema for details on how it should look,
and the Maya equivalent, which is in `avalon.maya.pipeline`
"""
pass
# get all track items from current timeline
all_track_items = lib.get_current_track_items(filter=False)
for track_item_data in all_track_items:
track_item = track_item_data["clip"]["item"]
container = parse_container(track_item)
if container:
yield container
def parse_container(container):
"""Return the container node's full container data.
def parse_container(track_item, validate=True):
"""Return container data from track_item's pype tag.
Args:
container (str): A container node name.
track_item (resolve.TimelineItem): A containerised track item.
validate (bool)[optional]: validate data against the avalon schema
Returns:
dict: The container schema data for this container node.
dict: The container schema data for input containerized track item.
"""
pass
# convert tag metadata to normal key names
data = lib.get_track_item_pype_tag(track_item)
if validate and data and data.get("schema"):
schema.validate(data)
if not isinstance(data, dict):
return
# If not all required data return the empty container
required = ['schema', 'id', 'name',
'namespace', 'loader', 'representation']
if not all(key in data for key in required):
return
container = {key: data[key] for key in required}
container["objectName"] = track_item.name()
# Store reference to the node object
container["_track_item"] = track_item
return container
def update_container(track_item, data=None):
"""Update container data to input track_item's pype tag.
Args:
track_item (resolve.TimelineItem): A containerised track item.
data (dict)[optional]: dictionary with data to be updated
Returns:
bool: True if container was updated correctly
"""
data = data or dict()
container = lib.get_track_item_pype_tag(track_item)
for _key, _value in container.items():
try:
container[_key] = data[_key]
except KeyError:
pass
log.info("Updating container: `{}`".format(track_item))
return bool(lib.set_track_item_pype_tag(track_item, container))
def launch_workfiles_app(*args):
@@ -163,3 +247,18 @@ def reset_selection():
"""Deselect all selected nodes
"""
pass
def on_pyblish_instance_toggled(instance, old_value, new_value):
"""Toggle node passthrough states on instance toggles."""
log.info("instance toggle: {}, old_value: {}, new_value:{} ".format(
instance, old_value, new_value))
from pype.hosts.resolve import (
set_publish_attribute
)
# Whether instances should be passthrough based on new value
track_item = instance.data["item"]
set_publish_attribute(track_item, new_value)
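
update_container() above only overwrites keys that already exist in the stored tag and silently ignores the rest; that merge rule in isolation:

container = {"name": "plateMain", "loader": "SequenceLoader"}
data = {"name": "plateHero", "extra": "silently ignored"}
for key in container:
    if key in data:
        container[key] = data[key]
assert container == {"name": "plateHero", "loader": "SequenceLoader"}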


@@ -2,7 +2,7 @@ import re
from avalon import api
from pype.hosts import resolve
from avalon.vendor import qargparse
from pype.api import config
from . import lib
from Qt import QtWidgets, QtCore
@@ -12,7 +12,7 @@ class CreatorWidget(QtWidgets.QDialog):
# output items
items = dict()
def __init__(self, name, info, presets, parent=None):
def __init__(self, name, info, ui_inputs, parent=None):
super(CreatorWidget, self).__init__(parent)
self.setObjectName(name)
@@ -25,6 +25,7 @@ class CreatorWidget(QtWidgets.QDialog):
| QtCore.Qt.WindowStaysOnTopHint
)
self.setWindowTitle(name or "Pype Creator Input")
self.resize(500, 700)
# Where inputs and labels are set
self.content_widget = [QtWidgets.QWidget(self)]
@@ -35,14 +36,25 @@ class CreatorWidget(QtWidgets.QDialog):
# first add widget tag line
top_layout.addWidget(QtWidgets.QLabel(info))
top_layout.addWidget(Spacer(5, self))
# main dynamic layout
self.content_widget.append(QtWidgets.QWidget(self))
content_layout = QtWidgets.QFormLayout(self.content_widget[-1])
self.scroll_area = QtWidgets.QScrollArea(self, widgetResizable=True)
self.scroll_area.setVerticalScrollBarPolicy(
QtCore.Qt.ScrollBarAsNeeded)
self.scroll_area.setVerticalScrollBarPolicy(
QtCore.Qt.ScrollBarAlwaysOn)
self.scroll_area.setHorizontalScrollBarPolicy(
QtCore.Qt.ScrollBarAlwaysOff)
self.scroll_area.setWidgetResizable(True)
self.content_widget.append(self.scroll_area)
scroll_widget = QtWidgets.QWidget(self)
in_scroll_area = QtWidgets.QVBoxLayout(scroll_widget)
self.content_layout = [in_scroll_area]
# add preset data into input widget layout
self.items = self.add_presets_to_layout(content_layout, presets)
self.items = self.populate_widgets(ui_inputs)
self.scroll_area.setWidget(scroll_widget)
# Confirmation buttons
btns_widget = QtWidgets.QWidget(self)
@@ -79,20 +91,33 @@ class CreatorWidget(QtWidgets.QDialog):
self.result = None
self.close()
def value(self, data):
def value(self, data, new_data=None):
new_data = new_data or dict()
for k, v in data.items():
if isinstance(v, dict):
print(f"nested: {k}")
data[k] = self.value(v)
elif getattr(v, "value", None):
print(f"normal int: {k}")
result = v.value()
data[k] = result()
else:
print(f"normal text: {k}")
result = v.text()
data[k] = result()
return data
new_data[k] = {
"target": None,
"value": None
}
if v["type"] == "dict":
new_data[k]["target"] = v["target"]
new_data[k]["value"] = self.value(v["value"])
if v["type"] == "section":
new_data.pop(k)
new_data = self.value(v["value"], new_data)
elif getattr(v["value"], "currentText", None):
new_data[k]["target"] = v["target"]
new_data[k]["value"] = v["value"].currentText()
elif getattr(v["value"], "isChecked", None):
new_data[k]["target"] = v["target"]
new_data[k]["value"] = v["value"].isChecked()
elif getattr(v["value"], "value", None):
new_data[k]["target"] = v["target"]
new_data[k]["value"] = v["value"].value()
elif getattr(v["value"], "text", None):
new_data[k]["target"] = v["target"]
new_data[k]["value"] = v["value"].text()
return new_data
def camel_case_split(self, text):
matches = re.finditer(
@@ -124,41 +149,115 @@ class CreatorWidget(QtWidgets.QDialog):
for func, val in kwargs.items():
if getattr(item, func):
func_attr = getattr(item, func)
func_attr(val)
if isinstance(val, tuple):
func_attr(*val)
else:
func_attr(val)
# add to layout
layout.addRow(label, item)
return item
def add_presets_to_layout(self, content_layout, data):
def populate_widgets(self, data, content_layout=None):
"""
Populate widget from input dict.
Each plugin has its own set of widget rows defined in a dictionary;
each row value should have the following keys: `type`, `target`,
`label`, `order`, `value` and optionally also `toolTip`.
Args:
data (dict): widget rows or organized groups defined
by types `dict` or `section`
content_layout (QtWidgets.QFormLayout)[optional]: used when nesting
Returns:
dict: redefined data dict updated with created widgets
"""
content_layout = content_layout or self.content_layout[-1]
# fix order of process by defined order value
ordered_keys = list(data.keys())
for k, v in data.items():
if isinstance(v, dict):
try:
# remove the key from its current position so it can be
# re-inserted at its defined `order` index
ordered_keys.pop(v["order"])
except IndexError:
pass
# add key into correct order
ordered_keys.insert(v["order"], k)
# process ordered
for k in ordered_keys:
v = data[k]
tool_tip = v.get("toolTip", "")
if v["type"] == "dict":
# adding spacer between sections
self.content_widget.append(QtWidgets.QWidget(self))
divider = QtWidgets.QVBoxLayout(self.content_widget[-1])
divider.addWidget(Spacer(5, self))
divider.setObjectName("Divider")
self.content_layout.append(QtWidgets.QWidget(self))
content_layout.addWidget(self.content_layout[-1])
self.content_layout[-1].setObjectName("sectionHeadline")
headline = QtWidgets.QVBoxLayout(self.content_layout[-1])
headline.addWidget(Spacer(20, self))
headline.addWidget(QtWidgets.QLabel(v["label"]))
# adding nested layout with label
self.content_widget.append(QtWidgets.QWidget(self))
self.content_layout.append(QtWidgets.QWidget(self))
self.content_layout[-1].setObjectName("sectionContent")
nested_content_layout = QtWidgets.QFormLayout(
self.content_widget[-1])
self.content_layout[-1])
nested_content_layout.setObjectName("NestedContentLayout")
content_layout.addWidget(self.content_layout[-1])
# add nested key as label
self.create_row(nested_content_layout, "QLabel", k)
data[k] = self.add_presets_to_layout(nested_content_layout, v)
elif isinstance(v, str):
print(f"layout.str: {k}")
print(f"content_layout: {content_layout}")
data[k] = self.create_row(
content_layout, "QLineEdit", k, setText=v)
elif isinstance(v, int):
print(f"layout.int: {k}")
print(f"content_layout: {content_layout}")
data[k] = self.create_row(
content_layout, "QSpinBox", k, setValue=v)
data[k]["value"] = self.populate_widgets(
v["value"], nested_content_layout)
if v["type"] == "section":
# adding spacer between sections
self.content_layout.append(QtWidgets.QWidget(self))
content_layout.addWidget(self.content_layout[-1])
self.content_layout[-1].setObjectName("sectionHeadline")
headline = QtWidgets.QVBoxLayout(self.content_layout[-1])
headline.addWidget(Spacer(20, self))
headline.addWidget(QtWidgets.QLabel(v["label"]))
# adding nested layout with label
self.content_layout.append(QtWidgets.QWidget(self))
self.content_layout[-1].setObjectName("sectionContent")
nested_content_layout = QtWidgets.QFormLayout(
self.content_layout[-1])
nested_content_layout.setObjectName("NestedContentLayout")
content_layout.addWidget(self.content_layout[-1])
# add nested key as label
data[k]["value"] = self.populate_widgets(
v["value"], nested_content_layout)
elif v["type"] == "QLineEdit":
data[k]["value"] = self.create_row(
content_layout, "QLineEdit", v["label"],
setText=v["value"], setToolTip=tool_tip)
elif v["type"] == "QComboBox":
data[k]["value"] = self.create_row(
content_layout, "QComboBox", v["label"],
addItems=v["value"], setToolTip=tool_tip)
elif v["type"] == "QCheckBox":
data[k]["value"] = self.create_row(
content_layout, "QCheckBox", v["label"],
setChecked=v["value"], setToolTip=tool_tip)
elif v["type"] == "QSpinBox":
data[k]["value"] = self.create_row(
content_layout, "QSpinBox", v["label"],
setRange=(0, 99999),
setValue=v["value"],
setToolTip=tool_tip)
return data
@@ -179,20 +278,6 @@ class Spacer(QtWidgets.QWidget):
self.setLayout(layout)
def get_reference_node_parents(ref):
"""Return all parent reference nodes of reference node
Args:
ref (str): reference node.
Returns:
list: The upstream parent reference nodes.
"""
parents = []
return parents
class SequenceLoader(api.Loader):
"""A basic SequenceLoader for Resolve
@@ -258,8 +343,12 @@ class Creator(api.Creator):
def __init__(self, *args, **kwargs):
super(Creator, self).__init__(*args, **kwargs)
self.presets = config.get_presets()['plugins']["resolve"][
"create"].get(self.__class__.__name__, {})
from pype.api import get_current_project_settings
resolve_p_settings = get_current_project_settings().get("resolve")
self.presets = dict()
if resolve_p_settings:
self.presets = resolve_p_settings["create"].get(
self.__class__.__name__, {})
# adding basic current context resolve objects
self.project = resolve.get_current_project()
@@ -271,3 +360,310 @@ class Creator(api.Creator):
self.selected = resolve.get_current_track_items(filter=False)
self.widget = CreatorWidget
class PublishClip:
"""
Convert a track item to publishable instance
Args:
track_item_data (dict): track item data as returned by
resolve.get_current_track_items()
kwargs (optional): additional data (ui_inputs, avalon, mp_folder)
Returns:
resolve.TimelineItem: resolve track item object with pype tag
"""
vertical_clip_match = dict()
tag_data = dict()
types = {
"shot": "shot",
"folder": "folder",
"episode": "episode",
"sequence": "sequence",
"track": "sequence",
}
# parents search pattern
parents_search_patern = r"\{([a-z]*?)\}"
# default templates for non-ui use
rename_default = False
hierarchy_default = "{_folder_}/{_sequence_}/{_track_}"
clip_name_default = "shot_{_trackIndex_:0>3}_{_clipIndex_:0>4}"
subset_name_default = "<track_name>"
review_track_default = "< none >"
subset_family_default = "plate"
count_from_default = 10
count_steps_default = 10
vertical_sync_default = False
driving_layer_default = ""
def __init__(self, cls, track_item_data, **kwargs):
# populate input cls attribute onto self.[attr]
self.__dict__.update(cls.__dict__)
# get main parent objects
self.track_item_data = track_item_data
self.track_item = track_item_data["clip"]["item"]
sequence_name = track_item_data["sequence"].GetName()
self.sequence_name = str(sequence_name).replace(" ", "_")
# track item (clip) main attributes
self.ti_name = self.track_item.GetName()
self.ti_index = int(track_item_data["clip"]["index"])
# get track name and index
track_name = track_item_data["track"]["name"]
self.track_name = str(track_name).replace(" ", "_")
self.track_index = int(track_item_data["track"]["index"])
# adding tag.family into tag
if kwargs.get("avalon"):
self.tag_data.update(kwargs["avalon"])
# adding ui inputs if any
self.ui_inputs = kwargs.get("ui_inputs", {})
# adding media pool folder if any
self.mp_folder = kwargs.get("mp_folder")
# populate default data before we get other attributes
self._populate_track_item_default_data()
# use all populated default data to create all important attributes
self._populate_attributes()
# create parents with correct types
self._create_parents()
def convert(self):
# solve track item data and add them to tag data
self._convert_to_tag_data()
# if track name is in review track name and also if driving track name
# is not in review track name: skip tag creation
if (self.track_name in self.review_layer) and (
self.driving_layer not in self.review_layer):
return
# deal with clip name
new_name = self.tag_data.pop("newClipName")
if self.rename:
self.tag_data["asset"] = new_name
else:
self.tag_data["asset"] = self.ti_name
if not lib.pype_marker_workflow:
# create compound clip workflow
lib.create_compound_clip(
self.track_item_data,
self.tag_data["asset"],
self.mp_folder
)
# add track_item_data selection to tag
self.tag_data.update({
"track_data": self.track_item_data["track"]
})
# create pype tag on track_item and add data
lib.imprint(self.track_item, self.tag_data)
return self.track_item
def _populate_track_item_default_data(self):
""" Populate default formating data from track item. """
self.track_item_default_data = {
"_folder_": "shots",
"_sequence_": self.sequence_name,
"_track_": self.track_name,
"_clip_": self.ti_name,
"_trackIndex_": self.track_index,
"_clipIndex_": self.ti_index
}
def _populate_attributes(self):
""" Populate main object attributes. """
# track item frame range and parent track name for vertical sync check
self.clip_in = int(self.track_item.GetStart())
self.clip_out = int(self.track_item.GetEnd())
# define ui inputs if non gui mode was used
self.shot_num = self.ti_index
print(
"____ self.shot_num: {}".format(self.shot_num))
# ui_inputs data or default values if gui was not used
self.rename = self.ui_inputs.get(
"clipRename", {}).get("value") or self.rename_default
self.clip_name = self.ui_inputs.get(
"clipName", {}).get("value") or self.clip_name_default
self.hierarchy = self.ui_inputs.get(
"hierarchy", {}).get("value") or self.hierarchy_default
self.hierarchy_data = self.ui_inputs.get(
"hierarchyData", {}).get("value") or \
self.track_item_default_data.copy()
self.count_from = self.ui_inputs.get(
"countFrom", {}).get("value") or self.count_from_default
self.count_steps = self.ui_inputs.get(
"countSteps", {}).get("value") or self.count_steps_default
self.subset_name = self.ui_inputs.get(
"subsetName", {}).get("value") or self.subset_name_default
self.subset_family = self.ui_inputs.get(
"subsetFamily", {}).get("value") or self.subset_family_default
self.vertical_sync = self.ui_inputs.get(
"vSyncOn", {}).get("value") or self.vertical_sync_default
self.driving_layer = self.ui_inputs.get(
"vSyncTrack", {}).get("value") or self.driving_layer_default
self.review_track = self.ui_inputs.get(
"reviewTrack", {}).get("value") or self.review_track_default
# build subset name from layer name
if self.subset_name == "<track_name>":
self.subset_name = self.track_name
# create subset for publishing
self.subset = self.subset_family + self.subset_name.capitalize()
def _replace_hash_to_expression(self, name, text):
""" Replace hash with number in correct padding. """
_spl = text.split("#")
_len = (len(_spl) - 1)
_repl = "{{{0}:0>{1}}}".format(name, _len)
new_text = text.replace(("#" * _len), _repl)
return new_text
def _convert_to_tag_data(self):
""" Convert internal data to tag data.
Populating the tag data into internal variable self.tag_data
"""
# define vertical sync attributes
master_layer = True
self.review_layer = ""
if self.vertical_sync:
# check if track name is not in driving layer
if self.track_name not in self.driving_layer:
# if it is not then define vertical sync as None
master_layer = False
# increasing steps by index of rename iteration
self.count_steps *= self.rename_index
hierarchy_formating_data = dict()
_data = self.track_item_default_data.copy()
if self.ui_inputs:
# adding tag metadata from ui
for _k, _v in self.ui_inputs.items():
if _v["target"] == "tag":
self.tag_data[_k] = _v["value"]
# driving layer is set as positive match
if master_layer or self.vertical_sync:
# mark review layer
if self.review_track and (
self.review_track not in self.review_track_default):
# if review layer is defined and not the same as default
self.review_layer = self.review_track
# shot num calculate
if self.rename_index == 0:
self.shot_num = self.count_from
else:
self.shot_num = self.count_from + self.count_steps
# clip name sequence number
_data.update({"shot": self.shot_num})
# solve # in test to pythonic expression
for _k, _v in self.hierarchy_data.items():
if "#" not in _v["value"]:
continue
self.hierarchy_data[
_k]["value"] = self._replace_hash_to_expression(
_k, _v["value"])
# fill up pythonic expressions in hierarchy data
for k, _v in self.hierarchy_data.items():
hierarchy_formating_data[k] = _v["value"].format(**_data)
else:
# if no gui mode then just pass default data
hierarchy_formating_data = self.hierarchy_data
tag_hierarchy_data = self._solve_tag_hierarchy_data(
hierarchy_formating_data
)
tag_hierarchy_data.update({"masterLayer": True})
if master_layer and self.vertical_sync:
# tag_hierarchy_data.update({"masterLayer": True})
self.vertical_clip_match.update({
(self.clip_in, self.clip_out): tag_hierarchy_data
})
if not master_layer and self.vertical_sync:
# driving layer is set as negative match
for (_in, _out), master_data in self.vertical_clip_match.items():
master_data.update({"masterLayer": False})
if _in == self.clip_in and _out == self.clip_out:
data_subset = master_data["subset"]
# add track index in case of duplicate names in master data
if self.subset in data_subset:
master_data["subset"] = self.subset + str(
self.track_index)
# if track name and subset name are the same, keep the subset
if self.subset_name == self.track_name:
master_data["subset"] = self.subset
# assign data to return hierarchy data to tag
tag_hierarchy_data = master_data
# add data to return data dict
self.tag_data.update(tag_hierarchy_data)
if master_layer and self.review_layer:
self.tag_data.update({"reviewTrack": self.review_layer})
def _solve_tag_hierarchy_data(self, hierarchy_formatting_data):
""" Solve tag data from hierarchy data and templates. """
# fill up clip name and hierarchy keys
hierarchy_filled = self.hierarchy.format(**hierarchy_formatting_data)
clip_name_filled = self.clip_name.format(**hierarchy_formatting_data)
return {
"newClipName": clip_name_filled,
"hierarchy": hierarchy_filled,
"parents": self.parents,
"hierarchyData": hierarchy_formating_data,
"subset": self.subset,
"family": self.subset_family,
"families": ["clip"]
}
def _convert_to_entity(self, key):
""" Converting input key to key with type. """
# convert to entity type
entity_type = self.types.get(key, None)
assert entity_type, "Missing entity type for `{}`".format(
key
)
return {
"entity_type": entity_type,
"entity_name": self.hierarchy_data[key]["value"].format(
**self.track_item_default_data
)
}
def _create_parents(self):
""" Create parents and return it in list. """
self.parents = list()
patern = re.compile(self.parents_search_patern)
par_split = [patern.findall(t).pop()
for t in self.hierarchy.split("/")]
for key in par_split:
parent = self._convert_to_entity(key)
self.parents.append(parent)
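# Hedged sketch (not part of the original file; the real pattern lives in
# `self.parents_search_patern` and is assumed here to capture template keys,
# e.g. r"\{([a-z]*)\}"): a hierarchy template like "{folder}/{sequence}"
# splits into the keys ["folder", "sequence"], each of which is then
# converted by `_convert_to_entity` into
# {"entity_type": ..., "entity_name": ...}.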

View file

@ -3,7 +3,7 @@ import time
from pype.hosts.resolve.utils import get_resolve_module
from pype.api import Logger
log = Logger().get_logger(__name__, "resolve")
log = Logger().get_logger(__name__)
wait_delay = 2.5
wait = 0.00

View file

@ -0,0 +1,134 @@
#!/usr/bin/env python
# TODO: convert this script to be usable with PYPE
"""
Example DaVinci Resolve script:
Load a still from DRX file, apply the still to all clips in all timelines.
Set render format and codec, add render jobs for all timelines, render
to specified path and wait for rendering completion.
Once render is complete, delete all jobs
"""
# cloned from: https://github.com/survos/transcribe/blob/fe3cf51eb95b82dabcf21fbe5f89bfb3d8bb6ce2/python/3_grade_and_render_all_timelines.py # noqa
from python_get_resolve import GetResolve
import sys
import time
def AddTimelineToRender(project, timeline, presetName,
targetDirectory, renderFormat, renderCodec):
project.SetCurrentTimeline(timeline)
project.LoadRenderPreset(presetName)
if not project.SetCurrentRenderFormatAndCodec(renderFormat, renderCodec):
return False
project.SetRenderSettings(
{"SelectAllFrames": 1, "TargetDir": targetDirectory})
return project.AddRenderJob()
def RenderAllTimelines(resolve, presetName, targetDirectory,
renderFormat, renderCodec):
projectManager = resolve.GetProjectManager()
project = projectManager.GetCurrentProject()
if not project:
return False
resolve.OpenPage("Deliver")
timelineCount = project.GetTimelineCount()
for index in range(0, int(timelineCount)):
if not AddTimelineToRender(
project,
project.GetTimelineByIndex(index + 1),
presetName,
targetDirectory,
renderFormat,
renderCodec):
return False
return project.StartRendering()
def IsRenderingInProgress(resolve):
projectManager = resolve.GetProjectManager()
project = projectManager.GetCurrentProject()
if not project:
return False
return project.IsRenderingInProgress()
def WaitForRenderingCompletion(resolve):
while IsRenderingInProgress(resolve):
time.sleep(1)
return
def ApplyDRXToAllTimelineClips(timeline, path, gradeMode=0):
trackCount = timeline.GetTrackCount("video")
clips = {}
for index in range(1, int(trackCount) + 1):
clips.update(timeline.GetItemsInTrack("video", index))
return timeline.ApplyGradeFromDRX(path, int(gradeMode), clips)
def ApplyDRXToAllTimelines(resolve, path, gradeMode=0):
projectManager = resolve.GetProjectManager()
project = projectManager.GetCurrentProject()
if not project:
return False
timelineCount = project.GetTimelineCount()
for index in range(0, int(timelineCount)):
timeline = project.GetTimelineByIndex(index + 1)
project.SetCurrentTimeline(timeline)
if not ApplyDRXToAllTimelineClips(timeline, path, gradeMode):
return False
return True
def DeleteAllRenderJobs(resolve):
projectManager = resolve.GetProjectManager()
project = projectManager.GetCurrentProject()
project.DeleteAllRenderJobs()
return
# Inputs:
# - DRX file to import grade still and apply it for clips
# - grade mode (0, 1 or 2)
# - preset name for rendering
# - render path
# - render format
# - render codec
if len(sys.argv) < 7:
print(
"input parameters for scripts are [drx file path] [grade mode] "
"[render preset name] [render path] [render format] [render codec]")
sys.exit()
drxPath = sys.argv[1]
gradeMode = sys.argv[2]
renderPresetName = sys.argv[3]
renderPath = sys.argv[4]
renderFormat = sys.argv[5]
renderCodec = sys.argv[6]
# Get currently open project
resolve = GetResolve()
if not ApplyDRXToAllTimelines(resolve, drxPath, gradeMode):
print("Unable to apply a still from drx file to all timelines")
sys.exit()
if not RenderAllTimelines(resolve, renderPresetName, renderPath,
renderFormat, renderCodec):
print("Unable to set all timelines for rendering")
sys.exit()
WaitForRenderingCompletion(resolve)
DeleteAllRenderJobs(resolve)
print("Rendering is completed.")

View file

@ -0,0 +1,84 @@
#!/usr/bin/env python
import os
from pype.hosts.resolve.otio import davinci_export as otio_export
resolve = bmd.scriptapp("Resolve") # noqa
fu = resolve.Fusion()
ui = fu.UIManager
disp = bmd.UIDispatcher(fu.UIManager) # noqa
title_font = ui.Font({"PixelSize": 18})
dlg = disp.AddWindow(
{
"WindowTitle": "Export OTIO",
"ID": "OTIOwin",
"Geometry": [250, 250, 250, 100],
"Spacing": 0,
"Margin": 10
},
[
ui.VGroup(
{
"Spacing": 2
},
[
ui.Button(
{
"ID": "exportfilebttn",
"Text": "Select Destination",
"Weight": 1.25,
"ToolTip": "Choose where to save the otio",
"Flat": False
}
),
ui.VGap(),
ui.Button(
{
"ID": "exportbttn",
"Text": "Export",
"Weight": 2,
"ToolTip": "Export the current timeline",
"Flat": False
}
)
]
)
]
)
itm = dlg.GetItems()
def _close_window(event):
disp.ExitLoop()
def _export_button(event):
pm = resolve.GetProjectManager()
project = pm.GetCurrentProject()
fps = project.GetSetting("timelineFrameRate")
timeline = project.GetCurrentTimeline()
otio_timeline = otio_export.create_otio_timeline(timeline, fps)
otio_path = os.path.join(
itm["exportfilebttn"].Text,
timeline.GetName() + ".otio")
print(otio_path)
otio_export.write_to_file(
otio_timeline,
otio_path)
_close_window(None)
def _export_file_pressed(event):
selectedPath = fu.RequestDir(os.path.expanduser("~/Documents"))
itm["exportfilebttn"].Text = selectedPath
dlg.On.OTIOwin.Close = _close_window
dlg.On.exportfilebttn.Clicked = _export_file_pressed
dlg.On.exportbttn.Clicked = _export_button
dlg.Show()
disp.RunLoop()
dlg.Hide()

View file

@ -0,0 +1,72 @@
#!/usr/bin/env python
import os
from pype.hosts.resolve.otio import davinci_import as otio_import
resolve = bmd.scriptapp("Resolve") # noqa
fu = resolve.Fusion()
ui = fu.UIManager
disp = bmd.UIDispatcher(fu.UIManager) # noqa
title_font = ui.Font({"PixelSize": 18})
dlg = disp.AddWindow(
{
"WindowTitle": "Import OTIO",
"ID": "OTIOwin",
"Geometry": [250, 250, 250, 100],
"Spacing": 0,
"Margin": 10
},
[
ui.VGroup(
{
"Spacing": 2
},
[
ui.Button(
{
"ID": "importOTIOfileButton",
"Text": "Select OTIO File Path",
"Weight": 1.25,
"ToolTip": "Choose otio file to import from",
"Flat": False
}
),
ui.VGap(),
ui.Button(
{
"ID": "importButton",
"Text": "Import",
"Weight": 2,
"ToolTip": "Import otio to new timeline",
"Flat": False
}
)
]
)
]
)
itm = dlg.GetItems()
def _close_window(event):
disp.ExitLoop()
def _import_button(event):
otio_import.read_from_file(itm["importOTIOfileButton"].Text)
_close_window(None)
def _import_file_pressed(event):
selected_path = fu.RequestFile(os.path.expanduser("~/Documents"))
itm["importOTIOfileButton"].Text = selected_path
dlg.On.OTIOwin.Close = _close_window
dlg.On.importOTIOfileButton.Clicked = _import_file_pressed
dlg.On.importButton.Clicked = _import_button
dlg.Show()
disp.RunLoop()
dlg.Hide()

View file

@ -0,0 +1,16 @@
#!/usr/bin/env python
import os
import sys
import pype
def main(env):
import pype.hosts.resolve as bmdvr
# Registers pype's Global pyblish plugins
pype.install()
bmdvr.setup(env)
if __name__ == "__main__":
result = main(os.environ)
sys.exit(not bool(result))

View file

@ -0,0 +1,22 @@
#!/usr/bin/env python
def main():
import pype.hosts.resolve as bmdvr
bmdvr.utils.get_resolve_module()
tracks = list()
track_type = "video"
sequence = bmdvr.get_current_sequence()
# get all tracks count filtered by track type
selected_track_count = sequence.GetTrackCount(track_type)
# loop all tracks and get items
for track_index in range(1, (int(selected_track_count) + 1)):
track_name = sequence.GetTrackName(track_type, track_index)
tracks.append(track_name)
if __name__ == "__main__":
main()

View file

@ -1,19 +1,24 @@
#! python3
import sys
from pype.api import Logger
import DaVinciResolveScript as bmdvr
log = Logger().get_logger(__name__)
def main():
import pype.hosts.resolve as bmdvr
bm = bmdvr.utils.get_resolve_module()
log.info(f"blackmagicmodule: {bm}")
print(f"_>> bmdvr.scriptapp(Resolve): {bmdvr.scriptapp('Resolve')}")
resolve = bmdvr.scriptapp('Resolve')
print(f"resolve: {resolve}")
project_manager = resolve.GetProjectManager()
project = project_manager.GetCurrentProject()
media_pool = project.GetMediaPool()
root_folder = media_pool.GetRootFolder()
ls_folder = root_folder.GetClipList()
timeline = project.GetCurrentTimeline()
timeline_name = timeline.GetName()
for tl in ls_folder:
if tl.GetName() not in timeline_name:
continue
print(tl.GetName())
print(tl.GetMetadata())
print(tl.GetClipProperty())
if __name__ == "__main__":

View file

@ -9,7 +9,7 @@ import os
import shutil
from pype.api import Logger
log = Logger().get_logger(__name__, "resolve")
log = Logger().get_logger(__name__)
def get_resolve_module():

View file

@ -9,7 +9,7 @@ from . import (
)
log = Logger().get_logger(__name__, "resolve")
log = Logger().get_logger(__name__)
exported_projet_ext = ".drp"

View file

@ -13,17 +13,13 @@ from .mongo import (
get_default_components,
PypeMongoConnection
)
from .anatomy import Anatomy
from .config import (
get_datetime_data,
load_json,
collect_json_from_path,
get_presets,
get_init_presets,
update_dict
from .anatomy import (
merge_dict,
Anatomy
)
from .config import get_datetime_data
from .env_tools import (
env_value_to_bool,
get_paths_from_environ
@ -42,6 +38,15 @@ from .avalon_context import (
get_hierarchy,
get_linked_assets,
get_latest_version,
get_workdir_data,
get_workdir,
get_workdir_with_workdir_data,
create_workfile_doc,
save_workfile_data_to_doc,
get_workfile_doc,
BuildWorkfile
)
@ -82,6 +87,17 @@ from .ffmpeg_utils import (
ffprobe_streams
)
from .editorial import (
is_overlapping_otio_ranges,
otio_range_to_frame_range,
otio_range_with_handles,
convert_to_padded_path,
trim_media_range,
range_from_frames,
frames_to_secons,
make_sequence_collection
)
terminal = Terminal
__all__ = [
@ -101,6 +117,15 @@ __all__ = [
"get_hierarchy",
"get_linked_assets",
"get_latest_version",
"get_workdir_data",
"get_workdir",
"get_workdir_with_workdir_data",
"create_workfile_doc",
"save_workfile_data_to_doc",
"get_workfile_doc",
"BuildWorkfile",
"ApplicationLaunchFailed",
@ -127,13 +152,11 @@ __all__ = [
"get_ffmpeg_tool_path",
"terminal",
"merge_dict",
"Anatomy",
"get_datetime_data",
"load_json",
"collect_json_from_path",
"get_presets",
"get_init_presets",
"update_dict",
"PypeLogger",
"decompose_url",
@ -144,5 +167,14 @@ __all__ = [
"IniSettingRegistry",
"JSONSettingRegistry",
"PypeSettingsRegistry",
"timeit"
"timeit",
"is_overlapping_otio_ranges",
"otio_range_with_handles",
"convert_to_padded_path",
"otio_range_to_frame_range",
"trim_media_range",
"range_from_frames",
"frames_to_secons",
"make_sequence_collection"
]

View file

@ -9,7 +9,6 @@ from pype.settings.lib import (
get_default_anatomy_settings,
get_anatomy_settings
)
from . import config
from .log import PypeLogger
log = PypeLogger().get_logger(__name__)
@ -20,6 +19,32 @@ except NameError:
StringType = str
def merge_dict(main_dict, enhance_dict):
"""Merges dictionaries by keys.
The function calls itself if the value under a key is again a dictionary.
Args:
main_dict (dict): First dict to merge second one into.
enhance_dict (dict): Second dict to be merged.
Returns:
dict: Merged result.
.. note:: Does not override the whole value on the first found key,
only the values that differ in enhance_dict.
"""
for key, value in enhance_dict.items():
if key not in main_dict:
main_dict[key] = value
elif isinstance(value, dict) and isinstance(main_dict[key], dict):
main_dict[key] = merge_dict(main_dict[key], value)
else:
main_dict[key] = value
return main_dict
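# Hedged usage sketch of `merge_dict` (illustrative values only, not part
# of the original file): nested keys are merged recursively and only the
# differing leaves are overwritten.
example_main = {"a": {"b": 1, "c": 2}}
example_enhance = {"a": {"c": 3}, "d": 4}
assert merge_dict(example_main, example_enhance) == {
"a": {"b": 1, "c": 3}, "d": 4
}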
class ProjectNotSet(Exception):
"""Exception raised when is created Anatomy without project name."""
@ -395,9 +420,7 @@ class TemplatesDict(dict):
if key in invalid_types:
continue
_invalid_types[key] = val
invalid_types = config.update_dict(
invalid_types, _invalid_types
)
invalid_types = merge_dict(invalid_types, _invalid_types)
return invalid_types
@property
@ -405,7 +428,7 @@ class TemplatesDict(dict):
"""Return used values for all children templates."""
used_values = {}
for value in self.values():
used_values = config.update_dict(used_values, value.used_values)
used_values = merge_dict(used_values, value.used_values)
return used_values
def get_solved(self):
@ -840,7 +863,7 @@ class Templates:
root_key = "{" + root_key + "}"
roots_dict = config.update_dict(
roots_dict = merge_dict(
roots_dict,
self._keys_to_dicts(used_root_keys, root_key)
)

View file

@ -1,5 +1,4 @@
import os
import copy
import platform
import inspect
import subprocess
@ -16,8 +15,6 @@ from .python_module_tools import (
classes_from_module
)
log = PypeLogger().get_logger(__name__)
class ApplicationNotFound(Exception):
"""Application was not found in ApplicationManager by name."""
@ -466,15 +463,23 @@ class ApplicationLaunchContext:
self.launch_args = executable.as_args()
# Handle launch environments
passed_env = self.data.pop("env", None)
if passed_env is None:
env = self.data.pop("env", None)
if env is not None and not isinstance(env, dict):
self.log.warning((
"Passed `env` kwarg has invalid type: {}. Expected: `dict`."
" Using `os.environ` instead."
).format(str(type(env))))
env = None
if env is None:
env = os.environ
else:
env = passed_env
# subprocess.Popen keyword arguments
self.kwargs = {
"env": copy.deepcopy(env)
"env": {
key: str(value)
for key, value in env.items()
}
}
if platform.system().lower() == "windows":

View file

@ -1,11 +1,13 @@
import os
import json
import re
import copy
import logging
import collections
import functools
from pype.settings import get_project_settings
from .anatomy import Anatomy
# avalon module is not imported at the top
# - may not be in path at the time of pype.lib initialization
@ -246,6 +248,229 @@ def get_latest_version(asset_name, subset_name, dbcon=None, project_name=None):
return version_doc
def get_workdir_data(project_doc, asset_doc, task_name, host_name):
"""Prepare data for workdir template filling from entered information.
Args:
project_doc (dict): Mongo document of project from MongoDB.
asset_doc (dict): Mongo document of asset from MongoDB.
task_name (str): Task name for which the workdir data are prepared.
host_name (str): Name of the host used for the workdir. This is
required because the workdir template may contain the `{app}` key.
Returns:
dict: Data prepared for filling workdir template.
"""
hierarchy = "/".join(asset_doc["data"]["parents"])
data = {
"project": {
"name": project_doc["name"],
"code": project_doc["data"].get("code")
},
"task": task_name,
"asset": asset_doc["name"],
"app": host_name,
"hierarchy": hierarchy
}
return data
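# Hedged usage sketch with hypothetical documents (illustrative only, not
# part of the original file):
example_project = {"name": "MyProject", "data": {"code": "mp"}}
example_asset = {"name": "sh010", "data": {"parents": ["seq01"]}}
example_data = get_workdir_data(
example_project, example_asset, "animation", "maya"
)
assert example_data["hierarchy"] == "seq01"
assert example_data["app"] == "maya"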
def get_workdir_with_workdir_data(
workdir_data, anatomy=None, project_name=None, template_key=None
):
"""Fill workdir path from entered data and project's anatomy.
It is possible to pass only the project's name instead of the project's
anatomy, but one of them **must** be entered. It is preferred to enter
anatomy if it is available, as initialization of a new Anatomy object may
be time consuming.
Args:
workdir_data (dict): Data to fill workdir template.
anatomy (Anatomy): Anatomy object for specific project. Optional if
`project_name` is entered.
project_name (str): Project's name. Optional if `anatomy` is entered,
otherwise the Anatomy object is created using the project name.
template_key (str): Key of work templates in anatomy templates. By
default set to `"work"`.
Returns:
TemplateResult: Workdir path.
Raises:
ValueError: When both `anatomy` and `project_name` are set to None.
"""
if not anatomy and not project_name:
raise ValueError((
"Missing required arguments one of `project_name` or `anatomy`"
" must be entered."
))
if not anatomy:
anatomy = Anatomy(project_name)
if not template_key:
template_key = "work"
anatomy_filled = anatomy.format(workdir_data)
# Output is a TemplateResult object which contains useful data
return anatomy_filled[template_key]["folder"]
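# Hedged follow-up to the sketch above (illustrative only): when no Anatomy
# object is at hand, the project name alone is enough, at the cost of the
# function creating the Anatomy object itself.
#     workdir = get_workdir_with_workdir_data(
#         example_data, project_name="MyProject"
#     )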
def get_workdir(
project_doc,
asset_doc,
task_name,
host_name,
anatomy=None,
template_key=None
):
"""Fill workdir path from entered data and project's anatomy.
Args:
project_doc (dict): Mongo document of project from MongoDB.
asset_doc (dict): Mongo document of asset from MongoDB.
task_name (str): Task name for which the workdir data are prepared.
host_name (str): Name of the host used for the workdir. This is
required because the workdir template may contain the `{app}`
key. In `Session` it is stored under the `AVALON_APP` key.
anatomy (Anatomy): Optional argument. Otherwise the Anatomy object is
created using the project name from `project_doc`. It is preferred
to pass this argument, as initialization of a new Anatomy object
may be time consuming.
template_key (str): Key of work templates in anatomy templates. Default
value is defined in `get_workdir_with_workdir_data`.
Returns:
TemplateResult: Workdir path.
"""
if not anatomy:
anatomy = Anatomy(project_doc["name"])
workdir_data = get_workdir_data(
project_doc, asset_doc, task_name, host_name
)
# Output is a TemplateResult object which contains useful data
return get_workdir_with_workdir_data(workdir_data, anatomy, template_key)
@with_avalon
def get_workfile_doc(asset_id, task_name, filename, dbcon=None):
"""Return workfile document for entered context.
Do not use this method to get more than one document. In that case use a
custom query, as this returns documents from the database one by one.
Args:
asset_id (ObjectId): Mongo ID of an asset under which workfile belongs.
task_name (str): Name of task under which the workfile belongs.
filename (str): Name of a workfile.
dbcon (AvalonMongoDB): Optional avalon AvalonMongoDB object;
`avalon.io` is used if not entered.
Returns:
dict: Workfile document or None.
"""
# Use avalon.io if dbcon is not entered
if not dbcon:
dbcon = avalon.io
return dbcon.find_one({
"type": "workfile",
"parent": asset_id,
"task_name": task_name,
"filename": filename
})
@with_avalon
def create_workfile_doc(asset_doc, task_name, filename, workdir, dbcon=None):
"""Creates or replace workfile document in mongo.
Do not use this method to update data. This method will remove all
additional data from existing document.
Args:
asset_doc (dict): Document of asset under which workfile belongs.
task_name (str): Name of the task to which the workfile relates.
filename (str): Filename of workfile.
workdir (str): Path to directory where `filename` is located.
dbcon (AvalonMongoDB): Optional avalon AvalonMongoDB object;
`avalon.io` is used if not entered.
"""
# Use avalon.io if dbcon is not entered
if not dbcon:
dbcon = avalon.io
# Filter of workfile document
doc_filter = {
"type": "workfile",
"parent": asset_doc["_id"],
"task_name": task_name,
"filename": filename
}
# Document data are copy of filter
doc_data = copy.deepcopy(doc_filter)
# Prepare project for workdir data
project_doc = dbcon.find_one({"type": "project"})
workdir_data = get_workdir_data(
project_doc, asset_doc, task_name, dbcon.Session["AVALON_APP"]
)
# Prepare anatomy
anatomy = Anatomy(project_doc["name"])
# Get workdir path (result is anatomy.TemplateResult)
template_workdir = get_workdir_with_workdir_data(workdir_data, anatomy)
template_workdir_path = str(template_workdir).replace("\\", "/")
# Replace backslashes in the workdir path where the workfile is located
mod_workdir = workdir.replace("\\", "/")
# Replace workdir from templates with rootless workdir
rootless_workdir = mod_workdir.replace(
template_workdir_path,
template_workdir.rootless.replace("\\", "/")
)
doc_data["schema"] = "pype:workfile-1.0"
doc_data["files"] = ["/".join([rootles_workdir, filename])]
doc_data["data"] = {}
dbcon.replace_one(
doc_filter,
doc_data,
upsert=True
)
@with_avalon
def save_workfile_data_to_doc(workfile_doc, data, dbcon=None):
if not workfile_doc:
# TODO add log message
return
if not data:
return
# Use avalon.io if dbcon is not entered
if not dbcon:
dbcon = avalon.io
# Convert data to mongo modification keys/values
# - this is naive implementation which does not expect nested
# dictionaries
set_data = {}
for key, value in data.items():
new_key = "data.{}".format(key)
set_data[new_key] = value
# Update workfile document with data
dbcon.update_one(
{"_id": workfile_doc["_id"]},
{"$set": set_data}
)
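# Hedged sketch of the naive key flattening used above (illustrative only,
# not part of the original file):
example_workfile_data = {"frameStart": 1001, "frameEnd": 1100}
example_set_data = {
"data.{}".format(key): value
for key, value in example_workfile_data.items()
}
assert example_set_data == {"data.frameStart": 1001, "data.frameEnd": 1100}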
class BuildWorkfile:
"""Wrapper for build workfile process.

View file

@ -1,11 +1,6 @@
# -*- coding: utf-8 -*-
"""Get configuration data."""
import os
import json
import datetime
from .log import PypeLogger
log = PypeLogger().get_logger(__name__)
def get_datetime_data(datetime_obj=None):
@ -79,233 +74,3 @@ def get_datetime_data(datetime_obj=None):
"S": str(int(seconds)),
"SS": str(seconds),
}
def load_json(fpath, first_run=False):
"""Load JSON data.
Args:
fpath (str): Path to JSON file.
first_run (bool): Flag to run checks if file is loaded for the first
time.
Returns:
dict: parsed JSON object.
"""
# Load json data
with open(fpath, "r") as opened_file:
lines = opened_file.read().splitlines()
# prepare json string
standard_json = ""
for line in lines:
# Remove all whitespace on both sides
line = line.strip()
# Skip blank lines
if len(line) == 0:
continue
standard_json += line
# Check if has extra commas
extra_comma = False
if ",]" in standard_json or ",}" in standard_json:
extra_comma = True
standard_json = standard_json.replace(",]", "]")
standard_json = standard_json.replace(",}", "}")
if extra_comma and first_run:
log.error("Extra comma in json file: \"{}\"".format(fpath))
# return empty dict if file is empty
if standard_json == "":
if first_run:
log.error("Empty json file: \"{}\"".format(fpath))
return {}
# Try to parse string
try:
return json.loads(standard_json)
except json.decoder.JSONDecodeError:
# Return empty dict if it is first time that decode error happened
if not first_run:
return {}
# Reproduce the exact same exception, but the traceback contains
# better information about the position of the error in the loaded json
try:
with open(fpath, "r") as opened_file:
json.load(opened_file)
except json.decoder.JSONDecodeError:
log.warning(
"File has invalid json format \"{}\"".format(fpath),
exc_info=True
)
return {}
def collect_json_from_path(input_path, first_run=False):
"""Collect JSON file from path.
Iterate through all subfolders and JSON files in `input_path`.
Args:
input_path (str): Path from which JSONs will be collected.
first_run (bool): Flag to run checks if file is loaded for the first
time.
Returns:
dict: Collected JSONs.
Examples:
Imagine path::
`{input_path}/path/to/file.json`
>>> collect_json_from_path(input_path)
{'path':
{'to':
{'file': {JSON}
}
}
"""
output = None
if os.path.isdir(input_path):
output = {}
for file in os.listdir(input_path):
full_path = os.path.sep.join([input_path, file])
if os.path.isdir(full_path):
loaded = collect_json_from_path(full_path, first_run)
if loaded:
output[file] = loaded
else:
basename, ext = os.path.splitext(os.path.basename(file))
if ext == '.json':
output[basename] = load_json(full_path, first_run)
else:
basename, ext = os.path.splitext(os.path.basename(input_path))
if ext == '.json':
output = load_json(input_path, first_run)
return output
def get_presets(project=None, first_run=False):
"""Loads preset files with usage of ``collect_json_from_path``.
Default preset path is set to: `{PYPE_CONFIG}/presets`
Project preset path is set to: `{PYPE_PROJECT_CONFIGS}/project_name`
Environment variable `PYPE_CONFIG` is required; `PYPE_PROJECT_CONFIGS`
only if you want to use per-project overrides.
Args:
project (str): Project name.
first_run (bool): Flag to run checks if file is loaded for the first
time.
Returns:
None: If default path does not exist.
default presets (dict): If project_name is not set or
if project's presets folder does not exist.
project presets (dict): If project_name is set and includes
override data.
"""
# config_path should be set from environments?
config_path = os.path.normpath(os.environ['PYPE_CONFIG'])
preset_items = [config_path, 'presets']
config_path = os.path.sep.join(preset_items)
if not os.path.isdir(config_path):
log.error('Preset path was not found: "{}"'.format(config_path))
return None
default_data = collect_json_from_path(config_path, first_run)
if not project:
project = os.environ.get('AVALON_PROJECT', None)
if not project:
return default_data
project_configs_path = os.environ.get('PYPE_PROJECT_CONFIGS')
if not project_configs_path:
return default_data
project_configs_path = os.path.normpath(project_configs_path)
project_config_items = [project_configs_path, project, 'presets']
project_config_path = os.path.sep.join(project_config_items)
if not os.path.isdir(project_config_path):
log.warning('Preset path for project {} not found: "{}"'.format(
project, project_config_path
))
return default_data
project_data = collect_json_from_path(project_config_path, first_run)
return update_dict(default_data, project_data)
def get_init_presets(project=None):
"""Loads content of presets.
Like :func:`get_presets` but also evaluates the `init.json`
pointer to default presets.
Args:
project(str): Project name.
Returns:
None: If default path does not exist
default presets (dict): If project_name is not set or if project's
presets folder does not exist.
project presets (dict): If project_name is set and includes
override data.
"""
presets = get_presets(project)
try:
# try if it is not in projects custom directory
# `{PYPE_PROJECT_CONFIGS}/[PROJECT_NAME]/init.json`
# init.json define preset names to be used
p_init = presets["init"]
presets["colorspace"] = presets["colorspace"][p_init["colorspace"]]
presets["dataflow"] = presets["dataflow"][p_init["dataflow"]]
except KeyError:
log.warning("No projects custom preset available...")
presets["colorspace"] = presets["colorspace"]["default"]
presets["dataflow"] = presets["dataflow"]["default"]
log.info(("Presets `colorspace` and `dataflow` "
"loaded from `default`..."))
return presets
def update_dict(main_dict, enhance_dict):
"""Merges dictionaries by keys.
The function calls itself if the value under a key is again a dictionary.
Args:
main_dict (dict): First dict to merge second one into.
enhance_dict (dict): Second dict to be merged.
Returns:
dict: Merged result.
.. note:: Does not override the whole value on the first found key,
only the values that differ in enhance_dict.
"""
for key, value in enhance_dict.items():
if key not in main_dict:
main_dict[key] = value
elif isinstance(value, dict) and isinstance(main_dict[key], dict):
main_dict[key] = update_dict(main_dict[key], value)
else:
main_dict[key] = value
return main_dict

160
pype/lib/editorial.py Normal file
View file

@ -0,0 +1,160 @@
import os
import re
import clique
from opentimelineio import opentime
from opentimelineio.opentime import (
to_frames, RationalTime, TimeRange)
def otio_range_to_frame_range(otio_range):
start = to_frames(
otio_range.start_time, otio_range.start_time.rate)
end = start + to_frames(
otio_range.duration, otio_range.duration.rate) - 1
return start, end
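# Hedged usage sketch (illustrative only, not part of the original file):
# a 5 frame range at 24 fps starting on frame 10 ends inclusively on 14.
example_range = TimeRange(RationalTime(10, 24), RationalTime(5, 24))
assert otio_range_to_frame_range(example_range) == (10, 14)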
def otio_range_with_handles(otio_range, instance):
handle_start = instance.data["handleStart"]
handle_end = instance.data["handleEnd"]
handles_duration = handle_start + handle_end
fps = float(otio_range.start_time.rate)
start = to_frames(otio_range.start_time, fps)
duration = to_frames(otio_range.duration, fps)
return TimeRange(
start_time=RationalTime((start - handle_start), fps),
duration=RationalTime((duration + handles_duration), fps)
)
def is_overlapping_otio_ranges(test_otio_range, main_otio_range, strict=False):
test_start, test_end = otio_range_to_frame_range(test_otio_range)
main_start, main_end = otio_range_to_frame_range(main_otio_range)
covering_exp = bool(
(test_start <= main_start) and (test_end >= main_end)
)
inside_exp = bool(
(test_start >= main_start) and (test_end <= main_end)
)
overlaying_right_exp = bool(
(test_start <= main_end) and (test_end >= main_end)
)
overlaying_left_exp = bool(
(test_end >= main_start) and (test_start <= main_start)
)
if not strict:
return any((
covering_exp,
inside_exp,
overlaying_right_exp,
overlaying_left_exp
))
else:
return covering_exp
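# Hedged usage sketch (illustrative only, not part of the original file):
# a partial overlap passes the loose check but fails the strict
# (covering) check.
example_test = TimeRange(RationalTime(10, 24), RationalTime(5, 24))
example_main = TimeRange(RationalTime(12, 24), RationalTime(10, 24))
assert is_overlapping_otio_ranges(example_test, example_main)
assert not is_overlapping_otio_ranges(example_test, example_main, strict=True)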
def convert_to_padded_path(path, padding):
"""
Return sequence string with correct padding.
Args:
path (str): path url or simple file name
padding (int): number of padding digits
Returns:
str: string with reformatted path
Example:
convert_to_padded_path("plate.%d.exr", 4) > "plate.%04d.exr"
"""
if "%d" in path:
path = re.sub("%d", "%0{padding}d".format(padding=padding), path)
return path
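# Hedged usage sketch (illustrative only, not part of the original file):
assert convert_to_padded_path("plate.%d.exr", 4) == "plate.%04d.exr"
# already padded paths contain no bare "%d" and pass through unchanged
assert convert_to_padded_path("plate.%04d.exr", 4) == "plate.%04d.exr"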
def trim_media_range(media_range, source_range):
"""
Trim input media range with clip source range.
Args:
media_range (otio.opentime.TimeRange): available range of media
source_range (otio.opentime.TimeRange): clip required range
Returns:
otio.opentime.TimeRange: trimmed media range
"""
rw_media_start = RationalTime(
media_range.start_time.value + source_range.start_time.value,
media_range.start_time.rate
)
rw_media_duration = RationalTime(
source_range.duration.value,
media_range.duration.rate
)
return TimeRange(
rw_media_start, rw_media_duration)
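# Hedged usage sketch (illustrative only, not part of the original file):
# media of 100 frames from frame 0, trimmed by a 20 frame source range
# starting at frame 10.
example_media = TimeRange(RationalTime(0, 24), RationalTime(100, 24))
example_source = TimeRange(RationalTime(10, 24), RationalTime(20, 24))
example_trimmed = trim_media_range(example_media, example_source)
assert example_trimmed.start_time.value == 10
assert example_trimmed.duration.value == 20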
def range_from_frames(start, duration, fps):
"""
Returns otio time range.
Args:
start (int): frame start
duration (int): frame duration
fps (float): frame rate
Returns:
otio.opentime.TimeRange: created range
"""
return TimeRange(
RationalTime(start, fps),
RationalTime(duration, fps)
)
def frames_to_secons(frames, framerate):
"""
Return seconds for the given frame count and frame rate.
Args:
frames (int): frame count
framerate (float): frame rate
Returns:
float: second value
"""
rt = opentime.from_frames(frames, framerate)
return opentime.to_seconds(rt)
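# Hedged usage sketch (illustrative only, not part of the original file):
# 48 frames at 24 fps equals 2 seconds.
assert frames_to_secons(48, 24.0) == 2.0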
def make_sequence_collection(path, otio_range, metadata):
"""
Make collection from path, otio range and otio metadata.
Args:
path (str): path to image sequence with `%d`
otio_range (otio.opentime.TimeRange): range to be used
metadata (dict): data where padding value can be found
Returns:
tuple: (dir_path, collection) where dir_path (str) is the path to the
sequence directory and collection is a clique.Collection
"""
if "%" not in path:
return None
file_name = os.path.basename(path)
dir_path = os.path.dirname(path)
head = file_name.split("%")[0]
tail = os.path.splitext(file_name)[-1]
first, last = otio_range_to_frame_range(otio_range)
collection = clique.Collection(
head=head, tail=tail, padding=metadata["padding"])
collection.indexes.update([i for i in range(first, (last + 1))])
return dir_path, collection
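# Hedged usage sketch (illustrative only, not part of the original file):
# a five frame range starting at frame 1001 with 4-digit padding.
example_seq_range = TimeRange(RationalTime(1001, 24), RationalTime(5, 24))
example_dir, example_collection = make_sequence_collection(
"/renders/plate.%04d.exr", example_seq_range, {"padding": 4}
)
assert example_dir == "/renders"
assert list(example_collection.indexes) == [1001, 1002, 1003, 1004, 1005]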

View file

@ -21,99 +21,24 @@ import socket
import sys
import time
import traceback
from logging.handlers import TimedRotatingFileHandler
import threading
import copy
from . import Terminal
from .mongo import (
MongoEnvNotSet,
decompose_url,
compose_url,
get_default_components
PypeMongoConnection
)
try:
import log4mongo
from log4mongo.handlers import MongoHandler
from bson.objectid import ObjectId
MONGO_PROCESS_ID = ObjectId()
except ImportError:
_mongo_logging = False
else:
_mongo_logging = True
log4mongo = None
MongoHandler = type("NOT_SET", (), {})
try:
unicode
_unicode = True
except NameError:
_unicode = False
PYPE_DEBUG = int(os.getenv("PYPE_DEBUG", "0"))
LOG_DATABASE_NAME = os.environ.get("PYPE_LOG_MONGO_DB") or "pype"
LOG_COLLECTION_NAME = os.environ.get("PYPE_LOG_MONGO_COL") or "logs"
system_name, pc_name = platform.uname()[:2]
host_name = socket.gethostname()
try:
ip = socket.gethostbyname(host_name)
except socket.gaierror:
ip = "127.0.0.1"
# Get process name
if len(sys.argv) > 0 and os.path.basename(sys.argv[0]) == "tray.py":
process_name = "Tray"
else:
try:
import psutil
process = psutil.Process(os.getpid())
process_name = process.name()
except ImportError:
process_name = os.environ.get("AVALON_APP_NAME")
if not process_name:
process_name = os.path.basename(sys.executable)
def _log_mongo_components():
mongo_url = os.environ.get("PYPE_LOG_MONGO_URL")
if mongo_url is not None:
components = decompose_url(mongo_url)
else:
components = get_default_components()
return components
def _bootstrap_mongo_log(components=None):
"""
This will check if database and collection for logging exist on server.
"""
import pymongo
if components is None:
components = _log_mongo_components()
if not components["host"]:
# fail silently
return
timeout = int(os.environ.get("AVALON_TIMEOUT", 1000))
kwargs = {
"host": compose_url(**components),
"serverSelectionTimeoutMS": timeout
}
port = components.get("port")
if port is not None:
kwargs["port"] = int(port)
client = pymongo.MongoClient(**kwargs)
logdb = client[LOG_DATABASE_NAME]
collist = logdb.list_collection_names()
if LOG_COLLECTION_NAME not in collist:
logdb.create_collection(
LOG_COLLECTION_NAME, capped=True, max=5000, size=1073741824
)
return logdb
# Check for `unicode` in builtins
USE_UNICODE = hasattr(__builtins__, "unicode")
class PypeStreamHandler(logging.StreamHandler):
@ -148,7 +73,8 @@ class PypeStreamHandler(logging.StreamHandler):
msg = Terminal.log(msg)
stream = self.stream
fs = "%s\n"
if not _unicode: # if no unicode support...
# if no unicode support...
if not USE_UNICODE:
stream.write(fs % msg)
else:
try:
@ -225,23 +151,18 @@ class PypeMongoFormatter(logging.Formatter):
'fileName': record.pathname,
'module': record.module,
'method': record.funcName,
'lineNumber': record.lineno,
'process_id': MONGO_PROCESS_ID,
'hostname': host_name,
'hostip': ip,
'username': getpass.getuser(),
'system_name': system_name,
'process_name': process_name
'lineNumber': record.lineno
}
document.update(PypeLogger.get_process_data())
# Standard document decorated with exception info
if record.exc_info is not None:
document.update({
'exception': {
'message': str(record.exc_info[1]),
'code': 0,
'stackTrace': self.formatException(record.exc_info)
}
})
document['exception'] = {
'message': str(record.exc_info[1]),
'code': 0,
'stackTrace': self.formatException(record.exc_info)
}
# Standard document decorated with extra contextual information
if len(self.DEFAULT_PROPERTIES) != len(record.__dict__):
contextual_extra = set(record.__dict__).difference(
@ -253,9 +174,6 @@ class PypeMongoFormatter(logging.Formatter):
class PypeLogger:
PYPE_DEBUG = 0
DFT = '%(levelname)s >>> { %(name)s }: [ %(message)s ] '
DBG = " - { %(name)s }: [ %(message)s ] "
INF = ">>> [ %(message)s ] "
@ -271,55 +189,97 @@ class PypeLogger:
logging.CRITICAL: CRI,
}
def __init__(self):
self.PYPE_DEBUG = int(os.environ.get("PYPE_DEBUG", "0"))
# Is static class initialized
bootstraped = False
initialized = False
_init_lock = threading.Lock()
@staticmethod
def get_file_path(host='pype'):
# Defines if mongo logging should be used
use_mongo_logging = None
mongo_process_id = None
ts = time.time()
log_name = datetime.datetime.fromtimestamp(ts).strftime(
'%Y-%m-%d' # '%Y-%m-%d_%H-%M-%S'
)
# Information about mongo url
log_mongo_url = None
log_mongo_url_components = None
log_database_name = None
log_collection_name = None
logger_file_root = os.path.join(
os.path.expanduser("~"),
".pype-setup"
)
# PYPE_DEBUG
pype_debug = 0
logger_file_path = os.path.join(
logger_file_root,
"{}-{}.{}".format(host, log_name, 'log')
)
# Data same for all record documents
process_data = None
# Cached process name or ability to set different process name
_process_name = None
if not os.path.exists(logger_file_root):
os.mkdir(logger_file_root)
@classmethod
def get_logger(cls, name=None, _host=None):
if not cls.initialized:
cls.initialize()
return logger_file_path
logger = logging.getLogger(name or "__main__")
def _get_file_handler(self, host):
logger_file_path = PypeLogger.get_file_path(host)
if cls.pype_debug > 1:
logger.setLevel(logging.DEBUG)
else:
logger.setLevel(logging.INFO)
formatter = PypeFormatter(self.FORMAT_FILE)
add_mongo_handler = cls.use_mongo_logging
add_console_handler = True
file_handler = TimedRotatingFileHandler(
logger_file_path,
when='midnight'
)
file_handler.set_name("PypeFileHandler")
file_handler.setFormatter(formatter)
return file_handler
for handler in logger.handlers:
if isinstance(handler, MongoHandler):
add_mongo_handler = False
elif isinstance(handler, PypeStreamHandler):
add_console_handler = False
def _get_mongo_handler(self):
components = _log_mongo_components()
# Check existence of mongo connection before creating Mongo handler
if log4mongo.handlers._connection is None:
_bootstrap_mongo_log(components)
if add_console_handler:
logger.addHandler(cls._get_console_handler())
if add_mongo_handler:
try:
handler = cls._get_mongo_handler()
if handler:
logger.addHandler(handler)
except MongoEnvNotSet:
# Skip if mongo environments are not set yet
cls.use_mongo_logging = False
except Exception:
lines = traceback.format_exception(*sys.exc_info())
for line in lines:
if line.endswith("\n"):
line = line[:-1]
Terminal.echo(line)
cls.use_mongo_logging = False
# Do not propagate logs to root logger
logger.propagate = False
if _host is not None:
# Warn about deprecated argument
# TODO remove backwards compatibility of host argument which is
# not used for more than a year
logger.warning(
"Logger \"{}\" is using argument `host` on `get_logger`"
" which is deprecated. Please remove it, as backwards"
" compatibility will be removed soon.".format(logger.name)
)
return logger
@classmethod
def _get_mongo_handler(cls):
cls.bootstrap_mongo_log()
if not cls.use_mongo_logging:
return
components = cls.log_mongo_url_components
kwargs = {
"host": compose_url(**components),
"database_name": LOG_DATABASE_NAME,
"collection": LOG_COLLECTION_NAME,
"host": cls.log_mongo_url,
"database_name": cls.log_database_name,
"collection": cls.log_collection_name,
"username": components["username"],
"password": components["password"],
"capped": True,
@ -332,56 +292,193 @@ class PypeLogger:
return MongoHandler(**kwargs)
def _get_console_handler(self):
formatter = PypeFormatter(self.FORMAT_FILE)
@classmethod
def _get_console_handler(cls):
formatter = PypeFormatter(cls.FORMAT_FILE)
console_handler = PypeStreamHandler()
console_handler.set_name("PypeStreamHandler")
console_handler.setFormatter(formatter)
return console_handler
def get_logger(self, name=None, host=None):
logger = logging.getLogger(name or '__main__')
if self.PYPE_DEBUG > 1:
logger.setLevel(logging.DEBUG)
@classmethod
def initialize(cls):
# TODO update already created loggers on re-initialization
if not cls._init_lock.locked():
with cls._init_lock:
cls._initialize()
else:
logger.setLevel(logging.INFO)
# If lock is locked, wait until it is finished
while cls._init_lock.locked():
time.sleep(0.1)
global _mongo_logging
add_mongo_handler = _mongo_logging
add_console_handler = True
@classmethod
def _initialize(cls):
# Change initialization state to prevent runtime changes
# if this is executed during runtime
cls.initialized = False
for handler in logger.handlers:
if isinstance(handler, MongoHandler):
add_mongo_handler = False
elif isinstance(handler, PypeStreamHandler):
add_console_handler = False
if add_console_handler:
logger.addHandler(self._get_console_handler())
if add_mongo_handler:
# Define if logging to mongo should be used
use_mongo_logging = bool(log4mongo is not None)
# Set mongo id for process (ONLY ONCE)
if use_mongo_logging and cls.mongo_process_id is None:
try:
logger.addHandler(self._get_mongo_handler())
except MongoEnvNotSet:
# Skip if mongo environments are not set yet
_mongo_logging = False
from bson.objectid import ObjectId
except Exception:
lines = traceback.format_exception(*sys.exc_info())
for line in lines:
if line.endswith("\n"):
line = line[:-1]
Terminal.echo(line)
_mongo_logging = False
use_mongo_logging = False
# Do not propagate logs to root logger
logger.propagate = False
# Check if mongo id was passed through environments and pop it
# - This is for subprocesses that are part of another process;
#   e.g. the Ftrack event server has 3 other subprocesses that
#   should use the same mongo id
if use_mongo_logging:
mongo_id = os.environ.pop("PYPE_PROCESS_MONGO_ID", None)
if not mongo_id:
# Create new object id
mongo_id = ObjectId()
else:
# Convert string to ObjectId object
mongo_id = ObjectId(mongo_id)
cls.mongo_process_id = mongo_id
return logger
# Store result to class definition
cls.use_mongo_logging = use_mongo_logging
# Define if PYPE_DEBUG mode is enabled
cls.pype_debug = int(os.getenv("PYPE_DEBUG") or "0")
# Mongo URL where logs will be stored
cls.log_mongo_url = (
os.environ.get("PYPE_LOG_MONGO_URL")
or os.environ.get("PYPE_MONGO")
)
if not cls.log_mongo_url:
cls.use_mongo_logging = False
else:
# Decompose url
cls.log_mongo_url_components = decompose_url(cls.log_mongo_url)
# Database name in Mongo
cls.log_database_name = (
os.environ.get("PYPE_LOG_MONGO_DB") or "pype"
)
# Collection name under database in Mongo
cls.log_collection_name = (
os.environ.get("PYPE_LOG_MONGO_COL") or "logs"
)
# Mark as initialized
cls.initialized = True
@classmethod
def get_process_data(cls):
"""Data about current process which should be same for all records.
Process data are used for each record sent to mongo database.
"""
if cls.process_data is not None:
return copy.deepcopy(cls.process_data)
if not cls.initialized:
cls.initialize()
host_name = socket.gethostname()
try:
host_ip = socket.gethostbyname(host_name)
except socket.gaierror:
host_ip = "127.0.0.1"
process_name = cls.get_process_name()
cls.process_data = {
"process_id": cls.mongo_process_id,
"hostname": host_name,
"hostip": host_ip,
"username": getpass.getuser(),
"system_name": platform.system(),
"process_name": process_name
}
return copy.deepcopy(cls.process_data)
@classmethod
def set_process_name(cls, process_name):
"""Set process name for mongo logs."""
# Just change the attribute
cls._process_name = process_name
# Update process data if are already set
if cls.process_data is not None:
cls.process_data["process_name"] = process_name
@classmethod
def get_process_name(cls):
"""Process name that is like "label" of a process.
Pype's logging can be used from pype itseld of from hosts. Even in Pype
it's good to know if logs are from Pype tray or from pype's event
server. This should help to identify that information.
"""
if cls._process_name is not None:
return cls._process_name
# Get process name
process_name = os.environ.get("AVALON_APP_NAME")
if not process_name:
try:
import psutil
process = psutil.Process(os.getpid())
process_name = process.name()
except ImportError:
pass
if not process_name:
process_name = os.path.basename(sys.executable)
cls._process_name = process_name
return cls._process_name
@classmethod
def bootstrap_mongo_log(cls):
"""Prepare mongo logging."""
if cls.bootstraped:
return
if not cls.initialized:
cls.initialize()
if not cls.use_mongo_logging:
return
client = log4mongo.handlers._connection
if not client:
client = cls.get_log_mongo_connection()
# Set the client inside log4mongo handlers to not create another
# mongo db connection.
log4mongo.handlers._connection = client
logdb = client[cls.log_database_name]
collist = logdb.list_collection_names()
if cls.log_collection_name not in collist:
logdb.create_collection(
cls.log_collection_name,
capped=True,
max=5000,
size=1073741824
)
cls.bootstraped = True
@classmethod
def get_log_mongo_connection(cls):
"""Mongo connection that allows to get to log collection.
This is implemented to prevent multiple connections to mongo from same
process.
"""
if not cls.initialized:
cls.initialize()
return PypeMongoConnection.get_mongo_client(cls.log_mongo_url)
def timeit(method):

View file

@ -64,7 +64,7 @@ class Terminal:
except Exception:
# Do not use colors if crashed
Terminal.use_colors = False
Terminal.echo(
print(
"Module `blessed` failed on import or terminal creation."
" Pype terminal won't use colors."
)

View file

@ -35,6 +35,7 @@ from .ftrack import (
from .clockify import ClockifyModule
from .log_viewer import LogViewModule
from .muster import MusterModule
from .deadline import DeadlineModule
from .standalonepublish_action import StandAlonePublishAction
from .websocket_server import WebsocketModule
from .sync_server import SyncServer
@ -74,6 +75,7 @@ __all__ = (
"IdleManager",
"LogViewModule",
"MusterModule",
"DeadlineModule",
"StandAlonePublishAction",
"WebsocketModule",

View file

@ -1,7 +1,9 @@
# -*- coding: utf-8 -*-
"""Base class for Pype Modules."""
import time
import inspect
import logging
import collections
from uuid import uuid4
from abc import ABCMeta, abstractmethod
import six
@ -268,12 +270,17 @@ class ITrayService(ITrayModule):
class ModulesManager:
# Helper attributes for report
_report_total_key = "Total"
def __init__(self):
self.log = logging.getLogger(self.__class__.__name__)
self.modules = []
self.modules_by_id = {}
self.modules_by_name = {}
# For report of time consumption
self._report = {}
self.initialize_modules()
self.connect_modules()
@ -283,6 +290,11 @@ class ModulesManager:
self.log.debug("*** Pype modules initialization.")
# Prepare settings for modules
modules_settings = get_system_settings()["modules"]
report = {}
time_start = time.time()
prev_start_time = time_start
# Go through globals in `pype.modules`
for name in dir(pype.modules):
modules_item = getattr(pype.modules, name, None)
@ -321,17 +333,28 @@ class ModulesManager:
enabled_str = " "
self.log.debug("[{}] {}".format(enabled_str, name))
now = time.time()
report[module.__class__.__name__] = now - prev_start_time
prev_start_time = now
except Exception:
self.log.warning(
"Initialization of module {} failed.".format(name),
exc_info=True
)
if self._report is not None:
report[self._report_total_key] = time.time() - time_start
self._report["Initialization"] = report
def connect_modules(self):
"""Trigger connection with other enabled modules.
Modules should handle their interfaces in `connect_with_modules`.
"""
report = {}
time_start = time.time()
prev_start_time = time_start
enabled_modules = self.get_enabled_modules()
self.log.debug("Has {} enabled modules.".format(len(enabled_modules)))
for module in enabled_modules:
@ -343,6 +366,14 @@ class ModulesManager:
exc_info=True
)
now = time.time()
report[module.__class__.__name__] = now - prev_start_time
prev_start_time = now
if self._report is not None:
report[self._report_total_key] = time.time() - time_start
self._report["Connect modules"] = report
def get_enabled_modules(self):
"""Enabled modules initialized by the manager.
@ -468,6 +499,122 @@ class ModulesManager:
output.extend(hook_paths)
return output
def print_report(self):
"""Print out report of time spent on modules initialization parts.
Reporting is not automated and must be implemented for each
initialization part separately. Reports must be stored to the `_report`
attribute.
Print is skipped if `_report` is empty.
Attribute `_report` is a dictionary where the key is a "label" describing
the processed part and the value is a dictionary where the key is a
module's class name and the value is the time delta of its processing.
It is a good idea to add the total time delta of the processed part under
the key defined in attribute `_report_total_key`. It defaults to
`"Total"`, but please use the attribute.
```javascript
{
"Initialization": {
"FtrackModule": 0.003,
...
"Total": 1.003,
},
...
}
```
"""
if not self._report:
return
available_col_names = set()
for module_names in self._report.values():
available_col_names |= set(module_names.keys())
# Prepare ordered dictionary for columns
cols = collections.OrderedDict()
# Add module names to the first column
cols["Module name"] = list(sorted(
module.__class__.__name__
for module in self.modules
if module.__class__.__name__ in available_col_names
))
# Add total key (as last module)
cols["Module name"].append(self._report_total_key)
# Add columns from report
for label in self._report.keys():
cols[label] = []
total_module_times = {}
for module_name in cols["Module name"]:
total_module_times[module_name] = 0
for label, reported in self._report.items():
for module_name in cols["Module name"]:
col_time = reported.get(module_name)
if col_time is None:
cols[label].append("N/A")
continue
cols[label].append("{:.3f}".format(col_time))
total_module_times[module_name] += col_time
# Also add a total column that sums the row
cols[self._report_total_key] = []
for module_name in cols["Module name"]:
cols[self._report_total_key].append(
"{:.3f}".format(total_module_times[module_name])
)
# Prepare column widths and total row count
# - column width is defined by the longest value in the column
col_widths = {}
total_rows = None
for key, values in cols.items():
if total_rows is None:
total_rows = 1 + len(values)
max_width = len(key)
for value in values:
value_length = len(value)
if value_length > max_width:
max_width = value_length
col_widths[key] = max_width
rows = []
for _idx in range(total_rows):
rows.append([])
for key, values in cols.items():
width = col_widths[key]
idx = 0
rows[idx].append(key.ljust(width))
for value in values:
idx += 1
rows[idx].append(value.ljust(width))
filler_parts = []
for width in col_widths.values():
filler_parts.append(width * "-")
filler = "+".join(filler_parts)
formatted_rows = [filler]
last_row_idx = len(rows) - 1
for idx, row in enumerate(rows):
# Add filler before last row
if idx == last_row_idx:
formatted_rows.append(filler)
formatted_rows.append("|".join(row))
# Add filler after first row
if idx == 0:
formatted_rows.append(filler)
# Join rows with newline char and add new line at the end
output = "\n".join(formatted_rows) + "\n"
print(output)
class TrayModulesManager(ModulesManager):
# Define order of modules in menu
@ -489,6 +636,7 @@ class TrayModulesManager(ModulesManager):
self.modules = []
self.modules_by_id = {}
self.modules_by_name = {}
self._report = {}
def initialize(self, tray_menu):
self.initialize_modules()
@ -504,6 +652,9 @@ class TrayModulesManager(ModulesManager):
return output
def tray_init(self):
report = {}
time_start = time.time()
prev_start_time = time_start
for module in self.get_enabled_tray_modules():
try:
module.tray_init()
@ -516,6 +667,14 @@ class TrayModulesManager(ModulesManager):
exc_info=True
)
now = time.time()
report[module.__class__.__name__] = now - prev_start_time
prev_start_time = now
if self._report is not None:
report[self._report_total_key] = time.time() - time_start
self._report["Tray init"] = report
def tray_menu(self, tray_menu):
ordered_modules = []
enabled_by_name = {
@ -529,6 +688,9 @@ class TrayModulesManager(ModulesManager):
ordered_modules.append(module_by_name)
ordered_modules.extend(enabled_by_name.values())
report = {}
time_start = time.time()
prev_start_time = time_start
for module in ordered_modules:
if not module.tray_initialized:
continue
@ -544,8 +706,18 @@ class TrayModulesManager(ModulesManager):
),
exc_info=True
)
now = time.time()
report[module.__class__.__name__] = now - prev_start_time
prev_start_time = now
if self._report is not None:
report[self._report_total_key] = time.time() - time_start
self._report["Tray menu"] = report
def start_modules(self):
report = {}
time_start = time.time()
prev_start_time = time_start
for module in self.get_enabled_tray_modules():
if not module.tray_initialized:
if isinstance(module, ITrayService):
@ -561,6 +733,13 @@ class TrayModulesManager(ModulesManager):
),
exc_info=True
)
now = time.time()
report[module.__class__.__name__] = now - prev_start_time
prev_start_time = now
if self._report is not None:
report[self._report_total_key] = time.time() - time_start
self._report["Modules start"] = report
def on_exit(self):
for module in self.get_enabled_tray_modules():

View file

@ -3,7 +3,7 @@ from pype.api import Logger
from pype.modules.clockify.clockify_api import ClockifyAPI
log = Logger().get_logger(__name__, "clockify_start")
log = Logger().get_logger(__name__)
class ClockifyStart(api.Action):

View file

@ -1,7 +1,7 @@
from avalon import api, io
from pype.modules.clockify.clockify_api import ClockifyAPI
from pype.api import Logger
log = Logger().get_logger(__name__, "clockify_sync")
log = Logger().get_logger(__name__)
class ClockifySync(api.Action):

View file

@ -1,7 +1,9 @@
from .. import PypeModule
import os
from pype.modules import (
PypeModule, IPluginPaths)
class DeadlineModule(PypeModule):
class DeadlineModule(PypeModule, IPluginPaths):
name = "deadline"
def initialize(self, modules_settings):
@ -18,3 +20,10 @@ class DeadlineModule(PypeModule):
def connect_with_modules(self, *_a, **_kw):
return
def get_plugin_paths(self):
"""Deadline plugin paths."""
current_dir = os.path.dirname(os.path.abspath(__file__))
return {
"publish": [os.path.join(current_dir, "plugins", "publish")]
}

View file

@ -10,7 +10,7 @@ class ValidateDeadlineConnection(pyblish.api.ContextPlugin):
label = "Validate Deadline Web Service"
order = pyblish.api.ValidatorOrder
hosts = ["maya"]
hosts = ["maya", "nuke"]
families = ["renderlayer"]
def process(self, context):

View file

@ -1,6 +1,7 @@
from .ftrack_module import (
FtrackModule,
IFtrackEventHandlerPaths
IFtrackEventHandlerPaths,
FTRACK_MODULE_DIR
)
from . import ftrack_server
from .ftrack_server import FtrackServer, check_ftrack_url
@ -9,6 +10,7 @@ from .lib import BaseHandler, BaseEvent, BaseAction, ServerAction
__all__ = (
"FtrackModule",
"IFtrackEventHandlerPaths",
"FTRACK_MODULE_DIR",
"ftrack_server",
"FtrackServer",

View file

@ -94,8 +94,8 @@ Example:
"avalon_auto_sync": {
"label": "Avalon auto-sync",
"type": "boolean",
"write_security_role": ["API", "Administrator"],
"read_security_role": ["API", "Administrator"]
"write_security_roles": ["API", "Administrator"],
"read_security_roles": ["API", "Administrator"]
}
},
"is_hierarchical": {
@ -136,7 +136,11 @@ class CustomAttributes(BaseAction):
required_keys = ("key", "label", "type")
presetable_keys = ("default", "write_security_role", "read_security_role")
presetable_keys = (
"default",
"write_security_roles",
"read_security_roles"
)
hierarchical_key = "is_hierarchical"
type_posibilities = (
@ -211,17 +215,17 @@ class CustomAttributes(BaseAction):
self.groups = {}
self.ftrack_settings = get_system_settings()["modules"]["ftrack"]
self.attrs_presets = self.prepare_attribute_pressets()
self.attrs_settings = self.prepare_attribute_settings()
def prepare_attribute_pressets(self):
def prepare_attribute_settings(self):
output = {}
attr_presets = self.ftrack_settings["custom_attributes"]
for entity_type, preset in attr_presets.items():
attr_settings = self.ftrack_settings["custom_attributes"]
for entity_type, attr_data in attr_settings.items():
# Lower entity type
entity_type = entity_type.lower()
# Just store if entity type is not "task"
if entity_type != "task":
output[entity_type] = preset
output[entity_type] = attr_data
continue
# Prepare empty dictionary for entity type if not set yet
@ -229,7 +233,7 @@ class CustomAttributes(BaseAction):
output[entity_type] = {}
# Store presets per lowered object type
for obj_type, _preset in preset.items():
for obj_type, _preset in attr_data.items():
output[entity_type][obj_type.lower()] = _preset
return output
@ -266,14 +270,11 @@ class CustomAttributes(BaseAction):
def create_hierarchical_mongo_attr(self, session, event):
# Set security roles for attribute
default_role_list = ("API", "Administrator", "Pypeclub")
data = {
"key": CUST_ATTR_ID_KEY,
"label": "Avalon/Mongo ID",
"type": "text",
"default": "",
"write_security_roles": default_role_list,
"read_security_roles": default_role_list,
"group": CUST_ATTR_GROUP,
"is_hierarchical": True,
"config": {"markdown": False}
@ -496,21 +497,20 @@ class CustomAttributes(BaseAction):
else:
entity_key = attr_data["entity_type"]
entity_presets = self.attrs_presets.get(entity_key) or {}
entity_settings = self.attrs_settings.get(entity_key) or {}
if entity_key.lower() == "task":
object_type = attr_data["object_type"]
entity_presets = entity_presets.get(object_type.lower()) or {}
entity_settings = entity_settings.get(object_type.lower()) or {}
key_presets = entity_presets.get(attr_key) or {}
for key, value in key_presets.items():
key_settings = entity_settings.get(attr_key) or {}
for key, value in key_settings.items():
if key in self.presetable_keys and value:
output[key] = value
return output
def process_attr_data(self, cust_attr_data, event):
attr_presets = self.presets_for_attr_data(cust_attr_data)
cust_attr_data.update(attr_presets)
attr_settings = self.presets_for_attr_data(cust_attr_data)
cust_attr_data.update(attr_settings)
try:
data = {}
@ -778,9 +778,9 @@ class CustomAttributes(BaseAction):
roles_read = attr["read_security_roles"]
if "write_security_roles" in attr:
roles_write = attr["write_security_roles"]
output['read_security_roles'] = self.get_security_roles(roles_read)
output['write_security_roles'] = self.get_security_roles(roles_write)
output["read_security_roles"] = self.get_security_roles(roles_read)
output["write_security_roles"] = self.get_security_roles(roles_write)
return output
def get_entity_type(self, attr):

View file

@ -1,19 +1,79 @@
import operator
import collections
from pype.modules.ftrack import BaseEvent
class NextTaskUpdate(BaseEvent):
def filter_entities_info(self, session, event):
"""Change status on following Task.
Handler cares about changes of status id on Task entities. When a new status
has state "Done" it will try to find the following task and change its status.
The following task is expected to be marked as "Ready to work on".
By default, all tasks with the same task type must have state "Done" before
any changes happen; once they all are "Done", statuses are changed on all
tasks with the next task type.
# Enable
The handler is settings driven and can be turned on/off with the "enabled"
key.
```
"enabled": True
```
# Status mappings
Mappings to new statuses must be set:
```
"mapping": {
# From -> To
"Not Ready": "Ready",
...
}
```
If current status name is not found then status change is skipped.
# Ignored statuses
These status names are skipped as if they were in the "Done" state. The best
example is the status "Omitted", which in most cases represents a "Blocked"
state but will never change.
```
"ignored_statuses": [
"Omitted",
...
]
```
# Change statuses sorted by task type and by name
Changes the behaviour of task type batching. Statuses are not checked and set
in batches of tasks grouped by Task type, but one by one. Tasks are sorted by
Task type and then by name; if all previous tasks are "Done", the following
one will change status.
```
"name_sorting": True
```
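# Full settings example
All keys combined, as the handler reads them from
`project_settings["ftrack"]["events"]["next_task_update"]`
(values are illustrative):
```
{
    "enabled": True,
    "mapping": {
        "Not Ready": "Ready"
    },
    "ignored_statuses": ["Omitted"],
    "name_sorting": False
}
```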
"""
settings_key = "next_task_update"
def launch(self, session, event):
'''Propagates status to following tasks when a task status changes'''
filtered_entities_info = self.filter_entities_info(event)
if not filtered_entities_info:
return
for project_id, entities_info in filtered_entities_info.items():
self.process_by_project(session, event, project_id, entities_info)
def filter_entities_info(self, event):
# Filter if event contain relevant data
entities_info = event["data"].get("entities")
if not entities_info:
return
first_filtered_entities = []
filtered_entities_info = collections.defaultdict(list)
for entity_info in entities_info:
# Care only about tasks
if entity_info.get("entityType") != "task":
# Care only about Task `entity_type`
if entity_info.get("entity_type") != "Task":
continue
# Care only about changes of status
@ -25,204 +85,353 @@ class NextTaskUpdate(BaseEvent):
):
continue
first_filtered_entities.append(entity_info)
project_id = None
for parent_info in reversed(entity_info["parents"]):
if parent_info["entityType"] == "show":
project_id = parent_info["entityId"]
break
if not first_filtered_entities:
return first_filtered_entities
if project_id:
filtered_entities_info[project_id].append(entity_info)
return filtered_entities_info
status_ids = [
entity_info["changes"]["statusid"]["new"]
for entity_info in first_filtered_entities
]
statuses_by_id = self.get_statuses_by_id(
session, status_ids=status_ids
def process_by_project(self, session, event, project_id, _entities_info):
project_name = self.get_project_name_from_event(
session, event, project_id
)
# Make sure `entity_type` is "Task"
task_object_type = session.query(
"select id, name from ObjectType where name is \"Task\""
).one()
# Care only about tasks having status with state `Done`
filtered_entities = []
for entity_info in first_filtered_entities:
if entity_info["objectTypeId"] != task_object_type["id"]:
continue
status_id = entity_info["changes"]["statusid"]["new"]
status_entity = statuses_by_id[status_id]
if status_entity["state"]["name"].lower() == "done":
filtered_entities.append(entity_info)
return filtered_entities
def get_parents_by_id(self, session, entities_info):
parent_ids = [
"\"{}\"".format(entity_info["parentId"])
for entity_info in entities_info
]
parent_entities = session.query(
"TypedContext where id in ({})".format(", ".join(parent_ids))
).all()
return {
entity["id"]: entity
for entity in parent_entities
}
def get_tasks_by_id(self, session, parent_ids):
joined_parent_ids = ",".join([
"\"{}\"".format(parent_id)
for parent_id in parent_ids
])
task_entities = session.query(
"Task where parent_id in ({})".format(joined_parent_ids)
).all()
return {
entity["id"]: entity
for entity in task_entities
}
def get_statuses_by_id(self, session, task_entities=None, status_ids=None):
if task_entities is None and status_ids is None:
return {}
if status_ids is None:
status_ids = []
for task_entity in task_entities:
status_ids.append(task_entity["status_id"])
if not status_ids:
return {}
status_entities = session.query(
"Status where id in ({})".format(", ".join(status_ids))
).all()
return {
entity["id"]: entity
for entity in status_entities
}
def get_sorted_task_types(self, session):
data = {
_type: _type.get("sort")
for _type in session.query("Type").all()
if _type.get("sort") is not None
}
return [
item[0]
for item in sorted(data.items(), key=operator.itemgetter(1))
]
def launch(self, session, event):
'''Propagates status from version to task when changed'''
entities_info = self.filter_entities_info(session, event)
if not entities_info:
return
parents_by_id = self.get_parents_by_id(session, entities_info)
tasks_by_id = self.get_tasks_by_id(
session, tuple(parents_by_id.keys())
# Load settings
project_settings = self.get_project_settings_from_event(
event, project_name
)
tasks_to_parent_id = collections.defaultdict(list)
for task_entity in tasks_by_id.values():
tasks_to_parent_id[task_entity["parent_id"]].append(task_entity)
statuses_by_id = self.get_statuses_by_id(session, tasks_by_id.values())
next_status_name = "Ready"
next_status = session.query(
"Status where name is \"{}\"".format(next_status_name)
).first()
if not next_status:
self.log.warning("Couldn't find status with name \"{}\"".format(
next_status_name
# Load status mapping from presets
event_settings = (
project_settings["ftrack"]["events"][self.settings_key]
)
if not event_settings["enabled"]:
self.log.debug("Project \"{}\" has disabled {}.".format(
project_name, self.__class__.__name__
))
return
statuses = session.query("Status").all()
entities_info = self.filter_by_status_state(_entities_info, statuses)
if not entities_info:
return
parent_ids = set()
event_task_ids_by_parent_id = collections.defaultdict(list)
for entity_info in entities_info:
parent_id = entity_info["parentId"]
task_id = entity_info["entityId"]
task_entity = tasks_by_id[task_id]
entity_id = entity_info["entityId"]
parent_ids.add(parent_id)
event_task_ids_by_parent_id[parent_id].append(entity_id)
all_same_type_taks_done = True
for parents_task in tasks_to_parent_id[parent_id]:
if (
parents_task["id"] == task_id
or parents_task["type_id"] != task_entity["type_id"]
):
continue
# From now it doesn't matter what was in event data
task_entities = session.query(
(
"select id, type_id, status_id, parent_id, link from Task"
" where parent_id in ({})"
).format(self.join_query_keys(parent_ids))
).all()
parents_task_status = statuses_by_id[parents_task["status_id"]]
low_status_name = parents_task_status["name"].lower()
# Skip if task's status name "Omitted"
if low_status_name == "omitted":
continue
tasks_by_parent_id = collections.defaultdict(list)
for task_entity in task_entities:
tasks_by_parent_id[task_entity["parent_id"]].append(task_entity)
low_state_name = parents_task_status["state"]["name"].lower()
if low_state_name != "done":
all_same_type_taks_done = False
break
project_entity = session.get("Project", project_id)
self.set_next_task_statuses(
session,
tasks_by_parent_id,
event_task_ids_by_parent_id,
statuses,
project_entity,
event_settings
)
if not all_same_type_taks_done:
continue
def filter_by_status_state(self, entities_info, statuses):
statuses_by_id = {
status["id"]: status
for status in statuses
}
# Prepare all task types
sorted_task_types = self.get_sorted_task_types(session)
sorted_task_types_len = len(sorted_task_types)
# Care only about tasks having status with state `Done`
filtered_entities_info = []
for entity_info in entities_info:
status_id = entity_info["changes"]["statusid"]["new"]
status_entity = statuses_by_id[status_id]
if status_entity["state"]["name"].lower() == "done":
filtered_entities_info.append(entity_info)
return filtered_entities_info
from_idx = None
for idx, task_type in enumerate(sorted_task_types):
if task_type["id"] == task_entity["type_id"]:
from_idx = idx + 1
break
def set_next_task_statuses(
self,
session,
tasks_by_parent_id,
event_task_ids_by_parent_id,
statuses,
project_entity,
event_settings
):
statuses_by_id = {
status["id"]: status
for status in statuses
}
# Current task type is last in order
if from_idx is None or from_idx >= sorted_task_types_len:
continue
# Lower ignored statuses
ignored_statuses = set(
status_name.lower()
for status_name in event_settings["ignored_statuses"]
)
# Lower both key and value of mapped statuses
mapping = {
status_from.lower(): status_to.lower()
for status_from, status_to in event_settings["mapping"].items()
}
# Should use name sorting or not
name_sorting = event_settings["name_sorting"]
next_task_type_id = None
next_task_type_tasks = []
for idx in range(from_idx, sorted_task_types_len):
next_task_type = sorted_task_types[idx]
for parents_task in tasks_to_parent_id[parent_id]:
if next_task_type_id is None:
if parents_task["type_id"] != next_task_type["id"]:
continue
next_task_type_id = next_task_type["id"]
# Collect task type ids from changed entities
task_type_ids = set()
for task_entities in tasks_by_parent_id.values():
for task_entity in task_entities:
task_type_ids.add(task_entity["type_id"])
if parents_task["type_id"] == next_task_type_id:
next_task_type_tasks.append(parents_task)
statuses_by_obj_id = self.statuses_for_tasks(
task_type_ids, project_entity
)
if next_task_type_id is not None:
break
sorted_task_type_ids = self.get_sorted_task_type_ids(session)
for next_task_entity in next_task_type_tasks:
if next_task_entity["status"]["name"].lower() != "not ready":
continue
for parent_id, _task_entities in tasks_by_parent_id.items():
task_entities_by_type_id = collections.defaultdict(list)
for _task_entity in _task_entities:
type_id = _task_entity["type_id"]
task_entities_by_type_id[type_id].append(_task_entity)
ent_path = "/".join(
[ent["name"] for ent in next_task_entity["link"]]
event_ids = set(event_task_ids_by_parent_id[parent_id])
if name_sorting:
# Sort entities by name
self.sort_by_name_task_entities_by_type(
task_entities_by_type_id
)
try:
next_task_entity["status"] = next_status
session.commit()
self.log.info(
"\"{}\" updated status to \"{}\"".format(
ent_path, next_status_name
)
# Sort entities by type id
sorted_task_entities = []
for type_id in sorted_task_type_ids:
task_entities = task_entities_by_type_id.get(type_id)
if task_entities:
sorted_task_entities.extend(task_entities)
next_tasks = self.next_tasks_with_name_sorting(
sorted_task_entities,
event_ids,
statuses_by_id,
ignored_statuses
)
else:
next_tasks = self.next_tasks_with_type_sorting(
task_entities_by_type_id,
sorted_task_type_ids,
event_ids,
statuses_by_id,
ignored_statuses
)
for task_entity in next_tasks:
if task_entity["status"]["state"]["name"].lower() == "done":
continue
task_status = statuses_by_id[task_entity["status_id"]]
old_status_name = task_status["name"].lower()
if old_status_name in ignored_statuses:
continue
new_task_name = mapping.get(old_status_name)
if not new_task_name:
self.log.debug(
"Didn't find mapping for status \"{}\".".format(
task_status["name"]
)
except Exception:
session.rollback()
self.log.warning(
"\"{}\" status couldnt be set to \"{}\"".format(
ent_path, next_status_name
),
exc_info=True
)
continue
ent_path = "/".join(
[ent["name"] for ent in task_entity["link"]]
)
type_id = task_entity["type_id"]
new_status = statuses_by_obj_id[type_id].get(new_task_name)
if new_status is None:
self.log.warning((
"\"{}\" does not have available status name \"{}\""
).format(ent_path, new_task_name))
continue
try:
task_entity["status_id"] = new_status["id"]
session.commit()
self.log.info(
"\"{}\" updated status to \"{}\"".format(
ent_path, new_status["name"]
)
)
except Exception:
session.rollback()
self.log.warning(
"\"{}\" status couldn't be set to \"{}\"".format(
ent_path, new_status["name"]
),
exc_info=True
)
def next_tasks_with_name_sorting(
self,
sorted_task_entities,
event_ids,
statuses_by_id,
ignored_statuses,
):
# Pre sort task entities by name
use_next_task = False
next_tasks = []
for task_entity in sorted_task_entities:
if task_entity["id"] in event_ids:
event_ids.remove(task_entity["id"])
use_next_task = True
continue
if not use_next_task:
continue
task_status = statuses_by_id[task_entity["status_id"]]
low_status_name = task_status["name"].lower()
if low_status_name in ignored_statuses:
continue
next_tasks.append(task_entity)
use_next_task = False
if not event_ids:
break
return next_tasks
def check_statuses_done(
self, task_entities, ignored_statuses, statuses_by_id
):
all_are_done = True
for task_entity in task_entities:
task_status = statuses_by_id[task_entity["status_id"]]
low_status_name = task_status["name"].lower()
if low_status_name in ignored_statuses:
continue
low_state_name = task_status["state"]["name"].lower()
if low_state_name != "done":
all_are_done = False
break
return all_are_done
def next_tasks_with_type_sorting(
self,
task_entities_by_type_id,
sorted_task_type_ids,
event_ids,
statuses_by_id,
ignored_statuses
):
# `use_next_tasks` marks that tasks of the following task type should change status
next_tasks = []
use_next_tasks = False
for type_id in sorted_task_type_ids:
if type_id not in task_entities_by_type_id:
continue
task_entities = task_entities_by_type_id[type_id]
# Check if any task was in event
event_id_in_tasks = False
for task_entity in task_entities:
task_id = task_entity["id"]
if task_id in event_ids:
event_ids.remove(task_id)
event_id_in_tasks = True
if use_next_tasks:
# Check if next tasks are not done already
all_in_type_done = self.check_statuses_done(
task_entities, ignored_statuses, statuses_by_id
)
if all_in_type_done:
continue
next_tasks.extend(task_entities)
use_next_tasks = False
if not event_ids:
break
if not event_id_in_tasks:
continue
all_in_type_done = self.check_statuses_done(
task_entities, ignored_statuses, statuses_by_id
)
use_next_tasks = all_in_type_done
if all_in_type_done:
continue
if not event_ids:
break
use_next_tasks = False
return next_tasks
def statuses_for_tasks(self, task_type_ids, project_entity):
project_schema = project_entity["project_schema"]
output = {}
for task_type_id in task_type_ids:
statuses = project_schema.get_statuses("Task", task_type_id)
output[task_type_id] = {
status["name"].lower(): status
for status in statuses
}
return output
def get_sorted_task_type_ids(self, session):
types_by_order = collections.defaultdict(list)
for _type in session.query("Type").all():
sort_order = _type.get("sort")
if sort_order is not None:
types_by_order[sort_order].append(_type["id"])
types = []
for sort_order in sorted(types_by_order.keys()):
types.extend(types_by_order[sort_order])
return types
@staticmethod
def sort_by_name_task_entities_by_type(task_entities_by_type_id):
_task_entities_by_type_id = {}
for type_id, task_entities in task_entities_by_type_id.items():
# Store tasks by name
task_entities_by_name = {}
for task_entity in task_entities:
task_name = task_entity["name"]
task_entities_by_name[task_name] = task_entity
# Store task entities by sorted names
sorted_task_entities = []
for task_name in sorted(task_entities_by_name.keys()):
task_entity = task_entities_by_name[task_name]
sorted_task_entities.append(task_entity)
# Store result to temp dictionary
_task_entities_by_type_id[type_id] = sorted_task_entities
# Override values in source object
for type_id, value in _task_entities_by_type_id.items():
task_entities_by_type_id[type_id] = value
def register(session):

View file

@ -47,8 +47,6 @@ class VersionToTaskStatus(BaseEvent):
def process_by_project(self, session, event, project_id, entities_info):
# Check for project data if event is enabled for event handler
status_mapping = None
project_name = self.get_project_name_from_event(
session, event, project_id
)

View file

@ -55,6 +55,8 @@ class SocketThread(threading.Thread):
"Running Socket thread on {}:{}".format(*server_address)
)
env = os.environ.copy()
env["PYPE_PROCESS_MONGO_ID"] = str(Logger.mongo_process_id)
self.subproc = subprocess.Popen(
[
sys.executable,
@ -62,6 +64,7 @@ class SocketThread(threading.Thread):
*self.additional_args,
str(self.port)
],
env=env,
stdin=subprocess.PIPE
)

View file

@ -51,6 +51,8 @@ def main(args):
if __name__ == "__main__":
Logger.set_process_name("Ftrack User server")
# Register interrupt signal
def signal_handler(sig, frame):
log.info(

View file

@ -0,0 +1,40 @@
import os
from pype.lib import PreLaunchHook
from pype.modules.ftrack import FTRACK_MODULE_DIR
class PrePython2Support(PreLaunchHook):
"""Add python ftrack api module for Python 2 to PYTHONPATH.
Path to vendor modules is added to the beginning of PYTHONPATH.
"""
# There will be needed more granular filtering in future
app_groups = ["maya", "nuke", "nukex", "hiero", "nukestudio"]
def execute(self):
# Prepare vendor dir path
python_2_vendor = os.path.join(FTRACK_MODULE_DIR, "python2_vendor")
# Add Python 2 modules
python_paths = [
# `python-ftrack-api`
os.path.join(python_2_vendor, "ftrack-python-api", "source"),
# `arrow`
os.path.join(python_2_vendor, "arrow"),
# `builtins` from `python-future`
# - `python-future` is a strict Python 2 module that causes crashes
# of Python 3 scripts executed through pype (burnin script etc.)
os.path.join(python_2_vendor, "builtins"),
# `backports.functools_lru_cache`
os.path.join(
python_2_vendor, "backports.functools_lru_cache"
)
]
# Load PYTHONPATH from current launch context
python_path = self.launch_context.env.get("PYTHONPATH")
if python_path:
python_paths.append(python_path)
# Set new PYTHONPATH to launch context environments
self.launch_context.env["PYTHONPATH"] = os.pathsep.join(python_paths)

View file

@ -284,7 +284,7 @@ class SyncEntitiesFactory:
" from Project where full_name is \"{}\""
)
entities_query = (
"select id, name, parent_id, link"
"select id, name, type_id, parent_id, link"
" from TypedContext where project_id is \"{}\""
)
ignore_custom_attr_key = "avalon_ignore_sync"
@ -399,11 +399,6 @@ class SyncEntitiesFactory:
"message": "Synchronization failed"
}
# Find all entities in project
all_project_entities = self.session.query(
self.entities_query.format(ft_project_id)
).all()
# Store entities by `id` and `parent_id`
entities_dict = collections.defaultdict(lambda: {
"children": list(),
@ -417,6 +412,15 @@ class SyncEntitiesFactory:
"tasks": {}
})
# Find all entities in project
all_project_entities = self.session.query(
self.entities_query.format(ft_project_id)
).all()
task_types = self.session.query("select id, name from Type").all()
task_type_names_by_id = {
task_type["id"]: task_type["name"]
for task_type in task_types
}
for entity in all_project_entities:
parent_id = entity["parent_id"]
entity_type = entity.entity_type
@ -426,7 +430,8 @@ class SyncEntitiesFactory:
elif entity_type_low == "task":
# enrich task info with additional metadata
task = {"type": entity["type"]["name"]}
task_type_name = task_type_names_by_id[entity["type_id"]]
task = {"type": task_type_name}
entities_dict[parent_id]["tasks"][entity["name"]] = task
continue

View file

@ -2,15 +2,11 @@
"show": {
"avalon_auto_sync": {
"label": "Avalon auto-sync",
"type": "boolean",
"write_security_role": ["API", "Administrator"],
"read_security_role": ["API", "Administrator"]
"type": "boolean"
},
"library_project": {
"label": "Library Project",
"type": "boolean",
"write_security_role": ["API", "Administrator"],
"read_security_role": ["API", "Administrator"]
"type": "boolean"
}
},
"is_hierarchical": {

@ -0,0 +1 @@
Subproject commit b746fedf7286c3755a46f07ab72f4c414cd41fc0

@ -0,0 +1 @@
Subproject commit d277f474ab016e7b53479c36af87cb861d0cc53e

View file

@ -13,7 +13,7 @@ from . import login_dialog
from pype.api import Logger, resources
log = Logger().get_logger("FtrackModule", "ftrack")
log = Logger().get_logger("FtrackModule")
class FtrackTrayWrapper:

View file

@ -1,9 +1,6 @@
import collections
from Qt import QtCore, QtGui
from pype.api import Logger
from pype.lib.log import _bootstrap_mongo_log, LOG_COLLECTION_NAME
log = Logger().get_logger("LogModel", "LoggingModule")
from pype.lib.log import PypeLogger
class LogModel(QtGui.QStandardItemModel):
@ -44,9 +41,14 @@ class LogModel(QtGui.QStandardItemModel):
self.dbcon = None
# Crash if connection is not possible to skip this module
database = _bootstrap_mongo_log()
if LOG_COLLECTION_NAME in database.list_collection_names():
self.dbcon = database[LOG_COLLECTION_NAME]
if not PypeLogger.initialized:
PypeLogger.initialize()
connection = PypeLogger.get_log_mongo_connection()
if connection:
PypeLogger.bootstrap_mongo_log()
database = connection[PypeLogger.log_database_name]
self.dbcon = database[PypeLogger.log_collection_name]
def headerData(self, section, orientation, role):
if (

View file

@ -131,26 +131,61 @@ class RestApiModule(PypeModule, ITrayService):
module.rest_api_initialization(self)
def find_port(self):
start_port = self.default_port
exclude_ports = self.exclude_ports
@staticmethod
def find_free_port(port_from, port_to=None, exclude_ports=None, host=None):
"""Find available socket port from entered range.
It is also possible to only check if entered port is available.
Args:
port_from (int): Port number which is checked as first.
port_to (int): Last port that is checked in sequence from entered
`port_from`. Only `port_from` is checked if `port_to` is not
entered or is equal to `port_from`.
exclude_ports (list, tuple, set): List of ports that won't be
checked from the entered range.
host (str): Host on which free ports are looked up. Set to
"localhost" by default.
"""
# Check only entered port if `port_to` is not defined
if port_to is None:
port_to = port_from
# Excluded ports (e.g. reserved for other servers/clients)
if exclude_ports is None:
exclude_ports = []
# Default host is localhost but it is possible to look for other hosts
if host is None:
host = "localhost"
found_port = None
# port check takes time so it's lowered to 100 ports
for port in range(start_port, start_port+100):
for port in range(port_from, port_to + 1):
if port in exclude_ports:
continue
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
result = sock.connect_ex(("localhost", port))
if result != 0:
found_port = port
sock = None
try:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.bind((host, port))
found_port = port
except socket.error:
continue
finally:
if sock:
sock.close()
if found_port is not None:
break
if found_port is None:
return None
return found_port
def tray_init(self):
port = self.find_port()
port = self.find_free_port(
self.default_port, self.default_port + 100, self.exclude_ports
)
self.rest_api_url = "http://localhost:{}".format(port)
self.rest_api_thread = RestApiThread(self, port)
self.register_statics("/res", resources.RESOURCES_DIR)
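A short usage sketch for the `find_free_port` helper above; the import path
and port numbers are assumptions for illustration:
```
# Hypothetical import path; `find_free_port` is a staticmethod, so no
# instance of the module class is needed.
from pype.modules.rest_api import RestApiModule

# Mirror `tray_init` above: probe a 100 port window from an assumed default.
port = RestApiModule.find_free_port(8011, 8011 + 100, exclude_ports={8021})
if port is None:
    raise RuntimeError("No free port found in range 8011-8111")
```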

View file

@ -45,8 +45,16 @@ class SettingsAction(PypeModule, ITrayAction):
if not self.settings_window:
raise AssertionError("Window is not initialized.")
# Store if was visible
was_visible = self.settings_window.isVisible()
# Show settings gui
self.settings_window.show()
# Pull window to the front.
self.settings_window.raise_()
self.settings_window.activateWindow()
# Reset content if was not visible
if not was_visible:
self.settings_window.reset()

View file

@ -1,11 +1,6 @@
import os
import pyblish.api
import logging
try:
import ftrack_api_old as ftrack_api
except Exception:
import ftrack_api
import pyblish.api
class CollectFtrackApi(pyblish.api.ContextPlugin):
@ -22,12 +17,14 @@ class CollectFtrackApi(pyblish.api.ContextPlugin):
ftrack_log.setLevel(logging.WARNING)
# Collect session
# NOTE Import python module here to know if import was successful
import ftrack_api
session = ftrack_api.Session(auto_connect_event_hub=True)
self.log.debug("Ftrack user: \"{0}\"".format(session.api_user))
context.data["ftrackSession"] = session
# Collect task
project_name = os.environ.get('AVALON_PROJECT', '')
asset_name = os.environ.get('AVALON_ASSET', '')
task_name = os.environ.get('AVALON_TASK', None)

View file

@ -36,7 +36,7 @@ class IntegrateHierarchyToFtrack(pyblish.api.ContextPlugin):
order = pyblish.api.IntegratorOrder - 0.04
label = 'Integrate Hierarchy To Ftrack'
families = ["shot"]
hosts = ["hiero"]
hosts = ["hiero", "resolve"]
optional = False
def process(self, context):

View file

@ -0,0 +1,113 @@
import pyblish.api
import avalon.api as avalon
class CollectHierarchy(pyblish.api.ContextPlugin):
"""Collecting hierarchy from `parents`.
present in `clip` family instances coming from the request json data file
It will add `hierarchical_context` into each instance for integrate
plugins to be able to create needed parents for the context if they
don't exist yet
"""
label = "Collect Hierarchy"
order = pyblish.api.CollectorOrder - 0.57
families = ["shot"]
hosts = ["resolve"]
def process(self, context):
temp_context = {}
project_name = avalon.Session["AVALON_PROJECT"]
final_context = {}
final_context[project_name] = {}
final_context[project_name]['entity_type'] = 'Project'
for instance in context:
self.log.info("Processing instance: `{}` ...".format(instance))
# shot data dict
shot_data = {}
family = instance.data.get("family")
# filter out all inappropriate instances
if not instance.data["publish"]:
continue
# exclude families other than self.families via intersection
if not set(self.families).intersection([family]):
continue
# exclude if masterLayer is not set to True
if not instance.data.get("masterLayer"):
continue
# get asset build data if any available
shot_data["inputs"] = [
x["_id"] for x in instance.data.get("assetbuilds", [])
]
# suppose that all instances are Shots
shot_data['entity_type'] = 'Shot'
shot_data['tasks'] = instance.data.get("tasks") or []
shot_data["comments"] = instance.data.get("comments", [])
shot_data['custom_attributes'] = {
"handleStart": instance.data["handleStart"],
"handleEnd": instance.data["handleEnd"],
"frameStart": instance.data["frameStart"],
"frameEnd": instance.data["frameEnd"],
"clipIn": instance.data["clipIn"],
"clipOut": instance.data["clipOut"],
'fps': instance.context.data["fps"],
"resolutionWidth": instance.data["resolutionWidth"],
"resolutionHeight": instance.data["resolutionHeight"],
"pixelAspect": instance.data["pixelAspect"]
}
actual = {instance.data["asset"]: shot_data}
for parent in reversed(instance.data["parents"]):
next_dict = {}
parent_name = parent["entity_name"]
next_dict[parent_name] = {}
next_dict[parent_name]["entity_type"] = parent[
"entity_type"].capitalize()
next_dict[parent_name]["childs"] = actual
actual = next_dict
temp_context = self._update_dict(temp_context, actual)
# skip if nothing for hierarchy available
if not temp_context:
return
final_context[project_name]['childs'] = temp_context
# adding hierarchy context to context
context.data["hierarchyContext"] = final_context
self.log.debug("context.data[hierarchyContext] is: {}".format(
context.data["hierarchyContext"]))
def _update_dict(self, parent_dict, child_dict):
"""
Nest each child dict into its parent.
Args:
parent_dict (dict): parent dict which should be nested with children
child_dict (dict): child dict which should be ingested
"""
for key in parent_dict:
if key in child_dict and isinstance(parent_dict[key], dict):
child_dict[key] = self._update_dict(
parent_dict[key], child_dict[key]
)
else:
if parent_dict.get(key) and child_dict.get(key):
continue
else:
child_dict[key] = parent_dict[key]
return child_dict
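For orientation, a sketch of the `hierarchyContext` shape this collector
produces for a single shot; all names are illustrative and
`custom_attributes` is trimmed:
```
{
    "MyProject": {
        "entity_type": "Project",
        "childs": {
            "shots": {
                "entity_type": "Folder",
                "childs": {
                    "sc010": {
                        "entity_type": "Sequence",
                        "childs": {
                            "sh0010": {
                                "entity_type": "Shot",
                                "tasks": ["Compositing"],
                                "comments": [],
                                "inputs": [],
                                "custom_attributes": {
                                    "frameStart": 1001,
                                    "frameEnd": 1050
                                }
                            }
                        }
                    }
                }
            }
        }
    }
}
```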

View file

@ -0,0 +1,70 @@
"""
Requires:
otioTimeline -> context data attribute
review -> instance data attribute
masterLayer -> instance data attribute
otioClipRange -> instance data attribute
"""
# import os
import opentimelineio as otio
import pyblish.api
import pype.lib
from pprint import pformat
class CollectOcioFrameRanges(pyblish.api.InstancePlugin):
"""Getting otio ranges from otio_clip
Adding timeline and source ranges to instance data"""
label = "Collect OTIO Frame Ranges"
order = pyblish.api.CollectorOrder - 0.58
families = ["shot", "clip"]
hosts = ["resolve"]
def process(self, instance):
# get basic variables
otio_clip = instance.data["otioClip"]
workfile_start = instance.data["workfileFrameStart"]
# get ranges
otio_tl_range = otio_clip.range_in_parent()
otio_src_range = otio_clip.source_range
otio_avalable_range = otio_clip.available_range()
otio_tl_range_handles = pype.lib.otio_range_with_handles(
otio_tl_range, instance)
otio_src_range_handles = pype.lib.otio_range_with_handles(
otio_src_range, instance)
# get source available start frame
src_starting_from = otio.opentime.to_frames(
otio_avalable_range.start_time,
otio_avalable_range.start_time.rate)
# convert to frames
range_convert = pype.lib.otio_range_to_frame_range
tl_start, tl_end = range_convert(otio_tl_range)
tl_start_h, tl_end_h = range_convert(otio_tl_range_handles)
src_start, src_end = range_convert(otio_src_range)
src_start_h, src_end_h = range_convert(otio_src_range_handles)
frame_start = workfile_start
frame_end = frame_start + otio.opentime.to_frames(
otio_tl_range.duration, otio_tl_range.duration.rate) - 1
data = {
"frameStart": frame_start,
"frameEnd": frame_end,
"clipIn": tl_start,
"clipOut": tl_end,
"clipInH": tl_start_h,
"clipOutH": tl_end_h,
"sourceStart": src_starting_from + src_start,
"sourceEnd": src_starting_from + src_end,
"sourceStartH": src_starting_from + src_start_h,
"sourceEndH": src_starting_from + src_end_h,
}
instance.data.update(data)
self.log.debug(
"_ data: {}".format(pformat(data)))
self.log.debug(
"_ instance.data: {}".format(pformat(instance.data)))

View file

@ -0,0 +1,99 @@
"""
Requires:
instance -> otioClip
context -> otioTimeline
Optional:
otioClip.metadata -> masterLayer
Provides:
instance -> otioReviewClips
instance -> families (adding ["review", "ftrack"])
"""
import opentimelineio as otio
import pyblish.api
from pprint import pformat
class CollectOcioReview(pyblish.api.InstancePlugin):
"""Get matching otio track from defined review layer"""
label = "Collect OTIO Review"
order = pyblish.api.CollectorOrder - 0.57
families = ["clip"]
hosts = ["resolve"]
def process(self, instance):
# get basic variables
otio_review_clips = list()
otio_timeline = instance.context.data["otioTimeline"]
otio_clip = instance.data["otioClip"]
# optionally get `reviewTrack`
review_track_name = otio_clip.metadata.get("reviewTrack")
# generate range in parent
otio_tl_range = otio_clip.range_in_parent()
# calculate real timeline end needed for the clip
clip_end_frame = int(
otio_tl_range.start_time.value + otio_tl_range.duration.value)
# skip if no review track available
if not review_track_name:
return
# loop all tracks and match with name in `reviewTrack`
for track in otio_timeline.tracks:
if review_track_name not in track.name:
continue
# process correct track
# establish gap
otio_gap = None
# get track parent range
track_rip = track.range_in_parent()
# calculate real track end frame
track_end_frame = int(
track_rip.start_time.value + track_rip.duration.value)
# check if the end of track is not lower than the clip requirement
if clip_end_frame > track_end_frame:
# calculate difference duration
gap_duration = clip_end_frame - track_end_frame
# create rational time range for gap
otio_gap_range = otio.opentime.TimeRange(
start_time=otio.opentime.RationalTime(
float(0),
track_rip.start_time.rate
),
duration=otio.opentime.RationalTime(
float(gap_duration),
track_rip.start_time.rate
)
)
# create gap
otio_gap = otio.schema.Gap(source_range=otio_gap_range)
# trim available clips from defined track as reviewable source
otio_review_clips = otio.algorithms.track_trimmed_to_range(
track,
otio_tl_range
)
# add gap at the end if track end is shorter than needed
if otio_gap:
otio_review_clips.append(otio_gap)
if otio_review_clips:
instance.data["families"] += ["review", "ftrack"]
instance.data["otioReviewClips"] = otio_review_clips
self.log.info(
"Creating review track: {}".format(otio_review_clips))
self.log.debug(
"_ instance.data: {}".format(pformat(instance.data)))
self.log.debug(
"_ families: {}".format(instance.data["families"]))

View file

@ -0,0 +1,182 @@
# TODO: this head doc string
"""
Requires:
instance -> otio_clip
Provides:
instance -> otioReviewClips
"""
import os
import clique
import opentimelineio as otio
import pyblish.api
import pype
class CollectOcioSubsetResources(pyblish.api.InstancePlugin):
"""Get Resources for a subset version"""
label = "Collect OTIO Subset Resources"
order = pyblish.api.CollectorOrder - 0.57
families = ["clip"]
hosts = ["resolve"]
def process(self, instance):
if not instance.data.get("representations"):
instance.data["representations"] = list()
version_data = dict()
# get basic variables
otio_clip = instance.data["otioClip"]
frame_start = instance.data["frameStart"]
frame_end = instance.data["frameEnd"]
# generate range in parent
otio_src_range = otio_clip.source_range
otio_avalable_range = otio_clip.available_range()
trimmed_media_range = pype.lib.trim_media_range(
otio_avalable_range, otio_src_range)
# calculate with handles
otio_src_range_handles = pype.lib.otio_range_with_handles(
otio_src_range, instance)
trimmed_media_range_h = pype.lib.trim_media_range(
otio_avalable_range, otio_src_range_handles)
# frame start and end from media
s_frame_start, s_frame_end = pype.lib.otio_range_to_frame_range(
trimmed_media_range)
a_frame_start, a_frame_end = pype.lib.otio_range_to_frame_range(
otio_avalable_range)
a_frame_start_h, a_frame_end_h = pype.lib.otio_range_to_frame_range(
trimmed_media_range_h)
# fix frame_start and frame_end frame to be in range of media
if a_frame_start_h < a_frame_start:
a_frame_start_h = a_frame_start
if a_frame_end_h > a_frame_end:
a_frame_end_h = a_frame_end
# count the difference for frame_start and frame_end
diff_start = s_frame_start - a_frame_start_h
diff_end = a_frame_end_h - s_frame_end
# add to version data start and end range data
# for loader plugins to be correctly displayed and loaded
version_data.update({
"frameStart": frame_start,
"frameEnd": frame_end,
"handleStart": diff_start,
"handleEnd": diff_end,
"fps": otio_avalable_range.start_time.rate
})
# change frame_start and frame_end values
# for representation to be correctly renumbered in integrate_new
frame_start -= diff_start
frame_end += diff_end
media_ref = otio_clip.media_reference
metadata = media_ref.metadata
# check in two ways if it is a sequence
is_sequence = None
if hasattr(otio.schema, "ImageSequenceReference"):
# for OpenTimelineIO 0.13 and newer
if isinstance(media_ref,
otio.schema.ImageSequenceReference):
is_sequence = True
else:
# for OpenTimelineIO 0.12 and older
if metadata.get("padding"):
is_sequence = True
self.log.info(
"frame_start-frame_end: {}-{}".format(frame_start, frame_end))
if is_sequence:
# file sequence way
if hasattr(media_ref, "target_url_base"):
self.staging_dir = media_ref.target_url_base
head = media_ref.name_prefix
tail = media_ref.name_suffix
collection = clique.Collection(
head=head,
tail=tail,
padding=media_ref.frame_zero_padding
)
collection.indexes.update(
[i for i in range(a_frame_start_h, (a_frame_end_h + 1))])
self.log.debug(collection)
repre = self._create_representation(
frame_start, frame_end, collection=collection)
else:
# in case it is file sequence but not new OTIO schema
# `ImageSequenceReference`
path = media_ref.target_url
collection_data = pype.lib.make_sequence_collection(
path, trimmed_media_range, metadata)
self.staging_dir, collection = collection_data
self.log.debug(collection)
repre = self._create_representation(
frame_start, frame_end, collection=collection)
else:
dirname, filename = os.path.split(media_ref.target_url)
self.staging_dir = dirname
self.log.debug(filename)
repre = self._create_representation(
frame_start, frame_end, file=filename)
if repre:
instance.data["versionData"] = version_data
self.log.debug(">>>>>>>> version data {}".format(version_data))
# add representation to instance data
instance.data["representations"].append(repre)
self.log.debug(">>>>>>>> {}".format(repre))
def _create_representation(self, start, end, **kwargs):
"""
Creating representation data.
Args:
start (int): start frame
end (int): end frame
kwargs (dict): optional data
Returns:
dict: representation data
"""
# create default representation data
representation_data = {
"frameStart": start,
"frameEnd": end,
"stagingDir": self.staging_dir
}
if kwargs.get("collection"):
collection = kwargs.get("collection")
files = [f for f in collection]
ext = collection.format("{tail}")
representation_data.update({
"name": ext[1:],
"ext": ext[1:],
"files": files,
"frameStart": start,
"frameEnd": end,
})
return representation_data
if kwargs.get("file"):
file = kwargs.get("file")
ext = os.path.splitext(file)[-1]
representation_data.update({
"name": ext[1:],
"ext": ext[1:],
"files": file,
"frameStart": start,
"frameEnd": end,
})
return representation_data
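A worked sketch of the handle clamping above, with assumed frame numbers:
media available 0-99, trimmed source 10-20, 15-frame handles on both sides:
```
a_frame_start, a_frame_end = 0, 99       # available media range
s_frame_start, s_frame_end = 10, 20      # trimmed source range
a_frame_start_h, a_frame_end_h = -5, 35  # source range with handles

# clamp handles into the available media range
if a_frame_start_h < a_frame_start:
    a_frame_start_h = a_frame_start      # -> 0
if a_frame_end_h > a_frame_end:
    a_frame_end_h = a_frame_end          # stays 35

diff_start = s_frame_start - a_frame_start_h  # -> 10 (head handle shrank)
diff_end = a_frame_end_h - s_frame_end        # -> 15
```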

View file

@ -32,7 +32,8 @@ class ExtractBurnin(pype.api.Extractor):
"standalonepublisher",
"harmony",
"fusion",
"aftereffects"
"aftereffects",
# "resolve"
]
optional = True

View file

@ -12,9 +12,12 @@ class ExtractJpegEXR(pyblish.api.InstancePlugin):
"""Create jpg thumbnail from sequence using ffmpeg"""
label = "Extract Jpeg EXR"
hosts = ["shell", "fusion"]
order = pyblish.api.ExtractorOrder
families = ["imagesequence", "render", "render2d", "source"]
families = [
"imagesequence", "render", "render2d",
"source", "plate", "take"
]
hosts = ["shell", "fusion", "resolve"]
enabled = False
# presetable attribute
@ -50,7 +53,8 @@ class ExtractJpegEXR(pyblish.api.InstancePlugin):
if not isinstance(repre['files'], (list, tuple)):
input_file = repre['files']
else:
input_file = repre['files'][0]
file_index = int(float(len(repre['files'])) * 0.5)
input_file = repre['files'][file_index]
stagingdir = os.path.normpath(repre.get("stagingDir"))

View file

@ -0,0 +1,41 @@
import os
import pyblish.api
import pype.api
import opentimelineio as otio
class ExtractOTIOFile(pype.api.Extractor):
"""
Extractor exporting an OTIO file
"""
label = "Extract OTIO file"
order = pyblish.api.ExtractorOrder - 0.45
families = ["workfile"]
hosts = ["resolve"]
def process(self, instance):
# create representation data
if "representations" not in instance.data:
instance.data["representations"] = []
name = instance.data["name"]
staging_dir = self.staging_dir(instance)
otio_timeline = instance.context.data["otioTimeline"]
# create otio timeline representation
otio_file_name = name + ".otio"
otio_file_path = os.path.join(staging_dir, otio_file_name)
otio.adapters.write_to_file(otio_timeline, otio_file_path)
representation_otio = {
'name': "otio",
'ext': "otio",
'files': otio_file_name,
"stagingDir": staging_dir,
}
instance.data["representations"].append(representation_otio)
self.log.info("Added OTIO file representation: {}".format(
representation_otio))

View file

@ -0,0 +1,426 @@
"""
Requires:
instance -> handleStart
instance -> handleEnd
instance -> otioClip
instance -> otioReviewClips
Optional:
instance -> workfileFrameStart
instance -> resolutionWidth
instance -> resolutionHeight
Provides:
instance -> otioReviewClips
"""
import os
import clique
import opentimelineio as otio
from pyblish import api
import pype
class ExtractOTIOReview(pype.api.Extractor):
"""
Extract OTIO timeline into one concatenated image sequence file.
The `otioReviewClip` is holding trimmed ranges of clips relative to
the `otioClip`. Handles are added during looping by the available list
of Gaps and Clips in the track. Handle start (head) is added before the
first Gap or Clip and handle end (tail) is added at the end of the last
Clip or Gap. In case source material is missing after the handles are
added, a Gap is inserted. At the end all Gaps are converted to black
frames and available material is converted to image sequence frames.
Finally a representation is created and added to the instance.
At the moment only image sequence output is supported
"""
order = api.ExtractorOrder - 0.45
label = "Extract OTIO review"
hosts = ["resolve"]
families = ["review"]
# plugin default attributes
temp_file_head = "tempFile."
to_width = 1280
to_height = 720
output_ext = ".jpg"
def process(self, instance):
# TODO: convert resulting image sequence to mp4
# TODO: add audio output to the mp4 if audio in review is on.
# get otio clip and other time info from instance clip
# TODO: what if handles are different in `versionData`?
handle_start = instance.data["handleStart"]
handle_end = instance.data["handleEnd"]
otio_review_clips = instance.data["otioReviewClips"]
# add plugin wide attributes
self.representation_files = list()
self.used_frames = list()
self.workfile_start = int(instance.data.get(
"workfileFrameStart", 1001)) - handle_start
self.padding = len(str(self.workfile_start))
self.used_frames.append(self.workfile_start)
self.to_width = instance.data.get(
"resolutionWidth") or self.to_width
self.to_height = instance.data.get(
"resolutionHeight") or self.to_height
# skip instance if no reviewable data available
if (not isinstance(otio_review_clips[0], otio.schema.Clip)) \
and (len(otio_review_clips) == 1):
self.log.warning(
"Instance `{}` has nothing to process".format(instance))
return
else:
self.staging_dir = self.staging_dir(instance)
if not instance.data.get("representations"):
instance.data["representations"] = list()
# loop available clips in otio track
for index, r_otio_cl in enumerate(otio_review_clips):
# QUESTION: what if transition on clip?
# get frame range values
src_range = r_otio_cl.source_range
start = src_range.start_time.value
duration = src_range.duration.value
available_range = None
self.actual_fps = src_range.duration.rate
# add available range only if not gap
if isinstance(r_otio_cl, otio.schema.Clip):
available_range = r_otio_cl.available_range()
self.actual_fps = available_range.duration.rate
# reframing handles conditions
if (len(otio_review_clips) > 1) and (index == 0):
# more clips | first clip reframing with handle
start -= handle_start
duration += handle_start
elif len(otio_review_clips) > 1 \
and (index == len(otio_review_clips) - 1):
# more clips | last clip reframing with handle
duration += handle_end
elif len(otio_review_clips) == 1:
# one clip | add both handles
start -= handle_start
duration += (handle_start + handle_end)
if available_range:
available_range = self._trim_available_range(
available_range, start, duration, self.actual_fps)
# process all track items of the track
if isinstance(r_otio_cl, otio.schema.Clip):
# process Clip
media_ref = r_otio_cl.media_reference
metadata = media_ref.metadata
is_sequence = None
# check in two way if it is sequence
if hasattr(otio.schema, "ImageSequenceReference"):
# for OpenTimelineIO 0.13 and newer
if isinstance(media_ref,
otio.schema.ImageSequenceReference):
is_sequence = True
else:
# for OpenTimelineIO 0.12 and older
if metadata.get("padding"):
is_sequence = True
if is_sequence:
# file sequence way
if hasattr(media_ref, "target_url_base"):
dirname = media_ref.target_url_base
head = media_ref.name_prefix
tail = media_ref.name_suffix
first, last = pype.lib.otio_range_to_frame_range(
available_range)
collection = clique.Collection(
head=head,
tail=tail,
padding=media_ref.frame_zero_padding
)
collection.indexes.update(
[i for i in range(first, (last + 1))])
# render segment
self._render_seqment(
sequence=[dirname, collection])
# generate used frames
self._generate_used_frames(
len(collection.indexes))
else:
# in case it is file sequence but not new OTIO schema
# `ImageSequenceReference`
path = media_ref.target_url
collection_data = pype.lib.make_sequence_collection(
path, available_range, metadata)
dir_path, collection = collection_data
# render segment
self._render_seqment(
sequence=[dir_path, collection])
# generate used frames
self._generate_used_frames(
len(collection.indexes))
else:
# single video file way
path = media_ref.target_url
# render video file to sequence
self._render_seqment(
video=[path, available_range])
# generate used frames
self._generate_used_frames(
available_range.duration.value)
# QUESTION: what if nested track composition is in place?
else:
# at last process a Gap
self._render_seqment(gap=duration)
# generate used frames
self._generate_used_frames(duration)
# creating and registering representation
representation = self._create_representation(start, duration)
instance.data["representations"].append(representation)
self.log.info(f"Adding representation: {representation}")
def _create_representation(self, start, duration):
"""
Creating representation data.
Args:
start (int): start frame
duration (int): duration frames
Returns:
dict: representation data
"""
end = start + duration
# create default representation data
representation_data = {
"frameStart": start,
"frameEnd": end,
"stagingDir": self.staging_dir,
"tags": ["review", "ftrackreview", "delete"]
}
collection = clique.Collection(
self.temp_file_head,
tail=self.output_ext,
padding=self.padding,
indexes=set(self.used_frames)
)
start = min(collection.indexes)
end = max(collection.indexes)
files = [f for f in collection]
ext = collection.format("{tail}")
representation_data.update({
"name": ext[1:],
"ext": ext[1:],
"files": files,
"frameStart": start,
"frameEnd": end,
})
return representation_data
def _trim_available_range(self, avl_range, start, duration, fps):
"""
Trim available media range to source range.
If a missing media range is detected it will be converted into
black frame gaps.
Args:
avl_range (otio.time.TimeRange): media available time range
start (int): start frame
duration (int): duration frames
fps (float): frame rate
Returns:
otio.time.TimeRange: trimmed available range
"""
avl_start = int(avl_range.start_time.value)
src_start = int(avl_start + start)
avl_durtation = int(avl_range.duration.value)
# if media start is less than the clip requires
if src_start < avl_start:
# calculate gap
gap_duration = avl_start - src_start
# create gap data to disk
self._render_seqment(gap=gap_duration)
# generate used frames
self._generate_used_frames(gap_duration)
# fix start and end to correct values
start = 0
duration -= gap_duration
# if media duration is shorter than the clip requirement
if duration > avl_durtation:
# calculate gap
gap_start = int(src_start + avl_durtation)
gap_end = int(src_start + duration)
gap_duration = gap_end - gap_start
# create gap data to disk
self._render_seqment(gap=gap_duration, end_offset=avl_durtation)
# generate used frames
self._generate_used_frames(gap_duration, end_offset=avl_durtation)
# fix duration length
duration = avl_durtation
# return correct trimmed range
return pype.lib.trim_media_range(
avl_range, pype.lib.range_from_frames(start, duration, fps)
)
def _render_seqment(self, sequence=None,
video=None, gap=None, end_offset=None):
"""
Render segment into image sequence frames.
Using ffmpeg to convert compatible video and image source
to defined image sequence format.
Args:
sequence (list): input dir path string, collection object in list
video (list)[optional]: video_path string, otio_range in list
gap (int)[optional]: gap duration
end_offset (int)[optional]: offset gap frame start in frames
"""
"""
# get rendering app path
ffmpeg_path = pype.lib.get_ffmpeg_tool_path("ffmpeg")
# create path and frame start to destination
output_path, out_frame_start = self._get_ffmpeg_output()
if end_offset:
out_frame_start += end_offset
# start command list
command = [ffmpeg_path]
if sequence:
input_dir, collection = sequence
in_frame_start = min(collection.indexes)
# converting image sequence to image sequence
input_file = collection.format("{head}{padding}{tail}")
input_path = os.path.join(input_dir, input_file)
# form command for rendering gap files
command.extend([
"-start_number {}".format(in_frame_start),
"-i {}".format(input_path)
])
elif video:
video_path, otio_range = video
frame_start = otio_range.start_time.value
input_fps = otio_range.start_time.rate
frame_duration = otio_range.duration.value
sec_start = pype.lib.frames_to_secons(frame_start, input_fps)
sec_duration = pype.lib.frames_to_secons(frame_duration, input_fps)
# form command for rendering gap files
command.extend([
"-ss {}".format(sec_start),
"-t {}".format(sec_duration),
"-i {}".format(video_path)
])
elif gap:
sec_duration = pype.lib.frames_to_secons(
gap, self.actual_fps)
# form command for rendering gap files
command.extend([
"-t {} -r {}".format(sec_duration, self.actual_fps),
"-f lavfi",
"-i color=c=black:s={}x{}".format(self.to_width,
self.to_height),
"-tune stillimage"
])
# add output attributes
command.extend([
"-start_number {}".format(out_frame_start),
output_path
])
# execute
self.log.debug("Executing: {}".format(" ".join(command)))
output = pype.api.subprocess(" ".join(command), shell=True)
self.log.debug("Output: {}".format(output))
def _generate_used_frames(self, duration, end_offset=None):
"""
Generating used frames into plugin argument `used_frames`.
The argument `used_frames` is used for checking next available
frame to start with during rendering sequence segments.
Args:
duration (int): duration of frames needed to be generated
end_offset (int)[optional]: in case frames need to be offset
"""
padding = "{{:0{}d}}".format(self.padding)
if end_offset:
new_frames = list()
start_frame = self.used_frames[-1]
for index in range((end_offset + 1),
(int(end_offset + duration) + 1)):
seq_number = padding.format(start_frame + index)
self.log.debug(
f"index: `{index}` | seq_number: `{seq_number}`")
new_frames.append(int(seq_number))
new_frames += self.used_frames
self.used_frames = new_frames
else:
for _i in range(1, (int(duration) + 1)):
if self.used_frames[-1] == self.workfile_start:
seq_number = padding.format(self.used_frames[-1])
self.workfile_start -= 1
else:
seq_number = padding.format(self.used_frames[-1] + 1)
self.used_frames.append(int(seq_number))
def _get_ffmpeg_output(self):
"""
Returning ffmpeg output command arguments.
Returns:
str: output_path is path for image sequence output
int: out_frame_start is starting sequence frame
"""
output_file = "{}{}{}".format(
self.temp_file_head,
"%0{}d".format(self.padding),
self.output_ext
)
# create path to destination
output_path = os.path.join(self.staging_dir, output_file)
# generate frame start
out_frame_start = self.used_frames[-1] + 1
if self.used_frames[-1] == self.workfile_start:
out_frame_start = self.used_frames[-1]
return output_path, out_frame_start
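For reference, the gap branch of `_render_seqment` above assembles an ffmpeg
command along these lines; staging path, fps and frame numbers are
assumptions:
```
ffmpeg -t 2.0 -r 25.0 -f lavfi -i color=c=black:s=1280x720 -tune stillimage -start_number 1001 /staging/tempFile.%04d.jpg
```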

View file

@ -33,7 +33,8 @@ class ExtractReview(pyblish.api.InstancePlugin):
"harmony",
"standalonepublisher",
"fusion",
"tvpaint"
"tvpaint",
"resolve"
]
# Supported extensions

View file

@ -329,6 +329,19 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
if repre.get("outputName"):
template_data["output"] = repre['outputName']
template_data["representation"] = repre["name"]
ext = repre["ext"]
if ext.startswith("."):
self.log.warning((
"Implementation warning: <\"{}\">"
" Representation's extension stored under \"ext\" key "
" starts with a dot (\"{}\")."
).format(repre["name"], ext))
ext = ext[1:]
repre["ext"] = ext
template_data["ext"] = ext
template = os.path.normpath(
anatomy.templates[template_name]["path"])
@ -355,7 +368,6 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
test_dest_files = list()
for i in [1, 2]:
template_data["representation"] = repre['ext']
template_data["frame"] = src_padding_exp % i
anatomy_filled = anatomy.format(template_data)
template_filled = anatomy_filled[template_name]["path"]
@ -376,6 +388,7 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
index_frame_start = None
# TODO use frame padding from right template group
if repre.get("frameStart") is not None:
frame_start_padding = int(
anatomy.templates["render"].get(
@ -411,7 +424,8 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
dst = "{0}{1}{2}".format(
dst_head,
dst_padding,
dst_tail).replace("..", ".")
dst_tail
)
self.log.debug("destination: `{}`".format(dst))
src = os.path.join(stagingdir, src_file_name)
@ -431,7 +445,7 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
dst_head,
dst_start_frame,
dst_tail
).replace("..", ".")
)
repre['published_path'] = dst
else:
@ -449,13 +463,11 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
"Given file name is a full path"
)
template_data["representation"] = repre['ext']
src = os.path.join(stagingdir, fname)
anatomy_filled = anatomy.format(template_data)
template_filled = anatomy_filled[template_name]["path"]
repre_context = template_filled.used_values
dst = os.path.normpath(template_filled).replace("..", ".")
dst = os.path.normpath(template_filled)
instance.data["transfers"].append([src, dst])

View file

@ -95,7 +95,7 @@ class IntegrateThumbnails(pyblish.api.InstancePlugin):
template_data.update({
"_id": str(thumbnail_id),
"thumbnail_root": os.environ.get("AVALON_THUMBNAIL_ROOT"),
"ext": file_extension,
"ext": file_extension[1:],
"thumbnail_type": "thumbnail"
})

View file

@ -42,7 +42,7 @@ class ExtractAlembic(pype.api.Extractor):
representation = {
'name': 'abc',
'ext': '.abc',
'ext': 'abc',
'files': file_name,
"stagingDir": staging_dir,
}

View file

@ -42,7 +42,7 @@ class ExtractVDBCache(pype.api.Extractor):
representation = {
'name': 'mov',
'ext': '.mov',
'ext': 'mov',
'files': output,
"stagingDir": staging_dir,
}

View file

@ -2,7 +2,7 @@ from avalon import api, lib
from pype.api import Logger
log = Logger().get_logger(__name__, "asset_creator")
log = Logger().get_logger(__name__)
class AssetCreator(api.Action):

View file

@ -127,18 +127,18 @@ class CreateRender(avalon.maya.Creator):
system_settings = get_system_settings()["modules"]
deadline_enabled = system_settings["deadline"]["enabled"]
muster_enabled = system_settings["muster"]["enabled"]
deadline_url = system_settings["deadline"]["DEADLINE_REST_URL"]
muster_url = system_settings["muster"]["MUSTER_REST_URL"]
if deadline_url and muster_url:
if deadline_enabled and muster_enabled:
self.log.error(
"Both Deadline and Muster are enabled. " "Cannot support both."
)
raise RuntimeError("Both Deadline and Muster are enabled")
if deadline_url is None:
self.log.warning("Deadline REST API url not found.")
else:
if deadline_enabled:
argument = "{}/api/pools?NamesOnly=true".format(deadline_url)
try:
response = self._requests_get(argument)
@ -155,9 +155,7 @@ class CreateRender(avalon.maya.Creator):
# set any secondary pools
self.data["secondaryPool"] = ["-"] + pools
if muster_url is None:
self.log.warning("Muster REST API URL not found.")
else:
if muster_enabled:
self.log.info(">>> Loading Muster credentials ...")
self._load_credentials()
self.log.info(">>> Getting pools ...")

View file

@ -5,7 +5,7 @@
from avalon import api
from pype.api import Logger
log = Logger().get_logger(__name__, "nuke")
log = Logger().get_logger(__name__)
class SetFrameRangeLoader(api.Loader):

View file

@ -0,0 +1,38 @@
import pyblish.api
class CollectClipResolution(pyblish.api.InstancePlugin):
"""Collect clip geometry resolution"""
order = pyblish.api.CollectorOrder - 0.1
label = "Collect Clip Resolution"
hosts = ["resolve"]
families = ["clip"]
def process(self, instance):
sequence = instance.context.data['activeSequence']
item = instance.data["item"]
source_resolution = instance.data.get("sourceResolution", None)
resolution_width = int(sequence.format().width())
resolution_height = int(sequence.format().height())
pixel_aspect = sequence.format().pixelAspect()
# source exception
if source_resolution:
resolution_width = int(item.source().mediaSource().width())
resolution_height = int(item.source().mediaSource().height())
pixel_aspect = item.source().mediaSource().pixelAspect()
resolution_data = {
"resolutionWidth": resolution_width,
"resolutionHeight": resolution_height,
"pixelAspect": pixel_aspect
}
# add to instance data
instance.data.update(resolution_data)
self.log.info("Resolution of instance '{}' is: {}".format(
instance,
resolution_data
))

View file

@ -1,4 +1,4 @@
from pprint import pformat
# from pprint import pformat
from pype.hosts import resolve
from pype.hosts.resolve import lib
@ -6,45 +6,216 @@ from pype.hosts.resolve import lib
class CreateShotClip(resolve.Creator):
"""Publishable clip"""
label = "Shot"
label = "Create Publishable Clip"
family = "clip"
icon = "film"
defaults = ["Main"]
gui_name = "Pype sequencial rename with hirerarchy"
gui_info = "Define sequencial rename and fill hierarchy data."
gui_tracks = resolve.get_video_track_names()
gui_name = "Pype publish attributes creator"
gui_info = "Define sequential rename and fill hierarchy data."
gui_inputs = {
"clipName": "{episode}{sequence}{shot}",
"hierarchy": "{folder}/{sequence}/{shot}",
"countFrom": 10,
"steps": 10,
"renameHierarchy": {
"type": "section",
"label": "Shot Hierarchy And Rename Settings",
"target": "ui",
"order": 0,
"value": {
"hierarchy": {
"value": "{folder}/{sequence}",
"type": "QLineEdit",
"label": "Shot Parent Hierarchy",
"target": "tag",
"toolTip": "Parents folder for shot root folder, Template filled with `Hierarchy Data` section", # noqa
"order": 0},
"clipRename": {
"value": False,
"type": "QCheckBox",
"label": "Rename clips",
"target": "ui",
"toolTip": "Renaming selected clips on fly", # noqa
"order": 1},
"clipName": {
"value": "{sequence}{shot}",
"type": "QLineEdit",
"label": "Clip Name Template",
"target": "ui",
"toolTip": "template for creating shot namespaused for renaming (use rename: on)", # noqa
"order": 2},
"countFrom": {
"value": 10,
"type": "QSpinBox",
"label": "Count sequence from",
"target": "ui",
"toolTip": "Set when the sequence number stafrom", # noqa
"order": 3},
"countSteps": {
"value": 10,
"type": "QSpinBox",
"label": "Stepping number",
"target": "ui",
"toolTip": "What number is adding every new step", # noqa
"order": 4},
}
},
"hierarchyData": {
"folder": "shots",
"shot": "sh####",
"track": "{track}",
"sequence": "sc010",
"episode": "ep01"
"type": "dict",
"label": "Shot Template Keywords",
"target": "tag",
"order": 1,
"value": {
"folder": {
"value": "shots",
"type": "QLineEdit",
"label": "{folder}",
"target": "tag",
"toolTip": "Name of folder used for root of generated shots.\nUsable tokens:\n\t{_clip_}: name of used clip\n\t{_track_}: name of parent track layer\n\t{_sequence_}: name of parent sequence (timeline)", # noqa
"order": 0},
"episode": {
"value": "ep01",
"type": "QLineEdit",
"label": "{episode}",
"target": "tag",
"toolTip": "Name of episode.\nUsable tokens:\n\t{_clip_}: name of used clip\n\t{_track_}: name of parent track layer\n\t{_sequence_}: name of parent sequence (timeline)", # noqa
"order": 1},
"sequence": {
"value": "sq01",
"type": "QLineEdit",
"label": "{sequence}",
"target": "tag",
"toolTip": "Name of sequence of shots.\nUsable tokens:\n\t{_clip_}: name of used clip\n\t{_track_}: name of parent track layer\n\t{_sequence_}: name of parent sequence (timeline)", # noqa
"order": 2},
"track": {
"value": "{_track_}",
"type": "QLineEdit",
"label": "{track}",
"target": "tag",
"toolTip": "Name of sequence of shots.\nUsable tokens:\n\t{_clip_}: name of used clip\n\t{_track_}: name of parent track layer\n\t{_sequence_}: name of parent sequence (timeline)", # noqa
"order": 3},
"shot": {
"value": "sh###",
"type": "QLineEdit",
"label": "{shot}",
"target": "tag",
"toolTip": "Name of shot. `#` is converted to paded number. \nAlso could be used with usable tokens:\n\t{_clip_}: name of used clip\n\t{_track_}: name of parent track layer\n\t{_sequence_}: name of parent sequence (timeline)", # noqa
"order": 4}
}
},
"verticalSync": {
"type": "section",
"label": "Vertical Synchronization Of Attributes",
"target": "ui",
"order": 2,
"value": {
"vSyncOn": {
"value": True,
"type": "QCheckBox",
"label": "Enable Vertical Sync",
"target": "ui",
"toolTip": "Switch on if you want clips above each other to share its attributes", # noqa
"order": 0},
"vSyncTrack": {
"value": gui_tracks, # noqa
"type": "QComboBox",
"label": "Master track",
"target": "ui",
"toolTip": "Select driving track name which should be mastering all others", # noqa
"order": 1}
}
},
"publishSettings": {
"type": "section",
"label": "Publish Settings",
"target": "ui",
"order": 3,
"value": {
"subsetName": {
"value": ["<track_name>", "main", "bg", "fg", "bg",
"animatic"],
"type": "QComboBox",
"label": "Subset Name",
"target": "ui",
"toolTip": "chose subset name patern, if <track_name> is selected, name of track layer will be used", # noqa
"order": 0},
"subsetFamily": {
"value": ["plate", "take"],
"type": "QComboBox",
"label": "Subset Family",
"target": "ui", "toolTip": "What use of this subset is for", # noqa
"order": 1},
"reviewTrack": {
"value": ["< none >"] + gui_tracks,
"type": "QComboBox",
"label": "Use Review Track",
"target": "ui",
"toolTip": "Generate preview videos on fly, if `< none >` is defined nothing will be generated.", # noqa
"order": 2},
"audio": {
"value": False,
"type": "QCheckBox",
"label": "Include audio",
"target": "tag",
"toolTip": "Process subsets with corresponding audio", # noqa
"order": 3},
"sourceResolution": {
"value": False,
"type": "QCheckBox",
"label": "Source resolution",
"target": "tag",
"toolTip": "Is resloution taken from timeline or source?", # noqa
"order": 4},
}
},
"shotAttr": {
"type": "section",
"label": "Shot Attributes",
"target": "ui",
"order": 4,
"value": {
"workfileFrameStart": {
"value": 1001,
"type": "QSpinBox",
"label": "Workfiles Start Frame",
"target": "tag",
"toolTip": "Set workfile starting frame number", # noqa
"order": 0},
"handleStart": {
"value": 0,
"type": "QSpinBox",
"label": "Handle start (head)",
"target": "tag",
"toolTip": "Handle at start of clip", # noqa
"order": 1},
"handleEnd": {
"value": 0,
"type": "QSpinBox",
"label": "Handle end (tail)",
"target": "tag",
"toolTip": "Handle at end of clip", # noqa
"order": 2},
}
}
}
presets = None
def process(self):
# solve gui inputs overwrites from presets
# overwrite gui inputs from presets
# get key pairs from presets and match them to ui inputs
for k, v in self.gui_inputs.items():
if isinstance(v, dict):
# nested dictionary (only one level allowed)
for _k, _v in v.items():
if self.presets.get(_k):
self.gui_inputs[k][_k] = self.presets[_k]
if v["type"] in ("dict", "section"):
# nested dictionary (only one level allowed
# for sections and dict)
for _k, _v in v["value"].items():
if self.presets.get(_k) is not None:
self.gui_inputs[k][
"value"][_k]["value"] = self.presets[_k]
if self.presets.get(k):
self.gui_inputs[k] = self.presets[k]
self.gui_inputs[k]["value"] = self.presets[k]
# open widget for plugin inputs
widget = self.widget(self.gui_name, self.gui_info, self.gui_inputs)
widget.exec_()
print(f"__ selected_clips: {self.selected}")
if len(self.selected) < 1:
return
@ -52,28 +223,41 @@ class CreateShotClip(resolve.Creator):
print("Operation aborted")
return
self.rename_add = 0
# get ui output for track name for vertical sync
v_sync_track = widget.result["vSyncTrack"]["value"]
# sort selected track items: master (vSync) track items first
sorted_selected_track_items = list()
unsorted_selected_track_items = list()
for track_item_data in self.selected:
if track_item_data["track"]["name"] in v_sync_track:
sorted_selected_track_items.append(track_item_data)
else:
unsorted_selected_track_items.append(track_item_data)
sorted_selected_track_items.extend(unsorted_selected_track_items)
# sequence attrs
sq_frame_start = self.sequence.GetStartFrame()
sq_markers = self.sequence.GetMarkers()
print(f"__ sq_frame_start: {pformat(sq_frame_start)}")
print(f"__ seq_markers: {pformat(sq_markers)}")
# create media bin for compound clips (trackItems)
mp_folder = resolve.create_current_sequence_media_bin(self.sequence)
print(f"_ mp_folder: {mp_folder.GetName()}")
lib.rename_add = 0
for i, t_data in enumerate(self.selected):
lib.rename_index = i
kwargs = {
"ui_inputs": widget.result,
"avalon": self.data,
"mp_folder": mp_folder,
"sq_frame_start": sq_frame_start,
"sq_markers": sq_markers
}
# clear color after it is done
t_data["clip"]["item"].ClearClipColor()
for i, track_item_data in enumerate(sorted_selected_track_items):
self.rename_index = i
# convert track item to timeline media pool item
resolve.create_compound_clip(
t_data,
mp_folder,
rename=True,
**dict(
{"presets": widget.result})
)
track_item = resolve.PublishClip(
self, track_item_data, **kwargs).convert()
track_item.SetClipColor(lib.publish_clip_color)
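To make the preset merge at the top of `process()` concrete, a small self-contained example (preset keys hypothetical): inner keys of `dict`/`section` inputs are matched by name one level deep, while a matching top-level key overwrites an input's whole `value`:

gui_inputs = {
    "renameHierarchy": {
        "type": "section",
        "value": {"countFrom": {"value": 10}},
    },
}
presets = {"countFrom": 1001}

for k, v in gui_inputs.items():
    if v["type"] in ("dict", "section"):
        for _k in v["value"]:
            if presets.get(_k) is not None:
                gui_inputs[k]["value"][_k]["value"] = presets[_k]
    if presets.get(k):
        gui_inputs[k]["value"] = presets[k]

assert gui_inputs["renameHierarchy"]["value"]["countFrom"]["value"] == 1001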

View file

@ -0,0 +1,129 @@
import pyblish
from pype.hosts import resolve
# # developer reload modules
from pprint import pformat
class CollectInstances(pyblish.api.ContextPlugin):
"""Collect all Track items selection."""
order = pyblish.api.CollectorOrder - 0.59
label = "Collect Instances"
hosts = ["resolve"]
def process(self, context):
otio_timeline = context.data["otioTimeline"]
selected_track_items = resolve.get_current_track_items(
filter=True, selecting_color=resolve.publish_clip_color)
self.log.info(
"Processing enabled track items: {}".format(
len(selected_track_items)))
for track_item_data in selected_track_items:
data = dict()
track_item = track_item_data["clip"]["item"]
# get pype tag data
tag_data = resolve.get_track_item_pype_tag(track_item)
self.log.debug(f"__ tag_data: {pformat(tag_data)}")
if not tag_data:
continue
if tag_data.get("id") != "pyblish.avalon.instance":
continue
media_pool_item = track_item.GetMediaPoolItem()
clip_property = media_pool_item.GetClipProperty()
self.log.debug(f"clip_property: {clip_property}")
# add tag data to instance data
data.update({
k: v for k, v in tag_data.items()
if k not in ("id", "applieswhole", "label")
})
asset = tag_data["asset"]
subset = tag_data["subset"]
# insert family into families
family = tag_data["family"]
families = [str(f) for f in tag_data["families"]]
families.insert(0, str(family))
data.update({
"name": "{} {} {}".format(asset, subset, families),
"asset": asset,
"item": track_item,
"families": families,
"publish": resolve.get_publish_attribute(track_item),
"fps": context.data["fps"]
})
# otio clip data
otio_data = resolve.get_otio_clip_instance_data(
otio_timeline, track_item_data) or {}
data.update(otio_data)
# add resolution
self.get_resolution_to_data(data, context)
# create instance
instance = context.create_instance(**data)
# create shot instance for shot attributes create/update
self.create_shot_instance(context, track_item, **data)
self.log.info("Creating instance: {}".format(instance))
self.log.debug(
"_ instance.data: {}".format(pformat(instance.data)))
def get_resolution_to_data(self, data, context):
assert data.get("otioClip"), "Missing `otioClip` data"
# solve source resolution option
if data.get("sourceResolution", None):
otio_clip_metadata = data[
"otioClip"].media_reference.metadata
data.update({
"resolutionWidth": otio_clip_metadata["width"],
"resolutionHeight": otio_clip_metadata["height"],
"pixelAspect": otio_clip_metadata["pixelAspect"]
})
else:
otio_tl_metadata = context.data["otioTimeline"].metadata
data.update({
"resolutionWidth": otio_tl_metadata["width"],
"resolutionHeight": otio_tl_metadata["height"],
"pixelAspect": otio_tl_metadata["pixelAspect"]
})
def create_shot_instance(self, context, track_item, **data):
master_layer = data.get("masterLayer")
hierarchy_data = data.get("hierarchyData")
if not master_layer:
return
if not hierarchy_data:
return
asset = data["asset"]
subset = "shotMain"
# insert family into families
family = "shot"
data.update({
"name": "{} {} {}".format(asset, subset, family),
"subset": subset,
"asset": asset,
"family": family,
"families": [],
"publish": resolve.get_publish_attribute(track_item)
})
context.create_instance(**data)
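A hypothetical tag payload showing what the collector above expects on a track item — the `id` marker gates collection, and `id`, `applieswhole` and `label` are stripped before the rest is merged into instance data:

tag_data = {
    "id": "pyblish.avalon.instance",  # required marker, filtered out above
    "asset": "sh0100",                # hypothetical values below
    "subset": "plateMain",
    "family": "plate",
    "families": ["review"],
    "label": "sh0100 plateMain",      # stripped together with "id"
}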

Some files were not shown because too many files have changed in this diff