Merge branch '2.x/develop' into bugfix/237-Updating_a_look_where_the_shader_name_changed_leaves_the_geo_without_a_shader
Commit ea521f8e12
311 changed files with 15444 additions and 1992 deletions
@@ -1,3 +1,7 @@
from .settings import (
    system_settings,
    project_settings
)
from pypeapp import (
    Logger,
    Anatomy,
@@ -49,6 +53,9 @@ from .lib import (
from .lib import _subprocess as subprocess

__all__ = [
    "system_settings",
    "project_settings",

    "Logger",
    "Anatomy",
    "project_overrides_dir_path",
@@ -46,13 +46,14 @@ class ResolvePrelaunch(PypeHook):
                "`RESOLVE_UTILITY_SCRIPTS_DIR` or reinstall DaVinci Resolve. \n"
                f"RESOLVE_UTILITY_SCRIPTS_DIR: `{us_dir}`"
            )
        self.log.debug(f"-- us_dir: `{us_dir}`")

        # correctly format path for pre python script
        pre_py_sc = os.path.normpath(env.get("PRE_PYTHON_SCRIPT", ""))
        env["PRE_PYTHON_SCRIPT"] = pre_py_sc

        self.log.debug(f"-- pre_py_sc: `{pre_py_sc}`...")
        try:
            __import__("pype.resolve")
            __import__("pype.hosts.resolve")
            __import__("pyblish")

        except ImportError as e:

@@ -62,6 +63,7 @@ class ResolvePrelaunch(PypeHook):
        else:
            # Resolve Setup integration
            importlib.reload(utils)
            self.log.debug(f"-- utils.__file__: `{utils.__file__}`")
            utils.setup(env)

        return True
@@ -18,12 +18,7 @@ def set_scene_settings(settings):
    if (args[0]["frameStart"] && args[0]["frameEnd"])
    {
        var duration = args[0]["frameEnd"] - args[0]["frameStart"] + 1
        if (frame.numberOf() > duration)
        {
            frame.remove(
                duration, frame.numberOf() - duration
            );
        }

        if (frame.numberOf() < duration)
        {
            frame.insert(
@@ -174,6 +174,25 @@ class ReferenceLoader(api.Loader):

        assert os.path.exists(path), "%s does not exist." % path

        # Need to save alembic settings and reapply, because referencing
        # resets them to incoming data.
        alembic_attrs = ["speed", "offset", "cycleType"]
        alembic_data = {}
        if representation["name"] == "abc":
            alembic_nodes = cmds.ls(
                "{}:*".format(members[0].split(":")[0]), type="AlembicNode"
            )
            if alembic_nodes:
                for attr in alembic_attrs:
                    node_attr = "{}.{}".format(alembic_nodes[0], attr)
                    alembic_data[attr] = cmds.getAttr(node_attr)
            else:
                cmds.warning(
                    "No alembic nodes found in {}".format(
                        cmds.ls("{}:*".format(members[0].split(":")[0]))
                    )
                )

        try:
            content = cmds.file(path,
                                loadReference=reference_node,

@@ -195,6 +214,16 @@ class ReferenceLoader(api.Loader):

        self.log.warning("Ignoring file read error:\n%s", exc)

        # Reapply alembic settings.
        if representation["name"] == "abc":
            alembic_nodes = cmds.ls(
                "{}:*".format(members[0].split(":")[0]), type="AlembicNode"
            )
            if alembic_nodes:
                for attr in alembic_attrs:
                    value = alembic_data[attr]
                    cmds.setAttr("{}.{}".format(alembic_nodes[0], attr), value)

        # Fix PLN-40 for older containers created with Avalon that had the
        # `.verticesOnlySet` set to True.
        if cmds.getAttr("{}.verticesOnlySet".format(node)):
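The save-and-reapply dance above generalizes to any attributes that a reference reload resets. A minimal sketch of the same pattern, assuming `maya.cmds` is available; `preserve_attrs`, `nodes`, `attrs` and `operation` are hypothetical names, not part of this changeset:

    from maya import cmds

    def preserve_attrs(nodes, attrs, operation):
        """Snapshot attrs, run an operation that may reset them, then restore."""
        snapshot = {
            (node, attr): cmds.getAttr("{}.{}".format(node, attr))
            for node in nodes for attr in attrs
        }
        operation()  # e.g. a reference reload that resets attrs to incoming data
        for (node, attr), value in snapshot.items():
            cmds.setAttr("{}.{}".format(node, attr), value)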
@@ -1,7 +1,6 @@
import os
import re
import sys
import getpass
from collections import OrderedDict

from avalon import api, io, lib
@@ -1060,310 +1059,6 @@ def get_write_node_template_attr(node):
    return avalon.nuke.lib.fix_data_for_node_create(correct_data)


class BuildWorkfile(WorkfileSettings):
    """
    Building the first version of a workfile.

    Settings are taken from presets and the database. It will add all
    subsets in their last version for the defined representations.

    """
    xpos = 0
    ypos = 0
    xpos_size = 80
    ypos_size = 90
    xpos_gap = 50
    ypos_gap = 50
    pos_layer = 10

    def __init__(self,
                 root_path=None,
                 root_node=None,
                 nodes=None,
                 to_script=None,
                 **kwargs):
        """
        Collect context data and resolve the workfile directory and file
        name through anatomy.

        Arguments:
            root_path (str): description
            root_node (nuke.Node): description
            nodes (list): list of nuke.Node
            nodes_effects (dict): dictionary with subsets

        Example:
            nodes_effects = {
                "plateMain": {
                    "nodes": [
                        [("Class", "Reformat"),
                         ("resize", "distort"),
                         ("flip", True)],

                        [("Class", "Grade"),
                         ("blackpoint", 0.5),
                         ("multiply", 0.4)]
                    ]
                },
            }

        """

        WorkfileSettings.__init__(self,
                                  root_node=root_node,
                                  nodes=nodes,
                                  **kwargs)
        self.to_script = to_script
        # collect data for formatting
        self.data_tmp = {
            "project": {"name": self._project["name"],
                        "code": self._project["data"].get("code", "")},
            "asset": self._asset or os.environ["AVALON_ASSET"],
            "task": kwargs.get("task") or api.Session["AVALON_TASK"],
            "hierarchy": kwargs.get("hierarchy") or pype.get_hierarchy(),
            "version": kwargs.get("version", {}).get("name", 1),
            "user": getpass.getuser(),
            "comment": "firstBuild",
            "ext": "nk"
        }

        # get presets from anatomy
        anatomy = get_anatomy()
        # format anatomy
        anatomy_filled = anatomy.format(self.data_tmp)

        # get dir and file for workfile
        self.work_dir = anatomy_filled["work"]["folder"]
        self.work_file = anatomy_filled["work"]["file"]

    def save_script_as(self, path=None):
        # first clear anything in open window
        nuke.scriptClear()

        if not path:
            dir = self.work_dir
            path = os.path.join(
                self.work_dir,
                self.work_file).replace("\\", "/")
        else:
            dir = os.path.dirname(path)

        # check if folder is created
        if not os.path.exists(dir):
            os.makedirs(dir)

        # save script to path
        nuke.scriptSaveAs(path)

    def process(self,
                regex_filter=None,
                version=None,
                representations=["exr", "dpx", "lutJson", "mov",
                                 "preview", "png", "jpeg", "jpg"]):
        """
        Save the script and load the latest version of every matching
        subset into it.

        Args:
            regex_filter (raw string): regex pattern to filter out subsets
            version (int): define a particular version, None gets last
            representations (list): representation names to load

        """

        if not self.to_script:
            # save the script
            self.save_script_as()

        # create viewer and reset frame range
        viewer = self.get_nodes(nodes_filter=["Viewer"])
        if not viewer:
            vn = nuke.createNode("Viewer")
            vn["xpos"].setValue(self.xpos)
            vn["ypos"].setValue(self.ypos)
        else:
            vn = viewer[-1]

        # move position
        self.position_up()

        wn = self.write_create()
        wn["xpos"].setValue(self.xpos)
        wn["ypos"].setValue(self.ypos)
        wn["render"].setValue(True)
        vn.setInput(0, wn)

        # adding backdrop under write
        self.create_backdrop(label="Render write \n\n\n\nOUTPUT",
                             color='0xcc1102ff', layer=-1,
                             nodes=[wn])

        # move position
        self.position_up(4)

        # set frame range for new viewer
        self.reset_frame_range_handles()

        # get all available representations
        subsets = pype.get_subsets(self._asset,
                                   regex_filter=regex_filter,
                                   version=version,
                                   representations=representations)

        for name, subset in subsets.items():
            log.debug("___________________")
            log.debug(name)
            log.debug(subset["version"])

        nodes_backdrop = list()
        for name, subset in subsets.items():
            if "lut" in name:
                continue
            log.info("Building Loader to: `{}`".format(name))
            version = subset["version"]
            log.info("Version to: `{}`".format(version["name"]))
            representations = subset["representaions"]
            for repr in representations:
                rn = self.read_loader(repr)
                rn["xpos"].setValue(self.xpos)
                rn["ypos"].setValue(self.ypos)
                wn.setInput(0, rn)

                # get additional nodes
                lut_subset = [s for n, s in subsets.items()
                              if "lut{}".format(name.lower()) in n.lower()]
                log.debug(">> lut_subset: `{}`".format(lut_subset))

                if len(lut_subset) > 0:
                    lsub = lut_subset[0]
                    fxn = self.effect_loader(lsub["representaions"][-1])
                    fxn_ypos = fxn["ypos"].value()
                    fxn["ypos"].setValue(fxn_ypos - 100)
                    nodes_backdrop.append(fxn)

                nodes_backdrop.append(rn)
                # move position
                self.position_right()

        # adding backdrop under all read nodes
        self.create_backdrop(label="Loaded Reads",
                             color='0x2d7702ff', layer=-1,
                             nodes=nodes_backdrop)

    def read_loader(self, representation):
        """
        Gets Loader plugin for image sequence or mov

        Arguments:
            representation (dict): avalon db entity

        """
        context = representation["context"]

        loader_name = "LoadSequence"
        if "mov" in context["representation"]:
            loader_name = "LoadMov"

        loader_plugin = None
        for Loader in api.discover(api.Loader):
            if Loader.__name__ != loader_name:
                continue

            loader_plugin = Loader

        return api.load(Loader=loader_plugin,
                        representation=representation["_id"])

    def effect_loader(self, representation):
        """
        Gets Loader plugin for effects

        Arguments:
            representation (dict): avalon db entity

        """
        loader_name = "LoadLuts"

        loader_plugin = None
        for Loader in api.discover(api.Loader):
            if Loader.__name__ != loader_name:
                continue

            loader_plugin = Loader

        return api.load(Loader=loader_plugin,
                        representation=representation["_id"])

    def write_create(self):
        """
        Create the render write node.

        """
        task = self.data_tmp["task"]
        sanitized_task = re.sub('[^0-9a-zA-Z]+', '', task)
        subset_name = "render{}Main".format(
            sanitized_task.capitalize())

        Create_name = "CreateWriteRender"

        creator_plugin = None
        for Creator in api.discover(api.Creator):
            if Creator.__name__ != Create_name:
                continue

            creator_plugin = Creator

        # return api.create()
        return creator_plugin(subset_name, self._asset).process()

    def create_backdrop(self, label="", color=None, layer=0,
                        nodes=None):
        """
        Create Backdrop node

        Arguments:
            color (str): nuke compatible string with color code
            layer (int): layer of node usually used (self.pos_layer - 1)
            label (str): the message
            nodes (list): list of nodes to be wrapped into backdrop

        """
        assert isinstance(nodes, list), "`nodes` should be a list of nodes"
        layer = self.pos_layer + layer

        create_backdrop(label=label, color=color, layer=layer, nodes=nodes)

    def position_reset(self, xpos=0, ypos=0):
        self.xpos = xpos
        self.ypos = ypos

    def position_right(self, multiply=1):
        self.xpos += (self.xpos_size * multiply) + self.xpos_gap

    def position_left(self, multiply=1):
        self.xpos -= (self.xpos_size * multiply) + self.xpos_gap

    def position_down(self, multiply=1):
        # Nuke's y axis grows downward, so moving down increases ypos
        self.ypos += (self.ypos_size * multiply) + self.ypos_gap

    def position_up(self, multiply=1):
        self.ypos -= (self.ypos_size * multiply) + self.ypos_gap


class ExporterReview:
    """
    Base class object for generating review data from Nuke
@@ -2,10 +2,12 @@ import nuke
from avalon.api import Session

from pype.hosts.nuke import lib
from ...lib import BuildWorkfile
from pype.api import Logger

log = Logger().get_logger(__name__, "nuke")


def install():
    menubar = nuke.menu("Nuke")
    menu = menubar.findItem(Session["AVALON_LABEL"])

@@ -20,7 +22,11 @@ def install():
    log.debug("Changing Item: {}".format(rm_item))
    # rm_item[1].setEnabled(False)
    menu.removeItem(rm_item[1].name())
    menu.addCommand(new_name, lambda: workfile_settings().reset_resolution(), index=(rm_item[0]))
    menu.addCommand(
        new_name,
        lambda: workfile_settings().reset_resolution(),
        index=(rm_item[0])
    )

    # replace reset frame range from avalon core to pype's
    name = "Reset Frame Range"

@@ -31,33 +37,38 @@ def install():
    log.debug("Changing Item: {}".format(rm_item))
    # rm_item[1].setEnabled(False)
    menu.removeItem(rm_item[1].name())
    menu.addCommand(new_name, lambda: workfile_settings().reset_frame_range_handles(), index=(rm_item[0]))
    menu.addCommand(
        new_name,
        lambda: workfile_settings().reset_frame_range_handles(),
        index=(rm_item[0])
    )

    # add colorspace menu item
    name = "Set colorspace"
    name = "Set Colorspace"
    menu.addCommand(
        name, lambda: workfile_settings().set_colorspace(),
        index=(rm_item[0]+2)
        index=(rm_item[0] + 2)
    )
    log.debug("Adding menu item: {}".format(name))

    # add workfile builder menu item
    name = "Build First Workfile.."
    name = "Build Workfile"
    menu.addCommand(
        name, lambda: lib.BuildWorkfile().process(),
        index=(rm_item[0]+7)
        name, lambda: BuildWorkfile().process(),
        index=(rm_item[0] + 7)
    )
    log.debug("Adding menu item: {}".format(name))

    # add item that applies all settings above
    name = "Apply all settings"
    name = "Apply All Settings"
    menu.addCommand(
        name, lambda: workfile_settings().set_context_settings(), index=(rm_item[0]+3)
        name,
        lambda: workfile_settings().set_context_settings(),
        index=(rm_item[0] + 3)
    )
    log.debug("Adding menu item: {}".format(name))


def uninstall():

    menubar = nuke.menu("Nuke")
@@ -1,17 +1,34 @@
from .utils import (
    setup,
    get_resolve_module
)

from .pipeline import (
    install,
    uninstall,
    ls,
    containerise,
    publish,
    launch_workfiles_app
    launch_workfiles_app,
    maintained_selection
)

from .utils import (
    setup,
    get_resolve_module
from .lib import (
    get_project_manager,
    get_current_project,
    get_current_sequence,
    get_current_track_items,
    create_current_sequence_media_bin,
    create_compound_clip,
    swap_clips,
    get_pype_clip_metadata,
    set_project_manager_to_folder_name
)

from .menu import launch_pype_menu

from .plugin import Creator

from .workio import (
    open_file,
    save_file,

@@ -21,12 +38,8 @@ from .workio import (
    work_root
)

from .lib import (
    get_project_manager,
    set_project_manager_to_folder_name
)

from .menu import launch_pype_menu
bmdvr = None
bmdvf = None

__all__ = [
    # pipeline

@@ -37,6 +50,7 @@ __all__ = [
    "reload_pipeline",
    "publish",
    "launch_workfiles_app",
    "maintained_selection",

    # utils
    "setup",

@@ -44,16 +58,30 @@ __all__ = [

    # lib
    "get_project_manager",
    "get_current_project",
    "get_current_sequence",
    "get_current_track_items",
    "create_current_sequence_media_bin",
    "create_compound_clip",
    "swap_clips",
    "get_pype_clip_metadata",
    "set_project_manager_to_folder_name",

    # menu
    "launch_pype_menu",

    # plugin
    "Creator",

    # workio
    "open_file",
    "save_file",
    "current_file",
    "has_unsaved_changes",
    "file_extensions",
    "work_root"
    "work_root",

    # singleton with black magic resolve module
    "bmdvr",
    "bmdvf"
]
@@ -21,9 +21,9 @@ class SelectInvalidAction(pyblish.api.Action):
    def process(self, context, plugin):

        try:
            from pype.hosts.resolve.utils import get_resolve_module
            resolve = get_resolve_module()
            self.log.debug(resolve)
            from . import get_project_manager
            pm = get_project_manager()
            self.log.debug(pm)
        except ImportError:
            raise ImportError("Current host is not Resolve")
@@ -1,20 +1,406 @@
import sys
from .utils import get_resolve_module
from pypeapp import Logger
import json
from opentimelineio import opentime
from pprint import pformat

from pype.api import Logger

log = Logger().get_logger(__name__, "resolve")

self = sys.modules[__name__]
self.pm = None
self.rename_index = 0
self.rename_add = 0
self.pype_metadata_key = "VFX Notes"


def get_project_manager():
    from . import bmdvr
    if not self.pm:
        resolve = get_resolve_module()
        self.pm = resolve.GetProjectManager()
        self.pm = bmdvr.GetProjectManager()
    return self.pm


def get_current_project():
    # initialize project manager
    get_project_manager()

    return self.pm.GetCurrentProject()


def get_current_sequence():
    # get current project
    project = get_current_project()

    return project.GetCurrentTimeline()


def get_current_track_items(
        filter=False,
        track_type=None,
        selecting_color=None):
    """Gets all available current timeline track items
    """
    track_type = track_type or "video"
    selecting_color = selecting_color or "Chocolate"
    project = get_current_project()
    sequence = get_current_sequence()
    selected_clips = list()

    # get all tracks count filtered by track type
    selected_track_count = sequence.GetTrackCount(track_type)

    # loop all tracks and get items
    _clips = dict()
    for track_index in range(1, (int(selected_track_count) + 1)):
        track_name = sequence.GetTrackName(track_type, track_index)
        track_track_items = sequence.GetItemListInTrack(
            track_type, track_index)
        _clips[track_index] = track_track_items

        _data = {
            "project": project,
            "sequence": sequence,
            "track": {
                "name": track_name,
                "index": track_index,
                "type": track_type}
        }
        # get track item object and its color
        for clip_index, ti in enumerate(_clips[track_index]):
            data = _data.copy()
            data["clip"] = {
                "item": ti,
                "index": clip_index
            }
            ti_color = ti.GetClipColor()
            if filter is True:
                if selecting_color in ti_color:
                    selected_clips.append(data)
                    # ti.ClearClipColor()
            else:
                selected_clips.append(data)

    return selected_clips
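A quick usage sketch of the dictionaries returned above (illustrative only; it assumes the Resolve scripting module is already initialized):

    for item_data in get_current_track_items(filter=True):
        track = item_data["track"]
        clip = item_data["clip"]
        print("{}[{}] clip #{}: {}".format(
            track["name"], track["index"],
            clip["index"], clip["item"].GetName()))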
def create_current_sequence_media_bin(sequence):
    seq_name = sequence.GetName()
    media_pool = get_current_project().GetMediaPool()
    root_folder = media_pool.GetRootFolder()
    sub_folders = root_folder.GetSubFolderList()
    testing_names = list()

    print(f"_ sub_folders: {sub_folders}")
    for subfolder in sub_folders:
        subf_name = subfolder.GetName()
        if seq_name in subf_name:
            testing_names.append(subfolder)
        else:
            testing_names.append(False)

    matching = next((f for f in testing_names if f is not False), None)

    if not matching:
        new_folder = media_pool.AddSubFolder(root_folder, seq_name)
        media_pool.SetCurrentFolder(new_folder)
    else:
        media_pool.SetCurrentFolder(matching)

    return media_pool.GetCurrentFolder()


def get_name_with_data(clip_data, presets):
    """
    Take hierarchy data from presets and build a name with the parents' data

    Args:
        clip_data (dict): clip data from `get_current_track_items()`
        presets (dict): data from create plugin

    Returns:
        list: name, data

    """
    def _replace_hash_to_expression(name, text):
        _spl = text.split("#")
        _len = (len(_spl) - 1)
        _repl = f"{{{name}:0>{_len}}}"
        new_text = text.replace(("#" * _len), _repl)
        return new_text
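For clarity, the hash-padding conversion above behaves like this (illustrative only, since the helper is nested inside get_name_with_data):

    # "sh###" has three hashes, so it becomes the format expression below
    padded = "sh###".replace("#" * 3, "{shot:0>3}")
    assert padded == "sh{shot:0>3}"
    assert padded.format(shot=7) == "sh007"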
    # presets data
    clip_name = presets["clipName"]
    hierarchy = presets["hierarchy"]
    hierarchy_data = presets["hierarchyData"].copy()
    count_from = presets["countFrom"]
    steps = presets["steps"]

    # reset rename_add
    if self.rename_add < count_from:
        self.rename_add = count_from

    # shot num calculate
    if self.rename_index == 0:
        shot_num = self.rename_add
    else:
        shot_num = self.rename_add + steps

    print(f"shot_num: {shot_num}")

    # clip data
    _data = {
        "sequence": clip_data["sequence"].GetName(),
        "track": clip_data["track"]["name"].replace(" ", "_"),
        "shot": shot_num
    }

    # convert `#` in the text into a pythonic format expression
    for k, v in hierarchy_data.items():
        if "#" not in v:
            continue
        hierarchy_data[k] = _replace_hash_to_expression(k, v)

    # fill up the pythonic expressions
    for k, v in hierarchy_data.items():
        hierarchy_data[k] = v.format(**_data)

    # fill up clip name and hierarchy keys
    hierarchy = hierarchy.format(**hierarchy_data)
    clip_name = clip_name.format(**hierarchy_data)

    self.rename_add = shot_num
    print(f"shot_num: {shot_num}")

    return (clip_name, {
        "hierarchy": hierarchy,
        "hierarchyData": hierarchy_data
    })
def create_compound_clip(clip_data, folder, rename=False, **kwargs):
    """
    Convert a timeline object into a nested timeline object

    Args:
        clip_data (dict): timeline item object packed into dict
                          with project, timeline (sequence)
        folder (resolve.MediaPool.Folder): media pool folder object,
        rename (bool)[optional]: renaming in sequence or not
        kwargs (optional): additional data needed for rename=True (presets)

    Returns:
        resolve.MediaPoolItem: media pool item with compound clip timeline(cct)
    """
    # get basic objects from data
    project = clip_data["project"]
    sequence = clip_data["sequence"]
    clip = clip_data["clip"]

    # get details of objects
    clip_item = clip["item"]
    track = clip_data["track"]

    mp = project.GetMediaPool()

    # get clip attributes
    clip_attributes = get_clip_attributes(clip_item)
    print(f"_ clip_attributes: {pformat(clip_attributes)}")

    if rename:
        presets = kwargs.get("presets")
        if presets:
            name, data = get_name_with_data(clip_data, presets)
            # add hierarchy data to clip attributes
            clip_attributes.update(data)
        else:
            name = "{:0>3}_{:0>4}".format(
                int(track["index"]), int(clip["index"]))
    else:
        # build name
        clip_name_split = clip_item.GetName().split(".")
        name = "_".join([
            track["name"],
            str(track["index"]),
            clip_name_split[0],
            str(clip["index"])]
        )

    # get metadata
    mp_item = clip_item.GetMediaPoolItem()
    mp_props = mp_item.GetClipProperty()

    mp_first_frame = int(mp_props["Start"])
    mp_last_frame = int(mp_props["End"])

    # initialize basic source timing for otio
    ci_l_offset = clip_item.GetLeftOffset()
    ci_duration = clip_item.GetDuration()
    rate = float(mp_props["FPS"])

    # source rational times
    mp_in_rc = opentime.RationalTime((ci_l_offset), rate)
    mp_out_rc = opentime.RationalTime((ci_l_offset + ci_duration - 1), rate)

    # get frame in and out for clip swapping
    in_frame = opentime.to_frames(mp_in_rc)
    out_frame = opentime.to_frames(mp_out_rc)

    # keep original sequence
    sq_origin = sequence

    # Set current folder to input media_pool_folder:
    mp.SetCurrentFolder(folder)

    # check if the clip doesn't exist already:
    clips = folder.GetClipList()
    cct = next((c for c in clips
                if c.GetName() in name), None)

    if cct:
        print(f"_ cct exists: {cct}")
    else:
        # Create an empty timeline in the current folder and give it a name:
        cct = mp.CreateEmptyTimeline(name)

        # get the newly created timeline from the folder clip list:
        clips = folder.GetClipList()
        cct = next((c for c in clips
                    if c.GetName() in name), None)
        print(f"_ cct created: {cct}")

    # Set current timeline to created timeline:
    project.SetCurrentTimeline(cct)

    # Add input clip to the current timeline:
    mp.AppendToTimeline([{
        "mediaPoolItem": mp_item,
        "startFrame": mp_first_frame,
        "endFrame": mp_last_frame
    }])

    # Set current timeline to the working timeline:
    project.SetCurrentTimeline(sq_origin)

    # Add collected metadata and attributes to the compound clip:
    if mp_item.GetMetadata(self.pype_metadata_key):
        clip_attributes[self.pype_metadata_key] = mp_item.GetMetadata(
            self.pype_metadata_key)[self.pype_metadata_key]

    # stringify
    clip_attributes = json.dumps(clip_attributes)

    # add attributes to metadata
    for k, v in mp_item.GetMetadata().items():
        cct.SetMetadata(k, v)

    # add metadata to cct
    cct.SetMetadata(self.pype_metadata_key, clip_attributes)

    # reset start timecode of the compound clip
    cct.SetClipProperty("Start TC", mp_props["Start TC"])

    # swap clips on timeline
    swap_clips(clip_item, cct, name, in_frame, out_frame)

    cct.SetClipColor("Pink")
    return cct
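A hypothetical end-to-end usage sketch of the helpers above (`presets` would come from the create plugin and is not defined here):

    sequence = get_current_sequence()
    folder = create_current_sequence_media_bin(sequence)
    for clip_data in get_current_track_items(filter=True):
        cct = create_compound_clip(clip_data, folder, rename=True, presets=presets)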
def swap_clips(from_clip, to_clip, to_clip_name, to_in_frame, to_out_frame):
    """
    Swapping clips on the timeline in timelineItem

    It will add a take and activate it for the inputted frame range

    Args:
        from_clip (resolve.mediaPoolItem)
        to_clip (resolve.mediaPoolItem)
        to_clip_name (str): name of to_clip
        to_in_frame (float): cut in frame, usually `GetLeftOffset()`
        to_out_frame (float): cut out frame, usually left offset plus duration

    Returns:
        bool: True if successfully replaced

    """
    # add clip item as take to timeline
    take = from_clip.AddTake(
        to_clip,
        float(to_in_frame),
        float(to_out_frame)
    )

    if not take:
        return False

    for take_index in range(1, (int(from_clip.GetTakesCount()) + 1)):
        take_item = from_clip.GetTakeByIndex(take_index)
        take_mp_item = take_item["mediaPoolItem"]
        if to_clip_name in take_mp_item.GetName():
            from_clip.SelectTakeByIndex(take_index)
            from_clip.FinalizeTake()
            return True
    return False
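The frame bounds passed in mirror the source timing computed in create_compound_clip above; a standalone sketch, where `clip_item`, `compound_clip` and `name` are hypothetical stand-ins:

    in_frame = clip_item.GetLeftOffset()
    out_frame = in_frame + clip_item.GetDuration() - 1
    if not swap_clips(clip_item, compound_clip, name, in_frame, out_frame):
        log.warning(f"take replacement failed for `{name}`")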
def validate_tc(x):
    # Validate and reformat a timecode string
    if len(x) != 11:
        print('Invalid timecode. Try again.')
        return None

    c = ':'
    colonized = x[:2] + c + x[3:5] + c + x[6:8] + c + x[9:]

    if colonized.replace(':', '').isdigit():
        print(f"_ colonized: {colonized}")
        return colonized
    else:
        print('Invalid timecode. Try again.')
        return None
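Examples of what the validator accepts: any 11-character string whose separators sit at positions 2, 5 and 8 and whose remaining characters are digits is re-colonized:

    assert validate_tc("01.02.03.04") == "01:02:03:04"
    assert validate_tc("01:02:03:04") == "01:02:03:04"
    assert validate_tc("1:02:03:04") is None  # wrong length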
def get_pype_clip_metadata(clip):
    """
    Get pype metadata created by the creator plugin

    Args:
        clip (resolve.TimelineItem): resolve's object

    Returns:
        dict: hierarchy, orig clip attributes
    """
    mp_item = clip.GetMediaPoolItem()
    metadata = mp_item.GetMetadata()

    return metadata.get(self.pype_metadata_key)


def get_clip_attributes(clip):
    """
    Collect basic attributes from a resolve timeline item

    Args:
        clip (resolve.TimelineItem): timeline item object

    Returns:
        dict: all collected attributes as key: values
    """
    mp_item = clip.GetMediaPoolItem()

    data = {
        "clipIn": clip.GetStart(),
        "clipOut": clip.GetEnd(),
        "clipLeftOffset": clip.GetLeftOffset(),
        "clipRightOffset": clip.GetRightOffset(),
        "clipMarkers": clip.GetMarkers(),
        "clipFlags": clip.GetFlagList(),
        "sourceId": mp_item.GetMediaId(),
        "sourceProperties": mp_item.GetClipProperty()
    }
    return data


def set_project_manager_to_folder_name(folder_name):
    """
    Sets context of Project manager to given folder by name.
@@ -1,6 +1,7 @@
QWidget {
    background-color: #282828;
    border-radius: 3;
    font-size: 13px;
}

QPushButton {

@@ -20,10 +21,38 @@ QPushButton:hover {
    color: #e64b3d;
}

QSpinBox {
    border: 1px solid #090909;
    background-color: #201f1f;
    color: #ffffff;
    padding: 2;
    max-width: 8em;
    qproperty-alignment: AlignCenter;
}

QLineEdit {
    border: 1px solid #090909;
    border-radius: 3px;
    background-color: #201f1f;
    color: #ffffff;
    padding: 2;
    min-width: 10em;
    qproperty-alignment: AlignCenter;
}

#PypeMenu {
    border: 1px solid #fef9ef;
}

#Spacer {
QVBoxLayout {
    background-color: #282828;
}

#Devider {
    border: 1px solid #090909;
    background-color: #585858;
}

QLabel {
    color: #77776b;
}
@@ -2,27 +2,23 @@
Basic avalon integration
"""
import os
# import sys
import contextlib
from avalon.tools import workfiles
from avalon import api as avalon
from pyblish import api as pyblish
from pypeapp import Logger
import pype
from pype.api import Logger

log = Logger().get_logger(__name__, "resolve")

# self = sys.modules[__name__]

AVALON_CONFIG = os.environ["AVALON_CONFIG"]
PARENT_DIR = os.path.dirname(__file__)
PACKAGE_DIR = os.path.dirname(PARENT_DIR)
PLUGINS_DIR = os.path.join(PACKAGE_DIR, "plugins")

LOAD_PATH = os.path.join(PLUGINS_DIR, "resolve", "load")
CREATE_PATH = os.path.join(PLUGINS_DIR, "resolve", "create")
INVENTORY_PATH = os.path.join(PLUGINS_DIR, "resolve", "inventory")
LOAD_PATH = os.path.join(pype.PLUGINS_DIR, "resolve", "load")
CREATE_PATH = os.path.join(pype.PLUGINS_DIR, "resolve", "create")
INVENTORY_PATH = os.path.join(pype.PLUGINS_DIR, "resolve", "inventory")

PUBLISH_PATH = os.path.join(
    PLUGINS_DIR, "resolve", "publish"
    pype.PLUGINS_DIR, "resolve", "publish"
).replace("\\", "/")

AVALON_CONTAINERS = ":AVALON_CONTAINERS"

@@ -40,11 +36,13 @@ def install():
    See the Maya equivalent for inspiration on how to implement this.

    """
    from . import get_resolve_module

    # Disable all families except for the ones we explicitly want to see
    family_states = [
        "imagesequence",
        "mov"
        "mov",
        "clip"
    ]
    avalon.data["familiesStateDefault"] = False
    avalon.data["familiesStateToggled"] = family_states

@@ -59,6 +57,8 @@ def install():
    avalon.register_plugin_path(avalon.Creator, CREATE_PATH)
    avalon.register_plugin_path(avalon.InventoryAction, INVENTORY_PATH)

    get_resolve_module()


def uninstall():
    """Uninstall all that was installed

@@ -140,3 +140,26 @@ def publish(parent):
    """Shorthand to publish from within host"""
    from avalon.tools import publish
    return publish.show(parent)


@contextlib.contextmanager
def maintained_selection():
    """Maintain selection during context

    Example:
        >>> with maintained_selection():
        ...     node['selected'].setValue(True)
        >>> print(node['selected'].value())
        False
    """
    try:
        # do the operation
        yield
    finally:
        pass


def reset_selection():
    """Deselect all selected nodes
    """
    pass
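The context manager above is a pass-through stub for now. One way it might be filled in later, assuming clip colors keep standing in for selection state as elsewhere in this changeset (a sketch, not the implementation):

    @contextlib.contextmanager
    def maintained_selection():
        from . import lib
        # remember the color-tagged clips, let the caller work, then re-tag
        previous = lib.get_current_track_items(filter=True)
        try:
            yield
        finally:
            for data in previous:
                data["clip"]["item"].SetClipColor("Chocolate")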
@@ -1,6 +1,182 @@
import re
from avalon import api
# from pype.hosts.resolve import lib as drlib
from pype.hosts import resolve
from avalon.vendor import qargparse
from pype.api import config

from Qt import QtWidgets, QtCore


class CreatorWidget(QtWidgets.QDialog):

    # output items
    items = dict()

    def __init__(self, name, info, presets, parent=None):
        super(CreatorWidget, self).__init__(parent)

        self.setObjectName(name)

        self.setWindowFlags(
            QtCore.Qt.Window
            | QtCore.Qt.CustomizeWindowHint
            | QtCore.Qt.WindowTitleHint
            | QtCore.Qt.WindowCloseButtonHint
            | QtCore.Qt.WindowStaysOnTopHint
        )
        self.setWindowTitle(name or "Pype Creator Input")

        # Where inputs and labels are set
        self.content_widget = [QtWidgets.QWidget(self)]
        top_layout = QtWidgets.QFormLayout(self.content_widget[0])
        top_layout.setObjectName("ContentLayout")
        top_layout.addWidget(Spacer(5, self))

        # first add widget tag line
        top_layout.addWidget(QtWidgets.QLabel(info))

        top_layout.addWidget(Spacer(5, self))

        # main dynamic layout
        self.content_widget.append(QtWidgets.QWidget(self))
        content_layout = QtWidgets.QFormLayout(self.content_widget[-1])

        # add preset data into input widget layout
        self.items = self.add_presets_to_layout(content_layout, presets)

        # Confirmation buttons
        btns_widget = QtWidgets.QWidget(self)
        btns_layout = QtWidgets.QHBoxLayout(btns_widget)

        cancel_btn = QtWidgets.QPushButton("Cancel")
        btns_layout.addWidget(cancel_btn)

        ok_btn = QtWidgets.QPushButton("Ok")
        btns_layout.addWidget(ok_btn)

        # Main layout of the dialog
        main_layout = QtWidgets.QVBoxLayout(self)
        main_layout.setContentsMargins(10, 10, 10, 10)
        main_layout.setSpacing(0)

        # adding content widget
        for w in self.content_widget:
            main_layout.addWidget(w)

        main_layout.addWidget(btns_widget)

        ok_btn.clicked.connect(self._on_ok_clicked)
        cancel_btn.clicked.connect(self._on_cancel_clicked)

        stylesheet = resolve.menu.load_stylesheet()
        self.setStyleSheet(stylesheet)

    def _on_ok_clicked(self):
        self.result = self.value(self.items)
        self.close()

    def _on_cancel_clicked(self):
        self.result = None
        self.close()

    def value(self, data):
        for k, v in data.items():
            if isinstance(v, dict):
                print(f"nested: {k}")
                data[k] = self.value(v)
            elif getattr(v, "value", None):
                print(f"normal int: {k}")
                data[k] = v.value()
            else:
                print(f"normal text: {k}")
                data[k] = v.text()
        return data

    def camel_case_split(self, text):
        matches = re.finditer(
            '.+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)', text)
        return " ".join([str(m.group(0)).capitalize() for m in matches])
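A worked example of what the splitting regex yields for a typical preset key (illustrative only):

    import re

    pattern = '.+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)'
    words = [m.group(0).capitalize() for m in re.finditer(pattern, "hierarchyData")]
    assert " ".join(words) == "Hierarchy Data"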
    def create_row(self, layout, type, text, **kwargs):
        # get the widget class of the given type from QtWidgets
        attr = getattr(QtWidgets, type)

        # convert label text to normal capitalized text with spaces
        label_text = self.camel_case_split(text)

        # assign the new text to the label widget
        label = QtWidgets.QLabel(label_text)
        label.setObjectName("LineLabel")

        # create the attribute name with spaces stripped
        attr_name = text.replace(" ", "")

        # create attribute and assign default values
        setattr(
            self,
            attr_name,
            attr(parent=self))

        # assign the created attribute to a variable
        item = getattr(self, attr_name)
        for func, val in kwargs.items():
            if getattr(item, func):
                func_attr = getattr(item, func)
                func_attr(val)

        # add to layout
        layout.addRow(label, item)

        return item

    def add_presets_to_layout(self, content_layout, data):
        for k, v in data.items():
            if isinstance(v, dict):
                # adding spacer between sections
                self.content_widget.append(QtWidgets.QWidget(self))
                devider = QtWidgets.QVBoxLayout(self.content_widget[-1])
                devider.addWidget(Spacer(5, self))
                devider.setObjectName("Devider")

                # adding nested layout with label
                self.content_widget.append(QtWidgets.QWidget(self))
                nested_content_layout = QtWidgets.QFormLayout(
                    self.content_widget[-1])
                nested_content_layout.setObjectName("NestedContentLayout")

                # add nested key as label
                self.create_row(nested_content_layout, "QLabel", k)
                data[k] = self.add_presets_to_layout(nested_content_layout, v)
            elif isinstance(v, str):
                print(f"layout.str: {k}")
                print(f"content_layout: {content_layout}")
                data[k] = self.create_row(
                    content_layout, "QLineEdit", k, setText=v)
            elif isinstance(v, int):
                print(f"layout.int: {k}")
                print(f"content_layout: {content_layout}")
                data[k] = self.create_row(
                    content_layout, "QSpinBox", k, setValue=v)
        return data


class Spacer(QtWidgets.QWidget):
    def __init__(self, height, *args, **kwargs):
        super(self.__class__, self).__init__(*args, **kwargs)

        self.setFixedHeight(height)

        real_spacer = QtWidgets.QWidget(self)
        real_spacer.setObjectName("Spacer")
        real_spacer.setFixedHeight(height)

        layout = QtWidgets.QVBoxLayout(self)
        layout.setContentsMargins(0, 0, 0, 0)
        layout.addWidget(real_spacer)

        self.setLayout(layout)


def get_reference_node_parents(ref):

@@ -73,3 +249,25 @@ class SequenceLoader(api.Loader):
    """Remove an existing `container`
    """
    pass


class Creator(api.Creator):
    """Creator class wrapper
    """
    marker_color = "Purple"

    def __init__(self, *args, **kwargs):
        super(Creator, self).__init__(*args, **kwargs)
        self.presets = config.get_presets()['plugins']["resolve"][
            "create"].get(self.__class__.__name__, {})

        # adding basic current context resolve objects
        self.project = resolve.get_current_project()
        self.sequence = resolve.get_current_sequence()

        if (self.options or {}).get("useSelection"):
            self.selected = resolve.get_current_track_items(filter=True)
        else:
            self.selected = resolve.get_current_track_items(filter=False)

        self.widget = CreatorWidget
@@ -1,7 +1,7 @@
#!/usr/bin/env python
import time
from pype.hosts.resolve.utils import get_resolve_module
from pypeapp import Logger
from pype.api import Logger

log = Logger().get_logger(__name__, "resolve")

@@ -3,7 +3,7 @@ import sys
import avalon.api as avalon
import pype

from pypeapp import Logger
from pype.api import Logger

log = Logger().get_logger(__name__)
@@ -1,65 +0,0 @@
#! python3
# -*- coding: utf-8 -*-


# convert clip def
def convert_clip(timeline=None):
    """Convert timeline item (clip) into compound clip pype container

    Args:
        timeline (MediaPool.Timeline): Object of timeline

    Returns:
        bool: `True` if success

    Raises:
        Exception: description

    """
    pass


# decorator function create_current_timeline_media_bin()
def create_current_timeline_media_bin(timeline=None):
    """Convert timeline item (clip) into compound clip pype container

    Args:
        timeline (MediaPool.Timeline): Object of timeline

    Returns:
        bool: `True` if success

    Raises:
        Exception: description

    """
    pass


# decorator function get_selected_track_items()
def get_selected_track_items():
    """Convert timeline item (clip) into compound clip pype container

    Args:
        timeline (MediaPool.Timeline): Object of timeline

    Returns:
        bool: `True` if success

    Raises:
        Exception: description

    """
    print("testText")


# PypeCompoundClip() class
class PypeCompoundClip(object):
    """docstring for ."""

    def __init__(self, arg):
        super(self).__init__()
        self.arg = arg

    def create_compound_clip(self):
        pass
@@ -1,57 +0,0 @@
import os
import sys
import pype
import importlib
import pyblish.api
import pyblish.util
import avalon.api
from avalon.tools import publish
from pypeapp import Logger

log = Logger().get_logger(__name__)


def main(env):
    # Registers pype's Global pyblish plugins
    pype.install()

    # Register Host (and its pyblish plugins)
    host_name = env["AVALON_APP"]
    # TODO not sure if use "pype." or "avalon." for host import
    host_import_str = f"pype.{host_name}"

    try:
        host_module = importlib.import_module(host_import_str)
    except ModuleNotFoundError:
        log.error((
            f"Host \"{host_name}\" can't be imported."
            f" Import string \"{host_import_str}\" failed."
        ))
        return False

    avalon.api.install(host_module)

    # Register additional paths
    addition_paths_str = env.get("PUBLISH_PATHS") or ""
    addition_paths = addition_paths_str.split(os.pathsep)
    for path in addition_paths:
        path = os.path.normpath(path)
        if not os.path.exists(path):
            continue

        pyblish.api.register_plugin_path(path)

    # Register project specific plugins
    project_name = os.environ["AVALON_PROJECT"]
    project_plugins_paths = env.get("PYPE_PROJECT_PLUGINS") or ""
    for path in project_plugins_paths.split(os.pathsep):
        plugin_path = os.path.join(path, project_name, "plugins")
        if os.path.exists(plugin_path):
            pyblish.api.register_plugin_path(plugin_path)

    return publish.show()


if __name__ == "__main__":
    result = main(os.environ)
    sys.exit(not bool(result))
@@ -1,35 +0,0 @@
#! python3
# -*- coding: utf-8 -*-
import os
from pypeapp import execute, Logger
from pype.hosts.resolve.utils import get_resolve_module

log = Logger().get_logger("Resolve")

CURRENT_DIR = os.getenv("RESOLVE_UTILITY_SCRIPTS_DIR", "")
python_dir = os.getenv("PYTHON36_RESOLVE")
python_exe = os.path.normpath(
    os.path.join(python_dir, "python.exe")
)

resolve = get_resolve_module()
PM = resolve.GetProjectManager()
P = PM.GetCurrentProject()

log.info(P.GetName())


# ______________________________________________________
# testing subprocessing Scripts
testing_py = os.path.join(CURRENT_DIR, "ResolvePageSwitcher.py")
testing_py = os.path.normpath(testing_py)
log.info(f"Testing path to script: `{testing_py}`")

returncode = execute(
    [python_exe, os.path.normpath(testing_py)],
    env=dict(os.environ)
)

# Check if output file exists
if returncode != 0:
    log.error("Executing failed!")
pype/hosts/resolve/utility_scripts/test.py (new file, 21 lines)
@@ -0,0 +1,21 @@
#! python3
import sys
from pype.api import Logger
import DaVinciResolveScript as bmdvr


log = Logger().get_logger(__name__)


def main():
    import pype.hosts.resolve as bmdvr
    bm = bmdvr.utils.get_resolve_module()
    log.info(f"blackmagicmodule: {bm}")


print(f"_>> bmdvr.scriptapp(Resolve): {bmdvr.scriptapp('Resolve')}")


if __name__ == "__main__":
    result = main()
    sys.exit(not bool(result))
@@ -9,18 +9,16 @@ import os
import shutil

from pypeapp import Logger

log = Logger().get_logger(__name__, "resolve")

self = sys.modules[__name__]
self.bmd = None


def get_resolve_module():
    from pype.hosts import resolve
    # don't run if already loaded
    if self.bmd:
        return self.bmd

    if resolve.bmdvr:
        log.info(("resolve module is assigned to "
                  f"`pype.hosts.resolve.bmdvr`: {resolve.bmdvr}"))
        return resolve.bmdvr
    try:
        """
        The PYTHONPATH needs to be set correctly for this import

@@ -71,8 +69,14 @@ def get_resolve_module():
        )
        sys.exit()
    # assign global var and return
    self.bmd = bmd.scriptapp("Resolve")
    return self.bmd
    bmdvr = bmd.scriptapp("Resolve")
    # bmdvf = bmd.scriptapp("Fusion")
    resolve.bmdvr = bmdvr
    resolve.bmdvf = bmdvr.Fusion()
    log.info(("Assigning resolve module to "
              f"`pype.hosts.resolve.bmdvr`: {resolve.bmdvr}"))
    log.info(("Assigning resolve module to "
              f"`pype.hosts.resolve.bmdvf`: {resolve.bmdvf}"))


def _sync_utility_scripts(env=None):
@@ -2,8 +2,9 @@

import os
from pypeapp import Logger
from .lib import (
from . import (
    get_project_manager,
    get_current_project,
    set_project_manager_to_folder_name
)

@@ -26,7 +27,7 @@ def save_file(filepath):
    pm = get_project_manager()
    file = os.path.basename(filepath)
    fname, _ = os.path.splitext(file)
    project = pm.GetCurrentProject()
    project = get_current_project()
    name = project.GetName()

    if "Untitled Project" not in name:
pype/lib.py (171 changed lines)
@@ -19,7 +19,7 @@ from abc import ABCMeta, abstractmethod
from avalon import io, pipeline
import six
import avalon.api
from .api import config, Anatomy
from .api import config, Anatomy, Logger

log = logging.getLogger(__name__)

@@ -1622,7 +1622,7 @@ class ApplicationAction(avalon.api.Action):
    parsed application `.toml` this can launch the application.

    """

    _log = None
    config = None
    group = None
    variant = None

@@ -1632,6 +1632,12 @@ class ApplicationAction(avalon.api.Action):
        "AVALON_TASK"
    )

    @property
    def log(self):
        if self._log is None:
            self._log = Logger().get_logger(self.__class__.__name__)
        return self._log

    def is_compatible(self, session):
        for key in self.required_session_keys:
            if key not in session:

@@ -1644,6 +1650,165 @@ class ApplicationAction(avalon.api.Action):
        project_name = session["AVALON_PROJECT"]
        asset_name = session["AVALON_ASSET"]
        task_name = session["AVALON_TASK"]
        return launch_application(
        launch_application(
            project_name, asset_name, task_name, self.name
        )

        self._ftrack_after_launch_procedure(
            project_name, asset_name, task_name
        )

    def _ftrack_after_launch_procedure(
        self, project_name, asset_name, task_name
    ):
        # TODO move to launch hook
        required_keys = ("FTRACK_SERVER", "FTRACK_API_USER", "FTRACK_API_KEY")
        for key in required_keys:
            if not os.environ.get(key):
                self.log.debug((
                    "Missing required environment \"{}\""
                    " for Ftrack after launch procedure."
                ).format(key))
                return

        try:
            import ftrack_api
            session = ftrack_api.Session(auto_connect_event_hub=True)
            self.log.debug("Ftrack session created")
        except Exception:
            self.log.warning("Couldn't create Ftrack session")
            return

        try:
            entity = self._find_ftrack_task_entity(
                session, project_name, asset_name, task_name
            )
            self._ftrack_status_change(session, entity, project_name)
            self._start_timer(session, entity, ftrack_api)
        except Exception:
            self.log.warning(
                "Couldn't finish Ftrack procedure.", exc_info=True
            )
            return

        finally:
            session.close()

    def _find_ftrack_task_entity(
        self, session, project_name, asset_name, task_name
    ):
        project_entity = session.query(
            "Project where full_name is \"{}\"".format(project_name)
        ).first()
        if not project_entity:
            self.log.warning(
                "Couldn't find project \"{}\" in Ftrack.".format(project_name)
            )
            return

        potential_task_entities = session.query((
            "TypedContext where parent.name is \"{}\" and project_id is \"{}\""
        ).format(asset_name, project_entity["id"])).all()
        filtered_entities = []
        for _entity in potential_task_entities:
            if (
                _entity.entity_type.lower() == "task"
                and _entity["name"] == task_name
            ):
                filtered_entities.append(_entity)

        if not filtered_entities:
            self.log.warning((
                "Couldn't find task \"{}\" under parent \"{}\" in Ftrack."
            ).format(task_name, asset_name))
            return

        if len(filtered_entities) > 1:
            self.log.warning((
                "Found more than one task \"{}\""
                " under parent \"{}\" in Ftrack."
            ).format(task_name, asset_name))
            return

        return filtered_entities[0]

    def _ftrack_status_change(self, session, entity, project_name):
        presets = config.get_presets(project_name)["ftrack"]["ftrack_config"]
        statuses = presets.get("status_update")
        if not statuses:
            return

        actual_status = entity["status"]["name"].lower()
        already_tested = set()
        ent_path = "/".join(
            [ent["name"] for ent in entity["link"]]
        )
        while True:
            next_status_name = None
            for key, value in statuses.items():
                if key in already_tested:
                    continue
                if actual_status in value or "_any_" in value:
                    if key != "_ignore_":
                        next_status_name = key
                        already_tested.add(key)
                    break
                already_tested.add(key)

            if next_status_name is None:
                break

            try:
                query = "Status where name is \"{}\"".format(
                    next_status_name
                )
                status = session.query(query).one()

                entity["status"] = status
                session.commit()
                self.log.debug("Changing status to \"{}\" <{}>".format(
                    next_status_name, ent_path
                ))
                break

            except Exception:
                session.rollback()
                msg = (
                    "Status \"{}\" in presets wasn't found"
                    " on Ftrack entity type \"{}\""
                ).format(next_status_name, entity.entity_type)
                self.log.warning(msg)

    def _start_timer(self, session, entity, _ftrack_api):
        self.log.debug("Triggering timer start.")

        user_entity = session.query("User where username is \"{}\"".format(
            os.environ["FTRACK_API_USER"]
        )).first()
        if not user_entity:
            self.log.warning(
                "Couldn't find user with username \"{}\" in Ftrack".format(
                    os.environ["FTRACK_API_USER"]
                )
            )
            return

        source = {
            "user": {
                "id": user_entity["id"],
                "username": user_entity["username"]
            }
        }
        event_data = {
            "actionIdentifier": "start.timer",
            "selection": [{"entityId": entity["id"], "entityType": "task"}]
        }
        session.event_hub.publish(
            _ftrack_api.event.base.Event(
                topic="ftrack.action.launch",
                data=event_data,
                source=source
            ),
            on_error="ignore"
        )
        self.log.debug("Timer start triggered successfully.")
@@ -1,8 +1,6 @@
from .io_nonsingleton import DbConnector
from .rest_api import AdobeRestApi, PUBLISH_PATHS

__all__ = [
    "PUBLISH_PATHS",
    "DbConnector",
    "AdobeRestApi"
]
@@ -1,460 +0,0 @@
"""
Wrapper around interactions with the database

Copy of io module in avalon-core.
- In this case not working as singleton with api.Session!
"""

import os
import time
import errno
import shutil
import logging
import tempfile
import functools
import contextlib

from avalon import schema
from avalon.vendor import requests
from avalon.io import extract_port_from_url

# Third-party dependencies
import pymongo


def auto_reconnect(func):
    """Handle auto reconnect with up to 3 retries"""
    @functools.wraps(func)
    def decorated(*args, **kwargs):
        object = args[0]
        for retry in range(3):
            try:
                return func(*args, **kwargs)
            except pymongo.errors.AutoReconnect:
                object.log.error("Reconnecting..")
                time.sleep(0.1)
        else:
            raise

    return decorated
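A usage sketch of the decorator above: the wrapped callable's owner (args[0]) must expose a `.log` attribute. `Collections` here is a hypothetical consumer, not part of this module:

    class Collections(object):
        log = logging.getLogger("Collections")

        def __init__(self, database):
            self._database = database

        @auto_reconnect
        def find_one(self, filter):
            return self._database["assets"].find_one(filter)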
class DbConnector(object):

    log = logging.getLogger(__name__)

    def __init__(self):
        self.Session = {}
        self._mongo_client = None
        self._sentry_client = None
        self._sentry_logging_handler = None
        self._database = None
        self._is_installed = False

    def __getitem__(self, key):
        # gives direct access to a collection without setting `active_table`
        return self._database[key]

    def __getattribute__(self, attr):
        # Not all methods of the PyMongo database are implemented here;
        # this fallback makes it possible to use them too.
        try:
            return super(DbConnector, self).__getattribute__(attr)
        except AttributeError:
            cur_proj = self.Session["AVALON_PROJECT"]
            return self._database[cur_proj].__getattribute__(attr)
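The effect of the fallback above, as a sketch (hypothetical project name; requires a reachable Mongo): any attribute missing on DbConnector resolves against the active project's collection.

    db = DbConnector()
    db.install()
    db.Session["AVALON_PROJECT"] = "demo_project"
    asset = db.find_one({"type": "asset"})  # proxied to pymongo's find_one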
    def install(self):
        """Establish a persistent connection to the database"""
        if self._is_installed:
            return

        logging.basicConfig()
        self.Session.update(self._from_environment())

        timeout = int(self.Session["AVALON_TIMEOUT"])
        mongo_url = self.Session["AVALON_MONGO"]
        kwargs = {
            "host": mongo_url,
            "serverSelectionTimeoutMS": timeout
        }

        port = extract_port_from_url(mongo_url)
        if port is not None:
            kwargs["port"] = int(port)

        self._mongo_client = pymongo.MongoClient(**kwargs)

        for retry in range(3):
            try:
                t1 = time.time()
                self._mongo_client.server_info()

            except Exception:
                self.log.error("Retrying..")
                time.sleep(1)
                timeout *= 1.5

            else:
                break

        else:
            raise IOError(
                "ERROR: Couldn't connect to %s in "
                "less than %.3f ms" % (self.Session["AVALON_MONGO"], timeout))

        self.log.info("Connected to %s, delay %.3f s" % (
            self.Session["AVALON_MONGO"], time.time() - t1))

        self._install_sentry()

        self._database = self._mongo_client[self.Session["AVALON_DB"]]
        self._is_installed = True

    def _install_sentry(self):
        if "AVALON_SENTRY" not in self.Session:
            return

        try:
            from raven import Client
            from raven.handlers.logging import SentryHandler
            from raven.conf import setup_logging
        except ImportError:
            # Note: There was a Sentry address in this Session
            return self.log.warning("Sentry disabled, raven not installed")

        client = Client(self.Session["AVALON_SENTRY"])

        # Transmit log messages to Sentry
        handler = SentryHandler(client)
        handler.setLevel(logging.WARNING)

        setup_logging(handler)

        self._sentry_client = client
        self._sentry_logging_handler = handler
        self.log.info(
            "Connected to Sentry @ %s" % self.Session["AVALON_SENTRY"]
        )

    def _from_environment(self):
        Session = {
            item[0]: os.getenv(item[0], item[1])
            for item in (
                # Root directory of projects on disk
                ("AVALON_PROJECTS", None),

                # Name of current Project
                ("AVALON_PROJECT", ""),

                # Name of current Asset
                ("AVALON_ASSET", ""),

                # Name of current silo
                ("AVALON_SILO", ""),

                # Name of current task
                ("AVALON_TASK", None),

                # Name of current app
                ("AVALON_APP", None),

                # Path to working directory
                ("AVALON_WORKDIR", None),

                # Name of current Config
                # TODO(marcus): Establish a suitable default config
                ("AVALON_CONFIG", "no_config"),

                # Name of Avalon in graphical user interfaces
                # Use this to customise the visual appearance of Avalon
                # to better integrate with your surrounding pipeline
                ("AVALON_LABEL", "Avalon"),

                # Used during any connections to the outside world
                ("AVALON_TIMEOUT", "1000"),

                # Address to Asset Database
                ("AVALON_MONGO", "mongodb://localhost:27017"),

                # Name of database used in MongoDB
                ("AVALON_DB", "avalon"),

                # Address to Sentry
                ("AVALON_SENTRY", None),

                # Address to Deadline Web Service
                # E.g. http://192.167.0.1:8082
                ("AVALON_DEADLINE", None),

                # Enable features not necessarily stable. The user's own risk
                ("AVALON_EARLY_ADOPTER", None),
|
||||
|
||||
# Address of central asset repository, contains
|
||||
# the following interface:
|
||||
# /upload
|
||||
# /download
|
||||
# /manager (optional)
|
||||
("AVALON_LOCATION", "http://127.0.0.1"),
|
||||
|
||||
# Boolean of whether to upload published material
|
||||
# to central asset repository
|
||||
("AVALON_UPLOAD", None),
|
||||
|
||||
# Generic username and password
|
||||
("AVALON_USERNAME", "avalon"),
|
||||
("AVALON_PASSWORD", "secret"),
|
||||
|
||||
# Unique identifier for instances in working files
|
||||
("AVALON_INSTANCE_ID", "avalon.instance"),
|
||||
("AVALON_CONTAINER_ID", "avalon.container"),
|
||||
|
||||
# Enable debugging
|
||||
("AVALON_DEBUG", None),
|
||||
|
||||
) if os.getenv(item[0], item[1]) is not None
|
||||
}
|
||||
|
||||
Session["schema"] = "avalon-core:session-2.0"
|
||||
try:
|
||||
schema.validate(Session)
|
||||
except schema.ValidationError as e:
|
||||
# TODO(marcus): Make this mandatory
|
||||
self.log.warning(e)
|
||||
|
||||
return Session
|
||||
|
||||
def uninstall(self):
|
||||
"""Close any connection to the database"""
|
||||
try:
|
||||
self._mongo_client.close()
|
||||
except AttributeError:
|
||||
pass
|
||||
|
||||
self._mongo_client = None
|
||||
self._database = None
|
||||
self._is_installed = False
|
||||
|
||||
def active_project(self):
|
||||
"""Return the name of the active project"""
|
||||
return self.Session["AVALON_PROJECT"]
|
||||
|
||||
def activate_project(self, project_name):
|
||||
self.Session["AVALON_PROJECT"] = project_name
|
||||
|
||||
def projects(self):
|
||||
"""List available projects
|
||||
|
||||
Returns:
|
||||
list of project documents
|
||||
|
||||
"""
|
||||
|
||||
collection_names = self.collections()
|
||||
for project in collection_names:
|
||||
if project in ("system.indexes",):
|
||||
continue
|
||||
|
||||
# Each collection will have exactly one project document
|
||||
document = self.find_project(project)
|
||||
|
||||
if document is not None:
|
||||
yield document
|
||||
|
||||
def locate(self, path):
|
||||
"""Traverse a hierarchy from top-to-bottom
|
||||
|
||||
Example:
|
||||
representation = locate(["hulk", "Bruce", "modelDefault", 1, "ma"])
|
||||
|
||||
Returns:
|
||||
representation (ObjectId)
|
||||
|
||||
"""
|
||||
|
||||
components = zip(
|
||||
("project", "asset", "subset", "version", "representation"),
|
||||
path
|
||||
)
|
||||
|
||||
parent = None
|
||||
for type_, name in components:
|
||||
latest = (type_ == "version") and name in (None, -1)
|
||||
|
||||
try:
|
||||
if latest:
|
||||
parent = self.find_one(
|
||||
filter={
|
||||
"type": type_,
|
||||
"parent": parent
|
||||
},
|
||||
projection={"_id": 1},
|
||||
sort=[("name", -1)]
|
||||
)["_id"]
|
||||
else:
|
||||
parent = self.find_one(
|
||||
filter={
|
||||
"type": type_,
|
||||
"name": name,
|
||||
"parent": parent
|
||||
},
|
||||
projection={"_id": 1},
|
||||
)["_id"]
|
||||
|
||||
except TypeError:
|
||||
return None
|
||||
|
||||
return parent
|
||||
|
||||
@auto_reconnect
|
||||
def collections(self):
|
||||
return self._database.collection_names()
|
||||
|
||||
@auto_reconnect
|
||||
def find_project(self, project):
|
||||
return self._database[project].find_one({"type": "project"})
|
||||
|
||||
@auto_reconnect
|
||||
def insert_one(self, item):
|
||||
assert isinstance(item, dict), "item must be of type <dict>"
|
||||
schema.validate(item)
|
||||
return self._database[self.Session["AVALON_PROJECT"]].insert_one(item)
|
||||
|
||||
@auto_reconnect
|
||||
def insert_many(self, items, ordered=True):
|
||||
# check if all items are valid
|
||||
assert isinstance(items, list), "`items` must be of type <list>"
|
||||
for item in items:
|
||||
assert isinstance(item, dict), "`item` must be of type <dict>"
|
||||
schema.validate(item)
|
||||
|
||||
return self._database[self.Session["AVALON_PROJECT"]].insert_many(
|
||||
items,
|
||||
ordered=ordered)
|
||||
|
||||
@auto_reconnect
|
||||
def find(self, filter, projection=None, sort=None):
|
||||
return self._database[self.Session["AVALON_PROJECT"]].find(
|
||||
filter=filter,
|
||||
projection=projection,
|
||||
sort=sort
|
||||
)
|
||||
|
||||
@auto_reconnect
|
||||
def find_one(self, filter, projection=None, sort=None):
|
||||
assert isinstance(filter, dict), "filter must be <dict>"
|
||||
|
||||
return self._database[self.Session["AVALON_PROJECT"]].find_one(
|
||||
filter=filter,
|
||||
projection=projection,
|
||||
sort=sort
|
||||
)
|
||||
|
||||
@auto_reconnect
|
||||
def save(self, *args, **kwargs):
|
||||
return self._database[self.Session["AVALON_PROJECT"]].save(
|
||||
*args, **kwargs)
|
||||
|
||||
@auto_reconnect
|
||||
def replace_one(self, filter, replacement):
|
||||
return self._database[self.Session["AVALON_PROJECT"]].replace_one(
|
||||
filter, replacement)
|
||||
|
||||
@auto_reconnect
|
||||
def update_many(self, filter, update):
|
||||
return self._database[self.Session["AVALON_PROJECT"]].update_many(
|
||||
filter, update)
|
||||
|
||||
@auto_reconnect
|
||||
def distinct(self, *args, **kwargs):
|
||||
return self._database[self.Session["AVALON_PROJECT"]].distinct(
|
||||
*args, **kwargs)
|
||||
|
||||
@auto_reconnect
|
||||
def drop(self, *args, **kwargs):
|
||||
return self._database[self.Session["AVALON_PROJECT"]].drop(
|
||||
*args, **kwargs)
|
||||
|
||||
@auto_reconnect
|
||||
def delete_many(self, *args, **kwargs):
|
||||
return self._database[self.Session["AVALON_PROJECT"]].delete_many(
|
||||
*args, **kwargs)
|
||||
|
||||
def parenthood(self, document):
|
||||
assert document is not None, "This is a bug"
|
||||
|
||||
parents = list()
|
||||
|
||||
while document.get("parent") is not None:
|
||||
document = self.find_one({"_id": document["parent"]})
|
||||
|
||||
if document is None:
|
||||
break
|
||||
|
||||
if document.get("type") == "master_version":
|
||||
_document = self.find_one({"_id": document["version_id"]})
|
||||
document["data"] = _document["data"]
|
||||
|
||||
parents.append(document)
|
||||
|
||||
return parents
|
||||
|
||||
@contextlib.contextmanager
|
||||
def tempdir(self):
|
||||
tempdir = tempfile.mkdtemp()
|
||||
try:
|
||||
yield tempdir
|
||||
finally:
|
||||
shutil.rmtree(tempdir)
|
||||
|
||||
def download(self, src, dst):
|
||||
"""Download `src` to `dst`
|
||||
|
||||
Arguments:
|
||||
src (str): URL to source file
|
||||
dst (str): Absolute path to destination file
|
||||
|
||||
Yields tuple (progress, error):
|
||||
progress (int): Between 0-100
|
||||
error (Exception): Any exception raised when first making connection
|
||||
|
||||
"""
|
||||
|
||||
try:
|
||||
response = requests.get(
|
||||
src,
|
||||
stream=True,
|
||||
auth=requests.auth.HTTPBasicAuth(
|
||||
self.Session["AVALON_USERNAME"],
|
||||
self.Session["AVALON_PASSWORD"]
|
||||
)
|
||||
)
|
||||
except requests.ConnectionError as e:
|
||||
yield None, e
|
||||
return
|
||||
|
||||
with self.tempdir() as dirname:
|
||||
tmp = os.path.join(dirname, os.path.basename(src))
|
||||
|
||||
with open(tmp, "wb") as f:
|
||||
total_length = response.headers.get("content-length")
|
||||
|
||||
if total_length is None: # no content length header
|
||||
f.write(response.content)
|
||||
else:
|
||||
downloaded = 0
|
||||
total_length = int(total_length)
|
||||
for data in response.iter_content(chunk_size=4096):
|
||||
downloaded += len(data)
|
||||
f.write(data)
|
||||
|
||||
yield int(100.0 * downloaded / total_length), None
|
||||
|
||||
try:
|
||||
os.makedirs(os.path.dirname(dst))
|
||||
except OSError as e:
|
||||
# An already existing destination directory is fine.
|
||||
if e.errno != errno.EEXIST:
|
||||
raise
|
||||
|
||||
shutil.copy(tmp, dst)
|
||||
|
|
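The retry pattern from the deleted module, reduced to a self-contained sketch (the bare `raise` in the original's for-else would itself error, so this version keeps and re-raises the last exception):

import functools
import time

import pymongo


def auto_reconnect(func):
    """Retry a pymongo call up to three times on AutoReconnect."""
    @functools.wraps(func)
    def decorated(*args, **kwargs):
        last_error = None
        for _retry in range(3):
            try:
                return func(*args, **kwargs)
            except pymongo.errors.AutoReconnect as exc:
                last_error = exc
                time.sleep(0.1)
        raise last_error

    return decorated
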
@@ -2,7 +2,7 @@ import os
import sys
import copy
from pype.modules.rest_api import RestApi, route, abort, CallbackResult
-from .io_nonsingleton import DbConnector
+from avalon.api import AvalonMongoDB
from pype.api import config, execute, Logger

log = Logger().get_logger("AdobeCommunicator")

@@ -14,7 +14,7 @@ PUBLISH_PATHS = []


class AdobeRestApi(RestApi):
-    dbcon = DbConnector()
+    dbcon = AvalonMongoDB()

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

@@ -4,14 +4,14 @@ import json
import bson
import bson.json_util
from pype.modules.rest_api import RestApi, abort, CallbackResult
-from pype.modules.ftrack.lib.io_nonsingleton import DbConnector
+from avalon.api import AvalonMongoDB


class AvalonRestApi(RestApi):

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
-        self.dbcon = DbConnector()
+        self.dbcon = AvalonMongoDB()
        self.dbcon.install()

    @RestApi.route("/projects/<project_name>", url_prefix="/avalon", methods="GET")

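The replacement connector keeps the same surface as the removed DbConnector, so call sites only swap the import. A minimal usage sketch, assuming the AVALON_MONGO / AVALON_DB environment is configured; "my_project" stands in for a real project name:

from avalon.api import AvalonMongoDB

dbcon = AvalonMongoDB()
dbcon.install()                                  # open the Mongo connection
dbcon.Session["AVALON_PROJECT"] = "my_project"   # hypothetical project name
project_doc = dbcon.find_one({"type": "project"})
dbcon.uninstall()                                # close the connection again
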
@@ -5,7 +5,7 @@ from queue import Queue

from bson.objectid import ObjectId
from pype.modules.ftrack.lib import BaseAction, statics_icon
-from pype.modules.ftrack.lib.io_nonsingleton import DbConnector
+from avalon.api import AvalonMongoDB


class DeleteAssetSubset(BaseAction):

@@ -21,7 +21,7 @@ class DeleteAssetSubset(BaseAction):
    #: roles that are allowed to register this action
    role_list = ["Pypeclub", "Administrator", "Project Manager"]
    #: Db connection
-    dbcon = DbConnector()
+    dbcon = AvalonMongoDB()

    splitter = {"type": "label", "value": "---"}
    action_data_by_id = {}

@@ -6,7 +6,7 @@ import clique
from pymongo import UpdateOne

from pype.modules.ftrack.lib import BaseAction, statics_icon
-from pype.modules.ftrack.lib.io_nonsingleton import DbConnector
+from avalon.api import AvalonMongoDB
from pype.api import Anatomy

import avalon.pipeline

@@ -24,7 +24,7 @@ class DeleteOldVersions(BaseAction):
    role_list = ["Pypeclub", "Project Manager", "Administrator"]
    icon = statics_icon("ftrack", "action_icons", "PypeAdmin.svg")

-    dbcon = DbConnector()
+    dbcon = AvalonMongoDB()

    inteface_title = "Choose your preferences"
    splitter_item = {"type": "label", "value": "---"}

@@ -1,5 +1,6 @@
import os
+import copy
import json
import shutil
import collections


@@ -9,10 +10,10 @@ from bson.objectid import ObjectId
from avalon import pipeline
from avalon.vendor import filelink

-from pype.api import Anatomy
+from pype.api import Anatomy, config
from pype.modules.ftrack.lib import BaseAction, statics_icon
from pype.modules.ftrack.lib.avalon_sync import CUST_ATTR_ID_KEY
-from pype.modules.ftrack.lib.io_nonsingleton import DbConnector
+from avalon.api import AvalonMongoDB


class Delivery(BaseAction):

@@ -23,7 +24,7 @@ class Delivery(BaseAction):
    role_list = ["Pypeclub", "Administrator", "Project manager"]
    icon = statics_icon("ftrack", "action_icons", "Delivery.svg")

-    db_con = DbConnector()
+    db_con = AvalonMongoDB()

    def discover(self, session, entities, event):
        for entity in entities:

@@ -41,36 +42,22 @@
        items = []
        item_splitter = {"type": "label", "value": "---"}

-        # Prepare component names for processing
-        components = None
-        project = None
-        for entity in entities:
-            if project is None:
-                project_id = None
-                for ent_info in entity["link"]:
-                    if ent_info["type"].lower() == "project":
-                        project_id = ent_info["id"]
-                        break
+        project_entity = self.get_project_from_entity(entities[0])
+        project_name = project_entity["full_name"]
+        self.db_con.install()
+        self.db_con.Session["AVALON_PROJECT"] = project_name
+        project_doc = self.db_con.find_one({"type": "project"})
+        if not project_doc:
+            return {
+                "success": False,
+                "message": (
+                    "Didn't find project \"{}\" in avalon."
+                ).format(project_name)
+            }

-                if project_id is None:
-                    project = entity["asset"]["parent"]["project"]
-                else:
-                    project = session.query((
-                        "select id, full_name from Project where id is \"{}\""
-                    ).format(project_id)).one()
+        repre_names = self._get_repre_names(entities)
+        self.db_con.uninstall()

-            _components = set(
-                [component["name"] for component in entity["components"]]
-            )
-            if components is None:
-                components = _components
-                continue
-
-            components = components.intersection(_components)
-            if not components:
-                break
-
-        project_name = project["full_name"]
        items.append({
            "type": "hidden",
            "name": "__project_name__",

@@ -93,7 +80,7 @@

        skipped = False
        # Add message if there are any common components
-        if not components or not new_anatomies:
+        if not repre_names or not new_anatomies:
            skipped = True
            items.append({
                "type": "label",

@@ -106,7 +93,7 @@
            "value": skipped
        })

-        if not components:
+        if not repre_names:
            if len(entities) == 1:
                items.append({
                    "type": "label",

@@ -143,12 +130,12 @@
            "type": "label"
        })

-        for component in components:
+        for repre_name in repre_names:
            items.append({
                "type": "boolean",
                "value": False,
-                "label": component,
-                "name": component
+                "label": repre_name,
+                "name": repre_name
            })

        items.append(item_splitter)

@@ -198,27 +185,233 @@
            "title": title
        }

+    def _get_repre_names(self, entities):
+        version_ids = self._get_interest_version_ids(entities)
+        repre_docs = self.db_con.find({
+            "type": "representation",
+            "parent": {"$in": version_ids}
+        })
+        return list(sorted(repre_docs.distinct("name")))
+
+    def _get_interest_version_ids(self, entities):
+        parent_ent_by_id = {}
+        subset_names = set()
+        version_nums = set()
+        for entity in entities:
+            asset = entity["asset"]
+            parent = asset["parent"]
+            parent_ent_by_id[parent["id"]] = parent
+
+            subset_name = asset["name"]
+            subset_names.add(subset_name)
+
+            version = entity["version"]
+            version_nums.add(version)
+
+        asset_docs_by_ftrack_id = self._get_asset_docs(parent_ent_by_id)
+        subset_docs = self._get_subset_docs(
+            asset_docs_by_ftrack_id, subset_names, entities
+        )
+        version_docs = self._get_version_docs(
+            asset_docs_by_ftrack_id, subset_docs, version_nums, entities
+        )
+
+        return [version_doc["_id"] for version_doc in version_docs]
+
+    def _get_version_docs(
+        self, asset_docs_by_ftrack_id, subset_docs, version_nums, entities
+    ):
+        subset_docs_by_id = {
+            subset_doc["_id"]: subset_doc
+            for subset_doc in subset_docs
+        }
+        version_docs = list(self.db_con.find({
+            "type": "version",
+            "parent": {"$in": list(subset_docs_by_id.keys())},
+            "name": {"$in": list(version_nums)}
+        }))
+        version_docs_by_parent_id = collections.defaultdict(dict)
+        for version_doc in version_docs:
+            subset_doc = subset_docs_by_id[version_doc["parent"]]
+
+            asset_id = subset_doc["parent"]
+            subset_name = subset_doc["name"]
+            version = version_doc["name"]
+            if version_docs_by_parent_id[asset_id].get(subset_name) is None:
+                version_docs_by_parent_id[asset_id][subset_name] = {}
+
+            version_docs_by_parent_id[asset_id][subset_name][version] = (
+                version_doc
+            )
+
+        filtered_versions = []
+        for entity in entities:
+            asset = entity["asset"]
+
+            parent = asset["parent"]
+            asset_doc = asset_docs_by_ftrack_id[parent["id"]]
+
+            subsets_by_name = version_docs_by_parent_id.get(asset_doc["_id"])
+            if not subsets_by_name:
+                continue
+
+            subset_name = asset["name"]
+            version_docs_by_version = subsets_by_name.get(subset_name)
+            if not version_docs_by_version:
+                continue
+
+            version = entity["version"]
+            version_doc = version_docs_by_version.get(version)
+            if version_doc:
+                filtered_versions.append(version_doc)
+        return filtered_versions
+
+    def _get_subset_docs(
+        self, asset_docs_by_ftrack_id, subset_names, entities
+    ):
+        asset_doc_ids = list()
+        for asset_doc in asset_docs_by_ftrack_id.values():
+            asset_doc_ids.append(asset_doc["_id"])
+
+        subset_docs = list(self.db_con.find({
+            "type": "subset",
+            "parent": {"$in": asset_doc_ids},
+            "name": {"$in": list(subset_names)}
+        }))
+        subset_docs_by_parent_id = collections.defaultdict(dict)
+        for subset_doc in subset_docs:
+            asset_id = subset_doc["parent"]
+            subset_name = subset_doc["name"]
+            subset_docs_by_parent_id[asset_id][subset_name] = subset_doc
+
+        filtered_subsets = []
+        for entity in entities:
+            asset = entity["asset"]
+
+            parent = asset["parent"]
+            asset_doc = asset_docs_by_ftrack_id[parent["id"]]
+
+            subsets_by_name = subset_docs_by_parent_id.get(asset_doc["_id"])
+            if not subsets_by_name:
+                continue
+
+            subset_name = asset["name"]
+            subset_doc = subsets_by_name.get(subset_name)
+            if subset_doc:
+                filtered_subsets.append(subset_doc)
+        return filtered_subsets
+
+    def _get_asset_docs(self, parent_ent_by_id):
+        asset_docs = list(self.db_con.find({
+            "type": "asset",
+            "data.ftrackId": {"$in": list(parent_ent_by_id.keys())}
+        }))
+        asset_docs_by_ftrack_id = {
+            asset_doc["data"]["ftrackId"]: asset_doc
+            for asset_doc in asset_docs
+        }
+
+        entities_by_mongo_id = {}
+        entities_by_names = {}
+        for ftrack_id, entity in parent_ent_by_id.items():
+            if ftrack_id not in asset_docs_by_ftrack_id:
+                parent_mongo_id = entity["custom_attributes"].get(
+                    CUST_ATTR_ID_KEY
+                )
+                if parent_mongo_id:
+                    entities_by_mongo_id[ObjectId(parent_mongo_id)] = entity
+                else:
+                    entities_by_names[entity["name"]] = entity
+
+        expressions = []
+        if entities_by_mongo_id:
+            expression = {
+                "type": "asset",
+                "_id": {"$in": list(entities_by_mongo_id.keys())}
+            }
+            expressions.append(expression)
+
+        if entities_by_names:
+            expression = {
+                "type": "asset",
+                "name": {"$in": list(entities_by_names.keys())}
+            }
+            expressions.append(expression)
+
+        if expressions:
+            if len(expressions) == 1:
+                filter = expressions[0]
+            else:
+                filter = {"$or": expressions}
+
+            asset_docs = self.db_con.find(filter)
+            for asset_doc in asset_docs:
+                if asset_doc["_id"] in entities_by_mongo_id:
+                    entity = entities_by_mongo_id[asset_doc["_id"]]
+                    asset_docs_by_ftrack_id[entity["id"]] = asset_doc
+
+                elif asset_doc["name"] in entities_by_names:
+                    entity = entities_by_names[asset_doc["name"]]
+                    asset_docs_by_ftrack_id[entity["id"]] = asset_doc
+
+        return asset_docs_by_ftrack_id

    def launch(self, session, entities, event):
        if "values" not in event["data"]:
            return

-        self.report_items = collections.defaultdict(list)

        values = event["data"]["values"]
        skipped = values.pop("__skipped__")
        if skipped:
            return None

-        component_names = []
+        user_id = event["source"]["user"]["id"]
+        user_entity = session.query(
+            "User where id is {}".format(user_id)
+        ).one()
+
+        job = session.create("Job", {
+            "user": user_entity,
+            "status": "running",
+            "data": json.dumps({
+                "description": "Delivery processing."
+            })
+        })
+        session.commit()
+
+        try:
+            self.db_con.install()
+            self.real_launch(session, entities, event)
+            job["status"] = "done"
+
+        except Exception:
+            self.log.warning(
+                "Failed during processing delivery action.",
+                exc_info=True
+            )
+
+        finally:
+            if job["status"] != "done":
+                job["status"] = "failed"
+            session.commit()
+            self.db_con.uninstall()
+
+    def real_launch(self, session, entities, event):
+        self.log.info("Delivery action just started.")
+        report_items = collections.defaultdict(list)
+
+        values = event["data"]["values"]
+
+        location_path = values.pop("__location_path__")
+        anatomy_name = values.pop("__new_anatomies__")
+        project_name = values.pop("__project_name__")
+
+        repre_names = []
        for key, value in values.items():
            if value is True:
-                component_names.append(key)
+                repre_names.append(key)

-        if not component_names:
+        if not repre_names:
            return {
                "success": True,
                "message": "No components selected to deliver."

@@ -230,64 +423,15 @@
        if not os.path.exists(location_path):
            os.makedirs(location_path)

-        self.db_con.install()
-        self.db_con.Session["AVALON_PROJECT"] = project_name
-
-        repres_to_deliver = []
-        for entity in entities:
-            asset = entity["asset"]
-            subset_name = asset["name"]
-            version = entity["version"]
-
-            parent = asset["parent"]
-            parent_mongo_id = parent["custom_attributes"].get(CUST_ATTR_ID_KEY)
-            if parent_mongo_id:
-                parent_mongo_id = ObjectId(parent_mongo_id)
-            else:
-                asset_ent = self.db_con.find_one({
-                    "type": "asset",
-                    "data.ftrackId": parent["id"]
-                })
-                if not asset_ent:
-                    ent_path = "/".join(
-                        [ent["name"] for ent in parent["link"]]
-                    )
-                    msg = "Not synchronized entities to avalon"
-                    self.report_items[msg].append(ent_path)
-                    self.log.warning("{} <{}>".format(msg, ent_path))
-                    continue
-
-                parent_mongo_id = asset_ent["_id"]
-
-            subset_ent = self.db_con.find_one({
-                "type": "subset",
-                "parent": parent_mongo_id,
-                "name": subset_name
-            })
-
-            version_ent = self.db_con.find_one({
-                "type": "version",
-                "name": version,
-                "parent": subset_ent["_id"]
-            })
-
-            repre_ents = self.db_con.find({
-                "type": "representation",
-                "parent": version_ent["_id"]
-            })
-
-            repres_by_name = {}
-            for repre in repre_ents:
-                repre_name = repre["name"]
-                repres_by_name[repre_name] = repre
-
-            for component in entity["components"]:
-                comp_name = component["name"]
-                if comp_name not in component_names:
-                    continue
-
-                repre = repres_by_name.get(comp_name)
-                repres_to_deliver.append(repre)
+        self.log.debug("Collecting representations to process.")
+        version_ids = self._get_interest_version_ids(entities)
+        repres_to_deliver = list(self.db_con.find({
+            "type": "representation",
+            "parent": {"$in": version_ids},
+            "name": {"$in": repre_names}
+        }))

        anatomy = Anatomy(project_name)

@@ -304,9 +448,17 @@
        for name in root_names:
            format_dict["root"][name] = location_path

+        datetime_data = config.get_datetime_data()
        for repre in repres_to_deliver:
+            source_path = repre.get("data", {}).get("path")
+            debug_msg = "Processing representation {}".format(repre["_id"])
+            if source_path:
+                debug_msg += " with published path {}.".format(source_path)
+            self.log.debug(debug_msg)
+
            # Get destination repre path
            anatomy_data = copy.deepcopy(repre["context"])
+            anatomy_data.update(datetime_data)
            anatomy_filled = anatomy.format_all(anatomy_data)
            test_path = anatomy_filled["delivery"][anatomy_name]

@@ -333,7 +485,7 @@
                "- Invalid value DataType: \"{}\"<br>"
            ).format(str(repre["_id"]), keys)

-            self.report_items[msg].append(sub_msg)
+            report_items[msg].append(sub_msg)
            self.log.warning(
                "{} Representation: \"{}\" Filled: <{}>".format(
                    msg, str(repre["_id"]), str(test_path)

@@ -355,20 +507,19 @@
                anatomy,
                anatomy_name,
                anatomy_data,
-                format_dict
+                format_dict,
+                report_items
            )

            if not frame:
                self.process_single_file(*args)
            else:
                self.process_sequence(*args)

-        self.db_con.uninstall()
-
-        return self.report()
+        return self.report(report_items)

    def process_single_file(
-        self, repre_path, anatomy, anatomy_name, anatomy_data, format_dict
+        self, repre_path, anatomy, anatomy_name, anatomy_data, format_dict,
+        report_items
    ):
        anatomy_filled = anatomy.format(anatomy_data)
        if format_dict:

@@ -384,7 +535,8 @@
        self.copy_file(repre_path, delivery_path)

    def process_sequence(
-        self, repre_path, anatomy, anatomy_name, anatomy_data, format_dict
+        self, repre_path, anatomy, anatomy_name, anatomy_data, format_dict,
+        report_items
    ):
        dir_path, file_name = os.path.split(str(repre_path))

@@ -398,7 +550,7 @@

        if not file_name_items:
            msg = "Source file was not found"
-            self.report_items[msg].append(repre_path)
+            report_items[msg].append(repre_path)
            self.log.warning("{} <{}>".format(msg, repre_path))
            return

@@ -418,7 +570,7 @@
        if src_collection is None:
            # TODO log error!
            msg = "Source collection of files was not found"
-            self.report_items[msg].append(repre_path)
+            report_items[msg].append(repre_path)
            self.log.warning("{} <{}>".format(msg, repre_path))
            return

@@ -491,10 +643,10 @@
        except OSError:
            shutil.copyfile(src_path, dst_path)

-    def report(self):
+    def report(self, report_items):
        items = []
        title = "Delivery report"
-        for msg, _items in self.report_items.items():
+        for msg, _items in report_items.items():
            if not _items:
                continue

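The new _get_repre_names helper boils down to one find plus a distinct; a sketch against a hypothetical project and an empty placeholder list of version ids:

from avalon.api import AvalonMongoDB

dbcon = AvalonMongoDB()
dbcon.install()
dbcon.Session["AVALON_PROJECT"] = "my_project"  # hypothetical project name

version_ids = []  # ObjectIds gathered from the selected ftrack entities
repre_docs = dbcon.find({
    "type": "representation",
    "parent": {"$in": version_ids}
})
repre_names = list(sorted(repre_docs.distinct("name")))
dbcon.uninstall()
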
@@ -6,7 +6,7 @@ import json
from bson.objectid import ObjectId
from pype.modules.ftrack.lib import BaseAction, statics_icon
from pype.api import Anatomy
-from pype.modules.ftrack.lib.io_nonsingleton import DbConnector
+from avalon.api import AvalonMongoDB

from pype.modules.ftrack.lib.avalon_sync import CUST_ATTR_ID_KEY


@@ -25,7 +25,7 @@ class StoreThumbnailsToAvalon(BaseAction):
    icon = statics_icon("ftrack", "action_icons", "PypeAdmin.svg")

    thumbnail_key = "AVALON_THUMBNAIL_ROOT"
-    db_con = DbConnector()
+    db_con = AvalonMongoDB()

    def discover(self, session, entities, event):
        for entity in entities:

@@ -19,12 +19,12 @@ from pype.modules.ftrack.lib.avalon_sync import (
import ftrack_api
from pype.modules.ftrack import BaseEvent

-from pype.modules.ftrack.lib.io_nonsingleton import DbConnector
+from avalon.api import AvalonMongoDB


class SyncToAvalonEvent(BaseEvent):

-    dbcon = DbConnector()
+    dbcon = AvalonMongoDB()

    interest_entTypes = ["show", "task"]
    ignore_ent_types = ["Milestone"]

@@ -717,6 +717,9 @@ class SyncToAvalonEvent(BaseEvent):
        if not self.ftrack_removed:
            return
        ent_infos = self.ftrack_removed
+        self.log.debug(
+            "Processing removed entities: {}".format(str(ent_infos))
+        )
        removable_ids = []
        recreate_ents = []
        removed_names = []

@@ -878,8 +881,9 @@ class SyncToAvalonEvent(BaseEvent):
        self.process_session.commit()

        found_idx = None
-        for idx, _entity in enumerate(self._avalon_ents):
-            if _entity["_id"] == avalon_entity["_id"]:
+        proj_doc, asset_docs = self._avalon_ents
+        for idx, asset_doc in enumerate(asset_docs):
+            if asset_doc["_id"] == avalon_entity["_id"]:
                found_idx = idx
                break

@@ -894,7 +898,8 @@ class SyncToAvalonEvent(BaseEvent):
            new_entity_id
        )
        # Update cached entities
-        self._avalon_ents[found_idx] = avalon_entity
+        asset_docs[found_idx] = avalon_entity
+        self._avalon_ents = proj_doc, asset_docs

        if self._avalon_ents_by_id is not None:
            mongo_id = avalon_entity["_id"]

@@ -1258,6 +1263,10 @@ class SyncToAvalonEvent(BaseEvent):
        if not ent_infos:
            return

+        self.log.debug(
+            "Processing renamed entities: {}".format(str(ent_infos))
+        )
+
        renamed_tasks = {}
        not_found = {}
        changeable_queue = queue.Queue()

@@ -1453,6 +1462,10 @@ class SyncToAvalonEvent(BaseEvent):
        if not ent_infos:
            return

+        self.log.debug(
+            "Processing added entities: {}".format(str(ent_infos))
+        )
+
        cust_attrs, hier_attrs = self.avalon_cust_attrs
        entity_type_conf_ids = {}
        # Skip if already exist in avalon db or tasks entities

@@ -1729,6 +1742,10 @@ class SyncToAvalonEvent(BaseEvent):
        if not self.ftrack_moved:
            return

+        self.log.debug(
+            "Processing moved entities: {}".format(str(self.ftrack_moved))
+        )
+
        ftrack_moved = {k: v for k, v in sorted(
            self.ftrack_moved.items(),
            key=(lambda line: len(

@@ -1859,6 +1876,10 @@ class SyncToAvalonEvent(BaseEvent):
        if not self.ftrack_updated:
            return

+        self.log.debug(
+            "Processing updated entities: {}".format(str(self.ftrack_updated))
+        )
+
        ent_infos = self.ftrack_updated
        ftrack_mongo_mapping = {}
        not_found_ids = []

@@ -4,7 +4,7 @@ import subprocess

from pype.modules.ftrack import BaseEvent
from pype.modules.ftrack.lib.avalon_sync import CUST_ATTR_ID_KEY
-from pype.modules.ftrack.lib.io_nonsingleton import DbConnector
+from avalon.api import AvalonMongoDB

from bson.objectid import ObjectId


@@ -37,7 +37,7 @@ class UserAssigmentEvent(BaseEvent):
    3) path to publish files of task user was (de)assigned to
    """

-    db_con = DbConnector()
+    db_con = AvalonMongoDB()

    def error(self, *err):
        for e in err:

@@ -205,10 +205,16 @@ class ProcessEventHub(SocketBaseEventHub):
        else:
            try:
                self._handle(event)

+                mongo_id = event["data"].get("_event_mongo_id")
+                if mongo_id is None:
+                    continue
+
                self.dbcon.update_one(
-                    {"id": event["id"]},
+                    {"_id": mongo_id},
                    {"$set": {"pype_data.is_processed": True}}
                )

            except pymongo.errors.AutoReconnect:
                self.pypelog.error((
                    "Mongo server \"{}\" is not responding, exiting."

@@ -244,6 +250,7 @@ class ProcessEventHub(SocketBaseEventHub):
        }
        try:
            event = ftrack_api.event.base.Event(**new_event_data)
+            event["data"]["_event_mongo_id"] = event_data["_id"]
        except Exception:
            self.logger.exception(L(
                'Failed to convert payload into event: {0}',

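The processed-flag update above in isolation; plain pymongo, with hypothetical database and collection names:

import bson
import pymongo

client = pymongo.MongoClient("mongodb://localhost:27017")
collection = client["pype"]["ftrack_events"]  # hypothetical names

mongo_id = bson.ObjectId()  # stands in for event["data"]["_event_mongo_id"]
collection.update_one(
    {"_id": mongo_id},
    {"$set": {"pype_data.is_processed": True}}
)
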
@@ -12,7 +12,7 @@ from pype.modules.ftrack.ftrack_server.lib import (
    SocketSession, StatusEventHub,
    TOPIC_STATUS_SERVER, TOPIC_STATUS_SERVER_RESULT
)
-from pype.api import Logger, config
+from pype.api import Logger

log = Logger().get_logger("Event storer")
action_identifier = (

@@ -23,17 +23,7 @@ action_data = {
    "label": "Pype Admin",
    "variant": "- Event server Status ({})".format(host_ip),
    "description": "Get information about event server",
-    "actionIdentifier": action_identifier,
-    "icon": "{}/ftrack/action_icons/PypeAdmin.svg".format(
-        os.environ.get(
-            "PYPE_STATICS_SERVER",
-            "http://localhost:{}".format(
-                config.get_presets().get("services", {}).get(
-                    "rest_api", {}
-                ).get("default_port", 8021)
-            )
-        )
-    )
+    "actionIdentifier": action_identifier
}


@@ -5,7 +5,7 @@ import json
import collections
import copy

-from pype.modules.ftrack.lib.io_nonsingleton import DbConnector
+from avalon.api import AvalonMongoDB

import avalon
import avalon.api

@@ -240,7 +240,7 @@ def get_hierarchical_attributes(session, entity, attr_names, attr_defaults={}):


class SyncEntitiesFactory:
-    dbcon = DbConnector()
+    dbcon = AvalonMongoDB()

    project_query = (
        "select full_name, name, custom_attributes"

@@ -2144,6 +2144,7 @@ class SyncEntitiesFactory:
            "name": _name,
            "parent": parent_entity
        })
+        self.session.commit()

        final_entity = {}
        for k, v in av_entity.items():

@@ -1,460 +0,0 @@
(an identical copy of the deleted io_nonsingleton.py DbConnector module shown above, removed in full from pype/modules/ftrack/lib)

@@ -1,6 +1,6 @@
import os
import socket
-from Qt import QtCore
+import threading

from socketserver import ThreadingMixIn
from http.server import HTTPServer

@@ -155,14 +155,15 @@ class RestApiServer:
    def is_running(self):
        return self.rest_api_thread.is_running

    def tray_exit(self):
        self.stop()

    def stop(self):
+        self.rest_api_thread.is_running = False
+
+    def thread_stopped(self):
+        self._is_running = False
-        self.rest_api_thread.stop()
-        self.rest_api_thread.join()


-class RestApiThread(QtCore.QThread):
+class RestApiThread(threading.Thread):
    """ Listener for REST requests.

    It is possible to register callbacks for url paths.

@@ -174,6 +175,12 @@ class RestApiThread(QtCore.QThread):
        self.is_running = False
        self.module = module
        self.port = port
+        self.httpd = None
+
+    def stop(self):
+        self.is_running = False
+        if self.httpd:
+            self.httpd.server_close()

    def run(self):
        self.is_running = True

@@ -185,12 +192,14 @@
            )

            with ThreadingSimpleServer(("", self.port), Handler) as httpd:
+                self.httpd = httpd
                while self.is_running:
                    httpd.handle_request()

        except Exception:
            log.warning(
                "Rest Api Server service has failed", exc_info=True
            )

+        self.httpd = None
-        self.is_running = False
+        self.module.thread_stopped()

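The thread pattern the reworked RestApiThread relies on, as a self-contained sketch: handle_request() serves one request at a time so the is_running flag is rechecked between requests, and server_close() unblocks a pending accept on shutdown. Class and port are hypothetical.

import threading
from http.server import HTTPServer, BaseHTTPRequestHandler


class StoppableHttpThread(threading.Thread):
    def __init__(self, port):
        super().__init__(daemon=True)
        self.port = port
        self.is_running = False
        self.httpd = None

    def run(self):
        self.is_running = True
        try:
            with HTTPServer(("", self.port), BaseHTTPRequestHandler) as httpd:
                self.httpd = httpd
                while self.is_running:
                    httpd.handle_request()
        except Exception:
            pass  # socket closed by stop(); treat as shutdown
        finally:
            self.httpd = None
            self.is_running = False

    def stop(self):
        self.is_running = False
        if self.httpd:
            self.httpd.server_close()
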
pype/modules/websocket_server/__init__.py (new file, 5 lines)

@@ -0,0 +1,5 @@
+from .websocket_server import WebSocketServer
+
+
+def tray_init(tray_widget, main_widget):
+    return WebSocketServer()

pype/modules/websocket_server/hosts/__init__.py (new file, 0 lines)

pype/modules/websocket_server/hosts/external_app_1.py (new file, 47 lines)

@@ -0,0 +1,47 @@
+import asyncio
+
+from pype.api import Logger
+from wsrpc_aiohttp import WebSocketRoute
+
+log = Logger().get_logger("WebsocketServer")
+
+
+class ExternalApp1(WebSocketRoute):
+    """
+    One route, mimicking an external application (like Harmony, etc.).
+    All functions can be called from the client.
+    The 'do_notify' function calls a function on the client, mimicking
+    a notification after a long running job on the server or similar.
+    """
+
+    def init(self, **kwargs):
+        # Python __init__ must return "self".
+        # This method might return anything.
+        log.debug("someone called ExternalApp1 route")
+        return kwargs
+
+    async def server_function_one(self):
+        log.info('In function one')
+
+    async def server_function_two(self):
+        log.info('In function two')
+        return 'function two'
+
+    async def server_function_three(self):
+        log.info('In function three')
+        asyncio.ensure_future(self.do_notify())
+        return '{"message":"function three"}'
+
+    async def server_function_four(self, *args, **kwargs):
+        log.info('In function four args {} kwargs {}'.format(args, kwargs))
+        ret = dict(**kwargs)
+        ret["message"] = "function four received arguments"
+        return str(ret)
+
+    # This method calls a function on the client side
+    async def do_notify(self):
+        import time
+        time.sleep(5)
+        log.info('Calling function on server after delay')
+        awesome = 'Somebody called the server_function_three method!'
+        await self.socket.call('notify', result=awesome)

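A sketch of a matching Python client side; the WSRPCClient usage below is an assumption about the wsrpc_aiohttp API, and the URL and port are placeholders:

import asyncio

from wsrpc_aiohttp import WSRPCClient


async def main():
    client = WSRPCClient("ws://127.0.0.1:8099/ws/")  # placeholder endpoint
    await client.connect()
    # dotted route name, matching the server-side call convention above
    print(await client.call("ExternalApp1.server_function_two"))
    await client.close()


asyncio.get_event_loop().run_until_complete(main())
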
pype/modules/websocket_server/hosts/photoshop.py (new file, 64 lines)

@@ -0,0 +1,64 @@
+from pype.api import Logger
+from wsrpc_aiohttp import WebSocketRoute
+import functools
+
+import avalon.photoshop as photoshop
+
+log = Logger().get_logger("WebsocketServer")
+
+
+class Photoshop(WebSocketRoute):
+    """
+    One route, mimicking an external application (like Harmony, etc.).
+    All functions can be called from the client.
+    The 'do_notify' function calls a function on the client, mimicking
+    a notification after a long running job on the server or similar.
+    """
+    instance = None
+
+    def init(self, **kwargs):
+        # Python __init__ must return "self".
+        # This method might return anything.
+        log.debug("someone called Photoshop route")
+        self.instance = self
+        return kwargs
+
+    # server functions
+    async def ping(self):
+        log.debug("someone called Photoshop route ping")
+
+    # This method calls a function on the client side
+    # client functions
+
+    async def read(self):
+        log.debug("photoshop.read client calls server, server calls "
+                  "Photo client")
+        return await self.socket.call('Photoshop.read')
+
+    # panel routes for tools
+    async def creator_route(self):
+        self._tool_route("creator")
+
+    async def workfiles_route(self):
+        self._tool_route("workfiles")
+
+    async def loader_route(self):
+        self._tool_route("loader")
+
+    async def publish_route(self):
+        self._tool_route("publish")
+
+    async def sceneinventory_route(self):
+        self._tool_route("sceneinventory")
+
+    async def projectmanager_route(self):
+        self._tool_route("projectmanager")
+
+    def _tool_route(self, tool_name):
+        """The address accessed when clicking on the buttons."""
+        partial_method = functools.partial(photoshop.show, tool_name)
+
+        photoshop.execute_in_main_thread(partial_method)
+
+        # Required return statement.
+        return "nothing"

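The stub that follows converts the client's JSON layer list into namedtuple records; a sketch of the likely conversion, with the Layer fields taken from the get_layers() docstring below (the helper name and exact payload shape are assumptions):

import json
from collections import namedtuple

# Field names follow the tuple format documented in get_layers()
Layer = namedtuple("Layer", ["id", "name", "type", "visible"])

payload = """[
    {"id": "123", "name": "My Layer 1", "type": "FG", "visible": "true"}
]"""
layers = [Layer(**item) for item in json.loads(payload)]
print(layers[0].name)  # -> My Layer 1
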
283
pype/modules/websocket_server/stubs/photoshop_server_stub.py
Normal file
283
pype/modules/websocket_server/stubs/photoshop_server_stub.py
Normal file
|
|
@ -0,0 +1,283 @@
|
|||
"""
|
||||
Stub handling connection from server to client.
|
||||
Used anywhere the solution calls client methods.
|
||||
"""
|
||||
from pype.modules.websocket_server import WebSocketServer
|
||||
import json
|
||||
from collections import namedtuple
|
||||
|
||||
|
||||
class PhotoshopServerStub():
|
||||
"""
|
||||
Stub for calling function on client (Photoshop js) side.
|
||||
Expects that client is already connected (started when avalon menu
|
||||
is opened).
|
||||
'self.websocketserver.call' is used as async wrapper
|
||||
"""
|
||||
|
||||
def __init__(self):
|
||||
self.websocketserver = WebSocketServer.get_instance()
|
||||
self.client = self.websocketserver.get_client()
|
||||
|
||||
def open(self, path):
|
||||
"""
|
||||
Open file located at 'path' (local).
|
||||
:param path: <string> file path locally
|
||||
:return: None
|
||||
"""
|
||||
self.websocketserver.call(self.client.call
|
||||
('Photoshop.open', path=path)
|
||||
)
|
||||
|
||||
def read(self, layer, layers_meta=None):
|
||||
"""
|
||||
Parses layer metadata from Headline field of active document
|
||||
:param layer: <namedTuple> Layer("id": XX, "name": "YYY")
|
||||
:param layers_meta: full list from Headline (for performance in loops)
|
||||
:return:
|
||||
"""
|
||||
if layers_meta is None:
|
||||
layers_meta = self.get_layers_metadata()
|
||||
|
||||
return layers_meta.get(str(layer.id))
|
||||
|
||||
def imprint(self, layer, data, all_layers=None, layers_meta=None):
|
||||
"""
|
||||
Save layer metadata to Headline field of active document
|
||||
:param layer: <namedTuple> Layer("id": XXX, "name":'YYY')
|
||||
:param data: <string> json representation for single layer
|
||||
:param all_layers: <list of namedTuples> - for performance, could be
|
||||
injected for usage in loop, if not, single call will be
|
||||
triggered
|
||||
:param layers_meta: <string> json representation from Headline
|
||||
(for performance - provide only if imprint is in
|
||||
loop - value should be same)
|
||||
:return: None
|
||||
"""
|
||||
if not layers_meta:
|
||||
layers_meta = self.get_layers_metadata()
|
||||
# json.dumps turns integer dictionary keys into strings, so
|
||||
# anticipate that here.
|
||||
if str(layer.id) in layers_meta and layers_meta[str(layer.id)]:
|
||||
layers_meta[str(layer.id)].update(data)
|
||||
else:
|
||||
layers_meta[str(layer.id)] = data
|
||||
|
||||
# Ensure only valid ids are stored.
|
||||
if not all_layers:
|
||||
all_layers = self.get_layers()
|
||||
layer_ids = [layer.id for layer in all_layers]
|
||||
cleaned_data = {}
|
||||
|
||||
for layer_id in layers_meta:
|
||||
if int(layer_id) in layer_ids:
|
||||
cleaned_data[layer_id] = layers_meta[layer_id]
|
||||
|
||||
payload = json.dumps(cleaned_data, indent=4)
|
||||
|
||||
self.websocketserver.call(self.client.call
|
||||
('Photoshop.imprint', payload=payload)
|
||||
)
|
||||
|
||||
def get_layers(self):
|
||||
"""
|
||||
Returns JSON document with all(?) layers in active document.
|
||||
|
||||
:return: <list of namedtuples>
|
||||
Format of tuple: { 'id':'123',
|
||||
'name': 'My Layer 1',
|
||||
'type': 'GUIDE'|'FG'|'BG'|'OBJ',
|
||||
'visible': 'true'|'false' }
|
||||
"""
|
||||
res = self.websocketserver.call(self.client.call
|
||||
('Photoshop.get_layers'))
|
||||
|
||||
return self._to_records(res)
|
||||
|
||||
def get_layers_in_layers(self, layers):
|
||||
"""
|
||||
Return all layers that belong to the given layers (which might be groups).
|
||||
:param layers: <list of namedTuples>
|
||||
:return: <list of namedTuples>
|
||||
"""
|
||||
all_layers = self.get_layers()
|
||||
ret = []
|
||||
parent_ids = set([lay.id for lay in layers])
|
||||
|
||||
for layer in all_layers:
|
||||
parents = set(layer.parents)
|
||||
if len(parent_ids & parents) > 0:
|
||||
ret.append(layer)
|
||||
if layer.id in parent_ids:
|
||||
ret.append(layer)
|
||||
|
||||
return ret
|
||||
|
||||
def create_group(self, name):
|
||||
"""
|
||||
Create new group (eg. LayerSet)
|
||||
:return: <namedTuple Layer("id":XX, "name":"YYY")>
|
||||
"""
|
||||
ret = self.websocketserver.call(self.client.call
|
||||
('Photoshop.create_group',
|
||||
name=name))
|
||||
# creating a group in PS is asynchronous; only the id is returned
|
||||
layer = {"id": ret, "name": name, "group": True}
|
||||
return namedtuple('Layer', layer.keys())(*layer.values())
|
||||
|
||||
def group_selected_layers(self, name):
|
||||
"""
|
||||
Group selected layers into new LayerSet (eg. group)
|
||||
:return: <json representation of Layer>
|
||||
"""
|
||||
res = self.websocketserver.call(self.client.call
|
||||
('Photoshop.group_selected_layers',
|
||||
name=name)
|
||||
)
|
||||
return self._to_records(res)
|
||||
|
||||
def get_selected_layers(self):
|
||||
"""
|
||||
Get a list of currently selected layers
|
||||
:return: <list of Layer('id':XX, 'name':"YYY")>
|
||||
"""
|
||||
res = self.websocketserver.call(self.client.call
|
||||
('Photoshop.get_selected_layers'))
|
||||
return self._to_records(res)
|
||||
|
||||
def select_layers(self, layers):
|
||||
"""
|
||||
Select specified layers in Photoshop
|
||||
:param layers: <list of Layer('id':XX, 'name':"YYY")>
|
||||
:return: None
|
||||
"""
|
||||
layer_ids = [layer.id for layer in layers]
|
||||
|
||||
self.websocketserver.call(self.client.call
|
||||
('Photoshop.select_layers',
|
||||
layers=layer_ids)
|
||||
)
|
||||
|
||||
def get_active_document_full_name(self):
|
||||
"""
|
||||
Returns full name with path of active document via ws call
|
||||
:return: <string> full path with name
|
||||
"""
|
||||
res = self.websocketserver.call(
|
||||
self.client.call('Photoshop.get_active_document_full_name'))
|
||||
|
||||
return res
|
||||
|
||||
def get_active_document_name(self):
|
||||
"""
|
||||
Returns just the name of the active document via ws call
|
||||
:return: <string> file name
|
||||
"""
|
||||
res = self.websocketserver.call(self.client.call
|
||||
('Photoshop.get_active_document_name'))
|
||||
|
||||
return res
|
||||
|
||||
def is_saved(self):
|
||||
"""
|
||||
Returns True if there are no unsaved changes in the active document
|
||||
:return: <boolean>
|
||||
"""
|
||||
return self.websocketserver.call(self.client.call
|
||||
('Photoshop.is_saved'))
|
||||
|
||||
def save(self):
|
||||
"""
|
||||
Saves active document
|
||||
:return: None
|
||||
"""
|
||||
self.websocketserver.call(self.client.call
|
||||
('Photoshop.save'))
|
||||
|
||||
def saveAs(self, image_path, ext, as_copy):
|
||||
"""
|
||||
Saves active document to psd (copy) or png or jpg
|
||||
:param image_path: <string> full local path
|
||||
:param ext: <string psd|jpg|png>
|
||||
:param as_copy: <boolean>
|
||||
:return: None
|
||||
"""
|
||||
self.websocketserver.call(self.client.call
|
||||
('Photoshop.saveAs',
|
||||
image_path=image_path,
|
||||
ext=ext,
|
||||
as_copy=as_copy))
|
||||
|
||||
def set_visible(self, layer_id, visibility):
|
||||
"""
|
||||
Set layer with 'layer_id' to 'visibility'
|
||||
:param layer_id: <int>
|
||||
:param visibility: <true - set visible, false - hide>
|
||||
:return: None
|
||||
"""
|
||||
self.websocketserver.call(self.client.call
|
||||
('Photoshop.set_visible',
|
||||
layer_id=layer_id,
|
||||
visibility=visibility))
|
||||
|
||||
def get_layers_metadata(self):
|
||||
"""
|
||||
Reads layers metadata from Headline from active document in PS.
|
||||
(Headline accessible by File > File Info)
|
||||
:return: <dict> - parsed layers metadata
|
||||
"""
|
||||
layers_data = {}
|
||||
res = self.websocketserver.call(self.client.call('Photoshop.read'))
|
||||
try:
|
||||
layers_data = json.loads(res)
|
||||
except json.decoder.JSONDecodeError:
|
||||
pass
|
||||
return layers_data
|
||||
|
||||
def import_smart_object(self, path):
|
||||
"""
|
||||
Import the file at `path` as a smart object to active document.
|
||||
|
||||
Args:
|
||||
path (str): File path to import.
|
||||
"""
|
||||
res = self.websocketserver.call(self.client.call
|
||||
('Photoshop.import_smart_object',
|
||||
path=path))
|
||||
|
||||
return self._to_records(res).pop()
|
||||
|
||||
def replace_smart_object(self, layer, path):
|
||||
"""
|
||||
Replace the smart object `layer` with file at `path`
|
||||
|
||||
Args:
|
||||
layer (namedTuple): Layer("id":XX, "name":"YY"..).
|
||||
path (str): File to import.
|
||||
"""
|
||||
self.websocketserver.call(self.client.call
|
||||
('Photoshop.replace_smart_object',
|
||||
layer=layer,
|
||||
path=path))
|
||||
|
||||
def close(self):
|
||||
self.client.close()
|
||||
|
||||
def _to_records(self, res):
|
||||
"""
|
||||
Converts string json representation into list of named tuples for
|
||||
dot notation access to work.
|
||||
:return: <list of named tuples>
|
||||
:param res: <string> - json representation
|
||||
"""
|
||||
try:
|
||||
layers_data = json.loads(res)
|
||||
except json.decoder.JSONDecodeError:
|
||||
raise ValueError("Received broken JSON {}".format(res))
|
||||
ret = []
|
||||
# convert to namedtuple to use dot notation
|
||||
if isinstance(layers_data, dict):  # TODO refactor
|
||||
layers_data = [layers_data]
|
||||
for d in layers_data:
|
||||
ret.append(namedtuple('Layer', d.keys())(*d.values()))
|
||||
return ret
|
||||
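A hedged usage sketch of the stub above, assuming the websocket server is running and a Photoshop client is already connected; the import path follows this file's location in the change.

from pype.modules.websocket_server.stubs.photoshop_server_stub import (
    PhotoshopServerStub
)

stub = PhotoshopServerStub()

for layer in stub.get_selected_layers():
    # read() returns the metadata stored for this layer, or None
    metadata = stub.read(layer)
    print(layer.name, metadata)
    # imprint() merges new data into the Headline metadata
    stub.imprint(layer, {"family": "image"})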
179
pype/modules/websocket_server/test_client/wsrpc_client.html
Normal file
|
|
@ -0,0 +1,179 @@
|
|||
<!DOCTYPE html>
|
||||
<html lang="en">
|
||||
<head>
|
||||
<meta charset="UTF-8">
|
||||
<title>wsrpc test client</title>
|
||||
|
||||
<!-- CSS only -->
|
||||
<link rel="stylesheet" href="https://stackpath.bootstrapcdn.com/bootstrap/4.5.0/css/bootstrap.min.css" integrity="sha384-9aIt2nRpC12Uk9gS9baDl411NQApFmC26EwAOH8WgZl5MYYxFfc+NcPb1dKGj7Sk" crossorigin="anonymous">
|
||||
|
||||
<script src="https://code.jquery.com/jquery-3.5.1.slim.min.js" integrity="sha384-DfXdz2htPH0lsSSs5nCTpuj/zy4C+OGpamoFVy38MVBnE+IbbVYUew+OrCXaRkfj" crossorigin="anonymous"></script>
|
||||
<script src="https://cdn.jsdelivr.net/npm/popper.js@1.16.0/dist/umd/popper.min.js" integrity="sha384-Q6E9RHvbIyZFJoft+2mJbHaEWldlvI9IOYy5n3zV9zzTtmI3UksdQRVvoxMfooAo" crossorigin="anonymous"></script>
|
||||
<script src="https://stackpath.bootstrapcdn.com/bootstrap/4.5.0/js/bootstrap.min.js" integrity="sha384-OgVRvuATP1z7JjHLkuOU7Xw704+h835Lr+6QL9UvYjZE3Ipu6Tp75j7Bh/kR0JKI" crossorigin="anonymous"></script>
|
||||
|
||||
<script type="text/javascript" src="//unpkg.com/@wsrpc/client"></script>
|
||||
<script>
|
||||
WSRPC.DEBUG = true;
|
||||
WSRPC.TRACE = true;
|
||||
var url = (window.location.protocol === "https:" ? "wss://" : "ws://") + window.location.host + '/ws/';
|
||||
url = 'ws://localhost:8099/ws/';
|
||||
RPC = new WSRPC(url, 5000);
|
||||
|
||||
console.log(RPC.state());
|
||||
// Configure client API, that can be called from server
|
||||
RPC.addRoute('notify', function (data) {
|
||||
console.log('Server called client route "notify":', data);
|
||||
alert('Server called client route "notify": ' + JSON.stringify(data));
|
||||
return data.result;
|
||||
});
|
||||
RPC.connect();
|
||||
console.log(RPC.state());
|
||||
|
||||
$(document).ready(function() {
|
||||
function NoReturn(){
|
||||
// Call stateful route.
|
||||
// server_function_one only logs on the server and returns
|
||||
// nothing.
|
||||
RPC.call('ExternalApp1.server_function_one').then(function (data) {
|
||||
console.log('Result for calling server route "server_function_one": ', data);
|
||||
alert('Function "server_function_one" returned: '+data);
|
||||
}, function (error) {
|
||||
alert(error);
|
||||
});
|
||||
}
|
||||
|
||||
function ReturnValue(){
|
||||
// Call stateful route.
|
||||
// server_function_two logs on the server and returns a simple
|
||||
// text value.
|
||||
RPC.call('ExternalApp1.server_function_two').then(function (data) {
|
||||
console.log('Result for calling server route "server_function_two": ', data);
|
||||
alert('Function "server_function_two" returned: '+data);
|
||||
}, function (error) {
|
||||
alert(error);
|
||||
});
|
||||
}
|
||||
|
||||
function ValueAndNotify(){
|
||||
// After you call this route, the server will execute the 'notify'
|
||||
// route on the client, which is registered above.
|
||||
RPC.call('ExternalApp1.server_function_three').then(function (data) {
|
||||
console.log('Result for calling server route "server_function_three": ', data);
|
||||
alert('Function "server_function_three" returned: '+data);
|
||||
}, function (error) {
|
||||
alert(error);
|
||||
});
|
||||
}
|
||||
|
||||
function SendValue(){
|
||||
// server_function_four echoes the keyword arguments back,
|
||||
// slightly modified on the server.
|
||||
RPC.call('ExternalApp1.server_function_four', {foo: 'one', bar:'two'}).then(function (data) {
|
||||
console.log('Result for calling server route "server_function_four": ', data);
|
||||
alert('Function "server_function_four" returned: '+data);
|
||||
}, function (error) {
|
||||
alert(error);
|
||||
});
|
||||
}
|
||||
|
||||
$('#noReturn').click(function() {
|
||||
NoReturn();
|
||||
})
|
||||
|
||||
$('#returnValue').click(function() {
|
||||
ReturnValue();
|
||||
})
|
||||
|
||||
$('#valueAndNotify').click(function() {
|
||||
ValueAndNotify();
|
||||
})
|
||||
|
||||
$('#sendValue').click(function() {
|
||||
SendValue();
|
||||
})
|
||||
|
||||
})
|
||||
|
||||
<!-- // Call stateless method-->
|
||||
<!-- RPC.call('test2').then(function (data) {-->
|
||||
<!-- console.log('Result for calling server route "test2"', data);-->
|
||||
<!-- });-->
|
||||
</script>
|
||||
</head>
|
||||
<body>
|
||||
|
||||
<div class="d-flex flex-column flex-md-row align-items-center p-3 px-md-4 mb-3 bg-white border-bottom shadow-sm">
|
||||
<h5 class="my-0 mr-md-auto font-weight-normal">Test of wsrpc javascript client</h5>
|
||||
|
||||
</div>
|
||||
|
||||
<div class="container">
|
||||
<div class="card-deck mb-3 text-center">
|
||||
<div class="card mb-4 shadow-sm">
|
||||
<div class="card-header">
|
||||
<h4 class="my-0 font-weight-normal">No return value</h4>
|
||||
</div>
|
||||
<div class="card-body">
|
||||
<ul class="list-unstyled mt-3 mb-4">
|
||||
<li>Calls server_function_one</li>
|
||||
<li>Function only logs on server</li>
|
||||
<li>No return value</li>
|
||||
<li> </li>
|
||||
<li> </li>
|
||||
<li> </li>
|
||||
</ul>
|
||||
<button type="button" id="noReturn" class="btn btn-lg btn-block btn-outline-primary">Call server</button>
|
||||
</div>
|
||||
</div>
|
||||
<div class="card mb-4 shadow-sm">
|
||||
<div class="card-header">
|
||||
<h4 class="my-0 font-weight-normal">Return value</h4>
|
||||
</div>
|
||||
<div class="card-body">
|
||||
<ul class="list-unstyled mt-3 mb-4">
|
||||
<li>Calls server_function_two</li>
|
||||
<li>Function logs on server</li>
|
||||
<li>Returns simple text value</li>
|
||||
<li> </li>
|
||||
<li> </li>
|
||||
<li> </li>
|
||||
</ul>
|
||||
<button type="button" id="returnValue" class="btn btn-lg btn-block btn-outline-primary">Call server</button>
|
||||
</div>
|
||||
</div>
|
||||
<div class="card mb-4 shadow-sm">
|
||||
<div class="card-header">
|
||||
<h4 class="my-0 font-weight-normal">Notify</h4>
|
||||
</div>
|
||||
<div class="card-body">
|
||||
<ul class="list-unstyled mt-3 mb-4">
|
||||
<li>Calls server_function_three</li>
|
||||
<li>Function logs on server</li>
|
||||
<li>Returns json payload </li>
|
||||
<li>Server then calls function ON the client after delay</li>
|
||||
<li> </li>
|
||||
</ul>
|
||||
<button type="button" id="valueAndNotify" class="btn btn-lg btn-block btn-outline-primary">Call server</button>
|
||||
</div>
|
||||
</div>
|
||||
<div class="card mb-4 shadow-sm">
|
||||
<div class="card-header">
|
||||
<h4 class="my-0 font-weight-normal">Send value</h4>
|
||||
</div>
|
||||
<div class="card-body">
|
||||
<ul class="list-unstyled mt-3 mb-4">
|
||||
<li>Calls server_function_four</li>
|
||||
<li>Function logs on server</li>
|
||||
<li>Returns modified sent values</li>
|
||||
<li> </li>
|
||||
<li> </li>
|
||||
<li> </li>
|
||||
</ul>
|
||||
<button type="button" id="sendValue" class="btn btn-lg btn-block btn-outline-primary">Call server</button>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
</body>
|
||||
</html>
|
||||
34
pype/modules/websocket_server/test_client/wsrpc_client.py
Normal file
|
|
@ -0,0 +1,34 @@
|
|||
import asyncio
|
||||
|
||||
from wsrpc_aiohttp import WSRPCClient
|
||||
|
||||
"""
|
||||
Simple Python test client for wsrpc_aiohttp.
|
||||
Calls multiple methods on the server sequentially.
|
||||
"""
|
||||
|
||||
loop = asyncio.get_event_loop()
|
||||
|
||||
|
||||
async def main():
|
||||
print("main")
|
||||
client = WSRPCClient("ws://127.0.0.1:8099/ws/",
|
||||
loop=asyncio.get_event_loop())
|
||||
|
||||
client.add_route('notify', notify)
|
||||
await client.connect()
|
||||
print("connected")
|
||||
print(await client.proxy.ExternalApp1.server_function_one())
|
||||
print(await client.proxy.ExternalApp1.server_function_two())
|
||||
print(await client.proxy.ExternalApp1.server_function_three())
|
||||
print(await client.proxy.ExternalApp1.server_function_four(foo="one"))
|
||||
await client.close()
|
||||
|
||||
|
||||
def notify(socket, *args, **kwargs):
|
||||
print("called from server")
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
# loop.run_until_complete(main())
|
||||
asyncio.run(main())
|
||||
221
pype/modules/websocket_server/websocket_server.py
Normal file
|
|
@ -0,0 +1,221 @@
|
|||
from pype.api import Logger
|
||||
|
||||
import threading
|
||||
from aiohttp import web
|
||||
import asyncio
|
||||
from wsrpc_aiohttp import STATIC_DIR, WebSocketAsync
|
||||
|
||||
import os
|
||||
import sys
|
||||
import pyclbr
|
||||
import importlib
|
||||
import urllib
|
||||
|
||||
log = Logger().get_logger("WebsocketServer")
|
||||
|
||||
|
||||
class WebSocketServer():
|
||||
"""
|
||||
Basic POC implementation of an asynchronous websocket RPC server.
|
||||
Uses the class in external_app_1.py to mimic an implementation for
|
||||
a single external application.
|
||||
The 'test_client' folder contains two test client implementations.
|
||||
"""
|
||||
_instance = None
|
||||
|
||||
def __init__(self):
|
||||
self.qaction = None
|
||||
self.failed_icon = None
|
||||
self._is_running = False
|
||||
WebSocketServer._instance = self
|
||||
self.client = None
|
||||
self.handlers = {}
|
||||
|
||||
port = 8099  # default port
|
||||
websocket_url = os.getenv("WEBSOCKET_URL")
|
||||
if websocket_url:
|
||||
parsed = urllib.parse.urlparse(websocket_url)
|
||||
if parsed.port:
|
||||
port = parsed.port
|
||||
|
||||
self.app = web.Application()
|
||||
|
||||
self.app.router.add_route("*", "/ws/", WebSocketAsync)
|
||||
self.app.router.add_static("/js", STATIC_DIR)
|
||||
self.app.router.add_static("/", ".")
|
||||
|
||||
# add route with multiple methods for single "external app"
|
||||
directories_with_routes = ['hosts']
|
||||
self.add_routes_for_directories(directories_with_routes)
|
||||
|
||||
self.websocket_thread = WebsocketServerThread(self, port)
|
||||
|
||||
def add_routes_for_directories(self, directories_with_routes):
|
||||
""" Loops through the selected directories to find all modules and,
|
||||
in them, all classes implementing 'WebSocketRoute' that can be
|
||||
used as routes.
|
||||
All methods in these classes are registered automatically.
|
||||
"""
|
||||
for dir_name in directories_with_routes:
|
||||
dir_name = os.path.join(os.path.dirname(__file__), dir_name)
|
||||
for file_name in os.listdir(dir_name):
|
||||
if file_name.endswith('.py') and '__' not in file_name:
|
||||
self.add_routes_for_module(file_name, dir_name)
|
||||
|
||||
def add_routes_for_module(self, file_name, dir_name):
|
||||
""" Auto routes for all classes implementing 'WebSocketRoute'
|
||||
in 'file_name' in 'dir_name'
|
||||
"""
|
||||
module_name = file_name.replace('.py', '')
|
||||
module_info = pyclbr.readmodule(module_name, [dir_name])
|
||||
|
||||
for class_name, cls_object in module_info.items():
|
||||
sys.path.append(dir_name)
|
||||
if 'WebSocketRoute' in cls_object.super:
|
||||
log.debug('Adding route for {}'.format(class_name))
|
||||
module = importlib.import_module(module_name)
|
||||
cls = getattr(module, class_name)
|
||||
WebSocketAsync.add_route(class_name, cls)
|
||||
sys.path.pop()
|
||||
|
||||
def call(self, func):
|
||||
log.debug("websocket.call {}".format(func))
|
||||
future = asyncio.run_coroutine_threadsafe(func,
|
||||
self.websocket_thread.loop)
|
||||
result = future.result()
|
||||
return result
|
||||
|
||||
def get_client(self):
|
||||
"""
|
||||
Return the first client connected to the WebSocket
|
||||
TODO implement selection by Route
|
||||
:return: <WebSocketAsync> client
|
||||
"""
|
||||
clients = WebSocketAsync.get_clients()
|
||||
client = None
|
||||
if len(clients) > 0:
|
||||
key = list(clients.keys())[0]
|
||||
client = clients.get(key)
|
||||
|
||||
return client
|
||||
|
||||
@staticmethod
|
||||
def get_instance():
|
||||
if WebSocketServer._instance is None:
|
||||
WebSocketServer()
|
||||
return WebSocketServer._instance
|
||||
|
||||
def tray_start(self):
|
||||
self.websocket_thread.start()
|
||||
|
||||
def tray_exit(self):
|
||||
self.stop()
|
||||
|
||||
def stop_websocket_server(self):
|
||||
|
||||
self.stop()
|
||||
|
||||
@property
|
||||
def is_running(self):
|
||||
return self.websocket_thread.is_running
|
||||
|
||||
def stop(self):
|
||||
if not self.is_running:
|
||||
return
|
||||
try:
|
||||
log.debug("Stopping websocket server")
|
||||
self.websocket_thread.is_running = False
|
||||
self.websocket_thread.stop()
|
||||
except Exception:
|
||||
log.warning(
|
||||
"Error happened while stopping the websocket server",
|
||||
exc_info=True
|
||||
)
|
||||
|
||||
def thread_stopped(self):
|
||||
self._is_running = False
|
||||
|
||||
|
||||
class WebsocketServerThread(threading.Thread):
|
||||
""" Listener for websocket rpc requests.
|
||||
|
||||
It would probably be better to "attach" this to the main thread (for
|
||||
example, Harmony needs to run some things on the main thread), but
|
||||
currently it creates a separate thread and a separate asyncio event loop.
|
||||
"""
|
||||
def __init__(self, module, port):
|
||||
super(WebsocketServerThread, self).__init__()
|
||||
self.is_running = False
|
||||
self.port = port
|
||||
self.module = module
|
||||
self.loop = None
|
||||
self.runner = None
|
||||
self.site = None
|
||||
self.tasks = []
|
||||
|
||||
def run(self):
|
||||
self.is_running = True
|
||||
|
||||
try:
|
||||
log.info("Starting websocket server")
|
||||
self.loop = asyncio.new_event_loop() # create new loop for thread
|
||||
asyncio.set_event_loop(self.loop)
|
||||
|
||||
self.loop.run_until_complete(self.start_server())
|
||||
|
||||
log.debug(
|
||||
"Running Websocket server on URL:"
|
||||
" \"ws://localhost:{}\"".format(self.port)
|
||||
)
|
||||
|
||||
asyncio.ensure_future(self.check_shutdown(), loop=self.loop)
|
||||
self.loop.run_forever()
|
||||
except Exception:
|
||||
log.warning(
|
||||
"Websocket Server service has failed", exc_info=True
|
||||
)
|
||||
finally:
|
||||
self.loop.close() # optional
|
||||
|
||||
self.is_running = False
|
||||
self.module.thread_stopped()
|
||||
log.info("Websocket server stopped")
|
||||
|
||||
async def start_server(self):
|
||||
""" Starts runner and TCPsite """
|
||||
self.runner = web.AppRunner(self.module.app)
|
||||
await self.runner.setup()
|
||||
self.site = web.TCPSite(self.runner, 'localhost', self.port)
|
||||
await self.site.start()
|
||||
|
||||
def stop(self):
|
||||
"""Sets is_running flag to false, 'check_shutdown' shuts server down"""
|
||||
self.is_running = False
|
||||
|
||||
async def check_shutdown(self):
|
||||
""" Long-running future that periodically checks whether the server
|
||||
should still be running.
|
||||
"""
|
||||
while self.is_running:
|
||||
while self.tasks:
|
||||
task = self.tasks.pop(0)
|
||||
log.debug("waiting for task {}".format(task))
|
||||
await task
|
||||
log.debug("returned value {}".format(task.result()))
|
||||
|
||||
await asyncio.sleep(0.5)
|
||||
|
||||
log.debug("Starting shutdown")
|
||||
await self.site.stop()
|
||||
log.debug("Site stopped")
|
||||
await self.runner.cleanup()
|
||||
log.debug("Runner stopped")
|
||||
tasks = [task for task in asyncio.all_tasks() if
|
||||
task is not asyncio.current_task()]
|
||||
list(map(lambda task: task.cancel(), tasks)) # cancel all the tasks
|
||||
results = await asyncio.gather(*tasks, return_exceptions=True)
|
||||
log.debug(f'Finished awaiting cancelled tasks, results: {results}...')
|
||||
await self.loop.shutdown_asyncgens()
|
||||
# to really make sure everything else has time to stop
|
||||
await asyncio.sleep(0.07)
|
||||
self.loop.stop()
|
||||
|
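The call() method above is the bridge between synchronous host code and the event loop owned by WebsocketServerThread; a standalone sketch of that run_coroutine_threadsafe pattern follows (names here are illustrative, not part of the module).

import asyncio
import threading

# loop owned by a worker thread, as in WebsocketServerThread.run()
loop = asyncio.new_event_loop()
threading.Thread(target=loop.run_forever, daemon=True).start()


async def coro():
    await asyncio.sleep(0.1)
    return "done"

# schedule the coroutine on the other thread's loop; result() blocks
# the calling (synchronous) thread until the coroutine finishes
future = asyncio.run_coroutine_threadsafe(coro(), loop)
print(future.result())
loop.call_soon_threadsafe(loop.stop)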
|
@ -97,6 +97,7 @@ class IntegrateFtrackApi(pyblish.api.InstancePlugin):
|
|||
except Exception:
|
||||
tp, value, tb = sys.exc_info()
|
||||
session.rollback()
|
||||
session._configure_locations()
|
||||
six.reraise(tp, value, tb)
|
||||
|
||||
def process(self, instance):
|
||||
|
|
@ -178,6 +179,7 @@ class IntegrateFtrackApi(pyblish.api.InstancePlugin):
|
|||
except Exception:
|
||||
tp, value, tb = sys.exc_info()
|
||||
session.rollback()
|
||||
session._configure_locations()
|
||||
six.reraise(tp, value, tb)
|
||||
|
||||
# Adding metadata
|
||||
|
|
@ -228,6 +230,7 @@ class IntegrateFtrackApi(pyblish.api.InstancePlugin):
|
|||
except Exception:
|
||||
tp, value, tb = sys.exc_info()
|
||||
session.rollback()
|
||||
session._configure_locations()
|
||||
six.reraise(tp, value, tb)
|
||||
|
||||
# Adding metadata
|
||||
|
|
@ -242,6 +245,7 @@ class IntegrateFtrackApi(pyblish.api.InstancePlugin):
|
|||
session.commit()
|
||||
except Exception:
|
||||
session.rollback()
|
||||
session._configure_locations()
|
||||
self.log.warning((
|
||||
"Comment could not be set for AssetVersion"
|
||||
" \"{0}\". Can't set its value to: \"{1}\""
|
||||
|
|
@ -258,6 +262,7 @@ class IntegrateFtrackApi(pyblish.api.InstancePlugin):
|
|||
continue
|
||||
except Exception:
|
||||
session.rollback()
|
||||
session._configure_locations()
|
||||
|
||||
self.log.warning((
|
||||
"Custom Attribute \"{0}\""
|
||||
|
|
@ -272,6 +277,7 @@ class IntegrateFtrackApi(pyblish.api.InstancePlugin):
|
|||
except Exception:
|
||||
tp, value, tb = sys.exc_info()
|
||||
session.rollback()
|
||||
session._configure_locations()
|
||||
six.reraise(tp, value, tb)
|
||||
|
||||
# Component
|
||||
|
|
@ -316,6 +322,7 @@ class IntegrateFtrackApi(pyblish.api.InstancePlugin):
|
|||
except Exception:
|
||||
tp, value, tb = sys.exc_info()
|
||||
session.rollback()
|
||||
session._configure_locations()
|
||||
six.reraise(tp, value, tb)
|
||||
|
||||
# Reset members in memory
|
||||
|
|
@ -432,6 +439,7 @@ class IntegrateFtrackApi(pyblish.api.InstancePlugin):
|
|||
except Exception:
|
||||
tp, value, tb = sys.exc_info()
|
||||
session.rollback()
|
||||
session._configure_locations()
|
||||
six.reraise(tp, value, tb)
|
||||
|
||||
if assetversion_entity not in used_asset_versions:
|
||||
|
|
|
|||
|
|
@ -88,8 +88,14 @@ class IntegrateFtrackInstance(pyblish.api.InstancePlugin):
|
|||
instance.data["frameEnd"] - instance.data["frameStart"]
|
||||
)
|
||||
|
||||
if not comp.get('fps'):
|
||||
comp['fps'] = instance.context.data['fps']
|
||||
fps = comp.get('fps')
|
||||
if fps is None:
|
||||
fps = instance.data.get(
|
||||
"fps", instance.context.data['fps']
|
||||
)
|
||||
|
||||
comp['fps'] = fps
|
||||
|
||||
location = self.get_ftrack_location(
|
||||
'ftrack.server', ft_session
|
||||
)
|
||||
|
|
|
|||
|
|
@ -145,4 +145,5 @@ class IntegrateFtrackNote(pyblish.api.InstancePlugin):
|
|||
except Exception:
|
||||
tp, value, tb = sys.exc_info()
|
||||
session.rollback()
|
||||
session._configure_locations()
|
||||
six.reraise(tp, value, tb)
|
||||
|
|
|
|||
|
|
@ -2,6 +2,7 @@ import sys
|
|||
import six
|
||||
import pyblish.api
|
||||
from avalon import io
|
||||
from pprint import pformat
|
||||
|
||||
try:
|
||||
from pype.modules.ftrack.lib.avalon_sync import CUST_ATTR_AUTO_SYNC
|
||||
|
|
@ -40,9 +41,14 @@ class IntegrateHierarchyToFtrack(pyblish.api.ContextPlugin):
|
|||
|
||||
def process(self, context):
|
||||
self.context = context
|
||||
if "hierarchyContext" not in context.data:
|
||||
if "hierarchyContext" not in self.context.data:
|
||||
return
|
||||
|
||||
hierarchy_context = self.context.data["hierarchyContext"]
|
||||
|
||||
self.log.debug(
|
||||
f"__ hierarchy_context: `{pformat(hierarchy_context)}`")
|
||||
|
||||
self.session = self.context.data["ftrackSession"]
|
||||
project_name = self.context.data["projectEntity"]["name"]
|
||||
query = 'Project where full_name is "{}"'.format(project_name)
|
||||
|
|
@ -55,7 +61,7 @@ class IntegrateHierarchyToFtrack(pyblish.api.ContextPlugin):
|
|||
|
||||
self.ft_project = None
|
||||
|
||||
input_data = context.data["hierarchyContext"]
|
||||
input_data = hierarchy_context
|
||||
|
||||
# temporarily disable ftrack project's autosyncing
|
||||
if auto_sync_state:
|
||||
|
|
@ -128,6 +134,7 @@ class IntegrateHierarchyToFtrack(pyblish.api.ContextPlugin):
|
|||
except Exception:
|
||||
tp, value, tb = sys.exc_info()
|
||||
self.session.rollback()
|
||||
self.session._configure_locations()
|
||||
six.reraise(tp, value, tb)
|
||||
|
||||
# TASKS
|
||||
|
|
@ -156,6 +163,7 @@ class IntegrateHierarchyToFtrack(pyblish.api.ContextPlugin):
|
|||
except Exception:
|
||||
tp, value, tb = sys.exc_info()
|
||||
self.session.rollback()
|
||||
self.session._configure_locations()
|
||||
six.reraise(tp, value, tb)
|
||||
|
||||
# Incoming links.
|
||||
|
|
@ -165,8 +173,31 @@ class IntegrateHierarchyToFtrack(pyblish.api.ContextPlugin):
|
|||
except Exception:
|
||||
tp, value, tb = sys.exc_info()
|
||||
self.session.rollback()
|
||||
self.session._configure_locations()
|
||||
six.reraise(tp, value, tb)
|
||||
|
||||
# Create notes.
|
||||
user = self.session.query(
|
||||
"User where username is \"{}\"".format(self.session.api_user)
|
||||
).first()
|
||||
if user:
|
||||
for comment in entity_data.get("comments", []):
|
||||
entity.create_note(comment, user)
|
||||
else:
|
||||
self.log.warning(
|
||||
"Was not able to query current User {}".format(
|
||||
self.session.api_user
|
||||
)
|
||||
)
|
||||
try:
|
||||
self.session.commit()
|
||||
except Exception:
|
||||
tp, value, tb = sys.exc_info()
|
||||
self.session.rollback()
|
||||
self.session._configure_locations()
|
||||
six.reraise(tp, value, tb)
|
||||
|
||||
# Import children.
|
||||
if 'childs' in entity_data:
|
||||
self.import_to_ftrack(
|
||||
entity_data['childs'], entity)
|
||||
|
|
@ -180,6 +211,7 @@ class IntegrateHierarchyToFtrack(pyblish.api.ContextPlugin):
|
|||
except Exception:
|
||||
tp, value, tb = sys.exc_info()
|
||||
self.session.rollback()
|
||||
self.session._configure_locations()
|
||||
six.reraise(tp, value, tb)
|
||||
|
||||
# Create new links.
|
||||
|
|
@ -221,6 +253,7 @@ class IntegrateHierarchyToFtrack(pyblish.api.ContextPlugin):
|
|||
except Exception:
|
||||
tp, value, tb = sys.exc_info()
|
||||
self.session.rollback()
|
||||
self.session._configure_locations()
|
||||
six.reraise(tp, value, tb)
|
||||
|
||||
return task
|
||||
|
|
@ -235,6 +268,7 @@ class IntegrateHierarchyToFtrack(pyblish.api.ContextPlugin):
|
|||
except Exception:
|
||||
tp, value, tb = sys.exc_info()
|
||||
self.session.rollback()
|
||||
self.session._configure_locations()
|
||||
six.reraise(tp, value, tb)
|
||||
|
||||
return entity
|
||||
|
|
@ -249,7 +283,8 @@ class IntegrateHierarchyToFtrack(pyblish.api.ContextPlugin):
|
|||
except Exception:
|
||||
tp, value, tb = sys.exc_info()
|
||||
self.session.rollback()
|
||||
raise
|
||||
self.session._configure_locations()
|
||||
six.reraise(tp, value, tb)
|
||||
|
||||
def auto_sync_on(self, project):
|
||||
|
||||
|
|
@ -262,4 +297,5 @@ class IntegrateHierarchyToFtrack(pyblish.api.ContextPlugin):
|
|||
except Exception:
|
||||
tp, value, tb = sys.exc_info()
|
||||
self.session.rollback()
|
||||
raise
|
||||
self.session._configure_locations()
|
||||
six.reraise(tp, value, tb)
|
||||
|
|
|
|||
|
|
@ -20,8 +20,8 @@ class CopyFile(api.Loader):
|
|||
def copy_file_to_clipboard(path):
|
||||
from avalon.vendor.Qt import QtCore, QtWidgets
|
||||
|
||||
app = QtWidgets.QApplication.instance()
|
||||
assert app, "Must have running QApplication instance"
|
||||
clipboard = QtWidgets.QApplication.clipboard()
|
||||
assert clipboard, "Must have running QApplication instance"
|
||||
|
||||
# Build mime data for clipboard
|
||||
data = QtCore.QMimeData()
|
||||
|
|
@ -29,5 +29,4 @@ class CopyFile(api.Loader):
|
|||
data.setUrls([url])
|
||||
|
||||
# Set to Clipboard
|
||||
clipboard = app.clipboard()
|
||||
clipboard.setMimeData(data)
|
||||
|
|
|
|||
|
|
@ -19,11 +19,10 @@ class CopyFilePath(api.Loader):
|
|||
|
||||
@staticmethod
|
||||
def copy_path_to_clipboard(path):
|
||||
from avalon.vendor.Qt import QtCore, QtWidgets
|
||||
from avalon.vendor.Qt import QtWidgets
|
||||
|
||||
app = QtWidgets.QApplication.instance()
|
||||
assert app, "Must have running QApplication instance"
|
||||
clipboard = QtWidgets.QApplication.clipboard()
|
||||
assert clipboard, "Must have running QApplication instance"
|
||||
|
||||
# Set to Clipboard
|
||||
clipboard = app.clipboard()
|
||||
clipboard.setText(os.path.normpath(path))
|
||||
|
|
|
|||
|
|
@ -25,7 +25,8 @@ class ExtractBurnin(pype.api.Extractor):
|
|||
"shell",
|
||||
"nukestudio",
|
||||
"premiere",
|
||||
"standalonepublisher"
|
||||
"standalonepublisher",
|
||||
"harmony"
|
||||
]
|
||||
optional = True
|
||||
|
||||
|
|
@ -194,11 +195,14 @@ class ExtractBurnin(pype.api.Extractor):
|
|||
if "delete" in new_repre["tags"]:
|
||||
new_repre["tags"].remove("delete")
|
||||
|
||||
# Update name and outputName to be able have multiple outputs
|
||||
# Join previous "outputName" with filename suffix
|
||||
new_name = "_".join([new_repre["outputName"], filename_suffix])
|
||||
new_repre["name"] = new_name
|
||||
new_repre["outputName"] = new_name
|
||||
if len(repre_burnin_defs.keys()) > 1:
|
||||
# Update name and outputName to be
|
||||
# able have multiple outputs in case of more burnin presets
|
||||
# Join previous "outputName" with filename suffix
|
||||
new_name = "_".join(
|
||||
[new_repre["outputName"], filename_suffix])
|
||||
new_repre["name"] = new_name
|
||||
new_repre["outputName"] = new_name
|
||||
|
||||
# Prepare paths and files for process.
|
||||
self.input_output_paths(new_repre, temp_data, filename_suffix)
|
||||
|
|
|
|||
|
|
@ -10,6 +10,7 @@ class ExtractHierarchyToAvalon(pyblish.api.ContextPlugin):
|
|||
families = ["clip", "shot"]
|
||||
|
||||
def process(self, context):
|
||||
# processing starts here
|
||||
if "hierarchyContext" not in context.data:
|
||||
self.log.info("skipping IntegrateHierarchyToAvalon")
|
||||
return
|
||||
|
|
@ -17,7 +18,29 @@ class ExtractHierarchyToAvalon(pyblish.api.ContextPlugin):
|
|||
if not io.Session:
|
||||
io.install()
|
||||
|
||||
input_data = context.data["hierarchyContext"]
|
||||
active_assets = []
|
||||
hierarchy_context = context.data["hierarchyContext"]
|
||||
hierarchy_assets = self._get_assets(hierarchy_context)
|
||||
|
||||
# filter only the active publishing instances
|
||||
for instance in context:
|
||||
if instance.data.get("publish") is False:
|
||||
continue
|
||||
|
||||
if not instance.data.get("asset"):
|
||||
continue
|
||||
|
||||
active_assets.append(instance.data["asset"])
|
||||
|
||||
# filter out only assets which are activated as instances
|
||||
new_hierarchy_assets = {k: v for k, v in hierarchy_assets.items()
|
||||
if k in active_assets}
|
||||
|
||||
# modify the hierarchy context so there are only filtered assets
|
||||
self._set_assets(hierarchy_context, new_hierarchy_assets)
|
||||
|
||||
input_data = context.data["hierarchyContext"] = hierarchy_context
|
||||
|
||||
self.project = None
|
||||
self.import_to_avalon(input_data)
|
||||
|
||||
|
|
@ -83,7 +106,6 @@ class ExtractHierarchyToAvalon(pyblish.api.ContextPlugin):
|
|||
for task_name in new_tasks:
|
||||
if task_name not in cur_entity_data["tasks"]:
|
||||
cur_entity_data["tasks"].append(task_name)
|
||||
|
||||
cur_entity_data.update(data)
|
||||
data = cur_entity_data
|
||||
else:
|
||||
|
|
@ -150,3 +172,41 @@ class ExtractHierarchyToAvalon(pyblish.api.ContextPlugin):
|
|||
entity_id = io.insert_one(item).inserted_id
|
||||
|
||||
return io.find_one({"_id": entity_id})
|
||||
|
||||
def _get_assets(self, input_dict):
|
||||
""" Returns only the asset dictionary.
|
||||
Usually the deepest part of the dictionary, which
|
||||
does not have any children.
|
||||
"""
|
||||
for key in input_dict.keys():
|
||||
# check if child key is available
|
||||
if input_dict[key].get("childs"):
|
||||
# loop deeper
|
||||
return self._get_assets(input_dict[key]["childs"])
|
||||
else:
|
||||
# give the dictionary with assets
|
||||
return input_dict
|
||||
|
||||
def _set_assets(self, input_dict, new_assets=None):
|
||||
""" Modify the hierarchy context dictionary.
|
||||
It will replace the asset dictionary with only the filtered one.
|
||||
"""
|
||||
for key in input_dict.keys():
|
||||
# check if child key is available
|
||||
if input_dict[key].get("childs"):
|
||||
# return if this is just for testing purposes and no
|
||||
# new_assets property is available
|
||||
if not new_assets:
|
||||
return True
|
||||
|
||||
# test for deeper inner children availability
|
||||
if self._set_assets(input_dict[key]["childs"]):
|
||||
# if children are still available one level deeper,
|
||||
# then process further
|
||||
self._set_assets(input_dict[key]["childs"], new_assets)
|
||||
else:
|
||||
# or just assign the filtered asset dictionary
|
||||
input_dict[key]["childs"] = new_assets
|
||||
else:
|
||||
# test didn't find more children in input dictionary
|
||||
return None
|
||||
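To make the recursion in _get_assets/_set_assets concrete, an illustrative hierarchyContext (project and sequence names are made up): asset entries are the deepest dicts without a "childs" key, and _set_assets writes the filtered asset dict back at that depth.

hierarchy_context = {
    "ExampleProject": {
        "childs": {
            "seq01": {
                "childs": {
                    "sh010": {"entity_type": "Shot"},
                    "sh020": {"entity_type": "Shot"},
                },
            },
        },
    },
}

active_assets = ["sh010"]

# _get_assets(hierarchy_context) would return the dict holding
# "sh010" and "sh020"; filtering it to the active instances and
# writing it back with _set_assets leaves only the active shots
# under seq01's "childs".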
|
|
|
|||
|
|
@ -81,6 +81,11 @@ class ExtractJpegEXR(pyblish.api.InstancePlugin):
|
|||
jpeg_items.append("-i {}".format(full_input_path))
|
||||
# output arguments from presets
|
||||
jpeg_items.extend(ffmpeg_args.get("output") or [])
|
||||
|
||||
# If its a movie file, we just want one frame.
|
||||
if repre["ext"] == "mov":
|
||||
jpeg_items.append("-vframes 1")
|
||||
|
||||
# output file
|
||||
jpeg_items.append(full_output_path)
|
||||
|
||||
|
|
|
|||
|
|
@ -6,6 +6,8 @@ import copy
|
|||
import clique
|
||||
import errno
|
||||
import six
|
||||
import re
|
||||
import shutil
|
||||
|
||||
from pymongo import DeleteOne, InsertOne
|
||||
import pyblish.api
|
||||
|
|
@ -680,6 +682,14 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
|
|||
instance.data.get('subsetGroup')}}
|
||||
)
|
||||
|
||||
# Update families on subset.
|
||||
families = [instance.data["family"]]
|
||||
families.extend(instance.data.get("families", []))
|
||||
io.update_many(
|
||||
{"type": "subset", "_id": io.ObjectId(subset["_id"])},
|
||||
{"$set": {"data.families": families}}
|
||||
)
|
||||
|
||||
return subset
|
||||
|
||||
def create_version(self, subset, version_number, data=None):
|
||||
|
|
@ -952,21 +962,37 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
|
|||
"""
|
||||
if integrated_file_sizes:
|
||||
for file_url, _file_size in integrated_file_sizes.items():
|
||||
if not os.path.exists(file_url):
|
||||
self.log.debug(
|
||||
"File {} was not found.".format(file_url)
|
||||
)
|
||||
continue
|
||||
|
||||
try:
|
||||
if mode == 'remove':
|
||||
self.log.debug("Removing file ...{}".format(file_url))
|
||||
self.log.debug("Removing file {}".format(file_url))
|
||||
os.remove(file_url)
|
||||
if mode == 'finalize':
|
||||
self.log.debug("Renaming file ...{}".format(file_url))
|
||||
import re
|
||||
os.rename(file_url,
|
||||
re.sub('\.{}$'.format(self.TMP_FILE_EXT),
|
||||
'',
|
||||
file_url)
|
||||
)
|
||||
new_name = re.sub(
|
||||
r'\.{}$'.format(self.TMP_FILE_EXT),
|
||||
'',
|
||||
file_url
|
||||
)
|
||||
|
||||
except FileNotFoundError:
|
||||
pass # file not there, nothing to delete
|
||||
if os.path.exists(new_name):
|
||||
self.log.debug(
|
||||
"Overwriting file {} to {}".format(
|
||||
file_url, new_name
|
||||
)
|
||||
)
|
||||
shutil.copy(file_url, new_name)
|
||||
else:
|
||||
self.log.debug(
|
||||
"Renaming file {} to {}".format(
|
||||
file_url, new_name
|
||||
)
|
||||
)
|
||||
os.rename(file_url, new_name)
|
||||
except OSError:
|
||||
self.log.error("Cannot {} file {}".format(mode, file_url),
|
||||
exc_info=True)
|
||||
|
|
|
|||
|
|
@ -428,7 +428,7 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
|
|||
"to render, don't know what to do "
|
||||
"with them.")
|
||||
col = rem[0]
|
||||
_, ext = os.path.splitext(col)
|
||||
ext = os.path.splitext(col)[1].lstrip(".")
|
||||
else:
|
||||
# but we really expect only one collection.
|
||||
# Nothing else make sense.
|
||||
|
|
@ -718,7 +718,8 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
|
|||
"resolutionWidth": data.get("resolutionWidth", 1920),
|
||||
"resolutionHeight": data.get("resolutionHeight", 1080),
|
||||
"multipartExr": data.get("multipartExr", False),
|
||||
"jobBatchName": data.get("jobBatchName", "")
|
||||
"jobBatchName": data.get("jobBatchName", ""),
|
||||
"review": data.get("review", True)
|
||||
}
|
||||
|
||||
if "prerender" in instance.data["families"]:
|
||||
|
|
|
|||
31
pype/plugins/global/publish/validate_intent.py
Normal file
|
|
@ -0,0 +1,31 @@
|
|||
import pyblish.api
|
||||
import os
|
||||
|
||||
|
||||
class ValidateIntent(pyblish.api.ContextPlugin):
|
||||
"""Validate intent of the publish.
|
||||
|
||||
It is required to fill in the intent of this publish. Check the log
|
||||
for more details.
|
||||
"""
|
||||
|
||||
order = pyblish.api.ValidatorOrder
|
||||
|
||||
label = "Validate Intent"
|
||||
# TODO: this should be off by default and only activated via config
|
||||
tasks = ["animation"]
|
||||
hosts = ["harmony"]
|
||||
if os.environ.get("AVALON_TASK") not in tasks:
|
||||
active = False
|
||||
|
||||
def process(self, context):
|
||||
msg = (
|
||||
"Please make sure that you select the intent of this publish."
|
||||
)
|
||||
|
||||
intent = context.data.get("intent")
|
||||
self.log.debug(intent)
|
||||
assert intent, msg
|
||||
|
||||
intent_value = intent.get("value")
|
||||
assert intent_value != "", msg
|
||||
|
|
@ -10,7 +10,7 @@ class ValidateVersion(pyblish.api.InstancePlugin):
|
|||
order = pyblish.api.ValidatorOrder
|
||||
|
||||
label = "Validate Version"
|
||||
hosts = ["nuke", "maya", "blender"]
|
||||
hosts = ["nuke", "maya", "blender", "standalonepublisher"]
|
||||
|
||||
def process(self, instance):
|
||||
version = instance.data.get("version")
|
||||
|
|
|
|||
|
|
@ -31,7 +31,7 @@ func
|
|||
class ImportAudioLoader(api.Loader):
|
||||
"""Import audio."""
|
||||
|
||||
families = ["shot"]
|
||||
families = ["shot", "audio"]
|
||||
representations = ["wav"]
|
||||
label = "Import Audio"
|
||||
|
||||
|
|
|
|||
|
|
@ -230,7 +230,7 @@ class ImageSequenceLoader(api.Loader):
|
|||
"""Load images
|
||||
Stores the imported asset in a container named after the asset.
|
||||
"""
|
||||
families = ["shot", "render", "image", "plate"]
|
||||
families = ["shot", "render", "image", "plate", "reference"]
|
||||
representations = ["jpeg", "png", "jpg"]
|
||||
|
||||
def load(self, context, name=None, namespace=None, data=None):
|
||||
|
|
|
|||
|
|
@ -4,6 +4,7 @@ import subprocess
|
|||
|
||||
import pyblish.api
|
||||
from avalon import harmony
|
||||
import pype.lib
|
||||
|
||||
import clique
|
||||
|
||||
|
|
@ -43,6 +44,9 @@ class ExtractRender(pyblish.api.InstancePlugin):
|
|||
frame_start = result[4]
|
||||
frame_end = result[5]
|
||||
audio_path = result[6]
|
||||
if audio_path:
|
||||
instance.data["audio"] = [{"filename": audio_path}]
|
||||
instance.data["fps"] = frame_rate
|
||||
|
||||
# Set output path to temp folder.
|
||||
path = tempfile.mkdtemp()
|
||||
|
|
@ -87,17 +91,13 @@ class ExtractRender(pyblish.api.InstancePlugin):
|
|||
if len(list(col)) > 1:
|
||||
collection = col
|
||||
else:
|
||||
# assert len(collections) == 1, (
|
||||
# "There should only be one image sequence in {}. Found: {}".format(
|
||||
# path, len(collections)
|
||||
# )
|
||||
# )
|
||||
collection = collections[0]
|
||||
|
||||
# Generate thumbnail.
|
||||
thumbnail_path = os.path.join(path, "thumbnail.png")
|
||||
ffmpeg_path = pype.lib.get_ffmpeg_tool_path("ffmpeg")
|
||||
args = [
|
||||
"ffmpeg", "-y",
|
||||
ffmpeg_path, "-y",
|
||||
"-i", os.path.join(path, list(collections[0])[0]),
|
||||
"-vf", "scale=300:-1",
|
||||
"-vframes", "1",
|
||||
|
|
@ -117,57 +117,17 @@ class ExtractRender(pyblish.api.InstancePlugin):
|
|||
|
||||
self.log.debug(output.decode("utf-8"))
|
||||
|
||||
# Generate mov.
|
||||
mov_path = os.path.join(path, instance.data["name"] + ".mov")
|
||||
if os.path.isfile(audio_path):
|
||||
args = [
|
||||
"ffmpeg", "-y",
|
||||
"-i", audio_path,
|
||||
"-i",
|
||||
os.path.join(path, collection.head + "%04d" + collection.tail),
|
||||
mov_path
|
||||
]
|
||||
else:
|
||||
args = [
|
||||
"ffmpeg", "-y",
|
||||
"-i",
|
||||
os.path.join(path, collection.head + "%04d" + collection.tail),
|
||||
mov_path
|
||||
]
|
||||
|
||||
process = subprocess.Popen(
|
||||
args,
|
||||
stdout=subprocess.PIPE,
|
||||
stderr=subprocess.STDOUT,
|
||||
stdin=subprocess.PIPE
|
||||
)
|
||||
|
||||
output = process.communicate()[0]
|
||||
|
||||
if process.returncode != 0:
|
||||
raise ValueError(output.decode("utf-8"))
|
||||
|
||||
self.log.debug(output.decode("utf-8"))
|
||||
|
||||
# Generate representations.
|
||||
extension = collection.tail[1:]
|
||||
representation = {
|
||||
"name": extension,
|
||||
"ext": extension,
|
||||
"files": list(collection),
|
||||
"stagingDir": path
|
||||
}
|
||||
movie = {
|
||||
"name": "mov",
|
||||
"ext": "mov",
|
||||
"files": os.path.basename(mov_path),
|
||||
"stagingDir": path,
|
||||
"frameStart": frame_start,
|
||||
"frameEnd": frame_end,
|
||||
"fps": frame_rate,
|
||||
"preview": True,
|
||||
"tags": ["review", "ftrackreview"]
|
||||
"tags": ["review"],
|
||||
"fps": frame_rate
|
||||
}
|
||||
|
||||
thumbnail = {
|
||||
"name": "thumbnail",
|
||||
"ext": "png",
|
||||
|
|
@ -175,7 +135,7 @@ class ExtractRender(pyblish.api.InstancePlugin):
|
|||
"stagingDir": path,
|
||||
"tags": ["thumbnail"]
|
||||
}
|
||||
instance.data["representations"] = [representation, movie, thumbnail]
|
||||
instance.data["representations"] = [representation, thumbnail]
|
||||
|
||||
# Required for extract_review plugin (L222 onwards).
|
||||
instance.data["frameStart"] = frame_start
|
||||
|
|
|
|||
|
|
@ -12,6 +12,7 @@ class CreateReview(avalon.maya.Creator):
|
|||
icon = "video-camera"
|
||||
defaults = ['Main']
|
||||
keepImages = False
|
||||
isolate = False
|
||||
|
||||
def __init__(self, *args, **kwargs):
|
||||
super(CreateReview, self).__init__(*args, **kwargs)
|
||||
|
|
@ -22,6 +23,7 @@ class CreateReview(avalon.maya.Creator):
|
|||
for key, value in animation_data.items():
|
||||
data[key] = value
|
||||
|
||||
data["isolate"] = self.isolate
|
||||
data["keepImages"] = self.keepImages
|
||||
|
||||
self.data = data
|
||||
|
|
|
|||
|
|
@ -1,7 +1,7 @@
|
|||
from maya import cmds, mel
|
||||
import pymel.core as pc
|
||||
|
||||
from avalon import api
|
||||
from avalon import api, io
|
||||
from avalon.maya.pipeline import containerise
|
||||
from avalon.maya import lib
|
||||
|
||||
|
|
@ -58,6 +58,13 @@ class AudioLoader(api.Loader):
|
|||
type="string"
|
||||
)
|
||||
|
||||
# Set frame range.
|
||||
version = io.find_one({"_id": representation["parent"]})
|
||||
subset = io.find_one({"_id": version["parent"]})
|
||||
asset = io.find_one({"_id": subset["parent"]})
|
||||
audio_node.sourceStart.set(1 - asset["data"]["frameStart"])
|
||||
audio_node.sourceEnd.set(asset["data"]["frameEnd"])
|
||||
|
||||
def switch(self, container, representation):
|
||||
self.update(container, representation)
|
||||
|
||||
|
|
|
|||
|
|
@ -1,7 +1,7 @@
|
|||
import pymel.core as pc
|
||||
import maya.cmds as cmds
|
||||
|
||||
from avalon import api
|
||||
from avalon import api, io
|
||||
from avalon.maya.pipeline import containerise
|
||||
from avalon.maya import lib
|
||||
from Qt import QtWidgets
|
||||
|
|
@ -12,7 +12,7 @@ class ImagePlaneLoader(api.Loader):
|
|||
|
||||
families = ["plate", "render"]
|
||||
label = "Create imagePlane on selected camera."
|
||||
representations = ["mov", "exr", "preview"]
|
||||
representations = ["mov", "exr", "preview", "png"]
|
||||
icon = "image"
|
||||
color = "orange"
|
||||
|
||||
|
|
@ -29,6 +29,8 @@ class ImagePlaneLoader(api.Loader):
|
|||
# Getting camera from selection.
|
||||
selection = pc.ls(selection=True)
|
||||
|
||||
camera = None
|
||||
|
||||
if len(selection) > 1:
|
||||
QtWidgets.QMessageBox.critical(
|
||||
None,
|
||||
|
|
@ -39,25 +41,29 @@ class ImagePlaneLoader(api.Loader):
|
|||
return
|
||||
|
||||
if len(selection) < 1:
|
||||
QtWidgets.QMessageBox.critical(
|
||||
result = QtWidgets.QMessageBox.critical(
|
||||
None,
|
||||
"Error!",
|
||||
"No camera selected.",
|
||||
QtWidgets.QMessageBox.Ok
|
||||
"No camera selected. Do you want to create a camera?",
|
||||
QtWidgets.QMessageBox.Ok,
|
||||
QtWidgets.QMessageBox.Cancel
|
||||
)
|
||||
return
|
||||
|
||||
relatives = pc.listRelatives(selection[0], shapes=True)
|
||||
if not pc.ls(relatives, type="camera"):
|
||||
QtWidgets.QMessageBox.critical(
|
||||
None,
|
||||
"Error!",
|
||||
"Selected node is not a camera.",
|
||||
QtWidgets.QMessageBox.Ok
|
||||
)
|
||||
return
|
||||
|
||||
camera = selection[0]
|
||||
if result == QtWidgets.QMessageBox.Ok:
|
||||
camera = pc.createNode("camera")
|
||||
else:
|
||||
return
|
||||
else:
|
||||
relatives = pc.listRelatives(selection[0], shapes=True)
|
||||
if pc.ls(relatives, type="camera"):
|
||||
camera = selection[0]
|
||||
else:
|
||||
QtWidgets.QMessageBox.critical(
|
||||
None,
|
||||
"Error!",
|
||||
"Selected node is not a camera.",
|
||||
QtWidgets.QMessageBox.Ok
|
||||
)
|
||||
return
|
||||
|
||||
try:
|
||||
camera.displayResolution.set(1)
|
||||
|
|
@ -81,6 +87,7 @@ class ImagePlaneLoader(api.Loader):
|
|||
image_plane_shape.frameOffset.set(1 - start_frame)
|
||||
image_plane_shape.frameIn.set(start_frame)
|
||||
image_plane_shape.frameOut.set(end_frame)
|
||||
image_plane_shape.frameCache.set(end_frame)
|
||||
image_plane_shape.useFrameExtension.set(1)
|
||||
|
||||
movie_representations = ["mov", "preview"]
|
||||
|
|
@ -140,6 +147,17 @@ class ImagePlaneLoader(api.Loader):
|
|||
type="string"
|
||||
)
|
||||
|
||||
# Set frame range.
|
||||
version = io.find_one({"_id": representation["parent"]})
|
||||
subset = io.find_one({"_id": version["parent"]})
|
||||
asset = io.find_one({"_id": subset["parent"]})
|
||||
start_frame = asset["data"]["frameStart"]
|
||||
end_frame = asset["data"]["frameEnd"]
|
||||
image_plane_shape.frameOffset.set(1 - start_frame)
|
||||
image_plane_shape.frameIn.set(start_frame)
|
||||
image_plane_shape.frameOut.set(end_frame)
|
||||
image_plane_shape.frameCache.set(end_frame)
|
||||
|
||||
def switch(self, container, representation):
|
||||
self.update(container, representation)
|
||||
|
||||
|
|
|
|||
|
|
@ -64,6 +64,7 @@ class CollectReview(pyblish.api.InstancePlugin):
|
|||
data['handles'] = instance.data.get('handles', None)
|
||||
data['step'] = instance.data['step']
|
||||
data['fps'] = instance.data['fps']
|
||||
data["isolate"] = instance.data["isolate"]
|
||||
cmds.setAttr(str(instance) + '.active', 1)
|
||||
self.log.debug('data {}'.format(instance.context[i].data))
|
||||
instance.context[i].data.update(data)
|
||||
|
|
|
|||
|
|
@ -76,6 +76,11 @@ class ExtractPlayblast(pype.api.Extractor):
|
|||
pm.currentTime(refreshFrameInt - 1, edit=True)
|
||||
pm.currentTime(refreshFrameInt, edit=True)
|
||||
|
||||
# Isolate view is requested by having objects in the set besides a
|
||||
# camera.
|
||||
if instance.data.get("isolate"):
|
||||
preset["isolate"] = instance.data["setMembers"]
|
||||
|
||||
with maintained_time():
|
||||
filename = preset.get("filename", "%TEMP%")
|
||||
|
||||
|
|
|
|||
|
|
@ -77,6 +77,11 @@ class ExtractThumbnail(pype.api.Extractor):
|
|||
pm.currentTime(refreshFrameInt - 1, edit=True)
|
||||
pm.currentTime(refreshFrameInt, edit=True)
|
||||
|
||||
# Isolate view is requested by having objects in the set besides a
|
||||
# camera.
|
||||
if instance.data.get("isolate"):
|
||||
preset["isolate"] = instance.data["setMembers"]
|
||||
|
||||
with maintained_time():
|
||||
filename = preset.get("filename", "%TEMP%")
|
||||
|
||||
|
|
|
|||
|
|
@ -25,6 +25,7 @@ import re
|
|||
import hashlib
|
||||
from datetime import datetime
|
||||
import itertools
|
||||
from collections import OrderedDict
|
||||
|
||||
import clique
|
||||
import requests
|
||||
|
|
@ -67,7 +68,7 @@ payload_skeleton = {
|
|||
|
||||
def _format_tiles(
|
||||
filename, index, tiles_x, tiles_y,
|
||||
width, height, prefix, origin="blc"):
|
||||
width, height, prefix):
|
||||
"""Generate tile entries for Deadline tile job.
|
||||
|
||||
Returns two dictionaries - one that can be directly used in Deadline
|
||||
|
|
@ -113,12 +114,14 @@ def _format_tiles(
|
|||
"""
|
||||
tile = 0
|
||||
out = {"JobInfo": {}, "PluginInfo": {}}
|
||||
cfg = {}
|
||||
cfg = OrderedDict()
|
||||
w_space = width / tiles_x
|
||||
h_space = height / tiles_y
|
||||
|
||||
cfg["TilesCropped"] = "False"
|
||||
|
||||
for tile_x in range(1, tiles_x + 1):
|
||||
for tile_y in range(1, tiles_y + 1):
|
||||
for tile_y in reversed(range(1, tiles_y + 1)):
|
||||
tile_prefix = "_tile_{}x{}_{}x{}_".format(
|
||||
tile_x, tile_y,
|
||||
tiles_x,
|
||||
|
|
@ -143,14 +146,13 @@ def _format_tiles(
|
|||
|
||||
cfg["Tile{}".format(tile)] = new_filename
|
||||
cfg["Tile{}Tile".format(tile)] = new_filename
|
||||
cfg["Tile{}FileName".format(tile)] = new_filename
|
||||
cfg["Tile{}X".format(tile)] = (tile_x - 1) * w_space
|
||||
if origin == "blc":
|
||||
cfg["Tile{}Y".format(tile)] = (tile_y - 1) * h_space
|
||||
else:
|
||||
cfg["Tile{}Y".format(tile)] = int(height) - ((tile_y - 1) * h_space) # noqa: E501
|
||||
|
||||
cfg["Tile{}Width".format(tile)] = tile_x * w_space
|
||||
cfg["Tile{}Height".format(tile)] = tile_y * h_space
|
||||
cfg["Tile{}Y".format(tile)] = int(height) - (tile_y * h_space)
|
||||
|
||||
cfg["Tile{}Width".format(tile)] = w_space
|
||||
cfg["Tile{}Height".format(tile)] = h_space
|
||||
|
||||
tile += 1
|
||||
return out, cfg
|
||||
|
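The tile change above switches the Y coordinate to a top-left origin by iterating tile_y in reverse and subtracting from the image height; a small worked example with illustrative numbers:

# 1920x1080 image split into 2x2 tiles, as in _format_tiles above
width, height = 1920, 1080
tiles_x, tiles_y = 2, 2
w_space = width / tiles_x   # 960.0
h_space = height / tiles_y  # 540.0

for tile_x in range(1, tiles_x + 1):
    for tile_y in reversed(range(1, tiles_y + 1)):
        x = (tile_x - 1) * w_space
        # top-left origin: the tile_y == 2 row maps to y == 0,
        # tile_y == 1 maps to y == 540
        y = int(height) - (tile_y * h_space)
        print(tile_x, tile_y, x, y, w_space, h_space)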
|
@ -538,7 +540,7 @@ class MayaSubmitDeadline(pyblish.api.InstancePlugin):
|
|||
"AuxFiles": [],
|
||||
"JobInfo": {
|
||||
"BatchName": payload["JobInfo"]["BatchName"],
|
||||
"Frames": 0,
|
||||
"Frames": 1,
|
||||
"Name": "{} - Tile Assembly Job".format(
|
||||
payload["JobInfo"]["Name"]),
|
||||
"OutputDirectory0":
|
||||
|
|
@ -590,7 +592,7 @@ class MayaSubmitDeadline(pyblish.api.InstancePlugin):
|
|||
payload["JobInfo"]["Name"],
|
||||
frame,
|
||||
instance.data.get("tilesX") * instance.data.get("tilesY") # noqa: E501
|
||||
)
|
||||
)
|
||||
self.log.info(
|
||||
"... preparing job {}".format(
|
||||
new_payload["JobInfo"]["Name"]))
|
||||
|
|
|
|||
233
pype/plugins/nuke/load/load_image.py
Normal file
|
|
@ -0,0 +1,233 @@
|
|||
import re
|
||||
import nuke
|
||||
|
||||
from avalon.vendor import qargparse
|
||||
from avalon import api, io
|
||||
|
||||
from pype.hosts.nuke import presets
|
||||
|
||||
|
||||
class LoadImage(api.Loader):
|
||||
"""Load still image into Nuke"""
|
||||
|
||||
families = [
|
||||
"render2d", "source", "plate",
|
||||
"render", "prerender", "review",
|
||||
"image"
|
||||
]
|
||||
representations = ["exr", "dpx", "jpg", "jpeg", "png", "psd"]
|
||||
|
||||
label = "Load Image"
|
||||
order = -10
|
||||
icon = "image"
|
||||
color = "white"
|
||||
|
||||
options = [
|
||||
qargparse.Integer(
|
||||
"frame_number",
|
||||
label="Frame Number",
|
||||
default=int(nuke.root()["first_frame"].getValue()),
|
||||
min=1,
|
||||
max=999999,
|
||||
help="What frame is reading from?"
|
||||
)
|
||||
]
|
||||
|
||||
def load(self, context, name, namespace, options):
|
||||
from avalon.nuke import (
|
||||
containerise,
|
||||
viewer_update_and_undo_stop
|
||||
)
|
||||
self.log.info("__ options: `{}`".format(options))
|
||||
frame_number = options.get("frame_number", 1)
|
||||
|
||||
version = context['version']
|
||||
version_data = version.get("data", {})
|
||||
repr_id = context["representation"]["_id"]
|
||||
|
||||
self.log.info("version_data: {}\n".format(version_data))
|
||||
self.log.debug(
|
||||
"Representation id `{}` ".format(repr_id))
|
||||
|
||||
last = first = int(frame_number)
|
||||
|
||||
# Fallback to asset name when namespace is None
|
||||
if namespace is None:
|
||||
namespace = context['asset']['name']
|
||||
|
||||
file = self.fname
|
||||
|
||||
if not file:
|
||||
repr_id = context["representation"]["_id"]
|
||||
self.log.warning(
|
||||
"Representation id `{}` is failing to load".format(repr_id))
|
||||
return
|
||||
|
||||
file = file.replace("\\", "/")
|
||||
|
||||
repr_cont = context["representation"]["context"]
|
||||
frame = repr_cont.get("frame")
|
||||
if frame:
|
||||
padding = len(frame)
|
||||
file = file.replace(
|
||||
frame,
|
||||
format(frame_number, "0{}".format(padding)))
|
||||
|
||||
read_name = "Read_{0}_{1}_{2}".format(
|
||||
repr_cont["asset"],
|
||||
repr_cont["subset"],
|
||||
repr_cont["representation"])
|
||||
|
||||
# Create the Loader with the filename path set
|
||||
with viewer_update_and_undo_stop():
|
||||
r = nuke.createNode(
|
||||
"Read",
|
||||
"name {}".format(read_name))
|
||||
r["file"].setValue(file)
|
||||
|
||||
# Set colorspace defined in version data
|
||||
colorspace = context["version"]["data"].get("colorspace")
|
||||
if colorspace:
|
||||
r["colorspace"].setValue(str(colorspace))
|
||||
|
||||
# load nuke presets for Read's colorspace
|
||||
read_clrs_presets = presets.get_colorspace_preset().get(
|
||||
"nuke", {}).get("read", {})
|
||||
|
||||
# check if any colorspace presets for read is mathing
|
||||
preset_clrsp = next((read_clrs_presets[k]
|
||||
for k in read_clrs_presets
|
||||
if bool(re.search(k, file))),
|
||||
None)
|
||||
if preset_clrsp is not None:
|
||||
r["colorspace"].setValue(str(preset_clrsp))
|
||||
|
||||
r["origfirst"].setValue(first)
|
||||
r["first"].setValue(first)
|
||||
r["origlast"].setValue(last)
|
||||
r["last"].setValue(last)
|
||||
|
||||
# add additional metadata from the version to imprint Avalon knob
|
||||
add_keys = ["source", "colorspace", "author", "fps", "version"]
|
||||
|
||||
data_imprint = {
|
||||
"frameStart": first,
|
||||
"frameEnd": last
|
||||
}
|
||||
for k in add_keys:
|
||||
if k == 'version':
|
||||
data_imprint.update({k: context["version"]['name']})
|
||||
else:
|
||||
data_imprint.update(
|
||||
{k: context["version"]['data'].get(k, str(None))})
|
||||
|
||||
data_imprint.update({"objectName": read_name})
|
||||
|
||||
r["tile_color"].setValue(int("0x4ecd25ff", 16))
|
||||
|
||||
return containerise(r,
|
||||
name=name,
|
||||
namespace=namespace,
|
||||
context=context,
|
||||
loader=self.__class__.__name__,
|
||||
data=data_imprint)
|
||||
|
||||
def switch(self, container, representation):
|
||||
self.update(container, representation)
|
||||
|
||||
def update(self, container, representation):
|
||||
"""Update the Loader's path
|
||||
|
||||
Nuke automatically tries to reset some variables when changing
|
||||
the loader's path to a new file. These automatic changes are to its
|
||||
inputs:
|
||||
|
||||
"""
|
||||
|
||||
from avalon.nuke import (
|
||||
update_container
|
||||
)
|
||||
|
||||
node = nuke.toNode(container["objectName"])
|
||||
frame_number = node["first"].value()
|
||||
|
||||
assert node.Class() == "Read", "Must be Read"
|
||||
|
||||
repr_cont = representation["context"]
|
||||
|
||||
file = api.get_representation_path(representation)
|
||||
|
||||
if not file:
|
||||
repr_id = representation["_id"]
|
||||
self.log.warning(
|
||||
"Representation id `{}` is failing to load".format(repr_id))
|
||||
return
|
||||
|
||||
file = file.replace("\\", "/")
|
||||
|
||||
frame = repr_cont.get("frame")
|
||||
if frame:
|
||||
padding = len(frame)
|
||||
file = file.replace(
|
||||
frame,
|
||||
format(frame_number, "0{}".format(padding)))
|
||||
|
||||
# Get start frame from version data
|
||||
version = io.find_one({
|
||||
"type": "version",
|
||||
"_id": representation["parent"]
|
||||
})
|
||||
|
||||
# get all versions in list
|
||||
versions = io.find({
|
||||
"type": "version",
|
||||
"parent": version["parent"]
|
||||
}).distinct('name')
|
||||
|
||||
max_version = max(versions)
|
||||
|
||||
version_data = version.get("data", {})
|
||||
|
||||
last = first = int(frame_number)
|
||||
|
||||
# Set the global in to the start frame of the sequence
|
||||
node["origfirst"].setValue(first)
|
||||
node["first"].setValue(first)
|
||||
node["origlast"].setValue(last)
|
||||
node["last"].setValue(last)
|
||||
|
||||
updated_dict = {}
|
||||
updated_dict.update({
|
||||
"representation": str(representation["_id"]),
|
||||
"frameStart": str(first),
|
||||
"frameEnd": str(last),
|
||||
"version": str(version.get("name")),
|
||||
"colorspace": version_data.get("colorspace"),
|
||||
"source": version_data.get("source"),
|
||||
"fps": str(version_data.get("fps")),
|
||||
"author": version_data.get("author"),
|
||||
"outputDir": version_data.get("outputDir"),
|
||||
})
|
||||
|
||||
# change color of node
|
||||
if version.get("name") not in [max_version]:
|
||||
node["tile_color"].setValue(int("0xd84f20ff", 16))
|
||||
else:
|
||||
node["tile_color"].setValue(int("0x4ecd25ff", 16))
|
||||
|
||||
# Update the imprinted representation
|
||||
update_container(
|
||||
node,
|
||||
updated_dict
|
||||
)
|
||||
self.log.info("udated to version: {}".format(version.get("name")))
|
||||
|
||||
def remove(self, container):
|
||||
|
||||
from avalon.nuke import viewer_update_and_undo_stop
|
||||
|
||||
node = nuke.toNode(container['objectName'])
|
||||
assert node.Class() == "Read", "Must be Read"
|
||||
|
||||
with viewer_update_and_undo_stop():
|
||||
nuke.delete(node)
|
||||
|
|
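A standalone sketch of the padded-frame substitution this loader performs twice above; the function name is illustrative, not part of the plugin:

    def path_for_frame(path, frame_token, frame_number):
        """Swap the frame token of a published path for a padded frame.

        `frame_token` is the frame string from the representation context,
        e.g. "1001"; its length defines the padding width.
        """
        padding = len(frame_token)
        return path.replace(
            frame_token, format(frame_number, "0{}".format(padding)))

    # "/publish/plateMain.1001.exr" -> "/publish/plateMain.0025.exr"
    print(path_for_frame("/publish/plateMain.1001.exr", "1001", 25))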
@@ -120,12 +120,12 @@ class LoadSequence(api.Loader):

        if "#" not in file:
            frame = repr_cont.get("frame")
            padding = len(frame)
            file = file.replace(frame, "#"*padding)
            file = file.replace(frame, "#" * padding)

        read_name = "Read_{0}_{1}_{2}".format(
            repr_cont["asset"],
            repr_cont["subset"],
            repr_cont["representation"])
            repr_cont["asset"],
            repr_cont["subset"],
            repr_cont["representation"])

        # Create the Loader with the filename path set
        with viewer_update_and_undo_stop():

@@ -250,7 +250,7 @@ class LoadSequence(api.Loader):

        if "#" not in file:
            frame = repr_cont.get("frame")
            padding = len(frame)
            file = file.replace(frame, "#"*padding)
            file = file.replace(frame, "#" * padding)

        # Get start frame from version data
        version = io.find_one({

@@ -276,10 +276,10 @@ class LoadSequence(api.Loader):

        last = version_data.get("frameEnd")

        if first is None:
            self.log.warning("Missing start frame for updated version, "
                             "assuming starts at frame 0 for: "
                             "{} ({})".format(
                                 node['name'].value(), representation))
            self.log.warning(
                "Missing start frame for updated version, "
                "assuming starts at frame 0 for: "
                "{} ({})".format(node['name'].value(), representation))
            first = 0

        first -= self.handle_start
@@ -15,10 +15,12 @@ class ExtractThumbnail(pype.api.Extractor):

    order = pyblish.api.ExtractorOrder + 0.01
    label = "Extract Thumbnail"

    families = ["review", "render.farm"]
    families = ["review"]
    hosts = ["nuke"]

    def process(self, instance):
        if "render.farm" in instance.data["families"]:
            return

        with anlib.maintained_selection():
            self.log.debug("instance: {}".format(instance))
@@ -273,8 +273,6 @@ class CollectHierarchyContext(pyblish.api.ContextPlugin):

                instance.data["clipOut"] -
                instance.data["clipIn"])


            self.log.debug(
                "__ instance.data[parents]: {}".format(
                    instance.data["parents"]

@@ -319,6 +317,7 @@ class CollectHierarchyContext(pyblish.api.ContextPlugin):

            })

            in_info['tasks'] = instance.data['tasks']
            in_info["comments"] = instance.data.get("comments", [])

            parents = instance.data.get('parents', [])
            self.log.debug("__ in_info: {}".format(in_info))
@@ -40,11 +40,12 @@ class CollectShots(api.InstancePlugin):

        data["name"] = data["subset"] + "_" + data["asset"]

        data["label"] = (
            "{} - {} - tasks:{} - assetbuilds:{}".format(
            "{} - {} - tasks: {} - assetbuilds: {} - comments: {}".format(
                data["asset"],
                data["subset"],
                data["tasks"],
                [x["name"] for x in data.get("assetbuilds", [])]
                [x["name"] for x in data.get("assetbuilds", [])],
                len(data.get("comments", []))
            )
        )
@@ -17,7 +17,7 @@ class CollectClipTagComments(api.InstancePlugin):

        for tag in instance.data["tags"]:
            if tag["name"].lower() == "comment":
                instance.data["comments"].append(
                    tag.metadata().dict()["tag.note"]
                    tag["metadata"]["tag.note"]
                )

        # Find tags on the source clip.
@@ -1,5 +1,6 @@

from avalon import api, photoshop
from avalon import api
from avalon.vendor import Qt
from avalon import photoshop


class CreateImage(api.Creator):

@@ -13,11 +14,12 @@ class CreateImage(api.Creator):

        groups = []
        layers = []
        create_group = False
        group_constant = photoshop.get_com_objects().constants().psLayerSet

        stub = photoshop.stub()
        if (self.options or {}).get("useSelection"):
            multiple_instances = False
            selection = photoshop.get_selected_layers()
            selection = stub.get_selected_layers()
            self.log.info("selection {}".format(selection))
            if len(selection) > 1:
                # Ask user whether to create one image or image per selected
                # item.

@@ -40,19 +42,18 @@ class CreateImage(api.Creator):

                if multiple_instances:
                    for item in selection:
                        if item.LayerType == group_constant:
                        if item.group:
                            groups.append(item)
                        else:
                            layers.append(item)
                else:
                    group = photoshop.group_selected_layers()
                    group.Name = self.name
                    group = stub.group_selected_layers(self.name)
                    groups.append(group)

            elif len(selection) == 1:
                # One selected item. Use group if it's a LayerSet (group),
                # else create a new group.
                if selection[0].LayerType == group_constant:
                if selection[0].group:
                    groups.append(selection[0])
                else:
                    layers.append(selection[0])

@@ -63,16 +64,14 @@ class CreateImage(api.Creator):

            create_group = True

        if create_group:
            group = photoshop.app().ActiveDocument.LayerSets.Add()
            group.Name = self.name
            group = stub.create_group(self.name)
            groups.append(group)

        for layer in layers:
            photoshop.select_layers([layer])
            group = photoshop.group_selected_layers()
            group.Name = layer.Name
            stub.select_layers([layer])
            group = stub.group_selected_layers(layer.name)
            groups.append(group)

        for group in groups:
            self.data.update({"subset": "image" + group.Name})
            photoshop.imprint(group, self.data)
            self.data.update({"subset": "image" + group.name})
            stub.imprint(group, self.data)
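The recurring change in these Photoshop plugins is replacing direct COM access (photoshop.app()...) with a stub object that proxies calls to Photoshop. A condensed sketch of the new grouping flow, using only the stub calls visible in this diff; it needs a live Photoshop session and is a sketch, not the stub's full API:

    from avalon import photoshop

    stub = photoshop.stub()
    groups, layers = [], []

    for item in stub.get_selected_layers():
        # layer.group replaces the old COM LayerType/psLayerSet check
        (groups if item.group else layers).append(item)

    for layer in layers:
        stub.select_layers([layer])
        # the stub takes the group name directly instead of setting .Name via COM
        groups.append(stub.group_selected_layers(layer.name))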
@@ -1,5 +1,7 @@

from avalon import api, photoshop

stub = photoshop.stub()


class ImageLoader(api.Loader):
    """Load images

@@ -12,7 +14,7 @@ class ImageLoader(api.Loader):

    def load(self, context, name=None, namespace=None, data=None):
        with photoshop.maintained_selection():
            layer = photoshop.import_smart_object(self.fname)
            layer = stub.import_smart_object(self.fname)

        self[:] = [layer]

@@ -28,11 +30,11 @@ class ImageLoader(api.Loader):

        layer = container.pop("layer")

        with photoshop.maintained_selection():
            photoshop.replace_smart_object(
            stub.replace_smart_object(
                layer, api.get_representation_path(representation)
            )

        photoshop.imprint(
        stub.imprint(
            layer, {"representation": str(representation["_id"])}
        )
@@ -1,6 +1,7 @@

import os

import pyblish.api

from avalon import photoshop


@@ -13,5 +14,5 @@ class CollectCurrentFile(pyblish.api.ContextPlugin):

    def process(self, context):
        context.data["currentFile"] = os.path.normpath(
            photoshop.app().ActiveDocument.FullName
            photoshop.stub().get_active_document_full_name()
        ).replace("\\", "/")
@@ -1,9 +1,9 @@

import pythoncom

from avalon import photoshop

import pyblish.api

from avalon import photoshop


class CollectInstances(pyblish.api.ContextPlugin):
    """Gather instances by LayerSet and file metadata

@@ -27,8 +27,11 @@ class CollectInstances(pyblish.api.ContextPlugin):

        # can be.
        pythoncom.CoInitialize()

        for layer in photoshop.get_layers_in_document():
            layer_data = photoshop.read(layer)
        stub = photoshop.stub()
        layers = stub.get_layers()
        layers_meta = stub.get_layers_metadata()
        for layer in layers:
            layer_data = stub.read(layer, layers_meta)

            # Skip layers without metadata.
            if layer_data is None:

@@ -38,18 +41,19 @@ class CollectInstances(pyblish.api.ContextPlugin):

            if "container" in layer_data["id"]:
                continue

            child_layers = [*layer.Layers]
            if not child_layers:
                self.log.info("%s skipped, it was empty." % layer.Name)
                continue
            # child_layers = [*layer.Layers]
            # self.log.debug("child_layers {}".format(child_layers))
            # if not child_layers:
            #     self.log.info("%s skipped, it was empty." % layer.Name)
            #     continue

            instance = context.create_instance(layer.Name)
            instance = context.create_instance(layer.name)
            instance.append(layer)
            instance.data.update(layer_data)
            instance.data["families"] = self.families_mapping[
                layer_data["family"]
            ]
            instance.data["publish"] = layer.Visible
            instance.data["publish"] = layer.visible

            # Produce diagnostic message for any graphical
            # user interface interested in visualising it.
@@ -21,35 +21,37 @@ class ExtractImage(pype.api.Extractor):

        self.log.info("Outputting image to {}".format(staging_dir))

        # Perform extraction
        stub = photoshop.stub()
        files = {}
        with photoshop.maintained_selection():
            self.log.info("Extracting %s" % str(list(instance)))
            with photoshop.maintained_visibility():
                # Hide all other layers.
                extract_ids = [
                    x.id for x in photoshop.get_layers_in_layers([instance[0]])
                ]
                for layer in photoshop.get_layers_in_document():
                    if layer.id not in extract_ids:
                        layer.Visible = False
                extract_ids = set([ll.id for ll in stub.
                                   get_layers_in_layers([instance[0]])])

                save_options = {}
                for layer in stub.get_layers():
                    # limit unnecessary calls to client
                    if layer.visible and layer.id not in extract_ids:
                        stub.set_visible(layer.id, False)
                    if not layer.visible and layer.id in extract_ids:
                        stub.set_visible(layer.id, True)

                save_options = []
                if "png" in self.formats:
                    save_options["png"] = photoshop.com_objects.PNGSaveOptions()
                    save_options.append('png')
                if "jpg" in self.formats:
                    save_options["jpg"] = photoshop.com_objects.JPEGSaveOptions()
                    save_options.append('jpg')

                file_basename = os.path.splitext(
                    photoshop.app().ActiveDocument.Name
                    stub.get_active_document_name()
                )[0]
                for extension, save_option in save_options.items():
                for extension in save_options:
                    _filename = "{}.{}".format(file_basename, extension)
                    files[extension] = _filename

                    full_filename = os.path.join(staging_dir, _filename)
                    photoshop.app().ActiveDocument.SaveAs(
                        full_filename, save_option, True
                    )
                    stub.saveAs(full_filename, extension, True)

        representations = []
        for extension, filename in files.items():
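The new visibility loop above only sends a set_visible call when a layer's state actually has to flip, instead of rewriting every layer through COM. A small standalone sketch of that round-trip-saving pattern, with plain objects standing in for the stub:

    class Layer:
        def __init__(self, id, visible):
            self.id, self.visible = id, visible

    def sync_visibility(layers, extract_ids, set_visible):
        """Call set_visible only for layers whose state must change."""
        for layer in layers:
            should_show = layer.id in extract_ids
            if layer.visible != should_show:
                set_visible(layer.id, should_show)

    layers = [Layer(1, True), Layer(2, False), Layer(3, True)]
    # only layer 1 (hide) and layer 2 (show) trigger a call; layer 3 is untouched
    sync_visibility(layers, extract_ids={2, 3},
                    set_visible=lambda i, v: print("set_visible", i, v))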
@@ -13,10 +13,11 @@ class ExtractReview(pype.api.Extractor):

    families = ["review"]

    def process(self, instance):

        staging_dir = self.staging_dir(instance)
        self.log.info("Outputting image to {}".format(staging_dir))

        stub = photoshop.stub()

        layers = []
        for image_instance in instance.context:
            if image_instance.data["family"] != "image":

@@ -25,25 +26,22 @@ class ExtractReview(pype.api.Extractor):

        # Perform extraction
        output_image = "{}.jpg".format(
            os.path.splitext(photoshop.app().ActiveDocument.Name)[0]
            os.path.splitext(stub.get_active_document_name())[0]
        )
        output_image_path = os.path.join(staging_dir, output_image)
        with photoshop.maintained_visibility():
            # Hide all other layers.
            extract_ids = [
                x.id for x in photoshop.get_layers_in_layers(layers)
            ]
            for layer in photoshop.get_layers_in_document():
                if layer.id in extract_ids:
                    layer.Visible = True
                else:
                    layer.Visible = False
            extract_ids = set([ll.id for ll in stub.
                               get_layers_in_layers(layers)])
            self.log.info("extract_ids {}".format(extract_ids))
            for layer in stub.get_layers():
                # limit unnecessary calls to client
                if layer.visible and layer.id not in extract_ids:
                    stub.set_visible(layer.id, False)
                if not layer.visible and layer.id in extract_ids:
                    stub.set_visible(layer.id, True)

            photoshop.app().ActiveDocument.SaveAs(
                output_image_path,
                photoshop.com_objects.JPEGSaveOptions(),
                True
            )
            stub.saveAs(output_image_path, 'jpg', True)

        ffmpeg_path = pype.lib.get_ffmpeg_tool_path("ffmpeg")

@@ -66,8 +64,6 @@ class ExtractReview(pype.api.Extractor):

        ]
        output = pype.lib._subprocess(args)

        self.log.debug(output)

        instance.data["representations"].append({
            "name": "thumbnail",
            "ext": "jpg",

@@ -75,7 +71,6 @@ class ExtractReview(pype.api.Extractor):

            "stagingDir": staging_dir,
            "tags": ["thumbnail"]
        })

        # Generate mov.
        mov_path = os.path.join(staging_dir, "review.mov")
        args = [

@@ -86,9 +81,7 @@ class ExtractReview(pype.api.Extractor):

            mov_path
        ]
        output = pype.lib._subprocess(args)

        self.log.debug(output)

        instance.data["representations"].append({
            "name": "mov",
            "ext": "mov",
@@ -11,4 +11,4 @@ class ExtractSaveScene(pype.api.Extractor):

    families = ["workfile"]

    def process(self, instance):
        photoshop.app().ActiveDocument.Save()
        photoshop.stub().save()
@@ -1,6 +1,7 @@

import pyblish.api
from pype.action import get_errored_plugins_from_data
from pype.lib import version_up

from avalon import photoshop


@@ -24,6 +25,6 @@ class IncrementWorkfile(pyblish.api.InstancePlugin):

        )

        scene_path = version_up(instance.context.data["currentFile"])
        photoshop.app().ActiveDocument.SaveAs(scene_path)
        photoshop.stub().saveAs(scene_path, 'psd', True)

        self.log.info("Incremented workfile to: {}".format(scene_path))
@@ -23,11 +23,12 @@ class ValidateInstanceAssetRepair(pyblish.api.Action):

        # Apply pyblish.logic to get the instances for the plug-in
        instances = pyblish.api.instances_by_plugin(failed, plugin)

        stub = photoshop.stub()
        for instance in instances:
            data = photoshop.read(instance[0])
            data = stub.read(instance[0])

            data["asset"] = os.environ["AVALON_ASSET"]
            photoshop.imprint(instance[0], data)
            stub.imprint(instance[0], data)


class ValidateInstanceAsset(pyblish.api.InstancePlugin):
@@ -21,13 +21,14 @@ class ValidateNamingRepair(pyblish.api.Action):

        # Apply pyblish.logic to get the instances for the plug-in
        instances = pyblish.api.instances_by_plugin(failed, plugin)

        stub = photoshop.stub()
        for instance in instances:
            self.log.info("validate_naming instance {}".format(instance))
            name = instance.data["name"].replace(" ", "_")
            instance[0].Name = name
            data = photoshop.read(instance[0])
            data = stub.read(instance[0])
            data["subset"] = "image" + name
            photoshop.imprint(instance[0], data)
            stub.imprint(instance[0], data)

        return True

pype/plugins/resolve/create/create_shot_clip.py (new file, 79 lines)

@@ -0,0 +1,79 @@
from pprint import pformat
from pype.hosts import resolve
from pype.hosts.resolve import lib


class CreateShotClip(resolve.Creator):
    """Publishable clip"""

    label = "Shot"
    family = "clip"
    icon = "film"
    defaults = ["Main"]

    gui_name = "Pype sequential rename with hierarchy"
    gui_info = "Define sequential rename and fill hierarchy data."
    gui_inputs = {
        "clipName": "{episode}{sequence}{shot}",
        "hierarchy": "{folder}/{sequence}/{shot}",
        "countFrom": 10,
        "steps": 10,
        "hierarchyData": {
            "folder": "shots",
            "shot": "sh####",
            "track": "{track}",
            "sequence": "sc010",
            "episode": "ep01"
        }
    }
    presets = None

    def process(self):
        # solve gui inputs overwrites from presets
        # overwrite gui inputs from presets
        for k, v in self.gui_inputs.items():
            if isinstance(v, dict):
                # nested dictionary (only one level allowed)
                for _k, _v in v.items():
                    if self.presets.get(_k):
                        self.gui_inputs[k][_k] = self.presets[_k]
            if self.presets.get(k):
                self.gui_inputs[k] = self.presets[k]

        # open widget for plugins inputs
        widget = self.widget(self.gui_name, self.gui_info, self.gui_inputs)
        widget.exec_()

        print(f"__ selected_clips: {self.selected}")
        if len(self.selected) < 1:
            return

        if not widget.result:
            print("Operation aborted")
            return

        # sequence attrs
        sq_frame_start = self.sequence.GetStartFrame()
        sq_markers = self.sequence.GetMarkers()
        print(f"__ sq_frame_start: {pformat(sq_frame_start)}")
        print(f"__ seq_markers: {pformat(sq_markers)}")

        # create media bin for compound clips (trackItems)
        mp_folder = resolve.create_current_sequence_media_bin(self.sequence)
        print(f"_ mp_folder: {mp_folder.GetName()}")

        lib.rename_add = 0
        for i, t_data in enumerate(self.selected):
            lib.rename_index = i

            # clear color after it is done
            t_data["clip"]["item"].ClearClipColor()

            # convert track item to timeline media pool item
            resolve.create_compound_clip(
                t_data,
                mp_folder,
                rename=True,
                **dict(
                    {"presets": widget.result})
            )
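The preset merge at the top of process() overlays flat preset keys onto the GUI defaults, one nesting level deep. A self-contained sketch of the same merge with hypothetical values:

    def apply_presets(gui_inputs, presets):
        """Overlay flat preset keys onto GUI defaults, one nesting level deep."""
        for key, value in gui_inputs.items():
            if isinstance(value, dict):
                for sub_key in value:
                    if presets.get(sub_key):
                        value[sub_key] = presets[sub_key]
            if presets.get(key):
                gui_inputs[key] = presets[key]
        return gui_inputs

    inputs = {"countFrom": 10,
              "hierarchyData": {"folder": "shots", "episode": "ep01"}}
    # {'countFrom': 1, 'hierarchyData': {'folder': 'shots', 'episode': 'ep02'}}
    print(apply_presets(inputs, {"countFrom": 1, "episode": "ep02"}))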

pype/plugins/resolve/publish/collect_clips.py (new file, 162 lines)

@@ -0,0 +1,162 @@
import os
from pyblish import api
from pype.hosts import resolve
import json


class CollectClips(api.ContextPlugin):
    """Collect all Track items selection."""

    order = api.CollectorOrder + 0.01
    label = "Collect Clips"
    hosts = ["resolve"]

    def process(self, context):
        # create asset_names conversion table
        if not context.data.get("assetsShared"):
            self.log.debug("Created `assetsShared` in context")
            context.data["assetsShared"] = dict()

        projectdata = context.data["projectEntity"]["data"]
        selection = resolve.get_current_track_items(
            filter=True, selecting_color="Pink")

        for clip_data in selection:
            data = dict()

            # get basic objects from data
            project = clip_data["project"]
            sequence = clip_data["sequence"]
            clip = clip_data["clip"]

            # sequence attrs
            sq_frame_start = sequence.GetStartFrame()
            self.log.debug(f"sq_frame_start: {sq_frame_start}")

            sq_markers = sequence.GetMarkers()

            # get details of objects
            clip_item = clip["item"]
            track = clip_data["track"]

            mp = project.GetMediaPool()

            # get clip attributes
            clip_metadata = resolve.get_pype_clip_metadata(clip_item)
            clip_metadata = json.loads(clip_metadata)
            self.log.debug(f"clip_metadata: {clip_metadata}")

            compound_source_prop = clip_metadata["sourceProperties"]
            self.log.debug(f"compound_source_prop: {compound_source_prop}")

            asset_name = clip_item.GetName()
            mp_item = clip_item.GetMediaPoolItem()
            mp_prop = mp_item.GetClipProperty()
            source_first = int(compound_source_prop["Start"])
            source_last = int(compound_source_prop["End"])
            source_duration = compound_source_prop["Frames"]
            fps = float(mp_prop["FPS"])
            self.log.debug(f"source_first: {source_first}")
            self.log.debug(f"source_last: {source_last}")
            self.log.debug(f"source_duration: {source_duration}")
            self.log.debug(f"fps: {fps}")

            source_path = os.path.normpath(
                compound_source_prop["File Path"])
            source_name = compound_source_prop["File Name"]
            source_id = clip_metadata["sourceId"]
            self.log.debug(f"source_path: {source_path}")
            self.log.debug(f"source_name: {source_name}")
            self.log.debug(f"source_id: {source_id}")

            clip_left_offset = int(clip_item.GetLeftOffset())
            clip_right_offset = int(clip_item.GetRightOffset())
            self.log.debug(f"clip_left_offset: {clip_left_offset}")
            self.log.debug(f"clip_right_offset: {clip_right_offset}")

            # source in/out
            source_in = int(source_first + clip_left_offset)
            source_out = int(source_first + clip_right_offset)
            self.log.debug(f"source_in: {source_in}")
            self.log.debug(f"source_out: {source_out}")

            clip_in = int(clip_item.GetStart() - sq_frame_start)
            clip_out = int(clip_item.GetEnd() - sq_frame_start)
            clip_duration = int(clip_item.GetDuration())
            self.log.debug(f"clip_in: {clip_in}")
            self.log.debug(f"clip_out: {clip_out}")
            self.log.debug(f"clip_duration: {clip_duration}")

            is_sequence = False

            self.log.debug(
                "__ assets_shared: {}".format(
                    context.data["assetsShared"]))

            # Check for clips with the same range;
            # this tests whether any vertically neighbouring
            # clips have already been processed
            clip_matching_with_range = next(
                (k for k, v in context.data["assetsShared"].items()
                 if (v.get("_clipIn", 0) == clip_in)
                 and (v.get("_clipOut", 0) == clip_out)
                 ), False)

            # check if the clip name is the same in the matched
            # vertically neighbouring clip;
            # if it is, it is correct, so reset the variable to False
            # so the wrong-name exception is not raised
            if asset_name in str(clip_matching_with_range):
                clip_matching_with_range = False

            # raise wrong name exception if found one
            assert (not clip_matching_with_range), (
                "matching clip: {asset}"
                " timeline range ({clip_in}:{clip_out})"
                " conflicting with {clip_matching_with_range}"
                " >> rename any of clips to be the same as the other <<"
            ).format(
                **locals())

            if ("[" in source_name) and ("]" in source_name):
                is_sequence = True

            data.update({
                "name": "_".join([
                    track["name"], asset_name, source_name]),
                "item": clip_item,
                "source": mp_item,
                # "timecodeStart": str(source.timecodeStart()),
                "timelineStart": sq_frame_start,
                "sourcePath": source_path,
                "sourceFileHead": source_name,
                "isSequence": is_sequence,
                "track": track["name"],
                "trackIndex": track["index"],
                "sourceFirst": source_first,

                "sourceIn": source_in,
                "sourceOut": source_out,
                "mediaDuration": source_duration,
                "clipIn": clip_in,
                "clipOut": clip_out,
                "clipDuration": clip_duration,
                "asset": asset_name,
                "subset": "plateMain",
                "family": "clip",
                "families": [],
                "handleStart": projectdata.get("handleStart", 0),
                "handleEnd": projectdata.get("handleEnd", 0)})

            instance = context.create_instance(**data)

            self.log.info("Created instance: {}".format(instance))
            self.log.info("Created instance.data: {}".format(instance.data))

            context.data["assetsShared"][asset_name] = {
                "_clipIn": clip_in,
                "_clipOut": clip_out
            }
            self.log.info(
                "context.data[\"assetsShared\"]: {}".format(
                    context.data["assetsShared"]))
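A minimal sketch of the frame arithmetic this collector performs, with made-up numbers standing in for the Resolve API objects:

    # Hypothetical values in place of Resolve's clip/timeline queries.
    sq_frame_start = 86400                  # timeline start frame
    source_first = 1001                     # first frame of the compound source
    clip_left_offset, clip_right_offset = 8, 104
    clip_start, clip_end = 86500, 86596     # clip position on the timeline

    source_in = source_first + clip_left_offset    # 1009
    source_out = source_first + clip_right_offset  # 1105
    clip_in = clip_start - sq_frame_start          # 100
    clip_out = clip_end - sq_frame_start           # 196

    print(source_in, source_out, clip_in, clip_out)  # 1009 1105 100 196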
@@ -1,17 +0,0 @@

import pyblish.api
from pype.hosts.resolve.utils import get_resolve_module


class CollectProject(pyblish.api.ContextPlugin):
    """Collect Project object"""

    order = pyblish.api.CollectorOrder - 0.1
    label = "Collect Project"
    hosts = ["resolve"]

    def process(self, context):
        resolve = get_resolve_module()
        PM = resolve.GetProjectManager()
        P = PM.GetCurrentProject()

        self.log.info(P.GetName())

pype/plugins/resolve/publish/collect_project.py (new file, 29 lines)

@@ -0,0 +1,29 @@
import os
import pyblish.api
from pype.hosts.resolve.utils import get_resolve_module


class CollectProject(pyblish.api.ContextPlugin):
    """Collect Project object"""

    order = pyblish.api.CollectorOrder - 0.1
    label = "Collect Project"
    hosts = ["resolve"]

    def process(self, context):
        exported_project_ext = ".drp"
        current_dir = os.getenv("AVALON_WORKDIR")
        resolve = get_resolve_module()
        PM = resolve.GetProjectManager()
        P = PM.GetCurrentProject()
        name = P.GetName()

        fname = name + exported_project_ext
        current_file = os.path.join(current_dir, fname)
        normalised = os.path.normpath(current_file)

        context.data["project"] = P
        context.data["currentFile"] = normalised

        self.log.info(name)
        self.log.debug(normalised)
@@ -123,7 +123,7 @@ class CollectContextDataSAPublish(pyblish.api.ContextPlugin):

            "label": subset,
            "name": subset,
            "family": in_data["family"],
            "version": in_data.get("version", 1),
            # "version": in_data.get("version", 1),
            "frameStart": in_data.get("representations", [None])[0].get(
                "frameStart", None
            ),
@@ -32,7 +32,7 @@ class CollectEditorial(pyblish.api.InstancePlugin):

    actions = []

    # presets
    extensions = [".mov"]
    extensions = [".mov", ".mp4"]

    def process(self, instance):
        # remove context test attribute
@@ -9,7 +9,7 @@ class CollectPsdInstances(pyblish.api.InstancePlugin):

    """

    label = "Collect Psd Instances"
    order = pyblish.api.CollectorOrder + 0.492
    order = pyblish.api.CollectorOrder + 0.489
    hosts = ["standalonepublisher"]
    families = ["background_batch"]

@@ -34,8 +34,6 @@ class CollectPsdInstances(pyblish.api.InstancePlugin):

        context = instance.context
        asset_data = instance.data["assetEntity"]
        asset_name = instance.data["asset"]
        anatomy_data = instance.data["anatomyData"]

        for subset_name, subset_data in self.subsets.items():
            instance_name = f"{asset_name}_{subset_name}"
            task = subset_data.get("task", "background")

@@ -55,16 +53,8 @@ class CollectPsdInstances(pyblish.api.InstancePlugin):

            new_instance.data["label"] = f"{instance_name}"
            new_instance.data["subset"] = subset_name
            new_instance.data["task"] = task

            # fix anatomy data
            anatomy_data_new = copy.deepcopy(anatomy_data)
            # updating hierarchy data
            anatomy_data_new.update({
                "asset": asset_data["name"],
                "task": task,
                "subset": subset_name
            })
            new_instance.data["anatomyData"] = anatomy_data_new

            if subset_name in self.unchecked_by_default:
                new_instance.data["publish"] = False

Binary file not shown (image updated: 41 KiB before, 120 KiB after).
@@ -526,7 +526,7 @@ def burnins_from_data(

    bit_rate = burnin._streams[0].get("bit_rate")
    if bit_rate:
        ffmpeg_args.append("--b:v {}".format(bit_rate))
        ffmpeg_args.append("-b:v {}".format(bit_rate))

    pix_fmt = burnin._streams[0].get("pix_fmt")
    if pix_fmt:
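The fix matters because ffmpeg options take a single leading dash; "--b:v" is not a recognized option. A hedged sketch of how such an argument list might be assembled (paths and bitrate are placeholders, not values from this codebase):

    ffmpeg_args = ["-i", "input.mov"]   # placeholder input

    bit_rate = "2000k"                  # e.g. probed from the source stream
    if bit_rate:
        # single-dash option: "-b:v" sets the video bitrate
        ffmpeg_args.append("-b:v {}".format(bit_rate))

    ffmpeg_args.append("output.mov")    # placeholder output
    print("ffmpeg " + " ".join(ffmpeg_args))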

pype/settings/__init__.py (new file, 9 lines)

@@ -0,0 +1,9 @@
from .lib import (
    system_settings,
    project_settings
)

__all__ = (
    "system_settings",
    "project_settings"
)

pype/settings/defaults/project_anatomy/colorspace.json (new file, 42 lines)

@@ -0,0 +1,42 @@
{
    "nuke": {
        "root": {
            "colorManagement": "Nuke",
            "OCIO_config": "nuke-default",
            "defaultViewerLUT": "Nuke Root LUTs",
            "monitorLut": "sRGB",
            "int8Lut": "sRGB",
            "int16Lut": "sRGB",
            "logLut": "Cineon",
            "floatLut": "linear"
        },
        "viewer": {
            "viewerProcess": "sRGB"
        },
        "write": {
            "render": {
                "colorspace": "linear"
            },
            "prerender": {
                "colorspace": "linear"
            },
            "still": {
                "colorspace": "sRGB"
            }
        },
        "read": {
            "[^-a-zA-Z0-9]beauty[^-a-zA-Z0-9]": "linear",
            "[^-a-zA-Z0-9](P|N|Z|crypto)[^-a-zA-Z0-9]": "linear",
            "[^-a-zA-Z0-9](plateRef)[^-a-zA-Z0-9]": "sRGB"
        }
    },
    "maya": {

    },
    "houdini": {

    },
    "resolve": {

    }
}
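These "read" keys are regex patterns that the Nuke loaders (see LoadImage above) match against the file path to pick a colorspace; a minimal sketch of that lookup:

    import re

    read_clrs_presets = {
        "[^-a-zA-Z0-9]beauty[^-a-zA-Z0-9]": "linear",
        "[^-a-zA-Z0-9](plateRef)[^-a-zA-Z0-9]": "sRGB",
    }

    def colorspace_for(path):
        # first preset whose regex matches the path wins; None if none match
        return next((v for k, v in read_clrs_presets.items()
                     if re.search(k, path)), None)

    print(colorspace_for("/renders/shot010.beauty.0101.exr"))   # linear
    print(colorspace_for("/plates/shot010.plateRef.0101.exr"))  # sRGB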

pype/settings/defaults/project_anatomy/dataflow.json (new file, 55 lines)

@@ -0,0 +1,55 @@
{
    "nuke": {
        "nodes": {
            "connected": true,
            "modifymetadata": {
                "_id": "connect_metadata",
                "_previous": "ENDING",
                "metadata.set.pype_studio_name": "{PYPE_STUDIO_NAME}",
                "metadata.set.avalon_project_name": "{AVALON_PROJECT}",
                "metadata.set.avalon_project_code": "{PYPE_STUDIO_CODE}",
                "metadata.set.avalon_asset_name": "{AVALON_ASSET}"
            },
            "crop": {
                "_id": "connect_crop",
                "_previous": "connect_metadata",
                "box": [
                    "{metadata.crop.x}",
                    "{metadata.crop.y}",
                    "{metadata.crop.right}",
                    "{metadata.crop.top}"
                ]
            },
            "write": {
                "render": {
                    "_id": "output_write",
                    "_previous": "connect_crop",
                    "file_type": "exr",
                    "datatype": "16 bit half",
                    "compression": "Zip (1 scanline)",
                    "autocrop": true,
                    "tile_color": "0xff0000ff",
                    "channels": "rgb"
                },
                "prerender": {
                    "_id": "output_write",
                    "_previous": "connect_crop",
                    "file_type": "exr",
                    "datatype": "16 bit half",
                    "compression": "Zip (1 scanline)",
                    "autocrop": false,
                    "tile_color": "0xc9892aff",
                    "channels": "rgba"
                },
                "still": {
                    "_previous": "connect_crop",
                    "channels": "rgba",
                    "file_type": "tiff",
                    "datatype": "16 bit",
                    "compression": "LZW",
                    "tile_color": "0x4145afff"
                }
            }
        }
    }
}

pype/settings/defaults/project_anatomy/roots.json (new file, 5 lines)

@@ -0,0 +1,5 @@
{
    "windows": "C:/projects",
    "linux": "/mnt/share/projects",
    "darwin": "/Volumes/path"
}

pype/settings/defaults/project_anatomy/templates.json (new file, 30 lines)

@@ -0,0 +1,30 @@
{
    "version_padding": 3,
    "version": "v{version:0>{@version_padding}}",
    "frame_padding": 4,
    "frame": "{frame:0>{@frame_padding}}",
    "work": {
        "folder": "{root}/{project[name]}/{hierarchy}/{asset}/work/{task}",
        "file": "{project[code]}_{asset}_{task}_{@version}<_{comment}>.{ext}",
        "path": "{@folder}/{@file}"
    },
    "render": {
        "folder": "{root}/{project[name]}/{hierarchy}/{asset}/publish/render/{subset}/{@version}",
        "file": "{project[code]}_{asset}_{subset}_{@version}<_{output}><.{@frame}>.{representation}",
        "path": "{@folder}/{@file}"
    },
    "texture": {
        "path": "{root}/{project[name]}/{hierarchy}/{asset}/publish/{family}/{subset}"
    },
    "publish": {
        "folder": "{root}/{project[name]}/{hierarchy}/{asset}/publish/{family}/{subset}/{@version}",
        "file": "{project[code]}_{asset}_{subset}_{@version}<_{output}><.{@frame}>.{representation}",
        "path": "{@folder}/{@file}",
        "thumbnail": "{thumbnail_root}/{project[name]}/{_id}_{thumbnail_type}{ext}"
    },
    "master": {
        "folder": "{root}/{project[name]}/{hierarchy}/{asset}/publish/{family}/{subset}/master",
        "file": "{project[code]}_{asset}_{subset}_master<_{output}><.{frame}>.{representation}",
        "path": "{@folder}/{@file}"
    }
}
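For reference, the "{@name}" tokens inline other templates before the normal str.format-style fill; a rough sketch of that two-pass expansion (simplified, not Anatomy's real resolver):

    import re

    templates = {"version_padding": 3,
                 "version": "v{version:0>{@version_padding}}"}

    def expand(template, templates, **data):
        # pass 1: repeatedly inline "{@key}" references
        while "{@" in template:
            template = re.sub(r"\{@(\w+)\}",
                              lambda m: str(templates[m.group(1)]), template)
        # pass 2: ordinary format fill
        return template.format(**data)

    print(expand("{asset}_{@version}.exr", templates, asset="sh010", version=7))
    # sh010_v007.exr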

@@ -0,0 +1,16 @@
{
    "sync_to_avalon": {
        "statuses_name_change": ["not ready", "ready"]
    },

    "status_update": {
        "_ignore_": ["in progress", "omitted", "on hold"],
        "Ready": ["not ready"],
        "In Progress": ["_any_"]
    },
    "status_version_to_task": {
        "__description__": "Status `from` (key) must be lowercase!",
        "in progress": "in progress",
        "approved": "approved"
    }
}

@@ -0,0 +1,165 @@
[{
    "label": "FPS",
    "key": "fps",
    "type": "number",
    "is_hierarchical": true,
    "group": "avalon",
    "write_security_role": ["ALL"],
    "read_security_role": ["ALL"],
    "default": null,
    "config": {
        "isdecimal": true
    }
}, {
    "label": "Applications",
    "key": "applications",
    "type": "enumerator",
    "entity_type": "show",
    "group": "avalon",
    "config": {
        "multiselect": true,
        "data": [
            {"blender_2.80": "Blender 2.80"},
            {"blender_2.81": "Blender 2.81"},
            {"blender_2.82": "Blender 2.82"},
            {"blender_2.83": "Blender 2.83"},
            {"celaction_local": "CelAction2D Local"},
            {"maya_2017": "Maya 2017"},
            {"maya_2018": "Maya 2018"},
            {"maya_2019": "Maya 2019"},
            {"nuke_10.0": "Nuke 10.0"},
            {"nuke_11.2": "Nuke 11.2"},
            {"nuke_11.3": "Nuke 11.3"},
            {"nuke_12.0": "Nuke 12.0"},
            {"nukex_10.0": "NukeX 10.0"},
            {"nukex_11.2": "NukeX 11.2"},
            {"nukex_11.3": "NukeX 11.3"},
            {"nukex_12.0": "NukeX 12.0"},
            {"nukestudio_10.0": "NukeStudio 10.0"},
            {"nukestudio_11.2": "NukeStudio 11.2"},
            {"nukestudio_11.3": "NukeStudio 11.3"},
            {"nukestudio_12.0": "NukeStudio 12.0"},
            {"harmony_17": "Harmony 17"},
            {"houdini_16.5": "Houdini 16.5"},
            {"houdini_17": "Houdini 17"},
            {"houdini_18": "Houdini 18"},
            {"photoshop_2020": "Photoshop 2020"},
            {"python_3": "Python 3"},
            {"python_2": "Python 2"},
            {"premiere_2019": "Premiere Pro 2019"},
            {"premiere_2020": "Premiere Pro 2020"},
            {"resolve_16": "BM DaVinci Resolve 16"}
        ]
    }
}, {
    "label": "Avalon auto-sync",
    "key": "avalon_auto_sync",
    "type": "boolean",
    "entity_type": "show",
    "group": "avalon",
    "write_security_role": ["API", "Administrator"],
    "read_security_role": ["API", "Administrator"]
}, {
    "label": "Intent",
    "key": "intent",
    "type": "enumerator",
    "entity_type": "assetversion",
    "group": "avalon",
    "config": {
        "multiselect": false,
        "data": [
            {"test": "Test"},
            {"wip": "WIP"},
            {"final": "Final"}
        ]
    }
}, {
    "label": "Library Project",
    "key": "library_project",
    "type": "boolean",
    "entity_type": "show",
    "group": "avalon",
    "write_security_role": ["API", "Administrator"],
    "read_security_role": ["API", "Administrator"]
}, {
    "label": "Clip in",
    "key": "clipIn",
    "type": "number",
    "is_hierarchical": true,
    "group": "avalon",
    "default": null
}, {
    "label": "Clip out",
    "key": "clipOut",
    "type": "number",
    "is_hierarchical": true,
    "group": "avalon",
    "default": null
}, {
    "label": "Frame start",
    "key": "frameStart",
    "type": "number",
    "is_hierarchical": true,
    "group": "avalon",
    "default": null
}, {
    "label": "Frame end",
    "key": "frameEnd",
    "type": "number",
    "is_hierarchical": true,
    "group": "avalon",
    "default": null
}, {
    "label": "Tools",
    "key": "tools_env",
    "type": "enumerator",
    "is_hierarchical": true,
    "group": "avalon",
    "config": {
        "multiselect": true,
        "data": [
            {"mtoa_3.0.1": "mtoa_3.0.1"},
            {"mtoa_3.1.1": "mtoa_3.1.1"},
            {"mtoa_3.2.0": "mtoa_3.2.0"},
            {"yeti_2.1.2": "yeti_2.1"}
        ]
    }
}, {
    "label": "Resolution Width",
    "key": "resolutionWidth",
    "type": "number",
    "is_hierarchical": true,
    "group": "avalon",
    "default": null
}, {
    "label": "Resolution Height",
    "key": "resolutionHeight",
    "type": "number",
    "is_hierarchical": true,
    "group": "avalon",
    "default": null
}, {
    "label": "Pixel aspect",
    "key": "pixelAspect",
    "type": "number",
    "is_hierarchical": true,
    "group": "avalon",
    "config": {
        "isdecimal": true
    }
}, {
    "label": "Frame handles start",
    "key": "handleStart",
    "type": "number",
    "is_hierarchical": true,
    "group": "avalon",
    "default": null
}, {
    "label": "Frame handles end",
    "key": "handleEnd",
    "type": "number",
    "is_hierarchical": true,
    "group": "avalon",
    "default": null
}]

@@ -0,0 +1,5 @@
{
    "server_url": "",
    "api_key": "",
    "api_user": ""
}
Some files were not shown because too many files have changed in this diff.