Merge branch 'develop' into bugfix/170-farm-publishing-check-if-published-items-do-exist

Petr Kalis 2021-06-03 17:54:40 +02:00
commit d106f9fbb6
253 changed files with 8875 additions and 9832 deletions

View file

@ -203,6 +203,12 @@ class OpenPypeVersion(semver.VersionInfo):
openpype_version.staging = True
return openpype_version
def __hash__(self):
if self.path:
return hash(self.path)
else:
return hash(str(self))
class BootstrapRepos:
"""Class for bootstrapping local OpenPype installation.
@ -650,6 +656,9 @@ class BootstrapRepos:
v for v in openpype_versions if v.path.suffix != ".zip"
]
# remove duplicates
openpype_versions = list(set(openpype_versions))
return openpype_versions
def process_entered_location(self, location: str) -> Union[Path, None]:
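For context, a hedged sketch of why the added __hash__ matters for the dedup above: list(set(openpype_versions)) only drops duplicates when equal versions also hash equally. The constructor call, version and path below are hypothetical; semver.VersionInfo already provides value-based __eq__.

    from pathlib import Path

    a = OpenPypeVersion(3, 1, 0)  # hypothetical constructor usage
    b = OpenPypeVersion(3, 1, 0)
    a.path = b.path = Path("/shared/openpype/3.1.0")  # same install discovered twice
    # both instances now hash by their path, so the set collapses them
    assert hash(a) == hash(b)
    assert len({a, b}) == 1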

View file

@ -60,13 +60,6 @@ def tray(debug=False):
help="Ftrack api user")
@click.option("--ftrack-api-key", envvar="FTRACK_API_KEY",
help="Ftrack api key")
@click.option("--ftrack-events-path",
envvar="FTRACK_EVENTS_PATH",
help=("path to ftrack event handlers"))
@click.option("--no-stored-credentials", is_flag=True,
help="don't use stored credentials")
@click.option("--store-credentials", is_flag=True,
help="store provided credentials")
@click.option("--legacy", is_flag=True,
help="run event server without mongo storing")
@click.option("--clockify-api-key", envvar="CLOCKIFY_API_KEY",
@ -77,9 +70,6 @@ def eventserver(debug,
ftrack_url,
ftrack_user,
ftrack_api_key,
ftrack_events_path,
no_stored_credentials,
store_credentials,
legacy,
clockify_api_key,
clockify_workspace):
@ -87,10 +77,6 @@ def eventserver(debug,
This should ideally be used by a system service (such as systemd or upstart
on Linux, or a Windows service).
You have to set either proper environment variables to provide URL and
credentials or use option to specify them. If you use --store_credentials
provided credentials will be stored for later use.
"""
if debug:
os.environ['OPENPYPE_DEBUG'] = "3"
@ -99,9 +85,6 @@ def eventserver(debug,
ftrack_url,
ftrack_user,
ftrack_api_key,
ftrack_events_path,
no_stored_credentials,
store_credentials,
legacy,
clockify_api_key,
clockify_workspace

View file

@ -8,8 +8,19 @@ class AddLastWorkfileToLaunchArgs(PreLaunchHook):
This is not possible to do the same way for all applications.
"""
order = 0
app_groups = ["maya", "nuke", "nukex", "hiero", "nukestudio"]
# Execute after workfile template copy
order = 10
app_groups = [
"maya",
"nuke",
"nukex",
"hiero",
"nukestudio",
"blender",
"photoshop",
"tvpaint",
"afftereffects"
]
def execute(self):
if not self.data.get("start_last_workfile"):
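For context, a hedged sketch of the sequencing assumption behind the two order values: prelaunch hooks run in ascending order, so the CopyTemplateWorkfile hook added in the next file (order 0) copies the template before this hook (order 10) appends the workfile path to the launch arguments. discovered_hooks is a hypothetical list of hook instances.

    for hook in sorted(discovered_hooks, key=lambda hook: hook.order):
        hook.execute()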

View file

@ -0,0 +1,127 @@
import os
import shutil
from openpype.lib import (
PreLaunchHook,
get_custom_workfile_template_by_context,
get_custom_workfile_template_by_string_context
)
from openpype.settings import get_project_settings
class CopyTemplateWorkfile(PreLaunchHook):
"""Copy workfile template.
This is not possible to do the same way for all applications.
The prelaunch hook does anything only if the last workfile path points to a
file that does not exist yet.
- That is possible only if it is the first version.
"""
# Before `AddLastWorkfileToLaunchArgs`
order = 0
app_groups = ["blender", "photoshop", "tvpaint", "afftereffects"]
def execute(self):
"""Check if can copy template for context and do it if possible.
First check if host for current project should create first workfile.
Second check is if template is reachable and can be copied.
Args:
last_workfile(str): Path where template will be copied.
Returns:
None: This is a void method.
"""
last_workfile = self.data.get("last_workfile_path")
if not last_workfile:
self.log.warning((
"Last workfile was not collected."
" Can't add it to launch arguments or determine if should"
" copy template."
))
return
if os.path.exists(last_workfile):
self.log.debug("Last workfile exits. Skipping {} process.".format(
self.__class__.__name__
))
return
self.log.info("Last workfile does not exits.")
project_name = self.data["project_name"]
asset_name = self.data["asset_name"]
task_name = self.data["task_name"]
project_settings = get_project_settings(project_name)
host_settings = project_settings[self.application.host_name]
workfile_builder_settings = host_settings.get("workfile_builder")
if not workfile_builder_settings:
# TODO remove warning when deprecated
self.log.warning((
"Seems like old version of settings is used."
" Can't access custom templates in host \"{}\"."
).format(self.application.full_label))
return
if not workfile_builder_settings["create_first_version"]:
self.log.info((
"Project \"{}\" has turned off to create first workfile for"
" application \"{}\""
).format(project_name, self.application.full_label))
return
# Backwards compatibility
template_profiles = workfile_builder_settings.get("custom_templates")
if not template_profiles:
self.log.info(
"Custom templates are not filled. Skipping template copy."
)
return
project_doc = self.data.get("project_doc")
asset_doc = self.data.get("asset_doc")
anatomy = self.data.get("anatomy")
if project_doc and asset_doc:
self.log.debug("Started filtering of custom template paths.")
template_path = get_custom_workfile_template_by_context(
template_profiles, project_doc, asset_doc, task_name, anatomy
)
else:
self.log.warning((
"Global data collection probably did not execute."
" Using backup solution."
))
dbcon = self.data.get("dbcon")
template_path = get_custom_workfile_template_by_string_context(
template_profiles, project_name, asset_name, task_name,
dbcon, anatomy
)
if not template_path:
self.log.info(
"Registered custom templates didn't match current context."
)
return
if not os.path.exists(template_path):
self.log.warning(
"Couldn't find workfile template file \"{}\"".format(
template_path
)
)
return
self.log.info(
f"Creating workfile from template: \"{template_path}\""
)
# Copy template workfile to new destination
shutil.copy2(
os.path.normpath(template_path),
os.path.normpath(last_workfile)
)
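For illustration, a hedged example of a custom_templates profile as this hook consumes it. The exact schema is an assumption (keys and path are illustrative), grounded only in the fact that profiles are filtered by project/asset/task context above.

    template_profiles = [
        {
            "task_types": ["Animation"],  # hypothetical filter key
            "path": "{root[work]}/templates/anim_template.psd"  # hypothetical path
        }
    ]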

View file

@ -0,0 +1,34 @@
import os
from openpype.lib import PreLaunchHook
class LaunchWithTerminal(PreLaunchHook):
"""Mac specific pre arguments for application.
Mac applications should be launched using "open" argument which is internal
callbacks to open executable. We also add argument "-a" to tell it's
application open. This is used only for executables ending with ".app". It
is expected that these executables lead to app packages.
"""
order = 1000
platforms = ["darwin"]
def execute(self):
executable = str(self.launch_context.executable)
# Skip executables not ending with ".app" or that are not folders
if not executable.endswith(".app") or not os.path.isdir(executable):
return
# Check if first argument matches executable path
# - Few applications are not executed directly but through OpenPype
# process (Photoshop, AfterEffects, Harmony, ...). These should not
# use `open`.
if self.launch_context.launch_args[0] != executable:
return
# Tell `open` to pass arguments if there are any
if len(self.launch_context.launch_args) > 1:
self.launch_context.launch_args.insert(1, "--args")
# Prepend open arguments
self.launch_context.launch_args.insert(0, ["open", "-a"])
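A hedged before/after of the argument list this hook produces (application path and flags are hypothetical):

    launch_args = ["/Applications/Nuke13.0v1/Nuke13.0v1.app", "-b", "script.nk"]
    launch_args.insert(1, "--args")        # tell `open` to forward the rest
    launch_args.insert(0, ["open", "-a"])  # prepended as a list, flattened later
    # result: [["open", "-a"], ".../Nuke13.0v1.app", "--args", "-b", "script.nk"]
    # flattened command: open -a /Applications/Nuke13.0v1/Nuke13.0v1.app --args -b script.nk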

View file

@ -1,4 +1,5 @@
import os
import subprocess
from openpype.lib import (
PreLaunchHook,
@ -17,6 +18,8 @@ class NonPythonHostHook(PreLaunchHook):
"""
app_groups = ["harmony", "photoshop", "aftereffects"]
order = 20
def execute(self):
# Pop executable
executable_path = self.launch_context.launch_args.pop(0)
@ -45,3 +48,6 @@ class NonPythonHostHook(PreLaunchHook):
if remainders:
self.launch_context.launch_args.extend(remainders)
self.launch_context.kwargs["stdout"] = subprocess.DEVNULL
self.launch_context.kwargs["stderr"] = subprocess.STDOUT

View file

@ -11,12 +11,14 @@ class LaunchWithWindowsShell(PreLaunchHook):
instead.
"""
# Should be as last hook becuase must change launch arguments to string
# Should be as last hook because must change launch arguments to string
order = 1000
app_groups = ["nuke", "nukex", "hiero", "nukestudio"]
platforms = ["windows"]
def execute(self):
launch_args = self.launch_context.clear_launch_args(
self.launch_context.launch_args)
new_args = [
# Get comspec which is cmd.exe in most cases.
os.environ.get("COMSPEC", "cmd.exe"),
@ -24,7 +26,7 @@ class LaunchWithWindowsShell(PreLaunchHook):
"/c",
# Convert arguments to command line arguments (as string)
"\"{}\"".format(
subprocess.list2cmdline(self.launch_context.launch_args)
subprocess.list2cmdline(launch_args)
)
]
# Convert list to string
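A hedged illustration of the conversion (paths hypothetical): subprocess.list2cmdline quotes arguments containing spaces, and the result is then wrapped for cmd.exe /c.

    import subprocess

    args = ["C:\\Program Files\\Nuke13.0v1\\Nuke13.0.exe", "D:\\projects\\my shot.nk"]
    print(subprocess.list2cmdline(args))
    # "C:\Program Files\Nuke13.0v1\Nuke13.0.exe" "D:\projects\my shot.nk"
    # final command: cmd.exe /c "<the quoted line above>"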

View file

@ -1,25 +0,0 @@
import bpy
from avalon import api, blender
import openpype.hosts.blender.api.plugin
class CreateSetDress(openpype.hosts.blender.api.plugin.Creator):
"""A grouped package of loaded content"""
name = "setdressMain"
label = "Set Dress"
family = "setdress"
icon = "cubes"
defaults = ["Main", "Anim"]
def process(self):
asset = self.data["asset"]
subset = self.data["subset"]
name = openpype.hosts.blender.api.plugin.asset_name(asset, subset)
collection = bpy.data.collections.new(name=name)
bpy.context.scene.collection.children.link(collection)
self.data['task'] = api.Session.get('AVALON_TASK')
blender.lib.imprint(collection, self.data)
return collection

View file

@ -25,9 +25,6 @@ class BlendLayoutLoader(plugin.AssetLoader):
icon = "code-fork"
color = "orange"
animation_creator_name = "CreateAnimation"
setdress_creator_name = "CreateSetDress"
def _remove(self, objects, obj_container):
for obj in list(objects):
if obj.type == 'ARMATURE':
@ -293,7 +290,6 @@ class UnrealLayoutLoader(plugin.AssetLoader):
color = "orange"
animation_creator_name = "CreateAnimation"
setdress_creator_name = "CreateSetDress"
def _remove_objects(self, objects):
for obj in list(objects):
@ -383,7 +379,7 @@ class UnrealLayoutLoader(plugin.AssetLoader):
def _process(
self, libpath, layout_container, container_name, representation,
actions, parent
actions, parent_collection
):
with open(libpath, "r") as fp:
data = json.load(fp)
@ -392,6 +388,11 @@ class UnrealLayoutLoader(plugin.AssetLoader):
layout_collection = bpy.data.collections.new(container_name)
scene.collection.children.link(layout_collection)
parent = parent_collection
if parent is None:
parent = scene.collection
all_loaders = api.discover(api.Loader)
avalon_container = bpy.data.collections.get(
@ -516,23 +517,9 @@ class UnrealLayoutLoader(plugin.AssetLoader):
container_metadata["libpath"] = libpath
container_metadata["lib_container"] = lib_container
# Create a setdress subset to contain all the animation for all
# the rigs in the layout
creator_plugin = get_creator_by_name(self.setdress_creator_name)
if not creator_plugin:
raise ValueError("Creator plugin \"{}\" was not found.".format(
self.setdress_creator_name
))
parent = api.create(
creator_plugin,
name="animation",
asset=api.Session["AVALON_ASSET"],
options={"useSelection": True},
data={"dependencies": str(context["representation"]["_id"])})
layout_collection = self._process(
libpath, layout_container, container_name,
str(context["representation"]["_id"]), None, parent)
str(context["representation"]["_id"]), None, None)
container_metadata["obj_container"] = layout_collection

View file

@ -0,0 +1,218 @@
"""Load a model asset in Blender."""
from pathlib import Path
from pprint import pformat
from typing import Dict, List, Optional
import os
import json
import bpy
from avalon import api, blender
import openpype.hosts.blender.api.plugin as plugin
class BlendLookLoader(plugin.AssetLoader):
"""Load models from a .blend file.
Because they come from a .blend file, we can simply link the collection that
contains the model. There is no further need to 'containerise' it.
"""
families = ["look"]
representations = ["json"]
label = "Load Look"
icon = "code-fork"
color = "orange"
def get_all_children(self, obj):
children = list(obj.children)
for child in children:
children.extend(child.children)
return children
def _process(self, libpath, container_name, objects):
with open(libpath, "r") as fp:
data = json.load(fp)
path = os.path.dirname(libpath)
materials_path = f"{path}/resources"
materials = []
for entry in data:
file = entry.get('fbx_filename')
if file is None:
continue
bpy.ops.import_scene.fbx(filepath=f"{materials_path}/{file}")
mesh = [o for o in bpy.context.scene.objects if o.select_get()][0]
material = mesh.data.materials[0]
material.name = f"{material.name}:{container_name}"
texture_file = entry.get('tga_filename')
if texture_file:
node_tree = material.node_tree
pbsdf = node_tree.nodes['Principled BSDF']
base_color = pbsdf.inputs[0]
tex_node = base_color.links[0].from_node
tex_node.image.filepath = f"{materials_path}/{texture_file}"
materials.append(material)
for obj in objects:
for child in self.get_all_children(obj):
mesh_name = child.name.split(':')[0]
if mesh_name == material.name.split(':')[0]:
child.data.materials.clear()
child.data.materials.append(material)
break
bpy.data.objects.remove(mesh)
return materials, objects
def process_asset(
self, context: dict, name: str, namespace: Optional[str] = None,
options: Optional[Dict] = None
) -> Optional[List]:
"""
Arguments:
name: Use pre-defined name
namespace: Use pre-defined namespace
context: Full parenthood of representation to load
options: Additional settings dictionary
"""
libpath = self.fname
asset = context["asset"]["name"]
subset = context["subset"]["name"]
lib_container = plugin.asset_name(
asset, subset
)
unique_number = plugin.get_unique_number(
asset, subset
)
namespace = namespace or f"{asset}_{unique_number}"
container_name = plugin.asset_name(
asset, subset, unique_number
)
container = bpy.data.collections.new(lib_container)
container.name = container_name
blender.pipeline.containerise_existing(
container,
name,
namespace,
context,
self.__class__.__name__,
)
metadata = container.get(blender.pipeline.AVALON_PROPERTY)
metadata["libpath"] = libpath
metadata["lib_container"] = lib_container
selected = [o for o in bpy.context.scene.objects if o.select_get()]
materials, objects = self._process(libpath, container_name, selected)
# Save the list of imported materials in the metadata container
metadata["objects"] = objects
metadata["materials"] = materials
metadata["parent"] = str(context["representation"]["parent"])
metadata["family"] = context["representation"]["context"]["family"]
nodes = list(container.objects)
nodes.append(container)
self[:] = nodes
return nodes
def update(self, container: Dict, representation: Dict):
collection = bpy.data.collections.get(container["objectName"])
libpath = Path(api.get_representation_path(representation))
extension = libpath.suffix.lower()
self.log.info(
"Container: %s\nRepresentation: %s",
pformat(container, indent=2),
pformat(representation, indent=2),
)
assert collection, (
f"The asset is not loaded: {container['objectName']}"
)
assert not (collection.children), (
"Nested collections are not supported."
)
assert libpath, (
"No existing library file found for {container['objectName']}"
)
assert libpath.is_file(), (
f"The file doesn't exist: {libpath}"
)
assert extension in plugin.VALID_EXTENSIONS, (
f"Unsupported file: {libpath}"
)
collection_metadata = collection.get(blender.pipeline.AVALON_PROPERTY)
collection_libpath = collection_metadata["libpath"]
normalized_collection_libpath = (
str(Path(bpy.path.abspath(collection_libpath)).resolve())
)
normalized_libpath = (
str(Path(bpy.path.abspath(str(libpath))).resolve())
)
self.log.debug(
"normalized_collection_libpath:\n %s\nnormalized_libpath:\n %s",
normalized_collection_libpath,
normalized_libpath,
)
if normalized_collection_libpath == normalized_libpath:
self.log.info("Library already loaded, not updating...")
return
for obj in collection_metadata['objects']:
for child in self.get_all_children(obj):
child.data.materials.clear()
for material in collection_metadata['materials']:
bpy.data.materials.remove(material)
namespace = collection_metadata['namespace']
name = collection_metadata['name']
container_name = f"{namespace}_{name}"
materials, objects = self._process(
libpath, container_name, collection_metadata['objects'])
collection_metadata["objects"] = objects
collection_metadata["materials"] = materials
collection_metadata["libpath"] = str(libpath)
collection_metadata["representation"] = str(representation["_id"])
def remove(self, container: Dict) -> bool:
collection = bpy.data.collections.get(container["objectName"])
if not collection:
return False
collection_metadata = collection.get(blender.pipeline.AVALON_PROPERTY)
for obj in collection_metadata['objects']:
for child in self.get_all_children(obj):
child.data.materials.clear()
for material in collection_metadata['materials']:
bpy.data.materials.remove(material)
bpy.data.collections.remove(collection)
return True
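The json that _process reads is the one written by the Unreal ExtractLook plugin further below; a hedged example of its content (asset and file names illustrative):

    [
        {
            "material": "chairWood",
            "fbx_filename": "lookMain_chairWood.fbx",
            "tga_filename": "lookMain_chairWood_texture.tga"
        }
    ]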

View file

@ -107,6 +107,9 @@ class BlendRigLoader(plugin.AssetLoader):
if action is not None:
local_obj.animation_data.action = action
elif local_obj.animation_data.action is not None:
plugin.prepare_data(
local_obj.animation_data.action, collection_name)
# Link the drivers to the local object
if local_obj.data.animation_data:

View file

@ -1,61 +0,0 @@
import os
import json
import openpype.api
import pyblish.api
import bpy
class ExtractSetDress(openpype.api.Extractor):
"""Extract setdress."""
label = "Extract SetDress"
hosts = ["blender"]
families = ["setdress"]
optional = True
order = pyblish.api.ExtractorOrder + 0.1
def process(self, instance):
stagingdir = self.staging_dir(instance)
json_data = []
for i in instance.context:
collection = i.data.get("name")
container = None
for obj in bpy.data.collections[collection].objects:
if obj.type == "ARMATURE":
container_name = obj.get("avalon").get("container_name")
container = bpy.data.collections[container_name]
if container:
json_dict = {
"subset": i.data.get("subset"),
"container": container.name,
}
json_dict["instance_name"] = container.get("avalon").get(
"instance_name"
)
json_data.append(json_dict)
if "representations" not in instance.data:
instance.data["representations"] = []
json_filename = f"{instance.name}.json"
json_path = os.path.join(stagingdir, json_filename)
with open(json_path, "w+") as file:
json.dump(json_data, fp=file, indent=2)
json_representation = {
"name": "json",
"ext": "json",
"files": json_filename,
"stagingDir": stagingdir,
}
instance.data["representations"].append(json_representation)
self.log.info(
"Extracted instance '{}' to: {}".format(instance.name,
json_representation)
)

View file

@ -54,6 +54,14 @@ class ExtractFBX(openpype.api.Extractor):
# We set the scale of the scene for the export
scene.unit_settings.scale_length = 0.01
new_materials = []
for obj in collections[0].all_objects:
if obj.type == 'MESH':
mat = bpy.data.materials.new(obj.name)
obj.data.materials.append(mat)
new_materials.append(mat)
# We export the fbx
bpy.ops.export_scene.fbx(
filepath=filepath,
@ -66,6 +74,13 @@ class ExtractFBX(openpype.api.Extractor):
scene.unit_settings.scale_length = old_scale
for mat in new_materials:
bpy.data.materials.remove(mat)
for obj in collections[0].all_objects:
if obj.type == 'MESH':
obj.data.materials.pop()
if "representations" not in instance.data:
instance.data["representations"] = []

View file

@ -1,4 +1,5 @@
import os
import json
import openpype.api
@ -121,6 +122,25 @@ class ExtractAnimationFBX(openpype.api.Extractor):
pair[1].user_clear()
bpy.data.actions.remove(pair[1])
json_filename = f"{instance.name}.json"
json_path = os.path.join(stagingdir, json_filename)
json_dict = {}
collection = instance.data.get("name")
container = None
for obj in bpy.data.collections[collection].objects:
if obj.type == "ARMATURE":
container_name = obj.get("avalon").get("container_name")
container = bpy.data.collections[container_name]
if container:
json_dict = {
"instance_name": container.get("avalon").get("instance_name")
}
with open(json_path, "w+") as file:
json.dump(json_dict, fp=file, indent=2)
if "representations" not in instance.data:
instance.data["representations"] = []
@ -130,7 +150,15 @@ class ExtractAnimationFBX(openpype.api.Extractor):
'files': fbx_filename,
"stagingDir": stagingdir,
}
json_representation = {
'name': 'json',
'ext': 'json',
'files': json_filename,
"stagingDir": stagingdir,
}
instance.data["representations"].append(fbx_representation)
instance.data["representations"].append(json_representation)
self.log.info("Extracted instance '{}' to: {}".format(
instance.name, fbx_representation))

View file

@ -214,7 +214,9 @@ def get_track_items(
# add all if no track_type is defined
return_list.append(track_item)
return return_list
# return output list but make sure all items are TrackItems
return [_i for _i in return_list
if type(_i) == hiero.core.TrackItem]
def get_track_item_pype_tag(track_item):

View file

@ -52,10 +52,11 @@ class ExtractClipEffects(openpype.api.Extractor):
instance.data["representations"] = list()
transfer_data = [
"handleStart", "handleEnd", "sourceIn", "sourceOut",
"frameStart", "frameEnd", "sourceInH", "sourceOutH",
"clipIn", "clipOut", "clipInH", "clipOutH", "asset", "track",
"version"
"handleStart", "handleEnd",
"sourceStart", "sourceStartH", "sourceEnd", "sourceEndH",
"frameStart", "frameEnd",
"clipIn", "clipOut", "clipInH", "clipOutH",
"asset", "version"
]
# pass data to version

View file

@ -5,7 +5,7 @@ import pyblish.api
class PreCollectClipEffects(pyblish.api.InstancePlugin):
"""Collect soft effects instances."""
order = pyblish.api.CollectorOrder - 0.508
order = pyblish.api.CollectorOrder - 0.579
label = "Pre-collect Clip Effects Instances"
families = ["clip"]
@ -24,7 +24,8 @@ class PreCollectClipEffects(pyblish.api.InstancePlugin):
self.clip_in_h = self.clip_in - self.handle_start
self.clip_out_h = self.clip_out + self.handle_end
track = instance.data["trackItem"]
track_item = instance.data["item"]
track = track_item.parent()
track_index = track.trackIndex()
tracks_effect_items = instance.context.data.get("tracksEffectItems")
clip_effect_items = instance.data.get("clipEffectItems")
@ -112,7 +113,12 @@ class PreCollectClipEffects(pyblish.api.InstancePlugin):
node = sitem.node()
node_serialized = self.node_serialisation(node)
node_name = sitem.name()
node_class = re.sub(r"\d+", "", node_name)
if "_" in node_name:
node_class = re.sub(r"(?:_)[_0-9]+", "", node_name) # more numbers
else:
node_class = re.sub(r"\d+", "", node_name) # one number
# collect timelineIn/Out
effect_t_in = int(sitem.timelineIn())
effect_t_out = int(sitem.timelineOut())
@ -121,6 +127,7 @@ class PreCollectClipEffects(pyblish.api.InstancePlugin):
return
self.log.debug("node_name: `{}`".format(node_name))
self.log.debug("node_class: `{}`".format(node_class))
return {node_name: {
"class": node_class,

View file

@ -2,6 +2,9 @@ import pyblish
import openpype
from openpype.hosts.hiero import api as phiero
from openpype.hosts.hiero.otio import hiero_export
import hiero
from compiler.ast import flatten
# # developer reload modules
from pprint import pformat
@ -14,18 +17,40 @@ class PrecollectInstances(pyblish.api.ContextPlugin):
label = "Precollect Instances"
hosts = ["hiero"]
audio_track_items = []
def process(self, context):
otio_timeline = context.data["otioTimeline"]
self.otio_timeline = context.data["otioTimeline"]
selected_timeline_items = phiero.get_track_items(
selected=True, check_enabled=True, check_tagged=True)
selected=True, check_tagged=True, check_enabled=True)
# only return enabled track items
if not selected_timeline_items:
selected_timeline_items = phiero.get_track_items(
check_enabled=True, check_tagged=True)
self.log.info(
"Processing enabled track items: {}".format(
selected_timeline_items))
# add all tracks' subtrack effect items to context
all_tracks = hiero.ui.activeSequence().videoTracks()
tracks_effect_items = self.collect_sub_track_items(all_tracks)
context.data["tracksEffectItems"] = tracks_effect_items
# process all selected timeline track items
for track_item in selected_timeline_items:
data = {}
clip_name = track_item.name()
source_clip = track_item.source()
# get clip's subtracks and annotations
annotations = self.clip_annotations(source_clip)
subtracks = self.clip_subtrack(track_item)
self.log.debug("Annotations: {}".format(annotations))
self.log.debug(">> Subtracks: {}".format(subtracks))
# get openpype tag data
tag_data = phiero.get_track_item_pype_data(track_item)
@ -76,12 +101,15 @@ class PrecollectInstances(pyblish.api.ContextPlugin):
"item": track_item,
"families": families,
"publish": tag_data["publish"],
"fps": context.data["fps"]
"fps": context.data["fps"],
# clip's effect
"clipEffectItems": subtracks,
"clipAnnotations": annotations
})
# otio clip data
otio_data = self.get_otio_clip_instance_data(
otio_timeline, track_item) or {}
otio_data = self.get_otio_clip_instance_data(track_item) or {}
self.log.debug("__ otio_data: {}".format(pformat(otio_data)))
data.update(otio_data)
self.log.debug("__ data: {}".format(pformat(data)))
@ -185,6 +213,10 @@ class PrecollectInstances(pyblish.api.ContextPlugin):
item = data.get("item")
clip_name = item.name()
# test if any audio clips
if not self.test_any_audio(item):
return
asset = data["asset"]
subset = "audioMain"
@ -215,7 +247,28 @@ class PrecollectInstances(pyblish.api.ContextPlugin):
self.log.debug(
"_ instance.data: {}".format(pformat(instance.data)))
def get_otio_clip_instance_data(self, otio_timeline, track_item):
def test_any_audio(self, track_item):
# collect all audio tracks to class variable
if not self.audio_track_items:
for otio_clip in self.otio_timeline.each_clip():
if otio_clip.parent().kind != "Audio":
continue
self.audio_track_items.append(otio_clip)
# get track item timeline range
timeline_range = self.create_otio_time_range_from_timeline_item_data(
track_item)
# loop through audio track items and search for an overlapping clip
for otio_audio in self.audio_track_items:
parent_range = otio_audio.range_in_parent()
# if any overlapping clip is found then return True
if openpype.lib.is_overlapping_otio_ranges(
parent_range, timeline_range, strict=False):
return True
def get_otio_clip_instance_data(self, track_item):
"""
Return otio objects for timeline, track and clip
@ -231,7 +284,7 @@ class PrecollectInstances(pyblish.api.ContextPlugin):
ti_track_name = track_item.parent().name()
timeline_range = self.create_otio_time_range_from_timeline_item_data(
track_item)
for otio_clip in otio_timeline.each_clip():
for otio_clip in self.otio_timeline.each_clip():
track_name = otio_clip.parent().name
parent_range = otio_clip.range_in_parent()
if ti_track_name not in track_name:
@ -258,3 +311,76 @@ class PrecollectInstances(pyblish.api.ContextPlugin):
return hiero_export.create_otio_time_range(
frame_start, frame_duration, fps)
@staticmethod
def collect_sub_track_items(tracks):
"""
Returns dictionary with track index as key and list of subtracks
"""
# collect all subtrack items
sub_track_items = {}
for track in tracks:
items = track.items()
# skip tracks that have clips > we need tracks with effects only
if items:
continue
# skip all disabled tracks
if not track.isEnabled():
continue
track_index = track.trackIndex()
_sub_track_items = flatten(track.subTrackItems())
# continue only if any subtrack items are collected
if len(_sub_track_items) < 1:
continue
enabled_sti = []
# loop all found subtrack items and check if they are enabled
for _sti in _sub_track_items:
# checking if not enabled
if not _sti.isEnabled():
continue
if isinstance(_sti, hiero.core.Annotation):
continue
# collect the subtrack item
enabled_sti.append(_sti)
# continue only if any subtrack items are collected
if len(enabled_sti) < 1:
continue
# add collection of subtrackitems to dict
sub_track_items[track_index] = enabled_sti
return sub_track_items
@staticmethod
def clip_annotations(clip):
"""
Returns list of Clip's hiero.core.Annotation
"""
annotations = []
subTrackItems = flatten(clip.subTrackItems())
annotations += [item for item in subTrackItems if isinstance(
item, hiero.core.Annotation)]
return annotations
@staticmethod
def clip_subtrack(clip):
"""
Returns list of Clip's hiero.core.SubTrackItem
"""
subtracks = []
subTrackItems = flatten(clip.parent().subTrackItems())
for item in subTrackItems:
# avoid all annotations
if isinstance(item, hiero.core.Annotation):
continue
# avoid all that are not enabled
if not item.isEnabled():
continue
subtracks.append(item)
return subtracks

View file

@ -75,10 +75,26 @@ class PrecollectWorkfile(pyblish.api.ContextPlugin):
"activeProject": project,
"otioTimeline": otio_timeline,
"currentFile": curent_file,
"fps": fps,
"colorspace": self.get_colorspace(project),
"fps": fps
}
context.data.update(context_data)
self.log.info("Creating instance: {}".format(instance))
self.log.debug("__ instance.data: {}".format(pformat(instance.data)))
self.log.debug("__ context_data: {}".format(pformat(context_data)))
def get_colorspace(self, project):
# get workfile's colorspace properties
return {
"useOCIOEnvironmentOverride": project.useOCIOEnvironmentOverride(),
"lutSetting16Bit": project.lutSetting16Bit(),
"lutSetting8Bit": project.lutSetting8Bit(),
"lutSettingFloat": project.lutSettingFloat(),
"lutSettingLog": project.lutSettingLog(),
"lutSettingViewer": project.lutSettingViewer(),
"lutSettingWorkingSpace": project.lutSettingWorkingSpace(),
"lutUseOCIOForExport": project.lutUseOCIOForExport(),
"ocioConfigName": project.ocioConfigName(),
"ocioConfigPath": project.ocioConfigPath()
}

View file

@ -2124,7 +2124,7 @@ def bake_to_world_space(nodes,
return world_space_nodes
def load_capture_preset(path=None, data=None):
def load_capture_preset(data=None):
import capture
preset = data
@ -2139,11 +2139,7 @@ def load_capture_preset(path=None, data=None):
# GENERIC
id = 'Generic'
for key in preset[id]:
if key.startswith('isolate'):
pass
# options['isolate'] = preset[id][key]
else:
options[str(key)] = preset[id][key]
options[str(key)] = preset[id][key]
# RESOLUTION
id = 'Resolution'
@ -2156,6 +2152,10 @@ def load_capture_preset(path=None, data=None):
for key in preset['Display Options']:
if key.startswith('background'):
disp_options[key] = preset['Display Options'][key]
disp_options[key][0] = (float(disp_options[key][0])/255)
disp_options[key][1] = (float(disp_options[key][1])/255)
disp_options[key][2] = (float(disp_options[key][2])/255)
disp_options[key].pop()
else:
disp_options['displayGradient'] = True
@ -2220,16 +2220,6 @@ def load_capture_preset(path=None, data=None):
# use active sound track
scene = capture.parse_active_scene()
options['sound'] = scene['sound']
cam_options = dict()
cam_options['overscan'] = 1.0
cam_options['displayFieldChart'] = False
cam_options['displayFilmGate'] = False
cam_options['displayFilmOrigin'] = False
cam_options['displayFilmPivot'] = False
cam_options['displayGateMask'] = False
cam_options['displayResolution'] = False
cam_options['displaySafeAction'] = False
cam_options['displaySafeTitle'] = False
# options['display_options'] = temp_options

View file

@ -81,7 +81,10 @@ class AssProxyLoader(openpype.hosts.maya.api.plugin.ReferenceLoader):
if c is not None:
cmds.setAttr(groupName + ".useOutlinerColor", 1)
cmds.setAttr(groupName + ".outlinerColor",
c[0], c[1], c[2])
(float(c[0])/255),
(float(c[1])/255),
(float(c[2])/255)
)
self[:] = nodes

View file

@ -38,7 +38,10 @@ class GpuCacheLoader(api.Loader):
if c is not None:
cmds.setAttr(root + ".useOutlinerColor", 1)
cmds.setAttr(root + ".outlinerColor",
c[0], c[1], c[2])
(float(c[0])/255),
(float(c[1])/255),
(float(c[2])/255)
)
# Create transform with shape
transform_name = label + "_GPU"

View file

@ -85,7 +85,11 @@ class ReferenceLoader(openpype.hosts.maya.api.plugin.ReferenceLoader):
c = colors.get(family)
if c is not None:
groupNode.useOutlinerColor.set(1)
groupNode.outlinerColor.set(c[0], c[1], c[2])
groupNode.outlinerColor.set(
(float(c[0])/255),
(float(c[1])/255),
(float(c[2])/255)
)
self[:] = newNodes

View file

@ -62,7 +62,10 @@ class LoadVDBtoRedShift(api.Loader):
if c is not None:
cmds.setAttr(root + ".useOutlinerColor", 1)
cmds.setAttr(root + ".outlinerColor",
c[0], c[1], c[2])
(float(c[0])/255),
(float(c[1])/255),
(float(c[2])/255)
)
# Create VR
volume_node = cmds.createNode("RedshiftVolumeShape",

View file

@ -55,7 +55,10 @@ class LoadVDBtoVRay(api.Loader):
if c is not None:
cmds.setAttr(root + ".useOutlinerColor", 1)
cmds.setAttr(root + ".outlinerColor",
c[0], c[1], c[2])
(float(c[0])/255),
(float(c[1])/255),
(float(c[2])/255)
)
# Create VR
grid_node = cmds.createNode("VRayVolumeGrid",

View file

@ -74,7 +74,10 @@ class VRayProxyLoader(api.Loader):
if c is not None:
cmds.setAttr("{0}.useOutlinerColor".format(group_node), 1)
cmds.setAttr("{0}.outlinerColor".format(group_node),
c[0], c[1], c[2])
(float(c[0])/255),
(float(c[1])/255),
(float(c[2])/255)
)
return containerise(
name=name,

View file

@ -53,7 +53,10 @@ class VRaySceneLoader(api.Loader):
if c is not None:
cmds.setAttr("{0}.useOutlinerColor".format(group_node), 1)
cmds.setAttr("{0}.outlinerColor".format(group_node),
c[0], c[1], c[2])
(float(c[0])/255),
(float(c[1])/255),
(float(c[2])/255)
)
return containerise(
name=name,

View file

@ -66,7 +66,10 @@ class YetiCacheLoader(api.Loader):
if c is not None:
cmds.setAttr(group_name + ".useOutlinerColor", 1)
cmds.setAttr(group_name + ".outlinerColor",
c[0], c[1], c[2])
(float(c[0])/255),
(float(c[1])/255),
(float(c[2])/255)
)
nodes.append(group_node)

View file

@ -84,7 +84,10 @@ class YetiRigLoader(openpype.hosts.maya.api.plugin.ReferenceLoader):
if c is not None:
cmds.setAttr(groupName + ".useOutlinerColor", 1)
cmds.setAttr(groupName + ".outlinerColor",
c[0], c[1], c[2])
(float(c[0])/255),
(float(c[1])/255),
(float(c[2])/255)
)
self[:] = nodes
return nodes

View file

@ -49,9 +49,6 @@ class ExtractPlayblast(openpype.api.Extractor):
preset['camera'] = camera
preset['format'] = "image"
preset['quality'] = 95
preset['compression'] = "png"
preset['start_frame'] = start
preset['end_frame'] = end
camera_option = preset.get("camera_option", {})
@ -75,7 +72,7 @@ class ExtractPlayblast(openpype.api.Extractor):
# Isolate view is requested by having objects in the set besides a
# camera.
if instance.data.get("isolate"):
if preset.pop("isolate_view", False) or instance.data.get("isolate"):
preset["isolate"] = instance.data["setMembers"]
# Show/Hide image planes on request.
@ -93,9 +90,6 @@ class ExtractPlayblast(openpype.api.Extractor):
# playblast and viewer
preset['viewer'] = False
# Remove panel key since it's internal value to capture_gui
preset.pop("panel", None)
self.log.info('using viewport preset: {}'.format(preset))
path = capture.capture(**preset)

View file

@ -12,10 +12,10 @@ import pymel.core as pm
class ExtractThumbnail(openpype.api.Extractor):
"""Extract a Camera as Alembic.
"""Extract viewport thumbnail.
The cameras gets baked to world space by default. Only when the instance's
`bakeToWorldSpace` is set to False it will include its full hierarchy.
Takes review camera and creates a thumbnail based on viewport
capture.
"""
@ -35,17 +35,14 @@ class ExtractThumbnail(openpype.api.Extractor):
try:
preset = lib.load_capture_preset(data=capture_preset)
except:
except KeyError as ke:
self.log.error('Error loading capture presets: {}'.format(str(ke)))
preset = {}
self.log.info('using viewport preset: {}'.format(capture_preset))
self.log.info('Using viewport preset: {}'.format(preset))
# preset["off_screen"] = False
preset['camera'] = camera
preset['format'] = "image"
# preset['compression'] = "qt"
preset['quality'] = 50
preset['compression'] = "jpg"
preset['start_frame'] = instance.data["frameStart"]
preset['end_frame'] = instance.data["frameStart"]
preset['camera_options'] = {
@ -78,7 +75,7 @@ class ExtractThumbnail(openpype.api.Extractor):
# Isolate view is requested by having objects in the set besides a
# camera.
if instance.data.get("isolate"):
if preset.pop("isolate_view", False) or instance.data.get("isolate"):
preset["isolate"] = instance.data["setMembers"]
with maintained_time():
@ -89,9 +86,6 @@ class ExtractThumbnail(openpype.api.Extractor):
# playblast and viewer
preset['viewer'] = False
# Remove panel key since it's internal value to capture_gui
preset.pop("panel", None)
path = capture.capture(**preset)
playblast = self._fix_playblast_output_path(path)

View file

@ -243,7 +243,10 @@ class ValidateRenderSettings(pyblish.api.InstancePlugin):
"Cannot get value of {}.{}".format(
node, attribute_name))
else:
if value != render_value:
# compare values as strings to get around various
# datatypes possible in Settings and Render
# Settings
if str(value) != str(render_value):
invalid = True
cls.log.error(
("Invalid value {} set on {}.{}. "

View file

@ -80,7 +80,7 @@ def install():
# Set context settings.
nuke.addOnCreate(workfile_settings.set_context_settings, nodeClass="Root")
nuke.addOnCreate(workfile_settings.set_favorites, nodeClass="Root")
nuke.addOnCreate(lib.open_last_workfile, nodeClass="Root")
nuke.addOnCreate(lib.process_workfile_builder, nodeClass="Root")
nuke.addOnCreate(lib.launch_workfiles_app, nodeClass="Root")
menu.install()

View file

@ -16,6 +16,7 @@ from avalon.nuke import (
from openpype.api import (
Logger,
Anatomy,
BuildWorkfile,
get_version_from_path,
get_anatomy_settings,
get_hierarchy,
@ -1059,7 +1060,7 @@ class WorkfileSettings(object):
# replace reset resolution from avalon core to pype's
self.reset_frame_range_handles()
# add colorspace menu item
# self.set_colorspace()
self.set_colorspace()
def set_favorites(self):
work_dir = os.getenv("AVALON_WORKDIR")
@ -1641,23 +1642,69 @@ def launch_workfiles_app():
workfiles.show(os.environ["AVALON_WORKDIR"])
def open_last_workfile():
# get state from settings
open_last_version = get_current_project_settings()["nuke"].get(
"general", {}).get("create_initial_workfile")
def process_workfile_builder():
from openpype.lib import (
env_value_to_bool,
get_custom_workfile_template
)
# get state from settings
workfile_builder = get_current_project_settings()["nuke"].get(
"workfile_builder", {})
# get all important settings
openlv_on = env_value_to_bool(
env_key="AVALON_OPEN_LAST_WORKFILE",
default=None)
# get settings
createfv_on = workfile_builder.get("create_first_version") or None
custom_templates = workfile_builder.get("custom_templates") or None
builder_on = workfile_builder.get("builder_on_start") or None
log.info("Opening last workfile...")
last_workfile_path = os.environ.get("AVALON_LAST_WORKFILE")
if not os.path.exists(last_workfile_path):
# return if none is defined
if not open_last_version:
return
# generate first version if the file does not exist and the feature is enabled
if createfv_on and not os.path.exists(last_workfile_path):
# get custom template path if any
custom_template_path = get_custom_workfile_template(
custom_templates
)
# if custom template is defined
if custom_template_path:
log.info("Adding nodes from `{}`...".format(
custom_template_path
))
try:
# import nodes into current script
nuke.nodePaste(custom_template_path)
except RuntimeError:
raise RuntimeError((
"Template defined for project: {} is not working. "
"Talk to your manager for an advise").format(
custom_template_path))
# if builder at start is defined
if builder_on:
log.info("Building nodes from presets...")
# build nodes by defined presets
BuildWorkfile().process()
log.info("Saving script as version `{}`...".format(
last_workfile_path
))
# save file as version
save_file(last_workfile_path)
else:
# to avoid looping of the callback, remove it!
nuke.removeOnCreate(open_last_workfile, nodeClass="Root")
return
# open workfile
open_file(last_workfile_path)
# skip opening of last version if it is not enabled
if not openlv_on or not os.path.exists(last_workfile_path):
return
# to avoid looping of the callback, remove it!
nuke.removeOnCreate(process_workfile_builder, nodeClass="Root")
log.info("Opening last workfile...")
# open workfile
open_file(last_workfile_path)
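For reference, a hedged example of the workfile_builder settings block this function consumes; the three keys are taken from the code above, while the profile entry shape is an assumption (values illustrative).

    "workfile_builder": {
        "create_first_version": true,
        "custom_templates": [
            {"task_types": ["Compositing"], "path": "/studio/templates/comp_template.nk"}
        ],
        "builder_on_start": false
    }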

View file

@ -4,18 +4,19 @@ import json
from collections import OrderedDict
class LoadLuts(api.Loader):
class LoadEffects(api.Loader):
"""Loading colorspace soft effect exported from nukestudio"""
representations = ["lutJson"]
families = ["lut"]
representations = ["effectJson"]
families = ["effect"]
label = "Load Luts - nodes"
label = "Load Effects - nodes"
order = 0
icon = "cc"
color = style.colors.light
ignore_attr = ["useLifetime"]
def load(self, context, name, namespace, data):
"""
Loading function to get the soft effects to particular read node
@ -66,15 +67,15 @@ class LoadLuts(api.Loader):
for key, value in json.load(f).iteritems()}
# get correct order of nodes by positions on track and subtrack
nodes_order = self.reorder_nodes(json_f["effects"])
nodes_order = self.reorder_nodes(json_f)
# adding nodes to node graph
# just in case we are in a group, let's jump out of it
nuke.endGroup()
GN = nuke.createNode("Group")
GN["name"].setValue(object_name)
GN = nuke.createNode(
"Group",
"name {}_1".format(object_name))
# adding content to the group node
with GN:
@ -186,7 +187,7 @@ class LoadLuts(api.Loader):
for key, value in json.load(f).iteritems()}
# get correct order of nodes by positions on track and subtrack
nodes_order = self.reorder_nodes(json_f["effects"])
nodes_order = self.reorder_nodes(json_f)
# adding nodes to node graph
# just in case we are in a group, let's jump out of it
@ -266,7 +267,11 @@ class LoadLuts(api.Loader):
None: if nothing found
"""
search_name = "{0}_{1}".format(asset, subset)
node = [n for n in nuke.allNodes() if search_name in n["name"].value()]
node = [
n for n in nuke.allNodes(filter="Read")
if search_name in n["file"].value()
]
if len(node) > 0:
rn = node[0]
else:
@ -286,8 +291,10 @@ class LoadLuts(api.Loader):
def reorder_nodes(self, data):
new_order = OrderedDict()
trackNums = [v["trackIndex"] for k, v in data.items()]
subTrackNums = [v["subTrackIndex"] for k, v in data.items()]
trackNums = [v["trackIndex"] for k, v in data.items()
if isinstance(v, dict)]
subTrackNums = [v["subTrackIndex"] for k, v in data.items()
if isinstance(v, dict)]
for trackIndex in range(
min(trackNums), max(trackNums) + 1):
@ -300,6 +307,7 @@ class LoadLuts(api.Loader):
def get_item(self, data, trackIndex, subTrackIndex):
return {key: val for key, val in data.items()
if isinstance(val, dict)
if subTrackIndex == val["subTrackIndex"]
if trackIndex == val["trackIndex"]}
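A hedged sketch of what the isinstance guards above accomplish (keys illustrative): the loaded json now mixes plain metadata entries with effect dicts, and only the dicts take part in the track/subtrack ordering.

    json_f = {
        "assignTo": "plateMain",  # hypothetical non-dict metadata entry -> skipped
        "Grade_1": {"trackIndex": 2, "subTrackIndex": 0},
        "Transform_1": {"trackIndex": 1, "subTrackIndex": 0},
    }
    # reorder_nodes(json_f) yields Transform_1 (track 1) before Grade_1 (track 2)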

View file

@ -5,13 +5,13 @@ from collections import OrderedDict
from openpype.hosts.nuke.api import lib
class LoadLutsInputProcess(api.Loader):
class LoadEffectsInputProcess(api.Loader):
"""Loading colorspace soft effect exported from nukestudio"""
representations = ["lutJson"]
families = ["lut"]
representations = ["effectJson"]
families = ["effect"]
label = "Load Luts - Input Process"
label = "Load Effects - Input Process"
order = 0
icon = "eye"
color = style.colors.alert
@ -67,15 +67,15 @@ class LoadLutsInputProcess(api.Loader):
for key, value in json.load(f).iteritems()}
# get correct order of nodes by positions on track and subtrack
nodes_order = self.reorder_nodes(json_f["effects"])
nodes_order = self.reorder_nodes(json_f)
# adding nodes to node graph
# just in case we are in a group, let's jump out of it
nuke.endGroup()
GN = nuke.createNode("Group")
GN["name"].setValue(object_name)
GN = nuke.createNode(
"Group",
"name {}_1".format(object_name))
# adding content to the group node
with GN:
@ -190,7 +190,7 @@ class LoadLutsInputProcess(api.Loader):
for key, value in json.load(f).iteritems()}
# get correct order of nodes by positions on track and subtrack
nodes_order = self.reorder_nodes(json_f["effects"])
nodes_order = self.reorder_nodes(json_f)
# adding nodes to node graph
# just in case we are in a group, let's jump out of it
@ -304,8 +304,10 @@ class LoadLutsInputProcess(api.Loader):
def reorder_nodes(self, data):
new_order = OrderedDict()
trackNums = [v["trackIndex"] for k, v in data.items()]
subTrackNums = [v["subTrackIndex"] for k, v in data.items()]
trackNums = [v["trackIndex"] for k, v in data.items()
if isinstance(v, dict)]
subTrackNums = [v["subTrackIndex"] for k, v in data.items()
if isinstance(v, dict)]
for trackIndex in range(
min(trackNums), max(trackNums) + 1):
@ -318,6 +320,7 @@ class LoadLutsInputProcess(api.Loader):
def get_item(self, data, trackIndex, subTrackIndex):
return {key: val for key, val in data.items()
if isinstance(val, dict)
if subTrackIndex == val["subTrackIndex"]
if trackIndex == val["trackIndex"]}

View file

@ -1,6 +1,7 @@
from openpype.hosts.nuke.api.lib import (
on_script_load,
check_inventory_versions
check_inventory_versions,
WorkfileSettings
)
import nuke
@ -9,8 +10,14 @@ from openpype.api import Logger
log = Logger().get_logger(__name__)
nuke.addOnScriptSave(on_script_load)
# fix ffmpeg settings on script
nuke.addOnScriptLoad(on_script_load)
# set checker for last versions on loaded containers
nuke.addOnScriptLoad(check_inventory_versions)
nuke.addOnScriptSave(check_inventory_versions)
# apply all workfile settings on script load
nuke.addOnScriptLoad(WorkfileSettings().set_context_settings)
log.info('Automatic syncing of write file knob to script version')

View file

@ -19,6 +19,10 @@ CREATE_PATH = os.path.join(PLUGINS_DIR, "create")
def on_instance_toggle(instance, old_value, new_value):
# Review may not have a real instance in workfile metadata
if not instance.data.get("uuid"):
return
instance_id = instance.data["uuid"]
found_idx = None
current_instances = pipeline.list_instances()

View file

@ -34,20 +34,6 @@ class TvpaintPrelaunchHook(PreLaunchHook):
"run", self.launch_script_path(), executable_path
)
# Add workfile to launch arguments
workfile_path = self.workfile_path()
if workfile_path:
new_launch_args.append(workfile_path)
# How to create new command line
# if platform.system().lower() == "windows":
# new_launch_args = [
# "cmd.exe",
# "/c",
# "Call cmd.exe /k",
# *new_launch_args
# ]
# Append as a whole list as these arguments should not be separated
self.launch_context.launch_args.append(new_launch_args)
@ -64,38 +50,4 @@ class TvpaintPrelaunchHook(PreLaunchHook):
"tvpaint",
"launch_script.py"
)
return script_path
def workfile_path(self):
workfile_path = self.data["last_workfile_path"]
# copy workfile from template if doesnt exist any on path
if not os.path.exists(workfile_path):
# TODO add ability to set different template workfile path via
# settings
pype_dir = os.path.dirname(os.path.abspath(tvpaint.__file__))
template_path = os.path.join(
pype_dir, "resources", "template.tvpp"
)
if not os.path.exists(template_path):
self.log.warning(
"Couldn't find workfile template file in {}".format(
template_path
)
)
return
self.log.info(
f"Creating workfile from template: \"{template_path}\""
)
# Copy template workfile to new destinantion
shutil.copy2(
os.path.normpath(template_path),
os.path.normpath(workfile_path)
)
self.log.info(f"Workfile to open: \"{workfile_path}\"")
return workfile_path
return script_path

View file

@ -1,19 +0,0 @@
from avalon.tvpaint import pipeline
from openpype.hosts.tvpaint.api import plugin
class CreateReview(plugin.Creator):
"""Review for global review of all layers."""
name = "review"
label = "Review"
family = "review"
icon = "cube"
defaults = ["Main"]
def process(self):
instances = pipeline.list_instances()
for instance in instances:
if instance["family"] == self.family:
self.log.info("Review family is already Created.")
return
super(CreateReview, self).process()

View file

@ -1,3 +1,4 @@
import os
import json
import copy
import pyblish.api
@ -17,6 +18,20 @@ class CollectInstances(pyblish.api.ContextPlugin):
json.dumps(workfile_instances, indent=4)
))
# Backwards compatibility for workfiles that already have review
# instance in metadata.
review_instance_exist = False
for instance_data in workfile_instances:
if instance_data["family"] == "review":
review_instance_exist = True
break
# Fake review instance if review was not found in metadata families
if not review_instance_exist:
workfile_instances.append(
self._create_review_instance_data(context)
)
for instance_data in workfile_instances:
instance_data["fps"] = context.data["sceneFps"]
@ -90,6 +105,16 @@ class CollectInstances(pyblish.api.ContextPlugin):
instance, json.dumps(instance.data, indent=4)
))
def _create_review_instance_data(self, context):
"""Fake review instance data."""
return {
"family": "review",
"asset": context.data["asset"],
# Dummy subset name
"subset": "reviewMain"
}
def create_render_layer_instance(self, context, instance_data):
name = instance_data["name"]
# Change label

View file

@ -0,0 +1,43 @@
import os
import json
import pyblish.api
from avalon import io
class CollectWorkfile(pyblish.api.ContextPlugin):
label = "Collect Workfile"
order = pyblish.api.CollectorOrder - 1
hosts = ["tvpaint"]
def process(self, context):
current_file = context.data["currentFile"]
self.log.info(
"Workfile path used for workfile family: {}".format(current_file)
)
dirpath, filename = os.path.split(current_file)
basename, ext = os.path.splitext(filename)
instance = context.create_instance(name=basename)
task_name = io.Session["AVALON_TASK"]
subset_name = "workfile" + task_name.capitalize()
# Create Workfile instance
instance.data.update({
"subset": subset_name,
"asset": context.data["asset"],
"label": subset_name,
"publish": True,
"family": "workfile",
"families": ["workfile"],
"representations": [{
"name": ext.lstrip("."),
"ext": ext.lstrip("."),
"files": filename,
"stagingDir": dirpath
}]
})
self.log.info("Collected workfile instance: {}".format(
json.dumps(instance.data, indent=4)
))

View file

@ -0,0 +1,49 @@
import pyblish.api
from avalon.tvpaint import save_file
class ValidateWorkfileMetadataRepair(pyblish.api.Action):
"""Store current context into workfile metadata."""
label = "Use current context"
icon = "wrench"
on = "failed"
def process(self, context, _plugin):
"""Save current workfile which should trigger storing of metadata."""
current_file = context.data["currentFile"]
# Save file should trigger
save_file(current_file)
class ValidateWorkfileMetadata(pyblish.api.ContextPlugin):
"""Validate if wokrfile contain required metadata for publising."""
label = "Validate Workfile Metadata"
order = pyblish.api.ValidatorOrder
families = ["workfile"]
actions = [ValidateWorkfileMetadataRepair]
required_keys = {"project", "asset", "task"}
def process(self, context):
workfile_context = context.data["workfile_context"]
if not workfile_context:
raise AssertionError(
"Current workfile is missing whole metadata about context."
)
missing_keys = []
for key in self.required_keys:
value = workfile_context.get(key)
if not value:
missing_keys.append(key)
if missing_keys:
raise AssertionError(
"Current workfile is missing metadata about {}.".format(
", ".join(missing_keys)
)
)

View file

@ -0,0 +1,66 @@
import unreal
from openpype.hosts.unreal.api.plugin import Creator
from avalon.unreal import pipeline
class CreateLook(Creator):
"""Shader connections defining shape look"""
name = "unrealLook"
label = "Unreal - Look"
family = "look"
icon = "paint-brush"
root = "/Game/Avalon/Assets"
suffix = "_INS"
def __init__(self, *args, **kwargs):
super(CreateLook, self).__init__(*args, **kwargs)
def process(self):
name = self.data["subset"]
selection = []
if (self.options or {}).get("useSelection"):
sel_objects = unreal.EditorUtilityLibrary.get_selected_assets()
selection = [a.get_path_name() for a in sel_objects]
# Create the folder
path = f"{self.root}/{self.data['asset']}"
new_name = pipeline.create_folder(path, name)
full_path = f"{path}/{new_name}"
# Create a new cube static mesh
ar = unreal.AssetRegistryHelpers.get_asset_registry()
cube = ar.get_asset_by_object_path("/Engine/BasicShapes/Cube.Cube")
# Create the avalon publish instance object
container_name = f"{name}{self.suffix}"
pipeline.create_publish_instance(
instance=container_name, path=full_path)
# Get the mesh of the selected object
original_mesh = ar.get_asset_by_object_path(selection[0]).get_asset()
materials = original_mesh.get_editor_property('materials')
self.data["members"] = []
# Add the materials to the cube
for material in materials:
name = material.get_editor_property('material_slot_name')
object_path = f"{full_path}/{name}.{name}"
object = unreal.EditorAssetLibrary.duplicate_loaded_asset(
cube.get_asset(), object_path
)
# Remove the default material of the cube object
object.get_editor_property('static_materials').pop()
object.add_material(
material.get_editor_property('material_interface'))
self.data["members"].append(object_path)
unreal.EditorAssetLibrary.save_asset(object_path)
pipeline.imprint(f"{full_path}/{container_name}", self.data)

View file

@ -1,4 +1,5 @@
import os
import json
from avalon import api, pipeline
from avalon.unreal import lib
@ -61,10 +62,16 @@ class AnimationFBXLoader(api.Loader):
task = unreal.AssetImportTask()
task.options = unreal.FbxImportUI()
# If there are no options, the process cannot be automated
if options:
libpath = self.fname.replace("fbx", "json")
with open(libpath, "r") as fp:
data = json.load(fp)
instance_name = data.get("instance_name")
if instance_name:
automated = True
actor_name = 'PersistentLevel.' + options.get('instance_name')
actor_name = 'PersistentLevel.' + instance_name
actor = unreal.EditorLevelLibrary.get_actor_reference(actor_name)
skeleton = actor.skeletal_mesh_component.skeletal_mesh.skeleton
task.options.set_editor_property('skeleton', skeleton)
@ -81,16 +88,31 @@ class AnimationFBXLoader(api.Loader):
# set import options here
task.options.set_editor_property(
'automated_import_should_detect_type', True)
'automated_import_should_detect_type', False)
task.options.set_editor_property(
'original_import_type', unreal.FBXImportType.FBXIT_ANIMATION)
'original_import_type', unreal.FBXImportType.FBXIT_SKELETAL_MESH)
task.options.set_editor_property(
'mesh_type_to_import', unreal.FBXImportType.FBXIT_ANIMATION)
task.options.set_editor_property('import_mesh', False)
task.options.set_editor_property('import_animations', True)
task.options.set_editor_property('override_full_name', True)
task.options.skeletal_mesh_import_data.set_editor_property(
'import_content_type',
unreal.FBXImportContentType.FBXICT_SKINNING_WEIGHTS
task.options.anim_sequence_import_data.set_editor_property(
'animation_length',
unreal.FBXAnimationLengthImportType.FBXALIT_EXPORTED_TIME
)
task.options.anim_sequence_import_data.set_editor_property(
'import_meshes_in_bone_hierarchy', False)
task.options.anim_sequence_import_data.set_editor_property(
'use_default_sample_rate', True)
task.options.anim_sequence_import_data.set_editor_property(
'import_custom_attribute', True)
task.options.anim_sequence_import_data.set_editor_property(
'import_bone_tracks', True)
task.options.anim_sequence_import_data.set_editor_property(
'remove_redundant_keys', True)
task.options.anim_sequence_import_data.set_editor_property(
'convert_scene', True)
unreal.AssetToolsHelpers.get_asset_tools().import_asset_tasks([task])
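The json side-car read here (self.fname.replace("fbx", "json")) is the one written by the Blender ExtractAnimationFBX change above; a hedged example of its content (instance name illustrative):

    {"instance_name": "characterA_rigMain_CON"}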

View file

@ -1,127 +0,0 @@
import json
from avalon import api
import unreal
class AnimationCollectionLoader(api.Loader):
"""Load Unreal SkeletalMesh from FBX"""
families = ["setdress"]
representations = ["json"]
label = "Load Animation Collection"
icon = "cube"
color = "orange"
def load(self, context, name, namespace, options):
from avalon import api, pipeline
from avalon.unreal import lib
from avalon.unreal import pipeline as unreal_pipeline
import unreal
# Create directory for asset and avalon container
root = "/Game/Avalon/Assets"
asset = context.get('asset').get('name')
suffix = "_CON"
tools = unreal.AssetToolsHelpers().get_asset_tools()
asset_dir, container_name = tools.create_unique_asset_name(
"{}/{}".format(root, asset), suffix="")
container_name += suffix
unreal.EditorAssetLibrary.make_directory(asset_dir)
libpath = self.fname
with open(libpath, "r") as fp:
data = json.load(fp)
all_loaders = api.discover(api.Loader)
for element in data:
reference = element.get('_id')
loaders = api.loaders_from_representation(all_loaders, reference)
loader = None
for l in loaders:
if l.__name__ == "AnimationFBXLoader":
loader = l
break
if not loader:
continue
instance_name = element.get('instance_name')
api.load(
loader,
reference,
namespace=instance_name,
options=element
)
# Create Asset Container
lib.create_avalon_container(
container=container_name, path=asset_dir)
data = {
"schema": "openpype:container-2.0",
"id": pipeline.AVALON_CONTAINER_ID,
"asset": asset,
"namespace": asset_dir,
"container_name": container_name,
"loader": str(self.__class__.__name__),
"representation": context["representation"]["_id"],
"parent": context["representation"]["parent"],
"family": context["representation"]["context"]["family"]
}
unreal_pipeline.imprint(
"{}/{}".format(asset_dir, container_name), data)
asset_content = unreal.EditorAssetLibrary.list_assets(
asset_dir, recursive=True, include_folder=True
)
return asset_content
def update(self, container, representation):
from avalon import api, io
from avalon.unreal import pipeline
source_path = api.get_representation_path(representation)
with open(source_path, "r") as fp:
data = json.load(fp)
animation_containers = [
i for i in pipeline.ls() if
i.get('asset') == container.get('asset') and
i.get('family') == 'animation']
for element in data:
new_version = io.find_one({"_id": io.ObjectId(element.get('_id'))})
new_version_number = new_version.get('context').get('version')
anim_container = None
for i in animation_containers:
if i.get('container_name') == (element.get('subset') + "_CON"):
anim_container = i
break
if not anim_container:
continue
api.update(anim_container, new_version_number)
container_path = "{}/{}".format(container["namespace"],
container["objectName"])
# update metadata
pipeline.imprint(
container_path,
{
"representation": str(representation["_id"]),
"parent": str(representation["parent"])
})
def remove(self, container):
unreal.EditorAssetLibrary.delete_directory(container["namespace"])

View file

@ -0,0 +1,120 @@
import json
import os
import unreal
from unreal import MaterialEditingLibrary as mat_lib
import openpype.api
class ExtractLook(openpype.api.Extractor):
"""Extract look."""
label = "Extract Look"
hosts = ["unreal"]
families = ["look"]
optional = True
def process(self, instance):
# Define extract output file path
stagingdir = self.staging_dir(instance)
resources_dir = instance.data["resourcesDir"]
ar = unreal.AssetRegistryHelpers.get_asset_registry()
transfers = []
json_data = []
for member in instance:
asset = ar.get_asset_by_object_path(member)
object = asset.get_asset()
name = asset.get_editor_property('asset_name')
json_element = {'material': str(name)}
material_obj = object.get_editor_property('static_materials')[0]
material = material_obj.material_interface
base_color = mat_lib.get_material_property_input_node(
material, unreal.MaterialProperty.MP_BASE_COLOR)
base_color_name = base_color.get_editor_property('parameter_name')
texture = mat_lib.get_material_default_texture_parameter_value(
material, base_color_name)
if texture:
# Export Texture
tga_filename = f"{instance.name}_{name}_texture.tga"
tga_exporter = unreal.TextureExporterTGA()
tga_export_task = unreal.AssetExportTask()
tga_export_task.set_editor_property('exporter', tga_exporter)
tga_export_task.set_editor_property('automated', True)
tga_export_task.set_editor_property('object', texture)
tga_export_task.set_editor_property(
'filename', f"{stagingdir}/{tga_filename}")
tga_export_task.set_editor_property('prompt', False)
tga_export_task.set_editor_property('selected', False)
unreal.Exporter.run_asset_export_task(tga_export_task)
json_element['tga_filename'] = tga_filename
transfers.append((
f"{stagingdir}/{tga_filename}",
f"{resources_dir}/{tga_filename}"))
fbx_filename = f"{instance.name}_{name}.fbx"
fbx_exporter = unreal.StaticMeshExporterFBX()
fbx_exporter.set_editor_property('text', False)
options = unreal.FbxExportOption()
options.set_editor_property('ascii', False)
options.set_editor_property('collision', False)
task = unreal.AssetExportTask()
task.set_editor_property('exporter', fbx_exporter)
task.set_editor_property('options', options)
task.set_editor_property('automated', True)
task.set_editor_property('object', object)
task.set_editor_property(
'filename', f"{stagingdir}/{fbx_filename}")
task.set_editor_property('prompt', False)
task.set_editor_property('selected', False)
unreal.Exporter.run_asset_export_task(task)
json_element['fbx_filename'] = fbx_filename
transfers.append((
f"{stagingdir}/{fbx_filename}",
f"{resources_dir}/{fbx_filename}"))
json_data.append(json_element)
json_filename = f"{instance.name}.json"
json_path = os.path.join(stagingdir, json_filename)
with open(json_path, "w+") as file:
json.dump(json_data, fp=file, indent=2)
if "transfers" not in instance.data:
instance.data["transfers"] = []
if "representations" not in instance.data:
instance.data["representations"] = []
json_representation = {
'name': 'json',
'ext': 'json',
'files': json_filename,
"stagingDir": stagingdir,
}
instance.data["representations"].append(json_representation)
instance.data["transfers"].extend(transfers)

View file

@ -81,7 +81,13 @@ from .avalon_context import (
get_creator_by_name,
change_timer_to_current_context
get_custom_workfile_template,
change_timer_to_current_context,
get_custom_workfile_template_by_context,
get_custom_workfile_template_by_string_context,
get_custom_workfile_template
)
from .local_settings import (
@ -192,6 +198,10 @@ __all__ = [
"change_timer_to_current_context",
"get_custom_workfile_template_by_context",
"get_custom_workfile_template_by_string_context",
"get_custom_workfile_template",
"IniSettingRegistry",
"JSONSettingRegistry",
"OpenPypeSecureRegistry",

View file

@ -440,7 +440,20 @@ class EnvironmentTool:
class ApplicationExecutable:
"""Representation of executable loaded from settings."""
def __init__(self, executable):
# On MacOS check if exists path to executable when ends with `.app`
# - it is common that path will lead to "/Applications/Blender" but
# real path is "/Applications/Blender.app"
if (
platform.system().lower() == "darwin"
and not os.path.exists(executable)
):
_executable = executable + ".app"
if os.path.exists(_executable):
executable = _executable
self.executable_path = executable
def __str__(self):
@ -1177,17 +1190,23 @@ def prepare_context_environments(data):
try:
workdir = get_workdir_with_workdir_data(workdir_data, anatomy)
if not os.path.exists(workdir):
log.debug(
"Creating workdir folder: \"{}\"".format(workdir)
)
os.makedirs(workdir)
except Exception as exc:
raise ApplicationLaunchFailed(
"Error in anatomy.format: {}".format(str(exc))
)
if not os.path.exists(workdir):
log.debug(
"Creating workdir folder: \"{}\"".format(workdir)
)
try:
os.makedirs(workdir)
except Exception as exc:
raise ApplicationLaunchFailed(
"Couldn't create workdir because: {}".format(str(exc))
)
context_env = {
"AVALON_PROJECT": project_doc["name"],
"AVALON_ASSET": asset_doc["name"],

View file

@ -3,6 +3,7 @@ import os
import json
import re
import copy
import platform
import logging
import collections
import functools
@ -755,18 +756,22 @@ class BuildWorkfile:
"""
host_name = avalon.api.registered_host().__name__.rsplit(".", 1)[-1]
presets = get_project_settings(avalon.io.Session["AVALON_PROJECT"])
# Get presets for host
build_presets = (
presets.get(host_name, {})
.get("workfile_build")
.get("profiles")
)
if not build_presets:
wb_settings = presets.get(host_name, {}).get("workfile_builder")
if not wb_settings:
# backward compatibility
wb_settings = presets.get(host_name, {}).get("workfile_build")
builder_presets = wb_settings.get("profiles")
if not builder_presets:
return
task_name_low = task_name.lower()
per_task_preset = None
for preset in build_presets:
for preset in builder_presets:
preset_tasks = preset.get("tasks") or []
preset_tasks_low = [task.lower() for task in preset_tasks]
if task_name_low in preset_tasks_low:
@ -1266,3 +1271,201 @@ def change_timer_to_current_context():
}
requests.post(rest_api_url, json=data)
def _get_task_context_data_for_anatomy(
project_doc, asset_doc, task_name, anatomy=None
):
"""Prepare Task context for anatomy data.
WARNING: this data structure is currently used only in workfile templates.
Key "task" is currently in rest of pipeline used as string with task
name.
Args:
project_doc (dict): Project document with available "name" and
"data.code" keys.
asset_doc (dict): Asset document from MongoDB.
task_name (str): Name of context task.
anatomy (Anatomy): Optionally, an Anatomy for the passed project name
can be provided, as Anatomy creation may be slow.
Returns:
dict: With Anatomy context data.
"""
if anatomy is None:
anatomy = Anatomy(project_doc["name"])
asset_name = asset_doc["name"]
project_task_types = anatomy["tasks"]
# get relevant task type from asset doc
assert task_name in asset_doc["data"]["tasks"], (
"Task name \"{}\" not found on asset \"{}\"".format(
task_name, asset_name
)
)
task_type = asset_doc["data"]["tasks"][task_name].get("type")
assert task_type, (
"Task name \"{}\" on asset \"{}\" does not have specified task type."
).format(task_name, asset_name)
# get short name for task type defined in default anatomy settings
project_task_type_data = project_task_types.get(task_type)
assert project_task_type_data, (
"Something went wrong. Default anatomy tasks are not holding"
"requested task type: `{}`".format(task_type)
)
return {
"project": {
"name": project_doc["name"],
"code": project_doc["data"].get("code")
},
"asset": asset_name,
"task": {
"name": task_name,
"type": task_type,
"short_name": project_task_type_data["short_name"]
}
}
def get_custom_workfile_template_by_context(
template_profiles, project_doc, asset_doc, task_name, anatomy=None
):
"""Filter and fill workfile template profiles by passed context.
It is expected that passed argument are already queried documents of
project and asset as parents of processing task name.
Existence of formatted path is not validated.
Args:
template_profiles(list): Template profiles from settings.
project_doc(dict): Project document from MongoDB.
asset_doc(dict): Asset document from MongoDB.
task_name(str): Name of task for which templates are filtered.
anatomy(Anatomy): Optionally passed anatomy object for passed project
name.
Returns:
str: Path to template or None if none of profiles match current
context. (Existence of formatted path is not validated.)
"""
from openpype.lib import filter_profiles
if anatomy is None:
anatomy = Anatomy(project_doc["name"])
# get project, asset, task anatomy context data
anatomy_context_data = _get_task_context_data_for_anatomy(
project_doc, asset_doc, task_name, anatomy
)
# add root dict
anatomy_context_data["root"] = anatomy.roots
# get task type for the task in context
current_task_type = anatomy_context_data["task"]["type"]
# get path from matching profile
matching_item = filter_profiles(
template_profiles,
{"task_type": current_task_type}
)
# when path is available try to format it in case
# there are some anatomy template strings
if matching_item:
template = matching_item["path"][platform.system().lower()]
return template.format(**anatomy_context_data)
return None
def get_custom_workfile_template_by_string_context(
template_profiles, project_name, asset_name, task_name,
dbcon=None, anatomy=None
):
"""Filter and fill workfile template profiles by passed context.
Passed context are string representations of project, asset and task.
Function will query documents of project and asset to be able use
`get_custom_workfile_template_by_context` for rest of logic.
Args:
template_profiles(list): Loaded workfile template profiles.
project_name(str): Project name.
asset_name(str): Asset name.
task_name(str): Task name.
dbcon(AvalonMongoDB): Optional avalon implementation of mongo
connection with context Session.
anatomy(Anatomy): Optionally prepared anatomy object for passed
project.
Returns:
str: Path to template or None if none of profiles match current
context. (Existence of formatted path is not validated.)
"""
if dbcon is None:
from avalon.api import AvalonMongoDB
dbcon = AvalonMongoDB()
dbcon.install()
if dbcon.Session["AVALON_PROJECT"] != project_name:
dbcon.Session["AVALON_PROJECT"] = project_name
project_doc = dbcon.find_one(
{"type": "project"},
# All we need is "name" and "data.code" keys
{
"name": 1,
"data.code": 1
}
)
asset_doc = dbcon.find_one(
{
"type": "asset",
"name": asset_name
},
# All we need is "name" and "data.tasks" keys
{
"name": 1,
"data.tasks": 1
}
)
return get_custom_workfile_template_by_context(
template_profiles, project_doc, asset_doc, task_name, anatomy
)
def get_custom_workfile_template(template_profiles):
"""Filter and fill workfile template profiles by current context.
Current context is defined by `avalon.api.Session`. That's why this
function should be used only inside host where context is set and stable.
Args:
template_profiles(list): Template profiles from settings.
Returns:
str: Path to template or None if none of profiles match current
context. (Existence of formatted path is not validated.)
"""
# Use `avalon.io` as Mongo connection
from avalon import io
return get_custom_workfile_template_by_string_context(
template_profiles,
io.Session["AVALON_PROJECT"],
io.Session["AVALON_ASSET"],
io.Session["AVALON_TASK"],
io
)
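A minimal usage sketch for the new helpers. The profile shape below is an assumption inferred from how get_custom_workfile_template_by_context reads it ("task_type" as the filter key, "path" keyed by lowercase platform name); the project, asset and task names are hypothetical:

import os

from openpype.lib import get_custom_workfile_template_by_string_context

# hypothetical settings-style profile
template_profiles = [{
    "task_type": ["Compositing"],
    "path": {
        "windows": "{root[work]}/{project[name]}/templates/comp.nk",
        "linux": "{root[work]}/{project[name]}/templates/comp.nk",
        "darwin": "{root[work]}/{project[name]}/templates/comp.nk"
    }
}]

template_path = get_custom_workfile_template_by_string_context(
    template_profiles, "MyProject", "sh010", "Compositing"
)
# existence of the formatted path is not validated by the helper
if template_path and os.path.exists(template_path):
    print("Using workfile template:", template_path)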

306
openpype/lib/delivery.py Normal file
View file

@ -0,0 +1,306 @@
"""Functions useful for delivery action or loader"""
import os
import shutil
import clique
import collections
def collect_frames(files):
"""
Returns dict of source path and its frame, if from sequence
Uses clique as the most precise solution.
Args:
files(list): list of source paths
Returns:
(dict): {'/asset/subset_v001.0001.png': '0001', ....}
"""
frame_collections, remainder = clique.assemble(files)
sources_and_frames = {}
if frame_collections:
for collection in frame_collections:
src_head = collection.head
src_tail = collection.tail
for index in collection.indexes:
src_frame = collection.format("{padding}") % index
src_file_name = "{}{}{}".format(src_head, src_frame,
src_tail)
sources_and_frames[src_file_name] = src_frame
else:
sources_and_frames[remainder.pop()] = None
return sources_and_frames
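# Illustration (hypothetical paths): for a padded sequence,
# collect_frames() maps each file to its frame string:
#   collect_frames(["/asset/subset_v001.0001.png",
#                   "/asset/subset_v001.0002.png"])
#   -> {"/asset/subset_v001.0001.png": "0001",
#       "/asset/subset_v001.0002.png": "0002"}
# A single non-sequence file is a remainder and maps to None instead.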
def sizeof_fmt(num, suffix='B'):
"""Returns formatted string with size in appropriate unit"""
for unit in ['', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi']:
if abs(num) < 1024.0:
return "%3.1f%s%s" % (num, unit, suffix)
num /= 1024.0
return "%.1f%s%s" % (num, 'Yi', suffix)
def path_from_representation(representation, anatomy):
from avalon import pipeline # safer importing
try:
template = representation["data"]["template"]
except KeyError:
return None
try:
context = representation["context"]
context["root"] = anatomy.roots
path = pipeline.format_template_with_optional_keys(
context, template
)
except KeyError:
# Template references unavailable data
return None
return os.path.normpath(path)
def copy_file(src_path, dst_path):
"""Hardlink file if possible(to save space), copy if not"""
from avalon.vendor import filelink # safer importing
if os.path.exists(dst_path):
return
try:
filelink.create(
src_path,
dst_path,
filelink.HARDLINK
)
except OSError:
shutil.copyfile(src_path, dst_path)
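# Note: hardlinks raise OSError across filesystems/drives (and on
# shares that forbid them), which is exactly when the plain copy
# fallback above kicks in.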
def get_format_dict(anatomy, location_path):
"""Returns replaced root values from user provider value.
Args:
anatomy (Anatomy)
location_path (str): user provided value
Returns:
(dict): prepared for formatting of a template
"""
format_dict = {}
if location_path:
location_path = location_path.replace("\\", "/")
root_names = anatomy.root_names_from_templates(
anatomy.templates["delivery"]
)
if root_names is None:
format_dict["root"] = location_path
else:
format_dict["root"] = {}
for name in root_names:
format_dict["root"][name] = location_path
return format_dict
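# Resulting shapes (root names here are hypothetical and come from the
# project's delivery templates):
#   single root:  {"root": "C:/delivery"}
#   named roots:  {"root": {"work": "C:/delivery", "publish": "C:/delivery"}}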
def check_destination_path(repre_id,
anatomy, anatomy_data,
datetime_data, template_name):
""" Try to create destination path based on 'template_name'.
In the case that the path cannot be filled (the template contains
unmatched keys), provide an error message to filter out the repre later.
Args:
anatomy (Anatomy)
anatomy_data (dict): context to fill anatomy
datetime_data (dict): values with actual date
template_name (str): to pick correct delivery template
Returns:
(collections.defaultdict): {"TYPE_OF_ERROR": "ERROR_DETAIL"}
"""
anatomy_data.update(datetime_data)
anatomy_filled = anatomy.format_all(anatomy_data)
dest_path = anatomy_filled["delivery"][template_name]
report_items = collections.defaultdict(list)
if not dest_path.solved:
msg = (
"Missing keys in Representation's context"
" for anatomy template \"{}\"."
).format(template_name)
sub_msg = (
"Representation: {}<br>"
).format(repre_id)
if dest_path.missing_keys:
keys = ", ".join(dest_path.missing_keys)
sub_msg += (
"- Missing keys: \"{}\"<br>"
).format(keys)
if dest_path.invalid_types:
items = []
for key, value in dest_path.invalid_types.items():
items.append("\"{}\" {}".format(key, str(value)))
keys = ", ".join(items)
sub_msg += (
"- Invalid value DataType: \"{}\"<br>"
).format(keys)
report_items[msg].append(sub_msg)
return report_items
def process_single_file(
src_path, repre, anatomy, template_name, anatomy_data, format_dict,
report_items, log
):
"""Copy single file to calculated path based on template
Args:
src_path(str): path of source representation file
repre (dict): full repre; used only in process_sequence, kept here
to share the same signature
anatomy (Anatomy)
template_name (string): user selected delivery template name
anatomy_data (dict): data from repre to fill anatomy with
format_dict (dict): root dictionary with names and values
report_items (collections.defaultdict): to return error messages
log (Logger): for log printing
Returns:
(collections.defaultdict , int)
"""
if not os.path.exists(src_path):
msg = "{} doesn't exist for {}".format(src_path,
repre["_id"])
report_items["Source file was not found"].append(msg)
return report_items, 0
anatomy_filled = anatomy.format(anatomy_data)
if format_dict:
template_result = anatomy_filled["delivery"][template_name]
delivery_path = template_result.rootless.format(**format_dict)
else:
delivery_path = anatomy_filled["delivery"][template_name]
# context.representation could be .psd
delivery_path = delivery_path.replace("..", ".")
delivery_folder = os.path.dirname(delivery_path)
if not os.path.exists(delivery_folder):
os.makedirs(delivery_folder)
log.debug("Copying single: {} -> {}".format(src_path, delivery_path))
copy_file(src_path, delivery_path)
return report_items, 1
def process_sequence(
src_path, repre, anatomy, template_name, anatomy_data, format_dict,
report_items, log
):
""" For Pype2(mainly - works in 3 too) where representation might not
contain files.
Uses listing physical files (not 'files' on repre as a)might not be
present, b)might not be reliable for representation and copying them.
TODO Should be refactored when files are sufficient to drive all
representations.
Args:
src_path(str): path of source representation file
repre (dict): full representation
anatomy (Anatomy)
template_name (string): user selected delivery template name
anatomy_data (dict): data from repre to fill anatomy with
format_dict (dict): root dictionary with names and values
report_items (collections.defaultdict): to return error messages
log (Logger): for log printing
Returns:
(collections.defaultdict , int)
"""
if not os.path.exists(src_path):
msg = "{} doesn't exist for {}".format(src_path,
repre["_id"])
report_items["Source file was not found"].append(msg)
return report_items, 0
dir_path, file_name = os.path.split(str(src_path))
context = repre["context"]
ext = context.get("ext", context.get("representation"))
if not ext:
msg = "Source extension not found, cannot find collection"
report_items[msg].append(src_path)
log.warning("{} <{}>".format(msg, context))
return report_items, 0
ext = "." + ext
# context.representation could be .psd
ext = ext.replace("..", ".")
src_collections, remainder = clique.assemble(os.listdir(dir_path))
src_collection = None
for col in src_collections:
if col.tail != ext:
continue
src_collection = col
break
if src_collection is None:
msg = "Source collection of files was not found"
report_items[msg].append(src_path)
log.warning("{} <{}>".format(msg, src_path))
return report_items, 0
frame_indicator = "@####@"
anatomy_data["frame"] = frame_indicator
anatomy_filled = anatomy.format(anatomy_data)
if format_dict:
template_result = anatomy_filled["delivery"][template_name]
delivery_path = template_result.rootless.format(**format_dict)
else:
delivery_path = anatomy_filled["delivery"][template_name]
delivery_folder = os.path.dirname(delivery_path)
dst_head, dst_tail = delivery_path.split(frame_indicator)
dst_padding = src_collection.padding
dst_collection = clique.Collection(
head=dst_head,
tail=dst_tail,
padding=dst_padding
)
if not os.path.exists(delivery_folder):
os.makedirs(delivery_folder)
src_head = src_collection.head
src_tail = src_collection.tail
uploaded = 0
for index in src_collection.indexes:
src_padding = src_collection.format("{padding}") % index
src_file_name = "{}{}{}".format(src_head, src_padding, src_tail)
src = os.path.normpath(
os.path.join(dir_path, src_file_name)
)
dst_padding = dst_collection.format("{padding}") % index
dst = "{}{}{}".format(dst_head, dst_padding, dst_tail)
log.debug("Copying single: {} -> {}".format(src, dst))
copy_file(src, dst)
uploaded += 1
return report_items, uploaded
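The "@####@" placeholder used above is easiest to follow on a concrete path (values here are hypothetical):

frame_indicator = "@####@"
delivery_path = "/delivery/sh010_comp_v001.@####@.exr"
dst_head, dst_tail = delivery_path.split(frame_indicator)
# dst_head == "/delivery/sh010_comp_v001.", dst_tail == ".exr"
# A clique.Collection built from these plus the source padding then
# yields "/delivery/sh010_comp_v001.0001.exr" and so on per index.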

View file

@ -139,6 +139,25 @@ class ITrayModule:
"""
pass
def execute_in_main_thread(self, callback):
""" Pushes callback to the queue or process 'callback' on a main thread
Some callbacks need to be processed on main thread (menu actions
must be added on main thread or they won't get triggered etc.)
"""
# called without initialized tray, still main thread needed
if not self.tray_initialized:
try:
callback = self._main_thread_callbacks.popleft()
callback()
except Exception:
self.log.warning(
"Failed to execute {} in main thread".format(callback),
exc_info=True)
return
self.manager.tray_manager.execute_in_main_thread(callback)
def show_tray_message(self, title, message, icon=None, msecs=None):
"""Show tray message.
@ -680,6 +699,10 @@ class TrayModulesManager(ModulesManager):
output.append(module)
return output
def restart_tray(self):
if self.tray_manager:
self.tray_manager.restart()
def tray_init(self):
report = {}
time_start = time.time()

View file

@ -105,7 +105,7 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
families = ["render.farm", "prerender.farm",
"renderlayer", "imagesequence", "vrayscene"]
aov_filter = {"maya": [r".+(?:\.|_)([Bb]eauty)(?:\.|_).*"],
aov_filter = {"maya": [r".*(?:\.|_)?([Bb]eauty)(?:\.|_)?.*"],
"aftereffects": [r".*"], # for everything from AE
"harmony": [r".*"], # for everything from AE
"celaction": [r".*"]}

View file

@ -1101,9 +1101,6 @@ class SyncToAvalonEvent(BaseEvent):
# Parents, Hierarchy
ent_path_items = [ent["name"] for ent in ftrack_ent["link"]]
parents = ent_path_items[1:len(ent_path_items)-1:]
hierarchy = ""
if len(parents) > 0:
hierarchy = os.path.sep.join(parents)
# TODO logging
self.log.debug(
@ -1132,7 +1129,6 @@ class SyncToAvalonEvent(BaseEvent):
"ftrackId": ftrack_ent["id"],
"entityType": ftrack_ent.entity_type,
"parents": parents,
"hierarchy": hierarchy,
"tasks": {},
"visualParent": vis_par
}
@ -1975,14 +1971,9 @@ class SyncToAvalonEvent(BaseEvent):
if cur_par == parents:
continue
hierarchy = ""
if len(parents) > 0:
hierarchy = os.path.sep.join(parents)
if "data" not in self.updates[mongo_id]:
self.updates[mongo_id]["data"] = {}
self.updates[mongo_id]["data"]["parents"] = parents
self.updates[mongo_id]["data"]["hierarchy"] = hierarchy
# Skip custom attributes if didn't change
if not hier_cust_attrs_ids:

View file

@ -11,23 +11,28 @@ from avalon.api import AvalonMongoDB
class DeleteAssetSubset(BaseAction):
'''Edit meta data action.'''
#: Action identifier.
# Action identifier.
identifier = "delete.asset.subset"
#: Action label.
# Action label.
label = "Delete Asset/Subsets"
#: Action description.
# Action description.
description = "Removes from Avalon with all childs and asset from Ftrack"
icon = statics_icon("ftrack", "action_icons", "DeleteAsset.svg")
settings_key = "delete_asset_subset"
#: Db connection
dbcon = AvalonMongoDB()
# Db connection
dbcon = None
splitter = {"type": "label", "value": "---"}
action_data_by_id = {}
asset_prefix = "asset:"
subset_prefix = "subset:"
def __init__(self, *args, **kwargs):
self.dbcon = AvalonMongoDB()
super(DeleteAssetSubset, self).__init__(*args, **kwargs)
def discover(self, session, entities, event):
""" Validation """
task_ids = []
@ -446,7 +451,14 @@ class DeleteAssetSubset(BaseAction):
if len(assets_to_delete) > 0:
map_av_ftrack_id = spec_data["without_ftrack_id"]
# Prepare data when deleting whole avalon asset
avalon_assets = self.dbcon.find({"type": "asset"})
avalon_assets = self.dbcon.find(
{"type": "asset"},
{
"_id": 1,
"data.visualParent": 1,
"data.ftrackId": 1
}
)
avalon_assets_by_parent = collections.defaultdict(list)
for asset in avalon_assets:
asset_id = asset["_id"]
@ -537,11 +549,13 @@ class DeleteAssetSubset(BaseAction):
ftrack_proc_txt, ", ".join(ftrack_ids_to_delete)
))
ftrack_ents_to_delete = (
entities_by_link_len = (
self._filter_entities_to_delete(ftrack_ids_to_delete, session)
)
for entity in ftrack_ents_to_delete:
session.delete(entity)
for link_len in sorted(entities_by_link_len.keys(), reverse=True):
for entity in entities_by_link_len[link_len]:
session.delete(entity)
try:
session.commit()
except Exception:
@ -600,29 +614,11 @@ class DeleteAssetSubset(BaseAction):
joined_ids_to_delete
)
).all()
filtered = to_delete_entities[:]
while True:
changed = False
_filtered = filtered[:]
for entity in filtered:
entity_id = entity["id"]
entities_by_link_len = collections.defaultdict(list)
for entity in to_delete_entities:
entities_by_link_len[len(entity["link"])].append(entity)
for _entity in tuple(_filtered):
if entity_id == _entity["id"]:
continue
for _link in _entity["link"]:
if entity_id == _link["id"] and _entity in _filtered:
_filtered.remove(_entity)
changed = True
break
filtered = _filtered
if not changed:
break
return filtered
return entities_by_link_len
def report_handle(self, report_messages, project_name, event):
if not report_messages:
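The link-length grouping above is a simpler way to guarantee children are deleted before their parents: an entity's "link" is its path from the project root, so deeper entities always have longer links. A minimal sketch of the idea (entities here are hypothetical dicts):

import collections

entities = [
    {"id": "shot", "link": [{"id": "proj"}, {"id": "seq"}, {"id": "shot"}]},
    {"id": "seq", "link": [{"id": "proj"}, {"id": "seq"}]},
]

entities_by_link_len = collections.defaultdict(list)
for entity in entities:
    entities_by_link_len[len(entity["link"])].append(entity)

# Deepest first: "shot" is deleted before "seq", so no entity is ever
# deleted after its parent within the same commit.
for link_len in sorted(entities_by_link_len, reverse=True):
    for entity in entities_by_link_len[link_len]:
        print("delete", entity["id"])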

View file

@ -1,18 +1,20 @@
import os
import copy
import json
import shutil
import collections
import clique
from bson.objectid import ObjectId
from avalon import pipeline
from avalon.vendor import filelink
from openpype.api import Anatomy, config
from openpype.modules.ftrack.lib import BaseAction, statics_icon
from openpype.modules.ftrack.lib.avalon_sync import CUST_ATTR_ID_KEY
from openpype.lib.delivery import (
path_from_representation,
get_format_dict,
check_destination_path,
process_single_file,
process_sequence
)
from avalon.api import AvalonMongoDB
@ -72,7 +74,7 @@ class Delivery(BaseAction):
"value": project_name
})
# Prpeare anatomy data
# Prepare anatomy data
anatomy = Anatomy(project_name)
new_anatomies = []
first = None
@ -366,12 +368,18 @@ class Delivery(BaseAction):
def launch(self, session, entities, event):
if "values" not in event["data"]:
return
return {
"success": True,
"message": "Nothing to do"
}
values = event["data"]["values"]
skipped = values.pop("__skipped__")
if skipped:
return None
return {
"success": False,
"message": "Action skipped"
}
user_id = event["source"]["user"]["id"]
user_entity = session.query(
@ -389,27 +397,45 @@ class Delivery(BaseAction):
try:
self.db_con.install()
self.real_launch(session, entities, event)
job["status"] = "done"
report = self.real_launch(session, entities, event)
except Exception:
except Exception as exc:
report = {
"success": False,
"title": "Delivery failed",
"items": [{
"type": "label",
"value": (
"Error during delivery action process:<br>{}"
"<br><br>Check logs for more information."
).format(str(exc))
}]
}
self.log.warning(
"Failed during processing delivery action.",
exc_info=True
)
finally:
if job["status"] != "done":
if report["success"]:
job["status"] = "done"
else:
job["status"] = "failed"
session.commit()
self.db_con.uninstall()
if job["status"] == "failed":
if not report["success"]:
self.show_interface(
items=report["items"],
title=report["title"],
event=event
)
return {
"success": False,
"message": "Delivery failed. Check logs for more information."
"message": "Errors during delivery process. See report."
}
return True
return report
def real_launch(self, session, entities, event):
self.log.info("Delivery action just started.")
@ -429,7 +455,7 @@ class Delivery(BaseAction):
if not repre_names:
return {
"success": True,
"message": "Not selected components to deliver."
"message": "No selected components to deliver."
}
location_path = location_path.strip()
@ -450,18 +476,7 @@ class Delivery(BaseAction):
anatomy = Anatomy(project_name)
format_dict = {}
if location_path:
location_path = location_path.replace("\\", "/")
root_names = anatomy.root_names_from_templates(
anatomy.templates["delivery"]
)
if root_names is None:
format_dict["root"] = location_path
else:
format_dict["root"] = {}
for name in root_names:
format_dict["root"][name] = location_path
format_dict = get_format_dict(anatomy, location_path)
datetime_data = config.get_datetime_data()
for repre in repres_to_deliver:
@ -471,41 +486,15 @@ class Delivery(BaseAction):
debug_msg += " with published path {}.".format(source_path)
self.log.debug(debug_msg)
# Get destination repre path
anatomy_data = copy.deepcopy(repre["context"])
anatomy_data.update(datetime_data)
anatomy_filled = anatomy.format_all(anatomy_data)
test_path = anatomy_filled["delivery"][anatomy_name]
repre_report_items = check_destination_path(repre["_id"],
anatomy,
anatomy_data,
datetime_data,
anatomy_name)
if not test_path.solved:
msg = (
"Missing keys in Representation's context"
" for anatomy template \"{}\"."
).format(anatomy_name)
if test_path.missing_keys:
keys = ", ".join(test_path.missing_keys)
sub_msg = (
"Representation: {}<br>- Missing keys: \"{}\"<br>"
).format(str(repre["_id"]), keys)
if test_path.invalid_types:
items = []
for key, value in test_path.invalid_types.items():
items.append("\"{}\" {}".format(key, str(value)))
keys = ", ".join(items)
sub_msg = (
"Representation: {}<br>"
"- Invalid value DataType: \"{}\"<br>"
).format(str(repre["_id"]), keys)
report_items[msg].append(sub_msg)
self.log.warning(
"{} Representation: \"{}\" Filled: <{}>".format(
msg, str(repre["_id"]), str(test_path)
)
)
if repre_report_items:
report_items.update(repre_report_items)
continue
# Get source repre path
@ -514,153 +503,30 @@ class Delivery(BaseAction):
if frame:
repre["context"]["frame"] = len(str(frame)) * "#"
repre_path = self.path_from_represenation(repre, anatomy)
repre_path = path_from_representation(repre, anatomy)
# TODO add backup solution where root of path from component
# is repalced with root
# is replaced with root
args = (
repre_path,
repre,
anatomy,
anatomy_name,
anatomy_data,
format_dict,
report_items
report_items,
self.log
)
if not frame:
self.process_single_file(*args)
process_single_file(*args)
else:
self.process_sequence(*args)
process_sequence(*args)
return self.report(report_items)
def process_single_file(
self, repre_path, anatomy, anatomy_name, anatomy_data, format_dict,
report_items
):
anatomy_filled = anatomy.format(anatomy_data)
if format_dict:
template_result = anatomy_filled["delivery"][anatomy_name]
delivery_path = template_result.rootless.format(**format_dict)
else:
delivery_path = anatomy_filled["delivery"][anatomy_name]
delivery_folder = os.path.dirname(delivery_path)
if not os.path.exists(delivery_folder):
os.makedirs(delivery_folder)
self.copy_file(repre_path, delivery_path)
def process_sequence(
self, repre_path, anatomy, anatomy_name, anatomy_data, format_dict,
report_items
):
dir_path, file_name = os.path.split(str(repre_path))
base_name, ext = os.path.splitext(file_name)
file_name_items = None
if "#" in base_name:
file_name_items = [part for part in base_name.split("#") if part]
elif "%" in base_name:
file_name_items = base_name.split("%")
if not file_name_items:
msg = "Source file was not found"
report_items[msg].append(repre_path)
self.log.warning("{} <{}>".format(msg, repre_path))
return
src_collections, remainder = clique.assemble(os.listdir(dir_path))
src_collection = None
for col in src_collections:
if col.tail != ext:
continue
# skip if collection don't have same basename
if not col.head.startswith(file_name_items[0]):
continue
src_collection = col
break
if src_collection is None:
# TODO log error!
msg = "Source collection of files was not found"
report_items[msg].append(repre_path)
self.log.warning("{} <{}>".format(msg, repre_path))
return
frame_indicator = "@####@"
anatomy_data["frame"] = frame_indicator
anatomy_filled = anatomy.format(anatomy_data)
if format_dict:
template_result = anatomy_filled["delivery"][anatomy_name]
delivery_path = template_result.rootless.format(**format_dict)
else:
delivery_path = anatomy_filled["delivery"][anatomy_name]
delivery_folder = os.path.dirname(delivery_path)
dst_head, dst_tail = delivery_path.split(frame_indicator)
dst_padding = src_collection.padding
dst_collection = clique.Collection(
head=dst_head,
tail=dst_tail,
padding=dst_padding
)
if not os.path.exists(delivery_folder):
os.makedirs(delivery_folder)
src_head = src_collection.head
src_tail = src_collection.tail
for index in src_collection.indexes:
src_padding = src_collection.format("{padding}") % index
src_file_name = "{}{}{}".format(src_head, src_padding, src_tail)
src = os.path.normpath(
os.path.join(dir_path, src_file_name)
)
dst_padding = dst_collection.format("{padding}") % index
dst = "{}{}{}".format(dst_head, dst_padding, dst_tail)
self.copy_file(src, dst)
def path_from_represenation(self, representation, anatomy):
try:
template = representation["data"]["template"]
except KeyError:
return None
try:
context = representation["context"]
context["root"] = anatomy.roots
path = pipeline.format_template_with_optional_keys(
context, template
)
except KeyError:
# Template references unavailable data
return None
return os.path.normpath(path)
def copy_file(self, src_path, dst_path):
if os.path.exists(dst_path):
return
try:
filelink.create(
src_path,
dst_path,
filelink.HARDLINK
)
except OSError:
shutil.copyfile(src_path, dst_path)
def report(self, report_items):
"""Returns dict with final status of delivery (succes, fail etc.)."""
items = []
title = "Delivery report"
for msg, _items in report_items.items():
if not _items:
continue
@ -691,9 +557,8 @@ class Delivery(BaseAction):
return {
"items": items,
"title": title,
"success": False,
"message": "Delivery Finished"
"title": "Delivery report",
"success": False
}

View file

@ -422,17 +422,18 @@ def run_event_server(
ftrack_url,
ftrack_user,
ftrack_api_key,
ftrack_events_path,
no_stored_credentials,
store_credentials,
legacy,
clockify_api_key,
clockify_workspace
):
if not no_stored_credentials:
if not ftrack_user or not ftrack_api_key:
print((
"Ftrack user/api key were not passed."
" Trying to use credentials from user keyring."
))
cred = credentials.get_credentials(ftrack_url)
username = cred.get('username')
api_key = cred.get('api_key')
ftrack_user = cred.get("username")
ftrack_api_key = cred.get("api_key")
if clockify_workspace and clockify_api_key:
os.environ["CLOCKIFY_WORKSPACE"] = clockify_workspace
@ -445,209 +446,16 @@ def run_event_server(
return 1
# Validate entered credentials
if not validate_credentials(ftrack_url, username, api_key):
if not validate_credentials(ftrack_url, ftrack_user, ftrack_api_key):
print('Exiting! < Please enter valid credentials >')
return 1
if store_credentials:
credentials.save_credentials(username, api_key, ftrack_url)
# Set Ftrack environments
os.environ["FTRACK_SERVER"] = ftrack_url
os.environ["FTRACK_API_USER"] = username
os.environ["FTRACK_API_KEY"] = api_key
# TODO This won't work probably
if ftrack_events_path:
if isinstance(ftrack_events_path, (list, tuple)):
ftrack_events_path = os.pathsep.join(ftrack_events_path)
os.environ["FTRACK_EVENTS_PATH"] = ftrack_events_path
os.environ["FTRACK_API_USER"] = ftrack_user
os.environ["FTRACK_API_KEY"] = ftrack_api_key
if legacy:
return legacy_server(ftrack_url)
return main_loop(ftrack_url)
def main(argv):
'''
There are 4 values necessary for event server:
1.) Ftrack url - "studio.ftrackapp.com"
2.) Username - "my.username"
3.) API key - "apikey-long11223344-6665588-5565"
4.) Path/s to events - "X:/path/to/folder/with/events"
All these values can be entered with arguments or environment variables.
- arguments:
"-ftrackurl {url}"
"-ftrackuser {username}"
"-ftrackapikey {api key}"
"-ftrackeventpaths {path to events}"
- environment variables:
FTRACK_SERVER
FTRACK_API_USER
FTRACK_API_KEY
FTRACK_EVENTS_PATH
Credentials (Username & API key):
- Credentials can be stored for auto load on next start
- To *Store/Update* these values add argument "-storecred"
- They will be stored to appsdir file when login is successful
- To *Update/Override* values with environment variables you also need to:
- *don't enter argument for that value*
- add argument "-noloadcred" (currently stored credentials won't be loaded)
Order of getting values:
1.) Arguments are always used when entered.
- entered values through args have most priority! (in each case)
2.) Credentials are tried to load from appsdir file.
- skipped when credentials were entered through args or credentials
are not stored yet
- can be skipped with "-noloadcred" argument
3.) Environment variables are last source of values.
- will try to get not yet set values from environments
Best practice:
- set environment variables FTRACK_SERVER & FTRACK_EVENTS_PATH
- launch event_server_cli with args:
~/event_server_cli.py -ftrackuser "{username}" -ftrackapikey "{API key}" -storecred
- next time launch event_server_cli.py only with set environment variables
FTRACK_SERVER & FTRACK_EVENTS_PATH
'''
parser = argparse.ArgumentParser(description='Ftrack event server')
parser.add_argument(
"-ftrackurl", type=str, metavar='FTRACKURL',
help=(
"URL to ftrack server where events should handle"
" (default from environment: $FTRACK_SERVER)"
)
)
parser.add_argument(
"-ftrackuser", type=str,
help=(
"Username should be the username of the user in ftrack"
" to record operations against."
" (default from environment: $FTRACK_API_USER)"
)
)
parser.add_argument(
"-ftrackapikey", type=str,
help=(
"Should be the API key to use for authentication"
" (default from environment: $FTRACK_API_KEY)"
)
)
parser.add_argument(
"-ftrackeventpaths", nargs='+',
help=(
"List of paths where events are stored."
" (default from environment: $FTRACK_EVENTS_PATH)"
)
)
parser.add_argument(
'-storecred',
help=(
"Entered credentials will be also stored"
" to apps dir for future usage"
),
action="store_true"
)
parser.add_argument(
'-noloadcred',
help="Load creadentials from apps dir",
action="store_true"
)
parser.add_argument(
'-legacy',
help="Load creadentials from apps dir",
action="store_true"
)
parser.add_argument(
"-clockifyapikey", type=str,
help=(
"Enter API key for Clockify actions."
" (default from environment: $CLOCKIFY_API_KEY)"
)
)
parser.add_argument(
"-clockifyworkspace", type=str,
help=(
"Enter workspace for Clockify."
" (default from module presets or "
"environment: $CLOCKIFY_WORKSPACE)"
)
)
ftrack_url = os.environ.get("FTRACK_SERVER")
username = os.environ.get("FTRACK_API_USER")
api_key = os.environ.get("FTRACK_API_KEY")
kwargs, args = parser.parse_known_args(argv)
if kwargs.ftrackurl:
ftrack_url = kwargs.ftrackurl
# Load Ftrack url from settings if not set
if not ftrack_url:
ftrack_url = get_ftrack_url_from_settings()
event_paths = None
if kwargs.ftrackeventpaths:
event_paths = kwargs.ftrackeventpaths
if not kwargs.noloadcred:
cred = credentials.get_credentials(ftrack_url)
username = cred.get('username')
api_key = cred.get('api_key')
if kwargs.ftrackuser:
username = kwargs.ftrackuser
if kwargs.ftrackapikey:
api_key = kwargs.ftrackapikey
if kwargs.clockifyworkspace:
os.environ["CLOCKIFY_WORKSPACE"] = kwargs.clockifyworkspace
if kwargs.clockifyapikey:
os.environ["CLOCKIFY_API_KEY"] = kwargs.clockifyapikey
legacy = kwargs.legacy
# Check url regex and accessibility
ftrack_url = check_ftrack_url(ftrack_url)
if not ftrack_url:
print('Exiting! < Please enter Ftrack server url >')
return 1
# Validate entered credentials
if not validate_credentials(ftrack_url, username, api_key):
print('Exiting! < Please enter valid credentials >')
return 1
if kwargs.storecred:
credentials.save_credentials(username, api_key, ftrack_url)
# Set Ftrack environments
os.environ["FTRACK_SERVER"] = ftrack_url
os.environ["FTRACK_API_USER"] = username
os.environ["FTRACK_API_KEY"] = api_key
if event_paths:
if isinstance(event_paths, (list, tuple)):
event_paths = os.pathsep.join(event_paths)
os.environ["FTRACK_EVENTS_PATH"] = event_paths
if legacy:
return legacy_server(ftrack_url)
return main_loop(ftrack_url)
if __name__ == "__main__":
# Register interrupt signal
def signal_handler(sig, frame):
print("You pressed Ctrl+C. Process ended.")
sys.exit(0)
signal.signal(signal.SIGINT, signal_handler)
signal.signal(signal.SIGTERM, signal_handler)
sys.exit(main(sys.argv))

View file

@ -1237,12 +1237,8 @@ class SyncEntitiesFactory:
ent_path_items = [ent["name"] for ent in entity["link"]]
parents = ent_path_items[1:len(ent_path_items) - 1:]
hierarchy = ""
if len(parents) > 0:
hierarchy = os.path.sep.join(parents)
data["parents"] = parents
data["hierarchy"] = hierarchy
data["tasks"] = self.entities_dict[id].pop("tasks", {})
self.entities_dict[id]["final_entity"]["data"] = data
self.entities_dict[id]["final_entity"]["type"] = "asset"
@ -2169,8 +2165,6 @@ class SyncEntitiesFactory:
hierarchy = "/".join(parents)
self.entities_dict[ftrack_id][
"final_entity"]["data"]["parents"] = parents
self.entities_dict[ftrack_id][
"final_entity"]["data"]["hierarchy"] = hierarchy
_parents.append(self.entities_dict[ftrack_id]["name"])
for child_id in self.entities_dict[ftrack_id]["children"]:
@ -2181,7 +2175,6 @@ class SyncEntitiesFactory:
if "data" not in self.updates[mongo_id]:
self.updates[mongo_id]["data"] = {}
self.updates[mongo_id]["data"]["parents"] = parents
self.updates[mongo_id]["data"]["hierarchy"] = hierarchy
def prepare_project_changes(self):
ftrack_ent_dict = self.entities_dict[self.ft_project_id]

View file

@ -16,6 +16,18 @@ class LoginServerHandler(BaseHTTPRequestHandler):
self.login_callback = login_callback
BaseHTTPRequestHandler.__init__(self, *args, **kw)
def log_message(self, format_str, *args):
"""Override method of BaseHTTPRequestHandler.
Goal is to use `print` instead of `sys.stderr.write`
"""
print("%s - - [%s] %s\n" % (
self.client_address[0],
self.log_date_time_string(),
format_str % args
))
def do_GET(self):
'''Override to handle requests ourselves.'''
parsed_path = parse.urlparse(self.path)

View file

@ -67,6 +67,10 @@ class SettingsAction(PypeModule, ITrayAction):
return
from openpype.tools.settings import MainWidget
self.settings_window = MainWidget(self.user_role)
self.settings_window.trigger_restart.connect(self._on_trigger_restart)
def _on_trigger_restart(self):
self.manager.restart_tray()
def show_settings_window(self):
"""Show settings tool window.

View file

@ -1,5 +1,5 @@
import os
import sys
import platform
import subprocess
from openpype.lib import get_pype_execute_args
from . import PypeModule, ITrayAction
@ -35,4 +35,14 @@ class StandAlonePublishAction(PypeModule, ITrayAction):
def run_standalone_publisher(self):
args = get_pype_execute_args("standalonepublisher")
subprocess.Popen(args, creationflags=subprocess.DETACHED_PROCESS)
kwargs = {}
if platform.system().lower() == "darwin":
new_args = ["open", "-a", args.pop(0), "--args"]
new_args.extend(args)
args = new_args
detached_process = getattr(subprocess, "DETACHED_PROCESS", None)
if detached_process is not None:
kwargs["creationflags"] = detached_process
subprocess.Popen(args, **kwargs)

View file

@ -0,0 +1,151 @@
import aiohttp
from aiohttp import web
import json
import logging
from concurrent.futures import CancelledError
from Qt import QtWidgets
from openpype.modules import ITrayService
from openpype.tools.tray_app.app import ConsoleDialog
log = logging.getLogger(__name__)
class IconType:
IDLE = "idle"
RUNNING = "running"
FAILED = "failed"
class MsgAction:
CONNECTING = "connecting"
INITIALIZED = "initialized"
ADD = "add"
CLOSE = "close"
class HostListener:
def __init__(self, webserver, module):
self.module = module
self.webserver = webserver
self._window_per_id = {} # dialogs per host name
self._action_per_id = {} # QAction per host name
webserver.add_route('*', "/ws/host_listener", self.websocket_handler)
def _host_is_connecting(self, host_name, label):
""" Initialize dialog, adds to submenu. """
services_submenu = self.module._services_submenu
action = QtWidgets.QAction(label, services_submenu)
action.triggered.connect(lambda: self.show_widget(host_name))
services_submenu.addAction(action)
self._action_per_id[host_name] = action
self._set_host_icon(host_name, IconType.IDLE)
widget = ConsoleDialog("")
self._window_per_id[host_name] = widget
def _set_host_icon(self, host_name, icon_type):
"""Assigns icon to action for 'host_name' with 'icon_type'.
Action must exist in self._action_per_id
Args:
host_name (str)
icon_type (IconType)
"""
action = self._action_per_id.get(host_name)
if not action:
raise ValueError("Unknown host {}".format(host_name))
icon = None
if icon_type == IconType.IDLE:
icon = ITrayService.get_icon_idle()
elif icon_type == IconType.RUNNING:
icon = ITrayService.get_icon_running()
elif icon_type == IconType.FAILED:
icon = ITrayService.get_icon_failed()
else:
log.info("Unknown icon type {} for {}".format(icon_type,
host_name))
action.setIcon(icon)
def show_widget(self, host_name):
"""Shows prepared widget for 'host_name'.
Dialog get initialized when 'host_name' is connecting.
"""
self.module.execute_in_main_thread(
lambda: self._show_widget(host_name))
def _show_widget(self, host_name):
widget = self._window_per_id[host_name]
widget.show()
widget.raise_()
widget.activateWindow()
async def websocket_handler(self, request):
ws = web.WebSocketResponse()
await ws.prepare(request)
widget = None
try:
async for msg in ws:
if msg.type == aiohttp.WSMsgType.TEXT:
host_name, action, text = self._parse_message(msg)
if action == MsgAction.CONNECTING:
self._action_per_id[host_name] = None
# must be sent to main thread, or action won't trigger
self.module.execute_in_main_thread(
lambda: self._host_is_connecting(host_name, text))
elif action == MsgAction.CLOSE:
# clean close
self._close(host_name)
await ws.close()
elif action == MsgAction.INITIALIZED:
self.module.execute_in_main_thread(
# must be queued as _host_is_connecting might not
# be triggered/finished yet
lambda: self._set_host_icon(host_name,
IconType.RUNNING))
elif action == MsgAction.ADD:
self.module.execute_in_main_thread(
lambda: self._add_text(host_name, text))
elif msg.type == aiohttp.WSMsgType.ERROR:
print('ws connection closed with exception %s' %
ws.exception())
host_name, _, _ = self._parse_message(msg)
self._set_host_icon(host_name, IconType.FAILED)
except CancelledError: # recoverable
pass
except Exception as exc:
log.warning("Exception during communication", exc_info=True)
if widget:
error_msg = str(exc)
widget.append_text(error_msg)
return ws
def _add_text(self, host_name, text):
widget = self._window_per_id[host_name]
widget.append_text(text)
def _close(self, host_name):
""" Clean close - remove from menu, delete widget."""
services_submenu = self.module._services_submenu
action = self._action_per_id.pop(host_name)
services_submenu.removeAction(action)
widget = self._window_per_id.pop(host_name)
if widget.isVisible():
widget.hide()
widget.deleteLater()
def _parse_message(self, msg):
data = json.loads(msg.data)
action = data.get("action")
host_name = data["host"]
value = data.get("text")
return host_name, action, value
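Grounded in _parse_message() and MsgAction above, a host-side client only needs to send small JSON payloads over the websocket registered at "/ws/host_listener". A minimal sketch (the port and host name are hypothetical):

import asyncio
import json
import aiohttp

async def send_console_line(port, host_name, text):
    url = "http://localhost:{}/ws/host_listener".format(port)
    async with aiohttp.ClientSession() as session:
        async with session.ws_connect(url) as ws:
            # "connecting" creates the submenu action and console dialog
            await ws.send_str(json.dumps(
                {"host": host_name, "action": "connecting", "text": host_name}))
            # "initialized" flips the tray icon to the running state
            await ws.send_str(json.dumps(
                {"host": host_name, "action": "initialized", "text": ""}))
            # "add" appends a line to the host's console dialog
            await ws.send_str(json.dumps(
                {"host": host_name, "action": "add", "text": text}))

# asyncio.run(send_console_line(8079, "maya", "Hello from the host"))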

View file

@ -7,6 +7,8 @@ import six
from openpype import resources
from .. import PypeModule, ITrayService
from openpype.modules.webserver.host_console_listener import HostListener
@six.add_metaclass(ABCMeta)
class IWebServerRoutes:
@ -23,6 +25,7 @@ class WebServerModule(PypeModule, ITrayService):
def initialize(self, _module_settings):
self.enabled = True
self.server_manager = None
self._host_listener = None
self.port = self.find_free_port()
@ -37,6 +40,7 @@ class WebServerModule(PypeModule, ITrayService):
def tray_init(self):
self.create_server_manager()
self._add_resources_statics()
self._add_listeners()
def tray_start(self):
self.start_server()
@ -54,6 +58,9 @@ class WebServerModule(PypeModule, ITrayService):
webserver_url, static_prefix
)
def _add_listeners(self):
self._host_listener = HostListener(self.server_manager, self)
def start_server(self):
if self.server_manager:
self.server_manager.start_server()

View file

@ -0,0 +1,318 @@
from collections import defaultdict
import copy
from Qt import QtWidgets, QtCore, QtGui
from avalon import api, style
from avalon.api import AvalonMongoDB
from openpype.api import Anatomy, config
from openpype import resources
from openpype.lib.delivery import (
sizeof_fmt,
path_from_representation,
get_format_dict,
check_destination_path,
process_single_file,
process_sequence,
collect_frames
)
class Delivery(api.SubsetLoader):
"""Export selected versions to folder structure from Template"""
is_multiple_contexts_compatible = True
sequence_splitter = "__sequence_splitter__"
representations = ["*"]
families = ["*"]
tool_names = ["library_loader"]
label = "Deliver Versions"
order = 35
icon = "upload"
color = "#d8d8d8"
def message(self, text):
msgBox = QtWidgets.QMessageBox()
msgBox.setText(text)
msgBox.setStyleSheet(style.load_stylesheet())
msgBox.setWindowFlags(
msgBox.windowFlags() | QtCore.Qt.FramelessWindowHint
)
msgBox.exec_()
def load(self, contexts, name=None, namespace=None, options=None):
try:
dialog = DeliveryOptionsDialog(contexts, self.log)
dialog.exec_()
except Exception:
self.log.error("Failed to deliver versions.", exc_info=True)
class DeliveryOptionsDialog(QtWidgets.QDialog):
"""Dialog to select template where to deliver selected representations."""
def __init__(self, contexts, log=None, parent=None):
super(DeliveryOptionsDialog, self).__init__(parent=parent)
project = contexts[0]["project"]["name"]
self.anatomy = Anatomy(project)
self._representations = None
self.log = log
self.currently_uploaded = 0
self.dbcon = AvalonMongoDB()
self.dbcon.Session["AVALON_PROJECT"] = project
self.dbcon.install()
self._set_representations(contexts)
self.setWindowTitle("OpenPype - Deliver versions")
icon = QtGui.QIcon(resources.pype_icon_filepath())
self.setWindowIcon(icon)
self.setWindowFlags(
QtCore.Qt.WindowCloseButtonHint |
QtCore.Qt.WindowMinimizeButtonHint
)
self.setStyleSheet(style.load_stylesheet())
dropdown = QtWidgets.QComboBox()
self.templates = self._get_templates(self.anatomy)
for name, _ in self.templates.items():
dropdown.addItem(name)
template_label = QtWidgets.QLabel()
template_label.setCursor(QtGui.QCursor(QtCore.Qt.IBeamCursor))
template_label.setTextInteractionFlags(QtCore.Qt.TextSelectableByMouse)
root_line_edit = QtWidgets.QLineEdit()
repre_checkboxes_layout = QtWidgets.QFormLayout()
repre_checkboxes_layout.setContentsMargins(10, 5, 5, 10)
self._representation_checkboxes = {}
for repre in self._get_representation_names():
checkbox = QtWidgets.QCheckBox()
checkbox.setChecked(False)
self._representation_checkboxes[repre] = checkbox
checkbox.stateChanged.connect(self._update_selected_label)
repre_checkboxes_layout.addRow(repre, checkbox)
selected_label = QtWidgets.QLabel()
input_widget = QtWidgets.QWidget(self)
input_layout = QtWidgets.QFormLayout(input_widget)
input_layout.setContentsMargins(10, 15, 5, 5)
input_layout.addRow("Selected representations", selected_label)
input_layout.addRow("Delivery template", dropdown)
input_layout.addRow("Template value", template_label)
input_layout.addRow("Root", root_line_edit)
input_layout.addRow("Representations", repre_checkboxes_layout)
btn_delivery = QtWidgets.QPushButton("Deliver")
btn_delivery.setEnabled(bool(dropdown.currentText()))
progress_bar = QtWidgets.QProgressBar(self)
progress_bar.setMinimum(0)
progress_bar.setMaximum(100)
progress_bar.setVisible(False)
text_area = QtWidgets.QTextEdit()
text_area.setReadOnly(True)
text_area.setVisible(False)
text_area.setMinimumHeight(100)
layout = QtWidgets.QVBoxLayout(self)
layout.addWidget(input_widget)
layout.addStretch(1)
layout.addWidget(btn_delivery)
layout.addWidget(progress_bar)
layout.addWidget(text_area)
self.selected_label = selected_label
self.template_label = template_label
self.dropdown = dropdown
self.root_line_edit = root_line_edit
self.progress_bar = progress_bar
self.text_area = text_area
self.btn_delivery = btn_delivery
self.files_selected, self.size_selected = \
self._get_counts(self._get_selected_repres())
self._update_selected_label()
self._update_template_value()
btn_delivery.clicked.connect(self.deliver)
dropdown.currentIndexChanged.connect(self._update_template_value)
def deliver(self):
"""Main method to loop through all selected representations"""
self.progress_bar.setVisible(True)
self.btn_delivery.setEnabled(False)
QtWidgets.QApplication.processEvents()
report_items = defaultdict(list)
selected_repres = self._get_selected_repres()
datetime_data = config.get_datetime_data()
template_name = self.dropdown.currentText()
format_dict = get_format_dict(self.anatomy, self.root_line_edit.text())
for repre in self._representations:
if repre["name"] not in selected_repres:
continue
repre_path = path_from_representation(repre, self.anatomy)
anatomy_data = copy.deepcopy(repre["context"])
new_report_items = check_destination_path(str(repre["_id"]),
self.anatomy,
anatomy_data,
datetime_data,
template_name)
report_items.update(new_report_items)
if new_report_items:
continue
args = [
repre_path,
repre,
self.anatomy,
template_name,
anatomy_data,
format_dict,
report_items,
self.log
]
if repre.get("files"):
src_paths = []
for repre_file in repre["files"]:
src_path = self.anatomy.fill_root(repre_file["path"])
src_paths.append(src_path)
sources_and_frames = collect_frames(src_paths)
for src_path, frame in sources_and_frames.items():
args[0] = src_path
if frame:
anatomy_data["frame"] = frame
new_report_items, uploaded = process_single_file(*args)
report_items.update(new_report_items)
self._update_progress(uploaded)
else: # fallback for Pype2 and representations without files
frame = repre['context'].get('frame')
if frame:
repre["context"]["frame"] = len(str(frame)) * "#"
if not frame:
new_report_items, uploaded = process_single_file(*args)
else:
new_report_items, uploaded = process_sequence(*args)
report_items.update(new_report_items)
self._update_progress(uploaded)
self.text_area.setText(self._format_report(report_items))
self.text_area.setVisible(True)
def _get_representation_names(self):
"""Get set of representation names for checkbox filtering."""
return set([repre["name"] for repre in self._representations])
def _get_templates(self, anatomy):
"""Adds list of delivery templates from Anatomy to dropdown."""
templates = {}
for template_name, value in anatomy.templates["delivery"].items():
if not isinstance(value, str) or not value.startswith('{root'):
continue
templates[template_name] = value
return templates
def _set_representations(self, contexts):
version_ids = [context["version"]["_id"] for context in contexts]
repres = list(self.dbcon.find({
"type": "representation",
"parent": {"$in": version_ids}
}))
self._representations = repres
def _get_counts(self, selected_repres=None):
"""Returns tuple of number of selected files and their size."""
files_selected = 0
size_selected = 0
for repre in self._representations:
if repre["name"] in selected_repres:
files = repre.get("files", [])
if not files: # count repre without files as 1 so progress can't divide by 0
files_selected += 1
size_selected += 0
else:
for repre_file in files:
files_selected += 1
size_selected += repre_file["size"]
return files_selected, size_selected
def _prepare_label(self):
"""Provides text with no of selected files and their size."""
label = "{} files, size {}".format(self.files_selected,
sizeof_fmt(self.size_selected))
return label
def _get_selected_repres(self):
"""Returns list of representation names filtered from checkboxes."""
selected_repres = []
for repre_name, checkbox in self._representation_checkboxes.items():
if checkbox.isChecked():
selected_repres.append(repre_name)
return selected_repres
def _update_selected_label(self):
"""Updates label with list of number of selected files."""
selected_repres = self._get_selected_repres()
self.files_selected, self.size_selected = \
self._get_counts(selected_repres)
self.selected_label.setText(self._prepare_label())
def _update_template_value(self, _index=None):
"""Sets template value to label after selection in dropdown."""
name = self.dropdown.currentText()
template_value = self.templates.get(name)
if template_value:
self.btn_delivery.setEnabled(True)
self.template_label.setText(template_value)
def _update_progress(self, uploaded):
"""Update progress bar after each repre copied."""
self.currently_uploaded += uploaded
ratio = self.currently_uploaded / self.files_selected
self.progress_bar.setValue(ratio * self.progress_bar.maximum())
def _format_report(self, report_items):
"""Format final result and error details as html."""
msg = "Delivery finished"
if not report_items:
msg += " successfully"
else:
msg += " with errors"
txt = "<h2>{}</h2>".format(msg)
for header, data in report_items.items():
txt += "<h3>{}</h3>".format(header)
for item in data:
txt += "{}<br>".format(item)
return txt

View file

@ -39,7 +39,6 @@ class CollectResourcesPath(pyblish.api.InstancePlugin):
"rig",
"plate",
"look",
"lut",
"yetiRig",
"yeticache",
"nukenodes",
@ -52,7 +51,8 @@ class CollectResourcesPath(pyblish.api.InstancePlugin):
"fbx",
"textures",
"action",
"background"
"background",
"effect"
]
def process(self, instance):

View file

@ -44,6 +44,7 @@ class ExtractBurnin(openpype.api.Extractor):
"harmony",
"fusion",
"aftereffects",
"tvpaint"
# "resolve"
]
optional = True

View file

@ -40,12 +40,15 @@ class ExtractOtioAudioTracks(pyblish.api.ContextPlugin):
# get sequence
otio_timeline = context.data["otioTimeline"]
# temp file
audio_temp_fpath = self.create_temp_file("audio")
# get all audio inputs from otio timeline
audio_inputs = self.get_audio_track_items(otio_timeline)
if not audio_inputs:
return
# temp file
audio_temp_fpath = self.create_temp_file("audio")
# create empty audio with longest duration
empty = self.create_empty(audio_inputs)
@ -53,14 +56,14 @@ class ExtractOtioAudioTracks(pyblish.api.ContextPlugin):
audio_inputs.insert(0, empty)
# create cmd
cmd = self.ffmpeg_path + " "
cmd = '"{}"'.format(self.ffmpeg_path) + " "
cmd += self.create_cmd(audio_inputs)
cmd += audio_temp_fpath
cmd += "\"{}\"".format(audio_temp_fpath)
# run subprocess
self.log.debug("Executing: {}".format(cmd))
openpype.api.run_subprocess(
cmd, shell=True, logger=self.log
cmd, logger=self.log
)
# remove empty
@ -97,17 +100,17 @@ class ExtractOtioAudioTracks(pyblish.api.ContextPlugin):
audio_fpath = self.create_temp_file(name)
cmd = " ".join([
self.ffmpeg_path,
'"{}"'.format(self.ffmpeg_path),
"-ss {}".format(start_sec),
"-t {}".format(duration_sec),
"-i {}".format(audio_file),
"-i \"{}\"".format(audio_file),
audio_fpath
])
# run subprocess
self.log.debug("Executing: {}".format(cmd))
openpype.api.run_subprocess(
cmd, shell=True, logger=self.log
cmd, logger=self.log
)
else:
audio_fpath = recycling_file.pop()
@ -218,11 +221,11 @@ class ExtractOtioAudioTracks(pyblish.api.ContextPlugin):
# create empty cmd
cmd = " ".join([
self.ffmpeg_path,
'"{}"'.format(self.ffmpeg_path),
"-f lavfi",
"-i anullsrc=channel_layout=stereo:sample_rate=48000",
"-t {}".format(max_duration_sec),
empty_fpath
"\"{}\"".format(empty_fpath)
])
# generate empty with ffmpeg
@ -230,7 +233,7 @@ class ExtractOtioAudioTracks(pyblish.api.ContextPlugin):
self.log.debug("Executing: {}".format(cmd))
openpype.api.run_subprocess(
cmd, shell=True, logger=self.log
cmd, logger=self.log
)
# return dict with output
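
The quoting changes above exist so that ffmpeg paths containing spaces survive command splitting once `shell=True` is dropped; a small sketch of the pattern (paths are hypothetical):

import shlex

ffmpeg_path = "C:/Program Files/ffmpeg/bin/ffmpeg.exe"  # hypothetical path with a space
audio_file = "/tmp/input audio.wav"                     # hypothetical input

cmd = " ".join([
    '"{}"'.format(ffmpeg_path),
    "-ss 0",
    "-t 5",
    '-i "{}"'.format(audio_file),
    '"/tmp/out.wav"',
])
# Unquoted, the space would split the executable path into two arguments;
# quoted, shlex (and OpenPype's run_subprocess) keeps each path whole.
print(shlex.split(cmd))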

View file

@ -209,7 +209,7 @@ class ExtractOTIOReview(openpype.api.Extractor):
"frameStart": start,
"frameEnd": end,
"stagingDir": self.staging_dir,
"tags": ["review", "ftrackreview", "delete"]
"tags": ["review", "delete"]
}
collection = clique.Collection(
@ -313,7 +313,7 @@ class ExtractOTIOReview(openpype.api.Extractor):
out_frame_start += end_offset
# start command list
command = [ffmpeg_path]
command = ['"{}"'.format(ffmpeg_path)]
if sequence:
input_dir, collection = sequence
@ -326,7 +326,7 @@ class ExtractOTIOReview(openpype.api.Extractor):
# form command for rendering gap files
command.extend([
"-start_number {}".format(in_frame_start),
"-i {}".format(input_path)
"-i \"{}\"".format(input_path)
])
elif video:
@ -341,7 +341,7 @@ class ExtractOTIOReview(openpype.api.Extractor):
command.extend([
"-ss {}".format(sec_start),
"-t {}".format(sec_duration),
"-i {}".format(video_path)
"-i \"{}\"".format(video_path)
])
elif gap:
@ -360,11 +360,13 @@ class ExtractOTIOReview(openpype.api.Extractor):
# add output attributes
command.extend([
"-start_number {}".format(out_frame_start),
output_path
"\"{}\"".format(output_path)
])
# execute
self.log.debug("Executing: {}".format(" ".join(command)))
output = openpype.api.run_subprocess(" ".join(command), shell=True)
output = openpype.api.run_subprocess(
" ".join(command), logger=self.log
)
self.log.debug("Output: {}".format(output))
def _generate_used_frames(self, duration, end_offset=None):

View file

@ -3,6 +3,9 @@ import re
import copy
import json
from abc import ABCMeta, abstractmethod
import six
import clique
import pyblish.api
@ -48,6 +51,8 @@ class ExtractReview(pyblish.api.InstancePlugin):
video_exts = ["mov", "mp4"]
supported_exts = image_exts + video_exts
alpha_exts = ["exr", "png", "dpx"]
# FFmpeg tools paths
ffmpeg_path = get_ffmpeg_tool_path("ffmpeg")
@ -296,6 +301,13 @@ class ExtractReview(pyblish.api.InstancePlugin):
):
with_audio = False
input_is_sequence = self.input_is_sequence(repre)
input_allow_bg = False
if input_is_sequence and repre["files"]:
ext = os.path.splitext(repre["files"][0])[1].replace(".", "")
if ext in self.alpha_exts:
input_allow_bg = True
return {
"fps": float(instance.data["fps"]),
"frame_start": frame_start,
@ -310,7 +322,8 @@ class ExtractReview(pyblish.api.InstancePlugin):
"resolution_width": instance.data.get("resolutionWidth"),
"resolution_height": instance.data.get("resolutionHeight"),
"origin_repre": repre,
"input_is_sequence": self.input_is_sequence(repre),
"input_is_sequence": input_is_sequence,
"input_allow_bg": input_allow_bg,
"with_audio": with_audio,
"without_handles": without_handles,
"handles_are_set": handles_are_set
@ -470,6 +483,39 @@ class ExtractReview(pyblish.api.InstancePlugin):
lut_filters = self.lut_filters(new_repre, instance, ffmpeg_input_args)
ffmpeg_video_filters.extend(lut_filters)
bg_alpha = 0
bg_color = output_def.get("bg_color")
if bg_color:
bg_red, bg_green, bg_blue, bg_alpha = bg_color
if bg_alpha > 0:
if not temp_data["input_allow_bg"]:
                self.log.info((
                    "Output definition has a BG color defined but the input"
                    " was resolved as not supporting BG addition."
                ))
else:
bg_color_hex = "#{0:0>2X}{1:0>2X}{2:0>2X}".format(
bg_red, bg_green, bg_blue
)
bg_color_alpha = float(bg_alpha) / 255
bg_color_str = "{}@{}".format(bg_color_hex, bg_color_alpha)
self.log.info("Applying BG color {}".format(bg_color_str))
color_args = [
"split=2[bg][fg]",
"[bg]drawbox=c={}:replace=1:t=fill[bg]".format(
bg_color_str
),
"[bg][fg]overlay=format=auto"
]
# Prepend bg color change before all video filters
            # NOTE this is required because video filters from settings
            # may affect the color of the BG
            # e.g. `eq` can remove alpha from input
for arg in reversed(color_args):
ffmpeg_video_filters.insert(0, arg)
        # Add argument to overwrite output file
ffmpeg_output_args.append("-y")
@ -547,10 +593,12 @@ class ExtractReview(pyblish.api.InstancePlugin):
all_args.append("\"{}\"".format(self.ffmpeg_path))
all_args.extend(input_args)
if video_filters:
all_args.append("-filter:v {}".format(",".join(video_filters)))
all_args.append("-filter:v")
all_args.append("\"{}\"".format(",".join(video_filters)))
if audio_filters:
all_args.append("-filter:a {}".format(",".join(audio_filters)))
all_args.append("-filter:a")
all_args.append("\"{}\"".format(",".join(audio_filters)))
all_args.extend(output_args)
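
Putting the two hunks together: with a hypothetical bg_color of (255, 0, 0, 128), the prepended filter chain and the quoted `-filter:v` argument resolve as in this sketch:

bg_red, bg_green, bg_blue, bg_alpha = 255, 0, 0, 128  # hypothetical settings value

bg_color_hex = "#{0:0>2X}{1:0>2X}{2:0>2X}".format(bg_red, bg_green, bg_blue)
bg_color_str = "{}@{}".format(bg_color_hex, float(bg_alpha) / 255)

ffmpeg_video_filters = ["eq=gamma=2.2"]  # pre-existing filters from settings
color_args = [
    "split=2[bg][fg]",
    "[bg]drawbox=c={}:replace=1:t=fill[bg]".format(bg_color_str),
    "[bg][fg]overlay=format=auto",
]
for arg in reversed(color_args):
    ffmpeg_video_filters.insert(0, arg)

# -filter:v "split=2[bg][fg],[bg]drawbox=c=#FF0000@...:replace=1:t=fill[bg],[bg][fg]overlay=format=auto,eq=gamma=2.2"
print('-filter:v "{}"'.format(",".join(ffmpeg_video_filters)))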
@ -828,12 +876,6 @@ class ExtractReview(pyblish.api.InstancePlugin):
"""
filters = []
letter_box_def = output_def["letter_box"]
letter_box_enabled = letter_box_def["enabled"]
# Get instance data
pixel_aspect = temp_data["pixel_aspect"]
# NOTE Skipped using instance's resolution
full_input_path_single_file = temp_data["full_input_path_single_file"]
input_data = ffprobe_streams(
@ -842,6 +884,33 @@ class ExtractReview(pyblish.api.InstancePlugin):
input_width = int(input_data["width"])
input_height = int(input_data["height"])
        # NOTE Setting only one of `width` or `height` is not allowed
        # - settings value can't be None so unset is stored as 0
output_width = output_def.get("width") or None
output_height = output_def.get("height") or None
# Convert overscan value video filters
overscan_crop = output_def.get("overscan_crop")
overscan = OverscanCrop(input_width, input_height, overscan_crop)
overscan_crop_filters = overscan.video_filters()
        # Add overscan filters if there are any and modify input
        # resolution by their values
if overscan_crop_filters:
filters.extend(overscan_crop_filters)
input_width = overscan.width()
input_height = overscan.height()
        # Use the input resolution after cropping as output resolution to
        # skip usage of instance data resolution
if output_width is None or output_height is None:
output_width = input_width
output_height = input_height
letter_box_def = output_def["letter_box"]
letter_box_enabled = letter_box_def["enabled"]
# Get instance data
pixel_aspect = temp_data["pixel_aspect"]
        # Make sure input width and height are not odd numbers
input_width_is_odd = bool(input_width % 2 != 0)
input_height_is_odd = bool(input_height % 2 != 0)
@ -866,10 +935,6 @@ class ExtractReview(pyblish.api.InstancePlugin):
self.log.debug("input_width: `{}`".format(input_width))
self.log.debug("input_height: `{}`".format(input_height))
        # NOTE Setting only one of `width` or `height` is not allowed
        # - settings value can't be None so unset is stored as 0
output_width = output_def.get("width") or None
output_height = output_def.get("height") or None
# Use instance resolution if output definition has not set it.
if output_width is None or output_height is None:
output_width = temp_data["resolution_width"]
@ -1393,3 +1458,291 @@ class ExtractReview(pyblish.api.InstancePlugin):
vf_back = "-vf " + ",".join(vf_fixed)
return vf_back
@six.add_metaclass(ABCMeta)
class _OverscanValue:
def __repr__(self):
return "<{}> {}".format(self.__class__.__name__, str(self))
@abstractmethod
def copy(self):
"""Create a copy of object."""
pass
@abstractmethod
def size_for(self, value):
"""Calculate new value for passed value."""
pass
class PixValueExplicit(_OverscanValue):
def __init__(self, value):
self._value = int(value)
def __str__(self):
return "{}px".format(self._value)
def copy(self):
return PixValueExplicit(self._value)
def size_for(self, value):
if self._value == 0:
return value
return self._value
class PercentValueExplicit(_OverscanValue):
def __init__(self, value):
self._value = float(value)
def __str__(self):
return "{}%".format(abs(self._value))
def copy(self):
return PercentValueExplicit(self._value)
def size_for(self, value):
if self._value == 0:
return value
return int((value / 100) * self._value)
class PixValueRelative(_OverscanValue):
def __init__(self, value):
self._value = int(value)
def __str__(self):
sign = "-" if self._value < 0 else "+"
return "{}{}px".format(sign, abs(self._value))
def copy(self):
return PixValueRelative(self._value)
def size_for(self, value):
return value + self._value
class PercentValueRelative(_OverscanValue):
def __init__(self, value):
self._value = float(value)
def __str__(self):
return "{}%".format(self._value)
def copy(self):
return PercentValueRelative(self._value)
def size_for(self, value):
if self._value == 0:
return value
offset = int((value / 100) * self._value)
return value + offset
class PercentValueRelativeSource(_OverscanValue):
def __init__(self, value, source_sign):
self._value = float(value)
if source_sign not in ("-", "+"):
raise ValueError(
"Invalid sign value \"{}\" expected \"-\" or \"+\"".format(
source_sign
)
)
self._source_sign = source_sign
def __str__(self):
return "{}%{}".format(self._value, self._source_sign)
def copy(self):
return PercentValueRelativeSource(self._value, self._source_sign)
def size_for(self, value):
if self._value == 0:
return value
return int((value * 100) / (100 - self._value))
class OverscanCrop:
"""Helper class to read overscan string and calculate output resolution.
    It is possible to enter a single value used for both width and height, or
    two values, one for width and one for height. The overscan string has a
    few variants. Each variant defines the output size for the input size.
### Example
For input size: 2200px
| String | Output | Description |
|----------|--------|-------------------------------------------------|
| "" | 2200px | Empty string does nothing. |
| "10%" | 220px | Explicit percent size. |
| "-10%" | 1980px | Relative percent size (decrease). |
| "+10%" | 2420px | Relative percent size (increase). |
| "-10%+" | 2000px | Relative percent size to output size. |
| "300px" | 300px | Explicit output size cropped or expanded. |
| "-300px" | 1900px | Relative pixel size (decrease). |
| "+300px" | 2500px | Relative pixel size (increase). |
| "300" | 300px | Value without "%" and "px" is used as has "px". |
Value without sign (+/-) in is always explicit and value with sign is
relative. Output size for "200px" and "+200px" are not the same.
Values "0", "0px" or "0%" are ignored.
All values that cause output resolution smaller than 1 pixel are invalid.
Value "-10%+" is a special case which says that input's resolution is
bigger by 10% than expected output.
It is possible to combine these variants to define different output for
width and height.
Resolution: 2000px 1000px
| String | Output |
|---------------|---------------|
| "100px 120px" | 2100px 1120px |
| "-10% -200px" | 1800px 800px |
"""
item_regex = re.compile(r"([\+\-])?([0-9]+)(.+)?")
relative_source_regex = re.compile(r"%([\+\-])")
def __init__(self, input_width, input_height, string_value):
        # Make sure the value is not None
string_value = string_value or ""
self.input_width = input_width
self.input_height = input_height
width, height = self._convert_string_to_values(string_value)
self._width_value = width
self._height_value = height
self._string_value = string_value
def __str__(self):
return "{}".format(self._string_value)
def __repr__(self):
return "<{}>".format(self.__class__.__name__)
def width(self):
"""Calculated width."""
return self._width_value.size_for(self.input_width)
def height(self):
"""Calculated height."""
return self._height_value.size_for(self.input_height)
def video_filters(self):
"""FFmpeg video filters to achieve expected result.
Filter may be empty, use "crop" filter, "pad" filter or combination of
"crop" and "pad".
Returns:
list: FFmpeg video filters.
"""
# crop=width:height:x:y - explicit start x, y position
# crop=width:height - x, y are related to center by width/height
        # pad=width:height:x:y - explicit start x, y position
        # pad=width:height - x, y are set to 0 by default
width = self.width()
height = self.height()
output = []
if self.input_width == width and self.input_height == height:
return output
        # Make sure resolution uses even numbers
if width % 2 == 1:
width -= 1
if height % 2 == 1:
height -= 1
if width <= self.input_width and height <= self.input_height:
output.append("crop={}:{}".format(width, height))
elif width >= self.input_width and height >= self.input_height:
output.append(
"pad={}:{}:(iw-ow)/2:(ih-oh)/2".format(width, height)
)
elif width > self.input_width and height < self.input_height:
output.append("crop=iw:{}".format(height))
output.append("pad={}:ih:(iw-ow)/2:(ih-oh)/2".format(width))
elif width < self.input_width and height > self.input_height:
output.append("crop={}:ih".format(width))
output.append("pad=iw:{}:(iw-ow)/2:(ih-oh)/2".format(height))
return output
def _convert_string_to_values(self, orig_string_value):
string_value = orig_string_value.strip().lower()
if not string_value:
return [PixValueRelative(0), PixValueRelative(0)]
# Replace "px" (and spaces before) with single space
string_value = re.sub(r"([ ]+)?px", " ", string_value)
string_value = re.sub(r"([ ]+)%", "%", string_value)
        # Make sure +/- sign at the beginning of string is next to number
        string_value = re.sub(r"^([\+\-])[ ]+", r"\g<1>", string_value)
        # Make sure +/- sign in the middle has zero spaces before the number
        # it belongs to
string_value = re.sub(
r"[ ]([\+\-])[ ]+([0-9])",
r" \g<1>\g<2>",
string_value
)
string_parts = [
part
for part in string_value.split(" ")
if part
]
error_msg = "Invalid string for rescaling \"{}\"".format(
orig_string_value
)
        if not 1 <= len(string_parts) <= 2:
raise ValueError(error_msg)
output = []
for item in string_parts:
groups = self.item_regex.findall(item)
if not groups:
raise ValueError(error_msg)
relative_sign, value, ending = groups[0]
if not relative_sign:
if not ending:
output.append(PixValueExplicit(value))
else:
output.append(PercentValueExplicit(value))
else:
source_sign_group = self.relative_source_regex.findall(ending)
if not ending:
output.append(PixValueRelative(int(relative_sign + value)))
elif source_sign_group:
source_sign = source_sign_group[0]
output.append(PercentValueRelativeSource(
float(relative_sign + value), source_sign
))
else:
output.append(
PercentValueRelative(float(relative_sign + value))
)
if len(output) == 1:
width = output.pop(0)
height = width.copy()
else:
width, height = output
return width, height
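
A quick sanity check of the docstring table, recomputing the arithmetic standalone (without importing the plugin module):

input_width = 2200

assert int((input_width / 100) * 10) == 220                  # "10%"    explicit percent
assert input_width + int((input_width / 100) * -10) == 1980  # "-10%"   relative percent
assert input_width + 300 == 2500                             # "+300px" relative pixels
assert int((input_width * 100) / (100 - (-10))) == 2000      # "-10%+"  relative to source
print("overscan arithmetic matches the docstring table")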

View file

@ -78,7 +78,6 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
"rig",
"plate",
"look",
"lut",
"audio",
"yetiRig",
"yeticache",
@ -97,7 +96,8 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
"editorial",
"background",
"camerarig",
"redshiftproxy"
"redshiftproxy",
"effect"
]
exclude_families = ["clip"]
db_representation_context_keys = [

View file

@ -81,11 +81,11 @@ def main(argv):
host_name = os.environ["AVALON_APP"].lower()
if host_name == "photoshop":
from avalon.photoshop.lib import launch
from avalon.photoshop.lib import main
elif host_name == "aftereffects":
from avalon.aftereffects.lib import launch
from avalon.aftereffects.lib import main
elif host_name == "harmony":
from avalon.harmony.lib import launch
from avalon.harmony.lib import main
else:
title = "Unknown host name"
message = (
@ -97,7 +97,7 @@ def main(argv):
if launch_args:
# Launch host implementation
launch(*launch_args)
main(*launch_args)
else:
# Show message box
on_invalid_args(after_script_idx is None)

View file

@ -4,8 +4,12 @@
"enabled": true,
"optional": true,
"active": true,
"skip_resolution_check": [".*"],
"skip_timelines_check": [".*"]
"skip_resolution_check": [
".*"
],
"skip_timelines_check": [
".*"
]
},
"AfterEffectsSubmitDeadline": {
"use_published": true,
@ -14,5 +18,9 @@
"secondary_pool": "",
"chunk_size": 1000000
}
},
"workfile_builder": {
"create_first_version": false,
"custom_templates": []
}
}

View file

@ -0,0 +1,6 @@
{
"workfile_builder": {
"create_first_version": false,
"custom_templates": []
}
}

View file

@ -37,11 +37,11 @@
"ftrackreview"
],
"ffmpeg_args": {
"video_filters": [
"eq=gamma=2.2"
],
"video_filters": [],
"audio_filters": [],
"input": [],
"input": [
"-apply_trc gamma22"
],
"output": [
"-pix_fmt yuv420p",
"-crf 18",
@ -55,8 +55,15 @@
"ftrack"
]
},
"overscan_crop": "",
"width": 0,
"height": 0,
"bg_color": [
0,
0,
0,
0
],
"letter_box": {
"enabled": false,
"ratio": 0.0,
@ -266,8 +273,7 @@
"active_site": "studio",
"remote_site": "studio"
},
"sites": {
}
"sites": {}
},
"project_plugins": {
"windows": [],

View file

@ -293,19 +293,22 @@
},
"Display Options": {
"background": [
0.7,
0.7,
0.7
125,
125,
125,
255
],
"backgroundBottom": [
0.7,
0.7,
0.7
125,
125,
125,
255
],
"backgroundTop": [
0.7,
0.7,
0.7
125,
125,
125,
255
],
"override_display": true
},
@ -393,74 +396,88 @@
"load": {
"colors": {
"model": [
0.821,
0.518,
0.117
209,
132,
30,
255
],
"rig": [
0.144,
0.443,
0.463
59,
226,
235,
255
],
"pointcache": [
0.368,
0.821,
0.117
94,
209,
30,
255
],
"animation": [
0.368,
0.821,
0.117
94,
209,
30,
255
],
"ass": [
1.0,
0.332,
0.312
249,
135,
53,
255
],
"camera": [
0.447,
0.312,
1.0
136,
114,
244,
255
],
"fbx": [
1.0,
0.931,
0.312
215,
166,
255,
255
],
"mayaAscii": [
0.312,
1.0,
0.747
67,
174,
255,
255
],
"setdress": [
0.312,
1.0,
0.747
255,
250,
90,
255
],
"layout": [
0.312,
1.0,
0.747
255,
250,
90,
255
],
"vdbcache": [
0.312,
1.0,
0.428
249,
54,
0,
255
],
"vrayproxy": [
0.258,
0.95,
0.541
255,
150,
12,
255
],
"yeticache": [
0.2,
0.8,
0.3
99,
206,
220,
255
],
"yetiRig": [
0.0,
0.8,
0.5
0,
205,
125,
255
]
}
},

View file

@ -6,9 +6,7 @@
"load": "ctrl+alt+l",
"manage": "ctrl+alt+m",
"build_workfile": "ctrl+alt+b"
},
"open_workfile_at_start": false,
"create_initial_workfile": true
}
},
"create": {
"CreateWriteRender": {
@ -147,12 +145,13 @@
"node_name_template": "{class_name}_{ext}"
}
},
"workfile_build": {
"workfile_builder": {
"create_first_version": false,
"custom_templates": [],
"builder_on_start": false,
"profiles": [
{
"tasks": [
"compositing"
],
"tasks": [],
"current_context": [
{
"subset_name_filters": [],
@ -162,10 +161,12 @@
],
"repre_names": [
"exr",
"dpx"
"dpx",
"mov"
],
"loaders": [
"LoadSequence"
"LoadSequence",
"LoadMov"
]
}
],

View file

@ -13,5 +13,9 @@
"jpg"
]
}
},
"workfile_builder": {
"create_first_version": false,
"custom_templates": []
}
}

View file

@ -32,5 +32,9 @@
}
}
},
"workfile_builder": {
"create_first_version": false,
"custom_templates": []
},
"filters": {}
}

View file

@ -103,6 +103,7 @@ from .enum_entity import (
EnumEntity,
AppsEnumEntity,
ToolsEnumEntity,
TaskTypeEnumEntity,
ProvidersEnum
)
@ -154,6 +155,7 @@ __all__ = (
"EnumEntity",
"AppsEnumEntity",
"ToolsEnumEntity",
"TaskTypeEnumEntity",
"ProvidersEnum",
"ListEntity",

View file

@ -111,6 +111,8 @@ class BaseItemEntity(BaseEntity):
self.file_item = None
# Reference to `RootEntity`
self.root_item = None
# Change of value requires restart of OpenPype
self._require_restart_on_change = False
# Entity is in hierarchy of dynamically created entity
self.is_in_dynamic_item = False
@ -171,6 +173,14 @@ class BaseItemEntity(BaseEntity):
roles = [roles]
self.roles = roles
@property
def require_restart_on_change(self):
return self._require_restart_on_change
@property
def require_restart(self):
return False
@property
def has_studio_override(self):
"""Says if entity or it's children has studio overrides."""
@ -261,6 +271,14 @@ class BaseItemEntity(BaseEntity):
self, "Dynamic entity has set `is_group` to true."
)
if (
self.require_restart_on_change
and (self.is_dynamic_item or self.is_in_dynamic_item)
):
raise EntitySchemaError(
self, "Dynamic entity can't require restart."
)
@abstractmethod
def set_override_state(self, state):
"""Set override state and trigger it on children.
@ -788,6 +806,15 @@ class ItemEntity(BaseItemEntity):
# Root item reference
self.root_item = self.parent.root_item
# Item require restart on value change
require_restart_on_change = self.schema_data.get("require_restart")
if (
require_restart_on_change is None
and not (self.is_dynamic_item or self.is_in_dynamic_item)
):
require_restart_on_change = self.parent.require_restart_on_change
self._require_restart_on_change = require_restart_on_change
# File item reference
if self.parent.is_file:
self.file_item = self.parent

View file

@ -439,10 +439,10 @@ class DictMutableKeysEntity(EndpointEntity):
new_initial_value = []
for key, value in _settings_value:
if key in initial_value:
new_initial_value.append(key, initial_value.pop(key))
new_initial_value.append([key, initial_value.pop(key)])
for key, value in initial_value.items():
new_initial_value.append(key, value)
new_initial_value.append([key, value])
initial_value = new_initial_value
else:
initial_value = _settings_value

View file

@ -219,6 +219,41 @@ class ToolsEnumEntity(BaseEnumEntity):
self._current_value = new_value
class TaskTypeEnumEntity(BaseEnumEntity):
schema_types = ["task-types-enum"]
def _item_initalization(self):
self.multiselection = True
self.value_on_not_set = []
self.enum_items = []
self.valid_keys = set()
self.valid_value_types = (list, )
self.placeholder = None
def _get_enum_values(self):
anatomy_entity = self.get_entity_from_path(
"project_settings/project_anatomy"
)
valid_keys = set()
enum_items = []
for task_type in anatomy_entity["tasks"].keys():
enum_items.append({task_type: task_type})
valid_keys.add(task_type)
return enum_items, valid_keys
def set_override_state(self, *args, **kwargs):
super(TaskTypeEnumEntity, self).set_override_state(*args, **kwargs)
self.enum_items, self.valid_keys = self._get_enum_values()
new_value = []
for key in self._current_value:
if key in self.valid_keys:
new_value.append(key)
self._current_value = new_value
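
The pruning at the end of set_override_state drops keys that are no longer valid task types; a standalone sketch of the same filtering (task type names are hypothetical):

valid_keys = {"Animation", "Compositing", "Modeling"}  # from project anatomy
current_value = ["Animation", "Rigging"]               # previously stored value

new_value = [key for key in current_value if key in valid_keys]
print(new_value)  # ['Animation']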
class ProvidersEnum(BaseEnumEntity):
schema_types = ["providers-enum"]

View file

@ -68,8 +68,18 @@ class EndpointEntity(ItemEntity):
def on_change(self):
for callback in self.on_change_callbacks:
callback()
if self.require_restart_on_change:
if self.require_restart:
self.root_item.add_item_require_restart(self)
else:
self.root_item.remove_item_require_restart(self)
self.parent.on_child_change(self)
@property
def require_restart(self):
return self.has_unsaved_changes
def update_default_value(self, value):
value = self._check_update_value(value, "default")
self._default_value = value
@ -115,6 +125,10 @@ class InputEntity(EndpointEntity):
"""Entity's value without metadata."""
return self._current_value
@property
def require_restart(self):
return self._value_is_modified
def _settings_value(self):
return copy.deepcopy(self.value)

View file

@ -17,26 +17,60 @@ WRAPPER_TYPES = ["form", "collapsible-wrap"]
NOT_SET = type("NOT_SET", (), {"__bool__": lambda obj: False})()
OVERRIDE_VERSION = 1
DEFAULT_VALUES_KEY = "__default_values__"
TEMPLATE_METADATA_KEYS = (
DEFAULT_VALUES_KEY,
)
template_key_pattern = re.compile(r"(\{.*?[^{0]*\})")
def _pop_metadata_item(template):
found_idx = None
for idx, item in enumerate(template):
if not isinstance(item, dict):
continue
for key in TEMPLATE_METADATA_KEYS:
if key in item:
found_idx = idx
break
if found_idx is not None:
break
metadata_item = {}
if found_idx is not None:
metadata_item = template.pop(found_idx)
return metadata_item
def _fill_schema_template_data(
template, template_data, required_keys=None, missing_keys=None
template, template_data, skip_paths, required_keys=None, missing_keys=None
):
first = False
if required_keys is None:
first = True
if "skip_paths" in template_data:
skip_paths = template_data["skip_paths"]
if not isinstance(skip_paths, list):
skip_paths = [skip_paths]
# Cleanup skip paths (skip empty values)
skip_paths = [path for path in skip_paths if path]
required_keys = set()
missing_keys = set()
_template = []
default_values = {}
for item in template:
if isinstance(item, dict) and "__default_values__" in item:
default_values = item["__default_values__"]
else:
_template.append(item)
template = _template
# Copy template data as content may change
template = copy.deepcopy(template)
# Get metadata item from template
metadata_item = _pop_metadata_item(template)
# Check for default values for template data
default_values = metadata_item.get(DEFAULT_VALUES_KEY) or {}
for key, value in default_values.items():
if key not in template_data:
@ -46,21 +80,55 @@ def _fill_schema_template_data(
output = template
elif isinstance(template, list):
# Store paths by first part if path
# - None value says that whole key should be skipped
skip_paths_by_first_key = {}
for path in skip_paths:
parts = path.split("/")
key = parts.pop(0)
if key not in skip_paths_by_first_key:
skip_paths_by_first_key[key] = []
value = "/".join(parts)
skip_paths_by_first_key[key].append(value or None)
output = []
for item in template:
output.append(_fill_schema_template_data(
item, template_data, required_keys, missing_keys
))
# Get skip paths for children item
_skip_paths = []
if not isinstance(item, dict):
pass
elif item.get("type") in WRAPPER_TYPES:
_skip_paths = copy.deepcopy(skip_paths)
elif skip_paths_by_first_key:
# Check if this item should be skipped
key = item.get("key")
if key and key in skip_paths_by_first_key:
_skip_paths = skip_paths_by_first_key[key]
# Skip whole item if None is in skip paths value
if None in _skip_paths:
continue
output_item = _fill_schema_template_data(
item, template_data, _skip_paths, required_keys, missing_keys
)
if output_item:
output.append(output_item)
elif isinstance(template, dict):
output = {}
for key, value in template.items():
output[key] = _fill_schema_template_data(
value, template_data, required_keys, missing_keys
value, template_data, skip_paths, required_keys, missing_keys
)
if output.get("type") in WRAPPER_TYPES and not output.get("children"):
return {}
elif isinstance(template, STRING_TYPE):
    # TODO find a much better way to handle filling template data
template = template.replace("{{", "__dbcb__").replace("}}", "__decb__")
for replacement_string in template_key_pattern.findall(template):
key = str(replacement_string[1:-1])
required_keys.add(key)
@ -76,7 +144,8 @@ def _fill_schema_template_data(
else:
# Only replace the key in string
template = template.replace(replacement_string, value)
output = template
output = template.replace("__dbcb__", "{").replace("__decb__", "}")
else:
output = template
@ -105,11 +174,15 @@ def _fill_schema_template(child_data, schema_collection, schema_templates):
if isinstance(template_data, dict):
template_data = [template_data]
skip_paths = child_data.get("skip_paths") or []
if isinstance(skip_paths, STRING_TYPE):
skip_paths = [skip_paths]
output = []
for single_template_data in template_data:
try:
filled_child = _fill_schema_template_data(
template, single_template_data
template, single_template_data, skip_paths
)
except SchemaTemplateMissingKeys as exc:
@ -166,7 +239,7 @@ def _fill_inner_schemas(schema_data, schema_collection, schema_templates):
schema_templates
)
elif child_type == "schema_template":
elif child_type in ("template", "schema_template"):
for filled_child in _fill_schema_template(
child, schema_collection, schema_templates
):
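
The skip_paths handling above groups paths by their first key so each child item can be skipped whole (marked by None) or filtered deeper; a standalone sketch of that grouping (paths borrowed from the schemas below):

skip_paths = [
    "workfile_builder/builder_on_start",
    "workfile_builder/profiles",
    "publish",
]

skip_paths_by_first_key = {}
for path in skip_paths:
    parts = path.split("/")
    key = parts.pop(0)
    if key not in skip_paths_by_first_key:
        skip_paths_by_first_key[key] = []
    value = "/".join(parts)
    # None marks "skip this whole child item"
    skip_paths_by_first_key[key].append(value or None)

# {'workfile_builder': ['builder_on_start', 'profiles'], 'publish': [None]}
print(skip_paths_by_first_key)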

View file

@ -55,6 +55,8 @@ class RootEntity(BaseItemEntity):
def __init__(self, schema_data, reset):
super(RootEntity, self).__init__(schema_data)
self._require_restart_callbacks = []
self._item_ids_require_restart = set()
self._item_initalization()
if reset:
self.reset()
@ -64,6 +66,31 @@ class RootEntity(BaseItemEntity):
"""Current OverrideState."""
return self._override_state
@property
def require_restart(self):
return bool(self._item_ids_require_restart)
def add_require_restart_change_callback(self, callback):
self._require_restart_callbacks.append(callback)
def _on_require_restart_change(self):
for callback in self._require_restart_callbacks:
callback()
def add_item_require_restart(self, item):
was_empty = len(self._item_ids_require_restart) == 0
self._item_ids_require_restart.add(item.id)
if was_empty:
self._on_require_restart_change()
def remove_item_require_restart(self, item):
if item.id not in self._item_ids_require_restart:
return
self._item_ids_require_restart.remove(item.id)
if not self._item_ids_require_restart:
self._on_require_restart_change()
@abstractmethod
def reset(self):
"""Reset values and entities to initial state.

View file

@ -78,6 +78,10 @@
"type": "schema",
"name": "schema_project_hiero"
},
{
"type": "schema",
"name": "schema_project_blender"
},
{
"type": "schema",
"name": "schema_project_aftereffects"

View file

@ -85,6 +85,14 @@
]
}
]
},
{
"type": "schema_template",
"name": "template_workfile_options",
"skip_paths": [
"workfile_builder/builder_on_start",
"workfile_builder/profiles"
]
}
]
}

View file

@ -0,0 +1,17 @@
{
"type": "dict",
"collapsible": true,
"key": "blender",
"label": "Blender",
"is_file": true,
"children": [
{
"type": "schema_template",
"name": "template_workfile_options",
"skip_paths": [
"workfile_builder/builder_on_start",
"workfile_builder/profiles"
]
}
]
}

View file

@ -43,16 +43,6 @@
"label": "Build Workfile"
}
]
},
{
"type": "boolean",
"key": "open_workfile_at_start",
"label": "Open Workfile window at start of a Nuke session"
},
{
"type": "boolean",
"key": "create_initial_workfile",
"label": "Create initial workfile version if none available"
}
]
},
@ -103,8 +93,8 @@
"template_data": []
},
{
"type": "schema",
"name": "schema_workfile_build"
"type": "schema_template",
"name": "template_workfile_options"
},
{
"type": "schema",

View file

@ -52,6 +52,14 @@
]
}
]
},
{
"type": "schema_template",
"name": "template_workfile_options",
"skip_paths": [
"workfile_builder/builder_on_start",
"workfile_builder/profiles"
]
}
]
}

View file

@ -112,6 +112,14 @@
}
]
},
{
"type": "schema_template",
"name": "template_workfile_options",
"skip_paths": [
"workfile_builder/builder_on_start",
"workfile_builder/profiles"
]
},
{
"type": "schema",
"name": "schema_publish_gui_filter"

View file

@ -173,6 +173,15 @@
{
"type": "separator"
},
{
"type": "label",
"label": "Crop input overscan. See the documentation for more information."
},
{
"type": "text",
"key": "overscan_crop",
"label": "Overscan crop"
},
{
"type": "label",
"label": "Width and Height must be both set to higher value than 0 else source resolution is used."
@ -193,6 +202,15 @@
"minimum": 0,
"maximum": 100000
},
{
"type": "label",
"label": "Background color is used only when input have transparency and Alpha is higher than 0."
},
{
"type": "color",
"label": "Background color",
"key": "bg_color"
},
{
"key": "letter_box",
"label": "Letter box",
@ -280,24 +298,14 @@
"minimum": 0
},
{
"type": "schema_template",
"name": "template_rgba_color",
"template_data": [
{
"label": "Font Color",
"name": "font_color"
}
]
"type": "color",
"key": "font_color",
"label": "Font Color"
},
{
"type": "schema_template",
"name": "template_rgba_color",
"template_data": [
{
"label": "Background Color",
"name": "bg_color"
}
]
"type": "color",
"key": "bg_color",
"label": "Background Color"
},
{
"type": "number",

View file

@ -0,0 +1,471 @@
{
"type": "dict",
"collapsible": true,
"key": "ExtractPlayblast",
"label": "Extract Playblast settings",
"children": [
{
"type": "dict",
"key": "capture_preset",
"children": [
{
"type": "dict",
"key": "Codec",
"children": [
{
"type": "label",
"label": "<b>Codec</b>"
},
{
"type": "text",
"key": "compression",
"label": "Compression type"
},
{
"type": "text",
"key": "format",
"label": "Data format"
},
{
"type": "number",
"key": "quality",
"label": "Quality",
"decimal": 0,
"minimum": 0,
"maximum": 100
},
{
"type": "splitter"
}
]
},
{
"type": "dict",
"key": "Display Options",
"children": [
{
"type": "label",
"label": "<b>Display Options</b>"
},
{
"type": "color",
"key": "background",
"label": "Background Color: "
},
{
"type": "color",
"key": "backgroundBottom",
"label": "Background Bottom: "
},
{
"type": "color",
"key": "backgroundTop",
"label": "Background Top: "
},
{
"type": "boolean",
"key": "override_display",
"label": "Override display options"
}
]
},
{
"type": "splitter"
},
{
"type": "dict",
"key": "Generic",
"children": [
{
"type": "label",
"label": "<b>Generic</b>"
},
{
"type": "boolean",
"key": "isolate_view",
"label": " Isolate view"
},
{
"type": "boolean",
"key": "off_screen",
"label": " Off Screen"
}
]
},
{
"type": "dict",
"key": "PanZoom",
"children": [
{
"type": "boolean",
"key": "pan_zoom",
"label": " Pan Zoom"
}
]
},
{
"type": "splitter"
},
{
"type": "dict",
"key": "Renderer",
"children": [
{
"type": "label",
"label": "<b>Renderer</b>"
},
{
"type": "enum",
"key": "rendererName",
"label": "Renderer name",
"enum_items": [
{ "vp2Renderer": "Viewport 2.0" }
]
}
]
},
{
"type": "dict",
"key": "Resolution",
"children": [
{
"type": "splitter"
},
{
"type": "label",
"label": "<b>Resolution</b>"
},
{
"type": "number",
"key": "width",
"label": " Width",
"decimal": 0,
"minimum": 0,
"maximum": 99999
},
{
"type": "number",
"key": "height",
"label": "Height",
"decimal": 0,
"minimum": 0,
"maximum": 99999
},
{
"type": "number",
"key": "percent",
"label": "percent",
"decimal": 1,
"minimum": 0,
"maximum": 200
},
{
"type": "text",
"key": "mode",
"label": "Mode"
}
]
},
{
"type": "splitter"
},
{
"type": "dict",
"collapsible": true,
"key": "Viewport Options",
"label": "Viewport Options",
"children": [
{
"type": "boolean",
"key": "override_viewport_options",
"label": "override_viewport_options"
},
{
"type": "enum",
"key": "displayLights",
"label": "Display Lights",
"enum_items": [
{ "default": "Default Lighting"},
{ "all": "All Lights"},
{ "selected": "Selected Lights"},
{ "flat": "Flat Lighting"},
{ "nolights": "No Lights"}
]
},
{
"type": "number",
"key": "textureMaxResolution",
"label": "Texture Clamp Resolution",
"decimal": 0
},
{
"type": "number",
"key": "multiSample",
"label": "Anti Aliasing Samples",
"decimal": 0,
"minimum": 0,
"maximum": 32
},
{
"type": "boolean",
"key": "shadows",
"label": "Display Shadows"
},
{
"type": "boolean",
"key": "textures",
"label": "Display Textures"
},
{
"type": "boolean",
"key": "twoSidedLighting",
"label": "Two Sided Lighting"
},
{
"type": "boolean",
"key": "ssaoEnable",
"label": "Screen Space Ambient Occlusion"
},
{
"type": "splitter"
},
{
"type": "boolean",
"key": "cameras",
"label": "cameras"
},
{
"type": "boolean",
"key": "clipGhosts",
"label": "clipGhosts"
},
{
"type": "boolean",
"key": "controlVertices",
"label": "controlVertices"
},
{
"type": "boolean",
"key": "deformers",
"label": "deformers"
},
{
"type": "boolean",
"key": "dimensions",
"label": "dimensions"
},
{
"type": "boolean",
"key": "dynamicConstraints",
"label": "dynamicConstraints"
},
{
"type": "boolean",
"key": "dynamics",
"label": "dynamics"
},
{
"type": "boolean",
"key": "fluids",
"label": "fluids"
},
{
"type": "boolean",
"key": "follicles",
"label": "follicles"
},
{
"type": "boolean",
"key": "gpuCacheDisplayFilter",
"label": "gpuCacheDisplayFilter"
},
{
"type": "boolean",
"key": "greasePencils",
"label": "greasePencils"
},
{
"type": "boolean",
"key": "grid",
"label": "grid"
},
{
"type": "boolean",
"key": "hairSystems",
"label": "hairSystems"
},
{
"type": "boolean",
"key": "handles",
"label": "handles"
},
{
"type": "boolean",
"key": "hud",
"label": "hud"
},
{
"type": "boolean",
"key": "hulls",
"label": "hulls"
},
{
"type": "boolean",
"key": "ikHandles",
"label": "ikHandles"
},
{
"type": "boolean",
"key": "imagePlane",
"label": "imagePlane"
},
{
"type": "boolean",
"key": "joints",
"label": "joints"
},
{
"type": "boolean",
"key": "lights",
"label": "lights"
},
{
"type": "boolean",
"key": "locators",
"label": "locators"
},
{
"type": "boolean",
"key": "manipulators",
"label": "manipulators"
},
{
"type": "boolean",
"key": "motionTrails",
"label": "motionTrails"
},
{
"type": "boolean",
"key": "nCloths",
"label": "nCloths"
},
{
"type": "boolean",
"key": "nParticles",
"label": "nParticles"
},
{
"type": "boolean",
"key": "nRigids",
"label": "nRigids"
},
{
"type": "boolean",
"key": "nurbsCurves",
"label": "nurbsCurves"
},
{
"type": "boolean",
"key": "nurbsSurfaces",
"label": "nurbsSurfaces"
},
{
"type": "boolean",
"key": "particleInstancers",
"label": "particleInstancers"
},
{
"type": "boolean",
"key": "pivots",
"label": "pivots"
},
{
"type": "boolean",
"key": "planes",
"label": "planes"
},
{
"type": "boolean",
"key": "pluginShapes",
"label": "pluginShapes"
},
{
"type": "boolean",
"key": "polymeshes",
"label": "polymeshes"
},
{
"type": "boolean",
"key": "strokes",
"label": "strokes"
},
{
"type": "boolean",
"key": "subdivSurfaces",
"label": "subdivSurfaces"
}
]
},
{
"type": "dict",
"collapsible": true,
"key": "Camera Options",
"label": "Camera Options",
"children": [
{
"type": "boolean",
"key": "displayGateMask",
"label": "displayGateMask"
},
{
"type": "boolean",
"key": "displayResolution",
"label": "displayResolution"
},
{
"type": "boolean",
"key": "displayFilmGate",
"label": "displayFilmGate"
},
{
"type": "boolean",
"key": "displayFieldChart",
"label": "displayFieldChart"
},
{
"type": "boolean",
"key": "displaySafeAction",
"label": "displaySafeAction"
},
{
"type": "boolean",
"key": "displaySafeTitle",
"label": "displaySafeTitle"
},
{
"type": "boolean",
"key": "displayFilmPivot",
"label": "displayFilmPivot"
},
{
"type": "boolean",
"key": "displayFilmOrigin",
"label": "displayFilmOrigin"
},
{
"type": "number",
"key": "overscan",
"label": "overscan",
"decimal": 1,
"minimum": 0,
"maximum": 10
}
]
}
]
}
]
}

View file

@ -11,144 +11,74 @@
"label": "Loaded Subsets Outliner Colors",
"children": [
{
"type": "schema_template",
"name": "template_color",
"template_data": [
{
"label": "Model",
"name": "model"
}
]
"type": "color",
"label": "Model:",
"key": "model"
},
{
"type": "schema_template",
"name": "template_color",
"template_data": [
{
"label": "Rig",
"name": "rig"
}
]
"type": "color",
"label": "Rig:",
"key": "rig"
},
{
"type": "schema_template",
"name": "template_color",
"template_data": [
{
"label": "Pointcache",
"name": "pointcache"
}
]
"type": "color",
"label": "Pointcache:",
"key": "pointcache"
},
{
"type": "schema_template",
"name": "template_color",
"template_data": [
{
"label": "Animation",
"name": "animation"
}
]
"type": "color",
"label": "Animation:",
"key": "animation"
},
{
"type": "schema_template",
"name": "template_color",
"template_data": [
{
"label": "Arnold Standin",
"name": "ass"
}
]
"type": "color",
"label": "Arnold Standin:",
"key": "ass"
},
{
"type": "schema_template",
"name": "template_color",
"template_data": [
{
"label": "Camera",
"name": "camera"
}
]
"type": "color",
"label": "Camera:",
"key": "camera"
},
{
"type": "schema_template",
"name": "template_color",
"template_data": [
{
"label": "FBX",
"name": "fbx"
}
]
"type": "color",
"label": "FBX:",
"key": "fbx"
},
{
"type": "schema_template",
"name": "template_color",
"template_data": [
{
"label": "Maya Scene",
"name": "mayaAscii"
}
]
"type": "color",
"label": "Maya Scene:",
"key": "mayaAscii"
},
{
"type": "schema_template",
"name": "template_color",
"template_data": [
{
"label": "Set Dress",
"name": "setdress"
}
]
"type": "color",
"label": "Set Dress:",
"key": "setdress"
},
{
"type": "schema_template",
"name": "template_color",
"template_data": [
{
"label": "Layout",
"name": "layout"
}
]
"type": "color",
"label": "Layout:",
"key": "layout"
},
{
"type": "schema_template",
"name": "template_color",
"template_data": [
{
"label": "VDB Cache",
"name": "vdbcache"
}
]
"type": "color",
"label": "VDB Cache:",
"key": "vdbcache"
},
{
"type": "schema_template",
"name": "template_color",
"template_data": [
{
"label": "Vray Proxy",
"name": "vrayproxy"
}
]
"type": "color",
"label": "Vray Proxy:",
"key": "vrayproxy"
},
{
"type": "schema_template",
"name": "template_color",
"template_data": [
{
"label": "Yeti Cache",
"name": "yeticache"
}
]
"type": "color",
"label": "Yeti Cache:",
"key": "yeticache"
},
{
"type": "schema_template",
"name": "template_color",
"template_data": [
{
"label": "Yeti Rig",
"name": "yetiRig"
}
]
"type": "color",
"label": "Yeti Rig:",
"key": "yetiRig"
}
]
}

View file

@ -297,8 +297,8 @@
"label": "Extractors"
},
{
"type": "schema_template",
"name": "template_maya_capture"
"type": "schema",
"name": "schema_maya_capture"
},
{
"type": "dict",

View file

@ -2,7 +2,7 @@
{
"type": "list-strict",
"key": "{name}",
"label": "{label}:",
"label": "{label}",
"object_types": [
{
"label": "Red",

View file

@ -1,541 +0,0 @@
[
{
"type": "dict",
"collapsible": true,
"key": "ExtractPlayblast",
"label": "Extract Playblast settings",
"children": [
{
"type": "dict",
"key": "capture_preset",
"children": [
{
"type": "dict",
"key": "Codec",
"children": [
{
"type": "label",
"label": "<b>Codec</b>"
},
{
"type": "text",
"key": "compression",
"label": "Compression type"
},
{
"type": "text",
"key": "format",
"label": "Data format"
},
{
"type": "number",
"key": "quality",
"label": "Quality",
"decimal": 0,
"minimum": 0,
"maximum": 100
},
{
"type": "splitter"
}
]
},
{
"type": "dict",
"key": "Display Options",
"children": [
{
"type": "label",
"label": "<b>Display Options</b>"
},
{
"type": "list-strict",
"key": "background",
"label": "Background Color: ",
"object_types": [
{
"label": "Red",
"type": "number",
"minimum": 0,
"maximum": 1,
"decimal": 3
},
{
"label": "Green",
"type": "number",
"minimum": 0,
"maximum": 1,
"decimal": 3
},
{
"label": "Blue",
"type": "number",
"minimum": 0,
"maximum": 1,
"decimal": 3
}
]
},
{
"type": "list-strict",
"key": "backgroundBottom",
"label": "Background Bottom: ",
"object_types": [
{
"label": "Red",
"type": "number",
"minimum": 0,
"maximum": 1,
"decimal": 3
},
{
"label": "Green",
"type": "number",
"minimum": 0,
"maximum": 1,
"decimal": 3
},
{
"label": "Blue",
"type": "number",
"minimum": 0,
"maximum": 1,
"decimal": 3
}
]
},
{
"type": "list-strict",
"key": "backgroundTop",
"label": "Background Top: ",
"object_types": [
{
"label": "Red",
"type": "number",
"minimum": 0,
"maximum": 1,
"decimal": 3
},
{
"label": "Green",
"type": "number",
"minimum": 0,
"maximum": 1,
"decimal": 3
},
{
"label": "Blue",
"type": "number",
"minimum": 0,
"maximum": 1,
"decimal": 3
}
]
},
{
"type": "boolean",
"key": "override_display",
"label": "Override display options"
}
]
},
{
"type": "splitter"
},
{
"type": "dict",
"key": "Generic",
"children": [
{
"type": "label",
"label": "<b>Generic</b>"
},
{
"type": "boolean",
"key": "isolate_view",
"label": " Isolate view"
},
{
"type": "boolean",
"key": "off_screen",
"label": " Off Screen"
}
]
},
{
"type": "dict",
"key": "PanZoom",
"children": [
{
"type": "boolean",
"key": "pan_zoom",
"label": " Pan Zoom"
}
]
},
{
"type": "splitter"
},
{
"type": "dict",
"key": "Renderer",
"children": [
{
"type": "label",
"label": "<b>Renderer</b>"
},
{
"type": "enum",
"key": "rendererName",
"label": "Renderer name",
"enum_items": [
{ "vp2Renderer": "Viewport 2.0" }
]
}
]
},
{
"type": "dict",
"key": "Resolution",
"children": [
{
"type": "splitter"
},
{
"type": "label",
"label": "<b>Resolution</b>"
},
{
"type": "number",
"key": "width",
"label": " Width",
"decimal": 0,
"minimum": 0,
"maximum": 99999
},
{
"type": "number",
"key": "height",
"label": "Height",
"decimal": 0,
"minimum": 0,
"maximum": 99999
},
{
"type": "number",
"key": "percent",
"label": "percent",
"decimal": 1,
"minimum": 0,
"maximum": 200
},
{
"type": "text",
"key": "mode",
"label": "Mode"
}
]
},
{
"type": "splitter"
},
{
"type": "dict",
"collapsible": true,
"key": "Viewport Options",
"label": "Viewport Options",
"children": [
{
"type": "boolean",
"key": "override_viewport_options",
"label": "override_viewport_options"
},
{
"type": "enum",
"key": "displayLights",
"label": "Display Lights",
"enum_items": [
{ "default": "Default Lighting"},
{ "all": "All Lights"},
{ "selected": "Selected Lights"},
{ "flat": "Flat Lighting"},
{ "nolights": "No Lights"}
]
},
{
"type": "number",
"key": "textureMaxResolution",
"label": "Texture Clamp Resolution",
"decimal": 0
},
{
"type": "number",
"key": "multiSample",
"label": "Anti Aliasing Samples",
"decimal": 0,
"minimum": 0,
"maximum": 32
},
{
"type": "boolean",
"key": "shadows",
"label": "Display Shadows"
},
{
"type": "boolean",
"key": "textures",
"label": "Display Textures"
},
{
"type": "boolean",
"key": "twoSidedLighting",
"label": "Two Sided Lighting"
},
{
"type": "boolean",
"key": "ssaoEnable",
"label": "Screen Space Ambient Occlusion"
},
{
"type": "splitter"
},
{
"type": "boolean",
"key": "cameras",
"label": "cameras"
},
{
"type": "boolean",
"key": "clipGhosts",
"label": "clipGhosts"
},
{
"type": "boolean",
"key": "controlVertices",
"label": "controlVertices"
},
{
"type": "boolean",
"key": "deformers",
"label": "deformers"
},
{
"type": "boolean",
"key": "dimensions",
"label": "dimensions"
},
{
"type": "boolean",
"key": "dynamicConstraints",
"label": "dynamicConstraints"
},
{
"type": "boolean",
"key": "dynamics",
"label": "dynamics"
},
{
"type": "boolean",
"key": "fluids",
"label": "fluids"
},
{
"type": "boolean",
"key": "follicles",
"label": "follicles"
},
{
"type": "boolean",
"key": "gpuCacheDisplayFilter",
"label": "gpuCacheDisplayFilter"
},
{
"type": "boolean",
"key": "greasePencils",
"label": "greasePencils"
},
{
"type": "boolean",
"key": "grid",
"label": "grid"
},
{
"type": "boolean",
"key": "hairSystems",
"label": "hairSystems"
},
{
"type": "boolean",
"key": "handles",
"label": "handles"
},
{
"type": "boolean",
"key": "hud",
"label": "hud"
},
{
"type": "boolean",
"key": "hulls",
"label": "hulls"
},
{
"type": "boolean",
"key": "ikHandles",
"label": "ikHandles"
},
{
"type": "boolean",
"key": "imagePlane",
"label": "imagePlane"
},
{
"type": "boolean",
"key": "joints",
"label": "joints"
},
{
"type": "boolean",
"key": "lights",
"label": "lights"
},
{
"type": "boolean",
"key": "locators",
"label": "locators"
},
{
"type": "boolean",
"key": "manipulators",
"label": "manipulators"
},
{
"type": "boolean",
"key": "motionTrails",
"label": "motionTrails"
},
{
"type": "boolean",
"key": "nCloths",
"label": "nCloths"
},
{
"type": "boolean",
"key": "nParticles",
"label": "nParticles"
},
{
"type": "boolean",
"key": "nRigids",
"label": "nRigids"
},
{
"type": "boolean",
"key": "nurbsCurves",
"label": "nurbsCurves"
},
{
"type": "boolean",
"key": "nurbsSurfaces",
"label": "nurbsSurfaces"
},
{
"type": "boolean",
"key": "particleInstancers",
"label": "particleInstancers"
},
{
"type": "boolean",
"key": "pivots",
"label": "pivots"
},
{
"type": "boolean",
"key": "planes",
"label": "planes"
},
{
"type": "boolean",
"key": "pluginShapes",
"label": "pluginShapes"
},
{
"type": "boolean",
"key": "polymeshes",
"label": "polymeshes"
},
{
"type": "boolean",
"key": "strokes",
"label": "strokes"
},
{
"type": "boolean",
"key": "subdivSurfaces",
"label": "subdivSurfaces"
}
]
},
{
"type": "dict",
"collapsible": true,
"key": "Camera Options",
"label": "Camera Options",
"children": [
{
"type": "boolean",
"key": "displayGateMask",
"label": "displayGateMask"
},
{
"type": "boolean",
"key": "displayResolution",
"label": "displayResolution"
},
{
"type": "boolean",
"key": "displayFilmGate",
"label": "displayFilmGate"
},
{
"type": "boolean",
"key": "displayFieldChart",
"label": "displayFieldChart"
},
{
"type": "boolean",
"key": "displaySafeAction",
"label": "displaySafeAction"
},
{
"type": "boolean",
"key": "displaySafeTitle",
"label": "displaySafeTitle"
},
{
"type": "boolean",
"key": "displayFilmPivot",
"label": "displayFilmPivot"
},
{
"type": "boolean",
"key": "displayFilmOrigin",
"label": "displayFilmOrigin"
},
{
"type": "number",
"key": "overscan",
"label": "overscan",
"decimal": 1,
"minimum": 0,
"maximum": 10
}
]
}
]
}
]
}
]

View file

@ -1,33 +0,0 @@
[
{
"type": "list-strict",
"key": "{name}",
"label": "{label}",
"object_types": [
{
"label": "R",
"type": "number",
"minimum": 0,
"maximum": 255
},
{
"label": "G",
"type": "number",
"minimum": 0,
"maximum": 255
},
{
"label": "B",
"type": "number",
"minimum": 0,
"maximum": 255
},
{
"label": "A",
"type": "number",
"minimum": 0,
"maximum": 255
}
]
}
]

View file

@ -0,0 +1,23 @@
[
{
"type": "dict",
"collapsible": true,
"key": "workfile_builder",
"label": "Workfile Builder",
"children": [
{
"type": "boolean",
"key": "create_first_version",
"label": "Create first workfile",
"default": false
},
{
"type": "path",
"key": "template_path",
"label": "First workfile template",
"multiplatform": true,
"multipath": false
}
]
}
]

Some files were not shown because too many files have changed in this diff.