Merge branch 'develop' into feature/895-add-option-to-define-paht-to-workfile-template

Jakub Jezek 2021-05-27 12:13:16 +02:00
commit d2ec91ec0e
59 changed files with 2804 additions and 3167 deletions

View file

@ -0,0 +1,34 @@
import os
from openpype.lib import PreLaunchHook
class LaunchWithTerminal(PreLaunchHook):
"""Mac specific pre arguments for application.
Mac applications should be launched using "open" argument which is internal
callbacks to open executable. We also add argument "-a" to tell it's
application open. This is used only for executables ending with ".app". It
is expected that these executables lead to app packages.
"""
order = 1000
platforms = ["darwin"]
def execute(self):
executable = str(self.launch_context.executable)
# Skip executables that don't end with ".app" or are not folders
if not executable.endswith(".app") or not os.path.isdir(executable):
return
# Check if the first argument matches the executable path
# - A few applications are not executed directly but through the OpenPype
# process (Photoshop, AfterEffects, Harmony, ...). These should not
# use `open`.
if self.launch_context.launch_args[0] != executable:
return
# Tell `open` to pass arguments if there are any
if len(self.launch_context.launch_args) > 1:
self.launch_context.launch_args.insert(1, "--args")
# Prepend open arguments
self.launch_context.launch_args.insert(0, ["open", "-a"])
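For illustration, a minimal sketch of the resulting rewrite (hypothetical executable and argument; the nested ["open", "-a"] list is assumed to be flattened by the launch context before the process is spawned):

launch_args = ["/Applications/Blender.app", "--background"]
launch_args.insert(1, "--args")        # tell "open" to forward the app arguments
launch_args.insert(0, ["open", "-a"])  # prepend the "open -a" invocation
# -> [["open", "-a"], "/Applications/Blender.app", "--args", "--background"]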

View file

@ -1,25 +0,0 @@
import bpy
from avalon import api, blender
import openpype.hosts.blender.api.plugin
class CreateSetDress(openpype.hosts.blender.api.plugin.Creator):
"""A grouped package of loaded content"""
name = "setdressMain"
label = "Set Dress"
family = "setdress"
icon = "cubes"
defaults = ["Main", "Anim"]
def process(self):
asset = self.data["asset"]
subset = self.data["subset"]
name = openpype.hosts.blender.api.plugin.asset_name(asset, subset)
collection = bpy.data.collections.new(name=name)
bpy.context.scene.collection.children.link(collection)
self.data['task'] = api.Session.get('AVALON_TASK')
blender.lib.imprint(collection, self.data)
return collection

View file

@ -25,9 +25,6 @@ class BlendLayoutLoader(plugin.AssetLoader):
icon = "code-fork"
color = "orange"
animation_creator_name = "CreateAnimation"
setdress_creator_name = "CreateSetDress"
def _remove(self, objects, obj_container):
for obj in list(objects):
if obj.type == 'ARMATURE':
@ -293,7 +290,6 @@ class UnrealLayoutLoader(plugin.AssetLoader):
color = "orange"
animation_creator_name = "CreateAnimation"
setdress_creator_name = "CreateSetDress"
def _remove_objects(self, objects):
for obj in list(objects):
@ -383,7 +379,7 @@ class UnrealLayoutLoader(plugin.AssetLoader):
def _process(
self, libpath, layout_container, container_name, representation,
actions, parent
actions, parent_collection
):
with open(libpath, "r") as fp:
data = json.load(fp)
@ -392,6 +388,11 @@ class UnrealLayoutLoader(plugin.AssetLoader):
layout_collection = bpy.data.collections.new(container_name)
scene.collection.children.link(layout_collection)
parent = parent_collection
if parent is None:
parent = scene.collection
all_loaders = api.discover(api.Loader)
avalon_container = bpy.data.collections.get(
@ -516,23 +517,9 @@ class UnrealLayoutLoader(plugin.AssetLoader):
container_metadata["libpath"] = libpath
container_metadata["lib_container"] = lib_container
# Create a setdress subset to contain all the animation for all
# the rigs in the layout
creator_plugin = get_creator_by_name(self.setdress_creator_name)
if not creator_plugin:
raise ValueError("Creator plugin \"{}\" was not found.".format(
self.setdress_creator_name
))
parent = api.create(
creator_plugin,
name="animation",
asset=api.Session["AVALON_ASSET"],
options={"useSelection": True},
data={"dependencies": str(context["representation"]["_id"])})
layout_collection = self._process(
libpath, layout_container, container_name,
str(context["representation"]["_id"]), None, parent)
str(context["representation"]["_id"]), None, None)
container_metadata["obj_container"] = layout_collection

View file

@ -107,6 +107,9 @@ class BlendRigLoader(plugin.AssetLoader):
if action is not None:
local_obj.animation_data.action = action
elif local_obj.animation_data.action is not None:
plugin.prepare_data(
local_obj.animation_data.action, collection_name)
# Link the drivers to the local object
if local_obj.data.animation_data:

View file

@ -1,61 +0,0 @@
import os
import json
import openpype.api
import pyblish.api
import bpy
class ExtractSetDress(openpype.api.Extractor):
"""Extract setdress."""
label = "Extract SetDress"
hosts = ["blender"]
families = ["setdress"]
optional = True
order = pyblish.api.ExtractorOrder + 0.1
def process(self, instance):
stagingdir = self.staging_dir(instance)
json_data = []
for i in instance.context:
collection = i.data.get("name")
container = None
for obj in bpy.data.collections[collection].objects:
if obj.type == "ARMATURE":
container_name = obj.get("avalon").get("container_name")
container = bpy.data.collections[container_name]
if container:
json_dict = {
"subset": i.data.get("subset"),
"container": container.name,
}
json_dict["instance_name"] = container.get("avalon").get(
"instance_name"
)
json_data.append(json_dict)
if "representations" not in instance.data:
instance.data["representations"] = []
json_filename = f"{instance.name}.json"
json_path = os.path.join(stagingdir, json_filename)
with open(json_path, "w+") as file:
json.dump(json_data, fp=file, indent=2)
json_representation = {
"name": "json",
"ext": "json",
"files": json_filename,
"stagingDir": stagingdir,
}
instance.data["representations"].append(json_representation)
self.log.info(
"Extracted instance '{}' to: {}".format(instance.name,
json_representation)
)

View file

@ -1,4 +1,5 @@
import os
import json
import openpype.api
@ -121,6 +122,25 @@ class ExtractAnimationFBX(openpype.api.Extractor):
pair[1].user_clear()
bpy.data.actions.remove(pair[1])
json_filename = f"{instance.name}.json"
json_path = os.path.join(stagingdir, json_filename)
json_dict = {}
collection = instance.data.get("name")
container = None
for obj in bpy.data.collections[collection].objects:
if obj.type == "ARMATURE":
container_name = obj.get("avalon").get("container_name")
container = bpy.data.collections[container_name]
if container:
json_dict = {
"instance_name": container.get("avalon").get("instance_name")
}
with open(json_path, "w+") as file:
json.dump(json_dict, fp=file, indent=2)
if "representations" not in instance.data:
instance.data["representations"] = []
@ -130,7 +150,15 @@ class ExtractAnimationFBX(openpype.api.Extractor):
'files': fbx_filename,
"stagingDir": stagingdir,
}
json_representation = {
'name': 'json',
'ext': 'json',
'files': json_filename,
"stagingDir": stagingdir,
}
instance.data["representations"].append(fbx_representation)
instance.data["representations"].append(json_representation)
self.log.info("Extracted instance '{}' to: {}".format(
instance.name, fbx_representation))

View file

@ -214,7 +214,9 @@ def get_track_items(
# add all if no track_type is defined
return_list.append(track_item)
return return_list
# return output list but make sure all items are TrackItems
return [_i for _i in return_list
if type(_i) == hiero.core.TrackItem]
def get_track_item_pype_tag(track_item):

View file

@ -52,10 +52,11 @@ class ExtractClipEffects(openpype.api.Extractor):
instance.data["representations"] = list()
transfer_data = [
"handleStart", "handleEnd", "sourceIn", "sourceOut",
"frameStart", "frameEnd", "sourceInH", "sourceOutH",
"clipIn", "clipOut", "clipInH", "clipOutH", "asset", "track",
"version"
"handleStart", "handleEnd",
"sourceStart", "sourceStartH", "sourceEnd", "sourceEndH",
"frameStart", "frameEnd",
"clipIn", "clipOut", "clipInH", "clipOutH",
"asset", "version"
]
# pass data to version

View file

@ -5,7 +5,7 @@ import pyblish.api
class PreCollectClipEffects(pyblish.api.InstancePlugin):
"""Collect soft effects instances."""
order = pyblish.api.CollectorOrder - 0.508
order = pyblish.api.CollectorOrder - 0.579
label = "Pre-collect Clip Effects Instances"
families = ["clip"]
@ -24,7 +24,8 @@ class PreCollectClipEffects(pyblish.api.InstancePlugin):
self.clip_in_h = self.clip_in - self.handle_start
self.clip_out_h = self.clip_out + self.handle_end
track = instance.data["trackItem"]
track_item = instance.data["item"]
track = track_item.parent()
track_index = track.trackIndex()
tracks_effect_items = instance.context.data.get("tracksEffectItems")
clip_effect_items = instance.data.get("clipEffectItems")
@ -112,7 +113,12 @@ class PreCollectClipEffects(pyblish.api.InstancePlugin):
node = sitem.node()
node_serialized = self.node_serialisation(node)
node_name = sitem.name()
node_class = re.sub(r"\d+", "", node_name)
if "_" in node_name:
node_class = re.sub(r"(?:_)[_0-9]+", "", node_name) # more numbers
else:
node_class = re.sub(r"\d+", "", node_name) # one number
# collect timelineIn/Out
effect_t_in = int(sitem.timelineIn())
effect_t_out = int(sitem.timelineOut())
@ -121,6 +127,7 @@ class PreCollectClipEffects(pyblish.api.InstancePlugin):
return
self.log.debug("node_name: `{}`".format(node_name))
self.log.debug("node_class: `{}`".format(node_class))
return {node_name: {
"class": node_class,

View file

@ -2,6 +2,9 @@ import pyblish
import openpype
from openpype.hosts.hiero import api as phiero
from openpype.hosts.hiero.otio import hiero_export
import hiero
from compiler.ast import flatten
# # developer reload modules
from pprint import pformat
@ -14,18 +17,40 @@ class PrecollectInstances(pyblish.api.ContextPlugin):
label = "Precollect Instances"
hosts = ["hiero"]
audio_track_items = []
def process(self, context):
otio_timeline = context.data["otioTimeline"]
self.otio_timeline = context.data["otioTimeline"]
selected_timeline_items = phiero.get_track_items(
selected=True, check_enabled=True, check_tagged=True)
selected=True, check_tagged=True, check_enabled=True)
# only return enabled track items
if not selected_timeline_items:
selected_timeline_items = phiero.get_track_items(
check_enabled=True, check_tagged=True)
self.log.info(
"Processing enabled track items: {}".format(
selected_timeline_items))
# add all tracks' subtrack effect items to the context
all_tracks = hiero.ui.activeSequence().videoTracks()
tracks_effect_items = self.collect_sub_track_items(all_tracks)
context.data["tracksEffectItems"] = tracks_effect_items
# process all selected timeline track items
for track_item in selected_timeline_items:
data = {}
clip_name = track_item.name()
source_clip = track_item.source()
# get clip's subtracks and annotations
annotations = self.clip_annotations(source_clip)
subtracks = self.clip_subtrack(track_item)
self.log.debug("Annotations: {}".format(annotations))
self.log.debug(">> Subtracks: {}".format(subtracks))
# get openpype tag data
tag_data = phiero.get_track_item_pype_data(track_item)
@ -76,12 +101,15 @@ class PrecollectInstances(pyblish.api.ContextPlugin):
"item": track_item,
"families": families,
"publish": tag_data["publish"],
"fps": context.data["fps"]
"fps": context.data["fps"],
# clip's effect
"clipEffectItems": subtracks,
"clipAnnotations": annotations
})
# otio clip data
otio_data = self.get_otio_clip_instance_data(
otio_timeline, track_item) or {}
otio_data = self.get_otio_clip_instance_data(track_item) or {}
self.log.debug("__ otio_data: {}".format(pformat(otio_data)))
data.update(otio_data)
self.log.debug("__ data: {}".format(pformat(data)))
@ -185,6 +213,10 @@ class PrecollectInstances(pyblish.api.ContextPlugin):
item = data.get("item")
clip_name = item.name()
# test if any audio clips overlap this item
if not self.test_any_audio(item):
return
asset = data["asset"]
subset = "audioMain"
@ -215,7 +247,28 @@ class PrecollectInstances(pyblish.api.ContextPlugin):
self.log.debug(
"_ instance.data: {}".format(pformat(instance.data)))
def get_otio_clip_instance_data(self, otio_timeline, track_item):
def test_any_audio(self, track_item):
# collect all otio audio clips into the class variable
if not self.audio_track_items:
for otio_clip in self.otio_timeline.each_clip():
if otio_clip.parent().kind != "Audio":
continue
self.audio_track_items.append(otio_clip)
# get track item timeline range
timeline_range = self.create_otio_time_range_from_timeline_item_data(
track_item)
# loop through audio track items and search for overlapping clips
for otio_audio in self.audio_track_items:
parent_range = otio_audio.range_in_parent()
# if any overlapping clip is found, return True
if openpype.lib.is_overlapping_otio_ranges(
parent_range, timeline_range, strict=False):
return True
def get_otio_clip_instance_data(self, track_item):
"""
Return otio objects for timeline, track and clip
@ -231,7 +284,7 @@ class PrecollectInstances(pyblish.api.ContextPlugin):
ti_track_name = track_item.parent().name()
timeline_range = self.create_otio_time_range_from_timeline_item_data(
track_item)
for otio_clip in otio_timeline.each_clip():
for otio_clip in self.otio_timeline.each_clip():
track_name = otio_clip.parent().name
parent_range = otio_clip.range_in_parent()
if ti_track_name not in track_name:
@ -258,3 +311,76 @@ class PrecollectInstances(pyblish.api.ContextPlugin):
return hiero_export.create_otio_time_range(
frame_start, frame_duration, fps)
@staticmethod
def collect_sub_track_items(tracks):
"""
Returns dictionary with track index as key and list of subtracks
"""
# collect all subtrack items
sub_track_items = {}
for track in tracks:
items = track.items()
# skip tracks with clips on them - we need effect-only tracks
if items:
continue
# skip all disabled tracks
if not track.isEnabled():
continue
track_index = track.trackIndex()
_sub_track_items = flatten(track.subTrackItems())
# continue only if any subtrack items are collected
if len(_sub_track_items) < 1:
continue
enabled_sti = []
# loop all found subtrack items and check if they are enabled
for _sti in _sub_track_items:
# skip disabled subtrack items
if not _sti.isEnabled():
continue
if isinstance(_sti, hiero.core.Annotation):
continue
# collect the subtrack item
enabled_sti.append(_sti)
# continue only if any subtrack items are collected
if len(enabled_sti) < 1:
continue
# add the collected subtrack items to the dict
sub_track_items[track_index] = enabled_sti
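# e.g. resulting shape (hypothetical): {2: [<SubTrackItem>], 4: [<SubTrackItem>, ...]}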
return sub_track_items
@staticmethod
def clip_annotations(clip):
"""
Returns list of Clip's hiero.core.Annotation
"""
annotations = []
subTrackItems = flatten(clip.subTrackItems())
annotations += [item for item in subTrackItems if isinstance(
item, hiero.core.Annotation)]
return annotations
@staticmethod
def clip_subtrack(clip):
"""
Returns list of Clip's hiero.core.SubTrackItem
"""
subtracks = []
subTrackItems = flatten(clip.parent().subTrackItems())
for item in subTrackItems:
# skip all annotations
if isinstance(item, hiero.core.Annotation):
continue
# skip all disabled items
if not item.isEnabled():
continue
subtracks.append(item)
return subtracks

View file

@ -75,10 +75,26 @@ class PrecollectWorkfile(pyblish.api.ContextPlugin):
"activeProject": project,
"otioTimeline": otio_timeline,
"currentFile": curent_file,
"fps": fps,
"colorspace": self.get_colorspace(project),
"fps": fps
}
context.data.update(context_data)
self.log.info("Creating instance: {}".format(instance))
self.log.debug("__ instance.data: {}".format(pformat(instance.data)))
self.log.debug("__ context_data: {}".format(pformat(context_data)))
def get_colorspace(self, project):
# get workfile's colorspace properties
return {
"useOCIOEnvironmentOverride": project.useOCIOEnvironmentOverride(),
"lutSetting16Bit": project.lutSetting16Bit(),
"lutSetting8Bit": project.lutSetting8Bit(),
"lutSettingFloat": project.lutSettingFloat(),
"lutSettingLog": project.lutSettingLog(),
"lutSettingViewer": project.lutSettingViewer(),
"lutSettingWorkingSpace": project.lutSettingWorkingSpace(),
"lutUseOCIOForExport": project.lutUseOCIOForExport(),
"ocioConfigName": project.ocioConfigName(),
"ocioConfigPath": project.ocioConfigPath()
}

View file

@ -1060,7 +1060,7 @@ class WorkfileSettings(object):
# replace reset resolution from avalon core to pype's
self.reset_frame_range_handles()
# add colorspace menu item
# self.set_colorspace()
self.set_colorspace()
def set_favorites(self):
work_dir = os.getenv("AVALON_WORKDIR")

View file

@ -4,18 +4,19 @@ import json
from collections import OrderedDict
class LoadLuts(api.Loader):
class LoadEffects(api.Loader):
"""Loading colorspace soft effect exported from nukestudio"""
representations = ["lutJson"]
families = ["lut"]
representations = ["effectJson"]
families = ["effect"]
label = "Load Luts - nodes"
label = "Load Effects - nodes"
order = 0
icon = "cc"
color = style.colors.light
ignore_attr = ["useLifetime"]
def load(self, context, name, namespace, data):
"""
Loading function to get the soft effects to particular read node
@ -66,15 +67,15 @@ class LoadLuts(api.Loader):
for key, value in json.load(f).iteritems()}
# get correct order of nodes by positions on track and subtrack
nodes_order = self.reorder_nodes(json_f["effects"])
nodes_order = self.reorder_nodes(json_f)
# adding nodes to node graph
# just in case we are in a group, let's jump out of it
nuke.endGroup()
GN = nuke.createNode("Group")
GN["name"].setValue(object_name)
GN = nuke.createNode(
"Group",
"name {}_1".format(object_name))
# adding content to the group node
with GN:
@ -186,7 +187,7 @@ class LoadLuts(api.Loader):
for key, value in json.load(f).iteritems()}
# get correct order of nodes by positions on track and subtrack
nodes_order = self.reorder_nodes(json_f["effects"])
nodes_order = self.reorder_nodes(json_f)
# adding nodes to node graph
# just in case we are in a group, let's jump out of it
@ -266,7 +267,11 @@ class LoadLuts(api.Loader):
None: if nothing found
"""
search_name = "{0}_{1}".format(asset, subset)
node = [n for n in nuke.allNodes() if search_name in n["name"].value()]
node = [
n for n in nuke.allNodes(filter="Read")
if search_name in n["file"].value()
]
if len(node) > 0:
rn = node[0]
else:
@ -286,8 +291,10 @@ class LoadLuts(api.Loader):
def reorder_nodes(self, data):
new_order = OrderedDict()
trackNums = [v["trackIndex"] for k, v in data.items()]
subTrackNums = [v["subTrackIndex"] for k, v in data.items()]
trackNums = [v["trackIndex"] for k, v in data.items()
if isinstance(v, dict)]
subTrackNums = [v["subTrackIndex"] for k, v in data.items()
if isinstance(v, dict)]
for trackIndex in range(
min(trackNums), max(trackNums) + 1):
@ -300,6 +307,7 @@ class LoadLuts(api.Loader):
def get_item(self, data, trackIndex, subTrackIndex):
return {key: val for key, val in data.items()
if isinstance(val, dict)
if subTrackIndex == val["subTrackIndex"]
if trackIndex == val["trackIndex"]}
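For illustration, a minimal sketch of the input reorder_nodes expects (hypothetical effect names; the real payload is the loaded effectJson data):

json_f = {
"assignTo": "plateMain",  # hypothetical metadata key, skipped by the isinstance guard
"Transform1": {"trackIndex": 1, "subTrackIndex": 0},
"Grade1": {"trackIndex": 2, "subTrackIndex": 0},
}
# reorder_nodes(json_f) iterates trackIndex, then subTrackIndex, ascending:
# -> OrderedDict([("Transform1", {...}), ("Grade1", {...})])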

View file

@ -5,13 +5,13 @@ from collections import OrderedDict
from openpype.hosts.nuke.api import lib
class LoadLutsInputProcess(api.Loader):
class LoadEffectsInputProcess(api.Loader):
"""Loading colorspace soft effect exported from nukestudio"""
representations = ["lutJson"]
families = ["lut"]
representations = ["effectJson"]
families = ["effect"]
label = "Load Luts - Input Process"
label = "Load Effects - Input Process"
order = 0
icon = "eye"
color = style.colors.alert
@ -67,15 +67,15 @@ class LoadLutsInputProcess(api.Loader):
for key, value in json.load(f).iteritems()}
# get correct order of nodes by positions on track and subtrack
nodes_order = self.reorder_nodes(json_f["effects"])
nodes_order = self.reorder_nodes(json_f)
# adding nodes to node graph
# just in case we are in a group, let's jump out of it
nuke.endGroup()
GN = nuke.createNode("Group")
GN["name"].setValue(object_name)
GN = nuke.createNode(
"Group",
"name {}_1".format(object_name))
# adding content to the group node
with GN:
@ -190,7 +190,7 @@ class LoadLutsInputProcess(api.Loader):
for key, value in json.load(f).iteritems()}
# get correct order of nodes by positions on track and subtrack
nodes_order = self.reorder_nodes(json_f["effects"])
nodes_order = self.reorder_nodes(json_f)
# adding nodes to node graph
# just in case we are in a group, let's jump out of it
@ -304,8 +304,10 @@ class LoadLutsInputProcess(api.Loader):
def reorder_nodes(self, data):
new_order = OrderedDict()
trackNums = [v["trackIndex"] for k, v in data.items()]
subTrackNums = [v["subTrackIndex"] for k, v in data.items()]
trackNums = [v["trackIndex"] for k, v in data.items()
if isinstance(v, dict)]
subTrackNums = [v["subTrackIndex"] for k, v in data.items()
if isinstance(v, dict)]
for trackIndex in range(
min(trackNums), max(trackNums) + 1):
@ -318,6 +320,7 @@ class LoadLutsInputProcess(api.Loader):
def get_item(self, data, trackIndex, subTrackIndex):
return {key: val for key, val in data.items()
if isinstance(val, dict)
if subTrackIndex == val["subTrackIndex"]
if trackIndex == val["trackIndex"]}

View file

@ -1,6 +1,7 @@
from openpype.hosts.nuke.api.lib import (
on_script_load,
check_inventory_versions
check_inventory_versions,
WorkfileSettings
)
import nuke
@ -9,8 +10,14 @@ from openpype.api import Logger
log = Logger().get_logger(__name__)
nuke.addOnScriptSave(on_script_load)
# fix ffmpeg settings on script
nuke.addOnScriptLoad(on_script_load)
# set checker for last versions on loaded containers
nuke.addOnScriptLoad(check_inventory_versions)
nuke.addOnScriptSave(check_inventory_versions)
# # set apply all workfile settings on script load and save
nuke.addOnScriptLoad(WorkfileSettings().set_context_settings)
log.info('Automatic syncing of write file knob to script version')

View file

@ -19,6 +19,10 @@ CREATE_PATH = os.path.join(PLUGINS_DIR, "create")
def on_instance_toggle(instance, old_value, new_value):
# Review may not have a real instance in workfile metadata
if not instance.data.get("uuid"):
return
instance_id = instance.data["uuid"]
found_idx = None
current_instances = pipeline.list_instances()

View file

@ -1,19 +0,0 @@
from avalon.tvpaint import pipeline
from openpype.hosts.tvpaint.api import plugin
class CreateReview(plugin.Creator):
"""Review for global review of all layers."""
name = "review"
label = "Review"
family = "review"
icon = "cube"
defaults = ["Main"]
def process(self):
instances = pipeline.list_instances()
for instance in instances:
if instance["family"] == self.family:
self.log.info("Review family is already Created.")
return
super(CreateReview, self).process()

View file

@ -17,6 +17,20 @@ class CollectInstances(pyblish.api.ContextPlugin):
json.dumps(workfile_instances, indent=4)
))
# Backwards compatibility for workfiles that already have review
# instance in metadata.
review_instance_exist = False
for instance_data in workfile_instances:
if instance_data["family"] == "review":
review_instance_exist = True
break
# Fake review instance if review was not found in metadata families
if not review_instance_exist:
workfile_instances.append(
self._create_review_instance_data(context)
)
for instance_data in workfile_instances:
instance_data["fps"] = context.data["sceneFps"]
@ -90,6 +104,16 @@ class CollectInstances(pyblish.api.ContextPlugin):
instance, json.dumps(instance.data, indent=4)
))
def _create_review_instance_data(self, context):
"""Fake review instance data."""
return {
"family": "review",
"asset": context.data["workfile_context"]["asset"],
# Dummy subset name
"subset": "reviewMain"
}
def create_render_layer_instance(self, context, instance_data):
name = instance_data["name"]
# Change label

View file

@ -1,4 +1,5 @@
import os
import json
from avalon import api, pipeline
from avalon.unreal import lib
@ -61,10 +62,16 @@ class AnimationFBXLoader(api.Loader):
task = unreal.AssetImportTask()
task.options = unreal.FbxImportUI()
# If there are no options, the process cannot be automated
if options:
libpath = self.fname.replace("fbx", "json")
with open(libpath, "r") as fp:
data = json.load(fp)
instance_name = data.get("instance_name")
if instance_name:
automated = True
actor_name = 'PersistentLevel.' + options.get('instance_name')
actor_name = 'PersistentLevel.' + instance_name
actor = unreal.EditorLevelLibrary.get_actor_reference(actor_name)
skeleton = actor.skeletal_mesh_component.skeletal_mesh.skeleton
task.options.set_editor_property('skeleton', skeleton)
@ -81,16 +88,31 @@ class AnimationFBXLoader(api.Loader):
# set import options here
task.options.set_editor_property(
'automated_import_should_detect_type', True)
'automated_import_should_detect_type', False)
task.options.set_editor_property(
'original_import_type', unreal.FBXImportType.FBXIT_ANIMATION)
'original_import_type', unreal.FBXImportType.FBXIT_SKELETAL_MESH)
task.options.set_editor_property(
'mesh_type_to_import', unreal.FBXImportType.FBXIT_ANIMATION)
task.options.set_editor_property('import_mesh', False)
task.options.set_editor_property('import_animations', True)
task.options.set_editor_property('override_full_name', True)
task.options.skeletal_mesh_import_data.set_editor_property(
'import_content_type',
unreal.FBXImportContentType.FBXICT_SKINNING_WEIGHTS
task.options.anim_sequence_import_data.set_editor_property(
'animation_length',
unreal.FBXAnimationLengthImportType.FBXALIT_EXPORTED_TIME
)
task.options.anim_sequence_import_data.set_editor_property(
'import_meshes_in_bone_hierarchy', False)
task.options.anim_sequence_import_data.set_editor_property(
'use_default_sample_rate', True)
task.options.anim_sequence_import_data.set_editor_property(
'import_custom_attribute', True)
task.options.anim_sequence_import_data.set_editor_property(
'import_bone_tracks', True)
task.options.anim_sequence_import_data.set_editor_property(
'remove_redundant_keys', True)
task.options.anim_sequence_import_data.set_editor_property(
'convert_scene', True)
unreal.AssetToolsHelpers.get_asset_tools().import_asset_tasks([task])

View file

@ -1,127 +0,0 @@
import json
from avalon import api
import unreal
class AnimationCollectionLoader(api.Loader):
"""Load Unreal SkeletalMesh from FBX"""
families = ["setdress"]
representations = ["json"]
label = "Load Animation Collection"
icon = "cube"
color = "orange"
def load(self, context, name, namespace, options):
from avalon import api, pipeline
from avalon.unreal import lib
from avalon.unreal import pipeline as unreal_pipeline
import unreal
# Create directory for asset and avalon container
root = "/Game/Avalon/Assets"
asset = context.get('asset').get('name')
suffix = "_CON"
tools = unreal.AssetToolsHelpers().get_asset_tools()
asset_dir, container_name = tools.create_unique_asset_name(
"{}/{}".format(root, asset), suffix="")
container_name += suffix
unreal.EditorAssetLibrary.make_directory(asset_dir)
libpath = self.fname
with open(libpath, "r") as fp:
data = json.load(fp)
all_loaders = api.discover(api.Loader)
for element in data:
reference = element.get('_id')
loaders = api.loaders_from_representation(all_loaders, reference)
loader = None
for l in loaders:
if l.__name__ == "AnimationFBXLoader":
loader = l
break
if not loader:
continue
instance_name = element.get('instance_name')
api.load(
loader,
reference,
namespace=instance_name,
options=element
)
# Create Asset Container
lib.create_avalon_container(
container=container_name, path=asset_dir)
data = {
"schema": "openpype:container-2.0",
"id": pipeline.AVALON_CONTAINER_ID,
"asset": asset,
"namespace": asset_dir,
"container_name": container_name,
"loader": str(self.__class__.__name__),
"representation": context["representation"]["_id"],
"parent": context["representation"]["parent"],
"family": context["representation"]["context"]["family"]
}
unreal_pipeline.imprint(
"{}/{}".format(asset_dir, container_name), data)
asset_content = unreal.EditorAssetLibrary.list_assets(
asset_dir, recursive=True, include_folder=True
)
return asset_content
def update(self, container, representation):
from avalon import api, io
from avalon.unreal import pipeline
source_path = api.get_representation_path(representation)
with open(source_path, "r") as fp:
data = json.load(fp)
animation_containers = [
i for i in pipeline.ls() if
i.get('asset') == container.get('asset') and
i.get('family') == 'animation']
for element in data:
new_version = io.find_one({"_id": io.ObjectId(element.get('_id'))})
new_version_number = new_version.get('context').get('version')
anim_container = None
for i in animation_containers:
if i.get('container_name') == (element.get('subset') + "_CON"):
anim_container = i
break
if not anim_container:
continue
api.update(anim_container, new_version_number)
container_path = "{}/{}".format(container["namespace"],
container["objectName"])
# update metadata
pipeline.imprint(
container_path,
{
"representation": str(representation["_id"]),
"parent": str(representation["parent"])
})
def remove(self, container):
unreal.EditorAssetLibrary.delete_directory(container["namespace"])

View file

@ -440,7 +440,20 @@ class EnvironmentTool:
class ApplicationExecutable:
"""Representation of executable loaded from settings."""
def __init__(self, executable):
# On MacOS, if the executable path does not exist, check whether it
# exists with an `.app` extension
# - it is common that the path leads to "/Applications/Blender" while the
# real path is "/Applications/Blender.app"
if (
platform.system().lower() == "darwin"
and not os.path.exists(executable)
):
_executable = executable + ".app"
if os.path.exists(_executable):
executable = _executable
self.executable_path = executable
def __str__(self):
@ -1177,17 +1190,23 @@ def prepare_context_environments(data):
try:
workdir = get_workdir_with_workdir_data(workdir_data, anatomy)
if not os.path.exists(workdir):
log.debug(
"Creating workdir folder: \"{}\"".format(workdir)
)
os.makedirs(workdir)
except Exception as exc:
raise ApplicationLaunchFailed(
"Error in anatomy.format: {}".format(str(exc))
)
if not os.path.exists(workdir):
log.debug(
"Creating workdir folder: \"{}\"".format(workdir)
)
try:
os.makedirs(workdir)
except Exception as exc:
raise ApplicationLaunchFailed(
"Couldn't create workdir because: {}".format(str(exc))
)
context_env = {
"AVALON_PROJECT": project_doc["name"],
"AVALON_ASSET": asset_doc["name"],

openpype/lib/delivery.py (new file, 303 lines added)
View file

@ -0,0 +1,303 @@
"""Functions useful for delivery action or loader"""
import os
import shutil
import clique
import collections
def collect_frames(files):
"""
Returns a dict mapping each source path to its frame, if part of a sequence.
Uses clique as the most precise solution.
Args:
files(list): list of source paths
Returns:
(dict): {'/asset/subset_v001.0001.png': '0001', ....}
"""
collections, remainder = clique.assemble(files)
sources_and_frames = {}
if collections:
for collection in collections:
src_head = collection.head
src_tail = collection.tail
for index in collection.indexes:
src_frame = collection.format("{padding}") % index
src_file_name = "{}{}{}".format(src_head, src_frame,
src_tail)
sources_and_frames[src_file_name] = src_frame
else:
sources_and_frames[remainder.pop()] = None
return sources_and_frames
def sizeof_fmt(num, suffix='B'):
"""Returns formatted string with size in appropriate unit"""
for unit in ['', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi']:
if abs(num) < 1024.0:
return "%3.1f%s%s" % (num, unit, suffix)
num /= 1024.0
return "%.1f%s%s" % (num, 'Yi', suffix)
def path_from_represenation(representation, anatomy):
from avalon import pipeline # safer importing
try:
template = representation["data"]["template"]
except KeyError:
return None
try:
context = representation["context"]
context["root"] = anatomy.roots
path = pipeline.format_template_with_optional_keys(
context, template
)
except KeyError:
# Template references unavailable data
return None
return os.path.normpath(path)
def copy_file(src_path, dst_path):
"""Hardlink file if possible(to save space), copy if not"""
from avalon.vendor import filelink # safer importing
if os.path.exists(dst_path):
return
try:
filelink.create(
src_path,
dst_path,
filelink.HARDLINK
)
except OSError:
shutil.copyfile(src_path, dst_path)
def get_format_dict(anatomy, location_path):
"""Returns replaced root values from user provider value.
Args:
anatomy (Anatomy)
location_path (str): user provided value
Returns:
(dict): prepared for formatting of a template
"""
format_dict = {}
if location_path:
location_path = location_path.replace("\\", "/")
root_names = anatomy.root_names_from_templates(
anatomy.templates["delivery"]
)
if root_names is None:
format_dict["root"] = location_path
else:
format_dict["root"] = {}
for name in root_names:
format_dict["root"][name] = location_path
return format_dict
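# Illustration (hypothetical root name "work", assuming the delivery
# templates reference it):
#   get_format_dict(anatomy, "C:/deliveries")
#   -> {"root": {"work": "C:/deliveries"}}
# An empty location_path returns {} so the default anatomy roots are used.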
def check_destination_path(repre_id,
anatomy, anatomy_data,
datetime_data, template_name):
""" Try to create destination path based on 'template_name'.
If the path cannot be filled because the template contains unmatched
keys, an error message is provided to filter out the repre later.
Args:
anatomy (Anatomy)
anatomy_data (dict): context to fill anatomy
datetime_data (dict): values with actual date
template_name (str): to pick correct delivery template
Returns:
(collections.defaultdict): {"TYPE_OF_ERROR": "ERROR_DETAIL"}
"""
anatomy_data.update(datetime_data)
anatomy_filled = anatomy.format_all(anatomy_data)
dest_path = anatomy_filled["delivery"][template_name]
report_items = collections.defaultdict(list)
sub_msg = None
if not dest_path.solved:
msg = (
"Missing keys in Representation's context"
" for anatomy template \"{}\"."
).format(template_name)
if dest_path.missing_keys:
keys = ", ".join(dest_path.missing_keys)
sub_msg = (
"Representation: {}<br>- Missing keys: \"{}\"<br>"
).format(repre_id, keys)
if dest_path.invalid_types:
items = []
for key, value in dest_path.invalid_types.items():
items.append("\"{}\" {}".format(key, str(value)))
keys = ", ".join(items)
sub_msg = (
"Representation: {}<br>"
"- Invalid value DataType: \"{}\"<br>"
).format(repre_id, keys)
report_items[msg].append(sub_msg)
return report_items
def process_single_file(
src_path, repre, anatomy, template_name, anatomy_data, format_dict,
report_items, log
):
"""Copy single file to calculated path based on template
Args:
src_path(str): path of source representation file
repre (dict): full representation; used only in process_sequence, kept
here only to share the same signature
anatomy (Anatomy)
template_name (string): user selected delivery template name
anatomy_data (dict): data from repre to fill anatomy with
format_dict (dict): root dictionary with names and values
report_items (collections.defaultdict): to return error messages
log (Logger): for log printing
Returns:
(collections.defaultdict , int)
"""
if not os.path.exists(src_path):
msg = "{} doesn't exist for {}".format(src_path,
repre["_id"])
report_items["Source file was not found"].append(msg)
return report_items, 0
anatomy_filled = anatomy.format(anatomy_data)
if format_dict:
template_result = anatomy_filled["delivery"][template_name]
delivery_path = template_result.rootless.format(**format_dict)
else:
delivery_path = anatomy_filled["delivery"][template_name]
# context.representation could be .psd
delivery_path = delivery_path.replace("..", ".")
delivery_folder = os.path.dirname(delivery_path)
if not os.path.exists(delivery_folder):
os.makedirs(delivery_folder)
log.debug("Copying single: {} -> {}".format(src_path, delivery_path))
copy_file(src_path, delivery_path)
return report_items, 1
def process_sequence(
src_path, repre, anatomy, template_name, anatomy_data, format_dict,
report_items, log
):
""" For Pype2(mainly - works in 3 too) where representation might not
contain files.
Uses listing physical files (not 'files' on repre as a)might not be
present, b)might not be reliable for representation and copying them.
TODO Should be refactored when files are sufficient to drive all
representations.
Args:
src_path(str): path of source representation file
repre (dict): full representation
anatomy (Anatomy)
template_name (string): user selected delivery template name
anatomy_data (dict): data from repre to fill anatomy with
format_dict (dict): root dictionary with names and values
report_items (collections.defaultdict): to return error messages
log (Logger): for log printing
Returns:
(collections.defaultdict , int)
"""
if not os.path.exists(src_path):
msg = "{} doesn't exist for {}".format(src_path,
repre["_id"])
report_items["Source file was not found"].append(msg)
return report_items, 0
dir_path, file_name = os.path.split(str(src_path))
context = repre["context"]
ext = context.get("ext", context.get("representation"))
if not ext:
msg = "Source extension not found, cannot find collection"
report_items[msg].append(src_path)
log.warning("{} <{}>".format(msg, context))
return report_items, 0
ext = "." + ext
# context.representation could be .psd
ext = ext.replace("..", ".")
src_collections, remainder = clique.assemble(os.listdir(dir_path))
src_collection = None
for col in src_collections:
if col.tail != ext:
continue
src_collection = col
break
if src_collection is None:
msg = "Source collection of files was not found"
report_items[msg].append(src_path)
log.warning("{} <{}>".format(msg, src_path))
return report_items, 0
frame_indicator = "@####@"
anatomy_data["frame"] = frame_indicator
anatomy_filled = anatomy.format(anatomy_data)
if format_dict:
template_result = anatomy_filled["delivery"][template_name]
delivery_path = template_result.rootless.format(**format_dict)
else:
delivery_path = anatomy_filled["delivery"][template_name]
delivery_folder = os.path.dirname(delivery_path)
dst_head, dst_tail = delivery_path.split(frame_indicator)
dst_padding = src_collection.padding
dst_collection = clique.Collection(
head=dst_head,
tail=dst_tail,
padding=dst_padding
)
if not os.path.exists(delivery_folder):
os.makedirs(delivery_folder)
src_head = src_collection.head
src_tail = src_collection.tail
uploaded = 0
for index in src_collection.indexes:
src_padding = src_collection.format("{padding}") % index
src_file_name = "{}{}{}".format(src_head, src_padding, src_tail)
src = os.path.normpath(
os.path.join(dir_path, src_file_name)
)
dst_padding = dst_collection.format("{padding}") % index
dst = "{}{}{}".format(dst_head, dst_padding, dst_tail)
log.debug("Copying single: {} -> {}".format(src, dst))
copy_file(src, dst)
uploaded += 1
return report_items, uploaded
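A worked sketch of the frame-placeholder step above (hypothetical filled template):

# anatomy_data["frame"] = "@####@" makes the filled delivery path carry a marker:
#   "/deliveries/shot010_v001.@####@.exr"
# splitting on the marker yields the destination collection for clique:
#   dst_head = "/deliveries/shot010_v001.", dst_tail = ".exr"
# each source index is then re-padded and copied to dst_head + frame + dst_tail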

View file

@ -1101,9 +1101,6 @@ class SyncToAvalonEvent(BaseEvent):
# Parents, Hierarchy
ent_path_items = [ent["name"] for ent in ftrack_ent["link"]]
parents = ent_path_items[1:len(ent_path_items)-1:]
hierarchy = ""
if len(parents) > 0:
hierarchy = os.path.sep.join(parents)
# TODO logging
self.log.debug(
@ -1132,7 +1129,6 @@ class SyncToAvalonEvent(BaseEvent):
"ftrackId": ftrack_ent["id"],
"entityType": ftrack_ent.entity_type,
"parents": parents,
"hierarchy": hierarchy,
"tasks": {},
"visualParent": vis_par
}
@ -1975,14 +1971,9 @@ class SyncToAvalonEvent(BaseEvent):
if cur_par == parents:
continue
hierarchy = ""
if len(parents) > 0:
hierarchy = os.path.sep.join(parents)
if "data" not in self.updates[mongo_id]:
self.updates[mongo_id]["data"] = {}
self.updates[mongo_id]["data"]["parents"] = parents
self.updates[mongo_id]["data"]["hierarchy"] = hierarchy
# Skip custom attributes if didn't change
if not hier_cust_attrs_ids:

View file

@ -11,23 +11,28 @@ from avalon.api import AvalonMongoDB
class DeleteAssetSubset(BaseAction):
'''Delete asset/subset action.'''
#: Action identifier.
# Action identifier.
identifier = "delete.asset.subset"
#: Action label.
# Action label.
label = "Delete Asset/Subsets"
#: Action description.
# Action description.
description = "Removes from Avalon with all childs and asset from Ftrack"
icon = statics_icon("ftrack", "action_icons", "DeleteAsset.svg")
settings_key = "delete_asset_subset"
#: Db connection
dbcon = AvalonMongoDB()
# Db connection
dbcon = None
splitter = {"type": "label", "value": "---"}
action_data_by_id = {}
asset_prefix = "asset:"
subset_prefix = "subset:"
def __init__(self, *args, **kwargs):
self.dbcon = AvalonMongoDB()
super(DeleteAssetSubset, self).__init__(*args, **kwargs)
def discover(self, session, entities, event):
""" Validation """
task_ids = []
@ -446,7 +451,14 @@ class DeleteAssetSubset(BaseAction):
if len(assets_to_delete) > 0:
map_av_ftrack_id = spec_data["without_ftrack_id"]
# Prepare data when deleting whole avalon asset
avalon_assets = self.dbcon.find({"type": "asset"})
avalon_assets = self.dbcon.find(
{"type": "asset"},
{
"_id": 1,
"data.visualParent": 1,
"data.ftrackId": 1
}
)
avalon_assets_by_parent = collections.defaultdict(list)
for asset in avalon_assets:
asset_id = asset["_id"]
@ -537,11 +549,13 @@ class DeleteAssetSubset(BaseAction):
ftrack_proc_txt, ", ".join(ftrack_ids_to_delete)
))
ftrack_ents_to_delete = (
entities_by_link_len = (
self._filter_entities_to_delete(ftrack_ids_to_delete, session)
)
for entity in ftrack_ents_to_delete:
session.delete(entity)
for link_len in sorted(entities_by_link_len.keys(), reverse=True):
for entity in entities_by_link_len[link_len]:
session.delete(entity)
try:
session.commit()
except Exception:
@ -600,29 +614,11 @@ class DeleteAssetSubset(BaseAction):
joined_ids_to_delete
)
).all()
filtered = to_delete_entities[:]
while True:
changed = False
_filtered = filtered[:]
for entity in filtered:
entity_id = entity["id"]
entities_by_link_len = collections.defaultdict(list)
for entity in to_delete_entities:
entities_by_link_len[len(entity["link"])].append(entity)
for _entity in tuple(_filtered):
if entity_id == _entity["id"]:
continue
for _link in _entity["link"]:
if entity_id == _link["id"] and _entity in _filtered:
_filtered.remove(_entity)
changed = True
break
filtered = _filtered
if not changed:
break
return filtered
return entities_by_link_len
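# Ordering sketch (hypothetical): with entities grouped as
# {4: [shot], 3: [sequence], 2: [episode]}, iterating link lengths in
# reverse deletes the deepest entities first, so children are always
# removed before their parents.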
def report_handle(self, report_messages, project_name, event):
if not report_messages:

View file

@ -1,18 +1,20 @@
import os
import copy
import json
import shutil
import collections
import clique
from bson.objectid import ObjectId
from avalon import pipeline
from avalon.vendor import filelink
from openpype.api import Anatomy, config
from openpype.modules.ftrack.lib import BaseAction, statics_icon
from openpype.modules.ftrack.lib.avalon_sync import CUST_ATTR_ID_KEY
from openpype.lib.delivery import (
path_from_represenation,
get_format_dict,
check_destination_path,
process_single_file,
process_sequence
)
from avalon.api import AvalonMongoDB
@ -450,18 +452,7 @@ class Delivery(BaseAction):
anatomy = Anatomy(project_name)
format_dict = {}
if location_path:
location_path = location_path.replace("\\", "/")
root_names = anatomy.root_names_from_templates(
anatomy.templates["delivery"]
)
if root_names is None:
format_dict["root"] = location_path
else:
format_dict["root"] = {}
for name in root_names:
format_dict["root"][name] = location_path
format_dict = get_format_dict(anatomy, location_path)
datetime_data = config.get_datetime_data()
for repre in repres_to_deliver:
@ -471,41 +462,15 @@ class Delivery(BaseAction):
debug_msg += " with published path {}.".format(source_path)
self.log.debug(debug_msg)
# Get destination repre path
anatomy_data = copy.deepcopy(repre["context"])
anatomy_data.update(datetime_data)
anatomy_filled = anatomy.format_all(anatomy_data)
test_path = anatomy_filled["delivery"][anatomy_name]
repre_report_items = check_destination_path(repre["_id"],
anatomy,
anatomy_data,
datetime_data,
anatomy_name)
if not test_path.solved:
msg = (
"Missing keys in Representation's context"
" for anatomy template \"{}\"."
).format(anatomy_name)
if test_path.missing_keys:
keys = ", ".join(test_path.missing_keys)
sub_msg = (
"Representation: {}<br>- Missing keys: \"{}\"<br>"
).format(str(repre["_id"]), keys)
if test_path.invalid_types:
items = []
for key, value in test_path.invalid_types.items():
items.append("\"{}\" {}".format(key, str(value)))
keys = ", ".join(items)
sub_msg = (
"Representation: {}<br>"
"- Invalid value DataType: \"{}\"<br>"
).format(str(repre["_id"]), keys)
report_items[msg].append(sub_msg)
self.log.warning(
"{} Representation: \"{}\" Filled: <{}>".format(
msg, str(repre["_id"]), str(test_path)
)
)
if repre_report_items:
report_items.update(repre_report_items)
continue
# Get source repre path
@ -514,151 +479,28 @@ class Delivery(BaseAction):
if frame:
repre["context"]["frame"] = len(str(frame)) * "#"
repre_path = self.path_from_represenation(repre, anatomy)
repre_path = path_from_represenation(repre, anatomy)
# TODO add backup solution where root of path from component
# is repalced with root
# is replaced with root
args = (
repre_path,
repre,
anatomy,
anatomy_name,
anatomy_data,
format_dict,
report_items
report_items,
self.log
)
if not frame:
self.process_single_file(*args)
process_single_file(*args)
else:
self.process_sequence(*args)
process_sequence(*args)
return self.report(report_items)
def process_single_file(
self, repre_path, anatomy, anatomy_name, anatomy_data, format_dict,
report_items
):
anatomy_filled = anatomy.format(anatomy_data)
if format_dict:
template_result = anatomy_filled["delivery"][anatomy_name]
delivery_path = template_result.rootless.format(**format_dict)
else:
delivery_path = anatomy_filled["delivery"][anatomy_name]
delivery_folder = os.path.dirname(delivery_path)
if not os.path.exists(delivery_folder):
os.makedirs(delivery_folder)
self.copy_file(repre_path, delivery_path)
def process_sequence(
self, repre_path, anatomy, anatomy_name, anatomy_data, format_dict,
report_items
):
dir_path, file_name = os.path.split(str(repre_path))
base_name, ext = os.path.splitext(file_name)
file_name_items = None
if "#" in base_name:
file_name_items = [part for part in base_name.split("#") if part]
elif "%" in base_name:
file_name_items = base_name.split("%")
if not file_name_items:
msg = "Source file was not found"
report_items[msg].append(repre_path)
self.log.warning("{} <{}>".format(msg, repre_path))
return
src_collections, remainder = clique.assemble(os.listdir(dir_path))
src_collection = None
for col in src_collections:
if col.tail != ext:
continue
# skip if the collection doesn't have the same basename
if not col.head.startswith(file_name_items[0]):
continue
src_collection = col
break
if src_collection is None:
# TODO log error!
msg = "Source collection of files was not found"
report_items[msg].append(repre_path)
self.log.warning("{} <{}>".format(msg, repre_path))
return
frame_indicator = "@####@"
anatomy_data["frame"] = frame_indicator
anatomy_filled = anatomy.format(anatomy_data)
if format_dict:
template_result = anatomy_filled["delivery"][anatomy_name]
delivery_path = template_result.rootless.format(**format_dict)
else:
delivery_path = anatomy_filled["delivery"][anatomy_name]
delivery_folder = os.path.dirname(delivery_path)
dst_head, dst_tail = delivery_path.split(frame_indicator)
dst_padding = src_collection.padding
dst_collection = clique.Collection(
head=dst_head,
tail=dst_tail,
padding=dst_padding
)
if not os.path.exists(delivery_folder):
os.makedirs(delivery_folder)
src_head = src_collection.head
src_tail = src_collection.tail
for index in src_collection.indexes:
src_padding = src_collection.format("{padding}") % index
src_file_name = "{}{}{}".format(src_head, src_padding, src_tail)
src = os.path.normpath(
os.path.join(dir_path, src_file_name)
)
dst_padding = dst_collection.format("{padding}") % index
dst = "{}{}{}".format(dst_head, dst_padding, dst_tail)
self.copy_file(src, dst)
def path_from_represenation(self, representation, anatomy):
try:
template = representation["data"]["template"]
except KeyError:
return None
try:
context = representation["context"]
context["root"] = anatomy.roots
path = pipeline.format_template_with_optional_keys(
context, template
)
except KeyError:
# Template references unavailable data
return None
return os.path.normpath(path)
def copy_file(self, src_path, dst_path):
if os.path.exists(dst_path):
return
try:
filelink.create(
src_path,
dst_path,
filelink.HARDLINK
)
except OSError:
shutil.copyfile(src_path, dst_path)
def report(self, report_items):
"""Returns dict with final status of delivery (succes, fail etc.)."""
items = []
title = "Delivery report"
for msg, _items in report_items.items():

View file

@ -1237,12 +1237,8 @@ class SyncEntitiesFactory:
ent_path_items = [ent["name"] for ent in entity["link"]]
parents = ent_path_items[1:len(ent_path_items) - 1:]
hierarchy = ""
if len(parents) > 0:
hierarchy = os.path.sep.join(parents)
data["parents"] = parents
data["hierarchy"] = hierarchy
data["tasks"] = self.entities_dict[id].pop("tasks", {})
self.entities_dict[id]["final_entity"]["data"] = data
self.entities_dict[id]["final_entity"]["type"] = "asset"
@ -2169,8 +2165,6 @@ class SyncEntitiesFactory:
hierarchy = "/".join(parents)
self.entities_dict[ftrack_id][
"final_entity"]["data"]["parents"] = parents
self.entities_dict[ftrack_id][
"final_entity"]["data"]["hierarchy"] = hierarchy
_parents.append(self.entities_dict[ftrack_id]["name"])
for child_id in self.entities_dict[ftrack_id]["children"]:
@ -2181,7 +2175,6 @@ class SyncEntitiesFactory:
if "data" not in self.updates[mongo_id]:
self.updates[mongo_id]["data"] = {}
self.updates[mongo_id]["data"]["parents"] = parents
self.updates[mongo_id]["data"]["hierarchy"] = hierarchy
def prepare_project_changes(self):
ftrack_ent_dict = self.entities_dict[self.ft_project_id]

View file

@ -16,6 +16,18 @@ class LoginServerHandler(BaseHTTPRequestHandler):
self.login_callback = login_callback
BaseHTTPRequestHandler.__init__(self, *args, **kw)
def log_message(self, format_str, *args):
"""Override method of BaseHTTPRequestHandler.
Goal is to use `print` instead of `sys.stderr.write`
"""
# same message format as the base class, only written via print
print("%s - - [%s] %s\n" % (
self.client_address[0],
self.log_date_time_string(),
format_str % args
))
def do_GET(self):
'''Override to handle requests ourselves.'''
parsed_path = parse.urlparse(self.path)

View file

@ -0,0 +1,318 @@
from collections import defaultdict
import copy
from Qt import QtWidgets, QtCore, QtGui
from avalon import api, style
from avalon.api import AvalonMongoDB
from openpype.api import Anatomy, config
from openpype import resources
from openpype.lib.delivery import (
sizeof_fmt,
path_from_represenation,
get_format_dict,
check_destination_path,
process_single_file,
process_sequence,
collect_frames
)
class Delivery(api.SubsetLoader):
"""Export selected versions to folder structure from Template"""
is_multiple_contexts_compatible = True
sequence_splitter = "__sequence_splitter__"
representations = ["*"]
families = ["*"]
tool_names = ["library_loader"]
label = "Deliver Versions"
order = 35
icon = "upload"
color = "#d8d8d8"
def message(self, text):
msgBox = QtWidgets.QMessageBox()
msgBox.setText(text)
msgBox.setStyleSheet(style.load_stylesheet())
msgBox.setWindowFlags(
msgBox.windowFlags() | QtCore.Qt.FramelessWindowHint
)
msgBox.exec_()
def load(self, contexts, name=None, namespace=None, options=None):
try:
dialog = DeliveryOptionsDialog(contexts, self.log)
dialog.exec_()
except Exception:
self.log.error("Failed to deliver versions.", exc_info=True)
class DeliveryOptionsDialog(QtWidgets.QDialog):
"""Dialog to select template where to deliver selected representations."""
def __init__(self, contexts, log=None, parent=None):
super(DeliveryOptionsDialog, self).__init__(parent=parent)
project = contexts[0]["project"]["name"]
self.anatomy = Anatomy(project)
self._representations = None
self.log = log
self.currently_uploaded = 0
self.dbcon = AvalonMongoDB()
self.dbcon.Session["AVALON_PROJECT"] = project
self.dbcon.install()
self._set_representations(contexts)
self.setWindowTitle("OpenPype - Deliver versions")
icon = QtGui.QIcon(resources.pype_icon_filepath())
self.setWindowIcon(icon)
self.setWindowFlags(
QtCore.Qt.WindowCloseButtonHint |
QtCore.Qt.WindowMinimizeButtonHint
)
self.setStyleSheet(style.load_stylesheet())
dropdown = QtWidgets.QComboBox()
self.templates = self._get_templates(self.anatomy)
for name, _ in self.templates.items():
dropdown.addItem(name)
template_label = QtWidgets.QLabel()
template_label.setCursor(QtGui.QCursor(QtCore.Qt.IBeamCursor))
template_label.setTextInteractionFlags(QtCore.Qt.TextSelectableByMouse)
root_line_edit = QtWidgets.QLineEdit()
repre_checkboxes_layout = QtWidgets.QFormLayout()
repre_checkboxes_layout.setContentsMargins(10, 5, 5, 10)
self._representation_checkboxes = {}
for repre in self._get_representation_names():
checkbox = QtWidgets.QCheckBox()
checkbox.setChecked(False)
self._representation_checkboxes[repre] = checkbox
checkbox.stateChanged.connect(self._update_selected_label)
repre_checkboxes_layout.addRow(repre, checkbox)
selected_label = QtWidgets.QLabel()
input_widget = QtWidgets.QWidget(self)
input_layout = QtWidgets.QFormLayout(input_widget)
input_layout.setContentsMargins(10, 15, 5, 5)
input_layout.addRow("Selected representations", selected_label)
input_layout.addRow("Delivery template", dropdown)
input_layout.addRow("Template value", template_label)
input_layout.addRow("Root", root_line_edit)
input_layout.addRow("Representations", repre_checkboxes_layout)
btn_delivery = QtWidgets.QPushButton("Deliver")
btn_delivery.setEnabled(bool(dropdown.currentText()))
progress_bar = QtWidgets.QProgressBar(self)
progress_bar.setMinimum(0)
progress_bar.setMaximum(100)
progress_bar.setVisible(False)
text_area = QtWidgets.QTextEdit()
text_area.setReadOnly(True)
text_area.setVisible(False)
text_area.setMinimumHeight(100)
layout = QtWidgets.QVBoxLayout(self)
layout.addWidget(input_widget)
layout.addStretch(1)
layout.addWidget(btn_delivery)
layout.addWidget(progress_bar)
layout.addWidget(text_area)
self.selected_label = selected_label
self.template_label = template_label
self.dropdown = dropdown
self.root_line_edit = root_line_edit
self.progress_bar = progress_bar
self.text_area = text_area
self.btn_delivery = btn_delivery
self.files_selected, self.size_selected = \
self._get_counts(self._get_selected_repres())
self._update_selected_label()
self._update_template_value()
btn_delivery.clicked.connect(self.deliver)
dropdown.currentIndexChanged.connect(self._update_template_value)
def deliver(self):
"""Main method to loop through all selected representations"""
self.progress_bar.setVisible(True)
self.btn_delivery.setEnabled(False)
QtWidgets.QApplication.processEvents()
report_items = defaultdict(list)
selected_repres = self._get_selected_repres()
datetime_data = config.get_datetime_data()
template_name = self.dropdown.currentText()
format_dict = get_format_dict(self.anatomy, self.root_line_edit.text())
for repre in self._representations:
if repre["name"] not in selected_repres:
continue
repre_path = path_from_represenation(repre, self.anatomy)
anatomy_data = copy.deepcopy(repre["context"])
new_report_items = check_destination_path(str(repre["_id"]),
self.anatomy,
anatomy_data,
datetime_data,
template_name)
report_items.update(new_report_items)
if new_report_items:
continue
args = [
repre_path,
repre,
self.anatomy,
template_name,
anatomy_data,
format_dict,
report_items,
self.log
]
if repre.get("files"):
src_paths = []
for repre_file in repre["files"]:
src_path = self.anatomy.fill_root(repre_file["path"])
src_paths.append(src_path)
sources_and_frames = collect_frames(src_paths)
for src_path, frame in sources_and_frames.items():
args[0] = src_path
if frame:
anatomy_data["frame"] = frame
new_report_items, uploaded = process_single_file(*args)
report_items.update(new_report_items)
self._update_progress(uploaded)
else: # fallback for Pype2 and representations without files
frame = repre['context'].get('frame')
if frame:
repre["context"]["frame"] = len(str(frame)) * "#"
if not frame:
new_report_items, uploaded = process_single_file(*args)
else:
new_report_items, uploaded = process_sequence(*args)
report_items.update(new_report_items)
self._update_progress(uploaded)
self.text_area.setText(self._format_report(report_items))
self.text_area.setVisible(True)
def _get_representation_names(self):
"""Get set of representation names for checkbox filtering."""
return set([repre["name"] for repre in self._representations])
def _get_templates(self, anatomy):
"""Adds list of delivery templates from Anatomy to dropdown."""
templates = {}
for template_name, value in anatomy.templates["delivery"].items():
if not isinstance(value, str) or not value.startswith('{root'):
continue
templates[template_name] = value
return templates
def _set_representations(self, contexts):
version_ids = [context["version"]["_id"] for context in contexts]
repres = list(self.dbcon.find({
"type": "representation",
"parent": {"$in": version_ids}
}))
self._representations = repres
def _get_counts(self, selected_repres=None):
"""Returns tuple of number of selected files and their size."""
files_selected = 0
size_selected = 0
for repre in self._representations:
if repre["name"] in selected_repres:
files = repre.get("files", [])
if not files:  # repre without files counts as one (avoids division by zero)
files_selected += 1
size_selected += 0
else:
for repre_file in files:
files_selected += 1
size_selected += repre_file["size"]
return files_selected, size_selected
def _prepare_label(self):
"""Provides text with no of selected files and their size."""
label = "{} files, size {}".format(self.files_selected,
sizeof_fmt(self.size_selected))
return label
    def _get_selected_repres(self):
        """Returns list of representation names filtered from checkboxes."""
        selected_repres = []
        for repre_name, checkbox in self._representation_checkboxes.items():
            if checkbox.isChecked():
                selected_repres.append(repre_name)

        return selected_repres
    def _update_selected_label(self):
        """Updates label with the number and size of selected files."""
        selected_repres = self._get_selected_repres()
        self.files_selected, self.size_selected = \
            self._get_counts(selected_repres)
        self.selected_label.setText(self._prepare_label())
    def _update_template_value(self, _index=None):
        """Sets template value to label after selection in dropdown."""
        name = self.dropdown.currentText()
        template_value = self.templates.get(name)
        if template_value:
            self.btn_delivery.setEnabled(True)
            self.template_label.setText(template_value)
    def _update_progress(self, uploaded):
        """Update progress bar after each representation is copied."""
        self.currently_uploaded += uploaded

        ratio = self.currently_uploaded / self.files_selected
        # Cast to int: QProgressBar.setValue expects an integer value
        self.progress_bar.setValue(int(ratio * self.progress_bar.maximum()))
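    # Worked example (hypothetical numbers): with 4 files selected and 3
    # uploaded so far, ratio == 0.75, so a bar with maximum 100 is set to 75.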
    def _format_report(self, report_items):
        """Format final result and error details as html."""
        msg = "Delivery finished"
        if not report_items:
            msg += " successfully"
        else:
            msg += " with errors"
        txt = "<h2>{}</h2>".format(msg)
        for header, data in report_items.items():
            txt += "<h3>{}</h3>".format(header)
            for item in data:
                txt += "{}<br>".format(item)

        return txt
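    # Illustrative sketch of the produced markup (hypothetical error bucket):
    #     <h2>Delivery finished with errors</h2>
    #     <h3>Source file was not found</h3>
    #     /hypothetical/path/file.exr<br>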

View file

@ -39,7 +39,6 @@ class CollectResourcesPath(pyblish.api.InstancePlugin):
"rig",
"plate",
"look",
"lut",
"yetiRig",
"yeticache",
"nukenodes",
@ -52,7 +51,8 @@ class CollectResourcesPath(pyblish.api.InstancePlugin):
"fbx",
"textures",
"action",
"background"
"background",
"effect"
]
def process(self, instance):

View file

@ -40,12 +40,15 @@ class ExtractOtioAudioTracks(pyblish.api.ContextPlugin):
        # get sequence
        otio_timeline = context.data["otioTimeline"]

        # temp file
        audio_temp_fpath = self.create_temp_file("audio")

        # get all audio inputs from otio timeline
        audio_inputs = self.get_audio_track_items(otio_timeline)

        if not audio_inputs:
            return

        # temp file
        audio_temp_fpath = self.create_temp_file("audio")

        # create empty audio with longest duration
        empty = self.create_empty(audio_inputs)
@ -53,14 +56,14 @@ class ExtractOtioAudioTracks(pyblish.api.ContextPlugin):
        audio_inputs.insert(0, empty)

        # create cmd
        cmd = self.ffmpeg_path + " "
        cmd = '"{}"'.format(self.ffmpeg_path) + " "
        cmd += self.create_cmd(audio_inputs)
        cmd += audio_temp_fpath
        cmd += "\"{}\"".format(audio_temp_fpath)

        # run subprocess
        self.log.debug("Executing: {}".format(cmd))
        openpype.api.run_subprocess(
            cmd, shell=True, logger=self.log
            cmd, logger=self.log
        )

        # remove empty
@ -97,17 +100,17 @@ class ExtractOtioAudioTracks(pyblish.api.ContextPlugin):
            audio_fpath = self.create_temp_file(name)

            cmd = " ".join([
                self.ffmpeg_path,
                '"{}"'.format(self.ffmpeg_path),
                "-ss {}".format(start_sec),
                "-t {}".format(duration_sec),
                "-i {}".format(audio_file),
                "-i \"{}\"".format(audio_file),
                audio_fpath
            ])

            # run subprocess
            self.log.debug("Executing: {}".format(cmd))
            openpype.api.run_subprocess(
                cmd, shell=True, logger=self.log
                cmd, logger=self.log
            )
        else:
            audio_fpath = recycling_file.pop()
@ -218,11 +221,11 @@ class ExtractOtioAudioTracks(pyblish.api.ContextPlugin):
        # create empty cmd
        cmd = " ".join([
            self.ffmpeg_path,
            '"{}"'.format(self.ffmpeg_path),
            "-f lavfi",
            "-i anullsrc=channel_layout=stereo:sample_rate=48000",
            "-t {}".format(max_duration_sec),
            empty_fpath
            "\"{}\"".format(empty_fpath)
        ])

        # generate empty with ffmpeg
@ -230,7 +233,7 @@ class ExtractOtioAudioTracks(pyblish.api.ContextPlugin):
self.log.debug("Executing: {}".format(cmd))
openpype.api.run_subprocess(
cmd, shell=True, logger=self.log
cmd, logger=self.log
)
# return dict with output
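The repeated change in this file swaps `shell=True` for explicit quoting of the executable and media paths. A minimal standalone sketch of why the quoting matters for paths containing spaces (hypothetical paths, standard library only, not the plugin's own helper):

import shlex
import subprocess

ffmpeg_path = "/opt/my tools/ffmpeg"   # hypothetical path with a space
audio_file = "/tmp/my audio.wav"       # hypothetical media path
cmd = '"{}" -i "{}" /tmp/out.wav'.format(ffmpeg_path, audio_file)

# Quoting keeps each path a single token once the command string is split:
print(shlex.split(cmd))
# ['/opt/my tools/ffmpeg', '-i', '/tmp/my audio.wav', '/tmp/out.wav']
subprocess.run(shlex.split(cmd), check=True)  # would run if the paths existed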

View file

@ -209,7 +209,7 @@ class ExtractOTIOReview(openpype.api.Extractor):
"frameStart": start,
"frameEnd": end,
"stagingDir": self.staging_dir,
"tags": ["review", "ftrackreview", "delete"]
"tags": ["review", "delete"]
}
collection = clique.Collection(
@ -313,7 +313,7 @@ class ExtractOTIOReview(openpype.api.Extractor):
        out_frame_start += end_offset

        # start command list
        command = [ffmpeg_path]
        command = ['"{}"'.format(ffmpeg_path)]

        if sequence:
            input_dir, collection = sequence
@ -326,7 +326,7 @@ class ExtractOTIOReview(openpype.api.Extractor):
            # form command for rendering gap files
            command.extend([
                "-start_number {}".format(in_frame_start),
                "-i {}".format(input_path)
                "-i \"{}\"".format(input_path)
            ])

        elif video:
@ -341,7 +341,7 @@ class ExtractOTIOReview(openpype.api.Extractor):
            command.extend([
                "-ss {}".format(sec_start),
                "-t {}".format(sec_duration),
                "-i {}".format(video_path)
                "-i \"{}\"".format(video_path)
            ])

        elif gap:
@ -360,11 +360,13 @@ class ExtractOTIOReview(openpype.api.Extractor):
        # add output attributes
        command.extend([
            "-start_number {}".format(out_frame_start),
            output_path
            "\"{}\"".format(output_path)
        ])

        # execute
        self.log.debug("Executing: {}".format(" ".join(command)))
        output = openpype.api.run_subprocess(" ".join(command), shell=True)
        output = openpype.api.run_subprocess(
            " ".join(command), logger=self.log
        )
        self.log.debug("Output: {}".format(output))
    def _generate_used_frames(self, duration, end_offset=None):

View file

@ -48,6 +48,8 @@ class ExtractReview(pyblish.api.InstancePlugin):
video_exts = ["mov", "mp4"]
supported_exts = image_exts + video_exts
alpha_exts = ["exr", "png", "dpx"]
# FFmpeg tools paths
ffmpeg_path = get_ffmpeg_tool_path("ffmpeg")
@ -296,6 +298,13 @@ class ExtractReview(pyblish.api.InstancePlugin):
        ):
            with_audio = False

        input_is_sequence = self.input_is_sequence(repre)
        input_allow_bg = False
        if input_is_sequence and repre["files"]:
            ext = os.path.splitext(repre["files"][0])[1].replace(".", "")
            if ext in self.alpha_exts:
                input_allow_bg = True

        return {
            "fps": float(instance.data["fps"]),
            "frame_start": frame_start,
@ -310,7 +319,8 @@ class ExtractReview(pyblish.api.InstancePlugin):
"resolution_width": instance.data.get("resolutionWidth"),
"resolution_height": instance.data.get("resolutionHeight"),
"origin_repre": repre,
"input_is_sequence": self.input_is_sequence(repre),
"input_is_sequence": input_is_sequence,
"input_allow_bg": input_allow_bg,
"with_audio": with_audio,
"without_handles": without_handles,
"handles_are_set": handles_are_set
@ -470,6 +480,39 @@ class ExtractReview(pyblish.api.InstancePlugin):
        lut_filters = self.lut_filters(new_repre, instance, ffmpeg_input_args)
        ffmpeg_video_filters.extend(lut_filters)

        bg_alpha = 0
        bg_color = output_def.get("bg_color")
        if bg_color:
            bg_red, bg_green, bg_blue, bg_alpha = bg_color

        if bg_alpha > 0:
            if not temp_data["input_allow_bg"]:
                self.log.info((
                    "Output definition has BG color defined but input was"
                    " resolved as not supporting it. Skipping BG color."
                ))
            else:
                bg_color_hex = "#{0:0>2X}{1:0>2X}{2:0>2X}".format(
                    bg_red, bg_green, bg_blue
                )
                bg_color_alpha = float(bg_alpha) / 255
                bg_color_str = "{}@{}".format(bg_color_hex, bg_color_alpha)
                self.log.info("Applying BG color {}".format(bg_color_str))
                color_args = [
                    "split=2[bg][fg]",
                    "[bg]drawbox=c={}:replace=1:t=fill[bg]".format(
                        bg_color_str
                    ),
                    "[bg][fg]overlay=format=auto"
                ]
                # Prepend bg color change before all video filters
                # NOTE at the time of creation this is required, as video
                #   filters from settings may affect the color of the BG,
                #   e.g. `eq` can remove alpha from the input
                for arg in reversed(color_args):
                    ffmpeg_video_filters.insert(0, arg)

        # Add argument to override output file
        ffmpeg_output_args.append("-y")
@ -547,10 +590,12 @@ class ExtractReview(pyblish.api.InstancePlugin):
all_args.append("\"{}\"".format(self.ffmpeg_path))
all_args.extend(input_args)
if video_filters:
all_args.append("-filter:v {}".format(",".join(video_filters)))
all_args.append("-filter:v")
all_args.append("\"{}\"".format(",".join(video_filters)))
if audio_filters:
all_args.append("-filter:a {}".format(",".join(audio_filters)))
all_args.append("-filter:a")
all_args.append("\"{}\"".format(",".join(audio_filters)))
all_args.extend(output_args)

View file

@ -78,7 +78,6 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
"rig",
"plate",
"look",
"lut",
"audio",
"yetiRig",
"yeticache",
@ -97,7 +96,8 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
"editorial",
"background",
"camerarig",
"redshiftproxy"
"redshiftproxy",
"effect"
]
exclude_families = ["clip"]
db_representation_context_keys = [

View file

@ -37,11 +37,11 @@
"ftrackreview"
],
"ffmpeg_args": {
"video_filters": [
"eq=gamma=2.2"
],
"video_filters": [],
"audio_filters": [],
"input": [],
"input": [
"-apply_trc gamma22"
],
"output": [
"-pix_fmt yuv420p",
"-crf 18",
@ -57,6 +57,12 @@
        },
        "width": 0,
        "height": 0,
        "bg_color": [
            0,
            0,
            0,
            0
        ],
        "letter_box": {
            "enabled": false,
            "ratio": 0.0,

View file

@ -193,6 +193,15 @@
"minimum": 0,
"maximum": 100000
},
{
"type": "label",
"label": "Background color is used only when input have transparency and Alpha is higher than 0."
},
{
"type": "color",
"label": "Background color",
"key": "bg_color"
},
{
"key": "letter_box",
"label": "Letter box",
@ -280,24 +289,14 @@
"minimum": 0
},
{
"type": "schema_template",
"name": "template_rgba_color",
"template_data": [
{
"label": "Font Color",
"name": "font_color"
}
]
"type": "color",
"key": "font_color",
"label": "Font Color"
},
{
"type": "schema_template",
"name": "template_rgba_color",
"template_data": [
{
"label": "Background Color",
"name": "bg_color"
}
]
"type": "color",
"key": "bg_color",
"label": "Background Color"
},
{
"type": "number",

View file

@ -1,33 +0,0 @@
[
    {
        "type": "list-strict",
        "key": "{name}",
        "label": "{label}",
        "object_types": [
            {
                "label": "R",
                "type": "number",
                "minimum": 0,
                "maximum": 255
            },
            {
                "label": "G",
                "type": "number",
                "minimum": 0,
                "maximum": 255
            },
            {
                "label": "B",
                "type": "number",
                "minimum": 0,
                "maximum": 255
            },
            {
                "label": "A",
                "type": "number",
                "minimum": 0,
                "maximum": 255
            }
        ]
    }
]

View file

@ -1,3 +1,3 @@
# -*- coding: utf-8 -*-
"""Package declaring Pype version."""
__version__ = "3.0.0-rc.5"
__version__ = "3.0.0-rc.6"