Mirror of https://github.com/ynput/ayon-core.git (synced 2025-12-24 21:04:40 +01:00)

Commit dd04e7833b: Merge branch 'develop' of github.com:pypeclub/pype into feature/delivery_in_library_loader

23 changed files with 479 additions and 336 deletions
@@ -1,25 +0,0 @@
-import bpy
-
-from avalon import api, blender
-import openpype.hosts.blender.api.plugin
-
-
-class CreateSetDress(openpype.hosts.blender.api.plugin.Creator):
-    """A grouped package of loaded content"""
-
-    name = "setdressMain"
-    label = "Set Dress"
-    family = "setdress"
-    icon = "cubes"
-    defaults = ["Main", "Anim"]
-
-    def process(self):
-        asset = self.data["asset"]
-        subset = self.data["subset"]
-        name = openpype.hosts.blender.api.plugin.asset_name(asset, subset)
-        collection = bpy.data.collections.new(name=name)
-        bpy.context.scene.collection.children.link(collection)
-        self.data['task'] = api.Session.get('AVALON_TASK')
-        blender.lib.imprint(collection, self.data)
-
-        return collection
@@ -25,9 +25,6 @@ class BlendLayoutLoader(plugin.AssetLoader):
     icon = "code-fork"
     color = "orange"
 
-    animation_creator_name = "CreateAnimation"
-    setdress_creator_name = "CreateSetDress"
-
     def _remove(self, objects, obj_container):
         for obj in list(objects):
             if obj.type == 'ARMATURE':
@@ -293,7 +290,6 @@ class UnrealLayoutLoader(plugin.AssetLoader):
     color = "orange"
 
     animation_creator_name = "CreateAnimation"
-    setdress_creator_name = "CreateSetDress"
 
     def _remove_objects(self, objects):
         for obj in list(objects):
@@ -383,7 +379,7 @@ class UnrealLayoutLoader(plugin.AssetLoader):
 
     def _process(
         self, libpath, layout_container, container_name, representation,
-        actions, parent
+        actions, parent_collection
     ):
         with open(libpath, "r") as fp:
             data = json.load(fp)
@@ -392,6 +388,11 @@ class UnrealLayoutLoader(plugin.AssetLoader):
         layout_collection = bpy.data.collections.new(container_name)
         scene.collection.children.link(layout_collection)
 
+        parent = parent_collection
+
+        if parent is None:
+            parent = scene.collection
+
         all_loaders = api.discover(api.Loader)
 
         avalon_container = bpy.data.collections.get(
@@ -516,23 +517,9 @@ class UnrealLayoutLoader(plugin.AssetLoader):
         container_metadata["libpath"] = libpath
         container_metadata["lib_container"] = lib_container
 
-        # Create a setdress subset to contain all the animation for all
-        # the rigs in the layout
-        creator_plugin = get_creator_by_name(self.setdress_creator_name)
-        if not creator_plugin:
-            raise ValueError("Creator plugin \"{}\" was not found.".format(
-                self.setdress_creator_name
-            ))
-        parent = api.create(
-            creator_plugin,
-            name="animation",
-            asset=api.Session["AVALON_ASSET"],
-            options={"useSelection": True},
-            data={"dependencies": str(context["representation"]["_id"])})
-
         layout_collection = self._process(
             libpath, layout_container, container_name,
-            str(context["representation"]["_id"]), None, parent)
+            str(context["representation"]["_id"]), None, None)
 
         container_metadata["obj_container"] = layout_collection
 
@@ -107,6 +107,9 @@ class BlendRigLoader(plugin.AssetLoader):
 
             if action is not None:
                 local_obj.animation_data.action = action
+            elif local_obj.animation_data.action is not None:
+                plugin.prepare_data(
+                    local_obj.animation_data.action, collection_name)
 
             # Set link the drivers to the local object
             if local_obj.data.animation_data:
@@ -1,61 +0,0 @@
-import os
-import json
-
-import openpype.api
-import pyblish.api
-
-import bpy
-
-
-class ExtractSetDress(openpype.api.Extractor):
-    """Extract setdress."""
-
-    label = "Extract SetDress"
-    hosts = ["blender"]
-    families = ["setdress"]
-    optional = True
-    order = pyblish.api.ExtractorOrder + 0.1
-
-    def process(self, instance):
-        stagingdir = self.staging_dir(instance)
-
-        json_data = []
-
-        for i in instance.context:
-            collection = i.data.get("name")
-            container = None
-            for obj in bpy.data.collections[collection].objects:
-                if obj.type == "ARMATURE":
-                    container_name = obj.get("avalon").get("container_name")
-                    container = bpy.data.collections[container_name]
-            if container:
-                json_dict = {
-                    "subset": i.data.get("subset"),
-                    "container": container.name,
-                }
-                json_dict["instance_name"] = container.get("avalon").get(
-                    "instance_name"
-                )
-                json_data.append(json_dict)
-
-        if "representations" not in instance.data:
-            instance.data["representations"] = []
-
-        json_filename = f"{instance.name}.json"
-        json_path = os.path.join(stagingdir, json_filename)
-
-        with open(json_path, "w+") as file:
-            json.dump(json_data, fp=file, indent=2)
-
-        json_representation = {
-            "name": "json",
-            "ext": "json",
-            "files": json_filename,
-            "stagingDir": stagingdir,
-        }
-        instance.data["representations"].append(json_representation)
-
-        self.log.info(
-            "Extracted instance '{}' to: {}".format(instance.name,
-                                                    json_representation)
-        )
@@ -1,4 +1,5 @@
 import os
+import json
 
 import openpype.api
 
@@ -121,6 +122,25 @@ class ExtractAnimationFBX(openpype.api.Extractor):
             pair[1].user_clear()
             bpy.data.actions.remove(pair[1])
 
+        json_filename = f"{instance.name}.json"
+        json_path = os.path.join(stagingdir, json_filename)
+
+        json_dict = {}
+
+        collection = instance.data.get("name")
+        container = None
+        for obj in bpy.data.collections[collection].objects:
+            if obj.type == "ARMATURE":
+                container_name = obj.get("avalon").get("container_name")
+                container = bpy.data.collections[container_name]
+        if container:
+            json_dict = {
+                "instance_name": container.get("avalon").get("instance_name")
+            }
+
+        with open(json_path, "w+") as file:
+            json.dump(json_dict, fp=file, indent=2)
+
         if "representations" not in instance.data:
             instance.data["representations"] = []
 
@@ -130,7 +150,15 @@ class ExtractAnimationFBX(openpype.api.Extractor):
             'files': fbx_filename,
             "stagingDir": stagingdir,
         }
+        json_representation = {
+            'name': 'json',
+            'ext': 'json',
+            'files': json_filename,
+            "stagingDir": stagingdir,
+        }
         instance.data["representations"].append(fbx_representation)
+        instance.data["representations"].append(json_representation)
+
 
         self.log.info("Extracted instance '{}' to: {}".format(
             instance.name, fbx_representation))
@@ -214,7 +214,9 @@ def get_track_items(
             # add all if no track_type is defined
             return_list.append(track_item)
 
-    return return_list
+    # return output list but make sure all items are TrackItems
+    return [_i for _i in return_list
+            if type(_i) == hiero.core.TrackItem]
 
 
 def get_track_item_pype_tag(track_item):
@@ -52,10 +52,11 @@ class ExtractClipEffects(openpype.api.Extractor):
             instance.data["representations"] = list()
 
         transfer_data = [
-            "handleStart", "handleEnd", "sourceIn", "sourceOut",
-            "frameStart", "frameEnd", "sourceInH", "sourceOutH",
-            "clipIn", "clipOut", "clipInH", "clipOutH", "asset", "track",
-            "version"
+            "handleStart", "handleEnd",
+            "sourceStart", "sourceStartH", "sourceEnd", "sourceEndH",
+            "frameStart", "frameEnd",
+            "clipIn", "clipOut", "clipInH", "clipOutH",
+            "asset", "version"
         ]
 
         # pass data to version
@@ -5,7 +5,7 @@ import pyblish.api
 class PreCollectClipEffects(pyblish.api.InstancePlugin):
     """Collect soft effects instances."""
 
-    order = pyblish.api.CollectorOrder - 0.508
+    order = pyblish.api.CollectorOrder - 0.579
     label = "Pre-collect Clip Effects Instances"
     families = ["clip"]
 
@@ -24,7 +24,8 @@ class PreCollectClipEffects(pyblish.api.InstancePlugin):
         self.clip_in_h = self.clip_in - self.handle_start
         self.clip_out_h = self.clip_out + self.handle_end
 
-        track = instance.data["trackItem"]
+        track_item = instance.data["item"]
+        track = track_item.parent()
         track_index = track.trackIndex()
         tracks_effect_items = instance.context.data.get("tracksEffectItems")
         clip_effect_items = instance.data.get("clipEffectItems")
@@ -112,7 +113,12 @@ class PreCollectClipEffects(pyblish.api.InstancePlugin):
         node = sitem.node()
         node_serialized = self.node_serialisation(node)
         node_name = sitem.name()
-        node_class = re.sub(r"\d+", "", node_name)
+
+        if "_" in node_name:
+            node_class = re.sub(r"(?:_)[_0-9]+", "", node_name)  # more numbers
+        else:
+            node_class = re.sub(r"\d+", "", node_name)  # one number
+
         # collect timelineIn/Out
         effect_t_in = int(sitem.timelineIn())
         effect_t_out = int(sitem.timelineOut())
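Note: the replacement derives node_class by stripping the numeric suffix appended to soft-effect node names; names containing underscores can carry several numbers, hence the second pattern. A standalone sketch of the two branches (the sample names are hypothetical):

    import re

    for node_name in ("Transform_2", "Text_1_2", "Crop2"):
        if "_" in node_name:
            # strip "_<digits>" groups from the end of the name
            node_class = re.sub(r"(?:_)[_0-9]+", "", node_name)
        else:
            # strip a single trailing number
            node_class = re.sub(r"\d+", "", node_name)
        print(node_name, "->", node_class)
    # Transform_2 -> Transform, Text_1_2 -> Text, Crop2 -> Crop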
@@ -121,6 +127,7 @@ class PreCollectClipEffects(pyblish.api.InstancePlugin):
             return
 
         self.log.debug("node_name: `{}`".format(node_name))
+        self.log.debug("node_class: `{}`".format(node_class))
 
         return {node_name: {
             "class": node_class,
@@ -2,6 +2,9 @@ import pyblish
 import openpype
 from openpype.hosts.hiero import api as phiero
 from openpype.hosts.hiero.otio import hiero_export
+import hiero
+
+from compiler.ast import flatten
 
 # # developer reload modules
 from pprint import pformat
@@ -14,18 +17,40 @@ class PrecollectInstances(pyblish.api.ContextPlugin):
     label = "Precollect Instances"
     hosts = ["hiero"]
 
+    audio_track_items = []
+
     def process(self, context):
-        otio_timeline = context.data["otioTimeline"]
+        self.otio_timeline = context.data["otioTimeline"]
 
         selected_timeline_items = phiero.get_track_items(
-            selected=True, check_enabled=True, check_tagged=True)
+            selected=True, check_tagged=True, check_enabled=True)
 
+        # only return enabled track items
+        if not selected_timeline_items:
+            selected_timeline_items = phiero.get_track_items(
+                check_enabled=True, check_tagged=True)
+
+        self.log.info(
+            "Processing enabled track items: {}".format(
+                selected_timeline_items))
+
+        # add all tracks subtreck effect items to context
+        all_tracks = hiero.ui.activeSequence().videoTracks()
+        tracks_effect_items = self.collect_sub_track_items(all_tracks)
+        context.data["tracksEffectItems"] = tracks_effect_items
+
         # process all sellected timeline track items
         for track_item in selected_timeline_items:
 
             data = {}
             clip_name = track_item.name()
             source_clip = track_item.source()
+
+            # get clips subtracks and anotations
+            annotations = self.clip_annotations(source_clip)
+            subtracks = self.clip_subtrack(track_item)
+            self.log.debug("Annotations: {}".format(annotations))
+            self.log.debug(">> Subtracks: {}".format(subtracks))
 
             # get openpype tag data
             tag_data = phiero.get_track_item_pype_data(track_item)
@@ -76,12 +101,15 @@ class PrecollectInstances(pyblish.api.ContextPlugin):
                 "item": track_item,
                 "families": families,
                 "publish": tag_data["publish"],
-                "fps": context.data["fps"]
+                "fps": context.data["fps"],
+
+                # clip's effect
+                "clipEffectItems": subtracks,
+                "clipAnnotations": annotations
             })
 
             # otio clip data
-            otio_data = self.get_otio_clip_instance_data(
-                otio_timeline, track_item) or {}
+            otio_data = self.get_otio_clip_instance_data(track_item) or {}
             self.log.debug("__ otio_data: {}".format(pformat(otio_data)))
             data.update(otio_data)
             self.log.debug("__ data: {}".format(pformat(data)))
@@ -185,6 +213,10 @@ class PrecollectInstances(pyblish.api.ContextPlugin):
         item = data.get("item")
         clip_name = item.name()
 
+        # test if any audio clips
+        if not self.test_any_audio(item):
+            return
+
         asset = data["asset"]
         subset = "audioMain"
 
@@ -215,7 +247,28 @@ class PrecollectInstances(pyblish.api.ContextPlugin):
         self.log.debug(
             "_ instance.data: {}".format(pformat(instance.data)))
 
-    def get_otio_clip_instance_data(self, otio_timeline, track_item):
+    def test_any_audio(self, track_item):
+        # collect all audio tracks to class variable
+        if not self.audio_track_items:
+            for otio_clip in self.otio_timeline.each_clip():
+                if otio_clip.parent().kind != "Audio":
+                    continue
+                self.audio_track_items.append(otio_clip)
+
+        # get track item timeline range
+        timeline_range = self.create_otio_time_range_from_timeline_item_data(
+            track_item)
+
+        # loop trough audio track items and search for overlaping clip
+        for otio_audio in self.audio_track_items:
+            parent_range = otio_audio.range_in_parent()
+
+            # if any overaling clip found then return True
+            if openpype.lib.is_overlapping_otio_ranges(
+                    parent_range, timeline_range, strict=False):
+                return True
+
+    def get_otio_clip_instance_data(self, track_item):
         """
         Return otio objects for timeline, track and clip
 
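Note: test_any_audio caches every audio clip of the OTIO timeline once, then asks openpype.lib.is_overlapping_otio_ranges whether the track item's timeline range intersects any of them. A minimal sketch of such a non-strict overlap test on OTIO ranges (the real logic lives in the openpype helper):

    import opentimelineio as otio

    def ranges_overlap(a, b):
        # non-strict: true when the two ranges share at least one frame
        a_end = a.start_time.value + a.duration.value
        b_end = b.start_time.value + b.duration.value
        return a.start_time.value < b_end and b.start_time.value < a_end

    r1 = otio.opentime.TimeRange(otio.opentime.RationalTime(0, 25),
                                 otio.opentime.RationalTime(50, 25))
    r2 = otio.opentime.TimeRange(otio.opentime.RationalTime(40, 25),
                                 otio.opentime.RationalTime(30, 25))
    print(ranges_overlap(r1, r2))  # True: frames 40..49 are shared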
@@ -231,7 +284,7 @@ class PrecollectInstances(pyblish.api.ContextPlugin):
         ti_track_name = track_item.parent().name()
         timeline_range = self.create_otio_time_range_from_timeline_item_data(
             track_item)
-        for otio_clip in otio_timeline.each_clip():
+        for otio_clip in self.otio_timeline.each_clip():
             track_name = otio_clip.parent().name
             parent_range = otio_clip.range_in_parent()
             if ti_track_name not in track_name:
@@ -258,3 +311,76 @@ class PrecollectInstances(pyblish.api.ContextPlugin):
 
         return hiero_export.create_otio_time_range(
             frame_start, frame_duration, fps)
+
+    @staticmethod
+    def collect_sub_track_items(tracks):
+        """
+        Returns dictionary with track index as key and list of subtracks
+        """
+        # collect all subtrack items
+        sub_track_items = {}
+        for track in tracks:
+            items = track.items()
+
+            # skip if no clips on track > need track with effect only
+            if items:
+                continue
+
+            # skip all disabled tracks
+            if not track.isEnabled():
+                continue
+
+            track_index = track.trackIndex()
+            _sub_track_items = flatten(track.subTrackItems())
+
+            # continue only if any subtrack items are collected
+            if len(_sub_track_items) < 1:
+                continue
+
+            enabled_sti = []
+            # loop all found subtrack items and check if they are enabled
+            for _sti in _sub_track_items:
+                # checking if not enabled
+                if not _sti.isEnabled():
+                    continue
+                if isinstance(_sti, hiero.core.Annotation):
+                    continue
+                # collect the subtrack item
+                enabled_sti.append(_sti)
+
+            # continue only if any subtrack items are collected
+            if len(enabled_sti) < 1:
+                continue
+
+            # add collection of subtrackitems to dict
+            sub_track_items[track_index] = enabled_sti
+
+        return sub_track_items
+
+    @staticmethod
+    def clip_annotations(clip):
+        """
+        Returns list of Clip's hiero.core.Annotation
+        """
+        annotations = []
+        subTrackItems = flatten(clip.subTrackItems())
+        annotations += [item for item in subTrackItems if isinstance(
+            item, hiero.core.Annotation)]
+        return annotations
+
+    @staticmethod
+    def clip_subtrack(clip):
+        """
+        Returns list of Clip's hiero.core.SubTrackItem
+        """
+        subtracks = []
+        subTrackItems = flatten(clip.parent().subTrackItems())
+        for item in subTrackItems:
+            # avoid all anotation
+            if isinstance(item, hiero.core.Annotation):
+                continue
+            # # avoid all not anaibled
+            if not item.isEnabled():
+                continue
+            subtracks.append(item)
+        return subtracks
@@ -75,10 +75,26 @@ class PrecollectWorkfile(pyblish.api.ContextPlugin):
             "activeProject": project,
             "otioTimeline": otio_timeline,
             "currentFile": curent_file,
-            "fps": fps
+            "fps": fps,
+            "colorspace": self.get_colorspace(project),
         }
         context.data.update(context_data)
 
         self.log.info("Creating instance: {}".format(instance))
         self.log.debug("__ instance.data: {}".format(pformat(instance.data)))
         self.log.debug("__ context_data: {}".format(pformat(context_data)))
+
+    def get_colorspace(self, project):
+        # get workfile's colorspace properties
+        return {
+            "useOCIOEnvironmentOverride": project.useOCIOEnvironmentOverride(),
+            "lutSetting16Bit": project.lutSetting16Bit(),
+            "lutSetting8Bit": project.lutSetting8Bit(),
+            "lutSettingFloat": project.lutSettingFloat(),
+            "lutSettingLog": project.lutSettingLog(),
+            "lutSettingViewer": project.lutSettingViewer(),
+            "lutSettingWorkingSpace": project.lutSettingWorkingSpace(),
+            "lutUseOCIOForExport": project.lutUseOCIOForExport(),
+            "ocioConfigName": project.ocioConfigName(),
+            "ocioConfigPath": project.ocioConfigPath()
+        }
@@ -1059,7 +1059,7 @@ class WorkfileSettings(object):
         # replace reset resolution from avalon core to pype's
         self.reset_frame_range_handles()
         # add colorspace menu item
-        # self.set_colorspace()
+        self.set_colorspace()
 
     def set_favorites(self):
         work_dir = os.getenv("AVALON_WORKDIR")
@@ -4,18 +4,19 @@ import json
 from collections import OrderedDict
 
 
-class LoadLuts(api.Loader):
+class LoadEffects(api.Loader):
     """Loading colorspace soft effect exported from nukestudio"""
 
-    representations = ["lutJson"]
-    families = ["lut"]
+    representations = ["effectJson"]
+    families = ["effect"]
 
-    label = "Load Luts - nodes"
+    label = "Load Effects - nodes"
     order = 0
     icon = "cc"
     color = style.colors.light
+    ignore_attr = ["useLifetime"]
 
 
     def load(self, context, name, namespace, data):
         """
         Loading function to get the soft effects to particular read node
@@ -66,15 +67,15 @@ class LoadLuts(api.Loader):
                 for key, value in json.load(f).iteritems()}
 
         # get correct order of nodes by positions on track and subtrack
-        nodes_order = self.reorder_nodes(json_f["effects"])
+        nodes_order = self.reorder_nodes(json_f)
 
         # adding nodes to node graph
         # just in case we are in group lets jump out of it
         nuke.endGroup()
 
-        GN = nuke.createNode("Group")
-
-        GN["name"].setValue(object_name)
+        GN = nuke.createNode(
+            "Group",
+            "name {}_1".format(object_name))
 
         # adding content to the group node
         with GN:
@@ -186,7 +187,7 @@ class LoadLuts(api.Loader):
                 for key, value in json.load(f).iteritems()}
 
         # get correct order of nodes by positions on track and subtrack
-        nodes_order = self.reorder_nodes(json_f["effects"])
+        nodes_order = self.reorder_nodes(json_f)
 
         # adding nodes to node graph
         # just in case we are in group lets jump out of it
@@ -266,7 +267,11 @@ class LoadLuts(api.Loader):
             None: if nothing found
         """
         search_name = "{0}_{1}".format(asset, subset)
-        node = [n for n in nuke.allNodes() if search_name in n["name"].value()]
+
+        node = [
+            n for n in nuke.allNodes(filter="Read")
+            if search_name in n["file"].value()
+        ]
         if len(node) > 0:
             rn = node[0]
         else:
@@ -286,8 +291,10 @@ class LoadLuts(api.Loader):
 
     def reorder_nodes(self, data):
         new_order = OrderedDict()
-        trackNums = [v["trackIndex"] for k, v in data.items()]
-        subTrackNums = [v["subTrackIndex"] for k, v in data.items()]
+        trackNums = [v["trackIndex"] for k, v in data.items()
+                     if isinstance(v, dict)]
+        subTrackNums = [v["subTrackIndex"] for k, v in data.items()
+                        if isinstance(v, dict)]
 
         for trackIndex in range(
                 min(trackNums), max(trackNums) + 1):
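Note: reorder_nodes is now fed the whole effect JSON document instead of json_f["effects"], so plain metadata values can sit next to the per-node dicts; the isinstance(v, dict) guard skips them. A minimal sketch (the "assignTo" key is hypothetical):

    data = {
        "assignTo": "plateMain",  # plain metadata, not an effect dict (hypothetical)
        "Grade1": {"trackIndex": 1, "subTrackIndex": 0},
        "Transform2": {"trackIndex": 2, "subTrackIndex": 1},
    }
    trackNums = [v["trackIndex"] for v in data.values() if isinstance(v, dict)]
    print(trackNums)  # [1, 2] - the string entry is ignored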
@@ -300,6 +307,7 @@ class LoadLuts(api.Loader):
 
     def get_item(self, data, trackIndex, subTrackIndex):
         return {key: val for key, val in data.items()
+                if isinstance(val, dict)
                 if subTrackIndex == val["subTrackIndex"]
                 if trackIndex == val["trackIndex"]}
 
@@ -5,13 +5,13 @@ from collections import OrderedDict
 from openpype.hosts.nuke.api import lib
 
 
-class LoadLutsInputProcess(api.Loader):
+class LoadEffectsInputProcess(api.Loader):
     """Loading colorspace soft effect exported from nukestudio"""
 
-    representations = ["lutJson"]
-    families = ["lut"]
+    representations = ["effectJson"]
+    families = ["effect"]
 
-    label = "Load Luts - Input Process"
+    label = "Load Effects - Input Process"
     order = 0
     icon = "eye"
     color = style.colors.alert
@@ -67,15 +67,15 @@ class LoadLutsInputProcess(api.Loader):
                 for key, value in json.load(f).iteritems()}
 
         # get correct order of nodes by positions on track and subtrack
-        nodes_order = self.reorder_nodes(json_f["effects"])
+        nodes_order = self.reorder_nodes(json_f)
 
         # adding nodes to node graph
         # just in case we are in group lets jump out of it
         nuke.endGroup()
 
-        GN = nuke.createNode("Group")
-
-        GN["name"].setValue(object_name)
+        GN = nuke.createNode(
+            "Group",
+            "name {}_1".format(object_name))
 
         # adding content to the group node
         with GN:
@@ -190,7 +190,7 @@ class LoadLutsInputProcess(api.Loader):
                 for key, value in json.load(f).iteritems()}
 
         # get correct order of nodes by positions on track and subtrack
-        nodes_order = self.reorder_nodes(json_f["effects"])
+        nodes_order = self.reorder_nodes(json_f)
 
         # adding nodes to node graph
         # just in case we are in group lets jump out of it
@@ -304,8 +304,10 @@ class LoadLutsInputProcess(api.Loader):
 
     def reorder_nodes(self, data):
         new_order = OrderedDict()
-        trackNums = [v["trackIndex"] for k, v in data.items()]
-        subTrackNums = [v["subTrackIndex"] for k, v in data.items()]
+        trackNums = [v["trackIndex"] for k, v in data.items()
+                     if isinstance(v, dict)]
+        subTrackNums = [v["subTrackIndex"] for k, v in data.items()
+                        if isinstance(v, dict)]
 
         for trackIndex in range(
                 min(trackNums), max(trackNums) + 1):
@@ -318,6 +320,7 @@ class LoadLutsInputProcess(api.Loader):
 
     def get_item(self, data, trackIndex, subTrackIndex):
         return {key: val for key, val in data.items()
+                if isinstance(val, dict)
                 if subTrackIndex == val["subTrackIndex"]
                 if trackIndex == val["trackIndex"]}
 
@@ -1,6 +1,7 @@
 from openpype.hosts.nuke.api.lib import (
     on_script_load,
-    check_inventory_versions
+    check_inventory_versions,
+    WorkfileSettings
 )
 
 import nuke
@@ -9,8 +10,14 @@ from openpype.api import Logger
 log = Logger().get_logger(__name__)
 
 
-nuke.addOnScriptSave(on_script_load)
+# fix ffmpeg settings on script
+nuke.addOnScriptLoad(on_script_load)
 
+# set checker for last versions on loaded containers
 nuke.addOnScriptLoad(check_inventory_versions)
+nuke.addOnScriptSave(check_inventory_versions)
+
+# # set apply all workfile settings on script load and save
+nuke.addOnScriptLoad(WorkfileSettings().set_context_settings)
 
 log.info('Automatic syncing of write file knob to script version')
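Note: the menu module now hooks on_script_load at script load rather than save, and runs the inventory check on both events. nuke.addOnScriptLoad and nuke.addOnScriptSave are Nuke's standard callback registries; a toy standalone example (the callback body is hypothetical, not OpenPype's):

    import nuke

    def announce():
        # runs every time a script is opened
        nuke.tprint("script loaded: {}".format(nuke.root().name()))

    nuke.addOnScriptLoad(announce)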
@@ -1,4 +1,5 @@
 import os
+import json
 
 from avalon import api, pipeline
 from avalon.unreal import lib
@@ -61,10 +62,16 @@ class AnimationFBXLoader(api.Loader):
         task = unreal.AssetImportTask()
         task.options = unreal.FbxImportUI()
 
-        # If there are no options, the process cannot be automated
-        if options:
+        libpath = self.fname.replace("fbx", "json")
+
+        with open(libpath, "r") as fp:
+            data = json.load(fp)
+
+        instance_name = data.get("instance_name")
+
+        if instance_name:
             automated = True
-            actor_name = 'PersistentLevel.' + options.get('instance_name')
+            actor_name = 'PersistentLevel.' + instance_name
             actor = unreal.EditorLevelLibrary.get_actor_reference(actor_name)
             skeleton = actor.skeletal_mesh_component.skeletal_mesh.skeleton
             task.options.set_editor_property('skeleton', skeleton)
@@ -81,16 +88,31 @@ class AnimationFBXLoader(api.Loader):
 
         # set import options here
         task.options.set_editor_property(
-            'automated_import_should_detect_type', True)
+            'automated_import_should_detect_type', False)
         task.options.set_editor_property(
-            'original_import_type', unreal.FBXImportType.FBXIT_ANIMATION)
+            'original_import_type', unreal.FBXImportType.FBXIT_SKELETAL_MESH)
         task.options.set_editor_property(
             'mesh_type_to_import', unreal.FBXImportType.FBXIT_ANIMATION)
         task.options.set_editor_property('import_mesh', False)
         task.options.set_editor_property('import_animations', True)
         task.options.set_editor_property('override_full_name', True)
 
-        task.options.skeletal_mesh_import_data.set_editor_property(
-            'import_content_type',
-            unreal.FBXImportContentType.FBXICT_SKINNING_WEIGHTS
+        task.options.anim_sequence_import_data.set_editor_property(
+            'animation_length',
+            unreal.FBXAnimationLengthImportType.FBXALIT_EXPORTED_TIME
         )
+        task.options.anim_sequence_import_data.set_editor_property(
+            'import_meshes_in_bone_hierarchy', False)
+        task.options.anim_sequence_import_data.set_editor_property(
+            'use_default_sample_rate', True)
+        task.options.anim_sequence_import_data.set_editor_property(
+            'import_custom_attribute', True)
+        task.options.anim_sequence_import_data.set_editor_property(
+            'import_bone_tracks', True)
+        task.options.anim_sequence_import_data.set_editor_property(
+            'remove_redundant_keys', True)
+        task.options.anim_sequence_import_data.set_editor_property(
+            'convert_scene', True)
 
         unreal.AssetToolsHelpers.get_asset_tools().import_asset_tasks([task])
@@ -1,127 +0,0 @@
-import json
-
-from avalon import api
-import unreal
-
-
-class AnimationCollectionLoader(api.Loader):
-    """Load Unreal SkeletalMesh from FBX"""
-
-    families = ["setdress"]
-    representations = ["json"]
-
-    label = "Load Animation Collection"
-    icon = "cube"
-    color = "orange"
-
-    def load(self, context, name, namespace, options):
-        from avalon import api, pipeline
-        from avalon.unreal import lib
-        from avalon.unreal import pipeline as unreal_pipeline
-        import unreal
-
-        # Create directory for asset and avalon container
-        root = "/Game/Avalon/Assets"
-        asset = context.get('asset').get('name')
-        suffix = "_CON"
-
-        tools = unreal.AssetToolsHelpers().get_asset_tools()
-        asset_dir, container_name = tools.create_unique_asset_name(
-            "{}/{}".format(root, asset), suffix="")
-
-        container_name += suffix
-
-        unreal.EditorAssetLibrary.make_directory(asset_dir)
-
-        libpath = self.fname
-
-        with open(libpath, "r") as fp:
-            data = json.load(fp)
-
-        all_loaders = api.discover(api.Loader)
-
-        for element in data:
-            reference = element.get('_id')
-
-            loaders = api.loaders_from_representation(all_loaders, reference)
-            loader = None
-            for l in loaders:
-                if l.__name__ == "AnimationFBXLoader":
-                    loader = l
-                    break
-
-            if not loader:
-                continue
-
-            instance_name = element.get('instance_name')
-
-            api.load(
-                loader,
-                reference,
-                namespace=instance_name,
-                options=element
-            )
-
-        # Create Asset Container
-        lib.create_avalon_container(
-            container=container_name, path=asset_dir)
-
-        data = {
-            "schema": "openpype:container-2.0",
-            "id": pipeline.AVALON_CONTAINER_ID,
-            "asset": asset,
-            "namespace": asset_dir,
-            "container_name": container_name,
-            "loader": str(self.__class__.__name__),
-            "representation": context["representation"]["_id"],
-            "parent": context["representation"]["parent"],
-            "family": context["representation"]["context"]["family"]
-        }
-        unreal_pipeline.imprint(
-            "{}/{}".format(asset_dir, container_name), data)
-
-        asset_content = unreal.EditorAssetLibrary.list_assets(
-            asset_dir, recursive=True, include_folder=True
-        )
-
-        return asset_content
-
-    def update(self, container, representation):
-        from avalon import api, io
-        from avalon.unreal import pipeline
-
-        source_path = api.get_representation_path(representation)
-
-        with open(source_path, "r") as fp:
-            data = json.load(fp)
-
-        animation_containers = [
-            i for i in pipeline.ls() if
-            i.get('asset') == container.get('asset') and
-            i.get('family') == 'animation']
-
-        for element in data:
-            new_version = io.find_one({"_id": io.ObjectId(element.get('_id'))})
-            new_version_number = new_version.get('context').get('version')
-            anim_container = None
-            for i in animation_containers:
-                if i.get('container_name') == (element.get('subset') + "_CON"):
-                    anim_container = i
-                    break
-            if not anim_container:
-                continue
-
-            api.update(anim_container, new_version_number)
-
-        container_path = "{}/{}".format(container["namespace"],
-                                        container["objectName"])
-        # update metadata
-        pipeline.imprint(
-            container_path,
-            {
-                "representation": str(representation["_id"]),
-                "parent": str(representation["parent"])
-            })
-
-    def remove(self, container):
-        unreal.EditorAssetLibrary.delete_directory(container["namespace"])
@@ -11,23 +11,28 @@ from avalon.api import AvalonMongoDB
 class DeleteAssetSubset(BaseAction):
     '''Edit meta data action.'''
 
-    #: Action identifier.
+    # Action identifier.
     identifier = "delete.asset.subset"
-    #: Action label.
+    # Action label.
     label = "Delete Asset/Subsets"
-    #: Action description.
+    # Action description.
     description = "Removes from Avalon with all childs and asset from Ftrack"
     icon = statics_icon("ftrack", "action_icons", "DeleteAsset.svg")
 
     settings_key = "delete_asset_subset"
-    #: Db connection
-    dbcon = AvalonMongoDB()
+    # Db connection
+    dbcon = None
 
     splitter = {"type": "label", "value": "---"}
     action_data_by_id = {}
     asset_prefix = "asset:"
     subset_prefix = "subset:"
 
+    def __init__(self, *args, **kwargs):
+        self.dbcon = AvalonMongoDB()
+
+        super(DeleteAssetSubset, self).__init__(*args, **kwargs)
+
     def discover(self, session, entities, event):
         """ Validation """
         task_ids = []
@@ -446,7 +451,14 @@ class DeleteAssetSubset(BaseAction):
         if len(assets_to_delete) > 0:
             map_av_ftrack_id = spec_data["without_ftrack_id"]
             # Prepare data when deleting whole avalon asset
-            avalon_assets = self.dbcon.find({"type": "asset"})
+            avalon_assets = self.dbcon.find(
+                {"type": "asset"},
+                {
+                    "_id": 1,
+                    "data.visualParent": 1,
+                    "data.ftrackId": 1
+                }
+            )
             avalon_assets_by_parent = collections.defaultdict(list)
             for asset in avalon_assets:
                 asset_id = asset["_id"]
@@ -537,11 +549,13 @@ class DeleteAssetSubset(BaseAction):
             ftrack_proc_txt, ", ".join(ftrack_ids_to_delete)
         ))
 
-        ftrack_ents_to_delete = (
+        entities_by_link_len = (
             self._filter_entities_to_delete(ftrack_ids_to_delete, session)
         )
-        for entity in ftrack_ents_to_delete:
-            session.delete(entity)
+        for link_len in sorted(entities_by_link_len.keys(), reverse=True):
+            for entity in entities_by_link_len[link_len]:
+                session.delete(entity)
 
         try:
             session.commit()
         except Exception:
@@ -600,29 +614,11 @@ class DeleteAssetSubset(BaseAction):
                 joined_ids_to_delete
             )
         ).all()
-        filtered = to_delete_entities[:]
-        while True:
-            changed = False
-            _filtered = filtered[:]
-            for entity in filtered:
-                entity_id = entity["id"]
+        entities_by_link_len = collections.defaultdict(list)
+        for entity in to_delete_entities:
+            entities_by_link_len[len(entity["link"])].append(entity)
 
-                for _entity in tuple(_filtered):
-                    if entity_id == _entity["id"]:
-                        continue
-
-                    for _link in _entity["link"]:
-                        if entity_id == _link["id"] and _entity in _filtered:
-                            _filtered.remove(_entity)
-                            changed = True
-                            break
-
-            filtered = _filtered
-
-            if not changed:
-                break
-
-        return filtered
+        return entities_by_link_len
 
     def report_handle(self, report_messages, project_name, event):
         if not report_messages:
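Note: _filter_entities_to_delete no longer prunes descendants with the quadratic while-loop; in ftrack_api each entity's "link" attribute holds the chain from project root down to the entity, so grouping by link length and deleting the longest links first removes children before their parents in a single pass. A minimal sketch under those assumptions:

    import collections

    def group_by_link_len(entities):
        # deeper entities have longer "link" chains
        by_len = collections.defaultdict(list)
        for entity in entities:
            by_len[len(entity["link"])].append(entity)
        return by_len

    # delete leaves first so a parent never disappears before its children
    by_len = group_by_link_len(to_delete_entities)  # assumed ftrack query result
    for link_len in sorted(by_len, reverse=True):
        for entity in by_len[link_len]:
            session.delete(entity)  # session: assumed ftrack_api.Session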
@@ -39,7 +39,6 @@ class CollectResourcesPath(pyblish.api.InstancePlugin):
         "rig",
         "plate",
         "look",
-        "lut",
         "yetiRig",
         "yeticache",
         "nukenodes",
@@ -52,7 +51,8 @@ class CollectResourcesPath(pyblish.api.InstancePlugin):
         "fbx",
         "textures",
         "action",
-        "background"
+        "background",
+        "effect"
     ]
 
     def process(self, instance):
@@ -40,12 +40,15 @@ class ExtractOtioAudioTracks(pyblish.api.ContextPlugin):
         # get sequence
         otio_timeline = context.data["otioTimeline"]
 
-        # temp file
-        audio_temp_fpath = self.create_temp_file("audio")
-
         # get all audio inputs from otio timeline
         audio_inputs = self.get_audio_track_items(otio_timeline)
 
         if not audio_inputs:
             return
 
+        # temp file
+        audio_temp_fpath = self.create_temp_file("audio")
+
+        # create empty audio with longest duration
+        empty = self.create_empty(audio_inputs)
+
@@ -53,14 +56,14 @@ class ExtractOtioAudioTracks(pyblish.api.ContextPlugin):
         audio_inputs.insert(0, empty)
 
         # create cmd
-        cmd = self.ffmpeg_path + " "
+        cmd = '"{}"'.format(self.ffmpeg_path) + " "
         cmd += self.create_cmd(audio_inputs)
-        cmd += audio_temp_fpath
+        cmd += "\"{}\"".format(audio_temp_fpath)
 
         # run subprocess
         self.log.debug("Executing: {}".format(cmd))
         openpype.api.run_subprocess(
-            cmd, shell=True, logger=self.log
+            cmd, logger=self.log
         )
 
         # remove empty
@@ -97,17 +100,17 @@ class ExtractOtioAudioTracks(pyblish.api.ContextPlugin):
             audio_fpath = self.create_temp_file(name)
 
             cmd = " ".join([
-                self.ffmpeg_path,
+                '"{}"'.format(self.ffmpeg_path),
                 "-ss {}".format(start_sec),
                 "-t {}".format(duration_sec),
-                "-i {}".format(audio_file),
+                "-i \"{}\"".format(audio_file),
                 audio_fpath
             ])
 
             # run subprocess
             self.log.debug("Executing: {}".format(cmd))
             openpype.api.run_subprocess(
-                cmd, shell=True, logger=self.log
+                cmd, logger=self.log
             )
         else:
             audio_fpath = recycling_file.pop()
@@ -218,11 +221,11 @@ class ExtractOtioAudioTracks(pyblish.api.ContextPlugin):
 
         # create empty cmd
         cmd = " ".join([
-            self.ffmpeg_path,
+            '"{}"'.format(self.ffmpeg_path),
             "-f lavfi",
             "-i anullsrc=channel_layout=stereo:sample_rate=48000",
             "-t {}".format(max_duration_sec),
-            empty_fpath
+            "\"{}\"".format(empty_fpath)
         ])
 
         # generate empty with ffmpeg
@@ -230,7 +233,7 @@ class ExtractOtioAudioTracks(pyblish.api.ContextPlugin):
         self.log.debug("Executing: {}".format(cmd))
 
         openpype.api.run_subprocess(
-            cmd, shell=True, logger=self.log
+            cmd, logger=self.log
        )
 
         # return dict with output
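Note: every ffmpeg call in these extractors now wraps the executable and file paths in double quotes and drops shell=True, so paths containing spaces survive and no shell is involved. A minimal sketch of the effect (openpype.api.run_subprocess is assumed to split the string shlex-style before executing):

    import shlex
    import subprocess

    ffmpeg_path = "/opt/ffmpeg build/ffmpeg"  # hypothetical install path with a space
    cmd = '"{}" -i "{}" "{}"'.format(ffmpeg_path, "in.mov", "out.wav")

    args = shlex.split(cmd)   # quotes keep the spaced path as one token
    print(args[0])            # /opt/ffmpeg build/ffmpeg
    subprocess.run(args)      # executed without a shell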
@@ -209,7 +209,7 @@ class ExtractOTIOReview(openpype.api.Extractor):
             "frameStart": start,
             "frameEnd": end,
             "stagingDir": self.staging_dir,
-            "tags": ["review", "ftrackreview", "delete"]
+            "tags": ["review", "delete"]
         }
 
         collection = clique.Collection(
@@ -313,7 +313,7 @@ class ExtractOTIOReview(openpype.api.Extractor):
             out_frame_start += end_offset
 
         # start command list
-        command = [ffmpeg_path]
+        command = ['"{}"'.format(ffmpeg_path)]
 
         if sequence:
             input_dir, collection = sequence
@@ -326,7 +326,7 @@ class ExtractOTIOReview(openpype.api.Extractor):
             # form command for rendering gap files
             command.extend([
                 "-start_number {}".format(in_frame_start),
-                "-i {}".format(input_path)
+                "-i \"{}\"".format(input_path)
             ])
 
         elif video:
@@ -341,7 +341,7 @@ class ExtractOTIOReview(openpype.api.Extractor):
             command.extend([
                 "-ss {}".format(sec_start),
                 "-t {}".format(sec_duration),
-                "-i {}".format(video_path)
+                "-i \"{}\"".format(video_path)
             ])
 
         elif gap:
@@ -360,11 +360,13 @@ class ExtractOTIOReview(openpype.api.Extractor):
         # add output attributes
         command.extend([
             "-start_number {}".format(out_frame_start),
-            output_path
+            "\"{}\"".format(output_path)
         ])
         # execute
         self.log.debug("Executing: {}".format(" ".join(command)))
-        output = openpype.api.run_subprocess(" ".join(command), shell=True)
+        output = openpype.api.run_subprocess(
+            " ".join(command), logger=self.log
+        )
         self.log.debug("Output: {}".format(output))
 
     def _generate_used_frames(self, duration, end_offset=None):
@@ -78,7 +78,6 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
         "rig",
         "plate",
         "look",
-        "lut",
         "audio",
         "yetiRig",
         "yeticache",
@@ -97,7 +96,8 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
         "editorial",
         "background",
         "camerarig",
-        "redshiftproxy"
+        "redshiftproxy",
+        "effect"
     ]
     exclude_families = ["clip"]
     db_representation_context_keys = [
@@ -10,9 +10,51 @@
     PS> .\run_project_manager.ps1
 
 #>
 
+$art = @"
+
+. . .. . ..
+_oOOP3OPP3Op_. .
+.PPpo~. .. ~2p. .. .... . .
+.Ppo . .pPO3Op.. . O:. . . .
+.3Pp . oP3'. 'P33. . 4 .. . . . .. . . .
+.~OP 3PO. .Op3 : . .. _____ _____ _____
+.P3O . oP3oP3O3P' . . . . / /./ /./ /
+O3:. O3p~ . .:. . ./____/./____/ /____/
+'P . 3p3. oP3~. ..P:. . . .. . . .. . . .
+. ': . Po' .Opo'. .3O. . o[ by Pype Club ]]]==- - - . .
+. '_ .. . . _OP3.. . .https://openpype.io.. .
+~P3.OPPPO3OP~ . .. .
+. ' '. . .. . . . .. .
+
+"@
+
+Write-Host $art -ForegroundColor DarkGreen
+
 $current_dir = Get-Location
 $script_dir = Split-Path -Path $MyInvocation.MyCommand.Definition -Parent
 $openpype_root = (Get-Item $script_dir).parent.FullName
 
+$env:_INSIDE_OPENPYPE_TOOL = "1"
+
+# make sure Poetry is in PATH
+if (-not (Test-Path 'env:POETRY_HOME')) {
+    $env:POETRY_HOME = "$openpype_root\.poetry"
+}
+$env:PATH = "$($env:PATH);$($env:POETRY_HOME)\bin"
+
+Set-Location -Path $openpype_root
+
+Write-Host ">>> " -NoNewline -ForegroundColor Green
+Write-Host "Reading Poetry ... " -NoNewline
+if (-not (Test-Path -PathType Container -Path "$openpype_root\.poetry\bin")) {
+    Write-Host "NOT FOUND" -ForegroundColor Yellow
+    Write-Host "*** " -NoNewline -ForegroundColor Yellow
+    Write-Host "We need to install Poetry create virtual env first ..."
+    & "$openpype_root\tools\create_env.ps1"
+} else {
+    Write-Host "OK" -ForegroundColor Green
+}
+
 & poetry run python "$($openpype_root)\start.py" projectmanager
 Set-Location -Path $current_dir
tools/run_projectmanager.sh (new executable file, 103 lines)
@@ -0,0 +1,103 @@
+#!/usr/bin/env bash
+
+# Run OpenPype Settings GUI
+
+
+art () {
+  cat <<-EOF
+
+. . .. . ..
+_oOOP3OPP3Op_. .
+.PPpo~·   ·· ~2p.  ··  ····   ·  ·
+·Ppo · .pPO3Op.· · O:· · · ·
+.3Pp · oP3'· 'P33· · 4 ·· · · · ·· · · ·
+·~OP 3PO· .Op3 : · ·· _____ _____ _____
+·P3O · oP3oP3O3P' · · · · / /·/ /·/ /
+O3:· O3p~ · ·:· · ·/____/·/____/ /____/
+'P · 3p3· oP3~· ·.P:· · · ·· · · ·· · · ·
+· ': · Po' ·Opo'· .3O· . o[ by Pype Club ]]]==- - - · ·
+· '_ .. · . _OP3·· · ·https://openpype.io·· ·
+~P3·OPPPO3OP~ · ·· ·
+· ' '· · ·· · · · ·· ·
+
+EOF
+}
+
+# Colors for terminal
+
+RST='\033[0m'        # Text Reset
+
+# Regular Colors
+Black='\033[0;30m'   # Black
+Red='\033[0;31m'     # Red
+Green='\033[0;32m'   # Green
+Yellow='\033[0;33m'  # Yellow
+Blue='\033[0;34m'    # Blue
+Purple='\033[0;35m'  # Purple
+Cyan='\033[0;36m'    # Cyan
+White='\033[0;37m'   # White
+
+# Bold
+BBlack='\033[1;30m'  # Black
+BRed='\033[1;31m'    # Red
+BGreen='\033[1;32m'  # Green
+BYellow='\033[1;33m' # Yellow
+BBlue='\033[1;34m'   # Blue
+BPurple='\033[1;35m' # Purple
+BCyan='\033[1;36m'   # Cyan
+BWhite='\033[1;37m'  # White
+
+# Bold High Intensity
+BIBlack='\033[1;90m'  # Black
+BIRed='\033[1;91m'    # Red
+BIGreen='\033[1;92m'  # Green
+BIYellow='\033[1;93m' # Yellow
+BIBlue='\033[1;94m'   # Blue
+BIPurple='\033[1;95m' # Purple
+BICyan='\033[1;96m'   # Cyan
+BIWhite='\033[1;97m'  # White
+
+
+##############################################################################
+# Return absolute path
+# Globals:
+#   None
+# Arguments:
+#   Path to resolve
+# Returns:
+#   None
+###############################################################################
+realpath () {
+  echo $(cd $(dirname "$1"); pwd)/$(basename "$1")
+}
+
+# Main
+main () {
+
+  # Directories
+  openpype_root=$(realpath $(dirname $(dirname "${BASH_SOURCE[0]}")))
+
+  _inside_openpype_tool="1"
+
+  # make sure Poetry is in PATH
+  if [[ -z $POETRY_HOME ]]; then
+    export POETRY_HOME="$openpype_root/.poetry"
+  fi
+  export PATH="$POETRY_HOME/bin:$PATH"
+
+  pushd "$openpype_root" > /dev/null || return > /dev/null
+
+  echo -e "${BIGreen}>>>${RST} Reading Poetry ... \c"
+  if [ -f "$POETRY_HOME/bin/poetry" ]; then
+    echo -e "${BIGreen}OK${RST}"
+  else
+    echo -e "${BIYellow}NOT FOUND${RST}"
+    echo -e "${BIYellow}***${RST} We need to install Poetry and virtual env ..."
+    . "$openpype_root/tools/create_env.sh" || { echo -e "${BIRed}!!!${RST} Poetry installation failed"; return; }
+  fi
+
+  echo -e "${BIGreen}>>>${RST} Generating zip from current sources ..."
+  poetry run python "$openpype_root/start.py" projectmanager
+}
+
+main