[Automated] Merged develop into main

pypebot 2022-02-19 04:33:13 +01:00 committed by GitHub
commit 8e3887cbb8
118 changed files with 2973 additions and 2506 deletions


@ -20,7 +20,8 @@ jobs:
- uses: actions/checkout@v1
- uses: actions/setup-node@v1
with:
node-version: '12.x'
node-version: 14.x
cache: yarn
- name: Test Build
run: |
cd website
@ -41,8 +42,8 @@ jobs:
- uses: actions/setup-node@v1
with:
node-version: '12.x'
node-version: 14.x
cache: yarn
- name: 🔨 Build
run: |
cd website


@ -1,5 +1,5 @@
<?xml version="1.0" encoding="UTF-8"?>
<ExtensionManifest Version="8.0" ExtensionBundleId="com.openpype.AE.panel" ExtensionBundleVersion="1.0.21"
<ExtensionManifest Version="8.0" ExtensionBundleId="com.openpype.AE.panel" ExtensionBundleVersion="1.0.22"
ExtensionBundleName="openpype" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
<ExtensionList>
<Extension Id="com.openpype.AE.panel" Version="1.0" />


@ -301,6 +301,15 @@ function main(websocket_url){
return get_extension_version();
});
RPC.addRoute('AfterEffects.get_app_version', function (data) {
log.warn('Server called client route "get_app_version":', data);
return runEvalScript("getAppVersion()")
.then(function(result){
log.warn("get_app_version: " + result);
return result;
});
});
RPC.addRoute('AfterEffects.close', function (data) {
log.warn('Server called client route "close":', data);
return runEvalScript("close()");


@ -438,7 +438,10 @@ function getAudioUrlForComp(comp_id){
for (i = 1; i <= item.numLayers; ++i){
var layer = item.layers[i];
if (layer instanceof AVLayer){
return layer.source.file.fsName.toString();
if (layer.hasAudio){
source_url = layer.source.file.fsName.toString()
return _prepareSingleValue(source_url);
}
}
}
@ -715,6 +718,10 @@ function close(){
app.quit();
}
function getAppVersion(){
return _prepareSingleValue(app.version);
}
function _prepareSingleValue(value){
return JSON.stringify({"result": value})
}


@ -537,6 +537,13 @@ class AfterEffectsServerStub():
return self._handle_return(res)
def get_app_version(self):
"""Returns version number of installed application (17.5...)."""
res = self.websocketserver.call(self.client.call(
'AfterEffects.get_app_version'))
return self._handle_return(res)
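A hedged usage sketch (assuming `stub` is a connected `AfterEffectsServerStub`, e.g. obtained via `get_stub()` as in the creator plug-in further below):

    app_version = stub.get_app_version()   # e.g. "18.4.1"
    app_major_minor = app_version[0:4]     # "18.4" -- the slice CollectAERender takes below
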
def close(self):
res = self.websocketserver.call(self.client.call('AfterEffects.close'))


@ -19,6 +19,7 @@ class CreateRender(openpype.api.Creator):
name = "renderDefault"
label = "Render on Farm"
family = "render"
defaults = ["Main"]
def process(self):
stub = get_stub() # only after After Effects is up


@ -20,6 +20,7 @@ class AERenderInstance(RenderInstance):
fps = attr.ib(default=None)
projectEntity = attr.ib(default=None)
stagingDir = attr.ib(default=None)
app_version = attr.ib(default=None)
class CollectAERender(abstract_collect_render.AbstractCollectRender):
@ -41,6 +42,9 @@ class CollectAERender(abstract_collect_render.AbstractCollectRender):
def get_instances(self, context):
instances = []
app_version = self.stub.get_app_version()
app_version = app_version[0:4]
current_file = context.data["currentFile"]
version = context.data["version"]
asset_entity = context.data["assetEntity"]
@ -105,7 +109,8 @@ class CollectAERender(abstract_collect_render.AbstractCollectRender):
frameEnd=frameEnd,
frameStep=1,
toBeRenderedOn='deadline',
fps=fps
fps=fps,
app_version=app_version
)
comp = compositions_by_id.get(int(item_id))


@ -131,6 +131,8 @@ def deselect_all():
class Creator(PypeCreatorMixin, avalon.api.Creator):
"""Base class for Creator plug-ins."""
defaults = ['Main']
def process(self):
collection = bpy.data.collections.new(name=self.data["subset"])
bpy.context.scene.collection.children.link(collection)


@ -3,3 +3,20 @@ import os
HOST_DIR = os.path.dirname(
os.path.abspath(__file__)
)
def add_implementation_envs(env, _app):
# Add requirements to DL_PYTHON_HOOK_PATH
pype_root = os.environ["OPENPYPE_REPOS_ROOT"]
env["DL_PYTHON_HOOK_PATH"] = os.path.join(
pype_root, "openpype", "hosts", "flame", "startup")
env.pop("QT_AUTO_SCREEN_SCALE_FACTOR", None)
# Set default values if are not already set via settings
defaults = {
"LOGLEVEL": "DEBUG"
}
for key, value in defaults.items():
if not env.get(key):
env[key] = value
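A hedged sketch of the hook's effect (assumes `OPENPYPE_REPOS_ROOT` is set and that `add_implementation_envs` defined above is in scope):

    import os

    env = dict(os.environ)
    add_implementation_envs(env, None)   # the `_app` argument is unused here
    # env now has DL_PYTHON_HOOK_PATH -> .../openpype/hosts/flame/startup,
    # QT_AUTO_SCREEN_SCALE_FACTOR removed, and LOGLEVEL defaulted to "DEBUG"
    # only when it was not already set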


@ -28,7 +28,8 @@ from .lib import (
get_reformated_filename,
get_frame_from_filename,
get_padding_from_filename,
maintained_object_duplication
maintained_object_duplication,
get_clip_segment
)
from .utils import (
setup,
@ -52,7 +53,10 @@ from .menu import (
)
from .plugin import (
Creator,
PublishableClip
PublishableClip,
ClipLoader,
OpenClipSolver
)
from .workio import (
open_file,
@ -96,6 +100,7 @@ __all__ = [
"get_frame_from_filename",
"get_padding_from_filename",
"maintained_object_duplication",
"get_clip_segment",
# pipeline
"install",
@ -122,6 +127,8 @@ __all__ = [
# plugin
"Creator",
"PublishableClip",
"ClipLoader",
"OpenClipSolver",
# workio
"open_file",


@ -692,3 +692,18 @@ def maintained_object_duplication(item):
finally:
# delete the item at the end
flame.delete(duplicate)
def get_clip_segment(flame_clip):
name = flame_clip.name.get_value()
version = flame_clip.versions[0]
track = version.tracks[0]
segments = track.segments
if len(segments) < 1:
raise ValueError("Clip `{}` has no segments!".format(name))
if len(segments) > 1:
raise ValueError("Clip `{}` has too many segments!".format(name))
return segments[0]
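A hedged usage sketch (`flame_clip` is assumed to be a flame PyClip, for example the openClip imported by the LoadClip plug-in further below):

    try:
        segment = get_clip_segment(flame_clip)
    except ValueError as exc:
        # raised when the clip has no segments or more than one
        print(exc)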


@ -4,6 +4,7 @@ Basic avalon integration
import os
import contextlib
from avalon import api as avalon
from avalon.pipeline import AVALON_CONTAINER_ID
from pyblish import api as pyblish
from openpype.api import Logger
from .lib import (
@ -56,14 +57,31 @@ def uninstall():
log.info("OpenPype Flame host uninstalled ...")
def containerise(tl_segment,
def containerise(flame_clip_segment,
name,
namespace,
context,
loader=None,
data=None):
# TODO: containerise
pass
data_imprint = {
"schema": "openpype:container-2.0",
"id": AVALON_CONTAINER_ID,
"name": str(name),
"namespace": str(namespace),
"loader": str(loader),
"representation": str(context["representation"]["_id"]),
}
if data:
for k, v in data.items():
data_imprint[k] = v
log.debug("_ data_imprint: {}".format(data_imprint))
set_segment_data_marker(flame_clip_segment, data_imprint)
return True
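A hedged sketch of the intended call shape, mirroring the commented-out "TODO: finish the containerisation" block in the LoadClip plug-in below (name, namespace and data values are made up for illustration):

    containerise(
        opc_segment,                 # a clip/timeline segment
        name="plateMain",
        namespace="sh010",
        context=context,             # the loader's context dict
        loader="LoadClip",
        data={"objectName": "sh010_plateMain_exr"},
    )
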
def ls():


@ -1,7 +1,14 @@
import os
import re
import shutil
import sys
from avalon.vendor import qargparse
from xml.etree import ElementTree as ET
import six
from Qt import QtWidgets, QtCore
import openpype.api as openpype
from openpype import style
import avalon.api as avalon
from . import (
lib as flib,
pipeline as fpipeline,
@ -644,3 +651,274 @@ class PublishableClip:
# Publishing plugin functions
# Loader plugin functions
class ClipLoader(avalon.Loader):
"""A basic clip loader for Flame
This implements the basic behavior for a loader to inherit from: it
containerizes the reference and implements the `remove` and
`update` logic.
"""
options = [
qargparse.Boolean(
"handles",
label="Set handles",
default=0,
help="Also set handles to clip as In/Out marks"
)
]
class OpenClipSolver:
media_script_path = "/opt/Autodesk/mio/current/dl_get_media_info"
tmp_name = "_tmp.clip"
tmp_file = None
create_new_clip = False
out_feed_nb_ticks = None
out_feed_fps = None
out_feed_drop_mode = None
log = log
def __init__(self, openclip_file_path, feed_data):
# test if media script path exists
self._validate_media_script_path()
# new feed variables:
feed_path = feed_data["path"]
self.feed_version_name = feed_data["version"]
self.feed_colorspace = feed_data.get("colorspace")
if feed_data.get("logger"):
self.log = feed_data["logger"]
# derive other feed variables
self.feed_basename = os.path.basename(feed_path)
self.feed_dir = os.path.dirname(feed_path)
self.feed_ext = os.path.splitext(self.feed_basename)[1][1:].lower()
if not os.path.isfile(openclip_file_path):
# openclip does not exist yet and will be created
self.tmp_file = self.out_file = openclip_file_path
self.create_new_clip = True
else:
# output a temp file
self.out_file = openclip_file_path
self.tmp_file = os.path.join(self.feed_dir, self.tmp_name)
self._clear_tmp_file()
self.log.info("Temp File: {}".format(self.tmp_file))
def make(self):
self._generate_media_info_file()
if self.create_new_clip:
# New openClip
self._create_new_open_clip()
else:
self._update_open_clip()
def _validate_media_script_path(self):
if not os.path.isfile(self.media_script_path):
raise IOError("Media Scirpt does not exist: `{}`".format(
self.media_script_path))
def _generate_media_info_file(self):
# Create cmd arguments for getting the xml media info file
cmd_args = [
self.media_script_path,
"-e", self.feed_ext,
"-o", self.tmp_file,
self.feed_dir
]
# execute creation of clip xml template data
try:
openpype.run_subprocess(cmd_args)
except TypeError:
self.log.error("Error creating self.tmp_file")
six.reraise(*sys.exc_info())
def _clear_tmp_file(self):
if os.path.isfile(self.tmp_file):
os.remove(self.tmp_file)
def _clear_handler(self, xml_object):
for handler in xml_object.findall("./handler"):
self.log.debug("Handler found")
xml_object.remove(handler)
def _create_new_open_clip(self):
self.log.info("Building new openClip")
tmp_xml = ET.parse(self.tmp_file)
tmp_xml_feeds = tmp_xml.find('tracks/track/feeds')
tmp_xml_feeds.set('currentVersion', self.feed_version_name)
for tmp_feed in tmp_xml_feeds:
tmp_feed.set('vuid', self.feed_version_name)
# add colorspace if any is set
if self.feed_colorspace:
self._add_colorspace(tmp_feed, self.feed_colorspace)
self._clear_handler(tmp_feed)
tmp_xml_versions_obj = tmp_xml.find('versions')
tmp_xml_versions_obj.set('currentVersion', self.feed_version_name)
for xml_new_version in tmp_xml_versions_obj:
xml_new_version.set('uid', self.feed_version_name)
xml_new_version.set('type', 'version')
xml_data = self._fix_xml_data(tmp_xml)
self.log.info("Adding feed version: {}".format(self.feed_basename))
self._write_result_xml_to_file(xml_data)
self.log.info("openClip Updated: {}".format(self.tmp_file))
def _update_open_clip(self):
self.log.info("Updating openClip ..")
out_xml = ET.parse(self.out_file)
tmp_xml = ET.parse(self.tmp_file)
self.log.debug(">> out_xml: {}".format(out_xml))
self.log.debug(">> tmp_xml: {}".format(tmp_xml))
# Get new feed from tmp file
tmp_xml_feed = tmp_xml.find('tracks/track/feeds/feed')
self._clear_handler(tmp_xml_feed)
self._get_time_info_from_origin(out_xml)
if self.out_feed_fps:
tmp_feed_fps_obj = tmp_xml_feed.find(
"startTimecode/rate")
tmp_feed_fps_obj.text = self.out_feed_fps
if self.out_feed_nb_ticks:
tmp_feed_nb_ticks_obj = tmp_xml_feed.find(
"startTimecode/nbTicks")
tmp_feed_nb_ticks_obj.text = self.out_feed_nb_ticks
if self.out_feed_drop_mode:
tmp_feed_drop_mode_obj = tmp_xml_feed.find(
"startTimecode/dropMode")
tmp_feed_drop_mode_obj.text = self.out_feed_drop_mode
new_path_obj = tmp_xml_feed.find(
"spans/span/path")
new_path = new_path_obj.text
feed_added = False
if not self._feed_exists(out_xml, new_path):
tmp_xml_feed.set('vuid', self.feed_version_name)
# Append new temp file feed to .clip source out xml
out_track = out_xml.find("tracks/track")
# add colorspace if any is set
if self.feed_colorspace:
self._add_colorspace(tmp_xml_feed, self.feed_colorspace)
out_feeds = out_track.find('feeds')
out_feeds.set('currentVersion', self.feed_version_name)
out_feeds.append(tmp_xml_feed)
self.log.info(
"Appending new feed: {}".format(
self.feed_version_name))
feed_added = True
if feed_added:
# Append vUID to versions
out_xml_versions_obj = out_xml.find('versions')
out_xml_versions_obj.set(
'currentVersion', self.feed_version_name)
new_version_obj = ET.Element(
"version", {"type": "version", "uid": self.feed_version_name})
out_xml_versions_obj.insert(0, new_version_obj)
xml_data = self._fix_xml_data(out_xml)
# first create backup
self._create_openclip_backup_file(self.out_file)
self.log.info("Adding feed version: {}".format(
self.feed_version_name))
self._write_result_xml_to_file(xml_data)
self.log.info("openClip Updated: {}".format(self.out_file))
self._clear_tmp_file()
def _get_time_info_from_origin(self, xml_data):
try:
for out_track in xml_data.iter('track'):
for out_feed in out_track.iter('feed'):
out_feed_nb_ticks_obj = out_feed.find(
'startTimecode/nbTicks')
self.out_feed_nb_ticks = out_feed_nb_ticks_obj.text
out_feed_fps_obj = out_feed.find(
'startTimecode/rate')
self.out_feed_fps = out_feed_fps_obj.text
out_feed_drop_mode_obj = out_feed.find(
'startTimecode/dropMode')
self.out_feed_drop_mode = out_feed_drop_mode_obj.text
break
else:
continue
except Exception as msg:
self.log.warning(msg)
def _feed_exists(self, xml_data, path):
# loop all available feed paths and check if
# the path is not already in file
for src_path in xml_data.iter('path'):
if path == src_path.text:
self.log.warning(
"Not appending file as it already is in .clip file")
return True
def _fix_xml_data(self, xml_data):
xml_root = xml_data.getroot()
self._clear_handler(xml_root)
return ET.tostring(xml_root).decode('utf-8')
def _write_result_xml_to_file(self, xml_data):
with open(self.out_file, "w") as f:
f.write(xml_data)
def _create_openclip_backup_file(self, file):
bck_file = "{}.bak".format(file)
# if backup does not exist
if not os.path.isfile(bck_file):
shutil.copy2(file, bck_file)
else:
# in case it already exists, fall back to numbered backups
created = False
for _i in range(1, 99):
bck_file = "{name}.bak.{idx:0>2}".format(
name=file,
idx=_i)
# create numbered backup file
if not os.path.isfile(bck_file):
shutil.copy2(file, bck_file)
created = True
break
# in case no free numbered slot was found, use .bak.last
if not created:
bck_file = "{}.bak.last".format(file)
shutil.copy2(file, bck_file)
def _add_colorspace(self, feed_obj, profile_name):
feed_storage_obj = feed_obj.find("storageFormat")
feed_clr_obj = feed_storage_obj.find("colourSpace")
# create the colourSpace element only if it does not exist yet
if feed_clr_obj is None:
feed_clr_obj = ET.Element(
"colourSpace", {"type": "string"})
feed_storage_obj.append(feed_clr_obj)
feed_clr_obj.text = profile_name
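A hedged usage sketch mirroring how the LoadClip plug-in below drives this class (paths, version and colorspace are placeholders):

    feed_data = {
        "path": "/projects/sh010/publish/plateMain/v003/sh010_plateMain.1001.exr",
        "version": "v003",
        "colorspace": "ACES - ACEScg",   # optional
        # "logger": some_logger,         # optional, defaults to the module log
    }
    OpenClipSolver("/projects/sh010/work/sh010_plateMain.clip", feed_data).make()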


@ -4,9 +4,13 @@ import tempfile
import contextlib
import socket
from openpype.lib import (
PreLaunchHook, get_openpype_username)
PreLaunchHook,
get_openpype_username
)
from openpype.lib.applications import (
ApplicationLaunchFailed
)
from openpype.hosts import flame as opflame
import openpype.hosts.flame.api as opfapi
import openpype
from pprint import pformat
@ -33,7 +37,25 @@ class FlamePrelaunch(PreLaunchHook):
"""Hook entry method."""
project_doc = self.data["project_doc"]
project_name = project_doc["name"]
# get image io
project_anatomy = self.data["anatomy"]
# make sure anatomy settings have the flame key
if not project_anatomy["imageio"].get("flame"):
raise ApplicationLaunchFailed((
"Anatomy project settings are missing `flame` key. "
"Please make sure you remove project overides on "
"Anatomy Image io")
)
imageio_flame = project_anatomy["imageio"]["flame"]
# get user name and host name
user_name = get_openpype_username()
user_name = user_name.replace(".", "_")
hostname = socket.gethostname() # not returning wiretap host name
self.log.debug("Collected user \"{}\"".format(user_name))
@ -41,7 +63,7 @@ class FlamePrelaunch(PreLaunchHook):
_db_p_data = project_doc["data"]
width = _db_p_data["resolutionWidth"]
height = _db_p_data["resolutionHeight"]
fps = int(_db_p_data["fps"])
fps = float(_db_p_data["fps"])
project_data = {
"Name": project_doc["name"],
@ -52,8 +74,8 @@ class FlamePrelaunch(PreLaunchHook):
"FrameHeight": int(height),
"AspectRatio": float((width / height) * _db_p_data["pixelAspect"]),
"FrameRate": "{} fps".format(fps),
"FrameDepth": "16-bit fp",
"FieldDominance": "PROGRESSIVE"
"FrameDepth": str(imageio_flame["project"]["frameDepth"]),
"FieldDominance": str(imageio_flame["project"]["fieldDominance"])
}
data_to_script = {
@ -61,10 +83,10 @@ class FlamePrelaunch(PreLaunchHook):
"host_name": _env.get("FLAME_WIRETAP_HOSTNAME") or hostname,
"volume_name": _env.get("FLAME_WIRETAP_VOLUME"),
"group_name": _env.get("FLAME_WIRETAP_GROUP"),
"color_policy": "ACES 1.1",
"color_policy": str(imageio_flame["project"]["colourPolicy"]),
# from project
"project_name": project_doc["name"],
"project_name": project_name,
"user_name": user_name,
"project_data": project_data
}
@ -77,8 +99,6 @@ class FlamePrelaunch(PreLaunchHook):
app_arguments = self._get_launch_arguments(data_to_script)
opfapi.setup(self.launch_context.env)
self.launch_context.launch_args.extend(app_arguments)
def _add_pythonpath(self):


@ -0,0 +1,247 @@
import os
import flame
from pprint import pformat
import openpype.hosts.flame.api as opfapi
class LoadClip(opfapi.ClipLoader):
"""Load a subset to timeline as clip
Place clip to timeline on its asset origin timings collected
during conforming to project
"""
families = ["render2d", "source", "plate", "render", "review"]
representations = ["exr", "dpx", "jpg", "jpeg", "png", "h264"]
label = "Load as clip"
order = -10
icon = "code-fork"
color = "orange"
# settings
reel_group_name = "OpenPype_Reels"
reel_name = "Loaded"
clip_name_template = "{asset}_{subset}_{representation}"
def load(self, context, name, namespace, options):
# get flame objects
fproject = flame.project.current_project
self.fpd = fproject.current_workspace.desktop
# load clip to timeline and get main variables
namespace = namespace
version = context['version']
version_data = version.get("data", {})
version_name = version.get("name", None)
colorspace = version_data.get("colorspace", None)
clip_name = self.clip_name_template.format(
**context["representation"]["context"])
# todo: settings in imageio
# convert colorspace with ocio to flame mapping
# in imageio flame section
colorspace = colorspace
# create workfile path
workfile_dir = os.environ["AVALON_WORKDIR"]
openclip_dir = os.path.join(
workfile_dir, clip_name
)
openclip_path = os.path.join(
openclip_dir, clip_name + ".clip"
)
if not os.path.exists(openclip_dir):
os.makedirs(openclip_dir)
# prepare clip data from context and send it to openClipLoader
loading_context = {
"path": self.fname.replace("\\", "/"),
"colorspace": colorspace,
"version": "v{:0>3}".format(version_name),
"logger": self.log
}
self.log.debug(pformat(
loading_context
))
self.log.debug(openclip_path)
# make openpype clip file
opfapi.OpenClipSolver(openclip_path, loading_context).make()
# prepare Reel group in actual desktop
opc = self._get_clip(
clip_name,
openclip_path
)
# add additional metadata from the version to imprint Avalon knob
add_keys = [
"frameStart", "frameEnd", "source", "author",
"fps", "handleStart", "handleEnd"
]
# move all version data keys to tag data
data_imprint = {}
for key in add_keys:
data_imprint.update({
key: version_data.get(key, str(None))
})
# add variables related to version context
data_imprint.update({
"version": version_name,
"colorspace": colorspace,
"objectName": clip_name
})
# TODO: finish the containerisation
# opc_segment = opfapi.get_clip_segment(opc)
# return opfapi.containerise(
# opc_segment,
# name, namespace, context,
# self.__class__.__name__,
# data_imprint)
return opc
def _get_clip(self, name, clip_path):
reel = self._get_reel()
# with maintained openclip as opc
matching_clip = [cl for cl in reel.clips
if cl.name.get_value() == name]
if matching_clip:
return matching_clip.pop()
else:
created_clips = flame.import_clips(str(clip_path), reel)
return created_clips.pop()
def _get_reel(self):
matching_rgroup = [
rg for rg in self.fpd.reel_groups
if rg.name.get_value() == self.reel_group_name
]
if not matching_rgroup:
reel_group = self.fpd.create_reel_group(str(self.reel_group_name))
for _r in reel_group.reels:
if "reel" not in _r.name.get_value().lower():
continue
self.log.debug("Removing: {}".format(_r.name))
flame.delete(_r)
else:
reel_group = matching_rgroup.pop()
matching_reel = [
re for re in reel_group.reels
if re.name.get_value() == self.reel_name
]
if not matching_reel:
reel_group = reel_group.create_reel(str(self.reel_name))
else:
reel_group = matching_reel.pop()
return reel_group
def _get_segment_from_clip(self, clip):
# unwrapping segment from input clip
pass
# def switch(self, container, representation):
# self.update(container, representation)
# def update(self, container, representation):
# """ Updating previously loaded clips
# """
# # load clip to timeline and get main variables
# name = container['name']
# namespace = container['namespace']
# track_item = phiero.get_track_items(
# track_item_name=namespace)
# version = io.find_one({
# "type": "version",
# "_id": representation["parent"]
# })
# version_data = version.get("data", {})
# version_name = version.get("name", None)
# colorspace = version_data.get("colorspace", None)
# object_name = "{}_{}".format(name, namespace)
# file = api.get_representation_path(representation).replace("\\", "/")
# clip = track_item.source()
# # reconnect media to new path
# clip.reconnectMedia(file)
# # set colorspace
# if colorspace:
# clip.setSourceMediaColourTransform(colorspace)
# # add additional metadata from the version to imprint Avalon knob
# add_keys = [
# "frameStart", "frameEnd", "source", "author",
# "fps", "handleStart", "handleEnd"
# ]
# # move all version data keys to tag data
# data_imprint = {}
# for key in add_keys:
# data_imprint.update({
# key: version_data.get(key, str(None))
# })
# # add variables related to version context
# data_imprint.update({
# "representation": str(representation["_id"]),
# "version": version_name,
# "colorspace": colorspace,
# "objectName": object_name
# })
# # update color of clip regarding the version order
# self.set_item_color(track_item, version)
# return phiero.update_container(track_item, data_imprint)
# def remove(self, container):
# """ Removing previously loaded clips
# """
# # load clip to timeline and get main variables
# namespace = container['namespace']
# track_item = phiero.get_track_items(
# track_item_name=namespace)
# track = track_item.parent()
# # remove track item from track
# track.removeItem(track_item)
# @classmethod
# def multiselection(cls, track_item):
# if not cls.track:
# cls.track = track_item.parent()
# cls.sequence = cls.track.parent()
# @classmethod
# def set_item_color(cls, track_item, version):
# clip = track_item.source()
# # define version name
# version_name = version.get("name", None)
# # get all versions in list
# versions = io.find({
# "type": "version",
# "parent": version["parent"]
# }).distinct('name')
# max_version = max(versions)
# # set clip colour
# if version_name == max_version:
# clip.binItem().setColor(cls.clip_color_last)
# else:
# clip.binItem().setColor(cls.clip_color)


@ -22,6 +22,7 @@ class ExtractSubsetResources(openpype.api.Extractor):
"ext": "jpg",
"xml_preset_file": "Jpeg (8-bit).xml",
"xml_preset_dir": "",
"colorspace_out": "Output - sRGB",
"representation_add_range": False,
"representation_tags": ["thumbnail"]
},
@ -29,6 +30,7 @@ class ExtractSubsetResources(openpype.api.Extractor):
"ext": "mov",
"xml_preset_file": "Apple iPad (1920x1080).xml",
"xml_preset_dir": "",
"colorspace_out": "Output - Rec.709",
"representation_add_range": True,
"representation_tags": [
"review",
@ -45,7 +47,6 @@ class ExtractSubsetResources(openpype.api.Extractor):
export_presets_mapping = {}
def process(self, instance):
if (
self.keep_original_representation
and "representations" not in instance.data
@ -84,6 +85,7 @@ class ExtractSubsetResources(openpype.api.Extractor):
preset_file = preset_config["xml_preset_file"]
preset_dir = preset_config["xml_preset_dir"]
repre_tags = preset_config["representation_tags"]
color_out = preset_config["colorspace_out"]
# validate xml preset file is filled
if preset_file == "":
@ -129,17 +131,31 @@ class ExtractSubsetResources(openpype.api.Extractor):
opfapi.export_clip(
export_dir_path, duplclip, preset_path, **kwargs)
extension = preset_config["ext"]
# create representation data
representation_data = {
"name": unique_name,
"outputName": unique_name,
"ext": preset_config["ext"],
"ext": extension,
"stagingDir": export_dir_path,
"tags": repre_tags
"tags": repre_tags,
"data": {
"colorspace": color_out
}
}
# collect all available content of export dir
files = os.listdir(export_dir_path)
# make sure no nested folders inside
n_stage_dir, n_files = self._unfolds_nested_folders(
export_dir_path, files, extension)
# fix representation in case of nested folders
if n_stage_dir:
representation_data["stagingDir"] = n_stage_dir
files = n_files
# add files to representation, but add
# image sequence as a list
if (
@ -170,3 +186,63 @@ class ExtractSubsetResources(openpype.api.Extractor):
self.log.debug("All representations: {}".format(
pformat(instance.data["representations"])))
def _unfolds_nested_folders(self, stage_dir, files_list, ext):
"""Unfolds nested folders
Args:
stage_dir (str): path string with directory
files_list (list): list of file names
ext (str): extension (jpg)[without dot]
Raises:
IOError: in case no files were collected from any directory
Returns:
str, list: new staging dir path, new list of file names
or
None, None: In case single file in `files_list`
"""
# exclude a single file which has the same
# extension as the input ext attr
if (
# only one file in list
len(files_list) == 1
# file is having extension as input
and ext in os.path.splitext(files_list[0])[-1]
):
return None, None
elif (
# more than one file in list
len(files_list) >= 1
# extension is correct
and ext in os.path.splitext(files_list[0])[-1]
# test file exists
and os.path.exists(
os.path.join(stage_dir, files_list[0])
)
):
return None, None
new_stage_dir = None
new_files_list = []
for file in files_list:
search_path = os.path.join(stage_dir, file)
if not os.path.isdir(search_path):
continue
for root, _dirs, files in os.walk(search_path):
for _file in files:
_fn, _ext = os.path.splitext(_file)
if ext.lower() != _ext[1:].lower():
continue
new_files_list.append(_file)
if not new_stage_dir:
new_stage_dir = root
if not new_stage_dir:
raise AssertionError(
"Files in `{}` are not correct! Check `{}`".format(
files_list, stage_dir)
)
return new_stage_dir, new_files_list
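An illustrative sketch of the return contract (hypothetical paths; written as comments since this is an instance method of the extractor):

    # flat export -- single file matching the extension, keep the staging dir:
    #   _unfolds_nested_folders("/tmp/export", ["sh010.0001.jpg"], "jpg")
    #   -> (None, None)
    #
    # preset exported into a nested folder -- walk it and return its contents:
    #   _unfolds_nested_folders("/tmp/export", ["sh010"], "jpg")
    #   -> ("/tmp/export/sh010", ["sh010.0001.jpg", "sh010.0002.jpg", ...])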


@ -0,0 +1,24 @@
import pyblish
@pyblish.api.log
class ValidateSourceClip(pyblish.api.InstancePlugin):
"""Validate instance is not having empty `flameSourceClip`"""
order = pyblish.api.ValidatorOrder
label = "Validate Source Clip"
hosts = ["flame"]
families = ["clip"]
def process(self, instance):
flame_source_clip = instance.data["flameSourceClip"]
self.log.debug("_ flame_source_clip: {}".format(flame_source_clip))
if flame_source_clip is None:
raise AttributeError((
"Timeline segment `{}` is not having "
"relative clip in reels. Please make sure "
"you push `Save Sources` button in Conform Tab").format(
instance.data["asset"]
))


@ -3,6 +3,10 @@ from __future__ import print_function
import os
import sys
# only testing dependency for nested modules in package
import six # noqa
SCRIPT_DIR = os.path.dirname(__file__)
PACKAGE_DIR = os.path.join(SCRIPT_DIR, "modules")
sys.path.append(PACKAGE_DIR)


@ -1,14 +1,27 @@
from .pipeline import (
install,
uninstall
uninstall,
ls,
imprint_container,
parse_container,
get_current_comp,
comp_lock_and_undo_chunk
)
from .utils import (
setup
from .workio import (
open_file,
save_file,
current_file,
has_unsaved_changes,
file_extensions,
work_root
)
from .lib import (
maintained_selection,
get_additional_data,
update_frame_range
)
@ -20,11 +33,24 @@ __all__ = [
# pipeline
"install",
"uninstall",
"ls",
# utils
"setup",
"imprint_container",
"parse_container",
"get_current_comp",
"comp_lock_and_undo_chunk",
# workio
"open_file",
"save_file",
"current_file",
"has_unsaved_changes",
"file_extensions",
"work_root",
# lib
"maintained_selection",
"get_additional_data",
"update_frame_range",


@ -1,8 +1,13 @@
import os
import sys
import re
import contextlib
from Qt import QtGui
import avalon.fusion
import avalon.api
from avalon import io
from .pipeline import get_current_comp, comp_lock_and_undo_chunk
self = sys.modules[__name__]
self._project = None
@ -24,7 +29,7 @@ def update_frame_range(start, end, comp=None, set_render_range=True):
"""
if not comp:
comp = avalon.fusion.get_current_comp()
comp = get_current_comp()
attrs = {
"COMPN_GlobalStart": start,
@ -37,7 +42,7 @@ def update_frame_range(start, end, comp=None, set_render_range=True):
"COMPN_RenderEnd": end
})
with avalon.fusion.comp_lock_and_undo_chunk(comp):
with comp_lock_and_undo_chunk(comp):
comp.SetAttrs(attrs)
@ -140,3 +145,51 @@ def switch_item(container,
avalon.api.switch(container, representation)
return representation
@contextlib.contextmanager
def maintained_selection():
comp = get_current_comp()
previous_selection = comp.GetToolList(True).values()
try:
yield
finally:
flow = comp.CurrentFrame.FlowView
flow.Select() # No args equals clearing selection
if previous_selection:
for tool in previous_selection:
flow.Select(tool, True)
def get_frame_path(path):
"""Get filename for the Fusion Saver with padded number as '#'
>>> get_frame_path("C:/test.exr")
('C:/test', 4, '.exr')
>>> get_frame_path("filename.00.tif")
('filename.', 2, '.tif')
>>> get_frame_path("foobar35.tif")
('foobar', 2, '.tif')
Args:
path (str): The path to render to.
Returns:
tuple: head, padding, tail (extension)
"""
filename, ext = os.path.splitext(path)
# Find a final number group
match = re.match('.*?([0-9]+)$', filename)
if match:
padding = len(match.group(1))
# remove number from end since fusion
# will swap it with the frame number
filename = filename[:-padding]
else:
padding = 4 # default Fusion padding
return filename, padding, ext


@ -3,6 +3,7 @@ import sys
from Qt import QtWidgets, QtCore
from openpype import style
from openpype.tools.utils import host_tools
from openpype.hosts.fusion.scripts import (
@ -58,7 +59,7 @@ class OpenPypeMenu(QtWidgets.QWidget):
create_btn = QtWidgets.QPushButton("Create...", self)
publish_btn = QtWidgets.QPushButton("Publish...", self)
load_btn = QtWidgets.QPushButton("Load...", self)
inventory_btn = QtWidgets.QPushButton("Inventory...", self)
manager_btn = QtWidgets.QPushButton("Manage...", self)
libload_btn = QtWidgets.QPushButton("Library...", self)
rendermode_btn = QtWidgets.QPushButton("Set render mode...", self)
duplicate_with_inputs_btn = QtWidgets.QPushButton(
@ -75,7 +76,7 @@ class OpenPypeMenu(QtWidgets.QWidget):
layout.addWidget(create_btn)
layout.addWidget(publish_btn)
layout.addWidget(load_btn)
layout.addWidget(inventory_btn)
layout.addWidget(manager_btn)
layout.addWidget(Spacer(15, self))
@ -96,7 +97,7 @@ class OpenPypeMenu(QtWidgets.QWidget):
create_btn.clicked.connect(self.on_create_clicked)
publish_btn.clicked.connect(self.on_publish_clicked)
load_btn.clicked.connect(self.on_load_clicked)
inventory_btn.clicked.connect(self.on_inventory_clicked)
manager_btn.clicked.connect(self.on_manager_clicked)
libload_btn.clicked.connect(self.on_libload_clicked)
rendermode_btn.clicked.connect(self.on_rendernode_clicked)
duplicate_with_inputs_btn.clicked.connect(
@ -119,8 +120,8 @@ class OpenPypeMenu(QtWidgets.QWidget):
print("Clicked Load")
host_tools.show_loader(use_context=True)
def on_inventory_clicked(self):
print("Clicked Inventory")
def on_manager_clicked(self):
print("Clicked Manager")
host_tools.show_scene_inventory()
def on_libload_clicked(self):
@ -128,7 +129,6 @@ class OpenPypeMenu(QtWidgets.QWidget):
host_tools.show_library_loader()
def on_rendernode_clicked(self):
from avalon import style
print("Clicked Set Render Mode")
if self.render_mode_widget is None:
window = set_rendermode.SetRenderMode()


@ -2,9 +2,14 @@
Basic avalon integration
"""
import os
import sys
import logging
import contextlib
import pyblish.api
import avalon.api
from avalon.pipeline import AVALON_CONTAINER_ID
from avalon import api as avalon
from pyblish import api as pyblish
from openpype.api import Logger
import openpype.hosts.fusion
@ -19,6 +24,14 @@ CREATE_PATH = os.path.join(PLUGINS_DIR, "create")
INVENTORY_PATH = os.path.join(PLUGINS_DIR, "inventory")
class CompLogHandler(logging.Handler):
def emit(self, record):
entry = self.format(record)
comp = get_current_comp()
if comp:
comp.Print(entry)
def install():
"""Install fusion-specific functionality of avalon-core.
@ -30,18 +43,32 @@ def install():
See the Maya equivalent for inspiration on how to implement this.
"""
# Remove all handlers associated with the root logger object, because
# that one sometimes logs as "warnings" incorrectly.
for handler in logging.root.handlers[:]:
logging.root.removeHandler(handler)
# Attach default logging handler that prints to active comp
logger = logging.getLogger()
formatter = logging.Formatter(fmt="%(message)s\n")
handler = CompLogHandler()
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.DEBUG)
log.info("openpype.hosts.fusion installed")
pyblish.register_host("fusion")
pyblish.register_plugin_path(PUBLISH_PATH)
pyblish.api.register_host("fusion")
pyblish.api.register_plugin_path(PUBLISH_PATH)
log.info("Registering Fusion plug-ins..")
avalon.register_plugin_path(avalon.Loader, LOAD_PATH)
avalon.register_plugin_path(avalon.Creator, CREATE_PATH)
avalon.register_plugin_path(avalon.InventoryAction, INVENTORY_PATH)
avalon.api.register_plugin_path(avalon.api.Loader, LOAD_PATH)
avalon.api.register_plugin_path(avalon.api.Creator, CREATE_PATH)
avalon.api.register_plugin_path(avalon.api.InventoryAction, INVENTORY_PATH)
pyblish.register_callback("instanceToggled", on_pyblish_instance_toggled)
pyblish.api.register_callback(
"instanceToggled", on_pyblish_instance_toggled
)
def uninstall():
@ -55,22 +82,23 @@ def uninstall():
modifying the menu or registered families.
"""
pyblish.deregister_host("fusion")
pyblish.deregister_plugin_path(PUBLISH_PATH)
pyblish.api.deregister_host("fusion")
pyblish.api.deregister_plugin_path(PUBLISH_PATH)
log.info("Deregistering Fusion plug-ins..")
avalon.deregister_plugin_path(avalon.Loader, LOAD_PATH)
avalon.deregister_plugin_path(avalon.Creator, CREATE_PATH)
avalon.deregister_plugin_path(avalon.InventoryAction, INVENTORY_PATH)
avalon.api.deregister_plugin_path(avalon.api.Loader, LOAD_PATH)
avalon.api.deregister_plugin_path(avalon.api.Creator, CREATE_PATH)
avalon.api.deregister_plugin_path(
avalon.api.InventoryAction, INVENTORY_PATH
)
pyblish.deregister_callback("instanceToggled", on_pyblish_instance_toggled)
pyblish.api.deregister_callback(
"instanceToggled", on_pyblish_instance_toggled
)
def on_pyblish_instance_toggled(instance, new_value, old_value):
"""Toggle saver tool passthrough states on instance toggles."""
from avalon.fusion import comp_lock_and_undo_chunk
comp = instance.context.data.get("currentComp")
if not comp:
return
@ -90,3 +118,106 @@ def on_pyblish_instance_toggled(instance, new_value, old_value):
current = attrs["TOOLB_PassThrough"]
if current != passthrough:
tool.SetAttrs({"TOOLB_PassThrough": passthrough})
def ls():
"""List containers from active Fusion scene
This is the host-equivalent of api.ls(), but instead of listing
assets on disk, it lists assets already loaded in Fusion; once loaded
they are called 'containers'
Yields:
dict: container
"""
comp = get_current_comp()
tools = comp.GetToolList(False, "Loader").values()
for tool in tools:
container = parse_container(tool)
if container:
yield container
def imprint_container(tool,
name,
namespace,
context,
loader=None):
"""Imprint a Loader with metadata
Containerisation enables a tracking of version, author and origin
for loaded assets.
Arguments:
tool (object): The node in Fusion to imprint as container, usually a
Loader.
name (str): Name of resulting assembly
namespace (str): Namespace under which to host container
context (dict): Asset information
loader (str, optional): Name of loader used to produce this container.
Returns:
None
"""
data = [
("schema", "openpype:container-2.0"),
("id", AVALON_CONTAINER_ID),
("name", str(name)),
("namespace", str(namespace)),
("loader", str(loader)),
("representation", str(context["representation"]["_id"])),
]
for key, value in data:
tool.SetData("avalon.{}".format(key), value)
def parse_container(tool):
"""Returns imprinted container data of a tool
This reads the imprinted data from `imprint_container`.
"""
data = tool.GetData('avalon')
if not isinstance(data, dict):
return
# If any required key is missing, this is not a container
required = ['schema', 'id', 'name',
'namespace', 'loader', 'representation']
if not all(key in data for key in required):
return
container = {key: data[key] for key in required}
# Store the tool's name
container["objectName"] = tool.Name
# Store reference to the tool object
container["_tool"] = tool
return container
def get_current_comp():
"""Hack to get current comp in this session"""
fusion = getattr(sys.modules["__main__"], "fusion", None)
return fusion.CurrentComp if fusion else None
@contextlib.contextmanager
def comp_lock_and_undo_chunk(comp, undo_queue_name="Script CMD"):
"""Lock comp and open an undo chunk during the context"""
try:
comp.Lock()
comp.StartUndo(undo_queue_name)
yield
finally:
comp.Unlock()
comp.EndUndo()
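A hedged sketch combining the helpers above (name/namespace values are made up; `context` is assumed to be a loader's context dict):

    comp = get_current_comp()
    with comp_lock_and_undo_chunk(comp, "Tag selected Loaders"):
        for tool in comp.GetToolList(True, "Loader").values():
            imprint_container(
                tool,
                name="plateMain",
                namespace="sh010",
                context=context,
                loader="FusionLoadSequence",
            )

    for container in ls():      # yields the dicts produced by parse_container()
        print(container["name"], container["objectName"])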

View file

@ -1,86 +0,0 @@
#! python3
"""
Fusion tools for setting environment
"""
import os
import shutil
from openpype.api import Logger
import openpype.hosts.fusion
log = Logger().get_logger(__name__)
def _sync_utility_scripts(env=None):
""" Synchronizing basic utlility scripts for resolve.
To be able to run scripts from inside `Fusion/Workspace/Scripts` menu
all scripts has to be accessible from defined folder.
"""
if not env:
env = os.environ
# initiate inputs
scripts = {}
us_env = env.get("FUSION_UTILITY_SCRIPTS_SOURCE_DIR")
us_dir = env.get("FUSION_UTILITY_SCRIPTS_DIR", "")
us_paths = [os.path.join(
os.path.dirname(os.path.abspath(openpype.hosts.fusion.__file__)),
"utility_scripts"
)]
# collect script dirs
if us_env:
log.info(f"Utility Scripts Env: `{us_env}`")
us_paths = us_env.split(
os.pathsep) + us_paths
# collect scripts from dirs
for path in us_paths:
scripts.update({path: os.listdir(path)})
log.info(f"Utility Scripts Dir: `{us_paths}`")
log.info(f"Utility Scripts: `{scripts}`")
# make sure no script file is in folder
if next((s for s in os.listdir(us_dir)), None):
for s in os.listdir(us_dir):
path = os.path.normpath(
os.path.join(us_dir, s))
log.info(f"Removing `{path}`...")
# remove file or directory if not in our folders
if not os.path.isdir(path):
os.remove(path)
else:
shutil.rmtree(path)
# copy scripts into Resolve's utility scripts dir
for d, sl in scripts.items():
# directory and scripts list
for s in sl:
# script in script list
src = os.path.normpath(os.path.join(d, s))
dst = os.path.normpath(os.path.join(us_dir, s))
log.info(f"Copying `{src}` to `{dst}`...")
# copy file or directory from our folders to fusion's folder
if not os.path.isdir(src):
shutil.copy2(src, dst)
else:
shutil.copytree(src, dst)
def setup(env=None):
""" Wrapper installer started from pype.hooks.fusion.FusionPrelaunch()
"""
if not env:
env = os.environ
# synchronize resolve utility scripts
_sync_utility_scripts(env)
log.info("Fusion Pype wrapper has been installed")


@ -0,0 +1,45 @@
"""Host API required Work Files tool"""
import sys
import os
from avalon import api
from .pipeline import get_current_comp
def file_extensions():
return api.HOST_WORKFILE_EXTENSIONS["fusion"]
def has_unsaved_changes():
comp = get_current_comp()
return comp.GetAttrs()["COMPB_Modified"]
def save_file(filepath):
comp = get_current_comp()
comp.Save(filepath)
def open_file(filepath):
# Hack to get fusion, see
# openpype.hosts.fusion.api.pipeline.get_current_comp()
fusion = getattr(sys.modules["__main__"], "fusion", None)
return fusion.LoadComp(filepath)
def current_file():
comp = get_current_comp()
current_filepath = comp.GetAttrs()["COMPS_FileName"]
if not current_filepath:
return None
return current_filepath
def work_root(session):
work_dir = session["AVALON_WORKDIR"]
scene_dir = session.get("AVALON_SCENEDIR")
if scene_dir:
return os.path.join(work_dir, scene_dir)
else:
return work_dir


@ -1,7 +1,8 @@
import os
import importlib
import shutil
import openpype.hosts.fusion
from openpype.lib import PreLaunchHook, ApplicationLaunchFailed
from openpype.hosts.fusion.api import utils
class FusionPrelaunch(PreLaunchHook):
@ -13,40 +14,101 @@ class FusionPrelaunch(PreLaunchHook):
def execute(self):
# making sure python 3.6 is installed at provided path
py36_dir = os.path.normpath(self.launch_context.env.get("PYTHON36", ""))
py36_dir = self.launch_context.env.get("PYTHON36")
if not py36_dir:
raise ApplicationLaunchFailed(
"Required environment variable \"PYTHON36\" is not set."
"\n\nFusion implementation requires to have"
" installed Python 3.6"
)
py36_dir = os.path.normpath(py36_dir)
if not os.path.isdir(py36_dir):
raise ApplicationLaunchFailed(
"Python 3.6 is not installed at the provided path.\n"
"Either make sure the 'environments/fusion.json' has "
"'PYTHON36' set corectly or make sure Python 3.6 is installed "
f"in the given path.\n\nPYTHON36: {py36_dir}"
"Either make sure the environments in fusion settings has"
" 'PYTHON36' set corectly or make sure Python 3.6 is installed"
f" in the given path.\n\nPYTHON36: {py36_dir}"
)
self.log.info(f"Path to Fusion Python folder: '{py36_dir}'...")
self.launch_context.env["PYTHON36"] = py36_dir
utility_dir = self.launch_context.env.get("FUSION_UTILITY_SCRIPTS_DIR")
if not utility_dir:
raise ApplicationLaunchFailed(
"Required Fusion utility script dir environment variable"
" \"FUSION_UTILITY_SCRIPTS_DIR\" is not set."
)
# setting utility scripts dir for scripts syncing
us_dir = os.path.normpath(
self.launch_context.env.get("FUSION_UTILITY_SCRIPTS_DIR", "")
)
if not os.path.isdir(us_dir):
utility_dir = os.path.normpath(utility_dir)
if not os.path.isdir(utility_dir):
raise ApplicationLaunchFailed(
"Fusion utility script dir does not exist. Either make sure "
"the 'environments/fusion.json' has "
"'FUSION_UTILITY_SCRIPTS_DIR' set correctly or reinstall "
f"Fusion.\n\nFUSION_UTILITY_SCRIPTS_DIR: '{us_dir}'"
"the environments in fusion settings has"
" 'FUSION_UTILITY_SCRIPTS_DIR' set correctly or reinstall "
f"Fusion.\n\nFUSION_UTILITY_SCRIPTS_DIR: '{utility_dir}'"
)
try:
__import__("avalon.fusion")
__import__("pyblish")
self._sync_utility_scripts(self.launch_context.env)
self.log.info("Fusion Pype wrapper has been installed")
except ImportError:
self.log.warning(
"pyblish: Could not load Fusion integration.",
exc_info=True
)
def _sync_utility_scripts(self, env):
""" Synchronizing basic utlility scripts for resolve.
else:
# Resolve Setup integration
importlib.reload(utils)
utils.setup(self.launch_context.env)
To be able to run scripts from inside `Fusion/Workspace/Scripts` menu
all scripts have to be accessible from a defined folder.
"""
if not env:
env = {k: v for k, v in os.environ.items()}
# initiate inputs
scripts = {}
us_env = env.get("FUSION_UTILITY_SCRIPTS_SOURCE_DIR")
us_dir = env.get("FUSION_UTILITY_SCRIPTS_DIR", "")
us_paths = [os.path.join(
os.path.dirname(os.path.abspath(openpype.hosts.fusion.__file__)),
"utility_scripts"
)]
# collect script dirs
if us_env:
self.log.info(f"Utility Scripts Env: `{us_env}`")
us_paths = us_env.split(
os.pathsep) + us_paths
# collect scripts from dirs
for path in us_paths:
scripts.update({path: os.listdir(path)})
self.log.info(f"Utility Scripts Dir: `{us_paths}`")
self.log.info(f"Utility Scripts: `{scripts}`")
# make sure no script file is in folder
if next((s for s in os.listdir(us_dir)), None):
for s in os.listdir(us_dir):
path = os.path.normpath(
os.path.join(us_dir, s))
self.log.info(f"Removing `{path}`...")
# remove file or directory if not in our folders
if not os.path.isdir(path):
os.remove(path)
else:
shutil.rmtree(path)
# copy scripts into Fusion's utility scripts dir
for d, sl in scripts.items():
# directory and scripts list
for s in sl:
# script in script list
src = os.path.normpath(os.path.join(d, s))
dst = os.path.normpath(os.path.join(us_dir, s))
self.log.info(f"Copying `{src}` to `{dst}`...")
# copy file or directory from our folders to fusion's folder
if not os.path.isdir(src):
shutil.copy2(src, dst)
else:
shutil.copytree(src, dst)


@ -1,7 +1,10 @@
import os
import openpype.api
from avalon import fusion
from openpype.hosts.fusion.api import (
get_current_comp,
comp_lock_and_undo_chunk
)
class CreateOpenEXRSaver(openpype.api.Creator):
@ -10,12 +13,13 @@ class CreateOpenEXRSaver(openpype.api.Creator):
label = "Create OpenEXR Saver"
hosts = ["fusion"]
family = "render"
defaults = ["Main"]
def process(self):
file_format = "OpenEXRFormat"
comp = fusion.get_current_comp()
comp = get_current_comp()
# todo: improve method of getting current environment
# todo: pref avalon.Session over os.environ
@ -25,7 +29,7 @@ class CreateOpenEXRSaver(openpype.api.Creator):
filename = "{}..tiff".format(self.name)
filepath = os.path.join(workdir, "render", filename)
with fusion.comp_lock_and_undo_chunk(comp):
with comp_lock_and_undo_chunk(comp):
args = (-32768, -32768) # Magical position numbers
saver = comp.AddTool("Saver", *args)
saver.SetAttrs({"TOOLS_Name": self.name})


@ -8,15 +8,17 @@ class FusionSelectContainers(api.InventoryAction):
color = "#d8d8d8"
def process(self, containers):
import avalon.fusion
from openpype.hosts.fusion.api import (
get_current_comp,
comp_lock_and_undo_chunk
)
tools = [i["_tool"] for i in containers]
comp = avalon.fusion.get_current_comp()
comp = get_current_comp()
flow = comp.CurrentFrame.FlowView
with avalon.fusion.comp_lock_and_undo_chunk(comp, self.label):
with comp_lock_and_undo_chunk(comp, self.label):
# Clear selection
flow.Select()


@ -1,7 +1,11 @@
from avalon import api, style
from avalon import api
from Qt import QtGui, QtWidgets
import avalon.fusion
from openpype import style
from openpype.hosts.fusion.api import (
get_current_comp,
comp_lock_and_undo_chunk
)
class FusionSetToolColor(api.InventoryAction):
@ -16,7 +20,7 @@ class FusionSetToolColor(api.InventoryAction):
"""Color all selected tools the selected colors"""
result = []
comp = avalon.fusion.get_current_comp()
comp = get_current_comp()
# Get tool color
first = containers[0]
@ -33,7 +37,7 @@ class FusionSetToolColor(api.InventoryAction):
if not picked_color:
return
with avalon.fusion.comp_lock_and_undo_chunk(comp):
with comp_lock_and_undo_chunk(comp):
for container in containers:
# Convert color to RGB 0-1 floats
rgb_f = picked_color.getRgbF()


@ -12,7 +12,8 @@ class FusionSetFrameRangeLoader(api.Loader):
"camera",
"imagesequence",
"yeticache",
"pointcache"]
"pointcache",
"render"]
representations = ["*"]
label = "Set frame range"
@ -45,7 +46,8 @@ class FusionSetFrameRangeWithHandlesLoader(api.Loader):
"camera",
"imagesequence",
"yeticache",
"pointcache"]
"pointcache",
"render"]
representations = ["*"]
label = "Set frame range (with handles)"


@ -1,12 +1,15 @@
import os
import contextlib
from avalon import api
import avalon.io as io
from avalon import api, io
from avalon import fusion
from openpype.hosts.fusion.api import (
imprint_container,
get_current_comp,
comp_lock_and_undo_chunk
)
comp = fusion.get_current_comp()
comp = get_current_comp()
@contextlib.contextmanager
@ -117,7 +120,7 @@ def loader_shift(loader, frame, relative=True):
class FusionLoadSequence(api.Loader):
"""Load image sequence into Fusion"""
families = ["imagesequence", "review"]
families = ["imagesequence", "review", "render"]
representations = ["*"]
label = "Load sequence"
@ -126,13 +129,6 @@ class FusionLoadSequence(api.Loader):
color = "orange"
def load(self, context, name, namespace, data):
from avalon.fusion import (
imprint_container,
get_current_comp,
comp_lock_and_undo_chunk
)
# Fallback to asset name when namespace is None
if namespace is None:
namespace = context['asset']['name']
@ -204,13 +200,11 @@ class FusionLoadSequence(api.Loader):
"""
from avalon.fusion import comp_lock_and_undo_chunk
tool = container["_tool"]
assert tool.ID == "Loader", "Must be Loader"
comp = tool.Comp()
root = api.get_representation_path(representation)
root = os.path.dirname(api.get_representation_path(representation))
path = self._get_first_image(root)
# Get start frame from version data
@ -247,9 +241,6 @@ class FusionLoadSequence(api.Loader):
tool.SetData("avalon.representation", str(representation["_id"]))
def remove(self, container):
from avalon.fusion import comp_lock_and_undo_chunk
tool = container["_tool"]
assert tool.ID == "Loader", "Must be Loader"
comp = tool.Comp()


@ -2,7 +2,7 @@ import os
import pyblish.api
from avalon import fusion
from openpype.hosts.fusion.api import get_current_comp
class CollectCurrentCompFusion(pyblish.api.ContextPlugin):
@ -15,7 +15,7 @@ class CollectCurrentCompFusion(pyblish.api.ContextPlugin):
def process(self, context):
"""Collect all image sequence tools"""
current_comp = fusion.get_current_comp()
current_comp = get_current_comp()
assert current_comp, "Must have active Fusion composition"
context.data["currentComp"] = current_comp


@ -34,7 +34,7 @@ class CollectInstances(pyblish.api.ContextPlugin):
def process(self, context):
"""Collect all image sequence tools"""
from avalon.fusion.lib import get_frame_path
from openpype.hosts.fusion.api.lib import get_frame_path
comp = context.data["currentComp"]


@ -1,9 +1,9 @@
import os
import pyblish.api
import avalon.fusion as fusion
from pprint import pformat
import pyblish.api
from openpype.hosts.fusion.api import comp_lock_and_undo_chunk
class Fusionlocal(pyblish.api.InstancePlugin):
"""Render the current Fusion composition locally.
@ -39,7 +39,7 @@ class Fusionlocal(pyblish.api.InstancePlugin):
self.log.info("Start frame: {}".format(frame_start))
self.log.info("End frame: {}".format(frame_end))
with fusion.comp_lock_and_undo_chunk(current_comp):
with comp_lock_and_undo_chunk(current_comp):
result = current_comp.Render()
if "representations" not in instance.data:


@ -2,8 +2,9 @@ import os
import json
import getpass
import requests
from avalon import api
from avalon.vendor import requests
import pyblish.api
@ -30,7 +31,7 @@ class FusionSubmitDeadline(pyblish.api.InstancePlugin):
else:
context.data[key] = True
from avalon.fusion.lib import get_frame_path
from openpype.hosts.fusion.api.lib import get_frame_path
deadline_url = (
context.data["system_settings"]


@ -1,4 +1,7 @@
from avalon import fusion
from openpype.hosts.fusion.api import (
comp_lock_and_undo_chunk,
get_current_comp
)
def is_connected(input):
@ -9,12 +12,12 @@ def is_connected(input):
def duplicate_with_input_connections():
"""Duplicate selected tools with incoming connections."""
comp = fusion.get_current_comp()
comp = get_current_comp()
original_tools = comp.GetToolList(True).values()
if not original_tools:
return # nothing selected
with fusion.comp_lock_and_undo_chunk(
with comp_lock_and_undo_chunk(
comp, "Duplicate With Input Connections"):
# Generate duplicates


@ -4,12 +4,12 @@ import sys
import logging
# Pipeline imports
from avalon import api, io, pipeline
import avalon.fusion
import avalon.api
from avalon import io, pipeline
# Config imports
import openpype.lib as pype
import openpype.hosts.fusion.api.lib as fusion_lib
from openpype.lib import version_up
from openpype.hosts.fusion import api
from openpype.hosts.fusion.api import lib
log = logging.getLogger("Update Slap Comp")
@ -87,7 +87,7 @@ def _format_filepath(session):
# Create new unique filepath
if os.path.exists(new_filepath):
new_filepath = pype.version_up(new_filepath)
new_filepath = version_up(new_filepath)
return new_filepath
@ -119,7 +119,7 @@ def _update_savers(comp, session):
comp.Print("New renders to: %s\n" % renders)
with avalon.fusion.comp_lock_and_undo_chunk(comp):
with api.comp_lock_and_undo_chunk(comp):
savers = comp.GetToolList(False, "Saver").values()
for saver in savers:
filepath = saver.GetAttrs("TOOLST_Clip_Name")[1.0]
@ -185,7 +185,7 @@ def update_frame_range(comp, representations):
start = min(v["data"]["frameStart"] for v in versions)
end = max(v["data"]["frameEnd"] for v in versions)
fusion_lib.update_frame_range(start, end, comp=comp)
lib.update_frame_range(start, end, comp=comp)
def switch(asset_name, filepath=None, new=True):
@ -215,11 +215,11 @@ def switch(asset_name, filepath=None, new=True):
# Get current project
self._project = io.find_one({"type": "project",
"name": api.Session["AVALON_PROJECT"]})
"name": avalon.api.Session["AVALON_PROJECT"]})
# Go to comp
if not filepath:
current_comp = avalon.fusion.get_current_comp()
current_comp = api.get_current_comp()
assert current_comp is not None, "Could not find current comp"
else:
fusion = _get_fusion_instance()
@ -227,14 +227,14 @@ def switch(asset_name, filepath=None, new=True):
assert current_comp is not None, (
"Fusion could not load '{}'").format(filepath)
host = api.registered_host()
host = avalon.api.registered_host()
containers = list(host.ls())
assert containers, "Nothing to update"
representations = []
for container in containers:
try:
representation = fusion_lib.switch_item(
representation = lib.switch_item(
container,
asset_name=asset_name)
representations.append(representation)
@ -246,7 +246,7 @@ def switch(asset_name, filepath=None, new=True):
current_comp.Print(message)
# Build the session to switch to
switch_to_session = api.Session.copy()
switch_to_session = avalon.api.Session.copy()
switch_to_session["AVALON_ASSET"] = asset['name']
if new:
@ -255,7 +255,7 @@ def switch(asset_name, filepath=None, new=True):
# Update savers output based on new session
_update_savers(current_comp, switch_to_session)
else:
comp_path = pype.version_up(filepath)
comp_path = version_up(filepath)
current_comp.Print(comp_path)
@ -288,7 +288,7 @@ if __name__ == '__main__':
args, unknown = parser.parse_args()
api.install(avalon.fusion)
avalon.api.install(api)
switch(args.asset_name, args.file_path)
sys.exit(0)


@ -1,6 +1,6 @@
from Qt import QtWidgets
from avalon.vendor import qtawesome
import avalon.fusion as avalon
from openpype.hosts.fusion.api import get_current_comp
_help = {"local": "Render the comp on your own machine and publish "
@ -14,7 +14,7 @@ class SetRenderMode(QtWidgets.QWidget):
def __init__(self, parent=None):
QtWidgets.QWidget.__init__(self, parent)
self._comp = avalon.get_current_comp()
self._comp = get_current_comp()
self._comp_name = self._get_comp_name()
self.setWindowTitle("Set Render Mode")
@ -79,7 +79,7 @@ class SetRenderMode(QtWidgets.QWidget):
def update(self):
"""Update all information in the UI"""
self._comp = avalon.get_current_comp()
self._comp = get_current_comp()
self._comp_name = self._get_comp_name()
self.comp_information.setText(self._comp_name)


@ -1,10 +1,11 @@
from avalon.fusion import comp_lock_and_undo_chunk
from avalon import fusion
comp = fusion.get_current_comp()
from openpype.hosts.fusion.api import (
comp_lock_and_undo_chunk,
get_current_comp
)
def main():
comp = get_current_comp()
"""Set all selected backgrounds to 32 bit"""
with comp_lock_and_undo_chunk(comp, 'Selected Backgrounds to 32bit'):
tools = comp.GetToolList(True, "Background").values()


@ -1,9 +1,11 @@
from avalon.fusion import comp_lock_and_undo_chunk
from avalon import fusion
comp = fusion.get_current_comp()
from openpype.hosts.fusion.api import (
comp_lock_and_undo_chunk,
get_current_comp
)
def main():
comp = get_current_comp()
"""Set all backgrounds to 32 bit"""
with comp_lock_and_undo_chunk(comp, 'Backgrounds to 32bit'):
tools = comp.GetToolList(False, "Background").values()


@ -1,9 +1,11 @@
from avalon.fusion import comp_lock_and_undo_chunk
from avalon import fusion
comp = fusion.get_current_comp()
from openpype.hosts.fusion.api import (
comp_lock_and_undo_chunk,
get_current_comp
)
def main():
comp = get_current_comp()
"""Set all selected loaders to 32 bit"""
with comp_lock_and_undo_chunk(comp, 'Selected Loaders to 32bit'):
tools = comp.GetToolList(True, "Loader").values()


@ -1,9 +1,11 @@
from avalon.fusion import comp_lock_and_undo_chunk
from avalon import fusion
comp = fusion.get_current_comp()
from openpype.hosts.fusion.api import (
comp_lock_and_undo_chunk,
get_current_comp
)
def main():
comp = get_current_comp()
"""Set all loaders to 32 bit"""
with comp_lock_and_undo_chunk(comp, 'Loaders to 32bit'):
tools = comp.GetToolList(False, "Loader").values()


@ -8,13 +8,15 @@ log = Logger().get_logger(__name__)
def main(env):
import avalon.api
from openpype.hosts.fusion import api
from openpype.hosts.fusion.api import menu
import avalon.fusion
# Registers pype's Global pyblish plugins
openpype.install()
# activate resolve from pype
avalon.api.install(avalon.fusion)
avalon.api.install(api)
log.info(f"Avalon registered hosts: {avalon.api.registered_host()}")


@ -4,13 +4,12 @@ import logging
from Qt import QtWidgets, QtCore
import avalon.io as io
import avalon.api as api
import avalon.pipeline as pipeline
import avalon.fusion
import avalon.style as style
import avalon.api
from avalon import io, pipeline
from avalon.vendor import qtawesome as qta
from openpype import style
from openpype.hosts.fusion import api
log = logging.getLogger("Fusion Switch Shot")
@ -150,7 +149,7 @@ class App(QtWidgets.QWidget):
if not self._use_current.isChecked():
file_name = self._comps.itemData(self._comps.currentIndex())
else:
comp = avalon.fusion.get_current_comp()
comp = api.get_current_comp()
file_name = comp.GetAttrs("COMPS_FileName")
asset = self._assets.currentText()
@ -161,11 +160,11 @@ class App(QtWidgets.QWidget):
def _get_context_directory(self):
project = io.find_one({"type": "project",
"name": api.Session["AVALON_PROJECT"]},
"name": avalon.api.Session["AVALON_PROJECT"]},
projection={"config": True})
template = project["config"]["template"]["work"]
dir = pipeline._format_work_template(template, api.Session)
dir = pipeline._format_work_template(template, avalon.api.Session)
return dir
@ -174,7 +173,7 @@ class App(QtWidgets.QWidget):
return items
def collect_assets(self):
return list(io.find({"type": "asset", "silo": "film"}))
return list(io.find({"type": "asset"}, {"name": True}))
def populate_comp_box(self, files):
"""Ensure we display the filename only but the path is stored as well
@ -193,7 +192,7 @@ class App(QtWidgets.QWidget):
if __name__ == '__main__':
import sys
api.install(avalon.fusion)
avalon.api.install(api)
app = QtWidgets.QApplication(sys.argv)
window = App()

View file

@ -5,12 +5,15 @@ Warning:
settings of the Loader. So use this at your own risk.
"""
from avalon import fusion
from openpype.hosts.fusion.api.pipeline import (
get_current_comp,
comp_lock_and_undo_chunk
)
def update_loader_ranges():
comp = fusion.get_current_comp()
with fusion.comp_lock_and_undo_chunk(comp, "Reload clip time ranges"):
comp = get_current_comp()
with comp_lock_and_undo_chunk(comp, "Reload clip time ranges"):
tools = comp.GetToolList(True, "Loader").values()
for tool in tools:

View file

@ -12,6 +12,7 @@ class Creator(PypeCreatorMixin, avalon.api.Creator):
created node.
"""
defaults = ["Main"]
node_type = "COMPOSITE"
def setup_node(self, node):

View file

@ -406,31 +406,46 @@ def imprint(node, data):
node.setParmTemplateGroup(parm_group)
def lsattr(attr, value=None):
def lsattr(attr, value=None, root="/"):
"""Return nodes that have `attr`
When `value` is not None it will only return nodes matching that value
for the given attribute.
Args:
attr (str): Name of the attribute (hou.Parm)
value (object, Optional): The value to compare the attribute to.
When the default None is provided the value check is skipped.
root (str): The root path in Houdini to search in.
Returns:
list: Matching nodes that have attribute with value.
"""
if value is None:
nodes = list(hou.node("/obj").allNodes())
# Use allSubChildren() because allNodes() errors on nodes the user has
# no permission to enter, with no way to continue querying the rest
nodes = hou.node(root).allSubChildren()
return [n for n in nodes if n.parm(attr)]
return lsattrs({attr: value})
def lsattrs(attrs):
def lsattrs(attrs, root="/"):
"""Return nodes matching `key` and `value`
Arguments:
attrs (dict): collection of attribute: value
root (str): The root path in Houdini to search in.
Example:
>> lsattrs({"id": "myId"})
["myNode"]
>> lsattr("id")
["myNode", "myOtherNode"]
Returns:
list
list: Matching nodes that have attribute with value.
"""
matches = set()
nodes = list(hou.node("/obj").allNodes()) # returns generator object
# Use allSubChildren() because allNodes() errors on nodes the user has
# no permission to enter, with no way to continue querying the rest
nodes = hou.node(root).allSubChildren()
for node in nodes:
for attr in attrs:
if not node.parm(attr):

View file
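
A short usage sketch for the new root argument on lsattr/lsattrs above. The import path is an assumption (the helpers are assumed to live in the Houdini host lib module), and the node paths and id value are examples only:

# Assumed import location for the helpers defined above.
from openpype.hosts.houdini.api.lib import lsattr, lsattrs

# All nodes under /obj that carry an "id" parm.
obj_nodes = lsattr("id", root="/obj")

# Nodes anywhere in the scene whose "id" parm matches a given value.
containers = lsattrs({"id": "pyblish.avalon.container"}, root="/")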

@ -30,6 +30,7 @@ class Creator(PypeCreatorMixin, avalon.api.Creator):
the node.
"""
defaults = ['Main']
def __init__(self, *args, **kwargs):
super(Creator, self).__init__(*args, **kwargs)

View file

@ -6,10 +6,7 @@ class USDSublayerLoader(api.Loader):
"""Sublayer USD file in Solaris"""
families = [
"colorbleed.usd",
"colorbleed.pointcache",
"colorbleed.animation",
"colorbleed.camera",
"usd",
"usdCamera",
]
label = "Sublayer USD"

View file

@ -6,10 +6,7 @@ class USDReferenceLoader(api.Loader):
"""Reference USD file in Solaris"""
families = [
"colorbleed.usd",
"colorbleed.pointcache",
"colorbleed.animation",
"colorbleed.camera",
"usd",
"usdCamera",
]
label = "Reference USD"

View file

@ -48,7 +48,7 @@ class CollectUsdLayers(pyblish.api.InstancePlugin):
label = "{0} -> {1}".format(instance.data["name"], name)
layer_inst = context.create_instance(name)
family = "colorbleed.usdlayer"
family = "usdlayer"
layer_inst.data["family"] = family
layer_inst.data["families"] = [family]
layer_inst.data["subset"] = "__stub__"

View file

@ -1820,6 +1820,40 @@ def apply_attributes(attributes, nodes_by_id):
set_attribute(attr, value, node)
def get_container_members(container):
"""Returns the members of a container.
This includes the nodes from any loaded references in the container.
"""
if isinstance(container, dict):
# Assume it's a container dictionary
container = container["objectName"]
members = cmds.sets(container, query=True) or []
members = cmds.ls(members, long=True, objectsOnly=True) or []
members = set(members)
# Include any referenced nodes from any reference in the container
# This is required since we've removed adding ALL nodes of a reference
# into the container set and only add the reference node now.
for ref in cmds.ls(members, exactType="reference", objectsOnly=True):
# Ignore any `:sharedReferenceNode`
if ref.rsplit(":", 1)[-1].startswith("sharedReferenceNode"):
continue
# Ignore _UNKNOWN_REF_NODE_ (PLN-160)
if ref.rsplit(":", 1)[-1].startswith("_UNKNOWN_REF_NODE_"):
continue
reference_members = cmds.referenceQuery(ref, nodes=True)
reference_members = cmds.ls(reference_members,
long=True,
objectsOnly=True)
members.update(reference_members)
return members
# region LOOKDEV
def list_looks(asset_id):
"""Return all look subsets for the given asset
@ -1882,7 +1916,7 @@ def assign_look_by_version(nodes, version_id):
container_node = pipeline.load(Loader, look_representation)
# Get container members
shader_nodes = cmds.sets(container_node, query=True)
shader_nodes = get_container_members(container_node)
# Load relationships
shader_relation = api.get_representation_path(json_representation)
@ -2108,7 +2142,7 @@ def get_container_transforms(container, members=None, root=False):
"""
if not members:
members = cmds.sets(container["objectName"], query=True)
members = get_container_members(container)
results = cmds.ls(members, type="transform", long=True)
if root:

View file
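
A minimal sketch of using the new get_container_members helper above. The import path and container name are assumptions; the behaviour (accepting a container dict or its objectName and including nodes from references) follows the function body in the diff:

from maya import cmds

# Import path assumed for this sketch.
from openpype.hosts.maya.api.lib import get_container_members

container = {"objectName": "modelMain_CON"}  # hypothetical container dict
members = get_container_members(container)

# Members include nodes that only exist inside references in the container,
# e.g. shading engines loaded from a referenced look file.
shading_engines = cmds.ls(list(members), type="shadingEngine")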

@ -78,6 +78,8 @@ def get_reference_node_parents(ref):
class Creator(PypeCreatorMixin, api.Creator):
defaults = ['Main']
def process(self):
nodes = list()
@ -164,24 +166,15 @@ class ReferenceLoader(Loader):
nodes = self[:]
if not nodes:
return
# FIXME: there is probably better way to do this for looks.
if "look" in self.families:
loaded_containers.append(containerise(
name=name,
namespace=namespace,
nodes=nodes,
context=context,
loader=self.__class__.__name__
))
else:
ref_node = get_reference_node(nodes, self.log)
loaded_containers.append(containerise(
name=name,
namespace=namespace,
nodes=[ref_node],
context=context,
loader=self.__class__.__name__
))
ref_node = get_reference_node(nodes, self.log)
loaded_containers.append(containerise(
name=name,
namespace=namespace,
nodes=[ref_node],
context=context,
loader=self.__class__.__name__
))
c += 1
namespace = None
@ -191,17 +184,18 @@ class ReferenceLoader(Loader):
"""To be implemented by subclass"""
raise NotImplementedError("Must be implemented by subclass")
def update(self, container, representation):
from maya import cmds
from openpype.hosts.maya.api.lib import get_container_members
node = container["objectName"]
path = api.get_representation_path(representation)
# Get reference node from container members
members = cmds.sets(node, query=True, nodesOnly=True)
members = get_container_members(node)
reference_node = get_reference_node(members, self.log)
namespace = cmds.referenceQuery(reference_node, namespace=True)
file_type = {
"ma": "mayaAscii",
@ -219,18 +213,14 @@ class ReferenceLoader(Loader):
alembic_data = {}
if representation["name"] == "abc":
alembic_nodes = cmds.ls(
"{}:*".format(members[0].split(":")[0]), type="AlembicNode"
"{}:*".format(namespace), type="AlembicNode"
)
if alembic_nodes:
for attr in alembic_attrs:
node_attr = "{}.{}".format(alembic_nodes[0], attr)
alembic_data[attr] = cmds.getAttr(node_attr)
else:
cmds.warning(
"No alembic nodes found in {}".format(
cmds.ls("{}:*".format(members[0].split(":")[0]))
)
)
self.log.debug("No alembic nodes found in {}".format(members))
try:
content = cmds.file(path,
@ -254,9 +244,9 @@ class ReferenceLoader(Loader):
self.log.warning("Ignoring file read error:\n%s", exc)
# Reapply alembic settings.
if representation["name"] == "abc":
if representation["name"] == "abc" and alembic_data:
alembic_nodes = cmds.ls(
"{}:*".format(members[0].split(":")[0]), type="AlembicNode"
"{}:*".format(namespace), type="AlembicNode"
)
if alembic_nodes:
for attr, value in alembic_data.items():

View file

@ -11,7 +11,6 @@ class CreateAnimation(plugin.Creator):
label = "Animation"
family = "animation"
icon = "male"
defaults = ['Main']
def __init__(self, *args, **kwargs):
super(CreateAnimation, self).__init__(*args, **kwargs)

View file

@ -15,7 +15,6 @@ class CreateAss(plugin.Creator):
label = "Ass StandIn"
family = "ass"
icon = "cube"
defaults = ['Main']
def __init__(self, *args, **kwargs):
super(CreateAss, self).__init__(*args, **kwargs)

View file

@ -8,4 +8,3 @@ class CreateAssembly(plugin.Creator):
label = "Assembly"
family = "assembly"
icon = "cubes"
defaults = ['Main']

View file

@ -11,7 +11,6 @@ class CreateCamera(plugin.Creator):
label = "Camera"
family = "camera"
icon = "video-camera"
defaults = ['Main']
def __init__(self, *args, **kwargs):
super(CreateCamera, self).__init__(*args, **kwargs)
@ -33,4 +32,3 @@ class CreateCameraRig(plugin.Creator):
label = "Camera Rig"
family = "camerarig"
icon = "video-camera"
defaults = ['Main']

View file

@ -8,4 +8,3 @@ class CreateLayout(plugin.Creator):
label = "Layout"
family = "layout"
icon = "cubes"
defaults = ["Main"]

View file

@ -11,7 +11,6 @@ class CreateLook(plugin.Creator):
label = "Look"
family = "look"
icon = "paint-brush"
defaults = ['Main']
make_tx = True
def __init__(self, *args, **kwargs):

View file

@ -8,4 +8,3 @@ class CreateMayaScene(plugin.Creator):
label = "Maya Scene"
family = "mayaScene"
icon = "file-archive-o"
defaults = ['Main']

View file

@ -11,7 +11,6 @@ class CreatePointCache(plugin.Creator):
label = "Point Cache"
family = "pointcache"
icon = "gears"
defaults = ['Main']
def __init__(self, *args, **kwargs):
super(CreatePointCache, self).__init__(*args, **kwargs)

View file

@ -64,7 +64,6 @@ class CreateRender(plugin.Creator):
label = "Render"
family = "rendering"
icon = "eye"
defaults = ["Main"]
_token = None
_user = None

View file

@ -12,7 +12,6 @@ class CreateReview(plugin.Creator):
label = "Review"
family = "review"
icon = "video-camera"
defaults = ['Main']
keepImages = False
isolate = False
imagePlane = True

View file

@ -13,7 +13,6 @@ class CreateRig(plugin.Creator):
label = "Rig"
family = "rig"
icon = "wheelchair"
defaults = ['Main']
def process(self):

View file

@ -8,4 +8,3 @@ class CreateXgen(plugin.Creator):
label = "Xgen Interactive"
family = "xgen"
icon = "pagelines"
defaults = ['Main']

View file

@ -13,7 +13,6 @@ class CreateYetiCache(plugin.Creator):
label = "Yeti Cache"
family = "yeticache"
icon = "pagelines"
defaults = ["Main"]
def __init__(self, *args, **kwargs):
super(CreateYetiCache, self).__init__(*args, **kwargs)

View file

@ -12,7 +12,6 @@ class CreateYetiRig(plugin.Creator):
label = "Yeti Rig"
family = "yetiRig"
icon = "usb"
defaults = ["Main"]
def process(self):

View file

@ -25,18 +25,6 @@ class LookLoader(openpype.hosts.maya.api.plugin.ReferenceLoader):
color = "orange"
def process_reference(self, context, name, namespace, options):
"""
Load and try to assign Lookdev to nodes based on relationship data.
Args:
name:
namespace:
context:
options:
Returns:
"""
import maya.cmds as cmds
with lib.maintained_selection():
@ -66,36 +54,17 @@ class LookLoader(openpype.hosts.maya.api.plugin.ReferenceLoader):
Returns:
None
"""
import os
from maya import cmds
node = container["objectName"]
path = api.get_representation_path(representation)
# Get reference node from container members
members = cmds.sets(node, query=True, nodesOnly=True)
members = lib.get_container_members(container)
reference_node = get_reference_node(members, log=self.log)
shader_nodes = cmds.ls(members, type='shadingEngine')
orig_nodes = set(self._get_nodes_with_shader(shader_nodes))
file_type = {
"ma": "mayaAscii",
"mb": "mayaBinary",
"abc": "Alembic"
}.get(representation["name"])
assert file_type, "Unsupported representation: %s" % representation
assert os.path.exists(path), "%s does not exist." % path
self._load_reference(file_type, node, path, reference_node)
# Remove any placeHolderList attribute entries from the set that
# are remaining from nodes being removed from the referenced file.
members = cmds.sets(node, query=True)
invalid = [x for x in members if ".placeHolderList" in x]
if invalid:
cmds.sets(invalid, remove=node)
# Trigger the regular reference update on the ReferenceLoader
super(LookLoader, self).update(container, representation)
# get new applied shaders and nodes from new version
shader_nodes = cmds.ls(members, type='shadingEngine')
@ -112,30 +81,12 @@ class LookLoader(openpype.hosts.maya.api.plugin.ReferenceLoader):
with open(shader_relation, "r") as f:
json_data = json.load(f)
for rel, data in json_data["relationships"].items():
# process only non-shading nodes
current_node = "{}:{}".format(container["namespace"], rel)
if current_node in shader_nodes:
continue
print("processing {}".format(rel))
current_members = set(cmds.ls(
cmds.sets(current_node, query=True) or [], long=True))
new_members = {"{}".format(
m["name"]) for m in data["members"] or []}
dif = new_members.difference(current_members)
# add to set
cmds.sets(
dif, forceElement="{}:{}".format(container["namespace"], rel))
# update of reference could result in failed edits - material is not
# present because of renaming etc.
# present because of renaming etc. If so highlight failed edits to user
failed_edits = cmds.referenceQuery(reference_node,
editStrings=True,
failedEdits=True,
successfulEdits=False)
# highlight failed edits to user
if failed_edits:
# clean references - removes failed reference edits
cmds.file(cr=reference_node) # cleanReference
@ -161,11 +112,6 @@ class LookLoader(openpype.hosts.maya.api.plugin.ReferenceLoader):
nodes_by_id[lib.get_id(n)].append(n)
lib.apply_attributes(attributes, nodes_by_id)
# Update metadata
cmds.setAttr("{}.representation".format(node),
str(representation["_id"]),
type="string")
def _get_nodes_with_shader(self, shader_nodes):
"""
Returns list of nodes belonging to specific shaders
@ -175,7 +121,6 @@ class LookLoader(openpype.hosts.maya.api.plugin.ReferenceLoader):
<list> node names
"""
import maya.cmds as cmds
# Get container members
nodes_list = []
for shader in shader_nodes:
@ -186,45 +131,3 @@ class LookLoader(openpype.hosts.maya.api.plugin.ReferenceLoader):
nodes_list.extend(cmds.listRelatives(connection,
shapes=True))
return nodes_list
def _load_reference(self, file_type, node, path, reference_node):
"""
Load reference from 'path' on 'reference_node'. Used when change
of look (version/update) is triggered.
Args:
file_type: extension of referenced file
node:
path: (string) location of referenced file
reference_node: (string) - name of node that should be applied
on
Returns:
None
"""
import maya.cmds as cmds
try:
content = cmds.file(path,
loadReference=reference_node,
type=file_type,
returnNewNodes=True)
except RuntimeError as exc:
# When changing a reference to a file that has load errors the
# command will raise an error even if the file is still loaded
# correctly (e.g. when raising errors on Arnold attributes)
# When the file is loaded and has content, we consider it's fine.
if not cmds.referenceQuery(reference_node, isLoaded=True):
raise
content = cmds.referenceQuery(reference_node,
nodes=True,
dagPath=True)
if not content:
raise
self.log.warning("Ignoring file read error:\n%s", exc)
# Fix PLN-40 for older containers created with Avalon that had the
# `.verticesOnlySet` set to True.
if cmds.getAttr("{}.verticesOnlySet".format(node)):
self.log.info("Setting %s.verticesOnlySet to False", node)
cmds.setAttr("{}.verticesOnlySet".format(node), False)
# Add new nodes of the reference to the container
cmds.sets(content, forceElement=node)

View file

@ -7,9 +7,9 @@ class SaveCurrentScene(pyblish.api.ContextPlugin):
"""
label = "Save current file"
order = pyblish.api.IntegratorOrder - 0.49
order = pyblish.api.ExtractorOrder - 0.49
hosts = ["maya"]
families = ["renderlayer"]
families = ["renderlayer", "workfile"]
def process(self, context):
import maya.cmds as cmds
@ -17,5 +17,11 @@ class SaveCurrentScene(pyblish.api.ContextPlugin):
current = cmds.file(query=True, sceneName=True)
assert context.data['currentFile'] == current
# If file has no modifications, skip forcing a file save
if not cmds.file(query=True, modified=True):
self.log.debug("Skipping file save as there "
"are no modifications..")
return
self.log.info("Saving current file..")
cmds.file(save=True, force=True)

View file
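
The save-skip logic above in isolation, as a hedged sketch (only the Maya commands used by the plugin appear here):

from maya import cmds

# Only force a save when the scene actually has unsaved modifications.
if cmds.file(query=True, modified=True):
    cmds.file(save=True, force=True)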

@ -4,7 +4,7 @@ import openpype.api
class Creator(openpype.api.Creator):
"""This serves as skeleton for future OpenPype specific functionality"""
pass
defaults = ['Main']
class Loader(api.Loader):

View file

@ -51,12 +51,6 @@ def version_up(filepath):
padding=padding)
new_label = label.replace(version, new_version, 1)
new_basename = _rreplace(basename, label, new_label)
if not new_basename.endswith(new_label):
index = (new_basename.find(new_label))
index += len(new_label)
new_basename = new_basename[:index]
new_filename = "{}{}".format(new_basename, ext)
new_filename = os.path.join(dirname, new_filename)
new_filename = os.path.normpath(new_filename)
@ -65,8 +59,19 @@ def version_up(filepath):
raise RuntimeError("Created path is the same as current file,"
"this is a bug")
# We check for version clashes against any existing file whose name matches
# completely up to the {version} label. So if the source file was
# test_v001_test.txt we also want to check clashes against test_v002.txt,
# while still preserving the part after the version label in our new filename
clash_basename = new_basename
if not clash_basename.endswith(new_label):
index = (clash_basename.find(new_label))
index += len(new_label)
clash_basename = clash_basename[:index]
for file in os.listdir(dirname):
if file.endswith(ext) and file.startswith(new_basename):
if file.endswith(ext) and file.startswith(clash_basename):
log.info("Skipping existing version %s" % new_label)
return version_up(new_filename)

View file
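
A worked example of the clash check above, with hypothetical file names: bumping test_v001_test.txt produces test_v002_test.txt, but the clash check only uses the prefix up to the version label, so an existing test_v002.txt would also trigger another bump:

new_basename = "test_v002_test"   # basename of the bumped file (example)
new_label = "v002"

clash_basename = new_basename
if not clash_basename.endswith(new_label):
    index = clash_basename.find(new_label) + len(new_label)
    clash_basename = clash_basename[:index]

# clash_basename == "test_v002": any existing "test_v002*.txt" counts as a
# clash, while the "_test" suffix is still preserved in the new file name.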

@ -1,11 +1,14 @@
from openpype.lib import abstract_submit_deadline
from openpype.lib.abstract_submit_deadline import DeadlineJobInfo
import pyblish.api
import os
import attr
import getpass
import pyblish.api
from avalon import api
from openpype.lib import abstract_submit_deadline
from openpype.lib.abstract_submit_deadline import DeadlineJobInfo
from openpype.lib import env_value_to_bool
@attr.s
class DeadlinePluginInfo():
@ -29,7 +32,13 @@ class AfterEffectsSubmitDeadline(abstract_submit_deadline.AbstractSubmitDeadline
families = ["render.farm"] # cannot be "render' as that is integrated
use_published = True
priority = 50
chunk_size = 1000000
primary_pool = None
secondary_pool = None
group = None
department = None
multiprocess = True
def get_job_info(self):
dln_job_info = DeadlineJobInfo(Plugin="AfterEffects")
@ -49,6 +58,11 @@ class AfterEffectsSubmitDeadline(abstract_submit_deadline.AbstractSubmitDeadline
int(round(self._instance.data["frameEnd"])))
dln_job_info.Frames = frame_range
dln_job_info.Priority = self.priority
dln_job_info.Pool = self.primary_pool
dln_job_info.SecondaryPool = self.secondary_pool
dln_job_info.Group = self.group
dln_job_info.Department = self.department
dln_job_info.ChunkSize = self.chunk_size
dln_job_info.OutputFilename = \
os.path.basename(self._instance.data["expectedFiles"][0])
@ -105,9 +119,13 @@ class AfterEffectsSubmitDeadline(abstract_submit_deadline.AbstractSubmitDeadline
'{}.{}.{}'.format(arr[0], hashed,
arr[2]))
deadline_plugin_info.MultiProcess = True
deadline_plugin_info.Comp = self._instance.data["comp_name"]
deadline_plugin_info.Version = "17.5"
deadline_plugin_info.Version = self._instance.data["app_version"]
# must be here because of the Deadline AE plugin
# Multiprocess can be overridden by an env var: if it should not be used for
# some app variant, set MULTIPROCESS to false in Settings (default is True)
env_multi = env_value_to_bool("MULTIPROCESS", default=True)
deadline_plugin_info.MultiProcess = env_multi and self.multiprocess
deadline_plugin_info.SceneFile = self.scene_path
deadline_plugin_info.Output = render_path.replace("\\", "/")

View file
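
A sketch of how the MULTIPROCESS override above resolves. env_value_to_bool and its call signature come from the diff; the exact strings it accepts are assumed to follow the usual yes/no, true/false conventions (the settings diff further below uses "No"):

import os

from openpype.lib import env_value_to_bool

# e.g. injected through Settings for an app variant that should not multiprocess
os.environ["MULTIPROCESS"] = "No"

env_multi = env_value_to_bool("MULTIPROCESS", default=True)   # -> False (assumed parsing)
multiprocess_attr = True                                      # plugin class attribute default
deadline_multiprocess = env_multi and multiprocess_attr       # -> False for this variant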

@ -320,7 +320,6 @@ class HarmonySubmitDeadline(
/ published_scene.stem
/ f"{published_scene.stem}.xstage"
)
unzip_dir = (published_scene.parent / published_scene.stem)
with _ZipFile(published_scene, "r") as zip_ref:
zip_ref.extractall(unzip_dir.as_posix())
@ -351,12 +350,9 @@ class HarmonySubmitDeadline(
# use that one.
if not ideal_scene:
xstage_path = xstage_files[0]
return xstage_path
def get_plugin_info(self):
work_scene = Path(self._instance.data["source"])
# this is path to published scene workfile _ZIP_. Before
# rendering, we need to unzip it.
published_scene = Path(
@ -368,14 +364,13 @@ class HarmonySubmitDeadline(
# for submit_publish job to create .json file in
self._instance.data["outputDir"] = render_path
new_expected_files = []
work_path_str = str(work_scene.parent.as_posix())
render_path_str = str(render_path.as_posix())
for file in self._instance.data["expectedFiles"]:
_file = str(Path(file).as_posix())
expected_dir_str = os.path.dirname(_file)
new_expected_files.append(
_file.replace(work_path_str, render_path_str)
_file.replace(expected_dir_str, render_path_str)
)
audio_file = self._instance.data.get("audioFile")
if audio_file:
abs_path = xstage_path.parent / audio_file

View file
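
The expected-file remapping above, traced with made-up paths (only os and pathlib are used; the directories are illustrative):

import os
from pathlib import Path

render_path = Path("/renders/sh010/harmony")
expected = "/work/sh010/frames/final.0001.png"

_file = str(Path(expected).as_posix())
expected_dir_str = os.path.dirname(_file)        # "/work/sh010/frames"
render_path_str = str(render_path.as_posix())

remapped = _file.replace(expected_dir_str, render_path_str)
# remapped == "/renders/sh010/harmony/final.0001.png"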

@ -404,7 +404,13 @@ class MayaSubmitDeadline(pyblish.api.InstancePlugin):
dirname = os.path.join(workspace, default_render_file)
renderlayer = instance.data['setMembers'] # rs_beauty
deadline_user = context.data.get("user", getpass.getuser())
jobname = "%s - %s" % (filename, instance.name)
# Always use the original work file name for the Job name even when
# rendering is done from the published Work File. The original work
# file name is clearer because it can also have subversion strings,
# etc. which are stripped for the published file.
src_filename = os.path.basename(context.data["currentFile"])
jobname = "%s - %s" % (src_filename, instance.name)
# Get the variables depending on the renderer
render_variables = get_renderer_variables(renderlayer, dirname)
@ -452,7 +458,7 @@ class MayaSubmitDeadline(pyblish.api.InstancePlugin):
self.payload_skeleton["JobInfo"]["Plugin"] = self._instance.data.get(
"mayaRenderPlugin", "MayaBatch")
self.payload_skeleton["JobInfo"]["BatchName"] = filename
self.payload_skeleton["JobInfo"]["BatchName"] = src_filename
# Job name, as seen in Monitor
self.payload_skeleton["JobInfo"]["Name"] = jobname
# Arbitrary username, for visualisation in Monitor

View file
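
An illustration of the job-name source described in the comment above; the paths, layer name, and subversion suffix are hypothetical:

import os

# Original work file, which may carry a subversion string that gets stripped
# from the published file actually used for rendering.
current_file = "/proj/sh010/work/maya/sh010_lighting_v012_fixFlicker.ma"

src_filename = os.path.basename(current_file)
jobname = "%s - %s" % (src_filename, "renderLayerBeauty")
# -> "sh010_lighting_v012_fixFlicker.ma - renderLayerBeauty"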

@ -694,13 +694,13 @@ class ExtractReview(pyblish.api.InstancePlugin):
audio_args_dentifiers = ["-af", "-filter:a"]
for arg in tuple(output_args):
for identifier in video_args_dentifiers:
if identifier in arg:
if arg.startswith("{} ".format(identifier)):
output_args.remove(arg)
arg = arg.replace(identifier, "").strip()
video_filters.append(arg)
for identifier in audio_args_dentifiers:
if identifier in arg:
if arg.startswith("{} ".format(identifier)):
output_args.remove(arg)
arg = arg.replace(identifier, "").strip()
audio_filters.append(arg)

View file
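
Why the stricter startswith check above matters, with an illustrative argument value (the identifiers are the ones used by the plugin):

arg = "-vframes 10"          # an ordinary output argument, not a filter

"-vf" in arg                 # True  -> old check would misclassify it as a video filter
arg.startswith("-vf ")       # False -> new check leaves it untouched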

@ -100,7 +100,8 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
"redshiftproxy",
"effect",
"xgen",
"hda"
"hda",
"usd"
]
exclude_families = ["clip"]
db_representation_context_keys = [

View file

@ -32,7 +32,7 @@ class ValidateEditorialAssetName(pyblish.api.ContextPlugin):
self.log.debug("__ db_assets: {}".format(db_assets))
asset_db_docs = {
str(e["name"]): e["data"]["parents"]
str(e["name"]): [str(p) for p in e["data"]["parents"]]
for e in db_assets}
self.log.debug("__ project_entities: {}".format(
@ -43,17 +43,15 @@ class ValidateEditorialAssetName(pyblish.api.ContextPlugin):
for asset in asset_and_parents.keys():
if asset not in asset_db_docs.keys():
# collect assets missing in the database for the next layer of checks
assets_missing_name.update({asset: asset_and_parents[asset]})
assets_missing_name[asset] = asset_and_parents[asset]
continue
if asset_and_parents[asset] != asset_db_docs[asset]:
# collect assets with a wrong parent for the next layer of checks
assets_wrong_parent.update({
asset: {
"required": asset_and_parents[asset],
"already_in_db": asset_db_docs[asset]
}
})
assets_wrong_parent[asset] = {
"required": asset_and_parents[asset],
"already_in_db": asset_db_docs[asset]
}
continue
self.log.info("correct asset: {}".format(asset))
@ -62,17 +60,24 @@ class ValidateEditorialAssetName(pyblish.api.ContextPlugin):
wrong_names = {}
self.log.debug(
">> assets_missing_name: {}".format(assets_missing_name))
for asset in assets_missing_name.keys():
# This will create a set of normalized asset names
asset_names = {
a.lower().replace("_", "") for a in asset_db_docs
}
for asset in assets_missing_name:
_asset = asset.lower().replace("_", "")
if _asset in [a.lower().replace("_", "")
for a in asset_db_docs.keys()]:
wrong_names.update({
"required_name": asset,
"used_variants_in_db": [
a for a in asset_db_docs.keys()
if a.lower().replace("_", "") == _asset
]
})
if _asset in asset_names:
wrong_names[asset] = {
"required_name": asset,
"used_variants_in_db": [
a for a in asset_db_docs
if a.lower().replace("_", "") == _asset
]
}
if wrong_names:
self.log.debug(
@ -114,8 +119,8 @@ class ValidateEditorialAssetName(pyblish.api.ContextPlugin):
parents = instance.data["parents"]
return_dict.update({
asset: [p["entity_name"] for p in parents
if p["entity_type"].lower() != "project"]
})
return_dict[asset] = [
str(p["entity_name"]) for p in parents
if p["entity_type"].lower() != "project"
]
return return_dict

View file
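
The name normalisation used by the validator above, shown on hypothetical asset names: lowercasing and dropping underscores makes variant spellings collide so they can be reported:

db_assets = ["sh_010", "SH020"]
asset_names = {a.lower().replace("_", "") for a in db_assets}   # {"sh010", "sh020"}

candidate = "SH_010"
normalized = candidate.lower().replace("_", "")                 # "sh010"
is_variant_clash = normalized in asset_names                    # True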

@ -186,5 +186,24 @@
"renderSpace": "scene-linear Rec 709/sRGB",
"viewTransform": "sRGB gamma"
}
},
"flame": {
"project": {
"colourPolicy": "ACES 1.1",
"frameDepth": "16-bit fp",
"fieldDominance": "PROGRESSIVE"
},
"profilesMapping": {
"inputs": [
{
"flameName": "ACEScg",
"ocioName": "ACES - ACEScg"
},
{
"flameName": "Rec.709 video",
"ocioName": "Output - Rec.709"
}
]
}
}
}

View file
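
A hypothetical lookup against the profilesMapping defaults above; the dict literal mirrors the JSON, while the lookup itself is not part of the diff:

profiles_mapping = {
    "inputs": [
        {"flameName": "ACEScg", "ocioName": "ACES - ACEScg"},
        {"flameName": "Rec.709 video", "ocioName": "Output - Rec.709"},
    ]
}

ocio_by_flame = {
    item["flameName"]: item["ocioName"]
    for item in profiles_mapping["inputs"]
}
ocio_by_flame["ACEScg"]   # -> "ACES - ACEScg"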

@ -10,13 +10,6 @@
"skip_timelines_check": [
".*"
]
},
"AfterEffectsSubmitDeadline": {
"use_published": true,
"priority": 50,
"primary_pool": "",
"secondary_pool": "",
"chunk_size": 1000000
}
},
"workfile_builder": {

View file

@ -93,7 +93,8 @@
"primary_pool": "",
"secondary_pool": "",
"group": "",
"department": ""
"department": "",
"multiprocess": true
}
}
}

View file

@ -24,12 +24,38 @@
"export_presets_mapping": {
"exr16fpdwaa": {
"ext": "exr",
"xml_preset_dir": "",
"xml_preset_file": "OpenEXR (16-bit fp DWAA).xml",
"xml_preset_dir": "",
"colorspace_out": "ACES - ACEScg",
"representation_add_range": true,
"representation_tags": []
}
}
}
},
"load": {
"LoadClip": {
"enabled": true,
"families": [
"render2d",
"source",
"plate",
"render",
"review"
],
"representations": [
"exr",
"dpx",
"jpg",
"jpeg",
"png",
"h264",
"mov",
"mp4"
],
"reel_group_name": "OpenPype_Reels",
"reel_name": "Loaded",
"clip_name_template": "{asset}_{subset}_{representation}"
}
}
}

View file

@ -332,6 +332,18 @@
"tasks": [],
"add_ftrack_family": true,
"advanced_filtering": []
},
{
"hosts": [
"houdini"
],
"families": [
"usd"
],
"task_types": [],
"tasks": [],
"add_ftrack_family": true,
"advanced_filtering": []
}
]
},
@ -376,7 +388,8 @@
"layout": "layout",
"unrealStaticMesh": "geo",
"vrayproxy": "cache",
"redshiftproxy": "cache"
"redshiftproxy": "cache",
"usd": "usd"
}
}
}

View file

@ -121,7 +121,7 @@
"/opt/Autodesk/flame_2021/bin/flame.app/Contents/MacOS/startApp"
],
"linux": [
"/opt/Autodesk/flame_2021/bin/flame"
"/opt/Autodesk/flame_2021/bin/startApplication"
]
},
"arguments": {
@ -135,8 +135,31 @@
"OPENPYPE_WIRETAP_TOOLS": "/opt/Autodesk/wiretap/tools/2021"
}
},
"2021.1": {
"use_python_2": true,
"executables": {
"windows": [],
"darwin": [
"/opt/Autodesk/flame_2021.1/bin/flame.app/Contents/MacOS/startApp"
],
"linux": [
"/opt/Autodesk/flame_2021.1/bin/startApplication"
]
},
"arguments": {
"windows": [],
"darwin": [],
"linux": []
},
"environment": {
"OPENPYPE_FLAME_PYTHON_EXEC": "/opt/Autodesk/python/2021.1/bin/python2.7",
"OPENPYPE_FLAME_PYTHONPATH": "/opt/Autodesk/flame_2021.1/python",
"OPENPYPE_WIRETAP_TOOLS": "/opt/Autodesk/wiretap/tools/2021.1"
}
},
"__dynamic_keys_labels__": {
"2021": "2021 (Testing Only)"
"2021": "2021",
"2021.1": "2021.1"
}
}
},
@ -690,15 +713,29 @@
"OPENPYPE_LOG_NO_COLORS": "Yes"
},
"variants": {
"16": {
"enabled": true,
"variant_label": "16",
"use_python_2": false,
"17": {
"executables": {
"windows": [
"C:\\Program Files\\Blackmagic Design\\Fusion 17\\Fusion.exe"
],
"darwin": [],
"linux": []
},
"arguments": {
"windows": [],
"darwin": [],
"linux": []
},
"environment": {}
},
"16": {
"executables": {
"windows": [
"C:\\Program Files\\Blackmagic Design\\Fusion 16\\Fusion.exe"
],
"darwin": [],
"linux": []
},
"arguments": {
"windows": [],
"darwin": [],
@ -707,9 +744,6 @@
"environment": {}
},
"9": {
"enabled": true,
"variant_label": "9",
"use_python_2": false,
"executables": {
"windows": [
"C:\\Program Files\\Blackmagic Design\\Fusion 9\\Fusion.exe"
@ -938,8 +972,12 @@
"enabled": true,
"variant_label": "21",
"executables": {
"windows": ["c:\\Program Files (x86)\\Toon Boom Animation\\Toon Boom Harmony 21 Premium\\win64\\bin\\HarmonyPremium.exe"],
"darwin": ["/Applications/Toon Boom Harmony 21 Premium/Harmony Premium.app/Contents/MacOS/Harmony Premium"],
"windows": [
"c:\\Program Files (x86)\\Toon Boom Animation\\Toon Boom Harmony 21 Premium\\win64\\bin\\HarmonyPremium.exe"
],
"darwin": [
"/Applications/Toon Boom Harmony 21 Premium/Harmony Premium.app/Contents/MacOS/Harmony Premium"
],
"linux": []
},
"arguments": {
@ -953,8 +991,12 @@
"enabled": true,
"variant_label": "20",
"executables": {
"windows": ["c:\\Program Files (x86)\\Toon Boom Animation\\Toon Boom Harmony 20 Premium\\win64\\bin\\HarmonyPremium.exe"],
"darwin": ["/Applications/Toon Boom Harmony 20 Premium/Harmony Premium.app/Contents/MacOS/Harmony Premium"],
"windows": [
"c:\\Program Files (x86)\\Toon Boom Animation\\Toon Boom Harmony 20 Premium\\win64\\bin\\HarmonyPremium.exe"
],
"darwin": [
"/Applications/Toon Boom Harmony 20 Premium/Harmony Premium.app/Contents/MacOS/Harmony Premium"
],
"linux": []
},
"arguments": {
@ -968,7 +1010,9 @@
"enabled": true,
"variant_label": "17",
"executables": {
"windows": ["c:\\Program Files (x86)\\Toon Boom Animation\\Toon Boom Harmony 17 Premium\\win64\\bin\\HarmonyPremium.exe"],
"windows": [
"c:\\Program Files (x86)\\Toon Boom Animation\\Toon Boom Harmony 17 Premium\\win64\\bin\\HarmonyPremium.exe"
],
"darwin": [
"/Applications/Toon Boom Harmony 17 Premium/Harmony Premium.app/Contents/MacOS/Harmony Premium"
],
@ -1142,7 +1186,9 @@
"darwin": [],
"linux": []
},
"environment": {}
"environment": {
"MULTIPROCESS": "No"
}
}
}
},

View file

@ -13,7 +13,7 @@
}
},
"ftrack": {
"enabled": true,
"enabled": false,
"ftrack_server": "",
"ftrack_actions_path": {
"windows": [],

View file

@ -50,39 +50,6 @@
"label": "Skip Timeline Check for Tasks"
}
]
},
{
"type": "dict",
"collapsible": true,
"key": "AfterEffectsSubmitDeadline",
"label": "AfterEffects Submit to Deadline",
"children": [
{
"type": "boolean",
"key": "use_published",
"label": "Use Published scene"
},
{
"type": "number",
"key": "priority",
"label": "Priority"
},
{
"type": "text",
"key": "primary_pool",
"label": "Primary Pool"
},
{
"type": "text",
"key": "secondary_pool",
"label": "Secondary Pool"
},
{
"type": "number",
"key": "chunk_size",
"label": "Frames Per Task"
}
]
}
]
},

View file

@ -367,6 +367,11 @@
"type": "text",
"key": "department",
"label": "Department"
},
{
"type": "boolean",
"key": "multiprocess",
"label": "Multiprocess"
}
]
},

View file

@ -166,6 +166,11 @@
"label": "XML preset folder (optional)",
"type": "text"
},
{
"key": "colorspace_out",
"label": "Output color (imageio)",
"type": "text"
},
{
"type": "separator"
},
@ -189,6 +194,61 @@
]
}
]
},
{
"type": "dict",
"collapsible": true,
"key": "load",
"label": "Loader plugins",
"children": [
{
"type": "dict",
"collapsible": true,
"key": "LoadClip",
"label": "Load Clip",
"checkbox_key": "enabled",
"children": [
{
"type": "boolean",
"key": "enabled",
"label": "Enabled"
},
{
"type": "list",
"key": "families",
"label": "Families",
"object_type": "text"
},
{
"type": "list",
"key": "representations",
"label": "Representations",
"object_type": "text"
},
{
"type": "separator"
},
{
"type": "text",
"key": "reel_group_name",
"label": "Reel group name"
},
{
"type": "text",
"key": "reel_name",
"label": "Reel name"
},
{
"type": "separator"
},
{
"type": "text",
"key": "clip_name_template",
"label": "Clip name template"
}
]
}
]
}
]
}

View file

@ -403,6 +403,68 @@
]
}
]
},
{
"key": "flame",
"type": "dict",
"label": "Flame/Flair",
"children": [
{
"key": "project",
"type": "dict",
"label": "Project",
"collapsible": false,
"children": [
{
"type": "form",
"children": [
{
"type": "text",
"key": "colourPolicy",
"label": "Colour Policy"
},
{
"type": "text",
"key": "frameDepth",
"label": "Image Depth"
},
{
"type": "text",
"key": "fieldDominance",
"label": "Field Dominance"
}
]
}
]
},
{
"key": "profilesMapping",
"type": "dict",
"label": "Profile names mapping",
"collapsible": true,
"children": [
{
"type": "list",
"key": "inputs",
"object_type": {
"type": "dict",
"children": [
{
"type": "text",
"key": "flameName",
"label": "Flame name"
},
{
"type": "text",
"key": "ocioName",
"label": "OCIO name"
}
]
}
}
]
}
]
}
]
}

View file

@ -20,24 +20,21 @@
"type": "raw-json"
},
{
"type": "dict",
"type": "dict-modifiable",
"key": "variants",
"children": [
{
"type": "schema_template",
"name": "template_host_variant",
"template_data": [
{
"app_variant_label": "16",
"app_variant": "16"
},
{
"app_variant_label": "9",
"app_variant": "9"
}
]
}
]
"collapsible_key": true,
"use_label_wrap": false,
"object_type": {
"type": "dict",
"collapsible": true,
"children": [
{
"type": "schema_template",
"name": "template_host_variant_items",
"skip_paths": ["use_python_2"]
}
]
}
}
]
}

View file

@ -396,9 +396,7 @@ class LibraryLoaderWindow(QtWidgets.QDialog):
self._versionschanged()
return
selected_subsets = self._subsets_widget.selected_subsets(
_merged=True, _other=False
)
selected_subsets = self._subsets_widget.get_selected_merge_items()
asset_colors = {}
asset_ids = []
@ -423,35 +421,14 @@ class LibraryLoaderWindow(QtWidgets.QDialog):
self._versionschanged()
def _versionschanged(self):
selection = self._subsets_widget.view.selectionModel()
# Active must be in the selected rows otherwise we
# assume it's not actually an "active" current index.
version_docs = None
items = self._subsets_widget.get_selected_subsets()
version_doc = None
active = selection.currentIndex()
rows = selection.selectedRows(column=active.column())
if active and active in rows:
item = active.data(self._subsets_widget.model.ItemRole)
if (
item is not None
and not (item.get("isGroup") or item.get("isMerged"))
):
version_doc = item["version_document"]
if rows:
version_docs = []
for index in rows:
if not index or not index.isValid():
continue
item = index.data(self._subsets_widget.model.ItemRole)
if (
item is None
or item.get("isGroup")
or item.get("isMerged")
):
continue
version_docs.append(item["version_document"])
version_docs = []
for item in items:
doc = item["version_document"]
version_docs.append(doc)
if version_doc is None:
version_doc = doc
self._version_info_widget.set_version(version_doc)

View file

@ -287,9 +287,7 @@ class LoaderWindow(QtWidgets.QDialog):
on selection change so they match current selection.
"""
# TODO do not touch inner attributes of asset widget
last_asset_ids = self.data["state"]["assetIds"]
if last_asset_ids:
self._assets_widget.clear_underlines()
self._assets_widget.clear_underlines()
def _assetschanged(self):
"""Selected assets have changed"""
@ -328,12 +326,11 @@ class LoaderWindow(QtWidgets.QDialog):
asset_ids = self.data["state"]["assetIds"]
# Skip setting colors if not asset multiselection
if not asset_ids or len(asset_ids) < 2:
self.clear_assets_underlines()
self._versionschanged()
return
selected_subsets = self._subsets_widget.selected_subsets(
_merged=True, _other=False
)
selected_subsets = self._subsets_widget.get_selected_merge_items()
asset_colors = {}
asset_ids = []
@ -358,37 +355,16 @@ class LoaderWindow(QtWidgets.QDialog):
self._versionschanged()
def _versionschanged(self):
subsets = self._subsets_widget
selection = subsets.view.selectionModel()
# Active must be in the selected rows otherwise we
# assume it's not actually an "active" current index.
items = self._subsets_widget.get_selected_subsets()
version_doc = None
active = selection.currentIndex()
rows = selection.selectedRows(column=active.column())
if active:
if active in rows:
item = active.data(subsets.model.ItemRole)
if (
item is not None and
not (item.get("isGroup") or item.get("isMerged"))
):
version_doc = item["version_document"]
self._version_info_widget.set_version(version_doc)
version_docs = []
if rows:
for index in rows:
if not index or not index.isValid():
continue
item = index.data(subsets.model.ItemRole)
if item is None:
continue
if item.get("isGroup") or item.get("isMerged"):
for child in item.children():
version_docs.append(child["version_document"])
else:
version_docs.append(item["version_document"])
for item in items:
doc = item["version_document"]
version_docs.append(doc)
if version_doc is None:
version_doc = doc
self._version_info_widget.set_version(version_doc)
thumbnail_src_ids = [
version_doc["_id"]
@ -480,18 +456,7 @@ class LoaderWindow(QtWidgets.QDialog):
self.echo("Grouping not enabled.")
return
selected = []
merged_items = []
for item in subsets.selected_subsets(_merged=True):
if item.get("isMerged"):
merged_items.append(item)
else:
selected.append(item)
for merged_item in merged_items:
for child_item in merged_item.children():
selected.append(child_item)
selected = self._subsets_widget.get_selected_subsets()
if not selected:
self.echo("No selected subset.")
return

View file
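
The selection handling after the refactor above, in isolation. The helper names come from the diff; the widget instance, and the assumption that the helpers already flatten merged/group rows (the manual expansion was removed), are taken on faith:

# subsets_widget is an existing subsets widget instance (assumed available).
merge_items = subsets_widget.get_selected_merge_items()
subset_items = subsets_widget.get_selected_subsets()

version_docs = [item["version_document"] for item in subset_items]
version_doc = version_docs[0] if version_docs else None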

@ -18,26 +18,6 @@ def change_visibility(model, view, column_name, visible):
view.setColumnHidden(index, not visible)
def get_selected_items(rows, item_role):
items = []
for row_index in rows:
item = row_index.data(item_role)
if item.get("isGroup"):
continue
elif item.get("isMerged"):
for idx in range(row_index.model().rowCount(row_index)):
child_index = row_index.child(idx, 0)
item = child_index.data(item_role)
if item not in items:
items.append(item)
else:
if item not in items:
items.append(item)
return items
def get_options(action, loader, parent, repre_contexts):
"""Provides dialog to select value from loader provided options.

Some files were not shown because too many files have changed in this diff.