Mirror of https://github.com/ynput/ayon-core.git
Synced 2025-12-24 21:04:40 +01:00

[Automated] Merged develop into main

Commit 5acb645e63
203 changed files with 3578 additions and 1165 deletions

.github/workflows/test_build.yml (vendored, 2 changes)

@@ -37,6 +37,7 @@ jobs:
      - name: 🔨 Build
        shell: pwsh
        run: |
+         $env:SKIP_THIRD_PARTY_VALIDATION="1"
          ./tools/build.ps1

  Ubuntu-latest:

@@ -61,6 +62,7 @@ jobs:

      - name: 🔨 Build
        run: |
+         export SKIP_THIRD_PARTY_VALIDATION="1"
          ./tools/build.sh

  # MacOS-latest:

@@ -3,7 +3,6 @@ import re
import tempfile
import attr

from avalon import aftereffects
import pyblish.api

from openpype.settings import get_project_settings

@@ -159,7 +158,7 @@ class CollectAERender(abstract_collect_render.AbstractCollectRender):
            in url

        Returns:
-           (list) of absolut urls to rendered file
+           (list) of absolute urls to rendered file
        """
        start = render_instance.frameStart
        end = render_instance.frameEnd

@@ -25,7 +25,7 @@ class SelectInvalidAction(pyblish.api.Action):
            invalid.extend(invalid_nodes)
        else:
            self.log.warning(
-               "Failed plug-in doens't have any selectable objects."
+               "Failed plug-in doesn't have any selectable objects."
            )

        bpy.ops.object.select_all(action='DESELECT')

@@ -9,7 +9,7 @@ import addon_utils
def load_scripts(paths):
    """Copy of `load_scripts` from Blender's implementation.

-   It is possible that whis function will be changed in future and usage will
+   It is possible that this function will be changed in future and usage will
    be based on Blender version.
    """
    import bpy_types

@@ -21,7 +21,7 @@ class InstallPySideToBlender(PreLaunchHook):
    platforms = ["windows"]

    def execute(self):
-       # Prelaunch hook is not crutial
+       # Prelaunch hook is not crucial
        try:
            self.inner_execute()
        except Exception:

@@ -156,7 +156,7 @@ class InstallPySideToBlender(PreLaunchHook):
        except pywintypes.error:
            pass

-       self.log.warning("Failed to instal PySide2 module to blender.")
+       self.log.warning("Failed to install PySide2 module to blender.")

    def is_pyside_installed(self, python_executable):
        """Check if PySide2 module is in blender's pip list.

@@ -22,7 +22,7 @@ class CreateAnimation(plugin.Creator):
        ops.execute_in_main_thread(mti)

    def _process(self):
-       # Get Instance Containter or create it if it does not exist
+       # Get Instance Container or create it if it does not exist
        instances = bpy.data.collections.get(AVALON_INSTANCES)
        if not instances:
            instances = bpy.data.collections.new(name=AVALON_INSTANCES)

@@ -22,7 +22,7 @@ class CreateCamera(plugin.Creator):
        ops.execute_in_main_thread(mti)

    def _process(self):
-       # Get Instance Containter or create it if it does not exist
+       # Get Instance Container or create it if it does not exist
        instances = bpy.data.collections.get(AVALON_INSTANCES)
        if not instances:
            instances = bpy.data.collections.new(name=AVALON_INSTANCES)

@@ -22,7 +22,7 @@ class CreateLayout(plugin.Creator):
        ops.execute_in_main_thread(mti)

    def _process(self):
-       # Get Instance Containter or create it if it does not exist
+       # Get Instance Container or create it if it does not exist
        instances = bpy.data.collections.get(AVALON_INSTANCES)
        if not instances:
            instances = bpy.data.collections.new(name=AVALON_INSTANCES)

@@ -22,7 +22,7 @@ class CreateModel(plugin.Creator):
        ops.execute_in_main_thread(mti)

    def _process(self):
-       # Get Instance Containter or create it if it does not exist
+       # Get Instance Container or create it if it does not exist
        instances = bpy.data.collections.get(AVALON_INSTANCES)
        if not instances:
            instances = bpy.data.collections.new(name=AVALON_INSTANCES)

@@ -22,7 +22,7 @@ class CreateRig(plugin.Creator):
        ops.execute_in_main_thread(mti)

    def _process(self):
-       # Get Instance Containter or create it if it does not exist
+       # Get Instance Container or create it if it does not exist
        instances = bpy.data.collections.get(AVALON_INSTANCES)
        if not instances:
            instances = bpy.data.collections.new(name=AVALON_INSTANCES)

@@ -14,7 +14,7 @@ class IncrementWorkfileVersion(pyblish.api.ContextPlugin):
    def process(self, context):

        assert all(result["success"] for result in context.data["results"]), (
-           "Publishing not succesfull so version is not increased.")
+           "Publishing not successful so version is not increased.")

        from openpype.lib import version_up
        path = context.data["currentFile"]

@@ -32,7 +32,7 @@ class AppendCelactionAudio(pyblish.api.ContextPlugin):
        repr = next((r for r in reprs), None)
        if not repr:
            raise "Missing `audioMain` representation"
-       self.log.info(f"represetation is: {repr}")
+       self.log.info(f"representation is: {repr}")

        audio_file = repr.get('data', {}).get('path', "")

@@ -56,7 +56,7 @@ class AppendCelactionAudio(pyblish.api.ContextPlugin):
        representations (list): list for all representations

        Returns:
-           dict: subsets with version and representaions in keys
+           dict: subsets with version and representations in keys
        """

        # Query all subsets for asset

@@ -23,10 +23,17 @@ from .lib import (
    get_sequence_segments,
    maintained_segment_selection,
    reset_segment_selection,
-   get_segment_attributes
+   get_segment_attributes,
+   get_clips_in_reels,
+   get_reformated_filename,
+   get_frame_from_filename,
+   get_padding_from_filename,
+   maintained_object_duplication
)
from .utils import (
-   setup
+   setup,
+   get_flame_version,
+   get_flame_install_root
)
from .pipeline import (
    install,

@@ -55,6 +62,10 @@ from .workio import (
    file_extensions,
    work_root
)
+from .render_utils import (
+   export_clip,
+   get_preset_path_by_xml_name
+)

__all__ = [
    # constants

@@ -80,6 +91,11 @@ __all__ = [
    "maintained_segment_selection",
    "reset_segment_selection",
    "get_segment_attributes",
+   "get_clips_in_reels",
+   "get_reformated_filename",
+   "get_frame_from_filename",
+   "get_padding_from_filename",
+   "maintained_object_duplication",

    # pipeline
    "install",

@@ -96,6 +112,8 @@ __all__ = [

    # utils
    "setup",
+   "get_flame_version",
+   "get_flame_install_root",

    # menu
    "FlameMenuProjectConnect",

@@ -111,5 +129,9 @@ __all__ = [
    "current_file",
    "has_unsaved_changes",
    "file_extensions",
-   "work_root"
+   "work_root",
+
+   # render utils
+   "export_clip",
+   "get_preset_path_by_xml_name"
]

@@ -16,6 +16,7 @@ from openpype.api import Logger

log = Logger.get_logger(__name__)

+FRAME_PATTERN = re.compile(r"[\._](\d+)[\.]")

class CTX:
    # singleton used for passing data between api modules

@@ -445,6 +446,8 @@ def get_sequence_segments(sequence, selected=False):
        for segment in track.segments:
            if segment.name.get_value() == "":
                continue
+           if segment.hidden.get_value() is True:
+               continue
            if (
                selected is True
                and segment.selected.get_value() is not True

@@ -519,7 +522,7 @@ def _get_shot_tokens_values(clip, tokens):


def get_segment_attributes(segment):
-   if str(segment.name)[1:-1] == "":
+   if segment.name.get_value() == "":
        return None

    # Add timeline segment to tree

@@ -532,6 +535,12 @@ def get_segment_attributes(segment):
        "PySegment": segment
    }

+   # head and tail with forward compatibility
+   if segment.head:
+       clip_data["segment_head"] = int(segment.head)
+   if segment.tail:
+       clip_data["segment_tail"] = int(segment.tail)
+
    # add all available shot tokens
    shot_tokens = _get_shot_tokens_values(segment, [
        "<colour space>", "<width>", "<height>", "<depth>", "<segment>",

@@ -551,7 +560,7 @@ def get_segment_attributes(segment):
        attr = getattr(segment, attr_name)
        segment_attrs_data[attr] = str(attr).replace("+", ":")

-       if attr in ["record_in", "record_out"]:
+       if attr_name in ["record_in", "record_out"]:
            clip_data[attr_name] = attr.relative_frame
        else:
            clip_data[attr_name] = attr.frame

@@ -559,3 +568,127 @@ def get_segment_attributes(segment):
    clip_data["segment_timecodes"] = segment_attrs_data

    return clip_data
+
+
+def get_clips_in_reels(project):
+    output_clips = []
+    project_desktop = project.current_workspace.desktop
+
+    for reel_group in project_desktop.reel_groups:
+        for reel in reel_group.reels:
+            for clip in reel.clips:
+                clip_data = {
+                    "PyClip": clip,
+                    "fps": float(str(clip.frame_rate)[:-4])
+                }
+
+                attrs = [
+                    "name", "width", "height",
+                    "ratio", "sample_rate", "bit_depth"
+                ]
+
+                for attr in attrs:
+                    val = getattr(clip, attr)
+                    clip_data[attr] = val
+
+                version = clip.versions[-1]
+                track = version.tracks[-1]
+                for segment in track.segments:
+                    segment_data = get_segment_attributes(segment)
+                    clip_data.update(segment_data)
+
+                output_clips.append(clip_data)
+
+    return output_clips
+
+
+def get_reformated_filename(filename, padded=True):
+    """
+    Return fixed python expression path
+
+    Args:
+        filename (str): file name
+
+    Returns:
+        str: string with reformatted path
+
+    Example:
+        get_reformated_filename("plate.1001.exr") > plate.%04d.exr
+
+    """
+    found = FRAME_PATTERN.search(filename)
+
+    if not found:
+        log.info("File name is not sequence: {}".format(filename))
+        return filename
+
+    padding = get_padding_from_filename(filename)
+
+    replacement = "%0{}d".format(padding) if padded else "%d"
+    start_idx, end_idx = found.span(1)
+
+    return replacement.join(
+        [filename[:start_idx], filename[end_idx:]]
+    )
+
+
+def get_padding_from_filename(filename):
+    """
+    Return padding number from Flame path style
+
+    Args:
+        filename (str): file name
+
+    Returns:
+        int: padding number
+
+    Example:
+        get_padding_from_filename("plate.0001.exr") > 4
+
+    """
+    found = get_frame_from_filename(filename)
+
+    return len(found) if found else None
+
+
+def get_frame_from_filename(filename):
+    """
+    Return sequence number from Flame path style
+
+    Args:
+        filename (str): file name
+
+    Returns:
+        str: sequence frame number
+
+    Example:
+        get_frame_from_filename("plate.0001.exr") > "0001"
+
+    """
+    found = re.findall(FRAME_PATTERN, filename)
+
+    return found.pop() if found else None
+
+
+@contextlib.contextmanager
+def maintained_object_duplication(item):
+    """Maintain input item duplication
+
+    Attributes:
+        item (any flame.PyObject): python api object
+
+    Yield:
+        duplicate input PyObject type
+    """
+    import flame
+    # Duplicate the clip to avoid modifying the original clip
+    duplicate = flame.duplicate(item)
+
+    try:
+        # do the operation on selected segments
+        yield duplicate
+    finally:
+        # delete the item at the end
+        flame.delete(duplicate)

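The helpers added above all pivot on the module-level FRAME_PATTERN regex. A minimal standalone sketch of the same search-and-span logic, with an illustrative helper name and no Flame API required:

```python
import re

# same pattern as the FRAME_PATTERN added to lib.py above
FRAME_PATTERN = re.compile(r"[\._](\d+)[\.]")


def to_frame_expression(filename, padded=True):
    # mirrors get_reformated_filename(): swap only the matched
    # frame digits for a printf-style token
    found = FRAME_PATTERN.search(filename)
    if not found:
        return filename  # not a frame sequence, leave untouched
    padding = len(found.group(1))
    replacement = "%0{}d".format(padding) if padded else "%d"
    start_idx, end_idx = found.span(1)
    return filename[:start_idx] + replacement + filename[end_idx:]


print(to_frame_expression("plate.1001.exr"))                # plate.%04d.exr
print(to_frame_expression("plate.1001.exr", padded=False))  # plate.%d.exr
print(to_frame_expression("plate.mov"))                     # plate.mov
```
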
@@ -44,7 +44,7 @@ class _FlameMenuApp(object):
        self.menu_group_name = menu_group_name
        self.dynamic_menu_data = {}

-       # flame module is only avaliable when a
+       # flame module is only available when a
        # flame project is loaded and initialized
        self.flame = None
        try:


openpype/hosts/flame/api/render_utils.py (new file, 125 additions)

@@ -0,0 +1,125 @@
import os


def export_clip(export_path, clip, preset_path, **kwargs):
    """Flame exporter wrapper

    Args:
        export_path (str): exporting directory path
        clip (PyClip): flame api object
        preset_path (str): full export path to xml file

    Kwargs:
        thumb_frame_number (int)[optional]: source frame number
        in_mark (int)[optional]: cut in mark
        out_mark (int)[optional]: cut out mark

    Raises:
        KeyError: Missing input kwarg `thumb_frame_number`
            in case `thumbnail` in `export_preset`
        FileExistsError: Missing export preset in shared folder
    """
    import flame

    in_mark = out_mark = None

    # Set exporter
    exporter = flame.PyExporter()
    exporter.foreground = True
    exporter.export_between_marks = True

    if kwargs.get("thumb_frame_number"):
        thumb_frame_number = kwargs["thumb_frame_number"]
        # make sure it exists in kwargs
        if not thumb_frame_number:
            raise KeyError(
                "Missing key `thumb_frame_number` in input kwargs")

        in_mark = int(thumb_frame_number)
        out_mark = int(thumb_frame_number) + 1

    elif kwargs.get("in_mark") and kwargs.get("out_mark"):
        in_mark = int(kwargs["in_mark"])
        out_mark = int(kwargs["out_mark"])
    else:
        exporter.export_between_marks = False

    try:
        # set in and out marks if they are available
        if in_mark and out_mark:
            clip.in_mark = in_mark
            clip.out_mark = out_mark

        # export with exporter
        exporter.export(clip, preset_path, export_path)
    finally:
        print('Exported: {} at {}-{}'.format(
            clip.name.get_value(),
            clip.in_mark,
            clip.out_mark
        ))


def get_preset_path_by_xml_name(xml_preset_name):
    def _search_path(root):
        output = []
        for root, _dirs, files in os.walk(root):
            for f in files:
                if f != xml_preset_name:
                    continue
                file_path = os.path.join(root, f)
                output.append(file_path)
        return output

    def _validate_results(results):
        if results and len(results) == 1:
            return results.pop()
        elif results and len(results) > 1:
            print((
                "More matching presets for `{}`: \n"
                "{}").format(xml_preset_name, results))
            return results.pop()
        else:
            return None

    from .utils import (
        get_flame_install_root,
        get_flame_version
    )

    # get actual flame version and install path
    _version = get_flame_version()["full"]
    _install_root = get_flame_install_root()

    # search path templates
    shared_search_root = "{install_root}/shared/export/presets"
    install_search_root = (
        "{install_root}/presets/{version}/export/presets/flame")

    # fill templates
    shared_search_root = shared_search_root.format(
        install_root=_install_root
    )
    install_search_root = install_search_root.format(
        install_root=_install_root,
        version=_version
    )

    # get search results
    shared_results = _search_path(shared_search_root)
    installed_results = _search_path(install_search_root)

    # first try to return shared results
    shared_preset_path = _validate_results(shared_results)

    if shared_preset_path:
        return os.path.dirname(shared_preset_path)

    # then try installed results
    installed_preset_path = _validate_results(installed_results)

    if installed_preset_path:
        return os.path.dirname(installed_preset_path)

    # if nothing found then return False
    return False

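A hedged usage sketch for the two new render utilities. It only runs inside a Flame session; the `clip` variable and the preset name are assumed for illustration:

```python
import os

preset_name = "Jpeg (8-bit).xml"
preset_dir = get_preset_path_by_xml_name(preset_name)

if preset_dir:
    preset_path = os.path.join(preset_dir, preset_name)
    # export a single thumbnail frame from source frame 50;
    # `clip` is assumed to be a flame PyClip obtained in the session
    export_clip("/tmp/thumbnails", clip, preset_path, thumb_frame_number=50)
```
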
@@ -25,7 +25,7 @@ class WireTapCom(object):

    This way we are able to set new project with settings and
    correct colorspace policy. Also we are able to create new user
-   or get actuall user with similar name (users are usually cloning
+   or get actual user with similar name (users are usually cloning
    their profiles and adding date stamp into suffix).
    """

@@ -214,7 +214,7 @@ class WireTapCom(object):

        volumes = []

-       # go trough all children and get volume names
+       # go through all children and get volume names
        child_obj = WireTapNodeHandle()
        for child_idx in range(children_num):

@@ -254,7 +254,7 @@ class WireTapCom(object):
        filtered_users = [user for user in used_names if user_name in user]

        if filtered_users:
-           # todo: need to find lastly created following regex patern for
+           # todo: need to find lastly created following regex pattern for
            #  date used in name
            return filtered_users.pop()

@@ -299,7 +299,7 @@ class WireTapCom(object):

        usernames = []

-       # go trough all children and get volume names
+       # go through all children and get volume names
        child_obj = WireTapNodeHandle()
        for child_idx in range(children_num):

@@ -346,7 +346,7 @@ class WireTapCom(object):
        if not requested:
            raise AttributeError((
                "Error: Cannot request number of "
-               "childrens from the node {}. Make sure your "
+               "children from the node {}. Make sure your "
                "wiretap service is running: {}").format(
                    parent_path, parent.lastError())
            )

@@ -234,7 +234,7 @@ class FtrackComponentCreator:
        ).first()

        if component_entity:
-           # overwrite existing members in component enity
+           # overwrite existing members in component entity
            # - get data for member from `ftrack.origin` location
            self._overwrite_members(component_entity, comp_data)

@@ -304,7 +304,7 @@ class FlameToFtrackPanel(object):
        self._resolve_project_entity()
        self._save_ui_state_to_cfg()

-       # get hanldes from gui input
+       # get handles from gui input
        handles = self.handles_input.text()

        # get frame start from gui input

@@ -517,7 +517,7 @@ class FlameToFtrackPanel(object):
        if self.temp_data_dir:
            shutil.rmtree(self.temp_data_dir)
            self.temp_data_dir = None
-           print("All Temp data were destroied ...")
+           print("All Temp data were destroyed ...")

    def close(self):
        self._save_ui_state_to_cfg()

@@ -13,7 +13,7 @@ def openpype_install():
    """
    openpype.install()
    avalon.api.install(opfapi)
-   print("Avalon registred hosts: {}".format(
+   print("Avalon registered hosts: {}".format(
        avalon.api.registered_host()))

@@ -101,7 +101,7 @@ def app_initialized(parent=None):
    """
    Initialisation of the hook is starting from here

-   First it needs to test if it can import the flame modul.
+   First it needs to test if it can import the flame module.
    This will happen only in case a project has been loaded.
    Then `app_initialized` will load main Framework which will load
    all menu objects as flame_apps.

@@ -65,7 +65,7 @@ def _sync_utility_scripts(env=None):
        if _itm not in remove_black_list:
            skip = True

-       # do not skyp if pyc in extension
+       # do not skip if pyc in extension
        if not os.path.isdir(_itm) and "pyc" in os.path.splitext(_itm)[-1]:
            skip = False

@@ -125,3 +125,18 @@ def setup(env=None):
    _sync_utility_scripts(env)

    log.info("Flame OpenPype wrapper has been installed")
+
+
+def get_flame_version():
+    import flame
+
+    return {
+        "full": flame.get_version(),
+        "major": flame.get_version_major(),
+        "minor": flame.get_version_minor(),
+        "patch": flame.get_version_patch()
+    }
+
+
+def get_flame_install_root():
+    return "/opt/Autodesk"

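These two helpers feed the preset lookup in render_utils.py. A sketch of how the search roots are composed, using illustrative stand-in values since flame.get_version() is only available in a running session:

```python
# stand-ins for get_flame_install_root() and get_flame_version()["full"]
install_root = "/opt/Autodesk"
version = "2022.2"

shared_root = "{}/shared/export/presets".format(install_root)
installed_root = "{}/presets/{}/export/presets/flame".format(
    install_root, version)

print(shared_root)     # /opt/Autodesk/shared/export/presets
print(installed_root)  # /opt/Autodesk/presets/2022.2/export/presets/flame
```
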
@@ -14,7 +14,7 @@ from pprint import pformat
class FlamePrelaunch(PreLaunchHook):
    """ Flame prelaunch hook

-   Will make sure flame_script_dirs are coppied to user's folder defined
+   Will make sure flame_script_dirs are copied to user's folder defined
    in environment var FLAME_SCRIPT_DIR.
    """
    app_groups = ["flame"]

@@ -127,7 +127,7 @@ def create_time_effects(otio_clip, item):
    # # add otio effect to clip effects
    # otio_clip.effects.append(otio_effect)

-   # # loop trought and get all Timewarps
+   # # loop through and get all Timewarps
    # for effect in subTrackItems:
    #     if ((track_item not in effect.linkedItems())
    #             and (len(effect.linkedItems()) > 0)):

@@ -284,23 +284,20 @@ def create_otio_reference(clip_data):
    # get padding and other file infos
    log.debug("_ path: {}".format(path))

-   is_sequence = padding = utils.get_frame_from_path(path)
-   if is_sequence:
-       number = utils.get_frame_from_path(path)
-       file_head = file_name.split(number)[:-1]
-       frame_start = int(number)
-
-   frame_duration = clip_data["source_duration"]
-   otio_ex_ref_item = None
+   is_sequence = frame_number = utils.get_frame_from_filename(file_name)
+   if is_sequence:
+       file_head = file_name.split(frame_number)[:-1]
+       frame_start = int(frame_number)
+       padding = len(frame_number)

        metadata.update({
            "isSequence": True,
            "padding": padding
        })

    otio_ex_ref_item = None

    if is_sequence:
        # if it is file sequence try to create `ImageSequenceReference`
        # the OTIO might not be compatible so return nothing and do it old way
        try:

@@ -322,10 +319,12 @@ def create_otio_reference(clip_data):
            pass

    if not otio_ex_ref_item:
-       reformat_path = utils.get_reformated_path(path, padded=False)
+       dirname, file_name = os.path.split(path)
+       file_name = utils.get_reformated_filename(file_name, padded=False)
+       reformated_path = os.path.join(dirname, file_name)
        # in case old OTIO or video file create `ExternalReference`
        otio_ex_ref_item = otio.schema.ExternalReference(
-           target_url=reformat_path,
+           target_url=reformated_path,
            available_range=create_otio_time_range(
                frame_start,
                frame_duration,

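The fallback branch above builds a plain ExternalReference when the sequence-aware schema is unavailable or the clip is a movie file. A minimal self-contained sketch of that object, with illustrative values:

```python
import opentimelineio as otio

# illustrative values; in the code above they come from clip_data
frame_start, frame_duration, fps = 1001, 48, 24.0

ref = otio.schema.ExternalReference(
    target_url="/mnt/shots/plate.%04d.exr",
    available_range=otio.opentime.TimeRange(
        start_time=otio.opentime.RationalTime(frame_start, fps),
        duration=otio.opentime.RationalTime(frame_duration, fps),
    ),
)
print(ref.target_url)  # /mnt/shots/plate.%04d.exr
```
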
@@ -346,7 +345,7 @@ def create_otio_clip(clip_data):
    media_reference = create_otio_reference(clip_data)

    # calculate source in
-   first_frame = utils.get_frame_from_path(clip_data["fpath"]) or 0
+   first_frame = utils.get_frame_from_filename(clip_data["fpath"]) or 0
    source_in = int(clip_data["source_in"]) - int(first_frame)

    # create source range

@@ -615,11 +614,11 @@ def create_otio_timeline(sequence):
        # Add Gap if needed
        if itemindex == 0:
            # if it is first track item at track then add
-           # it to previouse item
+           # it to previous item
            prev_item = segment_data

        else:
-           # get previouse item
+           # get previous item
            prev_item = segments_ordered[itemindex - 1]

        log.debug("_ segment_data: {}".format(segment_data))

@@ -3,6 +3,8 @@ import opentimelineio as otio
import logging
log = logging.getLogger(__name__)

+FRAME_PATTERN = re.compile(r"[\._](\d+)[\.]")
+

def timecode_to_frames(timecode, framerate):
    rt = otio.opentime.from_timecode(timecode, framerate)

@@ -19,77 +21,71 @@ def frames_to_seconds(frames, framerate):
    return otio.opentime.to_seconds(rt)


-def get_reformated_path(path, padded=True):
+def get_reformated_filename(filename, padded=True):
    """
    Return fixed python expression path

    Args:
-       path (str): path url or simple file name
+       filename (str): file name

    Returns:
        type: string with reformated path

    Example:
-       get_reformated_path("plate.1001.exr") > plate.%04d.exr
+       get_reformated_filename("plate.1001.exr") > plate.%04d.exr

    """
-   padding = get_padding_from_path(path)
-   found = get_frame_from_path(path)
+   found = FRAME_PATTERN.search(filename)

    if not found:
-       log.info("Path is not sequence: {}".format(path))
-       return path
+       log.info("File name is not sequence: {}".format(filename))
+       return filename

-   if padded:
-       path = path.replace(found, "%0{}d".format(padding))
-   else:
-       path = path.replace(found, "%d")
+   padding = get_padding_from_filename(filename)

-   return path
+   replacement = "%0{}d".format(padding) if padded else "%d"
+   start_idx, end_idx = found.span(1)
+
+   return replacement.join(
+       [filename[:start_idx], filename[end_idx:]]
+   )


-def get_padding_from_path(path):
+def get_padding_from_filename(filename):
    """
    Return padding number from Flame path style

    Args:
-       path (str): path url or simple file name
+       filename (str): file name

    Returns:
        int: padding number

    Example:
-       get_padding_from_path("plate.0001.exr") > 4
+       get_padding_from_filename("plate.0001.exr") > 4

    """
-   found = get_frame_from_path(path)
+   found = get_frame_from_filename(filename)

-   if found:
-       return len(found)
-   else:
-       return None
+   return len(found) if found else None


-def get_frame_from_path(path):
+def get_frame_from_filename(filename):
    """
    Return sequence number from Flame path style

    Args:
-       path (str): path url or simple file name
+       filename (str): file name

    Returns:
        int: sequence frame number

    Example:
-       def get_frame_from_path(path):
+       def get_frame_from_filename(path):
            ("plate.0001.exr") > 0001

    """
-   frame_pattern = re.compile(r"[._](\d+)[.]")
-
-   found = re.findall(frame_pattern, path)
+   found = re.findall(FRAME_PATTERN, filename)

-   if found:
-       return found.pop()
-   else:
-       return None
+   return found.pop() if found else None

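The move from str.replace() to match spans is more than a rename: replace() rewrites the first occurrence of the frame digits anywhere in the name, while span() touches only the matched frame field. A sketch of the failure case the refactor avoids:

```python
import re

FRAME_PATTERN = re.compile(r"[\._](\d+)[\.]")

name = "0500_plate.0500.exr"
found = FRAME_PATTERN.search(name)
frame = found.group(1)  # "0500"

# old approach: the shot prefix is clobbered instead of the frame field
print(name.replace(frame, "%04d"))   # %04d_plate.0500.exr

# new approach: only the matched span is replaced
s, e = found.span(1)
print(name[:s] + "%04d" + name[e:])  # 0500_plate.%04d.exr
```
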
@@ -16,6 +16,7 @@ class CollectTestSelection(pyblish.api.ContextPlugin):
    order = pyblish.api.CollectorOrder
    label = "test selection"
    hosts = ["flame"]
+   active = False

    def process(self, context):
        self.log.info(

@@ -0,0 +1,256 @@
import pyblish
import openpype
import openpype.hosts.flame.api as opfapi
from openpype.hosts.flame.otio import flame_export

# # developer reload modules
from pprint import pformat


class CollectTimelineInstances(pyblish.api.ContextPlugin):
    """Collect all Timeline segment selection."""

    order = pyblish.api.CollectorOrder - 0.09
    label = "Collect timeline Instances"
    hosts = ["flame"]

    audio_track_items = []

    def process(self, context):
        project = context.data["flameProject"]
        sequence = context.data["flameSequence"]
        self.otio_timeline = context.data["otioTimeline"]
        self.clips_in_reels = opfapi.get_clips_in_reels(project)
        self.fps = context.data["fps"]

        # process all selected
        with opfapi.maintained_segment_selection(sequence) as segments:
            for segment in segments:
                clip_data = opfapi.get_segment_attributes(segment)
                clip_name = clip_data["segment_name"]
                self.log.debug("clip_name: {}".format(clip_name))

                # get openpype tag data
                marker_data = opfapi.get_segment_data_marker(segment)
                self.log.debug("__ marker_data: {}".format(
                    pformat(marker_data)))

                if not marker_data:
                    continue

                if marker_data.get("id") != "pyblish.avalon.instance":
                    continue

                # get file path
                file_path = clip_data["fpath"]

                # get source clip
                source_clip = self._get_reel_clip(file_path)

                first_frame = opfapi.get_frame_from_filename(file_path) or 0

                head, tail = self._get_head_tail(clip_data, first_frame)

                # solve handles length
                marker_data["handleStart"] = min(
                    marker_data["handleStart"], head)
                marker_data["handleEnd"] = min(
                    marker_data["handleEnd"], tail)

                with_audio = bool(marker_data.pop("audio"))

                # add marker data to instance data
                inst_data = dict(marker_data.items())

                asset = marker_data["asset"]
                subset = marker_data["subset"]

                # insert family into families
                family = marker_data["family"]
                families = [str(f) for f in marker_data["families"]]
                families.insert(0, str(family))

                # form label
                label = asset
                if asset != clip_name:
                    label += " ({})".format(clip_name)
                label += " {}".format(subset)
                label += " {}".format("[" + ", ".join(families) + "]")

                inst_data.update({
                    "name": "{}_{}".format(asset, subset),
                    "label": label,
                    "asset": asset,
                    "item": segment,
                    "families": families,
                    "publish": marker_data["publish"],
                    "fps": self.fps,
                    "flameSourceClip": source_clip,
                    "sourceFirstFrame": int(first_frame),
                    "path": file_path
                })

                # get otio clip data
                otio_data = self._get_otio_clip_instance_data(clip_data) or {}
                self.log.debug("__ otio_data: {}".format(pformat(otio_data)))

                # add to instance data
                inst_data.update(otio_data)
                self.log.debug("__ inst_data: {}".format(pformat(inst_data)))

                # add resolution
                self._get_resolution_to_data(inst_data, context)

                # create instance
                instance = context.create_instance(**inst_data)

                # add colorspace data
                instance.data.update({
                    "versionData": {
                        "colorspace": clip_data["colour_space"],
                    }
                })

                # create shot instance for shot attributes create/update
                self._create_shot_instance(context, clip_name, **inst_data)

                self.log.info("Creating instance: {}".format(instance))
                self.log.info(
                    "_ instance.data: {}".format(pformat(instance.data)))

                if not with_audio:
                    continue

                # add audioReview attribute to plate instance data
                # if reviewTrack is on
                if marker_data.get("reviewTrack") is not None:
                    instance.data["reviewAudio"] = True

    def _get_head_tail(self, clip_data, first_frame):
        # calculate head and tail with forward compatibility
        head = clip_data.get("segment_head")
        tail = clip_data.get("segment_tail")

        if not head:
            head = int(clip_data["source_in"]) - int(first_frame)
        if not tail:
            tail = int(
                clip_data["source_duration"] - (
                    head + clip_data["record_duration"]
                )
            )
        return head, tail

    def _get_reel_clip(self, path):
        match_reel_clip = [
            clip for clip in self.clips_in_reels
            if clip["fpath"] == path
        ]
        if match_reel_clip:
            return match_reel_clip.pop()

    def _get_resolution_to_data(self, data, context):
        assert data.get("otioClip"), "Missing `otioClip` data"

        # solve source resolution option
        if data.get("sourceResolution", None):
            otio_clip_metadata = data[
                "otioClip"].media_reference.metadata
            data.update({
                "resolutionWidth": otio_clip_metadata[
                    "openpype.source.width"],
                "resolutionHeight": otio_clip_metadata[
                    "openpype.source.height"],
                "pixelAspect": otio_clip_metadata[
                    "openpype.source.pixelAspect"]
            })
        else:
            otio_tl_metadata = context.data["otioTimeline"].metadata
            data.update({
                "resolutionWidth": otio_tl_metadata["openpype.timeline.width"],
                "resolutionHeight": otio_tl_metadata[
                    "openpype.timeline.height"],
                "pixelAspect": otio_tl_metadata[
                    "openpype.timeline.pixelAspect"]
            })

    def _create_shot_instance(self, context, clip_name, **data):
        master_layer = data.get("heroTrack")
        hierarchy_data = data.get("hierarchyData")
        asset = data.get("asset")

        if not master_layer:
            return

        if not hierarchy_data:
            return

        asset = data["asset"]
        subset = "shotMain"

        # insert family into families
        family = "shot"

        # form label
        label = asset
        if asset != clip_name:
            label += " ({}) ".format(clip_name)
        label += " {}".format(subset)
        label += " [{}]".format(family)

        data.update({
            "name": "{}_{}".format(asset, subset),
            "label": label,
            "subset": subset,
            "asset": asset,
            "family": family,
            "families": []
        })

        instance = context.create_instance(**data)
        self.log.info("Creating instance: {}".format(instance))
        self.log.debug(
            "_ instance.data: {}".format(pformat(instance.data)))

    def _get_otio_clip_instance_data(self, clip_data):
        """
        Return otio objects for timeline, track and clip

        Args:
            timeline_item_data (dict): timeline_item_data from list returned by
                resolve.get_current_timeline_items()
            otio_timeline (otio.schema.Timeline): otio object

        Returns:
            dict: otio clip object

        """
        segment = clip_data["PySegment"]
        s_track_name = segment.parent.name.get_value()
        timeline_range = self._create_otio_time_range_from_timeline_item_data(
            clip_data)

        for otio_clip in self.otio_timeline.each_clip():
            track_name = otio_clip.parent().name
            parent_range = otio_clip.range_in_parent()
            if s_track_name not in track_name:
                continue
            if otio_clip.name not in segment.name.get_value():
                continue
            if openpype.lib.is_overlapping_otio_ranges(
                    parent_range, timeline_range, strict=True):

                # add pypedata marker to otio_clip metadata
                for marker in otio_clip.markers:
                    if opfapi.MARKER_NAME in marker.name:
                        otio_clip.metadata.update(marker.metadata)
                return {"otioClip": otio_clip}

        return None

    def _create_otio_time_range_from_timeline_item_data(self, clip_data):
        frame_start = int(clip_data["record_in"])
        frame_duration = int(clip_data["record_duration"])

        return flame_export.create_otio_time_range(
            frame_start, frame_duration, self.fps)

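A worked example of the head/tail fallback in _get_head_tail() above, with illustrative frame numbers (media starting at 1001, cut range 1009 to 1056):

```python
first_frame = 1001       # first frame available in the media
source_in = 1009         # cut-in frame
source_duration = 100    # total media frames
record_duration = 48     # frames used in the timeline

head = source_in - first_frame                     # 8 spare frames before cut in
tail = source_duration - (head + record_duration)  # 44 spare frames after cut out
print(head, tail)  # 8 44
```
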
@@ -0,0 +1,57 @@
import pyblish.api
import avalon.api as avalon
import openpype.lib as oplib
import openpype.hosts.flame.api as opfapi
from openpype.hosts.flame.otio import flame_export


class CollecTimelineOTIO(pyblish.api.ContextPlugin):
    """Inject the current working context into publish context"""

    label = "Collect Timeline OTIO"
    order = pyblish.api.CollectorOrder - 0.099

    def process(self, context):
        # plugin defined
        family = "workfile"
        variant = "otioTimeline"

        # main
        asset_doc = context.data["assetEntity"]
        task_name = avalon.Session["AVALON_TASK"]
        project = opfapi.get_current_project()
        sequence = opfapi.get_current_sequence(opfapi.CTX.selection)

        # create subset name
        subset_name = oplib.get_subset_name_with_asset_doc(
            family,
            variant,
            task_name,
            asset_doc,
        )

        # adding otio timeline to context
        with opfapi.maintained_segment_selection(sequence):
            otio_timeline = flame_export.create_otio_timeline(sequence)

        instance_data = {
            "name": subset_name,
            "asset": asset_doc["name"],
            "subset": subset_name,
            "family": "workfile"
        }

        # create instance with workfile
        instance = context.create_instance(**instance_data)
        self.log.info("Creating instance: {}".format(instance))

        # update context with main project attributes
        context.data.update({
            "flameProject": project,
            "flameSequence": sequence,
            "otioTimeline": otio_timeline,
            "currentFile": "Flame/{}/{}".format(
                project.name, sequence.name
            ),
            "fps": float(str(sequence.frame_rate)[:-4])
        })

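The fps line above assumes Flame reports the frame rate as a string such as "25.0 fps"; stripping the last four characters leaves the numeric part:

```python
frame_rate = "25.0 fps"  # illustrative value of sequence.frame_rate
fps = float(str(frame_rate)[:-4])
print(fps)  # 25.0
```
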

openpype/hosts/flame/plugins/publish/extract_otio_file.py (new file, 43 additions)

@@ -0,0 +1,43 @@
import os
import pyblish.api
import openpype.api
import opentimelineio as otio


class ExtractOTIOFile(openpype.api.Extractor):
    """
    Extractor export OTIO file
    """

    label = "Extract OTIO file"
    order = pyblish.api.ExtractorOrder - 0.45
    families = ["workfile"]
    hosts = ["flame"]

    def process(self, instance):
        # create representation data
        if "representations" not in instance.data:
            instance.data["representations"] = []

        name = instance.data["name"]
        staging_dir = self.staging_dir(instance)

        otio_timeline = instance.context.data["otioTimeline"]
        # create otio timeline representation
        otio_file_name = name + ".otio"
        otio_file_path = os.path.join(staging_dir, otio_file_name)

        # export otio file to temp dir
        otio.adapters.write_to_file(otio_timeline, otio_file_path)

        representation_otio = {
            'name': "otio",
            'ext': "otio",
            'files': otio_file_name,
            "stagingDir": staging_dir,
        }

        instance.data["representations"].append(representation_otio)

        self.log.info("Added OTIO file representation: {}".format(
            representation_otio))

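A minimal round-trip sketch of the adapter call this extractor relies on, runnable anywhere opentimelineio is installed:

```python
import os
import tempfile

import opentimelineio as otio

timeline = otio.schema.Timeline(name="demo")
path = os.path.join(tempfile.mkdtemp(), "demo.otio")

otio.adapters.write_to_file(timeline, path)
print(otio.adapters.read_from_file(path).name)  # demo
```
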

openpype/hosts/flame/plugins/publish/extract_subset_resources.py (new file, 172 additions)

@@ -0,0 +1,172 @@
import os
from pprint import pformat
from copy import deepcopy

import pyblish.api
import openpype.api
from openpype.hosts.flame import api as opfapi


class ExtractSubsetResources(openpype.api.Extractor):
    """
    Extractor for transcoding files from Flame clip
    """

    label = "Extract subset resources"
    order = pyblish.api.ExtractorOrder
    families = ["clip"]
    hosts = ["flame"]

    # plugin defaults
    default_presets = {
        "thumbnail": {
            "ext": "jpg",
            "xml_preset_file": "Jpeg (8-bit).xml",
            "xml_preset_dir": "",
            "representation_add_range": False,
            "representation_tags": ["thumbnail"]
        },
        "ftrackpreview": {
            "ext": "mov",
            "xml_preset_file": "Apple iPad (1920x1080).xml",
            "xml_preset_dir": "",
            "representation_add_range": True,
            "representation_tags": [
                "review",
                "delete"
            ]
        }
    }
    keep_original_representation = False

    # hide publisher during exporting
    hide_ui_on_process = True

    # settings
    export_presets_mapping = {}

    def process(self, instance):

        if (
            self.keep_original_representation
            and "representations" not in instance.data
            or not self.keep_original_representation
        ):
            instance.data["representations"] = []

        frame_start = instance.data["frameStart"]
        handle_start = instance.data["handleStart"]
        frame_start_handle = frame_start - handle_start
        source_first_frame = instance.data["sourceFirstFrame"]
        source_start_handles = instance.data["sourceStartH"]
        source_end_handles = instance.data["sourceEndH"]
        source_duration_handles = (
            source_end_handles - source_start_handles) + 1

        clip_data = instance.data["flameSourceClip"]
        clip = clip_data["PyClip"]

        in_mark = (source_start_handles - source_first_frame) + 1
        out_mark = in_mark + source_duration_handles

        staging_dir = self.staging_dir(instance)

        # add default preset type for thumbnail and reviewable video
        # update them with settings and override in case the same
        # are found in there
        export_presets = deepcopy(self.default_presets)
        export_presets.update(self.export_presets_mapping)

        # with maintained duplication loop all presets
        with opfapi.maintained_object_duplication(clip) as duplclip:
            # loop all preset names and
            for unique_name, preset_config in export_presets.items():
                kwargs = {}
                preset_file = preset_config["xml_preset_file"]
                preset_dir = preset_config["xml_preset_dir"]
                repre_tags = preset_config["representation_tags"]

                # validate xml preset file is filled
                if preset_file == "":
                    raise ValueError(
                        ("Check Settings for {} preset: "
                         "`XML preset file` is not filled").format(
                            unique_name)
                    )

                # resolve xml preset dir if not filled
                if preset_dir == "":
                    preset_dir = opfapi.get_preset_path_by_xml_name(
                        preset_file)

                    if not preset_dir:
                        raise ValueError(
                            ("Check Settings for {} preset: "
                             "`XML preset file` {} is not found").format(
                                unique_name, preset_file)
                        )

                # create preset path
                preset_path = str(os.path.join(
                    preset_dir, preset_file
                ))

                # define kwargs based on preset type
                if "thumbnail" in unique_name:
                    kwargs["thumb_frame_number"] = in_mark + (
                        source_duration_handles / 2)
                else:
                    kwargs.update({
                        "in_mark": in_mark,
                        "out_mark": out_mark
                    })

                export_dir_path = str(os.path.join(
                    staging_dir, unique_name
                ))
                os.makedirs(export_dir_path)

                # export
                opfapi.export_clip(
                    export_dir_path, duplclip, preset_path, **kwargs)

                # create representation data
                representation_data = {
                    "name": unique_name,
                    "outputName": unique_name,
                    "ext": preset_config["ext"],
                    "stagingDir": export_dir_path,
                    "tags": repre_tags
                }

                files = os.listdir(export_dir_path)

                # add files to representation but add
                # imagesequence as list
                if (
                    "movie_file" in preset_path
                    or unique_name == "thumbnail"
                ):
                    representation_data["files"] = files.pop()
                else:
                    representation_data["files"] = files

                # add frame range
                if preset_config["representation_add_range"]:
                    representation_data.update({
                        "frameStart": frame_start_handle,
                        "frameEnd": (
                            frame_start_handle + source_duration_handles),
                        "fps": instance.data["fps"]
                    })

                instance.data["representations"].append(representation_data)

                # add review family if found in tags
                if "review" in repre_tags:
                    instance.data["families"].append("review")

                self.log.info("Added representation: {}".format(
                    representation_data))

        self.log.debug("All representations: {}".format(
            pformat(instance.data["representations"])))

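A worked example of the mark arithmetic in process() above, using the same illustrative numbers as before (media starts at 1001, handle-extended cut 1009 to 1056):

```python
source_first_frame = 1001
source_start_handles = 1009   # sourceStartH
source_end_handles = 1056     # sourceEndH

source_duration_handles = (source_end_handles - source_start_handles) + 1  # 48
in_mark = (source_start_handles - source_first_frame) + 1                  # 9
out_mark = in_mark + source_duration_handles                               # 57

# the thumbnail preset picks the middle frame (a float, as in the code above)
thumb_frame_number = in_mark + source_duration_handles / 2                 # 33.0
print(in_mark, out_mark, thumb_frame_number)
```
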
@@ -52,7 +52,7 @@ def install():


def uninstall():
-   """Uninstall all tha was installed
+   """Uninstall all that was installed

    This is where you undo everything that was done in `install()`.
    That means, removing menus, deregistering families and data

@@ -1,6 +1,6 @@
import os
import importlib
-from openpype.lib import PreLaunchHook
+from openpype.lib import PreLaunchHook, ApplicationLaunchFailed
from openpype.hosts.fusion.api import utils

@@ -12,27 +12,29 @@ class FusionPrelaunch(PreLaunchHook):
    app_groups = ["fusion"]

    def execute(self):
-       # making sure pyton 3.6 is installed at provided path
+       # making sure python 3.6 is installed at provided path
        py36_dir = os.path.normpath(self.launch_context.env.get("PYTHON36", ""))
-       assert os.path.isdir(py36_dir), (
-           "Python 3.6 is not installed at the provided folder path. Either "
-           "make sure the `environments\resolve.json` is having correctly "
-           "set `PYTHON36` or make sure Python 3.6 is installed "
-           f"in given path. \nPYTHON36E: `{py36_dir}`"
-       )
-       self.log.info(f"Path to Fusion Python folder: `{py36_dir}`...")
+       if not os.path.isdir(py36_dir):
+           raise ApplicationLaunchFailed(
+               "Python 3.6 is not installed at the provided path.\n"
+               "Either make sure the 'environments/fusion.json' has "
+               "'PYTHON36' set correctly or make sure Python 3.6 is installed "
+               f"in the given path.\n\nPYTHON36: {py36_dir}"
+           )
+       self.log.info(f"Path to Fusion Python folder: '{py36_dir}'...")
        self.launch_context.env["PYTHON36"] = py36_dir

        # setting utility scripts dir for scripts syncing
        us_dir = os.path.normpath(
            self.launch_context.env.get("FUSION_UTILITY_SCRIPTS_DIR", "")
        )
-       assert os.path.isdir(us_dir), (
-           "Fusion utility script dir does not exists. Either make sure "
-           "the `environments\fusion.json` is having correctly set "
-           "`FUSION_UTILITY_SCRIPTS_DIR` or reinstall DaVinci Resolve. \n"
-           f"FUSION_UTILITY_SCRIPTS_DIR: `{us_dir}`"
-       )
+       if not os.path.isdir(us_dir):
+           raise ApplicationLaunchFailed(
+               "Fusion utility script dir does not exist. Either make sure "
+               "the 'environments/fusion.json' has "
+               "'FUSION_UTILITY_SCRIPTS_DIR' set correctly or reinstall "
+               f"Fusion.\n\nFUSION_UTILITY_SCRIPTS_DIR: '{us_dir}'"
+           )

        try:
            __import__("avalon.fusion")

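The switch from assert to a raised exception is not cosmetic: assert statements are removed entirely when Python runs with -O, so the path checks would silently disappear. A sketch of the pattern with a stand-in exception type:

```python
import os


class ApplicationLaunchFailed(Exception):
    """Stand-in for openpype.lib.ApplicationLaunchFailed."""


def require_dir(env_var, path):
    # survives python -O, unlike an assert, and carries a user-facing message
    if not os.path.isdir(path):
        raise ApplicationLaunchFailed(
            "{} does not point to an existing folder.\n\n"
            "{}: {}".format(env_var, env_var, path)
        )
```
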
@@ -185,22 +185,22 @@ class FusionLoadSequence(api.Loader):
        - We do the same like Fusion - allow fusion to take control.

        - HoldFirstFrame: Fusion resets this to 0
-           - We preverse the value.
+           - We preserve the value.

        - HoldLastFrame: Fusion resets this to 0
-           - We preverse the value.
+           - We preserve the value.

        - Reverse: Fusion resets to disabled if "Loop" is not enabled.
            - We preserve the value.

        - Depth: Fusion resets to "Format"
-           - We preverse the value.
+           - We preserve the value.

        - KeyCode: Fusion resets to ""
-           - We preverse the value.
+           - We preserve the value.

        - TimeCodeOffset: Fusion resets to 0
-           - We preverse the value.
+           - We preserve the value.

        """

@@ -124,7 +124,7 @@ class FusionSubmitDeadline(pyblish.api.InstancePlugin):

        # Include critical variables with submission
        keys = [
-           # TODO: This won't work if the slaves don't have accesss to
+           # TODO: This won't work if the slaves don't have access to
            # these paths, such as if slaves are running Linux and the
            # submitter is on Windows.
            "PYTHONPATH",

@@ -85,7 +85,7 @@ def _format_filepath(session):
    new_filename = "{}_{}_slapcomp_v001.comp".format(project, asset)
    new_filepath = os.path.join(slapcomp_dir, new_filename)

-   # Create new unqiue filepath
+   # Create new unique filepath
    if os.path.exists(new_filepath):
        new_filepath = pype.version_up(new_filepath)

@@ -16,7 +16,7 @@ def main(env):
    # activate resolve from pype
    avalon.api.install(avalon.fusion)

-   log.info(f"Avalon registred hosts: {avalon.api.registered_host()}")
+   log.info(f"Avalon registered hosts: {avalon.api.registered_host()}")

    menu.launch_openpype_menu()

@@ -2,13 +2,13 @@

### Development

-#### Setting up ESLint as linter for javasript code
+#### Setting up ESLint as linter for javascript code

You need [node.js](https://nodejs.org/en/) installed. All you need to do then
is to run:

```sh
-npm intall
+npm install
```

in **js** directory. This will install eslint and all requirements locally.

@@ -18,11 +18,11 @@ if (typeof $ === 'undefined'){
 * @classdesc Image Sequence loader JS code.
 */
var ImageSequenceLoader = function() {
-   this.PNGTransparencyMode = 0; // Premultiplied wih Black
-   this.TGATransparencyMode = 0; // Premultiplied wih Black
-   this.SGITransparencyMode = 0; // Premultiplied wih Black
+   this.PNGTransparencyMode = 0; // Premultiplied with Black
+   this.TGATransparencyMode = 0; // Premultiplied with Black
+   this.SGITransparencyMode = 0; // Premultiplied with Black
    this.LayeredPSDTransparencyMode = 1; // Straight
-   this.FlatPSDTransparencyMode = 2; // Premultiplied wih White
+   this.FlatPSDTransparencyMode = 2; // Premultiplied with White
};

@@ -84,7 +84,7 @@ ImageSequenceLoader.getUniqueColumnName = function(columnPrefix) {
 * @return {string} Read node name
 *
 * @example
-* // Agrguments are in following order:
+* // Arguments are in following order:
 * var args = [
 *     files, // Files in file sequences.
 *     asset, // Asset name.

@@ -97,11 +97,11 @@ ImageSequenceLoader.prototype.importFiles = function(args) {
    MessageLog.trace("ImageSequence:: " + typeof PypeHarmony);
    MessageLog.trace("ImageSequence $:: " + typeof $);
    MessageLog.trace("ImageSequence OH:: " + typeof PypeHarmony.OpenHarmony);
-   var PNGTransparencyMode = 0; // Premultiplied wih Black
-   var TGATransparencyMode = 0; // Premultiplied wih Black
-   var SGITransparencyMode = 0; // Premultiplied wih Black
+   var PNGTransparencyMode = 0; // Premultiplied with Black
+   var TGATransparencyMode = 0; // Premultiplied with Black
+   var SGITransparencyMode = 0; // Premultiplied with Black
    var LayeredPSDTransparencyMode = 1; // Straight
-   var FlatPSDTransparencyMode = 2; // Premultiplied wih White
+   var FlatPSDTransparencyMode = 2; // Premultiplied with White

    var doc = $.scn;
    var files = args[0];

@@ -224,7 +224,7 @@ ImageSequenceLoader.prototype.importFiles = function(args) {
 * @return {string} Read node name
 *
 * @example
-* // Agrguments are in following order:
+* // Arguments are in following order:
 * var args = [
 *     files, // Files in file sequences
 *     name, // Node name

@@ -13,11 +13,11 @@ copy_files = """function copyFile(srcFilename, dstFilename)
}
"""

-import_files = """var PNGTransparencyMode = 1; //Premultiplied wih Black
-var TGATransparencyMode = 0; //Premultiplied wih Black
-var SGITransparencyMode = 0; //Premultiplied wih Black
+import_files = """var PNGTransparencyMode = 1; //Premultiplied with Black
+var TGATransparencyMode = 0; //Premultiplied with Black
+var SGITransparencyMode = 0; //Premultiplied with Black
var LayeredPSDTransparencyMode = 1; //Straight
-var FlatPSDTransparencyMode = 2; //Premultiplied wih White
+var FlatPSDTransparencyMode = 2; //Premultiplied with White

function getUniqueColumnName( column_prefix )
{

@@ -140,11 +140,11 @@ function import_files(args)
import_files
"""

-replace_files = """var PNGTransparencyMode = 1; //Premultiplied wih Black
-var TGATransparencyMode = 0; //Premultiplied wih Black
-var SGITransparencyMode = 0; //Premultiplied wih Black
+replace_files = """var PNGTransparencyMode = 1; //Premultiplied with Black
+var TGATransparencyMode = 0; //Premultiplied with Black
+var SGITransparencyMode = 0; //Premultiplied with Black
var LayeredPSDTransparencyMode = 1; //Straight
-var FlatPSDTransparencyMode = 2; //Premultiplied wih White
+var FlatPSDTransparencyMode = 2; //Premultiplied with White

function replace_files(args)
{

@@ -31,7 +31,7 @@ def beforeNewProjectCreated(event):

def afterNewProjectCreated(event):
    log.info("after new project created event...")
-   # sync avalon data to project properities
+   # sync avalon data to project properties
    sync_avalon_data_to_workfile()

    # add tags from preset

@@ -51,7 +51,7 @@ def beforeProjectLoad(event):

def afterProjectLoad(event):
    log.info("after project load event...")
-   # sync avalon data to project properities
+   # sync avalon data to project properties
    sync_avalon_data_to_workfile()

    # add tags from preset

@@ -299,7 +299,7 @@ def get_track_item_pype_data(track_item):
    if not tag:
        return None

-   # get tag metadata attribut
+   # get tag metadata attribute
    tag_data = tag.metadata()
    # convert tag metadata to normal keys names and values to correct types
    for k, v in dict(tag_data).items():

@@ -402,7 +402,7 @@ def sync_avalon_data_to_workfile():
    try:
        project.setProjectDirectory(active_project_root)
    except Exception:
-       # old way of seting it
+       # old way of setting it
        project.setProjectRoot(active_project_root)

    # get project data from avalon db

@@ -614,7 +614,7 @@ def create_nuke_workfile_clips(nuke_workfiles, seq=None):
    if not seq:
        seq = hiero.core.Sequence('NewSequences')
        root.addItem(hiero.core.BinItem(seq))
-   # todo will ned to define this better
+   # todo will need to define this better
    # track = seq[1] # lazy example to get a destination# track
    clips_lst = []
    for nk in nuke_workfiles:

@@ -838,7 +838,7 @@ def apply_colorspace_project():
    # remove the TEMP file as we dont need it
    os.remove(copy_current_file_tmp)

-   # use the code from bellow for changing xml hrox Attributes
+   # use the code from below for changing xml hrox Attributes
    presets.update({"name": os.path.basename(copy_current_file)})

    # read HROX in as QDomDocument

@@ -874,7 +874,7 @@ def apply_colorspace_clips():
        if "default" in clip_colorspace:
            continue

-       # check if any colorspace presets for read is mathing
+       # check if any colorspace presets for read is matching
        preset_clrsp = None
        for k in presets:
            if not bool(re.search(k["regex"], clip_media_source_path)):

@@ -931,7 +931,7 @@ def get_sequence_pattern_and_padding(file):
    Can find file.0001.ext, file.%02d.ext, file.####.ext

    Return:
-       string: any matching sequence patern
+       string: any matching sequence pattern
        int: padding of sequence numbering
    """
    foundall = re.findall(

@@ -950,7 +950,7 @@ def get_sequence_pattern_and_padding(file):


 def sync_clip_name_to_data_asset(track_items_list):
-    # loop trough all selected clips
+    # loop through all selected clips
     for track_item in track_items_list:
         # ignore if parent track is locked or disabled
         if track_item.parent().isLocked():
@@ -92,7 +92,7 @@ def create_time_effects(otio_clip, track_item):
         # add otio effect to clip effects
         otio_clip.effects.append(otio_effect)

-    # loop trought and get all Timewarps
+    # loop through and get all Timewarps
     for effect in subTrackItems:
         if ((track_item not in effect.linkedItems())
                 and (len(effect.linkedItems()) > 0)):
@@ -388,11 +388,11 @@ def create_otio_timeline():
         # Add Gap if needed
         if itemindex == 0:
             # if it is first track item at track then add
-            # it to previouse item
+            # it to previous item
             return track_item

         else:
-            # get previouse item
+            # get previous item
             return track_item.parent().items()[itemindex - 1]

     # get current timeline
@@ -416,11 +416,11 @@ def create_otio_timeline():
         # Add Gap if needed
         if itemindex == 0:
             # if it is first track item at track then add
-            # it to previouse item
+            # it to previous item
             prev_item = track_item

         else:
-            # get previouse item
+            # get previous item
             prev_item = track_item.parent().items()[itemindex - 1]

         # calculate clip frame range difference from each other
@@ -146,7 +146,7 @@ class CreatorWidget(QtWidgets.QDialog):
         # convert label text to normal capitalized text with spaces
         label_text = self.camel_case_split(text)

-        # assign the new text to lable widget
+        # assign the new text to label widget
         label = QtWidgets.QLabel(label_text)
         label.setObjectName("LineLabel")

@@ -337,7 +337,7 @@ class SequenceLoader(avalon.Loader):
                 "Sequentially in order"
             ],
             default="Original timing",
-            help="Would you like to place it at orignal timing?"
+            help="Would you like to place it at original timing?"
         )
     ]

@@ -475,7 +475,7 @@ class ClipLoader:
     def _get_asset_data(self):
         """ Get all available asset data

-        joint `data` key with asset.data dict into the representaion
+        joint `data` key with asset.data dict into the representation

         """
         asset_name = self.context["representation"]["context"]["asset"]
@@ -550,7 +550,7 @@ class ClipLoader:
             (self.timeline_out - self.timeline_in + 1)
             + self.handle_start + self.handle_end) < self.media_duration)

-        # if slate is on then remove the slate frame from begining
+        # if slate is on then remove the slate frame from beginning
         if slate_on:
             self.media_duration -= 1
             self.handle_start += 1
@@ -634,8 +634,8 @@ class PublishClip:
         "track": "sequence",
     }

-    # parents search patern
-    parents_search_patern = r"\{([a-z]*?)\}"
+    # parents search pattern
+    parents_search_pattern = r"\{([a-z]*?)\}"

     # default templates for non-ui use
     rename_default = False
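
For illustration, a minimal Python sketch of what the renamed pattern extracts; the template string below is hypothetical:

    import re

    # pulls the lowercase token out of each "{token}" placeholder
    # of a hierarchy template
    pattern = re.compile(r"\{([a-z]*?)\}")
    print(pattern.findall("{folder}/{sequence}/{shot}"))
    # ['folder', 'sequence', 'shot']
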
@@ -719,7 +719,7 @@ class PublishClip:
         return self.track_item

     def _populate_track_item_default_data(self):
-        """ Populate default formating data from track item. """
+        """ Populate default formatting data from track item. """

         self.track_item_default_data = {
             "_folder_": "shots",
@@ -814,7 +814,7 @@ class PublishClip:
         # mark review layer
         if self.review_track and (
                 self.review_track not in self.review_track_default):
-            # if review layer is defined and not the same as defalut
+            # if review layer is defined and not the same as default
             self.review_layer = self.review_track
         # shot num calculate
         if self.rename_index == 0:
@@ -863,7 +863,7 @@ class PublishClip:
         # in case track name and subset name is the same then add
         if self.subset_name == self.track_name:
             hero_data["subset"] = self.subset
-        # assing data to return hierarchy data to tag
+        # assign data to return hierarchy data to tag
         tag_hierarchy_data = hero_data

         # add data to return data dict
@@ -897,7 +897,7 @@ class PublishClip:
             type
         )

-        # first collect formating data to use for formating template
+        # first collect formatting data to use for formatting template
         formating_data = {}
         for _k, _v in self.hierarchy_data.items():
             value = _v["value"].format(
@@ -915,9 +915,9 @@ class PublishClip:
         """ Create parents and return it in list. """
         self.parents = []

-        patern = re.compile(self.parents_search_patern)
+        pattern = re.compile(self.parents_search_pattern)

-        par_split = [(patern.findall(t).pop(), t)
+        par_split = [(pattern.findall(t).pop(), t)
                      for t in self.hierarchy.split("/")]

         for type, template in par_split:
@@ -1,5 +1,5 @@
 # PimpMySpreadsheet 1.0, Antony Nasce, 23/05/13.
-# Adds custom spreadsheet columns and right-click menu for setting the Shot Status, and Artist Shot Assignement.
+# Adds custom spreadsheet columns and right-click menu for setting the Shot Status, and Artist Shot Assignment.
 # gStatusTags is a global dictionary of key(status)-value(icon) pairs, which can be overridden with custom icons if required
 # Requires Hiero 1.7v2 or later.
 # Install Instructions: Copy to ~/.hiero/Python/StartupUI
@@ -172,7 +172,7 @@ def add_tags_to_workfile():
         }
     }

-    # loop trough tag data dict and create deep tag structure
+    # loop through tag data dict and create deep tag structure
     for _k, _val in nks_pres_tags.items():
         # check if key is not decorated with [] so it is defined as bin
         bin_find = None
@@ -139,7 +139,7 @@ class CreateShotClip(phiero.Creator):
             "type": "QComboBox",
             "label": "Subset Name",
             "target": "ui",
-            "toolTip": "chose subset name patern, if <track_name> is selected, name of track layer will be used",  # noqa
+            "toolTip": "chose subset name pattern, if <track_name> is selected, name of track layer will be used",  # noqa
             "order": 0},
         "subsetFamily": {
             "value": ["plate", "take"],
@@ -34,7 +34,7 @@ class PreCollectClipEffects(pyblish.api.InstancePlugin):
             if clip_effect_items:
                 tracks_effect_items[track_index] = clip_effect_items

-        # process all effects and devide them to instance
+        # process all effects and divide them to instance
         for _track_index, sub_track_items in tracks_effect_items.items():
             # skip if track index is the same as review track index
             if review and review_track_index == _track_index:
@@ -156,7 +156,7 @@ class PreCollectClipEffects(pyblish.api.InstancePlugin):
             'postage_stamp_frame', 'maskChannel', 'export_cc',
             'select_cccid', 'mix', 'version', 'matrix']

-        # loop trough all knobs and collect not ignored
+        # loop through all knobs and collect not ignored
         # and any with any value
         for knob in node.knobs().keys():
             # skip nodes in ignore keys
@@ -264,7 +264,7 @@ class PrecollectInstances(pyblish.api.ContextPlugin):
         timeline_range = self.create_otio_time_range_from_timeline_item_data(
             track_item)

-        # loop trough audio track items and search for overlaping clip
+        # loop through audio track items and search for overlapping clip
         for otio_audio in self.audio_track_items:
             parent_range = otio_audio.range_in_parent()

@@ -5,7 +5,7 @@ class CollectClipResolution(pyblish.api.InstancePlugin):
     """Collect clip geometry resolution"""

     order = pyblish.api.CollectorOrder - 0.1
-    label = "Collect Clip Resoluton"
+    label = "Collect Clip Resolution"
     hosts = ["hiero"]
     families = ["clip"]

@@ -52,7 +52,7 @@ class PrecollectRetime(api.InstancePlugin):
             handle_end
         ))

-        # loop withing subtrack items
+        # loop within subtrack items
         time_warp_nodes = []
         source_in_change = 0
         source_out_change = 0
@@ -76,7 +76,7 @@ class PrecollectRetime(api.InstancePlugin):
                 (timeline_in - handle_start),
                 (timeline_out + handle_end) + 1)
             ]
-            # calculate differnce
+            # calculate difference
             diff_in = (node["lookup"].getValueAt(
                 timeline_in)) - timeline_in
             diff_out = (node["lookup"].getValueAt(
@@ -74,6 +74,9 @@ class CollectInstances(pyblish.api.ContextPlugin):

             instance = context.create_instance(label)

+            # Include `families` using `family` data
+            instance.data["families"] = [instance.data["family"]]
+
             instance[:] = [node]
             instance.data.update(data)

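
For context, pyblish plug-ins are filtered against the `families` list, so seeding it from the single `family` value lets the new instance match family-based plug-ins. A minimal sketch; the family name is hypothetical:

    instance.data["family"] = "camera"
    instance.data["families"] = [instance.data["family"]]
    assert instance.data["families"] == ["camera"]
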
@@ -37,5 +37,7 @@ class ExtractVDBCache(openpype.api.Extractor):
             "ext": "vdb",
             "files": output,
             "stagingDir": staging_dir,
+            "frameStart": instance.data["frameStart"],
+            "frameEnd": instance.data["frameEnd"],
         }
         instance.data["representations"].append(representation)
@@ -93,31 +93,20 @@ def override_toolbox_ui():
         return

     # Create our controls
-    background_color = (0.267, 0.267, 0.267)
     controls = []
-    look_assigner = None
-    try:
-        look_assigner = host_tools.get_tool_by_name(
-            "lookassigner",
-            parent=pipeline._parent
-        )
-    except Exception:
-        log.warning("Couldn't create Look assigner window.", exc_info=True)
-
-    if look_assigner is not None:
-        controls.append(
-            mc.iconTextButton(
-                "pype_toolbox_lookmanager",
-                annotation="Look Manager",
-                label="Look Manager",
-                image=os.path.join(icons, "lookmanager.png"),
-                command=host_tools.show_look_assigner,
-                bgc=background_color,
-                width=icon_size,
-                height=icon_size,
-                parent=parent
-            )
+    controls.append(
+        mc.iconTextButton(
+            "pype_toolbox_lookmanager",
+            annotation="Look Manager",
+            label="Look Manager",
+            image=os.path.join(icons, "lookmanager.png"),
+            command=host_tools.show_look_assigner,
+            width=icon_size,
+            height=icon_size,
+            parent=parent
+        )
+    )

     controls.append(
         mc.iconTextButton(
@@ -128,7 +117,6 @@ def override_toolbox_ui():
             command=lambda: host_tools.show_workfiles(
                 parent=pipeline._parent
             ),
-            bgc=background_color,
             width=icon_size,
             height=icon_size,
             parent=parent
@@ -144,7 +132,6 @@ def override_toolbox_ui():
             command=lambda: host_tools.show_loader(
                 parent=pipeline._parent, use_context=True
             ),
-            bgc=background_color,
             width=icon_size,
             height=icon_size,
             parent=parent
@@ -160,7 +147,6 @@ def override_toolbox_ui():
             command=lambda: host_tools.show_scene_inventory(
                 parent=pipeline._parent
             ),
-            bgc=background_color,
             width=icon_size,
             height=icon_size,
             parent=parent
@@ -184,7 +184,7 @@ def uv_from_element(element):
     parent = element.split(".", 1)[0]

     # Maya is funny in that when the transform of the shape
-    # of the component elemen has children, the name returned
+    # of the component element has children, the name returned
     # by that elementection is the shape. Otherwise, it is
     # the transform. So lets see what type we're dealing with here.
     if cmds.nodeType(parent) in supported:
@@ -733,7 +733,7 @@ def namespaced(namespace, new=True):
         str: The namespace that is used during the context

     """
-    original = cmds.namespaceInfo(cur=True)
+    original = cmds.namespaceInfo(cur=True, absoluteName=True)
     if new:
         namespace = avalon.maya.lib.unique_namespace(namespace)
         cmds.namespace(add=namespace)
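
The added `absoluteName=True` matters when the stored namespace is restored after the context exits: a relative name can resolve differently once the current namespace has changed, while an absolute one (e.g. ":foo:bar") is unambiguous. A minimal sketch, assuming Maya's `cmds` is available:

    from maya import cmds

    # absolute form, safe to restore from any current namespace
    original = cmds.namespaceInfo(cur=True, absoluteName=True)
    try:
        cmds.namespace(set=":some:other")
    finally:
        cmds.namespace(set=original)
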
@@ -1630,7 +1630,7 @@ def get_container_transforms(container, members=None, root=False):
     Args:
         container (dict): the container
         members (list): optional and convenience argument
-        root (bool): return highest node in hierachy if True
+        root (bool): return highest node in hierarchy if True

     Returns:
         root (list / str):
@@ -2517,7 +2517,7 @@ class shelf():
 def _get_render_instances():
     """Return all 'render-like' instances.

-    This returns list of instance sets that needs to receive informations
+    This returns list of instance sets that needs to receive information
     about render layer changes.

     Returns:
@@ -506,8 +506,8 @@
                 "transforms",
                 "local"
             ],
-            "title": "# Copy Local Transfroms",
-            "tooltip": "Copy local transfroms"
+            "title": "# Copy Local Transforms",
+            "tooltip": "Copy local transforms"
         },
         {
             "type": "action",
@@ -520,8 +520,8 @@
                 "transforms",
                 "matrix"
             ],
-            "title": "# Copy Matrix Transfroms",
-            "tooltip": "Copy Matrix transfroms"
+            "title": "# Copy Matrix Transforms",
+            "tooltip": "Copy Matrix transforms"
         },
         {
             "type": "action",
@@ -842,7 +842,7 @@
             "sourcetype": "file",
             "tags": ["cleanup", "remove_user_defined_attributes"],
             "title": "# Remove User Defined Attributes",
-            "tooltip": "Remove all user-defined attributs from all nodes"
+            "tooltip": "Remove all user-defined attributes from all nodes"
         },
         {
             "type": "action",
@@ -794,8 +794,8 @@
                 "transforms",
                 "local"
             ],
-            "title": "Copy Local Transfroms",
-            "tooltip": "Copy local transfroms"
+            "title": "Copy Local Transforms",
+            "tooltip": "Copy local transforms"
         },
         {
             "type": "action",
@@ -808,8 +808,8 @@
                 "transforms",
                 "matrix"
             ],
-            "title": "Copy Matrix Transfroms",
-            "tooltip": "Copy Matrix transfroms"
+            "title": "Copy Matrix Transforms",
+            "tooltip": "Copy Matrix transforms"
         },
         {
             "type": "action",
@@ -1274,7 +1274,7 @@
             "sourcetype": "file",
             "tags": ["cleanup", "remove_user_defined_attributes"],
             "title": "Remove User Defined Attributes",
-            "tooltip": "Remove all user-defined attributs from all nodes"
+            "tooltip": "Remove all user-defined attributes from all nodes"
         },
         {
             "type": "action",
@@ -341,7 +341,7 @@ def update_package(set_container, representation):
 def update_scene(set_container, containers, current_data, new_data, new_file):
     """Updates the hierarchy, assets and their matrix

-    Updates the following withing the scene:
+    Updates the following within the scene:
         * Setdress hierarchy alembic
         * Matrix
         * Parenting
@@ -92,7 +92,7 @@ class ShaderDefinitionsEditor(QtWidgets.QWidget):
     def _write_definition_file(self, content, force=False):
         """Write content as definition to file in database.

-        Before file is writen, check is made if its content has not
+        Before file is written, check is made if its content has not
         changed. If is changed, warning is issued to user if he wants
         it to overwrite. Note: GridFs doesn't allow changing file content.
         You need to delete existing file and create new one.
@@ -53,8 +53,8 @@ class CreateRender(plugin.Creator):
             renderer.
         ass (bool): Submit as ``ass`` file for standalone Arnold renderer.
         tileRendering (bool): Instance is set to tile rendering mode. We
-            won't submit actuall render, but we'll make publish job to wait
-            for Tile Assemly job done and then publish.
+            won't submit actual render, but we'll make publish job to wait
+            for Tile Assembly job done and then publish.

     See Also:
         https://pype.club/docs/artist_hosts_maya#creating-basic-render-setup
@@ -1,5 +1,6 @@
 from avalon import api
 import openpype.hosts.maya.api.plugin
+from openpype.hosts.maya.api.plugin import get_reference_node
 import os
 from openpype.api import get_project_settings
 import clique
@@ -111,7 +112,7 @@ class AssProxyLoader(openpype.hosts.maya.api.plugin.ReferenceLoader):

         # Get reference node from container members
         members = cmds.sets(node, query=True, nodesOnly=True)
-        reference_node = self._get_reference_node(members)
+        reference_node = get_reference_node(members)

         assert os.path.exists(proxyPath), "%s does not exist." % proxyPath

@@ -3,16 +3,16 @@ from avalon.maya.pipeline import containerise
 from avalon.maya import lib
 from maya import cmds, mel


 class AudioLoader(api.Loader):
     """Specific loader of audio."""

     families = ["audio"]
-    label = "Import audio."
+    label = "Import audio"
     representations = ["wav"]
     icon = "volume-up"
     color = "orange"

     def load(self, context, name, namespace, data):

         start_frame = cmds.playbackOptions(query=True, min=True)
@@ -2,6 +2,7 @@ import os
 from avalon import api
 from openpype.api import get_project_settings

+
 class GpuCacheLoader(api.Loader):
     """Load model Alembic as gpuCache"""

@@ -77,7 +77,7 @@ class ImagePlaneLoader(api.Loader):
     """Specific loader of plate for image planes on selected camera."""

     families = ["image", "plate", "render"]
-    label = "Load imagePlane."
+    label = "Load imagePlane"
     representations = ["mov", "exr", "preview", "png"]
     icon = "image"
     color = "orange"
@@ -118,7 +118,7 @@ class ImagePlaneLoader(api.Loader):
             camera = pm.createNode("camera")

         if camera is None:
-            return
+            return

         try:
             camera.displayResolution.set(1)
@@ -63,6 +63,8 @@ class ReferenceLoader(openpype.hosts.maya.api.plugin.ReferenceLoader):
         if current_namespace != ":":
             group_name = current_namespace + ":" + group_name

+        group_name = "|" + group_name
+
         self[:] = new_nodes

         if attach_to_root:
@@ -2,6 +2,72 @@ from avalon import api
 from openpype.api import get_project_settings
 import os

+from maya import cmds
+
+# List of 3rd Party Channels Mapping names for VRayVolumeGrid
+# See: https://docs.chaosgroup.com/display/VRAY4MAYA/Input
+# #Input-3rdPartyChannelsMapping
+THIRD_PARTY_CHANNELS = {
+    2: "Smoke",
+    1: "Temperature",
+    10: "Fuel",
+    4: "Velocity.x",
+    5: "Velocity.y",
+    6: "Velocity.z",
+    7: "Red",
+    8: "Green",
+    9: "Blue",
+    14: "Wavelet Energy",
+    19: "Wavelet.u",
+    20: "Wavelet.v",
+    21: "Wavelet.w",
+    # These are not in UI or documentation but V-Ray does seem to set these.
+    15: "AdvectionOrigin.x",
+    16: "AdvectionOrigin.y",
+    17: "AdvectionOrigin.z",
+
+}
+
+
+def _fix_duplicate_vvg_callbacks():
+    """Workaround to kill duplicate VRayVolumeGrids attribute callbacks.
+
+    This fixes a huge lag in Maya on switching 3rd Party Channels Mappings
+    or to different .vdb file paths because it spams an attribute changed
+    callback: `vvgUserChannelMappingsUpdateUI`.
+
+    ChaosGroup bug ticket: 154-008-9890
+
+    Found with:
+        - Maya 2019.2 on Windows 10
+        - V-Ray: V-Ray Next for Maya, update 1 version 4.12.01.00001
+
+    Bug still present in:
+        - Maya 2022.1 on Windows 10
+        - V-Ray 5 for Maya, Update 2.1 (v5.20.01 from Dec 16 2021)
+
+    """
+    # todo(roy): Remove when new V-Ray release fixes duplicate calls
+
+    jobs = cmds.scriptJob(listJobs=True)
+
+    matched = set()
+    for entry in jobs:
+        # Remove the number
+        index, callback = entry.split(":", 1)
+        callback = callback.strip()
+
+        # Detect whether it is a `vvgUserChannelMappingsUpdateUI`
+        # attribute change callback
+        if callback.startswith('"-runOnce" 1 "-attributeChange" "'):
+            if '"vvgUserChannelMappingsUpdateUI(' in callback:
+                if callback in matched:
+                    # If we've seen this callback before then
+                    # delete the duplicate callback
+                    cmds.scriptJob(kill=int(index))
+                else:
+                    matched.add(callback)
+
+
 class LoadVDBtoVRay(api.Loader):

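
Entries from `cmds.scriptJob(listJobs=True)` are strings of the form "<index>: <job definition>", which is why the workaround splits on the first colon. A minimal pure-Python sketch of the dedup logic on fabricated entries:

    jobs = [
        '12: "-runOnce" 1 "-attributeChange" "vvgUserChannelMappingsUpdateUI(...)"',
        '13: "-runOnce" 1 "-attributeChange" "vvgUserChannelMappingsUpdateUI(...)"',
    ]
    matched = set()
    for entry in jobs:
        index, callback = entry.split(":", 1)
        callback = callback.strip()
        if callback in matched:
            # real code would call cmds.scriptJob(kill=int(index))
            print("would kill duplicate job", index)
        else:
            matched.add(callback)
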
@@ -14,15 +80,24 @@ class LoadVDBtoVRay(api.Loader):

     def load(self, context, name, namespace, data):

-        from maya import cmds
         import avalon.maya.lib as lib
         from avalon.maya.pipeline import containerise

         assert os.path.exists(self.fname), (
             "Path does not exist: %s" % self.fname
         )

+        try:
+            family = context["representation"]["context"]["family"]
+        except ValueError:
+            family = "vdbcache"
+
+        # Ensure V-ray is loaded with the vrayvolumegrid
+        if not cmds.pluginInfo("vrayformaya", query=True, loaded=True):
+            cmds.loadPlugin("vrayformaya")
+        if not cmds.pluginInfo("vrayvolumegrid", query=True, loaded=True):
+            cmds.loadPlugin("vrayvolumegrid")
+
         # Check if viewport drawing engine is Open GL Core (compat)
         render_engine = None
         compatible = "OpenGLCoreProfileCompat"
@@ -30,13 +105,11 @@ class LoadVDBtoVRay(api.Loader):
             render_engine = cmds.optionVar(query="vp2RenderingEngine")

         if not render_engine or render_engine != compatible:
-            raise RuntimeError("Current scene's settings are incompatible."
-                               "See Preferences > Display > Viewport 2.0 to "
-                               "set the render engine to '%s'" % compatible)
+            self.log.warning("Current scene's settings are incompatible."
+                             "See Preferences > Display > Viewport 2.0 to "
+                             "set the render engine to '%s'" % compatible)

         asset = context['asset']
         version = context["version"]

         asset_name = asset["name"]
         namespace = namespace or lib.unique_namespace(
             asset_name + "_",
@@ -45,7 +118,7 @@ class LoadVDBtoVRay(api.Loader):
         )

         # Root group
-        label = "{}:{}".format(namespace, name)
+        label = "{}:{}_VDB".format(namespace, name)
         root = cmds.group(name=label, empty=True)

         settings = get_project_settings(os.environ['AVALON_PROJECT'])
@@ -55,20 +128,24 @@ class LoadVDBtoVRay(api.Loader):
         if c is not None:
             cmds.setAttr(root + ".useOutlinerColor", 1)
             cmds.setAttr(root + ".outlinerColor",
-                         (float(c[0])/255),
-                         (float(c[1])/255),
-                         (float(c[2])/255)
-                         )
+                         float(c[0]) / 255,
+                         float(c[1]) / 255,
+                         float(c[2]) / 255)

-        # Create VR
+        # Create VRayVolumeGrid
         grid_node = cmds.createNode("VRayVolumeGrid",
-                                    name="{}VVGShape".format(label),
+                                    name="{}Shape".format(label),
                                     parent=root)

-        # Set attributes
-        cmds.setAttr("{}.inFile".format(grid_node), self.fname, type="string")
-        cmds.setAttr("{}.inReadOffset".format(grid_node),
-                     version["startFrames"])
+        # Ensure .currentTime is connected to time1.outTime
+        cmds.connectAttr("time1.outTime", grid_node + ".currentTime")

+        # Set path
+        self._set_path(grid_node, self.fname, show_preset_popup=True)
+
+        # Lock the shape node so the user can't delete the transform/shape
+        # as if it was referenced
+        cmds.lockNode(grid_node, lock=True)

         nodes = [root, grid_node]
         self[:] = nodes
@@ -79,3 +156,132 @@ class LoadVDBtoVRay(api.Loader):
             nodes=nodes,
             context=context,
             loader=self.__class__.__name__)
+
+    def _set_path(self, grid_node, path, show_preset_popup=True):
+
+        from openpype.hosts.maya.api.lib import attribute_values
+        from maya import cmds
+
+        def _get_filename_from_folder(path):
+            # Using the sequence of .vdb files we check the frame range, etc.
+            # to set the filename with #### padding.
+            files = sorted(x for x in os.listdir(path) if x.endswith(".vdb"))
+            if not files:
+                raise RuntimeError("Couldn't find .vdb files in: %s" % path)
+
+            if len(files) == 1:
+                # Ensure check for single file is also done in folder
+                fname = files[0]
+            else:
+                # Sequence
+                from avalon.vendor import clique
+                # todo: check support for negative frames as input
+                collections, remainder = clique.assemble(files)
+                assert len(collections) == 1, (
+                    "Must find a single image sequence, "
+                    "found: %s" % (collections,)
+                )
+                collection = collections[0]
+
+                fname = collection.format('{head}{{padding}}{tail}')
+                padding = collection.padding
+                if padding == 0:
+                    # Clique doesn't provide padding if the frame number never
+                    # starts with a zero and thus has never any visual padding.
+                    # So we fall back to the smallest frame number as padding.
+                    padding = min(len(str(i)) for i in collection.indexes)
+
+                # Supply frame/padding with # signs
+                padding_str = "#" * padding
+                fname = fname.format(padding=padding_str)
+
+            return os.path.join(path, fname)
+
+        # The path is either a single file or sequence in a folder so
+        # we do a quick lookup for our files
+        if os.path.isfile(path):
+            path = os.path.dirname(path)
+        path = _get_filename_from_folder(path)
+
+        # Even when not applying a preset V-Ray will reset the 3rd Party
+        # Channels Mapping of the VRayVolumeGrid when setting the .inPath
+        # value. As such we try and preserve the values ourselves.
+        # Reported as ChaosGroup bug ticket: 154-011-2909
+        # todo(roy): Remove when new V-Ray release preserves values
+        original_user_mapping = cmds.getAttr(grid_node + ".usrchmap") or ""
+
+        # Workaround for V-Ray bug: fix lag on path change, see function
+        _fix_duplicate_vvg_callbacks()
+
+        # Suppress preset pop-up if we want.
+        popup_attr = "{0}.inDontOfferPresets".format(grid_node)
+        popup = {popup_attr: not show_preset_popup}
+        with attribute_values(popup):
+            cmds.setAttr(grid_node + ".inPath", path, type="string")
+
+        # Reapply the 3rd Party channels user mapping when no preset popup
+        # was shown to the user
+        if not show_preset_popup:
+            channels = cmds.getAttr(grid_node + ".usrchmapallch").split(";")
+            channels = set(channels)  # optimize lookup
+            restored_mapping = ""
+            for entry in original_user_mapping.split(";"):
+                if not entry:
+                    # Ignore empty entries
+                    continue
+
+                # If 3rd Party Channels selection channel still exists then
+                # add it again.
+                index, channel = entry.split(",")
+                attr = THIRD_PARTY_CHANNELS.get(int(index),
+                                                # Fallback for when a mapping
+                                                # was set that is not in the
+                                                # documentation
+                                                "???")
+                if channel in channels:
+                    restored_mapping += entry + ";"
+                else:
+                    self.log.warning("Can't preserve '%s' mapping due to "
+                                     "missing channel '%s' on node: "
+                                     "%s" % (attr, channel, grid_node))
+
+            if restored_mapping:
+                cmds.setAttr(grid_node + ".usrchmap",
+                             restored_mapping,
+                             type="string")
+
+    def update(self, container, representation):
+
+        path = api.get_representation_path(representation)
+
+        # Find VRayVolumeGrid
+        members = cmds.sets(container['objectName'], query=True)
+        grid_nodes = cmds.ls(members, type="VRayVolumeGrid", long=True)
+        assert len(grid_nodes) > 0, "This is a bug"
+
+        # Update the VRayVolumeGrid
+        for grid_node in grid_nodes:
+            self._set_path(grid_node, path=path, show_preset_popup=False)
+
+        # Update container representation
+        cmds.setAttr(container["objectName"] + ".representation",
+                     str(representation["_id"]),
+                     type="string")
+
+    def switch(self, container, representation):
+        self.update(container, representation)
+
+    def remove(self, container):
+
+        # Get all members of the avalon container, ensure they are unlocked
+        # and delete everything
+        members = cmds.sets(container['objectName'], query=True)
+        cmds.lockNode(members, lock=False)
+        cmds.delete([container['objectName']] + members)
+
+        # Clean up the namespace
+        try:
+            cmds.namespace(removeNamespace=container['namespace'],
+                           deleteNamespaceContent=True)
+        except RuntimeError:
+            pass
@@ -24,7 +24,7 @@ class CollectAssembly(pyblish.api.InstancePlugin):
     """

     order = pyblish.api.CollectorOrder + 0.49
-    label = "Assemby"
+    label = "Assembly"
     families = ["assembly"]

     def process(self, instance):
@@ -126,7 +126,7 @@ class CollectMayaRender(pyblish.api.ContextPlugin):
                         r"^.+:(.*)", layer).group(1)
                 except IndexError:
                     msg = "Invalid layer name in set [ {} ]".format(layer)
-                    self.log.warnig(msg)
+                    self.log.warning(msg)
                     continue

                 self.log.info("processing %s" % layer)
@@ -48,7 +48,7 @@ class CollectVrayScene(pyblish.api.InstancePlugin):
             expected_layer_name = re.search(r"^.+:(.*)", layer).group(1)
         except IndexError:
             msg = "Invalid layer name in set [ {} ]".format(layer)
-            self.log.warnig(msg)
+            self.log.warning(msg)
             continue

         self.log.info("processing %s" % layer)
@@ -22,6 +22,11 @@ COPY = 1
 HARDLINK = 2


+def escape_space(path):
+    """Ensure path is enclosed by quotes to allow paths with spaces"""
+    return '"{}"'.format(path) if " " in path else path
+
+
 def find_paths_by_hash(texture_hash):
     """Find the texture hash key in the dictionary.

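
A quick sanity check of the helper (paths are made up):

    print(escape_space("/textures/wood.tx"))
    # /textures/wood.tx
    print(escape_space("/my textures/wood.tx"))
    # "/my textures/wood.tx"

Only paths that actually contain a space are quoted, so commands built from unspaced paths stay byte-for-byte unchanged.
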
@@ -76,7 +81,7 @@ def maketx(source, destination, *args):
     ]

     cmd.extend(args)
-    cmd.extend(["-o", destination, source])
+    cmd.extend(["-o", escape_space(destination), escape_space(source)])

     cmd = " ".join(cmd)

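
With both ends escaped, the joined command line stays parseable even for spaced paths. A sketch with the flag list abbreviated and made-up paths:

    cmd = ["maketx", "-o",
           escape_space("/out dir/wood.tx"),
           escape_space("/src/wood.png")]
    print(" ".join(cmd))
    # maketx -o "/out dir/wood.tx" /src/wood.png
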
@@ -314,7 +319,6 @@ class ExtractLook(openpype.api.Extractor):
         do_maketx = instance.data.get("maketx", False)

-
         # Collect all unique files used in the resources
         files = set()
         files_metadata = {}
         for resource in resources:
             # Preserve color space values (force value after filepath change)
@@ -325,7 +329,6 @@ class ExtractLook(openpype.api.Extractor):
             for f in resource["files"]:
                 files_metadata[os.path.normpath(f)] = {
                     "color_space": color_space}
-            # files.update(os.path.normpath(f))

         # Process the resource files
         transfers = []
@@ -333,7 +336,6 @@ class ExtractLook(openpype.api.Extractor):
         hashes = {}
         force_copy = instance.data.get("forceCopy", False)

-        self.log.info(files)
         for filepath in files_metadata:

             linearize = False
@@ -492,7 +494,7 @@ class ExtractLook(openpype.api.Extractor):
                 # Include `source-hash` as string metadata
                 "-sattrib",
                 "sourceHash",
-                texture_hash,
+                escape_space(texture_hash),
                 colorconvert,
             )

@@ -36,7 +36,7 @@ class ExtractVrayscene(openpype.api.Extractor):
         else:
             node = vray_settings[0]

-        # setMembers on vrayscene_layer shoudl contain layer name.
+        # setMembers on vrayscene_layer should contain layer name.
         layer_name = instance.data.get("layer")

         staging_dir = self.staging_dir(instance)
@@ -111,7 +111,7 @@ class ExtractVrayscene(openpype.api.Extractor):
             layer (str): layer name.
             template (str): token template.
             start_frame (int, optional): start frame - if set we use
-                mutliple files export mode.
+                multiple files export mode.

         Returns:
             str: formatted path.
@@ -331,7 +331,7 @@ class MayaSubmitMuster(pyblish.api.InstancePlugin):
         # but dispatcher (Server) and not render clients. Render clients
         # inherit environment from publisher including PATH, so there's
         # no problem finding PYPE, but there is now way (as far as I know)
-        # to set environment dynamically for dispatcher. Therefor this hack.
+        # to set environment dynamically for dispatcher. Therefore this hack.
         args = [muster_python,
                 _get_script().replace('\\', '\\\\'),
                 "--paths",
@@ -478,7 +478,7 @@ class MayaSubmitMuster(pyblish.api.InstancePlugin):
                 # such that proper initialisation happens the same
                 # way as it does on a local machine.
                 # TODO(marcus): This won't work if the slaves don't
-                # have accesss to these paths, such as if slaves are
+                # have access to these paths, such as if slaves are
                 # running Linux and the submitter is on Windows.
                 "PYTHONPATH",
                 "PATH",
@@ -78,7 +78,7 @@ class GetOverlappingUVs(object):
             if len(uarray) == 0 or len(varray) == 0:
                 return (False, None, None)

-            # loop throught all vertices to construct edges/rays
+            # loop through all vertices to construct edges/rays
             u = uarray[-1]
             v = varray[-1]
             for i in xrange(len(uarray)):  # noqa: F821
@@ -9,7 +9,7 @@ class ValidateRigContents(pyblish.api.InstancePlugin):

     Every rig must contain at least two object sets:
         "controls_SET" - Set of all animatable controls
-        "out_SET" - Set of all cachable meshes
+        "out_SET" - Set of all cacheable meshes

     """

@@ -10,7 +10,7 @@ import re
 class ValidateUnrealStaticmeshName(pyblish.api.InstancePlugin):
     """Validate name of Unreal Static Mesh

-    Unreals naming convention states that staticMesh sould start with `SM`
+    Unreals naming convention states that staticMesh should start with `SM`
     prefix - SM_[Name]_## (Eg. SM_sube_01). This plugin also validates other
     types of meshes - collision meshes:

@@ -6,10 +6,7 @@ def add_implementation_envs(env, _app):
     # Add requirements to NUKE_PATH
     pype_root = os.environ["OPENPYPE_REPOS_ROOT"]
     new_nuke_paths = [
-        os.path.join(pype_root, "openpype", "hosts", "nuke", "startup"),
-        os.path.join(
-            pype_root, "repos", "avalon-core", "setup", "nuke", "nuke_path"
-        )
+        os.path.join(pype_root, "openpype", "hosts", "nuke", "startup")
     ]
     old_nuke_path = env.get("NUKE_PATH") or ""
     for path in old_nuke_path.split(os.pathsep):
@@ -1,130 +1,57 @@
-import os
-import nuke
 from .workio import (
     file_extensions,
     has_unsaved_changes,
     save_file,
     open_file,
     current_file,
     work_root,
 )
-
-import avalon.api
-import pyblish.api
-import openpype
-from . import lib, menu
 from .command import (
     reset_frame_range,
     get_handles,
     reset_resolution,
     viewer_update_and_undo_stop
 )
-
-log = openpype.api.Logger().get_logger(__name__)
+from .plugin import OpenPypeCreator
+from .pipeline import (
+    install,
+    uninstall,

-AVALON_CONFIG = os.getenv("AVALON_CONFIG", "pype")
-HOST_DIR = os.path.dirname(os.path.abspath(openpype.hosts.nuke.__file__))
-PLUGINS_DIR = os.path.join(HOST_DIR, "plugins")
-PUBLISH_PATH = os.path.join(PLUGINS_DIR, "publish")
-LOAD_PATH = os.path.join(PLUGINS_DIR, "load")
-CREATE_PATH = os.path.join(PLUGINS_DIR, "create")
-INVENTORY_PATH = os.path.join(PLUGINS_DIR, "inventory")
+    ls,
+
+    containerise,
+    parse_container,
+    update_container,
+)
+from .lib import (
+    maintained_selection
+)

-
-# registering pyblish gui regarding settings in presets
-if os.getenv("PYBLISH_GUI", None):
-    pyblish.api.register_gui(os.getenv("PYBLISH_GUI", None))
+__all__ = (
+    "file_extensions",
+    "has_unsaved_changes",
+    "save_file",
+    "open_file",
+    "current_file",
+    "work_root",
+
+    "reset_frame_range",
+    "get_handles",
+    "reset_resolution",
+    "viewer_update_and_undo_stop",

-def reload_config():
-    """Attempt to reload pipeline at run-time.
+    "OpenPypeCreator",
+    "install",
+    "uninstall",

-    CAUTION: This is primarily for development and debugging purposes.
+    "ls",

-    """
+    "containerise",
+    "parse_container",
+    "update_container",

-    import importlib
-
-    for module in (
-        "{}.api".format(AVALON_CONFIG),
-        "{}.hosts.nuke.api.actions".format(AVALON_CONFIG),
-        "{}.hosts.nuke.api.menu".format(AVALON_CONFIG),
-        "{}.hosts.nuke.api.plugin".format(AVALON_CONFIG),
-        "{}.hosts.nuke.api.lib".format(AVALON_CONFIG),
-    ):
-        log.info("Reloading module: {}...".format(module))
-
-        module = importlib.import_module(module)
-
-        try:
-            importlib.reload(module)
-        except AttributeError as e:
-            from importlib import reload
-            log.warning("Cannot reload module: {}".format(e))
-            reload(module)
-
-
-def install():
-    ''' Installing all requarements for Nuke host
-    '''
-
-    # remove all registred callbacks form avalon.nuke
-    from avalon import pipeline
-    pipeline._registered_event_handlers.clear()
-
-    log.info("Registering Nuke plug-ins..")
-    pyblish.api.register_plugin_path(PUBLISH_PATH)
-    avalon.api.register_plugin_path(avalon.api.Loader, LOAD_PATH)
-    avalon.api.register_plugin_path(avalon.api.Creator, CREATE_PATH)
-    avalon.api.register_plugin_path(avalon.api.InventoryAction, INVENTORY_PATH)
-
-    # Register Avalon event for workfiles loading.
-    avalon.api.on("workio.open_file", lib.check_inventory_versions)
-    avalon.api.on("taskChanged", menu.change_context_label)
-
-    pyblish.api.register_callback(
-        "instanceToggled", on_pyblish_instance_toggled)
-    workfile_settings = lib.WorkfileSettings()
-    # Disable all families except for the ones we explicitly want to see
-    family_states = [
-        "write",
-        "review",
-        "nukenodes",
-        "model",
-        "gizmo"
-    ]
-
-    avalon.api.data["familiesStateDefault"] = False
-    avalon.api.data["familiesStateToggled"] = family_states
-
-    # Set context settings.
-    nuke.addOnCreate(workfile_settings.set_context_settings, nodeClass="Root")
-    nuke.addOnCreate(workfile_settings.set_favorites, nodeClass="Root")
-    nuke.addOnCreate(lib.process_workfile_builder, nodeClass="Root")
-    nuke.addOnCreate(lib.launch_workfiles_app, nodeClass="Root")
-    menu.install()
-
-
-def uninstall():
-    '''Uninstalling host's integration
-    '''
-    log.info("Deregistering Nuke plug-ins..")
-    pyblish.api.deregister_plugin_path(PUBLISH_PATH)
-    avalon.api.deregister_plugin_path(avalon.api.Loader, LOAD_PATH)
-    avalon.api.deregister_plugin_path(avalon.api.Creator, CREATE_PATH)
-
-    pyblish.api.deregister_callback(
-        "instanceToggled", on_pyblish_instance_toggled)
-
-    reload_config()
-    menu.uninstall()
-
-
-def on_pyblish_instance_toggled(instance, old_value, new_value):
-    """Toggle node passthrough states on instance toggles."""
-
-    log.info("instance toggle: {}, old_value: {}, new_value:{} ".format(
-        instance, old_value, new_value))
-
-    from avalon.nuke import (
-        viewer_update_and_undo_stop,
-        add_publish_knob
-    )
-
-    # Whether instances should be passthrough based on new value
-
-    with viewer_update_and_undo_stop():
-        n = instance[0]
-        try:
-            n["publish"].value()
-        except ValueError:
-            n = add_publish_knob(n)
-            log.info(" `Publish` knob was added to write node..")
-
-        n["publish"].setValue(new_value)
+    "maintained_selection",
+)
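
After this refactor the Nuke host API is consumed through the package namespace rather than scattered submodules; a usage sketch:

    from openpype.hosts.nuke.api import (
        maintained_selection,
        viewer_update_and_undo_stop,
    )

    with viewer_update_and_undo_stop():
        pass  # batch node edits without viewer refreshes or undo spam
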
@@ -1,12 +1,11 @@
 import pyblish.api

-from avalon.nuke.lib import (
+from openpype.api import get_errored_instances_from_context
+from .lib import (
     reset_selection,
     select_nodes
 )
-
-from openpype.api import get_errored_instances_from_context


 class SelectInvalidAction(pyblish.api.Action):
     """Select invalid nodes in Nuke when plug-in failed.
135
openpype/hosts/nuke/api/command.py
Normal file
@@ -0,0 +1,135 @@
+import logging
+import contextlib
+import nuke
+
+from avalon import api, io
+
+
+log = logging.getLogger(__name__)
+
+
+def reset_frame_range():
+    """ Set frame range to current asset
+        Also it will set a Viewer range with
+        displayed handles
+    """
+
+    fps = float(api.Session.get("AVALON_FPS", 25))
+
+    nuke.root()["fps"].setValue(fps)
+    name = api.Session["AVALON_ASSET"]
+    asset = io.find_one({"name": name, "type": "asset"})
+    asset_data = asset["data"]
+
+    handles = get_handles(asset)
+
+    frame_start = int(asset_data.get(
+        "frameStart",
+        asset_data.get("edit_in")))
+
+    frame_end = int(asset_data.get(
+        "frameEnd",
+        asset_data.get("edit_out")))
+
+    if not all([frame_start, frame_end]):
+        missing = ", ".join(["frame_start", "frame_end"])
+        msg = "'{}' are not set for asset '{}'!".format(missing, name)
+        log.warning(msg)
+        nuke.message(msg)
+        return
+
+    frame_start -= handles
+    frame_end += handles
+
+    nuke.root()["first_frame"].setValue(frame_start)
+    nuke.root()["last_frame"].setValue(frame_end)
+
+    # setting active viewers
+    vv = nuke.activeViewer().node()
+    vv["frame_range_lock"].setValue(True)
+    vv["frame_range"].setValue("{0}-{1}".format(
+        int(asset_data["frameStart"]),
+        int(asset_data["frameEnd"]))
+    )
+
+
+def get_handles(asset):
+    """ Gets handles data
+
+    Arguments:
+        asset (dict): avalon asset entity
+
+    Returns:
+        handles (int)
+    """
+    data = asset["data"]
+    if "handles" in data and data["handles"] is not None:
+        return int(data["handles"])
+
+    parent_asset = None
+    if "visualParent" in data:
+        vp = data["visualParent"]
+        if vp is not None:
+            parent_asset = io.find_one({"_id": io.ObjectId(vp)})
+
+    if parent_asset is None:
+        parent_asset = io.find_one({"_id": io.ObjectId(asset["parent"])})
+
+    if parent_asset is not None:
+        return get_handles(parent_asset)
+    else:
+        return 0
+
+
+def reset_resolution():
+    """Set resolution to project resolution."""
+    project = io.find_one({"type": "project"})
+    p_data = project["data"]
+
+    width = p_data.get("resolution_width",
+                       p_data.get("resolutionWidth"))
+    height = p_data.get("resolution_height",
+                        p_data.get("resolutionHeight"))
+
+    if not all([width, height]):
+        missing = ", ".join(["width", "height"])
+        msg = "No resolution information `{0}` found for '{1}'.".format(
+            missing,
+            project["name"])
+        log.warning(msg)
+        nuke.message(msg)
+        return
+
+    current_width = nuke.root()["format"].value().width()
+    current_height = nuke.root()["format"].value().height()
+
+    if width != current_width or height != current_height:
+
+        fmt = None
+        for f in nuke.formats():
+            if f.width() == width and f.height() == height:
+                fmt = f.name()
+
+        if not fmt:
+            nuke.addFormat(
+                "{0} {1} {2}".format(int(width), int(height), project["name"])
+            )
+            fmt = project["name"]
+
+        nuke.root()["format"].setValue(fmt)
+
+
+@contextlib.contextmanager
+def viewer_update_and_undo_stop():
+    """Lock viewer from updating and stop recording undo steps"""
+    try:
+        # stop active viewer to update any change
+        viewer = nuke.activeViewer()
+        if viewer:
+            viewer.stop()
+        else:
+            log.warning("No available active Viewer")
+        nuke.Undo.disable()
+        yield
+    finally:
+        nuke.Undo.enable()
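
`get_handles` first checks the asset's own data, then walks up `visualParent` links, defaulting to 0 when no ancestor defines handles. A minimal sketch with an in-memory asset document (the document shape is illustrative only):

    asset = {"data": {"handles": 10}}
    print(get_handles(asset))  # 10 -- returns before any database lookup
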
@@ -3,15 +3,15 @@ import re
 import sys
 import six
 import platform
 import contextlib
 from collections import OrderedDict

 import clique

+import nuke
+
 from avalon import api, io, lib
-import avalon.nuke
-from avalon.nuke import lib as anlib
-from avalon.nuke import (
-    save_file, open_file
-)

 from openpype.api import (
     Logger,
     Anatomy,
@ -28,21 +28,476 @@ from openpype.lib.path_tools import HostDirmap
|
|||
from openpype.settings import get_project_settings
|
||||
from openpype.modules import ModulesManager
|
||||
|
||||
import nuke
|
||||
from .workio import (
|
||||
save_file,
|
||||
open_file
|
||||
)
|
||||
|
||||
from .utils import set_context_favorites
|
||||
log = Logger.get_logger(__name__)
|
||||
|
||||
log = Logger().get_logger(__name__)
|
||||
_NODE_TAB_NAME = "{}".format(os.getenv("AVALON_LABEL") or "Avalon")
|
||||
AVALON_LABEL = os.getenv("AVALON_LABEL") or "Avalon"
|
||||
AVALON_TAB = "{}".format(AVALON_LABEL)
|
||||
AVALON_DATA_GROUP = "{}DataGroup".format(AVALON_LABEL.capitalize())
|
||||
EXCLUDED_KNOB_TYPE_ON_READ = (
|
||||
20, # Tab Knob
|
||||
26, # Text Knob (But for backward compatibility, still be read
|
||||
# if value is not an empty string.)
|
||||
)
|
||||
|
||||
opnl = sys.modules[__name__]
|
||||
opnl._project = None
|
||||
opnl.project_name = os.getenv("AVALON_PROJECT")
|
||||
opnl.workfiles_launched = False
|
||||
opnl._node_tab_name = "{}".format(os.getenv("AVALON_LABEL") or "Avalon")
|
||||
|
||||
class Context:
|
||||
main_window = None
|
||||
context_label = None
|
||||
project_name = os.getenv("AVALON_PROJECT")
|
||||
workfiles_launched = False
|
||||
# Seems unused
|
||||
_project_doc = None
|
||||
|
||||
|
||||
class Knobby(object):
|
||||
"""For creating knob which it's type isn't mapped in `create_knobs`
|
||||
|
||||
Args:
|
||||
type (string): Nuke knob type name
|
||||
value: Value to be set with `Knob.setValue`, put `None` if not required
|
||||
flags (list, optional): Knob flags to be set with `Knob.setFlag`
|
||||
*args: Args other than knob name for initializing knob class
|
||||
|
||||
"""
|
||||
|
||||
def __init__(self, type, value, flags=None, *args):
|
||||
self.type = type
|
||||
self.value = value
|
||||
self.flags = flags or []
|
||||
self.args = args
|
||||
|
||||
def create(self, name, nice=None):
|
||||
knob_cls = getattr(nuke, self.type)
|
||||
knob = knob_cls(name, nice, *self.args)
|
||||
if self.value is not None:
|
||||
knob.setValue(self.value)
|
||||
for flag in self.flags:
|
||||
knob.setFlag(flag)
|
||||
return knob
|
||||
|
||||
|
||||
def create_knobs(data, tab=None):
|
||||
"""Create knobs by data
|
||||
|
||||
Depending on the type of each dict value and creates the correct Knob.
|
||||
|
||||
Mapped types:
|
||||
bool: nuke.Boolean_Knob
|
||||
int: nuke.Int_Knob
|
||||
float: nuke.Double_Knob
|
||||
list: nuke.Enumeration_Knob
|
||||
six.string_types: nuke.String_Knob
|
||||
|
||||
dict: If it's a nested dict (all values are dict), will turn into
|
||||
A tabs group. Or just a knobs group.
|
||||
|
||||
Args:
|
||||
data (dict): collection of attributes and their value
|
||||
tab (string, optional): Knobs' tab name
|
||||
|
||||
Returns:
|
||||
list: A list of `nuke.Knob` objects
|
||||
|
||||
"""
|
||||
def nice_naming(key):
|
||||
"""Convert camelCase name into UI Display Name"""
|
||||
words = re.findall('[A-Z][^A-Z]*', key[0].upper() + key[1:])
|
||||
return " ".join(words)
|
||||
|
||||
# Turn key-value pairs into knobs
|
||||
knobs = list()
|
||||
|
||||
if tab:
|
||||
knobs.append(nuke.Tab_Knob(tab))
|
||||
|
||||
for key, value in data.items():
|
||||
# Knob name
|
||||
if isinstance(key, tuple):
|
||||
name, nice = key
|
||||
else:
|
||||
name, nice = key, nice_naming(key)
|
||||
|
||||
# Create knob by value type
|
||||
if isinstance(value, Knobby):
|
||||
knobby = value
|
||||
knob = knobby.create(name, nice)
|
||||
|
||||
elif isinstance(value, float):
|
||||
knob = nuke.Double_Knob(name, nice)
|
||||
knob.setValue(value)
|
||||
|
||||
elif isinstance(value, bool):
|
||||
knob = nuke.Boolean_Knob(name, nice)
|
||||
knob.setValue(value)
|
||||
knob.setFlag(nuke.STARTLINE)
|
||||
|
||||
elif isinstance(value, int):
|
||||
knob = nuke.Int_Knob(name, nice)
|
||||
knob.setValue(value)
|
||||
|
||||
elif isinstance(value, six.string_types):
|
||||
knob = nuke.String_Knob(name, nice)
|
||||
knob.setValue(value)
|
||||
|
||||
elif isinstance(value, list):
|
||||
knob = nuke.Enumeration_Knob(name, nice, value)
|
||||
|
||||
elif isinstance(value, dict):
|
||||
if all(isinstance(v, dict) for v in value.values()):
|
||||
# Create a group of tabs
|
||||
begain = nuke.BeginTabGroup_Knob()
|
||||
end = nuke.EndTabGroup_Knob()
|
||||
begain.setName(name)
|
||||
end.setName(name + "_End")
|
||||
knobs.append(begain)
|
||||
for k, v in value.items():
|
||||
knobs += create_knobs(v, tab=k)
|
||||
knobs.append(end)
|
||||
else:
|
||||
# Create a group of knobs
|
||||
knobs.append(nuke.Tab_Knob(
|
||||
name, nice, nuke.TABBEGINCLOSEDGROUP))
|
||||
knobs += create_knobs(value)
|
||||
knobs.append(
|
||||
nuke.Tab_Knob(name + "_End", nice, nuke.TABENDGROUP))
|
||||
continue
|
||||
|
||||
else:
|
||||
raise TypeError("Unsupported type: %r" % type(value))
|
||||
|
||||
knobs.append(knob)
|
||||
|
||||
return knobs
|
||||
|
||||
|
||||
def imprint(node, data, tab=None):
|
||||
"""Store attributes with value on node
|
||||
|
||||
Parse user data into Node knobs.
|
||||
Use `collections.OrderedDict` to ensure knob order.
|
||||
|
||||
Args:
|
||||
node(nuke.Node): node object from Nuke
|
||||
data(dict): collection of attributes and their value
|
||||
|
||||
Returns:
|
||||
None
|
||||
|
||||
Examples:
|
||||
```
|
||||
import nuke
|
||||
from avalon.nuke import lib
|
||||
|
||||
node = nuke.createNode("NoOp")
|
||||
data = {
|
||||
# Regular type of attributes
|
||||
"myList": ["x", "y", "z"],
|
||||
"myBool": True,
|
||||
"myFloat": 0.1,
|
||||
"myInt": 5,
|
||||
|
||||
# Creating non-default imprint type of knob
|
||||
"MyFilePath": lib.Knobby("File_Knob", "/file/path"),
|
||||
"divider": lib.Knobby("Text_Knob", ""),
|
||||
|
||||
# Manual nice knob naming
|
||||
("my_knob", "Nice Knob Name"): "some text",
|
||||
|
||||
# dict type will be created as knob group
|
||||
"KnobGroup": {
|
||||
"knob1": 5,
|
||||
"knob2": "hello",
|
||||
"knob3": ["a", "b"],
|
||||
},
|
||||
|
||||
# Nested dict will be created as tab group
|
||||
"TabGroup": {
|
||||
"tab1": {"count": 5},
|
||||
"tab2": {"isGood": True},
|
||||
"tab3": {"direction": ["Left", "Right"]},
|
||||
},
|
||||
}
|
||||
lib.imprint(node, data, tab="Demo")
|
||||
|
||||
```
|
||||
|
||||
"""
|
||||
for knob in create_knobs(data, tab):
|
||||
node.addKnob(knob)
|
||||
|
||||
|
||||
def add_publish_knob(node):
|
||||
"""Add Publish knob to node
|
||||
|
||||
Arguments:
|
||||
node (nuke.Node): nuke node to be processed
|
||||
|
||||
Returns:
|
||||
node (nuke.Node): processed nuke node
|
||||
|
||||
"""
|
||||
if "publish" not in node.knobs():
|
||||
body = OrderedDict()
|
||||
body[("divd", "Publishing")] = Knobby("Text_Knob", '')
|
||||
body["publish"] = True
|
||||
imprint(node, body)
|
||||
return node
|
||||
|
||||
|
||||
def set_avalon_knob_data(node, data=None, prefix="avalon:"):
|
||||
""" Sets data into nodes's avalon knob
|
||||
|
||||
Arguments:
|
||||
node (nuke.Node): Nuke node to imprint with data,
|
||||
data (dict, optional): Data to be imprinted into AvalonTab
|
||||
prefix (str, optional): filtering prefix
|
||||
|
||||
Returns:
|
||||
node (nuke.Node)
|
||||
|
||||
Examples:
|
||||
data = {
|
||||
'asset': 'sq020sh0280',
|
||||
'family': 'render',
|
||||
'subset': 'subsetMain'
|
||||
}
|
||||
"""
|
||||
data = data or dict()
|
||||
create = OrderedDict()
|
||||
|
||||
tab_name = AVALON_TAB
|
||||
editable = ["asset", "subset", "name", "namespace"]
|
||||
|
||||
existed_knobs = node.knobs()
|
||||
|
||||
for key, value in data.items():
|
||||
knob_name = prefix + key
|
||||
gui_name = key
|
||||
|
||||
if knob_name in existed_knobs:
|
||||
# Set value
|
||||
try:
|
||||
node[knob_name].setValue(value)
|
||||
except TypeError:
|
||||
node[knob_name].setValue(str(value))
|
||||
else:
|
||||
# New knob
|
||||
name = (knob_name, gui_name) # Hide prefix on GUI
|
||||
if key in editable:
|
||||
create[name] = value
|
||||
else:
|
||||
create[name] = Knobby("String_Knob",
|
||||
str(value),
|
||||
flags=[nuke.READ_ONLY])
|
||||
if tab_name in existed_knobs:
|
||||
tab_name = None
|
||||
else:
|
||||
tab = OrderedDict()
|
||||
warn = Knobby("Text_Knob", "Warning! Do not change following data!")
|
||||
divd = Knobby("Text_Knob", "")
|
||||
head = [
|
||||
(("warn", ""), warn),
|
||||
(("divd", ""), divd),
|
||||
]
|
||||
tab[AVALON_DATA_GROUP] = OrderedDict(head + list(create.items()))
|
||||
create = tab
|
||||
|
||||
imprint(node, create, tab=tab_name)
|
||||
return node
|
||||
|
||||
|
||||
def get_avalon_knob_data(node, prefix="avalon:"):
|
||||
""" Gets a data from nodes's avalon knob
|
||||
|
||||
Arguments:
|
||||
node (obj): Nuke node to search for data,
|
||||
prefix (str, optional): filtering prefix
|
||||
|
||||
Returns:
|
||||
data (dict)
|
||||
"""
|
||||
|
||||
# check if lists
|
||||
if not isinstance(prefix, list):
|
||||
prefix = list([prefix])
|
||||
|
||||
data = dict()
|
||||
|
||||
# loop prefix
|
||||
for p in prefix:
|
||||
# check if the node is avalon tracked
|
||||
if AVALON_TAB not in node.knobs():
|
||||
continue
|
||||
try:
|
||||
# check if data available on the node
|
||||
test = node[AVALON_DATA_GROUP].value()
|
||||
log.debug("Only testing if data avalable: `{}`".format(test))
|
||||
except NameError as e:
|
||||
# if it doesn't then create it
|
||||
log.debug("Creating avalon knob: `{}`".format(e))
|
||||
node = set_avalon_knob_data(node)
|
||||
return get_avalon_knob_data(node)
|
||||
|
||||
# get data from filtered knobs
|
||||
data.update({k.replace(p, ''): node[k].value()
|
||||
for k in node.knobs().keys()
|
||||
if p in k})
|
||||
|
||||
return data
|
||||
|
||||
|
||||
def fix_data_for_node_create(data):
|
||||
"""Fixing data to be used for nuke knobs
|
||||
"""
|
||||
for k, v in data.items():
|
||||
if isinstance(v, six.text_type):
|
||||
data[k] = str(v)
|
||||
if str(v).startswith("0x"):
|
||||
data[k] = int(v, 16)
|
||||
return data


def add_write_node(name, **kwarg):
    """Adding nuke write node

    Arguments:
        name (str): nuke node name
        kwarg (attrs): data for nuke knobs

    Returns:
        node (obj): nuke write node
    """
    frame_range = kwarg.get("frame_range", None)

    w = nuke.createNode(
        "Write",
        "name {}".format(name))

    w["file"].setValue(kwarg["file"])

    for k, v in kwarg.items():
        if "frame_range" in k:
            continue
        log.info([k, v])
        try:
            w[k].setValue(v)
        except KeyError as e:
            log.debug(e)
            continue

    if frame_range:
        w["use_limit"].setValue(True)
        w["first"].setValue(frame_range[0])
        w["last"].setValue(frame_range[1])

    return w
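For illustration, a hedged call with invented knob values; any keyword that does not exist as a knob on the Write node is simply skipped by the `KeyError` handler above:

    w = add_write_node(
        "inside_renderMain",
        file="/proj/renders/sh010_v001.%04d.exr",  # hypothetical output path
        file_type="exr",
        frame_range=(1001, 1100),  # drives use_limit/first/last
    )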


def read(node):
    """Return user-defined knobs from given `node`

    Args:
        node (nuke.Node): Nuke node object

    Returns:
        dict: A dictionary of knob name to knob value

    """
    def compat_prefixed(knob_name):
        if knob_name.startswith("avalon:"):
            return knob_name[len("avalon:"):]
        elif knob_name.startswith("ak:"):
            return knob_name[len("ak:"):]
        else:
            return knob_name

    data = dict()

    pattern = ("(?<=addUserKnob {)"
               "([0-9]*) (\\S*)"  # Matching knob type and knob name
               "(?=[ |}])")
    tcl_script = node.writeKnobs(nuke.WRITE_USER_KNOB_DEFS)
    result = re.search(pattern, tcl_script)

    if result:
        first_user_knob = result.group(2)
        # Collect user knobs from the end of the knob list
        for knob in reversed(node.allKnobs()):
            knob_name = knob.name()
            if not knob_name:
                # Ignore unnamed knob
                continue

            knob_type = nuke.knob(knob.fullyQualifiedName(), type=True)
            value = knob.value()

            if (
                knob_type not in EXCLUDED_KNOB_TYPE_ON_READ or
                # For compatibility with read-only string data imprinted
                # by `nuke.Text_Knob`.
                (knob_type == 26 and value)
            ):
                key = compat_prefixed(knob_name)
                data[key] = value

            if knob_name == first_user_knob:
                break

    return data
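The regex only needs to find the *first* user knob in the TCL dump; everything from that knob to the end of the node's knob list is user-defined. A standalone check of the pattern against a hand-written TCL snippet (knob names invented):

    import re

    tcl_script = ('addUserKnob {20 avalon l avalon}\n'
                  'addUserKnob {1 avalon:asset l asset}')
    pattern = ("(?<=addUserKnob {)"
               "([0-9]*) (\\S*)"
               "(?=[ |}])")
    result = re.search(pattern, tcl_script)
    print(result.group(1), result.group(2))  # -> 20 avalon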


def get_node_path(path, padding=4):
    """Get filename for the Nuke write with padded number as '#'

    Arguments:
        path (str): The path to render to.

    Returns:
        tuple: head, padding, tail (extension)

    Examples:
        >>> get_node_path("test.exr")
        ('test', 4, '.exr')

        >>> get_node_path("filename.#####.tif")
        ('filename.', 5, '.tif')

        >>> get_node_path("foobar##.tif")
        ('foobar', 2, '.tif')

        >>> get_node_path("foobar_%08d.tif")
        ('foobar_', 8, '.tif')
    """
    filename, ext = os.path.splitext(path)

    # Find a final number group
    if '%' in filename:
        match = re.match('.*?(%[0-9]+d)$', filename)
        if match:
            padding = int(match.group(1).replace('%', '').replace('d', ''))
            # remove the number from the end since nuke
            # will swap it with the frame number
            filename = filename.replace(match.group(1), '')
    elif '#' in filename:
        match = re.match('.*?(#+)$', filename)

        if match:
            padding = len(match.group(1))
            # remove the number from the end since nuke
            # will swap it with the frame number
            filename = filename.replace(match.group(1), '')

    return filename, padding, ext
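A short follow-up sketch showing how the returned tuple can be turned back into a concrete frame name (values mirror the second doctest above):

    head, padding, tail = get_node_path("filename.#####.tif")
    frame = 25
    print("{}{:0{}d}{}".format(head, frame, padding, tail))
    # -> filename.00025.tif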


def get_nuke_imageio_settings():
-    return get_anatomy_settings(opnl.project_name)["imageio"]["nuke"]
+    return get_anatomy_settings(Context.project_name)["imageio"]["nuke"]


def get_created_node_imageio_setting(**kwarg):

@@ -103,14 +558,15 @@ def check_inventory_versions():
    and check if the node has the latest version. If not, then it will color
    it red.
    """
+    from .pipeline import parse_container

    # get all Loader nodes by avalon attribute metadata
    for each in nuke.allNodes():
-        container = avalon.nuke.parse_container(each)
+        container = parse_container(each)

        if container:
            node = nuke.toNode(container["objectName"])
-            avalon_knob_data = avalon.nuke.read(
-                node)
+            avalon_knob_data = read(node)

            # get representation from io
            representation = io.find_one({

@@ -141,7 +597,7 @@ def check_inventory_versions():
        max_version = max(versions)

        # check the available version and do match
-        # change color of node if not max verion
+        # change color of node if not max version
        if version.get("name") not in [max_version]:
            node["tile_color"].setValue(int("0xd84f20ff", 16))
        else:

@@ -163,11 +619,10 @@ def writes_version_sync():

    for each in nuke.allNodes(filter="Write"):
        # check if the node is avalon tracked
-        if opnl._node_tab_name not in each.knobs():
+        if _NODE_TAB_NAME not in each.knobs():
            continue

-        avalon_knob_data = avalon.nuke.read(
-            each)
+        avalon_knob_data = read(each)

        try:
            if avalon_knob_data['families'] not in ["render"]:

@@ -209,14 +664,14 @@ def check_subsetname_exists(nodes, subset_name):
        bool: True or False
    """
    return next((True for n in nodes
-                 if subset_name in avalon.nuke.read(n).get("subset", "")),
+                 if subset_name in read(n).get("subset", "")),
                False)


def get_render_path(node):
    ''' Generate render path from presets regarding avalon knob data
    '''
-    data = {'avalon': avalon.nuke.read(node)}
+    data = {'avalon': read(node)}
    data_preset = {
        "nodeclass": data['avalon']['family'],
        "families": [data['avalon']['families']],

@@ -236,10 +691,10 @@ def get_render_path(node):


def format_anatomy(data):
-    ''' Helping function for formating of anatomy paths
+    ''' Helper function for formatting of anatomy paths

    Arguments:
-        data (dict): dictionary with attributes used for formating
+        data (dict): dictionary with attributes used for formatting

    Return:
        path (str)

@@ -385,7 +840,7 @@ def create_write_node(name, data, input=None, prenodes=None,
    for knob in imageio_writes["knobs"]:
        _data.update({knob["name"]: knob["value"]})

-    _data = anlib.fix_data_for_node_create(_data)
+    _data = fix_data_for_node_create(_data)

    log.debug("_data: `{}`".format(_data))

@@ -462,11 +917,11 @@ def create_write_node(name, data, input=None, prenodes=None,
        else:
            now_node.setInput(0, prev_node)

-        # swith actual node to previous
+        # switch actual node to previous
        prev_node = now_node

    # creating write node
-    write_node = now_node = anlib.add_write_node(
+    write_node = now_node = add_write_node(
        "inside_{}".format(name),
        **_data
    )

@@ -474,7 +929,7 @@ def create_write_node(name, data, input=None, prenodes=None,
    # connect to previous node
    now_node.setInput(0, prev_node)

-    # swith actual node to previous
+    # switch actual node to previous
    prev_node = now_node

    now_node = nuke.createNode("Output", "name Output1")

@@ -484,8 +939,8 @@ def create_write_node(name, data, input=None, prenodes=None,
    now_node.setInput(0, prev_node)

    # imprinting group node
-    anlib.set_avalon_knob_data(GN, data["avalon"])
-    anlib.add_publish_knob(GN)
+    set_avalon_knob_data(GN, data["avalon"])
+    add_publish_knob(GN)
    add_rendering_knobs(GN, farm)

    if review:

@@ -516,7 +971,7 @@ def create_write_node(name, data, input=None, prenodes=None,
            GN.addKnob(knob)
        else:
            if "___" in _k_name:
-                # add devider
+                # add divider
                GN.addKnob(nuke.Text_Knob(""))
            else:
                # add linked knob by _k_name

@@ -537,7 +992,7 @@ def create_write_node(name, data, input=None, prenodes=None,
    add_deadline_tab(GN)

    # open our Tab as default
-    GN[opnl._node_tab_name].setFlag(0)
+    GN[_NODE_TAB_NAME].setFlag(0)

    # set tile color
    tile_color = _data.get("tile_color", "0xff0000ff")

@@ -663,7 +1118,7 @@ class WorkfileSettings(object):
                 root_node=None,
                 nodes=None,
                 **kwargs):
-        opnl._project = kwargs.get(
+        Context._project_doc = kwargs.get(
            "project") or io.find_one({"type": "project"})
        self._asset = kwargs.get("asset_name") or api.Session["AVALON_ASSET"]
        self._asset_entity = get_asset(self._asset)

@@ -725,7 +1180,7 @@ class WorkfileSettings(object):
        for i, n in enumerate(copy_inputs):
            nv.setInput(i, n)

-        # set coppied knobs
+        # set copied knobs
        for k, v in copy_knobs.items():
            print(k, v)
            nv[k].setValue(v)

@@ -804,8 +1259,6 @@ class WorkfileSettings(object):
        ''' Adds correct colorspace to write node dict

        '''
-        from avalon.nuke import read
-
        for node in nuke.allNodes(filter="Group"):

            # get data from avalon knob

@@ -862,7 +1315,7 @@ class WorkfileSettings(object):
    def set_reads_colorspace(self, read_clrs_inputs):
        """ Setting colorspace to Read nodes

-        Looping trought all read nodes and tries to set colorspace based
+        Loops through all read nodes and tries to set colorspace based
        on regex rules in presets
        """
        changes = {}

@@ -871,7 +1324,7 @@ class WorkfileSettings(object):
            if n.Class() != "Read":
                continue

-            # check if any colorspace presets for read is mathing
+            # check if any colorspace preset for read is matching
            preset_clrsp = None

            for input in read_clrs_inputs:

@@ -1005,7 +1458,7 @@ class WorkfileSettings(object):
            node['frame_range_lock'].setValue(True)

        # adding handle_start/end to root avalon knob
-        if not anlib.set_avalon_knob_data(self._root_node, {
+        if not set_avalon_knob_data(self._root_node, {
            "handleStart": int(handle_start),
            "handleEnd": int(handle_end)
        }):

@@ -1013,7 +1466,7 @@ class WorkfileSettings(object):

    def reset_resolution(self):
        """Set resolution to project resolution."""
-        log.info("Reseting resolution")
+        log.info("Resetting resolution")
        project = io.find_one({"type": "project"})
        asset = api.Session["AVALON_ASSET"]
        asset = io.find_one({"name": asset, "type": "asset"})

@@ -1089,6 +1542,8 @@ class WorkfileSettings(object):
        self.set_colorspace()

    def set_favorites(self):
+        from .utils import set_context_favorites
+
        work_dir = os.getenv("AVALON_WORKDIR")
        asset = os.getenv("AVALON_ASSET")
        favorite_items = OrderedDict()

@@ -1096,9 +1551,9 @@ class WorkfileSettings(object):
        # project
        # get project's root and split to parts
        projects_root = os.path.normpath(work_dir.split(
-            opnl.project_name)[0])
+            Context.project_name)[0])
        # add project name
-        project_dir = os.path.join(projects_root, opnl.project_name) + "/"
+        project_dir = os.path.join(projects_root, Context.project_name) + "/"
        # add to favorites
        favorite_items.update({"Project dir": project_dir.replace("\\", "/")})

@@ -1145,8 +1600,7 @@ def get_write_node_template_attr(node):
    '''
    # get avalon data from node
    data = dict()
-    data['avalon'] = avalon.nuke.read(
-        node)
+    data['avalon'] = read(node)
    data_preset = {
        "nodeclass": data['avalon']['family'],
        "families": [data['avalon']['families']],

@@ -1167,7 +1621,7 @@ def get_write_node_template_attr(node):
                    if k not in ["_id", "_previous"]}

    # fix badly encoded data
-    return anlib.fix_data_for_node_create(correct_data)
+    return fix_data_for_node_create(correct_data)


def get_dependent_nodes(nodes):

@@ -1274,13 +1728,53 @@ def find_free_space_to_paste_nodes(
    return xpos, ypos


+@contextlib.contextmanager
+def maintained_selection():
+    """Maintain selection during context
+
+    Example:
+        >>> with maintained_selection():
+        ...     node['selected'].setValue(True)
+        >>> print(node['selected'].value())
+        False
+    """
+    previous_selection = nuke.selectedNodes()
+    try:
+        yield
+    finally:
+        # deselect the current selection in case there is some
+        current_selection = nuke.selectedNodes()
+        [n['selected'].setValue(False) for n in current_selection]
+        # and select all previously selected nodes
+        if previous_selection:
+            [n['selected'].setValue(True) for n in previous_selection]
+
+
+def reset_selection():
+    """Deselect all selected nodes"""
+    for node in nuke.selectedNodes():
+        node["selected"].setValue(False)
+
+
+def select_nodes(nodes):
+    """Select all given nodes
+
+    Arguments:
+        nodes (list): nuke nodes to be selected
+    """
+    assert isinstance(nodes, (list, tuple)), "nodes has to be list or tuple"
+
+    for node in nodes:
+        node["selected"].setValue(True)
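Together these three helpers form the selection pattern used by the loaders further below; a hedged sketch of that pattern inside a live Nuke session (node names invented):

    with maintained_selection():
        reset_selection()
        select_nodes([nuke.toNode("Read1"), nuke.toNode("Read2")])
        nuke.nodeCopy("%clipboard%")  # operate only on the two Reads
    # the artist's original selection is restored here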


def launch_workfiles_app():
    '''Start the Workfiles tool after host launch
    '''
    from openpype.lib import (
        env_value_to_bool
    )
-    from avalon.nuke.pipeline import get_main_window
+    from .pipeline import get_main_window

    # get all important settings
    open_at_start = env_value_to_bool(

@@ -1291,8 +1785,8 @@ def launch_workfiles_app():
    if not open_at_start:
        return

-    if not opnl.workfiles_launched:
-        opnl.workfiles_launched = True
+    if not Context.workfiles_launched:
+        Context.workfiles_launched = True
        main_window = get_main_window()
        host_tools.show_workfiles(parent=main_window)

@@ -1378,7 +1872,7 @@ def recreate_instance(origin_node, avalon_data=None):
    knobs_wl = ["render", "publish", "review", "ypos",
                "use_limit", "first", "last"]
    # get data from avalon knobs
-    data = anlib.get_avalon_knob_data(
+    data = get_avalon_knob_data(
        origin_node)

    # add input data to avalon data

@@ -1494,3 +1988,45 @@ def dirmap_file_name_filter(file_name):
    if os.path.exists(dirmap_processor.file_name):
        return dirmap_processor.file_name
    return file_name
+
+
+# ------------------------------------
+# This function seems to be deprecated
+# ------------------------------------
+def ls_img_sequence(path):
+    """List all available coherent image sequences from path
+
+    Arguments:
+        path (str): path to one file of an image sequence
+
+    Returns:
+        data (dict): with nuke formatted path and frame ranges
+    """
+    file = os.path.basename(path)
+    dirpath = os.path.dirname(path)
+    base, ext = os.path.splitext(file)
+    name, padding = os.path.splitext(base)
+
+    # populate list of files
+    files = [
+        f for f in os.listdir(dirpath)
+        if name in f
+        if ext in f
+    ]
+
+    # create collection from list of files
+    collections, remainder = clique.assemble(files)
+
+    if len(collections) > 0:
+        head = collections[0].format("{head}")
+        padding = collections[0].format("{padding}") % 1
+        padding = "#" * len(padding)
+        tail = collections[0].format("{tail}")
+        file = head + padding + tail
+
+        return {
+            "path": os.path.join(dirpath, file).replace("\\", "/"),
+            "frames": collections[0].format("[{ranges}]")
+        }
+
+    return False
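An illustrative call on an assumed sequence on disk, showing the shape of the returned dict:

    seq = ls_img_sequence("/footage/plate/plate.1001.exr")  # hypothetical path
    if seq:
        print(seq["path"])    # e.g. /footage/plate/plate.####.exr
        print(seq["frames"])  # e.g. [1001-1100]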


@@ -1,166 +0,0 @@
import os
import nuke
from avalon.nuke.pipeline import get_main_window

from .lib import WorkfileSettings
from openpype.api import Logger, BuildWorkfile, get_current_project_settings
from openpype.tools.utils import host_tools


log = Logger().get_logger(__name__)

menu_label = os.environ["AVALON_LABEL"]
context_label = None


def change_context_label(*args):
    global context_label
    menubar = nuke.menu("Nuke")
    menu = menubar.findItem(menu_label)

    label = "{0}, {1}".format(
        os.environ["AVALON_ASSET"], os.environ["AVALON_TASK"]
    )

    rm_item = [
        (i, item) for i, item in enumerate(menu.items())
        if context_label in item.name()
    ][0]

    menu.removeItem(rm_item[1].name())

    context_action = menu.addCommand(
        label,
        index=(rm_item[0])
    )
    context_action.setEnabled(False)

    log.info("Task label changed from `{}` to `{}`".format(
        context_label, label))

    context_label = label


def install():
    from openpype.hosts.nuke.api import reload_config

    global context_label

    # uninstall original avalon menu
    uninstall()

    main_window = get_main_window()
    menubar = nuke.menu("Nuke")
    menu = menubar.addMenu(menu_label)

    label = "{0}, {1}".format(
        os.environ["AVALON_ASSET"], os.environ["AVALON_TASK"]
    )
    context_label = label
    context_action = menu.addCommand(label)
    context_action.setEnabled(False)

    menu.addSeparator()
    menu.addCommand(
        "Work Files...",
        lambda: host_tools.show_workfiles(parent=main_window)
    )

    menu.addSeparator()
    menu.addCommand(
        "Create...",
        lambda: host_tools.show_creator(parent=main_window)
    )
    menu.addCommand(
        "Load...",
        lambda: host_tools.show_loader(
            parent=main_window,
            use_context=True
        )
    )
    menu.addCommand(
        "Publish...",
        lambda: host_tools.show_publish(parent=main_window)
    )
    menu.addCommand(
        "Manage...",
        lambda: host_tools.show_scene_inventory(parent=main_window)
    )

    menu.addSeparator()
    menu.addCommand(
        "Set Resolution",
        lambda: WorkfileSettings().reset_resolution()
    )
    menu.addCommand(
        "Set Frame Range",
        lambda: WorkfileSettings().reset_frame_range_handles()
    )
    menu.addCommand(
        "Set Colorspace",
        lambda: WorkfileSettings().set_colorspace()
    )
    menu.addCommand(
        "Apply All Settings",
        lambda: WorkfileSettings().set_context_settings()
    )

    menu.addSeparator()
    menu.addCommand(
        "Build Workfile",
        lambda: BuildWorkfile().process()
    )

    menu.addSeparator()
    menu.addCommand(
        "Experimental tools...",
        lambda: host_tools.show_experimental_tools_dialog(parent=main_window)
    )

    # add reload pipeline only in debug mode
    if bool(os.getenv("NUKE_DEBUG")):
        menu.addSeparator()
        menu.addCommand("Reload Pipeline", reload_config)

    # adding shortcuts
    add_shortcuts_from_presets()


def uninstall():

    menubar = nuke.menu("Nuke")
    menu = menubar.findItem(menu_label)

    for item in menu.items():
        log.info("Removing menu item: {}".format(item.name()))
        menu.removeItem(item.name())


def add_shortcuts_from_presets():
    menubar = nuke.menu("Nuke")
    nuke_presets = get_current_project_settings()["nuke"]["general"]

    if nuke_presets.get("menu"):
        menu_label_mapping = {
            "manage": "Manage...",
            "create": "Create...",
            "load": "Load...",
            "build_workfile": "Build Workfile",
            "publish": "Publish..."
        }

        for command_name, shortcut_str in nuke_presets.get("menu").items():
            log.info("menu_name `{}` | menu_label `{}`".format(
                command_name, menu_label
            ))
            log.info("Adding Shortcut `{}` to `{}`".format(
                shortcut_str, command_name
            ))
            try:
                menu = menubar.findItem(menu_label)
                item_label = menu_label_mapping[command_name]
                menuitem = menu.findItem(item_label)
                menuitem.setShortcut(shortcut_str)
            except AttributeError as e:
                log.error(e)
421
openpype/hosts/nuke/api/pipeline.py
Normal file

@@ -0,0 +1,421 @@
import os
import importlib
from collections import OrderedDict

import nuke

import pyblish.api
import avalon.api
from avalon import pipeline

import openpype
from openpype.api import (
    Logger,
    BuildWorkfile,
    get_current_project_settings
)
from openpype.tools.utils import host_tools

from .command import viewer_update_and_undo_stop
from .lib import (
    add_publish_knob,
    WorkfileSettings,
    process_workfile_builder,
    launch_workfiles_app,
    check_inventory_versions,
    set_avalon_knob_data,
    read,
    Context
)

log = Logger.get_logger(__name__)

AVALON_CONFIG = os.getenv("AVALON_CONFIG", "pype")
HOST_DIR = os.path.dirname(os.path.abspath(openpype.hosts.nuke.__file__))
PLUGINS_DIR = os.path.join(HOST_DIR, "plugins")
PUBLISH_PATH = os.path.join(PLUGINS_DIR, "publish")
LOAD_PATH = os.path.join(PLUGINS_DIR, "load")
CREATE_PATH = os.path.join(PLUGINS_DIR, "create")
INVENTORY_PATH = os.path.join(PLUGINS_DIR, "inventory")

MENU_LABEL = os.environ["AVALON_LABEL"]


# registering pyblish gui regarding settings in presets
if os.getenv("PYBLISH_GUI", None):
    pyblish.api.register_gui(os.getenv("PYBLISH_GUI", None))


def get_main_window():
    """Acquire Nuke's main window"""
    if Context.main_window is None:
        from Qt import QtWidgets

        top_widgets = QtWidgets.QApplication.topLevelWidgets()
        name = "Foundry::UI::DockMainWindow"
        for widget in top_widgets:
            if (
                widget.inherits("QMainWindow")
                and widget.metaObject().className() == name
            ):
                Context.main_window = widget
                break
    return Context.main_window


def reload_config():
    """Attempt to reload pipeline at run-time.

    CAUTION: This is primarily for development and debugging purposes.

    """

    for module in (
        "{}.api".format(AVALON_CONFIG),
        "{}.hosts.nuke.api.actions".format(AVALON_CONFIG),
        "{}.hosts.nuke.api.menu".format(AVALON_CONFIG),
        "{}.hosts.nuke.api.plugin".format(AVALON_CONFIG),
        "{}.hosts.nuke.api.lib".format(AVALON_CONFIG),
    ):
        log.info("Reloading module: {}...".format(module))

        module = importlib.import_module(module)

        try:
            importlib.reload(module)
        except AttributeError as e:
            from importlib import reload
            log.warning("Cannot reload module: {}".format(e))
            reload(module)


def install():
    ''' Installing all requirements for Nuke host
    '''

    pyblish.api.register_host("nuke")

    log.info("Registering Nuke plug-ins..")
    pyblish.api.register_plugin_path(PUBLISH_PATH)
    avalon.api.register_plugin_path(avalon.api.Loader, LOAD_PATH)
    avalon.api.register_plugin_path(avalon.api.Creator, CREATE_PATH)
    avalon.api.register_plugin_path(avalon.api.InventoryAction, INVENTORY_PATH)

    # Register Avalon event for workfiles loading.
    avalon.api.on("workio.open_file", check_inventory_versions)
    avalon.api.on("taskChanged", change_context_label)

    pyblish.api.register_callback(
        "instanceToggled", on_pyblish_instance_toggled)
    workfile_settings = WorkfileSettings()
    # Disable all families except for the ones we explicitly want to see
    family_states = [
        "write",
        "review",
        "nukenodes",
        "model",
        "gizmo"
    ]

    avalon.api.data["familiesStateDefault"] = False
    avalon.api.data["familiesStateToggled"] = family_states

    # Set context settings.
    nuke.addOnCreate(workfile_settings.set_context_settings, nodeClass="Root")
    nuke.addOnCreate(workfile_settings.set_favorites, nodeClass="Root")
    nuke.addOnCreate(process_workfile_builder, nodeClass="Root")
    nuke.addOnCreate(launch_workfiles_app, nodeClass="Root")
    _install_menu()


def uninstall():
    '''Uninstalling host's integration
    '''
    log.info("Deregistering Nuke plug-ins..")
    pyblish.deregister_host("nuke")
    pyblish.api.deregister_plugin_path(PUBLISH_PATH)
    avalon.api.deregister_plugin_path(avalon.api.Loader, LOAD_PATH)
    avalon.api.deregister_plugin_path(avalon.api.Creator, CREATE_PATH)

    pyblish.api.deregister_callback(
        "instanceToggled", on_pyblish_instance_toggled)

    reload_config()
    _uninstall_menu()


def _install_menu():
    # uninstall original avalon menu
    main_window = get_main_window()
    menubar = nuke.menu("Nuke")
    menu = menubar.addMenu(MENU_LABEL)

    label = "{0}, {1}".format(
        os.environ["AVALON_ASSET"], os.environ["AVALON_TASK"]
    )
    Context.context_label = label
    context_action = menu.addCommand(label)
    context_action.setEnabled(False)

    menu.addSeparator()
    menu.addCommand(
        "Work Files...",
        lambda: host_tools.show_workfiles(parent=main_window)
    )

    menu.addSeparator()
    menu.addCommand(
        "Create...",
        lambda: host_tools.show_creator(parent=main_window)
    )
    menu.addCommand(
        "Load...",
        lambda: host_tools.show_loader(
            parent=main_window,
            use_context=True
        )
    )
    menu.addCommand(
        "Publish...",
        lambda: host_tools.show_publish(parent=main_window)
    )
    menu.addCommand(
        "Manage...",
        lambda: host_tools.show_scene_inventory(parent=main_window)
    )

    menu.addSeparator()
    menu.addCommand(
        "Set Resolution",
        lambda: WorkfileSettings().reset_resolution()
    )
    menu.addCommand(
        "Set Frame Range",
        lambda: WorkfileSettings().reset_frame_range_handles()
    )
    menu.addCommand(
        "Set Colorspace",
        lambda: WorkfileSettings().set_colorspace()
    )
    menu.addCommand(
        "Apply All Settings",
        lambda: WorkfileSettings().set_context_settings()
    )

    menu.addSeparator()
    menu.addCommand(
        "Build Workfile",
        lambda: BuildWorkfile().process()
    )

    menu.addSeparator()
    menu.addCommand(
        "Experimental tools...",
        lambda: host_tools.show_experimental_tools_dialog(parent=main_window)
    )

    # add reload pipeline only in debug mode
    if bool(os.getenv("NUKE_DEBUG")):
        menu.addSeparator()
        menu.addCommand("Reload Pipeline", reload_config)

    # adding shortcuts
    add_shortcuts_from_presets()


def _uninstall_menu():
    menubar = nuke.menu("Nuke")
    menu = menubar.findItem(MENU_LABEL)

    for item in menu.items():
        log.info("Removing menu item: {}".format(item.name()))
        menu.removeItem(item.name())


def change_context_label(*args):
    menubar = nuke.menu("Nuke")
    menu = menubar.findItem(MENU_LABEL)

    label = "{0}, {1}".format(
        os.environ["AVALON_ASSET"], os.environ["AVALON_TASK"]
    )

    rm_item = [
        (i, item) for i, item in enumerate(menu.items())
        if Context.context_label in item.name()
    ][0]

    menu.removeItem(rm_item[1].name())

    context_action = menu.addCommand(
        label,
        index=(rm_item[0])
    )
    context_action.setEnabled(False)

    log.info("Task label changed from `{}` to `{}`".format(
        Context.context_label, label))


def add_shortcuts_from_presets():
    menubar = nuke.menu("Nuke")
    nuke_presets = get_current_project_settings()["nuke"]["general"]

    if nuke_presets.get("menu"):
        menu_label_mapping = {
            "manage": "Manage...",
            "create": "Create...",
            "load": "Load...",
            "build_workfile": "Build Workfile",
            "publish": "Publish..."
        }

        for command_name, shortcut_str in nuke_presets.get("menu").items():
            log.info("menu_name `{}` | menu_label `{}`".format(
                command_name, MENU_LABEL
            ))
            log.info("Adding Shortcut `{}` to `{}`".format(
                shortcut_str, command_name
            ))
            try:
                menu = menubar.findItem(MENU_LABEL)
                item_label = menu_label_mapping[command_name]
                menuitem = menu.findItem(item_label)
                menuitem.setShortcut(shortcut_str)
            except AttributeError as e:
                log.error(e)


def on_pyblish_instance_toggled(instance, old_value, new_value):
    """Toggle node passthrough states on instance toggles."""

    log.info("instance toggle: {}, old_value: {}, new_value: {}".format(
        instance, old_value, new_value))

    # Whether instances should be passthrough based on new value

    with viewer_update_and_undo_stop():
        n = instance[0]
        try:
            n["publish"].value()
        except ValueError:
            n = add_publish_knob(n)
            log.info(" `Publish` knob was added to write node..")

        n["publish"].setValue(new_value)


def containerise(node,
                 name,
                 namespace,
                 context,
                 loader=None,
                 data=None):
    """Bundle `node` into an assembly and imprint it with metadata

    Containerisation enables tracking of version, author and origin
    for loaded assets.

    Arguments:
        node (nuke.Node): Nuke's node object to imprint as container
        name (str): Name of resulting assembly
        namespace (str): Namespace under which to host container
        context (dict): Asset information
        loader (str, optional): Name of node used to produce this container.

    Returns:
        node (nuke.Node): containerised nuke's node object

    """
    data = OrderedDict(
        [
            ("schema", "openpype:container-2.0"),
            ("id", pipeline.AVALON_CONTAINER_ID),
            ("name", name),
            ("namespace", namespace),
            ("loader", str(loader)),
            ("representation", context["representation"]["_id"]),
        ],

        **data or dict()
    )

    set_avalon_knob_data(node, data)

    return node
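A hedged sketch of a `containerise` call (the node, names and id are invented); note the `context` argument only needs the `representation._id` key used above:

    read_node = nuke.createNode("Read")
    containerise(
        read_node,
        name="plateMain",
        namespace="sh010",
        context={"representation": {"_id": "5f0c1e4b..."}},  # trimmed ObjectId
        loader="LoadClip",
    )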


def parse_container(node):
    """Return containerised data of a node

    Reads the imprinted data from `containerise`.

    Arguments:
        node (nuke.Node): Nuke's node object to read imprinted data

    Returns:
        dict: The container schema data for this container node.

    """
    data = read(node)

    # (TODO) Remove key validation when `ls` is re-implemented.
    #
    # If not all required data is present, return an empty container
    required = ["schema", "id", "name",
                "namespace", "loader", "representation"]
    if not all(key in data for key in required):
        return

    # Store the node's name
    data["objectName"] = node["name"].value()

    return data


def update_container(node, keys=None):
    """Return node with updated container data

    Arguments:
        node (nuke.Node): The node in Nuke to imprint as container,
        keys (dict, optional): data which should be updated

    Returns:
        node (nuke.Node): nuke node with updated container data

    Raises:
        TypeError on given an invalid container node

    """
    keys = keys or dict()

    container = parse_container(node)
    if not container:
        raise TypeError("Not a valid container node.")

    container.update(keys)
    node = set_avalon_knob_data(node, container)

    return node
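For example, a loader switching versions would bump only the imprinted representation id (variable names illustrative):

    node = update_container(node, {"representation": str(new_representation_id)})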


def ls():
    """List available containers.

    This function is used by the Container Manager in Nuke. You'll
    need to implement a for-loop that then *yields* one Container at
    a time.

    See the `container.json` schema for details on how it should look,
    and the Maya equivalent, which is in `avalon.maya.pipeline`
    """
    all_nodes = nuke.allNodes(recurseGroups=False)

    # TODO: add readgeo, readcamera, readimage
    nodes = [n for n in all_nodes]

    for n in nodes:
        log.debug("name: `{}`".format(n.name()))
        container = parse_container(n)
        if container:
            yield container
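Since `ls` is a generator, consumers iterate it directly; a minimal sketch of what the scene inventory effectively does:

    for container in ls():
        print(container["name"], container["representation"])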

@@ -2,23 +2,30 @@ import os
import random
import string

-import avalon.nuke
-from avalon.nuke import lib as anlib
-from avalon import api
+import nuke
+
+import avalon.api

from openpype.api import (
    get_current_project_settings,
    PypeCreatorMixin
)
-from .lib import check_subsetname_exists
-import nuke
+from .lib import (
+    Knobby,
+    check_subsetname_exists,
+    reset_selection,
+    maintained_selection,
+    set_avalon_knob_data,
+    add_publish_knob
+)


-class PypeCreator(PypeCreatorMixin, avalon.nuke.pipeline.Creator):
-    """Pype Nuke Creator class wrapper
-    """
+class OpenPypeCreator(PypeCreatorMixin, avalon.api.Creator):
+    """Pype Nuke Creator class wrapper"""
    node_color = "0xdfea5dff"

    def __init__(self, *args, **kwargs):
-        super(PypeCreator, self).__init__(*args, **kwargs)
+        super(OpenPypeCreator, self).__init__(*args, **kwargs)
        self.presets = get_current_project_settings()["nuke"]["create"].get(
            self.__class__.__name__, {}
        )

@@ -31,6 +38,38 @@ class PypeCreator(PypeCreatorMixin, avalon.nuke.pipeline.Creator):
            raise NameError("`{0}: {1}".format(__name__, msg))
        return

+    def process(self):
+        from nukescripts import autoBackdrop
+
+        instance = None
+
+        if (self.options or {}).get("useSelection"):
+
+            nodes = nuke.selectedNodes()
+            if not nodes:
+                nuke.message("Please select nodes that you "
+                             "wish to add to a container")
+                return
+
+            elif len(nodes) == 1:
+                # only one node is selected
+                instance = nodes[0]
+
+        if not instance:
+            # Not using selection or multiple nodes selected
+            bckd_node = autoBackdrop()
+            bckd_node["tile_color"].setValue(int(self.node_color, 16))
+            bckd_node["note_font_size"].setValue(24)
+            bckd_node["label"].setValue("[{}]".format(self.name))
+
+            instance = bckd_node
+
+        # add avalon knobs
+        set_avalon_knob_data(instance, self.data)
+        add_publish_knob(instance)
+
+        return instance
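For orientation, a hedged sketch of how this `process()` is reached; the constructor arguments are assumed to follow the usual `avalon.api.Creator(name, asset, options=None, data=None)` signature, and the names are invented:

    creator = OpenPypeCreator("nukenodesMain", "sh010",
                              options={"useSelection": True})
    instance_node = creator.process()  # a backdrop, or the single selected node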


def get_review_presets_config():
    settings = get_current_project_settings()

@@ -48,7 +87,7 @@ def get_review_presets_config():
    return [str(name) for name, _prop in outputs.items()]


-class NukeLoader(api.Loader):
+class NukeLoader(avalon.api.Loader):
    container_id_knob = "containerId"
    container_id = None

@@ -74,7 +113,7 @@ class NukeLoader(api.Loader):
            node[self.container_id_knob].setValue(source_id)
        else:
            HIDEN_FLAG = 0x00040000
-            _knob = anlib.Knobby(
+            _knob = Knobby(
                "String_Knob",
                self.container_id,
                flags=[

@@ -183,7 +222,7 @@ class ExporterReview(object):
        Returns:
            nuke.Node: copy node of Input Process node
        """
-        anlib.reset_selection()
+        reset_selection()
        ipn_orig = None
        for v in nuke.allNodes(filter="Viewer"):
            ip = v["input_process"].getValue()

@@ -196,7 +235,7 @@ class ExporterReview(object):
        # copy selected to clipboard
        nuke.nodeCopy("%clipboard%")
        # reset selection
-        anlib.reset_selection()
+        reset_selection()
        # paste node and selection is on it only
        nuke.nodePaste("%clipboard%")
        # assign to variable

@@ -209,7 +248,7 @@ class ExporterReview(object):
        nuke_imageio = opnlib.get_nuke_imageio_settings()

        # TODO: this is only securing backward compatibility; remove
-        # this once all projects's anotomy are upated to newer config
+        # this once all projects' anatomy settings are updated to newer config
        if "baking" in nuke_imageio.keys():
            return nuke_imageio["baking"]["viewerProcess"]
        else:

@@ -396,7 +435,7 @@ class ExporterReviewMov(ExporterReview):

    def save_file(self):
        import shutil
-        with anlib.maintained_selection():
+        with maintained_selection():
            self.log.info("Saving nodes as file... ")
            # create nk path
            path = os.path.splitext(self.path)[0] + ".nk"

@@ -477,7 +516,7 @@ class ExporterReviewMov(ExporterReview):
        write_node["file_type"].setValue(str(self.ext))

        # Knobs `meta_codec` and `mov64_codec` are not available on centos.
-        # TODO should't this come from settings on outputs?
+        # TODO shouldn't this come from settings on outputs?
        try:
            write_node["meta_codec"].setValue("ap4h")
        except Exception:


@@ -1,13 +1,14 @@
import os
import nuke
-from avalon.nuke import lib as anlib

from openpype.api import resources
+from .lib import maintained_selection


def set_context_favorites(favorites=None):
-    """ Addig favorite folders to nuke's browser
+    """ Adding favorite folders to nuke's browser

-    Argumets:
+    Arguments:
        favorites (dict): couples of {name:path}
    """
    favorites = favorites or {}

@@ -48,14 +49,16 @@ def gizmo_is_nuke_default(gizmo):
    return gizmo.filename().startswith(plug_dir)


-def bake_gizmos_recursively(in_group=nuke.Root()):
+def bake_gizmos_recursively(in_group=None):
    """Converting a gizmo to group

-    Argumets:
+    Arguments:
        is_group (nuke.Node)[optional]: group node or all nodes
    """
+    if in_group is None:
+        in_group = nuke.Root()
    # preserve selection after all is done
-    with anlib.maintained_selection():
+    with maintained_selection():
        # jump to the group
        with in_group:
            for node in nuke.allNodes():
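The `in_group=None` change above is worth noting: a default of `nuke.Root()` is evaluated once at import time, which may run before a script is even open, while resolving `None` inside the body defers the call to each invocation. The same pattern in isolation (function name illustrative):

    def process_group(in_group=None):
        # resolve the default at call time, not at import time
        if in_group is None:
            in_group = nuke.Root()
        return in_group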

55
openpype/hosts/nuke/api/workio.py
Normal file

@@ -0,0 +1,55 @@
"""Host API required Work Files tool"""
import os
import nuke
import avalon.api


def file_extensions():
    return avalon.api.HOST_WORKFILE_EXTENSIONS["nuke"]


def has_unsaved_changes():
    return nuke.root().modified()


def save_file(filepath):
    path = filepath.replace("\\", "/")
    nuke.scriptSaveAs(path)
    nuke.Root()["name"].setValue(path)
    nuke.Root()["project_directory"].setValue(os.path.dirname(path))
    nuke.Root().setModified(False)


def open_file(filepath):
    filepath = filepath.replace("\\", "/")

    # To remain in the same window, we have to clear the script and read
    # in the contents of the workfile.
    nuke.scriptClear()
    nuke.scriptReadFile(filepath)
    nuke.Root()["name"].setValue(filepath)
    nuke.Root()["project_directory"].setValue(os.path.dirname(filepath))
    nuke.Root().setModified(False)
    return True


def current_file():
    current_file = nuke.root().name()

    # Unsaved current file
    if current_file == 'Root':
        return None

    return os.path.normpath(current_file).replace("\\", "/")


def work_root(session):

    work_dir = session["AVALON_WORKDIR"]
    scene_dir = session.get("AVALON_SCENEDIR")
    if scene_dir:
        path = os.path.join(work_dir, scene_dir)
    else:
        path = work_dir

    return os.path.normpath(path).replace("\\", "/")
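An illustrative round trip through this Work Files API (the session dict and paths are invented):

    session = {"AVALON_WORKDIR": "/proj/sh010/work", "AVALON_SCENEDIR": "scenes"}
    root = work_root(session)  # -> /proj/sh010/work/scenes
    save_file(os.path.join(root, "sh010_comp_v001.nk"))
    assert not has_unsaved_changes()
    print(current_file())  # -> the saved script path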

@@ -1,9 +1,12 @@
-from avalon.nuke import lib as anlib
-from openpype.hosts.nuke.api import plugin
import nuke
+from openpype.hosts.nuke.api import plugin
+from openpype.hosts.nuke.api.lib import (
+    select_nodes,
+    set_avalon_knob_data
+)


-class CreateBackdrop(plugin.PypeCreator):
+class CreateBackdrop(plugin.OpenPypeCreator):
    """Add Publishable Backdrop"""

    name = "nukenodes"

@@ -25,14 +28,14 @@ class CreateBackdrop(plugin.PypeCreator):
        nodes = self.nodes

        if len(nodes) >= 1:
-            anlib.select_nodes(nodes)
+            select_nodes(nodes)
            bckd_node = autoBackdrop()
            bckd_node["name"].setValue("{}_BDN".format(self.name))
            bckd_node["tile_color"].setValue(int(self.node_color, 16))
            bckd_node["note_font_size"].setValue(24)
            bckd_node["label"].setValue("[{}]".format(self.name))
            # add avalon knobs
-            instance = anlib.set_avalon_knob_data(bckd_node, self.data)
+            instance = set_avalon_knob_data(bckd_node, self.data)

            return instance
        else:

@@ -48,6 +51,6 @@ class CreateBackdrop(plugin.PypeCreator):
            bckd_node["note_font_size"].setValue(24)
            bckd_node["label"].setValue("[{}]".format(self.name))
            # add avalon knobs
-            instance = anlib.set_avalon_knob_data(bckd_node, self.data)
+            instance = set_avalon_knob_data(bckd_node, self.data)

            return instance


@@ -1,9 +1,11 @@
-from avalon.nuke import lib as anlib
-from openpype.hosts.nuke.api import plugin
import nuke
+from openpype.hosts.nuke.api import plugin
+from openpype.hosts.nuke.api.lib import (
+    set_avalon_knob_data
+)


-class CreateCamera(plugin.PypeCreator):
+class CreateCamera(plugin.OpenPypeCreator):
    """Add Publishable Backdrop"""

    name = "camera"

@@ -36,7 +38,7 @@ class CreateCamera(plugin.PypeCreator):
            # change node color
            n["tile_color"].setValue(int(self.node_color, 16))
            # add avalon knobs
-            anlib.set_avalon_knob_data(n, data)
+            set_avalon_knob_data(n, data)
            return True
        else:
            msg = str("Please select nodes you "

@@ -49,5 +51,5 @@ class CreateCamera(plugin.PypeCreator):
        camera_node = nuke.createNode("Camera2")
        camera_node["tile_color"].setValue(int(self.node_color, 16))
        # add avalon knobs
-        instance = anlib.set_avalon_knob_data(camera_node, self.data)
+        instance = set_avalon_knob_data(camera_node, self.data)
        return instance


@@ -1,9 +1,14 @@
-from avalon.nuke import lib as anlib
-from openpype.hosts.nuke.api import plugin
import nuke

+from openpype.hosts.nuke.api import plugin
+from openpype.hosts.nuke.api.lib import (
+    maintained_selection,
+    select_nodes,
+    set_avalon_knob_data
+)

-class CreateGizmo(plugin.PypeCreator):
+
+class CreateGizmo(plugin.OpenPypeCreator):
    """Add Publishable "gizmo" group

    The name is symbolically gizmo as presumably

@@ -28,13 +33,13 @@ class CreateGizmo(plugin.PypeCreator):
        nodes = self.nodes
        self.log.info(len(nodes))
        if len(nodes) == 1:
-            anlib.select_nodes(nodes)
+            select_nodes(nodes)
            node = nodes[-1]
            # check if Group node
            if node.Class() in "Group":
                node["name"].setValue("{}_GZM".format(self.name))
                node["tile_color"].setValue(int(self.node_color, 16))
-                return anlib.set_avalon_knob_data(node, self.data)
+                return set_avalon_knob_data(node, self.data)
            else:
                msg = ("Please select a group node "
                       "you wish to publish as the gizmo")

@@ -42,13 +47,13 @@ class CreateGizmo(plugin.PypeCreator):
                nuke.message(msg)

        if len(nodes) >= 2:
-            anlib.select_nodes(nodes)
+            select_nodes(nodes)
            nuke.makeGroup()
            gizmo_node = nuke.selectedNode()
            gizmo_node["name"].setValue("{}_GZM".format(self.name))
            gizmo_node["tile_color"].setValue(int(self.node_color, 16))

-            # add sticky node wit guide
+            # add sticky node with guide
            with gizmo_node:
                sticky = nuke.createNode("StickyNote")
                sticky["label"].setValue(

@@ -57,21 +62,20 @@ class CreateGizmo(plugin.PypeCreator):
                    "- create User knobs on the group")

            # add avalon knobs
-            return anlib.set_avalon_knob_data(gizmo_node, self.data)
+            return set_avalon_knob_data(gizmo_node, self.data)

        else:
-            msg = ("Please select nodes you "
-                   "wish to add to the gizmo")
+            msg = "Please select nodes you wish to add to the gizmo"
            self.log.error(msg)
            nuke.message(msg)
            return
        else:
-            with anlib.maintained_selection():
+            with maintained_selection():
                gizmo_node = nuke.createNode("Group")
                gizmo_node["name"].setValue("{}_GZM".format(self.name))
                gizmo_node["tile_color"].setValue(int(self.node_color, 16))

-                # add sticky node wit guide
+                # add sticky node with guide
                with gizmo_node:
                    sticky = nuke.createNode("StickyNote")
                    sticky["label"].setValue(

@@ -80,4 +84,4 @@ class CreateGizmo(plugin.PypeCreator):
                    "- create User knobs on the group")

                # add avalon knobs
-                return anlib.set_avalon_knob_data(gizmo_node, self.data)
+                return set_avalon_knob_data(gizmo_node, self.data)
@ -1,9 +1,11 @@
|
|||
from avalon.nuke import lib as anlib
|
||||
from openpype.hosts.nuke.api import plugin
|
||||
import nuke
|
||||
from openpype.hosts.nuke.api import plugin
|
||||
from openpype.hosts.nuke.api.lib import (
|
||||
set_avalon_knob_data
|
||||
)
|
||||
|
||||
|
||||
class CreateModel(plugin.PypeCreator):
|
||||
class CreateModel(plugin.OpenPypeCreator):
|
||||
"""Add Publishable Model Geometry"""
|
||||
|
||||
name = "model"
|
||||
|
|
@ -68,7 +70,7 @@ class CreateModel(plugin.PypeCreator):
|
|||
# change node color
|
||||
n["tile_color"].setValue(int(self.node_color, 16))
|
||||
# add avalon knobs
|
||||
anlib.set_avalon_knob_data(n, data)
|
||||
set_avalon_knob_data(n, data)
|
||||
return True
|
||||
else:
|
||||
msg = str("Please select nodes you "
|
||||
|
|
@ -81,5 +83,5 @@ class CreateModel(plugin.PypeCreator):
|
|||
model_node = nuke.createNode("WriteGeo")
|
||||
model_node["tile_color"].setValue(int(self.node_color, 16))
|
||||
# add avalon knobs
|
||||
instance = anlib.set_avalon_knob_data(model_node, self.data)
|
||||
instance = set_avalon_knob_data(model_node, self.data)
|
||||
return instance
|
||||
|
|
|
|||
|
|
@ -1,13 +1,16 @@
|
|||
from collections import OrderedDict
|
||||
import avalon.api
|
||||
import avalon.nuke
|
||||
from openpype import api as pype
|
||||
from openpype.hosts.nuke.api import plugin
|
||||
|
||||
import nuke
|
||||
|
||||
import avalon.api
|
||||
from openpype import api as pype
|
||||
from openpype.hosts.nuke.api import plugin
|
||||
from openpype.hosts.nuke.api.lib import (
|
||||
set_avalon_knob_data
|
||||
)
|
||||
|
||||
class CrateRead(plugin.PypeCreator):
|
||||
|
||||
class CrateRead(plugin.OpenPypeCreator):
|
||||
# change this to template preset
|
||||
name = "ReadCopy"
|
||||
label = "Create Read Copy"
|
||||
|
|
@ -45,7 +48,7 @@ class CrateRead(plugin.PypeCreator):
|
|||
continue
|
||||
avalon_data = self.data
|
||||
avalon_data['subset'] = "{}".format(self.name)
|
||||
avalon.nuke.lib.set_avalon_knob_data(node, avalon_data)
|
||||
set_avalon_knob_data(node, avalon_data)
|
||||
node['tile_color'].setValue(16744935)
|
||||
count_reads += 1
|
||||
|
||||
|
|
|
|||
|
|
@ -1,11 +1,12 @@
|
|||
from collections import OrderedDict
|
||||
from openpype.hosts.nuke.api import (
|
||||
plugin,
|
||||
lib)
|
||||
|
||||
import nuke
|
||||
|
||||
from openpype.hosts.nuke.api import plugin
|
||||
from openpype.hosts.nuke.api.lib import create_write_node
|
||||
|
||||
class CreateWritePrerender(plugin.PypeCreator):
|
||||
|
||||
class CreateWritePrerender(plugin.OpenPypeCreator):
|
||||
# change this to template preset
|
||||
name = "WritePrerender"
|
||||
label = "Create Write Prerender"
|
||||
|
|
@ -98,7 +99,7 @@ class CreateWritePrerender(plugin.PypeCreator):
|
|||
|
||||
self.log.info("write_data: {}".format(write_data))
|
||||
|
||||
write_node = lib.create_write_node(
|
||||
write_node = create_write_node(
|
||||
self.data["subset"],
|
||||
write_data,
|
||||
input=selected_node,
|
||||
|
|
|
|||
|
|
@ -1,11 +1,12 @@
|
|||
from collections import OrderedDict
|
||||
from openpype.hosts.nuke.api import (
|
||||
plugin,
|
||||
lib)
|
||||
|
||||
import nuke
|
||||
|
||||
from openpype.hosts.nuke.api import plugin
|
||||
from openpype.hosts.nuke.api.lib import create_write_node
|
||||
|
||||
class CreateWriteRender(plugin.PypeCreator):
|
||||
|
||||
class CreateWriteRender(plugin.OpenPypeCreator):
|
||||
# change this to template preset
|
||||
name = "WriteRender"
|
||||
label = "Create Write Render"
|
||||
|
|
@ -119,7 +120,7 @@ class CreateWriteRender(plugin.PypeCreator):
|
|||
}
|
||||
]
|
||||
|
||||
write_node = lib.create_write_node(
|
||||
write_node = create_write_node(
|
||||
self.data["subset"],
|
||||
write_data,
|
||||
input=selected_node,
|
||||
|
|
|
|||
|
|
@ -1,11 +1,12 @@
|
|||
from collections import OrderedDict
|
||||
from openpype.hosts.nuke.api import (
|
||||
plugin,
|
||||
lib)
|
||||
|
||||
import nuke
|
||||
|
||||
from openpype.hosts.nuke.api import plugin
|
||||
from openpype.hosts.nuke.api.lib import create_write_node
|
||||
|
||||
class CreateWriteStill(plugin.PypeCreator):
|
||||
|
||||
class CreateWriteStill(plugin.OpenPypeCreator):
|
||||
# change this to template preset
|
||||
name = "WriteStillFrame"
|
||||
label = "Create Write Still Image"
|
||||
|
|
@ -108,7 +109,7 @@ class CreateWriteStill(plugin.PypeCreator):
|
|||
}
|
||||
]
|
||||
|
||||
write_node = lib.create_write_node(
|
||||
write_node = create_write_node(
|
||||
self.name,
|
||||
write_data,
|
||||
input=selected_node,
|
||||
|
|
|
|||
|
|
@ -1,7 +1,6 @@
|
|||
from avalon import api, style
|
||||
from avalon.nuke import lib as anlib
|
||||
from openpype.api import (
|
||||
Logger)
|
||||
from openpype.api import Logger
|
||||
from openpype.hosts.nuke.api.lib import set_avalon_knob_data
|
||||
|
||||
|
||||
class RepairOldLoaders(api.InventoryAction):
|
||||
|
|
@ -10,7 +9,7 @@ class RepairOldLoaders(api.InventoryAction):
|
|||
icon = "gears"
|
||||
color = style.colors.alert
|
||||
|
||||
log = Logger().get_logger(__name__)
|
||||
log = Logger.get_logger(__name__)
|
||||
|
||||
def process(self, containers):
|
||||
import nuke
|
||||
|
|
@ -34,4 +33,4 @@ class RepairOldLoaders(api.InventoryAction):
|
|||
})
|
||||
node["name"].setValue(new_name)
|
||||
# get data from avalon knob
|
||||
anlib.set_avalon_knob_data(node, cdata)
|
||||
set_avalon_knob_data(node, cdata)
|
||||
|
|
|
|||
|
|
@ -1,4 +1,5 @@
|
|||
from avalon import api
|
||||
from openpype.hosts.nuke.api.commands import viewer_update_and_undo_stop
|
||||
|
||||
|
||||
class SelectContainers(api.InventoryAction):
|
||||
|
|
@ -9,11 +10,10 @@ class SelectContainers(api.InventoryAction):
|
|||
|
||||
def process(self, containers):
|
||||
import nuke
|
||||
import avalon.nuke
|
||||
|
||||
nodes = [nuke.toNode(i["objectName"]) for i in containers]
|
||||
|
||||
with avalon.nuke.viewer_update_and_undo_stop():
|
||||
with viewer_update_and_undo_stop():
|
||||
# clear previous_selection
|
||||
[n['selected'].setValue(False) for n in nodes]
|
||||
# Select tool
|
||||
|
|
|
|||
|
|
@ -1,9 +1,18 @@
|
|||
from avalon import api, style, io
|
||||
import nuke
|
||||
import nukescripts
|
||||
from openpype.hosts.nuke.api import lib as pnlib
|
||||
from avalon.nuke import lib as anlib
|
||||
from avalon.nuke import containerise, update_container
|
||||
|
||||
from openpype.hosts.nuke.api.lib import (
|
||||
find_free_space_to_paste_nodes,
|
||||
maintained_selection,
|
||||
reset_selection,
|
||||
select_nodes,
|
||||
get_avalon_knob_data,
|
||||
set_avalon_knob_data
|
||||
)
|
||||
from openpype.hosts.nuke.api.commands import viewer_update_and_undo_stop
|
||||
from openpype.hosts.nuke.api import containerise, update_container
|
||||
|
||||
|
||||
class LoadBackdropNodes(api.Loader):
|
||||
"""Loading Published Backdrop nodes (workfile, nukenodes)"""
|
||||
|
|
@ -66,12 +75,12 @@ class LoadBackdropNodes(api.Loader):
|
|||
# Get mouse position
|
||||
n = nuke.createNode("NoOp")
|
||||
xcursor, ycursor = (n.xpos(), n.ypos())
|
||||
anlib.reset_selection()
|
||||
reset_selection()
|
||||
nuke.delete(n)
|
||||
|
||||
bdn_frame = 50
|
||||
|
||||
with anlib.maintained_selection():
|
||||
with maintained_selection():
|
||||
|
||||
# add group from nk
|
||||
nuke.nodePaste(file)
|
||||
|
|
@ -81,11 +90,13 @@ class LoadBackdropNodes(api.Loader):
|
|||
nodes = nuke.selectedNodes()
|
||||
|
||||
# get pointer position in DAG
|
||||
xpointer, ypointer = pnlib.find_free_space_to_paste_nodes(nodes, direction="right", offset=200+bdn_frame)
|
||||
xpointer, ypointer = find_free_space_to_paste_nodes(
|
||||
nodes, direction="right", offset=200 + bdn_frame
|
||||
)
|
||||
|
||||
# reset position to all nodes and replace inputs and output
|
||||
for n in nodes:
|
||||
anlib.reset_selection()
|
||||
reset_selection()
|
||||
xpos = (n.xpos() - xcursor) + xpointer
|
||||
ypos = (n.ypos() - ycursor) + ypointer
|
||||
n.setXYpos(xpos, ypos)
|
||||
|
|
@ -108,7 +119,7 @@ class LoadBackdropNodes(api.Loader):
|
|||
d.setInput(index, dot)
|
||||
|
||||
# remove Input node
|
||||
anlib.reset_selection()
|
||||
reset_selection()
|
||||
nuke.delete(n)
|
||||
continue
|
||||
|
||||
|
|
@ -127,15 +138,15 @@ class LoadBackdropNodes(api.Loader):
|
|||
dot.setInput(0, dep)
|
||||
|
||||
# remove Input node
|
||||
anlib.reset_selection()
|
||||
reset_selection()
|
||||
nuke.delete(n)
|
||||
continue
|
||||
else:
|
||||
new_nodes.append(n)
|
||||
|
||||
# reselect nodes with new Dot instead of Inputs and Output
|
||||
anlib.reset_selection()
|
||||
anlib.select_nodes(new_nodes)
|
||||
reset_selection()
|
||||
select_nodes(new_nodes)
|
||||
# place on backdrop
|
||||
bdn = nukescripts.autoBackdrop()
|
||||
|
||||
|
|
@ -208,16 +219,16 @@ class LoadBackdropNodes(api.Loader):
|
|||
# just in case we are in group lets jump out of it
|
||||
nuke.endGroup()
|
||||
|
||||
with anlib.maintained_selection():
|
||||
with maintained_selection():
|
||||
xpos = GN.xpos()
|
||||
ypos = GN.ypos()
|
||||
avalon_data = anlib.get_avalon_knob_data(GN)
|
||||
avalon_data = get_avalon_knob_data(GN)
|
||||
nuke.delete(GN)
|
||||
# add group from nk
|
||||
nuke.nodePaste(file)
|
||||
|
||||
GN = nuke.selectedNode()
|
||||
anlib.set_avalon_knob_data(GN, avalon_data)
|
||||
set_avalon_knob_data(GN, avalon_data)
|
||||
GN.setXYpos(xpos, ypos)
|
||||
GN["name"].setValue(object_name)
|
||||
|
||||
|
|
@ -235,7 +246,7 @@ class LoadBackdropNodes(api.Loader):
|
|||
else:
|
||||
GN["tile_color"].setValue(int(self.node_color, 16))
|
||||
|
||||
self.log.info("udated to version: {}".format(version.get("name")))
|
||||
self.log.info("updated to version: {}".format(version.get("name")))
|
||||
|
||||
return update_container(GN, data_imprint)
|
||||
|
||||
|
|
@ -243,7 +254,6 @@ class LoadBackdropNodes(api.Loader):
|
|||
self.update(container, representation)
|
||||
|
||||
def remove(self, container):
|
||||
from avalon.nuke import viewer_update_and_undo_stop
|
||||
node = nuke.toNode(container['objectName'])
|
||||
with viewer_update_and_undo_stop():
|
||||
nuke.delete(node)
|
||||
|
|
|
|||
|
|
@@ -1,8 +1,15 @@
-from avalon import api, io
-from avalon.nuke import lib as anlib
-from avalon.nuke import containerise, update_container
 import nuke
+
+from avalon import api, io
+from openpype.hosts.nuke.api import (
+    containerise,
+    update_container,
+    viewer_update_and_undo_stop
+)
+from openpype.hosts.nuke.api.lib import (
+    maintained_selection
+)


 class AlembicCameraLoader(api.Loader):
     """

@@ -43,7 +50,7 @@ class AlembicCameraLoader(api.Loader):
         # getting file path
         file = self.fname.replace("\\", "/")

-        with anlib.maintained_selection():
+        with maintained_selection():
             camera_node = nuke.createNode(
                 "Camera2",
                 "name {} file {} read_from_file True".format(

@@ -122,7 +129,7 @@ class AlembicCameraLoader(api.Loader):
         # getting file path
         file = api.get_representation_path(representation).replace("\\", "/")

-        with anlib.maintained_selection():
+        with maintained_selection():
             camera_node = nuke.toNode(object_name)
             camera_node['selected'].setValue(True)

@@ -156,7 +163,7 @@ class AlembicCameraLoader(api.Loader):
         # color node by correct color by actual version
         self.node_version_color(version, camera_node)

-        self.log.info("udated to version: {}".format(version.get("name")))
+        self.log.info("updated to version: {}".format(version.get("name")))

         return update_container(camera_node, data_imprint)

@@ -181,7 +188,6 @@ class AlembicCameraLoader(api.Loader):
         self.update(container, representation)

     def remove(self, container):
-        from avalon.nuke import viewer_update_and_undo_stop
         node = nuke.toNode(container['objectName'])
         with viewer_update_and_undo_stop():
             nuke.delete(node)
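
The load path above drives a Nuke camera directly from the published alembic file. A standalone sketch of the same `Camera2` creation, assuming a running Nuke session; the path and node name below are placeholders, not values from the commit:

    import nuke

    # Placeholder path; the loader resolves the real one from the
    # published representation.
    file = "/path/to/camera.abc".replace("\\", "/")
    camera_node = nuke.createNode(
        "Camera2",
        "name {} file {} read_from_file True".format("cameraMain", file)
    )
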
@@ -3,13 +3,13 @@ from avalon.vendor import qargparse
 from avalon import api, io

 from openpype.hosts.nuke.api.lib import (
-    get_imageio_input_colorspace
+    get_imageio_input_colorspace,
+    maintained_selection
 )
-from avalon.nuke import (
+from openpype.hosts.nuke.api import (
     containerise,
     update_container,
-    viewer_update_and_undo_stop,
-    maintained_selection
+    viewer_update_and_undo_stop
 )
 from openpype.hosts.nuke.api import plugin

@@ -270,7 +270,7 @@ class LoadClip(plugin.NukeLoader):
             read_node,
             updated_dict
         )
-        self.log.info("udated to version: {}".format(version.get("name")))
+        self.log.info("updated to version: {}".format(version.get("name")))

         if version_data.get("retime", None):
             self._make_retimes(read_node, version_data)

@@ -280,9 +280,6 @@ class LoadClip(plugin.NukeLoader):
         self.set_as_member(read_node)

     def remove(self, container):
-
-        from avalon.nuke import viewer_update_and_undo_stop
-
         read_node = nuke.toNode(container['objectName'])
         assert read_node.Class() == "Read", "Must be Read"

@@ -302,7 +299,7 @@ class LoadClip(plugin.NukeLoader):
         self._loader_shift(read_node, start_at_workfile)

     def _make_retimes(self, parent_node, version_data):
-        ''' Create all retime and timewarping nodes with coppied animation '''
+        ''' Create all retime and timewarping nodes with copied animation '''
         speed = version_data.get('speed', 1)
         time_warp_nodes = version_data.get('timewarps', [])
         last_node = None

@@ -378,4 +375,4 @@ class LoadClip(plugin.NukeLoader):
             "class_name": self.__class__.__name__
         }

-        return self.node_name_template.format(**name_data)
+        return self.node_name_template.format(**name_data)
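
The final hunk shows the same line removed and re-added, which most likely normalizes a missing trailing newline. The line itself builds the read node's name by formatting `node_name_template` with a `name_data` mapping. A hedged illustration of that pattern (the template string below is invented for the example; the plugin supplies its own):

    # Assumed template, for illustration only.
    node_name_template = "{class_name}_{ext}"
    name_data = {
        "class_name": "LoadClip",
        "ext": "exr",
    }
    print(node_name_template.format(**name_data))  # -> LoadClip_exr
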
@@ -1,7 +1,12 @@
-from avalon import api, style, io
-import nuke
 import json
 from collections import OrderedDict
+import nuke
+from avalon import api, style, io
+from openpype.hosts.nuke.api import (
+    containerise,
+    update_container,
+    viewer_update_and_undo_stop
+)


 class LoadEffects(api.Loader):

@@ -30,9 +35,6 @@ class LoadEffects(api.Loader):
         Returns:
             nuke node: containerised nuke node object
         """
-        # import dependencies
-        from avalon.nuke import containerise
-
         # get main variables
         version = context['version']
         version_data = version.get("data", {})

@@ -138,10 +140,6 @@ class LoadEffects(api.Loader):
             inputs:

         """
-
-        from avalon.nuke import (
-            update_container
-        )
         # get main variables
         # Get version from io
         version = io.find_one({

@@ -253,7 +251,7 @@ class LoadEffects(api.Loader):
         else:
             GN["tile_color"].setValue(int("0x3469ffff", 16))

-        self.log.info("udated to version: {}".format(version.get("name")))
+        self.log.info("updated to version: {}".format(version.get("name")))

     def connect_read_node(self, group_node, asset, subset):
         """

@@ -314,7 +312,7 @@ class LoadEffects(api.Loader):
     def byteify(self, input):
         """
         Converts unicode strings to strings
-        It goes trought all dictionary
+        It goes through all dictionary

         Arguments:
             input (dict/str): input

@@ -338,7 +336,6 @@ class LoadEffects(api.Loader):
         self.update(container, representation)

     def remove(self, container):
-        from avalon.nuke import viewer_update_and_undo_stop
         node = nuke.toNode(container['objectName'])
         with viewer_update_and_undo_stop():
             nuke.delete(node)
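
The `byteify` helper touched above exists because `json.loads` returns unicode on Python 2, while Nuke knob values expect plain strings. A minimal sketch of such a recursive converter, matching the behaviour the docstring describes (assumed implementation, not copied from the commit):

    def byteify(data):
        # Recursively convert unicode to str in dicts, lists and scalars.
        if isinstance(data, dict):
            return {byteify(k): byteify(v) for k, v in data.items()}
        if isinstance(data, list):
            return [byteify(item) for item in data]
        if isinstance(data, str):
            return str(data)
        return data
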
@@ -1,8 +1,15 @@
-from avalon import api, style, io
-import nuke
 import json
 from collections import OrderedDict
+
+import nuke
+
+from avalon import api, style, io
 from openpype.hosts.nuke.api import lib
+from openpype.hosts.nuke.api import (
+    containerise,
+    update_container,
+    viewer_update_and_undo_stop
+)


 class LoadEffectsInputProcess(api.Loader):

@@ -30,8 +37,6 @@ class LoadEffectsInputProcess(api.Loader):
         Returns:
             nuke node: containerised nuke node object
         """
-        # import dependencies
-        from avalon.nuke import containerise

         # get main variables
         version = context['version']

@@ -142,9 +147,6 @@ class LoadEffectsInputProcess(api.Loader):

         """

-        from avalon.nuke import (
-            update_container
-        )
         # get main variables
         # Get version from io
         version = io.find_one({

@@ -258,7 +260,7 @@ class LoadEffectsInputProcess(api.Loader):
         else:
             GN["tile_color"].setValue(int("0x3469ffff", 16))

-        self.log.info("udated to version: {}".format(version.get("name")))
+        self.log.info("updated to version: {}".format(version.get("name")))

     def connect_active_viewer(self, group_node):
         """

@@ -331,7 +333,7 @@ class LoadEffectsInputProcess(api.Loader):
     def byteify(self, input):
         """
         Converts unicode strings to strings
-        It goes trought all dictionary
+        It goes through all dictionary

         Arguments:
             input (dict/str): input

@@ -355,7 +357,6 @@ class LoadEffectsInputProcess(api.Loader):
         self.update(container, representation)

     def remove(self, container):
-        from avalon.nuke import viewer_update_and_undo_stop
         node = nuke.toNode(container['objectName'])
         with viewer_update_and_undo_stop():
             nuke.delete(node)
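
Every loader in this commit now takes `viewer_update_and_undo_stop` from a module-level import instead of a function-local one. The resulting `remove` pattern, sketched outside a plugin class and assuming a live Nuke session plus a container mapping with an `objectName` key:

    import nuke
    from openpype.hosts.nuke.api import viewer_update_and_undo_stop

    def remove(container):
        # Delete the containerised node while viewer refreshes and the
        # undo stack are suspended.
        node = nuke.toNode(container["objectName"])
        with viewer_update_and_undo_stop():
            nuke.delete(node)
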
@@ -1,7 +1,15 @@
-from avalon import api, style, io
 import nuke
-from avalon.nuke import lib as anlib
-from avalon.nuke import containerise, update_container
+from avalon import api, style, io
+from openpype.hosts.nuke.api.lib import (
+    maintained_selection,
+    get_avalon_knob_data,
+    set_avalon_knob_data
+)
+from openpype.hosts.nuke.api import (
+    containerise,
+    update_container,
+    viewer_update_and_undo_stop
+)


 class LoadGizmo(api.Loader):

@@ -61,7 +69,7 @@ class LoadGizmo(api.Loader):
         # just in case we are in group lets jump out of it
         nuke.endGroup()

-        with anlib.maintained_selection():
+        with maintained_selection():
             # add group from nk
             nuke.nodePaste(file)

@@ -122,16 +130,16 @@ class LoadGizmo(api.Loader):
         # just in case we are in group lets jump out of it
         nuke.endGroup()

-        with anlib.maintained_selection():
+        with maintained_selection():
             xpos = GN.xpos()
             ypos = GN.ypos()
-            avalon_data = anlib.get_avalon_knob_data(GN)
+            avalon_data = get_avalon_knob_data(GN)
             nuke.delete(GN)
             # add group from nk
             nuke.nodePaste(file)

             GN = nuke.selectedNode()
-            anlib.set_avalon_knob_data(GN, avalon_data)
+            set_avalon_knob_data(GN, avalon_data)
             GN.setXYpos(xpos, ypos)
             GN["name"].setValue(object_name)

@@ -149,7 +157,7 @@ class LoadGizmo(api.Loader):
         else:
             GN["tile_color"].setValue(int(self.node_color, 16))

-        self.log.info("udated to version: {}".format(version.get("name")))
+        self.log.info("updated to version: {}".format(version.get("name")))

         return update_container(GN, data_imprint)

@@ -157,7 +165,6 @@ class LoadGizmo(api.Loader):
         self.update(container, representation)

     def remove(self, container):
-        from avalon.nuke import viewer_update_and_undo_stop
         node = nuke.toNode(container['objectName'])
         with viewer_update_and_undo_stop():
             nuke.delete(node)
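
The `update` hunk above keeps the container alive across a node swap: avalon knob data and the DAG position are read off the old group, the group is re-pasted from the `.nk` file, and both are written back. A sketch of that round trip using the imports this commit introduces (`swap_group` is an illustrative name):

    import nuke
    from openpype.hosts.nuke.api.lib import (
        maintained_selection,
        get_avalon_knob_data,
        set_avalon_knob_data
    )

    def swap_group(GN, file, object_name):
        # Preserve metadata and position while replacing the node contents.
        with maintained_selection():
            xpos, ypos = GN.xpos(), GN.ypos()
            avalon_data = get_avalon_knob_data(GN)
            nuke.delete(GN)
            nuke.nodePaste(file)
            GN = nuke.selectedNode()
            set_avalon_knob_data(GN, avalon_data)
            GN.setXYpos(xpos, ypos)
            GN["name"].setValue(object_name)
        return GN
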
@@ -1,8 +1,16 @@
-from avalon import api, style, io
 import nuke
-from openpype.hosts.nuke.api import lib as pnlib
-from avalon.nuke import lib as anlib
-from avalon.nuke import containerise, update_container
+from avalon import api, style, io
+from openpype.hosts.nuke.api.lib import (
+    maintained_selection,
+    create_backdrop,
+    get_avalon_knob_data,
+    set_avalon_knob_data
+)
+from openpype.hosts.nuke.api import (
+    containerise,
+    update_container,
+    viewer_update_and_undo_stop
+)


 class LoadGizmoInputProcess(api.Loader):

@@ -62,7 +70,7 @@ class LoadGizmoInputProcess(api.Loader):
         # just in case we are in group lets jump out of it
         nuke.endGroup()

-        with anlib.maintained_selection():
+        with maintained_selection():
             # add group from nk
             nuke.nodePaste(file)

@@ -128,16 +136,16 @@ class LoadGizmoInputProcess(api.Loader):
         # just in case we are in group lets jump out of it
         nuke.endGroup()

-        with anlib.maintained_selection():
+        with maintained_selection():
             xpos = GN.xpos()
             ypos = GN.ypos()
-            avalon_data = anlib.get_avalon_knob_data(GN)
+            avalon_data = get_avalon_knob_data(GN)
             nuke.delete(GN)
             # add group from nk
             nuke.nodePaste(file)

             GN = nuke.selectedNode()
-            anlib.set_avalon_knob_data(GN, avalon_data)
+            set_avalon_knob_data(GN, avalon_data)
             GN.setXYpos(xpos, ypos)
             GN["name"].setValue(object_name)

@@ -155,7 +163,7 @@ class LoadGizmoInputProcess(api.Loader):
         else:
             GN["tile_color"].setValue(int(self.node_color, 16))

-        self.log.info("udated to version: {}".format(version.get("name")))
+        self.log.info("updated to version: {}".format(version.get("name")))

         return update_container(GN, data_imprint)

@@ -197,8 +205,12 @@ class LoadGizmoInputProcess(api.Loader):
         viewer["input_process_node"].setValue(group_node_name)

         # put backdrop under
-        pnlib.create_backdrop(label="Input Process", layer=2,
-                              nodes=[viewer, group_node], color="0x7c7faaff")
+        create_backdrop(
+            label="Input Process",
+            layer=2,
+            nodes=[viewer, group_node],
+            color="0x7c7faaff"
+        )

         return True

@@ -210,7 +222,7 @@ class LoadGizmoInputProcess(api.Loader):
     def byteify(self, input):
         """
         Converts unicode strings to strings
-        It goes trought all dictionary
+        It goes through all dictionary

         Arguments:
             input (dict/str): input

@@ -234,7 +246,6 @@ class LoadGizmoInputProcess(api.Loader):
         self.update(container, representation)

     def remove(self, container):
-        from avalon.nuke import viewer_update_and_undo_stop
         node = nuke.toNode(container['objectName'])
         with viewer_update_and_undo_stop():
             nuke.delete(node)
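
The reflowed `create_backdrop` call above places the active viewer and the loaded group under one labelled backdrop so the input-process chain stays visually grouped in the DAG. Usage as introduced here, assuming `viewer` and `group_node` already exist in the session:

    from openpype.hosts.nuke.api.lib import create_backdrop

    # Labelled backdrop behind the viewer and the loaded group node.
    create_backdrop(
        label="Input Process",
        layer=2,
        nodes=[viewer, group_node],
        color="0x7c7faaff"
    )
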
Some files were not shown because too many files have changed in this diff.