mirror of https://github.com/ynput/ayon-core.git
synced 2026-01-02 08:54:53 +01:00

Merge branch 'develop' into feature/project_manager

This commit is contained in commit 82634e604b
179 changed files with 5867 additions and 2663 deletions
1 .gitignore (vendored)

@@ -64,7 +64,6 @@ coverage.xml
 .hypothesis/
 .pytest_cache/
-
 
 # Node JS packages
 ##################
 node_modules
@@ -285,7 +285,7 @@ class BootstrapRepos:
         """Get version of local OpenPype."""

         version = {}
-        path = Path(os.path.dirname(__file__)).parent / "openpype" / "version.py"
+        path = Path(os.environ["OPENPYPE_ROOT"]) / "openpype" / "version.py"
         with open(path, "r") as fp:
             exec(fp.read(), version)
         return version["__version__"]
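The hunk above swaps a path derived from __file__ for one rooted at the OPENPYPE_ROOT environment variable. A minimal standalone sketch of the same version-reading technique; the environment variable name and the openpype/version.py layout are assumptions taken from the hunk, not a general API:

    import os
    from pathlib import Path

    def read_version(root_env="OPENPYPE_ROOT"):
        # Execute version.py in an empty namespace and pull out __version__.
        namespace = {}
        path = Path(os.environ[root_env]) / "openpype" / "version.py"
        with open(path, "r") as fp:
            exec(fp.read(), namespace)
        return namespace["__version__"]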
BIN igniter/openpype.icns (new file)
Binary file not shown.
@@ -130,7 +130,7 @@ def validate_mongo_connection(cnx: str) -> (bool, str):
         mongo_args["port"] = int(port)

     try:
-        client = MongoClient(**mongo_args)
+        client = MongoClient(cnx)
         client.server_info()
         client.close()
     except ServerSelectionTimeoutError as e:
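Connection validation with pymongo generally follows the pattern of this hunk: construct a client, force a round trip with server_info(), and catch the selection timeout. A small hedged sketch (the timeout value is an arbitrary choice, not from the source):

    from pymongo import MongoClient
    from pymongo.errors import ServerSelectionTimeoutError

    def validate_mongo_connection(cnx):
        # Returns (ok, message). server_info() forces an actual round trip,
        # so an unreachable server raises ServerSelectionTimeoutError.
        try:
            client = MongoClient(cnx, serverSelectionTimeoutMS=2000)
            client.server_info()
            client.close()
        except ServerSelectionTimeoutError as e:
            return False, str(e)
        return True, "connection is successful"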
50 inno_setup.iss (new file)

@@ -0,0 +1,50 @@
; Script generated by the Inno Setup Script Wizard.
; SEE THE DOCUMENTATION FOR DETAILS ON CREATING INNO SETUP SCRIPT FILES!

#define MyAppName "OpenPype"
#define Build GetEnv("BUILD_DIR")
#define AppVer GetEnv("BUILD_VERSION")

[Setup]
; NOTE: The value of AppId uniquely identifies this application. Do not use the same AppId value in installers for other applications.
; (To generate a new GUID, click Tools | Generate GUID inside the IDE.)
AppId={{B9E9DF6A-5BDA-42DD-9F35-C09D564C4D93}
AppName={#MyAppName}
AppVersion={#AppVer}
AppVerName={#MyAppName} version {#AppVer}
AppPublisher=Orbi Tools s.r.o
AppPublisherURL=http://pype.club
AppSupportURL=http://pype.club
AppUpdatesURL=http://pype.club
DefaultDirName={autopf}\{#MyAppName}
DisableProgramGroupPage=yes
OutputBaseFilename={#MyAppName}-{#AppVer}-install
AllowCancelDuringInstall=yes
; Uncomment the following line to run in non administrative install mode (install for current user only.)
;PrivilegesRequired=lowest
PrivilegesRequiredOverridesAllowed=dialog
SetupIconFile=igniter\openpype.ico
OutputDir=build\
Compression=lzma
SolidCompression=yes
WizardStyle=modern

[Languages]
Name: "english"; MessagesFile: "compiler:Default.isl"

[Tasks]
Name: "desktopicon"; Description: "{cm:CreateDesktopIcon}"; GroupDescription: "{cm:AdditionalIcons}"; Flags: unchecked

[Files]
Source: "build\{#build}\*"; DestDir: "{app}"; Flags: ignoreversion recursesubdirs createallsubdirs
; NOTE: Don't use "Flags: ignoreversion" on any shared system files

[Icons]
Name: "{autoprograms}\{#MyAppName}"; Filename: "{app}\openpype_gui.exe"
Name: "{autodesktop}\{#MyAppName}"; Filename: "{app}\openpype_gui.exe"; Tasks: desktopicon

[Run]
Filename: "{app}\openpype_gui.exe"; Description: "{cm:LaunchProgram,OpenPype}"; Flags: nowait postinstall skipifsilent
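The script reads BUILD_DIR and BUILD_VERSION from the environment, so a build step has to set them before invoking the Inno Setup compiler. A hypothetical driver sketch; the ISCC path and both values are assumptions, adjust to the local install and build output:

    import os
    import subprocess

    env = dict(os.environ)
    env["BUILD_DIR"] = "exe.win-amd64-3.7"   # hypothetical build folder name
    env["BUILD_VERSION"] = "3.0.0"           # hypothetical version string

    # Path to the Inno Setup command-line compiler is machine specific.
    iscc = r"C:\Program Files (x86)\Inno Setup 6\ISCC.exe"
    subprocess.check_call([iscc, "inno_setup.iss"], env=env)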
@@ -5,7 +5,7 @@ import logging
 from avalon import io
 from avalon import api as avalon
 from avalon.vendor import Qt
-from openpype import lib
+from openpype import lib, api
 import pyblish.api as pyblish
 import openpype.hosts.aftereffects
@@ -81,3 +81,69 @@ def uninstall():
 def on_pyblish_instance_toggled(instance, old_value, new_value):
     """Toggle layer visibility on instance toggles."""
     instance[0].Visible = new_value


 def get_asset_settings():
     """Get settings on current asset from database.

     Returns:
         dict: Scene data.

     """
     asset_data = lib.get_asset()["data"]
     fps = asset_data.get("fps")
     frame_start = asset_data.get("frameStart")
     frame_end = asset_data.get("frameEnd")
     handle_start = asset_data.get("handleStart")
     handle_end = asset_data.get("handleEnd")
     resolution_width = asset_data.get("resolutionWidth")
     resolution_height = asset_data.get("resolutionHeight")
     duration = (frame_end - frame_start + 1) + handle_start + handle_end
     entity_type = asset_data.get("entityType")

     scene_data = {
         "fps": fps,
         "frameStart": frame_start,
         "frameEnd": frame_end,
         "handleStart": handle_start,
         "handleEnd": handle_end,
         "resolutionWidth": resolution_width,
         "resolutionHeight": resolution_height,
         "duration": duration
     }

     try:
         # temporary, in pype3 replace with api.get_current_project_settings
         skip_resolution_check = (
             api.get_current_project_settings()
             ["plugins"]
             ["aftereffects"]
             ["publish"]
             ["ValidateSceneSettings"]
             ["skip_resolution_check"]
         )
         skip_timelines_check = (
             api.get_current_project_settings()
             ["plugins"]
             ["aftereffects"]
             ["publish"]
             ["ValidateSceneSettings"]
             ["skip_timelines_check"]
         )
     except KeyError:
         skip_resolution_check = ['*']
         skip_timelines_check = ['*']

     if os.getenv('AVALON_TASK') in skip_resolution_check or \
             '*' in skip_resolution_check:
         scene_data.pop("resolutionWidth")
         scene_data.pop("resolutionHeight")

     if entity_type in skip_timelines_check or '*' in skip_timelines_check:
         scene_data.pop('fps', None)
         scene_data.pop('frameStart', None)
         scene_data.pop('frameEnd', None)
         scene_data.pop('handleStart', None)
         scene_data.pop('handleEnd', None)

     return scene_data
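The deep bracket chain above relies on KeyError as a control-flow signal. An alternative sketch using chained .get calls with defaults; the key names mirror the hunk, but the settings layout itself is an assumption:

    def read_skip_lists(settings, defaults=("*",)):
        # Walk the nested settings dict defensively instead of relying on
        # a KeyError fallback; every key here mirrors the hunk above.
        plugin_settings = (
            settings.get("plugins", {})
            .get("aftereffects", {})
            .get("publish", {})
            .get("ValidateSceneSettings", {})
        )
        skip_resolution = plugin_settings.get(
            "skip_resolution_check", list(defaults))
        skip_timelines = plugin_settings.get(
            "skip_timelines_check", list(defaults))
        return skip_resolution, skip_timelines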
@@ -12,6 +12,7 @@ class AERenderInstance(RenderInstance):
     # extend generic, composition name is needed
     comp_name = attr.ib(default=None)
     comp_id = attr.ib(default=None)
+    fps = attr.ib(default=None)


 class CollectAERender(abstract_collect_render.AbstractCollectRender):

@@ -45,6 +46,7 @@ class CollectAERender(abstract_collect_render.AbstractCollectRender):
             raise ValueError("Couldn't find id, unable to publish. " +
                              "Please recreate instance.")
         item_id = inst["members"][0]

+        work_area_info = self.stub.get_work_area(int(item_id))

         if not work_area_info:

@@ -57,6 +59,8 @@ class CollectAERender(abstract_collect_render.AbstractCollectRender):
         frameEnd = round(work_area_info.workAreaStart +
                          float(work_area_info.workAreaDuration) *
                          float(work_area_info.frameRate)) - 1
+        fps = work_area_info.frameRate
+        # TODO add resolution when supported by extension

         if inst["family"] == "render" and inst["active"]:
             instance = AERenderInstance(

@@ -86,7 +90,8 @@ class CollectAERender(abstract_collect_render.AbstractCollectRender):
                 frameStart=frameStart,
                 frameEnd=frameEnd,
                 frameStep=1,
-                toBeRenderedOn='deadline'
+                toBeRenderedOn='deadline',
+                fps=fps
             )

         comp = compositions_by_id.get(int(item_id))

@@ -102,7 +107,6 @@ class CollectAERender(abstract_collect_render.AbstractCollectRender):
             instances.append(instance)

-        self.log.debug("instances::{}".format(instances))
         return instances

     def get_expected_files(self, render_instance):
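The first hunk extends an attrs-based data class with a new field. A minimal sketch of that subclassing pattern; the class names and fields here are placeholders, not the project's real definitions:

    import attr

    @attr.s
    class RenderInstanceSketch(object):
        # base fields every render instance carries (placeholder set)
        family = attr.ib(default="render")
        frameStart = attr.ib(default=None)
        frameEnd = attr.ib(default=None)

    @attr.s
    class AERenderInstanceSketch(RenderInstanceSketch):
        # host-specific extensions, mirroring the hunk above
        comp_name = attr.ib(default=None)
        comp_id = attr.ib(default=None)
        fps = attr.ib(default=None)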
@@ -0,0 +1,110 @@
# -*- coding: utf-8 -*-
"""Validate scene settings."""
import os

import pyblish.api

from avalon import aftereffects

import openpype.hosts.aftereffects.api as api

stub = aftereffects.stub()


class ValidateSceneSettings(pyblish.api.InstancePlugin):
    """
    Ensures that Composition Settings (right mouse on comp) are the same as
    in FTrack on the task.

    By default checks only duration: how many frames should be rendered.
    Compares:
        Frame start - Frame end + 1 from FTrack
    against
        Duration in Composition Settings.

    If this complains:
        Check the error message for where the discrepancy is.
        Check the FTrack task 'pype' section of task attributes for expected
        values.
        Check/modify rendered Composition Settings.

    If you know what you are doing, run publishing again and uncheck this
    validation before the Validation phase.
    """

    """
    Dev docu:
    Could be configured by 'presets/plugins/aftereffects/publish'

    skip_timelines_check - fill task name for which to skip validation of
        frameStart
        frameEnd
        fps
        handleStart
        handleEnd
    skip_resolution_check - fill entity type ('asset') to skip validation of
        resolutionWidth
        resolutionHeight
        TODO support in extension is missing for now

    By default validates duration (how many frames should be published).
    """

    order = pyblish.api.ValidatorOrder
    label = "Validate Scene Settings"
    families = ["render.farm"]
    hosts = ["aftereffects"]
    optional = True

    skip_timelines_check = ["*"]  # * >> skip for all
    skip_resolution_check = ["*"]

    def process(self, instance):
        """Plugin entry point."""
        expected_settings = api.get_asset_settings()
        self.log.info("expected_settings::{}".format(expected_settings))

        # handle case where ftrack uses only two decimal places
        # 23.976023976023978 vs. 23.98
        fps = instance.data.get("fps")
        if fps:
            if isinstance(fps, float):
                fps = float(
                    "{:.2f}".format(fps))
            expected_settings["fps"] = fps

        duration = instance.data.get("frameEndHandle") - \
            instance.data.get("frameStartHandle") + 1

        current_settings = {
            "fps": fps,
            "frameStartHandle": instance.data.get("frameStartHandle"),
            "frameEndHandle": instance.data.get("frameEndHandle"),
            "resolutionWidth": instance.data.get("resolutionWidth"),
            "resolutionHeight": instance.data.get("resolutionHeight"),
            "duration": duration
        }
        self.log.info("current_settings:: {}".format(current_settings))

        invalid_settings = []
        for key, value in expected_settings.items():
            if value != current_settings[key]:
                invalid_settings.append(
                    "{} expected: {} found: {}".format(key, value,
                                                       current_settings[key])
                )

        if ((expected_settings.get("handleStart")
                or expected_settings.get("handleEnd"))
                and invalid_settings):
            msg = "Handles included in calculation. Remove handles in DB " +\
                  "or extend frame range in Composition Setting."
            # entries are plain strings, so append the hint to the last one
            invalid_settings[-1] += " " + msg

        msg = "Found invalid settings:\n{}".format(
            "\n".join(invalid_settings)
        )
        assert not invalid_settings, msg
        assert os.path.exists(instance.data.get("source")), (
            "Scene file not found (saved under wrong name)"
        )
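The validation core is just a dict-versus-dict comparison. A compact sketch of the same idea outside pyblish, with made-up sample values:

    def diff_settings(expected, current):
        # Report keys whose current value differs from the expected one.
        problems = []
        for key, value in expected.items():
            found = current.get(key)
            if value != found:
                problems.append(
                    "{} expected: {} found: {}".format(key, value, found))
        return problems

    print(diff_settings({"fps": 25.0, "duration": 100},
                        {"fps": 24.0, "duration": 100}))
    # -> ['fps expected: 25.0 found: 24.0']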
@@ -9,7 +9,7 @@ from avalon import api
 import avalon.blender
 from openpype.api import PypeCreatorMixin

-VALID_EXTENSIONS = [".blend", ".json"]
+VALID_EXTENSIONS = [".blend", ".json", ".abc"]


 def asset_name(
@@ -1,4 +1,5 @@
 import os
+import re
 import subprocess
 from openpype.lib import PreLaunchHook

@@ -31,10 +32,46 @@ class InstallPySideToBlender(PreLaunchHook):

     def inner_execute(self):
         # Get blender's python directory
+        version_regex = re.compile(r"^2\.[0-9]{2}$")
+
         executable = self.launch_context.executable.executable_path
-        # Blender installation contains a subfolder named with its version
-        # where python binaries are stored.
-        version_subfolder = self.launch_context.app_name.split("_")[1]
         if os.path.basename(executable).lower() != "blender.exe":
             self.log.info((
                 "Executable does not lead to blender.exe file. Can't determine"
                 " blender's python to check/install PySide2."
             ))
             return

+        executable_dir = os.path.dirname(executable)
+        version_subfolders = []
+        for name in os.listdir(executable_dir):
+            fullpath = os.path.join(executable_dir, name)
+            if not os.path.isdir(fullpath):
+                continue
+
+            if not version_regex.match(name):
+                continue
+
+            version_subfolders.append(name)
+
+        if not version_subfolders:
+            self.log.info(
+                "Didn't find version subfolder next to Blender executable"
+            )
+            return
+
+        if len(version_subfolders) > 1:
+            self.log.info((
+                "Found more than one version subfolder next"
+                " to blender executable. {}"
+            ).format(", ".join([
+                '"./{}"'.format(name)
+                for name in version_subfolders
+            ])))
+            return
+
+        version_subfolder = version_subfolders[0]
+
         pythond_dir = os.path.join(
             os.path.dirname(executable),
             version_subfolder,

@@ -65,6 +102,7 @@ class InstallPySideToBlender(PreLaunchHook):

         # Check if PySide2 is installed and skip if yes
         if self.is_pyside_installed(python_executable):
             self.log.debug("Blender has already installed PySide2.")
             return

         # Install PySide2 in blender's python
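Finding a single version-named folder next to the executable is a small filesystem scan. A standalone sketch of the same approach; the 2.xx pattern matches the Blender 2.8x/2.9x era layout this hook assumes:

    import os
    import re

    def find_version_subfolder(executable_dir, pattern=r"^2\.[0-9]{2}$"):
        # Return the single directory matching the version pattern, or None
        # when there are zero or several candidates (ambiguous install).
        version_regex = re.compile(pattern)
        candidates = [
            name for name in os.listdir(executable_dir)
            if os.path.isdir(os.path.join(executable_dir, name))
            and version_regex.match(name)
        ]
        if len(candidates) == 1:
            return candidates[0]
        return None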
35 openpype/hosts/blender/plugins/create/create_pointcache.py (new file)

@@ -0,0 +1,35 @@
"""Create a pointcache asset."""

import bpy

from avalon import api
from avalon.blender import lib
import openpype.hosts.blender.api.plugin


class CreatePointcache(openpype.hosts.blender.api.plugin.Creator):
    """Polygonal static geometry"""

    name = "pointcacheMain"
    label = "Point Cache"
    family = "pointcache"
    icon = "gears"

    def process(self):
        asset = self.data["asset"]
        subset = self.data["subset"]
        name = openpype.hosts.blender.api.plugin.asset_name(asset, subset)
        collection = bpy.data.collections.new(name=name)
        bpy.context.scene.collection.children.link(collection)
        self.data['task'] = api.Session.get('AVALON_TASK')
        lib.imprint(collection, self.data)

        if (self.options or {}).get("useSelection"):
            objects = lib.get_selection()
            for obj in objects:
                collection.objects.link(obj)
                if obj.type == 'EMPTY':
                    objects.extend(obj.children)

        return collection
246 openpype/hosts/blender/plugins/load/load_abc.py (new file)

@@ -0,0 +1,246 @@
"""Load an asset in Blender from an Alembic file."""

from pathlib import Path
from pprint import pformat
from typing import Dict, List, Optional

from avalon import api, blender
import bpy
import openpype.hosts.blender.api.plugin as plugin


class CacheModelLoader(plugin.AssetLoader):
    """Load cache models.

    Stores the imported asset in a collection named after the asset.

    Note:
        At least for now it only supports Alembic files.
    """

    families = ["model", "pointcache"]
    representations = ["abc"]

    label = "Link Alembic"
    icon = "code-fork"
    color = "orange"

    def _remove(self, objects, container):
        for obj in list(objects):
            if obj.type == 'MESH':
                bpy.data.meshes.remove(obj.data)
            elif obj.type == 'EMPTY':
                bpy.data.objects.remove(obj)

        bpy.data.collections.remove(container)

    def _process(self, libpath, container_name, parent_collection):
        bpy.ops.object.select_all(action='DESELECT')

        view_layer = bpy.context.view_layer
        view_layer_collection = view_layer.active_layer_collection.collection

        relative = bpy.context.preferences.filepaths.use_relative_paths
        bpy.ops.wm.alembic_import(
            filepath=libpath,
            relative_path=relative
        )

        parent = parent_collection

        if parent is None:
            parent = bpy.context.scene.collection

        model_container = bpy.data.collections.new(container_name)
        parent.children.link(model_container)
        for obj in bpy.context.selected_objects:
            model_container.objects.link(obj)
            view_layer_collection.objects.unlink(obj)

            name = obj.name
            obj.name = f"{name}:{container_name}"

            # Groups are imported as Empty objects in Blender
            if obj.type == 'MESH':
                data_name = obj.data.name
                obj.data.name = f"{data_name}:{container_name}"

            if not obj.get(blender.pipeline.AVALON_PROPERTY):
                obj[blender.pipeline.AVALON_PROPERTY] = dict()

            avalon_info = obj[blender.pipeline.AVALON_PROPERTY]
            avalon_info.update({"container_name": container_name})

        bpy.ops.object.select_all(action='DESELECT')

        return model_container

    def process_asset(
            self, context: dict, name: str, namespace: Optional[str] = None,
            options: Optional[Dict] = None
    ) -> Optional[List]:
        """
        Arguments:
            name: Use pre-defined name
            namespace: Use pre-defined namespace
            context: Full parenthood of representation to load
            options: Additional settings dictionary
        """

        libpath = self.fname
        asset = context["asset"]["name"]
        subset = context["subset"]["name"]

        lib_container = plugin.asset_name(
            asset, subset
        )
        unique_number = plugin.get_unique_number(
            asset, subset
        )
        namespace = namespace or f"{asset}_{unique_number}"
        container_name = plugin.asset_name(
            asset, subset, unique_number
        )

        container = bpy.data.collections.new(lib_container)
        container.name = container_name
        blender.pipeline.containerise_existing(
            container,
            name,
            namespace,
            context,
            self.__class__.__name__,
        )

        container_metadata = container.get(
            blender.pipeline.AVALON_PROPERTY)

        container_metadata["libpath"] = libpath
        container_metadata["lib_container"] = lib_container

        obj_container = self._process(
            libpath, container_name, None)

        container_metadata["obj_container"] = obj_container

        # Save the list of objects in the metadata container
        container_metadata["objects"] = obj_container.all_objects

        nodes = list(container.objects)
        nodes.append(container)
        self[:] = nodes
        return nodes

    def update(self, container: Dict, representation: Dict):
        """Update the loaded asset.

        This will remove all objects of the current collection, load the new
        ones and add them to the collection.
        If the objects of the collection are used in another collection they
        will not be removed, only unlinked. Normally this should not be the
        case though.

        Warning:
            No nested collections are supported at the moment!
        """
        collection = bpy.data.collections.get(
            container["objectName"]
        )
        libpath = Path(api.get_representation_path(representation))
        extension = libpath.suffix.lower()

        self.log.info(
            "Container: %s\nRepresentation: %s",
            pformat(container, indent=2),
            pformat(representation, indent=2),
        )

        assert collection, (
            f"The asset is not loaded: {container['objectName']}"
        )
        assert not (collection.children), (
            "Nested collections are not supported."
        )
        assert libpath, (
            f"No existing library file found for {container['objectName']}"
        )
        assert libpath.is_file(), (
            f"The file doesn't exist: {libpath}"
        )
        assert extension in plugin.VALID_EXTENSIONS, (
            f"Unsupported file: {libpath}"
        )

        collection_metadata = collection.get(
            blender.pipeline.AVALON_PROPERTY)
        collection_libpath = collection_metadata["libpath"]

        obj_container = plugin.get_local_collection_with_name(
            collection_metadata["obj_container"].name
        )
        objects = obj_container.all_objects

        container_name = obj_container.name

        normalized_collection_libpath = (
            str(Path(bpy.path.abspath(collection_libpath)).resolve())
        )
        normalized_libpath = (
            str(Path(bpy.path.abspath(str(libpath))).resolve())
        )
        self.log.debug(
            "normalized_collection_libpath:\n %s\nnormalized_libpath:\n %s",
            normalized_collection_libpath,
            normalized_libpath,
        )
        if normalized_collection_libpath == normalized_libpath:
            self.log.info("Library already loaded, not updating...")
            return

        parent = plugin.get_parent_collection(obj_container)

        self._remove(objects, obj_container)

        obj_container = self._process(
            str(libpath), container_name, parent)

        collection_metadata["obj_container"] = obj_container
        collection_metadata["objects"] = obj_container.all_objects
        collection_metadata["libpath"] = str(libpath)
        collection_metadata["representation"] = str(representation["_id"])

    def remove(self, container: Dict) -> bool:
        """Remove an existing container from a Blender scene.

        Arguments:
            container (openpype:container-1.0): Container to remove,
                from `host.ls()`.

        Returns:
            bool: Whether the container was deleted.

        Warning:
            No nested collections are supported at the moment!
        """
        collection = bpy.data.collections.get(
            container["objectName"]
        )
        if not collection:
            return False
        assert not (collection.children), (
            "Nested collections are not supported."
        )

        collection_metadata = collection.get(
            blender.pipeline.AVALON_PROPERTY)

        obj_container = plugin.get_local_collection_with_name(
            collection_metadata["obj_container"].name
        )
        objects = obj_container.all_objects

        self._remove(objects, obj_container)

        bpy.data.collections.remove(collection)

        return True
@@ -242,65 +242,3 @@ class BlendModelLoader(plugin.AssetLoader):
         bpy.data.collections.remove(collection)

         return True
-
-
-class CacheModelLoader(plugin.AssetLoader):
-    """Load cache models.
-
-    Stores the imported asset in a collection named after the asset.
-
-    Note:
-        At least for now it only supports Alembic files.
-    """
-
-    families = ["model"]
-    representations = ["abc"]
-
-    label = "Link Model"
-    icon = "code-fork"
-    color = "orange"
-
-    def process_asset(
-            self, context: dict, name: str, namespace: Optional[str] = None,
-            options: Optional[Dict] = None
-    ) -> Optional[List]:
-        """
-        Arguments:
-            name: Use pre-defined name
-            namespace: Use pre-defined namespace
-            context: Full parenthood of representation to load
-            options: Additional settings dictionary
-        """
-        raise NotImplementedError(
-            "Loading of Alembic files is not yet implemented.")
-        # TODO (jasper): implement Alembic import.
-
-        libpath = self.fname
-        asset = context["asset"]["name"]
-        subset = context["subset"]["name"]
-        # TODO (jasper): evaluate use of namespace which is 'alien' to Blender.
-        lib_container = container_name = (
-            plugin.asset_name(asset, subset, namespace)
-        )
-        relative = bpy.context.preferences.filepaths.use_relative_paths
-
-        with bpy.data.libraries.load(
-                libpath, link=True, relative=relative
-        ) as (data_from, data_to):
-            data_to.collections = [lib_container]
-
-        scene = bpy.context.scene
-        instance_empty = bpy.data.objects.new(
-            container_name, None
-        )
-        scene.collection.objects.link(instance_empty)
-        instance_empty.instance_type = 'COLLECTION'
-        collection = bpy.data.collections[lib_container]
-        collection.name = container_name
-        instance_empty.instance_collection = collection
-
-        nodes = list(collection.objects)
-        nodes.append(collection)
-        nodes.append(instance_empty)
-        self[:] = nodes
-        return nodes
@@ -11,14 +11,14 @@ class ExtractABC(openpype.api.Extractor):

     label = "Extract ABC"
     hosts = ["blender"]
-    families = ["model"]
+    families = ["model", "pointcache"]
     optional = True

     def process(self, instance):
         # Define extract output file path

         stagingdir = self.staging_dir(instance)
-        filename = f"{instance.name}.fbx"
+        filename = f"{instance.name}.abc"
         filepath = os.path.join(stagingdir, filename)

         context = bpy.context

@@ -52,6 +52,8 @@ class ExtractABC(openpype.api.Extractor):

         old_scale = scene.unit_settings.scale_length

+        bpy.ops.object.select_all(action='DESELECT')
+
         selected = list()

         for obj in instance:

@@ -67,14 +69,11 @@ class ExtractABC(openpype.api.Extractor):
         # We set the scale of the scene for the export
         scene.unit_settings.scale_length = 0.01

-        self.log.info(new_context)
-
         # We export the abc
         bpy.ops.wm.alembic_export(
             new_context,
             filepath=filepath,
             start=1,
-            end=1
+            end=1,
+            selected=True
         )

         view_layer.active_layer_collection = old_active_layer_collection
@@ -22,6 +22,7 @@ from .pipeline import (
 )

 from .lib import (
+    pype_tag_name,
     get_track_items,
     get_current_project,
     get_current_sequence,

@@ -73,6 +74,7 @@ __all__ = [
     "work_root",

     # Lib functions
+    "pype_tag_name",
     "get_track_items",
     "get_current_project",
     "get_current_sequence",
@@ -2,7 +2,12 @@ import os
 import hiero.core.events
 import avalon.api as avalon
 from openpype.api import Logger
-from .lib import sync_avalon_data_to_workfile, launch_workfiles_app
+from .lib import (
+    sync_avalon_data_to_workfile,
+    launch_workfiles_app,
+    selection_changed_timeline,
+    before_project_save
+)
 from .tags import add_tags_to_workfile
 from .menu import update_menu_task_label

@@ -78,7 +83,7 @@ def register_hiero_events():
         "Registering events for: kBeforeNewProjectCreated, "
         "kAfterNewProjectCreated, kBeforeProjectLoad, kAfterProjectLoad, "
         "kBeforeProjectSave, kAfterProjectSave, kBeforeProjectClose, "
-        "kAfterProjectClose, kShutdown, kStartup"
+        "kAfterProjectClose, kShutdown, kStartup, kSelectionChanged"
     )

     # hiero.core.events.registerInterest(

@@ -91,8 +96,8 @@ def register_hiero_events():
     hiero.core.events.registerInterest(
         "kAfterProjectLoad", afterProjectLoad)

-    # hiero.core.events.registerInterest(
-    #     "kBeforeProjectSave", beforeProjectSaved)
+    hiero.core.events.registerInterest(
+        "kBeforeProjectSave", before_project_save)
     # hiero.core.events.registerInterest(
     #     "kAfterProjectSave", afterProjectSaved)
     #

@@ -104,10 +109,16 @@ def register_hiero_events():
     # hiero.core.events.registerInterest("kShutdown", shutDown)
     # hiero.core.events.registerInterest("kStartup", startupCompleted)

-    # workfiles
-    hiero.core.events.registerEventType("kStartWorkfiles")
-    hiero.core.events.registerInterest("kStartWorkfiles", launch_workfiles_app)
+    hiero.core.events.registerInterest(
+        ("kSelectionChanged", "kTimeline"), selection_changed_timeline)
+
+    # workfiles
+    try:
+        hiero.core.events.registerEventType("kStartWorkfiles")
+        hiero.core.events.registerInterest(
+            "kStartWorkfiles", launch_workfiles_app)
+    except RuntimeError:
+        pass


 def register_events():
     """
@@ -9,7 +9,7 @@ import hiero
 import avalon.api as avalon
 import avalon.io
 from avalon.vendor.Qt import QtWidgets
-from openpype.api import (Logger, Anatomy, config)
+from openpype.api import (Logger, Anatomy, get_anatomy_settings)
 from . import tags
 import shutil
 from compiler.ast import flatten

@@ -30,9 +30,9 @@ self = sys.modules[__name__]
 self._has_been_setup = False
 self._has_menu = False
 self._registered_gui = None
-self.pype_tag_name = "Pype Data"
-self.default_sequence_name = "PypeSequence"
-self.default_bin_name = "PypeBin"
+self.pype_tag_name = "openpypeData"
+self.default_sequence_name = "openpypeSequence"
+self.default_bin_name = "openpypeBin"

 AVALON_CONFIG = os.getenv("AVALON_CONFIG", "pype")

@@ -150,15 +150,27 @@ def get_track_items(

     # get selected track items or all in active sequence
     if selected:
-        selected_items = list(hiero.selection)
-        for item in selected_items:
-            if track_name and track_name in item.parent().name():
-                # filter only items fitting input track name
-                track_items.append(item)
-            elif not track_name:
-                # or add all if no track_name was defined
-                track_items.append(item)
-    else:
+        try:
+            selected_items = list(hiero.selection)
+            for item in selected_items:
+                if track_name and track_name in item.parent().name():
+                    # filter only items fitting input track name
+                    track_items.append(item)
+                elif not track_name:
+                    # or add all if no track_name was defined
+                    track_items.append(item)
+        except AttributeError:
+            pass
+
+    # check if any collected track items are
+    # `core.Hiero.Python.TrackItem` instance
+    if track_items:
+        any_track_item = track_items[0]
+        if not isinstance(any_track_item, hiero.core.TrackItem):
+            selected_items = []
+
+    # collect all available active sequence track items
+    if not track_items:
         sequence = get_current_sequence(name=sequence_name)
         # get all available tracks from sequence
         tracks = list(sequence.audioTracks()) + list(sequence.videoTracks())

@@ -240,7 +252,7 @@ def set_track_item_pype_tag(track_item, data=None):
     # basic Tag's attribute
     tag_data = {
         "editable": "0",
-        "note": "Pype data holder",
+        "note": "OpenPype data container",
         "icon": "openpype_icon.png",
         "metadata": {k: v for k, v in data.items()}
     }

@@ -744,10 +756,13 @@ def _set_hrox_project_knobs(doc, **knobs):
     # set attributes to Project Tag
     proj_elem = doc.documentElement().firstChildElement("Project")
     for k, v in knobs.items():
-        proj_elem.setAttribute(k, v)
+        if isinstance(v, dict):
+            continue
+        proj_elem.setAttribute(str(k), v)


 def apply_colorspace_project():
+    project_name = os.getenv("AVALON_PROJECT")
     # get path of the active project
     project = get_current_project(remove_untitled=True)
     current_file = project.path()

@@ -756,9 +771,9 @@ def apply_colorspace_project():
     project.close()

     # get presets for hiero
-    presets = config.get_init_presets()
-    colorspace = presets["colorspace"]
-    hiero_project_clrs = colorspace.get("hiero", {}).get("project", {})
+    imageio = get_anatomy_settings(
+        project_name)["imageio"].get("hiero", None)
+    presets = imageio.get("workfile")

     # save the workfile as subversion "comment:_colorspaceChange"
     split_current_file = os.path.splitext(current_file)

@@ -789,13 +804,13 @@ def apply_colorspace_project():
     os.remove(copy_current_file_tmp)

     # use the code below for changing xml hrox attributes
-    hiero_project_clrs.update({"name": os.path.basename(copy_current_file)})
+    presets.update({"name": os.path.basename(copy_current_file)})

     # read HROX in as QDomDocument
     doc = _read_doc_from_path(copy_current_file)

     # apply project colorspace properties
-    _set_hrox_project_knobs(doc, **hiero_project_clrs)
+    _set_hrox_project_knobs(doc, **presets)

     # write QDomDocument back as HROX
     _write_doc_to_path(doc, copy_current_file)

@@ -805,14 +820,17 @@ def apply_colorspace_project():


 def apply_colorspace_clips():
+    project_name = os.getenv("AVALON_PROJECT")
     project = get_current_project(remove_untitled=True)
     clips = project.clips()

     # get presets for hiero
-    presets = config.get_init_presets()
-    colorspace = presets["colorspace"]
-    hiero_clips_clrs = colorspace.get("hiero", {}).get("clips", {})
+    imageio = get_anatomy_settings(
+        project_name)["imageio"].get("hiero", None)
+    from pprint import pprint

+    presets = imageio.get("regexInputs", {}).get("inputs", {})
+    pprint(presets)
     for clip in clips:
         clip_media_source_path = clip.mediaSource().firstpath()
         clip_name = clip.name()

@@ -822,10 +840,11 @@ def apply_colorspace_clips():
             continue

         # check if any colorspace preset for read is matching
-        preset_clrsp = next((hiero_clips_clrs[k]
-                             for k in hiero_clips_clrs
-                             if bool(re.search(k, clip_media_source_path))),
-                            None)
+        preset_clrsp = None
+        for k in presets:
+            if not bool(re.search(k["regex"], clip_media_source_path)):
+                continue
+            preset_clrsp = k["colorspace"]

         if preset_clrsp:
             log.debug("Changing clip.path: {}".format(clip_media_source_path))

@@ -893,3 +912,61 @@ def get_sequence_pattern_and_padding(file):
         return found, padding
     else:
         return None, None


 def sync_clip_name_to_data_asset(track_items_list):
     # loop through all selected clips
     for track_item in track_items_list:
         # ignore if parent track is locked or disabled
         if track_item.parent().isLocked():
             continue
         if not track_item.parent().isEnabled():
             continue
         # ignore if the track item is disabled
         if not track_item.isEnabled():
             continue

         # get name and data
         ti_name = track_item.name()
         data = get_track_item_pype_data(track_item)

         # ignore if no data on the clip or not publish instance
         if not data:
             continue
         if data.get("id") != "pyblish.avalon.instance":
             continue

         # fix data if wrong name
         if data["asset"] != ti_name:
             data["asset"] = ti_name
             # remove the original tag
             tag = get_track_item_pype_tag(track_item)
             track_item.removeTag(tag)
             # create new tag with updated data
             set_track_item_pype_tag(track_item, data)
             print("asset was changed in clip: {}".format(ti_name))


 def selection_changed_timeline(event):
     """Callback on timeline to check if asset in data is the same as clip name.

     Args:
         event (hiero.core.Event): timeline event
     """
     timeline_editor = event.sender
     selection = timeline_editor.selection()

     # run checking function
     sync_clip_name_to_data_asset(selection)


 def before_project_save(event):
     track_items = get_track_items(
         selected=False,
         track_type="video",
         check_enabled=True,
         check_locked=True,
         check_tagged=True)

     # run checking function
     sync_clip_name_to_data_asset(track_items)
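The clip loop above maps regex patterns to colorspace names: first preset whose regex matches the media path wins. A standalone sketch of that lookup; the preset structure with "regex" and "colorspace" keys mirrors the hunk, while the sample data is made up:

    import re

    def match_colorspace(path, presets):
        # Return the colorspace of the first matching preset, else None.
        for preset in presets:
            if re.search(preset["regex"], path):
                return preset["colorspace"]
        return None

    presets = [
        {"regex": r"_plate\.", "colorspace": "ACES - ACEScg"},  # made-up sample
        {"regex": r"\.exr$", "colorspace": "linear"},
    ]
    print(match_colorspace("/shots/sh010_plate.0001.exr", presets))
    # -> ACES - ACEScg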
@@ -68,50 +68,45 @@ def menu_install():

     menu.addSeparator()

-    workfiles_action = menu.addAction("Work Files...")
+    workfiles_action = menu.addAction("Work Files ...")
     workfiles_action.setIcon(QtGui.QIcon("icons:Position.png"))
     workfiles_action.triggered.connect(launch_workfiles_app)

-    default_tags_action = menu.addAction("Create Default Tags...")
+    default_tags_action = menu.addAction("Create Default Tags")
     default_tags_action.setIcon(QtGui.QIcon("icons:Position.png"))
     default_tags_action.triggered.connect(tags.add_tags_to_workfile)

     menu.addSeparator()

-    publish_action = menu.addAction("Publish...")
+    publish_action = menu.addAction("Publish ...")
     publish_action.setIcon(QtGui.QIcon("icons:Output.png"))
     publish_action.triggered.connect(
         lambda *args: publish(hiero.ui.mainWindow())
     )

-    creator_action = menu.addAction("Create...")
+    creator_action = menu.addAction("Create ...")
     creator_action.setIcon(QtGui.QIcon("icons:CopyRectangle.png"))
     creator_action.triggered.connect(creator.show)

-    loader_action = menu.addAction("Load...")
+    loader_action = menu.addAction("Load ...")
     loader_action.setIcon(QtGui.QIcon("icons:CopyRectangle.png"))
     loader_action.triggered.connect(cbloader.show)

-    sceneinventory_action = menu.addAction("Manage...")
+    sceneinventory_action = menu.addAction("Manage ...")
     sceneinventory_action.setIcon(QtGui.QIcon("icons:CopyRectangle.png"))
     sceneinventory_action.triggered.connect(sceneinventory.show)
     menu.addSeparator()

-    reload_action = menu.addAction("Reload pipeline...")
-    reload_action.setIcon(QtGui.QIcon("icons:ColorAdd.png"))
-    reload_action.triggered.connect(reload_config)
+    if os.getenv("OPENPYPE_DEVELOP"):
+        reload_action = menu.addAction("Reload pipeline")
+        reload_action.setIcon(QtGui.QIcon("icons:ColorAdd.png"))
+        reload_action.triggered.connect(reload_config)

     menu.addSeparator()
-    apply_colorspace_p_action = menu.addAction("Apply Colorspace Project...")
+    apply_colorspace_p_action = menu.addAction("Apply Colorspace Project")
     apply_colorspace_p_action.setIcon(QtGui.QIcon("icons:ColorAdd.png"))
     apply_colorspace_p_action.triggered.connect(apply_colorspace_project)

-    apply_colorspace_c_action = menu.addAction("Apply Colorspace Clips...")
+    apply_colorspace_c_action = menu.addAction("Apply Colorspace Clips")
     apply_colorspace_c_action.setIcon(QtGui.QIcon("icons:ColorAdd.png"))
     apply_colorspace_c_action.triggered.connect(apply_colorspace_clips)

     self.context_label_action = context_label_action
     self.workfile_actions = workfiles_action
     self.default_tags_action = default_tags_action
     self.publish_action = publish_action
     self.reload_action = reload_action
@@ -4,10 +4,10 @@ import hiero
 from Qt import QtWidgets, QtCore
 from avalon.vendor import qargparse
 import avalon.api as avalon
-import openpype.api as pype
+import openpype.api as openpype
 from . import lib

-log = pype.Logger().get_logger(__name__)
+log = openpype.Logger().get_logger(__name__)


 def load_stylesheet():

@@ -266,7 +266,8 @@ class CreatorWidget(QtWidgets.QDialog):
             elif v["type"] == "QSpinBox":
                 data[k]["value"] = self.create_row(
                     content_layout, "QSpinBox", v["label"],
-                    setValue=v["value"], setMaximum=10000, setToolTip=tool_tip)
+                    setValue=v["value"], setMinimum=0,
+                    setMaximum=100000, setToolTip=tool_tip)
         return data

@@ -387,7 +388,8 @@ class ClipLoader:
         # try to get value from options or evaluate key value for `load_to`
         self.new_sequence = options.get("newSequence") or bool(
             "New timeline" in options.get("load_to", ""))

+        self.clip_name_template = options.get(
+            "clipNameTemplate") or "{asset}_{subset}_{representation}"
         assert self._populate_data(), str(
             "Cannot Load selected data, look into database "
             "or call your supervisor")

@@ -432,7 +434,7 @@ class ClipLoader:
         asset = str(repr_cntx["asset"])
         subset = str(repr_cntx["subset"])
         representation = str(repr_cntx["representation"])
-        self.data["clip_name"] = "_".join([asset, subset, representation])
+        self.data["clip_name"] = self.clip_name_template.format(**repr_cntx)
         self.data["track_name"] = "_".join([subset, representation])
         self.data["versionData"] = self.context["version"]["data"]
         # gets file path

@@ -476,7 +478,7 @@ class ClipLoader:

         """
         asset_name = self.context["representation"]["context"]["asset"]
-        self.data["assetData"] = pype.get_asset(asset_name)["data"]
+        self.data["assetData"] = openpype.get_asset(asset_name)["data"]

     def _make_track_item(self, source_bin_item, audio=False):
         """ Create track item with """

@@ -543,15 +545,9 @@ class ClipLoader:
                      if "slate" in f),
                     # if nothing was found then use default None
                     # so other bool could be used
-                    None) or bool(((
-                        # put together duration of clip attributes
-                        self.timeline_out - self.timeline_in + 1) \
-                        + self.handle_start \
-                        + self.handle_end
-                    # and compare it with media duration
-                    ) > self.media_duration)
-
-        print("__ slate_on: `{}`".format(slate_on))
+                    None) or bool(int(
+                        (self.timeline_out - self.timeline_in + 1)
+                        + self.handle_start + self.handle_end) < self.media_duration)

         # if slate is on then remove the slate frame from beginning
         if slate_on:

@@ -592,7 +588,7 @@ class ClipLoader:
         return track_item


-class Creator(pype.Creator):
+class Creator(openpype.Creator):
     """Creator class wrapper
     """
     clip_color = "Purple"

@@ -601,7 +597,7 @@ class Creator(pype.Creator):
     def __init__(self, *args, **kwargs):
         import openpype.hosts.hiero.api as phiero
         super(Creator, self).__init__(*args, **kwargs)
-        self.presets = pype.get_current_project_settings()[
+        self.presets = openpype.get_current_project_settings()[
             "hiero"]["create"].get(self.__class__.__name__, {})

         # adding basic current context resolve objects

@@ -674,6 +670,9 @@ class PublishClip:
         if kwargs.get("avalon"):
             self.tag_data.update(kwargs["avalon"])

+        # add publish attribute to tag data
+        self.tag_data.update({"publish": True})
+
         # adding ui inputs if any
         self.ui_inputs = kwargs.get("ui_inputs", {})

@@ -687,6 +686,7 @@ class PublishClip:
         self._create_parents()

     def convert(self):
+
         # solve track item data and add them to tag data
         self._convert_to_tag_data()

@@ -705,6 +705,12 @@ class PublishClip:
             self.tag_data["asset"] = new_name
         else:
             self.tag_data["asset"] = self.ti_name
             self.tag_data["hierarchyData"]["shot"] = self.ti_name

+        if self.tag_data["heroTrack"] and self.review_layer:
+            self.tag_data.update({"reviewTrack": self.review_layer})
+        else:
+            self.tag_data.update({"reviewTrack": None})
+
         # create pype tag on track_item and add data
         lib.imprint(self.track_item, self.tag_data)

@@ -773,8 +779,8 @@ class PublishClip:
         _spl = text.split("#")
         _len = (len(_spl) - 1)
         _repl = "{{{0}:0>{1}}}".format(name, _len)
-        new_text = text.replace(("#" * _len), _repl)
-        return new_text
+        return text.replace(("#" * _len), _repl)


     def _convert_to_tag_data(self):
         """ Convert internal data to tag data.

@@ -782,13 +788,13 @@ class PublishClip:
         Populating the tag data into internal variable self.tag_data
         """
         # define vertical sync attributes
-        master_layer = True
+        hero_track = True
         self.review_layer = ""
         if self.vertical_sync:
             # check if track name is not in driving layer
             if self.track_name not in self.driving_layer:
                 # if it is not then define vertical sync as None
-                master_layer = False
+                hero_track = False

         # increasing steps by index of rename iteration
         self.count_steps *= self.rename_index

@@ -802,7 +808,7 @@ class PublishClip:
             self.tag_data[_k] = _v["value"]

         # driving layer is set as positive match
-        if master_layer or self.vertical_sync:
+        if hero_track or self.vertical_sync:
             # mark review layer
             if self.review_track and (
                     self.review_track not in self.review_track_default):

@@ -836,40 +842,40 @@ class PublishClip:
             hierarchy_formating_data
         )

-        tag_hierarchy_data.update({"masterLayer": True})
-        if master_layer and self.vertical_sync:
+        tag_hierarchy_data.update({"heroTrack": True})
+        if hero_track and self.vertical_sync:
             self.vertical_clip_match.update({
                 (self.clip_in, self.clip_out): tag_hierarchy_data
             })

-        if not master_layer and self.vertical_sync:
+        if not hero_track and self.vertical_sync:
             # driving layer is set as negative match
-            for (_in, _out), master_data in self.vertical_clip_match.items():
-                master_data.update({"masterLayer": False})
+            for (_in, _out), hero_data in self.vertical_clip_match.items():
+                hero_data.update({"heroTrack": False})
                 if _in == self.clip_in and _out == self.clip_out:
-                    data_subset = master_data["subset"]
-                    # add track index in case duplicity of names in master data
+                    data_subset = hero_data["subset"]
+                    # add track index in case duplicity of names in hero data
                     if self.subset in data_subset:
-                        master_data["subset"] = self.subset + str(
+                        hero_data["subset"] = self.subset + str(
                             self.track_index)
                     # in case track name and subset name is the same then add
                     if self.subset_name == self.track_name:
-                        master_data["subset"] = self.subset
+                        hero_data["subset"] = self.subset
                     # assign data to return hierarchy data to tag
-                    tag_hierarchy_data = master_data
+                    tag_hierarchy_data = hero_data

         # add data to return data dict
         self.tag_data.update(tag_hierarchy_data)

-        if master_layer and self.review_layer:
-            self.tag_data.update({"reviewTrack": self.review_layer})

     def _solve_tag_hierarchy_data(self, hierarchy_formating_data):
         """ Solve tag data from hierarchy data and templates. """
         # fill up clip name and hierarchy keys
         hierarchy_filled = self.hierarchy.format(**hierarchy_formating_data)
         clip_name_filled = self.clip_name.format(**hierarchy_formating_data)

         # remove shot from hierarchy data: is not needed anymore
         hierarchy_formating_data.pop("shot")

         return {
             "newClipName": clip_name_filled,
             "hierarchy": hierarchy_filled,
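The `"{{{0}:0>{1}}}"` trick in the PublishClip hunk turns a run of `#` characters into a zero-padded format token. A tiny self-contained sketch:

    def hashes_to_format_token(text, name):
        # "shot###" with name "count" -> "shot{count:0>3}"
        _len = len(text.split("#")) - 1
        _repl = "{{{0}:0>{1}}}".format(name, _len)
        return text.replace("#" * _len, _repl)

    template = hashes_to_format_token("shot###", "count")
    print(template.format(count=7))  # -> shot007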
@@ -84,6 +84,13 @@ def update_tag(tag, data):
     mtd = tag.metadata()
     # get metadata key from data
     data_mtd = data.get("metadata", {})

+    # due to hiero bug we have to make sure keys which are not existent in
+    # data are cleared of value by `None`
+    for _mk in mtd.keys():
+        if _mk.replace("tag.", "") not in data_mtd.keys():
+            mtd.setValue(_mk, str(None))
+
     # set all data metadata to tag metadata
     for k, v in data_mtd.items():
         mtd.setValue(
0 openpype/hosts/hiero/otio/__init__.py (new file)

366 openpype/hosts/hiero/otio/hiero_export.py (new file)

@@ -0,0 +1,366 @@
""" compatibility OpenTimelineIO 0.12.0 and newer
"""

import os
import re
import sys
import ast
from compiler.ast import flatten
import opentimelineio as otio
from . import utils
import hiero.core
import hiero.ui

self = sys.modules[__name__]
self.track_types = {
    hiero.core.VideoTrack: otio.schema.TrackKind.Video,
    hiero.core.AudioTrack: otio.schema.TrackKind.Audio
}
self.project_fps = None
self.marker_color_map = {
    "magenta": otio.schema.MarkerColor.MAGENTA,
    "red": otio.schema.MarkerColor.RED,
    "yellow": otio.schema.MarkerColor.YELLOW,
    "green": otio.schema.MarkerColor.GREEN,
    "cyan": otio.schema.MarkerColor.CYAN,
    "blue": otio.schema.MarkerColor.BLUE,
}
self.timeline = None
self.include_tags = True


def get_current_hiero_project(remove_untitled=False):
    projects = flatten(hiero.core.projects())
    if not remove_untitled:
        return next(iter(projects))

    # if remove_untitled
    for proj in projects:
        if "Untitled" in proj.name():
            proj.close()
        else:
            return proj
def create_otio_rational_time(frame, fps):
    return otio.opentime.RationalTime(
        float(frame),
        float(fps)
    )


def create_otio_time_range(start_frame, frame_duration, fps):
    return otio.opentime.TimeRange(
        start_time=create_otio_rational_time(start_frame, fps),
        duration=create_otio_rational_time(frame_duration, fps)
    )


def _get_metadata(item):
    if hasattr(item, 'metadata'):
        return {key: value for key, value in dict(item.metadata()).items()}
    return {}


def create_otio_reference(clip):
    metadata = _get_metadata(clip)
    media_source = clip.mediaSource()

    # get file info for path and start frame
    file_info = media_source.fileinfos().pop()
    frame_start = file_info.startFrame()
    path = file_info.filename()

    # get padding and other file infos
    padding = media_source.filenamePadding()
    file_head = media_source.filenameHead()
    is_sequence = not media_source.singleFile()
    frame_duration = media_source.duration()
    fps = utils.get_rate(clip) or self.project_fps
    extension = os.path.splitext(path)[-1]

    if is_sequence:
        metadata.update({
            "isSequence": True,
            "padding": padding
        })

    # add resolution metadata
    metadata.update({
        "openpype.source.colourtransform": clip.sourceMediaColourTransform(),
        "openpype.source.width": int(media_source.width()),
        "openpype.source.height": int(media_source.height()),
        "openpype.source.pixelAspect": float(media_source.pixelAspect())
    })

    otio_ex_ref_item = None

    if is_sequence:
        # if it is a file sequence try to create `ImageSequenceReference`;
        # the OTIO might not be compatible, so return nothing and do it the old way
        try:
            dirname = os.path.dirname(path)
            otio_ex_ref_item = otio.schema.ImageSequenceReference(
                target_url_base=dirname + os.sep,
                name_prefix=file_head,
                name_suffix=extension,
                start_frame=frame_start,
                frame_zero_padding=padding,
                rate=fps,
                available_range=create_otio_time_range(
                    frame_start,
                    frame_duration,
                    fps
                )
            )
        except AttributeError:
            pass

    if not otio_ex_ref_item:
        reformat_path = utils.get_reformated_path(path, padded=False)
        # in case of old OTIO or a video file create `ExternalReference`
        otio_ex_ref_item = otio.schema.ExternalReference(
            target_url=reformat_path,
            available_range=create_otio_time_range(
                frame_start,
                frame_duration,
                fps
            )
        )

    # add metadata to otio item
    add_otio_metadata(otio_ex_ref_item, media_source, **metadata)

    return otio_ex_ref_item
def get_marker_color(tag):
    icon = tag.icon()
    pat = r'icons:Tag(?P<color>\w+)\.\w+'

    res = re.search(pat, icon)
    if res:
        color = res.groupdict().get('color')
        if color.lower() in self.marker_color_map:
            return self.marker_color_map[color.lower()]

    return otio.schema.MarkerColor.RED


def create_otio_markers(otio_item, item):
    for tag in item.tags():
        if not tag.visible():
            continue

        if tag.name() == 'Copy':
            # Hiero adds this tag to a lot of clips
            continue

        frame_rate = utils.get_rate(item) or self.project_fps

        marked_range = otio.opentime.TimeRange(
            start_time=otio.opentime.RationalTime(
                tag.inTime(),
                frame_rate
            ),
            duration=otio.opentime.RationalTime(
                int(tag.metadata().dict().get('tag.length', '0')),
                frame_rate
            )
        )
        # add tag metadata but remove "tag." string
        metadata = {}

        for key, value in tag.metadata().dict().items():
            _key = key.replace("tag.", "")

            try:
                # capture exceptions which are related to strings only
                _value = ast.literal_eval(value)
            except (ValueError, SyntaxError):
                _value = value

            metadata.update({_key: _value})

        # Store the source item for future import assignment
        metadata['hiero_source_type'] = item.__class__.__name__

        marker = otio.schema.Marker(
            name=tag.name(),
            color=get_marker_color(tag),
            marked_range=marked_range,
            metadata=metadata
        )

        otio_item.markers.append(marker)


def create_otio_clip(track_item):
    clip = track_item.source()
    source_in = track_item.sourceIn()
    duration = track_item.sourceDuration()
    fps = utils.get_rate(track_item) or self.project_fps
    name = track_item.name()

    media_reference = create_otio_reference(clip)
    source_range = create_otio_time_range(
        int(source_in),
        int(duration),
        fps
    )

    otio_clip = otio.schema.Clip(
        name=name,
        source_range=source_range,
        media_reference=media_reference
    )

    # Add tags as markers
    if self.include_tags:
        create_otio_markers(otio_clip, track_item)
        create_otio_markers(otio_clip, track_item.source())

    return otio_clip
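Tag metadata arrives from Hiero as strings, so the marker code coerces values back to Python objects with ast.literal_eval and keeps the raw string when parsing fails. A self-contained sketch of that coercion:

    import ast

    def coerce_metadata(raw):
        # Try to interpret each string value as a Python literal;
        # fall back to the original string on failure.
        out = {}
        for key, value in raw.items():
            try:
                out[key.replace("tag.", "")] = ast.literal_eval(value)
            except (ValueError, SyntaxError):
                out[key.replace("tag.", "")] = value
        return out

    print(coerce_metadata({"tag.length": "24", "tag.note": "plate v001"}))
    # -> {'length': 24, 'note': 'plate v001'}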
def create_otio_gap(gap_start, clip_start, tl_start_frame, fps):
    return otio.schema.Gap(
        source_range=create_otio_time_range(
            gap_start,
            (clip_start - tl_start_frame) - gap_start,
            fps
        )
    )


def _create_otio_timeline():
    project = get_current_hiero_project(remove_untitled=False)
    metadata = _get_metadata(self.timeline)

    metadata.update({
        "openpype.timeline.width": int(self.timeline.format().width()),
        "openpype.timeline.height": int(self.timeline.format().height()),
        "openpype.timeline.pixelAspect": int(self.timeline.format().pixelAspect()),  # noqa
        "openpype.project.useOCIOEnvironmentOverride": project.useOCIOEnvironmentOverride(),  # noqa
        "openpype.project.lutSetting16Bit": project.lutSetting16Bit(),
        "openpype.project.lutSetting8Bit": project.lutSetting8Bit(),
        "openpype.project.lutSettingFloat": project.lutSettingFloat(),
        "openpype.project.lutSettingLog": project.lutSettingLog(),
        "openpype.project.lutSettingViewer": project.lutSettingViewer(),
        "openpype.project.lutSettingWorkingSpace": project.lutSettingWorkingSpace(),  # noqa
        "openpype.project.lutUseOCIOForExport": project.lutUseOCIOForExport(),
        "openpype.project.ocioConfigName": project.ocioConfigName(),
        "openpype.project.ocioConfigPath": project.ocioConfigPath()
    })

    start_time = create_otio_rational_time(
        self.timeline.timecodeStart(), self.project_fps)

    return otio.schema.Timeline(
        name=self.timeline.name(),
        global_start_time=start_time,
        metadata=metadata
    )


def create_otio_track(track_type, track_name):
    return otio.schema.Track(
        name=track_name,
        kind=self.track_types[track_type]
    )


def add_otio_gap(track_item, otio_track, prev_out):
    gap_length = track_item.timelineIn() - prev_out
    if prev_out != 0:
        gap_length -= 1

    gap = otio.opentime.TimeRange(
        duration=otio.opentime.RationalTime(
            gap_length,
            self.project_fps
        )
    )
    otio_gap = otio.schema.Gap(source_range=gap)
    otio_track.append(otio_gap)


def add_otio_metadata(otio_item, media_source, **kwargs):
    metadata = _get_metadata(media_source)

    # add additional metadata from kwargs
    if kwargs:
        metadata.update(kwargs)

    # add metadata to otio item metadata
    for key, value in metadata.items():
        otio_item.metadata.update({key: value})


def create_otio_timeline():

    # get current timeline
    self.timeline = hiero.ui.activeSequence()
    self.project_fps = self.timeline.framerate().toFloat()

    # convert timeline to otio
    otio_timeline = _create_otio_timeline()

    # loop all defined track types
    for track in self.timeline.items():
        # skip if track is disabled
        if not track.isEnabled():
            continue

        # convert track to otio
        otio_track = create_otio_track(
            type(track), track.name())

        for itemindex, track_item in enumerate(track):
            # skip offline track items
            if not track_item.isMediaPresent():
                continue

            # skip if track item is disabled
            if not track_item.isEnabled():
                continue

            # Add Gap if needed
            if itemindex == 0:
                # if it is the first track item at the track then add
                # it to the previous item
                prev_item = track_item

            else:
                # get previous item
                prev_item = track_item.parent().items()[itemindex - 1]

            # calculate clip frame range difference from each other
            clip_diff = track_item.timelineIn() - prev_item.timelineOut()

            # add gap if the first track item is not starting
            # at the first timeline frame
            if itemindex == 0 and track_item.timelineIn() > 0:
                add_otio_gap(track_item, otio_track, 0)

            # or add gap if the following track items have
            # frame range differences from each other
            elif itemindex and clip_diff != 1:
                add_otio_gap(track_item, otio_track, prev_item.timelineOut())

            # create otio clip and add it to track
            otio_clip = create_otio_clip(track_item)
            otio_track.append(otio_clip)

        # Add tags as markers
        if self.include_tags:
            create_otio_markers(otio_track, track)

        # add track to otio timeline
        otio_timeline.tracks.append(otio_track)

    return otio_timeline


def write_to_file(otio_timeline, path):
    otio.adapters.write_to_file(otio_timeline, path)
|
||||
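Example (hypothetical usage sketch, not part of this commit; the output path
is an assumption): with an active sequence open in Hiero, the module-level
export entry points above can be driven like this:

    from openpype.hosts.hiero.otio import hiero_export

    # builds the otio timeline from the active sequence, then serializes it
    otio_timeline = hiero_export.create_otio_timeline()
    hiero_export.write_to_file(otio_timeline, "/tmp/sequence.otio")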
545 openpype/hosts/hiero/otio/hiero_import.py Normal file

@@ -0,0 +1,545 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-

__author__ = "Daniel Flehner Heen"
__credits__ = ["Jakub Jezek", "Daniel Flehner Heen"]


import os
import hiero.core
import hiero.ui

import PySide2.QtWidgets as qw

try:
    from urllib import unquote

except ImportError:
    from urllib.parse import unquote  # lint:ok

import opentimelineio as otio

_otio_old = False


def inform(messages):
    if isinstance(messages, type('')):
        messages = [messages]

    qw.QMessageBox.information(
        hiero.ui.mainWindow(),
        'OTIO Import',
        '\n'.join(messages),
        qw.QMessageBox.StandardButton.Ok
    )


def get_transition_type(otio_item, otio_track):
    _in, _out = otio_track.neighbors_of(otio_item)

    if isinstance(_in, otio.schema.Gap):
        _in = None

    if isinstance(_out, otio.schema.Gap):
        _out = None

    if _in and _out:
        return 'dissolve'

    elif _in and not _out:
        return 'fade_out'

    elif not _in and _out:
        return 'fade_in'

    else:
        return 'unknown'


def find_trackitem(otio_clip, hiero_track):
    for item in hiero_track.items():
        if item.timelineIn() == otio_clip.range_in_parent().start_time.value:
            if item.name() == otio_clip.name:
                return item

    return None


def get_neighboring_trackitems(otio_item, otio_track, hiero_track):
    _in, _out = otio_track.neighbors_of(otio_item)
    trackitem_in = None
    trackitem_out = None

    if _in:
        trackitem_in = find_trackitem(_in, hiero_track)

    if _out:
        trackitem_out = find_trackitem(_out, hiero_track)

    return trackitem_in, trackitem_out


def apply_transition(otio_track, otio_item, track):
    warning = None

    # Figure out type of transition
    transition_type = get_transition_type(otio_item, otio_track)

    # Figure out track kind for getattr below
    kind = ''
    if isinstance(track, hiero.core.AudioTrack):
        kind = 'Audio'

    # Gather TrackItems involved in transition
    item_in, item_out = get_neighboring_trackitems(
        otio_item,
        otio_track,
        track
    )

    # Create transition object
    if transition_type == 'dissolve':
        transition_func = getattr(
            hiero.core.Transition,
            'create{kind}DissolveTransition'.format(kind=kind)
        )

        try:
            transition = transition_func(
                item_in,
                item_out,
                otio_item.in_offset.value,
                otio_item.out_offset.value
            )

        # Catch error raised if transition is bigger than TrackItem source
        except RuntimeError as e:
            transition = None
            warning = (
                "Unable to apply transition \"{t.name}\": {e} "
                "Ignoring the transition.").format(t=otio_item, e=str(e))

    elif transition_type == 'fade_in':
        transition_func = getattr(
            hiero.core.Transition,
            'create{kind}FadeInTransition'.format(kind=kind)
        )

        # Warn user if part of fade is outside of clip
        if otio_item.in_offset.value:
            warning = \
                'First half of transition "{t.name}" is outside of clip ' \
                'and not valid in Hiero. Only applied second half.' \
                .format(t=otio_item)

        transition = transition_func(
            item_out,
            otio_item.out_offset.value
        )

    elif transition_type == 'fade_out':
        transition_func = getattr(
            hiero.core.Transition,
            'create{kind}FadeOutTransition'.format(kind=kind)
        )
        transition = transition_func(
            item_in,
            otio_item.in_offset.value
        )

        # Warn user if part of fade is outside of clip
        if otio_item.out_offset.value:
            warning = \
                'Second half of transition "{t.name}" is outside of clip ' \
                'and not valid in Hiero. Only applied first half.' \
                .format(t=otio_item)

    else:
        # Unknown transition
        return

    # Apply transition to track
    if transition:
        track.addTransition(transition)

    # Inform user about missing or adjusted transitions
    return warning


def prep_url(url_in):
    url = unquote(url_in)

    if url.startswith('file://localhost/'):
        return url

    url = 'file://localhost{sep}{url}'.format(
        sep=url.startswith(os.sep) and '' or os.sep,
        url=url.startswith(os.sep) and url[1:] or url
    )

    return url
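# Example (illustrative only, with an assumed path; not part of the original
# file): prep_url() wraps a plain POSIX path into the 'file://localhost' form
# Hiero expects, while an already wrapped URL passes through unchanged:
#
#     prep_url('/shows/seq/plate.mov')
#     # -> 'file://localhost/shows/seq/plate.mov'
#     prep_url('file://localhost/shows/seq/plate.mov')
#     # -> 'file://localhost/shows/seq/plate.mov'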

def create_offline_mediasource(otio_clip, path=None):
    global _otio_old

    hiero_rate = hiero.core.TimeBase(
        otio_clip.source_range.start_time.rate
    )

    try:
        legal_media_refs = (
            otio.schema.ExternalReference,
            otio.schema.ImageSequenceReference
        )
    except AttributeError:
        _otio_old = True
        legal_media_refs = (
            otio.schema.ExternalReference,
        )

    if isinstance(otio_clip.media_reference, legal_media_refs):
        source_range = otio_clip.available_range()

    else:
        source_range = otio_clip.source_range

    if path is None:
        path = otio_clip.name

    media = hiero.core.MediaSource.createOfflineVideoMediaSource(
        prep_url(path),
        source_range.start_time.value,
        source_range.duration.value,
        hiero_rate,
        source_range.start_time.value
    )

    return media


def load_otio(otio_file, project=None, sequence=None):
    otio_timeline = otio.adapters.read_from_file(otio_file)
    build_sequence(otio_timeline, project=project, sequence=sequence)
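# Example (hypothetical usage sketch, not part of the original file; the file
# path is an assumption): from Hiero's script editor an exported timeline can
# be re-imported into the current project like so:
#
#     from openpype.hosts.hiero.otio import hiero_import
#
#     hiero_import.load_otio("/tmp/sequence.otio")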

marker_color_map = {
    "PINK": "Magenta",
    "RED": "Red",
    "ORANGE": "Yellow",
    "YELLOW": "Yellow",
    "GREEN": "Green",
    "CYAN": "Cyan",
    "BLUE": "Blue",
    "PURPLE": "Magenta",
    "MAGENTA": "Magenta",
    "BLACK": "Blue",
    "WHITE": "Green"
}


def get_tag(tagname, tagsbin):
    for tag in tagsbin.items():
        if tag.name() == tagname:
            return tag

        if isinstance(tag, hiero.core.Bin):
            tag = get_tag(tagname, tag)

            if tag is not None:
                return tag

    return None


def add_metadata(metadata, hiero_item):
    for key, value in metadata.get('Hiero', dict()).items():
        if key == 'source_type':
            # Only used internally to reassign tag to correct Hiero item
            continue

        if isinstance(value, dict):
            add_metadata(value, hiero_item)
            continue

        if value is not None:
            if not key.startswith('tag.'):
                key = 'tag.' + key

            hiero_item.metadata().setValue(key, str(value))


def add_markers(otio_item, hiero_item, tagsbin):
    if isinstance(otio_item, (otio.schema.Stack, otio.schema.Clip)):
        markers = otio_item.markers

    elif isinstance(otio_item, otio.schema.Timeline):
        markers = otio_item.tracks.markers

    else:
        markers = []

    for marker in markers:
        meta = marker.metadata.get('Hiero', dict())
        if 'source_type' in meta:
            if hiero_item.__class__.__name__ != meta.get('source_type'):
                continue

        marker_color = marker.color

        _tag = get_tag(marker.name, tagsbin)
        if _tag is None:
            _tag = get_tag(marker_color_map[marker_color], tagsbin)

        if _tag is None:
            _tag = hiero.core.Tag(marker_color_map[marker.color])

        start = marker.marked_range.start_time.value
        end = (
            marker.marked_range.start_time.value +
            marker.marked_range.duration.value
        )

        if hasattr(hiero_item, 'addTagToRange'):
            tag = hiero_item.addTagToRange(_tag, start, end)

        else:
            tag = hiero_item.addTag(_tag)

        tag.setName(marker.name or marker_color_map[marker_color])
        # tag.setNote(meta.get('tag.note', ''))

        # Add metadata
        add_metadata(marker.metadata, tag)


def create_track(otio_track, tracknum, track_kind):
    if track_kind is None and hasattr(otio_track, 'kind'):
        track_kind = otio_track.kind

    # Create a Track
    if track_kind == otio.schema.TrackKind.Video:
        track = hiero.core.VideoTrack(
            otio_track.name or 'Video{n}'.format(n=tracknum)
        )

    else:
        track = hiero.core.AudioTrack(
            otio_track.name or 'Audio{n}'.format(n=tracknum)
        )

    return track


def create_clip(otio_clip, tagsbin, sequencebin):
    # Create MediaSource
    url = None
    media = None
    otio_media = otio_clip.media_reference

    if isinstance(otio_media, otio.schema.ExternalReference):
        url = prep_url(otio_media.target_url)
        media = hiero.core.MediaSource(url)

    elif not _otio_old:
        if isinstance(otio_media, otio.schema.ImageSequenceReference):
            url = prep_url(otio_media.abstract_target_url('#'))
            media = hiero.core.MediaSource(url)

    if media is None or media.isOffline():
        media = create_offline_mediasource(otio_clip, url)

    # Reuse previous clip if possible
    clip = None
    for item in sequencebin.clips():
        if item.activeItem().mediaSource() == media:
            clip = item.activeItem()
            break

    if not clip:
        # Create new Clip
        clip = hiero.core.Clip(media)

        # Add Clip to a Bin
        sequencebin.addItem(hiero.core.BinItem(clip))

    # Add markers
    add_markers(otio_clip, clip, tagsbin)

    return clip


def create_trackitem(playhead, track, otio_clip, clip):
    source_range = otio_clip.source_range

    trackitem = track.createTrackItem(otio_clip.name)
    trackitem.setPlaybackSpeed(source_range.start_time.rate)
    trackitem.setSource(clip)

    time_scalar = 1.

    # Check for speed effects and adjust playback speed accordingly
    for effect in otio_clip.effects:
        if isinstance(effect, otio.schema.LinearTimeWarp):
            time_scalar = effect.time_scalar
            # Only reverse effect can be applied here
            if abs(time_scalar) == 1.:
                trackitem.setPlaybackSpeed(
                    trackitem.playbackSpeed() * time_scalar)

        elif isinstance(effect, otio.schema.FreezeFrame):
            # For freeze frame, playback speed must be set after range
            time_scalar = 0.

    # If reverse playback speed swap source in and out
    if trackitem.playbackSpeed() < 0:
        source_out = source_range.start_time.value
        source_in = source_range.end_time_inclusive().value

        timeline_in = playhead + source_out
        timeline_out = (
            timeline_in +
            source_range.duration.value
        ) - 1
    else:
        # Normal playback speed
        source_in = source_range.start_time.value
        source_out = source_range.end_time_inclusive().value

        timeline_in = playhead
        timeline_out = (
            timeline_in +
            source_range.duration.value
        ) - 1

    # Set source and timeline in/out points
    trackitem.setTimes(
        timeline_in,
        timeline_out,
        source_in,
        source_out
    )

    # Apply playback speed for freeze frames
    if abs(time_scalar) != 1.:
        trackitem.setPlaybackSpeed(trackitem.playbackSpeed() * time_scalar)

    # Link audio to video when possible
    if isinstance(track, hiero.core.AudioTrack):
        for other in track.parent().trackItemsAt(playhead):
            if other.source() == clip:
                trackitem.link(other)

    return trackitem


def build_sequence(
        otio_timeline, project=None, sequence=None, track_kind=None):
    if project is None:
        if sequence:
            project = sequence.project()

        else:
            # As of version 12.1v2 there is no way of getting the active project
            project = hiero.core.projects(hiero.core.Project.kUserProjects)[-1]

    projectbin = project.clipsBin()

    if not sequence:
        # Create a Sequence
        sequence = hiero.core.Sequence(otio_timeline.name or 'OTIOSequence')

        # Set sequence settings from otio timeline if available
        if (
            hasattr(otio_timeline, 'global_start_time')
            and otio_timeline.global_start_time
        ):
            start_time = otio_timeline.global_start_time
            sequence.setFramerate(start_time.rate)
            sequence.setTimecodeStart(start_time.value)

        # Create a Bin to hold clips
        projectbin.addItem(hiero.core.BinItem(sequence))

        sequencebin = hiero.core.Bin(sequence.name())
        projectbin.addItem(sequencebin)

    else:
        sequencebin = projectbin

    # Get tagsBin
    tagsbin = hiero.core.project("Tag Presets").tagsBin()

    # Add timeline markers
    add_markers(otio_timeline, sequence, tagsbin)

    if isinstance(otio_timeline, otio.schema.Timeline):
        tracks = otio_timeline.tracks

    else:
        tracks = [otio_timeline]

    for tracknum, otio_track in enumerate(tracks):
        playhead = 0
        _transitions = []

        # Add track to sequence
        track = create_track(otio_track, tracknum, track_kind)
        sequence.addTrack(track)

        # iterate over items in track
        for _itemnum, otio_clip in enumerate(otio_track):
            if isinstance(otio_clip, (otio.schema.Track, otio.schema.Stack)):
                inform('Nested sequences/tracks are created separately.')

                # Add gap where the nested sequence would have been
                playhead += otio_clip.source_range.duration.value

                # Process nested sequence
                build_sequence(
                    otio_clip,
                    project=project,
                    track_kind=otio_track.kind
                )

            elif isinstance(otio_clip, otio.schema.Clip):
                # Create a Clip
                clip = create_clip(otio_clip, tagsbin, sequencebin)

                # Create TrackItem
                trackitem = create_trackitem(
                    playhead,
                    track,
                    otio_clip,
                    clip
                )

                # Add markers
                add_markers(otio_clip, trackitem, tagsbin)

                # Add trackitem to track
                track.addTrackItem(trackitem)

                # Update playhead
                playhead = trackitem.timelineOut() + 1

            elif isinstance(otio_clip, otio.schema.Transition):
                # Store transitions for when all clips in the track are created
                _transitions.append((otio_track, otio_clip))

            elif isinstance(otio_clip, otio.schema.Gap):
                # Hiero has no fillers, slugs or blanks at the moment
                playhead += otio_clip.source_range.duration.value

        # Apply transitions we stored earlier now that all clips are present
        warnings = []
        for otio_track, otio_item in _transitions:
            # Catch warnings from transitions in case
            # of unsupported transitions
            warning = apply_transition(otio_track, otio_item, track)
            if warning:
                warnings.append(warning)

        if warnings:
            inform(warnings)
76 openpype/hosts/hiero/otio/utils.py Normal file

@@ -0,0 +1,76 @@
import re
import opentimelineio as otio


def timecode_to_frames(timecode, framerate):
    rt = otio.opentime.from_timecode(timecode, framerate)
    return int(otio.opentime.to_frames(rt))


def frames_to_timecode(frames, framerate):
    rt = otio.opentime.from_frames(frames, framerate)
    return otio.opentime.to_timecode(rt)


def frames_to_secons(frames, framerate):
    rt = otio.opentime.from_frames(frames, framerate)
    return otio.opentime.to_seconds(rt)


def get_reformated_path(path, padded=True):
    """
    Return fixed python expression path

    Args:
        path (str): path url or simple file name

    Returns:
        type: string with reformatted path

    Example:
        get_reformated_path("plate.[0001-1008].exr") > plate.%04d.exr

    """
    if "%" in path:
        padding_pattern = r"(\d+)"
        padding = int(re.findall(padding_pattern, path).pop())
        num_pattern = r"(%\d+d)"
        if padded:
            path = re.sub(num_pattern, "%0{}d".format(padding), path)
        else:
            path = re.sub(num_pattern, "%d", path)
    return path


def get_padding_from_path(path):
    """
    Return padding number from DaVinci Resolve sequence path style

    Args:
        path (str): path url or simple file name

    Returns:
        int: padding number

    Example:
        get_padding_from_path("plate.[0001-1008].exr") > 4

    """
    padding_pattern = "(\\d+)(?=-)"
    if "[" in path:
        return len(re.findall(padding_pattern, path).pop())

    return None


def get_rate(item):
    if not hasattr(item, 'framerate'):
        return None

    num, den = item.framerate().toRational()
    rate = float(num) / float(den)

    if rate.is_integer():
        return rate

    return round(rate, 4)
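Example (illustrative only; the framerate and timecode values are
assumptions): the helpers above compose naturally, e.g. converting a
timecode to frames at 25 fps and back again:

    frames = timecode_to_frames("00:00:01:00", 25)   # -> 25
    assert frames_to_timecode(frames, 25) == "00:00:01:00"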
@@ -120,9 +120,9 @@ class CreateShotClip(phiero.Creator):
        "vSyncTrack": {
            "value": gui_tracks,  # noqa
            "type": "QComboBox",
            "label": "Master track",
            "label": "Hero track",
            "target": "ui",
            "toolTip": "Select driving track name which should be mastering all others",  # noqa
            "toolTip": "Select driving track name which should be hero for all others",  # noqa
            "order": 1}
    }
},
@@ -29,13 +29,19 @@ class LoadClip(phiero.SequenceLoader):
    clip_color_last = "green"
    clip_color = "red"

    def load(self, context, name, namespace, options):
    clip_name_template = "{asset}_{subset}_{representation}"

    def load(self, context, name, namespace, options):
        # add clip name template to options
        options.update({
            "clipNameTemplate": self.clip_name_template
        })
        # in case loader uses multiselection
        if self.track and self.sequence:
            options.update({
                "sequence": self.sequence,
                "track": self.track
                "track": self.track,
                "clipNameTemplate": self.clip_name_template
            })

        # load clip to timeline and get main variables

@@ -45,7 +51,8 @@
        version_data = version.get("data", {})
        version_name = version.get("name", None)
        colorspace = version_data.get("colorspace", None)
        object_name = "{}_{}".format(name, namespace)
        object_name = self.clip_name_template.format(
            **context["representation"]["context"])

        # add additional metadata from the version to imprint Avalon knob
        add_keys = [
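Example (illustrative only; the context values are assumptions): the new
clip_name_template is filled from the representation's context, so a loaded
clip gets a deterministic timeline name:

    clip_name_template = "{asset}_{subset}_{representation}"
    context = {"asset": "sh010", "subset": "plateMain", "representation": "exr"}
    print(clip_name_template.format(**context))  # -> "sh010_plateMain_exr"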
59 openpype/hosts/hiero/plugins/publish/extract_thumbnail.py Normal file

@@ -0,0 +1,59 @@
import os
import pyblish.api
import openpype.api


class ExtractThumnail(openpype.api.Extractor):
    """
    Extractor for track item's thumbnails
    """

    label = "Extract Thumbnail"
    order = pyblish.api.ExtractorOrder
    families = ["plate", "take"]
    hosts = ["hiero"]

    def process(self, instance):
        # create representation data
        if "representations" not in instance.data:
            instance.data["representations"] = []

        staging_dir = self.staging_dir(instance)

        self.create_thumbnail(staging_dir, instance)

    def create_thumbnail(self, staging_dir, instance):
        track_item = instance.data["item"]
        track_item_name = track_item.name()

        # frames
        duration = track_item.sourceDuration()
        frame_start = track_item.sourceIn()
        self.log.debug(
            "__ frame_start: `{}`, duration: `{}`".format(
                frame_start, duration))

        # get thumbnail frame from the middle
        thumb_frame = int(frame_start + (duration / 2))

        thumb_file = "{}thumbnail{}{}".format(
            track_item_name, thumb_frame, ".png")
        thumb_path = os.path.join(staging_dir, thumb_file)

        thumbnail = track_item.thumbnail(thumb_frame).save(
            thumb_path,
            format='png'
        )
        self.log.debug(
            "__ thumb_path: `{}`, frame: `{}`".format(thumbnail, thumb_frame))

        self.log.info("Thumbnail was generated to: {}".format(thumb_path))
        thumb_representation = {
            'files': thumb_file,
            'stagingDir': staging_dir,
            'name': "thumbnail",
            'thumbnail': True,
            'ext': "png"
        }
        instance.data["representations"].append(
            thumb_representation)
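Example (illustrative arithmetic; the item name and frame values are
assumptions): for a track item named "sh010_plate" with sourceIn 100 and a
source duration of 50 frames, the extractor picks the middle frame and builds
the staged file name like so:

    frame_start = 100
    duration = 50
    thumb_frame = int(frame_start + (duration / 2))      # -> 125
    thumb_file = "{}thumbnail{}{}".format("sh010_plate", thumb_frame, ".png")
    # -> "sh010_platethumbnail125.png"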
@@ -2,7 +2,7 @@ from pyblish import api
import openpype.api as pype


class VersionUpWorkfile(api.ContextPlugin):
class IntegrateVersionUpWorkfile(api.ContextPlugin):
    """Save as new workfile version"""

    order = api.IntegratorOrder + 10.1
@@ -1,221 +1,204 @@
from compiler.ast import flatten
from pyblish import api
import pyblish
import openpype
from openpype.hosts.hiero import api as phiero
import hiero
# from openpype.hosts.hiero.api import lib
# reload(lib)
# reload(phiero)
from openpype.hosts.hiero.otio import hiero_export

# # developer reload modules
from pprint import pformat


class PreCollectInstances(api.ContextPlugin):
class PrecollectInstances(pyblish.api.ContextPlugin):
    """Collect all Track items selection."""

    order = api.CollectorOrder - 0.509
    label = "Pre-collect Instances"
    order = pyblish.api.CollectorOrder - 0.59
    label = "Precollect Instances"
    hosts = ["hiero"]

    def process(self, context):
        track_items = phiero.get_track_items(
            selected=True, check_tagged=True, check_enabled=True)
        # only return enabled track items
        if not track_items:
            track_items = phiero.get_track_items(
                check_enabled=True, check_tagged=True)
        # get sequence and video tracks
        sequence = context.data["activeSequence"]
        tracks = sequence.videoTracks()

        # add collection to context
        tracks_effect_items = self.collect_sub_track_items(tracks)

        context.data["tracksEffectItems"] = tracks_effect_items

        otio_timeline = context.data["otioTimeline"]
        selected_timeline_items = phiero.get_track_items(
            selected=True, check_enabled=True, check_tagged=True)
        self.log.info(
            "Processing enabled track items: {}".format(len(track_items)))
            "Processing enabled track items: {}".format(
                selected_timeline_items))

        for track_item in selected_timeline_items:

        for _ti in track_items:
            data = dict()
            clip = _ti.source()
            clip_name = track_item.name()

            # get clips subtracks and annotations
            annotations = self.clip_annotations(clip)
            subtracks = self.clip_subtrack(_ti)
            self.log.debug("Annotations: {}".format(annotations))
            self.log.debug(">> Subtracks: {}".format(subtracks))
            # get openpype tag data
            tag_data = phiero.get_track_item_pype_data(track_item)
            self.log.debug("__ tag_data: {}".format(pformat(tag_data)))

            # get pype tag data
            tag_parsed_data = phiero.get_track_item_pype_data(_ti)
            # self.log.debug(pformat(tag_parsed_data))

            if not tag_parsed_data:
            if not tag_data:
                continue

            if tag_parsed_data.get("id") != "pyblish.avalon.instance":
            if tag_data.get("id") != "pyblish.avalon.instance":
                continue

            # solve handles length
            tag_data["handleStart"] = min(
                tag_data["handleStart"], int(track_item.handleInLength()))
            tag_data["handleEnd"] = min(
                tag_data["handleEnd"], int(track_item.handleOutLength()))

            # add tag data to instance data
            data.update({
                k: v for k, v in tag_parsed_data.items()
                k: v for k, v in tag_data.items()
                if k not in ("id", "applieswhole", "label")
            })

            asset = tag_parsed_data["asset"]
            subset = tag_parsed_data["subset"]
            review = tag_parsed_data.get("review")
            audio = tag_parsed_data.get("audio")

            # remove audio attribute from data
            data.pop("audio")
            asset = tag_data["asset"]
            subset = tag_data["subset"]

            # insert family into families
            family = tag_parsed_data["family"]
            families = [str(f) for f in tag_parsed_data["families"]]
            family = tag_data["family"]
            families = [str(f) for f in tag_data["families"]]
            families.insert(0, str(family))

            track = _ti.parent()
            media_source = _ti.source().mediaSource()
            source_path = media_source.firstpath()
            file_head = media_source.filenameHead()
            file_info = media_source.fileinfos().pop()
            source_first_frame = int(file_info.startFrame())

            # apply only for review and master track instance
            if review:
                families += ["review", "ftrack"]
            # form label
            label = asset
            if asset != clip_name:
                label += " ({})".format(clip_name)
            label += " {}".format(subset)
            label += " {}".format("[" + ", ".join(families) + "]")

            data.update({
                "name": "{} {} {}".format(asset, subset, families),
                "name": "{}_{}".format(asset, subset),
                "label": label,
                "asset": asset,
                "item": _ti,
                "item": track_item,
                "families": families,

                # tags
                "tags": _ti.tags(),

                # track item attributes
                "track": track.name(),
                "trackItem": track,

                # version data
                "versionData": {
                    "colorspace": _ti.sourceMediaColourTransform()
                },

                # source attribute
                "source": source_path,
                "sourceMedia": media_source,
                "sourcePath": source_path,
                "sourceFileHead": file_head,
                "sourceFirst": source_first_frame,

                # clip's effect
                "clipEffectItems": subtracks
                "publish": tag_data["publish"],
                "fps": context.data["fps"]
            })

            # otio clip data
            otio_data = self.get_otio_clip_instance_data(
                otio_timeline, track_item) or {}
            self.log.debug("__ otio_data: {}".format(pformat(otio_data)))
            data.update(otio_data)
            self.log.debug("__ data: {}".format(pformat(data)))

            # add resolution
            self.get_resolution_to_data(data, context)

            # create instance
            instance = context.create_instance(**data)

            # create shot instance for shot attributes create/update
            self.create_shot_instance(context, **data)

            self.log.info("Creating instance: {}".format(instance))
            self.log.debug(
                "_ instance.data: {}".format(pformat(instance.data)))

            if audio:
                a_data = dict()
    def get_resolution_to_data(self, data, context):
        assert data.get("otioClip"), "Missing `otioClip` data"

                # add tag data to instance data
                a_data.update({
                    k: v for k, v in tag_parsed_data.items()
                    if k not in ("id", "applieswhole", "label")
                })
        # solve source resolution option
        if data.get("sourceResolution", None):
            otio_clip_metadata = data[
                "otioClip"].media_reference.metadata
            data.update({
                "resolutionWidth": otio_clip_metadata[
                    "openpype.source.width"],
                "resolutionHeight": otio_clip_metadata[
                    "openpype.source.height"],
                "pixelAspect": otio_clip_metadata[
                    "openpype.source.pixelAspect"]
            })
        else:
            otio_tl_metadata = context.data["otioTimeline"].metadata
            data.update({
                "resolutionWidth": otio_tl_metadata["openpype.timeline.width"],
                "resolutionHeight": otio_tl_metadata[
                    "openpype.timeline.height"],
                "pixelAspect": otio_tl_metadata[
                    "openpype.timeline.pixelAspect"]
            })

                # create main attributes
                subset = "audioMain"
                family = "audio"
                families = ["clip", "ftrack"]
                families.insert(0, str(family))
    def create_shot_instance(self, context, **data):
        master_layer = data.get("heroTrack")
        hierarchy_data = data.get("hierarchyData")
        asset = data.get("asset")
        item = data.get("item")
        clip_name = item.name()

                name = "{} {} {}".format(asset, subset, families)
        if not master_layer:
            return

                a_data.update({
                    "name": name,
                    "subset": subset,
                    "asset": asset,
                    "family": family,
                    "families": families,
                    "item": _ti,
        if not hierarchy_data:
            return

                    # tags
                    "tags": _ti.tags(),
                })
        asset = data["asset"]
        subset = "shotMain"

                a_instance = context.create_instance(**a_data)
                self.log.info("Creating audio instance: {}".format(a_instance))
        # insert family into families
        family = "shot"

        # form label
        label = asset
        if asset != clip_name:
            label += " ({}) ".format(clip_name)
        label += " {}".format(subset)
        label += " [{}]".format(family)

        data.update({
            "name": "{}_{}".format(asset, subset),
            "label": label,
            "subset": subset,
            "asset": asset,
            "family": family,
            "families": []
        })

        instance = context.create_instance(**data)
        self.log.info("Creating instance: {}".format(instance))
        self.log.debug(
            "_ instance.data: {}".format(pformat(instance.data)))

    def get_otio_clip_instance_data(self, otio_timeline, track_item):
        """
        Return otio objects for timeline, track and clip

        Args:
            timeline_item_data (dict): timeline_item_data from list returned by
                resolve.get_current_timeline_items()
            otio_timeline (otio.schema.Timeline): otio object

        Returns:
            dict: otio clip object

        """
        ti_track_name = track_item.parent().name()
        timeline_range = self.create_otio_time_range_from_timeline_item_data(
            track_item)
        for otio_clip in otio_timeline.each_clip():
            track_name = otio_clip.parent().name
            parent_range = otio_clip.range_in_parent()
            if ti_track_name not in track_name:
                continue
            if otio_clip.name not in track_item.name():
                continue
            if openpype.lib.is_overlapping_otio_ranges(
                    parent_range, timeline_range, strict=True):

                # add pypedata marker to otio_clip metadata
                for marker in otio_clip.markers:
                    if phiero.pype_tag_name in marker.name:
                        otio_clip.metadata.update(marker.metadata)
                return {"otioClip": otio_clip}

        return None

    @staticmethod
    def clip_annotations(clip):
        """
        Returns list of Clip's hiero.core.Annotation
        """
        annotations = []
        subTrackItems = flatten(clip.subTrackItems())
        annotations += [item for item in subTrackItems if isinstance(
            item, hiero.core.Annotation)]
        return annotations
    def create_otio_time_range_from_timeline_item_data(track_item):
        timeline = phiero.get_current_sequence()
        frame_start = int(track_item.timelineIn())
        frame_duration = int(track_item.sourceDuration())
        fps = timeline.framerate().toFloat()

    @staticmethod
    def clip_subtrack(clip):
        """
        Returns list of Clip's hiero.core.SubTrackItem
        """
        subtracks = []
        subTrackItems = flatten(clip.parent().subTrackItems())
        for item in subTrackItems:
            # avoid all annotations
            if isinstance(item, hiero.core.Annotation):
                continue
            # avoid all disabled items
            if not item.isEnabled():
                continue
            subtracks.append(item)
        return subtracks

    @staticmethod
    def collect_sub_track_items(tracks):
        """
        Returns dictionary with track index as key and list of subtracks
        """
        # collect all subtrack items
        sub_track_items = dict()
        for track in tracks:
            items = track.items()

            # skip if no clips on track > need track with effect only
            if items:
                continue

            # skip all disabled tracks
            if not track.isEnabled():
                continue

            track_index = track.trackIndex()
            _sub_track_items = flatten(track.subTrackItems())

            # continue only if any subtrack items are collected
            if len(_sub_track_items) < 1:
                continue

            enabled_sti = list()
            # loop all found subtrack items and check if they are enabled
            for _sti in _sub_track_items:
                # checking if not enabled
                if not _sti.isEnabled():
                    continue
                if isinstance(_sti, hiero.core.Annotation):
                    continue
                # collect the subtrack item
                enabled_sti.append(_sti)

            # continue only if any subtrack items are collected
            if len(enabled_sti) < 1:
                continue

            # add collection of subtrackitems to dict
            sub_track_items[track_index] = enabled_sti

        return sub_track_items
        return hiero_export.create_otio_time_range(
            frame_start, frame_duration, fps)
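Example (hypothetical sketch; the frame values are assumptions): the helper at
the end builds an otio TimeRange from a track item's timeline-in point and
source duration, which is then overlap-tested against each otio clip's
range_in_parent():

    import opentimelineio as otio

    frame_start, frame_duration, fps = 1001, 48, 25.0
    timeline_range = otio.opentime.TimeRange(
        start_time=otio.opentime.RationalTime(frame_start, fps),
        duration=otio.opentime.RationalTime(frame_duration, fps))
    print(timeline_range.end_time_inclusive().value)  # -> 1048.0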
@@ -1,52 +1,57 @@
import os
import pyblish.api
import hiero.ui
from openpype.hosts.hiero import api as phiero
from avalon import api as avalon
from pprint import pformat
from openpype.hosts.hiero.otio import hiero_export
from Qt.QtGui import QPixmap
import tempfile


class PreCollectWorkfile(pyblish.api.ContextPlugin):
class PrecollectWorkfile(pyblish.api.ContextPlugin):
    """Inject the current working file into context"""

    label = "Pre-collect Workfile"
    order = pyblish.api.CollectorOrder - 0.51
    label = "Precollect Workfile"
    order = pyblish.api.CollectorOrder - 0.6

    def process(self, context):

        asset = avalon.Session["AVALON_ASSET"]
        subset = "workfile"

        project = phiero.get_current_project()
        active_sequence = phiero.get_current_sequence()
        video_tracks = active_sequence.videoTracks()
        audio_tracks = active_sequence.audioTracks()
        current_file = project.path()
        staging_dir = os.path.dirname(current_file)
        base_name = os.path.basename(current_file)
        active_timeline = hiero.ui.activeSequence()
        fps = active_timeline.framerate().toFloat()

        # get workfile's colorspace properties
        _clrs = {}
        _clrs["useOCIOEnvironmentOverride"] = project.useOCIOEnvironmentOverride()  # noqa
        _clrs["lutSetting16Bit"] = project.lutSetting16Bit()
        _clrs["lutSetting8Bit"] = project.lutSetting8Bit()
        _clrs["lutSettingFloat"] = project.lutSettingFloat()
        _clrs["lutSettingLog"] = project.lutSettingLog()
        _clrs["lutSettingViewer"] = project.lutSettingViewer()
        _clrs["lutSettingWorkingSpace"] = project.lutSettingWorkingSpace()
        _clrs["lutUseOCIOForExport"] = project.lutUseOCIOForExport()
        _clrs["ocioConfigName"] = project.ocioConfigName()
        _clrs["ocioConfigPath"] = project.ocioConfigPath()
        # adding otio timeline to context
        otio_timeline = hiero_export.create_otio_timeline()

        # set main project attributes to context
        context.data["activeProject"] = project
        context.data["activeSequence"] = active_sequence
        context.data["videoTracks"] = video_tracks
        context.data["audioTracks"] = audio_tracks
        context.data["currentFile"] = current_file
        context.data["colorspace"] = _clrs
        # get workfile thumbnail paths
        tmp_staging = tempfile.mkdtemp(prefix="pyblish_tmp_")
        thumbnail_name = "workfile_thumbnail.png"
        thumbnail_path = os.path.join(tmp_staging, thumbnail_name)

        self.log.info("currentFile: {}".format(current_file))
        # search for all windows with name of actual sequence
        _windows = [w for w in hiero.ui.windowManager().windows()
                    if active_timeline.name() in w.windowTitle()]

        # export window to thumb path
        QPixmap.grabWidget(_windows[-1]).save(thumbnail_path, 'png')

        # thumbnail
        thumb_representation = {
            'files': thumbnail_name,
            'stagingDir': tmp_staging,
            'name': "thumbnail",
            'thumbnail': True,
            'ext': "png"
        }

        # get workfile paths
        curent_file = project.path()
        staging_dir, base_name = os.path.split(curent_file)

        # creating workfile representation
        representation = {
        workfile_representation = {
            'name': 'hrox',
            'ext': 'hrox',
            'files': base_name,

@@ -59,16 +64,21 @@ class PreCollectWorkfile(pyblish.api.ContextPlugin):
            "subset": "{}{}".format(asset, subset.capitalize()),
            "item": project,
            "family": "workfile",

            # version data
            "versionData": {
                "colorspace": _clrs
            },

            # source attribute
            "sourcePath": current_file,
            "representations": [representation]
            "representations": [workfile_representation, thumb_representation]
        }

        # create instance with workfile
        instance = context.create_instance(**instance_data)

        # update context with main project attributes
        context_data = {
            "activeProject": project,
            "otioTimeline": otio_timeline,
            "currentFile": curent_file,
            "fps": fps,
        }
        context.data.update(context_data)

        self.log.info("Creating instance: {}".format(instance))
        self.log.debug("__ instance.data: {}".format(pformat(instance.data)))
        self.log.debug("__ context_data: {}".format(pformat(context_data)))
@@ -5,7 +5,7 @@ class CollectFrameRanges(pyblish.api.InstancePlugin):
    """ Collect all frame ranges.
    """

    order = pyblish.api.CollectorOrder
    order = pyblish.api.CollectorOrder - 0.1
    label = "Collect Frame Ranges"
    hosts = ["hiero"]
    families = ["clip", "effect"]
@@ -39,8 +39,8 @@ class CollectHierarchy(pyblish.api.ContextPlugin):
        if not set(self.families).intersection(families):
            continue

        # exclude if not masterLayer True
        if not instance.data.get("masterLayer"):
        # exclude if not heroTrack True
        if not instance.data.get("heroTrack"):
            continue

        # update families to include `shot` for hierarchy integration
@@ -29,7 +29,7 @@ class CollectReview(api.InstancePlugin):
        Exception: description

    """
    review_track = instance.data.get("review")
    review_track = instance.data.get("reviewTrack")
    video_tracks = instance.context.data["videoTracks"]
    for track in video_tracks:
        if review_track not in track.name():
@@ -132,7 +132,7 @@ class ExtractReviewPreparation(openpype.api.Extractor):
        ).format(**locals())

        self.log.debug("ffprob_cmd: {}".format(ffprob_cmd))
        audio_check_output = openpype.api.subprocess(ffprob_cmd)
        audio_check_output = openpype.api.run_subprocess(ffprob_cmd)
        self.log.debug(
            "audio_check_output: {}".format(audio_check_output))


@@ -167,7 +167,7 @@ class ExtractReviewPreparation(openpype.api.Extractor):

        # try to get video native resolution data
        try:
            resolution_output = openpype.api.subprocess((
            resolution_output = openpype.api.run_subprocess((
                "\"{ffprobe_path}\" -i \"{full_input_path}\""
                " -v error "
                "-select_streams v:0 -show_entries "

@@ -280,7 +280,7 @@ class ExtractReviewPreparation(openpype.api.Extractor):

        # run subprocess
        self.log.debug("Executing: {}".format(subprcs_cmd))
        output = openpype.api.subprocess(subprcs_cmd)
        output = openpype.api.run_subprocess(subprcs_cmd)
        self.log.debug("Output: {}".format(output))

        repre_new = {
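Example (hypothetical, for illustration only; the binary and media paths are
assumptions, and the full -show_entries argument is elided in the hunk above):
after .format() expansion the resolution probe becomes a plain command string
such as

    "/opt/ffmpeg/ffprobe" -i "/tmp/review.mov" -v error -select_streams v:0 -show_entries ...

which run_subprocess executes, returning the captured output for parsing.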
@@ -0,0 +1,223 @@
from compiler.ast import flatten
from pyblish import api
from openpype.hosts.hiero import api as phiero
import hiero
# from openpype.hosts.hiero.api import lib
# reload(lib)
# reload(phiero)


class PreCollectInstances(api.ContextPlugin):
    """Collect all Track items selection."""

    order = api.CollectorOrder - 0.509
    label = "Pre-collect Instances"
    hosts = ["hiero"]

    def process(self, context):
        track_items = phiero.get_track_items(
            selected=True, check_tagged=True, check_enabled=True)
        # only return enabled track items
        if not track_items:
            track_items = phiero.get_track_items(
                check_enabled=True, check_tagged=True)
        # get sequence and video tracks
        sequence = context.data["activeSequence"]
        tracks = sequence.videoTracks()

        # add collection to context
        tracks_effect_items = self.collect_sub_track_items(tracks)

        context.data["tracksEffectItems"] = tracks_effect_items

        self.log.info(
            "Processing enabled track items: {}".format(len(track_items)))

        for _ti in track_items:
            data = {}
            clip = _ti.source()

            # get clips subtracks and annotations
            annotations = self.clip_annotations(clip)
            subtracks = self.clip_subtrack(_ti)
            self.log.debug("Annotations: {}".format(annotations))
            self.log.debug(">> Subtracks: {}".format(subtracks))

            # get pype tag data
            tag_parsed_data = phiero.get_track_item_pype_data(_ti)
            # self.log.debug(pformat(tag_parsed_data))

            if not tag_parsed_data:
                continue

            if tag_parsed_data.get("id") != "pyblish.avalon.instance":
                continue
            # add tag data to instance data
            data.update({
                k: v for k, v in tag_parsed_data.items()
                if k not in ("id", "applieswhole", "label")
            })

            asset = tag_parsed_data["asset"]
            subset = tag_parsed_data["subset"]
            review_track = tag_parsed_data.get("reviewTrack")
            hiero_track = tag_parsed_data.get("heroTrack")
            audio = tag_parsed_data.get("audio")

            # remove audio attribute from data
            data.pop("audio")

            # insert family into families
            family = tag_parsed_data["family"]
            families = [str(f) for f in tag_parsed_data["families"]]
            families.insert(0, str(family))

            track = _ti.parent()
            media_source = _ti.source().mediaSource()
            source_path = media_source.firstpath()
            file_head = media_source.filenameHead()
            file_info = media_source.fileinfos().pop()
            source_first_frame = int(file_info.startFrame())

            # apply only for review and master track instance
            if review_track and hiero_track:
                families += ["review", "ftrack"]

            data.update({
                "name": "{} {} {}".format(asset, subset, families),
                "asset": asset,
                "item": _ti,
                "families": families,

                # tags
                "tags": _ti.tags(),

                # track item attributes
                "track": track.name(),
                "trackItem": track,
                "reviewTrack": review_track,

                # version data
                "versionData": {
                    "colorspace": _ti.sourceMediaColourTransform()
                },

                # source attribute
                "source": source_path,
                "sourceMedia": media_source,
                "sourcePath": source_path,
                "sourceFileHead": file_head,
                "sourceFirst": source_first_frame,

                # clip's effect
                "clipEffectItems": subtracks
            })

            instance = context.create_instance(**data)

            self.log.info("Creating instance.data: {}".format(instance.data))

            if audio:
                a_data = dict()

                # add tag data to instance data
                a_data.update({
                    k: v for k, v in tag_parsed_data.items()
                    if k not in ("id", "applieswhole", "label")
                })

                # create main attributes
                subset = "audioMain"
                family = "audio"
                families = ["clip", "ftrack"]
                families.insert(0, str(family))

                name = "{} {} {}".format(asset, subset, families)

                a_data.update({
                    "name": name,
                    "subset": subset,
                    "asset": asset,
                    "family": family,
                    "families": families,
                    "item": _ti,

                    # tags
                    "tags": _ti.tags(),
                })

                a_instance = context.create_instance(**a_data)
                self.log.info("Creating audio instance: {}".format(a_instance))

    @staticmethod
    def clip_annotations(clip):
        """
        Returns list of Clip's hiero.core.Annotation
        """
        annotations = []
        subTrackItems = flatten(clip.subTrackItems())
        annotations += [item for item in subTrackItems if isinstance(
            item, hiero.core.Annotation)]
        return annotations

    @staticmethod
    def clip_subtrack(clip):
        """
        Returns list of Clip's hiero.core.SubTrackItem
        """
        subtracks = []
        subTrackItems = flatten(clip.parent().subTrackItems())
        for item in subTrackItems:
            # avoid all annotations
            if isinstance(item, hiero.core.Annotation):
                continue
            # avoid all disabled items
            if not item.isEnabled():
                continue
            subtracks.append(item)
        return subtracks

    @staticmethod
    def collect_sub_track_items(tracks):
        """
        Returns dictionary with track index as key and list of subtracks
        """
        # collect all subtrack items
        sub_track_items = dict()
        for track in tracks:
            items = track.items()

            # skip if no clips on track > need track with effect only
            if items:
                continue

            # skip all disabled tracks
            if not track.isEnabled():
                continue

            track_index = track.trackIndex()
            _sub_track_items = flatten(track.subTrackItems())

            # continue only if any subtrack items are collected
            if len(_sub_track_items) < 1:
                continue

            enabled_sti = list()
            # loop all found subtrack items and check if they are enabled
            for _sti in _sub_track_items:
                # checking if not enabled
                if not _sti.isEnabled():
                    continue
                if isinstance(_sti, hiero.core.Annotation):
                    continue
                # collect the subtrack item
                enabled_sti.append(_sti)

            # continue only if any subtrack items are collected
            if len(enabled_sti) < 1:
                continue

            # add collection of subtrackitems to dict
            sub_track_items[track_index] = enabled_sti

        return sub_track_items
@@ -0,0 +1,74 @@
import os
import pyblish.api
from openpype.hosts.hiero import api as phiero
from avalon import api as avalon


class PreCollectWorkfile(pyblish.api.ContextPlugin):
    """Inject the current working file into context"""

    label = "Pre-collect Workfile"
    order = pyblish.api.CollectorOrder - 0.51

    def process(self, context):
        asset = avalon.Session["AVALON_ASSET"]
        subset = "workfile"

        project = phiero.get_current_project()
        active_sequence = phiero.get_current_sequence()
        video_tracks = active_sequence.videoTracks()
        audio_tracks = active_sequence.audioTracks()
        current_file = project.path()
        staging_dir = os.path.dirname(current_file)
        base_name = os.path.basename(current_file)

        # get workfile's colorspace properties
        _clrs = {}
        _clrs["useOCIOEnvironmentOverride"] = project.useOCIOEnvironmentOverride()  # noqa
        _clrs["lutSetting16Bit"] = project.lutSetting16Bit()
        _clrs["lutSetting8Bit"] = project.lutSetting8Bit()
        _clrs["lutSettingFloat"] = project.lutSettingFloat()
        _clrs["lutSettingLog"] = project.lutSettingLog()
        _clrs["lutSettingViewer"] = project.lutSettingViewer()
        _clrs["lutSettingWorkingSpace"] = project.lutSettingWorkingSpace()
        _clrs["lutUseOCIOForExport"] = project.lutUseOCIOForExport()
        _clrs["ocioConfigName"] = project.ocioConfigName()
        _clrs["ocioConfigPath"] = project.ocioConfigPath()

        # set main project attributes to context
        context.data["activeProject"] = project
        context.data["activeSequence"] = active_sequence
        context.data["videoTracks"] = video_tracks
        context.data["audioTracks"] = audio_tracks
        context.data["currentFile"] = current_file
        context.data["colorspace"] = _clrs

        self.log.info("currentFile: {}".format(current_file))

        # creating workfile representation
        representation = {
            'name': 'hrox',
            'ext': 'hrox',
            'files': base_name,
            "stagingDir": staging_dir,
        }

        instance_data = {
            "name": "{}_{}".format(asset, subset),
            "asset": asset,
            "subset": "{}{}".format(asset, subset.capitalize()),
            "item": project,
            "family": "workfile",

            # version data
            "versionData": {
                "colorspace": _clrs
            },

            # source attribute
            "sourcePath": current_file,
            "representations": [representation]
        }

        instance = context.create_instance(**instance_data)
        self.log.info("Creating instance: {}".format(instance))
@@ -1,338 +1,28 @@
# MIT License
#
# Copyright (c) 2018 Daniel Flehner Heen (Storm Studios)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#!/usr/bin/env python
# -*- coding: utf-8 -*-

__author__ = "Daniel Flehner Heen"
__credits__ = ["Jakub Jezek", "Daniel Flehner Heen"]

import os
import re
import hiero.core
from hiero.core import util

import opentimelineio as otio


marker_color_map = {
    "magenta": otio.schema.MarkerColor.MAGENTA,
    "red": otio.schema.MarkerColor.RED,
    "yellow": otio.schema.MarkerColor.YELLOW,
    "green": otio.schema.MarkerColor.GREEN,
    "cyan": otio.schema.MarkerColor.CYAN,
    "blue": otio.schema.MarkerColor.BLUE,
}

from openpype.hosts.hiero.otio import hiero_export

class OTIOExportTask(hiero.core.TaskBase):

    def __init__(self, initDict):
        """Initialize"""
        hiero.core.TaskBase.__init__(self, initDict)
        self.otio_timeline = None

    def name(self):
        return str(type(self))

    def get_rate(self, item):
        if not hasattr(item, 'framerate'):
            item = item.sequence()

        num, den = item.framerate().toRational()
        rate = float(num) / float(den)

        if rate.is_integer():
            return rate

        return round(rate, 2)

    def get_clip_ranges(self, trackitem):
        # Get rate from source or sequence
        if trackitem.source().mediaSource().hasVideo():
            rate_item = trackitem.source()

        else:
            rate_item = trackitem.sequence()

        source_rate = self.get_rate(rate_item)

        # Reversed video/audio
        if trackitem.playbackSpeed() < 0:
            start = trackitem.sourceOut()

        else:
            start = trackitem.sourceIn()

        source_start_time = otio.opentime.RationalTime(
            start,
            source_rate
        )
        source_duration = otio.opentime.RationalTime(
            trackitem.duration(),
            source_rate
        )

        source_range = otio.opentime.TimeRange(
            start_time=source_start_time,
            duration=source_duration
        )

        hiero_clip = trackitem.source()

        available_range = None
        if hiero_clip.mediaSource().isMediaPresent():
            start_time = otio.opentime.RationalTime(
                hiero_clip.mediaSource().startTime(),
                source_rate
            )
            duration = otio.opentime.RationalTime(
                hiero_clip.mediaSource().duration(),
                source_rate
            )
            available_range = otio.opentime.TimeRange(
                start_time=start_time,
                duration=duration
            )

        return source_range, available_range

    def add_gap(self, trackitem, otio_track, prev_out):
        gap_length = trackitem.timelineIn() - prev_out
        if prev_out != 0:
            gap_length -= 1

        rate = self.get_rate(trackitem.sequence())
        gap = otio.opentime.TimeRange(
            duration=otio.opentime.RationalTime(
                gap_length,
                rate
            )
        )
        otio_gap = otio.schema.Gap(source_range=gap)
        otio_track.append(otio_gap)

    def get_marker_color(self, tag):
        icon = tag.icon()
        pat = r'icons:Tag(?P<color>\w+)\.\w+'

        res = re.search(pat, icon)
        if res:
            color = res.groupdict().get('color')
            if color.lower() in marker_color_map:
                return marker_color_map[color.lower()]

        return otio.schema.MarkerColor.RED
|
||||
def add_markers(self, hiero_item, otio_item):
|
||||
for tag in hiero_item.tags():
|
||||
if not tag.visible():
|
||||
continue
|
||||
|
||||
if tag.name() == 'Copy':
|
||||
# Hiero adds this tag to a lot of clips
|
||||
continue
|
||||
|
||||
frame_rate = self.get_rate(hiero_item)
|
||||
|
||||
marked_range = otio.opentime.TimeRange(
|
||||
start_time=otio.opentime.RationalTime(
|
||||
tag.inTime(),
|
||||
frame_rate
|
||||
),
|
||||
duration=otio.opentime.RationalTime(
|
||||
int(tag.metadata().dict().get('tag.length', '0')),
|
||||
frame_rate
|
||||
)
|
||||
)
|
||||
|
||||
metadata = dict(
|
||||
Hiero=tag.metadata().dict()
|
||||
)
|
||||
# Store the source item for future import assignment
|
||||
metadata['Hiero']['source_type'] = hiero_item.__class__.__name__
|
||||
|
||||
marker = otio.schema.Marker(
|
||||
name=tag.name(),
|
||||
color=self.get_marker_color(tag),
|
||||
marked_range=marked_range,
|
||||
metadata=metadata
|
||||
)
|
||||
|
||||
otio_item.markers.append(marker)
|
||||
|
||||
def add_clip(self, trackitem, otio_track, itemindex):
|
||||
hiero_clip = trackitem.source()
|
||||
|
||||
# Add Gap if needed
|
||||
if itemindex == 0:
|
||||
prev_item = trackitem
|
||||
|
||||
else:
|
||||
prev_item = trackitem.parent().items()[itemindex - 1]
|
||||
|
||||
clip_diff = trackitem.timelineIn() - prev_item.timelineOut()
|
||||
|
||||
if itemindex == 0 and trackitem.timelineIn() > 0:
|
||||
self.add_gap(trackitem, otio_track, 0)
|
||||
|
||||
elif itemindex and clip_diff != 1:
|
||||
self.add_gap(trackitem, otio_track, prev_item.timelineOut())
|
||||
|
||||
# Create Clip
|
||||
source_range, available_range = self.get_clip_ranges(trackitem)
|
||||
|
||||
otio_clip = otio.schema.Clip(
|
||||
name=trackitem.name(),
|
||||
source_range=source_range
|
||||
)
|
||||
|
||||
# Add media reference
|
||||
media_reference = otio.schema.MissingReference()
|
||||
if hiero_clip.mediaSource().isMediaPresent():
|
||||
source = hiero_clip.mediaSource()
|
||||
first_file = source.fileinfos()[0]
|
||||
path = first_file.filename()
|
||||
|
||||
if "%" in path:
|
||||
path = re.sub(r"%\d+d", "%d", path)
|
||||
if "#" in path:
|
||||
path = re.sub(r"#+", "%d", path)
|
||||
|
||||
media_reference = otio.schema.ExternalReference(
|
||||
target_url=u'{}'.format(path),
|
||||
available_range=available_range
|
||||
)
|
||||
|
||||
otio_clip.media_reference = media_reference
|
||||
|
||||
# Add Time Effects
|
||||
playbackspeed = trackitem.playbackSpeed()
|
||||
if playbackspeed != 1:
|
||||
if playbackspeed == 0:
|
||||
time_effect = otio.schema.FreezeFrame()
|
||||
|
||||
else:
|
||||
time_effect = otio.schema.LinearTimeWarp(
|
||||
time_scalar=playbackspeed
|
||||
)
|
||||
otio_clip.effects.append(time_effect)
|
||||
|
||||
# Add tags as markers
|
||||
if self._preset.properties()["includeTags"]:
|
||||
self.add_markers(trackitem, otio_clip)
|
||||
self.add_markers(trackitem.source(), otio_clip)
|
||||
|
||||
otio_track.append(otio_clip)
|
||||
|
||||
# Add Transition if needed
|
||||
if trackitem.inTransition() or trackitem.outTransition():
|
||||
self.add_transition(trackitem, otio_track)
|
||||
|
||||
def add_transition(self, trackitem, otio_track):
|
||||
transitions = []
|
||||
|
||||
if trackitem.inTransition():
|
||||
if trackitem.inTransition().alignment().name == 'kFadeIn':
|
||||
transitions.append(trackitem.inTransition())
|
||||
|
||||
if trackitem.outTransition():
|
||||
transitions.append(trackitem.outTransition())
|
||||
|
||||
for transition in transitions:
|
||||
alignment = transition.alignment().name
|
||||
|
||||
if alignment == 'kFadeIn':
|
||||
in_offset_frames = 0
|
||||
out_offset_frames = (
|
||||
transition.timelineOut() - transition.timelineIn()
|
||||
) + 1
|
||||
|
||||
elif alignment == 'kFadeOut':
|
||||
in_offset_frames = (
|
||||
trackitem.timelineOut() - transition.timelineIn()
|
||||
) + 1
|
||||
out_offset_frames = 0
|
||||
|
||||
elif alignment == 'kDissolve':
|
||||
in_offset_frames = (
|
||||
transition.inTrackItem().timelineOut() -
|
||||
transition.timelineIn()
|
||||
)
|
||||
out_offset_frames = (
|
||||
transition.timelineOut() -
|
||||
transition.outTrackItem().timelineIn()
|
||||
)
|
||||
|
||||
else:
|
||||
# kUnknown transition is ignored
|
||||
continue
|
||||
|
||||
rate = trackitem.source().framerate().toFloat()
|
||||
in_time = otio.opentime.RationalTime(in_offset_frames, rate)
|
||||
out_time = otio.opentime.RationalTime(out_offset_frames, rate)
|
||||
|
||||
otio_transition = otio.schema.Transition(
|
||||
name=alignment, # Consider placing Hiero name in metadata
|
||||
transition_type=otio.schema.TransitionTypes.SMPTE_Dissolve,
|
||||
in_offset=in_time,
|
||||
out_offset=out_time
|
||||
)
|
||||
|
||||
if alignment == 'kFadeIn':
|
||||
otio_track.insert(-1, otio_transition)
|
||||
|
||||
else:
|
||||
otio_track.append(otio_transition)
|
||||
|
||||
|
||||
def add_tracks(self):
|
||||
for track in self._sequence.items():
|
||||
if isinstance(track, hiero.core.AudioTrack):
|
||||
kind = otio.schema.TrackKind.Audio
|
||||
|
||||
else:
|
||||
kind = otio.schema.TrackKind.Video
|
||||
|
||||
otio_track = otio.schema.Track(name=track.name(), kind=kind)
|
||||
|
||||
for itemindex, trackitem in enumerate(track):
|
||||
if isinstance(trackitem.source(), hiero.core.Clip):
|
||||
self.add_clip(trackitem, otio_track, itemindex)
|
||||
|
||||
self.otio_timeline.tracks.append(otio_track)
|
||||
|
||||
# Add tags as markers
|
||||
if self._preset.properties()["includeTags"]:
|
||||
self.add_markers(self._sequence, self.otio_timeline.tracks)
|
||||
|
||||
def create_OTIO(self):
|
||||
self.otio_timeline = otio.schema.Timeline()
|
||||
|
||||
# Set global start time based on sequence
|
||||
self.otio_timeline.global_start_time = otio.opentime.RationalTime(
|
||||
self._sequence.timecodeStart(),
|
||||
self._sequence.framerate().toFloat()
|
||||
)
|
||||
self.otio_timeline.name = self._sequence.name()
|
||||
|
||||
self.add_tracks()
|
||||
|
||||
def startTask(self):
|
||||
self.create_OTIO()
|
||||
self.otio_timeline = hiero_export.create_otio_timeline()
|
||||
|
||||
def taskStep(self):
|
||||
return False
|
||||
|
|
@ -350,7 +40,7 @@ class OTIOExportTask(hiero.core.TaskBase):
|
|||
util.filesystem.makeDirs(dirname)
|
||||
|
||||
# write otio file
|
||||
otio.adapters.write_to_file(self.otio_timeline, exportPath)
|
||||
hiero_export.write_to_file(self.otio_timeline, exportPath)
|
||||
|
||||
# Catch all exceptions and log error
|
||||
except Exception as e:
|
||||
|
|
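
As a side note on the exporter above: `get_rate` keeps integer frame rates exact and rounds fractional ones to two decimals. A minimal sketch with an illustrative rational rate (the values are assumed for demonstration, not taken from a real Hiero item):

    # Hypothetical NTSC-film rate; mirrors the rounding in OTIOExportTask.get_rate.
    num, den = 24000, 1001
    rate = float(num) / float(den)
    if not rate.is_integer():
        rate = round(rate, 2)
    print(rate)  # -> 23.98
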
@@ -370,7 +60,7 @@ class OTIOExportPreset(hiero.core.TaskPresetBase):
        """Initialise presets to default values"""
        hiero.core.TaskPresetBase.__init__(self, OTIOExportTask, name)

        self.properties()["includeTags"] = True
        self.properties()["includeTags"] = hiero_export.include_tags = True
        self.properties().update(properties)

    def supportedItems(self):


@@ -1,3 +1,9 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-

__author__ = "Daniel Flehner Heen"
__credits__ = ["Jakub Jezek", "Daniel Flehner Heen"]

import hiero.ui
import OTIOExportTask


@@ -14,6 +20,7 @@ except ImportError:

    FormLayout = QFormLayout  # lint:ok

from openpype.hosts.hiero.otio import hiero_export

class OTIOExportUI(hiero.ui.TaskUIBase):
    def __init__(self, preset):

@@ -27,7 +34,7 @@ class OTIOExportUI(hiero.ui.TaskUIBase):

    def includeMarkersCheckboxChanged(self, state):
        # Slot to handle change of checkbox state
        self._preset.properties()["includeTags"] = state == QtCore.Qt.Checked
        hiero_export.include_tags = state == QtCore.Qt.Checked

    def populateUI(self, widget, exportTemplate):
        layout = widget.layout()
@@ -1,25 +1,3 @@
# MIT License
#
# Copyright (c) 2018 Daniel Flehner Heen (Storm Studios)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.

from OTIOExportTask import OTIOExportTask
from OTIOExportUI import OTIOExportUI
@@ -1,42 +1,91 @@
# MIT License
#
# Copyright (c) 2018 Daniel Flehner Heen (Storm Studios)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#!/usr/bin/env python
# -*- coding: utf-8 -*-

__author__ = "Daniel Flehner Heen"
__credits__ = ["Jakub Jezek", "Daniel Flehner Heen"]

import hiero.ui
import hiero.core

from otioimporter.OTIOImport import load_otio
import PySide2.QtWidgets as qw

from openpype.hosts.hiero.otio.hiero_import import load_otio


class OTIOProjectSelect(qw.QDialog):

    def __init__(self, projects, *args, **kwargs):
        super(OTIOProjectSelect, self).__init__(*args, **kwargs)
        self.setWindowTitle('Please select active project')
        self.layout = qw.QVBoxLayout()

        self.label = qw.QLabel(
            'Unable to determine which project to import sequence to.\n'
            'Please select one.'
        )
        self.layout.addWidget(self.label)

        self.projects = qw.QComboBox()
        self.projects.addItems(map(lambda p: p.name(), projects))
        self.layout.addWidget(self.projects)

        QBtn = qw.QDialogButtonBox.Ok | qw.QDialogButtonBox.Cancel
        self.buttonBox = qw.QDialogButtonBox(QBtn)
        self.buttonBox.accepted.connect(self.accept)
        self.buttonBox.rejected.connect(self.reject)

        self.layout.addWidget(self.buttonBox)
        self.setLayout(self.layout)


def get_sequence(view):
    sequence = None
    if isinstance(view, hiero.ui.TimelineEditor):
        sequence = view.sequence()

    elif isinstance(view, hiero.ui.BinView):
        for item in view.selection():
            if not hasattr(item, 'activeItem'):
                continue

            if isinstance(item.activeItem(), hiero.core.Sequence):
                sequence = item.activeItem()

    return sequence


def OTIO_menu_action(event):
    otio_action = hiero.ui.createMenuAction(
        'Import OTIO',
    # Menu actions
    otio_import_action = hiero.ui.createMenuAction(
        'Import OTIO...',
        open_otio_file,
        icon=None
    )
    hiero.ui.registerAction(otio_action)

    otio_add_track_action = hiero.ui.createMenuAction(
        'New Track(s) from OTIO...',
        open_otio_file,
        icon=None
    )
    otio_add_track_action.setEnabled(False)

    hiero.ui.registerAction(otio_import_action)
    hiero.ui.registerAction(otio_add_track_action)

    view = hiero.ui.currentContextMenuView()

    if view:
        sequence = get_sequence(view)
        if sequence:
            otio_add_track_action.setEnabled(True)

    for action in event.menu.actions():
        if action.text() == 'Import':
            action.menu().addAction(otio_action)
            break
            action.menu().addAction(otio_import_action)
            action.menu().addAction(otio_add_track_action)

        elif action.text() == 'New Track':
            action.menu().addAction(otio_add_track_action)


def open_otio_file():

@@ -45,8 +94,39 @@ def open_otio_file():
        pattern='*.otio',
        requiredExtension='.otio'
    )

    selection = None
    sequence = None

    view = hiero.ui.currentContextMenuView()
    if view:
        sequence = get_sequence(view)
        selection = view.selection()

    if sequence:
        project = sequence.project()

    elif selection:
        project = selection[0].project()

    elif len(hiero.core.projects()) > 1:
        dialog = OTIOProjectSelect(hiero.core.projects())
        if dialog.exec_():
            project = hiero.core.projects()[dialog.projects.currentIndex()]

        else:
            bar = hiero.ui.mainWindow().statusBar()
            bar.showMessage(
                'OTIO Import aborted by user',
                timeout=3000
            )
            return

    else:
        project = hiero.core.projects()[-1]

    for otio_file in files:
        load_otio(otio_file)
        load_otio(otio_file, project, sequence)


# HieroPlayer is quite limited and can't create transitions etc.

@@ -55,3 +135,7 @@ if not hiero.core.isHieroPlayer():
        "kShowContextMenu/kBin",
        OTIO_menu_action
    )
    hiero.core.events.registerInterest(
        "kShowContextMenu/kTimeline",
        OTIO_menu_action
    )
@@ -210,7 +210,7 @@ def validate_fps():

    if current_fps != fps:

        from ...widgets import popup
        from openpype.widgets import popup

        # Find main window
        parent = hou.ui.mainQtWindow()

@@ -219,8 +219,8 @@ def validate_fps():
    else:
        dialog = popup.Popup2(parent=parent)
        dialog.setModal(True)
        dialog.setWindowTitle("Maya scene not in line with project")
        dialog.setMessage("The FPS is out of sync, please fix")
        dialog.setWindowTitle("Houdini scene not in line with project")
        dialog.setMessage("The FPS is out of sync, please fix it")

        # Set new text for button (add optional argument for the popup?)
        toggle = dialog.widgets["toggle"]


@@ -1872,7 +1872,7 @@ def set_context_settings():

    # Set project fps
    fps = asset_data.get("fps", project_data.get("fps", 25))
    api.Session["AVALON_FPS"] = fps
    api.Session["AVALON_FPS"] = str(fps)
    set_scene_fps(fps)

    # Set project resolution
@@ -348,6 +348,13 @@ class CollectLook(pyblish.api.InstancePlugin):
        history = []
        for material in materials:
            history.extend(cmds.listHistory(material))

        # handle VrayPluginNodeMtl node - see #1397
        vray_plugin_nodes = cmds.ls(
            history, type="VRayPluginNodeMtl", long=True)
        for vray_node in vray_plugin_nodes:
            history.extend(cmds.listHistory(vray_node))

        files = cmds.ls(history, type="file", long=True)
        files.extend(cmds.ls(history, type="aiImage", long=True))


@@ -358,9 +358,7 @@ class CollectMayaRender(pyblish.api.ContextPlugin):
            options["extendFrames"] = extend_frames
            options["overrideExistingFrame"] = override_frames

            maya_render_plugin = "MayaPype"
            if attributes.get("useMayaBatch", True):
                maya_render_plugin = "MayaBatch"
            maya_render_plugin = "MayaBatch"

            options["mayaRenderPlugin"] = maya_render_plugin


@@ -74,6 +74,8 @@ class ExtractRedshiftProxy(openpype.api.Extractor):
            'files': repr_files,
            "stagingDir": staging_dir,
        }
        if anim_on:
            representation["frameStart"] = instance.data["proxyFrameStart"]
        instance.data["representations"].append(representation)

        self.log.info("Extracted instance '%s' to: %s"
@@ -10,7 +10,6 @@ print("starting OpenPype usersetup")
settings = get_project_settings(os.environ['AVALON_PROJECT'])
shelf_preset = settings['maya'].get('project_shelf')


if shelf_preset:
    project = os.environ["AVALON_PROJECT"]

@@ -23,7 +22,7 @@ if shelf_preset:
    print(import_string)
    exec(import_string)

    cmds.evalDeferred("mlib.shelf(name=shelf_preset['name'], iconPath=icon_path, preset=shelf_preset)")
    cmds.evalDeferred("mlib.shelf(name=shelf_preset['name'], iconPath=icon_path, preset=shelf_preset)")


print("finished OpenPype usersetup")


@@ -106,7 +106,7 @@ def on_pyblish_instance_toggled(instance, old_value, new_value):
    log.info("instance toggle: {}, old_value: {}, new_value:{} ".format(
        instance, old_value, new_value))

    from avalon.api.nuke import (
    from avalon.nuke import (
        viewer_update_and_undo_stop,
        add_publish_knob
    )
@@ -1,6 +1,8 @@
import os
import re
import sys
import six
import platform
from collections import OrderedDict


@@ -19,7 +21,6 @@ from openpype.api import (
    get_hierarchy,
    get_asset,
    get_current_project_settings,
    config,
    ApplicationManager
)

@@ -29,36 +30,34 @@ from .utils import set_context_favorites

log = Logger().get_logger(__name__)

self = sys.modules[__name__]
self._project = None
self.workfiles_launched = False
self._node_tab_name = "{}".format(os.getenv("AVALON_LABEL") or "Avalon")
opnl = sys.modules[__name__]
opnl._project = None
opnl.project_name = os.getenv("AVALON_PROJECT")
opnl.workfiles_launched = False
opnl._node_tab_name = "{}".format(os.getenv("AVALON_LABEL") or "Avalon")


def get_node_imageio_setting(**kwarg):
def get_created_node_imageio_setting(**kwarg):
    ''' Get preset data for dataflow (fileType, compression, bitDepth)
    '''
    log.info(kwarg)
    host = str(kwarg.get("host", "nuke"))
    log.debug(kwarg)
    nodeclass = kwarg.get("nodeclass", None)
    creator = kwarg.get("creator", None)
    project_name = os.getenv("AVALON_PROJECT")

    assert any([host, nodeclass]), nuke.message(
    assert any([creator, nodeclass]), nuke.message(
        "`{}`: Missing mandatory kwargs `creator`, `nodeclass`".format(__file__))

    imageio_nodes = (get_anatomy_settings(project_name)
                     ["imageio"]
                     .get(host, None)
                     ["nodes"]
                     ["requiredNodes"]
                     )
    imageio = get_anatomy_settings(opnl.project_name)["imageio"]
    imageio_nodes = imageio["nuke"]["nodes"]["requiredNodes"]

    imageio_node = None
    for node in imageio_nodes:
        log.info(node)
        if node["nukeNodeClass"] == nodeclass:
            if creator in node["plugins"]:
                imageio_node = node
        if (node["nukeNodeClass"] != nodeclass) and (
                creator not in node["plugins"]):
            continue

        imageio_node = node

    log.info("ImageIO node: {}".format(imageio_node))
    return imageio_node
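
To make the lookup above concrete, here is a minimal sketch of how `get_created_node_imageio_setting` walks `requiredNodes`; the settings list below is an assumed example of the shape, not real project configuration:

    # Hypothetical "requiredNodes" entries (illustrative only).
    imageio_nodes = [
        {
            "nukeNodeClass": "Write",
            "plugins": ["CreateWriteRender"],
            "knobs": [{"name": "file_type", "value": "exr"}],
        },
    ]

    nodeclass, creator = "Write", "CreateWriteRender"
    imageio_node = None
    for node in imageio_nodes:
        # An entry is skipped only when BOTH the node class and the creator differ.
        if node["nukeNodeClass"] != nodeclass and creator not in node["plugins"]:
            continue
        imageio_node = node
    print(imageio_node["knobs"])  # -> [{'name': 'file_type', 'value': 'exr'}]
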
@@ -67,12 +66,9 @@ def get_node_imageio_setting(**kwarg):
def get_imageio_input_colorspace(filename):
    ''' Get input file colorspace based on regex in settings.
    '''
    imageio_regex_inputs = (get_anatomy_settings(os.getenv("AVALON_PROJECT"))
                            ["imageio"]
                            ["nuke"]
                            ["regexInputs"]
                            ["inputs"]
                            )
    imageio_regex_inputs = (
        get_anatomy_settings(opnl.project_name)
        ["imageio"]["nuke"]["regexInputs"]["inputs"])

    preset_clrsp = None
    for regexInput in imageio_regex_inputs:
@@ -104,40 +100,39 @@ def check_inventory_versions():
    """
    # get all Loader nodes by avalon attribute metadata
    for each in nuke.allNodes():
        if each.Class() == 'Read':
            container = avalon.nuke.parse_container(each)
        container = avalon.nuke.parse_container(each)

            if container:
                node = nuke.toNode(container["objectName"])
                avalon_knob_data = avalon.nuke.read(
                    node)
        if container:
            node = nuke.toNode(container["objectName"])
            avalon_knob_data = avalon.nuke.read(
                node)

                # get representation from io
                representation = io.find_one({
                    "type": "representation",
                    "_id": io.ObjectId(avalon_knob_data["representation"])
                })
            # get representation from io
            representation = io.find_one({
                "type": "representation",
                "_id": io.ObjectId(avalon_knob_data["representation"])
            })

                # Get start frame from version data
                version = io.find_one({
                    "type": "version",
                    "_id": representation["parent"]
                })
            # Get start frame from version data
            version = io.find_one({
                "type": "version",
                "_id": representation["parent"]
            })

                # get all versions in list
                versions = io.find({
                    "type": "version",
                    "parent": version["parent"]
                }).distinct('name')
            # get all versions in list
            versions = io.find({
                "type": "version",
                "parent": version["parent"]
            }).distinct('name')

                max_version = max(versions)
            max_version = max(versions)

                # check the available version and do match
                # change color of node if not max version
                if version.get("name") not in [max_version]:
                    node["tile_color"].setValue(int("0xd84f20ff", 16))
                else:
                    node["tile_color"].setValue(int("0x4ecd25ff", 16))
            # check the available version and do match
            # change color of node if not max version
            if version.get("name") not in [max_version]:
                node["tile_color"].setValue(int("0xd84f20ff", 16))
            else:
                node["tile_color"].setValue(int("0x4ecd25ff", 16))


def writes_version_sync():
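
The tile colors set above are RGBA values packed into a single integer, which is the form Nuke's `tile_color` knob expects; a small illustration (the `node` variable is a stand-in for any Read node):

    # "0xRRGGBBAA" hex strings converted to the int the knob expects.
    out_of_date = int("0xd84f20ff", 16)  # orange-red tint
    up_to_date = int("0x4ecd25ff", 16)   # green tint
    node["tile_color"].setValue(out_of_date)  # assuming `node` is a Read node
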
@@ -153,34 +148,33 @@ def writes_version_sync():
    except Exception:
        return

    for each in nuke.allNodes():
        if each.Class() == 'Write':
            # check if the node is avalon tracked
            if self._node_tab_name not in each.knobs():
    for each in nuke.allNodes(filter="Write"):
        # check if the node is avalon tracked
        if opnl._node_tab_name not in each.knobs():
            continue

            avalon_knob_data = avalon.nuke.read(
                each)

            try:
                if avalon_knob_data['families'] not in ["render"]:
                    log.debug(avalon_knob_data['families'])
                    continue

        avalon_knob_data = avalon.nuke.read(
            each)
                node_file = each['file'].value()

        try:
            if avalon_knob_data['families'] not in ["render"]:
                log.debug(avalon_knob_data['families'])
                continue
                node_version = "v" + get_version_from_path(node_file)
                log.debug("node_version: {}".format(node_version))

            node_file = each['file'].value()

            node_version = "v" + get_version_from_path(node_file)
            log.debug("node_version: {}".format(node_version))

                node_new_file = node_file.replace(node_version, new_version)
                each['file'].setValue(node_new_file)
                if not os.path.isdir(os.path.dirname(node_new_file)):
                    log.warning("Path does not exist! I am creating it.")
                    os.makedirs(os.path.dirname(node_new_file))
            except Exception as e:
                log.warning(
                    "Write node: `{}` has no version in path: {}".format(
                        each.name(), e))
            node_new_file = node_file.replace(node_version, new_version)
            each['file'].setValue(node_new_file)
            if not os.path.isdir(os.path.dirname(node_new_file)):
                log.warning("Path does not exist! I am creating it.")
                os.makedirs(os.path.dirname(node_new_file))
        except Exception as e:
            log.warning(
                "Write node: `{}` has no version in path: {}".format(
                    each.name(), e))


def version_up_script():
@@ -201,24 +195,22 @@ def check_subsetname_exists(nodes, subset_name):
    Returns:
        bool: True or False
    """
    result = next((True for n in nodes
                   if subset_name in avalon.nuke.read(n).get("subset", "")), False)
    return result
    return next((True for n in nodes
                 if subset_name in avalon.nuke.read(n).get("subset", "")),
                False)


def get_render_path(node):
    ''' Generate Render path from presets regarding avalon knob data
    '''
    data = dict()
    data['avalon'] = avalon.nuke.read(
        node)

    data = {'avalon': avalon.nuke.read(node)}
    data_preset = {
        "class": data['avalon']['family'],
        "preset": data['avalon']['families']
        "nodeclass": data['avalon']['family'],
        "families": [data['avalon']['families']],
        "creator": data['avalon']['creator']
    }

    nuke_imageio_writes = get_node_imageio_setting(**data_preset)
    nuke_imageio_writes = get_created_node_imageio_setting(**data_preset)

    application = lib.get_application(os.environ["AVALON_APP_NAME"])
    data.update({

@@ -324,7 +316,7 @@ def create_write_node(name, data, input=None, prenodes=None, review=True):
        node (obj): group node with avalon data as Knobs
    '''

    imageio_writes = get_node_imageio_setting(**data)
    imageio_writes = get_created_node_imageio_setting(**data)
    app_manager = ApplicationManager()
    app_name = os.environ.get("AVALON_APP_NAME")
    if app_name:
@@ -367,8 +359,7 @@ def create_write_node(name, data, input=None, prenodes=None, review=True):
    # adding dataflow template
    log.debug("imageio_writes: `{}`".format(imageio_writes))
    for knob in imageio_writes["knobs"]:
        if knob["name"] not in ["_id", "_previous"]:
            _data.update({knob["name"]: knob["value"]})
        _data.update({knob["name"]: knob["value"]})

    _data = anlib.fix_data_for_node_create(_data)


@@ -506,7 +497,7 @@ def create_write_node(name, data, input=None, prenodes=None, review=True):
    add_deadline_tab(GN)

    # open our Tab as default
    GN[self._node_tab_name].setFlag(0)
    GN[opnl._node_tab_name].setFlag(0)

    # set tile color
    tile_color = _data.get("tile_color", "0xff0000ff")
@@ -629,7 +620,7 @@ class WorkfileSettings(object):
            root_node=None,
            nodes=None,
            **kwargs):
        self._project = kwargs.get(
        opnl._project = kwargs.get(
            "project") or io.find_one({"type": "project"})
        self._asset = kwargs.get("asset_name") or api.Session["AVALON_ASSET"]
        self._asset_entity = get_asset(self._asset)

@@ -672,7 +663,7 @@ class WorkfileSettings(object):
        ]

        erased_viewers = []
        for v in [n for n in nuke.allNodes(filter="Viewer")]:
        for v in nuke.allNodes(filter="Viewer"):
            v['viewerProcess'].setValue(str(viewer_dict["viewerProcess"]))
            if str(viewer_dict["viewerProcess"]) \
                    not in v['viewerProcess'].value():

@@ -716,7 +707,7 @@ class WorkfileSettings(object):
            log.error(msg)
            nuke.message(msg)

        log.debug(">> root_dict: {}".format(root_dict))
        log.warning(">> root_dict: {}".format(root_dict))

        # first set OCIO
        if self._root_node["colorManagement"].value() \
@@ -738,41 +729,41 @@ class WorkfileSettings(object):

        # third set ocio custom path
        if root_dict.get("customOCIOConfigPath"):
            self._root_node["customOCIOConfigPath"].setValue(
                str(root_dict["customOCIOConfigPath"]).format(
                    **os.environ
                ).replace("\\", "/")
            )
            log.debug("nuke.root()['{}'] changed to: {}".format(
                "customOCIOConfigPath", root_dict["customOCIOConfigPath"]))
            root_dict.pop("customOCIOConfigPath")
            unresolved_path = root_dict["customOCIOConfigPath"]
            ocio_paths = unresolved_path[platform.system().lower()]

            resolved_path = None
            for ocio_p in ocio_paths:
                resolved_path = str(ocio_p).format(**os.environ)
                if not os.path.exists(resolved_path):
                    continue

            if resolved_path:
                self._root_node["customOCIOConfigPath"].setValue(
                    str(resolved_path).replace("\\", "/")
                )
                log.debug("nuke.root()['{}'] changed to: {}".format(
                    "customOCIOConfigPath", resolved_path))
                root_dict.pop("customOCIOConfigPath")

        # then set the rest
        for knob, value in root_dict.items():
            # skip unfilled ocio config path
            # it will be dict in value
            if isinstance(value, dict):
                continue
            if self._root_node[knob].value() not in value:
                self._root_node[knob].setValue(str(value))
                log.debug("nuke.root()['{}'] changed to: {}".format(
                    knob, value))

    def set_writes_colorspace(self, write_dict):
    def set_writes_colorspace(self):
        ''' Adds correct colorspace to write node dict

        Arguments:
            write_dict (dict): nuke write node as dictionary

        '''
        # scene will have fixed colorspace following presets for the project
        if not isinstance(write_dict, dict):
            msg = "set_root_colorspace(): argument should be dictionary"
            log.error(msg)
            return

        from avalon.nuke import read

        for node in nuke.allNodes():

            if node.Class() in ["Viewer", "Dot"]:
                continue
        for node in nuke.allNodes(filter="Group"):

            # get data from avalon knob
            avalon_knob_data = read(node)
@@ -788,49 +779,63 @@ class WorkfileSettings(object):
            if avalon_knob_data.get("families"):
                families.append(avalon_knob_data.get("families"))

            # except disabled nodes but exclude backdrops in test
            for fmly, knob in write_dict.items():
                write = None
                if (fmly in families):
                    # Add all nodes in group instances.
                    if node.Class() == "Group":
                        node.begin()
                        for x in nuke.allNodes():
                            if x.Class() == "Write":
                                write = x
                        node.end()
                    elif node.Class() == "Write":
                        write = node
                    else:
                        log.warning("Wrong write node Class")
            data_preset = {
                "nodeclass": avalon_knob_data["family"],
                "families": families,
                "creator": avalon_knob_data['creator']
            }

                    write["colorspace"].setValue(str(knob["colorspace"]))
                    log.info(
                        "Setting `{0}` to `{1}`".format(
                            write.name(),
                            knob["colorspace"]))
            nuke_imageio_writes = get_created_node_imageio_setting(
                **data_preset)

    def set_reads_colorspace(self, reads):
            log.debug("nuke_imageio_writes: `{}`".format(nuke_imageio_writes))

            if not nuke_imageio_writes:
                return

            write_node = None

            # get into the group node
            node.begin()
            for x in nuke.allNodes():
                if x.Class() == "Write":
                    write_node = x
            node.end()

            if not write_node:
                return

            # write all knobs to node
            for knob in nuke_imageio_writes["knobs"]:
                value = knob["value"]
                if isinstance(value, six.text_type):
                    value = str(value)
                if str(value).startswith("0x"):
                    value = int(value, 16)

                write_node[knob["name"]].setValue(value)


    def set_reads_colorspace(self, read_clrs_inputs):
        """ Setting colorspace to Read nodes

        Looping through all read nodes and trying to set colorspace based
        on regex rules in presets
        """
        changes = dict()
        changes = {}
        for n in nuke.allNodes():
            file = nuke.filename(n)
            if not n.Class() == "Read":
            if n.Class() != "Read":
                continue

            # load nuke presets for Read's colorspace
            read_clrs_presets = config.get_init_presets()["colorspace"].get(
                "nuke", {}).get("read", {})

            # check if any colorspace presets for read is matching
            preset_clrsp = next((read_clrs_presets[k]
                                 for k in read_clrs_presets
                                 if bool(re.search(k, file))),
                                None)
            preset_clrsp = None

            for input in read_clrs_inputs:
                if not bool(re.search(input["regex"], file)):
                    continue
                preset_clrsp = input["colorspace"]

            log.debug(preset_clrsp)
            if preset_clrsp is not None:
                current = n["colorspace"].value()
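
A minimal sketch of the regex-input matching used by `set_reads_colorspace` above; the `read_clrs_inputs` list is an assumed example of the settings shape, not real project data:

    import re

    # Hypothetical "regexInputs" entries (illustrative only).
    read_clrs_inputs = [
        {"regex": "_beauty\\.", "colorspace": "linear"},
        {"regex": "_plate\\.", "colorspace": "Cineon"},
    ]

    file = "/shots/sh010_plate.%04d.dpx"
    preset_clrsp = None
    for regex_input in read_clrs_inputs:
        if not bool(re.search(regex_input["regex"], file)):
            continue
        preset_clrsp = regex_input["colorspace"]

    print(preset_clrsp)  # -> "Cineon"
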
@@ -864,13 +869,15 @@ class WorkfileSettings(object):
    def set_colorspace(self):
        ''' Setting colorspace following presets
        '''
        nuke_colorspace = config.get_init_presets(
        )["colorspace"].get("nuke", None)
        # get imageio
        imageio = get_anatomy_settings(opnl.project_name)["imageio"]
        nuke_colorspace = imageio["nuke"]

        try:
            self.set_root_colorspace(nuke_colorspace["root"])
            self.set_root_colorspace(nuke_colorspace["workfile"])
        except AttributeError:
            msg = "set_colorspace(): missing `root` settings in template"
            msg = "set_colorspace(): missing `workfile` settings in template"
            nuke.message(msg)

        try:
            self.set_viewers_colorspace(nuke_colorspace["viewer"])

@@ -880,15 +887,14 @@ class WorkfileSettings(object):
            log.error(msg)

        try:
            self.set_writes_colorspace(nuke_colorspace["write"])
        except AttributeError:
            msg = "set_colorspace(): missing `write` settings in template"
            nuke.message(msg)
            log.error(msg)
            self.set_writes_colorspace()
        except AttributeError as _error:
            nuke.message(_error)
            log.error(_error)

        reads = nuke_colorspace.get("read")
        if reads:
            self.set_reads_colorspace(reads)
        read_clrs_inputs = nuke_colorspace["regexInputs"].get("inputs", [])
        if read_clrs_inputs:
            self.set_reads_colorspace(read_clrs_inputs)

        try:
            for key in nuke_colorspace:
@@ -1070,15 +1076,14 @@ class WorkfileSettings(object):
    def set_favorites(self):
        work_dir = os.getenv("AVALON_WORKDIR")
        asset = os.getenv("AVALON_ASSET")
        project = os.getenv("AVALON_PROJECT")
        favorite_items = OrderedDict()

        # project
        # get project's root and split to parts
        projects_root = os.path.normpath(work_dir.split(
            project)[0])
            opnl.project_name)[0])
        # add project name
        project_dir = os.path.join(projects_root, project) + "/"
        project_dir = os.path.join(projects_root, opnl.project_name) + "/"
        # add to favorites
        favorite_items.update({"Project dir": project_dir.replace("\\", "/")})


@@ -1128,13 +1133,13 @@ def get_write_node_template_attr(node):
    data['avalon'] = avalon.nuke.read(
        node)
    data_preset = {
        "class": data['avalon']['family'],
        "families": data['avalon']['families'],
        "preset": data['avalon']['families']  # omit < 2.0.0v
        "nodeclass": data['avalon']['family'],
        "families": [data['avalon']['families']],
        "creator": data['avalon']['creator']
    }

    # get template data
    nuke_imageio_writes = get_node_imageio_setting(**data_preset)
    nuke_imageio_writes = get_created_node_imageio_setting(**data_preset)

    # collecting correct data
    correct_data = OrderedDict({
@@ -1230,8 +1235,7 @@ class ExporterReview:
        """
        anlib.reset_selection()
        ipn_orig = None
        for v in [n for n in nuke.allNodes()
                  if "Viewer" == n.Class()]:
        for v in nuke.allNodes(filter="Viewer"):
            ip = v['input_process'].getValue()
            ipn = v['input_process_node'].getValue()
            if "VIEWER_INPUT" not in ipn and ip:

@@ -1644,8 +1648,8 @@ def launch_workfiles_app():
    if not open_at_start:
        return

    if not self.workfiles_launched:
        self.workfiles_launched = True
    if not opnl.workfiles_launched:
        opnl.workfiles_launched = True
        workfiles.show(os.environ["AVALON_WORKDIR"])
@@ -26,9 +26,9 @@ def install():
    menu.addCommand(
        name,
        workfiles.show,
        index=(rm_item[0])
        index=2
    )

    menu.addSeparator(index=3)
    # replace reset resolution from avalon core to pype's
    name = "Reset Resolution"
    new_name = "Set Resolution"

@@ -63,16 +63,7 @@ def install():
    # add colorspace menu item
    name = "Set Colorspace"
    menu.addCommand(
        name, lambda: WorkfileSettings().set_colorspace(),
        index=(rm_item[0] + 2)
    )
    log.debug("Adding menu item: {}".format(name))

    # add workfile builder menu item
    name = "Build Workfile"
    menu.addCommand(
        name, lambda: BuildWorkfile().process(),
        index=(rm_item[0] + 7)
        name, lambda: WorkfileSettings().set_colorspace()
    )
    log.debug("Adding menu item: {}".format(name))


@@ -80,11 +71,20 @@ def install():
    name = "Apply All Settings"
    menu.addCommand(
        name,
        lambda: WorkfileSettings().set_context_settings(),
        index=(rm_item[0] + 3)
        lambda: WorkfileSettings().set_context_settings()
    )
    log.debug("Adding menu item: {}".format(name))

    menu.addSeparator()

    # add workfile builder menu item
    name = "Build Workfile"
    menu.addCommand(
        name, lambda: BuildWorkfile().process()
    )
    log.debug("Adding menu item: {}".format(name))


    # adding shortcuts
    add_shortcuts_from_presets()
@@ -77,10 +77,14 @@ class CreateWritePrerender(plugin.PypeCreator):
        write_data = {
            "nodeclass": self.n_class,
            "families": [self.family],
            "avalon": self.data,
            "creator": self.__class__.__name__
            "avalon": self.data
        }

        # add creator data
        creator_data = {"creator": self.__class__.__name__}
        self.data.update(creator_data)
        write_data.update(creator_data)

        if self.presets.get('fpath_template'):
            self.log.info("Adding template path from preset")
            write_data.update(


@@ -80,10 +80,14 @@ class CreateWriteRender(plugin.PypeCreator):
        write_data = {
            "nodeclass": self.n_class,
            "families": [self.family],
            "avalon": self.data,
            "creator": self.__class__.__name__
            "avalon": self.data
        }

        # add creator data
        creator_data = {"creator": self.__class__.__name__}
        self.data.update(creator_data)
        write_data.update(creator_data)

        if self.presets.get('fpath_template'):
            self.log.info("Adding template path from preset")
            write_data.update(
@@ -135,12 +135,14 @@ class LoadMov(api.Loader):

        read_name = self.node_name_template.format(**name_data)

        # Create the Loader with the filename path set
        read_node = nuke.createNode(
            "Read",
            "name {}".format(read_name)
        )

        # to avoid multiple undo steps for rest of process
        # we will switch off undo-ing
        with viewer_update_and_undo_stop():
            read_node = nuke.createNode(
                "Read",
                "name {}".format(read_name)
            )
            read_node["file"].setValue(file)

            read_node["origfirst"].setValue(first)


@@ -139,11 +139,15 @@ class LoadSequence(api.Loader):
        read_name = self.node_name_template.format(**name_data)

        # Create the Loader with the filename path set

        # TODO: it might be universal read to img/geo/camera
        r = nuke.createNode(
            "Read",
            "name {}".format(read_name))

        # to avoid multiple undo steps for rest of process
        # we will switch off undo-ing
        with viewer_update_and_undo_stop():
            # TODO: it might be universal read to img/geo/camera
            r = nuke.createNode(
                "Read",
                "name {}".format(read_name))
            r["file"].setValue(file)

            # Set colorspace defined in version data
@@ -34,7 +34,8 @@ class CollectSlate(pyblish.api.InstancePlugin):
        if slate_node:
            instance.data["slateNode"] = slate_node
            instance.data["families"].append("slate")
            instance.data["versionData"]["families"].append("slate")
            self.log.info(
                "Slate node is in node graph: `{}`".format(slate_node.name()))
            self.log.debug(
                "__ instance: `{}`".format(instance))
                "__ instance.data: `{}`".format(instance.data))
@@ -77,8 +77,9 @@ def set_context_settings(asset_doc=None):
        handle_start = handles
        handle_end = handles

    frame_start -= int(handle_start)
    frame_end += int(handle_end)
    # Always start from 0 Mark In and set only Mark Out
    mark_in = 0
    mark_out = mark_in + (frame_end - frame_start) + handle_start + handle_end

    execute_george("tv_markin {} set".format(frame_start - 1))
    execute_george("tv_markout {} set".format(frame_end - 1))
    execute_george("tv_markin {} set".format(mark_in))
    execute_george("tv_markout {} set".format(mark_out))
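
Worked through with illustrative numbers (not from the source), the new Mark In/Out computation behaves like this:

    # Hypothetical values: a shot 1001-1010 with one-frame handles.
    frame_start, frame_end = 1001, 1010
    handle_start = handle_end = 1

    mark_in = 0
    mark_out = mark_in + (frame_end - frame_start) + handle_start + handle_end
    print(mark_in, mark_out)  # -> 0 11, i.e. 12 frames including handles
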
@@ -0,0 +1,37 @@
import pyblish.api


class CollectOutputFrameRange(pyblish.api.ContextPlugin):
    """Collect frame start/end from context.

    When instances are collected context does not contain `frameStart` and
    `frameEnd` keys yet. They are collected in global plugin
    `CollectAvalonEntities`.
    """
    label = "Collect output frame range"
    order = pyblish.api.CollectorOrder
    hosts = ["tvpaint"]

    def process(self, context):
        for instance in context:
            frame_start = instance.data.get("frameStart")
            frame_end = instance.data.get("frameEnd")
            if frame_start is not None and frame_end is not None:
                self.log.debug(
                    "Instance {} already has set frames {}-{}".format(
                        str(instance), frame_start, frame_end
                    )
                )
                return

            frame_start = context.data.get("frameStart")
            frame_end = context.data.get("frameEnd")

            instance.data["frameStart"] = frame_start
            instance.data["frameEnd"] = frame_end

            self.log.info(
                "Set frames {}-{} on instance {} ".format(
                    frame_start, frame_end, str(instance)
                )
            )
@@ -86,9 +86,6 @@ class CollectInstances(pyblish.api.ContextPlugin):

            instance.data["publish"] = any_visible

            instance.data["frameStart"] = context.data["sceneMarkIn"] + 1
            instance.data["frameEnd"] = context.data["sceneMarkOut"] + 1

            self.log.debug("Created instance: {}\n{}".format(
                instance, json.dumps(instance.data, indent=4)
            ))


@@ -1,8 +1,6 @@
import os
import shutil
import time
import tempfile
import multiprocessing

import pyblish.api
from avalon.tvpaint import lib
@@ -45,10 +43,64 @@ class ExtractSequence(pyblish.api.Extractor):
        )

        family_lowered = instance.data["family"].lower()
        frame_start = instance.data["frameStart"]
        frame_end = instance.data["frameEnd"]
        mark_in = instance.context.data["sceneMarkIn"]
        mark_out = instance.context.data["sceneMarkOut"]
        # Frame start/end may be stored as float
        frame_start = int(instance.data["frameStart"])
        frame_end = int(instance.data["frameEnd"])

        filename_template = self._get_filename_template(frame_end)
        # Handles are not stored per instance but on Context
        handle_start = instance.context.data["handleStart"]
        handle_end = instance.context.data["handleEnd"]

        # --- Fallbacks ----------------------------------------------------
        # This is required if validations of ranges are ignored.
        # - all of this code won't change processing if the range to render
        #   matches the range of expected output

        # Prepare output frames
        output_frame_start = frame_start - handle_start
        output_frame_end = frame_end + handle_end

        # Change output frame start to 0 if handles cause a negative number
        if output_frame_start < 0:
            self.log.warning((
                "Frame start with handles has negative value."
                " Changed to \"0\". Frames start: {}, Handle Start: {}"
            ).format(frame_start, handle_start))
            output_frame_start = 0

        # Check Marks range and output range
        output_range = output_frame_end - output_frame_start
        marks_range = mark_out - mark_in

        # Lower Mark Out if mark range is bigger than output
        # - do not render unused frames
        if output_range < marks_range:
            new_mark_out = mark_out - (marks_range - output_range)
            self.log.warning((
                "Lowering render range to {} frames. Changed Mark Out {} -> {}"
            ).format(output_range + 1, mark_out, new_mark_out))
            # Assign new mark out to variable
            mark_out = new_mark_out

        # Lower output frame end so representation has right `frameEnd` value
        elif output_range > marks_range:
            new_output_frame_end = (
                output_frame_end - (output_range - marks_range)
            )
            self.log.warning((
                "Lowering representation range to {} frames."
                " Changed frame end {} -> {}"
            ).format(marks_range + 1, output_frame_end, new_output_frame_end))
            output_frame_end = new_output_frame_end

        # -------------------------------------------------------------------

        filename_template = self._get_filename_template(
            # Use the biggest number
            max(mark_out, frame_end)
        )
        ext = os.path.splitext(filename_template)[1].replace(".", "")

        self.log.debug("Using file template \"{}\"".format(filename_template))
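
With illustrative numbers, the fallback clamping above works as follows (a sketch, not part of the plugin):

    # Hypothetical case: marks cover more frames than the expected output.
    mark_in, mark_out = 0, 20                      # 21 frames marked in the scene
    output_frame_start, output_frame_end = 0, 10   # 11 frames expected

    output_range = output_frame_end - output_frame_start   # 10
    marks_range = mark_out - mark_in                        # 20

    if output_range < marks_range:
        # Render range is lowered so unused frames are not rendered.
        mark_out = mark_out - (marks_range - output_range)  # -> 10
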
@@ -57,7 +109,9 @@ class ExtractSequence(pyblish.api.Extractor):
        output_dir = instance.data.get("stagingDir")
        if not output_dir:
            # Create temp folder if staging dir is not set
            output_dir = tempfile.mkdtemp().replace("\\", "/")
            output_dir = (
                tempfile.mkdtemp(prefix="tvpaint_render_")
            ).replace("\\", "/")
            instance.data["stagingDir"] = output_dir

        self.log.debug(

@@ -65,23 +119,36 @@ class ExtractSequence(pyblish.api.Extractor):
        )

        if instance.data["family"] == "review":
            repre_files, thumbnail_fullpath = self.render_review(
                filename_template, output_dir, frame_start, frame_end
            output_filenames, thumbnail_fullpath = self.render_review(
                filename_template, output_dir, mark_in, mark_out
            )
        else:
            # Render output
            repre_files, thumbnail_fullpath = self.render(
                filename_template, output_dir, frame_start, frame_end,
            output_filenames, thumbnail_fullpath = self.render(
                filename_template, output_dir,
                mark_in, mark_out,
                filtered_layers
            )

        # Sequence of one frame
        if not output_filenames:
            self.log.warning("Extractor did not create any output.")
            return

        repre_files = self._rename_output_files(
            filename_template, output_dir,
            mark_in, mark_out,
            output_frame_start, output_frame_end
        )

        # Fill tags and new families
        tags = []
        if family_lowered in ("review", "renderlayer"):
            tags.append("review")

        # Sequence of one frame
        if len(repre_files) == 1:
        single_file = len(repre_files) == 1
        if single_file:
            repre_files = repre_files[0]

        new_repre = {

@@ -89,10 +156,13 @@ class ExtractSequence(pyblish.api.Extractor):
            "ext": ext,
            "files": repre_files,
            "stagingDir": output_dir,
            "frameStart": frame_start,
            "frameEnd": frame_end,
            "tags": tags
        }

        if not single_file:
            new_repre["frameStart"] = output_frame_start
            new_repre["frameEnd"] = output_frame_end

        self.log.debug("Creating new representation: {}".format(new_repre))

        instance.data["representations"].append(new_repre)
@@ -133,9 +203,45 @@ class ExtractSequence(pyblish.api.Extractor):

        return "{{frame:0>{}}}".format(frame_padding) + ".png"

    def render_review(
        self, filename_template, output_dir, frame_start, frame_end
    def _rename_output_files(
        self, filename_template, output_dir,
        mark_in, mark_out, output_frame_start, output_frame_end
    ):
        # Use different ranges based on Mark In and output Frame Start values
        # - this is to make sure that filename renaming won't affect files
        #   that are not renamed yet
        mark_start_is_less = bool(mark_in < output_frame_start)
        if mark_start_is_less:
            marks_range = range(mark_out, mark_in - 1, -1)
            frames_range = range(output_frame_end, output_frame_start - 1, -1)
        else:
            # This is a less likely situation as frame start will in most
            # cases be higher than Mark In.
            marks_range = range(mark_in, mark_out + 1)
            frames_range = range(output_frame_start, output_frame_end + 1)

        repre_filepaths = []
        for mark, frame in zip(marks_range, frames_range):
            new_filename = filename_template.format(frame=frame)
            new_filepath = os.path.join(output_dir, new_filename)

            repre_filepaths.append(new_filepath)

            if mark != frame:
                old_filename = filename_template.format(frame=mark)
                old_filepath = os.path.join(output_dir, old_filename)
                os.rename(old_filepath, new_filepath)

        # Reverse repre files order if they were collected in reversed order
        if mark_start_is_less:
            repre_filepaths = list(reversed(repre_filepaths))

        return [
            os.path.basename(path)
            for path in repre_filepaths
        ]

    def render_review(self, filename_template, output_dir, mark_in, mark_out):
        """ Export images from TVPaint using `tv_savesequence` command.

        Args:
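
To illustrate the renaming above with assumed numbers: marks 0-2 rendered on disk, expected output frames 5-7; iteration runs in reverse so an existing target name is never overwritten:

    # Sketch of the mark -> output frame renaming (illustrative values only).
    mark_in, mark_out = 0, 2
    output_frame_start, output_frame_end = 5, 7

    marks_range = range(mark_out, mark_in - 1, -1)                       # 2, 1, 0
    frames_range = range(output_frame_end, output_frame_start - 1, -1)   # 7, 6, 5

    for mark, frame in zip(marks_range, frames_range):
        print("{:04d}.png -> {:04d}.png".format(mark, frame))
    # 0002.png -> 0007.png
    # 0001.png -> 0006.png
    # 0000.png -> 0005.png
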
@@ -144,8 +250,8 @@ class ExtractSequence(pyblish.api.Extractor):
                keyword argument `{frame}` or index argument (for same value).
                Extension in template must match `save_mode`.
            output_dir (str): Directory where files will be stored.
            first_frame (int): Starting frame from which export will begin.
            last_frame (int): On which frame export will end.
            mark_in (int): Starting frame index from which export will begin.
            mark_out (int): On which frame index export will end.

        Returns:
            tuple: With 2 items, first is a list of filenames, second is a path to
@@ -154,10 +260,8 @@ class ExtractSequence(pyblish.api.Extractor):
        self.log.debug("Preparing data for rendering.")
        first_frame_filepath = os.path.join(
            output_dir,
            filename_template.format(frame=frame_start)
            filename_template.format(frame=mark_in)
        )
        mark_in = frame_start - 1
        mark_out = frame_end - 1

        george_script_lines = [
            "tv_SaveMode \"PNG\"",
@@ -170,13 +274,22 @@ class ExtractSequence(pyblish.api.Extractor):
        ]
        lib.execute_george_through_file("\n".join(george_script_lines))

        output = []
        first_frame_filepath = None
        for frame in range(frame_start, frame_end + 1):
        output_filenames = []
        for frame in range(mark_in, mark_out + 1):
            filename = filename_template.format(frame=frame)
            output.append(filename)
            output_filenames.append(filename)

            filepath = os.path.join(output_dir, filename)
            if not os.path.exists(filepath):
                raise AssertionError(
                    "Output was not rendered. File was not found {}".format(
                        filepath
                    )
                )

            if first_frame_filepath is None:
                first_frame_filepath = os.path.join(output_dir, filename)
                first_frame_filepath = filepath

        thumbnail_filepath = os.path.join(output_dir, "thumbnail.jpg")
        if first_frame_filepath and os.path.exists(first_frame_filepath):
@@ -184,11 +297,10 @@ class ExtractSequence(pyblish.api.Extractor):
            thumbnail_obj = Image.new("RGB", source_img.size, (255, 255, 255))
            thumbnail_obj.paste(source_img)
            thumbnail_obj.save(thumbnail_filepath)
        return output, thumbnail_filepath

    def render(
        self, filename_template, output_dir, frame_start, frame_end, layers
    ):
        return output_filenames, thumbnail_filepath

    def render(self, filename_template, output_dir, mark_in, mark_out, layers):
        """ Export images from TVPaint.

        Args:
@ -197,8 +309,8 @@ class ExtractSequence(pyblish.api.Extractor):
|
|||
keyword argument `{frame}` or index argument (for same value).
|
||||
Extension in template must match `save_mode`.
|
||||
output_dir (str): Directory where files will be stored.
|
||||
first_frame (int): Starting frame from which export will begin.
|
||||
last_frame (int): On which frame export will end.
|
||||
mark_in (int): Starting frame index from which export will begin.
|
||||
mark_out (int): On which frame index export will end.
|
||||
layers (list): List of layers to be exported.
|
||||
|
||||
Retruns:
|
||||
|
|
@ -219,14 +331,11 @@ class ExtractSequence(pyblish.api.Extractor):
|
|||
# Sort layer positions in reverse order
|
||||
sorted_positions = list(reversed(sorted(layers_by_position.keys())))
|
||||
if not sorted_positions:
|
||||
return
|
||||
return [], None
|
||||
|
||||
self.log.debug("Collecting pre/post behavior of individual layers.")
|
||||
behavior_by_layer_id = lib.get_layers_pre_post_behavior(layer_ids)
|
||||
|
||||
mark_in_index = frame_start - 1
|
||||
mark_out_index = frame_end - 1
|
||||
|
||||
tmp_filename_template = "pos_{pos}." + filename_template
|
||||
|
||||
files_by_position = {}
|
||||
|
|
@ -239,25 +348,47 @@ class ExtractSequence(pyblish.api.Extractor):
|
|||
tmp_filename_template,
|
||||
output_dir,
|
||||
behavior,
|
||||
mark_in_index,
|
||||
mark_out_index
|
||||
mark_in,
|
||||
mark_out
|
||||
)
|
||||
files_by_position[position] = files_by_frames
|
||||
if files_by_frames:
|
||||
files_by_position[position] = files_by_frames
|
||||
else:
|
||||
self.log.warning((
|
||||
"Skipped layer \"{}\". Probably out of Mark In/Out range."
|
||||
).format(layer["name"]))
|
||||
|
||||
if not files_by_position:
|
||||
layer_names = set(layer["name"] for layer in layers)
|
||||
joined_names = ", ".join(
|
||||
["\"{}\"".format(name) for name in layer_names]
|
||||
)
|
||||
self.log.warning(
|
||||
"Layers {} do not have content in range {} - {}".format(
|
||||
joined_names, mark_in, mark_out
|
||||
)
|
||||
)
|
||||
return [], None
|
||||
|
||||
output_filepaths = self._composite_files(
|
||||
files_by_position,
|
||||
mark_in_index,
|
||||
mark_out_index,
|
||||
mark_in,
|
||||
mark_out,
|
||||
filename_template,
|
||||
output_dir
|
||||
)
|
||||
self._cleanup_tmp_files(files_by_position)
|
||||
|
||||
thumbnail_src_filepath = None
|
||||
thumbnail_filepath = None
|
||||
if output_filepaths:
|
||||
thumbnail_src_filepath = tuple(sorted(output_filepaths))[0]
|
||||
output_filenames = [
|
||||
os.path.basename(filepath)
|
||||
for filepath in output_filepaths
|
||||
]
|
||||
|
||||
thumbnail_src_filepath = None
|
||||
if output_filepaths:
|
||||
thumbnail_src_filepath = output_filepaths[0]
|
||||
|
||||
thumbnail_filepath = None
|
||||
if thumbnail_src_filepath and os.path.exists(thumbnail_src_filepath):
|
||||
source_img = Image.open(thumbnail_src_filepath)
|
||||
thumbnail_filepath = os.path.join(output_dir, "thumbnail.jpg")
|
||||
|
|
@ -265,11 +396,7 @@ class ExtractSequence(pyblish.api.Extractor):
|
|||
thumbnail_obj.paste(source_img)
|
||||
thumbnail_obj.save(thumbnail_filepath)
|
||||
|
||||
repre_files = [
|
||||
os.path.basename(path)
|
||||
for path in output_filepaths
|
||||
]
|
||||
return repre_files, thumbnail_filepath
|
||||
return output_filenames, thumbnail_filepath
|
||||
|
||||
def _render_layer(
|
||||
self,
|
||||
|
|
@ -283,6 +410,22 @@ class ExtractSequence(pyblish.api.Extractor):
|
|||
layer_id = layer["layer_id"]
|
||||
frame_start_index = layer["frame_start"]
|
||||
frame_end_index = layer["frame_end"]
|
||||
|
||||
pre_behavior = behavior["pre"]
|
||||
post_behavior = behavior["post"]
|
||||
|
||||
# Check if layer is before mark in
|
||||
if frame_end_index < mark_in_index:
|
||||
# Skip layer if post behavior is "none"
|
||||
if post_behavior == "none":
|
||||
return {}
|
||||
|
||||
# Check if layer is after mark out
|
||||
elif frame_start_index > mark_out_index:
|
||||
# Skip layer if pre behavior is "none"
|
||||
if pre_behavior == "none":
|
||||
return {}
|
||||
|
||||
exposure_frames = lib.get_exposure_frames(
|
||||
layer_id, frame_start_index, frame_end_index
|
||||
)
|
||||
|
|
@ -341,8 +484,6 @@ class ExtractSequence(pyblish.api.Extractor):
|
|||
self.log.debug("Filled frames {}".format(str(_debug_filled_frames)))
|
||||
|
||||
# Fill frames by pre/post behavior of layer
|
||||
pre_behavior = behavior["pre"]
|
||||
post_behavior = behavior["post"]
|
||||
self.log.debug((
|
||||
"Completing image sequence of layer by pre/post behavior."
|
||||
" PRE: {} | POST: {}"
|
||||
|
|
@ -530,17 +671,12 @@ class ExtractSequence(pyblish.api.Extractor):
|
|||
filepath = position_data[frame_idx]
|
||||
images_by_frame[frame_idx].append(filepath)
|
||||
|
||||
process_count = os.cpu_count()
|
||||
if process_count > 1:
|
||||
process_count -= 1
|
||||
|
||||
processes = {}
|
||||
output_filepaths = []
|
||||
missing_frame_paths = []
|
||||
random_frame_path = None
|
||||
for frame_idx in sorted(images_by_frame.keys()):
|
||||
image_filepaths = images_by_frame[frame_idx]
|
||||
output_filename = filename_template.format(frame=frame_idx + 1)
|
||||
output_filename = filename_template.format(frame=frame_idx)
|
||||
output_filepath = os.path.join(output_dir, output_filename)
|
||||
output_filepaths.append(output_filepath)
|
||||
|
||||
|
|
@ -553,45 +689,15 @@ class ExtractSequence(pyblish.api.Extractor):
|
|||
if len(image_filepaths) == 1:
|
||||
os.rename(image_filepaths[0], output_filepath)
|
||||
|
||||
# Prepare process for compositing of images
|
||||
# Composite images
|
||||
else:
|
||||
processes[frame_idx] = multiprocessing.Process(
|
||||
target=composite_images,
|
||||
args=(image_filepaths, output_filepath)
|
||||
)
|
||||
composite_images(image_filepaths, output_filepath)
|
||||
|
||||
# Store path of random output image that will 100% exist after all
|
||||
# multiprocessing as mockup for missing frames
|
||||
if random_frame_path is None:
|
||||
random_frame_path = output_filepath
|
||||
|
||||
self.log.info(
|
||||
"Running {} compositing processes - this mey take a while.".format(
|
||||
len(processes)
|
||||
)
|
||||
)
|
||||
# Wait until all compositing processes are done
|
||||
running_processes = {}
|
||||
while True:
|
||||
for idx in tuple(running_processes.keys()):
|
||||
process = running_processes[idx]
|
||||
if not process.is_alive():
|
||||
running_processes.pop(idx).join()
|
||||
|
||||
if processes and len(running_processes) != process_count:
|
||||
indexes = list(processes.keys())
|
||||
for _ in range(process_count - len(running_processes)):
|
||||
if not indexes:
|
||||
break
|
||||
idx = indexes.pop(0)
|
||||
running_processes[idx] = processes.pop(idx)
|
||||
running_processes[idx].start()
|
||||
|
||||
if not running_processes and not processes:
|
||||
break
|
||||
|
||||
time.sleep(0.01)
|
||||
|
||||
self.log.debug(
|
||||
"Creating transparent images for frames without render {}.".format(
|
||||
str(missing_frame_paths)
|
||||

@@ -14,37 +14,54 @@ class ValidateMarksRepair(pyblish.api.Action):
def process(self, context, plugin):
expected_data = ValidateMarks.get_expected_data(context)

expected_data["markIn"] -= 1
expected_data["markOut"] -= 1

lib.execute_george("tv_markin {} set".format(expected_data["markIn"]))
lib.execute_george(
"tv_markin {} set".format(expected_data["markIn"])
)
lib.execute_george(
"tv_markout {} set".format(expected_data["markOut"])
)


class ValidateMarks(pyblish.api.ContextPlugin):
"""Validate mark in and out are enabled."""
"""Validate mark in and out are enabled and it's duration.

label = "Validate Marks"
Mark In/Out does not have to match frameStart and frameEnd but duration is
important.
"""

label = "Validate Mark In/Out"
order = pyblish.api.ValidatorOrder
optional = True
actions = [ValidateMarksRepair]

@staticmethod
def get_expected_data(context):
scene_mark_in = context.data["sceneMarkIn"]

# Data collected in `CollectAvalonEntities`
frame_end = context.data["frameEnd"]
frame_start = context.data["frameStart"]
handle_start = context.data["handleStart"]
handle_end = context.data["handleEnd"]

# Calculate expeted Mark out (Mark In + duration - 1)
expected_mark_out = (
scene_mark_in
+ (frame_end - frame_start)
+ handle_start + handle_end
)
return {
"markIn": int(context.data["frameStart"]),
"markIn": scene_mark_in,
"markInState": True,
"markOut": int(context.data["frameEnd"]),
"markOut": expected_mark_out,
"markOutState": True
}

def process(self, context):
current_data = {
"markIn": context.data["sceneMarkIn"] + 1,
"markIn": context.data["sceneMarkIn"],
"markInState": context.data["sceneMarkInState"],
"markOut": context.data["sceneMarkOut"] + 1,
"markOut": context.data["sceneMarkOut"],
"markOutState": context.data["sceneMarkOutState"]
}
expected_data = self.get_expected_data(context)
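A worked example of the expected Mark Out formula above, with hypothetical values:

    # expected_mark_out = scene_mark_in + (frame_end - frame_start) + handles
    scene_mark_in = 0
    frame_start, frame_end = 1001, 1020     # 20 frames of content
    handle_start, handle_end = 5, 5         # 10 frames of handles
    expected_mark_out = (
        scene_mark_in
        + (frame_end - frame_start)
        + handle_start + handle_end
    )
    assert expected_mark_out == 29          # 30 frames total, 0-based marks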
162
openpype/hosts/unreal/plugins/load/load_alembic_geometrycache.py
Normal file
@@ -0,0 +1,162 @@
import os

from avalon import api, pipeline
from avalon.unreal import lib
from avalon.unreal import pipeline as unreal_pipeline
import unreal


class PointCacheAlembicLoader(api.Loader):
"""Load Point Cache from Alembic"""

families = ["model", "pointcache"]
label = "Import Alembic Point Cache"
representations = ["abc"]
icon = "cube"
color = "orange"

def load(self, context, name, namespace, data):
"""
Load and containerise representation into Content Browser.

This is two step process. First, import FBX to temporary path and
then call `containerise()` on it - this moves all content to new
directory and then it will create AssetContainer there and imprint it
with metadata. This will mark this path as container.

Args:
context (dict): application context
name (str): subset name
namespace (str): in Unreal this is basically path to container.
This is not passed here, so namespace is set
by `containerise()` because only then we know
real path.
data (dict): Those would be data to be imprinted. This is not used
now, data are imprinted by `containerise()`.

Returns:
list(str): list of container content
"""

# Create directory for asset and avalon container
root = "/Game/Avalon/Assets"
asset = context.get('asset').get('name')
suffix = "_CON"
if asset:
asset_name = "{}_{}".format(asset, name)
else:
asset_name = "{}".format(name)

tools = unreal.AssetToolsHelpers().get_asset_tools()
asset_dir, container_name = tools.create_unique_asset_name(
"{}/{}/{}".format(root, asset, name), suffix="")

container_name += suffix

unreal.EditorAssetLibrary.make_directory(asset_dir)

task = unreal.AssetImportTask()

task.set_editor_property('filename', self.fname)
task.set_editor_property('destination_path', asset_dir)
task.set_editor_property('destination_name', asset_name)
task.set_editor_property('replace_existing', False)
task.set_editor_property('automated', True)
task.set_editor_property('save', True)

# set import options here
# Unreal 4.24 ignores the settings. It works with Unreal 4.26
options = unreal.AbcImportSettings()
options.set_editor_property(
'import_type', unreal.AlembicImportType.GEOMETRY_CACHE)

options.geometry_cache_settings.set_editor_property(
'flatten_tracks', False)

task.options = options
unreal.AssetToolsHelpers.get_asset_tools().import_asset_tasks([task])  # noqa: E501

# Create Asset Container
lib.create_avalon_container(
container=container_name, path=asset_dir)

data = {
"schema": "openpype:container-2.0",
"id": pipeline.AVALON_CONTAINER_ID,
"asset": asset,
"namespace": asset_dir,
"container_name": container_name,
"asset_name": asset_name,
"loader": str(self.__class__.__name__),
"representation": context["representation"]["_id"],
"parent": context["representation"]["parent"],
"family": context["representation"]["context"]["family"]
}
unreal_pipeline.imprint(
"{}/{}".format(asset_dir, container_name), data)

asset_content = unreal.EditorAssetLibrary.list_assets(
asset_dir, recursive=True, include_folder=True
)

for a in asset_content:
unreal.EditorAssetLibrary.save_asset(a)

return asset_content

def update(self, container, representation):
name = container["asset_name"]
source_path = api.get_representation_path(representation)
destination_path = container["namespace"]

task = unreal.AssetImportTask()

task.set_editor_property('filename', source_path)
task.set_editor_property('destination_path', destination_path)
# strip suffix
task.set_editor_property('destination_name', name)
task.set_editor_property('replace_existing', True)
task.set_editor_property('automated', True)
task.set_editor_property('save', True)

# set import options here
# Unreal 4.24 ignores the settings. It works with Unreal 4.26
options = unreal.AbcImportSettings()
options.set_editor_property(
'import_type', unreal.AlembicImportType.GEOMETRY_CACHE)

options.geometry_cache_settings.set_editor_property(
'flatten_tracks', False)

task.options = options
# do import fbx and replace existing data
unreal.AssetToolsHelpers.get_asset_tools().import_asset_tasks([task])
container_path = "{}/{}".format(container["namespace"],
container["objectName"])
# update metadata
unreal_pipeline.imprint(
container_path,
{
"representation": str(representation["_id"]),
"parent": str(representation["parent"])
})

asset_content = unreal.EditorAssetLibrary.list_assets(
destination_path, recursive=True, include_folder=True
)

for a in asset_content:
unreal.EditorAssetLibrary.save_asset(a)

def remove(self, container):
path = container["namespace"]
parent_path = os.path.dirname(path)

unreal.EditorAssetLibrary.delete_directory(path)

asset_content = unreal.EditorAssetLibrary.list_assets(
parent_path, recursive=False
)

if len(asset_content) == 0:
unreal.EditorAssetLibrary.delete_directory(parent_path)
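The next two loaders repeat the pattern above almost verbatim; they differ essentially only in the AlembicImportType they request. A condensed sketch of the shared import setup, assuming Unreal's Python API is available, with the function name being a hypothetical helper rather than anything in the commit:

    import unreal

    def make_abc_task(filename, destination, import_type):
        # Shared AssetImportTask setup used by all three Alembic loaders.
        task = unreal.AssetImportTask()
        task.set_editor_property('filename', filename)
        task.set_editor_property('destination_path', destination)
        task.set_editor_property('automated', True)
        task.set_editor_property('save', True)

        options = unreal.AbcImportSettings()
        # GEOMETRY_CACHE, SKELETAL or STATIC_MESH, depending on the loader
        options.set_editor_property('import_type', import_type)
        task.options = options
        return task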
156
openpype/hosts/unreal/plugins/load/load_alembic_skeletalmesh.py
Normal file
@@ -0,0 +1,156 @@
import os

from avalon import api, pipeline
from avalon.unreal import lib
from avalon.unreal import pipeline as unreal_pipeline
import unreal


class SkeletalMeshAlembicLoader(api.Loader):
"""Load Unreal SkeletalMesh from Alembic"""

families = ["pointcache"]
label = "Import Alembic Skeletal Mesh"
representations = ["abc"]
icon = "cube"
color = "orange"

def load(self, context, name, namespace, data):
"""
Load and containerise representation into Content Browser.

This is two step process. First, import FBX to temporary path and
then call `containerise()` on it - this moves all content to new
directory and then it will create AssetContainer there and imprint it
with metadata. This will mark this path as container.

Args:
context (dict): application context
name (str): subset name
namespace (str): in Unreal this is basically path to container.
This is not passed here, so namespace is set
by `containerise()` because only then we know
real path.
data (dict): Those would be data to be imprinted. This is not used
now, data are imprinted by `containerise()`.

Returns:
list(str): list of container content
"""

# Create directory for asset and avalon container
root = "/Game/Avalon/Assets"
asset = context.get('asset').get('name')
suffix = "_CON"
if asset:
asset_name = "{}_{}".format(asset, name)
else:
asset_name = "{}".format(name)

tools = unreal.AssetToolsHelpers().get_asset_tools()
asset_dir, container_name = tools.create_unique_asset_name(
"{}/{}/{}".format(root, asset, name), suffix="")

container_name += suffix

unreal.EditorAssetLibrary.make_directory(asset_dir)

task = unreal.AssetImportTask()

task.set_editor_property('filename', self.fname)
task.set_editor_property('destination_path', asset_dir)
task.set_editor_property('destination_name', asset_name)
task.set_editor_property('replace_existing', False)
task.set_editor_property('automated', True)
task.set_editor_property('save', True)

# set import options here
# Unreal 4.24 ignores the settings. It works with Unreal 4.26
options = unreal.AbcImportSettings()
options.set_editor_property(
'import_type', unreal.AlembicImportType.SKELETAL)

task.options = options
unreal.AssetToolsHelpers.get_asset_tools().import_asset_tasks([task])  # noqa: E501

# Create Asset Container
lib.create_avalon_container(
container=container_name, path=asset_dir)

data = {
"schema": "openpype:container-2.0",
"id": pipeline.AVALON_CONTAINER_ID,
"asset": asset,
"namespace": asset_dir,
"container_name": container_name,
"asset_name": asset_name,
"loader": str(self.__class__.__name__),
"representation": context["representation"]["_id"],
"parent": context["representation"]["parent"],
"family": context["representation"]["context"]["family"]
}
unreal_pipeline.imprint(
"{}/{}".format(asset_dir, container_name), data)

asset_content = unreal.EditorAssetLibrary.list_assets(
asset_dir, recursive=True, include_folder=True
)

for a in asset_content:
unreal.EditorAssetLibrary.save_asset(a)

return asset_content

def update(self, container, representation):
name = container["asset_name"]
source_path = api.get_representation_path(representation)
destination_path = container["namespace"]

task = unreal.AssetImportTask()

task.set_editor_property('filename', source_path)
task.set_editor_property('destination_path', destination_path)
# strip suffix
task.set_editor_property('destination_name', name)
task.set_editor_property('replace_existing', True)
task.set_editor_property('automated', True)
task.set_editor_property('save', True)

# set import options here
# Unreal 4.24 ignores the settings. It works with Unreal 4.26
options = unreal.AbcImportSettings()
options.set_editor_property(
'import_type', unreal.AlembicImportType.SKELETAL)

task.options = options
# do import fbx and replace existing data
unreal.AssetToolsHelpers.get_asset_tools().import_asset_tasks([task])
container_path = "{}/{}".format(container["namespace"],
container["objectName"])
# update metadata
unreal_pipeline.imprint(
container_path,
{
"representation": str(representation["_id"]),
"parent": str(representation["parent"])
})

asset_content = unreal.EditorAssetLibrary.list_assets(
destination_path, recursive=True, include_folder=True
)

for a in asset_content:
unreal.EditorAssetLibrary.save_asset(a)

def remove(self, container):
path = container["namespace"]
parent_path = os.path.dirname(path)

unreal.EditorAssetLibrary.delete_directory(path)

asset_content = unreal.EditorAssetLibrary.list_assets(
parent_path, recursive=False
)

if len(asset_content) == 0:
unreal.EditorAssetLibrary.delete_directory(parent_path)
156
openpype/hosts/unreal/plugins/load/load_alembic_staticmesh.py
Normal file
@@ -0,0 +1,156 @@
import os

from avalon import api, pipeline
from avalon.unreal import lib
from avalon.unreal import pipeline as unreal_pipeline
import unreal


class StaticMeshAlembicLoader(api.Loader):
"""Load Unreal StaticMesh from Alembic"""

families = ["model"]
label = "Import Alembic Static Mesh"
representations = ["abc"]
icon = "cube"
color = "orange"

def load(self, context, name, namespace, data):
"""
Load and containerise representation into Content Browser.

This is two step process. First, import FBX to temporary path and
then call `containerise()` on it - this moves all content to new
directory and then it will create AssetContainer there and imprint it
with metadata. This will mark this path as container.

Args:
context (dict): application context
name (str): subset name
namespace (str): in Unreal this is basically path to container.
This is not passed here, so namespace is set
by `containerise()` because only then we know
real path.
data (dict): Those would be data to be imprinted. This is not used
now, data are imprinted by `containerise()`.

Returns:
list(str): list of container content
"""

# Create directory for asset and avalon container
root = "/Game/Avalon/Assets"
asset = context.get('asset').get('name')
suffix = "_CON"
if asset:
asset_name = "{}_{}".format(asset, name)
else:
asset_name = "{}".format(name)

tools = unreal.AssetToolsHelpers().get_asset_tools()
asset_dir, container_name = tools.create_unique_asset_name(
"{}/{}/{}".format(root, asset, name), suffix="")

container_name += suffix

unreal.EditorAssetLibrary.make_directory(asset_dir)

task = unreal.AssetImportTask()

task.set_editor_property('filename', self.fname)
task.set_editor_property('destination_path', asset_dir)
task.set_editor_property('destination_name', asset_name)
task.set_editor_property('replace_existing', False)
task.set_editor_property('automated', True)
task.set_editor_property('save', True)

# set import options here
# Unreal 4.24 ignores the settings. It works with Unreal 4.26
options = unreal.AbcImportSettings()
options.set_editor_property(
'import_type', unreal.AlembicImportType.STATIC_MESH)

task.options = options
unreal.AssetToolsHelpers.get_asset_tools().import_asset_tasks([task])  # noqa: E501

# Create Asset Container
lib.create_avalon_container(
container=container_name, path=asset_dir)

data = {
"schema": "openpype:container-2.0",
"id": pipeline.AVALON_CONTAINER_ID,
"asset": asset,
"namespace": asset_dir,
"container_name": container_name,
"asset_name": asset_name,
"loader": str(self.__class__.__name__),
"representation": context["representation"]["_id"],
"parent": context["representation"]["parent"],
"family": context["representation"]["context"]["family"]
}
unreal_pipeline.imprint(
"{}/{}".format(asset_dir, container_name), data)

asset_content = unreal.EditorAssetLibrary.list_assets(
asset_dir, recursive=True, include_folder=True
)

for a in asset_content:
unreal.EditorAssetLibrary.save_asset(a)

return asset_content

def update(self, container, representation):
name = container["asset_name"]
source_path = api.get_representation_path(representation)
destination_path = container["namespace"]

task = unreal.AssetImportTask()

task.set_editor_property('filename', source_path)
task.set_editor_property('destination_path', destination_path)
# strip suffix
task.set_editor_property('destination_name', name)
task.set_editor_property('replace_existing', True)
task.set_editor_property('automated', True)
task.set_editor_property('save', True)

# set import options here
# Unreal 4.24 ignores the settings. It works with Unreal 4.26
options = unreal.AbcImportSettings()
options.set_editor_property(
'import_type', unreal.AlembicImportType.STATIC_MESH)

task.options = options
# do import fbx and replace existing data
unreal.AssetToolsHelpers.get_asset_tools().import_asset_tasks([task])
container_path = "{}/{}".format(container["namespace"],
container["objectName"])
# update metadata
unreal_pipeline.imprint(
container_path,
{
"representation": str(representation["_id"]),
"parent": str(representation["parent"])
})

asset_content = unreal.EditorAssetLibrary.list_assets(
destination_path, recursive=True, include_folder=True
)

for a in asset_content:
unreal.EditorAssetLibrary.save_asset(a)

def remove(self, container):
path = container["namespace"]
parent_path = os.path.dirname(path)

unreal.EditorAssetLibrary.delete_directory(path)

asset_content = unreal.EditorAssetLibrary.list_assets(
parent_path, recursive=False
)

if len(asset_content) == 0:
unreal.EditorAssetLibrary.delete_directory(parent_path)

@@ -1,7 +1,6 @@
import os

from avalon import api, pipeline
from avalon import unreal as avalon_unreal
from avalon.unreal import lib
from avalon.unreal import pipeline as unreal_pipeline
import unreal

@@ -1,30 +0,0 @@
import os
import sys

from avalon import api, pipeline

PACKAGE_DIR = os.path.dirname(__file__)
PLUGINS_DIR = os.path.join(PACKAGE_DIR, "plugins", "launcher")
ACTIONS_DIR = os.path.join(PLUGINS_DIR, "actions")


def register_launcher_actions():
"""Register specific actions which should be accessible in the launcher"""

actions = []
ext = ".py"
sys.path.append(ACTIONS_DIR)

for f in os.listdir(ACTIONS_DIR):
file, extention = os.path.splitext(f)
if ext in extention:
module = __import__(file)
klass = getattr(module, file)
actions.append(klass)

if actions is []:
return

for action in actions:
print("Using launcher action from config @ '{}'".format(action.name))
pipeline.register_plugin(api.Action, action)

@@ -79,6 +79,16 @@ from .avalon_context import (
change_timer_to_current_context
)

from .local_settings import (
IniSettingRegistry,
JSONSettingRegistry,
OpenPypeSecureRegistry,
OpenPypeSettingsRegistry,
get_local_site_id,
change_openpype_mongo_url,
get_openpype_username
)

from .applications import (
ApplicationLaunchFailed,
ApplictionExecutableNotFound,
@@ -112,15 +122,6 @@ from .plugin_tools import (
should_decompress
)

from .local_settings import (
IniSettingRegistry,
JSONSettingRegistry,
OpenPypeSecureRegistry,
OpenPypeSettingsRegistry,
get_local_site_id,
change_openpype_mongo_url
)

from .path_tools import (
version_up,
get_version_from_path,
@@ -179,6 +180,14 @@ __all__ = [

"change_timer_to_current_context",

"IniSettingRegistry",
"JSONSettingRegistry",
"OpenPypeSecureRegistry",
"OpenPypeSettingsRegistry",
"get_local_site_id",
"change_openpype_mongo_url",
"get_openpype_username",

"ApplicationLaunchFailed",
"ApplictionExecutableNotFound",
"ApplicationNotFound",
@@ -224,13 +233,6 @@ __all__ = [
"validate_mongo_connection",
"OpenPypeMongoConnection",

"IniSettingRegistry",
"JSONSettingRegistry",
"OpenPypeSecureRegistry",
"OpenPypeSettingsRegistry",
"get_local_site_id",
"change_openpype_mongo_url",

"timeit",

"is_overlapping_otio_ranges",

@@ -25,6 +25,7 @@ from . import (
PypeLogger,
Anatomy
)
from .local_settings import get_openpype_username
from .avalon_context import (
get_workdir_data,
get_workdir_with_workdir_data
@@ -262,14 +263,32 @@ class Application:


class ApplicationManager:
def __init__(self):
self.log = PypeLogger().get_logger(self.__class__.__name__)
"""Load applications and tools and store them by their full name.

Args:
system_settings (dict): Preloaded system settings. When passed manager
will always use these values. Gives ability to create manager
using different settings.
"""
def __init__(self, system_settings=None):
self.log = PypeLogger.get_logger(self.__class__.__name__)

self.app_groups = {}
self.applications = {}
self.tool_groups = {}
self.tools = {}

self._system_settings = system_settings

self.refresh()

def set_system_settings(self, system_settings):
"""Ability to change init system settings.

This will trigger refresh of manager.
"""
self._system_settings = system_settings

self.refresh()

def refresh(self):
@@ -279,9 +298,12 @@ class ApplicationManager:
self.tool_groups.clear()
self.tools.clear()

settings = get_system_settings(
clear_metadata=False, exclude_locals=False
)
if self._system_settings is not None:
settings = copy.deepcopy(self._system_settings)
else:
settings = get_system_settings(
clear_metadata=False, exclude_locals=False
)

app_defs = settings["applications"]
for group_name, variant_defs in app_defs.items():
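A small usage sketch of the new `system_settings` argument; the shape of the preloaded dict is abbreviated and the "tools" key is an assumption, since only settings["applications"] is visible in this hunk:

    from openpype.lib import ApplicationManager

    # Default behaviour: settings are loaded via get_system_settings().
    manager = ApplicationManager()

    # New behaviour: preload settings once and reuse them, e.g. when
    # reacting to a settings-save event whose values are not stored yet.
    preloaded = {"applications": {}, "tools": {}}  # abbreviated, hypothetical shape
    manager = ApplicationManager(system_settings=preloaded)
    manager.set_system_settings(preloaded)  # swapping settings re-triggers refresh()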
@@ -1225,7 +1247,7 @@ def _prepare_last_workfile(data, workdir):
file_template = anatomy.templates["work"]["file"]
workdir_data.update({
"version": 1,
"user": os.environ.get("OPENPYPE_USERNAME") or getpass.getuser(),
"user": get_openpype_username(),
"ext": extensions[0]
})

@@ -1,9 +1,11 @@
# -*- coding: utf-8 -*-
"""Package to deal with saving and retrieving user specific settings."""
import os
import json
import getpass
import platform
from datetime import datetime
from abc import ABCMeta, abstractmethod
import json

# TODO Use pype igniter logic instead of using duplicated code
# disable lru cache in Python 2
@@ -24,11 +26,11 @@ try:
except ImportError:
import ConfigParser as configparser

import platform

import six
import appdirs

from openpype.settings import get_local_settings

from .import validate_mongo_connection

_PLACEHOLDER = object()
@@ -538,3 +540,25 @@ def change_openpype_mongo_url(new_mongo_url):
if existing_value is not None:
registry.delete_item(key)
registry.set_item(key, new_mongo_url)


def get_openpype_username():
"""OpenPype username used for templates and publishing.

May be different than machine's username.

Always returns "OPENPYPE_USERNAME" environment if is set then tries local
settings and last option is to use `getpass.getuser()` which returns
machine username.
"""
username = os.environ.get("OPENPYPE_USERNAME")
if not username:
local_settings = get_local_settings()
username = (
local_settings
.get("general", {})
.get("username")
)
if not username:
username = getpass.getuser()
return username
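The resolution order of `get_openpype_username()` illustrated; the value shown is hypothetical:

    import os

    os.environ["OPENPYPE_USERNAME"] = "jdoe"
    # 1. OPENPYPE_USERNAME environment variable wins         -> "jdoe"
    # 2. else local settings: local_settings["general"]["username"]
    # 3. else getpass.getuser()                              -> machine account name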
@@ -123,6 +123,8 @@ class PypeFormatter(logging.Formatter):

if record.exc_info is not None:
line_len = len(str(record.exc_info[1]))
if line_len > 30:
line_len = 30
out = "{}\n{}\n{}\n{}\n{}".format(
out,
line_len * "=",

@@ -18,10 +18,6 @@ from .webserver import (
WebServerModule,
IWebServerRoutes
)
from .user import (
UserModule,
IUserModule
)
from .idle_manager import (
IdleManager,
IIdleManager
@@ -60,9 +56,6 @@ __all__ = (
"WebServerModule",
"IWebServerRoutes",

"UserModule",
"IUserModule",

"IdleManager",
"IIdleManager",

@@ -64,7 +64,6 @@ class AfterEffectsSubmitDeadline(abstract_submit_deadline.AbstractSubmitDeadline
"AVALON_ASSET",
"AVALON_TASK",
"AVALON_APP_NAME",
"OPENPYPE_USERNAME",
"OPENPYPE_DEV",
"OPENPYPE_LOG_NO_COLORS"
]

@@ -273,7 +273,6 @@ class HarmonySubmitDeadline(
"AVALON_ASSET",
"AVALON_TASK",
"AVALON_APP_NAME",
"OPENPYPE_USERNAME",
"OPENPYPE_DEV",
"OPENPYPE_LOG_NO_COLORS"
]

@@ -47,7 +47,7 @@ payload_skeleton_template = {
"BatchName": None,  # Top-level group name
"Name": None,  # Job name, as seen in Monitor
"UserName": None,
"Plugin": "MayaPype",
"Plugin": "MayaBatch",
"Frames": "{start}-{end}x{step}",
"Comment": None,
"Priority": 50,
@@ -396,7 +396,7 @@ class MayaSubmitDeadline(pyblish.api.InstancePlugin):
step=int(self._instance.data["byFrameStep"]))

self.payload_skeleton["JobInfo"]["Plugin"] = self._instance.data.get(
"mayaRenderPlugin", "MayaPype")
"mayaRenderPlugin", "MayaBatch")

self.payload_skeleton["JobInfo"]["BatchName"] = filename
# Job name, as seen in Monitor
@@ -441,7 +441,6 @@ class MayaSubmitDeadline(pyblish.api.InstancePlugin):
"AVALON_ASSET",
"AVALON_TASK",
"AVALON_APP_NAME",
"OPENPYPE_USERNAME",
"OPENPYPE_DEV",
"OPENPYPE_LOG_NO_COLORS"
]

@@ -31,6 +31,7 @@ class NukeSubmitDeadline(pyblish.api.InstancePlugin):
group = ""
department = ""
limit_groups = {}
use_gpu = False

def process(self, instance):
instance.data["toBeRenderedOn"] = "deadline"
@@ -206,6 +207,10 @@ class NukeSubmitDeadline(pyblish.api.InstancePlugin):
# Resolve relative references
"ProjectPath": script_path,
"AWSAssetFile0": render_path,

# using GPU by default
"UseGpu": self.use_gpu,

# Only the specific write node is rendered.
"WriteNode": exe_node_name
},
@@ -375,7 +380,7 @@ class NukeSubmitDeadline(pyblish.api.InstancePlugin):
list: captured groups list
"""
captured_groups = []
for lg_name, list_node_class in self.deadline_limit_groups.items():
for lg_name, list_node_class in self.limit_groups.items():
for node_class in list_node_class:
for node in nuke.allNodes(recurseGroups=True):
# ignore all nodes not member of defined class

@@ -2,9 +2,9 @@ import json

from openpype.api import ProjectSettings

from openpype.modules.ftrack.lib import ServerAction
from openpype.modules.ftrack.lib.avalon_sync import (
get_pype_attr,
from openpype.modules.ftrack.lib import (
ServerAction,
get_openpype_attr,
CUST_ATTR_AUTO_SYNC
)

@@ -159,7 +159,7 @@ class PrepareProjectServer(ServerAction):
for key, entity in project_anatom_settings["attributes"].items():
attribute_values_by_key[key] = entity.value

cust_attrs, hier_cust_attrs = get_pype_attr(self.session, True)
cust_attrs, hier_cust_attrs = get_openpype_attr(self.session, True)

for attr in hier_cust_attrs:
key = attr["key"]

@@ -18,12 +18,15 @@ from avalon import schema
from avalon.api import AvalonMongoDB

from openpype.modules.ftrack.lib import (
get_openpype_attr,
CUST_ATTR_ID_KEY,
CUST_ATTR_AUTO_SYNC,

avalon_sync,

BaseEvent
)
from openpype.modules.ftrack.lib.avalon_sync import (
CUST_ATTR_ID_KEY,
CUST_ATTR_AUTO_SYNC,
EntitySchemas
)

@@ -125,7 +128,7 @@ class SyncToAvalonEvent(BaseEvent):
@property
def avalon_cust_attrs(self):
if self._avalon_cust_attrs is None:
self._avalon_cust_attrs = avalon_sync.get_pype_attr(
self._avalon_cust_attrs = get_openpype_attr(
self.process_session, query_keys=self.cust_attr_query_keys
)
return self._avalon_cust_attrs

@@ -1,7 +1,10 @@
import collections
import ftrack_api
from openpype.modules.ftrack.lib import BaseAction, statics_icon
from openpype.modules.ftrack.lib.avalon_sync import get_pype_attr
from openpype.modules.ftrack.lib import (
BaseAction,
statics_icon,
get_openpype_attr
)


class CleanHierarchicalAttrsAction(BaseAction):
@@ -52,7 +55,7 @@ class CleanHierarchicalAttrsAction(BaseAction):
)
entity_ids_joined = ", ".join(all_entities_ids)

attrs, hier_attrs = get_pype_attr(session)
attrs, hier_attrs = get_openpype_attr(session)

for attr in hier_attrs:
configuration_key = attr["key"]

@@ -2,10 +2,20 @@ import collections
import json
import arrow
import ftrack_api
from openpype.modules.ftrack.lib import BaseAction, statics_icon
from openpype.modules.ftrack.lib.avalon_sync import (
CUST_ATTR_ID_KEY, CUST_ATTR_GROUP, default_custom_attributes_definition
from openpype.modules.ftrack.lib import (
BaseAction,
statics_icon,

CUST_ATTR_ID_KEY,
CUST_ATTR_GROUP,
CUST_ATTR_TOOLS,
CUST_ATTR_APPLICATIONS,

default_custom_attributes_definition,
app_definitions_from_app_manager,
tool_definitions_from_app_manager
)

from openpype.api import get_system_settings
from openpype.lib import ApplicationManager

@@ -370,24 +380,12 @@ class CustomAttributes(BaseAction):
exc_info=True
)

def app_defs_from_app_manager(self):
app_definitions = []
for app_name, app in self.app_manager.applications.items():
if app.enabled and app.is_host:
app_definitions.append({
app_name: app.full_label
})

if not app_definitions:
app_definitions.append({"empty": "< Empty >"})
return app_definitions

def applications_attribute(self, event):
apps_data = self.app_defs_from_app_manager()
apps_data = app_definitions_from_app_manager(self.app_manager)

applications_custom_attr_data = {
"label": "Applications",
"key": "applications",
"key": CUST_ATTR_APPLICATIONS,
"type": "enumerator",
"entity_type": "show",
"group": CUST_ATTR_GROUP,
@@ -399,19 +397,11 @@ class CustomAttributes(BaseAction):
self.process_attr_data(applications_custom_attr_data, event)

def tools_attribute(self, event):
tools_data = []
for tool_name, tool in self.app_manager.tools.items():
tools_data.append({
tool_name: tool.label
})

# Make sure there is at least one item
if not tools_data:
tools_data.append({"empty": "< Empty >"})
tools_data = tool_definitions_from_app_manager(self.app_manager)

tools_custom_attr_data = {
"label": "Tools",
"key": "tools_env",
"key": CUST_ATTR_TOOLS,
"type": "enumerator",
"is_hierarchical": True,
"group": CUST_ATTR_GROUP,

@@ -4,10 +4,8 @@ from openpype.api import ProjectSettings

from openpype.modules.ftrack.lib import (
BaseAction,
statics_icon
)
from openpype.modules.ftrack.lib.avalon_sync import (
get_pype_attr,
statics_icon,
get_openpype_attr,
CUST_ATTR_AUTO_SYNC
)

@@ -162,7 +160,7 @@ class PrepareProjectLocal(BaseAction):
for key, entity in project_anatom_settings["attributes"].items():
attribute_values_by_key[key] = entity.value

cust_attrs, hier_cust_attrs = get_pype_attr(self.session, True)
cust_attrs, hier_cust_attrs = get_openpype_attr(self.session, True)

for attr in hier_cust_attrs:
key = attr["key"]

@@ -1,4 +1,5 @@
import os
import json
import collections
from abc import ABCMeta, abstractmethod
import six

@@ -8,10 +9,10 @@ from openpype.modules import (
ITrayModule,
IPluginPaths,
ITimersManager,
IUserModule,
ILaunchHookPaths,
ISettingsChangeListener
)
from openpype.settings import SaveWarningExc

FTRACK_MODULE_DIR = os.path.dirname(os.path.abspath(__file__))

@@ -32,7 +33,6 @@ class FtrackModule(
ITrayModule,
IPluginPaths,
ITimersManager,
IUserModule,
ILaunchHookPaths,
ISettingsChangeListener
):
@@ -123,15 +123,86 @@ class FtrackModule(
if self.tray_module:
self.tray_module.stop_timer_manager()

def on_pype_user_change(self, username):
"""Implementation of IUserModule interface."""
if self.tray_module:
self.tray_module.changed_user()

def on_system_settings_save(self, *_args, **_kwargs):
def on_system_settings_save(
self, old_value, new_value, changes, new_value_metadata
):
"""Implementation of ISettingsChangeListener interface."""
# Ignore
return
try:
session = self.create_ftrack_session()
except Exception:
self.log.warning("Couldn't create ftrack session.", exc_info=True)
raise SaveWarningExc((
"Saving of attributes to ftrack wasn't successful,"
" try running Create/Update Avalon Attributes in ftrack."
))

from .lib import (
get_openpype_attr,
CUST_ATTR_APPLICATIONS,
CUST_ATTR_TOOLS,
app_definitions_from_app_manager,
tool_definitions_from_app_manager
)
from openpype.api import ApplicationManager
query_keys = [
"id",
"key",
"config"
]
custom_attributes = get_openpype_attr(
session,
split_hierarchical=False,
query_keys=query_keys
)
app_attribute = None
tool_attribute = None
for custom_attribute in custom_attributes:
key = custom_attribute["key"]
if key == CUST_ATTR_APPLICATIONS:
app_attribute = custom_attribute
elif key == CUST_ATTR_TOOLS:
tool_attribute = custom_attribute

app_manager = ApplicationManager(new_value_metadata)
missing_attributes = []
if not app_attribute:
missing_attributes.append(CUST_ATTR_APPLICATIONS)
else:
config = json.loads(app_attribute["config"])
new_data = app_definitions_from_app_manager(app_manager)
prepared_data = []
for item in new_data:
for key, label in item.items():
prepared_data.append({
"menu": label,
"value": key
})

config["data"] = json.dumps(prepared_data)
app_attribute["config"] = json.dumps(config)

if not tool_attribute:
missing_attributes.append(CUST_ATTR_TOOLS)
else:
config = json.loads(tool_attribute["config"])
new_data = tool_definitions_from_app_manager(app_manager)
prepared_data = []
for item in new_data:
for key, label in item.items():
prepared_data.append({
"menu": label,
"value": key
})
config["data"] = json.dumps(prepared_data)
tool_attribute["config"] = json.dumps(config)

session.commit()

if missing_attributes:
raise SaveWarningExc((
"Couldn't find custom attribute/s ({}) to update."
" Try running Create/Update Avalon Attributes in ftrack."
).format(", ".join(missing_attributes)))

def on_project_settings_save(self, *_args, **_kwargs):
"""Implementation of ISettingsChangeListener interface."""
@@ -139,7 +210,7 @@ class FtrackModule(
return

def on_project_anatomy_save(
self, old_value, new_value, changes, project_name
self, old_value, new_value, changes, project_name, new_value_metadata
):
"""Implementation of ISettingsChangeListener interface."""
if not project_name:
@@ -150,32 +221,49 @@ class FtrackModule(
return

import ftrack_api
from openpype.modules.ftrack.lib import avalon_sync
from openpype.modules.ftrack.lib import get_openpype_attr

try:
session = self.create_ftrack_session()
except Exception:
self.log.warning("Couldn't create ftrack session.", exc_info=True)
raise SaveWarningExc((
"Saving of attributes to ftrack wasn't successful,"
" try running Create/Update Avalon Attributes in ftrack."
))

session = self.create_ftrack_session()
project_entity = session.query(
"Project where full_name is \"{}\"".format(project_name)
).first()

if not project_entity:
self.log.warning((
"Ftrack project with names \"{}\" was not found."
" Skipping settings attributes change callback."
))
return
msg = (
"Ftrack project with name \"{}\" was not found in Ftrack."
" Can't push attribute changes."
).format(project_name)
self.log.warning(msg)
raise SaveWarningExc(msg)

project_id = project_entity["id"]

cust_attr, hier_attr = avalon_sync.get_pype_attr(session)
cust_attr, hier_attr = get_openpype_attr(session)
cust_attr_by_key = {attr["key"]: attr for attr in cust_attr}
hier_attrs_by_key = {attr["key"]: attr for attr in hier_attr}

failed = {}
missing = {}
for key, value in attributes_changes.items():
configuration = hier_attrs_by_key.get(key)
if not configuration:
configuration = cust_attr_by_key.get(key)
if not configuration:
self.log.warning(
"Custom attribute \"{}\" was not found.".format(key)
)
missing[key] = value
continue

# TODO add add permissions check
# TODO add value validations
# - value type and list items
entity_key = collections.OrderedDict()
@@ -189,10 +277,45 @@ class FtrackModule(
"value",
ftrack_api.symbol.NOT_SET,
value

)
)
session.commit()
try:
session.commit()
self.log.debug(
"Changed project custom attribute \"{}\" to \"{}\"".format(
key, value
)
)
except Exception:
self.log.warning(
"Failed to set \"{}\" to \"{}\"".format(key, value),
exc_info=True
)
session.rollback()
failed[key] = value

if not failed and not missing:
return

error_msg = (
"Values were not updated on Ftrack which may cause issues."
" try running Create/Update Avalon Attributes in ftrack "
" and resave project settings."
)
if missing:
error_msg += "\nMissing Custom attributes on Ftrack: {}.".format(
", ".join([
'"{}"'.format(key)
for key in missing.keys()
])
)
if failed:
joined_failed = ", ".join([
'"{}": "{}"'.format(key, value)
for key, value in failed.items()
])
error_msg += "\nFailed to set: {}".format(joined_failed)
raise SaveWarningExc(error_msg)

def create_ftrack_session(self, **session_kwargs):
import ftrack_api

@@ -1,7 +1,21 @@
from .constants import (
CUST_ATTR_ID_KEY,
CUST_ATTR_AUTO_SYNC,
CUST_ATTR_GROUP,
CUST_ATTR_TOOLS,
CUST_ATTR_APPLICATIONS
)
from . settings import (
get_ftrack_url_from_settings,
get_ftrack_event_mongo_info
)
from .custom_attributes import (
default_custom_attributes_definition,
app_definitions_from_app_manager,
tool_definitions_from_app_manager,
get_openpype_attr
)

from . import avalon_sync
from . import credentials
from .ftrack_base_handler import BaseHandler
@@ -10,9 +24,20 @@ from .ftrack_action_handler import BaseAction, ServerAction, statics_icon


__all__ = (
"CUST_ATTR_ID_KEY",
"CUST_ATTR_AUTO_SYNC",
"CUST_ATTR_GROUP",
"CUST_ATTR_TOOLS",
"CUST_ATTR_APPLICATIONS",

"get_ftrack_url_from_settings",
"get_ftrack_event_mongo_info",

"default_custom_attributes_definition",
"app_definitions_from_app_manager",
"tool_definitions_from_app_manager",
"get_openpype_attr",

"avalon_sync",

"credentials",

@@ -14,17 +14,21 @@ else:
from avalon.api import AvalonMongoDB

import avalon

from openpype.api import (
Logger,
Anatomy,
get_anatomy_settings
)
from openpype.lib import ApplicationManager

from .constants import CUST_ATTR_ID_KEY
from .custom_attributes import get_openpype_attr

from bson.objectid import ObjectId
from bson.errors import InvalidId
from pymongo import UpdateOne
import ftrack_api
from openpype.lib import ApplicationManager

log = Logger.get_logger(__name__)

@@ -36,23 +40,6 @@ EntitySchemas = {
"config": "openpype:config-2.0"
}

# Group name of custom attributes
CUST_ATTR_GROUP = "openpype"

# name of Custom attribute that stores mongo_id from avalon db
CUST_ATTR_ID_KEY = "avalon_mongo_id"
CUST_ATTR_AUTO_SYNC = "avalon_auto_sync"


def default_custom_attributes_definition():
json_file_path = os.path.join(
os.path.dirname(os.path.abspath(__file__)),
"custom_attributes.json"
)
with open(json_file_path, "r") as json_stream:
data = json.load(json_stream)
return data


def check_regex(name, entity_type, in_schema=None, schema_patterns=None):
schema_name = "asset-3.0"
@@ -91,39 +78,6 @@ def join_query_keys(keys):
return ",".join(["\"{}\"".format(key) for key in keys])


def get_pype_attr(session, split_hierarchical=True, query_keys=None):
custom_attributes = []
hier_custom_attributes = []
if not query_keys:
query_keys = [
"id",
"entity_type",
"object_type_id",
"is_hierarchical",
"default"
]
# TODO remove deprecated "pype" group from query
cust_attrs_query = (
"select {}"
" from CustomAttributeConfiguration"
# Kept `pype` for Backwards Compatiblity
" where group.name in (\"pype\", \"{}\")"
).format(", ".join(query_keys), CUST_ATTR_GROUP)
all_avalon_attr = session.query(cust_attrs_query).all()
for cust_attr in all_avalon_attr:
if split_hierarchical and cust_attr["is_hierarchical"]:
hier_custom_attributes.append(cust_attr)
continue

custom_attributes.append(cust_attr)

if split_hierarchical:
# return tuple
return custom_attributes, hier_custom_attributes

return custom_attributes


def get_python_type_for_custom_attribute(cust_attr, cust_attr_type_name=None):
"""Python type that should value of custom attribute have.

@@ -921,7 +875,7 @@ class SyncEntitiesFactory:
def set_cutom_attributes(self):
self.log.debug("* Preparing custom attributes")
# Get custom attributes and values
custom_attrs, hier_attrs = get_pype_attr(
custom_attrs, hier_attrs = get_openpype_attr(
self.session, query_keys=self.cust_attr_query_keys
)
ent_types = self.session.query("select id, name from ObjectType").all()
@@ -2508,7 +2462,7 @@ class SyncEntitiesFactory:
if new_entity_id not in p_chilren:
self.entities_dict[parent_id]["children"].append(new_entity_id)

cust_attr, _ = get_pype_attr(self.session)
cust_attr, _ = get_openpype_attr(self.session)
for _attr in cust_attr:
key = _attr["key"]
if key not in av_entity["data"]:
12
openpype/modules/ftrack/lib/constants.py
Normal file
12
openpype/modules/ftrack/lib/constants.py
Normal file
|
|
@ -0,0 +1,12 @@
|
|||
# Group name of custom attributes
|
||||
CUST_ATTR_GROUP = "openpype"
|
||||
|
||||
# name of Custom attribute that stores mongo_id from avalon db
|
||||
CUST_ATTR_ID_KEY = "avalon_mongo_id"
|
||||
# Auto sync of project
|
||||
CUST_ATTR_AUTO_SYNC = "avalon_auto_sync"
|
||||
|
||||
# Applications custom attribute name
|
||||
CUST_ATTR_APPLICATIONS = "applications"
|
||||
# Environment tools custom attribute
|
||||
CUST_ATTR_TOOLS = "tools_env"
|
||||
73
openpype/modules/ftrack/lib/custom_attributes.py
Normal file
73
openpype/modules/ftrack/lib/custom_attributes.py
Normal file
|
|
@ -0,0 +1,73 @@
|
|||
import os
import json

from .constants import CUST_ATTR_GROUP


def default_custom_attributes_definition():
    json_file_path = os.path.join(
        os.path.dirname(os.path.abspath(__file__)),
        "custom_attributes.json"
    )
    with open(json_file_path, "r") as json_stream:
        data = json.load(json_stream)
    return data


def app_definitions_from_app_manager(app_manager):
    app_definitions = []
    for app_name, app in app_manager.applications.items():
        if app.enabled and app.is_host:
            app_definitions.append({
                app_name: app.full_label
            })

    if not app_definitions:
        app_definitions.append({"empty": "< Empty >"})
    return app_definitions


def tool_definitions_from_app_manager(app_manager):
    tools_data = []
    for tool_name, tool in app_manager.tools.items():
        tools_data.append({
            tool_name: tool.label
        })

    # Make sure there is at least one item
    if not tools_data:
        tools_data.append({"empty": "< Empty >"})
    return tools_data


def get_openpype_attr(session, split_hierarchical=True, query_keys=None):
    custom_attributes = []
    hier_custom_attributes = []
    if not query_keys:
        query_keys = [
            "id",
            "entity_type",
            "object_type_id",
            "is_hierarchical",
            "default"
        ]
    # TODO remove deprecated "pype" group from query
    cust_attrs_query = (
        "select {}"
        " from CustomAttributeConfiguration"
        # Kept `pype` for backwards compatibility
        " where group.name in (\"pype\", \"{}\")"
    ).format(", ".join(query_keys), CUST_ATTR_GROUP)
    all_avalon_attr = session.query(cust_attrs_query).all()
    for cust_attr in all_avalon_attr:
        if split_hierarchical and cust_attr["is_hierarchical"]:
            hier_custom_attributes.append(cust_attr)
            continue

        custom_attributes.append(cust_attr)

    if split_hierarchical:
        # return tuple
        return custom_attributes, hier_custom_attributes

    return custom_attributes
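A minimal usage sketch for `get_openpype_attr`, assuming an already-authenticated `ftrack_api.Session` (credentials taken from the usual FTRACK_* environment variables):

```python
import ftrack_api

from openpype.modules.ftrack.lib.custom_attributes import get_openpype_attr

session = ftrack_api.Session()

# Default call splits hierarchical attributes out into a second list.
cust_attrs, hier_attrs = get_openpype_attr(session)
print(len(cust_attrs), "regular,", len(hier_attrs), "hierarchical")

# With split_hierarchical=False a single combined list is returned.
all_attrs = get_openpype_attr(session, split_hierarchical=False)
```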
@@ -22,7 +22,6 @@ class LauncherAction(PypeModule, ITrayAction):
         # Register actions
         if self.tray_initialized:
             from openpype.tools.launcher import actions
-            # actions.register_default_actions()
             actions.register_config_actions()
             actions_paths = self.manager.collect_plugin_paths()["actions"]
             actions.register_actions_from_paths(actions_paths)
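The same registration flow can be driven outside the tray module; a sketch using only the functions visible in the hunk, with an illustrative hard-coded path standing in for `collect_plugin_paths()`:

```python
from openpype.tools.launcher import actions

# Normally gathered via self.manager.collect_plugin_paths()["actions"];
# hard-coded here purely for illustration.
actions_paths = ["/studio/openpype_plugins/actions"]

actions.register_config_actions()
actions.register_actions_from_paths(actions_paths)
```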
@@ -16,18 +16,20 @@ class ISettingsChangeListener:
         }
     """
     @abstractmethod
-    def on_system_settings_save(self, old_value, new_value, changes):
+    def on_system_settings_save(
+        self, old_value, new_value, changes, new_value_metadata
+    ):
         pass

     @abstractmethod
     def on_project_settings_save(
-        self, old_value, new_value, changes, project_name
+        self, old_value, new_value, changes, project_name, new_value_metadata
     ):
         pass

     @abstractmethod
     def on_project_anatomy_save(
-        self, old_value, new_value, changes, project_name
+        self, old_value, new_value, changes, project_name, new_value_metadata
     ):
         pass
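Each hook now also receives `new_value_metadata`. A sketch of what a concrete listener looks like under the new signatures; the class name, import path, and print-based logging are illustrative assumptions, not part of the commit:

```python
from openpype.modules.interfaces import ISettingsChangeListener  # path assumed


class AuditSettingsListener(ISettingsChangeListener):
    """Hypothetical listener that records which settings keys changed."""

    def on_system_settings_save(
        self, old_value, new_value, changes, new_value_metadata
    ):
        print("system settings changed:", sorted(changes))

    def on_project_settings_save(
        self, old_value, new_value, changes, project_name, new_value_metadata
    ):
        print("project settings changed:", project_name)

    def on_project_anatomy_save(
        self, old_value, new_value, changes, project_name, new_value_metadata
    ):
        print("anatomy changed:", project_name)
```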
@@ -7,7 +7,7 @@ from .abstract_provider import AbstractProvider
 from googleapiclient.http import MediaFileUpload, MediaIoBaseDownload
 from openpype.api import Logger
 from openpype.api import get_system_settings
-from ..utils import time_function
+from ..utils import time_function, ResumableError
 import time
@@ -63,7 +63,14 @@ class GDriveHandler(AbstractProvider):
             return

         self.service = self._get_gd_service()
-        self.root = self._prepare_root_info()
+        try:
+            self.root = self._prepare_root_info()
+        except errors.HttpError:
+            log.warning("HttpError in sync loop, "
+                        "trying next loop",
+                        exc_info=True)
+            raise ResumableError

         self._tree = tree
         self.active = True
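Wrapping the provider-specific `HttpError` in a shared `ResumableError` lets the sync loop retry on the next iteration instead of shutting down, and the pattern generalizes to any transient failure. A self-contained sketch of the idea; `ResumableError` is re-declared here only so the example runs on its own, and the function name is illustrative:

```python
from googleapiclient import errors


class ResumableError(Exception):
    """Stand-in for the shared exception from openpype's sync utils."""


def prepare_root_info(service, root_folder_id):
    # Any transient Google Drive API failure becomes a ResumableError,
    # which the caller treats as "skip this loop, try again next time".
    try:
        return service.files().get(fileId=root_folder_id).execute()
    except errors.HttpError:
        raise ResumableError
```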
@@ -92,4 +92,4 @@ factory = ProviderFactory()
 # 7 denotes number of files that could be synced in single loop - learned by
 # trial and error
 factory.register_provider('gdrive', GDriveHandler, 7)
-factory.register_provider('local_drive', LocalDriveHandler, 10)
+factory.register_provider('local_drive', LocalDriveHandler, 50)
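The third argument is the per-loop batch size: how many files the provider may transfer in a single sync iteration. Registering an additional backend would follow the same shape; the handler and module below are purely hypothetical:

```python
# Hypothetical third-party backend; a real handler would subclass
# AbstractProvider and implement its transfer methods.
from my_studio.sync.sftp import SFTPHandler  # assumed module

# A conservative batch size until measured, mirroring the
# "learned by trial and error" tuning above.
factory.register_provider('sftp', SFTPHandler, 5)
```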
@@ -8,7 +8,7 @@ from concurrent.futures._base import CancelledError
 from .providers import lib
 from openpype.lib import PypeLogger

-from .utils import SyncStatus
+from .utils import SyncStatus, ResumableError


 log = PypeLogger().get_logger("SyncServer")
@@ -232,6 +232,7 @@ class SyncServerThread(threading.Thread):
         self.loop = None
         self.is_running = False
         self.executor = concurrent.futures.ThreadPoolExecutor(max_workers=3)
+        self.timer = None

     def run(self):
         self.is_running = True
@@ -266,8 +267,8 @@ class SyncServerThread(threading.Thread):
         Returns:

         """
-        try:
-            while self.is_running and not self.module.is_paused():
+        while self.is_running and not self.module.is_paused():
+            try:
                 import time
                 start_time = None
                 self.module.set_sync_project_settings()  # clean cache
@@ -384,17 +385,27 @@ class SyncServerThread(threading.Thread):

                 duration = time.time() - start_time
                 log.debug("One loop took {:.2f}s".format(duration))
-                await asyncio.sleep(self.module.get_loop_delay(collection))
-        except ConnectionResetError:
-            log.warning("ConnectionResetError in sync loop, trying next loop",
-                        exc_info=True)
-        except CancelledError:
-            # just stopping server
-            pass
-        except Exception:
-            self.stop()
-            log.warning("Unhandled exception in sync loop, stopping server",
-                        exc_info=True)
+
+                delay = self.module.get_loop_delay(collection)
+                log.debug("Waiting for {} seconds to new loop".format(delay))
+                self.timer = asyncio.create_task(self.run_timer(delay))
+                await asyncio.gather(self.timer)
+
+            except ConnectionResetError:
+                log.warning("ConnectionResetError in sync loop, "
+                            "trying next loop",
+                            exc_info=True)
+            except CancelledError:
+                # just stopping server
+                pass
+            except ResumableError:
+                log.warning("ResumableError in sync loop, "
+                            "trying next loop",
+                            exc_info=True)
+            except Exception:
+                self.stop()
+                log.warning("Unhandled except. in sync loop, stopping server",
+                            exc_info=True)

     def stop(self):
         """Sets is_running flag to false, 'check_shutdown' shuts server down"""
@@ -417,6 +428,17 @@ class SyncServerThread(threading.Thread):
             await asyncio.sleep(0.07)
         self.loop.stop()

+    async def run_timer(self, delay):
+        """Wait for 'delay' seconds to start next loop"""
+        await asyncio.sleep(delay)
+
+    def reset_timer(self):
+        """Called when waiting for next loop should be skipped"""
+        log.debug("Resetting timer")
+        if self.timer:
+            self.timer.cancel()
+            self.timer = None
+
     def _working_sites(self, collection):
         if self.module.is_project_paused(collection):
             log.debug("Both sites same, skipping")
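Together with the `asyncio.create_task(...)` / `await asyncio.gather(self.timer)` calls in the loop above, `reset_timer` implements a cancellable delay: cancelling the task ends the wait early so the next sync iteration starts immediately. A self-contained sketch of the same pattern outside openpype (all names illustrative):

```python
import asyncio


async def main():
    # Start the inter-loop delay as a task so another coroutine can cancel it.
    timer = asyncio.create_task(asyncio.sleep(5))

    async def reset_timer_soon():
        await asyncio.sleep(0.1)
        timer.cancel()  # the equivalent of reset_timer(): skip the wait

    asyncio.create_task(reset_timer_soon())
    try:
        await timer
        print("waited the full delay")
    except asyncio.CancelledError:
        print("timer cancelled, starting next loop immediately")


asyncio.run(main())
```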
Some files were not shown because too many files have changed in this diff.