Merge branch 'chore/AY-4908_move-maya-code' of github.com:ynput/ayon-core into chore/AY-4908_move-maya-code

Commit a1cd314bc6: 364 changed files with 2094 additions and 569 deletions
@@ -212,7 +212,13 @@ class ApplicationsAddonSettings(BaseSettingsModel):
         scope=["studio"]
     )
     only_available: bool = SettingsField(
-        True, title="Show only available applications")
+        True,
+        title="Show only available applications",
+        description="Enable to show only applications in AYON Launcher"
+        " for which the executable paths are found on the running machine."
+        " This applies as an additional filter to the applications defined"
+        " in a project's anatomy settings to ignore unavailable applications."
+    )
 
     @validator("tool_groups")
     def validate_unique_name(cls, value):
server_addon/celaction/client/ayon_celaction/__init__.py (new file, 13 lines)
@@ -0,0 +1,13 @@
from .version import __version__
from .addon import (
    CELACTION_ROOT_DIR,
    CelactionAddon,
)


__all__ = (
    "__version__",

    "CELACTION_ROOT_DIR",
    "CelactionAddon",
)
server_addon/celaction/client/ayon_celaction/addon.py (new file, 31 lines)
@@ -0,0 +1,31 @@
import os
from ayon_core.addon import AYONAddon, IHostAddon

from .version import __version__

CELACTION_ROOT_DIR = os.path.dirname(os.path.abspath(__file__))


class CelactionAddon(AYONAddon, IHostAddon):
    name = "celaction"
    version = __version__
    host_name = "celaction"

    def get_launch_hook_paths(self, app):
        if app.host_name != self.host_name:
            return []
        return [
            os.path.join(CELACTION_ROOT_DIR, "hooks")
        ]

    def add_implementation_envs(self, env, _app):
        # Set default values if they are not already set via settings
        defaults = {
            "LOGLEVEL": "DEBUG"
        }
        for key, value in defaults.items():
            if not env.get(key):
                env[key] = value

    def get_workfile_extensions(self):
        return [".scn"]
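A quick sanity sketch of the addon above (a hypothetical check, assuming ayon_core is importable; neither method touches instance state, so they can be exercised without constructing an addon):

env = {}
CelactionAddon.add_implementation_envs(None, env, None)
assert env == {"LOGLEVEL": "DEBUG"}  # default is filled only when unset
assert CelactionAddon.get_workfile_extensions(None) == [".scn"]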
@@ -0,0 +1,152 @@
import os
import shutil
import winreg
import subprocess
from ayon_core.lib import get_ayon_launcher_args
from ayon_applications import PreLaunchHook, LaunchTypes
from ayon_celaction import CELACTION_ROOT_DIR


class CelactionPrelaunchHook(PreLaunchHook):
    """Bootstrap celaction with AYON"""
    app_groups = {"celaction"}
    platforms = {"windows"}
    launch_types = {LaunchTypes.local}

    def execute(self):
        folder_attributes = self.data["folder_entity"]["attrib"]
        width = folder_attributes["resolutionWidth"]
        height = folder_attributes["resolutionHeight"]

        # Add workfile path to launch arguments
        workfile_path = self.workfile_path()
        if workfile_path:
            self.launch_context.launch_args.append(workfile_path)

        # setting output parameters
        path_user_settings = "\\".join([
            "Software", "CelAction", "CelAction2D", "User Settings"
        ])
        winreg.CreateKey(winreg.HKEY_CURRENT_USER, path_user_settings)
        hKey = winreg.OpenKey(
            winreg.HKEY_CURRENT_USER, path_user_settings, 0,
            winreg.KEY_ALL_ACCESS
        )

        path_to_cli = os.path.join(
            CELACTION_ROOT_DIR, "scripts", "publish_cli.py"
        )
        subprocess_args = get_ayon_launcher_args("run", path_to_cli)
        executable = subprocess_args.pop(0)
        workfile_settings = self.get_workfile_settings()

        winreg.SetValueEx(
            hKey,
            "SubmitAppTitle",
            0,
            winreg.REG_SZ,
            executable
        )

        # add required arguments for workfile path
        parameters = subprocess_args + [
            "--currentFile", "*SCENE*"
        ]

        # Add custom parameters from workfile settings
        if "render_chunk" in workfile_settings["submission_overrides"]:
            parameters += [
                "--chunk", "*CHUNK*"
            ]
        if "resolution" in workfile_settings["submission_overrides"]:
            parameters += [
                "--resolutionWidth", "*X*",
                "--resolutionHeight", "*Y*"
            ]
        if "frame_range" in workfile_settings["submission_overrides"]:
            parameters += [
                "--frameStart", "*START*",
                "--frameEnd", "*END*"
            ]

        winreg.SetValueEx(
            hKey, "SubmitParametersTitle", 0, winreg.REG_SZ,
            subprocess.list2cmdline(parameters)
        )

        self.log.debug(f"__ parameters: \"{parameters}\"")

        # setting resolution parameters
        path_submit = "\\".join([
            path_user_settings, "Dialogs", "SubmitOutput"
        ])
        winreg.CreateKey(winreg.HKEY_CURRENT_USER, path_submit)
        hKey = winreg.OpenKey(
            winreg.HKEY_CURRENT_USER, path_submit, 0,
            winreg.KEY_ALL_ACCESS
        )
        winreg.SetValueEx(hKey, "SaveScene", 0, winreg.REG_DWORD, 1)
        winreg.SetValueEx(hKey, "CustomX", 0, winreg.REG_DWORD, width)
        winreg.SetValueEx(hKey, "CustomY", 0, winreg.REG_DWORD, height)

        # making sure message dialogs don't appear when overwriting
        path_overwrite_scene = "\\".join([
            path_user_settings, "Messages", "OverwriteScene"
        ])
        winreg.CreateKey(winreg.HKEY_CURRENT_USER, path_overwrite_scene)
        hKey = winreg.OpenKey(
            winreg.HKEY_CURRENT_USER, path_overwrite_scene, 0,
            winreg.KEY_ALL_ACCESS
        )
        winreg.SetValueEx(hKey, "Result", 0, winreg.REG_DWORD, 6)
        winreg.SetValueEx(hKey, "Valid", 0, winreg.REG_DWORD, 1)

        # set scene as not saved
        path_scene_saved = "\\".join([
            path_user_settings, "Messages", "SceneSaved"
        ])
        winreg.CreateKey(winreg.HKEY_CURRENT_USER, path_scene_saved)
        hKey = winreg.OpenKey(
            winreg.HKEY_CURRENT_USER, path_scene_saved, 0,
            winreg.KEY_ALL_ACCESS
        )
        winreg.SetValueEx(hKey, "Result", 0, winreg.REG_DWORD, 1)
        winreg.SetValueEx(hKey, "Valid", 0, winreg.REG_DWORD, 1)

    def workfile_path(self):
        workfile_path = self.data["last_workfile_path"]

        # copy workfile from template if none exists on the path yet
        if not os.path.exists(workfile_path):
            # TODO add ability to set different template workfile path via
            # settings
            template_path = os.path.join(
                CELACTION_ROOT_DIR,
                "resources",
                "celaction_template_scene.scn"
            )

            if not os.path.exists(template_path):
                self.log.warning(
                    "Couldn't find workfile template file in {}".format(
                        template_path
                    )
                )
                return

            self.log.info(
                f"Creating workfile from template: \"{template_path}\""
            )

            # Copy template workfile to new destination
            shutil.copy2(
                os.path.normpath(template_path),
                os.path.normpath(workfile_path)
            )

        self.log.info(f"Workfile to open: \"{workfile_path}\"")

        return workfile_path

    def get_workfile_settings(self):
        return self.data["project_settings"]["celaction"]["workfile"]
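The hook above repeats the same CreateKey/OpenKey/SetValueEx sequence for every registry path it touches; a minimal Windows-only helper sketch (hypothetical, not part of the addon) condensing that pattern:

import winreg


def set_user_values(sub_key, values):
    # Create or open an HKEY_CURRENT_USER key and write the given values.
    with winreg.CreateKeyEx(
        winreg.HKEY_CURRENT_USER, sub_key, 0, winreg.KEY_ALL_ACCESS
    ) as key:
        for name, (reg_type, value) in values.items():
            winreg.SetValueEx(key, name, 0, reg_type, value)


# e.g. the "scene saved" flags written at the end of execute():
set_user_values(
    "Software\\CelAction\\CelAction2D\\User Settings\\Messages\\SceneSaved",
    {"Result": (winreg.REG_DWORD, 1), "Valid": (winreg.REG_DWORD, 1)},
)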
@@ -0,0 +1,60 @@
import sys
from pprint import pformat

import pyblish.api


class CollectCelactionCliKwargs(pyblish.api.ContextPlugin):
    """Collects all keyword arguments passed from the terminal"""

    label = "Collect Celaction Cli Kwargs"
    order = pyblish.api.CollectorOrder - 0.1

    def process(self, context):
        args = list(sys.argv[1:])
        self.log.info(str(args))
        missing_kwargs = []
        passing_kwargs = {}
        for key in (
            "chunk",
            "frameStart",
            "frameEnd",
            "resolutionWidth",
            "resolutionHeight",
            "currentFile",
        ):
            arg_key = f"--{key}"
            if arg_key not in args:
                missing_kwargs.append(key)
                continue
            arg_idx = args.index(arg_key)
            args.pop(arg_idx)
            if key != "currentFile":
                value = args.pop(arg_idx)
            else:
                # the file path may contain spaces, so it can arrive split
                # into multiple arguments; join the remainder back together
                path_parts = []
                while arg_idx < len(args):
                    path_parts.append(args.pop(arg_idx))
                value = " ".join(path_parts).strip('"')

            passing_kwargs[key] = value

        if missing_kwargs:
            self.log.debug("Missing arguments {}".format(
                ", ".join(
                    [f'"{key}"' for key in missing_kwargs]
                )
            ))

        self.log.info("Storing kwargs ...")
        self.log.debug("_ passing_kwargs: {}".format(pformat(passing_kwargs)))

        # set kwargs to context data
        context.set_data("passingKwargs", passing_kwargs)

        # also set kwargs on context data as individual keys with values
        for k, v in passing_kwargs.items():
            self.log.info(f"Setting `{k}` to instance.data with value: `{v}`")
            if k in ["frameStart", "frameEnd"]:
                context.data[k] = passing_kwargs[k] = int(v)
            else:
                context.data[k] = v
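An illustrative invocation of the collector above (values are hypothetical; CelAction substitutes the *TOKEN* placeholders registered by the prelaunch hook before running the CLI):

# publish_cli.py --chunk 10 --frameStart 1 --frameEnd 50
#     --resolutionWidth 1920 --resolutionHeight 1080
#     --currentFile C:\proj\sh010 v001.scn
#
# would be collected as:
# {"chunk": "10", "frameStart": "1", "frameEnd": "50",
#  "resolutionWidth": "1920", "resolutionHeight": "1080",
#  "currentFile": "C:\\proj\\sh010 v001.scn"}
# (frameStart/frameEnd are cast to int when stored on the context)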
@@ -0,0 +1,96 @@
import os
import pyblish.api


class CollectCelactionInstances(pyblish.api.ContextPlugin):
    """Adds the celaction render instances"""

    label = "Collect Celaction Instances"
    order = pyblish.api.CollectorOrder + 0.1

    def process(self, context):
        task = context.data["task"]
        current_file = context.data["currentFile"]
        staging_dir = os.path.dirname(current_file)
        scene_file = os.path.basename(current_file)
        version = context.data["version"]

        folder_entity = context.data["folderEntity"]

        folder_attributes = folder_entity["attrib"]

        shared_instance_data = {
            "folderPath": folder_entity["path"],
            "frameStart": folder_attributes["frameStart"],
            "frameEnd": folder_attributes["frameEnd"],
            "handleStart": folder_attributes["handleStart"],
            "handleEnd": folder_attributes["handleEnd"],
            "fps": folder_attributes["fps"],
            "resolutionWidth": folder_attributes["resolutionWidth"],
            "resolutionHeight": folder_attributes["resolutionHeight"],
            "pixelAspect": 1,
            "step": 1,
            "version": version
        }

        celaction_kwargs = context.data.get(
            "passingKwargs", {})

        if celaction_kwargs:
            shared_instance_data.update(celaction_kwargs)

        # workfile instance
        product_type = "workfile"
        product_name = product_type + task.capitalize()
        # Create instance
        instance = context.create_instance(product_name)

        # creating instance data
        instance.data.update({
            "label": scene_file,
            "productName": product_name,
            "productType": product_type,
            "family": product_type,
            "families": [product_type],
            "representations": []
        })

        # adding basic script data
        instance.data.update(shared_instance_data)

        # creating representation
        representation = {
            "name": "scn",
            "ext": "scn",
            "files": scene_file,
            "stagingDir": staging_dir,
        }

        instance.data["representations"].append(representation)

        self.log.info("Publishing Celaction workfile")

        # render instance
        product_name = f"render{task}Main"
        product_type = "render.farm"
        instance = context.create_instance(name=product_name)
        # getting instance state
        instance.data["publish"] = True

        # add folderEntity data into instance
        instance.data.update({
            "label": "{} - farm".format(product_name),
            "productType": product_type,
            "family": product_type,
            "families": [product_type],
            "productName": product_name
        })

        # adding basic script data
        instance.data.update(shared_instance_data)

        self.log.info("Publishing Celaction render instance")
        self.log.debug(f"Instance data: `{instance.data}`")

        for i in context:
            self.log.debug(f"{i.data['families']}")
@@ -0,0 +1,65 @@
import os
import copy
import pyblish.api


class CollectRenderPath(pyblish.api.InstancePlugin):
    """Generate file and directory path where rendered images will be stored"""

    label = "Collect Render Path"
    order = pyblish.api.CollectorOrder + 0.495
    families = ["render.farm"]

    settings_category = "celaction"

    # Presets
    output_extension = "png"
    anatomy_template_key_render_files = None
    anatomy_template_key_metadata = None

    def process(self, instance):
        anatomy = instance.context.data["anatomy"]
        anatomy_data = copy.deepcopy(instance.data["anatomyData"])
        padding = anatomy.templates_obj.frame_padding
        product_type = "render"
        anatomy_data.update({
            "frame": f"%0{padding}d",
            "family": product_type,
            "representation": self.output_extension,
            "ext": self.output_extension
        })
        anatomy_data["product"]["type"] = product_type

        # get anatomy rendering keys
        r_anatomy_key = self.anatomy_template_key_render_files
        m_anatomy_key = self.anatomy_template_key_metadata

        # get folder and path for rendering images from celaction
        r_template_item = anatomy.get_template_item("publish", r_anatomy_key)
        render_dir = r_template_item["directory"].format_strict(anatomy_data)
        render_path = r_template_item["path"].format_strict(anatomy_data)
        self.log.debug("__ render_path: `{}`".format(render_path))

        # create dir if it doesn't exist
        try:
            if not os.path.isdir(render_dir):
                os.makedirs(render_dir, exist_ok=True)
        except OSError:
            # directory is not available
            self.log.warning("Path is unreachable: `{}`".format(render_dir))

        # add rendering path to instance data
        instance.data["path"] = render_path

        # get anatomy for published renders folder path
        m_template_item = anatomy.get_template_item(
            "publish", m_anatomy_key, default=None
        )
        if m_template_item is not None:
            metadata_path = m_template_item["directory"].format_strict(
                anatomy_data
            )
            instance.data["publishRenderMetadataFolder"] = metadata_path
            self.log.info("Metadata render path: `{}`".format(metadata_path))

        self.log.info(f"Render output path set to: `{render_path}`")
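The "frame" token set above is a printf-style padding pattern; with a frame padding of 4, for example:

padding = 4                      # anatomy.templates_obj.frame_padding
assert f"%0{padding}d" == "%04d"
assert "%04d" % 37 == "0037"     # expanded per frame by the farm renderer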
@@ -0,0 +1,22 @@
import shutil

import pyblish.api

from ayon_core.lib import version_up


class VersionUpScene(pyblish.api.ContextPlugin):
    order = pyblish.api.IntegratorOrder + 0.5
    label = 'Version Up Scene'
    families = ['workfile']
    optional = True
    active = True

    def process(self, context):
        current_file = context.data.get('currentFile')
        v_up = version_up(current_file)
        self.log.debug('Current file is: {}'.format(current_file))
        self.log.debug('Version up: {}'.format(v_up))

        shutil.copy2(current_file, v_up)
        self.log.info('Scene saved into new version: {}'.format(v_up))
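version_up comes from ayon_core.lib; as a rough sketch of its expected contract (an assumption for illustration, not the real implementation), it bumps the trailing version token of a workfile name:

import re


def version_up_sketch(filepath):
    # Return filepath with its trailing vNNN token incremented,
    # preserving the zero padding. Hypothetical stand-in only.
    def bump(match):
        number = int(match.group(1)) + 1
        return "v{:0{}d}".format(number, len(match.group(1)))
    return re.sub(r"v(\d+)(?=\.\w+$)", bump, filepath, count=1)


assert version_up_sketch("ep01_sh010_v012.scn") == "ep01_sh010_v013.scn"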
Binary file not shown.
@@ -0,0 +1,36 @@
import os
import sys

import pyblish.api
import pyblish.util

from ayon_celaction import CELACTION_ROOT_DIR
from ayon_core.lib import Logger
from ayon_core.tools.utils import host_tools
from ayon_core.pipeline import install_ayon_plugins


log = Logger.get_logger("celaction")

PUBLISH_HOST = "celaction"
PLUGINS_DIR = os.path.join(CELACTION_ROOT_DIR, "plugins")
PUBLISH_PATH = os.path.join(PLUGINS_DIR, "publish")


def main():
    # Registers global pyblish plugins
    install_ayon_plugins()

    if os.path.exists(PUBLISH_PATH):
        log.info(f"Registering path: {PUBLISH_PATH}")
        pyblish.api.register_plugin_path(PUBLISH_PATH)

    pyblish.api.register_host(PUBLISH_HOST)
    pyblish.api.register_target("local")

    return host_tools.show_publish()


if __name__ == "__main__":
    result = main()
    sys.exit(not bool(result))
server_addon/celaction/client/ayon_celaction/version.py (new file, 3 lines)
@@ -0,0 +1,3 @@
# -*- coding: utf-8 -*-
"""Package declaring AYON addon 'celaction' version."""
__version__ = "0.2.0"
@@ -1,3 +1,12 @@
 name = "celaction"
 title = "CelAction"
-version = "0.1.0"
+version = "0.2.0"
+
+client_dir = "ayon_celaction"
+
+ayon_required_addons = {
+    "core": ">0.3.2",
+}
+ayon_compatible_addons = {
+    "applications": ">=0.2.0",
+}
server_addon/flame/client/ayon_flame/__init__.py (new file, 13 lines)
@@ -0,0 +1,13 @@
from .version import __version__
from .addon import (
    FLAME_ADDON_ROOT,
    FlameAddon,
)


__all__ = (
    "__version__",

    "FLAME_ADDON_ROOT",
    "FlameAddon",
)
server_addon/flame/client/ayon_flame/addon.py (new file, 35 lines)
@@ -0,0 +1,35 @@
import os
from ayon_core.addon import AYONAddon, IHostAddon

from .version import __version__

FLAME_ADDON_ROOT = os.path.dirname(os.path.abspath(__file__))


class FlameAddon(AYONAddon, IHostAddon):
    name = "flame"
    version = __version__
    host_name = "flame"

    def add_implementation_envs(self, env, _app):
        # Add requirements to DL_PYTHON_HOOK_PATH
        env["DL_PYTHON_HOOK_PATH"] = os.path.join(FLAME_ADDON_ROOT, "startup")
        env.pop("QT_AUTO_SCREEN_SCALE_FACTOR", None)

        # Set default values if they are not already set via settings
        defaults = {
            "LOGLEVEL": "DEBUG"
        }
        for key, value in defaults.items():
            if not env.get(key):
                env[key] = value

    def get_launch_hook_paths(self, app):
        if app.host_name != self.host_name:
            return []
        return [
            os.path.join(FLAME_ADDON_ROOT, "hooks")
        ]

    def get_workfile_extensions(self):
        return [".otoc"]
server_addon/flame/client/ayon_flame/api/__init__.py (new file, 159 lines)
@@ -0,0 +1,159 @@
"""
AYON Autodesk Flame api
"""
from .constants import (
    COLOR_MAP,
    MARKER_NAME,
    MARKER_COLOR,
    MARKER_DURATION,
    MARKER_PUBLISH_DEFAULT
)
from .lib import (
    CTX,
    FlameAppFramework,
    get_current_project,
    get_current_sequence,
    create_segment_data_marker,
    get_segment_data_marker,
    set_segment_data_marker,
    set_publish_attribute,
    get_publish_attribute,
    get_sequence_segments,
    maintained_segment_selection,
    reset_segment_selection,
    get_segment_attributes,
    get_clips_in_reels,
    get_reformatted_filename,
    get_frame_from_filename,
    get_padding_from_filename,
    maintained_object_duplication,
    maintained_temp_file_path,
    get_clip_segment,
    get_batch_group_from_desktop,
    MediaInfoFile,
    TimeEffectMetadata
)
from .utils import (
    setup,
    get_flame_version,
    get_flame_install_root
)
from .pipeline import (
    install,
    uninstall,
    ls,
    containerise,
    update_container,
    remove_instance,
    list_instances,
    imprint,
    maintained_selection
)
from .menu import (
    FlameMenuProjectConnect,
    FlameMenuTimeline,
    FlameMenuUniversal
)
from .plugin import (
    Creator,
    PublishableClip,
    ClipLoader,
    OpenClipSolver
)
from .workio import (
    open_file,
    save_file,
    current_file,
    has_unsaved_changes,
    file_extensions,
    work_root
)
from .render_utils import (
    export_clip,
    get_preset_path_by_xml_name,
    modify_preset_file
)
from .batch_utils import (
    create_batch_group,
    create_batch_group_conent
)

__all__ = [
    # constants
    "COLOR_MAP",
    "MARKER_NAME",
    "MARKER_COLOR",
    "MARKER_DURATION",
    "MARKER_PUBLISH_DEFAULT",

    # lib
    "CTX",
    "FlameAppFramework",
    "get_current_project",
    "get_current_sequence",
    "create_segment_data_marker",
    "get_segment_data_marker",
    "set_segment_data_marker",
    "set_publish_attribute",
    "get_publish_attribute",
    "get_sequence_segments",
    "maintained_segment_selection",
    "reset_segment_selection",
    "get_segment_attributes",
    "get_clips_in_reels",
    "get_reformatted_filename",
    "get_frame_from_filename",
    "get_padding_from_filename",
    "maintained_object_duplication",
    "maintained_temp_file_path",
    "get_clip_segment",
    "get_batch_group_from_desktop",
    "MediaInfoFile",
    "TimeEffectMetadata",

    # pipeline
    "install",
    "uninstall",
    "ls",
    "containerise",
    "update_container",
    "remove_instance",
    "list_instances",
    "imprint",
    "maintained_selection",

    # utils
    "setup",
    "get_flame_version",
    "get_flame_install_root",

    # menu
    "FlameMenuProjectConnect",
    "FlameMenuTimeline",
    "FlameMenuUniversal",

    # plugin
    "Creator",
    "PublishableClip",
    "ClipLoader",
    "OpenClipSolver",

    # workio
    "open_file",
    "save_file",
    "current_file",
    "has_unsaved_changes",
    "file_extensions",
    "work_root",

    # render utils
    "export_clip",
    "get_preset_path_by_xml_name",
    "modify_preset_file",

    # batch utils
    "create_batch_group",
    "create_batch_group_conent"
]
server_addon/flame/client/ayon_flame/api/batch_utils.py (new file, 151 lines)
@@ -0,0 +1,151 @@
import flame


def create_batch_group(
    name,
    frame_start,
    frame_duration,
    update_batch_group=None,
    **kwargs
):
    """Create Batch Group in active project's Desktop

    Args:
        name (str): name of batch group to be created
        frame_start (int): start frame of batch
        frame_duration (int): duration of batch in frames
        update_batch_group (PyBatch)[optional]: batch group to update

    Return:
        PyBatch: active flame batch group
    """
    # make sure some batch obj is present
    batch_group = update_batch_group or flame.batch

    schematic_reels = kwargs.get("shematic_reels") or ['LoadedReel1']
    shelf_reels = kwargs.get("shelf_reels") or ['ShelfReel1']

    handle_start = kwargs.get("handleStart") or 0
    handle_end = kwargs.get("handleEnd") or 0

    frame_start -= handle_start
    frame_duration += handle_start + handle_end

    if not update_batch_group:
        # Create batch group with name, start_frame value, duration value,
        # set of schematic reel names, set of shelf reel names
        batch_group = batch_group.create_batch_group(
            name,
            start_frame=frame_start,
            duration=frame_duration,
            reels=schematic_reels,
            shelf_reels=shelf_reels
        )
    else:
        batch_group.name = name
        batch_group.start_frame = frame_start
        batch_group.duration = frame_duration

        # add reels to batch group
        _add_reels_to_batch_group(
            batch_group, schematic_reels, shelf_reels)

        # TODO: also update write node if there is any
        # TODO: also update loaders to start from correct frameStart

    if kwargs.get("switch_batch_tab"):
        # use this command to switch to the batch tab
        batch_group.go_to()

    return batch_group


def _add_reels_to_batch_group(batch_group, reels, shelf_reels):
    # update or create defined reels
    # helper variables
    reel_names = [
        r.name.get_value()
        for r in batch_group.reels
    ]
    shelf_reel_names = [
        r.name.get_value()
        for r in batch_group.shelf_reels
    ]
    # add schematic reels
    for _r in reels:
        if _r in reel_names:
            continue
        batch_group.create_reel(_r)

    # add shelf reels
    for _sr in shelf_reels:
        if _sr in shelf_reel_names:
            continue
        batch_group.create_shelf_reel(_sr)


def create_batch_group_conent(batch_nodes, batch_links, batch_group=None):
    """Creating batch group with links

    Args:
        batch_nodes (list of dict): each dict is node definition
        batch_links (list of dict): each dict is link definition
        batch_group (PyBatch, optional): batch group. Defaults to None.

    Return:
        dict: all batch nodes {name or id: PyNode}
    """
    # make sure some batch obj is present
    batch_group = batch_group or flame.batch
    all_batch_nodes = {
        b.name.get_value(): b
        for b in batch_group.nodes
    }
    for node in batch_nodes:
        # NOTE: node_props ideally needs to be an OrderedDict, so that
        # properties are applied in a defined order
        node_id, node_type, node_props = (
            node["id"], node["type"], node["properties"])

        # get node name for checking if exists
        node_name = node_props.pop("name", None) or node_id

        if all_batch_nodes.get(node_name):
            # update existing batch node
            batch_node = all_batch_nodes[node_name]
        else:
            # create new batch node
            batch_node = batch_group.create_node(node_type)

            # set name
            batch_node.name.set_value(node_name)

        # set attributes found in node props
        for key, value in node_props.items():
            if not hasattr(batch_node, key):
                continue
            setattr(batch_node, key, value)

        # add created node for possible linking
        all_batch_nodes[node_id] = batch_node

    # link nodes to each other
    for link in batch_links:
        _from_n, _to_n = link["from_node"], link["to_node"]

        # check if all linking nodes are available
        if not all([
            all_batch_nodes.get(_from_n["id"]),
            all_batch_nodes.get(_to_n["id"])
        ]):
            continue

        # link nodes in defined link
        batch_group.connect_nodes(
            all_batch_nodes[_from_n["id"]], _from_n["connector"],
            all_batch_nodes[_to_n["id"]], _to_n["connector"]
        )

    # sort batch nodes
    batch_group.organize()

    return all_batch_nodes
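Hypothetical node and link definitions matching the dict shapes the create_batch_group_conent docstring describes (node types, names and connector labels are illustrative only):

batch_nodes = [
    {
        "id": "comp_in",
        "type": "Clip",
        "properties": {"name": "plate_v001"},
    },
    {
        "id": "comp_out",
        "type": "Write File",
        "properties": {"name": "render_out"},
    },
]
batch_links = [
    {
        "from_node": {"id": "comp_in", "connector": "Default"},
        "to_node": {"id": "comp_out", "connector": "Front"},
    },
]
# inside a Flame session:
# nodes_by_id = create_batch_group_conent(batch_nodes, batch_links)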
server_addon/flame/client/ayon_flame/api/constants.py (new file, 24 lines)
@@ -0,0 +1,24 @@
"""
AYON Flame api constants
"""
# AYON marker workflow variables
MARKER_NAME = "OpenPypeData"
MARKER_DURATION = 0
MARKER_COLOR = "cyan"
MARKER_PUBLISH_DEFAULT = False

# AYON color definitions
COLOR_MAP = {
    "red": (1.0, 0.0, 0.0),
    "orange": (1.0, 0.5, 0.0),
    "yellow": (1.0, 1.0, 0.0),
    "pink": (1.0, 0.5, 1.0),
    "white": (1.0, 1.0, 1.0),
    "green": (0.0, 1.0, 0.0),
    "cyan": (0.0, 1.0, 1.0),
    "blue": (0.0, 0.0, 1.0),
    "purple": (0.5, 0.0, 0.5),
    "magenta": (0.5, 0.0, 1.0),
    "black": (0.0, 0.0, 0.0)
}
server_addon/flame/client/ayon_flame/api/lib.py (new file, 1272 lines)
File diff suppressed because it is too large
server_addon/flame/client/ayon_flame/api/menu.py (new file, 256 lines)
@@ -0,0 +1,256 @@
from copy import deepcopy
from pprint import pformat

from qtpy import QtWidgets

from ayon_core.pipeline import get_current_project_name
from ayon_core.tools.utils.host_tools import HostToolsHelper

menu_group_name = 'OpenPype'

default_flame_export_presets = {
    'Publish': {
        'PresetVisibility': 2,
        'PresetType': 0,
        'PresetFile': 'OpenEXR/OpenEXR (16-bit fp PIZ).xml'
    },
    'Preview': {
        'PresetVisibility': 3,
        'PresetType': 2,
        'PresetFile': 'Generate Preview.xml'
    },
    'Thumbnail': {
        'PresetVisibility': 3,
        'PresetType': 0,
        'PresetFile': 'Generate Thumbnail.xml'
    }
}


def callback_selection(selection, function):
    import ayon_flame.api as opfapi
    opfapi.CTX.selection = selection
    print("Hook Selection: \n\t{}".format(
        pformat({
            index: (type(item), item.name)
            for index, item in enumerate(opfapi.CTX.selection)})
    ))
    function()


class _FlameMenuApp(object):
    def __init__(self, framework):
        self.name = self.__class__.__name__
        self.framework = framework
        self.log = framework.log
        self.menu_group_name = menu_group_name
        self.dynamic_menu_data = {}

        # flame module is only available when a
        # flame project is loaded and initialized
        self.flame = None
        try:
            import flame
            self.flame = flame
        except ImportError:
            self.flame = None

        self.flame_project_name = flame.project.current_project.name
        self.prefs = self.framework.prefs_dict(self.framework.prefs, self.name)
        self.prefs_user = self.framework.prefs_dict(
            self.framework.prefs_user, self.name)
        self.prefs_global = self.framework.prefs_dict(
            self.framework.prefs_global, self.name)

        self.mbox = QtWidgets.QMessageBox()
        project_name = get_current_project_name()
        self.menu = {
            "actions": [{
                'name': project_name or "project",
                'isEnabled': False
            }],
            "name": self.menu_group_name
        }
        self.tools_helper = HostToolsHelper()

    def __getattr__(self, name):
        def method(*args, **kwargs):
            print('calling %s' % name)
        return method

    def rescan(self, *args, **kwargs):
        if not self.flame:
            try:
                import flame
                self.flame = flame
            except ImportError:
                self.flame = None

        if self.flame:
            self.flame.execute_shortcut('Rescan Python Hooks')
            self.log.info('Rescan Python Hooks')


class FlameMenuProjectConnect(_FlameMenuApp):

    # flameMenuProjectconnect app takes care of the preferences dialog as well

    def __init__(self, framework):
        _FlameMenuApp.__init__(self, framework)

    def __getattr__(self, name):
        def method(*args, **kwargs):
            project = self.dynamic_menu_data.get(name)
            if project:
                self.link_project(project)
        return method

    def build_menu(self):
        if not self.flame:
            return []

        menu = deepcopy(self.menu)

        menu['actions'].append({
            "name": "Workfiles...",
            "execute": lambda x: self.tools_helper.show_workfiles()
        })
        menu['actions'].append({
            "name": "Load...",
            "execute": lambda x: self.tools_helper.show_loader()
        })
        menu['actions'].append({
            "name": "Manage...",
            "execute": lambda x: self.tools_helper.show_scene_inventory()
        })
        menu['actions'].append({
            "name": "Library...",
            "execute": lambda x: self.tools_helper.show_library_loader()
        })
        return menu

    def refresh(self, *args, **kwargs):
        self.rescan()

    def rescan(self, *args, **kwargs):
        if not self.flame:
            try:
                import flame
                self.flame = flame
            except ImportError:
                self.flame = None

        if self.flame:
            self.flame.execute_shortcut('Rescan Python Hooks')
            self.log.info('Rescan Python Hooks')


class FlameMenuTimeline(_FlameMenuApp):

    # flameMenuProjectconnect app takes care of the preferences dialog as well

    def __init__(self, framework):
        _FlameMenuApp.__init__(self, framework)

    def __getattr__(self, name):
        def method(*args, **kwargs):
            project = self.dynamic_menu_data.get(name)
            if project:
                self.link_project(project)
        return method

    def build_menu(self):
        if not self.flame:
            return []

        menu = deepcopy(self.menu)

        menu['actions'].append({
            "name": "Create...",
            "execute": lambda x: callback_selection(
                x, self.tools_helper.show_creator)
        })
        menu['actions'].append({
            "name": "Publish...",
            "execute": lambda x: callback_selection(
                x, self.tools_helper.show_publish)
        })
        menu['actions'].append({
            "name": "Load...",
            "execute": lambda x: self.tools_helper.show_loader()
        })
        menu['actions'].append({
            "name": "Manage...",
            "execute": lambda x: self.tools_helper.show_scene_inventory()
        })
        menu['actions'].append({
            "name": "Library...",
            "execute": lambda x: self.tools_helper.show_library_loader()
        })
        return menu

    def refresh(self, *args, **kwargs):
        self.rescan()

    def rescan(self, *args, **kwargs):
        if not self.flame:
            try:
                import flame
                self.flame = flame
            except ImportError:
                self.flame = None

        if self.flame:
            self.flame.execute_shortcut('Rescan Python Hooks')
            self.log.info('Rescan Python Hooks')


class FlameMenuUniversal(_FlameMenuApp):

    # flameMenuProjectconnect app takes care of the preferences dialog as well

    def __init__(self, framework):
        _FlameMenuApp.__init__(self, framework)

    def __getattr__(self, name):
        def method(*args, **kwargs):
            project = self.dynamic_menu_data.get(name)
            if project:
                self.link_project(project)
        return method

    def build_menu(self):
        if not self.flame:
            return []

        menu = deepcopy(self.menu)

        menu['actions'].append({
            "name": "Load...",
            "execute": lambda x: callback_selection(
                x, self.tools_helper.show_loader)
        })
        menu['actions'].append({
            "name": "Manage...",
            "execute": lambda x: self.tools_helper.show_scene_inventory()
        })
        menu['actions'].append({
            "name": "Library...",
            "execute": lambda x: self.tools_helper.show_library_loader()
        })
        return menu

    def refresh(self, *args, **kwargs):
        self.rescan()

    def rescan(self, *args, **kwargs):
        if not self.flame:
            try:
                import flame
                self.flame = flame
            except ImportError:
                self.flame = None

        if self.flame:
            self.flame.execute_shortcut('Rescan Python Hooks')
            self.log.info('Rescan Python Hooks')
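For reference, the menu dicts built above take roughly this shape (inferred from the code; Flame consumes them via its Python-hook menu system):

# {"name": "OpenPype",
#  "actions": [{"name": "project_name", "isEnabled": False},
#              {"name": "Load...", "execute": <callable>}]}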
server_addon/flame/client/ayon_flame/api/pipeline.py (new file, 174 lines)
@@ -0,0 +1,174 @@
"""
Basic avalon integration
"""
import os
import contextlib
from pyblish import api as pyblish

from ayon_core.lib import Logger
from ayon_core.pipeline import (
    register_loader_plugin_path,
    register_creator_plugin_path,
    deregister_loader_plugin_path,
    deregister_creator_plugin_path,
    AVALON_CONTAINER_ID,
)
from ayon_flame import FLAME_ADDON_ROOT
from .lib import (
    set_segment_data_marker,
    set_publish_attribute,
    maintained_segment_selection,
    get_current_sequence,
    reset_segment_selection
)

PLUGINS_DIR = os.path.join(FLAME_ADDON_ROOT, "plugins")
PUBLISH_PATH = os.path.join(PLUGINS_DIR, "publish")
LOAD_PATH = os.path.join(PLUGINS_DIR, "load")
CREATE_PATH = os.path.join(PLUGINS_DIR, "create")

AVALON_CONTAINERS = "AVALON_CONTAINERS"

log = Logger.get_logger(__name__)


def install():
    pyblish.register_host("flame")
    pyblish.register_plugin_path(PUBLISH_PATH)
    register_loader_plugin_path(LOAD_PATH)
    register_creator_plugin_path(CREATE_PATH)
    log.info("AYON Flame plug-ins registered ...")

    # register callback for switching publishable
    pyblish.register_callback("instanceToggled", on_pyblish_instance_toggled)

    log.info("AYON Flame host installed ...")


def uninstall():
    pyblish.deregister_host("flame")

    log.info("Deregistering Flame plug-ins..")
    pyblish.deregister_plugin_path(PUBLISH_PATH)
    deregister_loader_plugin_path(LOAD_PATH)
    deregister_creator_plugin_path(CREATE_PATH)

    # deregister callback for switching publishable
    pyblish.deregister_callback("instanceToggled", on_pyblish_instance_toggled)

    log.info("AYON Flame host uninstalled ...")


def containerise(flame_clip_segment,
                 name,
                 namespace,
                 context,
                 loader=None,
                 data=None):

    data_imprint = {
        "schema": "openpype:container-2.0",
        "id": AVALON_CONTAINER_ID,
        "name": str(name),
        "namespace": str(namespace),
        "loader": str(loader),
        "representation": context["representation"]["id"],
    }

    if data:
        for k, v in data.items():
            data_imprint[k] = v

    log.debug("_ data_imprint: {}".format(data_imprint))

    set_segment_data_marker(flame_clip_segment, data_imprint)

    return True


def ls():
    """List available containers.
    """
    return []


def parse_container(tl_segment, validate=True):
    """Return container data from timeline_item's openpype tag.
    """
    # TODO: parse_container
    pass


def update_container(tl_segment, data=None):
    """Update container data to input timeline_item's openpype tag.
    """
    # TODO: update_container
    pass


def on_pyblish_instance_toggled(instance, old_value, new_value):
    """Toggle node passthrough states on instance toggles."""

    log.info("instance toggle: {}, old_value: {}, new_value:{} ".format(
        instance, old_value, new_value))

    # # Whether instances should be passthrough based on new value
    # timeline_item = instance.data["item"]
    # set_publish_attribute(timeline_item, new_value)


def remove_instance(instance):
    """Remove instance marker from track item."""
    # TODO: remove_instance
    pass


def list_instances():
    """List all created instances from current workfile."""
    # TODO: list_instances
    pass


def imprint(segment, data=None):
    """
    Adding openpype data to Flame timeline segment.

    Also including publish attribute into tag.

    Arguments:
        segment (flame.PySegment): flame api object
        data (dict): Any data which needs to be imprinted

    Examples:
        data = {
            'asset': 'sq020sh0280',
            'productType': 'render',
            'productName': 'productMain'
        }
    """
    data = data or {}

    set_segment_data_marker(segment, data)

    # add publish attribute
    set_publish_attribute(segment, True)


@contextlib.contextmanager
def maintained_selection():
    import flame
    from .lib import CTX

    # check if segment is selected
    if isinstance(CTX.selection[0], flame.PySegment):
        sequence = get_current_sequence(CTX.selection)

        try:
            with maintained_segment_selection(sequence) as selected:
                yield
        finally:
            # reset all selected clips
            reset_segment_selection(sequence)
            # select only original selection of segments
            for segment in selected:
                segment.selected = True
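The intended use of the maintained_selection context manager above, as a sketch (it requires a live Flame session with a PySegment selection stored in CTX):

with maintained_selection():
    ...  # operations that disturb the timeline selection
# the original segment selection is restored on exit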
server_addon/flame/client/ayon_flame/api/plugin.py (new file, 1089 lines)
File diff suppressed because it is too large
server_addon/flame/client/ayon_flame/api/render_utils.py (new file, 185 lines)
@@ -0,0 +1,185 @@
import os
from xml.etree import ElementTree as ET
from ayon_core.lib import Logger

log = Logger.get_logger(__name__)


def export_clip(export_path, clip, preset_path, **kwargs):
    """Flame exporter wrapper

    Args:
        export_path (str): exporting directory path
        clip (PyClip): flame api object
        preset_path (str): full export path to xml file

    Kwargs:
        thumb_frame_number (int)[optional]: source frame number
        in_mark (int)[optional]: cut in mark
        out_mark (int)[optional]: cut out mark

    Raises:
        KeyError: Missing input kwarg `thumb_frame_number`
            in case `thumbnail` in `export_preset`
        FileExistsError: Missing export preset in shared folder
    """
    import flame

    in_mark = out_mark = None

    # Set exporter
    exporter = flame.PyExporter()
    exporter.foreground = True
    exporter.export_between_marks = True

    if kwargs.get("thumb_frame_number"):
        thumb_frame_number = kwargs["thumb_frame_number"]
        # make sure it exists in kwargs
        if not thumb_frame_number:
            raise KeyError(
                "Missing key `thumb_frame_number` in input kwargs")

        in_mark = int(thumb_frame_number)
        out_mark = int(thumb_frame_number) + 1

    elif kwargs.get("in_mark") and kwargs.get("out_mark"):
        in_mark = int(kwargs["in_mark"])
        out_mark = int(kwargs["out_mark"])
    else:
        exporter.export_between_marks = False

    try:
        # set in and out marks if they are available
        if in_mark and out_mark:
            clip.in_mark = in_mark
            clip.out_mark = out_mark

        # export with exporter
        exporter.export(clip, preset_path, export_path)
    finally:
        print('Exported: {} at {}-{}'.format(
            clip.name.get_value(),
            clip.in_mark,
            clip.out_mark
        ))


def get_preset_path_by_xml_name(xml_preset_name):
    def _search_path(root):
        output = []
        for root, _dirs, files in os.walk(root):
            for f in files:
                if f != xml_preset_name:
                    continue
                file_path = os.path.join(root, f)
                output.append(file_path)
        return output

    def _validate_results(results):
        if results and len(results) == 1:
            return results.pop()
        elif results and len(results) > 1:
            print((
                "More matching presets for `{}`: \n"
                "{}").format(xml_preset_name, results))
            return results.pop()
        else:
            return None

    from .utils import (
        get_flame_install_root,
        get_flame_version
    )

    # get actual flame version and install path
    _version = get_flame_version()["full"]
    _install_root = get_flame_install_root()

    # search path templates
    shared_search_root = "{install_root}/shared/export/presets"
    install_search_root = (
        "{install_root}/presets/{version}/export/presets/flame")

    # fill templates
    shared_search_root = shared_search_root.format(
        install_root=_install_root
    )
    install_search_root = install_search_root.format(
        install_root=_install_root,
        version=_version
    )

    # get search results
    shared_results = _search_path(shared_search_root)
    installed_results = _search_path(install_search_root)

    # first try to return shared results
    shared_preset_path = _validate_results(shared_results)

    if shared_preset_path:
        return os.path.dirname(shared_preset_path)

    # then try installed results
    installed_preset_path = _validate_results(installed_results)

    if installed_preset_path:
        return os.path.dirname(installed_preset_path)

    # if nothing found then return False
    return False


def modify_preset_file(xml_path, staging_dir, data):
    """Modify xml preset with input data

    Args:
        xml_path (str): path for input xml preset
        staging_dir (str): staging dir path
        data (dict): data where key is xmlTag and value as string

    Returns:
        str: path to the modified preset copy in the staging dir
    """
    # create temp path
    dirname, basename = os.path.split(xml_path)
    temp_path = os.path.join(staging_dir, basename)

    # change xml following data keys
    with open(xml_path, "r") as datafile:
        _root = ET.parse(datafile)

    for key, value in data.items():
        try:
            if "/" in key:
                if not key.startswith("./"):
                    key = ".//" + key

                split_key_path = key.split("/")
                element_key = split_key_path[-1]
                parent_obj_path = "/".join(split_key_path[:-1])

                parent_obj = _root.find(parent_obj_path)
                element_obj = parent_obj.find(element_key)
                # NOTE: compare against None; an ElementTree element
                # without children is falsy even when it exists
                if element_obj is None:
                    append_element(parent_obj, element_key, value)
            else:
                finds = _root.findall(".//{}".format(key))
                if not finds:
                    raise AttributeError
                for element in finds:
                    element.text = str(value)
        except AttributeError:
            log.warning(
                "Cannot create attribute: {}: {}. Skipping".format(
                    key, value
                ))
    _root.write(temp_path)

    return temp_path


def append_element(root_element_obj, key, value):
    new_element_obj = ET.Element(key)
    log.debug("__ new_element_obj: {}".format(new_element_obj))
    new_element_obj.text = str(value)
    root_element_obj.insert(0, new_element_obj)
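An illustrative call of modify_preset_file (tag names and paths are hypothetical, not from a real Flame preset); it would overwrite any frameRate element and the width element under video:

new_preset = modify_preset_file(
    "/opt/presets/OpenEXR (16-bit fp PIZ).xml",   # hypothetical preset path
    "/tmp/staging",
    {"frameRate": 25, "video/width": 1920},
)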
server_addon/flame/client/ayon_flame/api/scripts/wiretap_com.py (new file, 504 lines)
@@ -0,0 +1,504 @@
#!/usr/bin/env python2.7
# -*- coding: utf-8 -*-

from __future__ import absolute_import
import os
import sys
import subprocess
import json
import xml.dom.minidom as minidom
from copy import deepcopy
import datetime
from libwiretapPythonClientAPI import (  # noqa
    WireTapClientInit,
    WireTapClientUninit,
    WireTapNodeHandle,
    WireTapServerHandle,
    WireTapInt,
    WireTapStr
)


class WireTapCom(object):
    """
    Communicator class wrapper for talking to the WireTap db.

    This way we are able to set a new project with settings and the
    correct colorspace policy. We are also able to create a new user
    or get an actual user with a similar name (users are usually
    cloning their profiles and adding a date stamp into the suffix).
    """

    def __init__(self, host_name=None, volume_name=None, group_name=None):
        """Initialisation of the WireTap communication class

        Args:
            host_name (str, optional): Name of host server. Defaults to None.
            volume_name (str, optional): Name of volume. Defaults to None.
            group_name (str, optional): Name of user group. Defaults to None.
        """
        # set main attributes of server
        # if none are set, use the default installation
        self.host_name = host_name or "localhost"
        self.volume_name = volume_name or "stonefs"
        self.group_name = group_name or "staff"

        # wiretap tools dir path
        self.wiretap_tools_dir = os.getenv("AYON_WIRETAP_TOOLS")

        # initialize WireTap client
        WireTapClientInit()

        # add the server to shared variable
        self._server = WireTapServerHandle("{}:IFFFS".format(self.host_name))
        print("WireTap connected at '{}'...".format(
            self.host_name))

    def close(self):
        self._server = None
        WireTapClientUninit()
        print("WireTap closed...")

    def get_launch_args(
            self, project_name, project_data, user_name, *args, **kwargs):
        """Forming launch arguments for AYON launcher.

        Args:
            project_name (str): name of project
            project_data (dict): Flame compatible project data
            user_name (str): name of user

        Returns:
            list: arguments
        """

        workspace_name = kwargs.get("workspace_name")
        color_policy = kwargs.get("color_policy")

        project_exists = self._project_prep(project_name)
        if not project_exists:
            self._set_project_settings(project_name, project_data)
            self._set_project_colorspace(project_name, color_policy)

        user_name = self._user_prep(user_name)

        if workspace_name is None:
            # default workspace
            print("Using a default workspace")
            return [
                "--start-project={}".format(project_name),
                "--start-user={}".format(user_name),
                "--create-workspace"
            ]

        else:
            print(
                "Using a custom workspace '{}'".format(workspace_name))

            self._workspace_prep(project_name, workspace_name)
            return [
                "--start-project={}".format(project_name),
                "--start-user={}".format(user_name),
                "--create-workspace",
                "--start-workspace={}".format(workspace_name)
            ]
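    # Hypothetical usage from a prelaunch hook (values illustrative):
    #
    #   wiretap = WireTapCom(host_name="localhost", volume_name="stonefs")
    #   try:
    #       args = wiretap.get_launch_args(
    #           "my_project", {"FrameRate": "25 fps"}, "artist",
    #           workspace_name="compositing")
    #   finally:
    #       wiretap.close()
    #
    # The returned list becomes extra command-line arguments for Flame.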
    def _workspace_prep(self, project_name, workspace_name):
        """Preparing a workspace

        If it does not exist yet, it will be created.

        Args:
            project_name (str): project name
            workspace_name (str): workspace name

        Raises:
            AttributeError: unable to create workspace
        """
        workspace_exists = self._child_is_in_parent_path(
            "/projects/{}".format(project_name), workspace_name, "WORKSPACE"
        )
        if not workspace_exists:
            project = WireTapNodeHandle(
                self._server, "/projects/{}".format(project_name))

            workspace_node = WireTapNodeHandle()
            created_workspace = project.createNode(
                workspace_name, "WORKSPACE", workspace_node)

            if not created_workspace:
                raise AttributeError(
                    "Cannot create workspace `{}` in "
                    "project `{}`: `{}`".format(
                        workspace_name, project_name, project.lastError())
                )

            print(
                "Workspace `{}` is successfully created".format(
                    workspace_name))

    def _project_prep(self, project_name):
        """Preparing a project

        If it does not exist yet, it will be created.

        Args:
            project_name (str): project name

        Raises:
            AttributeError: unable to create project
        """
        # test if project exists
        project_exists = self._child_is_in_parent_path(
            "/projects", project_name, "PROJECT")

        if not project_exists:
            volumes = self._get_all_volumes()

            if len(volumes) == 0:
                raise AttributeError(
                    "Not able to create new project. No Volumes existing"
                )

            # check if the volume exists
            if self.volume_name not in volumes:
                raise AttributeError(
                    ("Volume '{}' does not exist in '{}'").format(
                        self.volume_name, volumes)
                )

            # form cmd arguments
            project_create_cmd = [
                os.path.join(
                    self.wiretap_tools_dir,
                    "wiretap_create_node"
                ),
                '-n',
                os.path.join("/volumes", self.volume_name),
                '-d',
                project_name,
                '-g',
            ]

            project_create_cmd.append(self.group_name)

            print(project_create_cmd)

            exit_code = subprocess.call(
                project_create_cmd,
                cwd=os.path.expanduser('~'),
                preexec_fn=_subprocess_preexec_fn
            )

            if exit_code != 0:
                raise RuntimeError("Cannot create project in flame db")

            print(
                "A new project '{}' is created.".format(project_name))
        return project_exists

    def _get_all_volumes(self):
        """Request all available volumes from WireTap

        Returns:
            list: all available volumes in server

        Raises:
            AttributeError: unable to get any volumes children from server
        """
        root = WireTapNodeHandle(self._server, "/volumes")
        children_num = WireTapInt(0)

        get_children_num = root.getNumChildren(children_num)
        if not get_children_num:
            raise AttributeError(
                "Cannot get number of volumes: {}".format(root.lastError())
            )

        volumes = []

        # go through all children and get volume names
        child_obj = WireTapNodeHandle()
        for child_idx in range(children_num):

            # get a child
            if not root.getChild(child_idx, child_obj):
                raise AttributeError(
                    "Unable to get child: {}".format(root.lastError()))

            node_name = WireTapStr()
            get_children_name = child_obj.getDisplayName(node_name)

            if not get_children_name:
                raise AttributeError(
                    "Unable to get child name: {}".format(
                        child_obj.lastError())
                )

            volumes.append(node_name.c_str())

        return volumes

    def _user_prep(self, user_name):
        """Ensure the user exists in the users stack

        Args:
            user_name (str): name of a user

        Raises:
            AttributeError: unable to create user
        """

        # get all used usernames in db
        used_names = self._get_usernames()
        print(">> used_names: {}".format(used_names))

        # filter only those which share the input user name
        filtered_users = [user for user in used_names if user_name in user]

        if filtered_users:
            # TODO: need to find the lastly created one, following a regex
            # pattern for the date used in the name
            return filtered_users.pop()

        # create new user name with date in suffix
        now = datetime.datetime.now()  # current date and time
        date = now.strftime("%Y%m%d")
        new_user_name = "{}_{}".format(user_name, date)
        print(new_user_name)

        if not self._child_is_in_parent_path("/users", new_user_name, "USER"):
            # Create the new user
            users = WireTapNodeHandle(self._server, "/users")

            user_node = WireTapNodeHandle()
            created_user = users.createNode(new_user_name, "USER", user_node)
            if not created_user:
                raise AttributeError(
                    "User {} cannot be created: {}".format(
                        new_user_name, users.lastError())
                )

            print("User `{}` is created".format(new_user_name))
        return new_user_name

    def _get_usernames(self):
        """Request all available users from WireTap

        Returns:
            list: all available user names

        Raises:
            AttributeError: there are no users in server
        """
        root = WireTapNodeHandle(self._server, "/users")
        children_num = WireTapInt(0)

        get_children_num = root.getNumChildren(children_num)
        if not get_children_num:
            raise AttributeError(
                "Cannot get number of users: {}".format(root.lastError())
            )

        usernames = []

        # go through all children and get user names
        child_obj = WireTapNodeHandle()
        for child_idx in range(children_num):

            # get a child
            if not root.getChild(child_idx, child_obj):
                raise AttributeError(
                    "Unable to get child: {}".format(root.lastError()))

            node_name = WireTapStr()
            get_children_name = child_obj.getDisplayName(node_name)

            if not get_children_name:
                raise AttributeError(
                    "Unable to get child name: {}".format(
                        child_obj.lastError())
                )

            usernames.append(node_name.c_str())

        return usernames

    def _child_is_in_parent_path(self, parent_path, child_name, child_type):
        """Check if a given child is in the parent path.

        Args:
            parent_path (str): db path to parent
            child_name (str): name of child
            child_type (str): type of child

        Raises:
            AttributeError: Not able to get number of children
            AttributeError: Not able to get children from parent
            AttributeError: Not able to get children name
            AttributeError: Not able to get children type

        Returns:
            bool: True if child is in parent path
        """
        parent = WireTapNodeHandle(self._server, parent_path)

        # iterate number of children
        children_num = WireTapInt(0)
        requested = parent.getNumChildren(children_num)
        if not requested:
            raise AttributeError((
                "Error: Cannot request number of "
                "children from the node {}. Make sure your "
                "wiretap service is running: {}").format(
                    parent_path, parent.lastError())
            )

        # iterate children
        child_obj = WireTapNodeHandle()
        for child_idx in range(children_num):
            if not parent.getChild(child_idx, child_obj):
                raise AttributeError(
                    "Cannot get child: {}".format(
                        parent.lastError()))

            node_name = WireTapStr()
            node_type = WireTapStr()

            if not child_obj.getDisplayName(node_name):
                raise AttributeError(
                    "Unable to get child name: %s" % child_obj.lastError()
                )
            if not child_obj.getNodeTypeStr(node_type):
                raise AttributeError(
                    "Unable to obtain child type: %s" % child_obj.lastError()
                )

            if (node_name.c_str() == child_name) and (
                    node_type.c_str() == child_type):
                return True

        return False

    def _set_project_settings(self, project_name, project_data):
        """Setting project attributes.

        Args:
            project_name (str): name of project
            project_data (dict): data with project attributes
                (flame compatible)

        Raises:
            AttributeError: Not able to set project attributes
        """
        # generate xml from the project_data dict
        _xml = "<Project>"
        for key, value in project_data.items():
            _xml += "<{}>{}</{}>".format(key, value, key)
        _xml += "</Project>"

        pretty_xml = minidom.parseString(_xml).toprettyxml()
        print("__ xml: {}".format(pretty_xml))

        # set project data to wiretap
        project_node = WireTapNodeHandle(
            self._server, "/projects/{}".format(project_name))

        if not project_node.setMetaData("XML", _xml):
|
||||
raise AttributeError(
|
||||
"Not able to set project attributes {}. Error: {}".format(
|
||||
project_name, project_node.lastError())
|
||||
)
|
||||
|
||||
print("Project settings successfully set.")
|
||||
|
||||
def _set_project_colorspace(self, project_name, color_policy):
|
||||
"""Set project's colorspace policy.
|
||||
|
||||
Args:
|
||||
project_name (str): name of project
|
||||
color_policy (str): name of policy
|
||||
|
||||
Raises:
|
||||
RuntimeError: Not able to set colorspace policy
|
||||
"""
|
||||
color_policy = color_policy or "Legacy"
|
||||
|
||||
# check if the colour policy in custom dir
|
||||
if "/" in color_policy:
|
||||
# if unlikelly full path was used make it redundant
|
||||
color_policy = color_policy.replace("/syncolor/policies/", "")
|
||||
# expecting input is `Shared/NameOfPolicy`
|
||||
color_policy = "/syncolor/policies/{}".format(
|
||||
color_policy)
|
||||
else:
|
||||
color_policy = "/syncolor/policies/Autodesk/{}".format(
|
||||
color_policy)
|
||||
|
||||
# create arguments
|
||||
project_colorspace_cmd = [
|
||||
os.path.join(
|
||||
self.wiretap_tools_dir,
|
||||
"wiretap_duplicate_node"
|
||||
),
|
||||
"-s",
|
||||
color_policy,
|
||||
"-n",
|
||||
"/projects/{}/syncolor".format(project_name)
|
||||
]
|
||||
|
||||
print(project_colorspace_cmd)
|
||||
|
||||
exit_code = subprocess.call(
|
||||
project_colorspace_cmd,
|
||||
cwd=os.path.expanduser('~'),
|
||||
preexec_fn=_subprocess_preexec_fn
|
||||
)
|
||||
|
||||
if exit_code != 0:
|
||||
RuntimeError("Cannot set colorspace {} on project {}".format(
|
||||
color_policy, project_name
|
||||
))
|
||||
|
||||
|
||||
def _subprocess_preexec_fn():
|
||||
""" Helper function
|
||||
|
||||
Setting permission mask to 0777
|
||||
"""
|
||||
os.setpgrp()
|
||||
os.umask(0o000)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
# get json exchange data
|
||||
json_path = sys.argv[-1]
|
||||
json_data = open(json_path).read()
|
||||
in_data = json.loads(json_data)
|
||||
out_data = deepcopy(in_data)
|
||||
|
||||
# get main server attributes
|
||||
host_name = in_data.pop("host_name")
|
||||
volume_name = in_data.pop("volume_name")
|
||||
group_name = in_data.pop("group_name")
|
||||
|
||||
# initialize class
|
||||
wiretap_handler = WireTapCom(host_name, volume_name, group_name)
|
||||
|
||||
try:
|
||||
app_args = wiretap_handler.get_launch_args(
|
||||
project_name=in_data.pop("project_name"),
|
||||
project_data=in_data.pop("project_data"),
|
||||
user_name=in_data.pop("user_name"),
|
||||
**in_data
|
||||
)
|
||||
finally:
|
||||
wiretap_handler.close()
|
||||
|
||||
# set returned args back to out data
|
||||
out_data.update({
|
||||
"app_args": app_args
|
||||
})
|
||||
|
||||
# write it out back to the exchange json file
|
||||
with open(json_path, "w") as file_stream:
|
||||
json.dump(out_data, file_stream, indent=4)
|
||||
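For orientation, a minimal sketch of the JSON exchange file consumed by the
`__main__` block above (all values are hypothetical; the real payload is
produced by the FlamePrelaunch hook further below, and any extra keys such
as `color_policy` are forwarded to `get_launch_args` as keyword arguments):

{
    "host_name": "flame-ws-01",
    "volume_name": "stonefs",
    "group_name": "staff",
    "project_name": "my_project",
    "user_name": "jane_doe",
    "project_data": {
        "Name": "my_project",
        "FrameWidth": 1920,
        "FrameHeight": 1080,
        "FrameRate": "25 fps"
    },
    "color_policy": "ACES 1.1"
}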
143
server_addon/flame/client/ayon_flame/api/utils.py
Normal file

@ -0,0 +1,143 @@
"""
|
||||
Flame utils for syncing scripts
|
||||
"""
|
||||
|
||||
import os
|
||||
import shutil
|
||||
from ayon_core.lib import Logger
|
||||
from ayon_flame import FLAME_ADDON_ROOT
|
||||
|
||||
log = Logger.get_logger(__name__)
|
||||
|
||||
|
||||
def _sync_utility_scripts(env=None):
|
||||
""" Synchronizing basic utlility scripts for flame.
|
||||
|
||||
To be able to run start AYON within Flame we have to copy
|
||||
all utility_scripts and additional FLAME_SCRIPT_DIR into
|
||||
`/opt/Autodesk/shared/python`. This will be always synchronizing those
|
||||
folders.
|
||||
"""
|
||||
|
||||
env = env or os.environ
|
||||
|
||||
# initiate inputs
|
||||
scripts = {}
|
||||
fsd_env = env.get("FLAME_SCRIPT_DIRS", "")
|
||||
flame_shared_dir = "/opt/Autodesk/shared/python"
|
||||
|
||||
fsd_paths = [os.path.join(
|
||||
FLAME_ADDON_ROOT,
|
||||
"api",
|
||||
"utility_scripts"
|
||||
)]
|
||||
|
||||
# collect script dirs
|
||||
log.info("FLAME_SCRIPT_DIRS: `{fsd_env}`".format(**locals()))
|
||||
log.info("fsd_paths: `{fsd_paths}`".format(**locals()))
|
||||
|
||||
# add application environment setting for FLAME_SCRIPT_DIR
|
||||
# to script path search
|
||||
for _dirpath in fsd_env.split(os.pathsep):
|
||||
if not os.path.isdir(_dirpath):
|
||||
log.warning("Path is not a valid dir: `{_dirpath}`".format(
|
||||
**locals()))
|
||||
continue
|
||||
fsd_paths.append(_dirpath)
|
||||
|
||||
# collect scripts from dirs
|
||||
for path in fsd_paths:
|
||||
scripts.update({path: os.listdir(path)})
|
||||
|
||||
remove_black_list = []
|
||||
for _k, s_list in scripts.items():
|
||||
remove_black_list += s_list
|
||||
|
||||
log.info("remove_black_list: `{remove_black_list}`".format(**locals()))
|
||||
log.info("Additional Flame script paths: `{fsd_paths}`".format(**locals()))
|
||||
log.info("Flame Scripts: `{scripts}`".format(**locals()))
|
||||
|
||||
# make sure no script file is in folder
|
||||
if next(iter(os.listdir(flame_shared_dir)), None):
|
||||
for _itm in os.listdir(flame_shared_dir):
|
||||
skip = False
|
||||
|
||||
# skip all scripts and folders which are not maintained
|
||||
if _itm not in remove_black_list:
|
||||
skip = True
|
||||
|
||||
# do not skip if pyc in extension
|
||||
if not os.path.isdir(_itm) and "pyc" in os.path.splitext(_itm)[-1]:
|
||||
skip = False
|
||||
|
||||
# continue if skip in true
|
||||
if skip:
|
||||
continue
|
||||
|
||||
path = os.path.join(flame_shared_dir, _itm)
|
||||
log.info("Removing `{path}`...".format(**locals()))
|
||||
|
||||
try:
|
||||
if os.path.isdir(path):
|
||||
shutil.rmtree(path, onerror=None)
|
||||
else:
|
||||
os.remove(path)
|
||||
except PermissionError as msg:
|
||||
log.warning(
|
||||
"Not able to remove: `{}`, Problem with: `{}`".format(
|
||||
path,
|
||||
msg
|
||||
)
|
||||
)
|
||||
|
||||
# copy scripts into Resolve's utility scripts dir
|
||||
for dirpath, scriptlist in scripts.items():
|
||||
# directory and scripts list
|
||||
for _script in scriptlist:
|
||||
# script in script list
|
||||
src = os.path.join(dirpath, _script)
|
||||
dst = os.path.join(flame_shared_dir, _script)
|
||||
log.info("Copying `{src}` to `{dst}`...".format(**locals()))
|
||||
|
||||
try:
|
||||
if os.path.isdir(src):
|
||||
shutil.copytree(
|
||||
src, dst, symlinks=False,
|
||||
ignore=None, ignore_dangling_symlinks=False
|
||||
)
|
||||
else:
|
||||
shutil.copy2(src, dst)
|
||||
except (PermissionError, FileExistsError) as msg:
|
||||
log.warning(
|
||||
"Not able to copy to: `{}`, Problem with: `{}`".format(
|
||||
dst,
|
||||
msg
|
||||
)
|
||||
)
|
||||
|
||||
|
||||
def setup(env=None):
|
||||
""" Wrapper installer started from
|
||||
`flame/hooks/pre_flame_setup.py`
|
||||
"""
|
||||
env = env or os.environ
|
||||
|
||||
# synchronize resolve utility scripts
|
||||
_sync_utility_scripts(env)
|
||||
|
||||
log.info("Flame AYON wrapper has been installed")
|
||||
|
||||
|
||||
def get_flame_version():
|
||||
import flame
|
||||
|
||||
return {
|
||||
"full": flame.get_version(),
|
||||
"major": flame.get_version_major(),
|
||||
"minor": flame.get_version_minor(),
|
||||
"patch": flame.get_version_patch()
|
||||
}
|
||||
|
||||
|
||||
def get_flame_install_root():
|
||||
return "/opt/Autodesk"
|
||||
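A minimal sketch of how this module is meant to be driven, assuming it is
importable as `ayon_flame.api.utils` (the actual call site lives in
`pre_flame_setup.py`; `launch_env` here stands in for the hook's
environment mapping):

from ayon_flame.api import utils as flame_utils

def ensure_flame_scripts(launch_env):
    # copies bundled utility_scripts plus any FLAME_SCRIPT_DIRS content
    # into /opt/Autodesk/shared/python before Flame starts
    flame_utils.setup(env=launch_env)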
37
server_addon/flame/client/ayon_flame/api/workio.py
Normal file

@ -0,0 +1,37 @@
"""Host API required Work Files tool"""
|
||||
|
||||
import os
|
||||
from ayon_core.lib import Logger
|
||||
# from .. import (
|
||||
# get_project_manager,
|
||||
# get_current_project
|
||||
# )
|
||||
|
||||
|
||||
log = Logger.get_logger(__name__)
|
||||
|
||||
exported_projet_ext = ".otoc"
|
||||
|
||||
|
||||
def file_extensions():
|
||||
return [exported_projet_ext]
|
||||
|
||||
|
||||
def has_unsaved_changes():
|
||||
pass
|
||||
|
||||
|
||||
def save_file(filepath):
|
||||
pass
|
||||
|
||||
|
||||
def open_file(filepath):
|
||||
pass
|
||||
|
||||
|
||||
def current_file():
|
||||
pass
|
||||
|
||||
|
||||
def work_root(session):
|
||||
return os.path.normpath(session["AYON_WORKDIR"]).replace("\\", "/")
|
||||
239
server_addon/flame/client/ayon_flame/hooks/pre_flame_setup.py
Normal file

@ -0,0 +1,239 @@
import os
import json
import tempfile
import contextlib
import socket
from pprint import pformat

from ayon_core.lib import (
    get_ayon_username,
    run_subprocess,
)
from ayon_applications import PreLaunchHook, LaunchTypes
from ayon_flame import FLAME_ADDON_ROOT


class FlamePrelaunch(PreLaunchHook):
    """Flame prelaunch hook

    Will make sure flame_script_dirs are copied to user's folder defined
    in environment var FLAME_SCRIPT_DIRS.
    """
    app_groups = {"flame"}
    permissions = 0o777

    wtc_script_path = os.path.join(
        FLAME_ADDON_ROOT, "api", "scripts", "wiretap_com.py"
    )
    launch_types = {LaunchTypes.local}

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.signature = "( {} )".format(self.__class__.__name__)

    def execute(self):
        """Hook entry method."""
        _env = self.launch_context.env
        self.flame_python_exe = _env["AYON_FLAME_PYTHON_EXEC"]
        self.flame_pythonpath = _env["AYON_FLAME_PYTHONPATH"]

        project_entity = self.data["project_entity"]
        project_name = project_entity["name"]
        volume_name = _env.get("FLAME_WIRETAP_VOLUME")

        # get image io
        project_settings = self.data["project_settings"]

        imageio_flame = project_settings["flame"]["imageio"]

        # Check whether 'enabled' key from host imageio settings exists
        # so we can tell if host is using the new colormanagement framework.
        # If the 'enabled' isn't found we want 'colormanaged' set to True
        # because prior to the key existing we always did colormanagement for
        # Flame
        colormanaged = imageio_flame.get("enabled")
        # if key was not found, set to True
        # ensuring backward compatibility
        if colormanaged is None:
            colormanaged = True

        # get user name and host name
        user_name = get_ayon_username()
        user_name = user_name.replace(".", "_")

        hostname = socket.gethostname()  # not returning wiretap host name

        self.log.debug("Collected user \"{}\"".format(user_name))
        self.log.info(pformat(project_entity))
        project_attribs = project_entity["attrib"]
        width = project_attribs["resolutionWidth"]
        height = project_attribs["resolutionHeight"]
        fps = float(project_attribs["fps"])

        project_data = {
            "Name": project_entity["name"],
            "Nickname": project_entity["code"],
            "Description": "Created by AYON",
            "SetupDir": project_entity["name"],
            "FrameWidth": int(width),
            "FrameHeight": int(height),
            "AspectRatio": float(
                (width / height) * project_attribs["pixelAspect"]
            ),
            "FrameRate": self._get_flame_fps(fps)
        }

        data_to_script = {
            # from settings
            "host_name": _env.get("FLAME_WIRETAP_HOSTNAME") or hostname,
            "volume_name": volume_name,
            "group_name": _env.get("FLAME_WIRETAP_GROUP"),

            # from project
            "project_name": project_name,
            "user_name": user_name,
            "project_data": project_data
        }

        # add color management data
        if colormanaged:
            project_data.update({
                "FrameDepth": str(imageio_flame["project"]["frameDepth"]),
                "FieldDominance": str(
                    imageio_flame["project"]["fieldDominance"])
            })
            data_to_script["color_policy"] = str(
                imageio_flame["project"]["colourPolicy"])

        self.log.info(pformat(dict(_env)))
        self.log.info(pformat(data_to_script))

        # add to python path from settings
        self._add_pythonpath()

        app_arguments = self._get_launch_arguments(data_to_script)

        # fix project data permission issue
        self._fix_permissions(project_name, volume_name)

        self.launch_context.launch_args.extend(app_arguments)

    def _fix_permissions(self, project_name, volume_name):
        """Workaround for project data permissions.

        Reported issue: when a project is created locally on one machine,
        it is impossible to migrate it to another machine. Autodesk Flame
        creates some unmanageable files which need to be opened to 0o777.

        Args:
            project_name (str): project name
            volume_name (str): studio volume
        """
        dirs_to_modify = [
            "/usr/discreet/project/{}".format(project_name),
            "/opt/Autodesk/clip/{}/{}.prj".format(volume_name, project_name),
            "/usr/discreet/clip/{}/{}.prj".format(volume_name, project_name)
        ]

        for dirtm in dirs_to_modify:
            for root, dirs, files in os.walk(dirtm):
                try:
                    for name in set(dirs) | set(files):
                        path = os.path.join(root, name)
                        st = os.stat(path)
                        if (st.st_mode & 0o777) != self.permissions:
                            os.chmod(path, self.permissions)

                except OSError as exc:
                    self.log.warning("Not able to open files: {}".format(exc))

    def _get_flame_fps(self, fps_num):
        fps_table = {
            float(23.976): "23.976 fps",
            int(25): "25 fps",
            int(24): "24 fps",
            float(29.97): "29.97 fps DF",
            int(30): "30 fps",
            int(50): "50 fps",
            float(59.94): "59.94 fps DF",
            int(60): "60 fps"
        }

        match_key = min(fps_table.keys(), key=lambda x: abs(x - fps_num))

        try:
            return fps_table[match_key]
        except KeyError as msg:
            raise KeyError((
                "Missing FPS key in conversion table. "
                "Following keys are available: {}".format(fps_table.keys())
            )) from msg

    def _add_pythonpath(self):
        # PYTHONPATH might not be set at all
        pythonpath = self.launch_context.env.get("PYTHONPATH") or ""

        # separate it explicitly by `;` that is what we use in settings
        new_pythonpath = self.flame_pythonpath.split(os.pathsep)
        new_pythonpath += pythonpath.split(os.pathsep)

        self.launch_context.env["PYTHONPATH"] = os.pathsep.join(new_pythonpath)

    def _get_launch_arguments(self, script_data):
        # Dump data to string
        dumped_script_data = json.dumps(script_data)

        with make_temp_file(dumped_script_data) as tmp_json_path:
            # Prepare subprocess arguments
            args = [
                self.flame_python_exe.format(
                    **self.launch_context.env
                ),
                self.wtc_script_path,
                tmp_json_path
            ]
            self.log.info("Executing: {}".format(" ".join(args)))

            process_kwargs = {
                "logger": self.log,
                "env": self.launch_context.env
            }

            run_subprocess(args, **process_kwargs)

            # process returned json file to pass launch args
            return_json_data = open(tmp_json_path).read()
            returned_data = json.loads(return_json_data)
            app_args = returned_data.get("app_args")
            self.log.info("____ app_args: `{}`".format(app_args))

            if not app_args:
                raise RuntimeError("App arguments were not solved")

        return app_args


@contextlib.contextmanager
def make_temp_file(data):
    try:
        # Store dumped json to temporary file
        temporary_json_file = tempfile.NamedTemporaryFile(
            mode="w", suffix=".json", delete=False
        )
        temporary_json_file.write(data)
        temporary_json_file.close()
        temporary_json_filepath = temporary_json_file.name.replace(
            "\\", "/"
        )

        yield temporary_json_filepath

    except IOError as _error:
        raise IOError(
            "Not able to create temp json file: {}".format(
                _error
            )
        )

    finally:
        # Remove the temporary json
        os.remove(temporary_json_filepath)
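A quick sketch of the `make_temp_file` round trip used by
`_get_launch_arguments` above (payload is illustrative):

# the hook writes JSON in, the wiretap script mutates the same file,
# and the hook reads the result back before the file is removed
payload = json.dumps({"project_name": "demo"})
with make_temp_file(payload) as tmp_path:
    # a subprocess would rewrite tmp_path here
    result = json.loads(open(tmp_path).read())
# the temporary file is deleted when the context exits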
0
server_addon/flame/client/ayon_flame/otio/__init__.py
Normal file

624
server_addon/flame/client/ayon_flame/otio/flame_export.py
Normal file

@ -0,0 +1,624 @@
""" compatibility OpenTimelineIO 0.12.0 and newer
|
||||
"""
|
||||
|
||||
import os
|
||||
import re
|
||||
import json
|
||||
import logging
|
||||
import opentimelineio as otio
|
||||
from . import utils
|
||||
|
||||
import flame
|
||||
from pprint import pformat
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
|
||||
TRACK_TYPES = {
|
||||
"video": otio.schema.TrackKind.Video,
|
||||
"audio": otio.schema.TrackKind.Audio
|
||||
}
|
||||
MARKERS_COLOR_MAP = {
|
||||
(1.0, 0.0, 0.0): otio.schema.MarkerColor.RED,
|
||||
(1.0, 0.5, 0.0): otio.schema.MarkerColor.ORANGE,
|
||||
(1.0, 1.0, 0.0): otio.schema.MarkerColor.YELLOW,
|
||||
(1.0, 0.5, 1.0): otio.schema.MarkerColor.PINK,
|
||||
(1.0, 1.0, 1.0): otio.schema.MarkerColor.WHITE,
|
||||
(0.0, 1.0, 0.0): otio.schema.MarkerColor.GREEN,
|
||||
(0.0, 1.0, 1.0): otio.schema.MarkerColor.CYAN,
|
||||
(0.0, 0.0, 1.0): otio.schema.MarkerColor.BLUE,
|
||||
(0.5, 0.0, 0.5): otio.schema.MarkerColor.PURPLE,
|
||||
(0.5, 0.0, 1.0): otio.schema.MarkerColor.MAGENTA,
|
||||
(0.0, 0.0, 0.0): otio.schema.MarkerColor.BLACK
|
||||
}
|
||||
MARKERS_INCLUDE = True
|
||||
|
||||
|
||||
class CTX:
|
||||
_fps = None
|
||||
_tl_start_frame = None
|
||||
project = None
|
||||
clips = None
|
||||
|
||||
@classmethod
|
||||
def set_fps(cls, new_fps):
|
||||
if not isinstance(new_fps, float):
|
||||
raise TypeError("Invalid fps type {}".format(type(new_fps)))
|
||||
if cls._fps != new_fps:
|
||||
cls._fps = new_fps
|
||||
|
||||
@classmethod
|
||||
def get_fps(cls):
|
||||
return cls._fps
|
||||
|
||||
@classmethod
|
||||
def set_tl_start_frame(cls, number):
|
||||
if not isinstance(number, int):
|
||||
raise TypeError("Invalid timeline start frame type {}".format(
|
||||
type(number)))
|
||||
if cls._tl_start_frame != number:
|
||||
cls._tl_start_frame = number
|
||||
|
||||
@classmethod
|
||||
def get_tl_start_frame(cls):
|
||||
return cls._tl_start_frame
|
||||
|
||||
|
||||
def flatten(_list):
|
||||
for item in _list:
|
||||
if isinstance(item, (list, tuple)):
|
||||
for sub_item in flatten(item):
|
||||
yield sub_item
|
||||
else:
|
||||
yield item
|
||||
|
||||
|
||||
def get_current_flame_project():
|
||||
project = flame.project.current_project
|
||||
return project
|
||||
|
||||
|
||||
def create_otio_rational_time(frame, fps):
|
||||
return otio.opentime.RationalTime(
|
||||
float(frame),
|
||||
float(fps)
|
||||
)
|
||||
|
||||
|
||||
def create_otio_time_range(start_frame, frame_duration, fps):
|
||||
return otio.opentime.TimeRange(
|
||||
start_time=create_otio_rational_time(start_frame, fps),
|
||||
duration=create_otio_rational_time(frame_duration, fps)
|
||||
)
|
||||
|
||||
|
||||
def _get_metadata(item):
|
||||
if hasattr(item, 'metadata'):
|
||||
return dict(item.metadata) if item.metadata else {}
|
||||
return {}
|
||||
|
||||
|
||||
def create_time_effects(otio_clip, speed):
|
||||
otio_effect = None
|
||||
|
||||
# retime on track item
|
||||
if speed != 1.:
|
||||
# make effect
|
||||
otio_effect = otio.schema.LinearTimeWarp()
|
||||
otio_effect.name = "Speed"
|
||||
otio_effect.time_scalar = speed
|
||||
otio_effect.metadata = {}
|
||||
|
||||
# freeze frame effect
|
||||
if speed == 0.:
|
||||
otio_effect = otio.schema.FreezeFrame()
|
||||
otio_effect.name = "FreezeFrame"
|
||||
otio_effect.metadata = {}
|
||||
|
||||
if otio_effect:
|
||||
# add otio effect to clip effects
|
||||
otio_clip.effects.append(otio_effect)
|
||||
|
||||
|
||||
def _get_marker_color(flame_colour):
|
||||
# clamp colors to closes half numbers
|
||||
_flame_colour = [
|
||||
(lambda x: round(x * 2) / 2)(c)
|
||||
for c in flame_colour]
|
||||
|
||||
for color, otio_color_type in MARKERS_COLOR_MAP.items():
|
||||
if _flame_colour == list(color):
|
||||
return otio_color_type
|
||||
|
||||
return otio.schema.MarkerColor.RED
|
||||
|
||||
|
||||
def _get_flame_markers(item):
|
||||
output_markers = []
|
||||
|
||||
time_in = item.record_in.relative_frame
|
||||
|
||||
for marker in item.markers:
|
||||
log.debug(marker)
|
||||
start_frame = marker.location.get_value().relative_frame
|
||||
|
||||
start_frame = (start_frame - time_in) + 1
|
||||
|
||||
marker_data = {
|
||||
"name": marker.name.get_value(),
|
||||
"duration": marker.duration.get_value().relative_frame,
|
||||
"comment": marker.comment.get_value(),
|
||||
"start_frame": start_frame,
|
||||
"colour": marker.colour.get_value()
|
||||
}
|
||||
|
||||
output_markers.append(marker_data)
|
||||
|
||||
return output_markers
|
||||
|
||||
|
||||
def create_otio_markers(otio_item, item):
|
||||
markers = _get_flame_markers(item)
|
||||
for marker in markers:
|
||||
frame_rate = CTX.get_fps()
|
||||
|
||||
marked_range = otio.opentime.TimeRange(
|
||||
start_time=otio.opentime.RationalTime(
|
||||
marker["start_frame"],
|
||||
frame_rate
|
||||
),
|
||||
duration=otio.opentime.RationalTime(
|
||||
marker["duration"],
|
||||
frame_rate
|
||||
)
|
||||
)
|
||||
|
||||
# testing the comment if it is not containing json string
|
||||
check_if_json = re.findall(
|
||||
re.compile(r"[{:}]"),
|
||||
marker["comment"]
|
||||
)
|
||||
|
||||
# to identify this as json, at least 3 items in the list should
|
||||
# be present ["{", ":", "}"]
|
||||
metadata = {}
|
||||
if len(check_if_json) >= 3:
|
||||
# this is json string
|
||||
try:
|
||||
# capture exceptions which are related to strings only
|
||||
metadata.update(
|
||||
json.loads(marker["comment"])
|
||||
)
|
||||
except ValueError as msg:
|
||||
log.error("Marker json conversion: {}".format(msg))
|
||||
else:
|
||||
metadata["comment"] = marker["comment"]
|
||||
|
||||
otio_marker = otio.schema.Marker(
|
||||
name=marker["name"],
|
||||
color=_get_marker_color(
|
||||
marker["colour"]),
|
||||
marked_range=marked_range,
|
||||
metadata=metadata
|
||||
)
|
||||
|
||||
otio_item.markers.append(otio_marker)
|
||||
|
||||
|
||||
def create_otio_reference(clip_data, fps=None):
|
||||
metadata = _get_metadata(clip_data)
|
||||
duration = int(clip_data["source_duration"])
|
||||
|
||||
# get file info for path and start frame
|
||||
frame_start = 0
|
||||
fps = fps or CTX.get_fps()
|
||||
|
||||
path = clip_data["fpath"]
|
||||
|
||||
file_name = os.path.basename(path)
|
||||
file_head, extension = os.path.splitext(file_name)
|
||||
|
||||
# get padding and other file infos
|
||||
log.debug("_ path: {}".format(path))
|
||||
|
||||
otio_ex_ref_item = None
|
||||
|
||||
is_sequence = frame_number = utils.get_frame_from_filename(file_name)
|
||||
if is_sequence:
|
||||
file_head = file_name.split(frame_number)[:-1]
|
||||
frame_start = int(frame_number)
|
||||
padding = len(frame_number)
|
||||
|
||||
metadata.update({
|
||||
"isSequence": True,
|
||||
"padding": padding
|
||||
})
|
||||
|
||||
# if it is file sequence try to create `ImageSequenceReference`
|
||||
# the OTIO might not be compatible so return nothing and do it old way
|
||||
try:
|
||||
dirname = os.path.dirname(path)
|
||||
otio_ex_ref_item = otio.schema.ImageSequenceReference(
|
||||
target_url_base=dirname + os.sep,
|
||||
name_prefix=file_head,
|
||||
name_suffix=extension,
|
||||
start_frame=frame_start,
|
||||
frame_zero_padding=padding,
|
||||
rate=fps,
|
||||
available_range=create_otio_time_range(
|
||||
frame_start,
|
||||
duration,
|
||||
fps
|
||||
)
|
||||
)
|
||||
except AttributeError:
|
||||
pass
|
||||
|
||||
if not otio_ex_ref_item:
|
||||
dirname, file_name = os.path.split(path)
|
||||
file_name = utils.get_reformatted_filename(file_name, padded=False)
|
||||
reformated_path = os.path.join(dirname, file_name)
|
||||
# in case old OTIO or video file create `ExternalReference`
|
||||
otio_ex_ref_item = otio.schema.ExternalReference(
|
||||
target_url=reformated_path,
|
||||
available_range=create_otio_time_range(
|
||||
frame_start,
|
||||
duration,
|
||||
fps
|
||||
)
|
||||
)
|
||||
|
||||
# add metadata to otio item
|
||||
add_otio_metadata(otio_ex_ref_item, clip_data, **metadata)
|
||||
|
||||
return otio_ex_ref_item
|
||||
|
||||
|
||||
def create_otio_clip(clip_data):
|
||||
from ayon_flame.api import MediaInfoFile, TimeEffectMetadata
|
||||
|
||||
segment = clip_data["PySegment"]
|
||||
|
||||
# calculate source in
|
||||
media_info = MediaInfoFile(clip_data["fpath"], logger=log)
|
||||
media_timecode_start = media_info.start_frame
|
||||
media_fps = media_info.fps
|
||||
|
||||
# Timewarp metadata
|
||||
tw_data = TimeEffectMetadata(segment, logger=log).data
|
||||
log.debug("__ tw_data: {}".format(tw_data))
|
||||
|
||||
# define first frame
|
||||
file_first_frame = utils.get_frame_from_filename(
|
||||
clip_data["fpath"])
|
||||
if file_first_frame:
|
||||
file_first_frame = int(file_first_frame)
|
||||
|
||||
first_frame = media_timecode_start or file_first_frame or 0
|
||||
|
||||
_clip_source_in = int(clip_data["source_in"])
|
||||
_clip_source_out = int(clip_data["source_out"])
|
||||
_clip_record_in = clip_data["record_in"]
|
||||
_clip_record_out = clip_data["record_out"]
|
||||
_clip_record_duration = int(clip_data["record_duration"])
|
||||
|
||||
log.debug("_ file_first_frame: {}".format(file_first_frame))
|
||||
log.debug("_ first_frame: {}".format(first_frame))
|
||||
log.debug("_ _clip_source_in: {}".format(_clip_source_in))
|
||||
log.debug("_ _clip_source_out: {}".format(_clip_source_out))
|
||||
log.debug("_ _clip_record_in: {}".format(_clip_record_in))
|
||||
log.debug("_ _clip_record_out: {}".format(_clip_record_out))
|
||||
|
||||
# first solve if the reverse timing
|
||||
speed = 1
|
||||
if clip_data["source_in"] > clip_data["source_out"]:
|
||||
source_in = _clip_source_out - int(first_frame)
|
||||
source_out = _clip_source_in - int(first_frame)
|
||||
speed = -1
|
||||
else:
|
||||
source_in = _clip_source_in - int(first_frame)
|
||||
source_out = _clip_source_out - int(first_frame)
|
||||
|
||||
log.debug("_ source_in: {}".format(source_in))
|
||||
log.debug("_ source_out: {}".format(source_out))
|
||||
|
||||
if file_first_frame:
|
||||
log.debug("_ file_source_in: {}".format(
|
||||
file_first_frame + source_in))
|
||||
log.debug("_ file_source_in: {}".format(
|
||||
file_first_frame + source_out))
|
||||
|
||||
source_duration = (source_out - source_in + 1)
|
||||
|
||||
# secondly check if any change of speed
|
||||
if source_duration != _clip_record_duration:
|
||||
retime_speed = float(source_duration) / float(_clip_record_duration)
|
||||
log.debug("_ calculated speed: {}".format(retime_speed))
|
||||
speed *= retime_speed
|
||||
|
||||
# get speed from metadata if available
|
||||
if tw_data.get("speed"):
|
||||
speed = tw_data["speed"]
|
||||
log.debug("_ metadata speed: {}".format(speed))
|
||||
|
||||
log.debug("_ speed: {}".format(speed))
|
||||
log.debug("_ source_duration: {}".format(source_duration))
|
||||
log.debug("_ _clip_record_duration: {}".format(_clip_record_duration))
|
||||
|
||||
# create media reference
|
||||
media_reference = create_otio_reference(
|
||||
clip_data, media_fps)
|
||||
|
||||
# creatae source range
|
||||
source_range = create_otio_time_range(
|
||||
source_in,
|
||||
_clip_record_duration,
|
||||
CTX.get_fps()
|
||||
)
|
||||
|
||||
otio_clip = otio.schema.Clip(
|
||||
name=clip_data["segment_name"],
|
||||
source_range=source_range,
|
||||
media_reference=media_reference
|
||||
)
|
||||
|
||||
# Add markers
|
||||
if MARKERS_INCLUDE:
|
||||
create_otio_markers(otio_clip, segment)
|
||||
|
||||
if speed != 1:
|
||||
create_time_effects(otio_clip, speed)
|
||||
|
||||
return otio_clip
|
||||
|
||||
|
||||
def create_otio_gap(gap_start, clip_start, tl_start_frame, fps):
|
||||
return otio.schema.Gap(
|
||||
source_range=create_otio_time_range(
|
||||
gap_start,
|
||||
(clip_start - tl_start_frame) - gap_start,
|
||||
fps
|
||||
)
|
||||
)
|
||||
|
||||
|
||||
def _get_colourspace_policy():
|
||||
|
||||
output = {}
|
||||
# get policies project path
|
||||
policy_dir = "/opt/Autodesk/project/{}/synColor/policy".format(
|
||||
CTX.project.name
|
||||
)
|
||||
log.debug(policy_dir)
|
||||
policy_fp = os.path.join(policy_dir, "policy.cfg")
|
||||
|
||||
if not os.path.exists(policy_fp):
|
||||
return output
|
||||
|
||||
with open(policy_fp) as file:
|
||||
dict_conf = dict(line.strip().split(' = ', 1) for line in file)
|
||||
output.update(
|
||||
{"openpype.flame.{}".format(k): v for k, v in dict_conf.items()}
|
||||
)
|
||||
return output
|
||||
|
||||
|
||||
def _create_otio_timeline(sequence):
|
||||
|
||||
metadata = _get_metadata(sequence)
|
||||
|
||||
# find colour policy files and add them to metadata
|
||||
colorspace_policy = _get_colourspace_policy()
|
||||
metadata.update(colorspace_policy)
|
||||
|
||||
metadata.update({
|
||||
"openpype.timeline.width": int(sequence.width),
|
||||
"openpype.timeline.height": int(sequence.height),
|
||||
"openpype.timeline.pixelAspect": 1
|
||||
})
|
||||
|
||||
rt_start_time = create_otio_rational_time(
|
||||
CTX.get_tl_start_frame(), CTX.get_fps())
|
||||
|
||||
return otio.schema.Timeline(
|
||||
name=str(sequence.name)[1:-1],
|
||||
global_start_time=rt_start_time,
|
||||
metadata=metadata
|
||||
)
|
||||
|
||||
|
||||
def create_otio_track(track_type, track_name):
|
||||
return otio.schema.Track(
|
||||
name=track_name,
|
||||
kind=TRACK_TYPES[track_type]
|
||||
)
|
||||
|
||||
|
||||
def add_otio_gap(clip_data, otio_track, prev_out):
|
||||
gap_length = clip_data["record_in"] - prev_out
|
||||
if prev_out != 0:
|
||||
gap_length -= 1
|
||||
|
||||
gap = otio.opentime.TimeRange(
|
||||
duration=otio.opentime.RationalTime(
|
||||
gap_length,
|
||||
CTX.get_fps()
|
||||
)
|
||||
)
|
||||
otio_gap = otio.schema.Gap(source_range=gap)
|
||||
otio_track.append(otio_gap)
|
||||
|
||||
|
||||
def add_otio_metadata(otio_item, item, **kwargs):
|
||||
metadata = _get_metadata(item)
|
||||
|
||||
# add additional metadata from kwargs
|
||||
if kwargs:
|
||||
metadata.update(kwargs)
|
||||
|
||||
# add metadata to otio item metadata
|
||||
for key, value in metadata.items():
|
||||
otio_item.metadata.update({key: value})
|
||||
|
||||
|
||||
def _get_shot_tokens_values(clip, tokens):
|
||||
old_value = None
|
||||
output = {}
|
||||
|
||||
old_value = clip.shot_name.get_value()
|
||||
|
||||
for token in tokens:
|
||||
clip.shot_name.set_value(token)
|
||||
_key = re.sub("[ <>]", "", token)
|
||||
|
||||
try:
|
||||
output[_key] = int(clip.shot_name.get_value())
|
||||
except ValueError:
|
||||
output[_key] = clip.shot_name.get_value()
|
||||
|
||||
clip.shot_name.set_value(old_value)
|
||||
|
||||
return output
|
||||
|
||||
|
||||
def _get_segment_attributes(segment):
|
||||
|
||||
log.debug("Segment name|hidden: {}|{}".format(
|
||||
segment.name.get_value(), segment.hidden
|
||||
))
|
||||
if (
|
||||
segment.name.get_value() == ""
|
||||
or segment.hidden.get_value()
|
||||
):
|
||||
return None
|
||||
|
||||
# Add timeline segment to tree
|
||||
clip_data = {
|
||||
"segment_name": segment.name.get_value(),
|
||||
"segment_comment": segment.comment.get_value(),
|
||||
"shot_name": segment.shot_name.get_value(),
|
||||
"tape_name": segment.tape_name,
|
||||
"source_name": segment.source_name,
|
||||
"fpath": segment.file_path,
|
||||
"PySegment": segment
|
||||
}
|
||||
|
||||
# add all available shot tokens
|
||||
shot_tokens = _get_shot_tokens_values(
|
||||
segment,
|
||||
["<colour space>", "<width>", "<height>", "<depth>"]
|
||||
)
|
||||
clip_data.update(shot_tokens)
|
||||
|
||||
# populate shot source metadata
|
||||
segment_attrs = [
|
||||
"record_duration", "record_in", "record_out",
|
||||
"source_duration", "source_in", "source_out"
|
||||
]
|
||||
segment_attrs_data = {}
|
||||
for attr in segment_attrs:
|
||||
if not hasattr(segment, attr):
|
||||
continue
|
||||
_value = getattr(segment, attr)
|
||||
segment_attrs_data[attr] = str(_value).replace("+", ":")
|
||||
|
||||
if attr in ["record_in", "record_out"]:
|
||||
clip_data[attr] = _value.relative_frame
|
||||
else:
|
||||
clip_data[attr] = _value.frame
|
||||
|
||||
clip_data["segment_timecodes"] = segment_attrs_data
|
||||
|
||||
return clip_data
|
||||
|
||||
|
||||
def create_otio_timeline(sequence):
|
||||
log.info(dir(sequence))
|
||||
log.info(sequence.attributes)
|
||||
|
||||
CTX.project = get_current_flame_project()
|
||||
|
||||
# get current timeline
|
||||
CTX.set_fps(
|
||||
float(str(sequence.frame_rate)[:-4]))
|
||||
|
||||
tl_start_frame = utils.timecode_to_frames(
|
||||
str(sequence.start_time).replace("+", ":"),
|
||||
CTX.get_fps()
|
||||
)
|
||||
CTX.set_tl_start_frame(tl_start_frame)
|
||||
|
||||
# convert timeline to otio
|
||||
otio_timeline = _create_otio_timeline(sequence)
|
||||
|
||||
# create otio tracks and clips
|
||||
for ver in sequence.versions:
|
||||
for track in ver.tracks:
|
||||
# avoid all empty tracks
|
||||
# or hidden tracks
|
||||
if (
|
||||
len(track.segments) == 0
|
||||
or track.hidden.get_value()
|
||||
):
|
||||
continue
|
||||
|
||||
# convert track to otio
|
||||
otio_track = create_otio_track(
|
||||
"video", str(track.name)[1:-1])
|
||||
|
||||
all_segments = []
|
||||
for segment in track.segments:
|
||||
clip_data = _get_segment_attributes(segment)
|
||||
if not clip_data:
|
||||
continue
|
||||
all_segments.append(clip_data)
|
||||
|
||||
segments_ordered = dict(enumerate(all_segments))
|
||||
log.debug("_ segments_ordered: {}".format(
|
||||
pformat(segments_ordered)
|
||||
))
|
||||
if not segments_ordered:
|
||||
continue
|
||||
|
||||
for itemindex, segment_data in segments_ordered.items():
|
||||
log.debug("_ itemindex: {}".format(itemindex))
|
||||
|
||||
# Add Gap if needed
|
||||
prev_item = (
|
||||
segment_data
|
||||
if itemindex == 0
|
||||
else segments_ordered[itemindex - 1]
|
||||
)
|
||||
log.debug("_ segment_data: {}".format(segment_data))
|
||||
|
||||
# calculate clip frame range difference from each other
|
||||
clip_diff = segment_data["record_in"] - prev_item["record_out"]
|
||||
|
||||
# add gap if first track item is not starting
|
||||
# at first timeline frame
|
||||
if itemindex == 0 and segment_data["record_in"] > 0:
|
||||
add_otio_gap(segment_data, otio_track, 0)
|
||||
|
||||
# or add gap if following track items are having
|
||||
# frame range differences from each other
|
||||
elif itemindex and clip_diff != 1:
|
||||
add_otio_gap(
|
||||
segment_data, otio_track, prev_item["record_out"])
|
||||
|
||||
# create otio clip and add it to track
|
||||
otio_clip = create_otio_clip(segment_data)
|
||||
otio_track.append(otio_clip)
|
||||
|
||||
log.debug("_ otio_clip: {}".format(otio_clip))
|
||||
|
||||
# create otio marker
|
||||
# create otio metadata
|
||||
|
||||
# add track to otio timeline
|
||||
otio_timeline.tracks.append(otio_track)
|
||||
|
||||
return otio_timeline
|
||||
|
||||
|
||||
def write_to_file(otio_timeline, path):
|
||||
otio.adapters.write_to_file(otio_timeline, path)
|
||||
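Taken together, a minimal export sketch; it assumes it runs inside Flame's
embedded Python and that `sequence` is a PySequence obtained from the user
selection (how the selection is resolved is outside this module):

from ayon_flame.otio import flame_export

# `sequence` is a flame.PySequence from the current selection
otio_timeline = flame_export.create_otio_timeline(sequence)
flame_export.write_to_file(otio_timeline, "/tmp/sequence.otio")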
91
server_addon/flame/client/ayon_flame/otio/utils.py
Normal file

@ -0,0 +1,91 @@
import re
import opentimelineio as otio
import logging
log = logging.getLogger(__name__)

FRAME_PATTERN = re.compile(r"[\._](\d+)[\.]")


def timecode_to_frames(timecode, framerate):
    rt = otio.opentime.from_timecode(timecode, framerate)
    return int(otio.opentime.to_frames(rt))


def frames_to_timecode(frames, framerate):
    rt = otio.opentime.from_frames(frames, framerate)
    return otio.opentime.to_timecode(rt)


def frames_to_seconds(frames, framerate):
    rt = otio.opentime.from_frames(frames, framerate)
    return otio.opentime.to_seconds(rt)


def get_reformatted_filename(filename, padded=True):
    """
    Return file name with the frame number replaced
    by a python format expression

    Args:
        filename (str): file name

    Returns:
        str: reformatted file name

    Example:
        get_reformatted_filename("plate.1001.exr") > "plate.%04d.exr"

    """
    found = FRAME_PATTERN.search(filename)

    if not found:
        log.info("File name is not a sequence: {}".format(filename))
        return filename

    padding = get_padding_from_filename(filename)

    replacement = "%0{}d".format(padding) if padded else "%d"
    start_idx, end_idx = found.span(1)

    return replacement.join(
        [filename[:start_idx], filename[end_idx:]]
    )


def get_padding_from_filename(filename):
    """
    Return padding number from Flame path style

    Args:
        filename (str): file name

    Returns:
        int: padding number, or None if the name is not a sequence

    Example:
        get_padding_from_filename("plate.0001.exr") > 4

    """
    found = get_frame_from_filename(filename)

    return len(found) if found else None


def get_frame_from_filename(filename):
    """
    Return sequence frame number from Flame path style

    Args:
        filename (str): file name

    Returns:
        str: sequence frame number, or None if not found

    Example:
        get_frame_from_filename("plate.0001.exr") > "0001"

    """

    found = re.findall(FRAME_PATTERN, filename)

    return found.pop() if found else None
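A doctest-style sketch of the three filename helpers above:

>>> get_frame_from_filename("plate.0100.exr")
'0100'
>>> get_padding_from_filename("plate.0100.exr")
4
>>> get_reformatted_filename("plate.0100.exr")
'plate.%04d.exr'
>>> get_reformatted_filename("plate.0100.exr", padded=False)
'plate.%d.exr'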
@ -0,0 +1,307 @@
from copy import deepcopy
import ayon_flame.api as opfapi


class CreateShotClip(opfapi.Creator):
    """Publishable clip"""

    label = "Create Publishable Clip"
    product_type = "clip"
    icon = "film"
    defaults = ["Main"]

    presets = None

    def process(self):
        # Create a copy of object attributes that are modified
        # during `process`
        presets = deepcopy(self.presets)
        gui_inputs = self.get_gui_inputs()

        # get key pairs from presets and match them on ui inputs
        for k, v in gui_inputs.items():
            if v["type"] in ("dict", "section"):
                # nested dictionary (only one level allowed
                # for sections and dict)
                for _k, _v in v["value"].items():
                    if presets.get(_k) is not None:
                        gui_inputs[k][
                            "value"][_k]["value"] = presets[_k]

            if presets.get(k) is not None:
                gui_inputs[k]["value"] = presets[k]

        # open widget for plugins inputs
        results_back = self.create_widget(
            "AYON publish attributes creator",
            "Define sequential rename and fill hierarchy data.",
            gui_inputs
        )

        if len(self.selected) < 1:
            return

        if not results_back:
            print("Operation aborted")
            return

        # get ui output for track name for vertical sync
        v_sync_track = results_back["vSyncTrack"]["value"]

        # sort selected track items by the vertical sync track
        sorted_selected_segments = []
        unsorted_selected_segments = []
        for _segment in self.selected:
            if _segment.parent.name.get_value() in v_sync_track:
                sorted_selected_segments.append(_segment)
            else:
                unsorted_selected_segments.append(_segment)

        sorted_selected_segments.extend(unsorted_selected_segments)

        kwargs = {
            "log": self.log,
            "ui_inputs": results_back,
            "avalon": self.data,
            "product_type": self.data["productType"]
        }

        for i, segment in enumerate(sorted_selected_segments):
            kwargs["rename_index"] = i
            # convert track item to timeline media pool item
            opfapi.PublishableClip(segment, **kwargs).convert()

    def get_gui_inputs(self):
        gui_tracks = self._get_video_track_names(
            opfapi.get_current_sequence(opfapi.CTX.selection)
        )
        return deepcopy({
            "renameHierarchy": {
                "type": "section",
                "label": "Shot Hierarchy And Rename Settings",
                "target": "ui",
                "order": 0,
                "value": {
                    "hierarchy": {
                        "value": "{folder}/{sequence}",
                        "type": "QLineEdit",
                        "label": "Shot Parent Hierarchy",
                        "target": "tag",
                        "toolTip": "Parent folders for the shot root folder; template filled with the `Hierarchy Data` section",  # noqa
                        "order": 0},
                    "useShotName": {
                        "value": True,
                        "type": "QCheckBox",
                        "label": "Use Shot Name",
                        "target": "ui",
                        "toolTip": "Use name from the Shot name clip attribute",  # noqa
                        "order": 1},
                    "clipRename": {
                        "value": False,
                        "type": "QCheckBox",
                        "label": "Rename clips",
                        "target": "ui",
                        "toolTip": "Rename selected clips on the fly",  # noqa
                        "order": 2},
                    "clipName": {
                        "value": "{sequence}{shot}",
                        "type": "QLineEdit",
                        "label": "Clip Name Template",
                        "target": "ui",
                        "toolTip": "Template for creating shot names, used for renaming (use rename: on)",  # noqa
                        "order": 3},
                    "segmentIndex": {
                        "value": True,
                        "type": "QCheckBox",
                        "label": "Segment index",
                        "target": "ui",
                        "toolTip": "Take number from segment index",  # noqa
                        "order": 4},
                    "countFrom": {
                        "value": 10,
                        "type": "QSpinBox",
                        "label": "Count sequence from",
                        "target": "ui",
                        "toolTip": "Set the number the sequence count starts from",  # noqa
                        "order": 5},
                    "countSteps": {
                        "value": 10,
                        "type": "QSpinBox",
                        "label": "Stepping number",
                        "target": "ui",
                        "toolTip": "Number added with every new step",  # noqa
                        "order": 6},
                }
            },
            "hierarchyData": {
                "type": "dict",
                "label": "Shot Template Keywords",
                "target": "tag",
                "order": 1,
                "value": {
                    "folder": {
                        "value": "shots",
                        "type": "QLineEdit",
                        "label": "{folder}",
                        "target": "tag",
                        "toolTip": "Name of folder used for root of generated shots.\nUsable tokens:\n\t{_clip_}: name of used clip\n\t{_track_}: name of parent track layer\n\t{_sequence_}: name of parent sequence (timeline)",  # noqa
                        "order": 0},
                    "episode": {
                        "value": "ep01",
                        "type": "QLineEdit",
                        "label": "{episode}",
                        "target": "tag",
                        "toolTip": "Name of episode.\nUsable tokens:\n\t{_clip_}: name of used clip\n\t{_track_}: name of parent track layer\n\t{_sequence_}: name of parent sequence (timeline)",  # noqa
                        "order": 1},
                    "sequence": {
                        "value": "sq01",
                        "type": "QLineEdit",
                        "label": "{sequence}",
                        "target": "tag",
                        "toolTip": "Name of sequence of shots.\nUsable tokens:\n\t{_clip_}: name of used clip\n\t{_track_}: name of parent track layer\n\t{_sequence_}: name of parent sequence (timeline)",  # noqa
                        "order": 2},
                    "track": {
                        "value": "{_track_}",
                        "type": "QLineEdit",
                        "label": "{track}",
                        "target": "tag",
                        "toolTip": "Name of timeline track layer.\nUsable tokens:\n\t{_clip_}: name of used clip\n\t{_track_}: name of parent track layer\n\t{_sequence_}: name of parent sequence (timeline)",  # noqa
                        "order": 3},
                    "shot": {
                        "value": "sh###",
                        "type": "QLineEdit",
                        "label": "{shot}",
                        "target": "tag",
                        "toolTip": "Name of shot. `#` is converted to a padded number.\nAlso could be used with usable tokens:\n\t{_clip_}: name of used clip\n\t{_track_}: name of parent track layer\n\t{_sequence_}: name of parent sequence (timeline)",  # noqa
                        "order": 4}
                }
            },
            "verticalSync": {
                "type": "section",
                "label": "Vertical Synchronization Of Attributes",
                "target": "ui",
                "order": 2,
                "value": {
                    "vSyncOn": {
                        "value": True,
                        "type": "QCheckBox",
                        "label": "Enable Vertical Sync",
                        "target": "ui",
                        "toolTip": "Switch on if you want clips above each other to share their attributes",  # noqa
                        "order": 0},
                    "vSyncTrack": {
                        "value": gui_tracks,  # noqa
                        "type": "QComboBox",
                        "label": "Hero track",
                        "target": "ui",
                        "toolTip": "Select the driving track name which should be hero for all others",  # noqa
                        "order": 1}
                }
            },
            "publishSettings": {
                "type": "section",
                "label": "Publish Settings",
                "target": "ui",
                "order": 3,
                "value": {
                    "productName": {
                        "value": ["[ track name ]", "main", "bg", "fg", "bg",
                                  "animatic"],
                        "type": "QComboBox",
                        "label": "Product Name",
                        "target": "ui",
                        "toolTip": "Choose the product name pattern; if [ track name ] is selected, the name of the track layer will be used",  # noqa
                        "order": 0},
                    "productType": {
                        "value": ["plate", "take"],
                        "type": "QComboBox",
                        "label": "Product Type",
                        "target": "ui", "toolTip": "What this product is used for",  # noqa
                        "order": 1},
                    "reviewTrack": {
                        "value": ["< none >"] + gui_tracks,
                        "type": "QComboBox",
                        "label": "Use Review Track",
                        "target": "ui",
                        "toolTip": "Generate preview videos on the fly; if `< none >` is selected nothing will be generated.",  # noqa
                        "order": 2},
                    "audio": {
                        "value": False,
                        "type": "QCheckBox",
                        "label": "Include audio",
                        "target": "tag",
                        "toolTip": "Process products with corresponding audio",  # noqa
                        "order": 3},
                    "sourceResolution": {
                        "value": False,
                        "type": "QCheckBox",
                        "label": "Source resolution",
                        "target": "tag",
                        "toolTip": "Is resolution taken from timeline or source?",  # noqa
                        "order": 4},
                }
            },
            "frameRangeAttr": {
                "type": "section",
                "label": "Shot Attributes",
                "target": "ui",
                "order": 4,
                "value": {
                    "workfileFrameStart": {
                        "value": 1001,
                        "type": "QSpinBox",
                        "label": "Workfiles Start Frame",
                        "target": "tag",
                        "toolTip": "Set workfile starting frame number",  # noqa
                        "order": 0
                    },
                    "handleStart": {
                        "value": 0,
                        "type": "QSpinBox",
                        "label": "Handle Start",
                        "target": "tag",
                        "toolTip": "Handle at start of clip",  # noqa
                        "order": 1
                    },
                    "handleEnd": {
                        "value": 0,
                        "type": "QSpinBox",
                        "label": "Handle End",
                        "target": "tag",
                        "toolTip": "Handle at end of clip",  # noqa
                        "order": 2
                    },
                    "includeHandles": {
                        "value": False,
                        "type": "QCheckBox",
                        "label": "Include handles",
                        "target": "tag",
                        "toolTip": "By default handles are excluded",  # noqa
                        "order": 3
                    },
                    "retimedHandles": {
                        "value": True,
                        "type": "QCheckBox",
                        "label": "Retimed handles",
                        "target": "tag",
                        "toolTip": "By default handles are retimed.",  # noqa
                        "order": 4
                    },
                    "retimedFramerange": {
                        "value": True,
                        "type": "QCheckBox",
                        "label": "Retimed framerange",
                        "target": "tag",
                        "toolTip": "By default framerange is retimed.",  # noqa
                        "order": 5
                    }
                }
            }
        })

    def _get_video_track_names(self, sequence):
        track_names = []
        for ver in sequence.versions:
            for track in ver.tracks:
                track_names.append(track.name.get_value())

        return track_names
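To illustrate the presets-to-UI merge at the top of `process`, a
hypothetical presets payload only needs keys matching the input names;
nested inputs are matched one level deep inside sections:

presets = {
    # lands inside the "renameHierarchy" section
    "hierarchy": "{folder}/{episode}/{sequence}",
    # land inside the "frameRangeAttr" section
    "workfileFrameStart": 1001,
    "handleStart": 10,
    "handleEnd": 10,
}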
274
server_addon/flame/client/ayon_flame/plugins/load/load_clip.py
Normal file

@ -0,0 +1,274 @@
from copy import deepcopy
import os
import flame
from pprint import pformat
import ayon_flame.api as opfapi
from ayon_core.lib import StringTemplate
from ayon_core.lib.transcoding import (
    VIDEO_EXTENSIONS,
    IMAGE_EXTENSIONS
)


class LoadClip(opfapi.ClipLoader):
    """Load a product to timeline as clip

    Place clip to timeline on its asset origin timings collected
    during conforming to project
    """

    product_types = {"render2d", "source", "plate", "render", "review"}
    representations = {"*"}
    extensions = set(
        ext.lstrip(".") for ext in IMAGE_EXTENSIONS.union(VIDEO_EXTENSIONS)
    )

    label = "Load as clip"
    order = -10
    icon = "code-fork"
    color = "orange"

    # settings
    reel_group_name = "OpenPype_Reels"
    reel_name = "Loaded"
    clip_name_template = "{folder[name]}_{product[name]}<_{output}>"

    """ Anatomy keys from version context data and dynamically added:
    - {layerName} - original layer name token
    - {layerUID} - original layer UID token
    - {originalBasename} - original clip name taken from file
    """
    layer_rename_template = "{folder[name]}_{product[name]}<_{output}>"
    layer_rename_patterns = []

    def load(self, context, name, namespace, options):

        # get flame objects
        fproject = flame.project.current_project
        self.fpd = fproject.current_workspace.desktop

        # load clip to timeline and get main variables
        version_entity = context["version"]
        version_attributes = version_entity["attrib"]
        version_name = version_entity["version"]
        colorspace = self.get_colorspace(context)

        # in case output is not in context replace key to representation
        if not context["representation"]["context"].get("output"):
            self.clip_name_template = self.clip_name_template.replace(
                "output", "representation")
            self.layer_rename_template = self.layer_rename_template.replace(
                "output", "representation")

        formatting_data = deepcopy(context["representation"]["context"])
        clip_name = StringTemplate(self.clip_name_template).format(
            formatting_data)

        # convert colorspace with ocio to flame mapping
        # in imageio flame section
        colorspace = self.get_native_colorspace(colorspace)
        self.log.info("Loading with colorspace: `{}`".format(colorspace))

        # create workfile path
        workfile_dir = os.environ["AYON_WORKDIR"]
        openclip_dir = os.path.join(
            workfile_dir, clip_name
        )
        openclip_path = os.path.join(
            openclip_dir, clip_name + ".clip"
        )
        if not os.path.exists(openclip_dir):
            os.makedirs(openclip_dir)

        # prepare clip data from context and send it to openClipLoader
        path = self.filepath_from_context(context)
        loading_context = {
            "path": path.replace("\\", "/"),
            "colorspace": colorspace,
            "version": "v{:0>3}".format(version_name),
            "layer_rename_template": self.layer_rename_template,
            "layer_rename_patterns": self.layer_rename_patterns,
            "context_data": formatting_data
        }
        self.log.debug(pformat(
            loading_context
        ))
        self.log.debug(openclip_path)

        # make openpype clip file
        opfapi.OpenClipSolver(
            openclip_path, loading_context, logger=self.log).make()

        # prepare Reel group in actual desktop
        opc = self._get_clip(
            clip_name,
            openclip_path
        )

        # add additional metadata from the version to imprint Avalon knob
        add_keys = [
            "frameStart", "frameEnd", "source", "author",
            "fps", "handleStart", "handleEnd"
        ]

        # move all version data keys to tag data
        data_imprint = {
            key: version_attributes.get(key, str(None))
            for key in add_keys
        }

        # add variables related to version context
        data_imprint.update({
            "version": version_name,
            "colorspace": colorspace,
            "objectName": clip_name
        })

        # TODO: finish the containerisation
        # opc_segment = opfapi.get_clip_segment(opc)

        # return opfapi.containerise(
        #     opc_segment,
        #     name, namespace, context,
        #     self.__class__.__name__,
        #     data_imprint)

        return opc

    def _get_clip(self, name, clip_path):
        reel = self._get_reel()
        # with maintained openclip as opc
        matching_clip = [cl for cl in reel.clips
                         if cl.name.get_value() == name]
        if matching_clip:
            return matching_clip.pop()
        else:
            created_clips = flame.import_clips(str(clip_path), reel)
            return created_clips.pop()

    def _get_reel(self):

        matching_rgroup = [
            rg for rg in self.fpd.reel_groups
            if rg.name.get_value() == self.reel_group_name
        ]

        if not matching_rgroup:
            reel_group = self.fpd.create_reel_group(str(self.reel_group_name))
            for _r in reel_group.reels:
                if "reel" not in _r.name.get_value().lower():
                    continue
                self.log.debug("Removing: {}".format(_r.name))
                flame.delete(_r)
        else:
            reel_group = matching_rgroup.pop()

        matching_reel = [
            re for re in reel_group.reels
            if re.name.get_value() == self.reel_name
        ]

        if not matching_reel:
            reel_group = reel_group.create_reel(str(self.reel_name))
        else:
            reel_group = matching_reel.pop()

        return reel_group

    def _get_segment_from_clip(self, clip):
        # unwrapping segment from input clip
        pass

    # def switch(self, container, context):
    #     self.update(container, context)

    # def update(self, container, context):
    #     """ Updating previously loaded clips
    #     """
    #     # load clip to timeline and get main variables
    #     repre_entity = context['representation']
    #     name = container['name']
    #     namespace = container['namespace']
    #     track_item = phiero.get_track_items(
    #         track_item_name=namespace)
    #     version = io.find_one({
    #         "type": "version",
    #         "id": repre_entity["versionId"]
    #     })
    #     version_data = version.get("data", {})
    #     version_name = version.get("name", None)
    #     colorspace = version_data.get("colorSpace", None)
    #     object_name = "{}_{}".format(name, namespace)
    #     file = get_representation_path(repre_entity).replace("\\", "/")
    #     clip = track_item.source()

    #     # reconnect media to new path
    #     clip.reconnectMedia(file)

    #     # set colorspace
    #     if colorspace:
    #         clip.setSourceMediaColourTransform(colorspace)

    #     # add additional metadata from the version to imprint Avalon knob
    #     add_keys = [
    #         "frameStart", "frameEnd", "source", "author",
    #         "fps", "handleStart", "handleEnd"
    #     ]

    #     # move all version data keys to tag data
    #     data_imprint = {}
    #     for key in add_keys:
    #         data_imprint.update({
    #             key: version_data.get(key, str(None))
    #         })

    #     # add variables related to version context
    #     data_imprint.update({
    #         "representation": repre_entity["id"],
    #         "version": version_name,
    #         "colorspace": colorspace,
    #         "objectName": object_name
    #     })

    #     # update color of clip regarding the version order
    #     self.set_item_color(track_item, version)

    #     return phiero.update_container(track_item, data_imprint)

    # def remove(self, container):
    #     """ Removing previously loaded clips
    #     """
    #     # load clip to timeline and get main variables
    #     namespace = container['namespace']
    #     track_item = phiero.get_track_items(
    #         track_item_name=namespace)
    #     track = track_item.parent()

    #     # remove track item from track
    #     track.removeItem(track_item)

    # @classmethod
    # def multiselection(cls, track_item):
    #     if not cls.track:
    #         cls.track = track_item.parent()
    #         cls.sequence = cls.track.parent()

    # @classmethod
    # def set_item_color(cls, track_item, version):

    #     clip = track_item.source()
    #     # define version name
    #     version_name = version.get("name", None)
    #     # get all versions in list
    #     versions = io.find({
|
||||
# "type": "version",
|
||||
# "parent": version["parent"]
|
||||
# }).distinct('name')
|
||||
|
||||
# max_version = max(versions)
|
||||
|
||||
# # set clip colour
|
||||
# if version_name == max_version:
|
||||
# clip.binItem().setColor(cls.clip_color_last)
|
||||
# else:
|
||||
# clip.binItem().setColor(cls.clip_color)
|
||||
|
|
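The `<_{output}>` token in both name templates above is optional: when the representation context carries no `output` key, the loader swaps the token's key for `representation` before formatting. A minimal sketch of that resolution, not part of the diff, assuming `ayon_core.lib.StringTemplate` drops unresolved `<...>` groups the way AYON anatomy templates do:

from ayon_core.lib import StringTemplate

# illustrative data only; "output" is swapped for "representation"
# exactly as the loader does above
template = "{batch}_{folder[name]}_{product[name]}<_{output}>"
data = {
    "batch": "bt010",
    "folder": {"name": "sh010"},
    "product": {"name": "plateMain"},
    "representation": "exr",  # no "output" key in this context
}
if "output" not in data:
    template = template.replace("output", "representation")

clip_name = StringTemplate(template).format(data)
# expected: "bt010_sh010_plateMain_exr"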
@@ -0,0 +1,180 @@
from copy import deepcopy
import os
import flame
from pprint import pformat
import ayon_flame.api as opfapi

from ayon_core.lib import StringTemplate
from ayon_core.lib.transcoding import (
    VIDEO_EXTENSIONS,
    IMAGE_EXTENSIONS
)


class LoadClipBatch(opfapi.ClipLoader):
    """Load a product to timeline as clip

    Place clip to timeline on its asset origin timings collected
    during conforming to project
    """

    product_types = {"render2d", "source", "plate", "render", "review"}
    representations = {"*"}
    extensions = set(
        ext.lstrip(".") for ext in IMAGE_EXTENSIONS.union(VIDEO_EXTENSIONS)
    )

    label = "Load as clip to current batch"
    order = -10
    icon = "code-fork"
    color = "orange"

    # settings
    reel_name = "OP_LoadedReel"
    clip_name_template = "{batch}_{folder[name]}_{product[name]}<_{output}>"

    """ Anatomy keys from version context data and dynamically added:
    - {layerName} - original layer name token
    - {layerUID} - original layer UID token
    - {originalBasename} - original clip name taken from file
    """
    layer_rename_template = "{folder[name]}_{product[name]}<_{output}>"
    layer_rename_patterns = []

    def load(self, context, name, namespace, options):

        # get flame objects
        self.batch = options.get("batch") or flame.batch

        # load clip to timeline and get main variables
        version_entity = context["version"]
        version_attributes = version_entity["attrib"]
        version_name = version_entity["version"]
        colorspace = self.get_colorspace(context)

        clip_name_template = self.clip_name_template
        layer_rename_template = self.layer_rename_template
        # in case output is not in context replace key to representation
        if not context["representation"]["context"].get("output"):
            clip_name_template = clip_name_template.replace(
                "output", "representation")
            layer_rename_template = layer_rename_template.replace(
                "output", "representation")

        folder_entity = context["folder"]
        product_entity = context["product"]
        formatting_data = deepcopy(context["representation"]["context"])
        formatting_data["batch"] = self.batch.name.get_value()
        formatting_data.update({
            "asset": folder_entity["name"],
            "folder": {
                "name": folder_entity["name"],
            },
            "subset": product_entity["name"],
            "family": product_entity["productType"],
            "product": {
                "name": product_entity["name"],
                "type": product_entity["productType"],
            }
        })

        clip_name = StringTemplate(clip_name_template).format(
            formatting_data)

        # convert colorspace with ocio to flame mapping
        # in imageio flame section
        colorspace = self.get_native_colorspace(colorspace)
        self.log.info("Loading with colorspace: `{}`".format(colorspace))

        # create workfile path
        workfile_dir = options.get("workdir") or os.environ["AYON_WORKDIR"]
        openclip_dir = os.path.join(
            workfile_dir, clip_name
        )
        openclip_path = os.path.join(
            openclip_dir, clip_name + ".clip"
        )

        if not os.path.exists(openclip_dir):
            os.makedirs(openclip_dir)

        # prepare clip data from context and send it to openClipLoader
        path = self.filepath_from_context(context)
        loading_context = {
            "path": path.replace("\\", "/"),
            "colorspace": colorspace,
            "version": "v{:0>3}".format(version_name),
            "layer_rename_template": layer_rename_template,
            "layer_rename_patterns": self.layer_rename_patterns,
            "context_data": formatting_data
        }
        self.log.debug(pformat(
            loading_context
        ))
        self.log.debug(openclip_path)

        # make openpype clip file
        opfapi.OpenClipSolver(
            openclip_path, loading_context, logger=self.log).make()

        # prepare Reel group in actual desktop
        opc = self._get_clip(
            clip_name,
            openclip_path
        )

        # add additional metadata from the version to imprint Avalon knob
        add_keys = [
            "frameStart", "frameEnd", "source", "author",
            "fps", "handleStart", "handleEnd"
        ]

        # move all version data keys to tag data
        data_imprint = {
            key: version_attributes.get(key, str(None))
            for key in add_keys
        }
        # add variables related to version context
        data_imprint.update({
            "version": version_name,
            "colorspace": colorspace,
            "objectName": clip_name
        })

        # TODO: finish the containerisation
        # opc_segment = opfapi.get_clip_segment(opc)

        # return opfapi.containerise(
        #     opc_segment,
        #     name, namespace, context,
        #     self.__class__.__name__,
        #     data_imprint)

        return opc

    def _get_clip(self, name, clip_path):
        reel = self._get_reel()

        # with maintained openclip as opc
        matching_clip = None
        for cl in reel.clips:
            if cl.name.get_value() != name:
                continue
            matching_clip = cl

        if not matching_clip:
            created_clips = flame.import_clips(str(clip_path), reel)
            return created_clips.pop()

        return matching_clip

    def _get_reel(self):

        matching_reel = [
            rg for rg in self.batch.reels
            if rg.name.get_value() == self.reel_name
        ]

        return (
            matching_reel.pop()
            if matching_reel
            else self.batch.create_reel(str(self.reel_name))
        )
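`LoadClipBatch` reads two optional keys from its load options: `batch` and `workdir`, falling back to `flame.batch` and `AYON_WORKDIR`. A sketch of driving it through the generic load pipeline, mirroring what `IntegrateBatchGroup` further below does; the representation id is a hypothetical placeholder:

import flame
import ayon_core.pipeline as op_pipeline

# hypothetical id of an already published representation
repre_id = "<representation-id>"

loaders_by_name = {
    loader.__name__: loader
    for loader in op_pipeline.discover_loader_plugins()
}
repre_contexts = op_pipeline.load.get_repres_contexts([repre_id])

op_pipeline.load.load_with_repre_context(
    loaders_by_name["LoadClipBatch"],
    repre_contexts[repre_id],
    data={
        "batch": flame.batch,           # target batch group
        "workdir": "/path/to/workdir",  # where the .clip file is written
    }
)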
@@ -0,0 +1,64 @@
import os
import pyblish.api
import tempfile
import ayon_flame.api as opfapi
from ayon_flame.otio import flame_export as otio_export
import opentimelineio as otio
from pprint import pformat
from importlib import reload

reload(otio_export)  # noqa


@pyblish.api.log
class CollectTestSelection(pyblish.api.ContextPlugin):
    """testing selection sharing
    """

    order = pyblish.api.CollectorOrder
    label = "test selection"
    hosts = ["flame"]
    active = False

    def process(self, context):
        self.log.info(
            "Active Selection: {}".format(opfapi.CTX.selection))

        sequence = opfapi.get_current_sequence(opfapi.CTX.selection)

        self.test_imprint_data(sequence)
        self.test_otio_export(sequence)

    def test_otio_export(self, sequence):
        test_dir = os.path.normpath(
            tempfile.mkdtemp(prefix="test_pyblish_tmp_")
        )
        export_path = os.path.normpath(
            os.path.join(
                test_dir, "otio_timeline_export.otio"
            )
        )
        otio_timeline = otio_export.create_otio_timeline(sequence)
        otio_export.write_to_file(
            otio_timeline, export_path
        )
        read_timeline_otio = otio.adapters.read_from_file(export_path)

        if otio_timeline != read_timeline_otio:
            raise Exception("Exported timeline is different from original")

        self.log.info(pformat(otio_timeline))
        self.log.info("Otio exported to: {}".format(export_path))

    def test_imprint_data(self, sequence):
        with opfapi.maintained_segment_selection(sequence) as sel_segments:
            for segment in sel_segments:
                if str(segment.name)[1:-1] == "":
                    continue

                self.log.debug("Segment with OpenPypeData: {}".format(
                    segment.name))

                opfapi.imprint(segment, {
                    'asset': segment.name.get_value(),
                    'productType': 'render',
                    'productName': 'productMain'
                })
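The `str(segment.name)[1:-1] == ""` guard above relies on Flame stringifying attribute objects with surrounding quotes, so slicing off the first and last character yields the bare name. That is an assumption about flame's repr, illustrated here with a hypothetical stand-in:

class FakeName:
    """Hypothetical stand-in for a flame name attribute."""
    def __init__(self, value):
        self._value = value

    def __str__(self):
        # flame attribute objects are assumed to render quoted, e.g. '"sh010"'
        return '"{}"'.format(self._value)

    def get_value(self):
        return self._value


name = FakeName("")
# quotes stripped -> empty string -> segment is skipped
assert str(name)[1:-1] == ""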
@@ -0,0 +1,417 @@
import re

import pyblish
import ayon_flame.api as opfapi
from ayon_flame.otio import flame_export
from ayon_core.pipeline import AYON_INSTANCE_ID, AVALON_INSTANCE_ID
from ayon_core.pipeline.editorial import (
    is_overlapping_otio_ranges,
    get_media_range_with_retimes
)

from pprint import pformat

# constants
NUM_PATTERN = re.compile(r"([0-9\.]+)")
TXT_PATTERN = re.compile(r"([a-zA-Z]+)")


class CollectTimelineInstances(pyblish.api.ContextPlugin):
    """Collect all Timeline segment selection."""

    order = pyblish.api.CollectorOrder - 0.09
    label = "Collect timeline Instances"
    hosts = ["flame"]

    settings_category = "flame"

    audio_track_items = []

    # settings
    xml_preset_attrs_from_comments = []
    add_tasks = []

    def process(self, context):
        selected_segments = context.data["flameSelectedSegments"]
        self.log.debug("__ selected_segments: {}".format(selected_segments))

        self.otio_timeline = context.data["otioTimeline"]
        self.fps = context.data["fps"]

        # process all selected
        for segment in selected_segments:
            # get openpype tag data
            marker_data = opfapi.get_segment_data_marker(segment)

            self.log.debug("__ marker_data: {}".format(
                pformat(marker_data)))

            if not marker_data:
                continue

            if marker_data.get("id") not in {
                AYON_INSTANCE_ID, AVALON_INSTANCE_ID
            }:
                continue

            self.log.debug("__ segment.name: {}".format(
                segment.name
            ))

            comment_attributes = self._get_comment_attributes(segment)

            self.log.debug("_ comment_attributes: {}".format(
                pformat(comment_attributes)))

            clip_data = opfapi.get_segment_attributes(segment)
            clip_name = clip_data["segment_name"]
            self.log.debug("clip_name: {}".format(clip_name))

            # get otio clip data
            otio_data = self._get_otio_clip_instance_data(clip_data) or {}
            self.log.debug("__ otio_data: {}".format(pformat(otio_data)))

            # get file path
            file_path = clip_data["fpath"]

            first_frame = opfapi.get_frame_from_filename(file_path) or 0

            head, tail = self._get_head_tail(
                clip_data,
                otio_data["otioClip"],
                marker_data["handleStart"],
                marker_data["handleEnd"]
            )

            # make sure the value is 0 rather than NoneType
            if head is None:
                head = 0
            if tail is None:
                tail = 0

            # make sure value is absolute
            if head != 0:
                head = abs(head)
            if tail != 0:
                tail = abs(tail)

            # solve handles length
            marker_data["handleStart"] = min(
                marker_data["handleStart"], head)
            marker_data["handleEnd"] = min(
                marker_data["handleEnd"], tail)

            # Backward compatibility fix of 'entity_type' > 'folder_type'
            if "parents" in marker_data:
                for parent in marker_data["parents"]:
                    if "entity_type" in parent:
                        parent["folder_type"] = parent.pop("entity_type")

            workfile_start = self._set_workfile_start(marker_data)

            with_audio = bool(marker_data.pop("audio"))

            # add marker data to instance data
            inst_data = dict(marker_data.items())

            # add otio_data to instance data
            inst_data.update(otio_data)

            folder_path = marker_data["folderPath"]
            folder_name = folder_path.rsplit("/")[-1]
            product_name = marker_data["productName"]

            # insert product type into families
            product_type = marker_data["productType"]
            families = [str(f) for f in marker_data["families"]]
            families.insert(0, str(product_type))

            # form label
            label = folder_name
            if folder_name != clip_name:
                label += " ({})".format(clip_name)
            label += " {} [{}]".format(product_name, ", ".join(families))

            inst_data.update({
                "name": "{}_{}".format(folder_name, product_name),
                "label": label,
                "folderPath": folder_path,
                "item": segment,
                "families": families,
                "publish": marker_data["publish"],
                "fps": self.fps,
                "workfileFrameStart": workfile_start,
                "sourceFirstFrame": int(first_frame),
                "retimedHandles": marker_data.get("retimedHandles"),
                "shotDurationFromSource": (
                    not marker_data.get("retimedFramerange")),
                "path": file_path,
                "flameAddTasks": self.add_tasks,
                "tasks": {
                    task["name"]: {"type": task["type"]}
                    for task in self.add_tasks},
                "representations": [],
                "newAssetPublishing": True
            })
            self.log.debug("__ inst_data: {}".format(pformat(inst_data)))

            # add resolution
            self._get_resolution_to_data(inst_data, context)

            # add comment attributes if any
            inst_data.update(comment_attributes)

            # create instance
            instance = context.create_instance(**inst_data)

            # add colorspace data
            instance.data.update({
                "versionData": {
                    "colorspace": clip_data["colour_space"],
                }
            })

            # create shot instance for shot attributes create/update
            self._create_shot_instance(context, clip_name, **inst_data)

            self.log.info("Creating instance: {}".format(instance))
            self.log.info(
                "_ instance.data: {}".format(pformat(instance.data)))

            if not with_audio:
                continue

            # add audioReview attribute to plate instance data
            # if reviewTrack is on
            if marker_data.get("reviewTrack") is not None:
                instance.data["reviewAudio"] = True

    @staticmethod
    def _set_workfile_start(data):
        include_handles = data.get("includeHandles")
        workfile_start = data["workfileFrameStart"]
        handle_start = data["handleStart"]

        if include_handles:
            workfile_start += handle_start

        return workfile_start
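`_set_workfile_start` only shifts the start frame when the shot is marked to include handles; a quick worked example of both branches (values are illustrative):

# worked example of _set_workfile_start
data = {"workfileFrameStart": 1001, "handleStart": 10}

data["includeHandles"] = False
assert CollectTimelineInstances._set_workfile_start(data) == 1001

data["includeHandles"] = True
assert CollectTimelineInstances._set_workfile_start(data) == 1011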
    def _get_comment_attributes(self, segment):
        comment = segment.comment.get_value()

        # try to find attributes
        attributes = {
            "xml_overrides": {
                "pixelRatio": 1.00}
        }
        # search for `:`
        for split in self._split_comments(comment):
            # make sure we ignore if not `:` in key
            if ":" not in split:
                continue

            self._get_xml_preset_attrs(
                attributes, split)

        # add xml overrides resolution to instance data
        xml_overrides = attributes["xml_overrides"]
        if xml_overrides.get("width"):
            attributes.update({
                "resolutionWidth": xml_overrides["width"],
                "resolutionHeight": xml_overrides["height"],
                "pixelAspect": xml_overrides["pixelRatio"]
            })

        return attributes

    def _get_xml_preset_attrs(self, attributes, split):

        # split to key and value
        key, value = split.split(":")

        for attr_data in self.xml_preset_attrs_from_comments:
            a_name = attr_data["name"]
            a_type = attr_data["type"]

            # exclude all not related attributes
            if a_name.lower() not in key.lower():
                continue

            # get pattern defined by type
            pattern = TXT_PATTERN
            if a_type in ("number", "float"):
                pattern = NUM_PATTERN

            res_group = pattern.findall(value)

            # raise if nothing is found as it is not correctly defined
            if not res_group:
                raise ValueError((
                    "Value for `{}` attribute is not "
                    "set correctly: `{}`").format(a_name, split))

            if "string" in a_type:
                _value = res_group[0]
            if "float" in a_type:
                _value = float(res_group[0])
            if "number" in a_type:
                _value = int(res_group[0])

            attributes["xml_overrides"][a_name] = _value

        # condition for resolution in key
        if "resolution" in key.lower():
            res_group = NUM_PATTERN.findall(value)
            # check if aspect was also defined
            # 1920x1080x1.5
            aspect = res_group[2] if len(res_group) > 2 else 1

            width = int(res_group[0])
            height = int(res_group[1])
            pixel_ratio = float(aspect)
            attributes["xml_overrides"].update({
                "width": width,
                "height": height,
                "pixelRatio": pixel_ratio
            })

    def _split_comments(self, comment_string):
        # first split comment by comma
        split_comments = []
        if "," in comment_string:
            split_comments.extend(comment_string.split(","))
        elif ";" in comment_string:
            split_comments.extend(comment_string.split(";"))
        else:
            split_comments.append(comment_string)

        return split_comments
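Segment comments are expected as comma (or semicolon) separated `key: value` pairs; for a `resolution` key the numeric groups are read as `width x height x aspect`. A worked example of that parsing, using the same regex as above:

import re

NUM_PATTERN = re.compile(r"([0-9\.]+)")

comment = "resolution: 1920x1080x1.5, note: approved"
for split in comment.split(","):
    if ":" not in split:
        continue
    key, value = split.split(":")
    if "resolution" in key.lower():
        res_group = NUM_PATTERN.findall(value)
        # -> ['1920', '1080', '1.5']
        width, height = int(res_group[0]), int(res_group[1])
        aspect = float(res_group[2]) if len(res_group) > 2 else 1.0
        assert (width, height, aspect) == (1920, 1080, 1.5)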
    def _get_head_tail(self, clip_data, otio_clip, handle_start, handle_end):
        # calculate head and tail with forward compatibility
        head = clip_data.get("segment_head")
        tail = clip_data.get("segment_tail")
        self.log.debug("__ head: `{}`".format(head))
        self.log.debug("__ tail: `{}`".format(tail))

        # HACK: it is here to serve for versions below 2021.1
        if not any([head, tail]):
            retimed_attributes = get_media_range_with_retimes(
                otio_clip, handle_start, handle_end)
            self.log.debug(
                ">> retimed_attributes: {}".format(retimed_attributes))

            # retimed head and tail
            head = int(retimed_attributes["handleStart"])
            tail = int(retimed_attributes["handleEnd"])

        return head, tail

    def _get_resolution_to_data(self, data, context):
        assert data.get("otioClip"), "Missing `otioClip` data"

        # solve source resolution option
        if data.get("sourceResolution", None):
            otio_clip_metadata = data[
                "otioClip"].media_reference.metadata
            data.update({
                "resolutionWidth": otio_clip_metadata[
                    "openpype.source.width"],
                "resolutionHeight": otio_clip_metadata[
                    "openpype.source.height"],
                "pixelAspect": otio_clip_metadata[
                    "openpype.source.pixelAspect"]
            })
        else:
            otio_tl_metadata = context.data["otioTimeline"].metadata
            data.update({
                "resolutionWidth": otio_tl_metadata["openpype.timeline.width"],
                "resolutionHeight": otio_tl_metadata[
                    "openpype.timeline.height"],
                "pixelAspect": otio_tl_metadata[
                    "openpype.timeline.pixelAspect"]
            })

    def _create_shot_instance(self, context, clip_name, **data):
        master_layer = data.get("heroTrack")
        hierarchy_data = data.get("hierarchyData")

        if not master_layer:
            return

        if not hierarchy_data:
            return

        folder_path = data["folderPath"]
        folder_name = folder_path.rsplit("/")[-1]
        product_name = "shotMain"

        # insert product type into families
        product_type = "shot"

        # form label
        label = folder_name
        if folder_name != clip_name:
            label += " ({}) ".format(clip_name)
        label += " {}".format(product_name)
        label += " [{}]".format(product_type)

        data.update({
            "name": "{}_{}".format(folder_name, product_name),
            "label": label,
            "productName": product_name,
            "folderPath": folder_path,
            "productType": product_type,
            "family": product_type,
            "families": [product_type]
        })

        instance = context.create_instance(**data)
        self.log.info("Creating instance: {}".format(instance))
        self.log.debug(
            "_ instance.data: {}".format(pformat(instance.data)))

    def _get_otio_clip_instance_data(self, clip_data):
        """
        Return otio objects for timeline, track and clip

        Args:
            clip_data (dict): segment data as returned by
                `opfapi.get_segment_attributes`

        Returns:
            dict: otio clip object

        """
        segment = clip_data["PySegment"]
        s_track_name = segment.parent.name.get_value()
        timeline_range = self._create_otio_time_range_from_timeline_item_data(
            clip_data)

        for otio_clip in self.otio_timeline.each_clip():
            track_name = otio_clip.parent().name
            parent_range = otio_clip.range_in_parent()
            if s_track_name not in track_name:
                continue
            if otio_clip.name not in segment.name.get_value():
                continue
            if is_overlapping_otio_ranges(
                    parent_range, timeline_range, strict=True):

                # add pypedata marker to otio_clip metadata
                for marker in otio_clip.markers:
                    if opfapi.MARKER_NAME in marker.name:
                        otio_clip.metadata.update(marker.metadata)
                return {"otioClip": otio_clip}

        return None

    def _create_otio_time_range_from_timeline_item_data(self, clip_data):
        frame_start = int(clip_data["record_in"])
        frame_duration = int(clip_data["record_duration"])

        return flame_export.create_otio_time_range(
            frame_start, frame_duration, self.fps)
@@ -0,0 +1,67 @@
import pyblish.api

import ayon_flame.api as opfapi
from ayon_flame.otio import flame_export
from ayon_core.pipeline.create import get_product_name


class CollectTimelineOTIO(pyblish.api.ContextPlugin):
    """Inject the current working context into publish context"""

    label = "Collect Timeline OTIO"
    order = pyblish.api.CollectorOrder - 0.099

    def process(self, context):
        # plugin defined
        product_type = "workfile"
        variant = "otioTimeline"

        # main
        folder_entity = context.data["folderEntity"]
        project = opfapi.get_current_project()
        sequence = opfapi.get_current_sequence(opfapi.CTX.selection)

        # create product name
        task_entity = context.data["taskEntity"]
        task_name = task_type = None
        if task_entity:
            task_name = task_entity["name"]
            task_type = task_entity["taskType"]
        product_name = get_product_name(
            context.data["projectName"],
            task_name,
            task_type,
            context.data["hostName"],
            product_type,
            variant,
            project_settings=context.data["project_settings"]
        )

        # adding otio timeline to context
        with opfapi.maintained_segment_selection(sequence) as selected_seg:
            otio_timeline = flame_export.create_otio_timeline(sequence)

            instance_data = {
                "name": product_name,
                "folderPath": folder_entity["path"],
                "productName": product_name,
                "productType": product_type,
                "family": product_type,
                "families": [product_type]
            }

            # create instance with workfile
            instance = context.create_instance(**instance_data)
            self.log.info("Creating instance: {}".format(instance))

            # update context with main project attributes
            context.data.update({
                "flameProject": project,
                "flameSequence": sequence,
                "otioTimeline": otio_timeline,
                "currentFile": "Flame/{}/{}".format(
                    project.name, sequence.name
                ),
                "flameSelectedSegments": selected_seg,
                # sequence.frame_rate renders like "25.0 fps";
                # strip the trailing " fps" before casting
                "fps": float(str(sequence.frame_rate)[:-4])
            })
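The `fps` cast assumes Flame's `sequence.frame_rate` stringifies with a trailing ` fps` suffix; a quick check of the slicing on such strings:

# illustrative only: flame's sequence.frame_rate is assumed to render
# like "25.0 fps", so the last four characters are the " fps" suffix
frame_rate = "25.0 fps"
assert float(frame_rate[:-4]) == 25.0

frame_rate = "29.97 fps"
assert float(frame_rate[:-4]) == 29.97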
@@ -0,0 +1,43 @@
import os
import pyblish.api
import opentimelineio as otio
from ayon_core.pipeline import publish


class ExtractOTIOFile(publish.Extractor):
    """
    Extractor exporting an OTIO file
    """

    label = "Extract OTIO file"
    order = pyblish.api.ExtractorOrder - 0.45
    families = ["workfile"]
    hosts = ["flame"]

    def process(self, instance):
        # create representation data
        if "representations" not in instance.data:
            instance.data["representations"] = []

        name = instance.data["name"]
        staging_dir = self.staging_dir(instance)

        otio_timeline = instance.context.data["otioTimeline"]
        # create otio timeline representation
        otio_file_name = name + ".otio"
        otio_file_path = os.path.join(staging_dir, otio_file_name)

        # export otio file to temp dir
        otio.adapters.write_to_file(otio_timeline, otio_file_path)

        representation_otio = {
            'name': "otio",
            'ext': "otio",
            'files': otio_file_name,
            "stagingDir": staging_dir,
        }

        instance.data["representations"].append(representation_otio)

        self.log.info("Added OTIO file representation: {}".format(
            representation_otio))
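Since OTIO adapters are symmetric, the extracted file can be read back to verify the timeline survived serialization, just as `CollectTestSelection` does earlier in this diff. A minimal sketch, assuming `otio_file_path` points at the extracted file:

import opentimelineio as otio

# re-read the extracted file and inspect its structure
read_back = otio.adapters.read_from_file(otio_file_path)
assert isinstance(read_back, otio.schema.Timeline)
print(read_back.name, len(list(read_back.each_clip())))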
@@ -0,0 +1,560 @@
import os
import re
from copy import deepcopy

import pyblish.api

from ayon_core.pipeline import publish
from ayon_flame import api as opfapi
from ayon_flame.api import MediaInfoFile
from ayon_core.pipeline.editorial import (
    get_media_range_with_retimes
)

import flame


class ExtractProductResources(publish.Extractor):
    """
    Extractor for transcoding files from Flame clip
    """

    label = "Extract product resources"
    order = pyblish.api.ExtractorOrder
    families = ["clip"]
    hosts = ["flame"]

    settings_category = "flame"

    # plugin defaults
    keep_original_representation = False

    default_presets = {
        "thumbnail": {
            "active": True,
            "ext": "jpg",
            "xml_preset_file": "Jpeg (8-bit).xml",
            "xml_preset_dir": "",
            "export_type": "File Sequence",
            "parsed_comment_attrs": False,
            "colorspace_out": "Output - sRGB",
            "representation_add_range": False,
            "representation_tags": ["thumbnail"],
            "path_regex": ".*"
        }
    }

    # hide publisher during exporting
    hide_ui_on_process = True

    # settings
    export_presets_mapping = []

    def process(self, instance):
        if not self.keep_original_representation:
            # remove previous representation if not needed
            instance.data["representations"] = []

        # flame objects
        segment = instance.data["item"]
        folder_path = instance.data["folderPath"]
        segment_name = segment.name.get_value()
        clip_path = instance.data["path"]
        sequence_clip = instance.context.data["flameSequence"]

        # segment's parent track name
        s_track_name = segment.parent.name.get_value()

        # get configured workfile frame start/end (handles excluded)
        frame_start = instance.data["frameStart"]
        # get media source first frame
        source_first_frame = instance.data["sourceFirstFrame"]

        self.log.debug("_ frame_start: {}".format(frame_start))
        self.log.debug("_ source_first_frame: {}".format(source_first_frame))

        # get timeline in/out of segment
        clip_in = instance.data["clipIn"]
        clip_out = instance.data["clipOut"]

        # get retimed attributes
        retimed_data = self._get_retimed_attributes(instance)

        # get individual keys
        retimed_handle_start = retimed_data["handle_start"]
        retimed_handle_end = retimed_data["handle_end"]
        retimed_source_duration = retimed_data["source_duration"]
        retimed_speed = retimed_data["speed"]

        # get handles value - take only the max from both
        handle_start = instance.data["handleStart"]
        handle_end = instance.data["handleEnd"]
        handles = max(handle_start, handle_end)
        include_handles = instance.data.get("includeHandles")
        retimed_handles = instance.data.get("retimedHandles")

        # get media source range with handles
        source_start_handles = instance.data["sourceStartH"]
        source_end_handles = instance.data["sourceEndH"]

        # retime if needed
        if retimed_speed != 1.0:
            if retimed_handles:
                # handles are retimed
                source_start_handles = (
                    instance.data["sourceStart"] - retimed_handle_start)
                source_end_handles = (
                    source_start_handles
                    + (retimed_source_duration - 1)
                    + retimed_handle_start
                    + retimed_handle_end
                )

            else:
                # handles are not retimed
                source_end_handles = (
                    source_start_handles
                    + (retimed_source_duration - 1)
                    + handle_start
                    + handle_end
                )

        # get frame range with handles for representation range
        frame_start_handle = frame_start - handle_start
        repre_frame_start = frame_start_handle
        if include_handles:
            if retimed_speed == 1.0 or not retimed_handles:
                frame_start_handle = frame_start
            else:
                frame_start_handle = (
                    frame_start - handle_start) + retimed_handle_start

        self.log.debug("_ frame_start_handle: {}".format(
            frame_start_handle))
        self.log.debug("_ repre_frame_start: {}".format(
            repre_frame_start))

        # calculate duration with handles
        source_duration_handles = (
            source_end_handles - source_start_handles) + 1

        self.log.debug("_ source_duration_handles: {}".format(
            source_duration_handles))

        # create staging dir path
        staging_dir = self.staging_dir(instance)

        # append staging dir for later cleanup
        instance.context.data["cleanupFullPaths"].append(staging_dir)

        export_presets_mapping = {}
        for preset_mapping in deepcopy(self.export_presets_mapping):
            name = preset_mapping.pop("name")
            export_presets_mapping[name] = preset_mapping

        # add default preset type for thumbnail and reviewable video
        # update them with settings and override in case the same
        # are found in there
        _preset_keys = [k.split('_')[0] for k in export_presets_mapping]
        export_presets = {
            k: v
            for k, v in deepcopy(self.default_presets).items()
            if k not in _preset_keys
        }
        export_presets.update(export_presets_mapping)

        if not instance.data.get("versionData"):
            instance.data["versionData"] = {}

        # set versionData if any retime
        version_data = retimed_data.get("version_data")
        self.log.debug("_ version_data: {}".format(version_data))

        if version_data:
            instance.data["versionData"].update(version_data)

        # version data start frame
        version_frame_start = frame_start
        if include_handles:
            version_frame_start = frame_start_handle
        if retimed_speed != 1.0:
            if retimed_handles:
                instance.data["versionData"].update({
                    "frameStart": version_frame_start,
                    "frameEnd": (
                        (version_frame_start + source_duration_handles - 1)
                        - (retimed_handle_start + retimed_handle_end)
                    )
                })
            else:
                instance.data["versionData"].update({
                    "handleStart": handle_start,
                    "handleEnd": handle_end,
                    "frameStart": version_frame_start,
                    "frameEnd": (
                        (version_frame_start + source_duration_handles - 1)
                        - (handle_start + handle_end)
                    )
                })
        self.log.debug("_ version_data: {}".format(
            instance.data["versionData"]
        ))

        # loop all preset names and export
        for unique_name, preset_config in export_presets.items():
            modify_xml_data = {}

            if self._should_skip(preset_config, clip_path, unique_name):
                continue

            # get all presets attributes
            extension = preset_config["ext"]
            preset_file = preset_config["xml_preset_file"]
            preset_dir = preset_config["xml_preset_dir"]
            export_type = preset_config["export_type"]
            repre_tags = preset_config["representation_tags"]
            parsed_comment_attrs = preset_config["parsed_comment_attrs"]
            color_out = preset_config["colorspace_out"]

            self.log.info(
                "Processing `{}` as `{}` to `{}` type...".format(
                    preset_file, export_type, extension
                )
            )

            exporting_clip = None
            name_pattern_xml = "<name>_{}.".format(
                unique_name)

            if export_type == "Sequence Publish":
                # change export clip to sequence
                exporting_clip = flame.duplicate(sequence_clip)

                # only keep visible layer where instance segment is child
                self.hide_others(
                    exporting_clip, segment_name, s_track_name)

                # change name pattern
                name_pattern_xml = (
                    "<segment name>_<shot name>_{}.").format(
                        unique_name)

                # only for h264 with baked retime
                in_mark = clip_in
                out_mark = clip_out + 1
                modify_xml_data.update({
                    "exportHandles": True,
                    "nbHandles": handles
                })
            else:
                in_mark = (source_start_handles - source_first_frame) + 1
                out_mark = in_mark + source_duration_handles
                exporting_clip = self.import_clip(clip_path)
                exporting_clip.name.set_value("{}_{}".format(
                    folder_path, segment_name))

            # add xml tags modifications
            modify_xml_data.update({
                # enum position low start from 0
                "frameIndex": 0,
                "startFrame": repre_frame_start,
                "namePattern": name_pattern_xml
            })

            if parsed_comment_attrs:
                # add any xml overrides collected from segment.comment
                modify_xml_data.update(instance.data["xml_overrides"])

            self.log.debug("_ in_mark: {}".format(in_mark))
            self.log.debug("_ out_mark: {}".format(out_mark))

            export_kwargs = {}
            # validate xml preset file is filled
            if preset_file == "":
                raise ValueError(
                    ("Check Settings for {} preset: "
                     "`XML preset file` is not filled").format(
                        unique_name)
                )

            # resolve xml preset dir if not filled
            if preset_dir == "":
                preset_dir = opfapi.get_preset_path_by_xml_name(
                    preset_file)

                if not preset_dir:
                    raise ValueError(
                        ("Check Settings for {} preset: "
                         "`XML preset file` {} is not found").format(
                            unique_name, preset_file)
                    )

            # create preset path
            preset_orig_xml_path = str(os.path.join(
                preset_dir, preset_file
            ))

            # define kwargs based on preset type
            if "thumbnail" in unique_name:
                modify_xml_data.update({
                    "video/posterFrame": True,
                    "video/useFrameAsPoster": 1,
                    "namePattern": "__thumbnail"
                })
                thumb_frame_number = int(in_mark + (
                    (out_mark - in_mark + 1) / 2))

                self.log.debug("__ thumb_frame_number: {}".format(
                    thumb_frame_number
                ))

                export_kwargs["thumb_frame_number"] = thumb_frame_number
            else:
                export_kwargs.update({
                    "in_mark": in_mark,
                    "out_mark": out_mark
                })

            preset_path = opfapi.modify_preset_file(
                preset_orig_xml_path, staging_dir, modify_xml_data)

            # get and make export dir paths
            export_dir_path = str(os.path.join(
                staging_dir, unique_name
            ))
            os.makedirs(export_dir_path)

            # export
            opfapi.export_clip(
                export_dir_path, exporting_clip, preset_path, **export_kwargs)

            repr_name = unique_name
            # make sure only first segment is used if underscore in name
            # HACK: `ftrackreview_withLUT` will result only in `ftrackreview`
            if (
                "thumbnail" in unique_name
                or "ftrackreview" in unique_name
            ):
                repr_name = unique_name.split("_")[0]

            # create representation data
            representation_data = {
                "name": repr_name,
                "outputName": repr_name,
                "ext": extension,
                "stagingDir": export_dir_path,
                "tags": repre_tags,
                "data": {
                    "colorspace": color_out
                },
                "load_to_batch_group": preset_config.get(
                    "load_to_batch_group"),
                "batch_group_loader_name": preset_config.get(
                    "batch_group_loader_name") or None
            }

            # collect all available content of export dir
            files = os.listdir(export_dir_path)

            # make sure no nested folders inside
            n_stage_dir, n_files = self._unfolds_nested_folders(
                export_dir_path, files, extension)

            # fix representation in case of nested folders
            if n_stage_dir:
                representation_data["stagingDir"] = n_stage_dir
                files = n_files

            # add files to representation but add
            # imagesequence as list
            if (
                # first check if path in files is not mov extension
                [
                    f for f in files
                    if os.path.splitext(f)[-1] == ".mov"
                ]
                # then try if thumbnail is not in unique name
                or repr_name == "thumbnail"
            ):
                representation_data["files"] = files.pop()
            else:
                representation_data["files"] = files

            # add frame range
            if preset_config["representation_add_range"]:
                representation_data.update({
                    "frameStart": repre_frame_start,
                    "frameEnd": (
                        repre_frame_start + source_duration_handles) - 1,
                    "fps": instance.data["fps"]
                })

            instance.data["representations"].append(representation_data)

            # add review family if found in tags
            if "review" in repre_tags:
                instance.data["families"].append("review")

            self.log.info("Added representation: {}".format(
                representation_data))

            if export_type == "Sequence Publish":
                # at the end remove the duplicated clip
                flame.delete(exporting_clip)

    def _get_retimed_attributes(self, instance):
        handle_start = instance.data["handleStart"]
        handle_end = instance.data["handleEnd"]

        # get basic variables
        otio_clip = instance.data["otioClip"]

        # get available range trimmed with processed retimes
        retimed_attributes = get_media_range_with_retimes(
            otio_clip, handle_start, handle_end)
        self.log.debug(
            ">> retimed_attributes: {}".format(retimed_attributes))

        r_media_in = int(retimed_attributes["mediaIn"])
        r_media_out = int(retimed_attributes["mediaOut"])
        version_data = retimed_attributes.get("versionData")

        return {
            "version_data": version_data,
            "handle_start": int(retimed_attributes["handleStart"]),
            "handle_end": int(retimed_attributes["handleEnd"]),
            "source_duration": (
                (r_media_out - r_media_in) + 1
            ),
            "speed": float(retimed_attributes["speed"])
        }
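The retimed source duration above is an inclusive frame count; a quick worked example with illustrative numbers (a 50-frame timeline cut at 2x speed reads 100 source frames):

# illustrative numbers, mirroring the _get_retimed_attributes math
r_media_in, r_media_out = 1001, 1100     # retimed media range (2x speed)
source_duration = (r_media_out - r_media_in) + 1
assert source_duration == 100            # frames read from source media

# with retimed handles of 5 frames each side, the exported clip covers
source_start_handles = 1001 - 5
source_end_handles = (
    source_start_handles + (source_duration - 1) + 5 + 5
)
assert (source_end_handles - source_start_handles) + 1 == 110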
    def _should_skip(self, preset_config, clip_path, unique_name):
        # get activating attributes
        activated_preset = preset_config["active"]
        filter_path_regex = preset_config.get("filter_path_regex")

        self.log.info(
            "Preset `{}` is active `{}` with filter `{}`".format(
                unique_name, activated_preset, filter_path_regex
            )
        )

        # skip if the preset is not activated
        if not activated_preset:
            return True

        # exclude by regex filter if any
        if (
            filter_path_regex
            and not re.search(filter_path_regex, clip_path)
        ):
            return True

        return False

    def _unfolds_nested_folders(self, stage_dir, files_list, ext):
        """Unfolds nested folders

        Args:
            stage_dir (str): path string with directory
            files_list (list): list of file names
            ext (str): extension (jpg)[without dot]

        Raises:
            IOError: in case no files were collected from any directory

        Returns:
            str, list: new staging dir path, new list of file names
            or
            None, None: In case single file in `files_list`
        """
        # exclude single files which have the same extension
        # as the input ext attr
        if (
            # only one file in list
            len(files_list) == 1
            # file has the same extension as input
            and ext in os.path.splitext(files_list[0])[-1]
        ):
            return None, None
        elif (
            # more than one file in list
            len(files_list) >= 1
            # extension is correct
            and ext in os.path.splitext(files_list[0])[-1]
            # test file exists
            and os.path.exists(
                os.path.join(stage_dir, files_list[0])
            )
        ):
            return None, None

        new_stage_dir = None
        new_files_list = []
        for file in files_list:
            search_path = os.path.join(stage_dir, file)
            if not os.path.isdir(search_path):
                continue
            for root, _dirs, files in os.walk(search_path):
                for _file in files:
                    _fn, _ext = os.path.splitext(_file)
                    if ext.lower() != _ext[1:].lower():
                        continue
                    new_files_list.append(_file)
                    if not new_stage_dir:
                        new_stage_dir = root

        if not new_stage_dir:
            raise AssertionError(
                "Files in `{}` are not correct! Check `{}`".format(
                    files_list, stage_dir)
            )

        return new_stage_dir, new_files_list

    def hide_others(self, sequence_clip, segment_name, track_name):
        """Helper method used only if sequence clip is used

        Args:
            sequence_clip (flame.Clip): sequence clip
            segment_name (str): segment name
            track_name (str): track name
        """
        # create otio tracks and clips
        for ver in sequence_clip.versions:
            for track in ver.tracks:
                if len(track.segments) == 0 and track.hidden.get_value():
                    continue

                # hide tracks which are not the parent track
                if track.name.get_value() != track_name:
                    track.hidden = True
                    continue

                # hide all other segments
                for segment in track.segments:
                    if segment.name.get_value() != segment_name:
                        segment.hidden = True

    def import_clip(self, path):
        """
        Import clip from path
        """
        dir_path = os.path.dirname(path)
        media_info = MediaInfoFile(path, logger=self.log)
        file_pattern = media_info.file_pattern
        self.log.debug("__ file_pattern: {}".format(file_pattern))

        # rejoin the pattern to dir path
        new_path = os.path.join(dir_path, file_pattern)

        clips = flame.import_clips(new_path)
        self.log.info("Clips [{}] imported from `{}`".format(clips, path))

        if not clips:
            self.log.warning(
                "Path `{}` does not contain any clips".format(path))
            return None
        elif len(clips) > 1:
            self.log.warning(
                "Path `{}` contains more than one clip".format(path)
            )
        return clips[0]
@@ -0,0 +1,339 @@
import os
import copy
from collections import OrderedDict
from pprint import pformat
import pyblish
import ayon_flame.api as opfapi
import ayon_core.pipeline as op_pipeline
from ayon_core.pipeline.workfile import get_workdir


class IntegrateBatchGroup(pyblish.api.InstancePlugin):
    """Integrate published shot to batch group"""

    order = pyblish.api.IntegratorOrder + 0.45
    label = "Integrate Batch Groups"
    hosts = ["flame"]
    families = ["clip"]

    settings_category = "flame"

    # settings
    default_loader = "LoadClip"

    def process(self, instance):
        add_tasks = instance.data["flameAddTasks"]

        # iterate all tasks from settings
        for task_data in add_tasks:
            # exclude batch group
            if not task_data["create_batch_group"]:
                continue

            # create or get already created batch group
            bgroup = self._get_batch_group(instance, task_data)

            # add batch group content
            all_batch_nodes = self._add_nodes_to_batch_with_links(
                instance, task_data, bgroup)

            for name, node in all_batch_nodes.items():
                self.log.debug("name: {}, dir: {}".format(
                    name, dir(node)
                ))
                self.log.debug("__ node.attributes: {}".format(
                    node.attributes
                ))

            # load plate to batch group
            self.log.info("Loading product `{}` into batch `{}`".format(
                instance.data["productName"], bgroup.name.get_value()
            ))
            self._load_clip_to_context(instance, bgroup)

    def _add_nodes_to_batch_with_links(self, instance, task_data, batch_group):
        # get write file node properties > OrderedDict because order matters
        write_pref_data = self._get_write_prefs(instance, task_data)

        batch_nodes = [
            {
                "type": "comp",
                "properties": {},
                "id": "comp_node01"
            },
            {
                "type": "Write File",
                "properties": write_pref_data,
                "id": "write_file_node01"
            }
        ]
        batch_links = [
            {
                "from_node": {
                    "id": "comp_node01",
                    "connector": "Result"
                },
                "to_node": {
                    "id": "write_file_node01",
                    "connector": "Front"
                }
            }
        ]

        # add nodes into batch group
        return opfapi.create_batch_group_conent(
            batch_nodes, batch_links, batch_group)

    def _load_clip_to_context(self, instance, bgroup):
        # get all loaders for host
        loaders_by_name = {
            loader.__name__: loader
            for loader in op_pipeline.discover_loader_plugins()
        }

        # get all published representations
        published_representations = instance.data["published_representations"]
        repres_db_id_by_name = {
            repre_info["representation"]["name"]: repre_id
            for repre_id, repre_info in published_representations.items()
        }

        # get all loadable representations
        repres_by_name = {
            repre["name"]: repre for repre in instance.data["representations"]
        }

        # get repre_id for the loadable representations
        loader_name_by_repre_id = {
            repres_db_id_by_name[repr_name]: {
                "loader": repr_data["batch_group_loader_name"],
                # add repre data for exception logging
                "_repre_data": repr_data
            }
            for repr_name, repr_data in repres_by_name.items()
            if repr_data.get("load_to_batch_group")
        }

        self.log.debug("__ loader_name_by_repre_id: {}".format(pformat(
            loader_name_by_repre_id)))

        # get representation context from the repre_id
        repre_contexts = op_pipeline.load.get_repres_contexts(
            loader_name_by_repre_id.keys())

        self.log.debug("__ repre_contexts: {}".format(pformat(
            repre_contexts)))

        # loop all returned repres from repre_context dict
        for repre_id, repre_context in repre_contexts.items():
            self.log.debug("__ repre_id: {}".format(repre_id))
            # get loader name by representation id
            loader_name = (
                loader_name_by_repre_id[repre_id]["loader"]
                # if nothing was added to settings fallback to default
                or self.default_loader
            )

            # get loader plugin
            loader_plugin = loaders_by_name.get(loader_name)
            if loader_plugin:
                # load to flame by representation context
                try:
                    op_pipeline.load.load_with_repre_context(
                        loader_plugin, repre_context, **{
                            "data": {
                                "workdir": self.task_workdir,
                                "batch": bgroup
                            }
                        })
                except op_pipeline.load.IncompatibleLoaderError as msg:
                    self.log.error(
                        "Check allowed representations for Loader `{}` "
                        "in settings > error: {}".format(
                            loader_plugin.__name__, msg))
                    self.log.error(
                        "Representation context >>{}<< is not compatible "
                        "with loader `{}`".format(
                            pformat(repre_context), loader_plugin.__name__
                        )
                    )
            else:
                self.log.warning(
                    "Something went wrong and there is no Loader found for "
                    "following data: {}".format(
                        pformat(loader_name_by_repre_id))
                )

    def _get_batch_group(self, instance, task_data):
        frame_start = instance.data["frameStart"]
        frame_end = instance.data["frameEnd"]
        handle_start = instance.data["handleStart"]
        handle_end = instance.data["handleEnd"]
        frame_duration = (frame_end - frame_start) + 1
        folder_path = instance.data["folderPath"]

        task_name = task_data["name"]
        batchgroup_name = "{}_{}".format(folder_path, task_name)

        batch_data = {
            "shematic_reels": [
                "OP_LoadedReel"
            ],
            "handleStart": handle_start,
            "handleEnd": handle_end
        }
        self.log.debug(
            "__ batch_data: {}".format(pformat(batch_data)))

        # check if the batch group already exists
        bgroup = opfapi.get_batch_group_from_desktop(batchgroup_name)

        if not bgroup:
            self.log.info(
                "Creating new batch group: {}".format(batchgroup_name))
            # create batch with utils
            bgroup = opfapi.create_batch_group(
                batchgroup_name,
                frame_start,
                frame_duration,
                **batch_data
            )

        else:
            self.log.info(
                "Updating batch group: {}".format(batchgroup_name))
            # update already created batch group
            bgroup = opfapi.create_batch_group(
                batchgroup_name,
                frame_start,
                frame_duration,
                update_batch_group=bgroup,
                **batch_data
            )

        return bgroup

    def _get_anatomy_data_with_current_task(self, instance, task_data):
        anatomy_data = copy.deepcopy(instance.data["anatomyData"])
        task_name = task_data["name"]
        task_type = task_data["type"]
        anatomy_obj = instance.context.data["anatomy"]

        # update task data in anatomy data
        project_task_types = anatomy_obj["tasks"]
        task_code = project_task_types.get(task_type, {}).get("shortName")
        anatomy_data.update({
            "task": {
                "name": task_name,
                "type": task_type,
                "short": task_code
            }
        })
        return anatomy_data

    def _get_write_prefs(self, instance, task_data):
        # update task in anatomy data
        anatomy_data = self._get_anatomy_data_with_current_task(
            instance, task_data)

        self.task_workdir = self._get_shot_task_dir_path(
            instance, task_data)
        self.log.debug("__ task_workdir: {}".format(
            self.task_workdir))

        # TODO: this might be done with template in settings
        render_dir_path = os.path.join(
            self.task_workdir, "render", "flame")

        if not os.path.exists(render_dir_path):
            os.makedirs(render_dir_path, mode=0o777)

        # TODO: add most of these to `imageio/flame/batch/write_node`
        name = "{project[code]}_{folder[name]}_{task[name]}".format(
            **anatomy_data
        )

        # The path attribute where the rendered clip is exported
        # /path/to/file.[0001-0010].exr
        media_path = render_dir_path
        # name of file represented by tokens
        media_path_pattern = (
            "<name>_v<iteration###>/<name>_v<iteration###>.<frame><ext>")
        # The Create Open Clip attribute of the Write File node.
        # Determines if an Open Clip is created by the Write File node.
        create_clip = True
        # The Include Setup attribute of the Write File node.
        # Determines if a Batch Setup file is created by the Write File node.
        include_setup = True
        # The path attribute where the Open Clip file is exported by
        # the Write File node.
        create_clip_path = "<name>"
        # The path attribute where the Batch setup file
        # is exported by the Write File node.
        include_setup_path = "./<name>_v<iteration###>"
        # The file type for the files written by the Write File node.
        # Setting this attribute also overwrites format_extension,
        # bit_depth and compress_mode to match the defaults for
        # this file type.
        file_type = "OpenEXR"
        # The file extension for the files written by the Write File node.
        # This attribute resets to match file_type whenever file_type
        # is set. If you require a specific extension, you must
        # set format_extension after setting file_type.
        format_extension = "exr"
        # The bit depth for the files written by the Write File node.
        # This attribute resets to match file_type whenever file_type is set.
        bit_depth = "16"
        # The compressing attribute for the files exported by the Write
        # File node. Only relevant when file_type in 'OpenEXR', 'Sgi', 'Tiff'
        compress = True
        # The compression format attribute for the specific File Types
        # export by the Write File node. You must set compress_mode
        # after setting file_type.
        compress_mode = "DWAB"
        # The frame index mode attribute of the Write File node.
        # Value range: `Use Timecode` or `Use Start Frame`
        frame_index_mode = "Use Start Frame"
        frame_padding = 6
        # The versioning mode of the Open Clip exported by the Write File
        # node. Only available if create_clip = True.
        version_mode = "Follow Iteration"
        version_name = "v<version>"
        version_padding = 3

        # need to make sure the order of keys is correct
        return OrderedDict((
            ("name", name),
            ("media_path", media_path),
            ("media_path_pattern", media_path_pattern),
            ("create_clip", create_clip),
            ("include_setup", include_setup),
            ("create_clip_path", create_clip_path),
            ("include_setup_path", include_setup_path),
            ("file_type", file_type),
            ("format_extension", format_extension),
            ("bit_depth", bit_depth),
            ("compress", compress),
            ("compress_mode", compress_mode),
            ("frame_index_mode", frame_index_mode),
            ("frame_padding", frame_padding),
            ("version_mode", version_mode),
            ("version_name", version_name),
            ("version_padding", version_padding)
        ))
|
||||
def _get_shot_task_dir_path(self, instance, task_data):
|
||||
project_entity = instance.data["projectEntity"]
|
||||
folder_entity = instance.data["folderEntity"]
|
||||
task_entity = instance.data["taskEntity"]
|
||||
anatomy = instance.context.data["anatomy"]
|
||||
project_settings = instance.context.data["project_settings"]
|
||||
|
||||
return get_workdir(
|
||||
project_entity,
|
||||
folder_entity,
|
||||
task_entity,
|
||||
"flame",
|
||||
anatomy=anatomy,
|
||||
project_settings=project_settings
|
||||
)
|
||||
|
|
@ -0,0 +1,58 @@
|
|||
<?xml version="1.0"?>
|
||||
<preset version="9">
|
||||
<type>sequence</type>
|
||||
<comment>Creates an 8-bit Jpeg file per segment.</comment>
|
||||
<sequence>
|
||||
<fileType>NONE</fileType>
|
||||
<namePattern></namePattern>
|
||||
<composition><name></composition>
|
||||
<includeVideo>True</includeVideo>
|
||||
<exportVideo>True</exportVideo>
|
||||
<videoMedia>
|
||||
<mediaFileType>image</mediaFileType>
|
||||
<commit>FX</commit>
|
||||
<flatten>NoChange</flatten>
|
||||
<exportHandles>False</exportHandles>
|
||||
<nbHandles>10</nbHandles>
|
||||
</videoMedia>
|
||||
<includeAudio>True</includeAudio>
|
||||
<exportAudio>False</exportAudio>
|
||||
<audioMedia>
|
||||
<mediaFileType>audio</mediaFileType>
|
||||
<commit>FX</commit>
|
||||
<flatten>FlattenTracks</flatten>
|
||||
<exportHandles>True</exportHandles>
|
||||
<nbHandles>10</nbHandles>
|
||||
</audioMedia>
|
||||
</sequence>
|
||||
<video>
|
||||
<fileType>Jpeg</fileType>
|
||||
<codec>923688</codec>
|
||||
<codecProfile></codecProfile>
|
||||
<namePattern><shot name></namePattern>
|
||||
<compressionQuality>100</compressionQuality>
|
||||
<transferCharacteristic>2</transferCharacteristic>
|
||||
<colorimetricSpecification>4</colorimetricSpecification>
|
||||
<includeAlpha>False</includeAlpha>
|
||||
<overwriteWithVersions>False</overwriteWithVersions>
|
||||
<posterFrame>True</posterFrame>
|
||||
<useFrameAsPoster>1</useFrameAsPoster>
|
||||
<resize>
|
||||
<resizeType>fit</resizeType>
|
||||
<resizeFilter>lanczos</resizeFilter>
|
||||
<width>1920</width>
|
||||
<height>1080</height>
|
||||
<bitsPerChannel>8</bitsPerChannel>
|
||||
<numChannels>3</numChannels>
|
||||
<floatingPoint>False</floatingPoint>
|
||||
<bigEndian>True</bigEndian>
|
||||
<pixelRatio>1</pixelRatio>
|
||||
<scanFormat>P</scanFormat>
|
||||
</resize>
|
||||
</video>
|
||||
<name>
|
||||
<framePadding>4</framePadding>
|
||||
<startFrame>1</startFrame>
|
||||
<frameIndex>2</frameIndex>
|
||||
</name>
|
||||
</preset>
|
||||
|
|
@ -0,0 +1,72 @@
|
|||
<?xml version="1.0"?>
|
||||
<preset version="10">
|
||||
<type>sequence</type>
|
||||
<comment>Creates MOV H264 files per segment with thumbnail</comment>
|
||||
<sequence>
|
||||
<fileType>NONE</fileType>
|
||||
<namePattern></namePattern>
|
||||
<composition><name></composition>
|
||||
<includeVideo>True</includeVideo>
|
||||
<exportVideo>True</exportVideo>
|
||||
<videoMedia>
|
||||
<mediaFileType>movie</mediaFileType>
|
||||
<commit>FX</commit>
|
||||
<flatten>FlattenTracks</flatten>
|
||||
<exportHandles>True</exportHandles>
|
||||
<nbHandles>5</nbHandles>
|
||||
</videoMedia>
|
||||
<includeAudio>True</includeAudio>
|
||||
<exportAudio>False</exportAudio>
|
||||
<audioMedia>
|
||||
<mediaFileType>audio</mediaFileType>
|
||||
<commit>Original</commit>
|
||||
<flatten>NoChange</flatten>
|
||||
<exportHandles>True</exportHandles>
|
||||
<nbHandles>5</nbHandles>
|
||||
</audioMedia>
|
||||
</sequence>
|
||||
<movie>
|
||||
<fileType>QuickTime</fileType>
|
||||
<namePattern><shot name></namePattern>
|
||||
<yuvHeadroom>0</yuvHeadroom>
|
||||
<yuvColourSpace>PCS_709</yuvColourSpace>
|
||||
<operationalPattern>None</operationalPattern>
|
||||
<companyName>Autodesk</companyName>
|
||||
<productName>Flame</productName>
|
||||
<versionName>2021</versionName>
|
||||
</movie>
|
||||
<video>
|
||||
<fileType>QuickTime</fileType>
|
||||
<codec>33622016</codec>
|
||||
<codecProfile>
|
||||
<rootPath>/opt/Autodesk/mediaconverter/</rootPath>
|
||||
<targetVersion>2021</targetVersion>
|
||||
<pathSuffix>/profiles/.33622016/HDTV_720p_8Mbits.cdxprof</pathSuffix>
|
||||
</codecProfile>
|
||||
<namePattern><shot name>_<video codec></namePattern>
|
||||
<compressionQuality>50</compressionQuality>
|
||||
<transferCharacteristic>2</transferCharacteristic>
|
||||
<colorimetricSpecification>4</colorimetricSpecification>
|
||||
<includeAlpha>False</includeAlpha>
|
||||
<overwriteWithVersions>False</overwriteWithVersions>
|
||||
<posterFrame>False</posterFrame>
|
||||
<useFrameAsPoster>1</useFrameAsPoster>
|
||||
<resize>
|
||||
<resizeType>fit</resizeType>
|
||||
<resizeFilter>gaussian</resizeFilter>
|
||||
<width>1920</width>
|
||||
<height>1080</height>
|
||||
<bitsPerChannel>8</bitsPerChannel>
|
||||
<numChannels>3</numChannels>
|
||||
<floatingPoint>False</floatingPoint>
|
||||
<bigEndian>True</bigEndian>
|
||||
<pixelRatio>1</pixelRatio>
|
||||
<scanFormat>P</scanFormat>
|
||||
</resize>
|
||||
</video>
|
||||
<name>
|
||||
<framePadding>4</framePadding>
|
||||
<startFrame>1</startFrame>
|
||||
<frameIndex>2</frameIndex>
|
||||
</name>
|
||||
</preset>
|
||||
|
|
@ -0,0 +1,162 @@
|
|||
import os
|
||||
import io
|
||||
import ConfigParser as CP
|
||||
from xml.etree import ElementTree as ET
|
||||
from contextlib import contextmanager
|
||||
|
||||
PLUGIN_DIR = os.path.dirname(os.path.dirname(__file__))
|
||||
EXPORT_PRESETS_DIR = os.path.join(PLUGIN_DIR, "export_preset")
|
||||
|
||||
CONFIG_DIR = os.path.join(os.path.expanduser(
|
||||
"~/.openpype"), "openpype_babypublisher")
|
||||
|
||||
|
||||
@contextmanager
|
||||
def make_temp_dir():
|
||||
import tempfile
|
||||
|
||||
try:
|
||||
dirpath = tempfile.mkdtemp()
|
||||
|
||||
yield dirpath
|
||||
|
||||
except IOError as _error:
|
||||
raise IOError("Not able to create temp dir file: {}".format(_error))
|
||||
|
||||
finally:
|
||||
pass
|
||||
|
||||
|
||||
@contextmanager
|
||||
def get_config(section=None):
|
||||
cfg_file_path = os.path.join(CONFIG_DIR, "settings.ini")
|
||||
|
||||
# create config dir
|
||||
if not os.path.exists(CONFIG_DIR):
|
||||
print("making dirs at: `{}`".format(CONFIG_DIR))
|
||||
os.makedirs(CONFIG_DIR, mode=0o777)
|
||||
|
||||
# write default data to settings.ini
|
||||
if not os.path.exists(cfg_file_path):
|
||||
default_cfg = cfg_default()
|
||||
config = CP.RawConfigParser()
|
||||
config.readfp(io.BytesIO(default_cfg))
|
||||
with open(cfg_file_path, 'wb') as cfg_file:
|
||||
config.write(cfg_file)
|
||||
|
||||
try:
|
||||
config = CP.RawConfigParser()
|
||||
config.read(cfg_file_path)
|
||||
if section:
|
||||
_cfg_data = {
|
||||
k: v
|
||||
for s in config.sections()
|
||||
for k, v in config.items(s)
|
||||
if s == section
|
||||
}
|
||||
else:
|
||||
_cfg_data = {s: dict(config.items(s)) for s in config.sections()}
|
||||
|
||||
yield _cfg_data
|
||||
|
||||
except IOError as _error:
|
||||
raise IOError('Not able to read settings.ini file: {}'.format(_error))
|
||||
|
||||
finally:
|
||||
pass
|
||||
|
||||
|
||||
def set_config(cfg_data, section=None):
|
||||
cfg_file_path = os.path.join(CONFIG_DIR, "settings.ini")
|
||||
|
||||
config = CP.RawConfigParser()
|
||||
config.read(cfg_file_path)
|
||||
|
||||
try:
|
||||
if not section:
|
||||
for section in cfg_data:
|
||||
for key, value in cfg_data[section].items():
|
||||
config.set(section, key, value)
|
||||
else:
|
||||
for key, value in cfg_data.items():
|
||||
config.set(section, key, value)
|
||||
|
||||
with open(cfg_file_path, 'wb') as cfg_file:
|
||||
config.write(cfg_file)
|
||||
|
||||
except IOError as _error:
|
||||
raise IOError('Not able to write settings.ini file: {}'.format(_error))
|
||||
|
||||
|
||||
def cfg_default():
|
||||
return """
|
||||
[main]
|
||||
workfile_start_frame = 1001
|
||||
shot_handles = 0
|
||||
shot_name_template = {sequence}_{shot}
|
||||
hierarchy_template = shots[Folder]/{sequence}[Sequence]
|
||||
create_task_type = Compositing
|
||||
"""
|
||||
|
||||
|
||||
def configure_preset(file_path, data):
|
||||
split_fp = os.path.splitext(file_path)
|
||||
new_file_path = split_fp[0] + "_tmp" + split_fp[-1]
|
||||
with open(file_path, "r") as datafile:
|
||||
tree = ET.parse(datafile)
|
||||
for key, value in data.items():
|
||||
for element in tree.findall(".//{}".format(key)):
|
||||
print(element)
|
||||
element.text = str(value)
|
||||
tree.write(new_file_path)
|
||||
|
||||
return new_file_path
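|
||||
# Illustrative example (hypothetical path):
|
||||
# configure_preset("/tmp/preset.xml", {"nbHandles": 10}) writes
|
||||
# "/tmp/preset_tmp.xml" with every <nbHandles> element text set to "10"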
|
||||
|
||||
|
||||
def export_thumbnail(sequence, tempdir_path, data):
|
||||
import flame
|
||||
export_preset = os.path.join(
|
||||
EXPORT_PRESETS_DIR,
|
||||
"openpype_seg_thumbnails_jpg.xml"
|
||||
)
|
||||
new_path = configure_preset(export_preset, data)
|
||||
poster_frame_exporter = flame.PyExporter()
|
||||
poster_frame_exporter.foreground = True
|
||||
poster_frame_exporter.export(sequence, new_path, tempdir_path)
|
||||
|
||||
|
||||
def export_video(sequence, tempdir_path, data):
|
||||
import flame
|
||||
export_preset = os.path.join(
|
||||
EXPORT_PRESETS_DIR,
|
||||
"openpype_seg_video_h264.xml"
|
||||
)
|
||||
new_path = configure_preset(export_preset, data)
|
||||
poster_frame_exporter = flame.PyExporter()
|
||||
poster_frame_exporter.foreground = True
|
||||
poster_frame_exporter.export(sequence, new_path, tempdir_path)
|
||||
|
||||
|
||||
def timecode_to_frames(timecode, framerate):
|
||||
def _seconds(value):
|
||||
if isinstance(value, str):
|
||||
_zip_ft = zip((3600, 60, 1, 1 / framerate), value.split(':'))
|
||||
return sum(f * float(t) for f, t in _zip_ft)
|
||||
elif isinstance(value, (int, float)):
|
||||
return value / framerate
|
||||
return 0
|
||||
|
||||
def _frames(seconds):
|
||||
return seconds * framerate
|
||||
|
||||
def tc_to_frames(_timecode, start=None):
|
||||
return _frames(_seconds(_timecode) - _seconds(start))
|
||||
|
||||
if '+' in timecode:
|
||||
timecode = timecode.replace('+', ':')
|
||||
elif '#' in timecode:
|
||||
timecode = timecode.replace('#', ':')
|
||||
|
||||
frames = int(round(tc_to_frames(timecode, start='00:00:00:00')))
|
||||
|
||||
return frames
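|
||||
# Illustrative example: timecode_to_frames("00:00:02:12", 24.0) -> 60
|
||||
# (2 seconds -> 48 frames, plus 12 frames)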
|
||||
|
|
@ -0,0 +1,459 @@
|
|||
import os
|
||||
import sys
|
||||
import six
|
||||
import re
|
||||
import json
|
||||
|
||||
import app_utils
|
||||
|
||||
# Fill following constants or set them via environment variable
|
||||
FTRACK_MODULE_PATH = None
|
||||
FTRACK_API_KEY = None
|
||||
FTRACK_API_USER = None
|
||||
FTRACK_SERVER = None
|
||||
|
||||
|
||||
def import_ftrack_api():
|
||||
try:
|
||||
import ftrack_api
|
||||
return ftrack_api
|
||||
except ImportError:
|
||||
ftrk_m_p = FTRACK_MODULE_PATH or os.getenv("FTRACK_MODULE_PATH")
|
||||
sys.path.append(ftrk_m_p)
|
||||
import ftrack_api
|
||||
return ftrack_api
|
||||
|
||||
|
||||
def get_ftrack_session():
|
||||
ftrack_api = import_ftrack_api()
|
||||
|
||||
# fill your own credentials
|
||||
url = FTRACK_SERVER or os.getenv("FTRACK_SERVER") or ""
|
||||
user = FTRACK_API_USER or os.getenv("FTRACK_API_USER") or ""
|
||||
api = FTRACK_API_KEY or os.getenv("FTRACK_API_KEY") or ""
|
||||
|
||||
first_validation = True
|
||||
if not user:
|
||||
print('- Ftrack Username is not set')
|
||||
first_validation = False
|
||||
if not api:
|
||||
print('- Ftrack API key is not set')
|
||||
first_validation = False
|
||||
if not first_validation:
|
||||
return False
|
||||
|
||||
try:
|
||||
return ftrack_api.Session(
|
||||
server_url=url,
|
||||
api_user=user,
|
||||
api_key=api
|
||||
)
|
||||
except Exception as _e:
|
||||
print("Can't log into Ftrack with used credentials: {}".format(_e))
|
||||
ftrack_cred = {
|
||||
'Ftrack server': str(url),
|
||||
'Username': str(user),
|
||||
'API key': str(api),
|
||||
}
|
||||
|
||||
item_lens = [len(key) + 1 for key in ftrack_cred]
|
||||
justify_len = max(*item_lens)
|
||||
for key, value in ftrack_cred.items():
|
||||
print('{} {}'.format((key + ':').ljust(justify_len, ' '), value))
|
||||
return False
|
||||
|
||||
|
||||
def get_project_task_types(project_entity):
|
||||
tasks = {}
|
||||
proj_template = project_entity['project_schema']
|
||||
temp_task_types = proj_template['_task_type_schema']['types']
|
||||
|
||||
for task_type in temp_task_types:
|
||||
if task_type['name'] not in tasks:
|
||||
tasks[task_type['name']] = task_type
|
||||
|
||||
return tasks
|
||||
|
||||
|
||||
class FtrackComponentCreator:
|
||||
default_location = "ftrack.server"
|
||||
ftrack_locations = {}
|
||||
thumbnails = []
|
||||
videos = []
|
||||
temp_dir = None
|
||||
|
||||
def __init__(self, session):
|
||||
self.session = session
|
||||
self._get_ftrack_location()
|
||||
|
||||
def generate_temp_data(self, selection, change_preset_data):
|
||||
with app_utils.make_temp_dir() as tempdir_path:
|
||||
for seq in selection:
|
||||
app_utils.export_thumbnail(
|
||||
seq, tempdir_path, change_preset_data)
|
||||
app_utils.export_video(seq, tempdir_path, change_preset_data)
|
||||
|
||||
return tempdir_path
|
||||
|
||||
def collect_generated_data(self, tempdir_path):
|
||||
temp_files = os.listdir(tempdir_path)
|
||||
self.thumbnails = [f for f in temp_files if "jpg" in f]
|
||||
self.videos = [f for f in temp_files if "mov" in f]
|
||||
self.temp_dir = tempdir_path
|
||||
|
||||
def get_thumb_path(self, shot_name):
|
||||
# get component files
|
||||
thumb_f = next((f for f in self.thumbnails if shot_name in f), None)
|
||||
return os.path.join(self.temp_dir, thumb_f)
|
||||
|
||||
def get_video_path(self, shot_name):
|
||||
# get component files
|
||||
video_f = next((f for f in self.videos if shot_name in f), None)
|
||||
return os.path.join(self.temp_dir, video_f)
|
||||
|
||||
def close(self):
|
||||
self.ftrack_locations = {}
|
||||
self.session = None
|
||||
|
||||
def create_component(self, shot_entity, data, assetversion_entity=None):
|
||||
self.shot_entity = shot_entity
|
||||
location = self._get_ftrack_location()
|
||||
|
||||
file_path = data["file_path"]
|
||||
|
||||
# get extension
|
||||
file = os.path.basename(file_path)
|
||||
_n, ext = os.path.splitext(file)
|
||||
|
||||
name = "ftrackreview-mp4" if "mov" in ext else "thumbnail"
|
||||
|
||||
component_data = {
|
||||
"name": name,
|
||||
"file_path": file_path,
|
||||
"file_type": ext,
|
||||
"location": location
|
||||
}
|
||||
|
||||
if name == "ftrackreview-mp4":
|
||||
duration = data["duration"]
|
||||
handles = data["handles"]
|
||||
fps = data["fps"]
|
||||
component_data["metadata"] = {
|
||||
'ftr_meta': json.dumps({
|
||||
'frameIn': int(0),
|
||||
'frameOut': int(duration + (handles * 2)),
|
||||
'frameRate': float(fps)
|
||||
})
|
||||
}
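|
||||
# e.g. duration 100 and handles 10 (hypothetical values) produce
|
||||
# ftr_meta {"frameIn": 0, "frameOut": 120, "frameRate": 25.0}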
|
||||
if not assetversion_entity:
|
||||
# get assettype entity from session
|
||||
assettype_entity = self._get_assettype({"short": "reference"})
|
||||
|
||||
# get or create asset entity from session
|
||||
asset_entity = self._get_asset({
|
||||
"name": "plateReference",
|
||||
"type": assettype_entity,
|
||||
"parent": self.shot_entity
|
||||
})
|
||||
|
||||
# get or create assetversion entity from session
|
||||
assetversion_entity = self._get_assetversion({
|
||||
"version": 0,
|
||||
"asset": asset_entity
|
||||
})
|
||||
|
||||
# get or create component entity
|
||||
self._set_component(component_data, {
|
||||
"name": name,
|
||||
"version": assetversion_entity,
|
||||
})
|
||||
|
||||
return assetversion_entity
|
||||
|
||||
def _overwrite_members(self, entity, data):
|
||||
origin_location = self._get_ftrack_location("ftrack.origin")
|
||||
location = data.pop("location")
|
||||
|
||||
self._remove_component_from_location(entity, location)
|
||||
|
||||
entity["file_type"] = data["file_type"]
|
||||
|
||||
try:
|
||||
origin_location.add_component(
|
||||
entity, data["file_path"]
|
||||
)
|
||||
# Add components to location.
|
||||
location.add_component(
|
||||
entity, origin_location, recursive=True)
|
||||
except Exception as __e:
|
||||
print("Error: {}".format(__e))
|
||||
self._remove_component_from_location(entity, origin_location)
|
||||
origin_location.add_component(
|
||||
entity, data["file_path"]
|
||||
)
|
||||
# Add components to location.
|
||||
location.add_component(
|
||||
entity, origin_location, recursive=True)
|
||||
|
||||
def _remove_component_from_location(self, entity, location):
|
||||
print(location)
|
||||
# Removing existing members from location
|
||||
components = list(entity.get("members", []))
|
||||
components += [entity]
|
||||
for component in components:
|
||||
for loc in component.get("component_locations", []):
|
||||
if location["id"] == loc["location_id"]:
|
||||
print("<< Removing component: {}".format(component))
|
||||
location.remove_component(
|
||||
component, recursive=False
|
||||
)
|
||||
|
||||
# Deleting existing members on component entity
|
||||
for member in entity.get("members", []):
|
||||
self.session.delete(member)
|
||||
print("<< Deleting member: {}".format(member))
|
||||
del member
|
||||
|
||||
self._commit()
|
||||
|
||||
# Reset members in memory
|
||||
if "members" in entity.keys():
|
||||
entity["members"] = []
|
||||
|
||||
def _get_assettype(self, data):
|
||||
return self.session.query(
|
||||
self._query("AssetType", data)).first()
|
||||
|
||||
def _set_component(self, comp_data, base_data):
|
||||
component_metadata = comp_data.pop("metadata", {})
|
||||
|
||||
component_entity = self.session.query(
|
||||
self._query("Component", base_data)
|
||||
).first()
|
||||
|
||||
if component_entity:
|
||||
# overwrite existing members in component entity
|
||||
# - get data for member from `ftrack.origin` location
|
||||
self._overwrite_members(component_entity, comp_data)
|
||||
|
||||
# Adding metadata
|
||||
existing_component_metadata = component_entity["metadata"]
|
||||
existing_component_metadata.update(component_metadata)
|
||||
component_entity["metadata"] = existing_component_metadata
|
||||
return
|
||||
|
||||
assetversion_entity = base_data["version"]
|
||||
location = comp_data.pop("location")
|
||||
|
||||
component_entity = assetversion_entity.create_component(
|
||||
comp_data["file_path"],
|
||||
data=comp_data,
|
||||
location=location
|
||||
)
|
||||
|
||||
# Adding metadata
|
||||
existing_component_metadata = component_entity["metadata"]
|
||||
existing_component_metadata.update(component_metadata)
|
||||
component_entity["metadata"] = existing_component_metadata
|
||||
|
||||
if comp_data["name"] == "thumbnail":
|
||||
self.shot_entity["thumbnail_id"] = component_entity["id"]
|
||||
assetversion_entity["thumbnail_id"] = component_entity["id"]
|
||||
|
||||
self._commit()
|
||||
|
||||
def _get_asset(self, data):
|
||||
# first find already created
|
||||
asset_entity = self.session.query(
|
||||
self._query("Asset", data)
|
||||
).first()
|
||||
|
||||
if asset_entity:
|
||||
return asset_entity
|
||||
|
||||
asset_entity = self.session.create("Asset", data)
|
||||
|
||||
# _commit if created
|
||||
self._commit()
|
||||
|
||||
return asset_entity
|
||||
|
||||
def _get_assetversion(self, data):
|
||||
assetversion_entity = self.session.query(
|
||||
self._query("AssetVersion", data)
|
||||
).first()
|
||||
|
||||
if assetversion_entity:
|
||||
return assetversion_entity
|
||||
|
||||
assetversion_entity = self.session.create("AssetVersion", data)
|
||||
|
||||
# _commit if created
|
||||
self._commit()
|
||||
|
||||
return assetversion_entity
|
||||
|
||||
def _commit(self):
|
||||
try:
|
||||
self.session.commit()
|
||||
except Exception:
|
||||
tp, value, tb = sys.exc_info()
|
||||
# self.session.rollback()
|
||||
# self.session._configure_locations()
|
||||
six.reraise(tp, value, tb)
|
||||
|
||||
def _get_ftrack_location(self, name=None):
|
||||
name = name or self.default_location
|
||||
|
||||
if name in self.ftrack_locations:
|
||||
return self.ftrack_locations[name]
|
||||
|
||||
location = self.session.query(
|
||||
'Location where name is "{}"'.format(name)
|
||||
).one()
|
||||
self.ftrack_locations[name] = location
|
||||
return location
|
||||
|
||||
def _query(self, entitytype, data):
|
||||
""" Generate a query expression from data supplied.
|
||||
|
||||
If a value is not a string, we'll add the id of the entity to the
|
||||
query.
|
||||
|
||||
Args:
|
||||
entitytype (str): The type of entity to query.
|
||||
data (dict): The data to identify the entity.
|
||||
|
||||
Returns:
|
||||
str: String query to use with "session.query"
|
||||
"""
|
||||
queries = []
|
||||
for key, value in data.items():
|
||||
if not isinstance(value, (str, int)):
|
||||
print("value: {}".format(value))
|
||||
if "id" in value.keys():
|
||||
queries.append(
|
||||
"{0}.id is \"{1}\"".format(key, value["id"])
|
||||
)
|
||||
else:
|
||||
queries.append("{0} is \"{1}\"".format(key, value))
|
||||
|
||||
query = (
|
||||
"select id from " + entitytype + " where " + " and ".join(queries)
|
||||
)
|
||||
print(query)
|
||||
return query
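|
||||
# Illustrative example: _query("Asset", {"name": "plateReference"})
|
||||
# -> 'select id from Asset where name is "plateReference"'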
|
||||
|
||||
|
||||
class FtrackEntityOperator:
|
||||
existing_tasks = []
|
||||
|
||||
def __init__(self, session, project_entity):
|
||||
self.session = session
|
||||
self.project_entity = project_entity
|
||||
|
||||
def commit(self):
|
||||
try:
|
||||
self.session.commit()
|
||||
except Exception:
|
||||
tp, value, tb = sys.exc_info()
|
||||
self.session.rollback()
|
||||
self.session._configure_locations()
|
||||
six.reraise(tp, value, tb)
|
||||
|
||||
def create_ftrack_entity(self, session, entity_type, name, parent=None):
|
||||
parent = parent or self.project_entity
|
||||
entity = session.create(entity_type, {
|
||||
'name': name,
|
||||
'parent': parent
|
||||
})
|
||||
try:
|
||||
session.commit()
|
||||
except Exception:
|
||||
tp, value, tb = sys.exc_info()
|
||||
session.rollback()
|
||||
session._configure_locations()
|
||||
six.reraise(tp, value, tb)
|
||||
return entity
|
||||
|
||||
def get_ftrack_entity(self, session, entity_type, name, parent):
|
||||
query = '{} where name is "{}" and project_id is "{}"'.format(
|
||||
type, name, self.project_entity["id"])
|
||||
|
||||
entity = session.query(query).first()
|
||||
|
||||
# if entity doesn't exist then create one
|
||||
if not entity:
|
||||
entity = self.create_ftrack_entity(
|
||||
session,
|
||||
entity_type,
|
||||
name,
|
||||
parent
|
||||
)
|
||||
|
||||
return entity
|
||||
|
||||
def create_parents(self, template):
|
||||
parents = []
|
||||
t_split = template.split("/")
|
||||
replace_patern = re.compile(r"(\[.*\])")
|
||||
type_patern = re.compile(r"\[(.*)\]")
|
||||
|
||||
for t_s in t_split:
|
||||
match_type = type_pattern.findall(t_s)
|
||||
if not match_type:
|
||||
raise Exception((
|
||||
"Missing correct type flag in : {}"
|
||||
"/n Example: name[Type]").format(
|
||||
t_s)
|
||||
)
|
||||
new_name = re.sub(replace_patern, "", t_s)
|
||||
f_type = match_type.pop()
|
||||
|
||||
parents.append((new_name, f_type))
|
||||
|
||||
return parents
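|
||||
# Illustrative example: create_parents("shots[Folder]/sq01[Sequence]")
|
||||
# -> [("shots", "Folder"), ("sq01", "Sequence")]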
|
||||
|
||||
def create_task(self, task_type, task_types, parent):
|
||||
_existing_tasks = [
|
||||
child for child in parent['children']
|
||||
if child.entity_type.lower() == 'task'
|
||||
]
|
||||
|
||||
# add task into existing tasks if they are not already there
|
||||
for _t in _existing_tasks:
|
||||
if _t in self.existing_tasks:
|
||||
continue
|
||||
self.existing_tasks.append(_t)
|
||||
|
||||
existing_task = [
|
||||
task for task in self.existing_tasks
|
||||
if task['name'].lower() in task_type.lower()
|
||||
if task['parent'] == parent
|
||||
]
|
||||
|
||||
if existing_task:
|
||||
return existing_task.pop()
|
||||
|
||||
task = self.session.create('Task', {
|
||||
"name": task_type.lower(),
|
||||
"parent": parent
|
||||
})
|
||||
task["type"] = task_types[task_type]
|
||||
|
||||
self.existing_tasks.append(task)
|
||||
return task
|
||||
|
|
@ -0,0 +1,529 @@
|
|||
from qtpy import QtWidgets, QtCore
|
||||
|
||||
import uiwidgets
|
||||
import app_utils
|
||||
import ftrack_lib
|
||||
|
||||
|
||||
def clear_inner_modules():
|
||||
import sys
|
||||
|
||||
if "ftrack_lib" in sys.modules.keys():
|
||||
del sys.modules["ftrack_lib"]
|
||||
print("Ftrack Lib module removed from sys.modules")
|
||||
|
||||
if "app_utils" in sys.modules.keys():
|
||||
del sys.modules["app_utils"]
|
||||
print("app_utils module removed from sys.modules")
|
||||
|
||||
if "uiwidgets" in sys.modules.keys():
|
||||
del sys.modules["uiwidgets"]
|
||||
print("uiwidgets module removed from sys.modules")
|
||||
|
||||
|
||||
class MainWindow(QtWidgets.QWidget):
|
||||
|
||||
def __init__(self, klass, *args, **kwargs):
|
||||
super(MainWindow, self).__init__(*args, **kwargs)
|
||||
self.panel_class = klass
|
||||
|
||||
def closeEvent(self, event):
|
||||
# clear all temp data
|
||||
print("Removing temp data")
|
||||
self.panel_class.clear_temp_data()
|
||||
self.panel_class.close()
|
||||
clear_inner_modules()
|
||||
ftrack_lib.FtrackEntityOperator.existing_tasks = []
|
||||
# now the panel can be closed
|
||||
event.accept()
|
||||
|
||||
|
||||
class FlameBabyPublisherPanel(object):
|
||||
session = None
|
||||
temp_data_dir = None
|
||||
processed_components = []
|
||||
project_entity = None
|
||||
task_types = {}
|
||||
all_task_types = {}
|
||||
|
||||
# TreeWidget
|
||||
columns = {
|
||||
"Sequence name": {
|
||||
"columnWidth": 200,
|
||||
"order": 0
|
||||
},
|
||||
"Shot name": {
|
||||
"columnWidth": 200,
|
||||
"order": 1
|
||||
},
|
||||
"Clip duration": {
|
||||
"columnWidth": 100,
|
||||
"order": 2
|
||||
},
|
||||
"Shot description": {
|
||||
"columnWidth": 500,
|
||||
"order": 3
|
||||
},
|
||||
"Task description": {
|
||||
"columnWidth": 500,
|
||||
"order": 4
|
||||
},
|
||||
}
|
||||
|
||||
def __init__(self, selection):
|
||||
print(selection)
|
||||
|
||||
self.session = ftrack_lib.get_ftrack_session()
|
||||
self.selection = selection
|
||||
self.window = MainWindow(self)
|
||||
|
||||
# creating ui
|
||||
self.window.setMinimumSize(1500, 600)
|
||||
self.window.setWindowTitle('AYON: Baby-publisher')
|
||||
self.window.setWindowFlags(QtCore.Qt.WindowStaysOnTopHint)
|
||||
self.window.setAttribute(QtCore.Qt.WA_DeleteOnClose)
|
||||
self.window.setFocusPolicy(QtCore.Qt.StrongFocus)
|
||||
self.window.setStyleSheet('background-color: #313131')
|
||||
|
||||
self._create_project_widget()
|
||||
self._create_tree_widget()
|
||||
self._set_sequence_params()
|
||||
self._generate_widgets()
|
||||
self._generate_layouts()
|
||||
self._timeline_info()
|
||||
self._fix_resolution()
|
||||
|
||||
self.window.show()
|
||||
|
||||
def _generate_widgets(self):
|
||||
with app_utils.get_config("main") as cfg_data:
|
||||
cfg_d = cfg_data
|
||||
|
||||
self._create_task_type_widget(cfg_d)
|
||||
|
||||
# input fields
|
||||
self.shot_name_label = uiwidgets.FlameLabel(
|
||||
'Shot name template', 'normal', self.window)
|
||||
self.shot_name_template_input = uiwidgets.FlameLineEdit(
|
||||
cfg_d["shot_name_template"], self.window)
|
||||
|
||||
self.hierarchy_label = uiwidgets.FlameLabel(
|
||||
'Parents template', 'normal', self.window)
|
||||
self.hierarchy_template_input = uiwidgets.FlameLineEdit(
|
||||
cfg_d["hierarchy_template"], self.window)
|
||||
|
||||
self.start_frame_label = uiwidgets.FlameLabel(
|
||||
'Workfile start frame', 'normal', self.window)
|
||||
self.start_frame_input = uiwidgets.FlameLineEdit(
|
||||
cfg_d["workfile_start_frame"], self.window)
|
||||
|
||||
self.handles_label = uiwidgets.FlameLabel(
|
||||
'Shot handles', 'normal', self.window)
|
||||
self.handles_input = uiwidgets.FlameLineEdit(
|
||||
cfg_d["shot_handles"], self.window)
|
||||
|
||||
self.width_label = uiwidgets.FlameLabel(
|
||||
'Sequence width', 'normal', self.window)
|
||||
self.width_input = uiwidgets.FlameLineEdit(
|
||||
str(self.seq_width), self.window)
|
||||
|
||||
self.height_label = uiwidgets.FlameLabel(
|
||||
'Sequence height', 'normal', self.window)
|
||||
self.height_input = uiwidgets.FlameLineEdit(
|
||||
str(self.seq_height), self.window)
|
||||
|
||||
self.pixel_aspect_label = uiwidgets.FlameLabel(
|
||||
'Pixel aspect ratio', 'normal', self.window)
|
||||
self.pixel_aspect_input = uiwidgets.FlameLineEdit(
|
||||
str(1.00), self.window)
|
||||
|
||||
self.fps_label = uiwidgets.FlameLabel(
|
||||
'Frame rate', 'normal', self.window)
|
||||
self.fps_input = uiwidgets.FlameLineEdit(
|
||||
str(self.fps), self.window)
|
||||
|
||||
# Button
|
||||
self.select_all_btn = uiwidgets.FlameButton(
|
||||
'Select All', self.select_all, self.window)
|
||||
|
||||
self.remove_temp_data_btn = uiwidgets.FlameButton(
|
||||
'Remove temp data', self.clear_temp_data, self.window)
|
||||
|
||||
self.ftrack_send_btn = uiwidgets.FlameButton(
|
||||
'Send to Ftrack', self._send_to_ftrack, self.window)
|
||||
|
||||
def _generate_layouts(self):
|
||||
# left props
|
||||
v_shift = 0
|
||||
prop_layout_l = QtWidgets.QGridLayout()
|
||||
prop_layout_l.setHorizontalSpacing(30)
|
||||
if self.project_selector_enabled:
|
||||
prop_layout_l.addWidget(self.project_select_label, v_shift, 0)
|
||||
prop_layout_l.addWidget(self.project_select_input, v_shift, 1)
|
||||
v_shift += 1
|
||||
prop_layout_l.addWidget(self.shot_name_label, (v_shift + 0), 0)
|
||||
prop_layout_l.addWidget(
|
||||
self.shot_name_template_input, (v_shift + 0), 1)
|
||||
prop_layout_l.addWidget(self.hierarchy_label, (v_shift + 1), 0)
|
||||
prop_layout_l.addWidget(
|
||||
self.hierarchy_template_input, (v_shift + 1), 1)
|
||||
prop_layout_l.addWidget(self.start_frame_label, (v_shift + 2), 0)
|
||||
prop_layout_l.addWidget(self.start_frame_input, (v_shift + 2), 1)
|
||||
prop_layout_l.addWidget(self.handles_label, (v_shift + 3), 0)
|
||||
prop_layout_l.addWidget(self.handles_input, (v_shift + 3), 1)
|
||||
prop_layout_l.addWidget(self.task_type_label, (v_shift + 4), 0)
|
||||
prop_layout_l.addWidget(
|
||||
self.task_type_input, (v_shift + 4), 1)
|
||||
|
||||
# right props
|
||||
prop_widget_r = QtWidgets.QWidget(self.window)
|
||||
prop_layout_r = QtWidgets.QGridLayout(prop_widget_r)
|
||||
prop_layout_r.setHorizontalSpacing(30)
|
||||
prop_layout_r.setAlignment(
|
||||
QtCore.Qt.AlignLeft | QtCore.Qt.AlignTop)
|
||||
prop_layout_r.setContentsMargins(0, 0, 0, 0)
|
||||
prop_layout_r.addWidget(self.width_label, 1, 0)
|
||||
prop_layout_r.addWidget(self.width_input, 1, 1)
|
||||
prop_layout_r.addWidget(self.height_label, 2, 0)
|
||||
prop_layout_r.addWidget(self.height_input, 2, 1)
|
||||
prop_layout_r.addWidget(self.pixel_aspect_label, 3, 0)
|
||||
prop_layout_r.addWidget(self.pixel_aspect_input, 3, 1)
|
||||
prop_layout_r.addWidget(self.fps_label, 4, 0)
|
||||
prop_layout_r.addWidget(self.fps_input, 4, 1)
|
||||
|
||||
# prop layout
|
||||
prop_main_layout = QtWidgets.QHBoxLayout()
|
||||
prop_main_layout.addLayout(prop_layout_l, 1)
|
||||
prop_main_layout.addSpacing(20)
|
||||
prop_main_layout.addWidget(prop_widget_r, 1)
|
||||
|
||||
# buttons layout
|
||||
hbox = QtWidgets.QHBoxLayout()
|
||||
hbox.addWidget(self.remove_temp_data_btn)
|
||||
hbox.addWidget(self.select_all_btn)
|
||||
hbox.addWidget(self.ftrack_send_btn)
|
||||
|
||||
# put all layouts together
|
||||
main_frame = QtWidgets.QVBoxLayout(self.window)
|
||||
main_frame.setContentsMargins(20, 20, 20, 20)
|
||||
main_frame.addLayout(prop_main_layout)
|
||||
main_frame.addWidget(self.tree)
|
||||
main_frame.addLayout(hbox)
|
||||
|
||||
def _set_sequence_params(self):
|
||||
for select in self.selection:
|
||||
self.seq_height = select.height
|
||||
self.seq_width = select.width
|
||||
self.fps = float(str(select.frame_rate)[:-4])
|
||||
break
|
||||
|
||||
def _create_task_type_widget(self, cfg_d):
|
||||
print(self.project_entity)
|
||||
self.task_types = ftrack_lib.get_project_task_types(
|
||||
self.project_entity)
|
||||
|
||||
self.task_type_label = uiwidgets.FlameLabel(
|
||||
'Create Task (type)', 'normal', self.window)
|
||||
self.task_type_input = uiwidgets.FlamePushButtonMenu(
|
||||
cfg_d["create_task_type"], self.task_types.keys(), self.window)
|
||||
|
||||
def _create_project_widget(self):
|
||||
import flame
|
||||
# get project name from flame current project
|
||||
self.project_name = flame.project.current_project.name
|
||||
|
||||
# get project from ftrack -
|
||||
# ftrack project name has to be the same as flame project!
|
||||
query = 'Project where full_name is "{}"'.format(self.project_name)
|
||||
|
||||
# globally used variables
|
||||
self.project_entity = self.session.query(query).first()
|
||||
|
||||
self.project_selector_enabled = bool(not self.project_entity)
|
||||
|
||||
if self.project_selector_enabled:
|
||||
self.all_projects = self.session.query(
|
||||
"Project where status is active").all()
|
||||
self.project_entity = self.all_projects[0]
|
||||
project_names = [p["full_name"] for p in self.all_projects]
|
||||
self.all_task_types = {
|
||||
p["full_name"]: ftrack_lib.get_project_task_types(p).keys()
|
||||
for p in self.all_projects
|
||||
}
|
||||
self.project_select_label = uiwidgets.FlameLabel(
|
||||
'Select Ftrack project', 'normal', self.window)
|
||||
self.project_select_input = uiwidgets.FlamePushButtonMenu(
|
||||
self.project_entity["full_name"], project_names, self.window)
|
||||
self.project_select_input.selection_changed.connect(
|
||||
self._on_project_changed)
|
||||
|
||||
def _create_tree_widget(self):
|
||||
ordered_column_labels = list(self.columns.keys())
|
||||
for _name, _value in self.columns.items():
|
||||
ordered_column_labels.pop(_value["order"])
|
||||
ordered_column_labels.insert(_value["order"], _name)
|
||||
|
||||
self.tree = uiwidgets.FlameTreeWidget(
|
||||
ordered_column_labels, self.window)
|
||||
|
||||
# Allow multiple items in tree to be selected
|
||||
self.tree.setSelectionMode(QtWidgets.QAbstractItemView.MultiSelection)
|
||||
|
||||
# Set tree column width
|
||||
for _name, _val in self.columns.items():
|
||||
self.tree.setColumnWidth(
|
||||
_val["order"],
|
||||
_val["columnWidth"]
|
||||
)
|
||||
|
||||
# Prevent weird characters when shrinking tree columns
|
||||
self.tree.setTextElideMode(QtCore.Qt.ElideNone)
|
||||
|
||||
def _resolve_project_entity(self):
|
||||
if self.project_selector_enabled:
|
||||
selected_project_name = self.project_select_input.text()
|
||||
self.project_entity = next(
|
||||
(p for p in self.all_projects
|
||||
if p["full_name"] in selected_project_name),
|
||||
None
|
||||
)
|
||||
|
||||
def _save_ui_state_to_cfg(self):
|
||||
_cfg_data_back = {
|
||||
"shot_name_template": self.shot_name_template_input.text(),
|
||||
"workfile_start_frame": self.start_frame_input.text(),
|
||||
"shot_handles": self.handles_input.text(),
|
||||
"hierarchy_template": self.hierarchy_template_input.text(),
|
||||
"create_task_type": self.task_type_input.text()
|
||||
}
|
||||
|
||||
# add cfg data back to settings.ini
|
||||
app_utils.set_config(_cfg_data_back, "main")
|
||||
|
||||
def _send_to_ftrack(self):
|
||||
# resolve active project and add it to self.project_entity
|
||||
self._resolve_project_entity()
|
||||
self._save_ui_state_to_cfg()
|
||||
|
||||
# get handles from gui input
|
||||
handles = self.handles_input.text()
|
||||
|
||||
# get frame start from gui input
|
||||
frame_start = int(self.start_frame_input.text())
|
||||
|
||||
# get task type from gui input
|
||||
task_type = self.task_type_input.text()
|
||||
|
||||
# get resolution from gui inputs
|
||||
fps = self.fps_input.text()
|
||||
|
||||
entity_operator = ftrack_lib.FtrackEntityOperator(
|
||||
self.session, self.project_entity)
|
||||
component_creator = ftrack_lib.FtrackComponentCreator(self.session)
|
||||
|
||||
if not self.temp_data_dir:
|
||||
self.window.hide()
|
||||
self.temp_data_dir = component_creator.generate_temp_data(
|
||||
self.selection,
|
||||
{
|
||||
"nbHandles": handles
|
||||
}
|
||||
)
|
||||
self.window.show()
|
||||
|
||||
# collect generated files into lists for further use
|
||||
component_creator.collect_generated_data(self.temp_data_dir)
|
||||
|
||||
# Get all selected items from treewidget
|
||||
for item in self.tree.selectedItems():
|
||||
# frame ranges
|
||||
frame_duration = int(item.text(2))
|
||||
frame_end = frame_start + frame_duration
|
||||
|
||||
# description
|
||||
shot_description = item.text(3)
|
||||
task_description = item.text(4)
|
||||
|
||||
# other
|
||||
sequence_name = item.text(0)
|
||||
shot_name = item.text(1)
|
||||
|
||||
thumb_fp = component_creator.get_thumb_path(shot_name)
|
||||
video_fp = component_creator.get_video_path(shot_name)
|
||||
|
||||
print("processed comps: {}".format(self.processed_components))
|
||||
print("processed thumb_fp: {}".format(thumb_fp))
|
||||
|
||||
processed = False
|
||||
if thumb_fp not in self.processed_components:
|
||||
self.processed_components.append(thumb_fp)
|
||||
else:
|
||||
processed = True
|
||||
|
||||
print("processed: {}".format(processed))
|
||||
|
||||
# populate full shot info
|
||||
shot_attributes = {
|
||||
"sequence": sequence_name,
|
||||
"shot": shot_name,
|
||||
"task": task_type
|
||||
}
|
||||
|
||||
# format shot name template
|
||||
_shot_name = self.shot_name_template_input.text().format(
|
||||
**shot_attributes)
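|
||||
# e.g. template "{sequence}_{shot}" with sequence "sq01" and
|
||||
# shot "sh010" (hypothetical values) -> "sq01_sh010"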
|
||||
|
||||
# format hierarchy template
|
||||
_hierarchy_text = self.hierarchy_template_input.text().format(
|
||||
**shot_attributes)
|
||||
print(_hierarchy_text)
|
||||
|
||||
# solve parents
|
||||
parents = entity_operator.create_parents(_hierarchy_text)
|
||||
print(parents)
|
||||
|
||||
# obtain shot parents entities
|
||||
_parent = None
|
||||
for _name, _type in parents:
|
||||
p_entity = entity_operator.get_ftrack_entity(
|
||||
self.session,
|
||||
_type,
|
||||
_name,
|
||||
_parent
|
||||
)
|
||||
print(p_entity)
|
||||
_parent = p_entity
|
||||
|
||||
# obtain shot ftrack entity
|
||||
f_s_entity = entity_operator.get_ftrack_entity(
|
||||
self.session,
|
||||
"Shot",
|
||||
_shot_name,
|
||||
_parent
|
||||
)
|
||||
print("Shot entity is: {}".format(f_s_entity))
|
||||
|
||||
if not processed:
|
||||
# first create thumbnail and get version entity
|
||||
assetversion_entity = component_creator.create_component(
|
||||
f_s_entity, {
|
||||
"file_path": thumb_fp
|
||||
}
|
||||
)
|
||||
|
||||
# secondly add video to version entity
|
||||
component_creator.create_component(
|
||||
f_s_entity, {
|
||||
"file_path": video_fp,
|
||||
"duration": frame_duration,
|
||||
"handles": int(handles),
|
||||
"fps": float(fps)
|
||||
}, assetversion_entity
|
||||
)
|
||||
|
||||
# create custom attributes
|
||||
custom_attrs = {
|
||||
"frameStart": frame_start,
|
||||
"frameEnd": frame_end,
|
||||
"handleStart": int(handles),
|
||||
"handleEnd": int(handles),
|
||||
"resolutionWidth": int(self.width_input.text()),
|
||||
"resolutionHeight": int(self.height_input.text()),
|
||||
"pixelAspect": float(self.pixel_aspect_input.text()),
|
||||
"fps": float(fps)
|
||||
}
|
||||
|
||||
# update custom attributes on shot entity
|
||||
for key in custom_attrs:
|
||||
f_s_entity['custom_attributes'][key] = custom_attrs[key]
|
||||
|
||||
task_entity = entity_operator.create_task(
|
||||
task_type, self.task_types, f_s_entity)
|
||||
|
||||
# Create notes.
|
||||
user = self.session.query(
|
||||
"User where username is \"{}\"".format(self.session.api_user)
|
||||
).first()
|
||||
|
||||
f_s_entity.create_note(shot_description, author=user)
|
||||
|
||||
if task_description:
|
||||
task_entity.create_note(task_description, user)
|
||||
|
||||
entity_operator.commit()
|
||||
|
||||
component_creator.close()
|
||||
|
||||
def _fix_resolution(self):
|
||||
# Center window in linux
|
||||
resolution = QtWidgets.QDesktopWidget().screenGeometry()
|
||||
self.window.move(
|
||||
(resolution.width() // 2) - (self.window.frameSize().width() // 2),
|
||||
(resolution.height() // 2) - (self.window.frameSize().height() // 2))
|
||||
|
||||
def _on_project_changed(self, project_name):
|
||||
task_types = self.all_task_types[project_name]
|
||||
self.task_type_input.set_menu_options(list(task_types))
|
||||
|
||||
def _timeline_info(self):
|
||||
# identify the information of the segments on the timeline
|
||||
for sequence in self.selection:
|
||||
frame_rate = float(str(sequence.frame_rate)[:-4])
|
||||
for ver in sequence.versions:
|
||||
for track in ver.tracks:
|
||||
if len(track.segments) == 0 and track.hidden:
|
||||
continue
|
||||
for segment in track.segments:
|
||||
print(segment.attributes)
|
||||
if segment.name.get_value() == "":
|
||||
continue
|
||||
if segment.hidden.get_value() is True:
|
||||
continue
|
||||
# get clip frame duration
|
||||
record_duration = str(segment.record_duration)[1:-1]
|
||||
clip_duration = app_utils.timecode_to_frames(
|
||||
record_duration, frame_rate)
|
||||
|
||||
# populate shot source metadata
|
||||
shot_description = ""
|
||||
for attr in ["tape_name", "source_name", "head",
|
||||
"tail", "file_path"]:
|
||||
if not hasattr(segment, attr):
|
||||
continue
|
||||
_value = getattr(segment, attr)
|
||||
_label = attr.replace("_", " ").capitalize()
|
||||
row = "{}: {}\n".format(_label, _value)
|
||||
shot_description += row
|
||||
|
||||
# Add timeline segment to tree
|
||||
QtWidgets.QTreeWidgetItem(self.tree, [
|
||||
sequence.name.get_value(), # seq name
|
||||
segment.shot_name.get_value(), # shot name
|
||||
str(clip_duration), # clip duration
|
||||
shot_description, # shot description
|
||||
segment.comment.get_value() # task description
|
||||
]).setFlags(
|
||||
QtCore.Qt.ItemIsEditable
|
||||
| QtCore.Qt.ItemIsEnabled
|
||||
| QtCore.Qt.ItemIsSelectable
|
||||
)
|
||||
|
||||
# Select top item in tree
|
||||
self.tree.setCurrentItem(self.tree.topLevelItem(0))
|
||||
|
||||
def select_all(self):
|
||||
self.tree.selectAll()
|
||||
|
||||
def clear_temp_data(self):
|
||||
import shutil
|
||||
|
||||
self.processed_components = []
|
||||
|
||||
if self.temp_data_dir:
|
||||
shutil.rmtree(self.temp_data_dir)
|
||||
self.temp_data_dir = None
|
||||
print("All Temp data were destroyed ...")
|
||||
|
||||
def close(self):
|
||||
self._save_ui_state_to_cfg()
|
||||
self.session.close()
|
||||
|
|
@ -0,0 +1,212 @@
|
|||
from qtpy import QtWidgets, QtCore
|
||||
|
||||
|
||||
class FlameLabel(QtWidgets.QLabel):
|
||||
"""
|
||||
Custom Qt Flame Label Widget
|
||||
|
||||
For different label looks set label_type as:
|
||||
'normal', 'background', or 'outline'
|
||||
|
||||
To use:
|
||||
|
||||
label = FlameLabel('Label Name', 'normal', window)
|
||||
"""
|
||||
|
||||
def __init__(self, label_name, label_type, parent_window, *args, **kwargs):
|
||||
super(FlameLabel, self).__init__(*args, **kwargs)
|
||||
|
||||
self.setText(label_name)
|
||||
self.setParent(parent_window)
|
||||
self.setMinimumSize(130, 28)
|
||||
self.setMaximumHeight(28)
|
||||
self.setFocusPolicy(QtCore.Qt.NoFocus)
|
||||
|
||||
# Set label stylesheet based on label_type
|
||||
|
||||
if label_type == 'normal':
|
||||
self.setStyleSheet(
|
||||
'QLabel {color: #9a9a9a; border-bottom: 1px inset #282828; font: 14px "Discreet"}' # noqa
|
||||
'QLabel:disabled {color: #6a6a6a}'
|
||||
)
|
||||
elif label_type == 'background':
|
||||
self.setAlignment(QtCore.Qt.AlignCenter)
|
||||
self.setStyleSheet(
|
||||
'color: #9a9a9a; background-color: #393939; font: 14px "Discreet"' # noqa
|
||||
)
|
||||
elif label_type == 'outline':
|
||||
self.setAlignment(QtCore.Qt.AlignCenter)
|
||||
self.setStyleSheet(
|
||||
'color: #9a9a9a; background-color: #212121; border: 1px solid #404040; font: 14px "Discreet"' # noqa
|
||||
)
|
||||
|
||||
|
||||
class FlameLineEdit(QtWidgets.QLineEdit):
|
||||
"""
|
||||
Custom Qt Flame Line Edit Widget
|
||||
|
||||
Main window should include this:
|
||||
window.setFocusPolicy(QtCore.Qt.StrongFocus)
|
||||
|
||||
To use:
|
||||
|
||||
line_edit = FlameLineEdit('Some text here', window)
|
||||
"""
|
||||
|
||||
def __init__(self, text, parent_window, *args, **kwargs):
|
||||
super(FlameLineEdit, self).__init__(*args, **kwargs)
|
||||
|
||||
self.setText(text)
|
||||
self.setParent(parent_window)
|
||||
self.setMinimumHeight(28)
|
||||
self.setMinimumWidth(110)
|
||||
self.setStyleSheet(
|
||||
'QLineEdit {color: #9a9a9a; background-color: #373e47; selection-color: #262626; selection-background-color: #b8b1a7; font: 14px "Discreet"}' # noqa
|
||||
'QLineEdit:focus {background-color: #474e58}' # noqa
|
||||
'QLineEdit:disabled {color: #6a6a6a; background-color: #373737}'
|
||||
)
|
||||
|
||||
|
||||
class FlameTreeWidget(QtWidgets.QTreeWidget):
|
||||
"""
|
||||
Custom Qt Flame Tree Widget
|
||||
|
||||
To use:
|
||||
|
||||
tree_headers = ['Header1', 'Header2', 'Header3', 'Header4']
|
||||
tree = FlameTreeWidget(tree_headers, window)
|
||||
"""
|
||||
|
||||
def __init__(self, tree_headers, parent_window, *args, **kwargs):
|
||||
super(FlameTreeWidget, self).__init__(*args, **kwargs)
|
||||
|
||||
self.setMinimumWidth(1000)
|
||||
self.setMinimumHeight(300)
|
||||
self.setSortingEnabled(True)
|
||||
self.sortByColumn(0, QtCore.Qt.AscendingOrder)
|
||||
self.setAlternatingRowColors(True)
|
||||
self.setFocusPolicy(QtCore.Qt.NoFocus)
|
||||
self.setStyleSheet(
|
||||
'QTreeWidget {color: #9a9a9a; background-color: #2a2a2a; alternate-background-color: #2d2d2d; font: 14px "Discreet"}' # noqa
|
||||
'QTreeWidget::item:selected {color: #d9d9d9; background-color: #474747; border: 1px solid #111111}' # noqa
|
||||
'QHeaderView {color: #9a9a9a; background-color: #393939; font: 14px "Discreet"}' # noqa
|
||||
'QTreeWidget::item:selected {selection-background-color: #111111}'
|
||||
'QMenu {color: #9a9a9a; background-color: #24303d; font: 14px "Discreet"}' # noqa
|
||||
'QMenu::item:selected {color: #d9d9d9; background-color: #3a4551}'
|
||||
)
|
||||
self.verticalScrollBar().setStyleSheet('color: #818181')
|
||||
self.horizontalScrollBar().setStyleSheet('color: #818181')
|
||||
self.setHeaderLabels(tree_headers)
|
||||
|
||||
|
||||
class FlameButton(QtWidgets.QPushButton):
|
||||
"""
|
||||
Custom Qt Flame Button Widget
|
||||
|
||||
To use:
|
||||
|
||||
button = FlameButton('Button Name', do_this_when_pressed, window)
|
||||
"""
|
||||
|
||||
def __init__(self, button_name, do_when_pressed, parent_window,
|
||||
*args, **kwargs):
|
||||
super(FlameButton, self).__init__(*args, **kwargs)
|
||||
|
||||
self.setText(button_name)
|
||||
self.setParent(parent_window)
|
||||
self.setMinimumSize(QtCore.QSize(110, 28))
|
||||
self.setMaximumSize(QtCore.QSize(110, 28))
|
||||
self.setFocusPolicy(QtCore.Qt.NoFocus)
|
||||
self.clicked.connect(do_when_pressed)
|
||||
self.setStyleSheet(
|
||||
'QPushButton {color: #9a9a9a; background-color: #424142; border-top: 1px inset #555555; border-bottom: 1px inset black; font: 14px "Discreet"}' # noqa
|
||||
'QPushButton:pressed {color: #d9d9d9; background-color: #4f4f4f; border-top: 1px inset #666666; font: italic}' # noqa
|
||||
'QPushButton:disabled {color: #747474; background-color: #353535; border-top: 1px solid #444444; border-bottom: 1px solid #242424}' # noqa
|
||||
)
|
||||
|
||||
|
||||
class FlamePushButton(QtWidgets.QPushButton):
|
||||
"""
|
||||
Custom Qt Flame Push Button Widget
|
||||
|
||||
To use:
|
||||
|
||||
pushbutton = FlamePushButton(' Button Name', True_or_False, window)
|
||||
"""
|
||||
|
||||
def __init__(self, button_name, button_checked, parent_window,
|
||||
*args, **kwargs):
|
||||
super(FlamePushButton, self).__init__(*args, **kwargs)
|
||||
|
||||
self.setText(button_name)
|
||||
self.setParent(parent_window)
|
||||
self.setCheckable(True)
|
||||
self.setChecked(button_checked)
|
||||
self.setMinimumSize(155, 28)
|
||||
self.setMaximumSize(155, 28)
|
||||
self.setFocusPolicy(QtCore.Qt.NoFocus)
|
||||
self.setStyleSheet(
|
||||
'QPushButton {color: #9a9a9a; background-color: qlineargradient(x1: 0, y1: 0, x2: 1, y2: 0, stop: .93 #424142, stop: .94 #2e3b48); text-align: left; border-top: 1px inset #555555; border-bottom: 1px inset black; font: 14px "Discreet"}' # noqa
|
||||
'QPushButton:checked {color: #d9d9d9; background-color: qlineargradient(x1: 0, y1: 0, x2: 1, y2: 0, stop: .93 #4f4f4f, stop: .94 #5a7fb4); font: italic; border: 1px inset black; border-bottom: 1px inset #404040; border-right: 1px inset #404040}' # noqa
|
||||
'QPushButton:disabled {color: #6a6a6a; background-color: qlineargradient(x1: 0, y1: 0, x2: 1, y2: 0, stop: .93 #383838, stop: .94 #353535); font: light; border-top: 1px solid #575757; border-bottom: 1px solid #242424; border-right: 1px solid #353535; border-left: 1px solid #353535}' # noqa
|
||||
'QToolTip {color: black; background-color: #ffffde; border: black solid 1px}' # noqa
|
||||
)
|
||||
|
||||
|
||||
class FlamePushButtonMenu(QtWidgets.QPushButton):
|
||||
"""
|
||||
Custom Qt Flame Menu Push Button Widget
|
||||
|
||||
To use:
|
||||
|
||||
push_button_menu_options = ['Item 1', 'Item 2', 'Item 3', 'Item 4']
|
||||
menu_push_button = FlamePushButtonMenu('push_button_name',
|
||||
push_button_menu_options, window)
|
||||
|
||||
or
|
||||
|
||||
push_button_menu_options = ['Item 1', 'Item 2', 'Item 3', 'Item 4']
|
||||
menu_push_button = FlamePushButtonMenu(push_button_menu_options[0],
|
||||
push_button_menu_options, window)
|
||||
"""
|
||||
selection_changed = QtCore.Signal(str)
|
||||
|
||||
def __init__(self, button_name, menu_options, parent_window,
|
||||
*args, **kwargs):
|
||||
super(FlamePushButtonMenu, self).__init__(*args, **kwargs)
|
||||
|
||||
self.setParent(parent_window)
|
||||
self.setMinimumHeight(28)
|
||||
self.setMinimumWidth(110)
|
||||
self.setFocusPolicy(QtCore.Qt.NoFocus)
|
||||
self.setStyleSheet(
|
||||
'QPushButton {color: #9a9a9a; background-color: #24303d; font: 14px "Discreet"}' # noqa
|
||||
'QPushButton:disabled {color: #747474; background-color: #353535; border-top: 1px solid #444444; border-bottom: 1px solid #242424}' # noqa
|
||||
)
|
||||
|
||||
pushbutton_menu = QtWidgets.QMenu(parent_window)
|
||||
pushbutton_menu.setFocusPolicy(QtCore.Qt.NoFocus)
|
||||
pushbutton_menu.setStyleSheet(
|
||||
'QMenu {color: #9a9a9a; background-color:#24303d; font: 14px "Discreet"}' # noqa
|
||||
'QMenu::item:selected {color: #d9d9d9; background-color: #3a4551}'
|
||||
)
|
||||
|
||||
self._pushbutton_menu = pushbutton_menu
|
||||
self.setMenu(pushbutton_menu)
|
||||
self.set_menu_options(menu_options, button_name)
|
||||
|
||||
def set_menu_options(self, menu_options, current_option=None):
|
||||
self._pushbutton_menu.clear()
|
||||
current_option = current_option or menu_options[0]
|
||||
|
||||
for option in menu_options:
|
||||
action = self._pushbutton_menu.addAction(option)
|
||||
action.triggered.connect(self._on_action_trigger)
|
||||
|
||||
if current_option is not None:
|
||||
self.setText(current_option)
|
||||
|
||||
def _on_action_trigger(self):
|
||||
action = self.sender()
|
||||
self.setText(action.text())
|
||||
self.selection_changed.emit(action.text())
|
||||
|
|
@ -0,0 +1,43 @@
|
|||
from __future__ import print_function
|
||||
|
||||
import os
|
||||
import sys
|
||||
|
||||
# only testing dependency for nested modules in package
|
||||
import six # noqa
|
||||
|
||||
|
||||
SCRIPT_DIR = os.path.dirname(__file__)
|
||||
PACKAGE_DIR = os.path.join(SCRIPT_DIR, "modules")
|
||||
sys.path.append(PACKAGE_DIR)
|
||||
|
||||
|
||||
def flame_panel_executor(selection):
|
||||
if "panel_app" in sys.modules.keys():
|
||||
print("panel_app module is already loaded")
|
||||
del sys.modules["panel_app"]
|
||||
import panel_app
|
||||
reload(panel_app) # noqa
|
||||
print("panel_app module removed from sys.modules")
|
||||
|
||||
panel_app.FlameBabyPublisherPanel(selection)
|
||||
|
||||
|
||||
def scope_sequence(selection):
|
||||
import flame
|
||||
return any(isinstance(item, flame.PySequence) for item in selection)
|
||||
|
||||
|
||||
def get_media_panel_custom_ui_actions():
|
||||
return [
|
||||
{
|
||||
"name": "AYON: Baby-publisher",
|
||||
"actions": [
|
||||
{
|
||||
"name": "Create Shots",
|
||||
"isVisible": scope_sequence,
|
||||
"execute": flame_panel_executor
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
|
|
@ -0,0 +1,219 @@
|
|||
from __future__ import print_function
|
||||
import sys
|
||||
from qtpy import QtWidgets
|
||||
from pprint import pformat
|
||||
import atexit
|
||||
|
||||
import ayon_flame.api as opfapi
|
||||
from ayon_core.pipeline import (
|
||||
install_host,
|
||||
registered_host,
|
||||
)
|
||||
|
||||
|
||||
def openpype_install():
|
||||
"""Registering AYON in context
|
||||
"""
|
||||
install_host(opfapi)
|
||||
print("Registered host: {}".format(registered_host()))
|
||||
|
||||
|
||||
# Exception handler
|
||||
def exception_handler(exctype, value, _traceback):
|
||||
"""Exception handler for improving UX
|
||||
|
||||
Args:
|
||||
exctype (type): exception type
|
||||
value (Exception): exception value
|
||||
_traceback (traceback): traceback object to show
|
||||
"""
|
||||
import traceback
|
||||
msg = "AYON: Python exception {} in {}".format(value, exctype)
|
||||
mbox = QtWidgets.QMessageBox()
|
||||
mbox.setText(msg)
|
||||
mbox.setDetailedText(
|
||||
pformat(traceback.format_exception(exctype, value, _traceback)))
|
||||
mbox.setStyleSheet('QLabel{min-width: 800px;}')
|
||||
mbox.exec_()
|
||||
sys.__excepthook__(exctype, value, _traceback)
|
||||
|
||||
|
||||
# add exception handler into sys module
|
||||
sys.excepthook = exception_handler
|
||||
|
||||
|
||||
# register clean up logic to be called at Flame exit
|
||||
def cleanup():
|
||||
"""Cleaning up Flame framework context
|
||||
"""
|
||||
if opfapi.CTX.flame_apps:
|
||||
print('`{}` cleaning up flame_apps:\n {}\n'.format(
|
||||
__file__, pformat(opfapi.CTX.flame_apps)))
|
||||
while len(opfapi.CTX.flame_apps):
|
||||
app = opfapi.CTX.flame_apps.pop()
|
||||
print('`{}` removing : {}'.format(__file__, app.name))
|
||||
del app
|
||||
opfapi.CTX.flame_apps = []
|
||||
|
||||
if opfapi.CTX.app_framework:
|
||||
print('openpype\t: {} cleaning up'.format(
|
||||
opfapi.CTX.app_framework.bundle_name)
|
||||
)
|
||||
opfapi.CTX.app_framework.save_prefs()
|
||||
opfapi.CTX.app_framework = None
|
||||
|
||||
|
||||
atexit.register(cleanup)
|
||||
|
||||
|
||||
def load_apps():
|
||||
"""Load available flame_apps into Flame framework
|
||||
"""
|
||||
opfapi.CTX.flame_apps.append(
|
||||
opfapi.FlameMenuProjectConnect(opfapi.CTX.app_framework))
|
||||
opfapi.CTX.flame_apps.append(
|
||||
opfapi.FlameMenuTimeline(opfapi.CTX.app_framework))
|
||||
opfapi.CTX.flame_apps.append(
|
||||
opfapi.FlameMenuUniversal(opfapi.CTX.app_framework))
|
||||
opfapi.CTX.app_framework.log.info("Apps are loaded")
|
||||
|
||||
|
||||
def project_changed_dict(info):
|
||||
"""Hook for project change action
|
||||
|
||||
Args:
|
||||
info (str): info text
|
||||
"""
|
||||
cleanup()
|
||||
|
||||
|
||||
def app_initialized(parent=None):
|
||||
"""Inicialization of Framework
|
||||
|
||||
Args:
|
||||
parent (obj, optional): Parent object. Defaults to None.
|
||||
"""
|
||||
opfapi.CTX.app_framework = opfapi.FlameAppFramework()
|
||||
|
||||
print("{} initializing".format(
|
||||
opfapi.CTX.app_framework.bundle_name))
|
||||
|
||||
load_apps()
|
||||
|
||||
|
||||
"""
|
||||
Initialization of the hook starts here.
|
||||
|
||||
First it tests whether the flame module can be imported;
|
||||
this will succeed only once a project has been loaded.
|
||||
Then `app_initialized` loads the main Framework, which loads
|
||||
all menu objects as flame_apps.
|
||||
"""
|
||||
|
||||
try:
|
||||
import flame # noqa
|
||||
app_initialized(parent=None)
|
||||
except ImportError:
|
||||
print("!!!! not able to import flame module !!!!")
|
||||
|
||||
|
||||
def rescan_hooks():
|
||||
import flame # noqa
|
||||
flame.execute_shortcut('Rescan Python Hooks')
|
||||
|
||||
|
||||
def _build_app_menu(app_name):
|
||||
"""Flame menu object generator
|
||||
|
||||
Args:
|
||||
app_name (str): name of menu object app
|
||||
|
||||
Returns:
|
||||
list: menu object
|
||||
"""
|
||||
menu = []
|
||||
|
||||
# first find the relative appname
|
||||
app = None
|
||||
for _app in opfapi.CTX.flame_apps:
|
||||
if _app.__class__.__name__ == app_name:
|
||||
app = _app
|
||||
|
||||
if app:
|
||||
menu.append(app.build_menu())
|
||||
|
||||
if opfapi.CTX.app_framework:
|
||||
menu_auto_refresh = opfapi.CTX.app_framework.prefs_global.get(
|
||||
'menu_auto_refresh', {})
|
||||
if menu_auto_refresh.get('timeline_menu', True):
|
||||
try:
|
||||
import flame # noqa
|
||||
flame.schedule_idle_event(rescan_hooks)
|
||||
except ImportError:
|
||||
print("!-!!! not able to import flame module !!!!")
|
||||
|
||||
return menu
|
||||
|
||||
|
||||
""" Flame hooks are starting here
|
||||
"""
|
||||
|
||||
|
||||
def project_saved(project_name, save_time, is_auto_save):
|
||||
"""Hook to activate when project is saved
|
||||
|
||||
Args:
|
||||
project_name (str): name of project
|
||||
save_time (str): time when it was saved
|
||||
is_auto_save (bool): autosave is on or off
|
||||
"""
|
||||
if opfapi.CTX.app_framework:
|
||||
opfapi.CTX.app_framework.save_prefs()
|
||||
|
||||
|
||||
def get_main_menu_custom_ui_actions():
|
||||
"""Hook to create submenu in start menu
|
||||
|
||||
Returns:
|
||||
list: menu object
|
||||
"""
|
||||
# install openpype and the host
|
||||
openpype_install()
|
||||
|
||||
return _build_app_menu("FlameMenuProjectConnect")
|
||||
|
||||
|
||||
def get_timeline_custom_ui_actions():
|
||||
"""Hook to create submenu in timeline
|
||||
|
||||
Returns:
|
||||
list: menu object
|
||||
"""
|
||||
# install openpype and the host
|
||||
openpype_install()
|
||||
|
||||
return _build_app_menu("FlameMenuTimeline")
|
||||
|
||||
|
||||
def get_batch_custom_ui_actions():
|
||||
"""Hook to create submenu in batch
|
||||
|
||||
Returns:
|
||||
list: menu object
|
||||
"""
|
||||
# install openpype and the host
|
||||
openpype_install()
|
||||
|
||||
return _build_app_menu("FlameMenuUniversal")
|
||||
|
||||
|
||||
def get_media_panel_custom_ui_actions():
|
||||
"""Hook to create submenu in desktop
|
||||
|
||||
Returns:
|
||||
list: menu object
|
||||
"""
|
||||
# install openpype and the host
|
||||
openpype_install()
|
||||
|
||||
return _build_app_menu("FlameMenuUniversal")
|
||||
3
server_addon/flame/client/ayon_flame/version.py
Normal file
@@ -0,0 +1,3 @@
# -*- coding: utf-8 -*-
"""Package declaring AYON addon 'flame' version."""
__version__ = "0.2.0"
@@ -1,3 +1,10 @@
name = "flame"
title = "Flame"
version = "0.1.0"
version = "0.2.0"

client_dir = "ayon_flame"

ayon_required_addons = {
    "core": ">0.3.2",
}
ayon_compatible_addons = {}
10
server_addon/max/client/ayon_max/__init__.py
Normal file
@@ -0,0 +1,10 @@
from .addon import (
    MaxAddon,
    MAX_HOST_DIR,
)


__all__ = (
    "MaxAddon",
    "MAX_HOST_DIR",
)
25
server_addon/max/client/ayon_max/addon.py
Normal file
@@ -0,0 +1,25 @@
# -*- coding: utf-8 -*-
import os
from ayon_core.addon import AYONAddon, IHostAddon

MAX_HOST_DIR = os.path.dirname(os.path.abspath(__file__))


class MaxAddon(AYONAddon, IHostAddon):
    name = "max"
    host_name = "max"

    def add_implementation_envs(self, env, _app):
        # Remove auto screen scale factor for Qt
        # - let 3dsmax decide its value
        env.pop("QT_AUTO_SCREEN_SCALE_FACTOR", None)

    def get_workfile_extensions(self):
        return [".max"]

    def get_launch_hook_paths(self, app):
        if app.host_name != self.host_name:
            return []
        return [
            os.path.join(MAX_HOST_DIR, "hooks")
        ]
20
server_addon/max/client/ayon_max/api/__init__.py
Normal file
@@ -0,0 +1,20 @@
# -*- coding: utf-8 -*-
"""Public API for 3dsmax"""

from .pipeline import (
    MaxHost,
)


from .lib import (
    maintained_selection,
    lsattr,
    get_all_children
)

__all__ = [
    "MaxHost",
    "maintained_selection",
    "lsattr",
    "get_all_children"
]
42
server_addon/max/client/ayon_max/api/action.py
Normal file
@@ -0,0 +1,42 @@
from pymxs import runtime as rt

import pyblish.api

from ayon_core.pipeline.publish import get_errored_instances_from_context


class SelectInvalidAction(pyblish.api.Action):
    """Select invalid objects in 3dsMax when a publish plug-in failed."""
    label = "Select Invalid"
    on = "failed"
    icon = "search"

    def process(self, context, plugin):
        errored_instances = get_errored_instances_from_context(context,
                                                               plugin=plugin)

        # Get the invalid nodes for the plug-ins
        self.log.info("Finding invalid nodes...")
        invalid = list()
        for instance in errored_instances:
            invalid_nodes = plugin.get_invalid(instance)
            if invalid_nodes:
                if isinstance(invalid_nodes, (list, tuple)):
                    invalid.extend(invalid_nodes)
                else:
                    self.log.warning(
                        "Failed plug-in doesn't have any selectable objects."
                    )

        if not invalid:
            self.log.info("No invalid nodes found.")
            return
        invalid_names = [obj.name for obj in invalid
                         if not isinstance(obj, tuple)]
        if not invalid_names:
            invalid_names = [obj.name for obj, _ in invalid]
            invalid = [obj for obj, _ in invalid]
        self.log.info(
            "Selecting invalid objects: %s", ", ".join(invalid_names)
        )

        rt.Select(invalid)
50
server_addon/max/client/ayon_max/api/colorspace.py
Normal file
@@ -0,0 +1,50 @@
import attr
from pymxs import runtime as rt


@attr.s
class LayerMetadata(object):
    """Data class for Render Layer metadata."""
    frameStart = attr.ib()
    frameEnd = attr.ib()


@attr.s
class RenderProduct(object):
    """Colorspace data for a specific render product,
    used when submitting a publish job.
    """
    colorspace = attr.ib()  # colorspace
    view = attr.ib()
    productName = attr.ib(default=None)


class ARenderProduct(object):

    def __init__(self):
        """Constructor."""
        # Initialize
        self.layer_data = self._get_layer_data()
        self.layer_data.products = self.get_colorspace_data()

    def _get_layer_data(self):
        return LayerMetadata(
            frameStart=int(rt.rendStart),
            frameEnd=int(rt.rendEnd),
        )

    def get_colorspace_data(self):
        """To be implemented by the renderer class.

        This should return a list of RenderProducts.

        Returns:
            list: List of RenderProduct
        """
        colorspace_data = [
            RenderProduct(
                colorspace="sRGB",
                view="ACES 1.0",
                productName=""
            )
        ]
        return colorspace_data
575
server_addon/max/client/ayon_max/api/lib.py
Normal file
@@ -0,0 +1,575 @@
# -*- coding: utf-8 -*-
"""Library of functions useful for the 3dsmax pipeline."""
import contextlib
import logging
import json
from typing import Any, Dict, Union

import six

from ayon_core.pipeline import (
    get_current_project_name,
    colorspace
)
from ayon_core.settings import get_project_settings
from ayon_core.pipeline.context_tools import (
    get_current_task_entity
)
from ayon_core.style import load_stylesheet
from pymxs import runtime as rt


JSON_PREFIX = "JSON::"
log = logging.getLogger("ayon_max")


def get_main_window():
    """Acquire Max's main window"""
    from qtpy import QtWidgets
    top_widgets = QtWidgets.QApplication.topLevelWidgets()
    name = "QmaxApplicationWindow"
    for widget in top_widgets:
        if (
            widget.inherits("QMainWindow")
            and widget.metaObject().className() == name
        ):
            return widget
    raise RuntimeError('Could not find 3dsMax main window.')


def imprint(node_name: str, data: dict) -> bool:
    node = rt.GetNodeByName(node_name)
    if not node:
        return False

    for k, v in data.items():
        if isinstance(v, (dict, list)):
            rt.SetUserProp(node, k, f"{JSON_PREFIX}{json.dumps(v)}")
        else:
            rt.SetUserProp(node, k, v)

    return True

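
# Example usage (node name and data hypothetical), showing how nested
# values are serialized with the JSON prefix so `read()` can round-trip
# them back into Python types:
#   imprint("shot010_CON", {"id": "pyblish.avalon.instance",
#                           "members": ["boxA", "boxB"]})
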
def lsattr(
        attr: str,
        value: Union[str, None] = None,
        root: Union[str, None] = None) -> list:
    """List nodes having attribute with specified value.

    Args:
        attr (str): Attribute name to match.
        value (str, Optional): Value to match. If omitted, all nodes
            with the specified attribute are returned no matter the value.
        root (str, Optional): Root node name. If omitted, scene root is used.

    Returns:
        list of nodes.
    """
    root = rt.RootNode if root is None else rt.GetNodeByName(root)

    def output_node(node, nodes):
        nodes.append(node)
        for child in node.Children:
            output_node(child, nodes)

    nodes = []
    output_node(root, nodes)
    return [
        n for n in nodes
        if rt.GetUserProp(n, attr) == value
    ] if value else [
        n for n in nodes
        if rt.GetUserProp(n, attr)
    ]

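
# Example (attribute value hypothetical): list all nodes imprinted as
# publish instances anywhere under the scene root:
#   instances = lsattr("id", value="pyblish.avalon.instance")
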
def read(container) -> dict:
    data = {}
    props = rt.GetUserPropBuffer(container)
    # this shouldn't happen but let's guard against it anyway
    if not props:
        return data

    for line in props.split("\r\n"):
        try:
            key, value = line.split("=")
        except ValueError:
            # if the line cannot be split we can't really parse it
            continue

        value = value.strip()
        if isinstance(value.strip(), six.string_types) and \
                value.startswith(JSON_PREFIX):
            with contextlib.suppress(json.JSONDecodeError):
                value = json.loads(value[len(JSON_PREFIX):])

        # default value behavior
        # convert maxscript boolean values
        if value == "true":
            value = True
        elif value == "false":
            value = False

        data[key.strip()] = value

    data["instance_node"] = container.Name

    return data


@contextlib.contextmanager
def maintained_selection():
    previous_selection = rt.GetCurrentSelection()
    try:
        yield
    finally:
        if previous_selection:
            rt.Select(previous_selection)
        else:
            rt.Select()


def get_all_children(parent, node_type=None):
    """Handy function to get all the children of a given node

    Args:
        parent (3dsmax Node1): Node to get all children of.
        node_type (None, runtime.class): give class to check for
            e.g. rt.FFDBox/rt.GeometryClass etc.

    Returns:
        list: list of all children of the parent node
    """
    def list_children(node):
        children = []
        for c in node.Children:
            children.append(c)
            children = children + list_children(c)
        return children
    child_list = list_children(parent)

    return ([x for x in child_list if rt.SuperClassOf(x) == node_type]
            if node_type else child_list)


def get_current_renderer():
    """Get the current production renderer for Max.

    Returns:
        "{Current Renderer}:{Current Renderer}"
        e.g. "Redshift_Renderer:Redshift_Renderer"
    """
    return rt.renderers.production


def get_default_render_folder(project_setting=None):
    return (project_setting["max"]
                           ["RenderSettings"]
                           ["default_render_image_folder"])


def set_render_frame_range(start_frame, end_frame):
    """
    Note:
        Frame range can be specified in different types. Possible values are:
        * `1` - Single frame.
        * `2` - Active time segment ( animationRange ).
        * `3` - User specified Range.
        * `4` - User specified Frame pickup string (for example `1,3,5-12`).

    Todo:
        Current type is hard-coded, there should be a custom setting for this.
    """
    rt.rendTimeType = 3
    if start_frame is not None and end_frame is not None:
        rt.rendStart = int(start_frame)
        rt.rendEnd = int(end_frame)


def get_multipass_setting(project_setting=None):
    return (project_setting["max"]
                           ["RenderSettings"]
                           ["multipass"])


def set_scene_resolution(width: int, height: int):
    """Set the render resolution

    Args:
        width(int): value of the width
        height(int): value of the height

    Returns:
        None
    """
    # Make sure the render dialog is closed for the resolution update;
    # changing the Render Setup dialog settings should be done
    # with the actual Render Setup dialog in a closed state.
    if rt.renderSceneDialog.isOpen():
        rt.renderSceneDialog.close()

    rt.renderWidth = width
    rt.renderHeight = height


def reset_scene_resolution():
    """Apply the scene resolution from the project definition.

    The scene resolution can be overwritten by a folder if the folder.attrib
    contains any information regarding scene resolution.
    """
    task_attributes = get_current_task_entity(fields={"attrib"})["attrib"]
    width = int(task_attributes["resolutionWidth"])
    height = int(task_attributes["resolutionHeight"])

    set_scene_resolution(width, height)


def get_frame_range(task_entity=None) -> Union[Dict[str, Any], None]:
    """Get the current task frame range and handles

    Args:
        task_entity (dict): Task Entity.

    Returns:
        dict: with frame start, frame end, handle start, handle end.
    """
    # Set frame start/end
    if task_entity is None:
        task_entity = get_current_task_entity(fields={"attrib"})
    task_attributes = task_entity["attrib"]
    frame_start = int(task_attributes["frameStart"])
    frame_end = int(task_attributes["frameEnd"])
    handle_start = int(task_attributes["handleStart"])
    handle_end = int(task_attributes["handleEnd"])
    frame_start_handle = frame_start - handle_start
    frame_end_handle = frame_end + handle_end

    return {
        "frameStart": frame_start,
        "frameEnd": frame_end,
        "handleStart": handle_start,
        "handleEnd": handle_end,
        "frameStartHandle": frame_start_handle,
        "frameEndHandle": frame_end_handle,
    }

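
# For a task with frameStart=1001, frameEnd=1100 and 10-frame handles
# (values hypothetical), get_frame_range() returns:
#   {"frameStart": 1001, "frameEnd": 1100,
#    "handleStart": 10, "handleEnd": 10,
#    "frameStartHandle": 991, "frameEndHandle": 1110}
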
def reset_frame_range(fps: bool = True):
    """Set frame range to current folder.

    From the 3dsmax documentation:

        animationRange: A System Global variable which lets you get and
            set an Interval value that defines the start and end frames
            of the Active Time Segment.
        frameRate: A System Global variable which lets you get
            and set an Integer value that defines the current
            scene frame rate in frames-per-second.
    """
    if fps:
        task_entity = get_current_task_entity()
        task_attributes = task_entity["attrib"]
        fps_number = float(task_attributes["fps"])
        rt.frameRate = fps_number
    frame_range = get_frame_range()

    set_timeline(
        frame_range["frameStartHandle"], frame_range["frameEndHandle"])
    set_render_frame_range(
        frame_range["frameStartHandle"], frame_range["frameEndHandle"])


def reset_unit_scale():
    """Apply the unit scale setting to 3dsMax"""
    project_name = get_current_project_name()
    settings = get_project_settings(project_name).get("max")
    scene_scale = settings.get("unit_scale_settings",
                               {}).get("scene_unit_scale")
    if scene_scale:
        rt.units.DisplayType = rt.Name("Metric")
        rt.units.MetricType = rt.Name(scene_scale)
    else:
        rt.units.DisplayType = rt.Name("Generic")


def convert_unit_scale():
    """Convert the system unit scale in 3dsMax for fbx export.

    Returns:
        str: unit scale
    """
    unit_scale_dict = {
        "millimeters": "mm",
        "centimeters": "cm",
        "meters": "m",
        "kilometers": "km"
    }
    current_unit_scale = rt.Execute("units.MetricType as string")
    return unit_scale_dict[current_unit_scale]


def set_context_setting():
    """Apply the project settings from the project definition

    Settings can be overwritten by a folder if the folder.attrib contains
    any information regarding those settings.

    Examples of settings:
        frame range
        resolution

    Returns:
        None
    """
    reset_scene_resolution()
    reset_frame_range()
    reset_colorspace()
    reset_unit_scale()


def get_max_version():
    """Get the max version date for deadline.

    rt.MaxVersion() returns e.g. #(25000, 62, 0, 25, 0, 0, 997, 2023, ""),
    where max_info[7] is the max version date.

    Returns:
        max version date (e.g. 2023)
    """
    max_info = rt.MaxVersion()
    return max_info[7]


def is_headless():
    """Check if 3dsMax runs in batch mode.

    If it returns True, it runs in 3dsbatch.exe.
    If it returns False, it runs in 3dsmax.exe.
    """
    return rt.maxops.isInNonInteractiveMode()


def set_timeline(frameStart, frameEnd):
    """Set frame range for the timeline editor in Max."""
    rt.animationRange = rt.interval(frameStart, frameEnd)
    return rt.animationRange


def reset_colorspace():
    """OCIO Configuration

    Supported in 3dsMax 2024+.
    """
    if int(get_max_version()) < 2024:
        return

    max_config_data = colorspace.get_current_context_imageio_config_preset()
    if max_config_data:
        ocio_config_path = max_config_data["path"]
        colorspace_mgr = rt.ColorPipelineMgr
        colorspace_mgr.Mode = rt.Name("OCIO_Custom")
        colorspace_mgr.OCIOConfigPath = ocio_config_path


def check_colorspace():
    parent = get_main_window()
    if parent is None:
        log.info("Skipping outdated pop-up "
                 "because Max main window can't be found.")
    if int(get_max_version()) >= 2024:
        color_mgr = rt.ColorPipelineMgr
        max_config_data = colorspace.get_current_context_imageio_config_preset()  # noqa
        if max_config_data and color_mgr.Mode != rt.Name("OCIO_Custom"):
            if not is_headless():
                from ayon_core.tools.utils import SimplePopup
                dialog = SimplePopup(parent=parent)
                dialog.setWindowTitle("Warning: Wrong OCIO Mode")
                dialog.set_message("This scene has the wrong OCIO "
                                   "Mode setting.")
                dialog.set_button_text("Fix")
                dialog.setStyleSheet(load_stylesheet())
                dialog.on_clicked.connect(reset_colorspace)
                dialog.show()


def unique_namespace(namespace, format="%02d",
                     prefix="", suffix="", con_suffix="CON"):
    """Return unique namespace

    Arguments:
        namespace (str): Name of namespace to consider
        format (str, optional): Formatting of the given iteration number
        prefix (str, optional): Prefix to add to the namespace
        suffix (str, optional): Only consider namespaces with this suffix.
        con_suffix: max only, for finding the name of the master container

    >>> unique_namespace("bar")
    # bar01
    >>> unique_namespace(":hello")
    # :hello01
    >>> unique_namespace("bar:", suffix="_NS")
    # bar01_NS:

    """

    def current_namespace():
        current = namespace
        # When inside a namespace Max adds no trailing :
        if not current.endswith(":"):
            current += ":"
        return current

    # Always check against the absolute namespace root
    # There's no clash with :x if we're defining namespace :a:x
    ROOT = ":" if namespace.startswith(":") else current_namespace()

    # Strip trailing `:` tokens since we might want to add a suffix
    start = ":" if namespace.startswith(":") else ""
    end = ":" if namespace.endswith(":") else ""
    namespace = namespace.strip(":")
    if ":" in namespace:
        # Split off any nesting that we don't uniqify anyway.
        parents, namespace = namespace.rsplit(":", 1)
        start += parents + ":"
        ROOT += start

    iteration = 1
    increment_version = True
    while increment_version:
        nr_namespace = namespace + format % iteration
        unique = prefix + nr_namespace + suffix
        container_name = f"{unique}:{namespace}{con_suffix}"
        if not rt.getNodeByName(container_name):
            name_space = start + unique + end
            increment_version = False
            return name_space
        else:
            increment_version = True
            iteration += 1


def get_namespace(container_name):
    """Get the namespace and name of the sub-container

    Args:
        container_name (str): the name of master container

    Raises:
        RuntimeError: when there is no master container found

    Returns:
        namespace (str): namespace of the sub-container
        name (str): name of the sub-container
    """
    node = rt.getNodeByName(container_name)
    if not node:
        raise RuntimeError("Master container not found.")
    name = rt.getUserProp(node, "name")
    namespace = rt.getUserProp(node, "namespace")
    return namespace, name


def object_transform_set(container_children):
    """Store the transforms of previously loaded object(s).

    Args:
        container_children(list): A list of nodes

    Returns:
        transform_set (dict): A dict with all transform data of
            the previously loaded object(s)
    """
    transform_set = {}
    for node in container_children:
        name = f"{node}.transform"
        transform_set[name] = node.pos
        name = f"{node}.scale"
        transform_set[name] = node.scale
    return transform_set


def get_plugins() -> list:
    """Get all loaded plugins in 3dsMax

    Returns:
        plugin_info_list: a list of loaded plugins
    """
    manager = rt.PluginManager
    count = manager.pluginDllCount
    plugin_info_list = []
    for p in range(1, count + 1):
        plugin_info = manager.pluginDllName(p)
        plugin_info_list.append(plugin_info)

    return plugin_info_list


def update_modifier_node_names(event, node):
    """Update the stored node names after renaming.

    Args:
        event (pymxs.MXSWrapperBase): event name (
            mandatory argument for rt.NodeEventCallback)
        node (list): affected node handles (
            mandatory argument for rt.NodeEventCallback)
    """
    containers = [
        obj
        for obj in rt.Objects
        if (
            rt.ClassOf(obj) == rt.Container
            and rt.getUserProp(obj, "id") == "pyblish.avalon.instance"
            and rt.getUserProp(obj, "productType") not in {
                "workfile", "tyflow"
            }
        )
    ]
    if not containers:
        return
    for container in containers:
        ayon_data = container.modifiers[0].openPypeData
        updated_node_names = [str(node.node) for node
                              in ayon_data.all_handles]
        rt.setProperty(ayon_data, "sel_list", updated_node_names)

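
# Registered in MaxHost.install() via
#   rt.NodeEventCallback(nameChanged=lib.update_modifier_node_names)
# so the stored selection lists stay in sync when nodes are renamed.
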
@contextlib.contextmanager
def render_resolution(width, height):
    """Set render resolution option during context

    Args:
        width (int): render width
        height (int): render height
    """
    current_renderWidth = rt.renderWidth
    current_renderHeight = rt.renderHeight
    try:
        rt.renderWidth = width
        rt.renderHeight = height
        yield
    finally:
        rt.renderWidth = current_renderWidth
        rt.renderHeight = current_renderHeight

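
# Usage sketch (values hypothetical): render at a temporary resolution and
# restore the previous one afterwards, even if rendering raises:
#   with render_resolution(1920, 1080):
#       rt.render()
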
@contextlib.contextmanager
def suspended_refresh():
    """Suspend refresh for scene and modify panel redraw."""
    if is_headless():
        yield
        return
    rt.disableSceneRedraw()
    rt.suspendEditing()
    try:
        yield

    finally:
        rt.enableSceneRedraw()
        rt.resumeEditing()
275
server_addon/max/client/ayon_max/api/lib_renderproducts.py
Normal file
@@ -0,0 +1,275 @@
# Render Element Example : For scanline render, VRay
# https://help.autodesk.com/view/MAXDEV/2022/ENU/?guid=GUID-E8F75D47-B998-4800-A3A5-610E22913CFC
# arnold
# https://help.autodesk.com/view/ARNOL/ENU/?guid=arnold_for_3ds_max_ax_maxscript_commands_ax_renderview_commands_html
import os

from pymxs import runtime as rt

from ayon_max.api.lib import get_current_renderer
from ayon_core.pipeline import get_current_project_name
from ayon_core.settings import get_project_settings


class RenderProducts(object):

    def __init__(self, project_settings=None):
        self._project_settings = project_settings
        if not self._project_settings:
            self._project_settings = get_project_settings(
                get_current_project_name()
            )

    def get_beauty(self, container):
        render_dir = os.path.dirname(rt.rendOutputFilename)

        output_file = os.path.join(render_dir, container)

        setting = self._project_settings
        img_fmt = setting["max"]["RenderSettings"]["image_format"]  # noqa

        start_frame = int(rt.rendStart)
        end_frame = int(rt.rendEnd) + 1

        return {
            "beauty": self.get_expected_beauty(
                output_file, start_frame, end_frame, img_fmt
            )
        }

    def get_multiple_beauty(self, outputs, cameras):
        beauty_output_frames = dict()
        for output, camera in zip(outputs, cameras):
            filename, ext = os.path.splitext(output)
            filename = filename.replace(".", "")
            ext = ext.replace(".", "")
            start_frame = int(rt.rendStart)
            end_frame = int(rt.rendEnd) + 1
            new_beauty = self.get_expected_beauty(
                filename, start_frame, end_frame, ext
            )
            beauty_output = ({
                f"{camera}_beauty": new_beauty
            })
            beauty_output_frames.update(beauty_output)
        return beauty_output_frames

    def get_multiple_aovs(self, outputs, cameras):
        renderer_class = get_current_renderer()
        renderer = str(renderer_class).split(":")[0]
        aovs_frames = {}
        for output, camera in zip(outputs, cameras):
            filename, ext = os.path.splitext(output)
            filename = filename.replace(".", "")
            ext = ext.replace(".", "")
            start_frame = int(rt.rendStart)
            end_frame = int(rt.rendEnd) + 1

            if renderer in [
                "ART_Renderer",
                "V_Ray_6_Hotfix_3",
                "V_Ray_GPU_6_Hotfix_3",
                "Default_Scanline_Renderer",
                "Quicksilver_Hardware_Renderer",
            ]:
                render_name = self.get_render_elements_name()
                if render_name:
                    for name in render_name:
                        aovs_frames.update({
                            f"{camera}_{name}": self.get_expected_aovs(
                                filename, name, start_frame,
                                end_frame, ext)
                        })
            elif renderer == "Redshift_Renderer":
                render_name = self.get_render_elements_name()
                if render_name:
                    rs_aov_files = rt.Execute("renderers.current.separateAovFiles")  # noqa
                    # this doesn't work, always returns False
                    # rs_AovFiles = rt.RedShift_Renderer().separateAovFiles
                    if ext == "exr" and not rs_aov_files:
                        for name in render_name:
                            if name == "RsCryptomatte":
                                aovs_frames.update({
                                    f"{camera}_{name}": self.get_expected_aovs(  # noqa
                                        filename, name, start_frame,
                                        end_frame, ext)
                                })
                    else:
                        for name in render_name:
                            aovs_frames.update({
                                f"{camera}_{name}": self.get_expected_aovs(
                                    filename, name, start_frame,
                                    end_frame, ext)
                            })
            elif renderer == "Arnold":
                render_name = self.get_arnold_product_name()
                if render_name:
                    for name in render_name:
                        aovs_frames.update({
                            f"{camera}_{name}": self.get_expected_arnold_product(  # noqa
                                filename, name, start_frame,
                                end_frame, ext)
                        })
            elif renderer in [
                "V_Ray_6_Hotfix_3",
                "V_Ray_GPU_6_Hotfix_3"
            ]:
                if ext != "exr":
                    render_name = self.get_render_elements_name()
                    if render_name:
                        for name in render_name:
                            aovs_frames.update({
                                f"{camera}_{name}": self.get_expected_aovs(
                                    filename, name, start_frame,
                                    end_frame, ext)
                            })

        return aovs_frames

    def get_aovs(self, container):
        render_dir = os.path.dirname(rt.rendOutputFilename)

        output_file = os.path.join(render_dir,
                                   container)

        setting = self._project_settings
        img_fmt = setting["max"]["RenderSettings"]["image_format"]  # noqa

        start_frame = int(rt.rendStart)
        end_frame = int(rt.rendEnd) + 1
        renderer_class = get_current_renderer()
        renderer = str(renderer_class).split(":")[0]
        render_dict = {}

        if renderer in [
            "ART_Renderer",
            "V_Ray_6_Hotfix_3",
            "V_Ray_GPU_6_Hotfix_3",
            "Default_Scanline_Renderer",
            "Quicksilver_Hardware_Renderer",
        ]:
            render_name = self.get_render_elements_name()
            if render_name:
                for name in render_name:
                    render_dict.update({
                        name: self.get_expected_aovs(
                            output_file, name, start_frame,
                            end_frame, img_fmt)
                    })
        elif renderer == "Redshift_Renderer":
            render_name = self.get_render_elements_name()
            if render_name:
                rs_aov_files = rt.Execute("renderers.current.separateAovFiles")
                # this doesn't work, always returns False
                # rs_AovFiles = rt.RedShift_Renderer().separateAovFiles
                if img_fmt == "exr" and not rs_aov_files:
                    for name in render_name:
                        if name == "RsCryptomatte":
                            render_dict.update({
                                name: self.get_expected_aovs(
                                    output_file, name, start_frame,
                                    end_frame, img_fmt)
                            })
                else:
                    for name in render_name:
                        render_dict.update({
                            name: self.get_expected_aovs(
                                output_file, name, start_frame,
                                end_frame, img_fmt)
                        })

        elif renderer == "Arnold":
            render_name = self.get_arnold_product_name()
            if render_name:
                for name in render_name:
                    render_dict.update({
                        name: self.get_expected_arnold_product(
                            output_file, name, start_frame,
                            end_frame, img_fmt)
                    })
        elif renderer in [
            "V_Ray_6_Hotfix_3",
            "V_Ray_GPU_6_Hotfix_3"
        ]:
            if img_fmt != "exr":
                render_name = self.get_render_elements_name()
                if render_name:
                    for name in render_name:
                        render_dict.update({
                            name: self.get_expected_aovs(
                                output_file, name, start_frame,
                                end_frame, img_fmt)  # noqa
                        })

        return render_dict

    def get_expected_beauty(self, folder, start_frame, end_frame, fmt):
        beauty_frame_range = []
        for f in range(start_frame, end_frame):
            frame = "%04d" % f
            beauty_output = f"{folder}.{frame}.{fmt}"
            beauty_output = beauty_output.replace("\\", "/")
            beauty_frame_range.append(beauty_output)

        return beauty_frame_range

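
    # For folder="renders/shot010", start_frame=1, end_frame=3 and
    # fmt="exr" (values hypothetical) this yields:
    #   ["renders/shot010.0001.exr", "renders/shot010.0002.exr"]
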
    def get_arnold_product_name(self):
        """Get all the Arnold AOV names."""
        aov_name = []

        amw = rt.MaxToAOps.AOVsManagerWindow()
        aov_mgr = rt.renderers.current.AOVManager
        # Check if there is any aov group set in AOV manager
        aov_group_num = len(aov_mgr.drivers)
        if aov_group_num < 1:
            return
        for i in range(aov_group_num):
            # get the specific AOV group
            aov_name.extend(aov.name for aov in aov_mgr.drivers[i].aov_list)
        # close the AOVs manager window
        amw.close()

        return aov_name

    def get_expected_arnold_product(self, folder, name,
                                    start_frame, end_frame, fmt):
        """Get all the expected Arnold AOVs"""
        aov_list = []
        for f in range(start_frame, end_frame):
            frame = "%04d" % f
            render_element = f"{folder}_{name}.{frame}.{fmt}"
            render_element = render_element.replace("\\", "/")
            aov_list.append(render_element)

        return aov_list

    def get_render_elements_name(self):
        """Get the names of all enabled render elements."""
        render_name = []
        render_elem = rt.maxOps.GetCurRenderElementMgr()
        render_elem_num = render_elem.NumRenderElements()
        if render_elem_num < 1:
            return
        # get render elements from the renders
        for i in range(render_elem_num):
            renderlayer_name = render_elem.GetRenderElement(i)
            if renderlayer_name.enabled:
                target, renderpass = str(renderlayer_name).split(":")
                render_name.append(renderpass)

        return render_name

    def get_expected_aovs(self, folder, name,
                          start_frame, end_frame, fmt):
        """Get all the expected render element output files."""
        render_elements = []
        for f in range(start_frame, end_frame):
            frame = "%04d" % f
            render_element = f"{folder}_{name}.{frame}.{fmt}"
            render_element = render_element.replace("\\", "/")
            render_elements.append(render_element)

        return render_elements

    def image_format(self):
        return self._project_settings["max"]["RenderSettings"]["image_format"]  # noqa
227
server_addon/max/client/ayon_max/api/lib_rendersettings.py
Normal file
@@ -0,0 +1,227 @@
import os
from pymxs import runtime as rt
from ayon_core.lib import Logger
from ayon_core.settings import get_project_settings
from ayon_core.pipeline import get_current_project_name
from ayon_core.pipeline.context_tools import get_current_folder_entity

from ayon_max.api.lib import (
    set_render_frame_range,
    get_current_renderer,
    get_default_render_folder
)


class RenderSettings(object):

    log = Logger.get_logger("RenderSettings")

    _aov_chars = {
        "dot": ".",
        "dash": "-",
        "underscore": "_"
    }

    def __init__(self, project_settings=None):
        """
        Set up the naming convention for the render
        elements for the deadline submission
        """

        self._project_settings = project_settings
        if not self._project_settings:
            self._project_settings = get_project_settings(
                get_current_project_name()
            )

    def set_render_camera(self, selection):
        for sel in selection:
            # to avoid Attribute Error from pymxs wrapper
            if rt.classOf(sel) in rt.Camera.classes:
                rt.viewport.setCamera(sel)
                return
        raise RuntimeError("Active Camera not found")

    def render_output(self, container):
        folder = rt.maxFilePath
        # hard-coded, should be customized in the setting
        file = rt.maxFileName
        folder = folder.replace("\\", "/")
        # hard-coded, set the renderoutput path
        setting = self._project_settings
        render_folder = get_default_render_folder(setting)
        filename, ext = os.path.splitext(file)
        output_dir = os.path.join(folder,
                                  render_folder,
                                  filename)
        if not os.path.exists(output_dir):
            os.makedirs(output_dir)
        # hard-coded, should be customized in the setting
        folder_attributes = get_current_folder_entity()["attrib"]

        # get project resolution
        width = folder_attributes.get("resolutionWidth")
        height = folder_attributes.get("resolutionHeight")
        # Set Frame Range
        frame_start = folder_attributes.get("frameStart")
        frame_end = folder_attributes.get("frameEnd")
        set_render_frame_range(frame_start, frame_end)
        # get the production render
        renderer_class = get_current_renderer()
        renderer = str(renderer_class).split(":")[0]

        img_fmt = self._project_settings["max"]["RenderSettings"]["image_format"]  # noqa
        output = os.path.join(output_dir, container)
        try:
            aov_separator = self._aov_chars[(
                self._project_settings["max"]
                                      ["RenderSettings"]
                                      ["aov_separator"]
            )]
        except KeyError:
            aov_separator = "."
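
        # Note on the doubled dot below: 3dsMax inserts the frame digits
        # before the extension when saving a sequence, so "name..png" is
        # expected to come out as "name.0001.png" and so on.
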
output_filename = f"{output}..{img_fmt}"
|
||||
output_filename = output_filename.replace("{aov_separator}",
|
||||
aov_separator)
|
||||
rt.rendOutputFilename = output_filename
|
||||
if renderer == "VUE_File_Renderer":
|
||||
return
|
||||
# TODO: Finish the arnold render setup
|
||||
if renderer == "Arnold":
|
||||
self.arnold_setup()
|
||||
|
||||
if renderer in [
|
||||
"ART_Renderer",
|
||||
"Redshift_Renderer",
|
||||
"V_Ray_6_Hotfix_3",
|
||||
"V_Ray_GPU_6_Hotfix_3",
|
||||
"Default_Scanline_Renderer",
|
||||
"Quicksilver_Hardware_Renderer",
|
||||
]:
|
||||
self.render_element_layer(output, width, height, img_fmt)
|
||||
|
||||
rt.rendSaveFile = True
|
||||
|
||||
if rt.renderSceneDialog.isOpen():
|
||||
rt.renderSceneDialog.close()
|
||||
|
||||
def arnold_setup(self):
|
||||
# get Arnold RenderView run in the background
|
||||
# for setting up renderable camera
|
||||
arv = rt.MAXToAOps.ArnoldRenderView()
|
||||
render_camera = rt.viewport.GetCamera()
|
||||
if render_camera:
|
||||
arv.setOption("Camera", str(render_camera))
|
||||
|
||||
# TODO: add AOVs and extension
|
||||
img_fmt = self._project_settings["max"]["RenderSettings"]["image_format"] # noqa
|
||||
setup_cmd = (
|
||||
f"""
|
||||
amw = MaxtoAOps.AOVsManagerWindow()
|
||||
amw.close()
|
||||
aovmgr = renderers.current.AOVManager
|
||||
aovmgr.drivers = #()
|
||||
img_fmt = "{img_fmt}"
|
||||
if img_fmt == "png" then driver = ArnoldPNGDriver()
|
||||
if img_fmt == "jpg" then driver = ArnoldJPEGDriver()
|
||||
if img_fmt == "exr" then driver = ArnoldEXRDriver()
|
||||
if img_fmt == "tif" then driver = ArnoldTIFFDriver()
|
||||
if img_fmt == "tiff" then driver = ArnoldTIFFDriver()
|
||||
append aovmgr.drivers driver
|
||||
aovmgr.drivers[1].aov_list = #()
|
||||
""")
|
||||
|
||||
rt.execute(setup_cmd)
|
||||
arv.close()
|
||||
|
||||
def render_element_layer(self, dir, width, height, ext):
|
||||
"""For Renderers with render elements"""
|
||||
rt.renderWidth = width
|
||||
rt.renderHeight = height
|
||||
render_elem = rt.maxOps.GetCurRenderElementMgr()
|
||||
render_elem_num = render_elem.NumRenderElements()
|
||||
if render_elem_num < 0:
|
||||
return
|
||||
|
||||
for i in range(render_elem_num):
|
||||
renderlayer_name = render_elem.GetRenderElement(i)
|
||||
target, renderpass = str(renderlayer_name).split(":")
|
||||
aov_name = f"{dir}_{renderpass}..{ext}"
|
||||
render_elem.SetRenderElementFileName(i, aov_name)
|
||||
|
||||
def get_render_output(self, container, output_dir):
|
||||
output = os.path.join(output_dir, container)
|
||||
img_fmt = self._project_settings["max"]["RenderSettings"]["image_format"] # noqa
|
||||
output_filename = f"{output}..{img_fmt}"
|
||||
return output_filename
|
||||
|
||||
def get_render_element(self):
|
||||
orig_render_elem = []
|
||||
render_elem = rt.maxOps.GetCurRenderElementMgr()
|
||||
render_elem_num = render_elem.NumRenderElements()
|
||||
if render_elem_num < 0:
|
||||
return
|
||||
|
||||
for i in range(render_elem_num):
|
||||
render_element = render_elem.GetRenderElementFilename(i)
|
||||
orig_render_elem.append(render_element)
|
||||
|
||||
return orig_render_elem
|
||||
|
||||
def get_batch_render_elements(self, container,
|
||||
output_dir, camera):
|
||||
render_element_list = list()
|
||||
output = os.path.join(output_dir, container)
|
||||
render_elem = rt.maxOps.GetCurRenderElementMgr()
|
||||
render_elem_num = render_elem.NumRenderElements()
|
||||
if render_elem_num < 0:
|
||||
return
|
||||
img_fmt = self._project_settings["max"]["RenderSettings"]["image_format"] # noqa
|
||||
|
||||
for i in range(render_elem_num):
|
||||
renderlayer_name = render_elem.GetRenderElement(i)
|
||||
target, renderpass = str(renderlayer_name).split(":")
|
||||
aov_name = f"{output}_{camera}_{renderpass}..{img_fmt}"
|
||||
render_element_list.append(aov_name)
|
||||
return render_element_list
|
||||
|
||||
def get_batch_render_output(self, camera):
|
||||
target_layer_no = rt.batchRenderMgr.FindView(camera)
|
||||
target_layer = rt.batchRenderMgr.GetView(target_layer_no)
|
||||
return target_layer.outputFilename
|
||||
|
||||
def batch_render_elements(self, camera):
|
||||
target_layer_no = rt.batchRenderMgr.FindView(camera)
|
||||
target_layer = rt.batchRenderMgr.GetView(target_layer_no)
|
||||
outputfilename = target_layer.outputFilename
|
||||
directory = os.path.dirname(outputfilename)
|
||||
render_elem = rt.maxOps.GetCurRenderElementMgr()
|
||||
render_elem_num = render_elem.NumRenderElements()
|
||||
if render_elem_num < 0:
|
||||
return
|
||||
ext = self._project_settings["max"]["RenderSettings"]["image_format"] # noqa
|
||||
|
||||
for i in range(render_elem_num):
|
||||
renderlayer_name = render_elem.GetRenderElement(i)
|
||||
target, renderpass = str(renderlayer_name).split(":")
|
||||
aov_name = f"{directory}_{camera}_{renderpass}..{ext}"
|
||||
render_elem.SetRenderElementFileName(i, aov_name)
|
||||
|
||||
def batch_render_layer(self, container,
|
||||
output_dir, cameras):
|
||||
outputs = list()
|
||||
output = os.path.join(output_dir, container)
|
||||
img_fmt = self._project_settings["max"]["RenderSettings"]["image_format"] # noqa
|
||||
for cam in cameras:
|
||||
camera = rt.getNodeByName(cam)
|
||||
layer_no = rt.batchRenderMgr.FindView(cam)
|
||||
renderlayer = None
|
||||
if layer_no == 0:
|
||||
renderlayer = rt.batchRenderMgr.CreateView(camera)
|
||||
else:
|
||||
renderlayer = rt.batchRenderMgr.GetView(layer_no)
|
||||
# use camera name as renderlayer name
|
||||
renderlayer.name = cam
|
||||
renderlayer.outputFilename = f"{output}_{cam}..{img_fmt}"
|
||||
outputs.append(renderlayer.outputFilename)
|
||||
return outputs
|
||||
167
server_addon/max/client/ayon_max/api/menu.py
Normal file
@@ -0,0 +1,167 @@
# -*- coding: utf-8 -*-
"""3dsmax menu definition of AYON."""
import os
from qtpy import QtWidgets, QtCore
from pymxs import runtime as rt

from ayon_core.tools.utils import host_tools
from ayon_max.api import lib


class AYONMenu(object):
    """Object representing the AYON menu.

    This uses a "hack" to inject itself before the "Help" menu of 3dsmax.
    For some reason the `postLoadingMenus` event doesn't fire, and the main
    menu is probably re-initialized by menu templates, so we wait for at
    least one Qt event loop cycle before trying to insert.

    """

    def __init__(self):
        super().__init__()
        self.main_widget = self.get_main_widget()
        self.menu = None

        timer = QtCore.QTimer()
        # set number of event loops to wait.
        timer.setInterval(1)
        timer.timeout.connect(self._on_timer)
        timer.start()

        self._timer = timer
        self._counter = 0

    def _on_timer(self):
        if self._counter < 1:
            self._counter += 1
            return

        self._counter = 0
        self._timer.stop()
        self._build_ayon_menu()

    @staticmethod
    def get_main_widget():
        """Get 3dsmax main window."""
        return QtWidgets.QWidget.find(rt.windows.getMAXHWND())

    def get_main_menubar(self) -> QtWidgets.QMenuBar:
        """Get the main menubar of the 3dsmax main window."""
        return list(self.main_widget.findChildren(QtWidgets.QMenuBar))[0]

    def _get_or_create_ayon_menu(
            self, name: str = "&AYON",
            before: str = "&Help") -> QtWidgets.QAction:
        """Create AYON menu.

        Args:
            name (str, Optional): AYON menu name.
            before (str, Optional): Name of the 3dsmax main menu item to
                add AYON menu before.

        Returns:
            QtWidgets.QAction: AYON menu action.

        """
        if self.menu is not None:
            return self.menu

        menu_bar = self.get_main_menubar()
        menu_items = menu_bar.findChildren(
            QtWidgets.QMenu, options=QtCore.Qt.FindDirectChildrenOnly)
        help_action = None
        for item in menu_items:
            if name in item.title():
                # we already have AYON menu
                return item

            if before in item.title():
                help_action = item.menuAction()
        tab_menu_label = os.environ.get("AYON_MENU_LABEL") or "AYON"
        op_menu = QtWidgets.QMenu("&{}".format(tab_menu_label))
        menu_bar.insertMenu(help_action, op_menu)

        self.menu = op_menu
        return op_menu

    def _build_ayon_menu(self) -> QtWidgets.QAction:
        """Build items in AYON menu."""
        ayon_menu = self._get_or_create_ayon_menu()
        load_action = QtWidgets.QAction("Load...", ayon_menu)
        load_action.triggered.connect(self.load_callback)
        ayon_menu.addAction(load_action)

        publish_action = QtWidgets.QAction("Publish...", ayon_menu)
        publish_action.triggered.connect(self.publish_callback)
        ayon_menu.addAction(publish_action)

        manage_action = QtWidgets.QAction("Manage...", ayon_menu)
        manage_action.triggered.connect(self.manage_callback)
        ayon_menu.addAction(manage_action)

        library_action = QtWidgets.QAction("Library...", ayon_menu)
        library_action.triggered.connect(self.library_callback)
        ayon_menu.addAction(library_action)

        ayon_menu.addSeparator()

        workfiles_action = QtWidgets.QAction("Work Files...", ayon_menu)
        workfiles_action.triggered.connect(self.workfiles_callback)
        ayon_menu.addAction(workfiles_action)

        ayon_menu.addSeparator()

        res_action = QtWidgets.QAction("Set Resolution", ayon_menu)
        res_action.triggered.connect(self.resolution_callback)
        ayon_menu.addAction(res_action)

        frame_action = QtWidgets.QAction("Set Frame Range", ayon_menu)
        frame_action.triggered.connect(self.frame_range_callback)
        ayon_menu.addAction(frame_action)

        colorspace_action = QtWidgets.QAction("Set Colorspace", ayon_menu)
        colorspace_action.triggered.connect(self.colorspace_callback)
        ayon_menu.addAction(colorspace_action)

        unit_scale_action = QtWidgets.QAction("Set Unit Scale", ayon_menu)
        unit_scale_action.triggered.connect(self.unit_scale_callback)
        ayon_menu.addAction(unit_scale_action)

        return ayon_menu

    def load_callback(self):
        """Callback to show Loader tool."""
        host_tools.show_loader(parent=self.main_widget)

    def publish_callback(self):
        """Callback to show Publisher tool."""
        host_tools.show_publisher(parent=self.main_widget)

    def manage_callback(self):
        """Callback to show Scene Manager/Inventory tool."""
        host_tools.show_scene_inventory(parent=self.main_widget)

    def library_callback(self):
        """Callback to show Library Loader tool."""
        host_tools.show_library_loader(parent=self.main_widget)

    def workfiles_callback(self):
        """Callback to show Workfiles tool."""
        host_tools.show_workfiles(parent=self.main_widget)

    def resolution_callback(self):
        """Callback to reset scene resolution"""
        return lib.reset_scene_resolution()

    def frame_range_callback(self):
        """Callback to reset frame range"""
        return lib.reset_frame_range()

    def colorspace_callback(self):
        """Callback to reset colorspace"""
        return lib.reset_colorspace()

    def unit_scale_callback(self):
        """Callback to reset unit scale"""
        return lib.reset_unit_scale()
277
server_addon/max/client/ayon_max/api/pipeline.py
Normal file
@@ -0,0 +1,277 @@
# -*- coding: utf-8 -*-
"""Pipeline tools for AYON 3ds max integration."""
import os
import logging
from operator import attrgetter

import json

from ayon_core.host import HostBase, IWorkfileHost, ILoadHost, IPublishHost
import pyblish.api
from ayon_core.pipeline import (
    register_creator_plugin_path,
    register_loader_plugin_path,
    AVALON_CONTAINER_ID,
    AYON_CONTAINER_ID,
)
from ayon_max.api.menu import AYONMenu
from ayon_max.api import lib
from ayon_max.api.plugin import MS_CUSTOM_ATTRIB
from ayon_max import MAX_HOST_DIR

from pymxs import runtime as rt  # noqa

log = logging.getLogger("ayon_max")

PLUGINS_DIR = os.path.join(MAX_HOST_DIR, "plugins")
PUBLISH_PATH = os.path.join(PLUGINS_DIR, "publish")
LOAD_PATH = os.path.join(PLUGINS_DIR, "load")
CREATE_PATH = os.path.join(PLUGINS_DIR, "create")
INVENTORY_PATH = os.path.join(PLUGINS_DIR, "inventory")


class MaxHost(HostBase, IWorkfileHost, ILoadHost, IPublishHost):

    name = "max"
    menu = None

    def __init__(self):
        super(MaxHost, self).__init__()
        self._op_events = {}
        self._has_been_setup = False

    def install(self):
        pyblish.api.register_host("max")

        pyblish.api.register_plugin_path(PUBLISH_PATH)
        register_loader_plugin_path(LOAD_PATH)
        register_creator_plugin_path(CREATE_PATH)

        # self._register_callbacks()
        self.menu = AYONMenu()

        self._has_been_setup = True

        rt.callbacks.addScript(rt.Name('systemPostNew'), on_new)

        rt.callbacks.addScript(rt.Name('filePostOpen'),
                               lib.check_colorspace)

        rt.callbacks.addScript(rt.Name('postWorkspaceChange'),
                               self._deferred_menu_creation)
        rt.NodeEventCallback(
            nameChanged=lib.update_modifier_node_names)

    def workfile_has_unsaved_changes(self):
        return rt.getSaveRequired()

    def get_workfile_extensions(self):
        return [".max"]

    def save_workfile(self, dst_path=None):
        rt.saveMaxFile(dst_path)
        return dst_path

    def open_workfile(self, filepath):
        rt.checkForSave()
        rt.loadMaxFile(filepath)
        return filepath

    def get_current_workfile(self):
        return os.path.join(rt.maxFilePath, rt.maxFileName)

    def get_containers(self):
        return ls()

    def _register_callbacks(self):
        rt.callbacks.removeScripts(id=rt.name("OpenPypeCallbacks"))

        rt.callbacks.addScript(
            rt.Name("postLoadingMenus"),
            self._deferred_menu_creation, id=rt.Name('OpenPypeCallbacks'))

    def _deferred_menu_creation(self):
        self.log.info("Building menu ...")
        self.menu = AYONMenu()

    @staticmethod
    def create_context_node():
        """Helper for creating context holding node."""

        root_scene = rt.rootScene

        create_attr_script = ("""
attributes "OpenPypeContext"
(
    parameters main rollout:params
    (
        context type: #string
    )

    rollout params "OpenPype Parameters"
    (
        editText editTextContext "Context" type: #string
    )
)
        """)

        attr = rt.execute(create_attr_script)
        rt.custAttributes.add(root_scene, attr)

        return root_scene.OpenPypeContext.context

    def update_context_data(self, data, changes):
        try:
            _ = rt.rootScene.OpenPypeContext.context
        except AttributeError:
            # context node doesn't exist
            self.create_context_node()

        rt.rootScene.OpenPypeContext.context = json.dumps(data)

    def get_context_data(self):
        try:
            context = rt.rootScene.OpenPypeContext.context
        except AttributeError:
            # context node doesn't exist
            context = self.create_context_node()
        if not context:
            context = "{}"
        return json.loads(context)

    def save_file(self, dst_path=None):
        # Force forwards slashes to avoid segfault
        dst_path = dst_path.replace("\\", "/")
        rt.saveMaxFile(dst_path)


def ls() -> list:
    """Get all AYON containers."""
    objs = rt.objects
    containers = [
        obj for obj in objs
        if rt.getUserProp(obj, "id") in {
            AYON_CONTAINER_ID, AVALON_CONTAINER_ID
        }
    ]

    for container in sorted(containers, key=attrgetter("name")):
        yield lib.read(container)


def on_new():
    lib.set_context_setting()
    if rt.checkForSave():
        rt.resetMaxFile(rt.Name("noPrompt"))
        rt.clearUndoBuffer()
        rt.redrawViews()


def containerise(name: str, nodes: list, context,
                 namespace=None, loader=None, suffix="_CON"):
    data = {
        "schema": "openpype:container-2.0",
        "id": AVALON_CONTAINER_ID,
        "name": name,
        "namespace": namespace or "",
        "loader": loader,
        "representation": context["representation"]["id"],
    }
    container_name = f"{namespace}:{name}{suffix}"
    container = rt.container(name=container_name)
    import_custom_attribute_data(container, nodes)
    if not lib.imprint(container_name, data):
        print(f"imprinting of {container_name} failed.")
    return container

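
# Usage sketch (names hypothetical): a loader plug-in would wrap freshly
# loaded nodes in a container so the Scene Inventory can track them:
#   container = containerise(
#       name, loaded_nodes, context, namespace=namespace, loader="ModelLoader")
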
def load_custom_attribute_data():
|
||||
"""Re-loading the AYON custom parameter built by the creator
|
||||
|
||||
Returns:
|
||||
attribute: re-loading the custom OP attributes set in Maxscript
|
||||
"""
|
||||
return rt.Execute(MS_CUSTOM_ATTRIB)


def import_custom_attribute_data(container: str, selections: list):
    """Import the OpenPype/AYON custom attribute data built by the creator.

    Args:
        container (str): target container to which the custom
            attributes are added
        selections (list): nodes to be added into the
            group in custom attributes
    """
    attrs = load_custom_attribute_data()
    modifier = rt.EmptyModifier()
    rt.addModifier(container, modifier)
    container.modifiers[0].name = "OP Data"
    rt.custAttributes.add(container.modifiers[0], attrs)
    node_list = []
    sel_list = []
    for i in selections:
        node_ref = rt.NodeTransformMonitor(node=i)
        node_list.append(node_ref)
        sel_list.append(str(i))

    # Set the properties on the custom attribute
    rt.setProperty(
        container.modifiers[0].openPypeData,
        "all_handles", node_list)
    rt.setProperty(
        container.modifiers[0].openPypeData,
        "sel_list", sel_list)


def update_custom_attribute_data(container: str, selections: list):
    """Update the AYON custom attribute data built by the creator.

    Args:
        container (str): target container to which the custom
            attributes are added
        selections (list): nodes to be added into the
            group in custom attributes
    """
    if container.modifiers[0].name == "OP Data":
        rt.deleteModifier(container, container.modifiers[0])
    import_custom_attribute_data(container, selections)


def get_previous_loaded_object(container: str):
    """Get the previously loaded objects through the OP data.

    Args:
        container (str): the container which stores the OP data

    Returns:
        node_list (list): list of nodes which were previously loaded
    """
    node_list = []
    node_transform_monitor_list = rt.getProperty(
        container.modifiers[0].openPypeData, "all_handles")
    for node_transform_monitor in node_transform_monitor_list:
        node_list.append(node_transform_monitor.node)
    return node_list


def remove_container_data(container_node: str):
    """Remove container data after updating, switching or deleting it.

    Args:
        container_node (str): container node
    """
    if container_node.modifiers[0].name == "OP Data":
        all_set_members_names = [
            member.node for member
            in container_node.modifiers[0].openPypeData.all_handles]
        # clean up the children of alembic dummy objects
        for current_set_member in all_set_members_names:
            shape_list = [members for members in current_set_member.Children
                          if rt.ClassOf(members) == rt.AlembicObject
                          or rt.isValidNode(members)]
            if shape_list:  # noqa
                rt.Delete(shape_list)
            rt.Delete(current_set_member)
        rt.deleteModifier(container_node, container_node.modifiers[0])

    rt.Delete(container_node)
    rt.redrawViews()

298
server_addon/max/client/ayon_max/api/plugin.py
Normal file

@@ -0,0 +1,298 @@
# -*- coding: utf-8 -*-
"""3dsmax specific AYON/Pyblish plugin definitions."""
from abc import ABCMeta

import six
from pymxs import runtime as rt

from ayon_core.lib import BoolDef
from ayon_core.pipeline import (
    CreatedInstance,
    Creator,
    CreatorError,
    AYON_INSTANCE_ID,
    AVALON_INSTANCE_ID,
)

from .lib import imprint, lsattr, read

MS_CUSTOM_ATTRIB = """attributes "openPypeData"
(
    parameters main rollout:OPparams
    (
        all_handles type:#maxObjectTab tabSize:0 tabSizeVariable:on
        sel_list type:#stringTab tabSize:0 tabSizeVariable:on
    )

    rollout OPparams "OP Parameters"
    (
        listbox list_node "Node References" items:#()
        button button_add "Add to Container"
        button button_del "Delete from Container"

        fn node_to_name the_node =
        (
            handle = the_node.handle
            obj_name = the_node.name
            handle_name = obj_name + "<" + handle as string + ">"
            return handle_name
        )
        fn nodes_to_add node =
        (
            sceneObjs = #()
            if classOf node == Container do return false
            n = node as string
            for obj in Objects do
            (
                tmp_obj = obj as string
                append sceneObjs tmp_obj
            )
            if sel_list != undefined do
            (
                for obj in sel_list do
                (
                    idx = findItem sceneObjs obj
                    if idx do
                    (
                        deleteItem sceneObjs idx
                    )
                )
            )
            idx = findItem sceneObjs n
            if idx then return true else false
        )

        fn nodes_to_rmv node =
        (
            n = node as string
            idx = findItem sel_list n
            if idx then return true else false
        )

        on button_add pressed do
        (
            current_sel = selectByName title:"Select Objects to add to
            the Container" buttontext:"Add" filter:nodes_to_add
            if current_sel == undefined then return False
            temp_arr = #()
            i_node_arr = #()
            for c in current_sel do
            (
                handle_name = node_to_name c
                node_ref = NodeTransformMonitor node:c
                idx = finditem list_node.items handle_name
                if idx do (
                    continue
                )
                name = c as string
                append temp_arr handle_name
                append i_node_arr node_ref
                append sel_list name
            )
            all_handles = join i_node_arr all_handles
            list_node.items = join temp_arr list_node.items
        )

        on button_del pressed do
        (
            current_sel = selectByName title:"Select Objects to remove
            from the Container" buttontext:"Remove" filter: nodes_to_rmv
            if current_sel == undefined or current_sel.count == 0 then
            (
                return False
            )
            temp_arr = #()
            i_node_arr = #()
            new_i_node_arr = #()
            new_temp_arr = #()

            for c in current_sel do
            (
                node_ref = NodeTransformMonitor node:c as string
                handle_name = node_to_name c
                n = c as string
                tmp_all_handles = #()
                for i in all_handles do
                (
                    tmp = i as string
                    append tmp_all_handles tmp
                )
                idx = finditem tmp_all_handles node_ref
                if idx do
                (
                    new_i_node_arr = DeleteItem all_handles idx
                )
                idx = finditem list_node.items handle_name
                if idx do
                (
                    new_temp_arr = DeleteItem list_node.items idx
                )
                idx = finditem sel_list n
                if idx do
                (
                    sel_list = DeleteItem sel_list idx
                )
            )
            all_handles = join i_node_arr new_i_node_arr
            list_node.items = join temp_arr new_temp_arr
        )

        on OPparams open do
        (
            if all_handles.count != 0 then
            (
                temp_arr = #()
                for x in all_handles do
                (
                    if x.node == undefined do continue
                    handle_name = node_to_name x.node
                    append temp_arr handle_name
                )
                list_node.items = temp_arr
            )
        )
    )
)"""


class MaxCreatorBase(object):

    @staticmethod
    def cache_instance_data(shared_data):
        if shared_data.get("max_cached_instances") is not None:
            return shared_data

        shared_data["max_cached_instances"] = {}

        cached_instances = []
        for id_type in [AYON_INSTANCE_ID, AVALON_INSTANCE_ID]:
            cached_instances.extend(lsattr("id", id_type))

        for i in cached_instances:
            creator_id = rt.GetUserProp(i, "creator_identifier")
            if creator_id not in shared_data["max_cached_instances"]:
                shared_data["max_cached_instances"][creator_id] = [i.name]
            else:
                shared_data[
                    "max_cached_instances"][creator_id].append(i.name)
        return shared_data

    @staticmethod
    def create_instance_node(node):
        """Create instance node.

        If the supplied node is an existing node, it will be used to hold
        the instance, otherwise a new node of type Container is created.

        Args:
            node (rt.MXSWrapperBase, str): Node or node name to use.

        Returns:
            instance
        """
        if isinstance(node, str):
            node = rt.Container(name=node)

        attrs = rt.Execute(MS_CUSTOM_ATTRIB)
        modifier = rt.EmptyModifier()
        rt.addModifier(node, modifier)
        node.modifiers[0].name = "OP Data"
        rt.custAttributes.add(node.modifiers[0], attrs)

        return node


@six.add_metaclass(ABCMeta)
class MaxCreator(Creator, MaxCreatorBase):
    selected_nodes = []

    def create(self, product_name, instance_data, pre_create_data):
        if pre_create_data.get("use_selection"):
            self.selected_nodes = rt.GetCurrentSelection()
        if rt.getNodeByName(product_name):
            raise CreatorError(f"'{product_name}' is already created.")

        instance_node = self.create_instance_node(product_name)
        instance_data["instance_node"] = instance_node.name
        instance = CreatedInstance(
            self.product_type,
            product_name,
            instance_data,
            self
        )
        if pre_create_data.get("use_selection"):

            node_list = []
            sel_list = []
            for i in self.selected_nodes:
                node_ref = rt.NodeTransformMonitor(node=i)
                node_list.append(node_ref)
                sel_list.append(str(i))

            # Set the properties on the custom attribute
            rt.setProperty(
                instance_node.modifiers[0].openPypeData,
                "all_handles", node_list)
            rt.setProperty(
                instance_node.modifiers[0].openPypeData,
                "sel_list", sel_list)

        self._add_instance_to_context(instance)
        imprint(instance_node.name, instance.data_to_store())

        return instance

    def collect_instances(self):
        self.cache_instance_data(self.collection_shared_data)
        for instance in self.collection_shared_data["max_cached_instances"].get(self.identifier, []):  # noqa
            created_instance = CreatedInstance.from_existing(
                read(rt.GetNodeByName(instance)), self
            )
            self._add_instance_to_context(created_instance)

    def update_instances(self, update_list):
        for created_inst, changes in update_list:
            instance_node = created_inst.get("instance_node")
            new_values = {
                key: changes[key].new_value
                for key in changes.changed_keys
            }
            product_name = new_values.get("productName", "")
            if product_name and instance_node != product_name:
                node = rt.getNodeByName(instance_node)
                new_product_name = new_values["productName"]
                if rt.getNodeByName(new_product_name):
                    raise CreatorError(
                        "The product '{}' already exists.".format(
                            new_product_name))
                instance_node = new_product_name
                created_inst["instance_node"] = instance_node
                node.name = instance_node

            imprint(
                instance_node,
                created_inst.data_to_store(),
            )

    def remove_instances(self, instances):
        """Remove specified instances from the scene.

        The instance's custom attributes are deleted and the instance
        node itself is removed from the scene.
        """
        for instance in instances:
            instance_node = rt.GetNodeByName(
                instance.data.get("instance_node"))
            if instance_node:
                count = rt.custAttributes.count(instance_node.modifiers[0])
                rt.custAttributes.delete(instance_node.modifiers[0], count)
                rt.Delete(instance_node)

            self._remove_instance_from_context(instance)

    def get_pre_create_attr_defs(self):
        return [
            BoolDef("use_selection", label="Use selection")
        ]

344
server_addon/max/client/ayon_max/api/preview_animation.py
Normal file

@@ -0,0 +1,344 @@
import logging
import contextlib
from pymxs import runtime as rt
from .lib import get_max_version, render_resolution

log = logging.getLogger("ayon_max")


@contextlib.contextmanager
def play_preview_when_done(has_autoplay):
    """Set the preview playback option during the context.

    Args:
        has_autoplay (bool): autoplay the preview animation
            after it is created
    """
    current_playback = rt.preferences.playPreviewWhenDone
    try:
        rt.preferences.playPreviewWhenDone = has_autoplay
        yield
    finally:
        rt.preferences.playPreviewWhenDone = current_playback


@contextlib.contextmanager
def viewport_layout_and_camera(camera, layout="layout_1"):
    """Set viewport layout and camera during context.

    ***For 3dsMax 2024+

    Args:
        camera (str): viewport camera
        layout (str): layout to use in viewport, defaults to `layout_1`.
            Use None to not change viewport layout during context.
    """
    needs_maximise = 0
    # Set to first active non extended viewport
    rt.viewport.activeViewportEx(1)
    original_camera = rt.viewport.getCamera()
    original_type = rt.viewport.getType()
    review_camera = rt.getNodeByName(camera)

    try:
        if rt.viewport.getLayout() != rt.name(layout):
            rt.execute("max tool maximize")
            needs_maximise = 1
        rt.viewport.setCamera(review_camera)
        yield
    finally:
        if needs_maximise == 1:
            rt.execute("max tool maximize")
        if original_type == rt.Name("view_camera"):
            rt.viewport.setCamera(original_camera)
        else:
            rt.viewport.setType(original_type)


@contextlib.contextmanager
def viewport_preference_setting(general_viewport,
                                nitrous_manager,
                                nitrous_viewport,
                                vp_button_mgr):
    """Set viewport settings during the context.

    ***For Max Version < 2024

    Args:
        general_viewport (dict): General viewport settings
        nitrous_manager (dict): Nitrous graphics manager settings
        nitrous_viewport (dict): Nitrous settings for the
            preview animation
        vp_button_mgr (dict): Viewport button manager settings
    """
    orig_vp_grid = rt.viewport.getGridVisibility(1)
    orig_vp_bkg = rt.viewport.IsSolidBackgroundColorMode()

    nitrousGraphicMgr = rt.NitrousGraphicsManager
    viewport_setting = nitrousGraphicMgr.GetActiveViewportSetting()
    vp_button_mgr_original = {
        key: getattr(rt.ViewportButtonMgr, key) for key in vp_button_mgr
    }
    nitrous_manager_original = {
        key: getattr(nitrousGraphicMgr, key) for key in nitrous_manager
    }
    nitrous_viewport_original = {
        key: getattr(viewport_setting, key) for key in nitrous_viewport
    }

    try:
        rt.viewport.setGridVisibility(1, general_viewport["dspGrid"])
        rt.viewport.EnableSolidBackgroundColorMode(general_viewport["dspBkg"])
        for key, value in vp_button_mgr.items():
            setattr(rt.ViewportButtonMgr, key, value)
        for key, value in nitrous_manager.items():
            setattr(nitrousGraphicMgr, key, value)
        for key, value in nitrous_viewport.items():
            if nitrous_viewport[key] != nitrous_viewport_original[key]:
                setattr(viewport_setting, key, value)
        yield

    finally:
        rt.viewport.setGridVisibility(1, orig_vp_grid)
        rt.viewport.EnableSolidBackgroundColorMode(orig_vp_bkg)
        for key, value in vp_button_mgr_original.items():
            setattr(rt.ViewportButtonMgr, key, value)
        for key, value in nitrous_manager_original.items():
            setattr(nitrousGraphicMgr, key, value)
        for key, value in nitrous_viewport_original.items():
            setattr(viewport_setting, key, value)


def _render_preview_animation_max_2024(
        filepath, start, end, percentSize, ext, viewport_options):
    """Render viewport preview with MaxScript using `CreatePreview`.

    ****For 3dsMax 2024+

    Args:
        filepath (str): filepath for render output without frame number and
            extension, for example: /path/to/file
        start (int): start frame
        end (int): end frame
        percentSize (float): render resolution multiplier by 100
            e.g. 100.0 is 1x, 50.0 is 0.5x, 150.0 is 1.5x
        ext (str): image extension
        viewport_options (dict): viewport setting options, e.g.
            {"vpStyle": "defaultshading", "vpPreset": "highquality"}

    Returns:
        list: Created files
    """
    # the percentSize argument must be an integer
    percent = int(percentSize)
    filepath = filepath.replace("\\", "/")
    preview_output = f"{filepath}..{ext}"
    frame_template = f"{filepath}.{{:04d}}.{ext}"
    job_args = []
    for key, value in viewport_options.items():
        if isinstance(value, bool):
            if value:
                job_args.append(f"{key}:{value}")
        elif isinstance(value, str):
            if key == "vpStyle":
                if value == "Realistic":
                    value = "defaultshading"
                elif value == "Shaded":
                    log.warning(
                        "'Shaded' Mode not supported in "
                        "preview animation in Max 2024.\n"
                        "Using 'defaultshading' instead.")
                    value = "defaultshading"
                elif value == "ConsistentColors":
                    value = "flatcolor"
                else:
                    value = value.lower()
            elif key == "vpPreset":
                if value == "Quality":
                    value = "highquality"
                elif value == "Customize":
                    value = "userdefined"
                else:
                    value = value.lower()
            job_args.append(f"{key}: #{value}")

    job_str = (
        f'CreatePreview filename:"{preview_output}" outputAVI:false '
        f"percentSize:{percent} start:{start} end:{end} "
        f"{' '.join(job_args)} "
        "autoPlay:false"
    )
    rt.completeRedraw()
    rt.execute(job_str)
    # Return the created files
    return [frame_template.format(frame) for frame in range(start, end + 1)]
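
# The double dot in `preview_output` is deliberate: 3ds Max writes the frame
# number between the dots, so the created files match `frame_template`
# (e.g. a hypothetical /path/to/file.0001.png for frame 1 with ext "png").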


def _render_preview_animation_max_pre_2024(
        filepath, startFrame, endFrame,
        width, height, percentSize, ext):
    """Render viewport animation by creating bitmaps.

    ***For 3dsMax Version < 2024

    Args:
        filepath (str): filepath without frame numbers and extension
        startFrame (int): start frame
        endFrame (int): end frame
        width (int): render resolution width
        height (int): render resolution height
        percentSize (float): render resolution multiplier by 100
            e.g. 100.0 is 1x, 50.0 is 0.5x, 150.0 is 1.5x
        ext (str): image extension

    Returns:
        list: Created filepaths
    """

    # get the screenshot
    percent = percentSize / 100.0
    res_width = width * percent
    res_height = height * percent
    frame_template = "{}.{{:04}}.{}".format(filepath, ext)
    frame_template = frame_template.replace("\\", "/")
    files = []
    user_cancelled = False
    for frame in range(startFrame, endFrame + 1):
        rt.sliderTime = frame
        filepath = frame_template.format(frame)
        preview_res = rt.bitmap(
            res_width, res_height, filename=filepath
        )
        dib = rt.gw.getViewportDib()
        dib_width = float(dib.width)
        dib_height = float(dib.height)
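        # Crop the viewport grab to the requested aspect ratio below: trim
        # top/bottom when the viewport is relatively taller than the target,
        # trim left/right when it is wider, otherwise copy the bitmap as-is.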
        # aspect ratio
        viewportRatio = dib_width / dib_height
        renderRatio = float(res_width / res_height)
        if viewportRatio < renderRatio:
            heightCrop = (dib_width / renderRatio)
            topEdge = int((dib_height - heightCrop) / 2.0)
            tempImage_bmp = rt.bitmap(dib_width, heightCrop)
            src_box_value = rt.Box2(0, topEdge, dib_width, heightCrop)
            rt.pasteBitmap(dib, tempImage_bmp, src_box_value, rt.Point2(0, 0))
            rt.copy(tempImage_bmp, preview_res)
            rt.close(tempImage_bmp)
        elif viewportRatio > renderRatio:
            widthCrop = dib_height * renderRatio
            leftEdge = int((dib_width - widthCrop) / 2.0)
            tempImage_bmp = rt.bitmap(widthCrop, dib_height)
            src_box_value = rt.Box2(leftEdge, 0, widthCrop, dib_height)
            rt.pasteBitmap(dib, tempImage_bmp, src_box_value, rt.Point2(0, 0))
            rt.copy(tempImage_bmp, preview_res)
            rt.close(tempImage_bmp)
        else:
            rt.copy(dib, preview_res)
        rt.save(preview_res)
        rt.close(preview_res)
        rt.close(dib)
        files.append(filepath)
        if rt.keyboard.escPressed:
            user_cancelled = True
            break
    # clean up the cache
    rt.gc(delayed=True)
    if user_cancelled:
        raise RuntimeError("User cancelled rendering of viewport animation.")
    return files


def render_preview_animation(
        filepath,
        ext,
        camera,
        start_frame=None,
        end_frame=None,
        percentSize=100.0,
        width=1920,
        height=1080,
        viewport_options=None):
    """Render camera review animation.

    Args:
        filepath (str): filepath to render to, without frame number and
            extension
        ext (str): output file extension
        camera (str): viewport camera for preview render
        start_frame (int): start frame
        end_frame (int): end frame
        percentSize (float): render resolution multiplier by 100
            e.g. 100.0 is 1x, 50.0 is 0.5x, 150.0 is 1.5x
        width (int): render resolution width
        height (int): render resolution height
        viewport_options (dict): viewport setting options

    Returns:
        list: Rendered output files
    """
    if start_frame is None:
        start_frame = int(rt.animationRange.start)
    if end_frame is None:
        end_frame = int(rt.animationRange.end)

    if viewport_options is None:
        viewport_options = viewport_options_for_preview_animation()
    with play_preview_when_done(False):
        with viewport_layout_and_camera(camera):
            if int(get_max_version()) < 2024:
                with viewport_preference_setting(
                    viewport_options["general_viewport"],
                    viewport_options["nitrous_manager"],
                    viewport_options["nitrous_viewport"],
                    viewport_options["vp_btn_mgr"]
                ):
                    return _render_preview_animation_max_pre_2024(
                        filepath,
                        start_frame,
                        end_frame,
                        width,
                        height,
                        percentSize,
                        ext
                    )
            else:
                with render_resolution(width, height):
                    return _render_preview_animation_max_2024(
                        filepath,
                        start_frame,
                        end_frame,
                        percentSize,
                        ext,
                        viewport_options
                    )
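
# A minimal usage sketch (hypothetical paths and camera name):
#   files = render_preview_animation(
#       "C:/temp/review/shot010", "png", "reviewCamera",
#       start_frame=1001, end_frame=1050)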


def viewport_options_for_preview_animation():
    """Get default viewport options for `render_preview_animation`.

    Returns:
        dict: viewport setting options
    """
    # viewport_options should be a dictionary
    if int(get_max_version()) < 2024:
        return {
            "visualStyleMode": "defaultshading",
            "viewportPreset": "highquality",
            "vpTexture": False,
            "dspGeometry": True,
            "dspShapes": False,
            "dspLights": False,
            "dspCameras": False,
            "dspHelpers": False,
            "dspParticles": True,
            "dspBones": False,
            "dspBkg": True,
            "dspGrid": False,
            "dspSafeFrame": False,
            "dspFrameNums": False
        }
    else:
        viewport_options = {}
        viewport_options["general_viewport"] = {
            "dspBkg": True,
            "dspGrid": False
        }
        viewport_options["nitrous_manager"] = {
            "AntialiasingQuality": "None"
        }
        viewport_options["nitrous_viewport"] = {
            "VisualStyleMode": "defaultshading",
            "ViewportPreset": "highquality",
            "UseTextureEnabled": False
        }
        viewport_options["vp_btn_mgr"] = {
            "EnableButtons": False}
        return viewport_options

@@ -0,0 +1,27 @@
# -*- coding: utf-8 -*-
"""Pre-launch hook to force the 3ds max startup script."""
import os
from ayon_max import MAX_HOST_DIR
from ayon_applications import PreLaunchHook, LaunchTypes


class ForceStartupScript(PreLaunchHook):
    """Inject AYON environment to 3ds max.

    Note that this works in combination with the 3dsmax startup script,
    which translates it back to PYTHONPATH for cases when 3dsmax drops the
    PYTHONPATH environment.

    Hook `GlobalHostDataHook` must be executed before this hook.
    """
    app_groups = {"3dsmax", "adsk_3dsmax"}
    order = 11
    launch_types = {LaunchTypes.local}

    def execute(self):
        startup_args = [
            "-U",
            "MAXScript",
            os.path.join(MAX_HOST_DIR, "startup", "startup.ms"),
        ]
        self.launch_context.launch_args.append(startup_args)
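
        # "-U MAXScript <file>" asks 3ds Max to run the given MaxScript
        # utility at startup, bootstrapping the AYON integration.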

20
server_addon/max/client/ayon_max/hooks/inject_python.py
Normal file

@@ -0,0 +1,20 @@
# -*- coding: utf-8 -*-
"""Pre-launch hook to inject the python environment."""
import os
from ayon_applications import PreLaunchHook, LaunchTypes


class InjectPythonPath(PreLaunchHook):
    """Inject AYON environment to 3dsmax.

    Note that this works in combination with the 3dsmax startup script,
    which translates it back to PYTHONPATH for cases when 3dsmax drops the
    PYTHONPATH environment.

    Hook `GlobalHostDataHook` must be executed before this hook.
    """
    app_groups = {"3dsmax", "adsk_3dsmax"}
    launch_types = {LaunchTypes.local}

    def execute(self):
        self.launch_context.env["MAX_PYTHONPATH"] = os.environ["PYTHONPATH"]

18
server_addon/max/client/ayon_max/hooks/set_paths.py
Normal file

@@ -0,0 +1,18 @@
from ayon_applications import PreLaunchHook, LaunchTypes


class SetPath(PreLaunchHook):
    """Set current dir to workdir.

    Hook `GlobalHostDataHook` must be executed before this hook.
    """
    app_groups = {"max"}
    launch_types = {LaunchTypes.local}

    def execute(self):
        workdir = self.launch_context.env.get("AYON_WORKDIR", "")
        if not workdir:
            self.log.warning("BUG: Workdir is not filled.")
            return

        self.launch_context.kwargs["cwd"] = workdir

0
server_addon/max/client/ayon_max/plugins/__init__.py
Normal file

@@ -0,0 +1,13 @@
# -*- coding: utf-8 -*-
"""Creator plugin for creating camera."""
from ayon_max.api import plugin


class CreateCamera(plugin.MaxCreator):
    """Creator plugin for Camera."""
    identifier = "io.openpype.creators.max.camera"
    label = "Camera"
    product_type = "camera"
    icon = "gear"

    settings_category = "max"

@@ -0,0 +1,13 @@
# -*- coding: utf-8 -*-
"""Creator plugin for creating raw max scene."""
from ayon_max.api import plugin


class CreateMaxScene(plugin.MaxCreator):
    """Creator plugin for 3ds max scenes."""
    identifier = "io.openpype.creators.max.maxScene"
    label = "Max Scene"
    product_type = "maxScene"
    icon = "gear"

    settings_category = "max"

@@ -0,0 +1,13 @@
# -*- coding: utf-8 -*-
"""Creator plugin for model."""
from ayon_max.api import plugin


class CreateModel(plugin.MaxCreator):
    """Creator plugin for Model."""
    identifier = "io.openpype.creators.max.model"
    label = "Model"
    product_type = "model"
    icon = "gear"

    settings_category = "max"

@@ -0,0 +1,13 @@
# -*- coding: utf-8 -*-
"""Creator plugin for creating pointcache alembics."""
from ayon_max.api import plugin


class CreatePointCache(plugin.MaxCreator):
    """Creator plugin for Point caches."""
    identifier = "io.openpype.creators.max.pointcache"
    label = "Point Cache"
    product_type = "pointcache"
    icon = "gear"

    settings_category = "max"

@@ -0,0 +1,13 @@
# -*- coding: utf-8 -*-
"""Creator plugin for creating point cloud."""
from ayon_max.api import plugin


class CreatePointCloud(plugin.MaxCreator):
    """Creator plugin for Point Clouds."""
    identifier = "io.openpype.creators.max.pointcloud"
    label = "Point Cloud"
    product_type = "pointcloud"
    icon = "gear"

    settings_category = "max"

@@ -0,0 +1,12 @@
# -*- coding: utf-8 -*-
"""Creator plugin for creating Redshift proxies."""
from ayon_max.api import plugin


class CreateRedshiftProxy(plugin.MaxCreator):
    identifier = "io.openpype.creators.max.redshiftproxy"
    label = "Redshift Proxy"
    product_type = "redshiftproxy"
    icon = "gear"

    settings_category = "max"

@@ -0,0 +1,52 @@
# -*- coding: utf-8 -*-
"""Creator plugin for creating renders."""
import os
from ayon_max.api import plugin
from ayon_core.lib import BoolDef
from ayon_max.api.lib_rendersettings import RenderSettings


class CreateRender(plugin.MaxCreator):
    """Creator plugin for Renders."""
    identifier = "io.openpype.creators.max.render"
    label = "Render"
    product_type = "maxrender"
    icon = "gear"

    settings_category = "max"

    def create(self, product_name, instance_data, pre_create_data):
        from pymxs import runtime as rt
        file = rt.maxFileName
        filename, _ = os.path.splitext(file)
        instance_data["AssetName"] = filename
        instance_data["multiCamera"] = pre_create_data.get("multi_cam")
        num_of_renderlayer = rt.batchRenderMgr.numViews
        if num_of_renderlayer > 0:
            rt.batchRenderMgr.DeleteView(num_of_renderlayer)

        instance = super(CreateRender, self).create(
            product_name,
            instance_data,
            pre_create_data)

        container_name = instance.data.get("instance_node")
        # set output paths for rendering (mandatory for Deadline)
        RenderSettings().render_output(container_name)
        # TODO: create multiple camera options
        if self.selected_nodes:
            selected_nodes_name = []
            for sel in self.selected_nodes:
                name = sel.name
                selected_nodes_name.append(name)
            RenderSettings().batch_render_layer(
                container_name, filename,
                selected_nodes_name)

    def get_pre_create_attr_defs(self):
        attrs = super(CreateRender, self).get_pre_create_attr_defs()
        return attrs + [
            BoolDef("multi_cam",
                    label="Multiple Cameras Submission",
                    default=False),
        ]

122
server_addon/max/client/ayon_max/plugins/create/create_review.py
Normal file

@@ -0,0 +1,122 @@
# -*- coding: utf-8 -*-
"""Creator plugin for creating review in Max."""
from ayon_max.api import plugin
from ayon_core.lib import BoolDef, EnumDef, NumberDef


class CreateReview(plugin.MaxCreator):
    """Review in 3dsMax"""

    identifier = "io.openpype.creators.max.review"
    label = "Review"
    product_type = "review"
    icon = "video-camera"

    settings_category = "max"

    review_width = 1920
    review_height = 1080
    percentSize = 100
    keep_images = False
    image_format = "png"
    visual_style = "Realistic"
    viewport_preset = "Quality"
    vp_texture = True
    anti_aliasing = "None"

    def apply_settings(self, project_settings):
        settings = project_settings["max"]["CreateReview"]  # noqa

        # Take some defaults from settings
        self.review_width = settings.get("review_width", self.review_width)
        self.review_height = settings.get("review_height", self.review_height)
        self.percentSize = settings.get("percentSize", self.percentSize)
        self.keep_images = settings.get("keep_images", self.keep_images)
        self.image_format = settings.get("image_format", self.image_format)
        self.visual_style = settings.get("visual_style", self.visual_style)
        self.viewport_preset = settings.get(
            "viewport_preset", self.viewport_preset)
        self.anti_aliasing = settings.get(
            "anti_aliasing", self.anti_aliasing)
        self.vp_texture = settings.get("vp_texture", self.vp_texture)

    def create(self, product_name, instance_data, pre_create_data):
        # Transfer settings from pre create to instance
        creator_attributes = instance_data.setdefault(
            "creator_attributes", dict())
        for key in ["imageFormat",
                    "keepImages",
                    "review_width",
                    "review_height",
                    "percentSize",
                    "visualStyleMode",
                    "viewportPreset",
                    "antialiasingQuality",
                    "vpTexture"]:
            if key in pre_create_data:
                creator_attributes[key] = pre_create_data[key]

        super(CreateReview, self).create(
            product_name,
            instance_data,
            pre_create_data)

    def get_instance_attr_defs(self):
        image_format_enum = ["exr", "jpg", "png", "tga"]

        visual_style_preset_enum = [
            "Realistic", "Shaded", "Facets",
            "ConsistentColors", "HiddenLine",
            "Wireframe", "BoundingBox", "Ink",
            "ColorInk", "Acrylic", "Tech", "Graphite",
            "ColorPencil", "Pastel", "Clay", "ModelAssist"
        ]
        preview_preset_enum = [
            "Quality", "Standard", "Performance",
            "DXMode", "Customize"]
        anti_aliasing_enum = ["None", "2X", "4X", "8X"]

        return [
            NumberDef("review_width",
                      label="Review width",
                      decimals=0,
                      minimum=0,
                      default=self.review_width),
            NumberDef("review_height",
                      label="Review height",
                      decimals=0,
                      minimum=0,
                      default=self.review_height),
            NumberDef("percentSize",
                      label="Percent of Output",
                      default=self.percentSize,
                      minimum=1,
                      decimals=0),
            BoolDef("keepImages",
                    label="Keep Image Sequences",
                    default=self.keep_images),
            EnumDef("imageFormat",
                    image_format_enum,
                    default=self.image_format,
                    label="Image Format Options"),
            EnumDef("visualStyleMode",
                    visual_style_preset_enum,
                    default=self.visual_style,
                    label="Preference"),
            EnumDef("viewportPreset",
                    preview_preset_enum,
                    default=self.viewport_preset,
                    label="Preview Preset"),
            EnumDef("antialiasingQuality",
                    anti_aliasing_enum,
                    default=self.anti_aliasing,
                    label="Anti-aliasing Quality"),
            BoolDef("vpTexture",
                    label="Viewport Texture",
                    default=self.vp_texture)
        ]

    def get_pre_create_attr_defs(self):
        # Use same attributes as for instance attributes
        attrs = super().get_pre_create_attr_defs()
        return attrs + self.get_instance_attr_defs()
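
    # These attribute keys (visualStyleMode, viewportPreset, vpTexture, ...)
    # appear to line up with the viewport options consumed by
    # `render_preview_animation` in ayon_max/api/preview_animation.py.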

@@ -0,0 +1,13 @@
# -*- coding: utf-8 -*-
"""Creator plugin for creating TyCache."""
from ayon_max.api import plugin


class CreateTyCache(plugin.MaxCreator):
    """Creator plugin for TyCache."""
    identifier = "io.openpype.creators.max.tycache"
    label = "TyCache"
    product_type = "tycache"
    icon = "gear"

    settings_category = "max"

@@ -0,0 +1,119 @@
# -*- coding: utf-8 -*-
"""Creator plugin for creating workfiles."""
import ayon_api

from ayon_core.pipeline import CreatedInstance, AutoCreator
from ayon_max.api import plugin
from ayon_max.api.lib import read, imprint
from pymxs import runtime as rt


class CreateWorkfile(plugin.MaxCreatorBase, AutoCreator):
    """Workfile auto-creator."""
    identifier = "io.ayon.creators.max.workfile"
    label = "Workfile"
    product_type = "workfile"
    icon = "fa5.file"

    default_variant = "Main"

    settings_category = "max"

    def create(self):
        variant = self.default_variant
        current_instance = next(
            (
                instance for instance in self.create_context.instances
                if instance.creator_identifier == self.identifier
            ), None)
        project_name = self.project_name
        folder_path = self.create_context.get_current_folder_path()
        task_name = self.create_context.get_current_task_name()
        host_name = self.create_context.host_name

        if current_instance is None:
            folder_entity = ayon_api.get_folder_by_path(
                project_name, folder_path
            )
            task_entity = ayon_api.get_task_by_name(
                project_name, folder_entity["id"], task_name
            )
            product_name = self.get_product_name(
                project_name,
                folder_entity,
                task_entity,
                variant,
                host_name,
            )
            data = {
                "folderPath": folder_path,
                "task": task_name,
                "variant": variant
            }

            data.update(
                self.get_dynamic_data(
                    project_name,
                    folder_entity,
                    task_entity,
                    variant,
                    host_name,
                    current_instance)
            )
            self.log.info("Auto-creating workfile instance...")
            instance_node = self.create_node(product_name)
            data["instance_node"] = instance_node.name
            current_instance = CreatedInstance(
                self.product_type, product_name, data, self
            )
            self._add_instance_to_context(current_instance)
            imprint(instance_node.name, current_instance.data)
        elif (
            current_instance["folderPath"] != folder_path
            or current_instance["task"] != task_name
        ):
            # Update the instance context if it is not the same
            folder_entity = ayon_api.get_folder_by_path(
                project_name, folder_path
            )
            task_entity = ayon_api.get_task_by_name(
                project_name, folder_entity["id"], task_name
            )
            product_name = self.get_product_name(
                project_name,
                folder_entity,
                task_entity,
                variant,
                host_name,
            )

            current_instance["folderPath"] = folder_entity["path"]
            current_instance["task"] = task_name
            current_instance["productName"] = product_name

    def collect_instances(self):
        self.cache_instance_data(self.collection_shared_data)
        cached_instances = self.collection_shared_data["max_cached_instances"]
        for instance in cached_instances.get(self.identifier, []):
            if not rt.getNodeByName(instance):
                continue
            created_instance = CreatedInstance.from_existing(
                read(rt.GetNodeByName(instance)), self
            )
            self._add_instance_to_context(created_instance)

    def update_instances(self, update_list):
        for created_inst, _ in update_list:
            instance_node = created_inst.get("instance_node")
            imprint(
                instance_node,
                created_inst.data_to_store()
            )

    def create_node(self, product_name):
        if rt.getNodeByName(product_name):
            node = rt.getNodeByName(product_name)
            return node
        node = rt.Container(name=product_name)
        node.isHidden = True
        return node
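
    # The workfile instance node is a hidden Container: it only carries the
    # imprinted publish metadata, so it stays out of the artist's viewport.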

101
server_addon/max/client/ayon_max/plugins/load/load_camera_fbx.py
Normal file

@@ -0,0 +1,101 @@
import os

from ayon_max.api import lib
from ayon_max.api.lib import (
    unique_namespace,
    get_namespace,
    object_transform_set
)
from ayon_max.api.pipeline import (
    containerise,
    get_previous_loaded_object,
    update_custom_attribute_data,
    remove_container_data
)
from ayon_core.pipeline import get_representation_path, load


class FbxLoader(load.LoaderPlugin):
    """Fbx Loader."""

    product_types = {"camera"}
    representations = {"fbx"}
    order = -9
    icon = "code-fork"
    color = "white"

    def load(self, context, name=None, namespace=None, data=None):
        from pymxs import runtime as rt
        filepath = self.filepath_from_context(context)
        filepath = os.path.normpath(filepath)
        rt.FBXImporterSetParam("Animation", True)
        rt.FBXImporterSetParam("Camera", True)
        rt.FBXImporterSetParam("AxisConversionMethod", True)
        rt.FBXImporterSetParam("Mode", rt.Name("create"))
        rt.FBXImporterSetParam("Preserveinstances", True)
        rt.ImportFile(
            filepath,
            rt.name("noPrompt"),
            using=rt.FBXIMP)

        namespace = unique_namespace(
            name + "_",
            suffix="_",
        )
        selections = rt.GetCurrentSelection()

        for selection in selections:
            selection.name = f"{namespace}:{selection.name}"

        return containerise(
            name, selections, context,
            namespace, loader=self.__class__.__name__)

    def update(self, container, context):
        from pymxs import runtime as rt

        repre_entity = context["representation"]
        path = get_representation_path(repre_entity)
        node_name = container["instance_node"]
        node = rt.getNodeByName(node_name)
        namespace, _ = get_namespace(node_name)

        node_list = get_previous_loaded_object(node)
        rt.Select(node_list)
        prev_fbx_objects = rt.GetCurrentSelection()
        transform_data = object_transform_set(prev_fbx_objects)
        for prev_fbx_obj in prev_fbx_objects:
            if rt.isValidNode(prev_fbx_obj):
                rt.Delete(prev_fbx_obj)

        rt.FBXImporterSetParam("Animation", True)
        rt.FBXImporterSetParam("Camera", True)
        rt.FBXImporterSetParam("Mode", rt.Name("merge"))
        rt.FBXImporterSetParam("AxisConversionMethod", True)
        rt.FBXImporterSetParam("Preserveinstances", True)
        rt.ImportFile(
            path, rt.name("noPrompt"), using=rt.FBXIMP)
        current_fbx_objects = rt.GetCurrentSelection()
        fbx_objects = []
        for fbx_object in current_fbx_objects:
            fbx_object.name = f"{namespace}:{fbx_object.name}"
            fbx_objects.append(fbx_object)
            fbx_transform = f"{fbx_object.name}.transform"
            if fbx_transform in transform_data.keys():
                fbx_object.pos = transform_data[fbx_transform] or 0
                fbx_object.scale = transform_data[
                    f"{fbx_object.name}.scale"] or 0

        update_custom_attribute_data(node, fbx_objects)
        lib.imprint(container["instance_node"], {
            "representation": repre_entity["id"]
        })

    def switch(self, container, context):
        self.update(container, context)

    def remove(self, container):
        from pymxs import runtime as rt

        node = rt.GetNodeByName(container["instance_node"])
        remove_container_data(node)
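
# Update flow shared by the FBX loaders: capture the transforms of the
# previously loaded nodes, delete them, re-import the new representation,
# then restore positions and scales so artist adjustments survive the swap.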

178
server_addon/max/client/ayon_max/plugins/load/load_max_scene.py
Normal file

@@ -0,0 +1,178 @@
import os
from qtpy import QtWidgets, QtCore
from ayon_core.lib.attribute_definitions import EnumDef
from ayon_max.api import lib
from ayon_max.api.lib import (
    unique_namespace,
    get_namespace,
    object_transform_set,
    is_headless
)
from ayon_max.api.pipeline import (
    containerise, get_previous_loaded_object,
    update_custom_attribute_data,
    remove_container_data
)
from ayon_core.pipeline import get_representation_path, load


class MaterialDupOptionsWindow(QtWidgets.QDialog):
    """Pop-up dialog that lets users choose material duplicate options
    for importing Max objects when updating or switching assets.
    """
    def __init__(self, material_options):
        super(MaterialDupOptionsWindow, self).__init__()
        self.setWindowFlags(self.windowFlags() | QtCore.Qt.FramelessWindowHint)

        self.material_option = None
        self.material_options = material_options

        self.widgets = {
            "label": QtWidgets.QLabel(
                "Select material duplicate options before loading the max scene."),
            "material_options_list": QtWidgets.QListWidget(),
            "warning": QtWidgets.QLabel("No material options selected!"),
            "buttons": QtWidgets.QWidget(),
            "okButton": QtWidgets.QPushButton("Ok"),
            "cancelButton": QtWidgets.QPushButton("Cancel")
        }
        for key, value in material_options.items():
            item = QtWidgets.QListWidgetItem(value)
            self.widgets["material_options_list"].addItem(item)
            item.setData(QtCore.Qt.UserRole, key)
        # Build buttons.
        layout = QtWidgets.QHBoxLayout(self.widgets["buttons"])
        layout.addWidget(self.widgets["okButton"])
        layout.addWidget(self.widgets["cancelButton"])
        # Build layout.
        layout = QtWidgets.QVBoxLayout(self)
        layout.addWidget(self.widgets["label"])
        layout.addWidget(self.widgets["material_options_list"])
        layout.addWidget(self.widgets["buttons"])

        self.widgets["okButton"].pressed.connect(self.on_ok_pressed)
        self.widgets["cancelButton"].pressed.connect(self.on_cancel_pressed)
        self.widgets["material_options_list"].itemPressed.connect(
            self.on_material_options_pressed)

    def on_material_options_pressed(self, item):
        self.material_option = item.data(QtCore.Qt.UserRole)

    def on_ok_pressed(self):
        if self.material_option is None:
            self.widgets["warning"].setVisible(True)
            return
        self.close()

    def on_cancel_pressed(self):
        self.material_option = "promptMtlDups"
        self.close()


class MaxSceneLoader(load.LoaderPlugin):
    """Max Scene Loader."""

    product_types = {
        "camera",
        "maxScene",
        "model",
    }

    representations = {"max"}
    order = -8
    icon = "code-fork"
    color = "green"
    mtl_dup_default = "promptMtlDups"
    mtl_dup_enum_dict = {
        "promptMtlDups": "Prompt on Duplicate Materials",
        "useMergedMtlDups": "Use Incoming Material",
        "useSceneMtlDups": "Use Scene Material",
        "renameMtlDups": "Merge and Rename Incoming Material"
    }

    @classmethod
    def get_options(cls, contexts):
        return [
            EnumDef("mtldup",
                    items=cls.mtl_dup_enum_dict,
                    default=cls.mtl_dup_default,
                    label="Material Duplicate Options")
        ]

    def load(self, context, name=None, namespace=None, options=None):
        from pymxs import runtime as rt
        mat_dup_options = options.get("mtldup", self.mtl_dup_default)
        path = self.filepath_from_context(context)
        path = os.path.normpath(path)
        # import the max scene by using "merge file"
        path = path.replace('\\', '/')
        rt.MergeMaxFile(path, rt.Name(mat_dup_options),
                        quiet=True, includeFullGroup=True)
        max_objects = rt.getLastMergedNodes()
        max_object_names = [obj.name for obj in max_objects]
        # implement the OP/AYON custom attributes before load
        max_container = []
        namespace = unique_namespace(
            name + "_",
            suffix="_",
        )
        for max_obj, obj_name in zip(max_objects, max_object_names):
            max_obj.name = f"{namespace}:{obj_name}"
            max_container.append(max_obj)
        return containerise(
            name, max_container, context,
            namespace, loader=self.__class__.__name__)

    def update(self, container, context):
        from pymxs import runtime as rt

        repre_entity = context["representation"]
        path = get_representation_path(repre_entity)
        node_name = container["instance_node"]
        node = rt.getNodeByName(node_name)
        namespace, _ = get_namespace(node_name)
        # delete the old container with attribute
        # delete old duplicate
        # use the modifier OP data to delete the data
        node_list = get_previous_loaded_object(node)
        rt.select(node_list)
        prev_max_objects = rt.GetCurrentSelection()
        transform_data = object_transform_set(prev_max_objects)

        for prev_max_obj in prev_max_objects:
            if rt.isValidNode(prev_max_obj):  # noqa
                rt.Delete(prev_max_obj)
        material_option = self.mtl_dup_default
        if not is_headless():
            window = MaterialDupOptionsWindow(self.mtl_dup_enum_dict)
            window.exec_()
            material_option = window.material_option
        rt.MergeMaxFile(path, rt.Name(material_option), quiet=True)

        current_max_objects = rt.getLastMergedNodes()

        current_max_object_names = [obj.name for obj
                                    in current_max_objects]

        max_objects = []
        for max_obj, obj_name in zip(current_max_objects,
                                     current_max_object_names):
            max_obj.name = f"{namespace}:{obj_name}"
            max_objects.append(max_obj)
            max_transform = f"{max_obj}.transform"
            if max_transform in transform_data.keys():
                max_obj.pos = transform_data[max_transform] or 0
                max_obj.scale = transform_data[
                    f"{max_obj}.scale"] or 0

        update_custom_attribute_data(node, max_objects)
        lib.imprint(container["instance_node"], {
            "representation": repre_entity["id"]
        })

    def switch(self, container, context):
        self.update(container, context)

    def remove(self, container):
        from pymxs import runtime as rt
        node = rt.GetNodeByName(container["instance_node"])
        remove_container_data(node)

123
server_addon/max/client/ayon_max/plugins/load/load_model.py
Normal file

@@ -0,0 +1,123 @@
import os
from ayon_core.pipeline import load, get_representation_path
from ayon_max.api.pipeline import (
    containerise,
    get_previous_loaded_object,
    remove_container_data
)
from ayon_max.api import lib
from ayon_max.api.lib import (
    maintained_selection, unique_namespace
)


class ModelAbcLoader(load.LoaderPlugin):
    """Loading model with the Alembic loader."""

    product_types = {"model"}
    label = "Load Model with Alembic"
    representations = {"abc"}
    order = -10
    icon = "code-fork"
    color = "orange"

    def load(self, context, name=None, namespace=None, data=None):
        from pymxs import runtime as rt

        file_path = os.path.normpath(self.filepath_from_context(context))

        abc_before = {
            c
            for c in rt.rootNode.Children
            if rt.classOf(c) == rt.AlembicContainer
        }

        rt.AlembicImport.ImportToRoot = False
        rt.AlembicImport.CustomAttributes = True
        rt.AlembicImport.UVs = True
        rt.AlembicImport.VertexColors = True
        rt.importFile(file_path, rt.name("noPrompt"), using=rt.AlembicImport)

        abc_after = {
            c
            for c in rt.rootNode.Children
            if rt.classOf(c) == rt.AlembicContainer
        }

        # This should yield the new AlembicContainer node
        abc_containers = abc_after.difference(abc_before)

        if len(abc_containers) != 1:
            self.log.error("Something failed when loading.")

        abc_container = abc_containers.pop()

        namespace = unique_namespace(
            name + "_",
            suffix="_",
        )
        abc_objects = []
        for abc_object in abc_container.Children:
            abc_object.name = f"{namespace}:{abc_object.name}"
            abc_objects.append(abc_object)
        # rename the abc container with the namespace
        abc_container_name = f"{namespace}:{name}"
        abc_container.name = abc_container_name
        abc_objects.append(abc_container)

        return containerise(
            name, abc_objects, context,
            namespace, loader=self.__class__.__name__
        )

    def update(self, container, context):
        from pymxs import runtime as rt

        repre_entity = context["representation"]
        path = get_representation_path(repre_entity)
        node = rt.GetNodeByName(container["instance_node"])
        node_list = [n for n in get_previous_loaded_object(node)
                     if rt.ClassOf(n) == rt.AlembicContainer]
        with maintained_selection():
            rt.Select(node_list)

            for alembic in rt.Selection:
                abc = rt.GetNodeByName(alembic.name)
                rt.Select(abc.Children)
                for abc_con in abc.Children:
                    abc_con.source = path
                    rt.Select(abc_con.Children)
                    for abc_obj in abc_con.Children:
                        abc_obj.source = path
        lib.imprint(
            container["instance_node"],
            {"representation": repre_entity["id"]},
        )

    def switch(self, container, context):
        self.update(container, context)

    def remove(self, container):
        from pymxs import runtime as rt
        node = rt.GetNodeByName(container["instance_node"])
        remove_container_data(node)

    @staticmethod
    def get_container_children(parent, type_name):
        from pymxs import runtime as rt

        def list_children(node):
            children = []
            for c in node.Children:
                children.append(c)
                children += list_children(c)
            return children

        filtered = []
        for child in list_children(parent):
            class_type = str(rt.ClassOf(child.baseObject))
            if class_type == type_name:
                filtered.append(child)

        return filtered
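
# Alembic updates do not re-import the file: pointing each child's `source`
# at the new representation path is enough for Max to reload the cache.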

@@ -0,0 +1,98 @@
import os
from ayon_core.pipeline import load, get_representation_path
from ayon_max.api.pipeline import (
    containerise, get_previous_loaded_object,
    update_custom_attribute_data,
    remove_container_data
)
from ayon_max.api import lib
from ayon_max.api.lib import (
    unique_namespace,
    get_namespace,
    object_transform_set
)
from ayon_max.api.lib import maintained_selection


class FbxModelLoader(load.LoaderPlugin):
    """Fbx Model Loader."""

    product_types = {"model"}
    representations = {"fbx"}
    order = -9
    icon = "code-fork"
    color = "white"

    def load(self, context, name=None, namespace=None, data=None):
        from pymxs import runtime as rt
        filepath = self.filepath_from_context(context)
        filepath = os.path.normpath(filepath)
        rt.FBXImporterSetParam("Animation", False)
        rt.FBXImporterSetParam("Cameras", False)
        rt.FBXImporterSetParam("Mode", rt.Name("create"))
        rt.FBXImporterSetParam("Preserveinstances", True)
        rt.importFile(
            filepath, rt.name("noPrompt"), using=rt.FBXIMP)

        namespace = unique_namespace(
            name + "_",
            suffix="_",
        )
        selections = rt.GetCurrentSelection()

        for selection in selections:
            selection.name = f"{namespace}:{selection.name}"

        return containerise(
            name, selections, context,
            namespace, loader=self.__class__.__name__)

    def update(self, container, context):
        from pymxs import runtime as rt

        repre_entity = context["representation"]
        path = get_representation_path(repre_entity)
        node_name = container["instance_node"]
        node = rt.getNodeByName(node_name)
        if not node:
            rt.Container(name=node_name)
        namespace, _ = get_namespace(node_name)

        node_list = get_previous_loaded_object(node)
        rt.Select(node_list)
        prev_fbx_objects = rt.GetCurrentSelection()
        transform_data = object_transform_set(prev_fbx_objects)
        for prev_fbx_obj in prev_fbx_objects:
            if rt.isValidNode(prev_fbx_obj):
                rt.Delete(prev_fbx_obj)

        rt.FBXImporterSetParam("Animation", False)
        rt.FBXImporterSetParam("Cameras", False)
        rt.FBXImporterSetParam("Mode", rt.Name("create"))
        rt.FBXImporterSetParam("Preserveinstances", True)
        rt.importFile(path, rt.name("noPrompt"), using=rt.FBXIMP)
        current_fbx_objects = rt.GetCurrentSelection()
        fbx_objects = []
        for fbx_object in current_fbx_objects:
            fbx_object.name = f"{namespace}:{fbx_object.name}"
            fbx_objects.append(fbx_object)
            fbx_transform = f"{fbx_object}.transform"
            if fbx_transform in transform_data.keys():
                fbx_object.pos = transform_data[fbx_transform] or 0
                fbx_object.scale = transform_data[
                    f"{fbx_object}.scale"] or 0

        with maintained_selection():
            rt.Select(node)
        update_custom_attribute_data(node, fbx_objects)
        lib.imprint(container["instance_node"], {
            "representation": repre_entity["id"]
        })

    def switch(self, container, context):
        self.update(container, context)

    def remove(self, container):
        from pymxs import runtime as rt
        node = rt.GetNodeByName(container["instance_node"])
        remove_container_data(node)
|
||||
|
|
@ -0,0 +1,89 @@
import os

from ayon_max.api import lib
from ayon_max.api.lib import (
    unique_namespace,
    get_namespace,
    maintained_selection,
    object_transform_set
)
from ayon_max.api.pipeline import (
    containerise,
    get_previous_loaded_object,
    update_custom_attribute_data,
    remove_container_data
)
from ayon_core.pipeline import get_representation_path, load


class ObjLoader(load.LoaderPlugin):
    """Obj Loader."""

    product_types = {"model"}
    representations = {"obj"}
    order = -9
    icon = "code-fork"
    color = "white"

    def load(self, context, name=None, namespace=None, data=None):
        from pymxs import runtime as rt

        filepath = os.path.normpath(self.filepath_from_context(context))
        self.log.debug("Executing command to import..")

        rt.Execute(f'importFile @"{filepath}" #noPrompt using:ObjImp')

        namespace = unique_namespace(
            name + "_",
            suffix="_",
        )
        # create "missing" container for obj import
        # get current selection
        selections = rt.GetCurrentSelection()
        for selection in selections:
            selection.name = f"{namespace}:{selection.name}"
        return containerise(
            name, selections, context,
            namespace, loader=self.__class__.__name__)

    def update(self, container, context):
        from pymxs import runtime as rt

        repre_entity = context["representation"]
        path = get_representation_path(repre_entity)
        node_name = container["instance_node"]
        node = rt.getNodeByName(node_name)
        namespace, _ = get_namespace(node_name)
        node_list = get_previous_loaded_object(node)
        rt.Select(node_list)
        previous_objects = rt.GetCurrentSelection()
        transform_data = object_transform_set(previous_objects)
        for prev_obj in previous_objects:
            if rt.isValidNode(prev_obj):
                rt.Delete(prev_obj)

        rt.Execute(f'importFile @"{path}" #noPrompt using:ObjImp')
        # get current selection
        selections = rt.GetCurrentSelection()
        for selection in selections:
            selection.name = f"{namespace}:{selection.name}"
            selection_transform = f"{selection}.transform"
            if selection_transform in transform_data.keys():
                selection.pos = transform_data[selection_transform] or 0
                selection.scale = transform_data[
                    f"{selection}.scale"] or 0
        update_custom_attribute_data(node, selections)
        with maintained_selection():
            rt.Select(node)

        lib.imprint(node_name, {
            "representation": repre_entity["id"]
        })

    def switch(self, container, context):
        self.update(container, context)

    def remove(self, container):
        from pymxs import runtime as rt
        node = rt.GetNodeByName(container["instance_node"])
        remove_container_data(node)

||||
120
server_addon/max/client/ayon_max/plugins/load/load_model_usd.py
Normal file
120
server_addon/max/client/ayon_max/plugins/load/load_model_usd.py
Normal file
|
|
@ -0,0 +1,120 @@
|
|||
import os

from pymxs import runtime as rt
from ayon_core.pipeline.load import LoadError
from ayon_max.api import lib
from ayon_max.api.lib import (
    unique_namespace,
    get_namespace,
    object_transform_set,
    get_plugins
)
from ayon_max.api.lib import maintained_selection
from ayon_max.api.pipeline import (
    containerise,
    get_previous_loaded_object,
    update_custom_attribute_data,
    remove_container_data
)
from ayon_core.pipeline import get_representation_path, load


class ModelUSDLoader(load.LoaderPlugin):
    """Loading model with the USD loader."""

    product_types = {"model"}
    label = "Load Model(USD)"
    representations = {"usda"}
    order = -10
    icon = "code-fork"
    color = "orange"

    def load(self, context, name=None, namespace=None, data=None):
        # asset_filepath
        plugin_info = get_plugins()
        if "usdimport.dli" not in plugin_info:
            raise LoadError("No USDImporter loaded/installed in Max..")
        filepath = os.path.normpath(self.filepath_from_context(context))
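        # Build default USD import options and route the importer log to a
        # text file next to the loaded asset.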
        import_options = rt.USDImporter.CreateOptions()
        base_filename = os.path.basename(filepath)
        _, ext = os.path.splitext(base_filename)
        log_filepath = filepath.replace(ext, ".txt")

        rt.LogPath = log_filepath
        rt.LogLevel = rt.Name("info")
        rt.USDImporter.importFile(filepath,
                                  importOptions=import_options)
        namespace = unique_namespace(
            name + "_",
            suffix="_",
        )
        asset = rt.GetNodeByName(name)
        usd_objects = []

        for usd_asset in asset.Children:
            usd_asset.name = f"{namespace}:{usd_asset.name}"
            usd_objects.append(usd_asset)

        asset_name = f"{namespace}:{name}"
        asset.name = asset_name
        # need to get the correct container after renamed
        asset = rt.GetNodeByName(asset_name)
        usd_objects.append(asset)

        return containerise(
            name, usd_objects, context,
            namespace, loader=self.__class__.__name__)

    def update(self, container, context):
        repre_entity = context["representation"]
        path = get_representation_path(repre_entity)
        node_name = container["instance_node"]
        node = rt.GetNodeByName(node_name)
        namespace, name = get_namespace(node_name)
        node_list = get_previous_loaded_object(node)
        rt.Select(node_list)
        prev_objects = [sel for sel in rt.GetCurrentSelection()
                        if sel != rt.Container
                        and sel.name != node_name]
        transform_data = object_transform_set(prev_objects)
        for n in prev_objects:
            rt.Delete(n)

        import_options = rt.USDImporter.CreateOptions()
        base_filename = os.path.basename(path)
        _, ext = os.path.splitext(base_filename)
        log_filepath = path.replace(ext, ".txt")

        rt.LogPath = log_filepath
        rt.LogLevel = rt.Name("info")
        rt.USDImporter.importFile(
            path, importOptions=import_options)

        asset = rt.GetNodeByName(name)
        usd_objects = []
        for children in asset.Children:
            children.name = f"{namespace}:{children.name}"
            usd_objects.append(children)
            children_transform = f"{children}.transform"
            if children_transform in transform_data.keys():
                children.pos = transform_data[children_transform] or 0
                children.scale = transform_data[
                    f"{children}.scale"] or 0

        asset.name = f"{namespace}:{asset.name}"
        usd_objects.append(asset)
        update_custom_attribute_data(node, usd_objects)
        with maintained_selection():
            rt.Select(node)

        lib.imprint(node_name, {
            "representation": repre_entity["id"]
        })

    def switch(self, container, context):
        self.update(container, context)

    def remove(self, container):
        from pymxs import runtime as rt
        node = rt.GetNodeByName(container["instance_node"])
        remove_container_data(node)

129
server_addon/max/client/ayon_max/plugins/load/load_pointcache.py
Normal file

@ -0,0 +1,129 @@
# -*- coding: utf-8 -*-
"""Simple alembic loader for 3dsmax.

Because of the limited API, alembics can only be loaded, but not easily
updated.

"""
import os
from ayon_core.pipeline import load, get_representation_path
from ayon_max.api import lib, maintained_selection
from ayon_max.api.lib import unique_namespace
from ayon_max.api.pipeline import (
    containerise,
    get_previous_loaded_object,
    remove_container_data
)


class AbcLoader(load.LoaderPlugin):
    """Alembic loader."""

    product_types = {"camera", "animation", "pointcache"}
    label = "Load Alembic"
    representations = {"abc"}
    order = -10
    icon = "code-fork"
    color = "orange"

    def load(self, context, name=None, namespace=None, data=None):
        from pymxs import runtime as rt

        file_path = self.filepath_from_context(context)
        file_path = os.path.normpath(file_path)
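        # Snapshot the AlembicContainer nodes before and after the import;
        # the set difference below isolates the container this load created.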
        abc_before = {
            c
            for c in rt.rootNode.Children
            if rt.classOf(c) == rt.AlembicContainer
        }

        rt.AlembicImport.ImportToRoot = False
        rt.importFile(file_path, rt.name("noPrompt"), using=rt.AlembicImport)

        abc_after = {
            c
            for c in rt.rootNode.Children
            if rt.classOf(c) == rt.AlembicContainer
        }

        # This should yield new AlembicContainer node
        abc_containers = abc_after.difference(abc_before)

        if len(abc_containers) != 1:
            self.log.error("Something failed when loading.")

        abc_container = abc_containers.pop()
        selections = rt.GetCurrentSelection()
        for abc in selections:
            for cam_shape in abc.Children:
                cam_shape.playbackType = 0

        namespace = unique_namespace(
            name + "_",
            suffix="_",
        )
        abc_objects = []
        for abc_object in abc_container.Children:
            abc_object.name = f"{namespace}:{abc_object.name}"
            abc_objects.append(abc_object)
        # rename the abc container with namespace
        abc_container_name = f"{namespace}:{name}"
        abc_container.name = abc_container_name
        abc_objects.append(abc_container)

        return containerise(
            name, abc_objects, context,
            namespace, loader=self.__class__.__name__
        )

    def update(self, container, context):
        from pymxs import runtime as rt

        repre_entity = context["representation"]
        path = get_representation_path(repre_entity)
        node = rt.GetNodeByName(container["instance_node"])
        abc_container = [n for n in get_previous_loaded_object(node)
                         if rt.ClassOf(n) == rt.AlembicContainer]
        with maintained_selection():
            rt.Select(abc_container)

            for alembic in rt.Selection:
                abc = rt.GetNodeByName(alembic.name)
                rt.Select(abc.Children)
                for abc_con in abc.Children:
                    abc_con.source = path
                    rt.Select(abc_con.Children)
                    for abc_obj in abc_con.Children:
                        abc_obj.source = path
        lib.imprint(
            container["instance_node"],
            {"representation": repre_entity["id"]},
        )

    def switch(self, container, context):
        self.update(container, context)

    def remove(self, container):
        from pymxs import runtime as rt
        node = rt.GetNodeByName(container["instance_node"])
        remove_container_data(node)

    @staticmethod
    def get_container_children(parent, type_name):
        from pymxs import runtime as rt

        def list_children(node):
            children = []
            for c in node.Children:
                children.append(c)
                children += list_children(c)
            return children

        filtered = []
        for child in list_children(parent):
            class_type = str(rt.classOf(child.baseObject))
            if class_type == type_name:
                filtered.append(child)

        return filtered

@ -0,0 +1,111 @@
import os
from ayon_core.pipeline import load, get_representation_path
from ayon_core.pipeline.load import LoadError
from ayon_max.api.pipeline import (
    containerise,
    get_previous_loaded_object,
    update_custom_attribute_data,
    remove_container_data
)

from ayon_max.api.lib import (
    unique_namespace,
    get_namespace,
    object_transform_set,
    get_plugins
)
from ayon_max.api import lib
from pymxs import runtime as rt


class OxAbcLoader(load.LoaderPlugin):
    """Ornatrix Alembic loader."""

    product_types = {"camera", "animation", "pointcache"}
    label = "Load Alembic with Ornatrix"
    representations = {"abc"}
    order = -10
    icon = "code-fork"
    color = "orange"
    postfix = "param"

    def load(self, context, name=None, namespace=None, data=None):
        plugin_list = get_plugins()
        if "ephere.plugins.autodesk.max.ornatrix.dlo" not in plugin_list:
            raise LoadError("Ornatrix plugin not "
                            "found/installed in Max yet..")

        file_path = os.path.normpath(self.filepath_from_context(context))
        rt.AlembicImport.ImportToRoot = True
        rt.AlembicImport.CustomAttributes = True
        rt.importFile(
            file_path, rt.name("noPrompt"),
            using=rt.Ornatrix_Alembic_Importer)
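        # The Ornatrix importer creates its own node types; collect every
        # root child whose class name starts with "Ox_" as the import result.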
        scene_object = []
        for obj in rt.rootNode.Children:
            obj_type = rt.ClassOf(obj)
            if str(obj_type).startswith("Ox_"):
                scene_object.append(obj)

        namespace = unique_namespace(
            name + "_",
            suffix="_",
        )
        abc_container = []
        for abc in scene_object:
            abc.name = f"{namespace}:{abc.name}"
            abc_container.append(abc)

        return containerise(
            name, abc_container, context,
            namespace, loader=self.__class__.__name__
        )

    def update(self, container, context):
        repre_entity = context["representation"]
        path = get_representation_path(repre_entity)
        node_name = container["instance_node"]
        namespace, name = get_namespace(node_name)
        node = rt.getNodeByName(node_name)
        node_list = get_previous_loaded_object(node)
        rt.Select(node_list)
        selections = rt.getCurrentSelection()
        transform_data = object_transform_set(selections)
        for prev_obj in selections:
            if rt.isValidNode(prev_obj):
                rt.Delete(prev_obj)

        rt.AlembicImport.ImportToRoot = False
        rt.AlembicImport.CustomAttributes = True
        rt.importFile(
            path, rt.name("noPrompt"),
            using=rt.Ornatrix_Alembic_Importer)

        scene_object = []
        for obj in rt.rootNode.Children:
            obj_type = rt.ClassOf(obj)
            if str(obj_type).startswith("Ox_"):
                scene_object.append(obj)
        ox_abc_objects = []
        for abc in scene_object:
            abc.Parent = container
            abc.name = f"{namespace}:{abc.name}"
            ox_abc_objects.append(abc)
            ox_transform = f"{abc}.transform"
            if ox_transform in transform_data.keys():
                abc.pos = transform_data[ox_transform] or 0
                abc.scale = transform_data[f"{abc}.scale"] or 0
        update_custom_attribute_data(node, ox_abc_objects)
        lib.imprint(
            container["instance_node"],
            {"representation": repre_entity["id"]},
        )

    def switch(self, container, context):
        self.update(container, context)

    def remove(self, container):
        from pymxs import runtime as rt
        node = rt.GetNodeByName(container["instance_node"])
        remove_container_data(node)

@ -0,0 +1,69 @@
import os

from ayon_max.api import lib, maintained_selection
from ayon_max.api.lib import (
    unique_namespace,
)
from ayon_max.api.pipeline import (
    containerise,
    get_previous_loaded_object,
    update_custom_attribute_data,
    remove_container_data
)
from ayon_core.pipeline import get_representation_path, load


class PointCloudLoader(load.LoaderPlugin):
    """Point Cloud Loader."""

    product_types = {"pointcloud"}
    representations = {"prt"}
    order = -8
    icon = "code-fork"
    color = "green"
    postfix = "param"

    def load(self, context, name=None, namespace=None, data=None):
        """load point cloud by tyCache"""
        from pymxs import runtime as rt
        filepath = os.path.normpath(self.filepath_from_context(context))
        obj = rt.tyCache()
        obj.filename = filepath

        namespace = unique_namespace(
            name + "_",
            suffix="_",
        )
        obj.name = f"{namespace}:{obj.name}"

        return containerise(
            name, [obj], context,
            namespace, loader=self.__class__.__name__)

    def update(self, container, context):
        """update the container"""
        from pymxs import runtime as rt

        repre_entity = context["representation"]
        path = get_representation_path(repre_entity)
        node = rt.GetNodeByName(container["instance_node"])
        node_list = get_previous_loaded_object(node)
        update_custom_attribute_data(
            node, node_list)
        with maintained_selection():
            rt.Select(node_list)
            for prt in rt.Selection:
                prt.filename = path
        lib.imprint(container["instance_node"], {
            "representation": repre_entity["id"]
        })

    def switch(self, container, context):
        self.update(container, context)

    def remove(self, container):
        """remove the container"""
        from pymxs import runtime as rt
        node = rt.GetNodeByName(container["instance_node"])
        remove_container_data(node)

@ -0,0 +1,78 @@
import os
import clique

from ayon_core.pipeline import (
    load,
    get_representation_path
)
from ayon_core.pipeline.load import LoadError
from ayon_max.api.pipeline import (
    containerise,
    update_custom_attribute_data,
    get_previous_loaded_object,
    remove_container_data
)
from ayon_max.api import lib
from ayon_max.api.lib import (
    unique_namespace,
    get_plugins
)


class RedshiftProxyLoader(load.LoaderPlugin):
    """Load rs files with Redshift Proxy"""

    label = "Load Redshift Proxy"
    product_types = {"redshiftproxy"}
    representations = {"rs"}
    order = -9
    icon = "code-fork"
    color = "white"

    def load(self, context, name=None, namespace=None, data=None):
        from pymxs import runtime as rt
        plugin_info = get_plugins()
        if "redshift4max.dlr" not in plugin_info:
            raise LoadError("Redshift not loaded/installed in Max..")
        filepath = self.filepath_from_context(context)
        rs_proxy = rt.RedshiftProxy()
        rs_proxy.file = filepath
        files_in_folder = os.listdir(os.path.dirname(filepath))
        collections, remainder = clique.assemble(files_in_folder)
        if collections:
            rs_proxy.is_sequence = True

        namespace = unique_namespace(
            name + "_",
            suffix="_",
        )
        rs_proxy.name = f"{namespace}:{rs_proxy.name}"

        return containerise(
            name, [rs_proxy], context,
            namespace, loader=self.__class__.__name__)

    def update(self, container, context):
        from pymxs import runtime as rt

        repre_entity = context["representation"]
        path = get_representation_path(repre_entity)
        node = rt.getNodeByName(container["instance_node"])
        node_list = get_previous_loaded_object(node)
        rt.Select(node_list)
        update_custom_attribute_data(
            node, rt.Selection)
        for proxy in rt.Selection:
            proxy.file = path

        lib.imprint(container["instance_node"], {
            "representation": repre_entity["id"]
        })

    def switch(self, container, context):
        self.update(container, context)

    def remove(self, container):
        from pymxs import runtime as rt
        node = rt.GetNodeByName(container["instance_node"])
        remove_container_data(node)

@ -0,0 +1,65 @@
import os
from ayon_max.api import lib, maintained_selection
from ayon_max.api.lib import (
    unique_namespace,
)
from ayon_max.api.pipeline import (
    containerise,
    get_previous_loaded_object,
    update_custom_attribute_data,
    remove_container_data
)
from ayon_core.pipeline import get_representation_path, load


class TyCacheLoader(load.LoaderPlugin):
    """TyCache Loader."""

    product_types = {"tycache"}
    representations = {"tyc"}
    order = -8
    icon = "code-fork"
    color = "green"

    def load(self, context, name=None, namespace=None, data=None):
        """Load tyCache"""
        from pymxs import runtime as rt
        filepath = os.path.normpath(self.filepath_from_context(context))
        obj = rt.tyCache()
        obj.filename = filepath

        namespace = unique_namespace(
            name + "_",
            suffix="_",
        )
        obj.name = f"{namespace}:{obj.name}"

        return containerise(
            name, [obj], context,
            namespace, loader=self.__class__.__name__)

    def update(self, container, context):
        """update the container"""
        from pymxs import runtime as rt

        repre_entity = context["representation"]
        path = get_representation_path(repre_entity)
        node = rt.GetNodeByName(container["instance_node"])
        node_list = get_previous_loaded_object(node)
        update_custom_attribute_data(node, node_list)
        with maintained_selection():
            for tyc in node_list:
                tyc.filename = path
        lib.imprint(container["instance_node"], {
            "representation": repre_entity["id"]
        })

    def switch(self, container, context):
        self.update(container, context)

    def remove(self, container):
        """remove the container"""
        from pymxs import runtime as rt
        node = rt.GetNodeByName(container["instance_node"])
        remove_container_data(node)

@ -0,0 +1,23 @@
import os
import pyblish.api

from pymxs import runtime as rt


class CollectCurrentFile(pyblish.api.ContextPlugin):
    """Inject the current working file."""

    order = pyblish.api.CollectorOrder - 0.5
    label = "Max Current File"
    hosts = ['max']

    def process(self, context):
        """Inject the current working file"""
        folder = rt.maxFilePath
        file = rt.maxFileName
        if not folder or not file:
            self.log.error("Scene is not saved.")
        current_file = os.path.join(folder, file)

        context.data["currentFile"] = current_file
        self.log.debug("Scene path: {}".format(current_file))

@ -0,0 +1,22 @@
# -*- coding: utf-8 -*-
import pyblish.api
from pymxs import runtime as rt


class CollectFrameRange(pyblish.api.InstancePlugin):
    """Collect Frame Range."""

    order = pyblish.api.CollectorOrder + 0.01
    label = "Collect Frame Range"
    hosts = ['max']
    families = ["camera", "maxrender",
                "pointcache", "pointcloud",
                "review", "redshiftproxy"]

    def process(self, instance):
        if instance.data["productType"] == "maxrender":
            instance.data["frameStartHandle"] = int(rt.rendStart)
            instance.data["frameEndHandle"] = int(rt.rendEnd)
        else:
            instance.data["frameStartHandle"] = int(rt.animationRange.start)
            instance.data["frameEndHandle"] = int(rt.animationRange.end)

@ -0,0 +1,26 @@
# -*- coding: utf-8 -*-
"""Collect instance members."""
import pyblish.api
from pymxs import runtime as rt


class CollectMembers(pyblish.api.InstancePlugin):
    """Collect Set Members."""

    order = pyblish.api.CollectorOrder + 0.01
    label = "Collect Instance Members"
    hosts = ['max']

    def process(self, instance):
        if instance.data["productType"] == "workfile":
            self.log.debug(
                "Skipping Collecting Members for workfile product type."
            )
            return
        if instance.data.get("instance_node"):
            container = rt.GetNodeByName(instance.data["instance_node"])
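            # Members are tracked by the "openPypeData" custom attribute on
            # the container's first modifier; resolve its stored handles
            # back to scene nodes.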
instance.data["members"] = [
|
||||
member.node for member
|
||||
in container.modifiers[0].openPypeData.all_handles
|
||||
]
|
||||
self.log.debug("{}".format(instance.data["members"]))
|
||||
|
|
@ -0,0 +1,122 @@
# -*- coding: utf-8 -*-
"""Collect Render"""
import os
import pyblish.api

from pymxs import runtime as rt
from ayon_core.pipeline.publish import KnownPublishError
from ayon_max.api import colorspace
from ayon_max.api.lib import get_max_version, get_current_renderer
from ayon_max.api.lib_rendersettings import RenderSettings
from ayon_max.api.lib_renderproducts import RenderProducts


class CollectRender(pyblish.api.InstancePlugin):
    """Collect Render for Deadline"""

    order = pyblish.api.CollectorOrder + 0.02
    label = "Collect 3dsmax Render Layers"
    hosts = ['max']
    families = ["maxrender"]

    def process(self, instance):
        context = instance.context
        folder = rt.maxFilePath
        file = rt.maxFileName
        current_file = os.path.join(folder, file)
        filepath = current_file.replace("\\", "/")
        context.data['currentFile'] = current_file

        files_by_aov = RenderProducts().get_beauty(instance.name)
        aovs = RenderProducts().get_aovs(instance.name)
        files_by_aov.update(aovs)

        camera = rt.viewport.GetCamera()
        if instance.data.get("members"):
            camera_list = [member for member in instance.data["members"]
                           if rt.ClassOf(member) in rt.Camera.Classes]
            if camera_list:
                camera = camera_list[-1]

        instance.data["cameras"] = [camera.name] if camera else None  # noqa

        if instance.data.get("multiCamera"):
            cameras = instance.data.get("members")
            if not cameras:
                raise KnownPublishError("There should be at least"
                                        " one renderable camera in container")
            sel_cam = [
                c.name for c in cameras
                if rt.classOf(c) in rt.Camera.classes]
            container_name = instance.data.get("instance_node")
            render_dir = os.path.dirname(rt.rendOutputFilename)
            outputs = RenderSettings().batch_render_layer(
                container_name, render_dir, sel_cam
            )

            instance.data["cameras"] = sel_cam

            files_by_aov = RenderProducts().get_multiple_beauty(
                outputs, sel_cam)
            aovs = RenderProducts().get_multiple_aovs(
                outputs, sel_cam)
            files_by_aov.update(aovs)
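        # "expectedFiles" maps each AOV to the frames it should produce;
        # the farm publish job uses it to validate the rendered output.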
if "expectedFiles" not in instance.data:
|
||||
instance.data["expectedFiles"] = list()
|
||||
instance.data["files"] = list()
|
||||
instance.data["expectedFiles"].append(files_by_aov)
|
||||
instance.data["files"].append(files_by_aov)
|
||||
|
||||
img_format = RenderProducts().image_format()
|
||||
# OCIO config not support in
|
||||
# most of the 3dsmax renderers
|
||||
# so this is currently hard coded
|
||||
# TODO: add options for redshift/vray ocio config
|
||||
instance.data["colorspaceConfig"] = ""
|
||||
instance.data["colorspaceDisplay"] = "sRGB"
|
||||
instance.data["colorspaceView"] = "ACES 1.0 SDR-video"
|
||||
|
||||
if int(get_max_version()) >= 2024:
|
||||
colorspace_mgr = rt.ColorPipelineMgr # noqa
|
||||
display = next(
|
||||
(display for display in colorspace_mgr.GetDisplayList()))
|
||||
view_transform = next(
|
||||
(view for view in colorspace_mgr.GetViewList(display)))
|
||||
instance.data["colorspaceConfig"] = colorspace_mgr.OCIOConfigPath
|
||||
instance.data["colorspaceDisplay"] = display
|
||||
instance.data["colorspaceView"] = view_transform
|
||||
|
||||
instance.data["renderProducts"] = colorspace.ARenderProduct()
|
||||
instance.data["publishJobState"] = "Suspended"
|
||||
instance.data["attachTo"] = []
|
||||
renderer_class = get_current_renderer()
|
||||
renderer = str(renderer_class).split(":")[0]
|
||||
product_type = "maxrender"
|
||||
# also need to get the render dir for conversion
|
||||
data = {
|
||||
"folderPath": instance.data["folderPath"],
|
||||
"productName": str(instance.name),
|
||||
"publish": True,
|
||||
"maxversion": str(get_max_version()),
|
||||
"imageFormat": img_format,
|
||||
"productType": product_type,
|
||||
"family": product_type,
|
||||
"families": [product_type],
|
||||
"renderer": renderer,
|
||||
"source": filepath,
|
||||
"plugin": "3dsmax",
|
||||
"frameStart": instance.data["frameStartHandle"],
|
||||
"frameEnd": instance.data["frameEndHandle"],
|
||||
"farm": True
|
||||
}
|
||||
instance.data.update(data)
|
||||
|
||||
# TODO: this should be unified with maya and its "multipart" flag
|
||||
# on instance.
|
||||
if renderer == "Redshift_Renderer":
|
||||
instance.data.update(
|
||||
{"separateAovFiles": rt.Execute(
|
||||
"renderers.current.separateAovFiles")})
|
||||
|
||||
self.log.info("data: {0}".format(data))
|
||||
|
|
@ -0,0 +1,153 @@
# TODO: don't forget getting the focal length for the burnin
"""Collect Review"""
import pyblish.api

from pymxs import runtime as rt
from ayon_core.lib import BoolDef
from ayon_max.api.lib import get_max_version
from ayon_core.pipeline.publish import (
    AYONPyblishPluginMixin,
    KnownPublishError
)


class CollectReview(pyblish.api.InstancePlugin,
                    AYONPyblishPluginMixin):
    """Collect Review Data for Preview Animation"""

    order = pyblish.api.CollectorOrder + 0.02
    label = "Collect Review Data"
    hosts = ['max']
    families = ["review"]

    def process(self, instance):
        nodes = instance.data["members"]

        def is_camera(node):
            is_camera_class = rt.classOf(node) in rt.Camera.classes
            return is_camera_class and rt.isProperty(node, "fov")

        # Use first camera in instance
        cameras = [node for node in nodes if is_camera(node)]
        if cameras:
            if len(cameras) > 1:
                self.log.warning(
                    "Found more than one camera in instance, using first "
                    f"one found: {cameras[0]}"
                )
            camera = cameras[0]
            camera_name = camera.name
            focal_length = camera.fov
        else:
            raise KnownPublishError(
                "Unable to find a valid camera in 'Review' container."
                " Only native max Camera supported. "
                f"Found objects: {nodes}"
            )
        creator_attrs = instance.data["creator_attributes"]
        attr_values = self.get_attr_values_from_data(instance.data)

        general_preview_data = {
            "review_camera": camera_name,
            "frameStart": instance.data["frameStartHandle"],
            "frameEnd": instance.data["frameEndHandle"],
            "percentSize": creator_attrs["percentSize"],
            "imageFormat": creator_attrs["imageFormat"],
            "keepImages": creator_attrs["keepImages"],
            "fps": instance.context.data["fps"],
            "review_width": creator_attrs["review_width"],
            "review_height": creator_attrs["review_height"],
        }
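        # 3ds Max 2024+ exposes OCIO color management, so the actual config,
        # display and view transform are collected; older versions fall back
        # to the legacy Nitrous viewport settings in the else branch.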
        if int(get_max_version()) >= 2024:
            colorspace_mgr = rt.ColorPipelineMgr  # noqa
            display = next(
                (display for display in colorspace_mgr.GetDisplayList()))
            view_transform = next(
                (view for view in colorspace_mgr.GetViewList(display)))
            instance.data["colorspaceConfig"] = colorspace_mgr.OCIOConfigPath
            instance.data["colorspaceDisplay"] = display
            instance.data["colorspaceView"] = view_transform

            preview_data = {
                "vpStyle": creator_attrs["visualStyleMode"],
                "vpPreset": creator_attrs["viewportPreset"],
                "vpTextures": creator_attrs["vpTexture"],
                "dspGeometry": attr_values.get("dspGeometry"),
                "dspShapes": attr_values.get("dspShapes"),
                "dspLights": attr_values.get("dspLights"),
                "dspCameras": attr_values.get("dspCameras"),
                "dspHelpers": attr_values.get("dspHelpers"),
                "dspParticles": attr_values.get("dspParticles"),
                "dspBones": attr_values.get("dspBones"),
                "dspBkg": attr_values.get("dspBkg"),
                "dspGrid": attr_values.get("dspGrid"),
                "dspSafeFrame": attr_values.get("dspSafeFrame"),
                "dspFrameNums": attr_values.get("dspFrameNums")
            }
        else:
            general_viewport = {
                "dspBkg": attr_values.get("dspBkg"),
                "dspGrid": attr_values.get("dspGrid")
            }
            nitrous_manager = {
                "AntialiasingQuality": creator_attrs["antialiasingQuality"],
            }
            nitrous_viewport = {
                "VisualStyleMode": creator_attrs["visualStyleMode"],
                "ViewportPreset": creator_attrs["viewportPreset"],
                "UseTextureEnabled": creator_attrs["vpTexture"]
            }
            preview_data = {
                "general_viewport": general_viewport,
                "nitrous_manager": nitrous_manager,
                "nitrous_viewport": nitrous_viewport,
                "vp_btn_mgr": {"EnableButtons": False}
            }

        # Enable ftrack functionality
        instance.data.setdefault("families", []).append('ftrack')

        burnin_members = instance.data.setdefault("burninDataMembers", {})
        burnin_members["focalLength"] = focal_length

        instance.data.update(general_preview_data)
        instance.data["viewport_options"] = preview_data

    @classmethod
    def get_attribute_defs(cls):
        return [
            BoolDef("dspGeometry",
                    label="Geometry",
                    default=True),
            BoolDef("dspShapes",
                    label="Shapes",
                    default=False),
            BoolDef("dspLights",
                    label="Lights",
                    default=False),
            BoolDef("dspCameras",
                    label="Cameras",
                    default=False),
            BoolDef("dspHelpers",
                    label="Helpers",
                    default=False),
            BoolDef("dspParticles",
                    label="Particle Systems",
                    default=True),
            BoolDef("dspBones",
                    label="Bone Objects",
                    default=False),
            BoolDef("dspBkg",
                    label="Background",
                    default=True),
            BoolDef("dspGrid",
                    label="Active Grid",
                    default=False),
            BoolDef("dspSafeFrame",
                    label="Safe Frames",
                    default=False),
            BoolDef("dspFrameNums",
                    label="Frame Numbers",
                    default=False)
        ]

@ -0,0 +1,76 @@
import pyblish.api

from ayon_core.lib import EnumDef, TextDef
from ayon_core.pipeline.publish import AYONPyblishPluginMixin


class CollectTyCacheData(pyblish.api.InstancePlugin,
                         AYONPyblishPluginMixin):
    """Collect Channel Attributes for TyCache Export"""

    order = pyblish.api.CollectorOrder + 0.02
    label = "Collect tyCache attribute Data"
    hosts = ['max']
    families = ["tycache"]

    def process(self, instance):
        attr_values = self.get_attr_values_from_data(instance.data)
        attributes = {}
        for attr_key in attr_values.get("tycacheAttributes", []):
            attributes[attr_key] = True

        for key in ["tycacheLayer", "tycacheObjectName"]:
            attributes[key] = attr_values.get(key, "")

        # Collect the selected channel data before exporting
        instance.data["tyc_attrs"] = attributes
        self.log.debug(
            f"Found tycache attributes: {attributes}"
        )

    @classmethod
    def get_attribute_defs(cls):
        # TODO: Support the attributes with maxObject array
        tyc_attr_enum = ["tycacheChanAge", "tycacheChanGroups",
                         "tycacheChanPos", "tycacheChanRot",
                         "tycacheChanScale", "tycacheChanVel",
                         "tycacheChanSpin", "tycacheChanShape",
                         "tycacheChanMatID", "tycacheChanMapping",
                         "tycacheChanMaterials", "tycacheChanCustomFloat",
                         "tycacheChanCustomVector", "tycacheChanCustomTM",
                         "tycacheChanPhysX", "tycacheMeshBackup",
                         "tycacheCreateObject",
                         "tycacheCreateObjectIfNotCreated",
                         "tycacheAdditionalCloth",
                         "tycacheAdditionalSkin",
                         "tycacheAdditionalSkinID",
                         "tycacheAdditionalSkinIDValue",
                         "tycacheAdditionalTerrain",
                         "tycacheAdditionalVDB",
                         "tycacheAdditionalSplinePaths",
                         "tycacheAdditionalGeo",
                         "tycacheAdditionalGeoActivateModifiers",
                         "tycacheSplines",
                         "tycacheSplinesAdditionalSplines"
                         ]
        tyc_default_attrs = ["tycacheChanGroups", "tycacheChanPos",
                             "tycacheChanRot", "tycacheChanScale",
                             "tycacheChanVel", "tycacheChanShape",
                             "tycacheChanMatID", "tycacheChanMapping",
                             "tycacheChanMaterials",
                             "tycacheCreateObjectIfNotCreated"]
        return [
            EnumDef("tycacheAttributes",
                    tyc_attr_enum,
                    default=tyc_default_attrs,
                    multiselection=True,
                    label="TyCache Attributes"),
            TextDef("tycacheLayer",
                    label="TyCache Layer",
                    tooltip="Name of tycache layer",
                    default="$(tyFlowLayer)"),
            TextDef("tycacheObjectName",
                    label="TyCache Object Name",
                    tooltip="TyCache Object Name",
                    default="$(tyFlowName)_tyCache")
        ]

@ -0,0 +1,46 @@
# -*- coding: utf-8 -*-
"""Collect current work file."""
import os
import pyblish.api

from pymxs import runtime as rt


class CollectWorkfile(pyblish.api.InstancePlugin):
    """Inject the current working file into context"""

    order = pyblish.api.CollectorOrder - 0.01
    label = "Collect 3dsmax Workfile"
    hosts = ['max']
    families = ["workfile"]

    def process(self, instance):
        """Inject the current working file."""
        context = instance.context
        folder = rt.maxFilePath
        file = rt.maxFileName
        if not folder or not file:
            self.log.error("Scene is not saved.")
        ext = os.path.splitext(file)[-1].lstrip(".")

        data = {}

        data.update({
            "setMembers": context.data["currentFile"],
            "frameStart": context.data["frameStart"],
            "frameEnd": context.data["frameEnd"],
            "handleStart": context.data["handleStart"],
            "handleEnd": context.data["handleEnd"]
        })

        data["representations"] = [{
            "name": ext,
            "ext": ext,
            "files": file,
            "stagingDir": folder,
        }]

        instance.data.update(data)
        self.log.debug("Collected data: {}".format(data))
        self.log.debug("Collected instance: {}".format(file))
        self.log.debug("Staging dir: {}".format(folder))

@ -0,0 +1,139 @@
# -*- coding: utf-8 -*-
"""
Export alembic file.

Note:
    Parameters on AlembicExport (AlembicExport.Parameter):

    ParticleAsMesh (bool): Sets whether particle shapes are exported
        as meshes.
    AnimTimeRange (enum): How animation is saved:
        #CurrentFrame: saves current frame
        #TimeSlider: saves the active time segments on time slider (default)
        #StartEnd: saves the range specified by StartFrame and EndFrame
    StartFrame (int)
    EndFrame (int)
    ShapeSuffix (bool): When set to true, appends the string "Shape" to the
        name of each exported mesh. This property is set to false by default.
    SamplesPerFrame (int): Sets the number of animation samples per frame.
    Hidden (bool): When true, export hidden geometry.
    UVs (bool): When true, export the mesh UV map channel.
    Normals (bool): When true, export the mesh normals.
    VertexColors (bool): When true, export the mesh vertex color map 0 and the
        current vertex color display data when it differs
    ExtraChannels (bool): When true, export the mesh extra map channels
        (map channels greater than channel 1)
    Velocity (bool): When true, export the mesh vertex and particle velocity
        data.
    MaterialIDs (bool): When true, export the mesh material ID as
        Alembic face sets.
    Visibility (bool): When true, export the node visibility data.
    LayerName (bool): When true, export the node layer name as an Alembic
        object property.
    MaterialName (bool): When true, export the geometry node material name as
        an Alembic object property
    ObjectID (bool): When true, export the geometry node g-buffer object ID as
        an Alembic object property.
    CustomAttributes (bool): When true, export the node and its modifiers
        custom attributes into an Alembic object compound property.
"""
import os
import pyblish.api
from ayon_core.pipeline import publish, OptionalPyblishPluginMixin
from pymxs import runtime as rt
from ayon_max.api import maintained_selection
from ayon_max.api.lib import suspended_refresh
from ayon_core.lib import BoolDef


class ExtractAlembic(publish.Extractor,
                     OptionalPyblishPluginMixin):
    order = pyblish.api.ExtractorOrder
    label = "Extract Pointcache"
    hosts = ["max"]
    families = ["pointcache"]
    optional = True
    active = True

    def process(self, instance):
        if not self.is_active(instance.data):
            return

        parent_dir = self.staging_dir(instance)
        file_name = "{name}.abc".format(**instance.data)
        path = os.path.join(parent_dir, file_name)
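        # Suspend viewport redraws while frames are sampled for the export;
        # this keeps the extraction from triggering needless UI updates.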
        with suspended_refresh():
            self._set_abc_attributes(instance)
            with maintained_selection():
                # select and export
                node_list = instance.data["members"]
                rt.Select(node_list)
                rt.exportFile(
                    path,
                    rt.name("noPrompt"),
                    selectedOnly=True,
                    using=rt.AlembicExport,
                )

        if "representations" not in instance.data:
            instance.data["representations"] = []

        representation = {
            "name": "abc",
            "ext": "abc",
            "files": file_name,
            "stagingDir": parent_dir,
        }
        instance.data["representations"].append(representation)

    def _set_abc_attributes(self, instance):
        start = instance.data["frameStartHandle"]
        end = instance.data["frameEndHandle"]
        attr_values = self.get_attr_values_from_data(instance.data)
        custom_attrs = attr_values.get("custom_attrs", False)
        if not custom_attrs:
            self.log.debug(
                "No Custom Attributes included in this abc export...")
        rt.AlembicExport.ArchiveType = rt.Name("ogawa")
        rt.AlembicExport.CoordinateSystem = rt.Name("maya")
        rt.AlembicExport.StartFrame = start
        rt.AlembicExport.EndFrame = end
        rt.AlembicExport.CustomAttributes = custom_attrs

    @classmethod
    def get_attribute_defs(cls):
        defs = super(ExtractAlembic, cls).get_attribute_defs()
        defs.extend([
            BoolDef("custom_attrs",
                    label="Custom Attributes",
                    default=False),
        ])
        return defs


class ExtractCameraAlembic(ExtractAlembic):
    """Extract Camera with AlembicExport."""
    label = "Extract Alembic Camera"
    families = ["camera"]
    optional = True


class ExtractModelAlembic(ExtractAlembic):
    """Extract Geometry in Alembic Format"""
    label = "Extract Geometry (Alembic)"
    families = ["model"]
    optional = True

    def _set_abc_attributes(self, instance):
        attr_values = self.get_attr_values_from_data(instance.data)
        custom_attrs = attr_values.get("custom_attrs", False)
        if not custom_attrs:
            self.log.debug(
                "No Custom Attributes included in this abc export...")
        rt.AlembicExport.ArchiveType = rt.name("ogawa")
        rt.AlembicExport.CoordinateSystem = rt.name("maya")
        rt.AlembicExport.CustomAttributes = custom_attrs
        rt.AlembicExport.UVs = True
        rt.AlembicExport.VertexColors = True
        rt.AlembicExport.PreserveInstances = True

@ -0,0 +1,83 @@
import os
import pyblish.api
from ayon_core.pipeline import publish, OptionalPyblishPluginMixin
from pymxs import runtime as rt
from ayon_max.api import maintained_selection
from ayon_max.api.lib import convert_unit_scale


class ExtractModelFbx(publish.Extractor, OptionalPyblishPluginMixin):
    """
    Extract Geometry in FBX Format
    """

    order = pyblish.api.ExtractorOrder - 0.05
    label = "Extract FBX"
    hosts = ["max"]
    families = ["model"]
    optional = True

    def process(self, instance):
        if not self.is_active(instance.data):
            return

        stagingdir = self.staging_dir(instance)
        filename = "{name}.fbx".format(**instance.data)
        filepath = os.path.join(stagingdir, filename)
        self._set_fbx_attributes()

        with maintained_selection():
            # select and export
            node_list = instance.data["members"]
            rt.Select(node_list)
            rt.exportFile(
                filepath,
                rt.name("noPrompt"),
                selectedOnly=True,
                using=rt.FBXEXP,
            )

        if "representations" not in instance.data:
            instance.data["representations"] = []

        representation = {
            "name": "fbx",
            "ext": "fbx",
            "files": filename,
            "stagingDir": stagingdir,
        }
        instance.data["representations"].append(representation)
        self.log.info(
            "Extracted instance '%s' to: %s" % (instance.name, filepath)
        )

    def _set_fbx_attributes(self):
        unit_scale = convert_unit_scale()
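        # Export static geometry only, remapped to a Y-up axis via the
        # animation conversion method, with instancing preserved.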
        rt.FBXExporterSetParam("Animation", False)
        rt.FBXExporterSetParam("Cameras", False)
        rt.FBXExporterSetParam("Lights", False)
        rt.FBXExporterSetParam("PointCache", False)
        rt.FBXExporterSetParam("AxisConversionMethod", "Animation")
        rt.FBXExporterSetParam("UpAxis", "Y")
        rt.FBXExporterSetParam("Preserveinstances", True)
        if unit_scale:
            rt.FBXExporterSetParam("ConvertUnit", unit_scale)


class ExtractCameraFbx(ExtractModelFbx):
    """Extract Camera with FbxExporter."""

    order = pyblish.api.ExtractorOrder - 0.2
    label = "Extract Fbx Camera"
    families = ["camera"]
    optional = True

    def _set_fbx_attributes(self):
        unit_scale = convert_unit_scale()
        rt.FBXExporterSetParam("Animation", True)
        rt.FBXExporterSetParam("Cameras", True)
        rt.FBXExporterSetParam("AxisConversionMethod", "Animation")
        rt.FBXExporterSetParam("UpAxis", "Y")
        rt.FBXExporterSetParam("Preserveinstances", True)
        if unit_scale:
            rt.FBXExporterSetParam("ConvertUnit", unit_scale)

@ -0,0 +1,49 @@
import os
import pyblish.api
from ayon_core.pipeline import publish, OptionalPyblishPluginMixin
from pymxs import runtime as rt


class ExtractMaxSceneRaw(publish.Extractor, OptionalPyblishPluginMixin):
    """
    Extract Raw Max Scene with SaveSelected
    """

    order = pyblish.api.ExtractorOrder - 0.2
    label = "Extract Max Scene (Raw)"
    hosts = ["max"]
    families = ["camera", "maxScene", "model"]
    optional = True

    settings_category = "max"

    def process(self, instance):
        if not self.is_active(instance.data):
            return

        # publish the raw scene for camera
        self.log.debug("Extracting Raw Max Scene ...")

        stagingdir = self.staging_dir(instance)
        filename = "{name}.max".format(**instance.data)

        max_path = os.path.join(stagingdir, filename)

        if "representations" not in instance.data:
            instance.data["representations"] = []

        nodes = instance.data["members"]
        rt.saveNodes(nodes, max_path, quiet=True)

        self.log.info("Performing Extraction ...")

        representation = {
            "name": "max",
            "ext": "max",
            "files": filename,
            "stagingDir": stagingdir,
        }
        instance.data["representations"].append(representation)
        self.log.info(
            "Extracted instance '%s' to: %s" % (instance.name, max_path)
        )

@ -0,0 +1,59 @@
import os
import pyblish.api
from ayon_core.pipeline import publish, OptionalPyblishPluginMixin
from pymxs import runtime as rt
from ayon_max.api import maintained_selection
from ayon_max.api.lib import suspended_refresh
from ayon_core.pipeline.publish import KnownPublishError


class ExtractModelObj(publish.Extractor, OptionalPyblishPluginMixin):
    """
    Extract Geometry in OBJ Format
    """

    order = pyblish.api.ExtractorOrder - 0.05
    label = "Extract OBJ"
    hosts = ["max"]
    families = ["model"]
    optional = True

    settings_category = "max"

    def process(self, instance):
        if not self.is_active(instance.data):
            return

        stagingdir = self.staging_dir(instance)
        filename = "{name}.obj".format(**instance.data)
        filepath = os.path.join(stagingdir, filename)

        with suspended_refresh():
            with maintained_selection():
                # select and export
                node_list = instance.data["members"]
                rt.Select(node_list)
                rt.exportFile(
                    filepath,
                    rt.name("noPrompt"),
                    selectedOnly=True,
                    using=rt.ObjExp,
                )
        if not os.path.exists(filepath):
            raise KnownPublishError(
                "File {} wasn't produced by 3ds max, please check "
                "the logs.".format(filepath))

        if "representations" not in instance.data:
            instance.data["representations"] = []

        representation = {
            "name": "obj",
            "ext": "obj",
            "files": filename,
            "stagingDir": stagingdir,
        }

        instance.data["representations"].append(representation)
        self.log.info(
            "Extracted instance '%s' to: %s" % (instance.name, filepath)
        )

@ -0,0 +1,94 @@
import os

import pyblish.api
from pymxs import runtime as rt

from ayon_max.api import maintained_selection
from ayon_core.pipeline import OptionalPyblishPluginMixin, publish


class ExtractModelUSD(publish.Extractor,
                      OptionalPyblishPluginMixin):
    """Extract Geometry in USDA Format."""

    order = pyblish.api.ExtractorOrder - 0.05
    label = "Extract Geometry (USD)"
    hosts = ["max"]
    families = ["model"]
    optional = True

    settings_category = "max"

    def process(self, instance):
        if not self.is_active(instance.data):
            return

        self.log.info("Extracting Geometry ...")

        stagingdir = self.staging_dir(instance)
        asset_filename = "{name}.usda".format(**instance.data)
        asset_filepath = os.path.join(stagingdir,
                                      asset_filename)
        self.log.info(f"Writing USD '{asset_filepath}' to '{stagingdir}'")

        log_filename = "{name}.txt".format(**instance.data)
        log_filepath = os.path.join(stagingdir,
                                    log_filename)
        self.log.info(f"Writing log '{log_filepath}' to '{stagingdir}'")

        # get the nodes which need to be exported
        export_options = self.get_export_options(log_filepath)
        with maintained_selection():
            # select and export
            node_list = instance.data["members"]
            rt.Select(node_list)
            rt.USDExporter.ExportFile(asset_filepath,
                                      exportOptions=export_options,
                                      contentSource=rt.Name("selected"),
                                      nodeList=node_list)

        self.log.info("Performing Extraction ...")
        if "representations" not in instance.data:
            instance.data["representations"] = []

        representation = {
            'name': 'usda',
            'ext': 'usda',
            'files': asset_filename,
            "stagingDir": stagingdir,
        }
        instance.data["representations"].append(representation)

        log_representation = {
            'name': 'txt',
            'ext': 'txt',
            'files': log_filename,
            "stagingDir": stagingdir,
        }
        instance.data["representations"].append(log_representation)

        self.log.info(
            f"Extracted instance '{instance.name}' to: {asset_filepath}")

    @staticmethod
    def get_export_options(log_path):
        """Set Export Options for USD Exporter"""

        export_options = rt.USDExporter.createOptions()
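        # Geometry-only export: ASCII USD, Y-up, with the mesh format taken
        # from the scene; shapes, lights, cameras and materials are excluded.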
        export_options.Meshes = True
        export_options.Shapes = False
        export_options.Lights = False
        export_options.Cameras = False
        export_options.Materials = False
        export_options.MeshFormat = rt.Name('fromScene')
        export_options.FileFormat = rt.Name('ascii')
        export_options.UpAxis = rt.Name('y')
        export_options.LogLevel = rt.Name('info')
        export_options.LogPath = log_path
        export_options.PreserveEdgeOrientation = True
        export_options.TimeMode = rt.Name('current')

        rt.USDexporter.UIOptions = export_options

        return export_options

@ -0,0 +1,242 @@
import os

import pyblish.api
from pymxs import runtime as rt

from ayon_max.api import maintained_selection
from ayon_core.pipeline import publish


class ExtractPointCloud(publish.Extractor):
    """
    Extract PRT format with tyFlow operators.

    Notes:
        Currently only works for the default partition setting

    Args:
        self.export_particle(): sets up all job arguments for attributes
            to be exported in MAXscript

        self.get_operators(): get the export_particle operator

        self.get_custom_attr(): get all custom channel attributes from the
            OpenPype settings and set them as job arguments before exporting

        self.get_files(): get the files with tyFlow naming convention
            before publishing

        self.partition_output_name(): get the naming with partition settings.

        self.get_partition(): get partition value

    """

    order = pyblish.api.ExtractorOrder - 0.2
    label = "Extract Point Cloud"
    hosts = ["max"]
    families = ["pointcloud"]
    settings = []

    def process(self, instance):
        self.settings = self.get_setting(instance)
        start = instance.data["frameStartHandle"]
        end = instance.data["frameEndHandle"]
        self.log.info("Extracting PRT...")

        stagingdir = self.staging_dir(instance)
        filename = "{name}.prt".format(**instance.data)
        path = os.path.join(stagingdir, filename)

        with maintained_selection():
            job_args = self.export_particle(instance.data["members"],
                                            start,
                                            end,
                                            path)

            for job in job_args:
                rt.Execute(job)

        self.log.info("Performing Extraction ...")
        if "representations" not in instance.data:
            instance.data["representations"] = []

        self.log.info("Writing PRT with TyFlow Plugin...")
        filenames = self.get_files(
            instance.data["members"], path, start, end)
        self.log.debug(f"filenames: {filenames}")

        partition = self.partition_output_name(
            instance.data["members"])

        representation = {
            'name': 'prt',
            'ext': 'prt',
            'files': filenames if len(filenames) > 1 else filenames[0],
            "stagingDir": stagingdir,
            "outputName": partition  # partition value
        }
        instance.data["representations"].append(representation)
        self.log.info(f"Extracted instance '{instance.name}' to: {path}")

    def export_particle(self,
                        members,
                        start,
                        end,
                        filepath):
        """Sets up all job arguments for attributes.

        Those attributes are to be exported in MAX Script.

        Args:
            members (list): Member nodes of the instance.
            start (int): Start frame.
            end (int): End frame.
            filepath (str): Path to PRT file.

        Returns:
            list of arguments for MAX Script.

        """
        job_args = []
        opt_list = self.get_operators(members)
        for operator in opt_list:
            start_frame = f"{operator}.frameStart={start}"
            job_args.append(start_frame)
            end_frame = f"{operator}.frameEnd={end}"
            job_args.append(end_frame)
            filepath = filepath.replace("\\", "/")
            prt_filename = f'{operator}.PRTFilename="{filepath}"'
            job_args.append(prt_filename)
            # Partition
mode = f"{operator}.PRTPartitionsMode=2"
|
||||
job_args.append(mode)
|
||||
|
||||
additional_args = self.get_custom_attr(operator)
|
||||
job_args.extend(iter(additional_args))
|
||||
prt_export = f"{operator}.exportPRT()"
|
||||
job_args.append(prt_export)
|
||||
|
||||
return job_args
|
||||
|
||||
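    # For illustration, with a single operator path
    # "$tyFlow001.Event001.export_particles", a frame range of 1..10 and an
    # output of "C:/staging/pointcloudMain.prt", export_particle() yields
    # MAXScript statements along these lines (the names are made up):
    #
    #   $tyFlow001.Event001.export_particles.frameStart=1
    #   $tyFlow001.Event001.export_particles.frameEnd=10
    #   $tyFlow001.Event001.export_particles.PRTFilename="C:/staging/pointcloudMain.prt"
    #   $tyFlow001.Event001.export_particles.PRTPartitionsMode=2
    #   $tyFlow001.Event001.export_particles.exportPRT()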
    @staticmethod
    def get_operators(members):
        """Get Export Particles operators.

        Args:
            members (list): Instance members.

        Returns:
            list of particle operators

        """
        opt_list = []
        for member in members:
            obj = member.baseobject
            # TODO: see if this could be done in MAXScript instead
            anim_names = rt.GetSubAnimNames(obj)
            for anim_name in anim_names:
                sub_anim = rt.GetSubAnim(obj, anim_name)
                is_export_particle = rt.IsProperty(
                    sub_anim, "Export_Particles")
                if is_export_particle:
                    event_name = sub_anim.Name
                    opt = f"${member.Name}.{event_name}.export_particles"
                    opt_list.append(opt)

        return opt_list
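    # Each entry in opt_list addresses a tyFlow "Export Particles" operator
    # by its MAXScript path, e.g. "$tyFlow001.Event001.export_particles";
    # any sub-anim exposing an "Export_Particles" property qualifies.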
    @staticmethod
    def get_setting(instance):
        project_setting = instance.context.data["project_settings"]
        return project_setting["max"]["PointCloud"]
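    # A sketch of the expected settings shape, inferred from what
    # get_custom_attr() consumes below (the attribute names/values here
    # are invented):
    #
    #   {
    #       "attribute": [
    #           {"name": "Age", "value": "Age"},
    #           {"name": "Velocity", "value": "Velocity"},
    #       ]
    #   }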
    def get_custom_attr(self, operator):
        """Get custom attributes."""
        custom_attr_list = []
        attr_settings = self.settings["attribute"]
        for attr in attr_settings:
            key = attr["name"]
            value = attr["value"]
            custom_attr = f"{operator}.PRTChannels_{value}=True"
            self.log.debug(
                f"{key} will be added as a custom attribute"
            )
            custom_attr_list.append(custom_attr)

        return custom_attr_list
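    # With the sample settings sketched above, this would produce job
    # arguments such as "<operator>.PRTChannels_Age=True" and
    # "<operator>.PRTChannels_Velocity=True".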
    def get_files(self,
                  container,
                  path,
                  start_frame,
                  end_frame):
        """Get file names for tyFlow.

        Set the filenames according to the tyFlow file
        naming convention for publishing purposes.

        Actual file output from tyFlow::
            <SceneFile>__part<PartitionStart>of<PartitionCount>.<frame>.prt

        e.g. tyFlow_cloth_CCCS_blobbyFill_001__part1of1_00004.prt

        Args:
            container: Instance node.
            path (str): Output directory.
            start_frame (int): Start frame.
            end_frame (int): End frame.

        Returns:
            list of filenames

        """
        filenames = []
        filename = os.path.basename(path)
        orig_name, ext = os.path.splitext(filename)
        partition_count, partition_start = self.get_partition(container)
        for frame in range(int(start_frame), int(end_frame) + 1):
            actual_name = "{}__part{:03}of{}_{:05}".format(orig_name,
                                                           partition_start,
                                                           partition_count,
                                                           frame)
            actual_filename = path.replace(orig_name, actual_name)
            filenames.append(os.path.basename(actual_filename))

        return filenames
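    # For example, for "pointcloudMain.prt" with partition 1 of 1 over
    # frames 1-3, this yields (names invented for illustration):
    #   pointcloudMain__part001of1_00001.prt
    #   pointcloudMain__part001of1_00002.prt
    #   pointcloudMain__part001of1_00003.prt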
    def partition_output_name(self, container):
        """Get partition output name.

        The partition output name is set for mapping
        the published file output.

        Todo:
            Customize the setting for the output.

        Args:
            container: Instance node.

        Returns:
            str: Partition name.

        """
        partition_count, partition_start = self.get_partition(container)
        return f"_part{partition_start:03}of{partition_count}"
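    # e.g. partition 1 of 1 maps to the output name "_part001of1", which
    # becomes the representation's "outputName" in process() above.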
    def get_partition(self, container):
        """Get partition value.

        Args:
            container: Instance node.

        Returns:
            tuple: Partition count and partition start.

        """
        opt_list = self.get_operators(container)
        # TODO: This looks strange? Iterating over
        #   the opt_list but returning from inside?
        for operator in opt_list:
            count = rt.Execute(f'{operator}.PRTPartitionsCount')
            start = rt.Execute(f'{operator}.PRTPartitionsFrom')

            return count, start
Some files were not shown because too many files have changed in this diff