Mirror of https://github.com/ynput/ayon-core.git
Synced 2025-12-24 21:04:40 +01:00

Commit 0ca9a6005a: [Automated] Merged develop into main
466 changed files with 44768 additions and 811 deletions
.gitmodules (vendored): 8 lines changed

@@ -3,10 +3,4 @@
     url = https://github.com/pypeclub/avalon-core.git
 [submodule "repos/avalon-unreal-integration"]
     path = repos/avalon-unreal-integration
     url = https://github.com/pypeclub/avalon-unreal-integration.git
-[submodule "openpype/modules/default_modules/ftrack/python2_vendor/arrow"]
-    path = openpype/modules/default_modules/ftrack/python2_vendor/arrow
-    url = https://github.com/arrow-py/arrow.git
-[submodule "openpype/modules/default_modules/ftrack/python2_vendor/ftrack-python-api"]
-    path = openpype/modules/default_modules/ftrack/python2_vendor/ftrack-python-api
-    url = https://bitbucket.org/ftrack/ftrack-python-api.git
@@ -42,6 +42,12 @@ def standalonepublisher():
     PypeCommands().launch_standalone_publisher()


+@main.command()
+def traypublisher():
+    """Show new OpenPype Standalone publisher UI."""
+    PypeCommands().launch_traypublisher()
+
+
 @main.command()
 @click.option("-d", "--debug",
               is_flag=True, help=("Run pype tray in debug mode"))

@@ -371,10 +377,15 @@ def run(script):
     "--app_variant",
     help="Provide specific app variant for test, empty for latest",
     default=None)
-def runtests(folder, mark, pyargs, test_data_folder, persist, app_variant):
+@click.option("-t",
+              "--timeout",
+              help="Provide specific timeout value for test case",
+              default=None)
+def runtests(folder, mark, pyargs, test_data_folder, persist, app_variant,
+             timeout):
     """Run all automatic tests after proper initialization via start.py"""
     PypeCommands().run_tests(folder, mark, pyargs, test_data_folder,
-                             persist, app_variant)
+                             persist, app_variant, timeout)


 @main.command()
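The `runtests` hunk above follows the usual click pattern: an option with `default=None` is threaded straight through to the implementation call, so the callee can tell "not set" apart from an explicit value. A minimal runnable sketch of that pattern (the `demo` group and command names are illustrative, not part of this commit):

```python
import click


@click.group()
def demo():
    """Illustrative command group mirroring the CLI structure above."""


@demo.command()
@click.option("-t", "--timeout", default=None,
              help="Provide specific timeout value for test case")
def runtests(timeout):
    # click passes None through when the option is omitted,
    # so downstream code can apply its own default.
    print("timeout:", timeout)


if __name__ == "__main__":
    demo()
```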
@@ -2,7 +2,7 @@ from openpype.api import Anatomy
 from openpype.lib import (
     PreLaunchHook,
     EnvironmentPrepData,
-    prepare_host_environments,
+    prepare_app_environments,
     prepare_context_environments
 )

@@ -14,14 +14,6 @@ class GlobalHostDataHook(PreLaunchHook):

     def execute(self):
         """Prepare global objects to `data` that will be used for sure."""
-        if not self.application.is_host:
-            self.log.info(
-                "Skipped hook {}. Application is not marked as host.".format(
-                    self.__class__.__name__
-                )
-            )
-            return
-
         self.prepare_global_data()

         if not self.data.get("asset_doc"):

@@ -49,7 +41,7 @@ class GlobalHostDataHook(PreLaunchHook):
             "log": self.log
         })

-        prepare_host_environments(temp_data, self.launch_context.env_group)
+        prepare_app_environments(temp_data, self.launch_context.env_group)
         prepare_context_environments(temp_data)

         temp_data.pop("log")
@@ -29,7 +29,7 @@
   <fileType>Jpeg</fileType>
   <codec>923688</codec>
   <codecProfile></codecProfile>
-  <namePattern><segment name></namePattern>
+  <namePattern><shot name></namePattern>
   <compressionQuality>100</compressionQuality>
   <transferCharacteristic>2</transferCharacteristic>
   <colorimetricSpecification>4</colorimetricSpecification>

@@ -27,7 +27,7 @@
 </sequence>
 <movie>
   <fileType>QuickTime</fileType>
-  <namePattern><segment name></namePattern>
+  <namePattern><shot name></namePattern>
   <yuvHeadroom>0</yuvHeadroom>
   <yuvColourSpace>PCS_709</yuvColourSpace>
   <operationalPattern>None</operationalPattern>

@@ -43,7 +43,7 @@
   <targetVersion>2021</targetVersion>
   <pathSuffix>/profiles/.33622016/HDTV_720p_8Mbits.cdxprof</pathSuffix>
 </codecProfile>
-<namePattern><segment name>_<video codec></namePattern>
+<namePattern><shot name>_<video codec></namePattern>
 <compressionQuality>50</compressionQuality>
 <transferCharacteristic>2</transferCharacteristic>
 <colorimetricSpecification>4</colorimetricSpecification>
@@ -8,7 +8,7 @@ PLUGIN_DIR = os.path.dirname(os.path.dirname(__file__))
 EXPORT_PRESETS_DIR = os.path.join(PLUGIN_DIR, "export_preset")

 CONFIG_DIR = os.path.join(os.path.expanduser(
-    "~/.openpype"), "openpype_flame_to_ftrack")
+    "~/.openpype"), "openpype_babypublisher")


 @contextmanager
@@ -360,6 +360,8 @@ class FtrackComponentCreator:


 class FtrackEntityOperator:
+    existing_tasks = []
+
     def __init__(self, session, project_entity):
         self.session = session
         self.project_entity = project_entity

@@ -392,10 +394,7 @@ class FtrackEntityOperator:
         query = '{} where name is "{}" and project_id is "{}"'.format(
             type, name, self.project_entity["id"])

-        try:
-            entity = session.query(query).one()
-        except Exception:
-            entity = None
+        entity = session.query(query).first()

         # if entity doesnt exist then create one
         if not entity:

@@ -430,10 +429,21 @@ class FtrackEntityOperator:
         return parents

     def create_task(self, task_type, task_types, parent):
-        existing_task = [
+        _exising_tasks = [
             child for child in parent['children']
             if child.entity_type.lower() == 'task'
             if child['name'].lower() in task_type.lower()
         ]

+        # add task into existing tasks if they are not already there
+        for _t in _exising_tasks:
+            if _t in self.existing_tasks:
+                continue
+            self.existing_tasks.append(_t)
+
+        existing_task = [
+            task for task in self.existing_tasks
+            if task['name'].lower() in task_type.lower()
+            if task['parent'] == parent
+        ]
+
         if existing_task:

@@ -445,4 +455,5 @@ class FtrackEntityOperator:
         })
         task["type"] = task_types[task_type]

+        self.existing_tasks.append(task)
         return task
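The `.one()` to `.first()` change above works because ftrack_api queries (modeled on SQLAlchemy) raise when `.one()` does not find exactly one match, while `.first()` simply returns the first match or `None`. That makes the broad try/except unnecessary. A reduced sketch of the lookup (the `find_one` helper name is hypothetical):

```python
def find_one(session, entity_type, name, project_id):
    # .first() returns None for "no match" instead of raising,
    # so callers can branch on the result directly.
    query = '{} where name is "{}" and project_id is "{}"'.format(
        entity_type, name, project_id)
    return session.query(query).first()
```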
@@ -1,4 +1,4 @@
-from PySide2 import QtWidgets, QtCore
+from Qt import QtWidgets, QtCore

 import uiwidgets
 import app_utils

@@ -33,11 +33,12 @@ class MainWindow(QtWidgets.QWidget):
         self.panel_class.clear_temp_data()
         self.panel_class.close()
         clear_inner_modules()
+        ftrack_lib.FtrackEntityOperator.existing_tasks = []
         # now the panel can be closed
         event.accept()


-class FlameToFtrackPanel(object):
+class FlameBabyPublisherPanel(object):
     session = None
     temp_data_dir = None
     processed_components = []

@@ -78,7 +79,7 @@ class FlameToFtrackPanel(object):

         # creating ui
         self.window.setMinimumSize(1500, 600)
-        self.window.setWindowTitle('Sequence Shots to Ftrack')
+        self.window.setWindowTitle('OpenPype: Baby-publisher')
         self.window.setWindowFlags(QtCore.Qt.WindowStaysOnTopHint)
         self.window.setAttribute(QtCore.Qt.WA_DeleteOnClose)
         self.window.setFocusPolicy(QtCore.Qt.StrongFocus)

@@ -469,10 +470,14 @@ class FlameToFtrackPanel(object):
         for sequence in self.selection:
             frame_rate = float(str(sequence.frame_rate)[:-4])
             for ver in sequence.versions:
-                for tracks in ver.tracks:
-                    for segment in tracks.segments:
+                for track in ver.tracks:
+                    if len(track.segments) == 0 and track.hidden:
+                        continue
+                    for segment in track.segments:
                         print(segment.attributes)
-                        if str(segment.name)[1:-1] == "":
+                        if segment.name.get_value() == "":
                             continue
+                        if segment.hidden.get_value() is True:
+                            continue
                         # get clip frame duration
                         record_duration = str(segment.record_duration)[1:-1]

@@ -492,11 +497,11 @@ class FlameToFtrackPanel(object):

                 # Add timeline segment to tree
                 QtWidgets.QTreeWidgetItem(self.tree, [
-                    str(sequence.name)[1:-1],  # seq
-                    str(segment.name)[1:-1],  # shot
+                    sequence.name.get_value(),  # seq name
+                    segment.shot_name.get_value(),  # shot name
                     str(clip_duration),  # clip duration
                     shot_description,  # shot description
-                    str(segment.comment)[1:-1]  # task description
+                    segment.comment.get_value()  # task description
                 ]).setFlags(
                     QtCore.Qt.ItemIsEditable
                     | QtCore.Qt.ItemIsEnabled
@@ -1,4 +1,4 @@
-from PySide2 import QtWidgets, QtCore
+from Qt import QtWidgets, QtCore


 class FlameLabel(QtWidgets.QLabel):
@@ -16,10 +16,11 @@ def flame_panel_executor(selection):
     if "panel_app" in sys.modules.keys():
         print("panel_app module is already loaded")
-        import panel_app
-        reload(panel_app)  # noqa
+        del sys.modules["panel_app"]
+        print("panel_app module removed from sys.modules")

-    panel_app.FlameToFtrackPanel(selection)
+    import panel_app
+    panel_app.FlameBabyPublisherPanel(selection)


 def scope_sequence(selection):

@@ -30,7 +31,7 @@ def scope_sequence(selection):
 def get_media_panel_custom_ui_actions():
     return [
         {
-            "name": "OpenPype: Ftrack",
+            "name": "OpenPype: Baby-publisher",
             "actions": [
                 {
                     "name": "Create Shots",
@@ -24,8 +24,7 @@ from .lib import (
     lsattrs,
     read,

-    maintained_selection,
-    unique_name
+    maintained_selection
 )

@@ -51,8 +50,7 @@ __all__ = [
     "lsattrs",
     "read",

-    "maintained_selection",
-    "unique_name"
+    "maintained_selection"
 ]

 # Backwards API compatibility
@@ -99,65 +99,6 @@ def get_id_required_nodes():
     return list(nodes)


-def get_additional_data(container):
-    """Not implemented yet!"""
-    return container
-
-
-def set_parameter_callback(node, parameter, language, callback):
-    """Link a callback to a parameter of a node
-
-    Args:
-        node(hou.Node): instance of the nodee
-        parameter(str): name of the parameter
-        language(str): name of the language, e.g.: python
-        callback(str): command which needs to be triggered
-
-    Returns:
-        None
-
-    """
-
-    template_grp = node.parmTemplateGroup()
-    template = template_grp.find(parameter)
-    if not template:
-        return
-
-    script_language = (hou.scriptLanguage.Python if language == "python" else
-                       hou.scriptLanguage.Hscript)
-
-    template.setScriptCallbackLanguage(script_language)
-    template.setScriptCallback(callback)
-
-    template.setTags({"script_callback": callback,
-                      "script_callback_language": language.lower()})
-
-    # Replace the existing template with the adjusted one
-    template_grp.replace(parameter, template)
-
-    node.setParmTemplateGroup(template_grp)
-
-
-def set_parameter_callbacks(node, parameter_callbacks):
-    """Set callbacks for multiple parameters of a node
-
-    Args:
-        node(hou.Node): instance of a hou.Node
-        parameter_callbacks(dict): collection of parameter and callback data
-            example: {"active" :
-                        {"language": "python",
-                         "callback": "print('hello world)'"}
-                     }
-    Returns:
-        None
-    """
-    for parameter, data in parameter_callbacks.items():
-        language = data["language"]
-        callback = data["callback"]
-
-        set_parameter_callback(node, parameter, language, callback)
-
-
 def get_output_parameter(node):
     """Return the render output parameter name of the given node

@@ -189,19 +130,6 @@ def get_output_parameter(node):
     raise TypeError("Node type '%s' not supported" % node_type)


-@contextmanager
-def attribute_values(node, data):
-
-    previous_attrs = {key: node.parm(key).eval() for key in data.keys()}
-    try:
-        node.setParms(data)
-        yield
-    except Exception as exc:
-        pass
-    finally:
-        node.setParms(previous_attrs)
-
-
 def set_scene_fps(fps):
     hou.setFps(fps)

@@ -349,10 +277,6 @@ def render_rop(ropnode):
         raise RuntimeError("Render failed: {0}".format(exc))


-def children_as_string(node):
-    return [c.name() for c in node.children()]
-
-
 def imprint(node, data):
     """Store attributes with value on a node

@@ -473,53 +397,6 @@ def read(node):
             parameter in node.spareParms()}


-def unique_name(name, format="%03d", namespace="", prefix="", suffix="",
-                separator="_"):
-    """Return unique `name`
-
-    The function takes into consideration an optional `namespace`
-    and `suffix`. The suffix is included in evaluating whether a
-    name exists - such as `name` + "_GRP" - but isn't included
-    in the returned value.
-
-    If a namespace is provided, only names within that namespace
-    are considered when evaluating whether the name is unique.
-
-    Arguments:
-        format (str, optional): The `name` is given a number, this determines
-            how this number is formatted. Defaults to a padding of 2.
-            E.g. my_name01, my_name02.
-        namespace (str, optional): Only consider names within this namespace.
-        suffix (str, optional): Only consider names with this suffix.
-
-    Example:
-        >>> name = hou.node("/obj").createNode("geo", name="MyName")
-        >>> assert hou.node("/obj/MyName")
-        True
-        >>> unique = unique_name(name)
-        >>> assert hou.node("/obj/{}".format(unique))
-        False
-
-    """
-
-    iteration = 1
-
-    parts = [prefix, name, format % iteration, suffix]
-    if namespace:
-        parts.insert(0, namespace)
-
-    unique = separator.join(parts)
-    children = children_as_string(hou.node("/obj"))
-    while unique in children:
-        iteration += 1
-        unique = separator.join(parts)
-
-    if suffix:
-        return unique[:-len(suffix)]
-
-    return unique
-
-
 @contextmanager
 def maintained_selection():
     """Maintain selection during context
@@ -5,7 +5,7 @@ import logging

 from functools import partial

-import maya.cmds as mc
+import maya.cmds as cmds
 import maya.mel as mel

 from openpype.api import resources

@@ -30,9 +30,9 @@ def override_component_mask_commands():
     log.info("Installing override_component_mask_commands..")

     # Get all object mask buttons
-    buttons = mc.formLayout("objectMaskIcons",
-                            query=True,
-                            childArray=True)
+    buttons = cmds.formLayout("objectMaskIcons",
+                              query=True,
+                              childArray=True)
     # Skip the triangle list item
     buttons = [btn for btn in buttons if btn != "objPickMenuLayout"]

@@ -43,14 +43,14 @@ def override_component_mask_commands():
         # toggle the others based on whether any of the buttons
         # was remaining active after the toggle, if not then
         # enable all
-        if mc.getModifiers() == 4:  # = CTRL
+        if cmds.getModifiers() == 4:  # = CTRL
             state = True
-            active = [mc.iconTextCheckBox(btn, query=True, value=True) for btn
-                      in buttons]
+            active = [cmds.iconTextCheckBox(btn, query=True, value=True)
+                      for btn in buttons]
             if any(active):
-                mc.selectType(allObjects=False)
+                cmds.selectType(allObjects=False)
             else:
-                mc.selectType(allObjects=True)
+                cmds.selectType(allObjects=True)

         # Replace #1 with the current button state
         cmd = raw_command.replace(" #1", " {}".format(int(state)))

@@ -63,13 +63,13 @@ def override_component_mask_commands():
         # try to implement the fix. (This also allows us to
         # "uninstall" the behavior later)
         if btn not in COMPONENT_MASK_ORIGINAL:
-            original = mc.iconTextCheckBox(btn, query=True, cc=True)
+            original = cmds.iconTextCheckBox(btn, query=True, cc=True)
             COMPONENT_MASK_ORIGINAL[btn] = original

         # Assign the special callback
         original = COMPONENT_MASK_ORIGINAL[btn]
         new_fn = partial(on_changed_callback, original)
-        mc.iconTextCheckBox(btn, edit=True, cc=new_fn)
+        cmds.iconTextCheckBox(btn, edit=True, cc=new_fn)


 def override_toolbox_ui():

@@ -78,25 +78,36 @@ def override_toolbox_ui():
     parent_widget = get_main_window()

-    # Ensure the maya web icon on toolbox exists
-    web_button = "ToolBox|MainToolboxLayout|mayaWebButton"
-    if not mc.iconTextButton(web_button, query=True, exists=True):
+    button_names = [
+        # Maya 2022.1+ with maya.cmds.iconTextStaticLabel
+        "ToolBox|MainToolboxLayout|mayaHomeToolboxButton",
+        # Older with maya.cmds.iconTextButton
+        "ToolBox|MainToolboxLayout|mayaWebButton"
+    ]
+    for name in button_names:
+        if cmds.control(name, query=True, exists=True):
+            web_button = name
+            break
+    else:
+        # Button does not exist
+        log.warning("Can't find Maya Home/Web button to override toolbox ui..")
         return

-    mc.iconTextButton(web_button, edit=True, visible=False)
+    cmds.control(web_button, edit=True, visible=False)

     # real = 32, but 36 with padding - according to toolbox mel script
     icon_size = 36
     parent = web_button.rsplit("|", 1)[0]

     # Ensure the parent is a formLayout
-    if not mc.objectTypeUI(parent) == "formLayout":
+    if not cmds.objectTypeUI(parent) == "formLayout":
         return

     # Create our controls
     controls = []

     controls.append(
-        mc.iconTextButton(
+        cmds.iconTextButton(
             "pype_toolbox_lookmanager",
             annotation="Look Manager",
             label="Look Manager",

@@ -109,7 +120,7 @@ def override_toolbox_ui():
     )

     controls.append(
-        mc.iconTextButton(
+        cmds.iconTextButton(
             "pype_toolbox_workfiles",
             annotation="Work Files",
             label="Work Files",

@@ -124,7 +135,7 @@ def override_toolbox_ui():
     )

     controls.append(
-        mc.iconTextButton(
+        cmds.iconTextButton(
             "pype_toolbox_loader",
             annotation="Loader",
             label="Loader",

@@ -139,7 +150,7 @@ def override_toolbox_ui():
     )

     controls.append(
-        mc.iconTextButton(
+        cmds.iconTextButton(
             "pype_toolbox_manager",
             annotation="Inventory",
             label="Inventory",

@@ -159,7 +170,7 @@ def override_toolbox_ui():
     for i, control in enumerate(controls):
         previous = controls[i - 1] if i > 0 else web_button

-        mc.formLayout(parent, edit=True,
-                      attachControl=[control, "bottom", 0, previous],
-                      attachForm=([control, "left", 1],
-                                  [control, "right", 1]))
+        cmds.formLayout(parent, edit=True,
+                        attachControl=[control, "bottom", 0, previous],
+                        attachForm=([control, "left", 1],
+                                    [control, "right", 1]))
@@ -8,7 +8,6 @@ import math

 import json
 import logging
-import itertools
 import contextlib
 from collections import OrderedDict, defaultdict
 from math import ceil

@@ -267,8 +266,10 @@ def float_round(num, places=0, direction=ceil):

 def pairwise(iterable):
     """s -> (s0,s1), (s2,s3), (s4, s5), ..."""
+    from six.moves import zip
+
     a = iter(iterable)
-    return itertools.izip(a, a)
+    return zip(a, a)


 def export_alembic(nodes,
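The `pairwise` change is a straight Python 2 to 3 fix: `itertools.izip` was the lazy variant on Python 2 and no longer exists on Python 3, where the builtin `zip` is already lazy. `six.moves.zip` resolves to the right one on both. A self-contained sketch:

```python
from six.moves import zip


def pairwise(iterable):
    """s -> (s0, s1), (s2, s3), ..."""
    a = iter(iterable)
    # zip consumes the same iterator twice, pairing consecutive items
    return zip(a, a)


print(list(pairwise([1, 2, 3, 4])))  # [(1, 2), (3, 4)]
```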
@@ -2986,7 +2987,27 @@ def set_colorspace():
     """
     project_name = os.getenv("AVALON_PROJECT")
     imageio = get_anatomy_settings(project_name)["imageio"]["maya"]
-    root_dict = imageio["colorManagementPreference"]
+
+    # Maya 2022+ introduces new OCIO v2 color management settings that
+    # can override the old color management preferences. OpenPype has
+    # separate settings for both so we fall back when necessary.
+    use_ocio_v2 = imageio["colorManagementPreference_v2"]["enabled"]
+    required_maya_version = 2022
+    maya_version = int(cmds.about(version=True))
+    maya_supports_ocio_v2 = maya_version >= required_maya_version
+    if use_ocio_v2 and not maya_supports_ocio_v2:
+        # Fallback to legacy behavior with a warning
+        log.warning("Color Management Preference v2 is enabled but not "
+                    "supported by current Maya version: {} (< {}). Falling "
+                    "back to legacy settings.".format(
+                        maya_version, required_maya_version))
+        use_ocio_v2 = False
+
+    if use_ocio_v2:
+        root_dict = imageio["colorManagementPreference_v2"]
+    else:
+        root_dict = imageio["colorManagementPreference"]

     if not isinstance(root_dict, dict):
         msg = "set_colorspace(): argument should be dictionary"

@@ -2994,11 +3015,12 @@ def set_colorspace():

     log.debug(">> root_dict: {}".format(root_dict))

-    # first enable color management
+    # enable color management
     cmds.colorManagementPrefs(e=True, cmEnabled=True)
     cmds.colorManagementPrefs(e=True, ocioRulesEnabled=True)

-    # second set config path
+    # set config path
+    custom_ocio_config = False
     if root_dict.get("configFilePath"):
         unresolved_path = root_dict["configFilePath"]
         ocio_paths = unresolved_path[platform.system().lower()]

@@ -3015,16 +3037,50 @@ def set_colorspace():
         cmds.colorManagementPrefs(e=True, cmConfigFileEnabled=True)
         log.debug("maya '{}' changed to: {}".format(
             "configFilePath", resolved_path))
-        root_dict.pop("configFilePath")
+        custom_ocio_config = True
     else:
         cmds.colorManagementPrefs(e=True, cmConfigFileEnabled=False)
-        cmds.colorManagementPrefs(e=True, configFilePath="" )
+        cmds.colorManagementPrefs(e=True, configFilePath="")

-    # third set rendering space and view transform
-    renderSpace = root_dict["renderSpace"]
-    cmds.colorManagementPrefs(e=True, renderingSpaceName=renderSpace)
-    viewTransform = root_dict["viewTransform"]
-    cmds.colorManagementPrefs(e=True, viewTransformName=viewTransform)
+    # If no custom OCIO config file was set we make sure that Maya 2022+
+    # either chooses between Maya's newer default v2 or legacy config based
+    # on OpenPype setting to use ocio v2 or not.
+    if maya_supports_ocio_v2 and not custom_ocio_config:
+        if use_ocio_v2:
+            # Use Maya 2022+ default OCIO v2 config
+            log.info("Setting default Maya OCIO v2 config")
+            cmds.colorManagementPrefs(edit=True, configFilePath="")
+        else:
+            # Set the Maya default config file path
+            log.info("Setting default Maya OCIO v1 legacy config")
+            cmds.colorManagementPrefs(edit=True, configFilePath="legacy")
+
+    # set color spaces for rendering space and view transforms
+    def _colormanage(**kwargs):
+        """Wrapper around `cmds.colorManagementPrefs`.
+
+        This logs errors instead of raising an error so color management
+        settings get applied as much as possible.
+
+        """
+        assert len(kwargs) == 1, "Must receive one keyword argument"
+        try:
+            cmds.colorManagementPrefs(edit=True, **kwargs)
+            log.debug("Setting Color Management Preference: {}".format(kwargs))
+        except RuntimeError as exc:
+            log.error(exc)
+
+    if use_ocio_v2:
+        _colormanage(renderingSpaceName=root_dict["renderSpace"])
+        _colormanage(displayName=root_dict["displayName"])
+        _colormanage(viewName=root_dict["viewName"])
+    else:
+        _colormanage(renderingSpaceName=root_dict["renderSpace"])
+        if maya_supports_ocio_v2:
+            _colormanage(viewName=root_dict["viewTransform"])
+            _colormanage(displayName="legacy")
+        else:
+            _colormanage(viewTransformName=root_dict["viewTransform"])


 @contextlib.contextmanager
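The version gate above hinges on `cmds.about(version=True)` returning the Maya year as a string. A reduced, Maya-free sketch of just the fallback decision (the `resolve_ocio_mode` name is illustrative):

```python
def resolve_ocio_mode(use_ocio_v2, maya_version):
    """Return (effective use_ocio_v2, maya_supports_ocio_v2)."""
    required = 2022
    supports_v2 = maya_version >= required
    if use_ocio_v2 and not supports_v2:
        # settings request OCIO v2 but Maya cannot do it: fall back
        return False, supports_v2
    return use_ocio_v2, supports_v2


print(resolve_ocio_mode(True, 2020))  # (False, False)
print(resolve_ocio_mode(True, 2023))  # (True, True)
```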
@@ -253,7 +253,7 @@ class CreateRender(plugin.Creator):
         # get pools
         pool_names = []

-        self.server_aliases = self.deadline_servers.keys()
+        self.server_aliases = list(self.deadline_servers.keys())
         self.data["deadlineServers"] = self.server_aliases
         self.data["suspendPublishJob"] = False
         self.data["review"] = True

@@ -286,15 +286,12 @@ class CreateRender(plugin.Creator):
             raise RuntimeError("Both Deadline and Muster are enabled")

         if deadline_enabled:
-            # if default server is not between selected, use first one for
-            # initial list of pools.
-            try:
-                deadline_url = self.deadline_servers["default"]
-            except KeyError:
-                deadline_url = [
-                    self.deadline_servers[k]
-                    for k in self.deadline_servers.keys()
-                ][0]
+            # if 'default' server is not between selected,
+            # use first one for initial list of pools.
+            deadline_url = next(iter(self.deadline_servers.values()))

             pool_names = self._get_deadline_pools(deadline_url)
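This hunk, and several later ones in this commit, fix the same Python 3 migration issue: `dict.keys()` returns a view object, which is neither indexable nor JSON-serializable, so call sites that store or index the result need an explicit `list()`. `next(iter(d.values()))` is the idiomatic "first value" without building an intermediate list. A self-contained demo (server names are made up):

```python
servers = {"default": "http://deadline:8082", "alt": "http://alt:8082"}

aliases = list(servers.keys())            # indexable and serializable
first_url = next(iter(servers.values()))  # first value, no temporary list

print(aliases[0])   # "default"
print(first_url)    # "http://deadline:8082"
```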
@@ -320,7 +320,7 @@ class CollectLook(pyblish.api.InstancePlugin):

         # Collect file nodes used by shading engines (if we have any)
         files = []
-        look_sets = sets.keys()
+        look_sets = list(sets.keys())
         shader_attrs = [
             "surfaceShader",
             "volumeShader",
@@ -234,13 +234,14 @@ class CollectMayaRender(pyblish.api.ContextPlugin):
             publish_meta_path = None
             for aov in exp_files:
                 full_paths = []
-                for file in aov[aov.keys()[0]]:
+                aov_first_key = list(aov.keys())[0]
+                for file in aov[aov_first_key]:
                     full_path = os.path.join(workspace, default_render_file,
                                              file)
                     full_path = full_path.replace("\\", "/")
                     full_paths.append(full_path)
                     publish_meta_path = os.path.dirname(full_path)
-                aov_dict[aov.keys()[0]] = full_paths
+                aov_dict[aov_first_key] = full_paths

             frame_start_render = int(self.get_render_attribute(
                 "startFrame", layer=layer_name))
@@ -43,7 +43,8 @@ def grouper(iterable, n, fillvalue=None):

     """
     args = [iter(iterable)] * n
-    return itertools.izip_longest(fillvalue=fillvalue, *args)
+    from six.moves import zip_longest
+    return zip_longest(fillvalue=fillvalue, *args)


 def unlock(plug):
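Same bridge as `pairwise` above, but for the padded variant: Python 2's `itertools.izip_longest` became `itertools.zip_longest` on Python 3, and `six.moves.zip_longest` picks the right one. `zip_longest` pads the final group with `fillvalue`:

```python
from six.moves import zip_longest


def grouper(iterable, n, fillvalue=None):
    # n copies of the same iterator, so each zip step consumes n items
    args = [iter(iterable)] * n
    return zip_longest(fillvalue=fillvalue, *args)


print(list(grouper("ABCDE", 2, fillvalue="-")))
# [('A', 'B'), ('C', 'D'), ('E', '-')]
```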
@@ -4,6 +4,7 @@ import os
 import sys
 import json
+import tempfile
 import platform
 import contextlib
 import subprocess
 from collections import OrderedDict

@@ -62,6 +63,11 @@ def maketx(source, destination, *args):
     from openpype.lib import get_oiio_tools_path

     maketx_path = get_oiio_tools_path("maketx")
+
+    if platform.system().lower() == "windows":
+        # Ensure .exe extension
+        maketx_path += ".exe"
+
     if not os.path.exists(maketx_path):
         print(
             "OIIO tool not found in {}".format(maketx_path))

@@ -216,7 +222,7 @@ class ExtractLook(openpype.api.Extractor):
         self.log.info("Extract sets (%s) ..." % _scene_type)
         lookdata = instance.data["lookData"]
         relationships = lookdata["relationships"]
-        sets = relationships.keys()
+        sets = list(relationships.keys())
         if not sets:
             self.log.info("No sets found")
             return
@@ -110,9 +110,9 @@ class ValidateAssRelativePaths(pyblish.api.InstancePlugin):
         Maya API will return a list of values, which need to be properly
         handled to evaluate properly.
         """
-        if isinstance(attr_val, types.BooleanType):
+        if isinstance(attr_val, bool):
             return attr_val
-        elif isinstance(attr_val, (types.ListType, types.GeneratorType)):
+        elif isinstance(attr_val, (list, types.GeneratorType)):
             return any(attr_val)
         else:
             return bool(attr_val)
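Another Python 3 fix repeated in two validators in this commit: `types.BooleanType` and `types.ListType` only exist on Python 2, where they alias the builtins; on Python 3 the builtins are used directly, while `types.GeneratorType` exists on both. Consolidated as a runnable helper:

```python
import types


def to_bool(attr_val):
    """Coerce a Maya attribute value (bool, list, or generator) to bool."""
    if isinstance(attr_val, bool):
        return attr_val
    elif isinstance(attr_val, (list, types.GeneratorType)):
        # any() works for both lists and (lazily evaluated) generators
        return any(attr_val)
    return bool(attr_val)


print(to_bool([0, 0, 1]))            # True
print(to_bool(v for v in (0, 0)))    # False
```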
@@ -5,6 +5,8 @@ import math
 import maya.api.OpenMaya as om
 import pymel.core as pm

+from six.moves import xrange
+

 class GetOverlappingUVs(object):
@@ -82,9 +82,9 @@ class ValidateVrayReferencedAOVs(pyblish.api.InstancePlugin):
            bool: cast Maya attribute to Pythons boolean value.

        """
-        if isinstance(attr_val, types.BooleanType):
+        if isinstance(attr_val, bool):
             return attr_val
-        elif isinstance(attr_val, (types.ListType, types.GeneratorType)):
+        elif isinstance(attr_val, (list, types.GeneratorType)):
             return any(attr_val)
         else:
             return bool(attr_val)
@@ -175,7 +175,7 @@ class ProcessLauncher(QtCore.QObject):
     def start(self):
         if self._started:
             return
-        self.log.info("Started launch logic of AfterEffects")
+        self.log.info("Started launch logic of Photoshop")
         self._started = True
         self._start_process_timer.start()
@@ -344,6 +344,28 @@ class PhotoshopServerStub:
             )
         )

+    def hide_all_others_layers(self, layers):
+        """hides all layers that are not part of the list or that are not
+        children of this list
+
+        Args:
+            layers (list): list of PSItem - highest hierarchy
+        """
+        extract_ids = set([ll.id for ll in self.get_layers_in_layers(layers)])
+
+        self.hide_all_others_layers_ids(extract_ids)
+
+    def hide_all_others_layers_ids(self, extract_ids):
+        """hides all layers that are not part of the list or that are not
+        children of this list
+
+        Args:
+            extract_ids (list): list of integer that should be visible
+        """
+        for layer in self.get_layers():
+            if layer.visible and layer.id not in extract_ids:
+                self.set_visible(layer.id, False)
+
     def get_layers_metadata(self):
         """Reads layers metadata from Headline from active document in PS.
         (Headline accessible by File > File Info)
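These two new stub methods centralize the "hide everything not being extracted" loop that `extract_image.py` and `extract_review.py` previously inlined (both extractors switch to them later in this commit). A usage sketch, assuming a connected Photoshop session; the choice of which layers to keep is illustrative:

```python
from openpype.hosts.photoshop import api as photoshop

stub = photoshop.stub()
layers = stub.get_layers()         # all PSItems in the active document
keep = layers[:1]                  # e.g. keep only the first top-level layer

# Everything outside `keep` (and its children) gets hidden; each
# set_visible call is a round-trip to Photoshop, so the method only
# touches layers that are currently visible.
stub.hide_all_others_layers(keep)
```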
@@ -38,10 +38,15 @@ class CollectColorCodedInstances(pyblish.api.ContextPlugin):

     def process(self, context):
         self.log.info("CollectColorCodedInstances")
         self.log.debug("mapping:: {}".format(self.color_code_mapping))
+        batch_dir = os.environ.get("OPENPYPE_PUBLISH_DATA")
+        if (os.environ.get("IS_TEST") and
+                (not batch_dir or not os.path.exists(batch_dir))):
+            self.log.debug("Automatic testing, no batch data, skipping")
+            return

         existing_subset_names = self._get_existing_subset_names(context)
-        asset_name, task_name, variant = self._parse_batch()
+        asset_name, task_name, variant = self._parse_batch(batch_dir)

         stub = photoshop.stub()
         layers = stub.get_layers()

@@ -125,9 +130,8 @@ class CollectColorCodedInstances(pyblish.api.ContextPlugin):

         return existing_subset_names

-    def _parse_batch(self):
+    def _parse_batch(self, batch_dir):
         """Parses asset_name, task_name, variant from batch manifest."""
-        batch_dir = os.environ.get("OPENPYPE_PUBLISH_DATA")
         task_data = None
         if batch_dir and os.path.exists(batch_dir):
             task_data = parse_json(os.path.join(batch_dir,
@@ -26,7 +26,6 @@ class ExtractImage(openpype.api.Extractor):
         with photoshop.maintained_selection():
             self.log.info("Extracting %s" % str(list(instance)))
             with photoshop.maintained_visibility():
-                # Hide all other layers.
                 layer = instance.data.get("layer")
                 ids = set([layer.id])
                 add_ids = instance.data.pop("ids", None)

@@ -34,11 +33,7 @@ class ExtractImage(openpype.api.Extractor):
                     ids.update(set(add_ids))
                 extract_ids = set([ll.id for ll in stub.
                                    get_layers_in_layers_ids(ids)])

-                for layer in stub.get_layers():
-                    # limit unnecessary calls to client
-                    if layer.visible and layer.id not in extract_ids:
-                        stub.set_visible(layer.id, False)
+                stub.hide_all_others_layers_ids(extract_ids)

                 file_basename = os.path.splitext(
                     stub.get_active_document_name()
@@ -1,4 +1,5 @@
 import os
+import shutil

 import openpype.api
 import openpype.lib

@@ -7,7 +8,7 @@ from openpype.hosts.photoshop import api as photoshop

 class ExtractReview(openpype.api.Extractor):
     """
-    Produce a flattened image file from all 'image' instances.
+    Produce a flattened or sequence image file from all 'image' instances.

     If no 'image' instance is created, it produces flattened image from
     all visible layers.

@@ -20,54 +21,58 @@ class ExtractReview(openpype.api.Extractor):
     # Extract Options
     jpg_options = None
     mov_options = None
+    make_image_sequence = None

     def process(self, instance):
         staging_dir = self.staging_dir(instance)
         self.log.info("Outputting image to {}".format(staging_dir))

+        fps = instance.data.get("fps", 25)
         stub = photoshop.stub()
+        self.output_seq_filename = os.path.splitext(
+            stub.get_active_document_name())[0] + ".%04d.jpg"

-        layers = []
-        for image_instance in instance.context:
-            if image_instance.data["family"] != "image":
-                continue
-            layers.append(image_instance.data.get("layer"))
+        layers = self._get_layers_from_image_instances(instance)
         self.log.info("Layers image instance found: {}".format(layers))

-        # Perform extraction
-        output_image = "{}.jpg".format(
-            os.path.splitext(stub.get_active_document_name())[0]
-        )
-        output_image_path = os.path.join(staging_dir, output_image)
-        with photoshop.maintained_visibility():
-            if layers:
-                # Hide all other layers.
-                extract_ids = set([ll.id for ll in stub.
-                                   get_layers_in_layers(layers)])
-                self.log.debug("extract_ids {}".format(extract_ids))
-                for layer in stub.get_layers():
-                    # limit unnecessary calls to client
-                    if layer.visible and layer.id not in extract_ids:
-                        stub.set_visible(layer.id, False)
-
-            stub.saveAs(output_image_path, 'jpg', True)
+        if self.make_image_sequence and len(layers) > 1:
+            self.log.info("Extract layers to image sequence.")
+            img_list = self._saves_sequences_layers(staging_dir, layers)
+
+            instance.data["representations"].append({
+                "name": "jpg",
+                "ext": "jpg",
+                "files": img_list,
+                "frameStart": 0,
+                "frameEnd": len(img_list),
+                "fps": fps,
+                "stagingDir": staging_dir,
+                "tags": self.jpg_options['tags'],
+            })
+        else:
+            self.log.info("Extract layers to flatten image.")
+            img_list = self._saves_flattened_layers(staging_dir, layers)
+
+            instance.data["representations"].append({
+                "name": "jpg",
+                "ext": "jpg",
+                "files": img_list,
+                "stagingDir": staging_dir,
+                "tags": self.jpg_options['tags']
+            })

         ffmpeg_path = openpype.lib.get_ffmpeg_tool_path("ffmpeg")

-        instance.data["representations"].append({
-            "name": "jpg",
-            "ext": "jpg",
-            "files": output_image,
-            "stagingDir": staging_dir,
-            "tags": self.jpg_options['tags']
-        })
         instance.data["stagingDir"] = staging_dir

         # Generate thumbnail.
         thumbnail_path = os.path.join(staging_dir, "thumbnail.jpg")
         self.log.info(f"Generate thumbnail {thumbnail_path}")
         args = [
             ffmpeg_path,
             "-y",
-            "-i", output_image_path,
+            "-i", os.path.join(staging_dir, self.output_seq_filename),
             "-vf", "scale=300:-1",
             "-vframes", "1",
             thumbnail_path

@@ -81,14 +86,17 @@ class ExtractReview(openpype.api.Extractor):
             "stagingDir": staging_dir,
             "tags": ["thumbnail"]
         })

         # Generate mov.
         mov_path = os.path.join(staging_dir, "review.mov")
         self.log.info(f"Generate mov review: {mov_path}")
+        img_number = len(img_list)
         args = [
             ffmpeg_path,
             "-y",
-            "-i", output_image_path,
+            "-i", os.path.join(staging_dir, self.output_seq_filename),
             "-vf", "pad=ceil(iw/2)*2:ceil(ih/2)*2",
-            "-vframes", "1",
+            "-vframes", str(img_number),
             mov_path
         ]
         output = openpype.lib.run_subprocess(args)

@@ -99,15 +107,86 @@ class ExtractReview(openpype.api.Extractor):
             "files": os.path.basename(mov_path),
             "stagingDir": staging_dir,
             "frameStart": 1,
-            "frameEnd": 1,
-            "fps": 25,
+            "frameEnd": img_number,
+            "fps": fps,
             "preview": True,
             "tags": self.mov_options['tags']
         })

         # Required for extract_review plugin (L222 onwards).
         instance.data["frameStart"] = 1
-        instance.data["frameEnd"] = 1
+        instance.data["frameEnd"] = img_number
         instance.data["fps"] = 25

         self.log.info(f"Extracted {instance} to {staging_dir}")

+    def _get_image_path_from_instances(self, instance):
+        img_list = []
+
+        for instance in sorted(instance.context):
+            if instance.data["family"] != "image":
+                continue
+
+            for rep in instance.data["representations"]:
+                img_path = os.path.join(
+                    rep["stagingDir"],
+                    rep["files"]
+                )
+                img_list.append(img_path)
+
+        return img_list
+
+    def _copy_image_to_staging_dir(self, staging_dir, img_list):
+        copy_files = []
+        for i, img_src in enumerate(img_list):
+            img_filename = self.output_seq_filename % i
+            img_dst = os.path.join(staging_dir, img_filename)
+
+            self.log.debug(
+                "Copying file .. {} -> {}".format(img_src, img_dst)
+            )
+            shutil.copy(img_src, img_dst)
+            copy_files.append(img_filename)
+
+        return copy_files
+
+    def _get_layers_from_image_instances(self, instance):
+        layers = []
+        for image_instance in instance.context:
+            if image_instance.data["family"] != "image":
+                continue
+            layers.append(image_instance.data.get("layer"))
+
+        return sorted(layers)
+
+    def _saves_flattened_layers(self, staging_dir, layers):
+        img_filename = self.output_seq_filename % 0
+        output_image_path = os.path.join(staging_dir, img_filename)
+        stub = photoshop.stub()
+
+        with photoshop.maintained_visibility():
+            self.log.info("Extracting {}".format(layers))
+            if layers:
+                stub.hide_all_others_layers(layers)
+
+            stub.saveAs(output_image_path, 'jpg', True)
+
+        return img_filename
+
+    def _saves_sequences_layers(self, staging_dir, layers):
+        stub = photoshop.stub()
+
+        list_img_filename = []
+        with photoshop.maintained_visibility():
+            for i, layer in enumerate(layers):
+                self.log.info("Extracting {}".format(layer))
+
+                img_filename = self.output_seq_filename % i
+                output_image_path = os.path.join(staging_dir, img_filename)
+                list_img_filename.append(img_filename)
+
+                with photoshop.maintained_visibility():
+                    stub.hide_all_others_layers([layer])
+                    stub.saveAs(output_image_path, 'jpg', True)
+
+        return list_img_filename
@@ -70,9 +70,9 @@ def get_resolve_module():
         sys.exit()
     # assign global var and return
     bmdvr = bmd.scriptapp("Resolve")
-    # bmdvf = bmd.scriptapp("Fusion")
+    bmdvf = bmd.scriptapp("Fusion")
     resolve.api.bmdvr = bmdvr
-    resolve.api.bmdvf = bmdvr.Fusion()
+    resolve.api.bmdvf = bmdvf
    log.info(("Assigning resolve module to "
              f"`pype.hosts.resolve.api.bmdvr`: {resolve.api.bmdvr}"))
    log.info(("Assigning resolve module to "
@@ -8,7 +8,7 @@
     "asset": "sq01_sh0010",
     "task": "Compositing",
     "variant": "myVariant",
-    "uuid": "a485f148-9121-46a5-8157-aa64df0fb449",
+    "instance_id": "a485f148-9121-46a5-8157-aa64df0fb449",
     "creator_attributes": {
         "number_key": 10,
         "ha": 10

@@ -29,8 +29,8 @@
     "asset": "sq01_sh0010",
     "task": "Compositing",
     "variant": "myVariant2",
-    "uuid": "a485f148-9121-46a5-8157-aa64df0fb444",
     "creator_attributes": {},
+    "instance_id": "a485f148-9121-46a5-8157-aa64df0fb444",
     "publish_attributes": {
         "CollectFtrackApi": {
             "add_ftrack_family": true

@@ -47,8 +47,8 @@
     "asset": "sq01_sh0010",
     "task": "Compositing",
     "variant": "Main",
-    "uuid": "3607bc95-75f6-4648-a58d-e699f413d09f",
     "creator_attributes": {},
+    "instance_id": "3607bc95-75f6-4648-a58d-e699f413d09f",
     "publish_attributes": {
         "CollectFtrackApi": {
             "add_ftrack_family": true

@@ -65,7 +65,7 @@
     "asset": "sq01_sh0020",
     "task": "Compositing",
     "variant": "Main2",
-    "uuid": "4ccf56f6-9982-4837-967c-a49695dbe8eb",
+    "instance_id": "4ccf56f6-9982-4837-967c-a49695dbe8eb",
     "creator_attributes": {},
     "publish_attributes": {
         "CollectFtrackApi": {

@@ -83,7 +83,7 @@
     "asset": "sq01_sh0020",
     "task": "Compositing",
     "variant": "Main2",
-    "uuid": "4ccf56f6-9982-4837-967c-a49695dbe8ec",
+    "instance_id": "4ccf56f6-9982-4837-967c-a49695dbe8ec",
     "creator_attributes": {},
     "publish_attributes": {
         "CollectFtrackApi": {

@@ -101,7 +101,7 @@
     "asset": "Alpaca_01",
     "task": "modeling",
     "variant": "Main",
-    "uuid": "7c9ddfc7-9f9c-4c1c-b233-38c966735fb6",
+    "instance_id": "7c9ddfc7-9f9c-4c1c-b233-38c966735fb6",
     "creator_attributes": {},
     "publish_attributes": {}
 }
@@ -114,7 +114,7 @@ def update_instances(update_list):

     instances = HostContext.get_instances()
     for instance_data in instances:
-        instance_id = instance_data["uuid"]
+        instance_id = instance_data["instance_id"]
         if instance_id in updated_instances:
             new_instance_data = updated_instances[instance_id]
             old_keys = set(instance_data.keys())

@@ -132,10 +132,10 @@ def remove_instances(instances):

     current_instances = HostContext.get_instances()
     for instance in instances:
-        instance_id = instance.data["uuid"]
+        instance_id = instance.data["instance_id"]
         found_idx = None
         for idx, _instance in enumerate(current_instances):
-            if instance_id == _instance["uuid"]:
+            if instance_id == _instance["instance_id"]:
                 found_idx = idx
                 break
openpype/hosts/traypublisher/api/__init__.py (new file, 20 lines)

@@ -0,0 +1,20 @@
+from .pipeline import (
+    install,
+    ls,
+
+    set_project_name,
+    get_context_title,
+    get_context_data,
+    update_context_data,
+)
+
+
+__all__ = (
+    "install",
+    "ls",
+
+    "set_project_name",
+    "get_context_title",
+    "get_context_data",
+    "update_context_data",
+)
openpype/hosts/traypublisher/api/pipeline.py (new file, 180 lines)

@@ -0,0 +1,180 @@
+import os
+import json
+import tempfile
+import atexit
+
+from avalon import io
+import avalon.api
+import pyblish.api
+
+from openpype.pipeline import BaseCreator
+
+ROOT_DIR = os.path.dirname(os.path.dirname(
+    os.path.abspath(__file__)
+))
+PUBLISH_PATH = os.path.join(ROOT_DIR, "plugins", "publish")
+CREATE_PATH = os.path.join(ROOT_DIR, "plugins", "create")
+
+
+class HostContext:
+    _context_json_path = None
+
+    @staticmethod
+    def _on_exit():
+        if (
+            HostContext._context_json_path
+            and os.path.exists(HostContext._context_json_path)
+        ):
+            os.remove(HostContext._context_json_path)
+
+    @classmethod
+    def get_context_json_path(cls):
+        if cls._context_json_path is None:
+            output_file = tempfile.NamedTemporaryFile(
+                mode="w", prefix="traypub_", suffix=".json"
+            )
+            output_file.close()
+            cls._context_json_path = output_file.name
+            atexit.register(HostContext._on_exit)
+            print(cls._context_json_path)
+        return cls._context_json_path
+
+    @classmethod
+    def _get_data(cls, group=None):
+        json_path = cls.get_context_json_path()
+        data = {}
+        if not os.path.exists(json_path):
+            with open(json_path, "w") as json_stream:
+                json.dump(data, json_stream)
+        else:
+            with open(json_path, "r") as json_stream:
+                content = json_stream.read()
+            if content:
+                data = json.loads(content)
+        if group is None:
+            return data
+        return data.get(group)
+
+    @classmethod
+    def _save_data(cls, group, new_data):
+        json_path = cls.get_context_json_path()
+        data = cls._get_data()
+        data[group] = new_data
+        with open(json_path, "w") as json_stream:
+            json.dump(data, json_stream)
+
+    @classmethod
+    def add_instance(cls, instance):
+        instances = cls.get_instances()
+        instances.append(instance)
+        cls.save_instances(instances)
+
+    @classmethod
+    def get_instances(cls):
+        return cls._get_data("instances") or []
+
+    @classmethod
+    def save_instances(cls, instances):
+        cls._save_data("instances", instances)
+
+    @classmethod
+    def get_context_data(cls):
+        return cls._get_data("context") or {}
+
+    @classmethod
+    def save_context_data(cls, data):
+        cls._save_data("context", data)
+
+    @classmethod
+    def get_project_name(cls):
+        return cls._get_data("project_name")
+
+    @classmethod
+    def set_project_name(cls, project_name):
+        cls._save_data("project_name", project_name)
+
+    @classmethod
+    def get_data_to_store(cls):
+        return {
+            "project_name": cls.get_project_name(),
+            "instances": cls.get_instances(),
+            "context": cls.get_context_data(),
+        }
+
+
+def list_instances():
+    return HostContext.get_instances()
+
+
+def update_instances(update_list):
+    updated_instances = {}
+    for instance, _changes in update_list:
+        updated_instances[instance.id] = instance.data_to_store()
+
+    instances = HostContext.get_instances()
+    for instance_data in instances:
+        instance_id = instance_data["instance_id"]
+        if instance_id in updated_instances:
+            new_instance_data = updated_instances[instance_id]
+            old_keys = set(instance_data.keys())
+            new_keys = set(new_instance_data.keys())
+            instance_data.update(new_instance_data)
+            for key in (old_keys - new_keys):
+                instance_data.pop(key)
+
+    HostContext.save_instances(instances)
+
+
+def remove_instances(instances):
+    if not isinstance(instances, (tuple, list)):
+        instances = [instances]
+
+    current_instances = HostContext.get_instances()
+    for instance in instances:
+        instance_id = instance.data["instance_id"]
+        found_idx = None
+        for idx, _instance in enumerate(current_instances):
+            if instance_id == _instance["instance_id"]:
+                found_idx = idx
+                break
+
+        if found_idx is not None:
+            current_instances.pop(found_idx)
+    HostContext.save_instances(current_instances)
+
+
+def get_context_data():
+    return HostContext.get_context_data()
+
+
+def update_context_data(data, changes):
+    HostContext.save_context_data(data)
+
+
+def get_context_title():
+    return HostContext.get_project_name()
+
+
+def ls():
+    """Probably will never return loaded containers."""
+    return []
+
+
+def install():
+    """This is called before a project is known.
+
+    Project is defined with 'set_project_name'.
+    """
+    os.environ["AVALON_APP"] = "traypublisher"
+
+    pyblish.api.register_host("traypublisher")
+    pyblish.api.register_plugin_path(PUBLISH_PATH)
+    avalon.api.register_plugin_path(BaseCreator, CREATE_PATH)
+
+
+def set_project_name(project_name):
+    # TODO Deregister project specific plugins and register new project plugins
+    os.environ["AVALON_PROJECT"] = project_name
+    avalon.api.Session["AVALON_PROJECT"] = project_name
+    io.install()
+    HostContext.set_project_name(project_name)
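`HostContext` keeps all tray-publisher state in one throwaway JSON file (created via `tempfile` and deleted at interpreter exit through `atexit`), so the UI process needs no database connection until a project is chosen. A sketch of the round-trip, assuming the module is importable as added above; the project and instance values are illustrative:

```python
from openpype.hosts.traypublisher.api.pipeline import HostContext

HostContext.set_project_name("demo_project")
HostContext.add_instance({"instance_id": "abc", "family": "workfile"})

print(HostContext.get_project_name())   # "demo_project"
print(HostContext.get_instances())      # [{"instance_id": "abc", ...}]
print(HostContext.get_data_to_store())  # project_name + instances + context
```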
@@ -0,0 +1,97 @@
+from openpype.hosts.traypublisher.api import pipeline
+from openpype.pipeline import (
+    Creator,
+    CreatedInstance,
+    lib
+)
+
+
+class WorkfileCreator(Creator):
+    identifier = "workfile"
+    label = "Workfile"
+    family = "workfile"
+    description = "Publish backup of workfile"
+
+    create_allow_context_change = True
+
+    extensions = [
+        # Maya
+        ".ma", ".mb",
+        # Nuke
+        ".nk",
+        # Hiero
+        ".hrox",
+        # Houdini
+        ".hip", ".hiplc", ".hipnc",
+        # Blender
+        ".blend",
+        # Celaction
+        ".scn",
+        # TVPaint
+        ".tvpp",
+        # Fusion
+        ".comp",
+        # Harmony
+        ".zip",
+        # Premiere
+        ".prproj",
+        # Resolve
+        ".drp",
+        # Photoshop
+        ".psd", ".psb",
+        # Aftereffects
+        ".aep"
+    ]
+
+    def get_icon(self):
+        return "fa.file"
+
+    def collect_instances(self):
+        for instance_data in pipeline.list_instances():
+            creator_id = instance_data.get("creator_identifier")
+            if creator_id == self.identifier:
+                instance = CreatedInstance.from_existing(
+                    instance_data, self
+                )
+                self._add_instance_to_context(instance)
+
+    def update_instances(self, update_list):
+        pipeline.update_instances(update_list)
+
+    def remove_instances(self, instances):
+        pipeline.remove_instances(instances)
+        for instance in instances:
+            self._remove_instance_from_context(instance)
+
+    def create(self, subset_name, data, pre_create_data):
+        # Pass precreate data to creator attributes
+        data["creator_attributes"] = pre_create_data
+        # Create new instance
+        new_instance = CreatedInstance(self.family, subset_name, data, self)
+        # Host implementation of storing metadata about instance
+        pipeline.HostContext.add_instance(new_instance.data_to_store())
+        # Add instance to current context
+        self._add_instance_to_context(new_instance)
+
+    def get_default_variants(self):
+        return [
+            "Main"
+        ]
+
+    def get_instance_attr_defs(self):
+        output = [
+            lib.FileDef(
+                "filepath",
+                folders=False,
+                extensions=self.extensions,
+                label="Filepath"
+            )
+        ]
+        return output
+
+    def get_pre_create_attr_defs(self):
+        # Use same attributes as for instance attributes
+        return self.get_instance_attr_defs()
+
+    def get_detail_description(self):
+        return """# Publish workfile backup"""
@@ -0,0 +1,24 @@
+import pyblish.api
+
+
+class CollectSource(pyblish.api.ContextPlugin):
+    """Collecting instances from traypublisher host."""
+
+    label = "Collect source"
+    order = pyblish.api.CollectorOrder - 0.49
+    hosts = ["traypublisher"]
+
+    def process(self, context):
+        # get json paths from os and load them
+        source_name = "traypublisher"
+        for instance in context:
+            source = instance.data.get("source")
+            if not source:
+                instance.data["source"] = source_name
+                self.log.info((
+                    "Source of instance \"{}\" is changed to \"{}\""
+                ).format(instance.data["name"], source_name))
+            else:
+                self.log.info((
+                    "Source of instance \"{}\" was already set to \"{}\""
+                ).format(instance.data["name"], source))
@@ -0,0 +1,31 @@
+import os
+import pyblish.api
+
+
+class CollectWorkfile(pyblish.api.InstancePlugin):
+    """Collect representation of workfile instances."""
+
+    label = "Collect Workfile"
+    order = pyblish.api.CollectorOrder - 0.49
+    families = ["workfile"]
+    hosts = ["traypublisher"]
+
+    def process(self, instance):
+        if "representations" not in instance.data:
+            instance.data["representations"] = []
+        repres = instance.data["representations"]
+
+        creator_attributes = instance.data["creator_attributes"]
+        filepath = creator_attributes["filepath"]
+        instance.data["sourceFilepath"] = filepath
+
+        staging_dir = os.path.dirname(filepath)
+        filename = os.path.basename(filepath)
+        ext = os.path.splitext(filename)[-1]
+
+        repres.append({
+            "ext": ext,
+            "name": ext,
+            "stagingDir": staging_dir,
+            "files": filename
+        })
@@ -0,0 +1,24 @@
+import os
+import pyblish.api
+from openpype.pipeline import PublishValidationError
+
+
+class ValidateWorkfilePath(pyblish.api.InstancePlugin):
+    """Validate that the workfile instance filepath exists."""
+
+    label = "Collect Workfile"
+    order = pyblish.api.ValidatorOrder - 0.49
+    families = ["workfile"]
+    hosts = ["traypublisher"]
+
+    def process(self, instance):
+        filepath = instance.data["sourceFilepath"]
+        if not filepath:
+            raise PublishValidationError((
+                "Filepath of 'workfile' instance \"{}\" is not set"
+            ).format(instance.data["name"]))
+
+        if not os.path.exists(filepath):
+            raise PublishValidationError((
+                "Filepath of 'workfile' instance \"{}\" does not exist: {}"
+            ).format(instance.data["name"], filepath))
@@ -29,6 +29,7 @@ from .execute import (
     get_linux_launcher_args,
     execute,
     run_subprocess,
+    run_detached_process,
     run_openpype_process,
     clean_envs_for_openpype_process,
     path_to_subprocess_arg,

@@ -130,7 +131,7 @@ from .applications import (
     PostLaunchHook,

     EnvironmentPrepData,
-    prepare_host_environments,
+    prepare_app_environments,
     prepare_context_environments,
     get_app_environments_for_context,
     apply_project_environments_value

@@ -188,6 +189,7 @@ __all__ = [
     "get_linux_launcher_args",
     "execute",
     "run_subprocess",
+    "run_detached_process",
     "run_openpype_process",
     "clean_envs_for_openpype_process",
     "path_to_subprocess_arg",

@@ -261,7 +263,7 @@ __all__ = [
     "PreLaunchHook",
     "PostLaunchHook",
     "EnvironmentPrepData",
-    "prepare_host_environments",
+    "prepare_app_environments",
     "prepare_context_environments",
     "get_app_environments_for_context",
     "apply_project_environments_value",
@ -1295,7 +1295,7 @@ def get_app_environments_for_context(
|
|||
"env": env
|
||||
})
|
||||
|
||||
prepare_host_environments(data, env_group)
|
||||
prepare_app_environments(data, env_group)
|
||||
prepare_context_environments(data, env_group)
|
||||
|
||||
# Discard avalon connection
|
||||
|
|
@ -1316,7 +1316,7 @@ def _merge_env(env, current_env):
|
|||
return result
|
||||
|
||||
|
||||
def prepare_host_environments(data, env_group=None, implementation_envs=True):
|
||||
def prepare_app_environments(data, env_group=None, implementation_envs=True):
|
||||
"""Modify launch environments based on launched app and context.
|
||||
|
||||
Args:
|
||||
|
|
@@ -1474,6 +1474,22 @@ def prepare_context_environments(data, env_group=None):
        )

    app = data["app"]
    context_env = {
        "AVALON_PROJECT": project_doc["name"],
        "AVALON_ASSET": asset_doc["name"],
        "AVALON_TASK": task_name,
        "AVALON_APP_NAME": app.full_name
    }

    log.debug(
        "Context environments set:\n{}".format(
            json.dumps(context_env, indent=4)
        )
    )
    data["env"].update(context_env)
    if not app.is_host:
        return

    workdir_data = get_workdir_data(
        project_doc, asset_doc, task_name, app.host_name
    )
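With hypothetical context values, the environment set for every application (host or not) would look like this; only host applications continue on to receive `AVALON_APP` and `AVALON_WORKDIR` further below:

```
# Assumed example context: project "demo", asset "sh010", task "comp",
# launching the application variant "nuke/13-0".
context_env = {
    "AVALON_PROJECT": "demo",
    "AVALON_ASSET": "sh010",
    "AVALON_TASK": "comp",
    "AVALON_APP_NAME": "nuke/13-0"
}
```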
@@ -1504,20 +1520,8 @@
            "Couldn't create workdir because: {}".format(str(exc))
        )

    context_env = {
        "AVALON_PROJECT": project_doc["name"],
        "AVALON_ASSET": asset_doc["name"],
        "AVALON_TASK": task_name,
        "AVALON_APP": app.host_name,
        "AVALON_APP_NAME": app.full_name,
        "AVALON_WORKDIR": workdir
    }
    log.debug(
        "Context environments set:\n{}".format(
            json.dumps(context_env, indent=4)
        )
    )
    data["env"].update(context_env)
    data["env"]["AVALON_APP"] = app.host_name
    data["env"]["AVALON_WORKDIR"] = workdir

    _prepare_last_workfile(data, workdir)
@@ -17,7 +17,7 @@ def collect_frames(files):
    Returns:
        (dict): {'/asset/subset_v001.0001.png': '0001', ....}
    """
    collections, remainder = clique.assemble(files)
    collections, remainder = clique.assemble(files, minimum_items=1)

    sources_and_frames = {}
    if collections:
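The `minimum_items=1` change matters for single-frame renders: by default clique only forms a collection from two or more items, so a lone frame would land in `remainder` and get no frame number. A minimal sketch of the difference (output shapes are illustrative):

```
import clique

files = ["render.0001.png"]

# Default behaviour: a single file cannot form a collection.
collections, remainder = clique.assemble(files)
# collections == [], remainder == ["render.0001.png"]

# With minimum_items=1 the lone frame is collected and its
# frame number ("0001") can be extracted.
collections, remainder = clique.assemble(files, minimum_items=1)
# collections now holds one collection matching "render.%04d.png"
```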
@@ -1,5 +1,9 @@
import os
import sys
import subprocess
import platform
import json
import tempfile
import distutils.spawn

from .log import PypeLogger as Logger
@@ -181,6 +185,80 @@ def run_openpype_process(*args, **kwargs):
    return run_subprocess(args, env=env, **kwargs)


def run_detached_process(args, **kwargs):
    """Execute a process with the passed arguments as a detached process.

    Values from 'os.environ' are used as the environment if none is passed.
    They are cleaned using the 'clean_envs_for_openpype_process' function.

    Example:
    ```
    run_detached_process(["<openpype executable>", "run", "<path to .py script>"])
    ```

    Args:
        args (list): OpenPype cli arguments.
        **kwargs (dict): Keyword arguments for subprocess.Popen.

    Returns:
        subprocess.Popen: Launched process. Note that on Linux the returned
            process may already be terminated.
    """
    env = kwargs.pop("env", None)
    # Keep the passed env untouched if it is not empty
    if not env:
        env = os.environ

    # Create a copy of the passed env
    kwargs["env"] = {k: v for k, v in env.items()}

    low_platform = platform.system().lower()
    if low_platform == "darwin":
        new_args = ["open", "-na", args.pop(0), "--args"]
        new_args.extend(args)
        args = new_args

    elif low_platform == "windows":
        flags = (
            subprocess.CREATE_NEW_PROCESS_GROUP
            | subprocess.DETACHED_PROCESS
        )
        kwargs["creationflags"] = flags

        if not sys.stdout:
            kwargs["stdout"] = subprocess.DEVNULL
            kwargs["stderr"] = subprocess.DEVNULL

    elif low_platform == "linux" and get_linux_launcher_args() is not None:
        json_data = {
            "args": args,
            "env": kwargs.pop("env")
        }
        json_temp = tempfile.NamedTemporaryFile(
            mode="w", prefix="op_app_args", suffix=".json", delete=False
        )
        json_temp.close()
        json_temp_filepath = json_temp.name
        with open(json_temp_filepath, "w") as stream:
            json.dump(json_data, stream)

        new_args = get_linux_launcher_args()
        new_args.append(json_temp_filepath)

        # Create a mid-process which will launch the application
        process = subprocess.Popen(new_args, **kwargs)
        # Wait until the mid-process finishes
        # - This is important! The process would stay in "open" state.
        process.wait()
        # Remove the temp file
        os.remove(json_temp_filepath)
        # Return the mid-process, which is already terminated
        return process

    process = subprocess.Popen(args, **kwargs)
    return process
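A hypothetical call, to make the platform behaviour concrete (paths are placeholders):

```
# Launch a script through OpenPype without blocking the caller.
args = ["/path/to/openpype_console", "run", "/studio/scripts/job.py"]
process = run_detached_process(args)
# On Windows the child is created with DETACHED_PROCESS; on macOS it is
# relaunched via "open -na"; on Linux with a launcher script the returned
# object is the already-finished mid-process, not the application itself.
```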


def path_to_subprocess_arg(path):
    """Prepare path for subprocess arguments.
@@ -49,11 +49,13 @@ class Terminal:
    """

    from openpype.lib import env_value_to_bool
    use_colors = env_value_to_bool(
        "OPENPYPE_LOG_NO_COLORS", default=Terminal.use_colors
    log_no_colors = env_value_to_bool(
        "OPENPYPE_LOG_NO_COLORS", default=None
    )
    if not use_colors:
        Terminal.use_colors = use_colors
    if log_no_colors is not None:
        Terminal.use_colors = not log_no_colors

    if not Terminal.use_colors:
        Terminal._initialized = True
        return
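The new logic only touches `Terminal.use_colors` when the variable is actually set, instead of misreading `OPENPYPE_LOG_NO_COLORS` as a "use colors" value. A small sketch of the intended behaviour:

```
import os
from openpype.lib import env_value_to_bool

os.environ["OPENPYPE_LOG_NO_COLORS"] = "1"
log_no_colors = env_value_to_bool("OPENPYPE_LOG_NO_COLORS", default=None)
# -> True, so Terminal.use_colors becomes False and colors are skipped.
# When the variable is unset, the default None leaves use_colors untouched.
```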
@@ -33,14 +33,18 @@ DEFAULT_OPENPYPE_MODULES = (
    "avalon_apps",
    "clockify",
    "log_viewer",
    "deadline",
    "muster",
    "royalrender",
    "python_console_interpreter",
    "ftrack",
    "slack",
    "webserver",
    "launcher_action",
    "project_manager_action",
    "settings_action",
    "standalonepublish_action",
    "traypublish_action",
    "job_queue",
    "timers_manager",
    "sync_server",
@@ -219,8 +223,6 @@ def load_interfaces(force=False):

def _load_interfaces():
    # Key under which modules will be imported in `sys.modules`
    from openpype.lib import import_filepath

    modules_key = "openpype_interfaces"

    sys.modules[modules_key] = openpype_interfaces = (
@@ -845,6 +847,7 @@ class TrayModulesManager(ModulesManager):
    "avalon",
    "clockify",
    "standalonepublish_tool",
    "traypublish_tool",
    "log_viewer",
    "local_settings",
    "settings"
@@ -15,7 +15,7 @@ import attr
import requests

import pyblish.api
from .abstract_metaplugins import AbstractMetaInstancePlugin
from openpype.lib.abstract_metaplugins import AbstractMetaInstancePlugin


def requests_post(*args, **kwargs):
@@ -5,9 +5,9 @@ import pyblish.api

from avalon import api

from openpype.lib import abstract_submit_deadline
from openpype.lib.abstract_submit_deadline import DeadlineJobInfo
from openpype.lib import env_value_to_bool
from openpype_modules.deadline import abstract_submit_deadline
from openpype_modules.deadline.abstract_submit_deadline import DeadlineJobInfo


@attr.s
@@ -24,7 +24,9 @@ class DeadlinePluginInfo():
    MultiProcess = attr.ib(default=None)


class AfterEffectsSubmitDeadline(abstract_submit_deadline.AbstractSubmitDeadline):
class AfterEffectsSubmitDeadline(
    abstract_submit_deadline.AbstractSubmitDeadline
):

    label = "Submit AE to Deadline"
    order = pyblish.api.IntegratorOrder + 0.1
@@ -8,11 +8,11 @@ import re

import attr
import pyblish.api

import openpype.lib.abstract_submit_deadline
from openpype.lib.abstract_submit_deadline import DeadlineJobInfo
from avalon import api

from openpype_modules.deadline import abstract_submit_deadline
from openpype_modules.deadline.abstract_submit_deadline import DeadlineJobInfo


class _ZipFile(ZipFile):
    """Extended check for windows invalid characters."""
@@ -217,7 +217,8 @@ class PluginInfo(object):


class HarmonySubmitDeadline(
    openpype.lib.abstract_submit_deadline.AbstractSubmitDeadline):
    abstract_submit_deadline.AbstractSubmitDeadline
):
    """Submit render write of Harmony scene to Deadline.

    Renders are submitted to a Deadline Web Service as
@@ -1,11 +1,10 @@
import os
import json
import requests

import pyblish.api

from openpype.lib.abstract_submit_deadline import requests_get
from openpype.lib.delivery import collect_frames
from openpype_modules.deadline.abstract_submit_deadline import requests_get


class ValidateExpectedFiles(pyblish.api.InstancePlugin):
@@ -30,47 +29,58 @@ class ValidateExpectedFiles(pyblish.api.InstancePlugin):
        staging_dir = repre["stagingDir"]
        existing_files = self._get_existing_files(staging_dir)

        expected_non_existent = expected_files.difference(
            existing_files)
        if len(expected_non_existent) != 0:
            self.log.info("Some expected files missing {}".format(
                expected_non_existent))
        if self.allow_user_override:
            # We always check for user override because the user might have
            # also overridden the Job frame list to be longer than the
            # originally submitted frame range
            # todo: We should first check if Job frame range was overridden
            # at all so we don't unnecessarily override anything
            file_name_template, frame_placeholder = \
                self._get_file_name_template_and_placeholder(
                    expected_files)

        if self.allow_user_override:
            file_name_template, frame_placeholder = \
                self._get_file_name_template_and_placeholder(
                    expected_files)
            if not file_name_template:
                raise RuntimeError("Unable to retrieve file_name template "
                                   "from files: {}".format(expected_files))

            if not file_name_template:
                return
            job_expected_files = self._get_job_expected_files(
                file_name_template,
                frame_placeholder,
                frame_list)

            real_expected_rendered = self._get_real_render_expected(
                file_name_template,
                frame_placeholder,
                frame_list)
            job_files_diff = job_expected_files.difference(expected_files)
            if job_files_diff:
                self.log.debug(
                    "Detected difference in expected output files from "
                    "Deadline job. Assuming an updated frame list by the "
                    "user. Difference: {}".format(sorted(job_files_diff))
                )

            real_expected_non_existent = \
                real_expected_rendered.difference(existing_files)
            if len(real_expected_non_existent) != 0:
                raise RuntimeError("Still missing some files {}".format(
                    real_expected_non_existent))
            self.log.info("Update range from actual job range")
            repre["files"] = sorted(list(real_expected_rendered))
        else:
            raise RuntimeError("Some expected files missing {}".format(
                expected_non_existent))
                # Update the representation expected files
                self.log.info("Update range from actual job range "
                              "to frame list: {}".format(frame_list))
                repre["files"] = sorted(job_expected_files)

                # Update the expected files
                expected_files = job_expected_files

        # We only check expected files against existing ones (not the other
        # way around) because extra files in the folder are allowed even if
        # they are not used.
        missing = expected_files - existing_files
        if missing:
            raise RuntimeError("Missing expected files: {}".format(
                sorted(missing)))
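To make the final check concrete, a small sketch with hypothetical file sets:

```
expected_files = {"render.0001.png", "render.0002.png"}
existing_files = {"render.0001.png", "render.0002.png", "thumbnail.png"}

# Extra files on disk (thumbnail.png) are fine; only absent expected
# files raise an error.
missing = expected_files - existing_files
assert not missing
```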
    def _get_frame_list(self, original_job_id):
        """
        Returns list of frame ranges from all render job.
        """Returns list of frame ranges from all render jobs.

        Render job might be requeried so job_id in metadata.json is invalid
        GlobalJobPreload injects current ids to RENDER_JOB_IDS.
        Render job might be re-submitted, so the job_id in metadata.json could
        be invalid. GlobalJobPreload injects the current job ids to
        RENDER_JOB_IDS.

        Args:
            original_job_id (str)
        Returns:
            (list)
        Args:
            original_job_id (str)
        Returns:
            (list)
        """
        all_frame_lists = []
        render_job_ids = os.environ.get("RENDER_JOB_IDS")
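Presumably the injected variable holds one or more job ids that can be split and queried individually; the exact format is not shown in this diff, so the following is an assumption:

```
# Hypothetical value injected by GlobalJobPreload; format assumed.
os.environ["RENDER_JOB_IDS"] = "62f9a1b2c3d4e5f60718293a,62f9a1b2c3d4e5f60718293b"

render_job_ids = os.environ.get("RENDER_JOB_IDS")
job_ids = render_job_ids.split(",") if render_job_ids else []
```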
@@ -87,13 +97,15 @@ class ValidateExpectedFiles(pyblish.api.InstancePlugin):

        return all_frame_lists

    def _get_real_render_expected(self, file_name_template, frame_placeholder,
                                  frame_list):
        """
        Calculates list of names of expected rendered files.
    def _get_job_expected_files(self,
                                file_name_template,
                                frame_placeholder,
                                frame_list):
        """Calculates list of names of expected rendered files.

        Might be different from expected files from submission if user
        explicitly and manually changed the frame list on the Deadline job.

        Might be different from job expected files if user explicitly and
        manually change frame list on Deadline job.
        """
        real_expected_rendered = set()
        src_padding_exp = "%0{}d".format(len(frame_placeholder))
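The padding expression turns the `#` placeholder into a printf-style format. A worked sketch with hypothetical values (the template substitution shown here is an assumption about the truncated function body):

```
frame_placeholder = "####"
src_padding_exp = "%0{}d".format(len(frame_placeholder))  # -> "%04d"

file_name_template = "render.####.png"
frame_list = [1, 2, 3]  # assumed already expanded from a "1-3" range

expected = {
    file_name_template.replace(frame_placeholder, src_padding_exp % frame)
    for frame in frame_list
}
# -> {"render.0001.png", "render.0002.png", "render.0003.png"}
```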
@@ -115,6 +127,14 @@ class ValidateExpectedFiles(pyblish.api.InstancePlugin):

        file_name_template = frame_placeholder = None
        for file_name, frame in sources_and_frames.items():

            # There might be cases where clique was unable to collect
            # collections in `collect_frames` - thus we capture that case
            if frame is None:
                self.log.warning("Unable to detect frame from filename: "
                                 "{}".format(file_name))
                continue

            frame_placeholder = "#" * len(frame)
            file_name_template = os.path.basename(
                file_name.replace(frame, frame_placeholder))
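A worked example of the placeholder derivation above, with assumed values:

```
import os

file_name = "/out/render.0005.png"
frame = "0005"

frame_placeholder = "#" * len(frame)              # "####"
file_name_template = os.path.basename(
    file_name.replace(frame, frame_placeholder))  # "render.####.png"
```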
@@ -123,11 +143,11 @@ class ValidateExpectedFiles(pyblish.api.InstancePlugin):

        return file_name_template, frame_placeholder

    def _get_job_info(self, job_id):
        """
        Calls DL for actual job info for 'job_id'
        """Calls Deadline for the actual job info of 'job_id'.

        Might be different than job info saved in metadata.json if user
        manually changes job pre/during rendering.

        Might be different than job info saved in metadata.json if user
        manually changes job pre/during rendering.
        """
        # get default deadline webservice url from deadline module
        deadline_url = self.instance.context.data["defaultDeadline"]
@@ -140,8 +160,8 @@ class ValidateExpectedFiles(pyblish.api.InstancePlugin):
        try:
            response = requests_get(url)
        except requests.exceptions.ConnectionError:
            print("Deadline is not accessible at {}".format(deadline_url))
            # self.log("Deadline is not accessible at {}".format(deadline_url))
            self.log.error("Deadline is not accessible at "
                           "{}".format(deadline_url))
            return {}

        if not response.ok:
@@ -155,29 +175,26 @@ class ValidateExpectedFiles(pyblish.api.InstancePlugin):
            return json_content.pop()
        return {}

    def _parse_metadata_json(self, json_path):
        if not os.path.exists(json_path):
            msg = "Metadata file {} doesn't exist".format(json_path)
            raise RuntimeError(msg)

        with open(json_path) as fp:
            try:
                return json.load(fp)
            except Exception as exc:
                self.log.error(
                    "Error loading json: "
                    "{} - Exception: {}".format(json_path, exc)
                )

    def _get_existing_files(self, out_dir):
        """Returns set of existing file names from 'out_dir'"""
    def _get_existing_files(self, staging_dir):
        """Returns set of existing file names from 'staging_dir'"""
        existing_files = set()
        for file_name in os.listdir(out_dir):
        for file_name in os.listdir(staging_dir):
            existing_files.add(file_name)
        return existing_files

    def _get_expected_files(self, repre):
        """Returns set of file names from metadata.json"""
        """Returns set of file names in representation['files']

        The representations are collected from `CollectRenderedFiles` using
        the metadata.json file submitted along with the render job.

        Args:
            repre (dict): The representation containing 'files'

        Returns:
            set: Set of expected file names in the staging directory.

        """
        expected_files = set()

        files = repre["files"]
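The rest of the method is truncated in this diff. A minimal sketch of how the remainder plausibly handles both shapes, assuming the usual OpenPype convention that a representation's "files" is a string for a single file or a list for sequences:

```
# Hypothetical completion, not shown in the diff.
if isinstance(files, str):
    expected_files.add(files)
else:
    for file_name in files:
        expected_files.add(file_name)
```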
(Three binary image files changed; dimensions unchanged, sizes 1.1 KiB, 124 KiB and 124 KiB.)
@@ -1 +0,0 @@
Subproject commit b746fedf7286c3755a46f07ab72f4c414cd41fc0

@@ -1 +0,0 @@
Subproject commit d277f474ab016e7b53479c36af87cb861d0cc53e
Some files were not shown because too many files have changed in this diff.