Mirror of https://github.com/ynput/ayon-core.git, synced 2025-12-25 05:14:40 +01:00
[Automated] Merged develop into main
This commit is contained in: commit 54838c6deb
84 changed files with 5127 additions and 1964 deletions
@@ -59,7 +59,7 @@ def validate_mongo_connection(cnx: str) -> (bool, str):
        return False, "Not mongodb schema"

    kwargs = {
-        "serverSelectionTimeoutMS": 2000
+        "serverSelectionTimeoutMS": os.environ.get("AVALON_TIMEOUT", 2000)
    }
    # Add certificate path if should be required
    if should_add_certificate_path_to_mongo_url(cnx):
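The hunk above swaps the hard-coded 2000 ms server-selection timeout for an `AVALON_TIMEOUT` environment override. A minimal sketch of the resulting behavior (assuming pymongo; note that `os.environ.get` returns a string when the variable is set, so a defensive caller coerces it to int):

    import os
    from pymongo import MongoClient
    from pymongo.errors import ServerSelectionTimeoutError

    # Same default as the diff; int() guards against the env value being a string
    timeout_ms = int(os.environ.get("AVALON_TIMEOUT", 2000))
    client = MongoClient(
        "mongodb://localhost:27017",
        serverSelectionTimeoutMS=timeout_ms
    )
    try:
        client.server_info()
    except ServerSelectionTimeoutError:
        print("No MongoDB server answered within {} ms".format(timeout_ms))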
@@ -105,9 +105,9 @@ def menu_install():
    sceneinventory_action.triggered.connect(
        lambda: host_tools.show_scene_inventory(parent=main_window)
    )
    menu.addSeparator()

    if os.getenv("OPENPYPE_DEVELOP"):
        menu.addSeparator()
        reload_action = menu.addAction("Reload pipeline")
        reload_action.setIcon(QtGui.QIcon("icons:ColorAdd.png"))
        reload_action.triggered.connect(reload_config)
@@ -120,3 +120,10 @@ def menu_install():
    apply_colorspace_c_action = menu.addAction("Apply Colorspace Clips")
    apply_colorspace_c_action.setIcon(QtGui.QIcon("icons:ColorAdd.png"))
    apply_colorspace_c_action.triggered.connect(apply_colorspace_clips)
+
+    menu.addSeparator()
+
+    exeprimental_action = menu.addAction("Experimental tools...")
+    exeprimental_action.triggered.connect(
+        lambda: host_tools.show_experimental_tools_dialog(parent=main_window)
+    )
@@ -3,9 +3,10 @@
 import contextlib

 import logging
-from Qt import QtCore, QtGui
-from openpype.tools.utils.widgets import AssetWidget
-from avalon import style, io
+from Qt import QtWidgets, QtCore, QtGui
+from avalon import io
+from openpype import style
+from openpype.tools.utils.assets_widget import SingleSelectAssetsWidget

 from pxr import Sdf

@@ -13,6 +14,60 @@ from pxr import Sdf
log = logging.getLogger(__name__)


class SelectAssetDialog(QtWidgets.QWidget):
    """Frameless assets dialog to select asset with double click.

    Args:
        parm: Parameter where selected asset name is set.
    """
    def __init__(self, parm):
        self.setWindowTitle("Pick Asset")
        self.setWindowFlags(QtCore.Qt.FramelessWindowHint | QtCore.Qt.Popup)

        assets_widget = SingleSelectAssetsWidget(io, parent=self)

        layout = QtWidgets.QHBoxLayout(self)
        layout.addWidget(assets_widget)

        assets_widget.double_clicked.connect(self._set_parameter)
        self._assets_widget = assets_widget
        self._parm = parm

    def _set_parameter(self):
        name = self._assets_widget.get_selected_asset_name()
        self._parm.set(name)
        self.close()

    def _on_show(self):
        pos = QtGui.QCursor.pos()
        # Select the current asset if there is any
        select_id = None
        name = self._parm.eval()
        if name:
            db_asset = io.find_one(
                {"name": name, "type": "asset"},
                {"_id": True}
            )
            if db_asset:
                select_id = db_asset["_id"]

        # Set stylesheet
        self.setStyleSheet(style.load_stylesheet())
        # Refresh assets (is threaded)
        self._assets_widget.refresh()
        # Select asset - must be done after refresh
        if select_id is not None:
            self._assets_widget.select_asset(select_id)

        # Show window near cursor (window's top-right corner at the cursor)
        self.resize(250, 400)
        self.move(self.mapFromGlobal(pos) - QtCore.QPoint(self.width(), 0))

    def showEvent(self, event):
        super(SelectAssetDialog, self).showEvent(event)
        self._on_show()


def pick_asset(node):
    """Show a user interface to select an Asset in the project
@@ -21,43 +76,15 @@ def pick_asset(node):

    """

-    pos = QtGui.QCursor.pos()
-
    parm = node.parm("asset_name")
    if not parm:
        log.error("Node has no 'asset' parameter: %s", node)
        return

-    # Construct the AssetWidget as a frameless popup so it automatically
+    # Construct a frameless popup so it automatically
    # closes when clicked outside of it.
    global tool
-    tool = AssetWidget(io)
-    tool.setContentsMargins(5, 5, 5, 5)
-    tool.setWindowTitle("Pick Asset")
-    tool.setStyleSheet(style.load_stylesheet())
-    tool.setWindowFlags(QtCore.Qt.FramelessWindowHint | QtCore.Qt.Popup)
-    tool.refresh()
-
-    # Select the current asset if there is any
-    name = parm.eval()
-    if name:
-        db_asset = io.find_one({"name": name, "type": "asset"})
-        if db_asset:
-            silo = db_asset.get("silo")
-            if silo:
-                tool.set_silo(silo)
-            tool.select_assets([name], expand=True)
-
-    # Show cursor (top right of window) near cursor
-    tool.resize(250, 400)
-    tool.move(tool.mapFromGlobal(pos) - QtCore.QPoint(tool.width(), 0))
-
-    def set_parameter_callback(index):
-        name = index.data(tool.model.DocumentRole)["name"]
-        parm.set(name)
-        tool.close()
-
-    tool.view.doubleClicked.connect(set_parameter_callback)
+    tool = SelectAssetDialog(parm)
    tool.show()

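The rewrite above delegates all widget setup to `SelectAssetDialog`. A usage sketch from a Houdini button callback (assuming the standard `hou` callback convention where `kwargs["node"]` is the node owning the parameter):

    def on_pick_asset_pressed(kwargs):
        # Opens the frameless popup next to the cursor; double-clicking an
        # asset writes its name into the node's "asset_name" parameter.
        pick_asset(kwargs["node"])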
@@ -41,6 +41,10 @@ opnl.workfiles_launched = False
 opnl._node_tab_name = "{}".format(os.getenv("AVALON_LABEL") or "Avalon")


+def get_nuke_imageio_settings():
+    return get_anatomy_settings(opnl.project_name)["imageio"]["nuke"]
+
+
 def get_created_node_imageio_setting(**kwarg):
     ''' Get preset data for dataflow (fileType, compression, bitDepth)
     '''
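The new `get_nuke_imageio_settings` helper centralizes the settings lookup used throughout this file. A sketch of the keys this module reads from the returned dict (the exact shape lives in the project anatomy settings; these paths are inferred from the accesses elsewhere in this diff):

    nuke_imageio = get_nuke_imageio_settings()
    workfile_knobs = nuke_imageio["workfile"]                # root colorspace knobs
    required_nodes = nuke_imageio["nodes"]["requiredNodes"]  # per-node knob presets
    regex_inputs = nuke_imageio["regexInputs"]["inputs"]     # read-node colorspace rules
    viewer_process = nuke_imageio["baking"]["viewerProcess"] # baking profile (newer configs)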
@@ -51,8 +55,7 @@ def get_created_node_imageio_setting(**kwarg):
    assert any([creator, nodeclass]), nuke.message(
        "`{}`: Missing mandatory kwargs `host`, `cls`".format(__file__))

-    imageio = get_anatomy_settings(opnl.project_name)["imageio"]
-    imageio_nodes = imageio["nuke"]["nodes"]["requiredNodes"]
+    imageio_nodes = get_nuke_imageio_settings()["nodes"]["requiredNodes"]

    imageio_node = None
    for node in imageio_nodes:
@@ -70,8 +73,7 @@ def get_imageio_input_colorspace(filename):
    ''' Get input file colorspace based on regex in settings.
    '''
    imageio_regex_inputs = (
-        get_anatomy_settings(opnl.project_name)
-        ["imageio"]["nuke"]["regexInputs"]["inputs"])
+        get_nuke_imageio_settings()["regexInputs"]["inputs"])

    preset_clrsp = None
    for regexInput in imageio_regex_inputs:
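In isolation the regex lookup works like this (a sketch with made-up rules; the key names inside each input entry are an assumption, since the diff only shows the iteration):

    import re

    imageio_regex_inputs = [
        {"regex": "exr", "colorspace": "linear"},      # hypothetical rule
        {"regex": "(jpg|png)", "colorspace": "sRGB"},  # hypothetical rule
    ]

    def guess_input_colorspace(filename):
        for regex_input in imageio_regex_inputs:
            if re.search(regex_input["regex"], filename):
                return regex_input["colorspace"]
        return None

    print(guess_input_colorspace("beauty.0001.exr"))  # linear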
@@ -553,8 +555,7 @@ def add_rendering_knobs(node, farm=True):
    Return:
        node (obj): with added knobs
    '''
-    knob_options = [
-        "Use existing frames", "Local"]
+    knob_options = ["Use existing frames", "Local"]
    if farm:
        knob_options.append("On farm")

@@ -912,8 +913,7 @@ class WorkfileSettings(object):
        ''' Setting colorspace following presets
        '''
        # get imageio
-        imageio = get_anatomy_settings(opnl.project_name)["imageio"]
-        nuke_colorspace = imageio["nuke"]
+        nuke_colorspace = get_nuke_imageio_settings()

        try:
            self.set_root_colorspace(nuke_colorspace["workfile"])
@@ -1170,386 +1170,6 @@ def get_write_node_template_attr(node):
    return anlib.fix_data_for_node_create(correct_data)


class ExporterReview:
    """
    Base class object for generating review data from Nuke

    Args:
        klass (pyblish.plugin): pyblish plugin parent
        instance (pyblish.instance): instance of pyblish context

    """
    _temp_nodes = []
    data = dict({
        "representations": list()
    })

    def __init__(self,
                 klass,
                 instance
                 ):

        self.log = klass.log
        self.instance = instance
        self.path_in = self.instance.data.get("path", None)
        self.staging_dir = self.instance.data["stagingDir"]
        self.collection = self.instance.data.get("collection", None)

    def get_file_info(self):
        if self.collection:
            self.log.debug("Collection: `{}`".format(self.collection))
            # get path
            self.fname = os.path.basename(self.collection.format(
                "{head}{padding}{tail}"))
            self.fhead = self.collection.format("{head}")

            # get first and last frame
            self.first_frame = min(self.collection.indexes)
            self.last_frame = max(self.collection.indexes)
            if "slate" in self.instance.data["families"]:
                self.first_frame += 1
        else:
            self.fname = os.path.basename(self.path_in)
            self.fhead = os.path.splitext(self.fname)[0] + "."
            self.first_frame = self.instance.data.get("frameStartHandle", None)
            self.last_frame = self.instance.data.get("frameEndHandle", None)

        if "#" in self.fhead:
            self.fhead = self.fhead.replace("#", "")[:-1]

    def get_representation_data(self, tags=None, range=False):
        add_tags = []
        if tags:
            add_tags = tags

        repre = {
            'name': self.name,
            'ext': self.ext,
            'files': self.file,
            "stagingDir": self.staging_dir,
            "tags": [self.name.replace("_", "-")] + add_tags
        }

        if range:
            repre.update({
                "frameStart": self.first_frame,
                "frameEnd": self.last_frame,
            })

        self.data["representations"].append(repre)

    def get_view_process_node(self):
        """
        Will get any active view process.

        Arguments:
            self (class): in object definition

        Returns:
            nuke.Node: copy node of Input Process node
        """
        anlib.reset_selection()
        ipn_orig = None
        for v in nuke.allNodes(filter="Viewer"):
            ip = v['input_process'].getValue()
            ipn = v['input_process_node'].getValue()
            if "VIEWER_INPUT" not in ipn and ip:
                ipn_orig = nuke.toNode(ipn)
                ipn_orig.setSelected(True)

        if ipn_orig:
            # copy selected to clipboard
            nuke.nodeCopy('%clipboard%')
            # reset selection
            anlib.reset_selection()
            # paste node and selection is on it only
            nuke.nodePaste('%clipboard%')
            # assign to variable
            ipn = nuke.selectedNode()

            return ipn

    def clean_nodes(self):
        for node in self._temp_nodes:
            nuke.delete(node)
        self._temp_nodes = []
        self.log.info("Deleted nodes...")


class ExporterReviewLut(ExporterReview):
    """
    Generator object for review lut from Nuke

    Args:
        klass (pyblish.plugin): pyblish plugin parent
        instance (pyblish.instance): instance of pyblish context

    """

    def __init__(self,
                 klass,
                 instance,
                 name=None,
                 ext=None,
                 cube_size=None,
                 lut_size=None,
                 lut_style=None):
        # initialize parent class
        ExporterReview.__init__(self, klass, instance)
        self._temp_nodes = []

        # deal with now lut defined in viewer lut
        if hasattr(klass, "viewer_lut_raw"):
            self.viewer_lut_raw = klass.viewer_lut_raw
        else:
            self.viewer_lut_raw = False

        self.name = name or "baked_lut"
        self.ext = ext or "cube"
        self.cube_size = cube_size or 32
        self.lut_size = lut_size or 1024
        self.lut_style = lut_style or "linear"

        # set frame start / end and file name to self
        self.get_file_info()

        self.log.info("File info was set...")

        self.file = self.fhead + self.name + ".{}".format(self.ext)
        self.path = os.path.join(
            self.staging_dir, self.file).replace("\\", "/")

    def generate_lut(self):
        # ---------- start nodes creation

        # CMSTestPattern
        cms_node = nuke.createNode("CMSTestPattern")
        cms_node["cube_size"].setValue(self.cube_size)
        # connect
        self._temp_nodes.append(cms_node)
        self.previous_node = cms_node
        self.log.debug("CMSTestPattern... `{}`".format(self._temp_nodes))

        # Node View Process
        ipn = self.get_view_process_node()
        if ipn is not None:
            # connect
            ipn.setInput(0, self.previous_node)
            self._temp_nodes.append(ipn)
            self.previous_node = ipn
            self.log.debug("ViewProcess... `{}`".format(self._temp_nodes))

        if not self.viewer_lut_raw:
            # OCIODisplay
            dag_node = nuke.createNode("OCIODisplay")
            # connect
            dag_node.setInput(0, self.previous_node)
            self._temp_nodes.append(dag_node)
            self.previous_node = dag_node
            self.log.debug("OCIODisplay... `{}`".format(self._temp_nodes))

        # GenerateLUT
        gen_lut_node = nuke.createNode("GenerateLUT")
        gen_lut_node["file"].setValue(self.path)
        gen_lut_node["file_type"].setValue(".{}".format(self.ext))
        gen_lut_node["lut1d"].setValue(self.lut_size)
        gen_lut_node["style1d"].setValue(self.lut_style)
        # connect
        gen_lut_node.setInput(0, self.previous_node)
        self._temp_nodes.append(gen_lut_node)
        self.log.debug("GenerateLUT... `{}`".format(self._temp_nodes))

        # ---------- end nodes creation

        # Export lut file
        nuke.execute(
            gen_lut_node.name(),
            int(self.first_frame),
            int(self.first_frame))

        self.log.info("Exported...")

        # ---------- generate representation data
        self.get_representation_data()

        self.log.debug("Representation... `{}`".format(self.data))

        # ---------- Clean up
        self.clean_nodes()

        return self.data


class ExporterReviewMov(ExporterReview):
    """
    Metaclass for generating review mov files

    Args:
        klass (pyblish.plugin): pyblish plugin parent
        instance (pyblish.instance): instance of pyblish context

    """

    def __init__(self,
                 klass,
                 instance,
                 name=None,
                 ext=None,
                 ):
        # initialize parent class
        ExporterReview.__init__(self, klass, instance)

        # passing presets for nodes to self
        if hasattr(klass, "nodes"):
            self.nodes = klass.nodes
        else:
            self.nodes = {}

        # deal with now lut defined in viewer lut
        self.viewer_lut_raw = klass.viewer_lut_raw
        self.bake_colorspace_fallback = klass.bake_colorspace_fallback
        self.bake_colorspace_main = klass.bake_colorspace_main
        self.write_colorspace = instance.data["colorspace"]

        self.name = name or "baked"
        self.ext = ext or "mov"

        # set frame start / end and file name to self
        self.get_file_info()

        self.log.info("File info was set...")

        self.file = self.fhead + self.name + ".{}".format(self.ext)
        self.path = os.path.join(
            self.staging_dir, self.file).replace("\\", "/")

    def render(self, render_node_name):
        self.log.info("Rendering... ")
        # Render Write node
        nuke.execute(
            render_node_name,
            int(self.first_frame),
            int(self.last_frame))

        self.log.info("Rendered...")

    def save_file(self):
        import shutil
        with anlib.maintained_selection():
            self.log.info("Saving nodes as file... ")
            # create nk path
            path = os.path.splitext(self.path)[0] + ".nk"
            # save file to the path
            shutil.copyfile(self.instance.context.data["currentFile"], path)

        self.log.info("Nodes exported...")
        return path

    def generate_mov(self, farm=False):
        # ---------- start nodes creation

        # Read node
        r_node = nuke.createNode("Read")
        r_node["file"].setValue(self.path_in)
        r_node["first"].setValue(self.first_frame)
        r_node["origfirst"].setValue(self.first_frame)
        r_node["last"].setValue(self.last_frame)
        r_node["origlast"].setValue(self.last_frame)
        r_node["colorspace"].setValue(self.write_colorspace)

        # connect
        self._temp_nodes.append(r_node)
        self.previous_node = r_node
        self.log.debug("Read... `{}`".format(self._temp_nodes))

        # View Process node
        ipn = self.get_view_process_node()
        if ipn is not None:
            # connect
            ipn.setInput(0, self.previous_node)
            self._temp_nodes.append(ipn)
            self.previous_node = ipn
            self.log.debug("ViewProcess... `{}`".format(self._temp_nodes))

        if not self.viewer_lut_raw:
            colorspaces = [
                self.bake_colorspace_main, self.bake_colorspace_fallback
            ]

            if any(colorspaces):
                # OCIOColorSpace with controlled output
                dag_node = nuke.createNode("OCIOColorSpace")
                self._temp_nodes.append(dag_node)
                for c in colorspaces:
                    test = dag_node["out_colorspace"].setValue(str(c))
                    if test:
                        self.log.info(
                            "Baking in colorspace... `{}`".format(c))
                        break

                if not test:
                    dag_node = nuke.createNode("OCIODisplay")
            else:
                # OCIODisplay
                dag_node = nuke.createNode("OCIODisplay")

            # connect
            dag_node.setInput(0, self.previous_node)
            self._temp_nodes.append(dag_node)
            self.previous_node = dag_node
            self.log.debug("OCIODisplay... `{}`".format(self._temp_nodes))

        # Write node
        write_node = nuke.createNode("Write")
        self.log.debug("Path: {}".format(self.path))
        write_node["file"].setValue(self.path)
        write_node["file_type"].setValue(self.ext)

        # Knobs `meta_codec` and `mov64_codec` are not available on centos.
        # TODO change this to use conditions, if possible.
        try:
            write_node["meta_codec"].setValue("ap4h")
        except Exception:
            self.log.info("`meta_codec` knob was not found")

        try:
            write_node["mov64_codec"].setValue("ap4h")
        except Exception:
            self.log.info("`mov64_codec` knob was not found")
        write_node["mov64_write_timecode"].setValue(1)
        write_node["raw"].setValue(1)
        # connect
        write_node.setInput(0, self.previous_node)
        self._temp_nodes.append(write_node)
        self.log.debug("Write... `{}`".format(self._temp_nodes))
        # ---------- end nodes creation

        # ---------- render or save to nk
        if farm:
            nuke.scriptSave()
            path_nk = self.save_file()
            self.data.update({
                "bakeScriptPath": path_nk,
                "bakeWriteNodeName": write_node.name(),
                "bakeRenderPath": self.path
            })
        else:
            self.render(write_node.name())
            # ---------- generate representation data
            self.get_representation_data(
                tags=["review", "delete"],
                range=True
            )

        self.log.debug("Representation... `{}`".format(self.data))

        # ---------- Clean up
        self.clean_nodes()
        nuke.scriptSave()
        return self.data


def get_dependent_nodes(nodes):
    """Get all dependent nodes connected to the list of nodes.

@@ -1,6 +1,7 @@
 import os
 import nuke
 from avalon.api import Session
+from avalon.nuke.pipeline import get_main_window

 from .lib import WorkfileSettings
 from openpype.api import Logger, BuildWorkfile, get_current_project_settings
@@ -1,3 +1,4 @@
+import os
 import random
 import string

@@ -100,3 +101,415 @@ class NukeLoader(api.Loader):
            nuke.delete(member)

        return dependent_nodes


class ExporterReview(object):
    """
    Base class object for generating review data from Nuke

    Args:
        klass (pyblish.plugin): pyblish plugin parent
        instance (pyblish.instance): instance of pyblish context

    """
    data = None

    def __init__(self,
                 klass,
                 instance
                 ):

        self.log = klass.log
        self.instance = instance
        self.path_in = self.instance.data.get("path", None)
        self.staging_dir = self.instance.data["stagingDir"]
        self.collection = self.instance.data.get("collection", None)
        self.data = dict({
            "representations": list()
        })

    def get_file_info(self):
        if self.collection:
            self.log.debug("Collection: `{}`".format(self.collection))
            # get path
            self.fname = os.path.basename(self.collection.format(
                "{head}{padding}{tail}"))
            self.fhead = self.collection.format("{head}")

            # get first and last frame
            self.first_frame = min(self.collection.indexes)
            self.last_frame = max(self.collection.indexes)
            if "slate" in self.instance.data["families"]:
                self.first_frame += 1
        else:
            self.fname = os.path.basename(self.path_in)
            self.fhead = os.path.splitext(self.fname)[0] + "."
            self.first_frame = self.instance.data.get("frameStartHandle", None)
            self.last_frame = self.instance.data.get("frameEndHandle", None)

        if "#" in self.fhead:
            self.fhead = self.fhead.replace("#", "")[:-1]

    def get_representation_data(self, tags=None, range=False):
        add_tags = tags or []

        repre = {
            'outputName': self.name,
            'name': self.name,
            'ext': self.ext,
            'files': self.file,
            "stagingDir": self.staging_dir,
            "tags": [self.name.replace("_", "-")] + add_tags
        }

        if range:
            repre.update({
                "frameStart": self.first_frame,
                "frameEnd": self.last_frame,
            })

        self.data["representations"].append(repre)

    def get_view_input_process_node(self):
        """
        Will get any active view process.

        Arguments:
            self (class): in object definition

        Returns:
            nuke.Node: copy node of Input Process node
        """
        anlib.reset_selection()
        ipn_orig = None
        for v in nuke.allNodes(filter="Viewer"):
            ip = v['input_process'].getValue()
            ipn = v['input_process_node'].getValue()
            if "VIEWER_INPUT" not in ipn and ip:
                ipn_orig = nuke.toNode(ipn)
                ipn_orig.setSelected(True)

        if ipn_orig:
            # copy selected to clipboard
            nuke.nodeCopy('%clipboard%')
            # reset selection
            anlib.reset_selection()
            # paste node and selection is on it only
            nuke.nodePaste('%clipboard%')
            # assign to variable
            ipn = nuke.selectedNode()

            return ipn

    def get_imageio_baking_profile(self):
        from . import lib as opnlib
        nuke_imageio = opnlib.get_nuke_imageio_settings()

        # TODO: this only secures backward compatibility; let's remove
        # this once all projects' anatomy settings are updated to newer config
        if "baking" in nuke_imageio.keys():
            return nuke_imageio["baking"]["viewerProcess"]
        else:
            return nuke_imageio["viewer"]["viewerProcess"]


class ExporterReviewLut(ExporterReview):
    """
    Generator object for review lut from Nuke

    Args:
        klass (pyblish.plugin): pyblish plugin parent
        instance (pyblish.instance): instance of pyblish context

    """
    _temp_nodes = []

    def __init__(self,
                 klass,
                 instance,
                 name=None,
                 ext=None,
                 cube_size=None,
                 lut_size=None,
                 lut_style=None):
        # initialize parent class
        super(ExporterReviewLut, self).__init__(klass, instance)

        # deal with now lut defined in viewer lut
        if hasattr(klass, "viewer_lut_raw"):
            self.viewer_lut_raw = klass.viewer_lut_raw
        else:
            self.viewer_lut_raw = False

        self.name = name or "baked_lut"
        self.ext = ext or "cube"
        self.cube_size = cube_size or 32
        self.lut_size = lut_size or 1024
        self.lut_style = lut_style or "linear"

        # set frame start / end and file name to self
        self.get_file_info()

        self.log.info("File info was set...")

        self.file = self.fhead + self.name + ".{}".format(self.ext)
        self.path = os.path.join(
            self.staging_dir, self.file).replace("\\", "/")

    def clean_nodes(self):
        for node in self._temp_nodes:
            nuke.delete(node)
        self._temp_nodes = []
        self.log.info("Deleted nodes...")

    def generate_lut(self, **kwargs):
        bake_viewer_process = kwargs["bake_viewer_process"]
        bake_viewer_input_process_node = kwargs[
            "bake_viewer_input_process"]

        # ---------- start nodes creation

        # CMSTestPattern
        cms_node = nuke.createNode("CMSTestPattern")
        cms_node["cube_size"].setValue(self.cube_size)
        # connect
        self._temp_nodes.append(cms_node)
        self.previous_node = cms_node
        self.log.debug("CMSTestPattern... `{}`".format(self._temp_nodes))

        if bake_viewer_process:
            # Node View Process
            if bake_viewer_input_process_node:
                ipn = self.get_view_input_process_node()
                if ipn is not None:
                    # connect
                    ipn.setInput(0, self.previous_node)
                    self._temp_nodes.append(ipn)
                    self.previous_node = ipn
                    self.log.debug(
                        "ViewProcess... `{}`".format(self._temp_nodes))

            if not self.viewer_lut_raw:
                # OCIODisplay
                dag_node = nuke.createNode("OCIODisplay")
                # connect
                dag_node.setInput(0, self.previous_node)
                self._temp_nodes.append(dag_node)
                self.previous_node = dag_node
                self.log.debug("OCIODisplay... `{}`".format(self._temp_nodes))

        # GenerateLUT
        gen_lut_node = nuke.createNode("GenerateLUT")
        gen_lut_node["file"].setValue(self.path)
        gen_lut_node["file_type"].setValue(".{}".format(self.ext))
        gen_lut_node["lut1d"].setValue(self.lut_size)
        gen_lut_node["style1d"].setValue(self.lut_style)
        # connect
        gen_lut_node.setInput(0, self.previous_node)
        self._temp_nodes.append(gen_lut_node)
        self.log.debug("GenerateLUT... `{}`".format(self._temp_nodes))

        # ---------- end nodes creation

        # Export lut file
        nuke.execute(
            gen_lut_node.name(),
            int(self.first_frame),
            int(self.first_frame))

        self.log.info("Exported...")

        # ---------- generate representation data
        self.get_representation_data()

        self.log.debug("Representation... `{}`".format(self.data))

        # ---------- Clean up
        self.clean_nodes()

        return self.data


class ExporterReviewMov(ExporterReview):
    """
    Metaclass for generating review mov files

    Args:
        klass (pyblish.plugin): pyblish plugin parent
        instance (pyblish.instance): instance of pyblish context

    """
    _temp_nodes = {}

    def __init__(self,
                 klass,
                 instance,
                 name=None,
                 ext=None,
                 ):
        # initialize parent class
        super(ExporterReviewMov, self).__init__(klass, instance)
        # passing presets for nodes to self
        self.nodes = klass.nodes if hasattr(klass, "nodes") else {}

        # deal with now lut defined in viewer lut
        self.viewer_lut_raw = klass.viewer_lut_raw
        self.write_colorspace = instance.data["colorspace"]

        self.name = name or "baked"
        self.ext = ext or "mov"

        # set frame start / end and file name to self
        self.get_file_info()

        self.log.info("File info was set...")

        self.file = self.fhead + self.name + ".{}".format(self.ext)
        self.path = os.path.join(
            self.staging_dir, self.file).replace("\\", "/")

    def clean_nodes(self, node_name):
        for node in self._temp_nodes[node_name]:
            nuke.delete(node)
        self._temp_nodes[node_name] = []
        self.log.info("Deleted nodes...")

    def render(self, render_node_name):
        self.log.info("Rendering... ")
        # Render Write node
        nuke.execute(
            render_node_name,
            int(self.first_frame),
            int(self.last_frame))

        self.log.info("Rendered...")

    def save_file(self):
        import shutil
        with anlib.maintained_selection():
            self.log.info("Saving nodes as file... ")
            # create nk path
            path = os.path.splitext(self.path)[0] + ".nk"
            # save file to the path
            shutil.copyfile(self.instance.context.data["currentFile"], path)

        self.log.info("Nodes exported...")
        return path

    def generate_mov(self, farm=False, **kwargs):
        bake_viewer_process = kwargs["bake_viewer_process"]
        bake_viewer_input_process_node = kwargs[
            "bake_viewer_input_process"]
        viewer_process_override = kwargs[
            "viewer_process_override"]

        baking_view_profile = (
            viewer_process_override or self.get_imageio_baking_profile())

        fps = self.instance.context.data["fps"]

        self.log.debug(">> baking_view_profile `{}`".format(
            baking_view_profile))

        add_tags = kwargs.get("add_tags", [])

        self.log.info(
            "__ add_tags: `{0}`".format(add_tags))

        subset = self.instance.data["subset"]
        self._temp_nodes[subset] = []
        # ---------- start nodes creation

        # Read node
        r_node = nuke.createNode("Read")
        r_node["file"].setValue(self.path_in)
        r_node["first"].setValue(self.first_frame)
        r_node["origfirst"].setValue(self.first_frame)
        r_node["last"].setValue(self.last_frame)
        r_node["origlast"].setValue(self.last_frame)
        r_node["colorspace"].setValue(self.write_colorspace)

        # connect
        self._temp_nodes[subset].append(r_node)
        self.previous_node = r_node
        self.log.debug("Read... `{}`".format(self._temp_nodes[subset]))

        # only create colorspace baking if toggled on
        if bake_viewer_process:
            if bake_viewer_input_process_node:
                # View Process node
                ipn = self.get_view_input_process_node()
                if ipn is not None:
                    # connect
                    ipn.setInput(0, self.previous_node)
                    self._temp_nodes[subset].append(ipn)
                    self.previous_node = ipn
                    self.log.debug(
                        "ViewProcess... `{}`".format(
                            self._temp_nodes[subset]))

            if not self.viewer_lut_raw:
                # OCIODisplay
                dag_node = nuke.createNode("OCIODisplay")
                dag_node["view"].setValue(str(baking_view_profile))

                # connect
                dag_node.setInput(0, self.previous_node)
                self._temp_nodes[subset].append(dag_node)
                self.previous_node = dag_node
                self.log.debug("OCIODisplay... `{}`".format(
                    self._temp_nodes[subset]))

        # Write node
        write_node = nuke.createNode("Write")
        self.log.debug("Path: {}".format(self.path))
        write_node["file"].setValue(str(self.path))
        write_node["file_type"].setValue(str(self.ext))

        # Knobs `meta_codec` and `mov64_codec` are not available on centos.
        # TODO shouldn't this come from settings on outputs?
        try:
            write_node["meta_codec"].setValue("ap4h")
        except Exception:
            self.log.info("`meta_codec` knob was not found")

        try:
            write_node["mov64_codec"].setValue("ap4h")
            write_node["mov64_fps"].setValue(float(fps))
        except Exception:
            self.log.info("`mov64_codec` knob was not found")

        write_node["mov64_write_timecode"].setValue(1)
        write_node["raw"].setValue(1)
        # connect
        write_node.setInput(0, self.previous_node)
        self._temp_nodes[subset].append(write_node)
        self.log.debug("Write... `{}`".format(self._temp_nodes[subset]))
        # ---------- end nodes creation

        # ---------- render or save to nk
        if farm:
            nuke.scriptSave()
            path_nk = self.save_file()
            self.data.update({
                "bakeScriptPath": path_nk,
                "bakeWriteNodeName": write_node.name(),
                "bakeRenderPath": self.path
            })
        else:
            self.render(write_node.name())
            # ---------- generate representation data
            self.get_representation_data(
                tags=["review", "delete"] + add_tags,
                range=True
            )

        self.log.debug("Representation... `{}`".format(self.data))

        self.clean_nodes(subset)
        nuke.scriptSave()

        return self.data

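For context, a rough sketch of how an extractor drives the new ExporterReviewMov (the `plugin_cls` and `instance` names are placeholders for the pyblish plugin and instance; the kwargs names follow `generate_mov` above):

    # Hypothetical output preset matching the kwargs read in generate_mov()
    o_data = {
        "extension": "mov",
        "bake_viewer_process": True,
        "bake_viewer_input_process": True,
        "viewer_process_override": "",
        "add_tags": []
    }

    exporter = ExporterReviewMov(plugin_cls, instance, "baked", o_data["extension"])
    data = exporter.generate_mov(farm=False, **o_data)
    # data["representations"] now holds the baked mov representation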
@@ -4,7 +4,6 @@ import nukescripts
 from openpype.hosts.nuke.api import lib as pnlib
 from avalon.nuke import lib as anlib
 from avalon.nuke import containerise, update_container
-reload(pnlib)

 class LoadBackdropNodes(api.Loader):
     """Loading Published Backdrop nodes (workfile, nukenodes)"""
@@ -4,7 +4,6 @@ from openpype.hosts.nuke.api import lib as pnlib
 import nuke
 import os
 import openpype
-reload(pnlib)

 class ExtractBackdropNode(openpype.api.Extractor):
     """Extracting content of backdrop nodes
@@ -1,16 +1,9 @@
 import os
 import pyblish.api
 from avalon.nuke import lib as anlib
-from openpype.hosts.nuke.api import lib as pnlib
+from openpype.hosts.nuke.api import plugin
 import openpype

-try:
-    from __builtin__ import reload
-except ImportError:
-    from importlib import reload
-
-reload(pnlib)
-

 class ExtractReviewDataLut(openpype.api.Extractor):
     """Extracts movie and thumbnail with baked in luts
@@ -45,7 +38,7 @@ class ExtractReviewDataLut(openpype.api.Extractor):

        # generate data
        with anlib.maintained_selection():
-            exporter = pnlib.ExporterReviewLut(
+            exporter = plugin.ExporterReviewLut(
                self, instance
            )
            data = exporter.generate_lut()
@@ -1,16 +1,9 @@
 import os
 import pyblish.api
 from avalon.nuke import lib as anlib
-from openpype.hosts.nuke.api import lib as pnlib
+from openpype.hosts.nuke.api import plugin
 import openpype

-try:
-    from __builtin__ import reload
-except ImportError:
-    from importlib import reload
-
-reload(pnlib)
-

 class ExtractReviewDataMov(openpype.api.Extractor):
     """Extracts movie and thumbnail with baked in luts
@@ -27,15 +20,15 @@ class ExtractReviewDataMov(openpype.api.Extractor):

    # presets
    viewer_lut_raw = None
-    bake_colorspace_fallback = None
-    bake_colorspace_main = None
+    outputs = {}

    def process(self, instance):
        families = instance.data["families"]
+        task_type = instance.context.data["taskType"]
        self.log.info("Creating staging dir...")

        if "representations" not in instance.data:
-            instance.data["representations"] = list()
+            instance.data["representations"] = []

        staging_dir = os.path.normpath(
            os.path.dirname(instance.data['path']))
@@ -45,28 +38,80 @@ class ExtractReviewDataMov(openpype.api.Extractor):
        self.log.info(
            "StagingDir `{0}`...".format(instance.data["stagingDir"]))

+        self.log.info(self.outputs)
+
        # generate data
        with anlib.maintained_selection():
-            exporter = pnlib.ExporterReviewMov(
-                self, instance)
+            for o_name, o_data in self.outputs.items():
+                f_families = o_data["filter"]["families"]
+                f_task_types = o_data["filter"]["task_types"]

-            if "render.farm" in families:
-                instance.data["families"].remove("review")
-                data = exporter.generate_mov(farm=True)
+                # test if family found in context
+                test_families = any([
+                    # first if exact family set is matching
+                    # make sure only intersection of list is correct
+                    bool(set(families).intersection(f_families)),
+                    # and if families are set at all
+                    # if not then return True because we want this preset
+                    # to be active if nothing is set
+                    bool(not f_families)
+                ])

-                self.log.debug(
-                    "_ data: {}".format(data))
+                # test task types from filter
+                test_task_types = any([
+                    # check if actual task type is defined in task types
+                    # set in preset's filter
+                    bool(task_type in f_task_types),
+                    # and if taskTypes are defined in preset filter
+                    # if not then return True, because we want this filter
+                    # to be active if no taskType is set
+                    bool(not f_task_types)
+                ])

-                instance.data.update({
-                    "bakeRenderPath": data.get("bakeRenderPath"),
-                    "bakeScriptPath": data.get("bakeScriptPath"),
-                    "bakeWriteNodeName": data.get("bakeWriteNodeName")
-                })
-            else:
-                data = exporter.generate_mov()
+                # we need all filters to be positive for this
+                # preset to be activated
+                test_all = all([
+                    test_families,
+                    test_task_types
+                ])

-            # assign to representations
-            instance.data["representations"] += data["representations"]
+                # if it is not positive then skip this preset
+                if not test_all:
+                    continue
+
+                self.log.info(
+                    "Baking output `{}` with settings: {}".format(
+                        o_name, o_data))
+
+                # create exporter instance
+                exporter = plugin.ExporterReviewMov(
+                    self, instance, o_name, o_data["extension"])
+
+                if "render.farm" in families:
+                    if "review" in instance.data["families"]:
+                        instance.data["families"].remove("review")
+
+                    data = exporter.generate_mov(farm=True, **o_data)
+
+                    self.log.debug(
+                        "_ data: {}".format(data))
+
+                    if not instance.data.get("bakingNukeScripts"):
+                        instance.data["bakingNukeScripts"] = []
+
+                    instance.data["bakingNukeScripts"].append({
+                        "bakeRenderPath": data.get("bakeRenderPath"),
+                        "bakeScriptPath": data.get("bakeScriptPath"),
+                        "bakeWriteNodeName": data.get("bakeWriteNodeName")
+                    })
+                else:
+                    data = exporter.generate_mov(**o_data)
+
+                self.log.info(data["representations"])
+
+                # assign to representations
+                instance.data["representations"] += data["representations"]

        self.log.debug(
-            "_ representations: {}".format(instance.data["representations"]))
+            "_ representations: {}".format(
+                instance.data["representations"]))
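The loop above expects `self.outputs` to be filled from project settings. A plausible preset shape (illustrative only; inferred from the keys read in the loop and in `generate_mov`):

    outputs = {
        "baking": {
            "extension": "mov",
            "filter": {"families": ["render"], "task_types": []},
            "bake_viewer_process": True,
            "bake_viewer_input_process": True,
            "viewer_process_override": "",
            "add_tags": []
        }
    }
    # An empty filter list deactivates that filter, so this preset would apply
    # to any task type but only to instances whose families intersect {"render"}.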

682  openpype/hosts/tvpaint/lib.py  (new file)

@@ -0,0 +1,682 @@
import os
import shutil
import collections
from PIL import Image, ImageDraw


def backwards_id_conversion(data_by_layer_id):
    """Convert layer ids to strings from integers."""
    for key in tuple(data_by_layer_id.keys()):
        if not isinstance(key, str):
            data_by_layer_id[str(key)] = data_by_layer_id.pop(key)


def get_frame_filename_template(frame_end, filename_prefix=None, ext=None):
    """Get file template with frame key for rendered files.

    This simple template contains `{frame}{ext}` for sequential outputs
    and `single_file{ext}` for single file output. Output is rendered to a
    temporary folder, so the filename should not matter as the integrator
    changes them.
    """
    frame_padding = 4
    frame_end_str_len = len(str(frame_end))
    if frame_end_str_len > frame_padding:
        frame_padding = frame_end_str_len

    ext = ext or ".png"
    filename_prefix = filename_prefix or ""

    return "{}{{frame:0>{}}}{}".format(filename_prefix, frame_padding, ext)


def get_layer_pos_filename_template(range_end, filename_prefix=None, ext=None):
    filename_prefix = filename_prefix or ""
    new_filename_prefix = filename_prefix + "pos_{pos}."
    return get_frame_filename_template(range_end, new_filename_prefix, ext)

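For orientation, the templates above expand like this (values are illustrative):

    template = get_frame_filename_template(frame_end=1200)
    # template == "{frame:0>4}.png"
    print(template.format(frame=25))               # 0025.png

    layer_template = get_layer_pos_filename_template(1200)
    # layer_template == "pos_{pos}.{frame:0>4}.png"
    print(layer_template.format(pos=2, frame=25))  # pos_2.0025.png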
def _calculate_pre_behavior_copy(
    range_start, exposure_frames, pre_beh,
    layer_frame_start, layer_frame_end,
    output_idx_by_frame_idx
):
    """Calculate frames before first exposure frame based on pre behavior.

    Function may skip whole processing if the layer starts before the
    requested range. In that case pre behavior does not make sense.

    Args:
        range_start(int): First frame of range which should be rendered.
        exposure_frames(list): List of all exposure frames on layer.
        pre_beh(str): Pre behavior of layer (enum of 4 strings).
        layer_frame_start(int): First frame of layer.
        layer_frame_end(int): Last frame of layer.
        output_idx_by_frame_idx(dict): References to already prepared frames
            and where result will be stored.
    """
    # Skip if layer starts before range start
    if layer_frame_start < range_start:
        return

    first_exposure_frame = min(exposure_frames)
    # Skip if first exposure frame is before range start
    if first_exposure_frame < range_start:
        return

    # Calculate frame count of layer
    frame_count = layer_frame_end - layer_frame_start + 1

    if pre_beh == "none":
        # Just fill all frames from range start to layer start with None
        for frame_idx in range(range_start, layer_frame_start):
            output_idx_by_frame_idx[frame_idx] = None

    elif pre_beh == "hold":
        # Keep first frame for whole time
        for frame_idx in range(range_start, layer_frame_start):
            output_idx_by_frame_idx[frame_idx] = first_exposure_frame

    elif pre_beh in ("loop", "repeat"):
        # Loop backwards from last frame of layer
        for frame_idx in reversed(range(range_start, layer_frame_start)):
            eq_frame_idx_offset = (
                (layer_frame_end - frame_idx) % frame_count
            )
            eq_frame_idx = layer_frame_end - eq_frame_idx_offset
            output_idx_by_frame_idx[frame_idx] = eq_frame_idx

    elif pre_beh == "pingpong":
        half_seq_len = frame_count - 1
        seq_len = half_seq_len * 2
        for frame_idx in reversed(range(range_start, layer_frame_start)):
            eq_frame_idx_offset = (layer_frame_start - frame_idx) % seq_len
            if eq_frame_idx_offset > half_seq_len:
                eq_frame_idx_offset = (seq_len - eq_frame_idx_offset)
            eq_frame_idx = layer_frame_start + eq_frame_idx_offset
            output_idx_by_frame_idx[frame_idx] = eq_frame_idx


def _calculate_post_behavior_copy(
    range_end, exposure_frames, post_beh,
    layer_frame_start, layer_frame_end,
    output_idx_by_frame_idx
):
    """Calculate frames after last frame of layer based on post behavior.

    Function may skip whole processing if last layer frame is after range_end.
    In that case post behavior does not make sense.

    Args:
        range_end(int): Last frame of range which should be rendered.
        exposure_frames(list): List of all exposure frames on layer.
        post_beh(str): Post behavior of layer (enum of 4 strings).
        layer_frame_start(int): First frame of layer.
        layer_frame_end(int): Last frame of layer.
        output_idx_by_frame_idx(dict): References to already prepared frames
            and where result will be stored.
    """
    # Check if last layer frame is after range end
    if layer_frame_end >= range_end:
        return

    last_exposure_frame = max(exposure_frames)
    # Skip if last exposure frame is after range end
    # - this is probably irrelevant with layer frame end check?
    if last_exposure_frame >= range_end:
        return

    # Calculate frame count of layer
    frame_count = layer_frame_end - layer_frame_start + 1

    if post_beh == "none":
        # Just fill all frames from last exposure frame to range end with None
        for frame_idx in range(layer_frame_end + 1, range_end + 1):
            output_idx_by_frame_idx[frame_idx] = None

    elif post_beh == "hold":
        # Keep last exposure frame to the end
        for frame_idx in range(layer_frame_end + 1, range_end + 1):
            output_idx_by_frame_idx[frame_idx] = last_exposure_frame

    elif post_beh in ("loop", "repeat"):
        # Loop backwards from last frame of layer
        for frame_idx in range(layer_frame_end + 1, range_end + 1):
            eq_frame_idx = frame_idx % frame_count
            output_idx_by_frame_idx[frame_idx] = eq_frame_idx

    elif post_beh == "pingpong":
        half_seq_len = frame_count - 1
        seq_len = half_seq_len * 2
        for frame_idx in range(layer_frame_end + 1, range_end + 1):
            eq_frame_idx_offset = (frame_idx - layer_frame_end) % seq_len
            if eq_frame_idx_offset > half_seq_len:
                eq_frame_idx_offset = seq_len - eq_frame_idx_offset
            eq_frame_idx = layer_frame_end - eq_frame_idx_offset
            output_idx_by_frame_idx[frame_idx] = eq_frame_idx

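A small worked example of the "pingpong" arithmetic in the post-behavior branch above (the numbers are mine, not from the source): a layer spanning frames 10-13 gives frame_count 4, half_seq_len 3 and seq_len 6, so frames past the layer end bounce back and forth through the layer:

    layer_frame_start, layer_frame_end = 10, 13
    frame_count = layer_frame_end - layer_frame_start + 1  # 4
    half_seq_len = frame_count - 1                         # 3
    seq_len = half_seq_len * 2                             # 6

    for frame_idx in range(14, 20):
        offset = (frame_idx - layer_frame_end) % seq_len
        if offset > half_seq_len:
            offset = seq_len - offset
        print(frame_idx, "->", layer_frame_end - offset)
    # 14 -> 12, 15 -> 11, 16 -> 10, 17 -> 11, 18 -> 12, 19 -> 13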
def _calculate_in_range_frames(
    range_start, range_end,
    exposure_frames, layer_frame_end,
    output_idx_by_frame_idx
):
    """Calculate frame references in defined range.

    Fills direct references for exposure frames inside the range and holds
    the previous exposure frame for the in-between frames.

    Args:
        range_start(int): First frame of range which should be rendered.
        range_end(int): Last frame of range which should be rendered.
        exposure_frames(list): List of all exposure frames on layer.
        layer_frame_end(int): Last frame of layer.
        output_idx_by_frame_idx(dict): References to already prepared frames
            and where result will be stored.
    """
    # Calculate in range frames
    in_range_frames = []
    for frame_idx in exposure_frames:
        if range_start <= frame_idx <= range_end:
            output_idx_by_frame_idx[frame_idx] = frame_idx
            in_range_frames.append(frame_idx)

    if in_range_frames:
        first_in_range_frame = min(in_range_frames)
        # Calculate frames from first exposure frames to range end or last
        # frame of layer (post behavior should be calculated since that time)
        previous_exposure = first_in_range_frame
        for frame_idx in range(first_in_range_frame, range_end + 1):
            if frame_idx > layer_frame_end:
                break

            if frame_idx in exposure_frames:
                previous_exposure = frame_idx
            else:
                output_idx_by_frame_idx[frame_idx] = previous_exposure

    # There can be frames before first exposure frame in range
    # First check if we don't already have first range frame filled
    if range_start in output_idx_by_frame_idx:
        return

    first_exposure_frame = min(exposure_frames)
    last_exposure_frame = max(exposure_frames)
    # Check if first exposure frame is smaller than defined range
    # if not then skip
    if first_exposure_frame >= range_start:
        return

    # Check if last exposure frame is also before range start
    # in that case we can't use fill frames before out range
    if last_exposure_frame < range_start:
        return

    closest_exposure_frame = first_exposure_frame
    for frame_idx in exposure_frames:
        if frame_idx >= range_start:
            break
        if frame_idx > closest_exposure_frame:
            closest_exposure_frame = frame_idx

    output_idx_by_frame_idx[closest_exposure_frame] = closest_exposure_frame
    for frame_idx in range(range_start, range_end + 1):
        if frame_idx in output_idx_by_frame_idx:
            break
        output_idx_by_frame_idx[frame_idx] = closest_exposure_frame

def _cleanup_frame_references(output_idx_by_frame_idx):
    """Clean up chained frame references.

    Resolves indirect references so each frame points directly at a frame
    that will be rendered.
    ```
    // Example input
    {
        1: 1,
        2: 1,
        3: 2
    }
    // Result
    {
        1: 1,
        2: 1,
        3: 1 // Changed reference to final rendered frame
    }
    ```
    Result is a dictionary where keys lead to frames that should be rendered.
    """
    for frame_idx in tuple(output_idx_by_frame_idx.keys()):
        reference_idx = output_idx_by_frame_idx[frame_idx]
        # Skip transparent frames
        if reference_idx is None or reference_idx == frame_idx:
            continue

        real_reference_idx = reference_idx
        _tmp_reference_idx = reference_idx
        while True:
            _temp = output_idx_by_frame_idx[_tmp_reference_idx]
            if _temp == _tmp_reference_idx:
                real_reference_idx = _tmp_reference_idx
                break
            _tmp_reference_idx = _temp

        if real_reference_idx != reference_idx:
            output_idx_by_frame_idx[frame_idx] = real_reference_idx

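The while loop above walks reference chains until it reaches a self-reference. The same idea as a standalone sketch (illustrative data):

    refs = {1: 1, 2: 1, 3: 2, 4: 3}
    for frame_idx in tuple(refs.keys()):
        ref_idx = refs[frame_idx]
        while ref_idx is not None and refs[ref_idx] != ref_idx:
            ref_idx = refs[ref_idx]
        refs[frame_idx] = ref_idx
    print(refs)  # {1: 1, 2: 1, 3: 1, 4: 1}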
def _cleanup_out_range_frames(output_idx_by_frame_idx, range_start, range_end):
    """Cleanup frame references to frames out of passed range.

    First available frame in range is used
    ```
    // Example input. Range 2-3
    {
        1: 1,
        2: 1,
        3: 1
    }
    // Result
    {
        2: 2, // Redirected to self as the first frame referencing out of range
        3: 2  // Redirected to the first redirected frame
    }
    ```
    Result is a dictionary where keys lead to frames that should be rendered.
    """
    in_range_frames_by_out_frames = collections.defaultdict(set)
    out_range_frames = set()
    for frame_idx in tuple(output_idx_by_frame_idx.keys()):
        # Skip frames that are already out of range
        if frame_idx < range_start or frame_idx > range_end:
            out_range_frames.add(frame_idx)
            continue

        reference_idx = output_idx_by_frame_idx[frame_idx]
        # Skip transparent frames
        if reference_idx is None:
            continue

        # Collect references pointing out of range
        if reference_idx < range_start or reference_idx > range_end:
            in_range_frames_by_out_frames[reference_idx].add(frame_idx)

    for reference_idx in tuple(in_range_frames_by_out_frames.keys()):
        frame_indexes = in_range_frames_by_out_frames.pop(reference_idx)
        new_reference = None
        for frame_idx in frame_indexes:
            if new_reference is None:
                new_reference = frame_idx
            output_idx_by_frame_idx[frame_idx] = new_reference

    # Finally remove out of range frames
    for frame_idx in out_range_frames:
        output_idx_by_frame_idx.pop(frame_idx)

def calculate_layer_frame_references(
    range_start, range_end,
    layer_frame_start,
    layer_frame_end,
    exposure_frames,
    pre_beh, post_beh
):
    """Calculate frame references for one layer based on its data.

    Output is a dictionary where each frame index references the rendered
    frame index it should use. A frame that should be rendered references
    itself.

    ```
    // Example output
    {
        1: 1, // Reference to self - will be rendered
        2: 1, // Reference to frame 1 - will be copied
        3: 1, // Reference to frame 1 - will be copied
        4: 4, // Reference to self - will be rendered
        ...
        20: 4 // Reference to frame 4 - will be copied
        21: None // Has reference to None - transparent image
    }
    ```

    Args:
        range_start(int): First frame of range which should be rendered.
        range_end(int): Last frame of range which should be rendered.
        layer_frame_start(int): First frame of layer.
        layer_frame_end(int): Last frame of layer.
        exposure_frames(list): List of all exposure frames on layer.
        pre_beh(str): Pre behavior of layer (enum of 4 strings).
        post_beh(str): Post behavior of layer (enum of 4 strings).
    """
    # Output variable
    output_idx_by_frame_idx = {}
    # Skip if layer does not have any exposure frames
    if not exposure_frames:
        return output_idx_by_frame_idx

    # First calculate in range frames
    _calculate_in_range_frames(
        range_start, range_end,
        exposure_frames, layer_frame_end,
        output_idx_by_frame_idx
    )
    # Calculate frames by pre behavior of layer
    _calculate_pre_behavior_copy(
        range_start, exposure_frames, pre_beh,
        layer_frame_start, layer_frame_end,
        output_idx_by_frame_idx
    )
    # Calculate frames by post behavior of layer
    _calculate_post_behavior_copy(
        range_end, exposure_frames, post_beh,
        layer_frame_start, layer_frame_end,
        output_idx_by_frame_idx
    )
    # Cleanup of referenced frames
    _cleanup_frame_references(output_idx_by_frame_idx)

    # Remove frames out of range
    _cleanup_out_range_frames(output_idx_by_frame_idx, range_start, range_end)

    return output_idx_by_frame_idx

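A quick usage sketch of the function above with hypothetical values: a layer living on frames 5-8 with exposures on 5 and 7, rendered for range 5-10 with "hold" behaviors:

    refs = calculate_layer_frame_references(
        range_start=5, range_end=10,
        layer_frame_start=5, layer_frame_end=8,
        exposure_frames=[5, 7],
        pre_beh="hold", post_beh="hold"
    )
    # Expected: {5: 5, 6: 5, 7: 7, 8: 7, 9: 7, 10: 7}
    # 5 and 7 are rendered; everything else is copied from the previous
    # exposure, and the "hold" post behavior extends frame 7 to the range end.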
def calculate_layers_extraction_data(
|
||||
layers_data,
|
||||
exposure_frames_by_layer_id,
|
||||
behavior_by_layer_id,
|
||||
range_start,
|
||||
range_end,
|
||||
skip_not_visible=True,
|
||||
filename_prefix=None,
|
||||
ext=None
|
||||
):
|
||||
"""Calculate extraction data for passed layers data.
|
||||
|
||||
```
|
||||
{
|
||||
<layer_id>: {
|
||||
"frame_references": {...},
|
||||
"filenames_by_frame_index": {...}
|
||||
},
|
||||
...
|
||||
}
|
||||
```
|
||||
|
||||
Frame references contains frame index reference to rendered frame index.
|
||||
|
||||
Filename by frame index represents filename under which should be frame
|
||||
stored. Directory is not handled here because each usage may need different
|
||||
approach.
|
||||
|
||||
Args:
|
||||
layers_data(list): Layers data loaded from TVPaint.
|
||||
exposure_frames_by_layer_id(dict): Exposure frames of layers stored by
|
||||
layer id.
|
||||
behavior_by_layer_id(dict): Pre and Post behavior of layers stored by
|
||||
layer id.
|
||||
range_start(int): First frame of rendered range.
|
||||
range_end(int): Last frame of rendered range.
|
||||
skip_not_visible(bool): Skip calculations for hidden layers (Skipped
|
||||
by default).
|
||||
filename_prefix(str): Prefix before filename.
|
||||
ext(str): Extension which filenames will have ('.png' is default).
|
||||
|
||||
Returns:
|
||||
dict: Prepared data for rendering by layer position.
|
||||
"""
|
||||
# Make sure layer ids are strings
|
||||
# backwards compatibility when layer ids were integers
|
||||
backwards_id_conversion(exposure_frames_by_layer_id)
|
||||
backwards_id_conversion(behavior_by_layer_id)
|
||||
|
||||
layer_template = get_layer_pos_filename_template(
|
||||
range_end, filename_prefix, ext
|
||||
)
|
||||
output = {}
|
||||
for layer_data in layers_data:
|
||||
if skip_not_visible and not layer_data["visible"]:
|
||||
continue
|
||||
|
||||
orig_layer_id = layer_data["layer_id"]
|
||||
layer_id = str(orig_layer_id)
|
||||
|
||||
# Skip if does not have any exposure frames (empty layer)
|
||||
exposure_frames = exposure_frames_by_layer_id[layer_id]
|
||||
if not exposure_frames:
|
||||
continue
|
||||
|
||||
layer_position = layer_data["position"]
|
||||
layer_frame_start = layer_data["frame_start"]
|
||||
layer_frame_end = layer_data["frame_end"]
|
||||
|
||||
layer_behavior = behavior_by_layer_id[layer_id]
|
||||
|
||||
pre_behavior = layer_behavior["pre"]
|
||||
post_behavior = layer_behavior["post"]
|
||||
|
||||
frame_references = calculate_layer_frame_references(
|
||||
range_start, range_end,
|
||||
layer_frame_start,
|
||||
layer_frame_end,
|
||||
exposure_frames,
|
||||
pre_behavior, post_behavior
|
||||
)
|
||||
# All values in 'frame_references' reference to a frame that must be
|
||||
# rendered out
|
||||
frames_to_render = set(frame_references.values())
|
||||
# Remove 'None' reference (transparent image)
|
||||
if None in frames_to_render:
|
||||
frames_to_render.remove(None)
|
||||
|
||||
# Skip layer if has nothing to render
|
||||
if not frames_to_render:
|
||||
continue
|
||||
|
||||
# All filenames that should be as output (not final output)
|
||||
filename_frames = (
|
||||
set(range(range_start, range_end + 1))
|
||||
| frames_to_render
|
||||
)
|
||||
filenames_by_frame_index = {}
|
||||
for frame_idx in filename_frames:
|
||||
filenames_by_frame_index[frame_idx] = layer_template.format(
|
||||
pos=layer_position,
|
||||
frame=frame_idx
|
||||
)
|
||||
|
||||
# Store objects under the layer id
|
||||
output[orig_layer_id] = {
|
||||
"frame_references": frame_references,
|
||||
"filenames_by_frame_index": filenames_by_frame_index
|
||||
}
|
||||
return output
|
||||
|
||||
|
||||
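# A minimal usage sketch of 'calculate_layers_extraction_data' for a single
# visible layer. All values below are hypothetical stand-ins for data that
# normally comes from the TVPaint george helpers.
def _example_extraction_data():
    layers_data = [{
        "layer_id": 1,
        "position": 0,
        "visible": True,
        "frame_start": 0,
        "frame_end": 10,
        "name": "L001_Hero_beauty",
    }]
    extraction_data = calculate_layers_extraction_data(
        layers_data,
        exposure_frames_by_layer_id={"1": [0, 5]},
        behavior_by_layer_id={"1": {"pre": "hold", "post": "hold"}},
        range_start=0,
        range_end=10,
    )
    # 'frame_references' points each frame index at the exposure frame that
    # actually gets rendered, 'filenames_by_frame_index' gives the temporary
    # filename for each frame of the layer.
    return extraction_data[1]

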
def create_transparent_image_from_source(src_filepath, dst_filepath):
    """Create transparent image of same type and size as source image."""
    img_obj = Image.open(src_filepath)
    painter = ImageDraw.Draw(img_obj)
    painter.rectangle((0, 0, *img_obj.size), fill=(0, 0, 0, 0))
    img_obj.save(dst_filepath)


def fill_reference_frames(frame_references, filepaths_by_frame):
    # Fill frames which reference an already rendered frame
    for frame_idx, ref_idx in frame_references.items():
        # A frame referencing itself should be rendered and used as source
        # and reference indexes with None can't be filled
        if ref_idx is None or frame_idx == ref_idx:
            continue

        # Get source and destination filepaths
        src_filepath = filepaths_by_frame[ref_idx]
        dst_filepath = filepaths_by_frame[frame_idx]

        if hasattr(os, "link"):
            os.link(src_filepath, dst_filepath)
        else:
            shutil.copy(src_filepath, dst_filepath)


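# A short sketch of 'fill_reference_frames' inputs, with hypothetical paths
# (assumes '0000.png' already exists in 'tmp_dir'): frame 1 references frame
# 0, so the rendered file of frame 0 is hardlinked (or copied) to the path of
# frame 1. Frame 0 references itself (it is the rendered source) and frame 2
# has a 'None' reference (transparent frame), so neither of them is touched.
def _example_fill_reference_frames(tmp_dir):
    frame_references = {0: 0, 1: 0, 2: None}
    filepaths_by_frame = {
        0: os.path.join(tmp_dir, "0000.png"),
        1: os.path.join(tmp_dir, "0001.png"),
        2: None,
    }
    fill_reference_frames(frame_references, filepaths_by_frame)

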
def copy_render_file(src_path, dst_path):
    """Create a copy of an image file."""
    if hasattr(os, "link"):
        os.link(src_path, dst_path)
    else:
        shutil.copy(src_path, dst_path)


def cleanup_rendered_layers(filepaths_by_layer_id):
    """Delete rendered files of individual layers after compositing."""
    # Collect all filepaths from data
    all_filepaths = []
    for filepaths_by_frame in filepaths_by_layer_id.values():
        all_filepaths.extend(filepaths_by_frame.values())

    # Remove each unique filepath that exists
    for filepath in set(all_filepaths):
        if filepath is not None and os.path.exists(filepath):
            os.remove(filepath)


def composite_rendered_layers(
    layers_data, filepaths_by_layer_id,
    range_start, range_end,
    dst_filepaths_by_frame, cleanup=True
):
    """Composite multiple rendered layers by their position.

    Result is a single frame sequence with transparency matching the content
    created in TVPaint. Missing source filepaths are replaced with
    transparent images, but at least one image must be rendered and exist.

    The function can be used even if a single layer was rendered, to fill
    transparent filepaths.

    Args:
        layers_data(list): Layers data loaded from TVPaint.
        filepaths_by_layer_id(dict): Rendered filepaths stored by frame index
            per layer id. Used as source for compositing.
        range_start(int): First frame of rendered range.
        range_end(int): Last frame of rendered range.
        dst_filepaths_by_frame(dict): Output filepaths by frame where final
            image after compositing will be stored. Paths must not clash with
            source filepaths.
        cleanup(bool): Remove all source filepaths when done with compositing.
    """
    # Prepare layers by their position
    # - position tells in which order compositing will happen
    layer_ids_by_position = {}
    for layer in layers_data:
        layer_position = layer["position"]
        layer_ids_by_position[layer_position] = layer["layer_id"]

    # Sort layer positions
    sorted_positions = tuple(sorted(layer_ids_by_position.keys()))
    # Collect destination filepaths without any rendered content
    # - transparent images will be created for them
    transparent_filepaths = set()
    # Store first final filepath
    first_dst_filepath = None
    for frame_idx in range(range_start, range_end + 1):
        dst_filepath = dst_filepaths_by_frame[frame_idx]
        src_filepaths = []
        for layer_position in sorted_positions:
            layer_id = layer_ids_by_position[layer_position]
            filepaths_by_frame = filepaths_by_layer_id[layer_id]
            src_filepath = filepaths_by_frame.get(frame_idx)
            if src_filepath is not None:
                src_filepaths.append(src_filepath)

        if not src_filepaths:
            transparent_filepaths.add(dst_filepath)
            continue

        # Store first destination filepath to be used for transparent images
        if first_dst_filepath is None:
            first_dst_filepath = dst_filepath

        if len(src_filepaths) == 1:
            src_filepath = src_filepaths[0]
            if cleanup:
                os.rename(src_filepath, dst_filepath)
            else:
                copy_render_file(src_filepath, dst_filepath)

        else:
            composite_images(src_filepaths, dst_filepath)

    # Store first transparent filepath to be able to copy it
    transparent_filepath = None
    for dst_filepath in transparent_filepaths:
        if transparent_filepath is None:
            create_transparent_image_from_source(
                first_dst_filepath, dst_filepath
            )
            transparent_filepath = dst_filepath
        else:
            copy_render_file(transparent_filepath, dst_filepath)

    # Remove all files that were used as source for compositing
    if cleanup:
        cleanup_rendered_layers(filepaths_by_layer_id)


def composite_images(input_image_paths, output_filepath):
    """Composite images in order from passed list.

    Raises:
        ValueError: When the input list is empty.
    """
    if not input_image_paths:
        raise ValueError("Nothing to composite.")

    img_obj = None
    for image_filepath in input_image_paths:
        _img_obj = Image.open(image_filepath)
        if img_obj is None:
            img_obj = _img_obj
        else:
            img_obj.alpha_composite(_img_obj)
    img_obj.save(output_filepath)


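# A short usage sketch of 'composite_images' with hypothetical paths: the
# first path is opened as the base image and every following path is pasted
# over it with Pillow's 'Image.alpha_composite', so later paths end up on
# top. All inputs are assumed to be RGBA images of the same size.
def _example_composite(output_dir):
    composite_images(
        [
            os.path.join(output_dir, "pos_1.0001.png"),  # base image
            os.path.join(output_dir, "pos_0.0001.png"),  # pasted over base
        ],
        os.path.join(output_dir, "0001.png"),
    )

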
def rename_filepaths_by_frame_start(
    filepaths_by_frame, range_start, range_end, new_frame_start
):
    """Change frames in filenames of finished images to new frame start."""
    # Skip renaming if source first frame is same as destination first frame
    if range_start == new_frame_start:
        return filepaths_by_frame

    # Calculate frame end
    new_frame_end = range_end + (new_frame_start - range_start)
    # Create filename template
    filename_template = get_frame_filename_template(
        max(range_end, new_frame_end)
    )

    # Use different ranges based on Mark In and output Frame Start values
    # - this is to make sure that filename renaming won't affect files that
    #   are not renamed yet
    if range_start < new_frame_start:
        source_range = range(range_end, range_start - 1, -1)
        output_range = range(new_frame_end, new_frame_start - 1, -1)
    else:
        # This situation is less likely, as the frame start will in most
        # cases be higher than Mark In.
        source_range = range(range_start, range_end + 1)
        output_range = range(new_frame_start, new_frame_end + 1)

    new_dst_filepaths = {}
    for src_frame, dst_frame in zip(source_range, output_range):
        src_filepath = filepaths_by_frame[src_frame]
        src_dirpath = os.path.dirname(src_filepath)
        dst_filename = filename_template.format(frame=dst_frame)
        dst_filepath = os.path.join(src_dirpath, dst_filename)

        os.rename(src_filepath, dst_filepath)

        new_dst_filepaths[dst_frame] = dst_filepath
    return new_dst_filepaths
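

# A worked sketch of 'rename_filepaths_by_frame_start' with hypothetical
# paths: a sequence rendered for Mark In/Out 2-4 is renumbered to start at
# frame 101. Files are renamed from the highest frame down so sources are
# never overwritten; the result maps frames 101-103 to '0101.png'-'0103.png'.
def _example_rename(staging_dir):
    filepaths_by_frame = {
        frame: os.path.join(staging_dir, "{:0>4}.png".format(frame))
        for frame in range(2, 5)
    }
    return rename_filepaths_by_frame_start(
        filepaths_by_frame, range_start=2, range_end=4, new_frame_start=101
    )
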
@@ -1,12 +1,18 @@
import os
import shutil
import copy
import tempfile

import pyblish.api
from avalon.tvpaint import lib
from openpype.hosts.tvpaint.api.lib import composite_images
from PIL import Image, ImageDraw
from openpype.hosts.tvpaint.lib import (
    calculate_layers_extraction_data,
    get_frame_filename_template,
    fill_reference_frames,
    composite_rendered_layers,
    rename_filepaths_by_frame_start
)
from PIL import Image


class ExtractSequence(pyblish.api.Extractor):
@@ -111,14 +117,6 @@ class ExtractSequence(pyblish.api.Extractor):

        # -------------------------------------------------------------------

        filename_template = self._get_filename_template(
            # Use the biggest number
            max(mark_out, frame_end)
        )
        ext = os.path.splitext(filename_template)[1].replace(".", "")

        self.log.debug("Using file template \"{}\"".format(filename_template))

        # Save to staging dir
        output_dir = instance.data.get("stagingDir")
        if not output_dir:
@@ -133,30 +131,30 @@ class ExtractSequence(pyblish.api.Extractor):
        )

        if instance.data["family"] == "review":
            output_filenames, thumbnail_fullpath = self.render_review(
                filename_template, output_dir, mark_in, mark_out,
                scene_bg_color
            result = self.render_review(
                output_dir, mark_in, mark_out, scene_bg_color
            )
        else:
            # Render output
            output_filenames, thumbnail_fullpath = self.render(
                filename_template, output_dir,
                mark_in, mark_out,
                filtered_layers
            result = self.render(
                output_dir, mark_in, mark_out, filtered_layers
            )

        output_filepaths_by_frame_idx, thumbnail_fullpath = result

        # Change scene frame Start back to previous value
        lib.execute_george("tv_startframe {}".format(scene_start_frame))

        # Sequence of one frame
        if not output_filenames:
        if not output_filepaths_by_frame_idx:
            self.log.warning("Extractor did not create any output.")
            return

        repre_files = self._rename_output_files(
            filename_template, output_dir,
            mark_in, mark_out,
            output_frame_start, output_frame_end
            output_filepaths_by_frame_idx,
            mark_in,
            mark_out,
            output_frame_start
        )

        # Fill tags and new families
@@ -169,9 +167,11 @@ class ExtractSequence(pyblish.api.Extractor):
        if single_file:
            repre_files = repre_files[0]

        # Extension is hardcoded
        # - changing the extension would require a code change
        new_repre = {
            "name": ext,
            "ext": ext,
            "name": "png",
            "ext": "png",
            "files": repre_files,
            "stagingDir": output_dir,
            "tags": tags
@@ -206,69 +206,28 @@ class ExtractSequence(pyblish.api.Extractor):
        }
        instance.data["representations"].append(thumbnail_repre)

    def _get_filename_template(self, frame_end):
        """Get file template for rendered files.

        This is a simple template containing `{frame}{ext}` for sequential
        outputs and `single_file{ext}` for single file output. Output is
        rendered to a temporary folder so filenames should not matter, as
        the integrator changes them.
        """
        frame_padding = 4
        frame_end_str_len = len(str(frame_end))
        if frame_end_str_len > frame_padding:
            frame_padding = frame_end_str_len

        return "{{frame:0>{}}}".format(frame_padding) + ".png"

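        # For example (a sketch): the zero padding grows with the highest
        # frame number that has to be written out:
        #     self._get_filename_template(999)   -> "{frame:0>4}.png"
        #     self._get_filename_template(12345) -> "{frame:0>5}.png"
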
    def _rename_output_files(
        self, filename_template, output_dir,
        mark_in, mark_out, output_frame_start, output_frame_end
        self, filepaths_by_frame, mark_in, mark_out, output_frame_start
    ):
        # Use different ranges based on Mark In and output Frame Start values
        # - this is to make sure that filename renaming won't affect files
        #   that are not renamed yet
        mark_start_is_less = bool(mark_in < output_frame_start)
        if mark_start_is_less:
            marks_range = range(mark_out, mark_in - 1, -1)
            frames_range = range(output_frame_end, output_frame_start - 1, -1)
        else:
            # This situation is less likely, as the frame start will in most
            # cases be higher than Mark In.
            marks_range = range(mark_in, mark_out + 1)
            frames_range = range(output_frame_start, output_frame_end + 1)
        new_filepaths_by_frame = rename_filepaths_by_frame_start(
            filepaths_by_frame, mark_in, mark_out, output_frame_start
        )

        repre_filepaths = []
        for mark, frame in zip(marks_range, frames_range):
            new_filename = filename_template.format(frame=frame)
            new_filepath = os.path.join(output_dir, new_filename)
        repre_filenames = []
        for filepath in new_filepaths_by_frame.values():
            repre_filenames.append(os.path.basename(filepath))

            repre_filepaths.append(new_filepath)
        if mark_in < output_frame_start:
            repre_filenames = list(reversed(repre_filenames))

            if mark != frame:
                old_filename = filename_template.format(frame=mark)
                old_filepath = os.path.join(output_dir, old_filename)
                os.rename(old_filepath, new_filepath)

        # Reverse order of representation files if ranges were reversed
        if mark_start_is_less:
            repre_filepaths = list(reversed(repre_filepaths))

        return [
            os.path.basename(path)
            for path in repre_filepaths
        ]
        return repre_filenames

    def render_review(
        self, filename_template, output_dir, mark_in, mark_out, scene_bg_color
        self, output_dir, mark_in, mark_out, scene_bg_color
    ):
        """ Export images from TVPaint using `tv_savesequence` command.

        Args:
            filename_template (str): Filename template of an output. Template
                should already contain extension. Template may contain only
                keyword argument `{frame}` or index argument (for same value).
                Extension in template must match `save_mode`.
            output_dir (str): Directory where files will be stored.
            mark_in (int): Starting frame index from which export will begin.
            mark_out (int): On which frame index export will end.
@@ -279,6 +238,8 @@
            tuple: With 2 items: first is a list of filenames, second is a
                path to the thumbnail.
        """
        filename_template = get_frame_filename_template(mark_out)

        self.log.debug("Preparing data for rendering.")
        first_frame_filepath = os.path.join(
            output_dir,
@@ -313,12 +274,13 @@
        lib.execute_george_through_file("\n".join(george_script_lines))

        first_frame_filepath = None
        output_filenames = []
        for frame in range(mark_in, mark_out + 1):
            filename = filename_template.format(frame=frame)
            output_filenames.append(filename)

        output_filepaths_by_frame_idx = {}
        for frame_idx in range(mark_in, mark_out + 1):
            filename = filename_template.format(frame=frame_idx)
            filepath = os.path.join(output_dir, filename)

            output_filepaths_by_frame_idx[frame_idx] = filepath

            if not os.path.exists(filepath):
                raise AssertionError(
                    "Output was not rendered. File was not found {}".format(
@@ -337,16 +299,12 @@
            source_img = source_img.convert("RGB")
            source_img.save(thumbnail_filepath)

        return output_filenames, thumbnail_filepath
        return output_filepaths_by_frame_idx, thumbnail_filepath

    def render(self, filename_template, output_dir, mark_in, mark_out, layers):
    def render(self, output_dir, mark_in, mark_out, layers):
        """ Export images from TVPaint.

        Args:
            filename_template (str): Filename template of an output. Template
                should already contain extension. Template may contain only
                keyword argument `{frame}` or index argument (for same value).
                Extension in template must match `save_mode`.
            output_dir (str): Directory where files will be stored.
            mark_in (int): Starting frame index from which export will begin.
            mark_out (int): On which frame index export will end.
@@ -360,12 +318,15 @@

        # Map layers by position
        layers_by_position = {}
        layers_by_id = {}
        layer_ids = []
        for layer in layers:
            layer_id = layer["layer_id"]
            position = layer["position"]
            layers_by_position[position] = layer
            layers_by_id[layer_id] = layer

            layer_ids.append(layer["layer_id"])
            layer_ids.append(layer_id)

        # Sort layer positions in reverse order
        sorted_positions = list(reversed(sorted(layers_by_position.keys())))
@@ -374,59 +335,45 @@

        self.log.debug("Collecting pre/post behavior of individual layers.")
        behavior_by_layer_id = lib.get_layers_pre_post_behavior(layer_ids)

        tmp_filename_template = "pos_{pos}." + filename_template

        files_by_position = {}
        for position in sorted_positions:
            layer = layers_by_position[position]
            behavior = behavior_by_layer_id[layer["layer_id"]]

            files_by_frames = self._render_layer(
                layer,
                tmp_filename_template,
                output_dir,
                behavior,
                mark_in,
                mark_out
            )
            if files_by_frames:
                files_by_position[position] = files_by_frames
            else:
                self.log.warning((
                    "Skipped layer \"{}\". Probably out of Mark In/Out range."
                ).format(layer["name"]))

        if not files_by_position:
            layer_names = set(layer["name"] for layer in layers)
            joined_names = ", ".join(
                ["\"{}\"".format(name) for name in layer_names]
            )
            self.log.warning(
                "Layers {} do not have content in range {} - {}".format(
                    joined_names, mark_in, mark_out
                )
            )
            return [], None

        output_filepaths = self._composite_files(
            files_by_position,
            mark_in,
            mark_out,
            filename_template,
            output_dir
        exposure_frames_by_layer_id = lib.get_layers_exposure_frames(
            layer_ids, layers
        )
        self._cleanup_tmp_files(files_by_position)

        output_filenames = [
            os.path.basename(filepath)
            for filepath in output_filepaths
        ]
        extraction_data_by_layer_id = calculate_layers_extraction_data(
            layers,
            exposure_frames_by_layer_id,
            behavior_by_layer_id,
            mark_in,
            mark_out
        )
        # Render layers
        filepaths_by_layer_id = {}
        for layer_id, render_data in extraction_data_by_layer_id.items():
            layer = layers_by_id[layer_id]
            filepaths_by_layer_id[layer_id] = self._render_layer(
                render_data, layer, output_dir
            )

        # Prepare final filepaths where compositing should store the result
        output_filepaths_by_frame = {}
        thumbnail_src_filepath = None
        if output_filepaths:
            thumbnail_src_filepath = output_filepaths[0]
        finale_template = get_frame_filename_template(mark_out)
        for frame_idx in range(mark_in, mark_out + 1):
            filename = finale_template.format(frame=frame_idx)

            filepath = os.path.join(output_dir, filename)
            output_filepaths_by_frame[frame_idx] = filepath

            if thumbnail_src_filepath is None:
                thumbnail_src_filepath = filepath

        self.log.info("Started compositing of layer frames.")
        composite_rendered_layers(
            layers, filepaths_by_layer_id,
            mark_in, mark_out,
            output_filepaths_by_frame
        )

        self.log.info("Compositing finished")
        thumbnail_filepath = None
        if thumbnail_src_filepath and os.path.exists(thumbnail_src_filepath):
            source_img = Image.open(thumbnail_src_filepath)
@@ -449,7 +396,7 @@
            ).format(source_img.mode))
            source_img.save(thumbnail_filepath)

        return output_filenames, thumbnail_filepath
        return output_filepaths_by_frame, thumbnail_filepath

    def _get_review_bg_color(self):
        red = green = blue = 255
@@ -460,338 +407,43 @@
            red, green, blue = self.review_bg
        return (red, green, blue)

    def _render_layer(
        self,
        layer,
        tmp_filename_template,
        output_dir,
        behavior,
        mark_in_index,
        mark_out_index
    ):
    def _render_layer(self, render_data, layer, output_dir):
        frame_references = render_data["frame_references"]
        filenames_by_frame_index = render_data["filenames_by_frame_index"]

        layer_id = layer["layer_id"]
        frame_start_index = layer["frame_start"]
        frame_end_index = layer["frame_end"]

        pre_behavior = behavior["pre"]
        post_behavior = behavior["post"]

        # Check if layer is before mark in
        if frame_end_index < mark_in_index:
            # Skip layer if post behavior is "none"
            if post_behavior == "none":
                return {}

        # Check if layer is after mark out
        elif frame_start_index > mark_out_index:
            # Skip layer if pre behavior is "none"
            if pre_behavior == "none":
                return {}

        exposure_frames = lib.get_exposure_frames(
            layer_id, frame_start_index, frame_end_index
        )

        if frame_start_index not in exposure_frames:
            exposure_frames.append(frame_start_index)

        layer_files_by_frame = {}
        george_script_lines = [
            "tv_layerset {}".format(layer_id),
            "tv_SaveMode \"PNG\""
        ]
        layer_position = layer["position"]

        for frame_idx in exposure_frames:
            filename = tmp_filename_template.format(
                pos=layer_position,
                frame=frame_idx
            )
        filepaths_by_frame = {}
        frames_to_render = []
        for frame_idx, ref_idx in frame_references.items():
            # 'None' reference is skipped because it does not have a source
            if ref_idx is None:
                filepaths_by_frame[frame_idx] = None
                continue
            filename = filenames_by_frame_index[frame_idx]
            dst_path = "/".join([output_dir, filename])
            layer_files_by_frame[frame_idx] = os.path.normpath(dst_path)
            filepaths_by_frame[frame_idx] = dst_path
            if frame_idx != ref_idx:
                continue

            frames_to_render.append(str(frame_idx))
            # Go to frame
            george_script_lines.append("tv_layerImage {}".format(frame_idx))
            # Store image to output
            george_script_lines.append("tv_saveimage \"{}\"".format(dst_path))

        self.log.debug("Rendering Exposure frames {} of layer {} ({})".format(
            str(exposure_frames), layer_id, layer["name"]
            ",".join(frames_to_render), layer_id, layer["name"]
        ))
        # Let TVPaint render layer's image
        lib.execute_george_through_file("\n".join(george_script_lines))

        # Fill frames between `frame_start_index` and `frame_end_index`
        self.log.debug((
            "Filling frames between first and last frame of layer ({} - {})."
        ).format(frame_start_index + 1, frame_end_index + 1))
        self.log.debug("Filling not rendered frames.")
        fill_reference_frames(frame_references, filepaths_by_frame)

        _debug_filled_frames = []
        prev_filepath = None
        for frame_idx in range(frame_start_index, frame_end_index + 1):
            if frame_idx in layer_files_by_frame:
                prev_filepath = layer_files_by_frame[frame_idx]
                continue

            if prev_filepath is None:
                raise ValueError("BUG: First frame of layer was not rendered!")
            _debug_filled_frames.append(frame_idx)
            filename = tmp_filename_template.format(
                pos=layer_position,
                frame=frame_idx
            )
            new_filepath = "/".join([output_dir, filename])
            self._copy_image(prev_filepath, new_filepath)
            layer_files_by_frame[frame_idx] = new_filepath

        self.log.debug("Filled frames {}".format(str(_debug_filled_frames)))

        # Fill frames by pre/post behavior of layer
        self.log.debug((
            "Completing image sequence of layer by pre/post behavior."
            " PRE: {} | POST: {}"
        ).format(pre_behavior, post_behavior))

        # Pre behavior
        self._fill_frame_by_pre_behavior(
            layer,
            pre_behavior,
            mark_in_index,
            layer_files_by_frame,
            tmp_filename_template,
            output_dir
        )
        self._fill_frame_by_post_behavior(
            layer,
            post_behavior,
            mark_out_index,
            layer_files_by_frame,
            tmp_filename_template,
            output_dir
        )
        return layer_files_by_frame

    def _fill_frame_by_pre_behavior(
        self,
        layer,
        pre_behavior,
        mark_in_index,
        layer_files_by_frame,
        filename_template,
        output_dir
    ):
        layer_position = layer["position"]
        frame_start_index = layer["frame_start"]
        frame_end_index = layer["frame_end"]
        frame_count = frame_end_index - frame_start_index + 1
        if mark_in_index >= frame_start_index:
            self.log.debug((
                "Skipping pre-behavior."
                " All frames after Mark In are rendered."
            ))
            return

        if pre_behavior == "none":
            # Empty frames are handled during `_composite_files`
            pass

        elif pre_behavior == "hold":
            # Keep first frame for whole time
            eq_frame_filepath = layer_files_by_frame[frame_start_index]
            for frame_idx in range(mark_in_index, frame_start_index):
                filename = filename_template.format(
                    pos=layer_position,
                    frame=frame_idx
                )
                new_filepath = "/".join([output_dir, filename])
                self._copy_image(eq_frame_filepath, new_filepath)
                layer_files_by_frame[frame_idx] = new_filepath

        elif pre_behavior in ("loop", "repeat"):
            # Loop backwards from last frame of layer
            for frame_idx in reversed(range(mark_in_index, frame_start_index)):
                eq_frame_idx_offset = (
                    (frame_end_index - frame_idx) % frame_count
                )
                eq_frame_idx = frame_end_index - eq_frame_idx_offset
                eq_frame_filepath = layer_files_by_frame[eq_frame_idx]

                filename = filename_template.format(
                    pos=layer_position,
                    frame=frame_idx
                )
                new_filepath = "/".join([output_dir, filename])
                self._copy_image(eq_frame_filepath, new_filepath)
                layer_files_by_frame[frame_idx] = new_filepath

        elif pre_behavior == "pingpong":
            half_seq_len = frame_count - 1
            seq_len = half_seq_len * 2
            for frame_idx in reversed(range(mark_in_index, frame_start_index)):
                eq_frame_idx_offset = (frame_start_index - frame_idx) % seq_len
                if eq_frame_idx_offset > half_seq_len:
                    eq_frame_idx_offset = (seq_len - eq_frame_idx_offset)
                eq_frame_idx = frame_start_index + eq_frame_idx_offset

                eq_frame_filepath = layer_files_by_frame[eq_frame_idx]

                filename = filename_template.format(
                    pos=layer_position,
                    frame=frame_idx
                )
                new_filepath = "/".join([output_dir, filename])
                self._copy_image(eq_frame_filepath, new_filepath)
                layer_files_by_frame[frame_idx] = new_filepath

    def _fill_frame_by_post_behavior(
        self,
        layer,
        post_behavior,
        mark_out_index,
        layer_files_by_frame,
        filename_template,
        output_dir
    ):
        layer_position = layer["position"]
        frame_start_index = layer["frame_start"]
        frame_end_index = layer["frame_end"]
        frame_count = frame_end_index - frame_start_index + 1
        if mark_out_index <= frame_end_index:
            self.log.debug((
                "Skipping post-behavior."
                " All frames up to Mark Out are rendered."
            ))
            return

        if post_behavior == "none":
            # Empty frames are handled during `_composite_files`
            pass

        elif post_behavior == "hold":
            # Keep last frame for whole time
            eq_frame_filepath = layer_files_by_frame[frame_end_index]
            for frame_idx in range(frame_end_index + 1, mark_out_index + 1):
                filename = filename_template.format(
                    pos=layer_position,
                    frame=frame_idx
                )
                new_filepath = "/".join([output_dir, filename])
                self._copy_image(eq_frame_filepath, new_filepath)
                layer_files_by_frame[frame_idx] = new_filepath

        elif post_behavior in ("loop", "repeat"):
            # Loop frames from the beginning of the layer
            for frame_idx in range(frame_end_index + 1, mark_out_index + 1):
                eq_frame_idx = frame_idx % frame_count
                eq_frame_filepath = layer_files_by_frame[eq_frame_idx]

                filename = filename_template.format(
                    pos=layer_position,
                    frame=frame_idx
                )
                new_filepath = "/".join([output_dir, filename])
                self._copy_image(eq_frame_filepath, new_filepath)
                layer_files_by_frame[frame_idx] = new_filepath

        elif post_behavior == "pingpong":
            half_seq_len = frame_count - 1
            seq_len = half_seq_len * 2
            for frame_idx in range(frame_end_index + 1, mark_out_index + 1):
                eq_frame_idx_offset = (frame_idx - frame_end_index) % seq_len
                if eq_frame_idx_offset > half_seq_len:
                    eq_frame_idx_offset = seq_len - eq_frame_idx_offset
                eq_frame_idx = frame_end_index - eq_frame_idx_offset

                eq_frame_filepath = layer_files_by_frame[eq_frame_idx]

                filename = filename_template.format(
                    pos=layer_position,
                    frame=frame_idx
                )
                new_filepath = "/".join([output_dir, filename])
                self._copy_image(eq_frame_filepath, new_filepath)
                layer_files_by_frame[frame_idx] = new_filepath

    def _composite_files(
        self, files_by_position, frame_start, frame_end,
        filename_template, output_dir
    ):
        """Composite frames when more than one layer was exported.

        This method is used when more than one layer is rendered out and the
        output should be a composition of each frame of the rendered layers.
        Missing frames are filled with transparent images.
        """
        self.log.debug("Preparing files for compositing.")
        # Prepare paths to images by frame into a list, stored in order of
        # compositing.
        images_by_frame = {}
        for frame_idx in range(frame_start, frame_end + 1):
            images_by_frame[frame_idx] = []
            for position in sorted(files_by_position.keys(), reverse=True):
                position_data = files_by_position[position]
                if frame_idx in position_data:
                    filepath = position_data[frame_idx]
                    images_by_frame[frame_idx].append(filepath)

        output_filepaths = []
        missing_frame_paths = []
        random_frame_path = None
        for frame_idx in sorted(images_by_frame.keys()):
            image_filepaths = images_by_frame[frame_idx]
            output_filename = filename_template.format(frame=frame_idx)
            output_filepath = os.path.join(output_dir, output_filename)
            output_filepaths.append(output_filepath)

            # Store information about missing frame and skip
            if not image_filepaths:
                missing_frame_paths.append(output_filepath)
                continue

            # Just rename the file if there is no need for compositing
            if len(image_filepaths) == 1:
                os.rename(image_filepaths[0], output_filepath)

            # Composite images
            else:
                composite_images(image_filepaths, output_filepath)

            # Store path of an output image that is guaranteed to exist after
            # processing, used as a source for the missing frames
            if random_frame_path is None:
                random_frame_path = output_filepath

        self.log.debug(
            "Creating transparent images for frames without render {}.".format(
                str(missing_frame_paths)
            )
        )
        # Fill the sequence with transparent frames
        transparent_filepath = None
        for filepath in missing_frame_paths:
            if transparent_filepath is None:
                img_obj = Image.open(random_frame_path)
                painter = ImageDraw.Draw(img_obj)
                painter.rectangle((0, 0, *img_obj.size), fill=(0, 0, 0, 0))
                img_obj.save(filepath)
                transparent_filepath = filepath
            else:
                self._copy_image(transparent_filepath, filepath)
        return output_filepaths

    def _cleanup_tmp_files(self, files_by_position):
        """Remove temporary files that were used for compositing."""
        for data in files_by_position.values():
            for filepath in data.values():
                if os.path.exists(filepath):
                    os.remove(filepath)

    def _copy_image(self, src_path, dst_path):
        """Create a copy of an image.

        This was added to make it easier to change the copy method.
        """
        # Create hardlink of image instead of copying if possible
        if hasattr(os, "link"):
            os.link(src_path, dst_path)
        else:
            shutil.copy(src_path, dst_path)
        return filepaths_by_frame

21  openpype/hosts/tvpaint/worker/__init__.py  Normal file

@@ -0,0 +1,21 @@
from .worker_job import (
    JobFailed,
    ExecuteSimpleGeorgeScript,
    ExecuteGeorgeScript,
    CollectSceneData,
    SenderTVPaintCommands,
    ProcessTVPaintCommands
)

from .worker import main

__all__ = (
    "JobFailed",
    "ExecuteSimpleGeorgeScript",
    "ExecuteGeorgeScript",
    "CollectSceneData",
    "SenderTVPaintCommands",
    "ProcessTVPaintCommands",

    "main"
)

133  openpype/hosts/tvpaint/worker/worker.py  Normal file

@@ -0,0 +1,133 @@
import signal
import time
import asyncio

from avalon.tvpaint.communication_server import (
    BaseCommunicator,
    CommunicationWrapper
)
from openpype_modules.job_queue.job_workers import WorkerJobsConnection

from .worker_job import ProcessTVPaintCommands


class TVPaintWorkerCommunicator(BaseCommunicator):
    """Modified communicator which takes care of processing jobs.

    Received jobs are sent to TVPaint by parsing 'ProcessTVPaintCommands'.
    """
    def __init__(self, server_url):
        super().__init__()

        self.return_code = 1
        self._server_url = server_url
        self._worker_connection = None

    def _start_webserver(self):
        """Create connection to workers server before TVPaint server."""
        loop = self.websocket_server.loop
        self._worker_connection = WorkerJobsConnection(
            self._server_url, "tvpaint", loop
        )
        asyncio.ensure_future(
            self._worker_connection.main_loop(register_worker=False),
            loop=loop
        )

        super()._start_webserver()

    def _on_client_connect(self, *args, **kwargs):
        super()._on_client_connect(*args, **kwargs)
        # Register as "ready to work" worker
        self._worker_connection.register_as_worker()

    def stop(self):
        """Stop worker connection and TVPaint server."""
        self._worker_connection.stop()
        self.return_code = 0
        super().stop()

    @property
    def current_job(self):
        """Retrieve job which should be processed."""
        if self._worker_connection:
            return self._worker_connection.current_job
        return None

    def _check_process(self):
        if self.process is None:
            return True

        if self.process.poll() is not None:
            asyncio.ensure_future(
                self._worker_connection.disconnect(),
                loop=self.websocket_server.loop
            )
            self._exit()
            return False
        return True

    def _process_job(self):
        job = self.current_job
        if job is None:
            return

        # Prepare variables used for sending
        success = False
        message = "Unknown function"
        data = None
        job_data = job["data"]
        workfile = job_data["workfile"]
        # Currently only the "commands" function can be processed
        if job_data.get("function") == "commands":
            try:
                commands = ProcessTVPaintCommands(
                    workfile, job_data["commands"], self
                )
                commands.execute()
                data = commands.response_data()
                success = True
                message = "Executed"

            except Exception as exc:
                message = "Error on worker: {}".format(str(exc))

        self._worker_connection.finish_job(success, message, data)

    def main_loop(self):
        """Main loop where jobs are processed.

        Server is stopped by killing this process or the TVPaint process.
        """
        while self.server_is_running:
            if self._check_process():
                self._process_job()
            time.sleep(1)

        return self.return_code


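# The job payload '_process_job' expects, as a sketch; the values here are
# hypothetical and on the sender side are produced by
# 'SenderTVPaintCommands.to_job_data':
#     {
#         "data": {
#             "function": "commands",
#             "workfile": "project/work/scene.tvpp",
#             "commands": [
#                 {"id": "<uuid>", "command": "execute_george_simple",
#                  "script": "<george script>"}
#             ]
#         }
#     }

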
def _start_tvpaint(tvpaint_executable_path, server_url):
    communicator = TVPaintWorkerCommunicator(server_url)
    CommunicationWrapper.set_communicator(communicator)
    communicator.launch([tvpaint_executable_path])


def main(tvpaint_executable_path, server_url):
    # Register terminal signal handler
    def signal_handler(*_args):
        print("Termination signal received. Stopping.")
        if CommunicationWrapper.communicator is not None:
            CommunicationWrapper.communicator.stop()

    signal.signal(signal.SIGINT, signal_handler)
    signal.signal(signal.SIGTERM, signal_handler)

    _start_tvpaint(tvpaint_executable_path, server_url)

    communicator = CommunicationWrapper.communicator
    if communicator is None:
        print("Communicator is not set")
        return 1

    return communicator.main_loop()
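

# A minimal launch sketch, assuming this module is run as a script; both the
# executable path and the server URL are hypothetical:
if __name__ == "__main__":
    import sys

    sys.exit(main(
        "C:/TVPaint/TVPaint.exe",
        "http://localhost:8079"
    ))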

537  openpype/hosts/tvpaint/worker/worker_job.py  Normal file

@@ -0,0 +1,537 @@
import os
import tempfile
import inspect
import copy
import json
import time
from uuid import uuid4
from abc import ABCMeta, abstractmethod, abstractproperty

import six

from openpype.api import PypeLogger
from openpype.modules import ModulesManager


TMP_FILE_PREFIX = "opw_tvp_"


class JobFailed(Exception):
    """Raised when job was sent and finished unsuccessfully."""
    def __init__(self, job_status):
        job_state = job_status["state"]
        job_message = job_status["message"] or "Unknown issue"
        error_msg = (
            "Job didn't finish properly."
            " Job state: \"{}\" | Job message: \"{}\""
        ).format(job_state, job_message)

        self.job_status = job_status

        super().__init__(error_msg)


@six.add_metaclass(ABCMeta)
class BaseCommand:
    """Abstract TVPaint command which can be executed through worker.

    Each command must have a unique name and implemented 'execute' and
    'from_existing' methods.

    Each command also has an id which is created on command creation.

    The idea is that a command is just a data container on the sender side,
    sent through the server to a worker where it is replicated, executed,
    and the result is sent back to the sender through the server.
    """
    @abstractproperty
    def name(self):
        """Command name (must be unique)."""
        pass

    def __init__(self, data=None):
        if data is None:
            data = {}
        else:
            data = copy.deepcopy(data)

        # Use 'id' from data when replicating on process side
        command_id = data.get("id")
        if command_id is None:
            command_id = str(uuid4())
        data["id"] = command_id
        data["command"] = self.name

        self._parent = None
        self._result = None
        self._command_data = data
        self._done = False

    def job_queue_root(self):
        """Access to job queue root.

        Job queue root is a shared access point to files shared across
        senders and workers.
        """
        if self._parent is None:
            return None
        return self._parent.job_queue_root()

    def set_parent(self, parent):
        self._parent = parent

    @property
    def id(self):
        """Command id."""
        return self._command_data["id"]

    @property
    def parent(self):
        """Parent of command, expected to be a 'TVPaintCommands' object."""
        return self._parent

    @property
    def communicator(self):
        """TVPaint communicator.

        Available only on worker side.
        """
        return self._parent.communicator

    @property
    def done(self):
        """Is command done."""
        return self._done

    def set_done(self):
        """Change state of done."""
        self._done = True

    def set_result(self, result):
        """Set result of executed command."""
        self._result = result

    def result(self):
        """Result of command."""
        return copy.deepcopy(self._result)

    def response_data(self):
        """Data sent as response to sender."""
        return {
            "id": self.id,
            "result": self._result,
            "done": self._done
        }

    def command_data(self):
        """Raw command data."""
        return copy.deepcopy(self._command_data)

    @abstractmethod
    def execute(self):
        """Execute command on worker side."""
        pass

    @classmethod
    @abstractmethod
    def from_existing(cls, data):
        """Recreate object based on passed data."""
        pass

    def execute_george(self, george_script):
        """Execute george script in TVPaint."""
        return self.parent.execute_george(george_script)

    def execute_george_through_file(self, george_script):
        """Execute george script through temp file in TVPaint."""
        return self.parent.execute_george_through_file(george_script)


class ExecuteSimpleGeorgeScript(BaseCommand):
    """Execute simple george script in TVPaint.

    Args:
        script(str): Script that will be executed.
    """
    name = "execute_george_simple"

    def __init__(self, script, data=None):
        data = data or {}
        data["script"] = script
        self._script = script
        super().__init__(data)

    def execute(self):
        self._result = self.execute_george(self._script)

    @classmethod
    def from_existing(cls, data):
        script = data.pop("script")
        return cls(script, data)


class ExecuteGeorgeScript(BaseCommand):
    """Execute multiline george script in TVPaint.

    Args:
        script_lines(list): Lines that will be executed as george script
            through a temp george file.
        tmp_file_keys(list): List of formatting keys in the george script
            that require replacement with a path to a temp file where the
            result will be stored. The content of the file is stored to the
            result under the key.
        root_dir_key(str): Formatting key that will be replaced in the
            george script with the job queue root, which can be different on
            the worker side.
        data(dict): Raw data about command.
    """
    name = "execute_george_through_file"

    def __init__(
        self, script_lines, tmp_file_keys=None, root_dir_key=None, data=None
    ):
        data = data or {}
        if not tmp_file_keys:
            tmp_file_keys = data.get("tmp_file_keys") or []

        data["script_lines"] = script_lines
        data["tmp_file_keys"] = tmp_file_keys
        data["root_dir_key"] = root_dir_key
        self._script_lines = script_lines
        self._tmp_file_keys = tmp_file_keys
        self._root_dir_key = root_dir_key
        super().__init__(data)

    def execute(self):
        filepath_by_key = {}
        script = self._script_lines
        if isinstance(script, list):
            script = "\n".join(script)

        # Replace temporary files in george script
        for key in self._tmp_file_keys:
            output_file = tempfile.NamedTemporaryFile(
                mode="w", prefix=TMP_FILE_PREFIX, suffix=".txt", delete=False
            )
            output_file.close()
            format_key = "{" + key + "}"
            output_path = output_file.name.replace("\\", "/")
            script = script.replace(format_key, output_path)
            filepath_by_key[key] = output_path

        # Replace job queue root in script
        if self._root_dir_key:
            job_queue_root = self.job_queue_root()
            format_key = "{" + self._root_dir_key + "}"
            script = script.replace(
                format_key, job_queue_root.replace("\\", "/")
            )

        # Execute the script
        self.execute_george_through_file(script)

        # Store result of temporary files
        result = {}
        for key, filepath in filepath_by_key.items():
            with open(filepath, "r") as stream:
                data = stream.read()
            result[key] = data
            os.remove(filepath)

        self._result = result

    @classmethod
    def from_existing(cls, data):
        """Recreate the object from data."""
        script_lines = data.pop("script_lines")
        tmp_file_keys = data.pop("tmp_file_keys", None)
        root_dir_key = data.pop("root_dir_key", None)
        return cls(script_lines, tmp_file_keys, root_dir_key, data)


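# A usage sketch of 'ExecuteGeorgeScript' with a temp-file key: every
# '{result_path}' occurrence is replaced on the worker with a generated temp
# file path, and the content of that file comes back in the command result
# under the 'result_path' key. The george line is only a placeholder, not a
# verified george command.
_example_george_command = ExecuteGeorgeScript(
    [
        "some_george_command_writing_into \"{result_path}\""
    ],
    tmp_file_keys=["result_path"]
)

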
class CollectSceneData(BaseCommand):
    """Helper command which will collect all useful info about the workfile.

    Result is a dictionary with all layers data, exposure frames by layer
    ids, pre/post behavior of layers by their ids, group information and
    scene data.
    """
    name = "collect_scene_data"

    def execute(self):
        from avalon.tvpaint.lib import (
            get_layers_data,
            get_groups_data,
            get_layers_pre_post_behavior,
            get_layers_exposure_frames,
            get_scene_data
        )

        groups_data = get_groups_data(communicator=self.communicator)
        layers_data = get_layers_data(communicator=self.communicator)
        layer_ids = [
            layer_data["layer_id"]
            for layer_data in layers_data
        ]
        pre_post_beh_by_layer_id = get_layers_pre_post_behavior(
            layer_ids, communicator=self.communicator
        )
        exposure_frames_by_layer_id = get_layers_exposure_frames(
            layer_ids, layers_data, communicator=self.communicator
        )

        self._result = {
            "layers_data": layers_data,
            "exposure_frames_by_layer_id": exposure_frames_by_layer_id,
            "pre_post_beh_by_layer_id": pre_post_beh_by_layer_id,
            "groups_data": groups_data,
            "scene_data": get_scene_data(self.communicator)
        }

    @classmethod
    def from_existing(cls, data):
        return cls(data)


@six.add_metaclass(ABCMeta)
class TVPaintCommands:
    """Wrapper around TVPaint commands to be able to send multiple commands.

    One or multiple commands may be sent at once. Also gives api access to
    commands info.

    Base for sender and receiver which extend the logic for their purposes.
    One of the differences is the preparation of the workfile path.

    Args:
        workfile(str): Path to workfile.
        job_queue_module(JobQueueModule): Object of OpenPype module JobQueue.
    """
    def __init__(self, workfile, job_queue_module=None):
        self._log = None
        self._commands = []
        self._command_classes_by_name = None
        if job_queue_module is None:
            manager = ModulesManager()
            job_queue_module = manager.modules_by_name["job_queue"]
        self._job_queue_module = job_queue_module

        self._workfile = self._prepare_workfile(workfile)

    @abstractmethod
    def _prepare_workfile(self, workfile):
        """Modify workfile path on initialization to match the platform."""
        pass

    def job_queue_root(self):
        """Job queue root for current platform using current settings."""
        return self._job_queue_module.get_jobs_root_from_settings()

    @property
    def log(self):
        """Access to logger object."""
        if self._log is None:
            self._log = PypeLogger.get_logger(self.__class__.__name__)
        return self._log

    @property
    def classes_by_name(self):
        """Prepare command classes for validation and recreation of commands.

        It is expected that all commands are defined in this python file so
        we're looking for all implementations of BaseCommand in globals.
        """
        if self._command_classes_by_name is None:
            command_classes_by_name = {}
            for attr in globals().values():
                if (
                    not inspect.isclass(attr)
                    or not issubclass(attr, BaseCommand)
                    or attr is BaseCommand
                ):
                    continue

                if inspect.isabstract(attr):
                    self.log.debug(
                        "Skipping abstract class {}".format(attr.__name__)
                    )
                    continue
                command_classes_by_name[attr.name] = attr
            self._command_classes_by_name = command_classes_by_name

        return self._command_classes_by_name

    def add_command(self, command):
        """Add command to process."""
        command.set_parent(self)
        self._commands.append(command)

    def result(self):
        """Results of commands in the order in which they were processed."""
        return [
            command.result()
            for command in self._commands
        ]

    def response_data(self):
        """Data which should be sent from worker."""
        return [
            command.response_data()
            for command in self._commands
        ]


class SenderTVPaintCommands(TVPaintCommands):
    """Sender implementation of TVPaint Commands."""
    def _prepare_workfile(self, workfile):
        """Remove job queue root from workfile path.

        It is expected that the worker will add its root before the passed
        workfile.
        """
        new_workfile = workfile.replace("\\", "/")
        job_queue_root = self.job_queue_root().replace("\\", "/")
        if job_queue_root not in new_workfile:
            raise ValueError((
                "Workfile is not located in JobQueue root."
                " Workfile path: \"{}\". JobQueue root: \"{}\""
            ).format(workfile, job_queue_root))
        return new_workfile.replace(job_queue_root, "")

    def commands_data(self):
        """Commands data to be able to recreate them."""
        return [
            command.command_data()
            for command in self._commands
        ]

    def to_job_data(self):
        """Convert commands to job data before sending to workers server."""
        return {
            "workfile": self._workfile,
            "function": "commands",
            "commands": self.commands_data()
        }

    def set_result(self, result):
        commands_by_id = {
            command.id: command
            for command in self._commands
        }

        for item in result:
            command = commands_by_id[item["id"]]
            command.set_result(item["result"])
            command.set_done()

    def _send_job(self):
        """Send job to a workers server."""
        # Send job data to job queue server
        job_data = self.to_job_data()
        self.log.debug("Sending job to JobQueue server.\n{}".format(
            json.dumps(job_data, indent=4)
        ))
        job_id = self._job_queue_module.send_job("tvpaint", job_data)
        self.log.info((
            "Job sent to JobQueue server and got id \"{}\"."
            " Waiting for the job to finish."
        ).format(job_id))

        return job_id

    def send_job_and_wait(self):
        """Send job to workers server and wait for response.

        Result of the job is stored into the object.

        Raises:
            JobFailed: When the job finished but not successfully.
        """
        job_id = self._send_job()
        while True:
            job_status = self._job_queue_module.get_job_status(job_id)
            if job_status["done"]:
                break
            time.sleep(1)

        # Check if job state is done
        if job_status["state"] != "done":
            raise JobFailed(job_status)

        self.set_result(job_status["result"])

        self.log.debug("Job is done and result is stored.")


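# A sender-side round trip sketch, assuming a running job_queue server and a
# workfile stored under the job queue root; the workfile path is passed in
# and is hypothetical:
def _example_send(workfile_path):
    commands = SenderTVPaintCommands(workfile_path)
    commands.add_command(CollectSceneData())
    # Blocks until a TVPaint worker processes the job and raises 'JobFailed'
    # when the job does not finish successfully
    commands.send_job_and_wait()
    scene_data = commands.result()[0]
    return scene_data["layers_data"]

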
class ProcessTVPaintCommands(TVPaintCommands):
    """Worker side of TVPaint Commands.

    It is expected this object is created only on the worker's side from
    existing data loaded from a job.

    Workfile path logic is based on 'SenderTVPaintCommands'.
    """
    def __init__(self, workfile, commands, communicator):
        super(ProcessTVPaintCommands, self).__init__(workfile)

        self._communicator = communicator

        self.commands_from_data(commands)

    def _prepare_workfile(self, workfile):
        """Prepend job queue root before passed workfile."""
        workfile = workfile.replace("\\", "/")
        job_queue_root = self.job_queue_root().replace("\\", "/")
        new_workfile = "/".join([job_queue_root, workfile])
        while "//" in new_workfile:
            new_workfile = new_workfile.replace("//", "/")
        return os.path.normpath(new_workfile)

    @property
    def communicator(self):
        """Access to TVPaint communicator."""
        return self._communicator

    def commands_from_data(self, commands_data):
        """Recreate commands from passed data."""
        for command_data in commands_data:
            command_name = command_data["command"]

            klass = self.classes_by_name[command_name]
            command = klass.from_existing(command_data)
            self.add_command(command)

    def execute_george(self, george_script):
        """Helper method to execute george script."""
        return self.communicator.execute_george(george_script)

    def execute_george_through_file(self, george_script):
        """Helper method to execute george script through temp file."""
        temporary_file = tempfile.NamedTemporaryFile(
            mode="w", prefix=TMP_FILE_PREFIX, suffix=".grg", delete=False
        )
        temporary_file.write(george_script)
        temporary_file.close()
        temp_file_path = temporary_file.name.replace("\\", "/")
        self.execute_george("tv_runscript {}".format(temp_file_path))
        os.remove(temp_file_path)

    def _open_workfile(self):
        """Open workfile in TVPaint."""
        workfile = self._workfile
        print("Opening workfile {}".format(workfile))
        george_script = "tv_LoadProject '\"'\"{}\"'\"'".format(workfile)
        self.execute_george_through_file(george_script)

    def _close_workfile(self):
        """Close workfile in TVPaint."""
        print("Closing workfile")
        self.execute_george_through_file("tv_projectclose")

    def execute(self):
        """Execute commands."""
        # First open the workfile
        self._open_workfile()
        # Execute commands one by one
        # TODO maybe stop processing when command fails?
        print("Commands execution started ({})".format(len(self._commands)))
        for command in self._commands:
            command.execute()
            command.set_done()
        # Finally close workfile
        self._close_workfile()

@@ -0,0 +1,255 @@
"""
Requires:
    CollectTVPaintWorkfileData

Provides:
    Instances
"""
import os
import re
import copy
import pyblish.api

from openpype.lib import get_subset_name_with_asset_doc


class CollectTVPaintInstances(pyblish.api.ContextPlugin):
    label = "Collect TVPaint Instances"
    order = pyblish.api.CollectorOrder + 0.2
    hosts = ["webpublisher"]
    targets = ["tvpaint_worker"]

    workfile_family = "workfile"
    workfile_variant = ""
    review_family = "review"
    review_variant = "Main"
    render_pass_family = "renderPass"
    render_layer_family = "renderLayer"
    render_layer_pass_name = "beauty"

    # Set by settings
    # Regex must contain 'layer' and 'pass' groups which are extracted from
    # the layer name when instances are created
    layer_name_regex = r"(?P<layer>L[0-9]{3}_\w+)_(?P<pass>.+)"

def process(self, context):
|
||||
# Prepare compiled regex
|
||||
layer_name_regex = re.compile(self.layer_name_regex)
|
||||
|
||||
layers_data = context.data["layersData"]
|
||||
|
||||
host_name = "tvpaint"
|
||||
task_name = context.data.get("task")
|
||||
asset_doc = context.data["assetEntity"]
|
||||
project_doc = context.data["projectEntity"]
|
||||
project_name = project_doc["name"]
|
||||
|
||||
new_instances = []
|
||||
|
||||
# Workfile instance
|
||||
workfile_subset_name = get_subset_name_with_asset_doc(
|
||||
self.workfile_family,
|
||||
self.workfile_variant,
|
||||
task_name,
|
||||
asset_doc,
|
||||
project_name,
|
||||
host_name
|
||||
)
|
||||
workfile_instance = self._create_workfile_instance(
|
||||
context, workfile_subset_name
|
||||
)
|
||||
new_instances.append(workfile_instance)
|
||||
|
||||
# Review instance
|
||||
review_subset_name = get_subset_name_with_asset_doc(
|
||||
self.review_family,
|
||||
self.review_variant,
|
||||
task_name,
|
||||
asset_doc,
|
||||
project_name,
|
||||
host_name
|
||||
)
|
||||
review_instance = self._create_review_instance(
|
||||
context, review_subset_name
|
||||
)
|
||||
new_instances.append(review_instance)
|
||||
|
||||
# Get render layers and passes from TVPaint layers
|
||||
# - it's based on regex extraction
|
||||
layers_by_layer_and_pass = {}
|
||||
for layer in layers_data:
|
||||
# Filter only visible layers
|
||||
if not layer["visible"]:
|
||||
continue
|
||||
|
||||
result = layer_name_regex.search(layer["name"])
|
||||
# Layer name not matching layer name regex
|
||||
# should raise an exception?
|
||||
if result is None:
|
||||
continue
|
||||
render_layer = result.group("layer")
|
||||
render_pass = result.group("pass")
|
||||
|
||||
render_pass_maping = layers_by_layer_and_pass.get(
|
||||
render_layer
|
||||
)
|
||||
if render_pass_maping is None:
|
||||
render_pass_maping = {}
|
||||
layers_by_layer_and_pass[render_layer] = render_pass_maping
|
||||
|
||||
if render_pass not in render_pass_maping:
|
||||
render_pass_maping[render_pass] = []
|
||||
render_pass_maping[render_pass].append(copy.deepcopy(layer))
|
||||
|
||||
layers_by_render_layer = {}
|
||||
for render_layer, render_passes in layers_by_layer_and_pass.items():
|
||||
render_layer_layers = []
|
||||
layers_by_render_layer[render_layer] = render_layer_layers
|
||||
for render_pass, layers in render_passes.items():
|
||||
render_layer_layers.extend(copy.deepcopy(layers))
|
||||
dynamic_data = {
|
||||
"render_pass": render_pass,
|
||||
"render_layer": render_layer,
|
||||
# Override family for subset name
|
||||
"family": "render"
|
||||
}
|
||||
|
||||
subset_name = get_subset_name_with_asset_doc(
|
||||
self.render_pass_family,
|
||||
render_pass,
|
||||
task_name,
|
||||
asset_doc,
|
||||
project_name,
|
||||
host_name,
|
||||
dynamic_data=dynamic_data
|
||||
)
|
||||
|
||||
instance = self._create_render_pass_instance(
|
||||
context, layers, subset_name
|
||||
)
|
||||
new_instances.append(instance)
|
||||
|
||||
for render_layer, layers in layers_by_render_layer.items():
|
||||
variant = render_layer
|
||||
dynamic_data = {
|
||||
"render_pass": self.render_layer_pass_name,
|
||||
"render_layer": render_layer,
|
||||
# Override family for subset name
|
||||
"family": "render"
|
||||
}
|
||||
subset_name = get_subset_name_with_asset_doc(
|
||||
self.render_pass_family,
|
||||
variant,
|
||||
task_name,
|
||||
asset_doc,
|
||||
project_name,
|
||||
host_name,
|
||||
dynamic_data=dynamic_data
|
||||
)
|
||||
instance = self._create_render_layer_instance(
|
||||
context, layers, subset_name
|
||||
)
|
||||
new_instances.append(instance)
|
||||
|
||||
# Set data same for all instances
|
||||
frame_start = context.data.get("frameStart")
|
||||
frame_end = context.data.get("frameEnd")
|
||||
|
||||
for instance in new_instances:
|
||||
if (
|
||||
instance.data.get("frameStart") is None
|
||||
or instance.data.get("frameEnd") is None
|
||||
):
|
||||
instance.data["frameStart"] = frame_start
|
||||
instance.data["frameEnd"] = frame_end
|
||||
|
||||
if instance.data.get("asset") is None:
|
||||
instance.data["asset"] = asset_doc["name"]
|
||||
|
||||
if instance.data.get("task") is None:
|
||||
instance.data["task"] = task_name
|
||||
|
||||
if "representations" not in instance.data:
|
||||
instance.data["representations"] = []
|
||||
|
||||
if "source" not in instance.data:
|
||||
instance.data["source"] = "webpublisher"
|
||||
|
||||
def _create_workfile_instance(self, context, subset_name):
|
||||
workfile_path = context.data["workfilePath"]
|
||||
staging_dir = os.path.dirname(workfile_path)
|
||||
filename = os.path.basename(workfile_path)
|
||||
ext = os.path.splitext(filename)[-1]
|
||||
|
||||
return context.create_instance(**{
|
||||
"name": subset_name,
|
||||
"label": subset_name,
|
||||
"subset": subset_name,
|
||||
"family": self.workfile_family,
|
||||
"families": [],
|
||||
"stagingDir": staging_dir,
|
||||
"representations": [{
|
||||
"name": ext.lstrip("."),
|
||||
"ext": ext.lstrip("."),
|
||||
"files": filename,
|
||||
"stagingDir": staging_dir
|
||||
}]
|
||||
})
|
||||
|
||||
def _create_review_instance(self, context, subset_name):
|
||||
staging_dir = self._create_staging_dir(context, subset_name)
|
||||
layers_data = context.data["layersData"]
|
||||
# Filter hidden layers
|
||||
filtered_layers_data = [
|
||||
copy.deepcopy(layer)
|
||||
for layer in layers_data
|
||||
if layer["visible"]
|
||||
]
|
||||
return context.create_instance(**{
|
||||
"name": subset_name,
|
||||
"label": subset_name,
|
||||
"subset": subset_name,
|
||||
"family": self.review_family,
|
||||
"families": [],
|
||||
"layers": filtered_layers_data,
|
||||
"stagingDir": staging_dir
|
||||
})
|
||||
|
||||
def _create_render_pass_instance(self, context, layers, subset_name):
|
||||
staging_dir = self._create_staging_dir(context, subset_name)
|
||||
# Global instance data modifications
|
||||
# Fill families
|
||||
return context.create_instance(**{
|
||||
"name": subset_name,
|
||||
"subset": subset_name,
|
||||
"label": subset_name,
|
||||
"family": self.render_pass_family,
|
||||
# Add `review` family for thumbnail integration
|
||||
"families": [self.render_pass_family, "review"],
|
||||
"representations": [],
|
||||
"layers": layers,
|
||||
"stagingDir": staging_dir
|
||||
})
|
||||
|
||||
def _create_render_layer_instance(self, context, layers, subset_name):
|
||||
staging_dir = self._create_staging_dir(context, subset_name)
|
||||
# Global instance data modifications
|
||||
# Fill families
|
||||
return context.create_instance(**{
|
||||
"name": subset_name,
|
||||
"subset": subset_name,
|
||||
"label": subset_name,
|
||||
"family": self.render_pass_family,
|
||||
# Add `review` family for thumbnail integration
|
||||
"families": [self.render_pass_family, "review"],
|
||||
"representations": [],
|
||||
"layers": layers,
|
||||
"stagingDir": staging_dir
|
||||
})
|
||||
|
||||
def _create_staging_dir(self, context, subset_name):
|
||||
context_staging_dir = context.data["contextStagingDir"]
|
||||
staging_dir = os.path.join(context_staging_dir, subset_name)
|
||||
if not os.path.exists(staging_dir):
|
||||
os.makedirs(staging_dir)
|
||||
return staging_dir
|
||||
|
|
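To make the layer naming convention concrete, a small standalone sketch of the default `layer_name_regex` splitting hypothetical TVPaint layer names into the 'layer' and 'pass' groups:

import re

LAYER_NAME_REGEX = re.compile(r"(?P<layer>L[0-9]{3}_\w+)_(?P<pass>.+)")

# Hypothetical layer names following the "L###_<name>_<pass>" convention
for name in ("L010_hero_beauty", "L010_hero_shadow", "L020_bg_beauty"):
    result = LAYER_NAME_REGEX.search(name)
    print(name, "->", result.group("layer"), "/", result.group("pass"))
# L010_hero_beauty -> L010_hero / beauty
# L010_hero_shadow -> L010_hero / shadow
# L020_bg_beauty -> L020_bg / beauty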
@ -0,0 +1,142 @@
"""
Requires:
    CollectPublishedFiles
    CollectModules

Provides:
    workfilePath - Path to TVPaint workfile
    sceneData - Scene data loaded from the workfile
    groupsData - Group information from the workfile
    layersData - Layer information from the workfile
    layersExposureFrames - Exposure frames by layer id
    layersPrePostBehavior - Pre/Post behavior by layer id
"""
import os
import uuid
import json
import shutil
import pyblish.api
from openpype.lib.plugin_tools import parse_json
from openpype.hosts.tvpaint.worker import (
    SenderTVPaintCommands,
    CollectSceneData
)


class CollectTVPaintWorkfileData(pyblish.api.ContextPlugin):
    label = "Collect TVPaint Workfile data"
    order = pyblish.api.CollectorOrder - 0.4
    hosts = ["webpublisher"]
    targets = ["tvpaint_worker"]

    def process(self, context):
        # Get JobQueue module
        modules = context.data["openPypeModules"]
        job_queue_module = modules["job_queue"]
        jobs_root = job_queue_module.get_jobs_root()
        if not jobs_root:
            raise ValueError("Job Queue root is not set.")

        context.data["jobsRoot"] = jobs_root

        context_staging_dir = self._create_context_staging_dir(jobs_root)
        workfile_path = self._extract_workfile_path(
            context, context_staging_dir
        )
        context.data["contextStagingDir"] = context_staging_dir
        context.data["workfilePath"] = workfile_path

        # Prepare tvpaint command
        collect_scene_data_command = CollectSceneData()
        # Create TVPaint sender commands
        commands = SenderTVPaintCommands(workfile_path, job_queue_module)
        commands.add_command(collect_scene_data_command)

        # Send job and wait for answer
        commands.send_job_and_wait()

        collected_data = collect_scene_data_command.result()
        layers_data = collected_data["layers_data"]
        groups_data = collected_data["groups_data"]
        scene_data = collected_data["scene_data"]
        exposure_frames_by_layer_id = (
            collected_data["exposure_frames_by_layer_id"]
        )
        pre_post_beh_by_layer_id = (
            collected_data["pre_post_beh_by_layer_id"]
        )

        # Store results
        # - scene data is stored the same way as in the TVPaint collector
        scene_data = {
            "sceneWidth": scene_data["width"],
            "sceneHeight": scene_data["height"],
            "scenePixelAspect": scene_data["pixel_aspect"],
            "sceneFps": scene_data["fps"],
            "sceneFieldOrder": scene_data["field_order"],
            "sceneMarkIn": scene_data["mark_in"],
            # scene_data["mark_in_state"],
            "sceneMarkInState": scene_data["mark_in_set"],
            "sceneMarkOut": scene_data["mark_out"],
            # scene_data["mark_out_state"],
            "sceneMarkOutState": scene_data["mark_out_set"],
            "sceneStartFrame": scene_data["start_frame"],
            "sceneBgColor": scene_data["bg_color"]
        }
        context.data["sceneData"] = scene_data
        # Store only raw data
        context.data["groupsData"] = groups_data
        context.data["layersData"] = layers_data
        context.data["layersExposureFrames"] = exposure_frames_by_layer_id
        context.data["layersPrePostBehavior"] = pre_post_beh_by_layer_id

        self.log.debug(
            (
                "Collected data"
                "\nScene data: {}"
                "\nLayers data: {}"
                "\nExposure frames: {}"
                "\nPre/Post behavior: {}"
            ).format(
                json.dumps(scene_data, indent=4),
                json.dumps(layers_data, indent=4),
                json.dumps(exposure_frames_by_layer_id, indent=4),
                json.dumps(pre_post_beh_by_layer_id, indent=4)
            )
        )

    def _create_context_staging_dir(self, jobs_root):
        if not os.path.exists(jobs_root):
            os.makedirs(jobs_root)

        random_folder_name = str(uuid.uuid4())
        full_path = os.path.join(jobs_root, random_folder_name)
        if not os.path.exists(full_path):
            os.makedirs(full_path)
        return full_path

    def _extract_workfile_path(self, context, context_staging_dir):
        """Find the first TVPaint file in tasks and use it."""
        batch_dir = context.data["batchDir"]
        batch_data = context.data["batchData"]
        src_workfile_path = None
        for task_id in batch_data["tasks"]:
            if src_workfile_path is not None:
                break
            task_dir = os.path.join(batch_dir, task_id)
            task_manifest_path = os.path.join(task_dir, "manifest.json")
            task_data = parse_json(task_manifest_path)
            task_files = task_data["files"]
            for filename in task_files:
                _, ext = os.path.splitext(filename)
                if ext.lower() == ".tvpp":
                    src_workfile_path = os.path.join(task_dir, filename)
                    break

        if src_workfile_path is None:
            raise ValueError("No TVPaint (.tvpp) workfile found in batch.")

        # Copy workfile to job queue work root
        new_workfile_path = os.path.join(
            context_staging_dir, os.path.basename(src_workfile_path)
        )
        shutil.copy(src_workfile_path, new_workfile_path)

        return new_workfile_path
@ -0,0 +1,535 @@
import os
import copy

from openpype.hosts.tvpaint.worker import (
    SenderTVPaintCommands,
    ExecuteSimpleGeorgeScript,
    ExecuteGeorgeScript
)

import pyblish.api
from openpype.hosts.tvpaint.lib import (
    calculate_layers_extraction_data,
    get_frame_filename_template,
    fill_reference_frames,
    composite_rendered_layers,
    rename_filepaths_by_frame_start
)
from PIL import Image


class ExtractTVPaintSequences(pyblish.api.Extractor):
    label = "Extract TVPaint Sequences"
    hosts = ["webpublisher"]
    targets = ["tvpaint_worker"]

    # Context plugin does not have families filtering
    families_filter = ["review", "renderPass", "renderLayer"]

    job_queue_root_key = "jobs_root"

    # Modifiable with settings
    review_bg = [255, 255, 255, 255]

    def process(self, context):
        # Get workfile path
        workfile_path = context.data["workfilePath"]
        jobs_root = context.data["jobsRoot"]
        jobs_root_slashed = jobs_root.replace("\\", "/")

        # Prepare scene data
        scene_data = context.data["sceneData"]
        scene_mark_in = scene_data["sceneMarkIn"]
        scene_mark_out = scene_data["sceneMarkOut"]
        scene_start_frame = scene_data["sceneStartFrame"]
        scene_bg_color = scene_data["sceneBgColor"]

        # Prepare layers behavior
        behavior_by_layer_id = context.data["layersPrePostBehavior"]
        exposure_frames_by_layer_id = context.data["layersExposureFrames"]

        # Handles are not stored per instance but on Context
        handle_start = context.data["handleStart"]
        handle_end = context.data["handleEnd"]

        # Get JobQueue module
        modules = context.data["openPypeModules"]
        job_queue_module = modules["job_queue"]

        tvpaint_commands = SenderTVPaintCommands(
            workfile_path, job_queue_module
        )

        # Change scene Start Frame to 0 to prevent frame index issues
        # - issue is that TVPaint versions deal with frame indexes in a
        #   different way when Start Frame is not `0`
        # NOTE It will be set back after rendering
        tvpaint_commands.add_command(
            ExecuteSimpleGeorgeScript("tv_startframe 0")
        )

        root_key_replacement = "{" + self.job_queue_root_key + "}"
        after_render_instances = []
        for instance in context:
            instance_families = set(instance.data.get("families", []))
            instance_families.add(instance.data["family"])
            valid = False
            for family in instance_families:
                if family in self.families_filter:
                    valid = True
                    break

            if not valid:
                continue

            self.log.info("* Preparing commands for instance \"{}\"".format(
                instance.data["label"]
            ))
            # Get all layers and filter out those not visible
            layers = instance.data["layers"]
            filtered_layers = [layer for layer in layers if layer["visible"]]
            if not filtered_layers:
                self.log.info(
                    "None of the layers from the instance"
                    " are visible. Extraction skipped."
                )
                continue

            joined_layer_names = ", ".join([
                "\"{}\"".format(str(layer["name"]))
                for layer in filtered_layers
            ])
            self.log.debug(
                "Instance has {} layers with names: {}".format(
                    len(filtered_layers), joined_layer_names
                )
            )

            # Staging dir must be created during collection
            staging_dir = instance.data["stagingDir"].replace("\\", "/")

            job_root_template = staging_dir.replace(
                jobs_root_slashed, root_key_replacement
            )

            # Frame start/end may be stored as float
            frame_start = int(instance.data["frameStart"])
            frame_end = int(instance.data["frameEnd"])

            # Prepare output frames
            output_frame_start = frame_start - handle_start
            output_frame_end = frame_end + handle_end

            # Change output frame start to 0 if handles cause it to be a
            #   negative number
            if output_frame_start < 0:
                self.log.warning((
                    "Frame start with handles has negative value."
                    " Changed to \"0\". Frames start: {}, Handle Start: {}"
                ).format(frame_start, handle_start))
                output_frame_start = 0

            # Create copy of scene Mark In/Out
            mark_in, mark_out = scene_mark_in, scene_mark_out

            # Fix possible changes of output frame
            mark_out, output_frame_end = self._fix_range_changes(
                mark_in, mark_out, output_frame_start, output_frame_end
            )
            filename_template = get_frame_filename_template(
                max(scene_mark_out, output_frame_end)
            )

            # -----------------------------------------------------------------
            self.log.debug(
                "Files will be rendered to folder: {}".format(staging_dir)
            )

            output_filepaths_by_frame_idx = {}
            for frame_idx in range(mark_in, mark_out + 1):
                filename = filename_template.format(frame=frame_idx)
                filepath = os.path.join(staging_dir, filename)
                output_filepaths_by_frame_idx[frame_idx] = filepath

            # Prepare data for post render processing
            post_render_data = {
                "output_dir": staging_dir,
                "layers": filtered_layers,
                "output_filepaths_by_frame_idx": output_filepaths_by_frame_idx,
                "instance": instance,
                "is_layers_render": False,
                "output_frame_start": output_frame_start,
                "output_frame_end": output_frame_end,
                # Mark In/Out may differ per instance after range fixes
                "mark_in": mark_in,
                "mark_out": mark_out
            }
            # Store them to list
            after_render_instances.append(post_render_data)

            # Review rendering
            if instance.data["family"] == "review":
                self.add_render_review_command(
                    tvpaint_commands, mark_in, mark_out, scene_bg_color,
                    job_root_template, filename_template
                )
                continue

            # Layers rendering
            extraction_data_by_layer_id = calculate_layers_extraction_data(
                filtered_layers,
                exposure_frames_by_layer_id,
                behavior_by_layer_id,
                mark_in,
                mark_out
            )
            filepaths_by_layer_id = self.add_render_command(
                tvpaint_commands,
                job_root_template,
                staging_dir,
                filtered_layers,
                extraction_data_by_layer_id
            )
            # Add more data to post render processing
            post_render_data.update({
                "is_layers_render": True,
                "extraction_data_by_layer_id": extraction_data_by_layer_id,
                "filepaths_by_layer_id": filepaths_by_layer_id
            })

        # Change scene frame Start back to previous value
        tvpaint_commands.add_command(
            ExecuteSimpleGeorgeScript(
                "tv_startframe {}".format(scene_start_frame)
            )
        )
        self.log.info("Sending the job and waiting for response...")
        tvpaint_commands.send_job_and_wait()
        self.log.info("Render job finished")

        for post_render_data in after_render_instances:
            self._post_render_processing(
                post_render_data,
                post_render_data["mark_in"],
                post_render_data["mark_out"]
            )

    def _fix_range_changes(
        self, mark_in, mark_out, output_frame_start, output_frame_end
    ):
        # Check Marks range and output range
        output_range = output_frame_end - output_frame_start
        marks_range = mark_out - mark_in

        # Lower Mark Out if mark range is bigger than output
        # - do not render unused frames
        if output_range < marks_range:
            new_mark_out = mark_out - (marks_range - output_range)
            self.log.warning((
                "Lowering render range to {} frames. Changed Mark Out {} -> {}"
            ).format(output_range + 1, mark_out, new_mark_out))
            # Assign new mark out to variable
            mark_out = new_mark_out

        # Lower output frame end so representation has right `frameEnd` value
        elif output_range > marks_range:
            new_output_frame_end = (
                output_frame_end - (output_range - marks_range)
            )
            self.log.warning((
                "Lowering representation range to {} frames."
                " Changed frame end {} -> {}"
            ).format(marks_range + 1, output_frame_end, new_output_frame_end))
            output_frame_end = new_output_frame_end
        return mark_out, output_frame_end

    def _post_render_processing(self, post_render_data, mark_in, mark_out):
        # Unpack values
        instance = post_render_data["instance"]
        output_filepaths_by_frame_idx = (
            post_render_data["output_filepaths_by_frame_idx"]
        )
        is_layers_render = post_render_data["is_layers_render"]
        output_dir = post_render_data["output_dir"]
        layers = post_render_data["layers"]
        output_frame_start = post_render_data["output_frame_start"]
        output_frame_end = post_render_data["output_frame_end"]

        # Trigger post processing of layers rendering
        # - only a few frames were rendered; this will complete the sequence
        # - multiple layers can be in a single instance, they must be
        #   composited over each other
        if is_layers_render:
            self._finish_layer_render(
                layers,
                post_render_data["extraction_data_by_layer_id"],
                post_render_data["filepaths_by_layer_id"],
                mark_in,
                mark_out,
                output_filepaths_by_frame_idx
            )

        # Create thumbnail
        thumbnail_filepath = os.path.join(output_dir, "thumbnail.jpg")
        thumbnail_src_path = output_filepaths_by_frame_idx[mark_in]
        self._create_thumbnail(thumbnail_src_path, thumbnail_filepath)

        # Rename filepaths to final frames
        repre_files = self._rename_output_files(
            output_filepaths_by_frame_idx,
            mark_in,
            mark_out,
            output_frame_start
        )

        # Fill tags and new families
        family_lowered = instance.data["family"].lower()
        tags = []
        if family_lowered in ("review", "renderlayer"):
            tags.append("review")

        # Sequence of one frame
        single_file = len(repre_files) == 1
        if single_file:
            repre_files = repre_files[0]

        # Extension is hardcoded
        # - changing the extension would require code changes
        new_repre = {
            "name": "png",
            "ext": "png",
            "files": repre_files,
            "stagingDir": output_dir,
            "tags": tags
        }

        if not single_file:
            new_repre["frameStart"] = output_frame_start
            new_repre["frameEnd"] = output_frame_end

        self.log.debug("Creating new representation: {}".format(new_repre))

        instance.data["representations"].append(new_repre)

        if family_lowered in ("renderpass", "renderlayer"):
            # Change family to render
            instance.data["family"] = "render"

        thumbnail_ext = os.path.splitext(thumbnail_filepath)[1]
        # Create thumbnail representation
        thumbnail_repre = {
            "name": "thumbnail",
            "ext": thumbnail_ext.replace(".", ""),
            "outputName": "thumb",
            "files": os.path.basename(thumbnail_filepath),
            "stagingDir": output_dir,
            "tags": ["thumbnail"]
        }
        instance.data["representations"].append(thumbnail_repre)

    def _rename_output_files(
        self, filepaths_by_frame, mark_in, mark_out, output_frame_start
    ):
        new_filepaths_by_frame = rename_filepaths_by_frame_start(
            filepaths_by_frame, mark_in, mark_out, output_frame_start
        )

        repre_filenames = []
        for filepath in new_filepaths_by_frame.values():
            repre_filenames.append(os.path.basename(filepath))

        if mark_in < output_frame_start:
            repre_filenames = list(reversed(repre_filenames))

        return repre_filenames

    def add_render_review_command(
        self,
        tvpaint_commands,
        mark_in,
        mark_out,
        scene_bg_color,
        job_root_template,
        filename_template
    ):
        """Export images from TVPaint using `tv_savesequence` command.

        Args:
            tvpaint_commands (SenderTVPaintCommands): Commands wrapper where
                render commands are added.
            mark_in (int): Starting frame index from which export will begin.
            mark_out (int): On which frame index export will end.
            scene_bg_color (list): Bg color set in scene. Result of george
                script command `tv_background`.
            job_root_template (str): Template of job root path.
            filename_template (str): Template for output frame filenames.
        """
        self.log.debug("Preparing data for rendering.")
        bg_color = self._get_review_bg_color()
        first_frame_filepath = "/".join([
            job_root_template,
            filename_template.format(frame=mark_in)
        ])

        george_script_lines = [
            # Change bg color to color from settings
            "tv_background \"color\" {} {} {}".format(*bg_color),
            "tv_SaveMode \"PNG\"",
            "export_path = \"{}\"".format(
                first_frame_filepath.replace("\\", "/")
            ),
            "tv_savesequence '\"'export_path'\"' {} {}".format(
                mark_in, mark_out
            )
        ]
        if scene_bg_color:
            # Change bg color back to previous scene bg color
            _scene_bg_color = copy.deepcopy(scene_bg_color)
            bg_type = _scene_bg_color.pop(0)
            orig_color_command = [
                "tv_background",
                "\"{}\"".format(bg_type)
            ]
            orig_color_command.extend(_scene_bg_color)

            george_script_lines.append(" ".join(orig_color_command))

        tvpaint_commands.add_command(
            ExecuteGeorgeScript(
                george_script_lines,
                root_dir_key=self.job_queue_root_key
            )
        )

    def add_render_command(
        self,
        tvpaint_commands,
        job_root_template,
        staging_dir,
        layers,
        extraction_data_by_layer_id
    ):
        """Export images of single layers from TVPaint.

        Args:
            tvpaint_commands (SenderTVPaintCommands): Commands wrapper where
                render commands are added.
            job_root_template (str): Template of job root path.
            staging_dir (str): Directory where files will be stored.
            layers (list): List of layers to be exported.
            extraction_data_by_layer_id (dict): Extraction data by layer id.

        Returns:
            dict: Mapping of layer id to rendered filepaths by frame index.
        """
        # Map layers by their id
        layers_by_id = {
            layer["layer_id"]: layer
            for layer in layers
        }

        # Render layers
        filepaths_by_layer_id = {}
        for layer_id, render_data in extraction_data_by_layer_id.items():
            layer = layers_by_id[layer_id]
            frame_references = render_data["frame_references"]
            filenames_by_frame_index = render_data["filenames_by_frame_index"]

            filepaths_by_frame = {}
            command_filepath_by_frame = {}
            for frame_idx, ref_idx in frame_references.items():
                # None reference is skipped because it does not have a source
                if ref_idx is None:
                    filepaths_by_frame[frame_idx] = None
                    continue
                filename = filenames_by_frame_index[frame_idx]

                filepaths_by_frame[frame_idx] = os.path.join(
                    staging_dir, filename
                )
                if frame_idx == ref_idx:
                    command_filepath_by_frame[frame_idx] = "/".join(
                        [job_root_template, filename]
                    )

            self._add_render_layer_command(
                tvpaint_commands, layer, command_filepath_by_frame
            )
            filepaths_by_layer_id[layer_id] = filepaths_by_frame

        return filepaths_by_layer_id

    def _add_render_layer_command(
        self, tvpaint_commands, layer, filepaths_by_frame
    ):
        george_script_lines = [
            # Set current layer by position
            "tv_layergetid {}".format(layer["position"]),
            "layer_id = result",
            "tv_layerset layer_id",
            "tv_SaveMode \"PNG\""
        ]

        for frame_idx, filepath in filepaths_by_frame.items():
            if filepath is None:
                continue

            # Go to frame
            george_script_lines.append("tv_layerImage {}".format(frame_idx))
            # Store image to output
            george_script_lines.append(
                "tv_saveimage \"{}\"".format(filepath.replace("\\", "/"))
            )

        tvpaint_commands.add_command(
            ExecuteGeorgeScript(
                george_script_lines,
                root_dir_key=self.job_queue_root_key
            )
        )

    def _finish_layer_render(
        self,
        layers,
        extraction_data_by_layer_id,
        filepaths_by_layer_id,
        mark_in,
        mark_out,
        output_filepaths_by_frame_idx
    ):
        # Fill frames between `frame_start_index` and `frame_end_index`
        self.log.debug("Filling frames that were not rendered.")
        for layer_id, render_data in extraction_data_by_layer_id.items():
            frame_references = render_data["frame_references"]
            filepaths_by_frame = filepaths_by_layer_id[layer_id]
            fill_reference_frames(frame_references, filepaths_by_frame)

        # Prepare final filepaths where compositing should store result
        self.log.info("Started compositing of layer frames.")
        composite_rendered_layers(
            layers, filepaths_by_layer_id,
            mark_in, mark_out,
            output_filepaths_by_frame_idx
        )

    def _create_thumbnail(self, thumbnail_src_path, thumbnail_filepath):
        if not os.path.exists(thumbnail_src_path):
            return

        source_img = Image.open(thumbnail_src_path)

        # Composite background only on rgba images
        # - just making sure
        if source_img.mode.lower() == "rgba":
            bg_color = self._get_review_bg_color()
            self.log.debug("Adding thumbnail background color {}.".format(
                " ".join([str(val) for val in bg_color])
            ))
            bg_image = Image.new("RGBA", source_img.size, bg_color)
            thumbnail_obj = Image.alpha_composite(bg_image, source_img)
            thumbnail_obj.convert("RGB").save(thumbnail_filepath)

        else:
            self.log.info((
                "Source for thumbnail has mode \"{}\" (Expected: RGBA)."
                " Can't use thumbnail background color."
            ).format(source_img.mode))
            source_img.save(thumbnail_filepath)

    def _get_review_bg_color(self):
        red = green = blue = 255
        if self.review_bg:
            if len(self.review_bg) == 4:
                red, green, blue, _ = self.review_bg
            elif len(self.review_bg) == 3:
                red, green, blue = self.review_bg
        return (red, green, blue)
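The range fixing in `_fix_range_changes` is easiest to see with numbers. A minimal sketch with hypothetical values, mirroring the first branch above:

# Hypothetical values: scene marks cover 10 frames, instance only needs 8
mark_in, mark_out = 0, 9                            # marks_range = 9
output_frame_start, output_frame_end = 1001, 1008   # output_range = 7

output_range = output_frame_end - output_frame_start
marks_range = mark_out - mark_in

if output_range < marks_range:
    # Mark Out is lowered 9 -> 7 so only the needed 8 frames are rendered
    mark_out -= marks_range - output_range

print(mark_out)  # 7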
@ -0,0 +1,31 @@
# -*- coding: utf-8 -*-
"""Cleanup leftover files from publish."""
import os
import shutil
import pyblish.api


class CleanUpJobRoot(pyblish.api.ContextPlugin):
    """Cleans up the job root directory after a successful publish.

    Remove all files in job root as all of them should be published.
    """

    order = pyblish.api.IntegratorOrder + 1
    label = "Clean Up Job Root"
    optional = True
    active = True

    def process(self, context):
        context_staging_dir = context.data.get("contextStagingDir")
        if not context_staging_dir:
            self.log.info("Key 'contextStagingDir' is empty.")

        elif not os.path.exists(context_staging_dir):
            self.log.info((
                "Job root directory for this publish does not"
                " exist anymore \"{}\"."
            ).format(context_staging_dir))
        else:
            self.log.info("Deleting job root with all files.")
            shutil.rmtree(context_staging_dir)
@ -0,0 +1,35 @@
import pyblish.api


class ValidateWorkfileData(pyblish.api.ContextPlugin):
    """Validate that mark in and out are enabled and have right duration.

    Mark In/Out does not have to match frameStart and frameEnd but the
    duration is important.
    """

    label = "Validate Workfile Data"
    order = pyblish.api.ValidatorOrder

    def process(self, context):
        # Data collected in `CollectAvalonEntities`
        frame_start = context.data["frameStart"]
        frame_end = context.data["frameEnd"]
        handle_start = context.data["handleStart"]
        handle_end = context.data["handleEnd"]

        scene_data = context.data["sceneData"]
        scene_mark_in = scene_data["sceneMarkIn"]
        scene_mark_out = scene_data["sceneMarkOut"]

        expected_range = (
            (frame_end - frame_start + 1)
            + handle_start
            + handle_end
        )
        marks_range = scene_mark_out - scene_mark_in + 1
        if expected_range != marks_range:
            raise AssertionError((
                "Wrong Mark In/Out range."
                " Expected {} frames, got {} frames."
            ).format(expected_range, marks_range))
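A quick numeric check of the duration rule this validator enforces, using hypothetical values:

# Hypothetical asset values: frames 1001-1010 with one frame handle each side
frame_start, frame_end = 1001, 1010
handle_start = handle_end = 1

expected_range = (frame_end - frame_start + 1) + handle_start + handle_end
print(expected_range)  # 12

# A scene with Mark In 0 and Mark Out 11 spans exactly 12 frames and passes
scene_mark_in, scene_mark_out = 0, 11
assert expected_range == scene_mark_out - scene_mark_in + 1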
@ -198,6 +198,15 @@ class WebpublisherBatchPublishEndpoint(_RestApiEndpoint):
    # - filter defines command and can extend arguments dictionary
    # This is used only if 'studio_processing' is enabled on batch
    studio_processing_filters = [
        # TVPaint filter
        {
            "extensions": [".tvpp"],
            "command": "remotepublish",
            "arguments": {
                "targets": ["tvpaint_worker"]
            },
            "add_to_queue": False
        },
        # Photoshop filter
        {
            "extensions": [".psd", ".psb"],
@ -271,9 +271,17 @@ def get_linked_asset_ids(asset_doc):
    if not asset_doc:
        return output

    input_links = asset_doc["data"].get("inputsLinks") or []
    input_links = asset_doc["data"].get("inputLinks") or []
    if input_links:
        output = [item["_id"] for item in input_links]
    for item in input_links:
        # Backwards compatibility for "_id" key which was replaced with
        #   "id"
        if "_id" in item:
            link_id = item["_id"]
        else:
            link_id = item["id"]
        output.append(link_id)

    return output
@ -60,12 +60,13 @@ def path_from_representation(representation, anatomy):
        path = pipeline.format_template_with_optional_keys(
            context, template
        )
        path = os.path.normpath(path.replace("/", "\\"))

    except KeyError:
        # Template references unavailable data
        return None

    return os.path.normpath(path)
    return path


def copy_file(src_path, dst_path):
@ -179,9 +180,11 @@ def process_single_file(
    Returns:
        (collections.defaultdict, int)
    """
    # Make sure path is valid for all platforms
    src_path = os.path.normpath(src_path.replace("\\", "/"))

    if not os.path.exists(src_path):
        msg = "{} doesn't exist for {}".format(src_path,
                                               repre["_id"])
        msg = "{} doesn't exist for {}".format(src_path, repre["_id"])
        report_items["Source file was not found"].append(msg)
        return report_items, 0
@ -192,8 +195,10 @@ def process_single_file(
    else:
        delivery_path = anatomy_filled["delivery"][template_name]

    # context.representation could be .psd
    # Backwards compatibility when extension contained `.`
    delivery_path = delivery_path.replace("..", ".")
    # Make sure path is valid for all platforms
    delivery_path = os.path.normpath(delivery_path.replace("\\", "/"))

    delivery_folder = os.path.dirname(delivery_path)
    if not os.path.exists(delivery_folder):
@ -230,14 +235,14 @@ def process_sequence(
    Returns:
        (collections.defaultdict, int)
    """
    src_path = os.path.normpath(src_path.replace("\\", "/"))

    def hash_path_exist(myPath):
        res = myPath.replace('#', '*')
        glob_search_results = glob.glob(res)
        if len(glob_search_results) > 0:
            return True
        else:
            return False
        return False

    if not hash_path_exist(src_path):
        msg = "{} doesn't exist for {}".format(src_path,
@ -307,6 +312,7 @@ def process_sequence(
    else:
        delivery_path = anatomy_filled["delivery"][template_name]

    delivery_path = os.path.normpath(delivery_path.replace("\\", "/"))
    delivery_folder = os.path.dirname(delivery_path)
    dst_head, dst_tail = delivery_path.split(frame_indicator)
    dst_padding = src_collection.padding
@ -124,7 +124,7 @@ def run_subprocess(*args, **kwargs):
        if full_output:
            full_output += "\n"
        full_output += _stderr
        logger.warning(_stderr)
        logger.info(_stderr)

    if proc.returncode != 0:
        exc_msg = "Executing arguments was not successful: \"{}\"".format(args)
@ -531,12 +531,20 @@ def should_decompress(file_url):
    and we can decompress (oiiotool supported)
    """
    if oiio_supported():
        output = run_subprocess([
            get_oiio_tools_path(),
            "--info", "-v", file_url])
        return "compression: \"dwaa\"" in output or \
            "compression: \"dwab\"" in output

        try:
            output = run_subprocess([
                get_oiio_tools_path(),
                "--info", "-v", file_url])
            return "compression: \"dwaa\"" in output or \
                "compression: \"dwab\"" in output
        except RuntimeError:
            _name, ext = os.path.splitext(file_url)
            # TODO: shouldn't the list of allowed extensions be
            #   taken from an OIIO variable of supported formats
            if ext not in [".mxf"]:
                # Reraise exception
                raise
            return False
    return False
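The net effect of the change above: a failing `oiiotool --info` call no longer aborts processing for `.mxf` sources, while other extensions still re-raise. A hedged usage sketch (file paths are hypothetical):

# Hypothetical source files
for path in ("/renders/shot010.exr", "/ingest/plate010.mxf"):
    try:
        if should_decompress(path):
            print("Will decompress:", path)
    except RuntimeError:
        # Only raised for non-.mxf files; .mxf quietly returns False above
        print("oiiotool failed on:", path)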
@ -94,24 +94,27 @@ class NukeSubmitDeadline(pyblish.api.InstancePlugin):
            render_path).replace("\\", "/")
        instance.data["publishJobState"] = "Suspended"

        if instance.data.get("bakeScriptPath"):
            render_path = instance.data.get("bakeRenderPath")
            script_path = instance.data.get("bakeScriptPath")
            exe_node_name = instance.data.get("bakeWriteNodeName")
        if instance.data.get("bakingNukeScripts"):
            for baking_script in instance.data["bakingNukeScripts"]:
                render_path = baking_script["bakeRenderPath"]
                script_path = baking_script["bakeScriptPath"]
                exe_node_name = baking_script["bakeWriteNodeName"]

            # exception for slate workflow
            if "slate" in instance.data["families"]:
                self._frame_start += 1
                # exception for slate workflow
                if "slate" in instance.data["families"]:
                    self._frame_start += 1

            resp = self.payload_submit(instance,
                                       script_path,
                                       render_path,
                                       exe_node_name,
                                       response.json()
                                       )
            # Store output dir for unified publisher (filesequence)
            instance.data["deadlineSubmissionJob"] = resp.json()
            instance.data["publishJobState"] = "Suspended"
                resp = self.payload_submit(
                    instance,
                    script_path,
                    render_path,
                    exe_node_name,
                    response.json()
                )

                # Store output dir for unified publisher (filesequence)
                instance.data["deadlineSubmissionJob"] = resp.json()
                instance.data["publishJobState"] = "Suspended"

        # redefinition of families
        if "render.farm" in families:
@ -142,8 +142,7 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
        instance_transfer = {
            "slate": ["slateFrame"],
            "review": ["lutPath"],
            "render2d": ["bakeScriptPath", "bakeRenderPath",
                         "bakeWriteNodeName", "version"],
            "render2d": ["bakingNukeScripts", "version"],
            "renderlayer": ["convertToScanline"]
        }
@ -506,9 +505,9 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
        """
        representations = []
        collections, remainders = clique.assemble(exp_files)
        bake_render_path = instance.get("bakeRenderPath", [])
        bake_renders = instance.get("bakingNukeScripts", [])

        # create representation for every collected sequence
        for collection in collections:
            ext = collection.tail.lstrip(".")
            preview = False
@ -524,7 +523,7 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
                    preview = True
                    break

            if bake_render_path:
            if bake_renders:
                preview = False

            staging = os.path.dirname(list(collection)[0])
@ -596,7 +595,8 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
            })
            self._solve_families(instance, True)

            if remainder in bake_render_path:
            if (bake_renders
                    and remainder in bake_renders[0]["bakeRenderPath"]):
                rep.update({
                    "fps": instance.get("fps"),
                    "tags": ["review", "delete"]
@ -113,7 +113,7 @@ class SyncLinksToAvalon(BaseEvent):
            continue

        links.append({
            "_id": ObjectId(link_mongo_id),
            "id": ObjectId(link_mongo_id),
            "linkedBy": "ftrack",
            "type": "breakdown"
        })
@ -1479,7 +1479,7 @@ class SyncEntitiesFactory:
        mongo_id = self.ftrack_avalon_mapper.get(ftrack_link_id)
        if mongo_id is not None:
            input_links.append({
                "_id": ObjectId(mongo_id),
                "id": ObjectId(mongo_id),
                "linkedBy": "ftrack",
                "type": "breakdown"
            })
@ -27,7 +27,7 @@ class CollectUsername(pyblish.api.ContextPlugin):
    order = pyblish.api.CollectorOrder - 0.488
    label = "Collect ftrack username"
    hosts = ["webpublisher", "photoshop"]
    targets = ["remotepublish", "filespublish"]
    targets = ["remotepublish", "filespublish", "tvpaint_worker"]

    _context = None
6
openpype/modules/default_modules/job_queue/__init__.py
Normal file
@ -0,0 +1,6 @@
from .module import JobQueueModule


__all__ = (
    "JobQueueModule",
)
@ -0,0 +1,8 @@
from .server import WebServerManager
from .utils import main


__all__ = (
    "WebServerManager",
    "main"
)
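For context, a minimal sketch of launching this job server package directly; the import path follows the module location in this change and the port/host values are examples only:

# Hypothetical launcher script
from openpype.modules.default_modules.job_queue.job_server import main

# Blocks until SIGINT/SIGTERM arrives; returns 0 on clean shutdown,
# 1 when the address is already occupied
raise SystemExit(main(port=8079, host="localhost"))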
@ -0,0 +1,62 @@
import json

from aiohttp.web_response import Response


class JobQueueResource:
    def __init__(self, job_queue, server_manager):
        self.server_manager = server_manager

        self._prefix = "/api"

        self._job_queue = job_queue

        self.endpoint_defs = (
            ("POST", "/jobs", self.post_job),
            ("GET", "/jobs", self.get_jobs),
            ("GET", "/jobs/{job_id}", self.get_job)
        )

        self.register()

    def register(self):
        for methods, url, callback in self.endpoint_defs:
            final_url = self._prefix + url
            self.server_manager.add_route(
                methods, final_url, callback
            )

    async def get_jobs(self, request):
        jobs_data = []
        for job in self._job_queue.get_jobs():
            jobs_data.append(job.status())
        return Response(status=200, body=self.encode(jobs_data))

    async def post_job(self, request):
        data = await request.json()
        host_name = data.get("host_name")
        if not host_name:
            return Response(
                status=400, text="Key \"host_name\" not filled."
            )

        job = self._job_queue.create_job(host_name, data)
        return Response(status=201, text=job.id)

    async def get_job(self, request):
        job_id = request.match_info["job_id"]
        content = self._job_queue.get_job_status(job_id)
        if content is None:
            content = {}
        return Response(
            status=200,
            body=self.encode(content),
            content_type="application/json"
        )

    @classmethod
    def encode(cls, data):
        return json.dumps(
            data,
            indent=4
        ).encode("utf-8")
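A hedged sketch of how a client might talk to these endpoints over HTTP, assuming the server runs on localhost:8079 as in the defaults elsewhere in this change (payload keys other than the required "host_name" are examples only):

import json
import urllib.request

BASE = "http://localhost:8079/api"

# Create a job for a hypothetical "tvpaint" worker; any extra keys travel
# with the job as its data payload
payload = json.dumps({"host_name": "tvpaint", "workfile": "scene.tvpp"})
req = urllib.request.Request(
    BASE + "/jobs",
    data=payload.encode("utf-8"),
    headers={"Content-Type": "application/json"},
    method="POST"
)
with urllib.request.urlopen(req) as resp:
    job_id = resp.read().decode("utf-8")  # body of the 201 answer is the job id

# Poll the job status until a worker picks it up and finishes it
with urllib.request.urlopen("{}/jobs/{}".format(BASE, job_id)) as resp:
    print(json.loads(resp.read()))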
240
openpype/modules/default_modules/job_queue/job_server/jobs.py
Normal file
@ -0,0 +1,240 @@
import datetime
import collections
from uuid import uuid4


class Job:
    """Job related to specific host name.

    Data must contain everything needed to finish the job.
    """
    # Remove done jobs after n days to clear memory
    keep_in_memory_days = 3

    def __init__(self, host_name, data, job_id=None, created_time=None):
        if job_id is None:
            job_id = str(uuid4())
        self._id = job_id
        if created_time is None:
            created_time = datetime.datetime.now()
        self._created_time = created_time
        self._started_time = None
        self._done_time = None
        self.host_name = host_name
        self.data = data
        self._result_data = None

        self._started = False
        self._done = False
        self._errored = False
        self._message = None
        self._deleted = False

        self._worker = None

    def keep_in_memory(self):
        if self._done_time is None:
            return True

        now = datetime.datetime.now()
        delta = now - self._done_time
        return delta.days < self.keep_in_memory_days

    @property
    def id(self):
        return self._id

    @property
    def done(self):
        return self._done

    def reset(self):
        self._started = False
        self._started_time = None
        self._done = False
        self._done_time = None
        self._errored = False
        self._message = None

        self._worker = None

    @property
    def started(self):
        return self._started

    @property
    def deleted(self):
        return self._deleted

    def set_deleted(self):
        self._deleted = True
        self.set_worker(None)

    def set_worker(self, worker):
        if worker is self._worker:
            return

        if self._worker is not None:
            self._worker.set_current_job(None)

        self._worker = worker
        if worker is not None:
            worker.set_current_job(self)

    def set_started(self):
        self._started_time = datetime.datetime.now()
        self._started = True

    def set_done(self, success=True, message=None, data=None):
        self._done = True
        self._done_time = datetime.datetime.now()
        self._errored = not success
        self._message = message
        self._result_data = data
        if self._worker is not None:
            self._worker.set_current_job(None)

    def status(self):
        worker_id = None
        if self._worker is not None:
            worker_id = self._worker.id
        output = {
            "id": self.id,
            "worker_id": worker_id,
            "done": self._done
        }
        output["message"] = self._message or None

        state = "waiting"
        if self._deleted:
            state = "deleted"
        elif self._errored:
            state = "error"
        elif self._done:
            state = "done"
        elif self._started:
            state = "started"

        output["result"] = self._result_data

        output["state"] = state

        return output


class JobQueue:
    """Queue holds jobs that should be done and workers that can do them.

    Also assigns jobs to workers.
    """
    old_jobs_check_minutes_interval = 30

    def __init__(self):
        self._last_old_jobs_check = datetime.datetime.now()
        self._jobs_by_id = {}
        self._job_queue_by_host_name = collections.defaultdict(
            collections.deque
        )
        self._workers_by_id = {}
        self._workers_by_host_name = collections.defaultdict(list)

    def workers(self):
        """All currently registered workers."""
        return self._workers_by_id.values()

    def add_worker(self, worker):
        host_name = worker.host_name
        print("Added new worker for \"{}\"".format(host_name))
        self._workers_by_id[worker.id] = worker
        self._workers_by_host_name[host_name].append(worker)

    def get_worker(self, worker_id):
        return self._workers_by_id.get(worker_id)

    def remove_worker(self, worker):
        # Look if worker had an assigned job to do
        job = worker.current_job
        if job is not None and not job.done:
            # Reset job
            job.set_worker(None)
            job.reset()
            # Add job back to queue
            self._job_queue_by_host_name[job.host_name].appendleft(job)

        # Remove worker from registered workers
        self._workers_by_id.pop(worker.id, None)
        host_name = worker.host_name
        if worker in self._workers_by_host_name[host_name]:
            self._workers_by_host_name[host_name].remove(worker)

        print("Removed worker for \"{}\"".format(host_name))

    def assign_jobs(self):
        """Try to assign a job to each idle worker.

        Error all jobs without a matching worker.
        """
        available_host_names = set()
        for worker in self._workers_by_id.values():
            host_name = worker.host_name
            available_host_names.add(host_name)
            if worker.is_idle():
                jobs = self._job_queue_by_host_name[host_name]
                while jobs:
                    job = jobs.popleft()
                    if not job.deleted:
                        worker.set_current_job(job)
                        break

        for host_name in tuple(self._job_queue_by_host_name.keys()):
            if host_name in available_host_names:
                continue

            jobs_deque = self._job_queue_by_host_name[host_name]
            message = "No available workers for \"{}\"".format(host_name)
            while jobs_deque:
                job = jobs_deque.popleft()
                if not job.deleted:
                    job.set_done(False, message)
        self._remove_old_jobs()

    def get_jobs(self):
        return self._jobs_by_id.values()

    def get_job(self, job_id):
        """Job by its id."""
        return self._jobs_by_id.get(job_id)

    def create_job(self, host_name, job_data):
        """Create new job from passed data and add it to queue."""
        job = Job(host_name, job_data)
        self._jobs_by_id[job.id] = job
        self._job_queue_by_host_name[host_name].append(job)
        return job

    def _remove_old_jobs(self):
        """Once in a specific interval check if old finished jobs should be
        removed."""
        delta = datetime.datetime.now() - self._last_old_jobs_check
        if delta.total_seconds() < self.old_jobs_check_minutes_interval * 60:
            return
        self._last_old_jobs_check = datetime.datetime.now()

        for job_id in tuple(self._jobs_by_id.keys()):
            job = self._jobs_by_id[job_id]
            if not job.keep_in_memory():
                self._jobs_by_id.pop(job_id)

    def remove_job(self, job_id):
        """Delete job and eventually stop it."""
        job = self._jobs_by_id.get(job_id)
        if job is None:
            return

        job.set_deleted()
        self._jobs_by_id.pop(job.id)

    def get_job_status(self, job_id):
        """Job's status based on id."""
        job = self._jobs_by_id.get(job_id)
        if job is None:
            return {}
        return job.status()
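To illustrate the queue lifecycle without the web layer, a minimal in-process sketch; `DummyWorker` is a hypothetical stand-in for the websocket-backed `Worker` class defined later in this change:

class DummyWorker:
    """Hypothetical worker implementing the interface JobQueue expects."""
    def __init__(self, host_name):
        self.id = host_name + "-worker"
        self.host_name = host_name
        self.current_job = None

    def is_idle(self):
        return self.current_job is None

    def set_current_job(self, job):
        self.current_job = job
        if job is not None:
            job.set_started()


queue = JobQueue()
queue.add_worker(DummyWorker("tvpaint"))
job = queue.create_job("tvpaint", {"workfile": "scene.tvpp"})
queue.assign_jobs()
print(job.status()["state"])  # "started"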
154
openpype/modules/default_modules/job_queue/job_server/server.py
Normal file
@ -0,0 +1,154 @@
import threading
import asyncio
import logging

from aiohttp import web

from .jobs import JobQueue
from .job_queue_route import JobQueueResource
from .workers_rpc_route import WorkerRpc

log = logging.getLogger(__name__)


class WebServerManager:
    """Manager that takes care of the web server thread."""
    def __init__(self, port, host, loop=None):
        self.port = port
        self.host = host
        self.app = web.Application()
        if loop is None:
            loop = asyncio.new_event_loop()

        # add route with multiple methods for single "external app"
        self.webserver_thread = WebServerThread(self, loop)

    @property
    def url(self):
        return "http://{}:{}".format(self.host, self.port)

    def add_route(self, *args, **kwargs):
        self.app.router.add_route(*args, **kwargs)

    def add_static(self, *args, **kwargs):
        self.app.router.add_static(*args, **kwargs)

    def start_server(self):
        if self.webserver_thread and not self.webserver_thread.is_alive():
            self.webserver_thread.start()

    def stop_server(self):
        if not self.is_running:
            return

        try:
            log.debug("Stopping Web server")
            self.webserver_thread.stop()

        except Exception as exc:
            print("Errored", str(exc))
            log.warning(
                "Error happened while stopping Web server",
                exc_info=True
            )

    @property
    def is_running(self):
        if self.webserver_thread is not None:
            return self.webserver_thread.is_running
        return False


class WebServerThread(threading.Thread):
    """Listener for requests in a thread."""
    def __init__(self, manager, loop):
        super(WebServerThread, self).__init__()

        self._is_running = False
        self._stopped = False
        self.manager = manager
        self.loop = loop
        self.runner = None
        self.site = None

        job_queue = JobQueue()
        self.job_queue_route = JobQueueResource(job_queue, manager)
        self.workers_route = WorkerRpc(job_queue, manager, loop=loop)

    @property
    def port(self):
        return self.manager.port

    @property
    def host(self):
        return self.manager.host

    @property
    def stopped(self):
        return self._stopped

    @property
    def is_running(self):
        return self._is_running

    def run(self):
        self._is_running = True

        try:
            log.info("Starting WebServer server")
            asyncio.set_event_loop(self.loop)
            self.loop.run_until_complete(self.start_server())

            asyncio.ensure_future(self.check_shutdown(), loop=self.loop)
            self.loop.run_forever()

        except Exception:
            log.warning(
                "Web Server service has failed", exc_info=True
            )
        finally:
            self.loop.close()

        self._is_running = False
        log.info("Web server stopped")

    async def start_server(self):
        """Starts runner and TCPSite."""
        self.runner = web.AppRunner(self.manager.app)
        await self.runner.setup()
        self.site = web.TCPSite(self.runner, self.host, self.port)
        await self.site.start()

    def stop(self):
        """Sets _stopped flag to True; 'check_shutdown' shuts the server down."""
        self._stopped = True

    async def check_shutdown(self):
        """Future that runs periodically and checks if the server should
        keep running.
        """
        while not self._stopped:
            await asyncio.sleep(0.5)

        print("Starting shutdown")
        if self.workers_route:
            await self.workers_route.stop()

        print("Stopping site")
        await self.site.stop()
        print("Site stopped")
        await self.runner.cleanup()

        print("Runner stopped")
        tasks = [
            task
            for task in asyncio.all_tasks()
            if task is not asyncio.current_task()
        ]
        list(map(lambda task: task.cancel(), tasks))  # cancel all the tasks
        results = await asyncio.gather(*tasks, return_exceptions=True)
        log.debug(f'Finished awaiting cancelled tasks, results: {results}...')
        await self.loop.shutdown_asyncgens()
        # to really make sure everything else has time to stop
        await asyncio.sleep(0.07)
        self.loop.stop()
@@ -0,0 +1,51 @@
import sys
import signal
import time
import socket

from .server import WebServerManager


class SharedObjects:
    stopped = False

    @classmethod
    def stop(cls):
        cls.stopped = True


def main(port=None, host=None):
    def signal_handler(sig, frame):
        print("Signal to kill process received. Termination starts.")
        SharedObjects.stop()

    signal.signal(signal.SIGINT, signal_handler)
    signal.signal(signal.SIGTERM, signal_handler)

    port = int(port or 8079)
    host = str(host or "localhost")

    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as con:
        result_of_check = con.connect_ex((host, port))

    if result_of_check == 0:
        print((
            "Server {}:{} is already running or address is occupied."
        ).format(host, port))
        return 1

    print("Running server {}:{}".format(host, port))
    manager = WebServerManager(port, host)
    manager.start_server()

    stopped = False
    while manager.is_running:
        if not stopped and SharedObjects.stopped:
            stopped = True
            manager.stop_server()
        time.sleep(0.1)
    return 0


if __name__ == "__main__":
    sys.exit(main())
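The loop in main() only exits once a signal flips SharedObjects.stopped. A minimal sketch of driving the same shutdown path from Python instead of a signal (the import path is an assumption based on this commit's file layout):

    import threading
    import time

    # assumed import path for the new job server entry point
    from openpype.modules.default_modules.job_queue.job_server import (
        SharedObjects, main
    )

    # Run the blocking main() in a thread, then stop it the same way the
    # signal handler would: by flipping the shared "stopped" flag.
    server_thread = threading.Thread(target=main, kwargs={"port": 8079})
    server_thread.start()
    time.sleep(5)
    SharedObjects.stop()  # main() sees the flag and calls manager.stop_server()
    server_thread.join()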
openpype/modules/default_modules/job_queue/job_server/workers.py (new file, 122 lines)

@@ -0,0 +1,122 @@
import asyncio
from uuid import uuid4
from aiohttp import WSCloseCode
from aiohttp_json_rpc.protocol import encode_request


class WorkerState:
    IDLE = object()
    JOB_ASSIGNED = object()
    JOB_SENT = object()


class Worker:
    """Worker that can handle jobs of a specific host."""
    def __init__(self, host_name, http_request):
        self._id = None
        self.host_name = host_name
        self._http_request = http_request
        self._state = WorkerState.IDLE
        self._job = None

        # Give ability to send requests to worker
        http_request.request_id = str(uuid4())
        http_request.pending_requests = {}

    async def send_job(self):
        if self._job is not None:
            data = {
                "job_id": self._job.id,
                "worker_id": self.id,
                "data": self._job.data
            }
            return await self.call("start_job", data)
        return False

    async def call(self, method, params=None, timeout=None):
        """Call method on worker's side."""
        request_id = self._http_request.request_id
        self._http_request.request_id = str(uuid4())
        pending_requests = self._http_request.pending_requests
        pending_requests[request_id] = asyncio.Future()

        request = encode_request(method, id=request_id, params=params)

        await self._http_request.ws.send_str(request)

        if timeout:
            await asyncio.wait_for(
                pending_requests[request_id],
                timeout=timeout
            )
        else:
            await pending_requests[request_id]

        result = pending_requests[request_id].result()
        del pending_requests[request_id]

        return result

    async def close(self):
        return await self.ws.close(
            code=WSCloseCode.GOING_AWAY,
            message="Server shutdown"
        )

    @property
    def id(self):
        if self._id is None:
            self._id = str(uuid4())
        return self._id

    @property
    def state(self):
        return self._state

    @property
    def current_job(self):
        return self._job

    @property
    def http_request(self):
        return self._http_request

    @property
    def ws(self):
        return self.http_request.ws

    def connection_is_alive(self):
        if self.ws.closed or self.ws._writer.transport.is_closing():
            return False
        return True

    def is_idle(self):
        return self._state is WorkerState.IDLE

    def job_assigned(self):
        return (
            self._state is WorkerState.JOB_ASSIGNED
            or self._state is WorkerState.JOB_SENT
        )

    def is_working(self):
        return self._state is WorkerState.JOB_SENT

    def set_current_job(self, job):
        if job is self._job:
            return

        self._job = job
        if job is None:
            self._set_idle()
        else:
            self._state = WorkerState.JOB_ASSIGNED
            job.set_worker(self)

    def _set_idle(self):
        self._job = None
        self._state = WorkerState.IDLE

    def set_working(self):
        self._state = WorkerState.JOB_SENT
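How call() above resolves: it parks an asyncio.Future under the current request id, sends the encoded JSON-RPC request over the websocket, and the future is completed later when the matching response arrives (see _handle_rpc_msg in the RPC class below). A sketch of awaiting a worker-side method with a timeout, mirroring send_job:

    import asyncio

    async def send_job_with_timeout(worker, job, timeout=10):
        # Same payload shape as Worker.send_job above
        data = {"job_id": job.id, "worker_id": worker.id, "data": job.data}
        try:
            # Resolves when the worker's JSON-RPC response arrives
            return await worker.call("start_job", data, timeout=timeout)
        except asyncio.TimeoutError:
            return False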

@@ -0,0 +1,124 @@
import asyncio

import aiohttp
from aiohttp_json_rpc import JsonRpc
from aiohttp_json_rpc.protocol import (
    encode_error, decode_msg, JsonRpcMsgTyp
)
from aiohttp_json_rpc.exceptions import RpcError
from .workers import Worker


class WorkerRpc(JsonRpc):
    def __init__(self, job_queue, manager, **kwargs):
        super().__init__(**kwargs)

        self._job_queue = job_queue
        self._manager = manager

        self._stopped = False

        # Register methods
        self.add_methods(
            ("", self.register_worker),
            ("", self.job_done)
        )
        asyncio.ensure_future(self._rpc_loop(), loop=self.loop)

        self._manager.add_route(
            "*", "/ws", self.handle_request
        )

    # Panel routes for tools
    async def register_worker(self, request, host_name):
        worker = Worker(host_name, request.http_request)
        self._job_queue.add_worker(worker)
        return worker.id

    async def _rpc_loop(self):
        while self.loop.is_running():
            if self._stopped:
                break

            for worker in tuple(self._job_queue.workers()):
                if not worker.connection_is_alive():
                    self._job_queue.remove_worker(worker)
            self._job_queue.assign_jobs()

            await self.send_jobs()
            await asyncio.sleep(5)

    async def job_done(self, worker_id, job_id, success, message, data):
        worker = self._job_queue.get_worker(worker_id)
        if worker is not None:
            worker.set_current_job(None)

        job = self._job_queue.get_job(job_id)
        if job is not None:
            job.set_done(success, message, data)
        return True

    async def send_jobs(self):
        invalid_workers = []
        for worker in self._job_queue.workers():
            if worker.job_assigned() and not worker.is_working():
                try:
                    await worker.send_job()

                except ConnectionResetError:
                    invalid_workers.append(worker)

        for worker in invalid_workers:
            self._job_queue.remove_worker(worker)

    async def handle_websocket_request(self, http_request):
        """Override this method to catch CLOSING messages."""
        http_request.msg_id = 0
        http_request.pending = {}

        # prepare and register websocket
        ws = aiohttp.web_ws.WebSocketResponse()
        await ws.prepare(http_request)
        http_request.ws = ws
        self.clients.append(http_request)

        while not ws.closed:
            self.logger.debug('waiting for messages')
            raw_msg = await ws.receive()

            if raw_msg.type == aiohttp.WSMsgType.TEXT:
                self.logger.debug('raw msg received: %s', raw_msg.data)
                self.loop.create_task(
                    self._handle_rpc_msg(http_request, raw_msg)
                )

            elif raw_msg.type == aiohttp.WSMsgType.CLOSING:
                break

        self.clients.remove(http_request)
        return ws

    async def _handle_rpc_msg(self, http_request, raw_msg):
        # This is duplicated code from super, but there is no other way
        # to be able to handle server->client requests
        try:
            _raw_message = raw_msg.data
            msg = decode_msg(_raw_message)

        except RpcError as error:
            await self._ws_send_str(http_request, encode_error(error))
            return

        if msg.type in (JsonRpcMsgTyp.RESULT, JsonRpcMsgTyp.ERROR):
            request_id = msg.data["id"]
            if request_id in http_request.pending_requests:
                future = http_request.pending_requests[request_id]
                future.set_result(msg.data["result"])
                return

        return await super()._handle_rpc_msg(http_request, raw_msg)

    async def stop(self):
        self._stopped = True
        for worker in tuple(self._job_queue.workers()):
            await worker.close()
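For orientation, the two registered methods speak plain JSON-RPC over the "/ws" route; a hedged example of the expected traffic (the exact envelope is produced by aiohttp_json_rpc, ids and values are illustrative):

    --> {"jsonrpc": "2.0", "id": 1, "method": "register_worker", "params": ["tvpaint"]}
    <-- {"jsonrpc": "2.0", "id": 1, "result": "6f1d..."}   (the worker id)
    --> {"jsonrpc": "2.0", "id": 2, "method": "job_done", "params": ["6f1d...", "job-id", true, null, {}]}
    <-- {"jsonrpc": "2.0", "id": 2, "result": true}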

@@ -0,0 +1,5 @@
from .base_worker import WorkerJobsConnection

__all__ = (
    "WorkerJobsConnection",
)

@@ -0,0 +1,190 @@
import sys
import datetime
import asyncio
import traceback

from aiohttp_json_rpc import JsonRpcClient


class WorkerClient(JsonRpcClient):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        self.add_methods(
            ("", self.start_job),
        )
        self.current_job = None
        self._id = None

    def set_id(self, worker_id):
        self._id = worker_id

    async def start_job(self, job_data):
        if self.current_job is not None:
            return False

        print("Got new job {}".format(str(job_data)))
        self.current_job = job_data
        return True

    def finish_job(self, success, message, data):
        asyncio.ensure_future(
            self._finish_job(success, message, data),
            loop=self._loop
        )

    async def _finish_job(self, success, message, data):
        print("Current job", self.current_job)
        job_id = self.current_job["job_id"]
        self.current_job = None

        return await self.call(
            "job_done", [self._id, job_id, success, message, data]
        )


class WorkerJobsConnection:
    """WS connection to Job server.

    Helper class to create a connection to process jobs from job server.

    To be able to receive jobs it is needed to create a connection and then
    register as a worker for a specific host.
    """
    retry_time_seconds = 5

    def __init__(self, server_url, host_name, loop=None):
        self.client = None
        self._loop = loop

        self._host_name = host_name
        self._server_url = server_url

        self._is_running = False
        self._connecting = False
        self._connected = False
        self._stopped = False

    def stop(self):
        print("Stopping worker")
        self._stopped = True

    @property
    def is_running(self):
        return self._is_running

    @property
    def current_job(self):
        if self.client is not None:
            return self.client.current_job
        return None

    def finish_job(self, success=True, message=None, data=None):
        """Worker finished job and sets the result which is sent to server."""
        if self.client is None:
            print((
                "Couldn't send job status to server because"
                " client is not connected."
            ))
        else:
            self.client.finish_job(success, message, data)

    async def main_loop(self, register_worker=True):
        """Main loop of connection which keeps the connection to server alive."""
        self._is_running = True

        while not self._stopped:
            start_time = datetime.datetime.now()
            await self._connection_loop(register_worker)
            delta = datetime.datetime.now() - start_time
            print("Connection loop took {}s".format(str(delta)))
            # Check if it was stopped and stop the while loop in that case
            if self._stopped:
                break

            if delta.seconds < 60:
                print((
                    "Can't connect to server, will try again in {} seconds."
                ).format(self.retry_time_seconds))

                await asyncio.sleep(self.retry_time_seconds)
        self._is_running = False

    async def _connect(self):
        self.client = WorkerClient()
        print("Connecting to {}".format(self._server_url))
        try:
            await self.client.connect_url(self._server_url)
        except KeyboardInterrupt:
            raise
        except Exception:
            traceback.print_exception(*sys.exc_info())

    async def _connection_loop(self, register_worker):
        self._connecting = True
        future = asyncio.run_coroutine_threadsafe(
            self._connect(), loop=self._loop
        )

        while self._connecting:
            if not future.done():
                await asyncio.sleep(0.07)
                continue

            session = getattr(self.client, "_session", None)
            ws = getattr(self.client, "_ws", None)
            if session is not None:
                if session.closed:
                    self._connecting = False
                    self._connected = False
                    break

                elif ws is not None:
                    self._connecting = False
                    self._connected = True

            if self._stopped:
                break

            await asyncio.sleep(0.07)

        if not self._connected:
            self.client = None
            return

        print("Connected to job queue server")
        if register_worker:
            self.register_as_worker()

        while self._connected and self._loop.is_running():
            if self._stopped or ws.closed:
                break

            await asyncio.sleep(0.3)

        await self._stop_cleanup()

    def register_as_worker(self):
        """Register as worker ready to work on server side."""
        asyncio.ensure_future(self._register_as_worker(), loop=self._loop)

    async def _register_as_worker(self):
        worker_id = await self.client.call(
            "register_worker", [self._host_name]
        )
        self.client.set_id(worker_id)
        print(
            "Registered as worker with id {}".format(worker_id)
        )

    async def disconnect(self):
        await self._stop_cleanup()

    async def _stop_cleanup(self):
        print("Cleanup after stop")
        if self.client is not None and hasattr(self.client, "_ws"):
            await self.client.disconnect()

        self.client = None
        self._connecting = False
        self._connected = False
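A rough usage sketch of WorkerJobsConnection assembled from the methods above (the event-loop wiring and import path are assumptions; the real wiring lives in the host-specific worker, e.g. openpype.hosts.tvpaint.worker):

    import asyncio
    import threading

    # assumed import path, matching the package __init__ above
    from openpype.modules.default_modules.job_queue.workers import (
        WorkerJobsConnection
    )

    loop = asyncio.new_event_loop()
    connection = WorkerJobsConnection("ws://localhost:8079/ws", "tvpaint", loop)

    # main_loop() keeps reconnecting and re-registering until stop() is called
    asyncio.ensure_future(connection.main_loop(), loop=loop)
    threading.Thread(target=loop.run_forever).start()

    # ... host works on connection.current_job, then reports back:
    # connection.finish_job(success=True, message=None, data={})
    # and eventually: connection.stop()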
openpype/modules/default_modules/job_queue/module.py (new file, 241 lines)

@@ -0,0 +1,241 @@
"""Job queue OpenPype module was created for remote execution of commands.

## Why it is needed
Primarily created for hosts which are not easily controlled from the command
line or in headless mode, where it is easier to keep one running host process
listening for jobs to do.

### Example
One example is TVPaint, which does not have a headless mode, can run only one
process at a time, and where it is impossible to know what should be executed
inside TVPaint before we know all data about the file that should be processed.

## Idea
The idea is that there is a server, workers and workstation(s) which need to
process something on a worker.

Workers and workstations must have access to the server through the address of
its running instance. Workers use WebSockets and workstations use HTTP calls.
Both must also have access to the job queue root which is set in settings. The
root is used as a temp directory where files needed for a job can be stored
before sending the job, or where result files are stored when the job is done.

The server's address must be set in settings when it is running, so workers
and workstations know where to send or receive jobs.

## Command line commands
### start_server
- starts the server which handles jobs
- it is possible to specify port and host address (default is localhost:8079)

### start_worker
- starts a worker which will process jobs
- has a required positional argument which is an application name from
  OpenPype settings, e.g. 'tvpaint/11-5' ('tvpaint' is the group, '11-5' is
  the variant)
- it is possible to specify a server url, but the url from settings is used
  when not passed (this was added mainly for development purposes)
"""

import sys
import json
import copy
import platform

import click
from openpype.modules import OpenPypeModule
from openpype.api import get_system_settings


class JobQueueModule(OpenPypeModule):
    name = "job_queue"

    def initialize(self, modules_settings):
        server_url = modules_settings.get("server_url") or ""

        self._server_url = self.url_conversion(server_url)
        jobs_root_mapping = self._roots_mapping_conversion(
            modules_settings.get("jobs_root")
        )

        self._jobs_root_mapping = jobs_root_mapping

        # Is always enabled
        # - the module does nothing until it is used
        self.enabled = True

    @classmethod
    def _root_conversion(cls, root_path):
        """Make sure root path does not end with slash."""
        # Return empty string if path is invalid
        if not root_path:
            return ""

        # Remove all trailing slashes
        while root_path.endswith("/") or root_path.endswith("\\"):
            root_path = root_path[:-1]
        return root_path

    @classmethod
    def _roots_mapping_conversion(cls, roots_mapping):
        roots_mapping = roots_mapping or {}
        for platform_name in ("windows", "linux", "darwin"):
            roots_mapping[platform_name] = cls._root_conversion(
                roots_mapping.get(platform_name)
            )
        return roots_mapping

    @staticmethod
    def url_conversion(url, ws=False):
        if sys.version_info[0] == 2:
            from urlparse import urlsplit, urlunsplit
        else:
            from urllib.parse import urlsplit, urlunsplit

        if not url:
            return url

        url_parts = list(urlsplit(url))
        scheme = url_parts[0]
        if not scheme:
            if ws:
                url = "ws://{}".format(url)
            else:
                url = "http://{}".format(url)
            url_parts = list(urlsplit(url))

        elif ws:
            if scheme not in ("ws", "wss"):
                if scheme == "https":
                    url_parts[0] = "wss"
                else:
                    url_parts[0] = "ws"

        elif scheme not in ("http", "https"):
            if scheme == "wss":
                url_parts[0] = "https"
            else:
                url_parts[0] = "http"

        return urlunsplit(url_parts)

    def get_jobs_root_mapping(self):
        return copy.deepcopy(self._jobs_root_mapping)

    def get_jobs_root(self):
        return self._jobs_root_mapping.get(platform.system().lower())

    @classmethod
    def get_jobs_root_from_settings(cls):
        module_settings = get_system_settings()["modules"]
        jobs_root_mapping = module_settings.get(cls.name, {}).get("jobs_root")
        converted_mapping = cls._roots_mapping_conversion(jobs_root_mapping)

        return converted_mapping[platform.system().lower()]

    @property
    def server_url(self):
        return self._server_url

    def send_job(self, host_name, job_data):
        import requests

        job_data = job_data or {}
        job_data["host_name"] = host_name
        api_path = "{}/api/jobs".format(self._server_url)
        post_request = requests.post(api_path, data=json.dumps(job_data))
        return str(post_request.content.decode())

    def get_job_status(self, job_id):
        import requests

        api_path = "{}/api/jobs/{}".format(self._server_url, job_id)
        return requests.get(api_path).json()

    def cli(self, click_group):
        click_group.add_command(cli_main)

    @classmethod
    def get_server_url_from_settings(cls):
        module_settings = get_system_settings()["modules"]
        return cls.url_conversion(
            module_settings
            .get(cls.name, {})
            .get("server_url")
        )

    @classmethod
    def start_server(cls, port=None, host=None):
        from .job_server import main

        return main(port, host)

    @classmethod
    def start_worker(cls, app_name, server_url=None):
        import requests
        from openpype.lib import ApplicationManager

        if not server_url:
            server_url = cls.get_server_url_from_settings()

        if not server_url:
            raise ValueError("Server url is not set.")

        http_server_url = cls.url_conversion(server_url)

        # Validate url
        requests.get(http_server_url)

        ws_server_url = cls.url_conversion(server_url) + "/ws"

        app_manager = ApplicationManager()
        app = app_manager.applications.get(app_name)
        if app is None:
            raise ValueError(
                "Didn't find application \"{}\" in settings.".format(app_name)
            )

        if app.host_name == "tvpaint":
            return cls._start_tvpaint_worker(app, ws_server_url)
        raise ValueError("Unknown host \"{}\"".format(app.host_name))

    @classmethod
    def _start_tvpaint_worker(cls, app, server_url):
        from openpype.hosts.tvpaint.worker import main

        executable = app.find_executable()
        if not executable:
            raise ValueError((
                "Executable for app \"{}\" is not set"
                " or accessible on this workstation."
            ).format(app.full_name))

        return main(str(executable), server_url)


@click.group(
    JobQueueModule.name,
    help="Application job server. Can be used as render farm."
)
def cli_main():
    pass


@cli_main.command(
    "start_server",
    help="Start server handling workers and their jobs."
)
@click.option("--port", help="Server port")
@click.option("--host", help="Server host (ip address)")
def cli_start_server(port, host):
    JobQueueModule.start_server(port, host)


@cli_main.command(
    "start_worker", help=(
        "Start a worker for a specific application. (e.g. \"tvpaint/11.5\")"
    )
)
@click.argument("app_name")
@click.option("--server_url", help="Server url which handles workers and jobs.")
def cli_start_worker(app_name, server_url):
    JobQueueModule.start_worker(app_name, server_url)
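Putting the module docstring and the click group together, a hedged sketch of the two entry points (how cli() surfaces cli_main on the main OpenPype command line is an assumption; the app variant is illustrative): on the server machine something like `... job_queue start_server --port 8079`, and on each worker `... job_queue start_worker tvpaint/11-5 --server_url localhost:8079`. Note that url_conversion fills in a missing scheme, so `localhost:8079` becomes `http://localhost:8079` for the REST validation and `ws://localhost:8079` when `ws=True` is requested.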

@@ -56,6 +56,13 @@ representation.files.sites:
`db.getCollection('MY_PROJECT').update({type:"representation"},
{$set:{"files.$[].sites.MY_CONFIGURED_REMOTE_SITE" : {}}}, true, true)`

I want to create a new custom provider:
---------------------------------------
- take `providers\abstract_provider.py` as a base class
- create a provider class in `providers` with a name matching the provider (eg. 'gdrive.py' for the gdrive provider etc.)
- upload a provider icon in png format, 24x24, into `providers\resources`; its name must follow the name of the provider (eg. 'gdrive.png' for the gdrive provider)
- register the new provider in `providers.lib.py`; test how many files can be manipulated at the same time and check the provider's API for limits (a minimal skeleton is sketched below)
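A minimal sketch of such a provider module (only the base-class import is taken from the steps above; the class and method names are illustrative assumptions, so check `abstract_provider.py` for the exact interface to implement):

    # providers/mydrive.py -- hypothetical provider skeleton
    from .abstract_provider import AbstractProvider


    class MyDriveProvider(AbstractProvider):
        CODE = "mydrive"  # assumed registration key used during registration

        def is_active(self):
            # report whether configuration/credentials allow provider to run
            return True

        def upload_file(self, source_path, target_path, overwrite=False):
            raise NotImplementedError

        def download_file(self, source_path, local_path, overwrite=False):
            raise NotImplementedError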

Needed configuration:
--------------------
`pype/settings/defaults/project_settings/global.json`.`sync_server`:

@@ -7,7 +7,7 @@ import pyblish.api
class CollectModules(pyblish.api.ContextPlugin):
    """Collect OpenPype modules."""

    order = pyblish.api.CollectorOrder
    order = pyblish.api.CollectorOrder - 0.45
    label = "OpenPype Modules"

    def process(self, context):

openpype/plugins/publish/collect_scene_loaded_versions.py (new file, 55 lines)

@@ -0,0 +1,55 @@

import pyblish.api
from avalon import api, io


class CollectSceneLoadedVersions(pyblish.api.ContextPlugin):

    order = pyblish.api.CollectorOrder + 0.0001
    label = "Collect Versions Loaded in Scene"
    hosts = [
        "aftereffects",
        "blender",
        "celaction",
        "fusion",
        "harmony",
        "hiero",
        "houdini",
        "maya",
        "nuke",
        "photoshop",
        "resolve",
        "tvpaint"
    ]

    def process(self, context):
        host = api.registered_host()
        if host is None:
            self.log.warn("No registered host.")
            return

        if not hasattr(host, "ls"):
            host_name = host.__name__
            self.log.warn("Host %r doesn't have ls() implemented." % host_name)
            return

        loaded_versions = []
        _containers = list(host.ls())
        _repr_ids = [io.ObjectId(c["representation"]) for c in _containers]
        version_by_repr = {
            str(doc["_id"]): doc["parent"] for doc in
            io.find({"_id": {"$in": _repr_ids}}, projection={"parent": 1})
        }

        for con in _containers:
            # NOTE:
            # may have more than one representation that is the same version
            version = {
                "objectName": con["objectName"],  # container node name
                "subsetName": con["name"],
                "representation": io.ObjectId(con["representation"]),
                "version": version_by_repr[con["representation"]],  # _id
            }
            loaded_versions.append(version)

        context.data["loadedVersions"] = loaded_versions
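The collector leaves a list of entries like the following in context.data["loadedVersions"] (values are illustrative; "version" is the parent version id resolved through the representation lookup above):

    {
        "objectName": "chairMain_01_CON",       # container node name
        "subsetName": "modelMain",
        "representation": ObjectId("5f0c..."),
        "version": ObjectId("5f0b...")          # parent of the representation
    }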

@@ -10,7 +10,7 @@ class CollectSceneVersion(pyblish.api.ContextPlugin):
    """

    order = pyblish.api.CollectorOrder
    label = 'Collect Version'
    label = 'Collect Scene Version'
    hosts = [
        "aftereffects",
        "blender",

@@ -110,6 +110,9 @@ class ExtractBurnin(openpype.api.Extractor):
            ).format(host_name, family, task_name))
            return

        self.log.debug("profile: {}".format(profile))

        # Pre-filter burnin definitions by instance families
        burnin_defs = self.filter_burnins_defs(profile, instance)
        if not burnin_defs:

@@ -126,18 +129,41 @@ class ExtractBurnin(openpype.api.Extractor):

        anatomy = instance.context.data["anatomy"]
        scriptpath = self.burnin_script_path()

        # Executable args that will execute the script
        # [pype executable, *pype script, "run"]
        executable_args = get_pype_execute_args("run", scriptpath)

        for idx, repre in enumerate(tuple(instance.data["representations"])):
            self.log.debug("repre ({}): `{}`".format(idx + 1, repre["name"]))

            repre_burnin_links = repre.get("burnins", [])

            if not self.repres_is_valid(repre):
                continue

            self.log.debug("repre_burnin_links: {}".format(
                repre_burnin_links))

            self.log.debug("burnin_defs.keys(): {}".format(
                burnin_defs.keys()))

            # Filter output definition by `burnin` representation key
            repre_linked_burnins = {
                name: output for name, output in burnin_defs.items()
                if name in repre_burnin_links
            }
            self.log.debug("repre_linked_burnins: {}".format(
                repre_linked_burnins))

            # if any match then replace burnin defs and follow tag filtering
            _burnin_defs = copy.deepcopy(burnin_defs)
            if repre_linked_burnins:
                _burnin_defs = repre_linked_burnins

            # Filter output definition by representation tags (optional)
            repre_burnin_defs = self.filter_burnins_by_tags(
                burnin_defs, repre["tags"]
                _burnin_defs, repre["tags"]
            )
            if not repre_burnin_defs:
                self.log.info((

@@ -283,6 +309,8 @@ class ExtractBurnin(openpype.api.Extractor):
            # NOTE we maybe can keep source representation if necessary
            instance.data["representations"].remove(repre)

        self.log.debug("Files to delete: {}".format(files_to_delete))

        # Delete input files
        for filepath in files_to_delete:
            if os.path.exists(filepath):

@@ -180,6 +180,9 @@ class ExtractReview(pyblish.api.InstancePlugin):
            if "tags" not in output_def:
                output_def["tags"] = []

            if "burnins" not in output_def:
                output_def["burnins"] = []

            # Create copy of representation
            new_repre = copy.deepcopy(repre)

@@ -192,8 +195,20 @@ class ExtractReview(pyblish.api.InstancePlugin):
                if tag not in new_repre["tags"]:
                    new_repre["tags"].append(tag)

            # Add burnin link from output definition to representation
            for burnin in output_def["burnins"]:
                if burnin not in new_repre.get("burnins", []):
                    if not new_repre.get("burnins"):
                        new_repre["burnins"] = []
                    new_repre["burnins"].append(str(burnin))

            self.log.debug(
                "New representation tags: `{}`".format(new_repre["tags"])
                "Linked burnins: `{}`".format(new_repre.get("burnins"))
            )

            self.log.debug(
                "New representation tags: `{}`".format(
                    new_repre.get("tags"))
            )

            temp_data = self.prepare_temp_data(

@@ -232,12 +247,16 @@ class ExtractReview(pyblish.api.InstancePlugin):
            for f in files_to_clean:
                os.unlink(f)

            output_name = output_def["filename_suffix"]
            output_name = new_repre.get("outputName", "")
            output_ext = new_repre["ext"]
            if output_name:
                output_name += "_"
            output_name += output_def["filename_suffix"]
            if temp_data["without_handles"]:
                output_name += "_noHandles"

            new_repre.update({
                "name": output_def["filename_suffix"],
                "name": "{}_{}".format(output_name, output_ext),
                "outputName": output_name,
                "outputDef": output_def,
                "frameStartFtrack": temp_data["output_frame_start"],
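Worked example of the renaming above: a representation with outputName "h264" and ext "mov", combined with an output definition whose filename_suffix is "baking", yields output_name == "h264_baking" (plus "_noHandles" when handles are excluded) and name == "h264_baking_mov"; with no prior output name it degrades to "baking" and "baking_mov".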
openpype/plugins/publish/integrate_inputlinks.py (new file, 131 lines)

@@ -0,0 +1,131 @@

from collections import OrderedDict
from avalon import io
import pyblish.api


class IntegrateInputLinks(pyblish.api.ContextPlugin):
    """Connecting version level dependency links"""

    order = pyblish.api.IntegratorOrder + 0.2
    label = "Connect Dependency InputLinks"

    def process(self, context):
        """Connect dependency links for all instances, globally

        Code steps:
        * filter out instances that have "versionEntity" entry in data
        * find workfile instance within context
        * if workfile found:
            - link all `loadedVersions` as input of the workfile
            - link workfile as input of all publishing instances
        * else:
            - show "no workfile" warning
        * link instances' inputs if their data has an "inputVersions" entry
        * write into database

        inputVersions:
            The "inputVersions" in instance.data should be a list of
            version document ids (str or ObjectId), which are the
            dependencies of the publishing instance that should be
            extracted from working scene by the DCC specific publish
            plugin.

        """
        workfile = None
        publishing = []

        for instance in context:
            if not instance.data.get("publish", True):
                # Skip inactive instances
                continue

            version_doc = instance.data.get("versionEntity")
            if not version_doc:
                self.log.debug("Instance %s doesn't have version." % instance)
                continue

            version_data = version_doc.get("data", {})
            families = version_data.get("families", [])

            if "workfile" in families:
                workfile = instance
            else:
                publishing.append(instance)

        if workfile is None:
            self.log.warn("No workfile in this publish session.")
        else:
            workfile_version_doc = workfile.data["versionEntity"]
            # link all loaded versions in scene into workfile
            for version in context.data.get("loadedVersions", []):
                self.add_link(
                    link_type="reference",
                    input_id=version["version"],
                    version_doc=workfile_version_doc,
                )
            # link workfile to all publishing versions
            for instance in publishing:
                self.add_link(
                    link_type="generative",
                    input_id=workfile_version_doc["_id"],
                    version_doc=instance.data["versionEntity"],
                )

        # link versions as dependencies to the instance
        for instance in publishing:
            for input_version in instance.data.get("inputVersions") or []:
                self.add_link(
                    link_type="generative",
                    input_id=input_version,
                    version_doc=instance.data["versionEntity"],
                )

        if workfile is not None:
            publishing.append(workfile)
        self.write_links_to_database(publishing)

    def add_link(self, link_type, input_id, version_doc):
        """Add dependency link data into version document

        Args:
            link_type (str): Type of link, one of 'reference' or 'generative'
            input_id (str or ObjectId): Document id of the input version
            version_doc (dict): The version document that takes the input

        Returns:
            None

        """
        # NOTE:
        # using OrderedDict() here is just for ensuring field order between
        # python versions, if we ever need to use mongodb operation '$addToSet'
        # to update and avoid duplicating elements in 'inputLinks' array in the
        # future.
        link = OrderedDict()
        link["type"] = link_type
        link["id"] = io.ObjectId(input_id)
        link["linkedBy"] = "publish"

        if "inputLinks" not in version_doc["data"]:
            version_doc["data"]["inputLinks"] = []
        version_doc["data"]["inputLinks"].append(link)

    def write_links_to_database(self, instances):
        """Iterate instances in context to update database

        If `versionEntity.data.inputLinks` is not None in `instance.data`,
        the document in database will be updated.

        """
        for instance in instances:
            version_doc = instance.data.get("versionEntity")
            if version_doc is None:
                continue

            input_links = version_doc["data"].get("inputLinks")
            if input_links is None:
                continue

            io.update_one({"_id": version_doc["_id"]},
                          {"$set": {"data.inputLinks": input_links}})
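After this plugin runs, an affected version document carries a data.inputLinks array shaped like this (ids illustrative; "reference" marks loaded versions linked into the workfile, "generative" marks the workfile and inputVersions links):

    "data": {
        "inputLinks": [
            {"type": "reference",  "id": ObjectId("..."), "linkedBy": "publish"},
            {"type": "generative", "id": ObjectId("..."), "linkedBy": "publish"}
        ]
    }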

@@ -1071,10 +1071,12 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
                already_attached_sites[meta["name"]] = meta["created_dt"]

            if sync_project_presets and sync_project_presets["enabled"]:
                # add remote
                meta = {"name": remote_site.strip()}
                rec["sites"].append(meta)
                already_attached_sites[meta["name"]] = None
                if remote_site and \
                        remote_site not in already_attached_sites.keys():
                    # add remote
                    meta = {"name": remote_site.strip()}
                    rec["sites"].append(meta)
                    already_attached_sites[meta["name"]] = None

            # add skeleton for site where it should be always synced to
            for always_on_site in always_accesible:

@@ -1102,8 +1104,6 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
            local_site = local_site_id

        remote_site = sync_project_presets["config"].get("remote_site")
        if remote_site == local_site:
            remote_site = None

        if remote_site == 'local':
            remote_site = local_site_id

@@ -28,6 +28,9 @@
    "viewer": {
        "viewerProcess": "sRGB"
    },
    "baking": {
        "viewerProcess": "rec709"
    },
    "workfile": {
        "colorManagement": "Nuke",
        "OCIO_config": "nuke-default",

@@ -52,6 +52,7 @@
        "burnin",
        "ftrackreview"
    ],
    "burnins": [],
    "ffmpeg_args": {
        "video_filters": [],
        "audio_filters": [],

@@ -110,7 +110,20 @@
    },
    "ExtractReviewDataMov": {
        "enabled": true,
        "viewer_lut_raw": false
        "viewer_lut_raw": false,
        "outputs": {
            "baking": {
                "filter": {
                    "task_types": [],
                    "families": []
                },
                "extension": "mov",
                "viewer_process_override": "",
                "bake_viewer_process": true,
                "bake_viewer_input_process": true,
                "add_tags": []
            }
        }
    },
    "ExtractSlateFrame": {
        "viewer_lut_raw": false

@@ -115,6 +115,9 @@
            "default_task_type": "Default task type"
        }
    },
    "CollectTVPaintInstances": {
        "layer_name_regex": "(?P<layer>L[0-9]{3}_\\w+)_(?P<pass>.+)"
    }
}

@@ -188,5 +188,13 @@
    },
    "slack": {
        "enabled": false
    },
    "job_queue": {
        "server_url": "",
        "jobs_root": {
            "windows": "",
            "darwin": "",
            "linux": ""
        }
    }
}
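Filled-in values for this block might look like the following (all values illustrative):

    "job_queue": {
        "server_url": "http://192.168.0.10:8079",
        "jobs_root": {
            "windows": "P:/job_queue_root",
            "darwin": "/Volumes/projects/job_queue_root",
            "linux": "/mnt/projects/job_queue_root"
        }
    }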

@@ -62,8 +62,25 @@
                }
            }
        ]
    },
    {
        "type": "dict",
        "collapsible": true,
        "key": "CollectTVPaintInstances",
        "label": "Collect TVPaint Instances",
        "children": [
            {
                "type": "label",
                "label": "Regex helps to extract render layer and pass names from TVPaint layer name.<br>The regex must contain named groups <b>'layer'</b> and <b>'pass'</b> which are used for creation of RenderPass instances.<hr><br>Example layer name: <b>\"L001_Person_Hand\"</b><br>Example regex: <b>\"(?P<layer>L[0-9]{3}_\\w+)_(?P<pass>.+)\"</b><br>Extracted layer: <b>\"L001_Person\"</b><br>Extracted pass: <b>\"Hand\"</b>"
            },
            {
                "type": "text",
                "key": "layer_name_regex",
                "label": "Layer name regex"
            }
        ]
    }
]

@@ -131,6 +131,19 @@
        }
    ]
},
{
    "key": "baking",
    "type": "dict",
    "label": "Extract-review baking profile",
    "collapsible": false,
    "children": [
        {
            "type": "text",
            "key": "viewerProcess",
            "label": "Viewer Process"
        }
    ]
},
{
    "key": "workfile",
    "type": "dict",

@@ -363,7 +376,7 @@
    "key": "maya",
    "type": "dict",
    "label": "Maya",
    "children": [
    "children": [
        {
            "key": "colorManagementPreference",
            "type": "dict",

@@ -212,6 +212,12 @@
    {
        "type": "schema",
        "name": "schema_representation_tags"
    },
    {
        "key": "burnins",
        "label": "Link to a burnin by name",
        "type": "list",
        "object_type": "text"
    },
    {
        "key": "ffmpeg_args",
        "label": "FFmpeg arguments",

@@ -167,7 +167,67 @@
    {
        "type": "boolean",
        "key": "viewer_lut_raw",
        "label": "Viewer LUT raw"
    },
    {
        "key": "outputs",
        "label": "Output Definitions",
        "type": "dict-modifiable",
        "highlight_content": true,
        "object_type": {
            "type": "dict",
            "children": [
                {
                    "type": "dict",
                    "collapsible": false,
                    "key": "filter",
                    "label": "Filtering",
                    "children": [
                        {
                            "key": "task_types",
                            "label": "Task types",
                            "type": "task-types-enum"
                        },
                        {
                            "key": "families",
                            "label": "Families",
                            "type": "list",
                            "object_type": "text"
                        }
                    ]
                },
                {
                    "type": "separator"
                },
                {
                    "type": "text",
                    "key": "extension",
                    "label": "File extension"
                },
                {
                    "type": "text",
                    "key": "viewer_process_override",
                    "label": "Viewer Process colorspace profile override"
                },
                {
                    "type": "boolean",
                    "key": "bake_viewer_process",
                    "label": "Bake Viewer Process"
                },
                {
                    "type": "boolean",
                    "key": "bake_viewer_input_process",
                    "label": "Bake Viewer Input Process (LUTs)"
                },
                {
                    "key": "add_tags",
                    "label": "Add additional tags to representations",
                    "type": "list",
                    "object_type": "text"
                }
            ]
        }
    },

    ]
},
{

@@ -262,6 +262,38 @@
        }
    ]
},
{
    "type": "dict",
    "key": "job_queue",
    "label": "Job Queue",
    "require_restart": true,
    "collapsible": true,
    "children": [
        {
            "type": "label",
            "label": "Address of machine where job queue server is running."
        },
        {
            "type": "text",
            "key": "server_url",
            "label": "Server Rest URL"
        },
        {
            "type": "separator"
        },
        {
            "type": "label",
            "label": "Jobs root is used as temporary directory for workers where source is copied and render output can be stored."
        },
        {
            "key": "jobs_root",
            "label": "Jobs root",
            "type": "path",
            "multipath": false,
            "multiplatform": true
        }
    ]
},
{
    "type": "dynamic_schema",
    "name": "system_settings/modules"

@@ -1,16 +1,12 @@
import os
import sys
import json
from subprocess import Popen
try:
    import ftrack_api_old as ftrack_api
except Exception:
    import ftrack_api

import ftrack_api
from Qt import QtWidgets, QtCore
from openpype.api import get_current_project_settings
from openpype import lib as pypelib
from avalon.vendor.Qt import QtWidgets, QtCore
from openpype.tools.utils.lib import qt_app_context
from avalon import io, api, style, schema
from avalon.tools import lib as parentlib
from . import widget, model

module = sys.modules[__name__]

@@ -630,7 +626,7 @@ def show(parent=None, debug=False, context=None):
    if debug is True:
        io.install()

    with parentlib.application():
    with qt_app_context():
        window = Window(parent, context)
        window.setStyleSheet(style.load_stylesheet())
        window.show()

openpype/tools/assetlinks/__init__.py (new empty file)

openpype/tools/assetlinks/widgets.py (new file, 90 lines)

@@ -0,0 +1,90 @@

from Qt import QtWidgets


class SimpleLinkView(QtWidgets.QWidget):

    def __init__(self, dbcon, parent=None):
        super(SimpleLinkView, self).__init__(parent=parent)
        self.dbcon = dbcon

        # TODO: display selected target

        in_text = QtWidgets.QLabel("Inputs")
        in_view = QtWidgets.QListWidget(parent=self)
        out_text = QtWidgets.QLabel("Outputs")
        out_view = QtWidgets.QListWidget(parent=self)

        layout = QtWidgets.QGridLayout(self)
        layout.setContentsMargins(0, 0, 0, 0)
        layout.addWidget(in_text, 0, 0)
        layout.addWidget(in_view, 1, 0)
        layout.addWidget(out_text, 0, 1)
        layout.addWidget(out_view, 1, 1)

        self._in_view = in_view
        self._out_view = out_view

    def clear(self):
        self._in_view.clear()
        self._out_view.clear()

    def set_version(self, version_doc):
        self.clear()
        if not version_doc or not self.isVisible():
            return

        # inputs
        #
        for link in version_doc["data"].get("inputLinks", []):
            # Backwards compatibility for "input" key used as "id"
            if "id" not in link:
                link_id = link["input"]
            else:
                link_id = link["id"]
            version = self.dbcon.find_one(
                {"_id": link_id, "type": "version"},
                projection={"name": 1, "parent": 1}
            )
            if not version:
                continue
            subset = self.dbcon.find_one(
                {"_id": version["parent"], "type": "subset"},
                projection={"name": 1, "parent": 1}
            )
            if not subset:
                continue
            asset = self.dbcon.find_one(
                {"_id": subset["parent"], "type": "asset"},
                projection={"name": 1}
            )

            self._in_view.addItem("{asset} {subset} v{version:0>3}".format(
                asset=asset["name"],
                subset=subset["name"],
                version=version["name"],
            ))

        # outputs
        #
        outputs = self.dbcon.find(
            {"type": "version", "data.inputLinks.input": version_doc["_id"]},
            projection={"name": 1, "parent": 1}
        )
        for version in outputs or []:
            subset = self.dbcon.find_one(
                {"_id": version["parent"], "type": "subset"},
                projection={"name": 1, "parent": 1}
            )
            if not subset:
                continue
            asset = self.dbcon.find_one(
                {"_id": subset["parent"], "type": "asset"},
                projection={"name": 1}
            )

            self._out_view.addItem("{asset} {subset} v{version:0>3}".format(
                asset=asset["name"],
                subset=subset["name"],
                version=version["name"],
            ))
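Each resolved link is rendered through the "{asset} {subset} v{version:0>3}" template, walking version -> subset -> asset documents, so a list item reads e.g. "chairMain modelMain v003" (names illustrative).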

@@ -31,7 +31,7 @@ class LibraryLoaderWindow(QtWidgets.QDialog):
    message_timeout = 5000

    def __init__(
        self, parent=None, icon=None, show_projects=False, show_libraries=True
        self, parent=None, show_projects=False, show_libraries=True
    ):
        super(LibraryLoaderWindow, self).__init__(parent)

@@ -517,10 +517,7 @@ class LibraryLoaderWindow(QtWidgets.QDialog):
        return super(LibraryLoaderWindow, self).closeEvent(event)


def show(
    debug=False, parent=None, icon=None,
    show_projects=False, show_libraries=True
):
def show(debug=False, parent=None, show_projects=False, show_libraries=True):
    """Display Loader GUI

    Arguments:

@@ -555,9 +552,9 @@ def show(
    import traceback
    sys.excepthook = lambda typ, val, tb: traceback.print_last()

    with tools_lib.application():
    with tools_lib.qt_app_context():
        window = LibraryLoaderWindow(
            parent, icon, show_projects, show_libraries
            parent, show_projects, show_libraries
        )
        window.show()

@@ -631,7 +631,7 @@ def show(debug=False, parent=None, use_context=False):
    api.Session["AVALON_PROJECT"] = any_project["name"]
    module.project = any_project["name"]

    with lib.application():
    with lib.qt_app_context():
        window = LoaderWindow(parent)
        window.show()

|
@ -16,11 +16,15 @@ from openpype.tools.utils.delegates import (
|
|||
VersionDelegate,
|
||||
PrettyTimeDelegate
|
||||
)
|
||||
from openpype.tools.utils.widgets import OptionalMenu
|
||||
from openpype.tools.utils.widgets import (
|
||||
OptionalMenu,
|
||||
PlaceholderLineEdit
|
||||
)
|
||||
from openpype.tools.utils.views import (
|
||||
TreeViewSpinner,
|
||||
DeselectableTreeView
|
||||
)
|
||||
from openpype.tools.assetlinks.widgets import SimpleLinkView
|
||||
|
||||
from .model import (
|
||||
SubsetsModel,
|
||||
|
|
@ -174,7 +178,7 @@ class SubsetWidget(QtWidgets.QWidget):
|
|||
family_proxy = FamiliesFilterProxyModel()
|
||||
family_proxy.setSourceModel(proxy)
|
||||
|
||||
subset_filter = QtWidgets.QLineEdit(self)
|
||||
subset_filter = PlaceholderLineEdit(self)
|
||||
subset_filter.setPlaceholderText("Filter subsets..")
|
||||
|
||||
group_checkbox = QtWidgets.QCheckBox("Enable Grouping", self)
|
||||
|
|
@@ -809,8 +813,9 @@ class ThumbnailWidget(QtWidgets.QLabel):
            {"_id": doc_id},
            {"data.thumbnail_id"}
        )

        thumbnail_id = doc.get("data", {}).get("thumbnail_id")
        thumbnail_id = None
        if doc:
            thumbnail_id = doc.get("data", {}).get("thumbnail_id")
        if thumbnail_id == self.current_thumb_id:
            if self.current_thumbnail is None:
                self.set_pixmap()

@@ -845,19 +850,25 @@ class VersionWidget(QtWidgets.QWidget):
    def __init__(self, dbcon, parent=None):
        super(VersionWidget, self).__init__(parent=parent)

        layout = QtWidgets.QVBoxLayout(self)
        layout.setContentsMargins(0, 0, 0, 0)
        label = QtWidgets.QLabel("Version", self)
        data = VersionTextEdit(dbcon, self)
        data.setReadOnly(True)

        layout.addWidget(label)
        layout.addWidget(data)
        depend_widget = SimpleLinkView(dbcon, self)

        tab = QtWidgets.QTabWidget()
        tab.addTab(data, "Version Info")
        tab.addTab(depend_widget, "Dependency")

        layout = QtWidgets.QVBoxLayout(self)
        layout.setContentsMargins(0, 0, 0, 0)
        layout.addWidget(tab)

        self.data = data
        self.depend_widget = depend_widget

    def set_version(self, version_doc):
        self.data.set_version(version_doc)
        self.depend_widget.set_version(version_doc)


class FamilyModel(QtGui.QStandardItemModel):

openpype/tools/project_manager/project_manager/images/bin.png (new binary file, 149 B; not shown)

@@ -3,6 +3,10 @@ import copy
import json
from uuid import uuid4

from pymongo import UpdateOne, DeleteOne

from Qt import QtCore, QtGui

from .constants import (
    IDENTIFIER_ROLE,
    ITEM_TYPE_ROLE,

@@ -15,9 +19,6 @@ from .constants import (
from .style import ResourceCache

from openpype.lib import CURRENT_DOC_SCHEMAS
from pymongo import UpdateOne, DeleteOne
from avalon.vendor import qtawesome
from Qt import QtCore, QtGui


class ProjectModel(QtGui.QStandardItemModel):

@@ -1,9 +1,14 @@
import os

from Qt import QtCore, QtGui

from avalon.vendor import qtawesome


class ResourceCache:
    # TODO use colors from OpenPype style
    colors = {
        "standard": "#bfccd6",
        "disabled": "#969696",
        "new": "#2d9a4c",
        "warning": "#c83232"
    }

@@ -58,11 +63,62 @@ class ResourceCache:
            },
            "refresh": qtawesome.icon(
                "fa.refresh",
                color=cls.colors["standard"]
            )
                color=cls.colors["standard"],
                color_disabled=cls.colors["disabled"]
            ),
            "remove": cls.get_remove_icon()
        }
        return cls.icons

    @classmethod
    def get_color(cls, color_name):
        return cls.colors[color_name]

    @classmethod
    def get_remove_icon(cls):
        src_image = get_remove_image()
        normal_pix = paint_image_with_color(
            src_image,
            QtGui.QColor(cls.colors["standard"])
        )
        disabled_pix = paint_image_with_color(
            src_image,
            QtGui.QColor(cls.colors["disabled"])
        )
        icon = QtGui.QIcon(normal_pix)
        icon.addPixmap(disabled_pix, QtGui.QIcon.Disabled, QtGui.QIcon.On)
        icon.addPixmap(disabled_pix, QtGui.QIcon.Disabled, QtGui.QIcon.Off)
        return icon


def get_remove_image():
    image_path = os.path.join(
        os.path.dirname(os.path.abspath(__file__)),
        "images",
        "bin.png"
    )
    return QtGui.QImage(image_path)


def paint_image_with_color(image, color):
    """TODO: This function should be imported from utils.

    It was not available there at the moment of creation.
    """
    width = image.width()
    height = image.height()

    alpha_mask = image.createAlphaMask()
    alpha_region = QtGui.QRegion(QtGui.QBitmap.fromImage(alpha_mask))

    pixmap = QtGui.QPixmap(width, height)
    pixmap.fill(QtCore.Qt.transparent)

    painter = QtGui.QPainter(pixmap)
    painter.setClipRegion(alpha_region)
    painter.setPen(QtCore.Qt.NoPen)
    painter.setBrush(color)
    painter.drawRect(QtCore.QRect(0, 0, width, height))
    painter.end()

    return pixmap
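A short usage sketch of the two helpers above, mirroring what get_remove_icon does (the color value is taken from ResourceCache.colors["standard"]):

    from Qt import QtGui

    image = get_remove_image()   # QImage loaded from images/bin.png
    pixmap = paint_image_with_color(image, QtGui.QColor("#bfccd6"))
    icon = QtGui.QIcon(pixmap)   # tinted "remove" icon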

@@ -288,3 +288,127 @@ class CreateProjectDialog(QtWidgets.QDialog):

            project_codes.add(project_code)
        return project_names, project_codes


class _SameSizeBtns(QtWidgets.QPushButton):
    """Button that keeps the same width as all buttons added as related.

    This happens without changing the min/max/fixed size of the button, which
    is welcome on multi-display desktops with different resolutions.
    """
    def __init__(self, *args, **kwargs):
        super(_SameSizeBtns, self).__init__(*args, **kwargs)
        self._related_btns = []

    def add_related_btn(self, btn):
        """Add related button which should be checked for width.

        Args:
            btn (_SameSizeBtns): Other object of _SameSizeBtns.
        """
        self._related_btns.append(btn)

    def hint_width(self):
        """Get size hint of this button unrelated to the others."""
        return super(_SameSizeBtns, self).sizeHint().width()

    def sizeHint(self):
        """Calculate size hint based on size hint of this button and related.

        If the width is lower than any other button's it is raised to match.
        """
        result = super(_SameSizeBtns, self).sizeHint()
        width = result.width()
        for btn in self._related_btns:
            btn_width = btn.hint_width()
            if btn_width > width:
                width = btn_width

        result.setWidth(width)
        return result


class ConfirmProjectDeletion(QtWidgets.QDialog):
    """Dialog which confirms deletion of a project."""
    def __init__(self, project_name, parent):
        super(ConfirmProjectDeletion, self).__init__(parent)

        self.setWindowTitle("Delete project?")

        message = (
            "Project <b>\"{}\"</b> with all related data will be"
            " permanently removed from the database (this action won't remove"
            " any files on disk)."
        ).format(project_name)
        message_label = QtWidgets.QLabel(message, self)
        message_label.setWordWrap(True)

        question_label = QtWidgets.QLabel("<b>Are you sure?</b>", self)

        confirm_input = QtWidgets.QLineEdit(self)
        confirm_input.setPlaceholderText("Type \"Delete\" to confirm...")

        cancel_btn = _SameSizeBtns("Cancel", self)
        cancel_btn.setToolTip("Cancel deletion of the project")
        confirm_btn = _SameSizeBtns("Delete", self)
        confirm_btn.setEnabled(False)
        confirm_btn.setToolTip("Confirm deletion")

        cancel_btn.add_related_btn(confirm_btn)
        confirm_btn.add_related_btn(cancel_btn)

        btns_layout = QtWidgets.QHBoxLayout()
        btns_layout.addStretch(1)
        btns_layout.addWidget(cancel_btn, 0)
        btns_layout.addWidget(confirm_btn, 0)

        layout = QtWidgets.QVBoxLayout(self)
        layout.addWidget(message_label, 0)
        layout.addStretch(1)
        layout.addWidget(question_label, 0)
        layout.addWidget(confirm_input, 0)
        layout.addLayout(btns_layout)

        cancel_btn.clicked.connect(self._on_cancel_click)
        confirm_btn.clicked.connect(self._on_confirm_click)
        confirm_input.textChanged.connect(self._on_confirm_text_change)
        confirm_input.returnPressed.connect(self._on_enter_clicked)

        self._cancel_btn = cancel_btn
        self._confirm_btn = confirm_btn
        self._confirm_input = confirm_input
        self._result = 0

        self.setMinimumWidth(480)
        self.setMaximumWidth(650)
        self.setMaximumHeight(250)

    def exec_(self, *args, **kwargs):
        super(ConfirmProjectDeletion, self).exec_(*args, **kwargs)
        return self._result

    def showEvent(self, event):
        """Reset result on show."""
        super(ConfirmProjectDeletion, self).showEvent(event)
        self._result = 0
        minimum_size_hint = self.minimumSizeHint()
        self.resize(self.width(), minimum_size_hint.height() + 30)

    def result(self):
        """Get result of dialog: 1 for confirm, 0 for cancel."""
        return self._result

    def _on_cancel_click(self):
        self.close()

    def _on_confirm_click(self):
        self._result = 1
        self.close()

    def _on_enter_clicked(self):
        if self._confirm_btn.isEnabled():
            self._on_confirm_click()

    def _on_confirm_text_change(self):
        enabled = self._confirm_input.text().lower() == "delete"
        self._confirm_btn.setEnabled(enabled)

@@ -11,6 +11,7 @@ from . import (
    CreateProjectDialog,
    PROJECT_NAME_ROLE
)
from .widgets import ConfirmProjectDeletion
from .style import ResourceCache
from openpype.style import load_stylesheet
from openpype.lib import is_admin_password_required

@@ -77,6 +78,10 @@ class ProjectManagerWindow(QtWidgets.QWidget):
        )
        create_folders_btn.setEnabled(False)

        remove_projects_btn = QtWidgets.QPushButton(project_widget)
        remove_projects_btn.setIcon(ResourceCache.get_icon("remove"))
        remove_projects_btn.setObjectName("IconBtn")

        project_layout = QtWidgets.QHBoxLayout(project_widget)
        project_layout.setContentsMargins(0, 0, 0, 0)
        project_layout.addWidget(project_combobox, 0)

@@ -84,6 +89,7 @@ class ProjectManagerWindow(QtWidgets.QWidget):
        project_layout.addWidget(create_project_btn, 0)
        project_layout.addWidget(create_folders_btn)
        project_layout.addStretch(1)
        project_layout.addWidget(remove_projects_btn)

        # Helper buttons
        helper_btns_widget = QtWidgets.QWidget(top_part_widget)

@@ -145,11 +151,13 @@ class ProjectManagerWindow(QtWidgets.QWidget):
        refresh_projects_btn.clicked.connect(self._on_project_refresh)
        create_project_btn.clicked.connect(self._on_project_create)
        create_folders_btn.clicked.connect(self._on_create_folders)
        remove_projects_btn.clicked.connect(self._on_remove_project)
        project_combobox.currentIndexChanged.connect(self._on_project_change)
        save_btn.clicked.connect(self._on_save_click)
        add_asset_btn.clicked.connect(self._on_add_asset)
        add_task_btn.clicked.connect(self._on_add_task)

        self._dbcon = dbcon
        self._project_model = project_model
        self._project_proxy_model = project_proxy

@@ -162,6 +170,7 @@ class ProjectManagerWindow(QtWidgets.QWidget):
        self._project_combobox = project_combobox
        self._create_project_btn = create_project_btn
        self._create_folders_btn = create_folders_btn
        self._remove_projects_btn = remove_projects_btn

        self._add_asset_btn = add_asset_btn
        self._add_task_btn = add_task_btn

@@ -171,6 +180,7 @@ class ProjectManagerWindow(QtWidgets.QWidget):

    def _set_project(self, project_name=None):
        self._create_folders_btn.setEnabled(project_name is not None)
        self._remove_projects_btn.setEnabled(project_name is not None)
        self._project_proxy_model.set_filter_default(project_name is not None)
        self.hierarchy_view.set_project(project_name)

@@ -252,6 +262,19 @@ class ProjectManagerWindow(QtWidgets.QWidget):
                exc_info=True
            )

    def _on_remove_project(self):
        project_name = self._current_project()
        dialog = ConfirmProjectDeletion(project_name, self)
        result = dialog.exec_()
        if result != 1:
            return

        database = self._dbcon.database
        if project_name in database.collection_names():
            collection = database[project_name]
            collection.drop()
        self.refresh_projects()

    def show_message(self, message):
        # TODO add nicer message popup
        self.message_label.setText(message)

@@ -564,6 +564,8 @@ class AssetsWidget(QtWidgets.QWidget):
    refreshed = QtCore.Signal()
    # on view selection change
    selection_changed = QtCore.Signal()
    # on view double click
    double_clicked = QtCore.Signal()

    def __init__(self, dbcon, parent=None):
        super(AssetsWidget, self).__init__(parent=parent)

@@ -618,6 +620,7 @@ class AssetsWidget(QtWidgets.QWidget):
        refresh_btn.clicked.connect(self.refresh)
        current_asset_btn.clicked.connect(self.set_current_session_asset)
        model.refreshed.connect(self._on_model_refresh)
        view.doubleClicked.connect(self.double_clicked)

        self._current_asset_btn = current_asset_btn
        self._model = model
@@ -5,10 +5,6 @@ DEFAULT_PROJECT_LABEL = "< Default >"
PROJECT_NAME_ROLE = QtCore.Qt.UserRole + 101
PROJECT_IS_ACTIVE_ROLE = QtCore.Qt.UserRole + 102

TASK_NAME_ROLE = QtCore.Qt.UserRole + 301
TASK_TYPE_ROLE = QtCore.Qt.UserRole + 302
TASK_ORDER_ROLE = QtCore.Qt.UserRole + 303

LOCAL_PROVIDER_ROLE = QtCore.Qt.UserRole + 500  # provider of active site
REMOTE_PROVIDER_ROLE = QtCore.Qt.UserRole + 501  # provider of remote site
LOCAL_PROGRESS_ROLE = QtCore.Qt.UserRole + 502  # percentage downld on active
@@ -8,10 +8,7 @@ from Qt import QtWidgets, QtGui, QtCore

from avalon.lib import HeroVersionType
from openpype.style import get_objected_colors
from .models import (
    AssetModel,
    TreeModel
)
from .models import TreeModel
from . import lib

if Qt.__binding__ == "PySide":

@@ -22,173 +19,6 @@ elif Qt.__binding__ == "PyQt4":
log = logging.getLogger(__name__)


class AssetDelegate(QtWidgets.QItemDelegate):
    bar_height = 3

    def __init__(self, *args, **kwargs):
        super(AssetDelegate, self).__init__(*args, **kwargs)
        asset_view_colors = get_objected_colors()["loader"]["asset-view"]
        self._selected_color = (
            asset_view_colors["selected"].get_qcolor()
        )
        self._hover_color = (
            asset_view_colors["hover"].get_qcolor()
        )
        self._selected_hover_color = (
            asset_view_colors["selected-hover"].get_qcolor()
        )

    def sizeHint(self, option, index):
        result = super(AssetDelegate, self).sizeHint(option, index)
        height = result.height()
        result.setHeight(height + self.bar_height)

        return result

    def paint(self, painter, option, index):
        # Qt4 compat
        if Qt.__binding__ in ("PySide", "PyQt4"):
            option = QStyleOptionViewItemV4(option)

        painter.save()

        item_rect = QtCore.QRect(option.rect)
        item_rect.setHeight(option.rect.height() - self.bar_height)

        subset_colors = index.data(AssetModel.subsetColorsRole)
        subset_colors_width = 0
        if subset_colors:
            subset_colors_width = option.rect.width() / len(subset_colors)

        subset_rects = []
        counter = 0
        for subset_c in subset_colors:
            new_color = None
            new_rect = None
            if subset_c:
                new_color = QtGui.QColor(*subset_c)

                new_rect = QtCore.QRect(
                    option.rect.left() + (counter * subset_colors_width),
                    option.rect.top() + (
                        option.rect.height() - self.bar_height
                    ),
                    subset_colors_width,
                    self.bar_height
                )
            subset_rects.append((new_color, new_rect))
            counter += 1

        # Background
        if option.state & QtWidgets.QStyle.State_Selected:
            if len(subset_colors) == 0:
                item_rect.setTop(item_rect.top() + (self.bar_height / 2))

            if option.state & QtWidgets.QStyle.State_MouseOver:
                bg_color = self._selected_hover_color
            else:
                bg_color = self._selected_color
        else:
            item_rect.setTop(item_rect.top() + (self.bar_height / 2))
            if option.state & QtWidgets.QStyle.State_MouseOver:
                bg_color = self._hover_color
            else:
                bg_color = QtGui.QColor()
                bg_color.setAlpha(0)

        # When not needed to do a rounded corners (easier and without
        # painter restore):
        # painter.fillRect(
        #     item_rect,
        #     QtGui.QBrush(bg_color)
        # )
        pen = painter.pen()
        pen.setStyle(QtCore.Qt.NoPen)
        pen.setWidth(0)
        painter.setPen(pen)
        painter.setBrush(QtGui.QBrush(bg_color))
        painter.drawRoundedRect(option.rect, 3, 3)

        if option.state & QtWidgets.QStyle.State_Selected:
            for color, subset_rect in subset_rects:
                if not color or not subset_rect:
                    continue
                painter.fillRect(subset_rect, QtGui.QBrush(color))

        painter.restore()
        painter.save()

        # Icon
        icon_index = index.model().index(
            index.row(), index.column(), index.parent()
        )
        # - Default icon_rect if not icon
        icon_rect = QtCore.QRect(
            item_rect.left(),
            item_rect.top(),
            # To make sure it's same size all the time
            option.rect.height() - self.bar_height,
            option.rect.height() - self.bar_height
        )
        icon = index.model().data(icon_index, QtCore.Qt.DecorationRole)

        if icon:
            mode = QtGui.QIcon.Normal
            if not (option.state & QtWidgets.QStyle.State_Enabled):
                mode = QtGui.QIcon.Disabled
            elif option.state & QtWidgets.QStyle.State_Selected:
                mode = QtGui.QIcon.Selected

            if isinstance(icon, QtGui.QPixmap):
                icon = QtGui.QIcon(icon)
                option.decorationSize = icon.size() / icon.devicePixelRatio()

            elif isinstance(icon, QtGui.QColor):
                pixmap = QtGui.QPixmap(option.decorationSize)
                pixmap.fill(icon)
                icon = QtGui.QIcon(pixmap)

            elif isinstance(icon, QtGui.QImage):
                icon = QtGui.QIcon(QtGui.QPixmap.fromImage(icon))
                option.decorationSize = icon.size() / icon.devicePixelRatio()

            elif isinstance(icon, QtGui.QIcon):
                state = QtGui.QIcon.Off
                if option.state & QtWidgets.QStyle.State_Open:
                    state = QtGui.QIcon.On
                actualSize = option.icon.actualSize(
                    option.decorationSize, mode, state
                )
                option.decorationSize = QtCore.QSize(
                    min(option.decorationSize.width(), actualSize.width()),
                    min(option.decorationSize.height(), actualSize.height())
                )

            state = QtGui.QIcon.Off
            if option.state & QtWidgets.QStyle.State_Open:
                state = QtGui.QIcon.On

            icon.paint(
                painter, icon_rect,
                QtCore.Qt.AlignLeft, mode, state
            )

        # Text
        text_rect = QtCore.QRect(
            icon_rect.left() + icon_rect.width() + 2,
            item_rect.top(),
            item_rect.width(),
            item_rect.height()
        )

        painter.drawText(
            text_rect, QtCore.Qt.AlignVCenter,
            index.data(QtCore.Qt.DisplayRole)
        )

        painter.restore()


class VersionDelegate(QtWidgets.QStyledItemDelegate):
    """A delegate that displays a version integer formatted as a version string."""
@@ -477,6 +477,7 @@ def create_qthread(func, *args, **kwargs):


def get_repre_icons():
    """Returns a dict {'provider_name': QIcon}"""
    try:
        from openpype_modules import sync_server
    except Exception:

@@ -488,9 +489,17 @@ def get_repre_icons():
        "providers", "resources"
    )
    icons = {}
    # TODO get from sync module
    for provider in ['studio', 'local_drive', 'gdrive']:
        pix_url = "{}/{}.png".format(resource_path, provider)
    if not os.path.exists(resource_path):
        print("No icons for Site Sync found")
        return {}

    for file_name in os.listdir(resource_path):
        if file_name and not file_name.endswith("png"):
            continue

        provider, _ = os.path.splitext(file_name)

        pix_url = os.path.join(resource_path, file_name)
        icons[provider] = QtGui.QIcon(pix_url)

    return icons
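
Side note (illustrative, not part of the commit): the new loop derives the
provider name from each PNG file name, so dropping e.g. "dropbox.png" into the
resources folder is enough to register an icon. A standalone sketch of that
mapping, using a temporary folder as a stand-in for the resources directory:

    import os
    import tempfile

    resource_path = tempfile.mkdtemp()  # stand-in for providers/resources
    open(os.path.join(resource_path, "gdrive.png"), "wb").close()

    icons = {}
    for file_name in os.listdir(resource_path):
        if not file_name.endswith("png"):
            continue
        provider, _ = os.path.splitext(file_name)
        # the real code stores QtGui.QIcon(pix_url) here
        icons[provider] = os.path.join(resource_path, file_name)
    print(sorted(icons))  # ['gdrive']
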
@@ -1,7 +1,5 @@
import re
import time
import logging
import collections

import Qt
from Qt import QtCore, QtGui

@@ -11,10 +9,7 @@ from . import lib
from .constants import (
    PROJECT_IS_ACTIVE_ROLE,
    PROJECT_NAME_ROLE,
    DEFAULT_PROJECT_LABEL,
    TASK_ORDER_ROLE,
    TASK_TYPE_ROLE,
    TASK_NAME_ROLE
    DEFAULT_PROJECT_LABEL
)

log = logging.getLogger(__name__)

@@ -203,283 +198,6 @@ class Item(dict):
        self._children.append(child)


class AssetModel(TreeModel):
    """A model listing assets in the silo in the active project.

    The assets are displayed in a treeview, they are visually parented by
    a `visualParent` field in the database containing an `_id` to a parent
    asset.

    """

    Columns = ["label"]
    Name = 0
    Deprecated = 2
    ObjectId = 3

    DocumentRole = QtCore.Qt.UserRole + 2
    ObjectIdRole = QtCore.Qt.UserRole + 3
    subsetColorsRole = QtCore.Qt.UserRole + 4

    doc_fetched = QtCore.Signal(bool)
    refreshed = QtCore.Signal(bool)

    # Asset document projection
    asset_projection = {
        "type": 1,
        "schema": 1,
        "name": 1,
        "silo": 1,
        "data.visualParent": 1,
        "data.label": 1,
        "data.tags": 1,
        "data.icon": 1,
        "data.color": 1,
        "data.deprecated": 1
    }

    def __init__(self, dbcon=None, parent=None, asset_projection=None):
        super(AssetModel, self).__init__(parent=parent)
        if dbcon is None:
            dbcon = io
        self.dbcon = dbcon
        self.asset_colors = {}

        # Projections for Mongo queries
        # - let ability to modify them if used in tools that require more than
        #   defaults
        if asset_projection:
            self.asset_projection = asset_projection

        self._doc_fetching_thread = None
        self._doc_fetching_stop = False
        self._doc_payload = {}

        self.doc_fetched.connect(self.on_doc_fetched)

        self.refresh()

    def _add_hierarchy(self, assets, parent=None, silos=None):
        """Add the assets that are related to the parent as children items.

        This method does *not* query the database. These instead are queried
        in a single batch upfront as an optimization to reduce database
        queries. Resulting in up to 10x speed increase.

        Args:
            assets (dict): All assets in the currently active silo stored
                by key/value

        Returns:
            None

        """
        # Reset colors
        self.asset_colors = {}

        if silos:
            # WARNING: Silo item "_id" is set to silo value
            # mainly because GUI issue with preserve selection and expanded row
            # and because of easier hierarchy parenting (in "assets")
            for silo in silos:
                item = Item({
                    "_id": silo,
                    "name": silo,
                    "label": silo,
                    "type": "silo"
                })
                self.add_child(item, parent=parent)
                self._add_hierarchy(assets, parent=item)

        parent_id = parent["_id"] if parent else None
        current_assets = assets.get(parent_id, list())

        for asset in current_assets:
            # get label from data, otherwise use name
            data = asset.get("data", {})
            label = data.get("label", asset["name"])
            tags = data.get("tags", [])

            # store for the asset for optimization
            deprecated = "deprecated" in tags

            item = Item({
                "_id": asset["_id"],
                "name": asset["name"],
                "label": label,
                "type": asset["type"],
                "tags": ", ".join(tags),
                "deprecated": deprecated,
                "_document": asset
            })
            self.add_child(item, parent=parent)

            # Add asset's children recursively if it has children
            if asset["_id"] in assets:
                self._add_hierarchy(assets, parent=item)

            self.asset_colors[asset["_id"]] = []

    def on_doc_fetched(self, was_stopped):
        if was_stopped:
            self.stop_fetch_thread()
            return

        self.beginResetModel()

        assets_by_parent = self._doc_payload.get("assets_by_parent")
        silos = self._doc_payload.get("silos")
        if assets_by_parent is not None:
            # Build the hierarchical tree items recursively
            self._add_hierarchy(
                assets_by_parent,
                parent=None,
                silos=silos
            )

        self.endResetModel()

        has_content = bool(assets_by_parent) or bool(silos)
        self.refreshed.emit(has_content)

        self.stop_fetch_thread()

    def fetch(self):
        self._doc_payload = self._fetch() or {}
        # Emit doc fetched only if was not stopped
        self.doc_fetched.emit(self._doc_fetching_stop)

    def _fetch(self):
        if not self.dbcon.Session.get("AVALON_PROJECT"):
            return

        project_doc = self.dbcon.find_one(
            {"type": "project"},
            {"_id": True}
        )
        if not project_doc:
            return

        # Get all assets sorted by name
        db_assets = self.dbcon.find(
            {"type": "asset"},
            self.asset_projection
        ).sort("name", 1)

        # Group the assets by their visual parent's id
        assets_by_parent = collections.defaultdict(list)
        for asset in db_assets:
            if self._doc_fetching_stop:
                return
            parent_id = asset.get("data", {}).get("visualParent")
            assets_by_parent[parent_id].append(asset)

        return {
            "assets_by_parent": assets_by_parent,
            "silos": None
        }

    def stop_fetch_thread(self):
        if self._doc_fetching_thread is not None:
            self._doc_fetching_stop = True
            while self._doc_fetching_thread.isRunning():
                time.sleep(0.001)
            self._doc_fetching_thread = None

    def refresh(self, force=False):
        """Refresh the data for the model."""
        # Skip fetch if there is already other thread fetching documents
        if self._doc_fetching_thread is not None:
            if not force:
                return
            self.stop_fetch_thread()

        # Clear model items
        self.clear()

        # Fetch documents from mongo
        # Restart payload
        self._doc_payload = {}
        self._doc_fetching_stop = False
        self._doc_fetching_thread = lib.create_qthread(self.fetch)
        self._doc_fetching_thread.start()

    def flags(self, index):
        return QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsSelectable

    def setData(self, index, value, role=QtCore.Qt.EditRole):
        if not index.isValid():
            return False

        if role == self.subsetColorsRole:
            asset_id = index.data(self.ObjectIdRole)
            self.asset_colors[asset_id] = value

            if Qt.__binding__ in ("PyQt4", "PySide"):
                self.dataChanged.emit(index, index)
            else:
                self.dataChanged.emit(index, index, [role])

            return True

        return super(AssetModel, self).setData(index, value, role)

    def data(self, index, role):
        if not index.isValid():
            return

        item = index.internalPointer()
        if role == QtCore.Qt.DecorationRole:
            column = index.column()
            if column == self.Name:
                # Allow a custom icon and custom icon color to be defined
                data = item.get("_document", {}).get("data", {})
                icon = data.get("icon", None)
                if icon is None and item.get("type") == "silo":
                    icon = "database"
                color = data.get("color", style.colors.default)

                if icon is None:
                    # Use default icons if no custom one is specified.
                    # If it has children show a full folder, otherwise
                    # show an open folder
                    has_children = self.rowCount(index) > 0
                    icon = "folder" if has_children else "folder-o"

                # Make the color darker when the asset is deprecated
                if item.get("deprecated", False):
                    color = QtGui.QColor(color).darker(250)

                try:
                    key = "fa.{0}".format(icon)  # font-awesome key
                    icon = qtawesome.icon(key, color=color)
                    return icon
                except Exception as exception:
                    # Log an error message instead of erroring out completely
                    # when the icon couldn't be created (e.g. invalid name)
                    log.error(exception)

                return

        if role == QtCore.Qt.ForegroundRole:  # font color
            if "deprecated" in item.get("tags", []):
                return QtGui.QColor(style.colors.light).darker(250)

        if role == self.ObjectIdRole:
            return item.get("_id", None)

        if role == self.DocumentRole:
            return item.get("_document", None)

        if role == self.subsetColorsRole:
            asset_id = item.get("_id", None)
            return self.asset_colors.get(asset_id) or []

        return super(AssetModel, self).data(index, role)


class RecursiveSortFilterProxyModel(QtCore.QSortFilterProxyModel):
    """Filter to the regex; if any of the children matches, allow the parent."""
    def filterAcceptsRow(self, row, parent):

@@ -654,163 +372,3 @@ class ProjectSortFilterProxy(QtCore.QSortFilterProxyModel):
    def set_filter_enabled(self, value):
        self._filter_enabled = value
        self.invalidateFilter()


class TasksModel(QtGui.QStandardItemModel):
    """A model listing the tasks combined for a list of assets"""
    def __init__(self, dbcon, parent=None):
        super(TasksModel, self).__init__(parent=parent)
        self.dbcon = dbcon
        self._default_icon = qtawesome.icon(
            "fa.male",
            color=style.colors.default
        )
        self._no_tasks_icon = qtawesome.icon(
            "fa.exclamation-circle",
            color=style.colors.mid
        )
        self._cached_icons = {}
        self._project_task_types = {}

        self._last_asset_id = None

        self.refresh()

    def refresh(self):
        if self.dbcon.Session.get("AVALON_PROJECT"):
            self._refresh_task_types()
            self.set_asset_id(self._last_asset_id)
        else:
            self.clear()

    def _refresh_task_types(self):
        # Get the project configured icons from database
        project = self.dbcon.find_one(
            {"type": "project"},
            {"config.tasks"}
        )
        tasks = project["config"].get("tasks") or {}
        self._project_task_types = tasks

    def _try_get_awesome_icon(self, icon_name):
        icon = None
        if icon_name:
            try:
                icon = qtawesome.icon(
                    "fa.{}".format(icon_name),
                    color=style.colors.default
                )

            except Exception:
                pass
        return icon

    def headerData(self, section, orientation, role):
        # Show nice labels in the header
        if (
            role == QtCore.Qt.DisplayRole
            and orientation == QtCore.Qt.Horizontal
        ):
            if section == 0:
                return "Tasks"

        return super(TasksModel, self).headerData(section, orientation, role)

    def _get_icon(self, task_icon, task_type_icon):
        if task_icon in self._cached_icons:
            return self._cached_icons[task_icon]

        icon = self._try_get_awesome_icon(task_icon)
        if icon is not None:
            self._cached_icons[task_icon] = icon
            return icon

        if task_type_icon in self._cached_icons:
            icon = self._cached_icons[task_type_icon]
            self._cached_icons[task_icon] = icon
            return icon

        icon = self._try_get_awesome_icon(task_type_icon)
        if icon is None:
            icon = self._default_icon

        self._cached_icons[task_icon] = icon
        self._cached_icons[task_type_icon] = icon

        return icon

    def set_asset_id(self, asset_id):
        asset_doc = None
        if asset_id:
            asset_doc = self.dbcon.find_one(
                {"_id": asset_id},
                {"data.tasks": True}
            )
        self.set_asset(asset_doc)

    def set_asset(self, asset_doc):
        """Set assets to track by their database id

        Arguments:
            asset_doc (dict): Asset document from MongoDB.
        """
        self.clear()

        if not asset_doc:
            self._last_asset_id = None
            return

        self._last_asset_id = asset_doc["_id"]

        asset_tasks = asset_doc.get("data", {}).get("tasks") or {}
        items = []
        for task_name, task_info in asset_tasks.items():
            task_icon = task_info.get("icon")
            task_type = task_info.get("type")
            task_order = task_info.get("order")
            task_type_info = self._project_task_types.get(task_type) or {}
            task_type_icon = task_type_info.get("icon")
            icon = self._get_icon(task_icon, task_type_icon)

            label = "{} ({})".format(task_name, task_type or "type N/A")
            item = QtGui.QStandardItem(label)
            item.setData(task_name, TASK_NAME_ROLE)
            item.setData(task_type, TASK_TYPE_ROLE)
            item.setData(task_order, TASK_ORDER_ROLE)
            item.setData(icon, QtCore.Qt.DecorationRole)
            item.setFlags(QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsSelectable)
            items.append(item)

        if not items:
            item = QtGui.QStandardItem("No task")
            item.setData(self._no_tasks_icon, QtCore.Qt.DecorationRole)
            item.setFlags(QtCore.Qt.NoItemFlags)
            items.append(item)

        self.invisibleRootItem().appendRows(items)


class TasksProxyModel(QtCore.QSortFilterProxyModel):
    def lessThan(self, x_index, y_index):
        x_order = x_index.data(TASK_ORDER_ROLE)
        y_order = y_index.data(TASK_ORDER_ROLE)
        if x_order is not None and y_order is not None:
            if x_order < y_order:
                return True
            if x_order > y_order:
                return False

        elif x_order is None and y_order is not None:
            return True

        elif y_order is None and x_order is not None:
            return False

        x_name = x_index.data(QtCore.Qt.DisplayRole)
        y_name = y_index.data(QtCore.Qt.DisplayRole)
        if x_name == y_name:
            return True

        if x_name == tuple(sorted((x_name, y_name)))[0]:
            return True
        return False
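
Side note (illustrative, not part of the commit): the removed
TasksProxyModel.lessThan above sorts tasks without an explicit order first
(alphabetically), then ordered tasks by their order value, with the name as a
tie-breaker. The same rule expressed as a plain sorted() key over made-up
(name, order) tuples:

    tasks = [("comp", None), ("anim", 2), ("model", 1), ("fx", None)]
    ordered = sorted(
        tasks,
        # tuple comparison never mixes ints and strings here, because the
        # first element splits unordered and ordered tasks into two groups
        key=lambda t: (t[1] is not None, t[1] if t[1] is not None else t[0], t[0])
    )
    print([name for name, _ in ordered])  # ['comp', 'fx', 'model', 'anim']
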
@@ -4,11 +4,11 @@ from avalon import style
from avalon.vendor import qtawesome

from .views import DeselectableTreeView
from .constants import (
    TASK_ORDER_ROLE,
    TASK_TYPE_ROLE,
    TASK_NAME_ROLE
)


TASK_NAME_ROLE = QtCore.Qt.UserRole + 1
TASK_TYPE_ROLE = QtCore.Qt.UserRole + 2
TASK_ORDER_ROLE = QtCore.Qt.UserRole + 3


class TasksModel(QtGui.QStandardItemModel):
@@ -61,26 +61,3 @@ class TreeViewSpinner(QtWidgets.QTreeView):
            self.paint_empty(event)
        else:
            super(TreeViewSpinner, self).paintEvent(event)


class AssetsView(TreeViewSpinner, DeselectableTreeView):
    """Item view.
    This implements a context menu.
    """

    def __init__(self, parent=None):
        super(AssetsView, self).__init__(parent)
        self.setIndentation(15)
        self.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
        self.setHeaderHidden(True)

    def mousePressEvent(self, event):
        index = self.indexAt(event.pos())
        if not index.isValid():
            modifiers = QtWidgets.QApplication.keyboardModifiers()
            if modifiers == QtCore.Qt.ShiftModifier:
                return
            elif modifiers == QtCore.Qt.ControlModifier:
                return

        super(AssetsView, self).mousePressEvent(event)
@@ -1,18 +1,10 @@
import logging
import time

from . import lib

from Qt import QtWidgets, QtCore, QtGui

from avalon.vendor import qtawesome, qargparse

from avalon import style
from openpype.style import get_objected_colors

from .models import AssetModel, RecursiveSortFilterProxyModel
from .views import AssetsView
from .delegates import AssetDelegate

log = logging.getLogger(__name__)


@@ -38,293 +30,6 @@ class PlaceholderLineEdit(QtWidgets.QLineEdit):
        self.setPalette(filter_palette)


class AssetWidget(QtWidgets.QWidget):
    """A Widget to display a tree of assets with filter

    To list the assets of the active project:
        >>> # widget = AssetWidget()
        >>> # widget.refresh()
        >>> # widget.show()

    """

    refresh_triggered = QtCore.Signal()  # on model refresh
    refreshed = QtCore.Signal()
    selection_changed = QtCore.Signal()  # on view selection change
    current_changed = QtCore.Signal()  # on view current index change

    def __init__(self, dbcon, multiselection=False, parent=None):
        super(AssetWidget, self).__init__(parent=parent)

        self.dbcon = dbcon

        # Tree View
        model = AssetModel(dbcon=self.dbcon, parent=self)
        proxy = RecursiveSortFilterProxyModel()
        proxy.setSourceModel(model)
        proxy.setFilterCaseSensitivity(QtCore.Qt.CaseInsensitive)

        view = AssetsView(self)
        view.setModel(proxy)
        if multiselection:
            asset_delegate = AssetDelegate()
            view.setSelectionMode(view.ExtendedSelection)
            view.setItemDelegate(asset_delegate)

        icon = qtawesome.icon("fa.arrow-down", color=style.colors.light)
        set_current_asset_btn = QtWidgets.QPushButton(icon, "")
        set_current_asset_btn.setToolTip("Go to Asset from current Session")
        # Hide by default
        set_current_asset_btn.setVisible(False)

        icon = qtawesome.icon("fa.refresh", color=style.colors.light)
        refresh = QtWidgets.QPushButton(icon, "", parent=self)
        refresh.setToolTip("Refresh items")

        filter_input = QtWidgets.QLineEdit(self)
        filter_input.setPlaceholderText("Filter assets..")

        # Header
        header_layout = QtWidgets.QHBoxLayout()
        header_layout.addWidget(filter_input)
        header_layout.addWidget(set_current_asset_btn)
        header_layout.addWidget(refresh)

        # Layout
        layout = QtWidgets.QVBoxLayout(self)
        layout.setContentsMargins(0, 0, 0, 0)
        layout.setSpacing(4)
        layout.addLayout(header_layout)
        layout.addWidget(view)

        # Signals/Slots
        filter_input.textChanged.connect(proxy.setFilterFixedString)

        selection = view.selectionModel()
        selection.selectionChanged.connect(self.selection_changed)
        selection.currentChanged.connect(self.current_changed)
        refresh.clicked.connect(self.refresh)
        set_current_asset_btn.clicked.connect(self.set_current_session_asset)

        self.set_current_asset_btn = set_current_asset_btn
        self.model = model
        self.proxy = proxy
        self.view = view

        self.model_selection = {}

    def set_current_asset_btn_visibility(self, visible=None):
        """Hide set current asset button.

        Not all tools support using the current context asset.
        """
        if visible is None:
            visible = not self.set_current_asset_btn.isVisible()
        self.set_current_asset_btn.setVisible(visible)

    def _refresh_model(self):
        # Store selection
        self._store_model_selection()
        time_start = time.time()

        self.set_loading_state(
            loading=True,
            empty=True
        )

        def on_refreshed(has_item):
            self.set_loading_state(loading=False, empty=not has_item)
            self._restore_model_selection()
            self.model.refreshed.disconnect()
            self.refreshed.emit()
            print("Duration: %.3fs" % (time.time() - time_start))

        # Connect to signal
        self.model.refreshed.connect(on_refreshed)
        # Trigger signal before refresh is called
        self.refresh_triggered.emit()
        # Refresh model
        self.model.refresh()

    def refresh(self):
        self._refresh_model()

    def get_active_asset(self):
        """Return the asset item of the current selection."""
        current = self.view.currentIndex()
        return current.data(self.model.ItemRole)

    def get_active_asset_document(self):
        """Return the asset document of the current selection."""
        current = self.view.currentIndex()
        return current.data(self.model.DocumentRole)

    def get_active_index(self):
        return self.view.currentIndex()

    def get_selected_assets(self):
        """Return the documents of selected assets."""
        selection = self.view.selectionModel()
        rows = selection.selectedRows()
        assets = [row.data(self.model.DocumentRole) for row in rows]

        # NOTE: skip None objects; assumed to be silos (backwards comp.)
        return [asset for asset in assets if asset]

    def select_assets(self, assets, expand=True, key="name"):
        """Select assets by item key.

        Args:
            assets (list): List of asset values that can be found under
                specified `key`
            expand (bool): Whether to also expand to the asset in the view
            key (string): Key that specifies where to look for `assets` values

        Returns:
            None

        Default `key` is "name"; in that case `assets` should contain a single
        asset name or a list of asset names. (It is a good idea to use the
        "_id" key instead of name; in that case `assets` must contain
        `ObjectId` objects.) It is expected that each value in `assets` will
        be found only once. If the `key` and `assets` filters match more than
        one asset, only the first found is selected.

        """

        if not isinstance(assets, (tuple, list)):
            assets = [assets]

        # convert to set - a tuple can't be modified
        assets = set(assets)

        # Clear selection
        selection_model = self.view.selectionModel()
        selection_model.clearSelection()

        # Select
        mode = selection_model.Select | selection_model.Rows
        for index in lib.iter_model_rows(
            self.proxy, column=0, include_root=False
        ):
            # stop iteration if there are no assets to process
            if not assets:
                break

            value = index.data(self.model.ItemRole).get(key)
            if value not in assets:
                continue

            # Remove processed asset
            assets.discard(value)

            selection_model.select(index, mode)
            if expand:
                # Expand parent index
                self.view.expand(self.proxy.parent(index))

            # Set the currently active index
            self.view.setCurrentIndex(index)

    def set_loading_state(self, loading, empty):
        if self.view.is_loading != loading:
            if loading:
                self.view.spinner.repaintNeeded.connect(
                    self.view.viewport().update
                )
            else:
                self.view.spinner.repaintNeeded.disconnect()

        self.view.is_loading = loading
        self.view.is_empty = empty

    def _store_model_selection(self):
        index = self.view.currentIndex()
        current = None
        if index and index.isValid():
            current = index.data(self.model.ObjectIdRole)

        expanded = set()
        model = self.view.model()
        for index in lib.iter_model_rows(
            model, column=0, include_root=False
        ):
            if self.view.isExpanded(index):
                value = index.data(self.model.ObjectIdRole)
                expanded.add(value)

        selection_model = self.view.selectionModel()

        selected = None
        selected_rows = selection_model.selectedRows()
        if selected_rows:
            selected = set(
                row.data(self.model.ObjectIdRole)
                for row in selected_rows
            )

        self.model_selection = {
            "expanded": expanded,
            "selected": selected,
            "current": current
        }

    def _restore_model_selection(self):
        model = self.view.model()
        not_set = object()
        expanded = self.model_selection.pop("expanded", not_set)
        selected = self.model_selection.pop("selected", not_set)
        current = self.model_selection.pop("current", not_set)

        if (
            expanded is not_set
            or selected is not_set
            or current is not_set
        ):
            return

        if expanded:
            for index in lib.iter_model_rows(
                model, column=0, include_root=False
            ):
                is_expanded = index.data(self.model.ObjectIdRole) in expanded
                self.view.setExpanded(index, is_expanded)

        if not selected and not current:
            self.set_current_session_asset()
            return

        current_index = None
        selected_indexes = []
        # Go through all indices, select the ones with similar data
        for index in lib.iter_model_rows(
            model, column=0, include_root=False
        ):
            object_id = index.data(self.model.ObjectIdRole)
            if object_id in selected:
                selected_indexes.append(index)

            if not current_index and object_id == current:
                current_index = index

        if current_index:
            self.view.setCurrentIndex(current_index)

        if not selected_indexes:
            return
        selection_model = self.view.selectionModel()
        flags = selection_model.Select | selection_model.Rows
        for index in selected_indexes:
            # Ensure item is visible
            self.view.scrollTo(index)
            selection_model.select(index, flags)

    def set_current_session_asset(self):
        asset_name = self.dbcon.Session.get("AVALON_ASSET")
        if asset_name:
            self.select_assets([asset_name])


class OptionalMenu(QtWidgets.QMenu):
    """A subclass of `QtWidgets.QMenu` to work with `OptionalAction`
@@ -100,7 +100,9 @@ class NameWindow(QtWidgets.QDialog):

        # Store project anatomy
        self.anatomy = anatomy
        self.template = anatomy.templates[template_key]["file"]
        self.template = anatomy.templates[template_key]["file"].replace(
            "{task}", "{task[name]}"
        )
        self.template_key = template_key

        # Btns widget
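
Side note (illustrative, not part of the commit): the replace() above rewrites
a legacy flat "{task}" token into the nested "{task[name]}" form, so templates
keep formatting once the task is passed as a dictionary. A round trip with a
made-up template and values:

    template = "{project[name]}_{task}_v{version:0>3}".replace(
        "{task}", "{task[name]}"
    )
    data = {
        "project": {"name": "demo"},
        "task": {"name": "modeling"},
        "version": 1,
    }
    print(template.format(**data))  # demo_modeling_v001
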
@@ -1 +1 @@
Subproject commit 9499f6517a1ff2d3bf94c5d34c0aece146734760
Subproject commit 7e5efd6885330d84bb8495975bcab84df49bfa3d
@@ -16,8 +16,9 @@ def inject_openpype_environment(deadlinePlugin):
    job = deadlinePlugin.GetJob()
    job = RepositoryUtils.GetJob(job.JobId, True)  # invalidates cache

    print("inject_openpype_environment start")
    print(">>> Injecting OpenPype environments ...")
    try:
        print(">>> Getting OpenPype executable ...")
        exe_list = job.GetJobExtraInfoKeyValue("openpype_executables")
        openpype_app = FileUtils.SearchFileList(exe_list)
        if openpype_app == "":

@@ -27,11 +28,13 @@ def inject_openpype_environment(deadlinePlugin):
                "The path to the render executable can be configured " +
                "from the Plugin Configuration in the Deadline Monitor.")

        print("--- OpenPype executable: {}".format(openpype_app))

        # tempfile.TemporaryFile cannot be used because of locking
        export_url = os.path.join(tempfile.gettempdir(),
                                  time.strftime('%Y%m%d%H%M%S'),
                                  'env.json')  # add HHMMSS + delete later
        print("export_url {}".format(export_url))
        print(">>> Temporary path: {}".format(export_url))

        args = [
            openpype_app,

@@ -55,41 +58,52 @@ def inject_openpype_environment(deadlinePlugin):
                  "AVALON_TASK, AVALON_APP_NAME"
            raise RuntimeError(msg)

        print("args:::{}".format(args))
        if not os.environ.get("OPENPYPE_MONGO"):
            print(">>> Missing OPENPYPE_MONGO env var, process won't work")

        exit_code = subprocess.call(args, cwd=os.path.dirname(openpype_app))
        if exit_code != 0:
            raise RuntimeError("Publishing failed, check worker's log")
        env = os.environ
        env["OPENPYPE_HEADLESS_MODE"] = "1"
        env["AVALON_TIMEOUT"] = "5000"

        print(">>> Executing: {}".format(args))
        std_output = subprocess.check_output(args,
                                             cwd=os.path.dirname(openpype_app),
                                             env=env)
        print(">>> Process result {}".format(std_output))

        print(">>> Loading file ...")
        with open(export_url) as fp:
            contents = json.load(fp)
        for key, value in contents.items():
            deadlinePlugin.SetProcessEnvironmentVariable(key, value)

        print(">>> Removing temporary file")
        os.remove(export_url)

        print("inject_openpype_environment end")
    except Exception:
        print(">> Injection end.")
    except Exception as e:
        if hasattr(e, "output"):
            print(">>> Exception {}".format(e.output))
        import traceback
        print(traceback.format_exc())
        print("inject_openpype_environment failed")
        print("!!! Injection failed.")
        RepositoryUtils.FailJob(job)
        raise


def inject_render_job_id(deadlinePlugin):
    """Inject dependency ids to publish process as env var for validation."""
    print("inject_render_job_id start")
    print(">>> Injecting render job id ...")
    job = deadlinePlugin.GetJob()
    job = RepositoryUtils.GetJob(job.JobId, True)  # invalidates cache

    dependency_ids = job.JobDependencyIDs
    print("dependency_ids {}".format(dependency_ids))
    print(">>> Dependency IDs: {}".format(dependency_ids))
    render_job_ids = ",".join(dependency_ids)

    deadlinePlugin.SetProcessEnvironmentVariable("RENDER_JOB_IDS",
                                                 render_job_ids)
    print("inject_render_job_id end")
    print(">>> Injection end.")


def pype_command_line(executable, arguments, workingDirectory):

@@ -133,10 +147,13 @@ def pype(deadlinePlugin):
        deadlinePlugin: Deadline job plugin passed by Deadline

    """
    print(">>> Getting job ...")
    job = deadlinePlugin.GetJob()
    # PYPE should be here, not OPENPYPE - backward compatibility!!
    pype_metadata = job.GetJobEnvironmentKeyValue("PYPE_METADATA_FILE")
    pype_python = job.GetJobEnvironmentKeyValue("PYPE_PYTHON_EXE")
    print(">>> Having backward compatible env vars {}/{}".format(pype_metadata,
                                                                 pype_python))
    # test if it is pype publish job.
    if pype_metadata:
        pype_metadata = RepositoryUtils.CheckPathMapping(pype_metadata)

@@ -162,6 +179,8 @@ def pype(deadlinePlugin):


def __main__(deadlinePlugin):
    print("*** GlobalJobPreload start ...")
    print(">>> Getting job ...")
    job = deadlinePlugin.GetJob()
    job = RepositoryUtils.GetJob(job.JobId, True)  # invalidates cache


@@ -170,6 +189,8 @@ def __main__(deadlinePlugin):
    openpype_publish_job = \
        job.GetJobEnvironmentKeyValue('OPENPYPE_PUBLISH_JOB') or '0'

    print("--- Job type - render {}".format(openpype_render_job))
    print("--- Job type - publish {}".format(openpype_publish_job))
    if openpype_publish_job == '1' and openpype_render_job == '1':
        raise RuntimeError("Misconfiguration. Job couldn't be both " +
                           "render and publish.")
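
Side note (illustrative, not part of the commit): `env = os.environ` above
binds the live mapping, so the keys set on it also leak into the Deadline
worker's own environment; `os.environ.copy()` would keep the override local to
the child process. A self-contained sketch of that variant:

    import os
    import subprocess
    import sys

    env = os.environ.copy()  # copy so the parent environment stays untouched
    env["OPENPYPE_HEADLESS_MODE"] = "1"
    env["AVALON_TIMEOUT"] = "5000"
    out = subprocess.check_output(
        [sys.executable, "-c",
         "import os; print(os.environ['AVALON_TIMEOUT'])"],
        env=env
    )
    print(out.strip())  # b'5000'
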
@@ -197,7 +197,7 @@ h5, h6 { font-weight: var(--ifm-font-weight-semibold); }
}

.showcase .client img {
  max-height: 80px;
  max-height: 70px;
  padding: 20px;
  max-width: 120px;
  align-self: center;

@@ -215,10 +215,10 @@ h5, h6 { font-weight: var(--ifm-font-weight-semibold); }
}

.showcase .collab img {
  max-height: 60px;
  max-height: 70px;
  padding: 20px;
  align-self: center;
  max-width: 200px;
  max-width: 160px;
}

.showcase .pype_logo img{
@@ -64,6 +64,10 @@ const collab = [
    title: 'Clothcat Animation',
    image: '/img/clothcat.png',
    infoLink: 'https://www.clothcatanimation.com/'
  }, {
    title: 'Ellipse Studio',
    image: '/img/ellipse-studio.png',
    infoLink: 'http://www.dargaudmedia.com'
  }
];


@@ -125,7 +129,7 @@ const studios = [
    title: "Moonrock Animation Studio",
    image: "/img/moonrock_logo.png",
    infoLink: "https://www.moonrock.eu/",
  }
  }
];

function Service({imageUrl, title, description}) {
BIN  website/static/img/ellipse-studio.png  (new binary file, not shown; 14 KiB)