mirror of https://github.com/ynput/ayon-core.git
synced 2025-12-26 13:52:15 +01:00

Merge branch 'refs/heads/develop' into feature/PYPE-570-maya-renderlayer-creator

This commit is contained in: commit e1937f9912

51 changed files with 1522 additions and 342 deletions
@@ -1,14 +1,6 @@
import os
import sys
import argparse
import logging
import collections
import json
import re

import ftrack_api
from pype.ftrack import BaseAction
from avalon import io, inventory, schema
from pype.ftrack.lib.io_nonsingleton import DbConnector


@@ -134,7 +126,6 @@ class PypeUpdateFromV2_2_0(BaseAction):
"title": title
}


def launch(self, session, entities, event):
if 'values' not in event['data']:
return

@@ -182,7 +173,7 @@ class PypeUpdateFromV2_2_0(BaseAction):
{"type": "asset"},
{"$unset": {"silo": ""}}
)


self.log.debug("- setting schema of assets to v.3")
self.db_con.update_many(
{"type": "asset"},

@@ -191,10 +182,8 @@ class PypeUpdateFromV2_2_0(BaseAction):

return True


def register(session, plugins_presets={}):
"""Register plugin. Called when used as an plugin."""

if not isinstance(session, ftrack_api.session.Session):
return

PypeUpdateFromV2_2_0(session, plugins_presets).register()
@@ -1,9 +1,11 @@
import ftrack_api
from pype.ftrack import BaseEvent
from pypeapp import config


class VersionToTaskStatus(BaseEvent):

default_status_mapping = {}

def launch(self, session, event):
'''Propagates status from version to task when changed'''


@@ -27,13 +29,16 @@ class VersionToTaskStatus(BaseEvent):
self.log.info('>>> version status: [ {} ]'.format(
version_status['name']))

status_to_set = None
# Filter to versions with status change to "render complete"
if version_status['name'].lower() == 'reviewed':
status_to_set = 'Change requested'
version_name_low = version_status['name'].lower()

if version_status['name'].lower() == 'approved':
status_to_set = 'Complete'
status_mapping = (
config.get_presets()
.get("ftrack", {})
.get("ftrack_config", {})
.get("status_version_to_task")
) or self.default_status_mapping

status_to_set = status_mapping.get(version_name_low)

self.log.info(
'>>> status to set: [ {} ]'.format(status_to_set))

@@ -46,7 +51,8 @@ class VersionToTaskStatus(BaseEvent):
self.log.info(
'!!! status was not found in Ftrack [ {} ]'.format(
status_to_set
))
)
)
continue

# Proceed if the task status was set
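
Note: the status hunk above replaces the hardcoded 'reviewed'/'approved' branches with a
presets-driven lookup. A minimal sketch of the same lookup pattern (the mapping values
here are illustrative, not taken from the repository presets):

    # hypothetical preset content; the real dict comes from
    # config.get_presets()["ftrack"]["ftrack_config"]["status_version_to_task"]
    status_mapping = {
        "reviewed": "Change requested",
        "approved": "Complete",
    }

    def task_status_for(version_status_name):
        # keys are stored lowercase, so the lookup is case-insensitive
        return status_mapping.get(version_status_name.lower())

    assert task_status_for("Approved") == "Complete"
    assert task_status_for("On Hold") is None  # unmapped statuses set nothing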
@@ -7,11 +7,9 @@ import socket
import argparse
import atexit
import time
from urllib.parse import urlparse

import ftrack_api
from pype.ftrack.lib import credentials
from pype.ftrack.ftrack_server import FtrackServer
from pype.ftrack.ftrack_server.lib import (
ftrack_events_mongo_settings, check_ftrack_url
)

@@ -67,9 +65,8 @@ def validate_credentials(url, user, api):
except Exception as e:
print(
'ERROR: Can\'t log into Ftrack with used credentials:'
' Ftrack server: "{}" // Username: {} // API key: {}'.format(
url, user, api
))
' Ftrack server: "{}" // Username: {} // API key: {}'
).format(url, user, api)
return False

print('DEBUG: Credentials Username: "{}", API key: "{}" are valid.'.format(

@@ -147,9 +144,9 @@ def legacy_server(ftrack_url):
).format(str(max_fail_count), str(wait_time_after_max_fail)))
subproc_failed_count += 1
elif ((
datetime.datetime.now() - subproc_last_failed
).seconds > wait_time_after_max_fail):
subproc_failed_count = 0
datetime.datetime.now() - subproc_last_failed
).seconds > wait_time_after_max_fail):
subproc_failed_count = 0

# If thread failed test Ftrack and Mongo connection
elif subproc.poll() is not None:

@@ -277,9 +274,9 @@ def main_loop(ftrack_url):
).format(str(max_fail_count), str(wait_time_after_max_fail)))
storer_failed_count += 1
elif ((
datetime.datetime.now() - storer_last_failed
).seconds > wait_time_after_max_fail):
storer_failed_count = 0
datetime.datetime.now() - storer_last_failed
).seconds > wait_time_after_max_fail):
storer_failed_count = 0

# If thread failed test Ftrack and Mongo connection
elif not storer_thread.isAlive():

@@ -313,13 +310,13 @@ def main_loop(ftrack_url):
processor_failed_count += 1

elif ((
datetime.datetime.now() - processor_last_failed
).seconds > wait_time_after_max_fail):
processor_failed_count = 0
datetime.datetime.now() - processor_last_failed
).seconds > wait_time_after_max_fail):
processor_failed_count = 0

# If thread failed test Ftrack and Mongo connection
elif not processor_thread.isAlive():
if storer_thread.mongo_error:
if processor_thread.mongo_error:
raise Exception(
"Exiting because have issue with acces to MongoDB"
)
@@ -1,14 +1,12 @@
import os
import re
import logging
import importlib
import itertools
import contextlib
import subprocess
import inspect


import avalon.io as io
from avalon import io
import avalon.api
import avalon


@@ -19,12 +17,15 @@ log = logging.getLogger(__name__)
def _subprocess(args):
"""Convenience method for getting output errors for subprocess."""

# make sure environment contains only strings
env = {k: str(v) for k, v in os.environ.items()}

proc = subprocess.Popen(
args,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
stdin=subprocess.PIPE,
env=os.environ
env=env
)

output = proc.communicate()[0]
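
Note: the `env=env` change matters because `subprocess.Popen` raises `TypeError` on
Windows when the environment mapping contains non-string values (integers or None that
other tools have injected into `os.environ`). A runnable sketch of the sanitising step:

    import os
    import subprocess

    # coerce every environment value to str before handing it to Popen
    env = {k: str(v) for k, v in os.environ.items()}

    proc = subprocess.Popen(
        ["python", "--version"],  # placeholder command
        stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT,
        env=env,
    )
    print(proc.communicate()[0])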
@@ -33,5 +33,7 @@ class LogsWindow(QtWidgets.QWidget):

def on_selection_changed(self):
index = self.logs_widget.selected_log()
if not index or not index.isValid():
return
node = index.data(self.logs_widget.model.NodeRole)
self.log_detail.set_detail(node)
@@ -1,11 +1,7 @@
import datetime
import inspect
import getpass
from Qt import QtCore, QtWidgets, QtGui
from PyQt5.QtCore import QVariant
from .models import LogModel

from .lib import preserve_states


class SearchComboBox(QtWidgets.QComboBox):
"""Searchable ComboBox with empty placeholder value as first value"""

@@ -53,6 +49,7 @@ class SearchComboBox(QtWidgets.QComboBox):

return text


class CheckableComboBox2(QtWidgets.QComboBox):
def __init__(self, parent=None):
super(CheckableComboBox, self).__init__(parent)

@@ -96,9 +93,11 @@ class SelectableMenu(QtWidgets.QMenu):
else:
super(SelectableMenu, self).mouseReleaseEvent(event)


class CustomCombo(QtWidgets.QWidget):

selection_changed = QtCore.Signal()
checked_changed = QtCore.Signal(bool)

def __init__(self, title, parent=None):
super(CustomCombo, self).__init__(parent)

@@ -127,12 +126,27 @@ class CustomCombo(QtWidgets.QWidget):
self.toolmenu.clear()
self.addItems(items)

def select_items(self, items, ignore_input=False):
if not isinstance(items, list):
items = [items]

for action in self.toolmenu.actions():
check = True
if (
action.text() in items and ignore_input or
action.text() not in items and not ignore_input
):
check = False

action.setChecked(check)

def addItems(self, items):
for item in items:
action = self.toolmenu.addAction(item)
action.setCheckable(True)
action.setChecked(True)
self.toolmenu.addAction(action)
action.setChecked(True)
action.triggered.connect(self.checked_changed)

def items(self):
for action in self.toolmenu.actions():

@@ -186,15 +200,42 @@ class CheckableComboBox(QtWidgets.QComboBox):
for text, checked in items:
text_item = QtGui.QStandardItem(text)
checked_item = QtGui.QStandardItem()
checked_item.setData(QVariant(checked), QtCore.Qt.CheckStateRole)
checked_item.setData(
QtCore.QVariant(checked), QtCore.Qt.CheckStateRole
)
self.model.appendRow([text_item, checked_item])


class FilterLogModel(QtCore.QSortFilterProxyModel):
sub_dict = ["$gt", "$lt", "$not"]
def __init__(self, key_values, parent=None):
super(FilterLogModel, self).__init__(parent)
self.allowed_key_values = key_values

def filterAcceptsRow(self, row, parent):
"""
Reimplemented from base class.
"""
model = self.sourceModel()
for key, values in self.allowed_key_values.items():
col_indx = model.COLUMNS.index(key)
value = model.index(row, col_indx, parent).data(
QtCore.Qt.DisplayRole
)
if value not in values:
return False
return True


class LogsWidget(QtWidgets.QWidget):
"""A widget that lists the published subsets for an asset"""

active_changed = QtCore.Signal()

_level_order = [
"DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"
]

def __init__(self, parent=None):
super(LogsWidget, self).__init__(parent=parent)


@@ -202,47 +243,45 @@ class LogsWidget(QtWidgets.QWidget):

filter_layout = QtWidgets.QHBoxLayout()

# user_filter = SearchComboBox(self, "Users")
user_filter = CustomCombo("Users", self)
users = model.dbcon.distinct("user")
user_filter.populate(users)
user_filter.selection_changed.connect(self.user_changed)
user_filter.checked_changed.connect(self.user_changed)
user_filter.select_items(getpass.getuser())

level_filter = CustomCombo("Levels", self)
# levels = [(level, True) for level in model.dbcon.distinct("level")]
levels = model.dbcon.distinct("level")
level_filter.addItems(levels)
_levels = []
for level in self._level_order:
if level in levels:
_levels.append(level)
level_filter.populate(_levels)
level_filter.checked_changed.connect(self.level_changed)

date_from_label = QtWidgets.QLabel("From:")
date_filter_from = QtWidgets.QDateTimeEdit()

date_from_layout = QtWidgets.QVBoxLayout()
date_from_layout.addWidget(date_from_label)
date_from_layout.addWidget(date_filter_from)

# now = datetime.datetime.now()
# QtCore.QDateTime(now.year, now.month, now.day, now.hour, now.minute, second = 0, msec = 0, timeSpec = 0)
date_to_label = QtWidgets.QLabel("To:")
date_filter_to = QtWidgets.QDateTimeEdit()

date_to_layout = QtWidgets.QVBoxLayout()
date_to_layout.addWidget(date_to_label)
date_to_layout.addWidget(date_filter_to)
# date_from_label = QtWidgets.QLabel("From:")
# date_filter_from = QtWidgets.QDateTimeEdit()
#
# date_from_layout = QtWidgets.QVBoxLayout()
# date_from_layout.addWidget(date_from_label)
# date_from_layout.addWidget(date_filter_from)
#
# date_to_label = QtWidgets.QLabel("To:")
# date_filter_to = QtWidgets.QDateTimeEdit()
#
# date_to_layout = QtWidgets.QVBoxLayout()
# date_to_layout.addWidget(date_to_label)
# date_to_layout.addWidget(date_filter_to)

filter_layout.addWidget(user_filter)
filter_layout.addWidget(level_filter)
filter_layout.setAlignment(QtCore.Qt.AlignLeft)

filter_layout.addLayout(date_from_layout)
filter_layout.addLayout(date_to_layout)
# filter_layout.addLayout(date_from_layout)
# filter_layout.addLayout(date_to_layout)

view = QtWidgets.QTreeView(self)
view.setAllColumnsShowFocus(True)

# # Set view delegates
# time_delegate = PrettyTimeDelegate()
# column = model.COLUMNS.index("time")
# view.setItemDelegateForColumn(column, time_delegate)

layout = QtWidgets.QVBoxLayout(self)
layout.setContentsMargins(0, 0, 0, 0)
layout.addLayout(filter_layout)

@@ -255,34 +294,54 @@ class LogsWidget(QtWidgets.QWidget):
QtCore.Qt.AscendingOrder
)

view.setModel(model)
key_val = {
"user": users,
"level": levels
}
proxy_model = FilterLogModel(key_val, view)
proxy_model.setSourceModel(model)
view.setModel(proxy_model)

view.customContextMenuRequested.connect(self.on_context_menu)
view.selectionModel().selectionChanged.connect(self.active_changed)
# user_filter.connect()

# TODO remove if nothing will affect...
# header = self.view.header()
# WARNING this is cool but slows down widget a lot
# header = view.header()
# # Enforce the columns to fit the data (purely cosmetic)
# if Qt.__binding__ in ("PySide2", "PyQt5"):
# header.setSectionResizeMode(QtWidgets.QHeaderView.ResizeToContents)
# else:
# header.setResizeMode(QtWidgets.QHeaderView.ResizeToContents)

# Set signals

# prepare
model.refresh()

# Store to memory
self.model = model
self.proxy_model = proxy_model
self.view = view

self.user_filter = user_filter
self.level_filter = level_filter

def user_changed(self):
valid_actions = []
for action in self.user_filter.items():
print(action)
if action.isChecked():
valid_actions.append(action.text())

self.proxy_model.allowed_key_values["user"] = valid_actions
self.proxy_model.invalidate()

def level_changed(self):
valid_actions = []
for action in self.level_filter.items():
if action.isChecked():
valid_actions.append(action.text())

self.proxy_model.allowed_key_values["level"] = valid_actions
self.proxy_model.invalidate()


def on_context_menu(self, point):
# TODO will be any actions? it's ready
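
Note: the new `FilterLogModel` follows the standard `QSortFilterProxyModel` pattern: the
proxy sits between the source model and the view, `filterAcceptsRow` consults a mutable
dict of allowed values, and the checkbox callbacks mutate that dict and call
`invalidate()` to re-run the filter. A reduced sketch of the wiring used above (values
are illustrative):

    key_val = {"user": ["jakub"], "level": ["INFO", "ERROR"]}
    proxy_model = FilterLogModel(key_val, view)
    proxy_model.setSourceModel(model)   # model.COLUMNS names the filterable columns
    view.setModel(proxy_model)

    # later, when a checkbox changes:
    proxy_model.allowed_key_values["level"] = ["ERROR"]
    proxy_model.invalidate()  # re-evaluates filterAcceptsRow for every row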
@@ -112,7 +112,9 @@ def install():
# Disable all families except for the ones we explicitly want to see
family_states = [
"write",
"review"
"review",
"nukenodes"
"gizmo"
]

avalon.data["familiesStateDefault"] = False
@@ -105,6 +105,10 @@ def writes_version_sync():

for each in nuke.allNodes():
if each.Class() == 'Write':
# check if the node is avalon tracked
if "AvalonTab" not in each.knobs():
continue

avalon_knob_data = avalon.nuke.get_avalon_knob_data(
each, ['avalon:', 'ak:'])


@@ -1190,3 +1194,39 @@ class BuildWorkfile(WorkfileSettings):

def position_up(self, multiply=1):
self.ypos -= (self.ypos_size * multiply) + self.ypos_gap

def get_dependent_nodes(nodes):
"""Get all dependent nodes connected to the list of nodes.

Looking for connections outside of the nodes in incoming argument.

Arguments:
nodes (list): list of nuke.Node objects

Returns:
connections_in: dictionary of nodes and its dependencies
connections_out: dictionary of nodes and its dependency
"""

connections_in = dict()
connections_out = dict()
node_names = [n.name() for n in nodes]
for node in nodes:
inputs = node.dependencies()
outputs = node.dependent()
# collect all inputs outside
test_in = [(i, n) for i, n in enumerate(inputs)
if n.name() not in node_names]
if test_in:
connections_in.update({
node: test_in
})
# collect all outputs outside
test_out = [i for i in outputs if i.name() not in node_names]
if test_out:
# only one dependent node is allowed
connections_out.update({
node: test_out[-1]
})

return connections_in, connections_out
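
Note: a usage sketch for the new `get_dependent_nodes` helper (runnable only inside
Nuke; the node selection is illustrative):

    import nuke

    nodes = nuke.selectedNodes()
    connections_in, connections_out = get_dependent_nodes(nodes)

    # connections_in maps node -> [(input_index, outside_node), ...]
    for node, inputs in connections_in.items():
        for index, outside in inputs:
            print("{} input {} fed by {}".format(
                node.name(), index, outside.name()))

    # connections_out maps node -> the single outside node it feeds
    for node, outside in connections_out.items():
        print("{} feeds {}".format(node.name(), outside.name()))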
pype/nuke/utils.py (new file, 64 lines)

@@ -0,0 +1,64 @@
import os
import nuke
from avalon.nuke import lib as anlib


def get_node_outputs(node):
'''
Return a dictionary of the nodes and pipes that are connected to node
'''
dep_dict = {}
dependencies = node.dependent(nuke.INPUTS | nuke.HIDDEN_INPUTS)
for d in dependencies:
dep_dict[d] = []
for i in range(d.inputs()):
if d.input(i) == node:
dep_dict[d].append(i)
return dep_dict


def is_node_gizmo(node):
'''
return True if node is gizmo
'''
return 'gizmo_file' in node.knobs()


def gizmo_is_nuke_default(gizmo):
'''Check if gizmo is in default install path'''
plug_dir = os.path.join(os.path.dirname(
nuke.env['ExecutablePath']), 'plugins')
return gizmo.filename().startswith(plug_dir)


def bake_gizmos_recursively(in_group=nuke.Root()):
"""Converting a gizmo to group

Argumets:
is_group (nuke.Node)[optonal]: group node or all nodes
"""
# preserve selection after all is done
with anlib.maintained_selection():
# jump to the group
with in_group:
for node in nuke.allNodes():
if is_node_gizmo(node) and not gizmo_is_nuke_default(node):
with node:
outputs = get_node_outputs(node)
group = node.makeGroup()
# Reconnect inputs and outputs if any
if outputs:
for n, pipes in outputs.items():
for i in pipes:
n.setInput(i, group)
for i in range(node.inputs()):
group.setInput(i, node.input(i))
# set node position and name
group.setXYpos(node.xpos(), node.ypos())
name = node.name()
nuke.delete(node)
group.setName(name)
node = group

if node.Class() == "Group":
bake_gizmos_recursively(node)
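
Note: a usage sketch for the new module (Nuke-only; a typical call site would be just
before publishing, so gizmos travel as plain groups):

    import nuke
    from pype.nuke import utils

    # bake every non-default gizmo in the script into an ordinary Group,
    # preserving connections, position and name; recurses into nested groups
    utils.bake_gizmos_recursively()

    # or restrict the bake to a single group node
    utils.bake_gizmos_recursively(nuke.toNode("MyGroup"))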
@@ -1,9 +1,6 @@
import os
import pyblish.api
from avalon import (
io,
api as avalon
)
from avalon import api as avalon
from pype import api as pype
import json
from pathlib import Path

@@ -116,6 +116,7 @@ class IntegrateFtrackInstance(pyblish.api.InstancePlugin):
},
"assetversion_data": {
"version": version_number,
"comment": instance.context.data.get("comment", "")
},
"component_data": component_data,
"component_path": comp['published_path'],
@@ -29,11 +29,16 @@ class ExtractBurnin(pype.api.Extractor):
if instance.context.data.get('version'):
version = "v" + str(instance.context.data['version'])

frame_start = int(instance.data.get("frameStart") or 0)
frame_end = int(instance.data.get("frameEnd") or 1)
duration = frame_end - frame_start + 1
prep_data = {
"username": instance.context.data['user'],
"asset": os.environ['AVALON_ASSET'],
"task": os.environ['AVALON_TASK'],
"start_frame": int(instance.data["frameStart"]),
"frame_start": frame_start,
"frame_end": frame_end,
"duration": duration,
"version": version
}
self.log.debug("__ prep_data: {}".format(prep_data))

@@ -49,12 +54,17 @@ class ExtractBurnin(pype.api.Extractor):
name = "_burnin"
movieFileBurnin = filename.replace(".mov", "") + name + ".mov"

full_movie_path = os.path.join(os.path.normpath(stagingdir), repre["files"])
full_burnin_path = os.path.join(os.path.normpath(stagingdir), movieFileBurnin)
full_movie_path = os.path.join(
os.path.normpath(stagingdir), repre["files"]
)
full_burnin_path = os.path.join(
os.path.normpath(stagingdir), movieFileBurnin
)
self.log.debug("__ full_burnin_path: {}".format(full_burnin_path))

burnin_data = {
"input": full_movie_path.replace("\\", "/"),
"codec": repre.get("codec", []),
"output": full_burnin_path.replace("\\", "/"),
"burnin_data": prep_data
}
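
Note: the duration arithmetic above counts both endpoints, hence the `+ 1`:

    frame_start = 1001
    frame_end = 1100
    duration = frame_end - frame_start + 1  # 100 frames, both ends included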
@@ -147,6 +147,7 @@ class ExtractReview(pyblish.api.InstancePlugin):
)

output_args = []
output_args.extend(profile.get('codec', []))
# preset's output data
output_args.extend(profile.get('output', []))

@@ -183,7 +184,8 @@ class ExtractReview(pyblish.api.InstancePlugin):
'ext': ext,
'files': repr_file,
"tags": new_tags,
"outputName": name
"outputName": name,
"codec": profile.get('codec', [])
})
if repre_new.get('preview'):
repre_new.pop("preview")
@@ -70,6 +70,8 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
"audio",
"yetiRig",
"yeticache",
"nukenodes",
"gizmo"
"source",
"matchmove",
"image"

@@ -412,7 +414,7 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
}

if sequence_repre and repre.get("frameStart"):
representation['context']['frame'] = repre.get("frameStart")
representation['context']['frame'] = src_padding_exp % repre.get("frameStart")

self.log.debug("__ representation: {}".format(representation))
destination_list.append(dst)

@@ -525,13 +527,11 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):

# add group if available
if instance.data.get("subsetGroup"):
subset["data"].update(
{"subsetGroup": instance.data.get("subsetGroup")}
)
io.update_many({
'type': 'subset',
'_id': io.ObjectId(subset["_id"])
}, {'$set': subset["data"]}
}, {'$set': {'data.subsetGroup':
instance.data.get('subsetGroup')}}
)

return subset
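
Note: the `src_padding_exp % repre.get("frameStart")` change stores the frame token
zero-padded instead of as a bare integer. `src_padding_exp` is built elsewhere in the
plugin; assuming it is a printf-style pattern such as "%04d":

    src_padding_exp = "%04d"       # assumed value, derived from template padding
    print(src_padding_exp % 25)    # "0025"
    print(src_padding_exp % 1001)  # "1001"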
@@ -1,7 +1,4 @@
import os
import sys
from avalon import io
from pprint import pprint
import acre

from avalon import api, lib
@@ -1,10 +1,9 @@
import os
import sys
from pprint import pprint
import acre

from avalon import api, lib, io
import pype.api as pype
from pypeapp import Anatomy


class PremierePro(api.Action):
@@ -1,6 +1,7 @@
from collections import OrderedDict

import avalon.maya
from pype.maya import lib

from maya import cmds


@@ -14,10 +15,21 @@ class CreateAss(avalon.maya.Creator):
icon = "cube"
defaults = ['Main']

def __init__(self, *args, **kwargs):
super(CreateAss, self).__init__(*args, **kwargs)

# Add animation data
self.data.update(lib.collect_animation_data())

# Vertex colors with the geometry
self.data["exportSequence"] = False

def process(self):
instance = super(CreateAss, self).process()

data = OrderedDict(**self.data)
# data = OrderedDict(**self.data)



nodes = list()


@@ -30,4 +42,6 @@ class CreateAss(avalon.maya.Creator):
assProxy = cmds.sets(name="proxy_SET", empty=True)
cmds.sets([assContent, assProxy], forceElement=instance)

self.data = data
# self.log.info(data)
#
# self.data = data
@@ -2,6 +2,7 @@ from avalon import api
import pype.maya.plugin
import os
from pypeapp import config
import clique


class AssProxyLoader(pype.maya.plugin.ReferenceLoader):

@@ -21,6 +22,13 @@ class AssProxyLoader(pype.maya.plugin.ReferenceLoader):
from avalon import maya
import pymel.core as pm

version = context['version']
version_data = version.get("data", {})

self.log.info("version_data: {}\n".format(version_data))

frameStart = version_data.get("frameStart", None)

try:
family = context["representation"]["context"]["family"]
except ValueError:

@@ -30,7 +38,24 @@ class AssProxyLoader(pype.maya.plugin.ReferenceLoader):

groupName = "{}:{}".format(namespace, name)
path = self.fname
proxyPath = os.path.splitext(path)[0] + ".ma"
proxyPath_base = os.path.splitext(path)[0]

if frameStart is not None:
proxyPath_base = os.path.splitext(proxyPath_base)[0]

publish_folder = os.path.split(path)[0]
files_in_folder = os.listdir(publish_folder)
collections, remainder = clique.assemble(files_in_folder)

if collections:
hashes = collections[0].padding * '#'
coll = collections[0].format('{head}[index]{tail}')
filename = coll.replace('[index]', hashes)

path = os.path.join(publish_folder, filename)

proxyPath = proxyPath_base + ".ma"
self.log.info

nodes = cmds.file(proxyPath,
namespace=namespace,

@@ -147,6 +172,13 @@ class AssStandinLoader(api.Loader):
import mtoa.ui.arnoldmenu
import pymel.core as pm

version = context['version']
version_data = version.get("data", {})

self.log.info("version_data: {}\n".format(version_data))

frameStart = version_data.get("frameStart", None)

asset = context['asset']['name']
namespace = namespace or lib.unique_namespace(
asset + "_",

@@ -182,6 +214,8 @@ class AssStandinLoader(api.Loader):

# Set the standin filepath
standinShape.dso.set(self.fname)
if frameStart is not None:
standinShape.useFrameExtension.set(1)

nodes = [root, standin]
self[:] = nodes

@@ -199,14 +233,23 @@ class AssStandinLoader(api.Loader):

path = api.get_representation_path(representation)

# Update the standin
members = pm.sets(container['objectName'], query=True)
standins = pm.ls(members, type="AiStandIn", long=True)
files_in_path = os.listdir(os.path.split(path)[0])
sequence = 0
collections, remainder = clique.assemble(files_in_path)
if collections:
sequence = 1

assert len(caches) == 1, "This is a bug"
# Update the standin
standins = list()
members = pm.sets(container['objectName'], query=True)
for member in members:
shape = member.getShape()
if (shape and shape.type() == "aiStandIn"):
standins.append(shape)

for standin in standins:
standin.cacheFileName.set(path)
standin.dso.set(path)
standin.useFrameExtension.set(sequence)

container = pm.PyNode(container["objectName"])
container.representation.set(str(representation["_id"]))
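
Note: both loaders above now use the `clique` library to detect frame sequences in the
publish folder. A minimal sketch of the calls they make (file names are illustrative):

    import clique

    files = ["shot.0001.ass", "shot.0002.ass", "shot.0003.ass", "shot.ma"]
    collections, remainder = clique.assemble(files)

    coll = collections[0]
    hashes = coll.padding * "#"                   # "####" for 4-digit padding
    pattern = coll.format("{head}[index]{tail}")  # "shot.[index].ass"
    print(pattern.replace("[index]", hashes))     # "shot.####.ass"
    print(remainder)                              # ["shot.ma"]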
@@ -1,9 +1,7 @@
import pype.maya.plugin
import os
from pypeapp import config
reload(config)
import pype.maya.plugin
reload(pype.maya.plugin)


class ReferenceLoader(pype.maya.plugin.ReferenceLoader):
"""Load the model"""

@@ -22,7 +20,6 @@ class ReferenceLoader(pype.maya.plugin.ReferenceLoader):
from avalon import maya
import pymel.core as pm


try:
family = context["representation"]["context"]["family"]
except ValueError:
@@ -297,9 +297,11 @@ class CollectLook(pyblish.api.InstancePlugin):

self.log.info("Collected file nodes:\n{}".format(files))
# Collect textures if any file nodes are found
instance.data["resources"] = [self.collect_resource(n)
for n in files]
self.log.info("Collected resources:\n{}".format(instance.data["resources"]))
instance.data["resources"] = []
for n in files:
instance.data["resources"].append(self.collect_resource(n))

self.log.info("Collected resources: {}".format(instance.data["resources"]))

# Log a warning when no relevant sets were retrieved for the look.
if not instance.data["lookData"]["relationships"]:

@@ -423,7 +425,7 @@ class CollectLook(pyblish.api.InstancePlugin):

self.log.debug("processing: {}".format(node))
if cmds.nodeType(node) == 'file':
self.log.debug("file node")
self.log.debug(" - file node")
attribute = "{}.fileTextureName".format(node)
computed_attribute = "{}.computedFileTextureNamePattern".format(node)
elif cmds.nodeType(node) == 'aiImage':

@@ -431,7 +433,7 @@ class CollectLook(pyblish.api.InstancePlugin):
attribute = "{}.filename".format(node)
computed_attribute = attribute
source = cmds.getAttr(attribute)

self.log.info(" - file source: {}".format(source))
color_space_attr = "{}.colorSpace".format(node)
color_space = cmds.getAttr(color_space_attr)
# Compare with the computed file path, e.g. the one with the <UDIM>

@@ -455,6 +457,13 @@ class CollectLook(pyblish.api.InstancePlugin):
if len(files) == 0:
self.log.error("No valid files found from node `%s`" % node)

self.log.info("collection of resource done:")
self.log.info(" - node: {}".format(node))
self.log.info(" - attribute: {}".format(attribute))
self.log.info(" - source: {}".format(source))
self.log.info(" - file: {}".format(files))
self.log.info(" - color space: {}".format(color_space))

# Define the resource
return {"node": node,
"attribute": attribute,
@@ -20,8 +20,11 @@ class ExtractAssStandin(pype.api.Extractor):

def process(self, instance):

sequence = instance.data.get("exportSequence", False)

staging_dir = self.staging_dir(instance)
filename = "{}.ass".format(instance.name)
filenames = list()
file_path = os.path.join(staging_dir, filename)

# Write out .ass file

@@ -29,13 +32,47 @@ class ExtractAssStandin(pype.api.Extractor):
with avalon.maya.maintained_selection():
self.log.info("Writing: {}".format(instance.data["setMembers"]))
cmds.select(instance.data["setMembers"], noExpand=True)
cmds.arnoldExportAss( filename=file_path,
selected=True,
asciiAss=True,
shadowLinks=True,
lightLinks=True,
boundingBox=True
)

if sequence:
self.log.info("Extracting ass sequence")

# Collect the start and end including handles
start = instance.data.get("frameStart", 1)
end = instance.data.get("frameEnd", 1)
handles = instance.data.get("handles", 0)
step = instance.data.get("step", 0)
if handles:
start -= handles
end += handles

exported_files = cmds.arnoldExportAss(filename=file_path,
selected=True,
asciiAss=True,
shadowLinks=True,
lightLinks=True,
boundingBox=True,
startFrame=start,
endFrame=end,
frameStep=step
)
for file in exported_files:
filenames.append(os.path.split(file)[1])
self.log.info("Exported: {}".format(filenames))
else:
cmds.arnoldExportAss(filename=file_path,
selected=True,
asciiAss=True,
shadowLinks=True,
lightLinks=True,
boundingBox=True
)
filenames = filename
optionals = [
"frameStart", "frameEnd", "step", "handles",
"handleEnd", "handleStart"
]
for key in optionals:
instance.data.pop(key, None)

if "representations" not in instance.data:
instance.data["representations"] = []

@@ -43,9 +80,13 @@ class ExtractAssStandin(pype.api.Extractor):
representation = {
'name': 'ass',
'ext': 'ass',
'files': filename,
'files': filenames,
"stagingDir": staging_dir
}

if sequence:
representation['frameStart'] = start

instance.data["representations"].append(representation)

self.log.info("Extracted instance '%s' to: %s"
@@ -43,8 +43,13 @@ class ExtractAssProxy(pype.api.Extractor):

# Get only the shape contents we need in such a way that we avoid
# taking along intermediateObjects
members = instance.data['proxy']
members = cmds.ls(members,
proxy = instance.data.get('proxy', None)

if not proxy:
self.log.info("no proxy mesh")
return

members = cmds.ls(proxy,
dag=True,
transforms=True,
noIntermediate=True)
@@ -38,11 +38,7 @@ def source_hash(filepath, *args):
file_name = os.path.basename(filepath)
time = str(os.path.getmtime(filepath))
size = str(os.path.getsize(filepath))
return "|".join([
file_name,
time,
size
] + list(args)).replace(".", ",")
return "|".join([file_name, time, size] + list(args)).replace(".", ",")


def find_paths_by_hash(texture_hash):
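
Note: the `source_hash` refactor above is purely cosmetic. The function builds a cheap
identity string from file name, mtime and size, with dots swapped for commas so the
hash can live inside a file name. An illustration for a hypothetical file:

    h = source_hash("/textures/wood.exr", "sRGB")
    # -> "wood,exr|1554712800,0|524288|sRGB"
    #    (basename | mtime | size | extra args, dots replaced by commas)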
@@ -64,36 +60,31 @@ def maketx(source, destination, *args):
"""

cmd = [
"maketx",
"-v", # verbose
"-u", # update mode
# unpremultiply before conversion (recommended when alpha present)
"--unpremult",
"--checknan",
# use oiio-optimized settings for tile-size, planarconfig, metadata
"--oiio",
"--filter lanczos3"
]
"maketx",
"-v", # verbose
"-u", # update mode
# unpremultiply before conversion (recommended when alpha present)
"--unpremult",
"--checknan",
# use oiio-optimized settings for tile-size, planarconfig, metadata
"--oiio",
"--filter lanczos3",
]

cmd.extend(args)
cmd.extend([
"-o", destination,
source
])
cmd.extend(["-o", destination, source])

CREATE_NO_WINDOW = 0x08000000
kwargs = dict(
args=cmd,
stderr=subprocess.STDOUT
)
kwargs = dict(args=cmd, stderr=subprocess.STDOUT)

if sys.platform == "win32":
kwargs["creationflags"] = CREATE_NO_WIDOW
kwargs["creationflags"] = CREATE_NO_WINDOW
try:
out = subprocess.check_output(**kwargs)
except subprocess.CalledProcessError as exc:
print(exc)
import traceback

traceback.print_exc()
raise
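
Note: besides the layout changes, this hunk fixes the `CREATE_NO_WIDOW` typo so the
Windows no-window flag actually resolves. Flattened, the assembled command is
equivalent to the list below; the hash and the colour-conversion flag are illustrative
runtime values, not taken from the repository:

    cmd = [
        "maketx", "-v", "-u", "--unpremult", "--checknan",
        "--oiio", "--filter lanczos3",
        "-sattrib", "sourceHash", "<texture_hash>",
        "--colorconvert sRGB linear",   # assumed content of `colorconvert`
        "-o", "/staging/resources/wood.tx", "/textures/wood.exr",
    ]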
@@ -180,11 +171,12 @@ class ExtractLook(pype.api.Extractor):
# Preserve color space values (force value after filepath change)
# This will also trigger in the same order at end of context to
# ensure after context it's still the original value.
color_space = resource.get('color_space')
color_space = resource.get("color_space")

for f in resource["files"]:

files_metadata[os.path.normpath(f)] = {'color_space': color_space}
files_metadata[os.path.normpath(f)] = {
"color_space": color_space}
# files.update(os.path.normpath(f))

# Process the resource files

@@ -195,17 +187,19 @@ class ExtractLook(pype.api.Extractor):
self.log.info(files)
for filepath in files_metadata:

cspace = files_metadata[filepath]['color_space']
cspace = files_metadata[filepath]["color_space"]
linearise = False
if cspace == 'sRGB':
if cspace == "sRGB":
linearise = True
# set its file node to 'raw' as tx will be linearized
files_metadata[filepath]["color_space"] = "raw"

source, mode, hash = self._process_texture(
filepath, do_maketx, staging=dir_path, linearise=linearise
)
destination = self.resource_destination(
instance, source, do_maketx
)
destination = self.resource_destination(instance,
source,
do_maketx)

# Force copy is specified.
if instance.data.get("forceCopy", False):

@@ -235,11 +229,11 @@ class ExtractLook(pype.api.Extractor):
# Preserve color space values (force value after filepath change)
# This will also trigger in the same order at end of context to
# ensure after context it's still the original value.
color_space_attr = resource['node'] + ".colorSpace"
color_space_attr = resource["node"] + ".colorSpace"
color_space = cmds.getAttr(color_space_attr)

# Remap file node filename to destination
attr = resource['attribute']
attr = resource["attribute"]
remap[attr] = destinations[source]

remap[color_space_attr] = color_space

@@ -268,13 +262,15 @@ class ExtractLook(pype.api.Extractor):
channels=True,
constraints=True,
expressions=True,
constructionHistory=True
constructionHistory=True,
)

# Write the JSON data
self.log.info("Extract json..")
data = {"attributes": lookdata["attributes"],
"relationships": relationships}
data = {
"attributes": lookdata["attributes"],
"relationships": relationships
}

with open(json_path, "w") as f:
json.dump(data, f)

@@ -293,7 +289,7 @@ class ExtractLook(pype.api.Extractor):
instance.data["representations"].append(
{
"name": "ma",
"ext": 'ma',
"ext": "ma",
"files": os.path.basename(maya_fname),
"stagingDir": os.path.dirname(maya_fname),
}

@@ -301,7 +297,7 @@ class ExtractLook(pype.api.Extractor):
instance.data["representations"].append(
{
"name": "json",
"ext": 'json',
"ext": "json",
"files": os.path.basename(json_fname),
"stagingDir": os.path.dirname(json_fname),
}

@@ -314,13 +310,12 @@ class ExtractLook(pype.api.Extractor):
# Source hash for the textures
instance.data["sourceHashes"] = hashes

self.log.info("Extracted instance '%s' to: %s" % (
instance.name, maya_path)
)
self.log.info("Extracted instance '%s' to: %s" % (instance.name,
maya_path))

def resource_destination(self, instance, filepath, do_maketx):

anatomy = instance.context.data['anatomy']
anatomy = instance.context.data["anatomy"]

self.create_destination_template(instance, anatomy)


@@ -332,9 +327,7 @@ class ExtractLook(pype.api.Extractor):
ext = ".tx"

return os.path.join(
instance.data["assumedDestination"],
"resources",
basename + ext
instance.data["assumedDestination"], "resources", basename + ext
)

def _process_texture(self, filepath, do_maketx, staging, linearise):

@@ -366,17 +359,13 @@ class ExtractLook(pype.api.Extractor):
return source, HARDLINK, texture_hash
else:
self.log.warning(
"Paths not found on disk, "
"skipping hardlink: %s" % (existing,)
("Paths not found on disk, "
"skipping hardlink: %s") % (existing,)
)

if do_maketx and ext != ".tx":
# Produce .tx file in staging if source file is not .tx
converted = os.path.join(
staging,
"resources",
fname + ".tx"
)
converted = os.path.join(staging, "resources", fname + ".tx")

if linearise:
self.log.info("tx: converting sRGB -> linear")

@@ -389,9 +378,15 @@ class ExtractLook(pype.api.Extractor):
os.makedirs(os.path.dirname(converted))

self.log.info("Generating .tx file for %s .." % filepath)
maketx(filepath, converted,
# Include `source-hash` as string metadata
"-sattrib", "sourceHash", texture_hash, colorconvert)
maketx(
filepath,
converted,
# Include `source-hash` as string metadata
"-sattrib",
"sourceHash",
texture_hash,
colorconvert,
)

return converted, COPY, texture_hash


@@ -417,58 +412,62 @@ class ExtractLook(pype.api.Extractor):
project_name = api.Session["AVALON_PROJECT"]
a_template = anatomy.templates

project = io.find_one({"type": "project",
"name": project_name},
projection={"config": True, "data": True})
project = io.find_one(
{"type": "project", "name": project_name},
projection={"config": True, "data": True},
)

template = a_template['publish']['path']
template = a_template["publish"]["path"]
# anatomy = instance.context.data['anatomy']

asset = io.find_one({"type": "asset",
"name": asset_name,
"parent": project["_id"]})
asset = io.find_one(
{"type": "asset", "name": asset_name, "parent": project["_id"]}
)

assert asset, ("No asset found by the name '{}' "
"in project '{}'".format(asset_name, project_name))
silo = asset.get('silo')
"in project '{}'").format(asset_name, project_name)
silo = asset.get("silo")

subset = io.find_one({"type": "subset",
"name": subset_name,
"parent": asset["_id"]})
subset = io.find_one(
{"type": "subset", "name": subset_name, "parent": asset["_id"]}
)

# assume there is no version yet, we start at `1`
version = None
version_number = 1
if subset is not None:
version = io.find_one({"type": "version",
"parent": subset["_id"]},
sort=[("name", -1)])
version = io.find_one(
{"type": "version",
"parent": subset["_id"]
}, sort=[("name", -1)]
)

# if there is a subset there ought to be version
if version is not None:
version_number += version["name"]

if instance.data.get('version'):
version_number = int(instance.data.get('version'))
if instance.data.get("version"):
version_number = int(instance.data.get("version"))

padding = int(a_template['render']['padding'])
padding = int(a_template["render"]["padding"])

hierarchy = asset['data']['parents']
hierarchy = asset["data"]["parents"]
if hierarchy:
# hierarchy = os.path.sep.join(hierarchy)
hierarchy = "/".join(hierarchy)

template_data = {"root": api.Session["AVALON_PROJECTS"],
"project": {"name": project_name,
"code": project['data']['code']},
"silo": silo,
"family": instance.data['family'],
"asset": asset_name,
"subset": subset_name,
"frame": ('#' * padding),
"version": version_number,
"hierarchy": hierarchy,
"representation": "TEMP"}
template_data = {
"root": api.Session["AVALON_PROJECTS"],
"project": {"name": project_name, "code": project["data"]["code"]},
"silo": silo,
"family": instance.data["family"],
"asset": asset_name,
"subset": subset_name,
"frame": ("#" * padding),
"version": version_number,
"hierarchy": hierarchy,
"representation": "TEMP",
}

instance.data["assumedTemplateData"] = template_data
self.log.info(template_data)
@@ -1,6 +1,6 @@
import pyblish.api

import avalon.io as io
from avalon import io

import pype.api
import pype.maya.action

@@ -1,7 +1,7 @@
import pyblish.api
import pype.api

import avalon.io as io
from avalon import io
import pype.maya.action

from pype.maya import lib

@@ -1,7 +1,7 @@
import nuke
import os
import pyblish.api
import avalon.io as io
from avalon import io
# TODO: add repair function
@@ -1,16 +1,50 @@
from avalon.nuke.pipeline import Creator

from avalon.nuke import lib as anlib
import nuke

class CreateBackdrop(Creator):
"""Add Publishable Backdrop"""

name = "backdrop"
label = "Backdrop"
family = "group"
icon = "cube"
name = "nukenodes"
label = "Create Backdrop"
family = "nukenodes"
icon = "file-archive-o"
defaults = ["Main"]

def __init__(self, *args, **kwargs):
super(CreateBackdrop, self).__init__(*args, **kwargs)

self.nodes = nuke.selectedNodes()
self.node_color = "0xdfea5dff"
return

def process(self):
from nukescripts import autoBackdrop
nodes = list()
if (self.options or {}).get("useSelection"):
nodes = self.nodes

if len(nodes) >= 1:
anlib.select_nodes(nodes)
bckd_node = autoBackdrop()
bckd_node["name"].setValue("{}_BDN".format(self.name))
bckd_node["tile_color"].setValue(int(self.node_color, 16))
bckd_node["note_font_size"].setValue(24)
bckd_node["label"].setValue("[{}]".format(self.name))
# add avalon knobs
instance = anlib.imprint(bckd_node, self.data)

return instance
else:
nuke.message("Please select nodes you "
"wish to add to a container")
return
else:
bckd_node = autoBackdrop()
bckd_node["name"].setValue("{}_BDN".format(self.name))
bckd_node["tile_color"].setValue(int(self.node_color, 16))
bckd_node["note_font_size"].setValue(24)
bckd_node["label"].setValue("[{}]".format(self.name))
# add avalon knobs
instance = anlib.imprint(bckd_node, self.data)

return instance
pype/plugins/nuke/create/create_gizmo.py (new file, 79 lines)

@@ -0,0 +1,79 @@
from avalon.nuke.pipeline import Creator
from avalon.nuke import lib as anlib
import nuke
import nukescripts

class CreateGizmo(Creator):
"""Add Publishable "gizmo" group

The name is symbolically gizmo as presumably
it is something familiar to nuke users as group of nodes
distributed downstream in workflow
"""

name = "gizmo"
label = "Gizmo"
family = "gizmo"
icon = "file-archive-o"
defaults = ["ViewerInput", "Lut", "Effect"]

def __init__(self, *args, **kwargs):
super(CreateGizmo, self).__init__(*args, **kwargs)
self.nodes = nuke.selectedNodes()
self.node_color = "0x7533c1ff"
return

def process(self):
if (self.options or {}).get("useSelection"):
nodes = self.nodes
self.log.info(len(nodes))
if len(nodes) == 1:
anlib.select_nodes(nodes)
node = nodes[-1]
# check if Group node
if node.Class() in "Group":
node["name"].setValue("{}_GZM".format(self.name))
node["tile_color"].setValue(int(self.node_color, 16))
return anlib.imprint(node, self.data)
else:
nuke.message("Please select a group node "
"you wish to publish as the gizmo")

if len(nodes) >= 2:
anlib.select_nodes(nodes)
nuke.makeGroup()
gizmo_node = nuke.selectedNode()
gizmo_node["name"].setValue("{}_GZM".format(self.name))
gizmo_node["tile_color"].setValue(int(self.node_color, 16))

# add sticky node wit guide
with gizmo_node:
sticky = nuke.createNode("StickyNote")
sticky["label"].setValue(
"Add following:\n- set Input"
" nodes\n- set one Output1\n"
"- create User knobs on the group")

# add avalon knobs
return anlib.imprint(gizmo_node, self.data)

else:
nuke.message("Please select nodes you "
"wish to add to the gizmo")
return
else:
with anlib.maintained_selection():
gizmo_node = nuke.createNode("Group")
gizmo_node["name"].setValue("{}_GZM".format(self.name))
gizmo_node["tile_color"].setValue(int(self.node_color, 16))

# add sticky node wit guide
with gizmo_node:
sticky = nuke.createNode("StickyNote")
sticky["label"].setValue(
"Add following:\n- add Input"
" nodes\n- add one Output1\n"
"- create User knobs on the group")

# add avalon knobs
return anlib.imprint(gizmo_node, self.data)
@@ -24,8 +24,6 @@ class CreateWriteRender(plugin.PypeCreator):
def __init__(self, *args, **kwargs):
super(CreateWriteRender, self).__init__(*args, **kwargs)

self.name = self.data["subset"]

data = OrderedDict()

data["family"] = self.family
239
pype/plugins/nuke/load/load_gizmo_ip.py
Normal file
239
pype/plugins/nuke/load/load_gizmo_ip.py
Normal file
|
|
@ -0,0 +1,239 @@
|
|||
from avalon import api, style, io
|
||||
import nuke
|
||||
from pype.nuke import lib as pnlib
|
||||
from avalon.nuke import lib as anlib
|
||||
from avalon.nuke import containerise, update_container
|
||||
|
||||
|
||||
class LoadGizmoInputProcess(api.Loader):
|
||||
"""Loading colorspace soft effect exported from nukestudio"""
|
||||
|
||||
representations = ["gizmo"]
|
||||
families = ["gizmo"]
|
||||
|
||||
label = "Load Gizmo - Input Process"
|
||||
order = 0
|
||||
icon = "eye"
|
||||
color = style.colors.alert
|
||||
node_color = "0x7533c1ff"
|
||||
|
||||
def load(self, context, name, namespace, data):
|
||||
"""
|
||||
Loading function to get Gizmo as Input Process on viewer
|
||||
|
||||
Arguments:
|
||||
context (dict): context of version
|
||||
name (str): name of the version
|
||||
namespace (str): asset name
|
||||
data (dict): compulsory attribute > not used
|
||||
|
||||
Returns:
|
||||
nuke node: containerised nuke node object
|
||||
"""
|
||||
|
||||
# get main variables
|
||||
version = context['version']
|
||||
version_data = version.get("data", {})
|
||||
vname = version.get("name", None)
|
||||
first = version_data.get("frameStart", None)
|
||||
last = version_data.get("frameEnd", None)
|
||||
namespace = namespace or context['asset']['name']
|
||||
colorspace = version_data.get("colorspace", None)
|
||||
object_name = "{}_{}".format(name, namespace)
|
||||
|
||||
# prepare data for imprinting
|
||||
# add additional metadata from the version to imprint to Avalon knob
|
||||
add_keys = ["frameStart", "frameEnd", "handleStart", "handleEnd",
|
||||
"source", "author", "fps"]
|
||||
|
||||
data_imprint = {"frameStart": first,
|
||||
"frameEnd": last,
|
||||
"version": vname,
|
||||
"colorspaceInput": colorspace,
|
||||
"objectName": object_name}
|
||||
|
||||
for k in add_keys:
|
||||
data_imprint.update({k: version_data[k]})
|
||||
|
||||
# getting file path
|
||||
file = self.fname.replace("\\", "/")
|
||||
|
||||
# adding nodes to node graph
|
||||
# just in case we are in group lets jump out of it
|
||||
nuke.endGroup()
|
||||
|
||||
with anlib.maintained_selection():
|
||||
# add group from nk
|
||||
nuke.nodePaste(file)
|
||||
|
||||
GN = nuke.selectedNode()
|
||||
|
||||
GN["name"].setValue(object_name)
|
||||
|
||||
# try to place it under Viewer1
|
||||
if not self.connect_active_viewer(GN):
|
||||
nuke.delete(GN)
|
||||
return
|
||||
|
||||
return containerise(
|
||||
node=GN,
|
||||
name=name,
|
||||
namespace=namespace,
|
||||
context=context,
|
||||
loader=self.__class__.__name__,
|
||||
data=data_imprint)
|
||||
|
||||
def update(self, container, representation):
|
||||
"""Update the Loader's path
|
||||
|
||||
Nuke automatically tries to reset some variables when changing
|
||||
the loader's path to a new file. These automatic changes are to its
|
||||
inputs:
|
||||
|
||||
"""
|
||||
|
||||
# get main variables
|
||||
# Get version from io
|
||||
version = io.find_one({
|
||||
"type": "version",
|
||||
"_id": representation["parent"]
|
||||
})
|
||||
# get corresponding node
|
||||
GN = nuke.toNode(container['objectName'])
|
||||
|
||||
file = api.get_representation_path(representation).replace("\\", "/")
|
||||
context = representation["context"]
|
||||
name = container['name']
|
||||
version_data = version.get("data", {})
|
||||
vname = version.get("name", None)
|
||||
first = version_data.get("frameStart", None)
|
||||
last = version_data.get("frameEnd", None)
|
||||
namespace = container['namespace']
|
||||
colorspace = version_data.get("colorspace", None)
|
||||
object_name = "{}_{}".format(name, namespace)
|
||||
|
||||
add_keys = ["frameStart", "frameEnd", "handleStart", "handleEnd",
|
||||
"source", "author", "fps"]
|
||||
|
||||
data_imprint = {"representation": str(representation["_id"]),
|
||||
"frameStart": first,
|
||||
"frameEnd": last,
|
||||
"version": vname,
|
||||
"colorspaceInput": colorspace,
|
||||
"objectName": object_name}
|
||||
|
||||
for k in add_keys:
|
||||
data_imprint.update({k: version_data[k]})
|
||||
|
||||
# adding nodes to node graph
|
||||
# just in case we are in group lets jump out of it
|
||||
nuke.endGroup()
|
||||
|
||||
with anlib.maintained_selection():
|
||||
xpos = GN.xpos()
|
||||
ypos = GN.ypos()
|
||||
avalon_data = anlib.get_avalon_knob_data(GN)
|
||||
nuke.delete(GN)
|
||||
# add group from nk
|
||||
nuke.nodePaste(file)
|
||||
|
||||
GN = nuke.selectedNode()
|
||||
anlib.set_avalon_knob_data(GN, avalon_data)
|
||||
GN.setXYpos(xpos, ypos)
|
||||
GN["name"].setValue(object_name)
|
||||
|
||||
# get all versions in list
|
||||
versions = io.find({
|
||||
"type": "version",
|
||||
"parent": version["parent"]
|
||||
}).distinct('name')
|
||||
|
||||
max_version = max(versions)
|
||||
|
||||
# change color of node
|
||||
if version.get("name") not in [max_version]:
|
||||
GN["tile_color"].setValue(int("0xd88467ff", 16))
|
||||
else:
|
||||
GN["tile_color"].setValue(int(self.node_color, 16))
|
||||
|
||||
self.log.info("udated to version: {}".format(version.get("name")))
|
||||
|
||||
return update_container(GN, data_imprint)
|
||||
|
||||
def connect_active_viewer(self, group_node):
|
||||
"""
|
||||
Finds Active viewer and
|
||||
place the node under it, also adds
|
||||
name of group into Input Process of the viewer
|
||||
|
||||
Arguments:
|
||||
group_node (nuke node): nuke group node object
|
||||
|
||||
"""
|
||||
group_node_name = group_node["name"].value()
|
||||
|
||||
viewer = [n for n in nuke.allNodes() if "Viewer1" in n["name"].value()]
|
||||
if len(viewer) > 0:
|
||||
viewer = viewer[0]
|
||||
else:
|
||||
self.log.error("Please create Viewer node before you "
|
||||
"run this action again")
|
||||
return None
|
||||
|
||||
# get coordinates of Viewer1
|
||||
xpos = viewer["xpos"].value()
|
||||
ypos = viewer["ypos"].value()
|
||||
|
||||
ypos += 150
|
||||
|
||||
viewer["ypos"].setValue(ypos)
|
||||
|
||||
# set coordinates to group node
|
||||
group_node["xpos"].setValue(xpos)
|
||||
group_node["ypos"].setValue(ypos + 50)
|
||||
|
||||
# add group node name to Viewer Input Process
|
||||
viewer["input_process_node"].setValue(group_node_name)
|
||||
|
||||
# put backdrop under
|
||||
pnlib.create_backdrop(label="Input Process", layer=2,
|
||||
nodes=[viewer, group_node], color="0x7c7faaff")
|
||||
|
||||
return True
|
||||
|
||||
def get_item(self, data, trackIndex, subTrackIndex):
|
||||
return {key: val for key, val in data.items()
|
||||
if subTrackIndex == val["subTrackIndex"]
|
||||
if trackIndex == val["trackIndex"]}

    def byteify(self, input):
        """
        Converts unicode strings to strings.
        It walks through the whole dictionary.

        Arguments:
            input (dict/str): input

        Returns:
            dict: with fixed values and keys

        """
        if isinstance(input, dict):
            return {self.byteify(key): self.byteify(value)
                    for key, value in input.iteritems()}
        elif isinstance(input, list):
            return [self.byteify(element) for element in input]
        elif isinstance(input, unicode):
            return input.encode('utf-8')
        else:
            return input
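
    # Usage sketch (Python 2 only, since it relies on `iteritems` and the
    # `unicode` type): self.byteify({u"name": u"Grade1"}) -> {"name": "Grade1"}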

    def switch(self, container, representation):
        self.update(container, representation)

    def remove(self, container):
        from avalon.nuke import viewer_update_and_undo_stop
        node = nuke.toNode(container['objectName'])
        with viewer_update_and_undo_stop():
            nuke.delete(node)

@@ -1,9 +1,6 @@
import os
import contextlib

from avalon import api
import avalon.io as io

from avalon import api, io

import nuke

@@ -102,7 +99,7 @@ class LoadMov(api.Loader):
        handle_start = version_data.get("handleStart", None)
        handle_end = version_data.get("handleEnd", None)
        repr_cont = context["representation"]["context"]


        # fix handle start and end if none are available
        if not handle_start and not handle_end:
            handle_start = handles

@@ -7,7 +7,7 @@ class LinkAsGroup(api.Loader):
    """Copy the published file to be pasted at the desired location"""

    representations = ["nk"]
    families = ["workfile"]
    families = ["workfile", "nukenodes"]

    label = "Load Precomp"
    order = 0

@@ -63,8 +63,6 @@ class LinkAsGroup(api.Loader):
        colorspace = context["version"]["data"].get("colorspace", None)
        self.log.info("colorspace: {}\n".format(colorspace))

        # ['version', 'file', 'reading', 'output', 'useOutput']
        P["name"].setValue("{}_{}".format(name, namespace))
        P["useOutput"].setValue(True)

@@ -74,14 +72,15 @@ class LinkAsGroup(api.Loader):
                   if n.Class() == "Group"
                   if get_avalon_knob_data(n)]

        # create panel for selecting output
        panel_choices = " ".join(writes)
        panel_label = "Select write node for output"
        p = nuke.Panel("Select Write Node")
        p.addEnumerationPulldown(
            panel_label, panel_choices)
        p.show()
        P["output"].setValue(p.value(panel_label))
        if writes:
            # create panel for selecting output
            panel_choices = " ".join(writes)
            panel_label = "Select write node for output"
            p = nuke.Panel("Select Write Node")
            p.addEnumerationPulldown(
                panel_label, panel_choices)
            p.show()
            P["output"].setValue(p.value(panel_label))

        P["tile_color"].setValue(0xff0ff0ff)

@@ -1,9 +1,6 @@
import os
import contextlib

from avalon import api
import avalon.io as io

from avalon import api, io

import nuke

83  pype/plugins/nuke/publish/collect_backdrop.py  Normal file

@@ -0,0 +1,83 @@
import pyblish.api
import pype.api as pype
from pype.nuke import lib as pnlib
import nuke

@pyblish.api.log
class CollectBackdrops(pyblish.api.InstancePlugin):
    """Collect Backdrop node instance and its content
    """

    order = pyblish.api.CollectorOrder + 0.22
    label = "Collect Backdrop"
    hosts = ["nuke"]
    families = ["nukenodes"]

    def process(self, instance):

        bckn = instance[0]

        # define size of the backdrop
        left = bckn.xpos()
        top = bckn.ypos()
        right = left + bckn['bdwidth'].value()
        bottom = top + bckn['bdheight'].value()

        # iterate all nodes
        for node in nuke.allNodes():

            # exclude viewer
            if node.Class() == "Viewer":
                continue

            # find all related nodes
            if (node.xpos() > left) \
                    and (node.xpos() + node.screenWidth() < right) \
                    and (node.ypos() > top) \
                    and (node.ypos() + node.screenHeight() < bottom):

                # add contained nodes to instance's node list
                instance.append(node)

        # get all connections from outside of backdrop
        nodes = instance[1:]
        connections_in, connections_out = pnlib.get_dependent_nodes(nodes)
        instance.data["connections_in"] = connections_in
        instance.data["connections_out"] = connections_out

        # make label nicer
        instance.data["label"] = "{0} ({1} nodes)".format(
            bckn.name(), len(instance) - 1)

        instance.data["families"].append(instance.data["family"])

        # Get frame range
        handle_start = instance.context.data["handleStart"]
        handle_end = instance.context.data["handleEnd"]
        first_frame = int(nuke.root()["first_frame"].getValue())
        last_frame = int(nuke.root()["last_frame"].getValue())

        # get version
        version = pype.get_version_from_path(nuke.root().name())
        instance.data['version'] = version

        # Add version data to instance
        version_data = {
            "handles": handle_start,
            "handleStart": handle_start,
            "handleEnd": handle_end,
            "frameStart": first_frame + handle_start,
            "frameEnd": last_frame - handle_end,
            "version": int(version),
            "families": [instance.data["family"]] + instance.data["families"],
            "subset": instance.data["subset"],
            "fps": instance.context.data["fps"]
        }

        instance.data.update({
            "versionData": version_data,
            "frameStart": first_frame,
            "frameEnd": last_frame
        })
        self.log.info("Backdrop content collected: `{}`".format(instance[:]))
        self.log.info("Backdrop instance collected: `{}`".format(instance))

56  pype/plugins/nuke/publish/collect_gizmo.py  Normal file

@@ -0,0 +1,56 @@
import pyblish.api
import pype.api as pype
import nuke


@pyblish.api.log
class CollectGizmo(pyblish.api.InstancePlugin):
    """Collect Gizmo (group) node instance and its content
    """

    order = pyblish.api.CollectorOrder + 0.22
    label = "Collect Gizmo (Group)"
    hosts = ["nuke"]
    families = ["gizmo"]

    def process(self, instance):

        grpn = instance[0]

        # add family to families
        instance.data["families"].insert(0, instance.data["family"])
        # make label nicer
        instance.data["label"] = "{0} ({1} nodes)".format(
            grpn.name(), len(instance) - 1)

        # Get frame range
        handle_start = instance.context.data["handleStart"]
        handle_end = instance.context.data["handleEnd"]
        first_frame = int(nuke.root()["first_frame"].getValue())
        last_frame = int(nuke.root()["last_frame"].getValue())

        # get version
        version = pype.get_version_from_path(nuke.root().name())
        instance.data['version'] = version

        # Add version data to instance
        version_data = {
            "handles": handle_start,
            "handleStart": handle_start,
            "handleEnd": handle_end,
            "frameStart": first_frame + handle_start,
            "frameEnd": last_frame - handle_end,
            "colorspace": nuke.root().knob('workingSpaceLUT').value(),
            "version": int(version),
            "families": [instance.data["family"]] + instance.data["families"],
            "subset": instance.data["subset"],
            "fps": instance.context.data["fps"]
        }

        instance.data.update({
            "versionData": version_data,
            "frameStart": first_frame,
            "frameEnd": last_frame
        })
        self.log.info("Gizmo content collected: `{}`".format(instance[:]))
        self.log.info("Gizmo instance collected: `{}`".format(instance))

@@ -21,7 +21,6 @@ class CollectNukeInstances(pyblish.api.ContextPlugin):

        self.log.debug("asset_data: {}".format(asset_data["data"]))
        instances = []
        # creating instances per write node

        self.log.debug("nuke.allNodes(): {}".format(nuke.allNodes()))
        for node in nuke.allNodes():

@@ -45,6 +44,14 @@ class CollectNukeInstances(pyblish.api.ContextPlugin):
            if avalon_knob_data["id"] != "pyblish.avalon.instance":
                continue

            # establish families
            family = avalon_knob_data["family"]
            families = list()

            # skip disabled nodes, except backdrops (nukenodes)
            if ("nukenodes" not in family) and (node["disable"].value()):
                continue

            subset = avalon_knob_data.get(
                "subset", None) or node["name"].value()

@@ -54,6 +61,23 @@ class CollectNukeInstances(pyblish.api.ContextPlugin):

            # Add all nodes in group instances.
            if node.Class() == "Group":
                # only alter families for render family
                if ("render" in family):
                    # check if node is not disabled
                    families.append(avalon_knob_data["families"])
                    if node["render"].value():
                        self.log.info("flagged for render")
                        add_family = "render.local"
                        # dealing with local/farm rendering
                        if node["render_farm"].value():
                            self.log.info("adding render farm family")
                            add_family = "render.farm"
                            instance.data["transfer"] = False
                        families.append(add_family)
                else:
                    # add family into families
                    families.insert(0, family)

                node.begin()
                for i in nuke.allNodes():
                    instance.append(i)
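
                # Family resolution sketch (as far as this hunk shows): a
                # Group node of the "render" family flagged for render
                # collects "render.local"; enabling its "render_farm" knob
                # swaps that for "render.farm" and disables file transfer
                # for the instance.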

@@ -61,7 +85,7 @@ class CollectNukeInstances(pyblish.api.ContextPlugin):

            family = avalon_knob_data["family"]
            families = [avalon_knob_data["families"]]


            if node.Class() not in "Read":
                if node["render"].value():
                    self.log.info("flagged for render")

@@ -24,7 +24,8 @@ class CollectWriteLegacy(pyblish.api.InstancePlugin):
        self.log.info("render")
        return

        instance.data.update(
            {"family": "write.legacy",
             "families": []}
        )
        if "render" in node.knobs():
            instance.data.update(
                {"family": "write.legacy",
                 "families": []}
            )

103  pype/plugins/nuke/publish/extract_backdrop.py  Normal file

@@ -0,0 +1,103 @@
import pyblish.api
from avalon.nuke import lib as anlib
from pype.nuke import lib as pnlib
import nuke
import os
import pype
reload(pnlib)


class ExtractBackdropNode(pype.api.Extractor):
    """Extracting content of backdrop nodes

    Will create a nuke script containing only the backdrop's nodes.
    It also resolves Input and Output nodes.

    """

    order = pyblish.api.ExtractorOrder
    label = "Extract Backdrop"
    hosts = ["nuke"]
    families = ["nukenodes"]

    def process(self, instance):
        tmp_nodes = list()
        nodes = instance[1:]
        # Define extract output file path
        stagingdir = self.staging_dir(instance)
        filename = "{0}.nk".format(instance.name)
        path = os.path.join(stagingdir, filename)

        # maintain selection
        with anlib.maintained_selection():
            # all connections outside of backdrop
            connections_in = instance.data["connections_in"]
            connections_out = instance.data["connections_out"]
            self.log.debug("_ connections_in: `{}`".format(connections_in))
            self.log.debug("_ connections_out: `{}`".format(connections_out))

            # create input nodes and name them as passing node (*_INP)
            for n, inputs in connections_in.items():
                for i, input in inputs:
                    inpn = nuke.createNode("Input")
                    inpn["name"].setValue("{}_{}_INP".format(n.name(), i))
                    n.setInput(i, inpn)
                    inpn.setXYpos(input.xpos(), input.ypos())
                    nodes.append(inpn)
                    tmp_nodes.append(inpn)

            anlib.reset_selection()

            # connect output node
            for n, output in connections_out.items():
                opn = nuke.createNode("Output")
                self.log.info(n.name())
                self.log.info(output.name())
                output.setInput(
                    next((i for i, d in enumerate(output.dependencies())
                          if d.name() in n.name()), 0), opn)
                opn.setInput(0, n)
                opn.autoplace()
                nodes.append(opn)
                tmp_nodes.append(opn)
                anlib.reset_selection()

            # select nodes to copy
            anlib.reset_selection()
            anlib.select_nodes(nodes)
            # create tmp nk file
            # save file to the path
            nuke.nodeCopy(path)

            # Clean up
            for tn in tmp_nodes:
                nuke.delete(tn)

            # restore original connections
            # reconnect input node
            for n, inputs in connections_in.items():
                for i, input in inputs:
                    n.setInput(i, input)

            # reconnect output node
            for n, output in connections_out.items():
                output.setInput(
                    next((i for i, d in enumerate(output.dependencies())
                          if d.name() in n.name()), 0), n)

        if "representations" not in instance.data:
            instance.data["representations"] = []

        # create representation
        representation = {
            'name': 'nk',
            'ext': 'nk',
            'files': filename,
            "stagingDir": stagingdir
        }
        instance.data["representations"].append(representation)

        self.log.info("Extracted instance '{}' to: {}".format(
            instance.name, path))

        self.log.info("Data {}".format(
            instance.data))
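
# Assumed shape of pnlib.get_dependent_nodes(nodes) based on how the loops
# above consume it (an assumption, not verified against the library):
#   connections_in  = {inside_node: [(input_index, outside_node), ...]}
#   connections_out = {inside_node: outside_node}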

95  pype/plugins/nuke/publish/extract_gizmo.py  Normal file

@@ -0,0 +1,95 @@
import pyblish.api
from avalon.nuke import lib as anlib
from pype.nuke import lib as pnlib
from pype.nuke import utils as pnutils
import nuke
import os
import pype


class ExtractGizmo(pype.api.Extractor):
    """Extracting Gizmo (Group) node

    Will create a nuke script only with the Gizmo node.
    """

    order = pyblish.api.ExtractorOrder
    label = "Extract Gizmo (Group)"
    hosts = ["nuke"]
    families = ["gizmo"]

    def process(self, instance):
        tmp_nodes = list()
        orig_grpn = instance[0]
        # Define extract output file path
        stagingdir = self.staging_dir(instance)
        filename = "{0}.nk".format(instance.name)
        path = os.path.join(stagingdir, filename)

        # maintain selection
        with anlib.maintained_selection():
            orig_grpn_name = orig_grpn.name()
            tmp_grpn_name = orig_grpn_name + "_tmp"
            # select original group node
            anlib.select_nodes([orig_grpn])

            # copy to clipboard
            nuke.nodeCopy("%clipboard%")

            # reset selection to none
            anlib.reset_selection()

            # paste clipboard
            nuke.nodePaste("%clipboard%")

            # assign pasted node
            copy_grpn = nuke.selectedNode()
            copy_grpn.setXYpos((orig_grpn.xpos() + 120), orig_grpn.ypos())

            # convert gizmos to groups
            pnutils.bake_gizmos_recursively(copy_grpn)

            # remove avalon knobs
            knobs = copy_grpn.knobs()
            avalon_knobs = [k for k in knobs.keys()
                            for ak in ["avalon:", "ak:"]
                            if ak in k]
            avalon_knobs.append("publish")
            for ak in avalon_knobs:
                copy_grpn.removeKnob(knobs[ak])

            # add to temporary nodes
            tmp_nodes.append(copy_grpn)

            # swap names
            orig_grpn.setName(tmp_grpn_name)
            copy_grpn.setName(orig_grpn_name)

            # create tmp nk file
            # save file to the path
            nuke.nodeCopy(path)

            # Clean up
            for tn in tmp_nodes:
                nuke.delete(tn)

            # rename back to original
            orig_grpn.setName(orig_grpn_name)

        if "representations" not in instance.data:
            instance.data["representations"] = []

        # create representation
        representation = {
            'name': 'gizmo',
            'ext': 'nk',
            'files': filename,
            "stagingDir": stagingdir
        }
        instance.data["representations"].append(representation)

        self.log.info("Extracted instance '{}' to: {}".format(
            instance.name, path))

        self.log.info("Data {}".format(
            instance.data))
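
# Why the name swap: two nodes cannot share a name in the node graph, so
# the original group is renamed to "<name>_tmp" while the baked copy takes
# the published name for the nodeCopy() write; after the copy is deleted,
# the original name is restored.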

69  pype/plugins/nuke/publish/validate_backdrop.py  Normal file

@@ -0,0 +1,69 @@
import pyblish
from avalon.nuke import lib as anlib
import nuke


class SelectCenterInNodeGraph(pyblish.api.Action):
    """
    Centering failed instance node in node graph
    """

    label = "Center node in node graph"
    icon = "wrench"
    on = "failed"

    def process(self, context, plugin):

        # Get the errored instances
        failed = []
        for result in context.data["results"]:
            if (result["error"] is not None and result["instance"] is not None
                    and result["instance"] not in failed):
                failed.append(result["instance"])

        # Apply pyblish.logic to get the instances for the plug-in
        instances = pyblish.api.instances_by_plugin(failed, plugin)

        all_xC = list()
        all_yC = list()

        # maintain selection
        with anlib.maintained_selection():
            # collect all failed nodes xpos and ypos
            for instance in instances:
                bdn = instance[0]
                xC = bdn.xpos() + bdn.screenWidth() / 2
                yC = bdn.ypos() + bdn.screenHeight() / 2

                all_xC.append(xC)
                all_yC.append(yC)

        self.log.info("all_xC: `{}`".format(all_xC))
        self.log.info("all_yC: `{}`".format(all_yC))

        # zoom to nodes in node graph
        nuke.zoom(2, [min(all_xC), min(all_yC)])


@pyblish.api.log
class ValidateBackdrop(pyblish.api.InstancePlugin):
    """Validate the amount of nodes on a backdrop node, in case the user
    forgot to add nodes above the publishing backdrop node"""

    order = pyblish.api.ValidatorOrder
    optional = True
    families = ["nukenodes"]
    label = "Validate Backdrop"
    hosts = ["nuke"]
    actions = [SelectCenterInNodeGraph]

    def process(self, instance):
        connections_out = instance.data["connections_out"]

        msg_multiple_outputs = "Only one outgoing connection from \"{}\" is allowed".format(
            instance.data["name"])
        assert len(connections_out.keys()) <= 1, msg_multiple_outputs

        msg_no_content = "No content on backdrop node: \"{}\"".format(
            instance.data["name"])
        assert len(instance) > 1, msg_no_content
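
# Example: a backdrop whose contents feed two different downstream nodes
# yields len(connections_out) == 2 and trips the first assert; an empty
# backdrop holds only itself in the instance (len(instance) == 1) and
# trips the second.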

58  pype/plugins/nuke/publish/validate_gizmo.py  Normal file

@@ -0,0 +1,58 @@
import pyblish
from avalon.nuke import lib as anlib
import nuke


class OpenFailedGroupNode(pyblish.api.Action):
    """
    Opens the failed gizmo (group) node in the node graph
    """

    label = "Open Gizmo in Node Graph"
    icon = "wrench"
    on = "failed"

    def process(self, context, plugin):

        # Get the errored instances
        failed = []
        for result in context.data["results"]:
            if (result["error"] is not None and result["instance"] is not None
                    and result["instance"] not in failed):
                failed.append(result["instance"])

        # Apply pyblish.logic to get the instances for the plug-in
        instances = pyblish.api.instances_by_plugin(failed, plugin)

        # maintain selection
        with anlib.maintained_selection():
            # open each failed group node in the node graph
            for instance in instances:
                grpn = instance[0]
                nuke.showDag(grpn)


@pyblish.api.log
class ValidateGizmo(pyblish.api.InstancePlugin):
    """Validate amount of output nodes in gizmo (group) node"""

    order = pyblish.api.ValidatorOrder
    optional = True
    families = ["gizmo"]
    label = "Validate Gizmo (Group)"
    hosts = ["nuke"]
    actions = [OpenFailedGroupNode]

    def process(self, instance):
        grpn = instance[0]

        with grpn:
            connections_out = nuke.allNodes('Output')
            msg_multiple_outputs = (
                "Only one outgoing connection from "
                "\"{}\" is allowed".format(instance.data["name"]))
            assert len(connections_out) <= 1, msg_multiple_outputs

            connections_in = nuke.allNodes('Input')
            msg_missing_inputs = (
                "At least one Input node has to be used in: "
                "\"{}\"".format(instance.data["name"]))
            assert len(connections_in) >= 1, msg_missing_inputs

@@ -4,7 +4,6 @@ from pyblish import api

import nuke


class CollectClips(api.ContextPlugin):
    """Collect all Track items selection."""

@@ -31,6 +30,7 @@ class CollectClips(api.ContextPlugin):
            sub_items = video_track.subTrackItems()

            for item in items:
                data = dict()
                # compare with selection or if disabled
                if item not in selection or not item.isEnabled():
                    continue

@@ -83,9 +83,12 @@ class CollectClips(api.ContextPlugin):
                except Exception:
                    source_first_frame = 0

                data = {"name": "{0}_{1}".format(track.name(), item.name()),
                data.update({
                    "name": "{0}_{1}".format(track.name(), item.name()),
                    "item": item,
                    "source": source,
                    "timecodeStart": str(source.timecodeStart()),
                    "timelineTimecodeStart": str(sequence.timecodeStart()),
                    "sourcePath": source_path,
                    "track": track.name(),
                    "trackIndex": track_index,

@@ -93,19 +96,24 @@ class CollectClips(api.ContextPlugin):
                    "effects": effects,
                    "sourceIn": int(item.sourceIn()),
                    "sourceOut": int(item.sourceOut()),
                    "mediaDuration": (int(item.sourceOut()) -
                                      int(item.sourceIn())) + 1,
                    "clipIn": int(item.timelineIn()),
                    "clipOut": int(item.timelineOut()),
                    "clipDuration": (int(item.timelineOut()) -
                                     int(item.timelineIn())) + 1,
                    "asset": asset,
                    "family": "clip",
                    "families": [],
                    "handles": 0,
                    "handleStart": projectdata.get("handles", 0),
                    "handleEnd": projectdata.get("handles", 0),
                    "version": int(version)}
                    "version": int(version)})

                instance = context.create_instance(**data)

                self.log.info("Created instance: {}".format(instance))
                self.log.info("Created instance.data: {}".format(instance.data))
                self.log.debug(">> effects: {}".format(instance.data["effects"]))

                context.data["assetsShared"][asset] = dict()

@@ -234,8 +234,9 @@ class CollectPlatesData(api.InstancePlugin):
            'stagingDir': staging_dir,
            'name': ext,
            'ext': ext,
            "frameStart": frame_start,
            "frameEnd": frame_end,
            "frameStart": "%0{}d".format(
                len(str(frame_end))) % frame_start
        }
        instance.data["representations"].append(plates_representation)

@@ -91,7 +91,9 @@ class ModifiedBurnins(ffmpeg_burnins.Burnins):
        text = today.strftime(date_format)
        self._add_burnin(text, align, options, ffmpeg_burnins.DRAWTEXT)

    def add_frame_numbers(self, align, options=None, start_frame=None):
    def add_frame_numbers(
        self, align, options=None, start_frame=None, text=None
    ):
        """
        Convenience method to create the frame number expression.

@@ -103,7 +105,12 @@ class ModifiedBurnins(ffmpeg_burnins.Burnins):
        if start_frame:
            options['frame_offset'] = start_frame

        options['expression'] = r'%%{eif\:n+%d\:d}' % options['frame_offset']
        expr = r'%%{eif\:n+%d\:d}' % options['frame_offset']
        if text and isinstance(text, str):
            text = r"{}".format(text)
            expr = text.replace("{current_frame}", expr)

        options['expression'] = expr
        text = str(int(self.end_frame + options['frame_offset']))
        self._add_burnin(text, align, options, ffmpeg_burnins.DRAWTEXT)
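
        # Expression sketch (hypothetical offset): with frame_offset=1001 the
        # % formatting yields the drawtext expression '%{eif\:n+1001\:d}';
        # passing text="frame {current_frame}" wraps it so the burnin renders
        # e.g. "frame 1001" on the first frame.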

@@ -121,7 +128,7 @@ class ModifiedBurnins(ffmpeg_burnins.Burnins):

        timecode = ffmpeg_burnins._frames_to_timecode(
            options['frame_offset'],
            self.frame_rate
            self.frame_rate
        )
        options = options.copy()
        if not options.get('fps'):

@@ -213,13 +220,15 @@ def example(input_path, output_path):
    burnin.render(output_path, overwrite=True)


def burnins_from_data(input_path, output_path, data, overwrite=True):
def burnins_from_data(input_path, codec_data, output_path, data, overwrite=True):
    '''
    This method adds burnins to video/image file based on presets setting.
    Extension of output MUST be same as input. (mov -> mov, avi -> avi,...)

    :param input_path: full path to input file where burnins should be added
    :type input_path: str
    :param codec_data: all codec related arguments in list
    :type codec_data: list
    :param output_path: full path to output file where output will be rendered
    :type output_path: str
    :param data: data required for burnin settings (more info below)

@@ -284,8 +293,8 @@ def burnins_from_data(input_path, output_path, data, overwrite=True):

    burnin = ModifiedBurnins(input_path, options_init=options_init)

    start_frame = data.get("start_frame")
    start_frame_tc = data.get('start_frame_tc', start_frame)
    frame_start = data.get("frame_start")
    frame_start_tc = data.get('frame_start_tc', frame_start)
    for align_text, preset in presets.get('burnins', {}).items():
        align = None
        if align_text == 'TOP_LEFT':

@@ -311,7 +320,7 @@ def burnins_from_data(input_path, output_path, data, overwrite=True):

        if (
            bi_func in ['frame_numbers', 'timecode'] and
            start_frame is None
            frame_start is None
        ):
            log.error(
                'start_frame is not set in entered data!'

@@ -320,9 +329,26 @@ def burnins_from_data(input_path, output_path, data, overwrite=True):
            return

        if bi_func == 'frame_numbers':
            burnin.add_frame_numbers(align, start_frame=start_frame)
            current_frame_identifier = "{current_frame}"
            text = preset.get('text') or current_frame_identifier

            if current_frame_identifier not in text:
                log.warning((
                    'Text for Frame numbers doesn\'t have '
                    '`{current_frame}` key in text!'
                ))

            text_items = []
            split_items = text.split(current_frame_identifier)
            for item in split_items:
                text_items.append(item.format(**data))

            text = "{current_frame}".join(text_items)

            burnin.add_frame_numbers(align, start_frame=frame_start, text=text)
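
            # Split/format sketch (hypothetical preset): text =
            # "{shot} f{current_frame}" with data = {"shot": "sh010"} splits
            # into ["{shot} f", ""], formats to ["sh010 f", ""] and rejoins
            # to "sh010 f{current_frame}", so only the non-frame parts get
            # str.format()-ed before the expression is substituted.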

        elif bi_func == 'timecode':
            burnin.add_timecode(align, start_frame=start_frame_tc)
            burnin.add_timecode(align, start_frame=frame_start_tc)
        elif bi_func == 'text':
            if not preset.get('text'):
                log.error('Text is not set for text function burnin!')

@@ -339,11 +365,20 @@ def burnins_from_data(input_path, output_path, data, overwrite=True):
        )
        return

    burnin.render(output_path, overwrite=overwrite)
    codec_args = ''
    if codec_data:
        codec_args = " ".join(codec_data)

    burnin.render(output_path, args=codec_args, overwrite=overwrite)


if __name__ == '__main__':
    import sys
    import json
    data = json.loads(sys.argv[-1])
    burnins_from_data(data['input'], data['output'], data['burnin_data'])
    burnins_from_data(
        data['input'],
        data['codec'],
        data['output'],
        data['burnin_data']
    )
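
# Invocation sketch (hypothetical values): the script expects a JSON blob as
# its last argument, e.g.
#   python <script>.py '{"input": "in.mov", "codec": ["-codec:v", "libx264"],
#                        "output": "out.mov", "burnin_data": {...}}'
# where codec_data == ["-codec:v", "libx264"] joins to "-codec:v libx264".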

@@ -4,6 +4,7 @@ import os
import logging
import subprocess
import platform
from shutil import which

handler = logging.basicConfig()
log = logging.getLogger("Publish Image Sequences")

@@ -35,22 +36,32 @@ def __main__():
    auto_pype_root = os.path.abspath(auto_pype_root + "../../../../..")
    auto_pype_root = os.environ.get('PYPE_ROOT') or auto_pype_root

    if kwargs.pype:
        pype_root = kwargs.pype
    else:
        # if the pype argument is not specified, let's assume it is set in PATH
        pype_root = ""

    print("Set pype root to: {}".format(pype_root))
    print("Paths: {}".format(kwargs.paths or [os.getcwd()]))

    paths = kwargs.paths or [os.getcwd()]
    pype_command = "pype.ps1"
    if platform.system().lower() == "linux":
        pype_command = "pype"
    elif platform.system().lower() == "windows":
        pype_command = "pype.bat"

    if kwargs.pype:
        pype_root = kwargs.pype
    else:
        # test if pype.bat / pype is in the PATH:
        # if it is, which() will return its path and we use that;
        # if not, we use the auto_pype_root path. The caveat of that one is
        # that it can be a UNC path, and that will not work on windows.

        pype_path = which(pype_command)

        if pype_path:
            pype_root = os.path.dirname(pype_path)
        else:
            pype_root = auto_pype_root
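
    # Resolution sketch (hypothetical paths): on Windows, which("pype.bat")
    # might return "C:\\pype\\pype.bat", making pype_root "C:\\pype"; if the
    # lookup fails, the auto-detected repository root is used instead.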

    print("Set pype root to: {}".format(pype_root))
    print("Paths: {}".format(kwargs.paths or [os.getcwd()]))

    paths = kwargs.paths or [os.getcwd()]

    args = [
        os.path.join(pype_root, pype_command),
        "publish",

@@ -60,9 +71,11 @@ def __main__():
    print("Pype command: {}".format(" ".join(args)))
    # Forcing forwarding of the environment because environment inheritance
    # does not always work.
    exit_code = subprocess.call(args, env=os.environ)
    # Cast all values in environment to str to be safe
    env = {k: str(v) for k, v in os.environ.items()}
    exit_code = subprocess.call(args, env=env)
    if exit_code != 0:
        raise ValueError("Publishing failed.")
        raise RuntimeError("Publishing failed.")


if __name__ == '__main__':

@@ -7,8 +7,7 @@ import copy

from maya import cmds

from avalon import api
import avalon.io as io
from avalon import api, io
from avalon.maya.lib import unique_namespace
from pype.maya.lib import matrix_equals

@@ -1,19 +0,0 @@
<?xml version="1.0" encoding="iso-8859-1"?>
<svg version="1.1" id="Capa_1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" x="0px" y="0px"
    viewBox="0 0 690 200" fill="#777777" style="enable-background:new 0 0 690 200;" xml:space="preserve">
<g>
    <path d="
        M30,10
        h630 a20,20 0 0 1 20,20
        v140 a20,20 0 0 1 -20,20
        h-630 a20,20 0 0 1 -20,-20
        v-140 a20,20 0 0 1 20,-20 z

        M17,37
        v126 a20,20 0 0 0 20,20
        h616 a20,20 0 0 0 20,-20
        v-126 a20,20 0 0 0 -20,-20
        h-616 a20,20 0 0 0 -20,20 z"/>
    <text style='font-family:Trebuchet MS;font-size:140px;' x="70" y="155" class="small">PREVIEW</text>
</g>
</svg>

Before Width: | Height: | Size: 661 B

@@ -1,19 +0,0 @@
<?xml version="1.0" encoding="iso-8859-1"?>
<svg version="1.1" id="Capa_1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" x="0px" y="0px"
    viewBox="0 0 890 200" fill="#777777" style="enable-background:new 0 0 890 200;" xml:space="preserve">
<g>
    <path d="
        M30,10
        h830 a20,20 0 0 1 20,20
        v140 a20,20 0 0 1 -20,20
        h-830 a20,20 0 0 1 -20,-20
        v-140 a20,20 0 0 1 20,-20 z

        M17,37
        v126 a20,20 0 0 0 20,20
        h816 a20,20 0 0 0 20,-20
        v-126 a20,20 0 0 0 -20,-20
        h-816 a20,20 0 0 0 -20,20 z"/>
    <text style='font-family:Trebuchet MS;font-size:140px;' x="70" y="155" class="small">THUMBNAIL</text>
</g>
</svg>

Before Width: | Height: | Size: 663 B

@@ -2,7 +2,7 @@ import os
from . import QtCore, QtGui, QtWidgets
from . import SvgButton
from . import get_resource
from avalon import style
from pypeapp import style


class ComponentItem(QtWidgets.QFrame):

@@ -16,6 +16,9 @@ class ComponentItem(QtWidgets.QFrame):
    signal_preview = QtCore.Signal(object)
    signal_repre_change = QtCore.Signal(object, object)

    preview_text = "PREVIEW"
    thumbnail_text = "THUMBNAIL"

    def __init__(self, parent, main_parent):
        super().__init__()
        self.has_valid_repre = True

@@ -124,17 +127,8 @@ class ComponentItem(QtWidgets.QFrame):

        frame_icons = QtWidgets.QFrame(frame_repre_icons)

        self.preview = SvgButton(
            get_resource('preview.svg'), 64, 18,
            [self.C_NORMAL, self.C_HOVER, self.C_ACTIVE, self.C_ACTIVE_HOVER],
            frame_icons
        )

        self.thumbnail = SvgButton(
            get_resource('thumbnail.svg'), 84, 18,
            [self.C_NORMAL, self.C_HOVER, self.C_ACTIVE, self.C_ACTIVE_HOVER],
            frame_icons
        )
        self.preview = LightingButton(self.preview_text)
        self.thumbnail = LightingButton(self.thumbnail_text)

        layout = QtWidgets.QHBoxLayout(frame_icons)
        layout.setSpacing(6)

@@ -272,16 +266,16 @@ class ComponentItem(QtWidgets.QFrame):
        self.signal_repre_change.emit(self, repre_name)

    def is_thumbnail(self):
        return self.thumbnail.checked
        return self.thumbnail.isChecked()

    def change_thumbnail(self, hover=True):
        self.thumbnail.change_checked(hover)
        self.thumbnail.setChecked(hover)

    def is_preview(self):
        return self.preview.checked
        return self.preview.isChecked()

    def change_preview(self, hover=True):
        self.preview.change_checked(hover)
        self.preview.setChecked(hover)

    def collect_data(self):
        in_files = self.in_data['files']

@@ -309,3 +303,62 @@ class ComponentItem(QtWidgets.QFrame):
        data['fps'] = self.in_data['fps']

        return data


class LightingButton(QtWidgets.QPushButton):
    lightingbtnstyle = """
        QPushButton {
            text-align: center;
            color: #777777;
            background-color: transparent;
            border-width: 1px;
            border-color: #777777;
            border-style: solid;
            padding-top: 2px;
            padding-bottom: 2px;
            padding-left: 3px;
            padding-right: 3px;
            border-radius: 3px;
        }

        QPushButton:hover {
            border-color: #cccccc;
            color: #cccccc;
        }

        QPushButton:pressed {
            border-color: #ffffff;
            color: #ffffff;
        }

        QPushButton:disabled {
            border-color: #3A3939;
            color: #3A3939;
        }

        QPushButton:checked {
            border-color: #4BB543;
            color: #4BB543;
        }

        QPushButton:checked:hover {
            border-color: #4Bd543;
            color: #4Bd543;
        }

        QPushButton:checked:pressed {
            border-color: #4BF543;
            color: #4BF543;
        }
    """

    def __init__(self, text, *args, **kwargs):
        super().__init__(text, *args, **kwargs)
        self.setStyleSheet(self.lightingbtnstyle)

        self.setCheckable(True)

        preview_font_metrics = self.fontMetrics().boundingRect(text)
        width = preview_font_metrics.width() + 16
        height = preview_font_metrics.height() + 5
        self.setMaximumWidth(width)
        self.setMaximumHeight(height)
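
# Usage sketch: LightingButton("PREVIEW") yields a checkable flat button
# sized to its label via font metrics; checking it switches border and text
# to the green #4BB543 state defined in the stylesheet, replacing the old
# SVG-based toggle buttons.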

@@ -92,28 +92,32 @@ class DropDataFrame(QtWidgets.QFrame):
        self._refresh_view()

    def _set_thumbnail(self, in_item):
        current_state = in_item.is_thumbnail()
        in_item.change_thumbnail(not current_state)

        checked_item = None
        for item in self.components_list.widgets():
            if item.is_thumbnail():
                checked_item = item
                break
        if checked_item is None or checked_item == in_item:
            in_item.change_thumbnail()
        else:
        if checked_item is not None and checked_item != in_item:
            checked_item.change_thumbnail(False)
            in_item.change_thumbnail()

        in_item.change_thumbnail(current_state)

    def _set_preview(self, in_item):
        current_state = in_item.is_preview()
        in_item.change_preview(not current_state)

        checked_item = None
        for item in self.components_list.widgets():
            if item.is_preview():
                checked_item = item
                break
        if checked_item is None or checked_item == in_item:
            in_item.change_preview()
        else:
        if checked_item is not None and checked_item != in_item:
            checked_item.change_preview(False)
            in_item.change_preview()

        in_item.change_preview(current_state)
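
        # Intent sketch (as far as this hunk shows): the thumbnail/preview
        # flags are exclusive across component items -- toggling one item
        # clears the previously checked item, and the new state is driven
        # by the clicked item's own prior state.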

    def _remove_item(self, in_item):
        valid_repre = in_item.has_valid_repre is True

@@ -3,9 +3,8 @@ import contextlib
import collections

from avalon.vendor import qtawesome
from avalon.vendor.Qt import QtWidgets, QtCore, QtGui
from avalon import io
from avalon import style
from Qt import QtWidgets, QtCore, QtGui
from avalon import style, io

from .model import (
    TreeModel,