Mirror of https://github.com/ynput/ayon-core.git (synced 2025-12-24 21:04:40 +01:00)

Commit d44d63cdc0: Merge branch 'develop' into 3.0/refactoring

64 changed files with 1644 additions and 1302 deletions
pype/hooks/global/pre_add_last_workfile_arg.py (new file, 29 lines)
@@ -0,0 +1,29 @@
+import os
+from pype.lib import PreLaunchHook
+
+
+class AddLastWorkfileToLaunchArgs(PreLaunchHook):
+    """Add last workfile path to launch arguments.
+
+    This cannot be done the same way for every application.
+    """
+
+    order = 0
+    app_groups = ["maya", "nuke", "nukex", "hiero", "nukestudio"]
+
+    def execute(self):
+        if not self.data.get("start_last_workfile"):
+            self.log.info("Starting last workfile on launch is disabled.")
+            return
+
+        last_workfile = self.data.get("last_workfile_path")
+        if not last_workfile:
+            self.log.warning("Last workfile was not collected.")
+            return
+
+        if not os.path.exists(last_workfile):
+            self.log.info("Current context does not have any workfile yet.")
+            return
+
+        # Add path to workfile to arguments
+        self.launch_context.launch_args.append(last_workfile)
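(For illustration, a hypothetical sketch of how a launch context could filter and run such hooks. This is not pype's actual discovery code, only the contract the hook above relies on: `app_groups`, optional `platforms`, and `order`.)

import sys


def run_prelaunch_hooks(hooks, app_group):
    # Normalize the platform name the way the hooks' `platforms` lists expect.
    platform = "windows" if sys.platform == "win32" else sys.platform
    applicable = [
        hook for hook in hooks
        if app_group in hook.app_groups
        and platform in getattr(hook, "platforms", [platform])
    ]
    # Lower `order` runs first: the workfile hook (order 0) appends its
    # path before the Windows shell hook in the next file (order 10)
    # wraps the whole command.
    for hook in sorted(applicable, key=lambda h: h.order):
        hook.execute()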
pype/hooks/global/pre_with_windows_shell.py (new file, 24 lines)
@@ -0,0 +1,24 @@
+import os
+from pype.lib import PreLaunchHook
+
+
+class LaunchWithWindowsShell(PreLaunchHook):
+    """Add shell command before executable.
+
+    Some hosts have issues when launched directly from Python; in that
+    case a shell executable can be prepended to trigger the process
+    instead.
+    """
+
+    order = 10
+    app_groups = ["nuke", "nukex", "hiero", "nukestudio"]
+    platforms = ["windows"]
+
+    def execute(self):
+        # Get COMSPEC, which is cmd.exe in most cases.
+        comspec = os.environ.get("COMSPEC", "cmd.exe")
+        # Prepend comspec and "/c" to the argument list.
+        new_args = [comspec, "/c"]
+        new_args.extend(self.launch_context.launch_args)
+        # Replace launch args with the new list.
+        self.launch_context.launch_args = new_args
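(For illustration, the effect of this hook on a launch command; the paths are made up:)

# Before LaunchWithWindowsShell.execute():
launch_args = ["C:/Program Files/Nuke12.2/Nuke12.2.exe", "scene_v001.nk"]

# After execute(), cmd.exe runs the command and exits ("/c"):
launch_args = [
    "cmd.exe", "/c",
    "C:/Program Files/Nuke12.2/Nuke12.2.exe", "scene_v001.nk",
]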
@@ -1,15 +0,0 @@ (deleted file with the HieroLaunchArguments hook)
-import os
-from pype.lib import PreLaunchHook
-
-
-class HieroLaunchArguments(PreLaunchHook):
-    order = 0
-    app_groups = ["hiero"]
-
-    def execute(self):
-        """Prepare subprocess launch arguments for Hiero."""
-        # Add path to workfile to arguments
-        if self.data.get("start_last_workfile"):
-            last_workfile = self.data.get("last_workfile_path")
-            if os.path.exists(last_workfile):
-                self.launch_context.launch_args.append(last_workfile)
@@ -1,16 +0,0 @@ (deleted file with the MayaLaunchArguments hook)
-import os
-from pype.lib import PreLaunchHook
-
-
-class MayaLaunchArguments(PreLaunchHook):
-    """Add path to last workfile to launch arguments."""
-    order = 0
-    app_groups = ["maya"]
-
-    def execute(self):
-        """Prepare subprocess launch arguments for Maya."""
-        # Add path to workfile to arguments
-        if self.data.get("start_last_workfile"):
-            last_workfile = self.data.get("last_workfile_path")
-            if os.path.exists(last_workfile):
-                self.launch_context.launch_args.append(last_workfile)
@@ -1,15 +0,0 @@ (deleted file with the NukeStudioLaunchArguments hook)
-import os
-from pype.lib import PreLaunchHook
-
-
-class NukeStudioLaunchArguments(PreLaunchHook):
-    order = 0
-    app_groups = ["nukestudio"]
-
-    def execute(self):
-        """Prepare subprocess launch arguments for NukeStudio."""
-        # Add path to workfile to arguments
-        if self.data.get("start_last_workfile"):
-            last_workfile = self.data.get("last_workfile_path")
-            if os.path.exists(last_workfile):
-                self.launch_context.launch_args.append(last_workfile)
@@ -1,15 +0,0 @@ (deleted file with the NukeXLaunchArguments hook)
-import os
-from pype.lib import PreLaunchHook
-
-
-class NukeXLaunchArguments(PreLaunchHook):
-    order = 0
-    app_groups = ["nukex"]
-
-    def execute(self):
-        """Prepare subprocess launch arguments for NukeX."""
-        # Add path to workfile to arguments
-        if self.data.get("start_last_workfile"):
-            last_workfile = self.data.get("last_workfile_path")
-            if os.path.exists(last_workfile):
-                self.launch_context.launch_args.append(last_workfile)
@@ -183,11 +183,11 @@ PypeHarmony.color = function(rgba) {
 /**
  * Get all dependencies for given node.
  * @function
- * @param {string} node node path.
+ * @param {string} _node node path.
  * @return {array} List of dependent nodes.
  */
-PypeHarmony.getDependencies = function(node) {
-    var target_node = node;
+PypeHarmony.getDependencies = function(_node) {
+    var target_node = _node;
     var numInput = node.numberOfInputPorts(target_node);
     var dependencies = [];
     for (var i = 0 ; i < numInput; i++) {
@@ -104,7 +104,8 @@ __all__ = [
     "PostLaunchHook",

     "filter_pyblish_plugins",
     "source_hash",
+    "get_unique_layer_name",
+    "get_background_layers",

     "version_up",
     "get_version_from_path",
@@ -73,14 +73,14 @@ class RenderInstance(object):
     @frameStart.validator
     def check_frame_start(self, _, value):
         """Validate that frame start is not larger than frame end."""
-        if value >= self.frameEnd:
+        if value > self.frameEnd:
             raise ValueError("frameStart must be smaller "
                              "than or equal to frameEnd")

     @frameEnd.validator
     def check_frame_end(self, _, value):
         """Validate that frame end is not less than frame start."""
-        if value <= self.frameStart:
+        if value < self.frameStart:
             raise ValueError("frameEnd must be greater "
                              "than or equal to frameStart")
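(The switch from >= / <= to strict comparisons means a single-frame range now validates; a small illustration of the relaxed checks with made-up values:)

frame_start, frame_end = 5, 5

# Old check rejected single-frame ranges:
assert frame_start >= frame_end        # True -> ValueError used to be raised

# New check only rejects a start strictly beyond the end:
assert not (frame_start > frame_end)   # True -> the value is now accepted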
@@ -373,8 +373,12 @@ class AbstractSubmitDeadline(pyblish.api.InstancePlugin):
         """Plugin entry point."""
         self._instance = instance
-        self._deadline_url = os.environ.get(
-            "DEADLINE_REST_URL", "http://localhost:8082")
+        context = instance.context
+        self._deadline_url = (
+            context.data["system_settings"]
+            ["modules"]
+            ["deadline"]
+            ["DEADLINE_REST_URL"]
+        )
         assert self._deadline_url, "Requires DEADLINE_REST_URL"

         file_path = None
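(The nested lookup assumes system settings collected into context data with the following shape. The keys are taken from the code above; the URL value is just the old environment default, used here for illustration:)

system_settings = {
    "modules": {
        "deadline": {
            "DEADLINE_REST_URL": "http://localhost:8082",
        },
    },
}

deadline_url = system_settings["modules"]["deadline"]["DEADLINE_REST_URL"]
assert deadline_url, "Requires DEADLINE_REST_URL"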
@@ -248,11 +248,11 @@ class ApplicationTool:
 class ApplicationExecutable:
     def __init__(self, executable):
         default_launch_args = []
         executable_path = None
         if isinstance(executable, str):
             executable_path = executable

         elif isinstance(executable, list):
             executable_path = None
             for arg in executable:
                 if arg:
                     if executable_path is None:
@@ -711,7 +711,7 @@ class ApplicationLaunchContext:
         # Execute prelaunch hooks
         for prelaunch_hook in self.prelaunch_hooks:
             self.log.debug("Executing prelaunch hook: {}".format(
-                str(prelaunch_hook)
+                str(prelaunch_hook.__class__.__name__)
             ))
             prelaunch_hook.execute()
@@ -730,7 +730,7 @@ class ApplicationLaunchContext:
         # Process post launch hooks
         for postlaunch_hook in self.postlaunch_hooks:
             self.log.debug("Executing postlaunch hook: {}".format(
-                str(prelaunch_hook)
+                str(postlaunch_hook.__class__.__name__)
             ))

             # TODO how to handle errors?
@@ -3,6 +3,8 @@
 import os
 import inspect
 import logging
+import re
+import json

 from pype.settings import get_project_settings
@@ -78,3 +80,57 @@ def source_hash(filepath, *args):
     time = str(os.path.getmtime(filepath))
     size = str(os.path.getsize(filepath))
     return "|".join([file_name, time, size] + list(args)).replace(".", ",")
+
+
+def get_unique_layer_name(layers, name):
+    """Return a unique layer name for the Loader.
+
+    Gets all layer names and, if 'name' is present in them, increases
+    the suffix by 1 (eg. creates a unique layer name - for Loader).
+
+    Args:
+        layers (list): of strings, names only
+        name (string): checked value
+
+    Returns:
+        (string): name_00X (without version)
+    """
+    names = {}
+    for layer in layers:
+        layer_name = re.sub(r'_\d{3}$', '', layer)
+        if layer_name in names.keys():
+            names[layer_name] = names[layer_name] + 1
+        else:
+            names[layer_name] = 1
+    occurrences = names.get(name, 0)
+
+    return "{}_{:0>3d}".format(name, occurrences + 1)
+
+
+def get_background_layers(file_url):
+    """Pull file names from a background json file, enriched with the
+    folder url so AE is able to import the files.
+
+    Order is important; it follows the order in the json.
+
+    Args:
+        file_url (str): abs url of background json
+
+    Returns:
+        (list): of abs paths to images
+    """
+    with open(file_url) as json_file:
+        data = json.load(json_file)
+
+    layers = list()
+    bg_folder = os.path.dirname(file_url)
+    for child in data['children']:
+        if child.get("filename"):
+            layers.append(os.path.join(bg_folder, child.get("filename")).
+                          replace("\\", "/"))
+        else:
+            for layer in child['children']:
+                if layer.get("filename"):
+                    layers.append(os.path.join(bg_folder,
+                                               layer.get("filename")).
+                                  replace("\\", "/"))
+    return layers
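(A minimal background json that get_background_layers would accept, inferred from the parsing above. The 'children' nesting and 'filename' keys come from the code; the file names and path are made up:)

import json

data = {
    "children": [
        {"filename": "bg_far.png"},
        {"children": [
            {"filename": "bg_mid.png"},
            {"filename": "bg_near.png"},
        ]},
    ],
}
with open("/project/bg/background.json", "w") as f:
    json.dump(data, f)

# get_background_layers("/project/bg/background.json") then returns:
# ["/project/bg/bg_far.png", "/project/bg/bg_mid.png",
#  "/project/bg/bg_near.png"]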
@@ -4,13 +4,31 @@ from pype.modules.websocket_server import WebSocketServer
 Used anywhere solution is calling client methods.
 """
 import json
-from collections import namedtuple

 import attr
+
+import logging
+log = logging.getLogger(__name__)
+
+
+@attr.s
+class AEItem(object):
+    """
+    Object denoting an Item in AE. Each item is created in AE by any
+    Loader, but contains the same fields, which are used in later
+    processing.
+    """
+    # metadata
+    id = attr.ib()  # id created by AE, could be used for querying
+    name = attr.ib()  # name of item
+    item_type = attr.ib(default=None)  # item type (footage, folder, comp)
+    # all imported elements, single for
+    # regular image, array for Backgrounds
+    members = attr.ib(factory=list)
+    workAreaStart = attr.ib(default=None)
+    workAreaDuration = attr.ib(default=None)
+    frameRate = attr.ib(default=None)
+    file_name = attr.ib(default=None)


 class AfterEffectsServerStub():
     """
     Stub for calling function on client (Photoshop js) side.
@@ -34,22 +52,14 @@ class AfterEffectsServerStub():
                                   ('AfterEffects.open', path=path)
                                   )

-    def read(self, layer, layers_meta=None):
-        """
-        Parses layer metadata from Label field of active document
-        Args:
-            layer: <namedTuple Layer("id":XX, "name":"YYY")
-            layers_meta: full list from Headline (for performance in loops)
-        Returns:
-        """
-        if layers_meta is None:
-            layers_meta = self.get_metadata()
-
-        return layers_meta.get(str(layer.id))
-
     def get_metadata(self):
         """
-        Get stored JSON with metadata from AE.Metadata.Label field
+        Get complete stored JSON with metadata from AE.Metadata.Label
+        field.
+
+        It contains containers loaded by any Loader OR instances created
+        by Creator.
+
+        Returns:
+            (dict)
         """
@@ -57,54 +67,85 @@ class AfterEffectsServerStub():
                                         ('AfterEffects.get_metadata')
                                         )
         try:
-            layers_data = json.loads(res)
+            metadata = json.loads(res)
         except json.decoder.JSONDecodeError:
             raise ValueError("Unparsable metadata {}".format(res))
-        return layers_data or {}
+        return metadata or []

-    def imprint(self, layer, data, all_layers=None, layers_meta=None):
+    def read(self, item, layers_meta=None):
+        """
+        Parses item metadata from Label field of active document.
+        Used as a filter to pick metadata for a specific 'item' only.
+
+        Args:
+            item (AEItem): pulled info from AE
+            layers_meta (dict): full list from Headline
+                (load and inject for better performance in loops)
+        Returns:
+            (dict):
+        """
+        if layers_meta is None:
+            layers_meta = self.get_metadata()
+
+        for item_meta in layers_meta:
+            if 'container' in item_meta.get('id') and \
+                    str(item.id) == str(item_meta.get('members')[0]):
+                return item_meta
+
+        log.debug("Couldn't find layer metadata")
+
+    def imprint(self, item, data, all_items=None, items_meta=None):
         """
-        Save layer metadata to Label field of metadata of active document
+        Save item metadata to Label field of metadata of active document
         Args:
-            layer (namedtuple): Layer("id": XXX, "name":'YYY')
+            item (AEItem):
             data(string): json representation for single layer
-            all_layers (list of namedtuples): for performance, could be
+            all_items (list of item): for performance, could be
                 injected for usage in loop, if not, single call will be
                 triggered
-            layers_meta(string): json representation from Headline
+            items_meta(string): json representation from Headline
                 (for performance - provide only if imprint is in
                 loop - value should be same)
         Returns: None
         """
-        if not layers_meta:
-            layers_meta = self.get_metadata()
+        if not items_meta:
+            items_meta = self.get_metadata()

-        # json.dumps writes integer values in a dictionary to string, so
-        # anticipating it here.
-        if str(layer.id) in layers_meta and layers_meta[str(layer.id)]:
-            if data:
-                layers_meta[str(layer.id)].update(data)
-            else:
-                layers_meta.pop(str(layer.id))
-        else:
-            layers_meta[str(layer.id)] = data
+        result_meta = []
+        # fix existing
+        is_new = True
+
+        for item_meta in items_meta:
+            if item_meta.get('members') \
+                    and str(item.id) == str(item_meta.get('members')[0]):
+                is_new = False
+                if data:
+                    item_meta.update(data)
+                    result_meta.append(item_meta)
+            else:
+                result_meta.append(item_meta)
+
+        if is_new:
+            result_meta.append(data)

         # Ensure only valid ids are stored.
-        if not all_layers:
-            all_layers = self.get_items(comps=True,
-                                        folders=False,
-                                        footages=True)
-        item_ids = [int(item.id) for item in all_layers]
-        cleaned_data = {}
-        for id in layers_meta:
-            if int(id) in item_ids:
-                cleaned_data[id] = layers_meta[id]
+        if not all_items:
+            # loaders create FootageItem now
+            all_items = self.get_items(comps=True,
+                                       folders=True,
+                                       footages=True)
+        item_ids = [int(item.id) for item in all_items]
+        cleaned_data = []
+        for meta in result_meta:
+            # for creation of instance OR loaded container
+            if 'instance' in meta.get('id') or \
+                    int(meta.get('members')[0]) in item_ids:
+                cleaned_data.append(meta)

         payload = json.dumps(cleaned_data, indent=4)

         self.websocketserver.call(self.client.call
-                                  ('AfterEffects.imprint', payload=payload)
-                                  )
+                                  ('AfterEffects.imprint', payload=payload))

     def get_active_document_full_name(self):
         """
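(A sketch of the metadata refactor in this hunk: the stored structure changes from a dict keyed by layer id to a flat list of item dicts matched through the first value of "members". The field names come from the code above; the id strings and values are illustrative only:)

# Old shape: dict keyed by the AE layer id.
layers_meta = {
    "101": {"id": "pyblish.avalon.instance", "subset": "renderMain"},
}

# New shape: list of dicts; an item is matched when str(item.id) equals
# str(meta["members"][0]).
items_meta = [
    {"id": "pyblish.avalon.instance", "members": ["101"],
     "subset": "renderMain"},
    {"id": "pyblish.avalon.container", "members": ["102"],
     "namespace": "Hero_001"},
]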
@@ -130,8 +171,10 @@ class AfterEffectsServerStub():
         """
         Get all items from Project panel according to arguments.
         There are multiple different types:
-            CompItem (could have multiple layers - source for Creator)
-            FolderItem (collection type, currently not used
+            CompItem (could have multiple layers - source for Creator,
+                will be rendered)
+            FolderItem (collection type, currently used for Background
+                loading)
             FootageItem (imported file - created by Loader)
         Args:
             comps (bool): return CompItems
@@ -218,15 +261,15 @@ class AfterEffectsServerStub():
                                              item_id=item.id,
                                              item_name=item_name))

-    def delete_item(self, item):
-        """ Deletes FootageItem with new file
+    def delete_item(self, item_id):
+        """ Deletes *Item in a file
         Args:
-            item (dict):
+            item_id (int):

         """
         self.websocketserver.call(self.client.call
                                   ('AfterEffects.delete_item',
-                                   item_id=item.id
+                                   item_id=item_id
                                    ))

     def is_saved(self):
@@ -340,12 +383,95 @@ class AfterEffectsServerStub():
     def close(self):
         self.client.close()

+    def import_background(self, comp_id, comp_name, files):
+        """
+        Imports background images into an existing or new composition.
+
+        If comp_id is not provided, a new composition is created; basic
+        values (width, height, frameRatio) are taken from the first
+        imported image.
+
+        All images from the background json are imported as FootageItems
+        and a separate layer is created for each of them under the
+        composition.
+
+        Order of imported 'files' is important.
+
+        Args:
+            comp_id (int): id of existing composition (null if new)
+            comp_name (str): used when new composition
+            files (list): list of absolute paths to import and
+                add as layers
+
+        Returns:
+            (AEItem): object with id of created folder, all imported images
+        """
+        res = self.websocketserver.call(self.client.call
+                                        ('AfterEffects.import_background',
+                                         comp_id=comp_id,
+                                         comp_name=comp_name,
+                                         files=files))
+
+        records = self._to_records(res)
+        if records:
+            return records.pop()
+
+        log.debug("Import background failed.")
+
+    def reload_background(self, comp_id, comp_name, files):
+        """
+        Reloads background images into an existing composition.
+
+        For safety it actually deletes the complete folder with imported
+        images and the created composition.
+
+        Args:
+            comp_id (int): id of existing composition to be overwritten
+            comp_name (str): new name of composition (could be same as old
+                if version up only)
+            files (list): list of absolute paths to import and
+                add as layers
+        Returns:
+            (AEItem): object with id of created folder, all imported images
+        """
+        res = self.websocketserver.call(self.client.call
+                                        ('AfterEffects.reload_background',
+                                         comp_id=comp_id,
+                                         comp_name=comp_name,
+                                         files=files))
+
+        records = self._to_records(res)
+        if records:
+            return records.pop()
+
+        log.debug("Reload of background failed.")
+
+    def add_item_as_layer(self, comp_id, item_id):
+        """
+        Adds an already imported FootageItem ('item_id') as a new
+        layer to a composition ('comp_id').
+
+        Args:
+            comp_id (int): id of target composition
+            item_id (int): FootageItem.id
+                comp already found previously
+        """
+        res = self.websocketserver.call(self.client.call
+                                        ('AfterEffects.add_item_as_layer',
+                                         comp_id=comp_id,
+                                         item_id=item_id))
+
+        records = self._to_records(res)
+        if records:
+            return records.pop()
+
+        log.debug("Adding new layer failed.")
+
     def _to_records(self, res):
         """
-        Converts string json representation into list of named tuples for
-        dot notation access to work.
-        Returns: <list of named tuples>
+        Converts string json representation into a list of AEItem for
+        dot notation access to work.
+        Returns: <list of AEItem>
             res(string): - json representation
         """
         if not res:
             return []
@@ -358,9 +484,19 @@ class AfterEffectsServerStub():
             return []

         ret = []
-        # convert to namedtuple to use dot notation
-        if isinstance(layers_data, dict):  # TODO refactor
+        # convert to AEItem to use dot notation
+        if isinstance(layers_data, dict):
             layers_data = [layers_data]
         for d in layers_data:
-            ret.append(namedtuple('Layer', d.keys())(*d.values()))
+            # currently implemented and expected fields
+            item = AEItem(d.get('id'),
+                          d.get('name'),
+                          d.get('type'),
+                          d.get('members'),
+                          d.get('workAreaStart'),
+                          d.get('workAreaDuration'),
+                          d.get('frameRate'),
+                          d.get('file_name'))
+
+            ret.append(item)
         return ret
@@ -35,7 +35,7 @@ class CreateRender(api.Creator):
             if self.name.lower() == item.name.lower():
                 self._show_msg(txt)
                 return False

         self.data["members"] = [item.id]
         stub.imprint(item, self.data)
         stub.set_label_color(item.id, 14)  # Cyan options 0 - 16
         stub.rename_item(item, self.data["subset"])
pype/plugins/aftereffects/load/load_background.py (new file, 99 lines)

@@ -0,0 +1,99 @@
+import re
+
+from avalon import api, aftereffects
+
+from pype.lib import get_background_layers, get_unique_layer_name
+
+stub = aftereffects.stub()
+
+
+class BackgroundLoader(api.Loader):
+    """
+    Load images from the Background family.
+
+    Creates a separate folder for each background, with all images
+    imported from the background json AND an automatically created
+    composition with layers, one layer per image.
+
+    For each load a container is created and stored in the project
+    (.aep) metadata.
+    """
+    families = ["background"]
+    representations = ["json"]
+
+    def load(self, context, name=None, namespace=None, data=None):
+        items = stub.get_items(comps=True)
+        existing_items = [layer.name for layer in items]
+
+        comp_name = get_unique_layer_name(
+            existing_items,
+            "{}_{}".format(context["asset"]["name"], name))
+
+        layers = get_background_layers(self.fname)
+        comp = stub.import_background(None, comp_name, layers)
+
+        if not comp:
+            self.log.warning("Import background failed.")
+            self.log.warning("Check host app for alert error.")
+            return
+
+        self[:] = [comp]
+        namespace = namespace or comp_name
+
+        return aftereffects.containerise(
+            name,
+            namespace,
+            comp,
+            context,
+            self.__class__.__name__
+        )
+
+    def update(self, container, representation):
+        """Switch asset or change version."""
+        context = representation.get("context", {})
+        _ = container.pop("layer")
+
+        # without iterator number (_001, _002...)
+        namespace_from_container = re.sub(r'_\d{3}$', '',
+                                          container["namespace"])
+        comp_name = "{}_{}".format(context["asset"], context["subset"])
+
+        # switching assets
+        if namespace_from_container != comp_name:
+            items = stub.get_items(comps=True)
+            existing_items = [layer.name for layer in items]
+            comp_name = get_unique_layer_name(
+                existing_items,
+                "{}_{}".format(context["asset"], context["subset"]))
+        else:  # switching version - keep same name
+            comp_name = container["namespace"]

+        path = api.get_representation_path(representation)
+
+        layers = get_background_layers(path)
+        comp = stub.reload_background(container["members"][1],
+                                      comp_name,
+                                      layers)
+
+        # update container
+        container["representation"] = str(representation["_id"])
+        container["name"] = context["subset"]
+        container["namespace"] = comp_name
+        container["members"] = comp.members
+
+        stub.imprint(comp, container)
+
+    def remove(self, container):
+        """
+        Removes element from scene: deletes layer + removes from file
+        metadata.
+        Args:
+            container (dict): container to be removed - used to get layer_id
+        """
+        layer = container.pop("layer")
+        stub.imprint(layer, {})
+        stub.delete_item(layer.id)
+
+    def switch(self, container, representation):
+        self.update(container, representation)
@@ -1,5 +1,5 @@
 from avalon import api, aftereffects
-from pype.plugins import lib
+from pype import lib
 import re

 stub = aftereffects.stub()
@@ -21,9 +21,10 @@ class FileLoader(api.Loader):
     representations = ["*"]

     def load(self, context, name=None, namespace=None, data=None):
-        comp_name = lib.get_unique_layer_name(stub.get_items(comps=True),
-                                              context["asset"]["name"],
-                                              name)
+        layers = stub.get_items(comps=True, folders=True, footages=True)
+        existing_layers = [layer.name for layer in layers]
+        comp_name = lib.get_unique_layer_name(
+            existing_layers, "{}_{}".format(context["asset"]["name"], name))

         import_options = {}
@@ -77,9 +78,11 @@ class FileLoader(api.Loader):
         layer_name = "{}_{}".format(context["asset"], context["subset"])
         # switching assets
         if namespace_from_container != layer_name:
-            layer_name = lib.get_unique_layer_name(stub.get_items(comps=True),
-                                                   context["asset"],
-                                                   context["subset"])
+            layers = stub.get_items(comps=True)
+            existing_layers = [layer.name for layer in layers]
+            layer_name = lib.get_unique_layer_name(
+                existing_layers,
+                "{}_{}".format(context["asset"], context["subset"]))
         else:  # switching version - keep same name
             layer_name = container["namespace"]
         path = api.get_representation_path(representation)
@@ -33,12 +33,16 @@ class CollectAERender(abstract_collect_render.AbstractCollectRender):
         compositions = aftereffects.stub().get_items(True)
         compositions_by_id = {item.id: item for item in compositions}
-        for item_id, inst in aftereffects.stub().get_metadata().items():
+        for inst in aftereffects.stub().get_metadata():
+            schema = inst.get('schema')
+            # loaded asset container, skip it
+            if schema and 'container' in schema:
+                continue
+
+            if not inst["members"]:
+                raise ValueError("Couldn't find id, unable to publish. " +
+                                 "Please recreate instance.")
+            item_id = inst["members"][0]
             work_area_info = aftereffects.stub().get_work_area(int(item_id))
             frameStart = work_area_info.workAreaStart
@@ -110,7 +114,10 @@ class CollectAERender(abstract_collect_render.AbstractCollectRender):
         # pull file name from Render Queue Output module
         render_q = aftereffects.stub().get_render_info()
+        if not render_q:
+            raise ValueError("No file extension set in Render Queue")
         _, ext = os.path.splitext(os.path.basename(render_q.file_name))

         base_dir = self._get_output_dir(render_instance)
         expected_files = []
         if "#" not in render_q.file_name:  # single frame (mov)
@@ -105,3 +105,13 @@ class AfterEffectsSubmitDeadline(abstract_submit_deadline.AbstractSubmitDeadline):
         deadline_plugin_info.Output = render_path.replace("\\", "/")

         return attr.asdict(deadline_plugin_info)
+
+    def from_published_scene(self):
+        """Do not overwrite expected files.
+
+        `use_published` is set to True, so rendering will be triggered
+        from the published scene (in the 'publish' folder). The default
+        implementation of the abstract class renames expected (eg.
+        rendered) files accordingly, which is not needed here.
+        """
+        return super().from_published_scene(False)
@@ -11,7 +11,7 @@ class ExtractCelactionDeadline(pyblish.api.InstancePlugin):
     """Submit CelAction2D scene to Deadline

     Renders are submitted to a Deadline Web Service as
-    supplied via the environment variable DEADLINE_REST_URL
+    supplied via the settings key "DEADLINE_REST_URL".

     """
@@ -37,10 +37,15 @@ class ExtractCelactionDeadline(pyblish.api.InstancePlugin):
         instance.data["toBeRenderedOn"] = "deadline"
         context = instance.context

-        DEADLINE_REST_URL = os.environ.get("DEADLINE_REST_URL")
-        assert DEADLINE_REST_URL, "Requires DEADLINE_REST_URL"
+        deadline_url = (
+            context.data["system_settings"]
+            ["modules"]
+            ["deadline"]
+            ["DEADLINE_REST_URL"]
+        )
+        assert deadline_url, "Requires DEADLINE_REST_URL"

-        self.deadline_url = "{}/api/jobs".format(DEADLINE_REST_URL)
+        self.deadline_url = "{}/api/jobs".format(deadline_url)
         self._comment = context.data.get("comment", "")
         self._deadline_user = context.data.get(
             "deadlineUser", getpass.getuser())
@@ -61,8 +61,8 @@ class ValidateFtrackAttributes(pyblish.api.InstancePlugin):
                 "Missing FTrack Task entity in context")

         host = pyblish.api.current_host()
-        to_check = context.data["presets"].get(
-            host, {}).get("ftrack_custom_attributes")
+        to_check = self.ftrack_custom_attributes.get(host, {})

         if not to_check:
             self.log.warning("ftrack_attributes preset not found")
             return
@@ -12,7 +12,7 @@ class FusionSubmitDeadline(pyblish.api.InstancePlugin):
     """Submit current Comp to Deadline

     Renders are submitted to a Deadline Web Service as
-    supplied via the environment variable DEADLINE_REST_URL
+    supplied via the settings key "DEADLINE_REST_URL".

     """
@@ -32,9 +32,13 @@ class FusionSubmitDeadline(pyblish.api.InstancePlugin):
         from avalon.fusion.lib import get_frame_path

-        DEADLINE_REST_URL = api.Session.get("DEADLINE_REST_URL",
-                                            "http://localhost:8082")
-        assert DEADLINE_REST_URL, "Requires DEADLINE_REST_URL"
+        deadline_url = (
+            context.data["system_settings"]
+            ["modules"]
+            ["deadline"]
+            ["DEADLINE_REST_URL"]
+        )
+        assert deadline_url, "Requires DEADLINE_REST_URL"

         # Collect all saver instances in context that are to be rendered
         saver_instances = []
@@ -15,6 +15,7 @@ Provides:
 import os
 import json

+from pype.lib import ApplicationManager
 from avalon import api, lib
 import pyblish.api
@@ -64,12 +65,12 @@ class CollectAnatomyContextData(pyblish.api.ContextPlugin):
             "username": context.data["user"]
         }

-        avalon_app_name = os.environ.get("AVALON_APP_NAME")
-        if avalon_app_name:
-            application_def = lib.get_application(avalon_app_name)
-            app_dir = application_def.get("application_dir")
-            if app_dir:
-                context_data["app"] = app_dir
+        app_manager = ApplicationManager()
+        app_name = os.environ.get("AVALON_APP_NAME")
+        if app_name:
+            app = app_manager.applications.get(app_name)
+            if app:
+                context_data["app"] = app.host_name

         datetime_data = context.data.get("datetimeData") or {}
         context_data.update(datetime_data)
@@ -1,24 +0,0 @@ (deleted file with the CollectPresets plugin)
-"""
-Requires:
-    config_data -> colorspace.default
-    config_data -> dataflow.default
-
-Provides:
-    context -> presets
-"""
-
-from pyblish import api
-from pype.api import get_current_project_settings
-
-
-class CollectPresets(api.ContextPlugin):
-    """Collect Presets."""
-
-    order = api.CollectorOrder - 0.491
-    label = "Collect Presets"
-
-    def process(self, context):
-        project_settings = get_current_project_settings()
-        context.data["presets"] = project_settings
-
-        return
pype/plugins/global/publish/collect_settings.py (new file, 13 lines)

@@ -0,0 +1,13 @@
+from pyblish import api
+from pype.api import get_current_project_settings, get_system_settings
+
+
+class CollectSettings(api.ContextPlugin):
+    """Collect Settings and store them in the context."""
+
+    order = api.CollectorOrder - 0.491
+    label = "Collect Settings"
+
+    def process(self, context):
+        context.data["project_settings"] = get_current_project_settings()
+        context.data["system_settings"] = get_system_settings()
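(With the settings collected this early in the publish process, any later plugin can read them from context data. A hypothetical consumer, for illustration only - the plugin name is made up, the lookup path comes from the other hunks in this commit:)

import pyblish.api


class UseCollectedSettings(pyblish.api.ContextPlugin):
    """Hypothetical plugin showing how collected settings are consumed."""

    # Runs after CollectSettings (CollectorOrder - 0.491).
    order = pyblish.api.CollectorOrder

    def process(self, context):
        deadline_url = (
            context.data["system_settings"]
            ["modules"]
            ["deadline"]
            ["DEADLINE_REST_URL"]
        )
        self.log.info("Deadline at {}".format(deadline_url))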
@@ -68,8 +68,6 @@ class ExtractBurnin(pype.api.Extractor):
         if "representations" not in instance.data:
             raise RuntimeError("Burnin needs already created mov to work on.")

-        if self.use_legacy_code(instance):
-            return self.legacy_process(instance)
         self.main_process(instance)

         # Remove any representations tagged for deletion.
@@ -79,12 +77,6 @@ class ExtractBurnin(pype.api.Extractor):
             self.log.debug("Removing representation: {}".format(repre))
             instance.data["representations"].remove(repre)

-    def use_legacy_code(self, instance):
-        presets = instance.context.data.get("presets")
-        if presets is None and self.profiles is None:
-            return True
-        return "burnins" in (presets.get("tools") or {})
-
     def main_process(self, instance):
         # TODO get these data from context
         host_name = os.environ["AVALON_APP"]
@@ -700,7 +692,7 @@ class ExtractBurnin(pype.api.Extractor):
         return filtered_burnin_defs

     def families_filter_validation(self, families, output_families_filter):
-        """Determines if entered families intersect with families filters.
+        """Determine if entered families intersect with families filters.

         All family values are lowered to avoid unexpected results.
         """
@@ -747,7 +739,7 @@ class ExtractBurnin(pype.api.Extractor):
         return regexes

     def validate_value_by_regexes(self, value, in_list):
-        """Validates in any regexe from list match entered value.
+        """Validate if any regex from the list matches the entered value.

         Args:
             in_list (list): List with regexes.
@@ -770,14 +762,14 @@ class ExtractBurnin(pype.api.Extractor):
         return output

     def main_family_from_instance(self, instance):
-        """Returns main family of entered instance."""
+        """Return main family of entered instance."""
         family = instance.data.get("family")
         if not family:
             family = instance.data["families"][0]
         return family

     def families_from_instance(self, instance):
-        """Returns all families of entered instance."""
+        """Return all families of entered instance."""
         families = []
         family = instance.data.get("family")
         if family:
@@ -789,7 +781,7 @@ class ExtractBurnin(pype.api.Extractor):
         return families

     def burnin_script_path(self):
-        """Returns path to python script for burnin processing."""
+        """Return path to python script for burnin processing."""
         # TODO maybe convert to Plugin's attribute
         # Get script path.
         module_path = os.environ["PYPE_MODULE_ROOT"]
@@ -813,7 +805,7 @@ class ExtractBurnin(pype.api.Extractor):
         return scriptpath

     def python_executable_path(self):
-        """Returns path to Python 3 executable."""
+        """Return path to Python 3 executable."""
         # TODO maybe convert to Plugin's attribute
         # Get executable.
         executable = os.getenv("PYPE_PYTHON_EXE")
@@ -825,211 +817,3 @@ class ExtractBurnin(pype.api.Extractor):

         self.log.debug("executable: {}".format(executable))
         return executable
-
-    def legacy_process(self, instance):
-        self.log.warning("Legacy burnin presets are used.")
-
-        context_data = instance.context.data
-
-        version = instance.data.get(
-            'version', instance.context.data.get('version'))
-        frame_start = int(instance.data.get("frameStart") or 0)
-        frame_end = int(instance.data.get("frameEnd") or 1)
-        handle_start = instance.data.get("handleStart",
-                                         context_data.get("handleStart"))
-        handle_end = instance.data.get("handleEnd",
-                                       context_data.get("handleEnd"))
-
-        frame_start_handle = frame_start - handle_start
-        frame_end_handle = frame_end + handle_end
-        duration = frame_end_handle - frame_start_handle + 1
-
-        prep_data = copy.deepcopy(instance.data["anatomyData"])
-
-        if "slate.farm" in instance.data["families"]:
-            frame_start_handle += 1
-            duration -= 1
-
-        prep_data.update({
-            "frame_start": frame_start_handle,
-            "frame_end": frame_end_handle,
-            "duration": duration,
-            "version": int(version),
-            "comment": instance.context.data.get("comment", "")
-        })
-
-        intent_label = instance.context.data.get("intent")
-        if intent_label and isinstance(intent_label, dict):
-            intent_label = intent_label.get("label")
-
-        if intent_label:
-            prep_data["intent"] = intent_label
-
-        # get anatomy project
-        anatomy = instance.context.data['anatomy']
-
-        self.log.debug("__ prep_data: {}".format(prep_data))
-        for i, repre in enumerate(instance.data["representations"]):
-            self.log.debug("__ i: `{}`, repre: `{}`".format(i, repre))
-
-            if instance.data.get("multipartExr") is True:
-                # ffmpeg doesn't support multipart exrs
-                continue
-
-            if "burnin" not in repre.get("tags", []):
-                continue
-
-            is_sequence = "sequence" in repre.get("tags", [])
-
-            # no handles switch from profile tags
-            no_handles = "no-handles" in repre.get("tags", [])
-
-            stagingdir = repre["stagingDir"]
-            filename = "{0}".format(repre["files"])
-
-            if is_sequence:
-                filename = repre["sequence_file"]
-
-            name = "_burnin"
-            ext = os.path.splitext(filename)[1]
-            movieFileBurnin = filename.replace(ext, "") + name + ext
-
-            if is_sequence:
-                fn_splt = filename.split(".")
-                movieFileBurnin = ".".join(
-                    ((fn_splt[0] + name), fn_splt[-2], fn_splt[-1]))
-
-            self.log.debug("__ movieFileBurnin: `{}`".format(movieFileBurnin))
-
-            full_movie_path = os.path.join(
-                os.path.normpath(stagingdir), filename)
-            full_burnin_path = os.path.join(
-                os.path.normpath(stagingdir), movieFileBurnin)
-
-            self.log.debug("__ full_movie_path: {}".format(full_movie_path))
-            self.log.debug("__ full_burnin_path: {}".format(full_burnin_path))
-
-            # create copy of prep_data for anatomy formatting
-            _prep_data = copy.deepcopy(prep_data)
-            _prep_data["representation"] = repre["name"]
-            filled_anatomy = anatomy.format_all(_prep_data)
-            _prep_data["anatomy"] = filled_anatomy.get_solved()
-
-            # copy frame range variables
-            frame_start_cp = frame_start_handle
-            frame_end_cp = frame_end_handle
-            duration_cp = duration
-
-            if no_handles:
-                frame_start_cp = frame_start
-                frame_end_cp = frame_end
-                duration_cp = frame_end_cp - frame_start_cp + 1
-                _prep_data.update({
-                    "frame_start": frame_start_cp,
-                    "frame_end": frame_end_cp,
-                    "duration": duration_cp,
-                })
-
-            # dealing with slates
-            slate_frame_start = frame_start_cp
-            slate_frame_end = frame_end_cp
-            slate_duration = duration_cp
-
-            # exception for slate workflow
-            if "slate" in instance.data["families"]:
-                if "slate-frame" in repre.get("tags", []):
-                    slate_frame_start = frame_start_cp - 1
-                    slate_frame_end = frame_end_cp
-                    slate_duration = duration_cp + 1
-
-            self.log.debug("__1 slate_frame_start: {}".format(
-                slate_frame_start))
-
-            _prep_data.update({
-                "slate_frame_start": slate_frame_start,
-                "slate_frame_end": slate_frame_end,
-                "slate_duration": slate_duration
-            })
-
-            burnin_data = {
-                "input": full_movie_path.replace("\\", "/"),
-                "codec": repre.get("codec", []),
-                "output": full_burnin_path.replace("\\", "/"),
-                "burnin_data": _prep_data
-            }
-
-            self.log.debug("__ burnin_data2: {}".format(burnin_data))
-
-            json_data = json.dumps(burnin_data)
-
-            # Get script path.
-            module_path = os.environ['PYPE_MODULE_ROOT']
-
-            # There can be multiple paths in PYPE_MODULE_ROOT, in which case
-            # we just take first one.
-            if os.pathsep in module_path:
-                module_path = module_path.split(os.pathsep)[0]
-
-            scriptpath = os.path.normpath(
-                os.path.join(
-                    module_path,
-                    "pype",
-                    "scripts",
-                    "otio_burnin.py"
-                )
-            )
-
-            self.log.debug("__ scriptpath: {}".format(scriptpath))
-
-            # Get executable.
-            executable = os.getenv("PYPE_PYTHON_EXE")
-
-            # There can be multiple paths in PYPE_PYTHON_EXE, in which case
-            # we just take first one.
-            if os.pathsep in executable:
-                executable = executable.split(os.pathsep)[0]
-
-            self.log.debug("__ EXE: {}".format(executable))
-
-            args = [executable, scriptpath, json_data]
-            self.log.debug("Executing: {}".format(args))
-            output = pype.api.subprocess(args, shell=True, logger=self.log)
-            self.log.debug("Output: {}".format(output))
-
-            repre_update = {
-                "files": movieFileBurnin,
-                "name": repre["name"],
-                "tags": [x for x in repre["tags"] if x != "delete"]
-            }
-
-            if is_sequence:
-                burnin_seq_files = list()
-                for frame_index in range(_prep_data["duration"] + 1):
-                    if frame_index == 0:
-                        continue
-                    burnin_seq_files.append(movieFileBurnin % frame_index)
-                repre_update.update({
-                    "files": burnin_seq_files
-                })
-
-            instance.data["representations"][i].update(repre_update)
-
-            # removing the source mov file
-            if is_sequence:
-                for frame_index in range(_prep_data["duration"] + 1):
-                    if frame_index == 0:
-                        continue
-                    rm_file = full_movie_path % frame_index
-                    os.remove(rm_file)
-                    self.log.debug("Removed: `{}`".format(rm_file))
-            else:
-                os.remove(full_movie_path)
-                self.log.debug("Removed: `{}`".format(full_movie_path))
-
-        # Remove any representations tagged for deletion.
-        for repre in instance.data["representations"]:
-            if "delete" in repre.get("tags", []):
-                self.log.debug("Removing representation: {}".format(repre))
-                instance.data["representations"].remove(repre)
-
-        self.log.debug(instance.data["representations"])
@@ -348,6 +348,8 @@ class ExtractReview(pyblish.api.InstancePlugin):
             + 1
         )

+        duration_seconds = float(output_frames_len / temp_data["fps"])
+
         if temp_data["input_is_sequence"]:
             # Set start frame of input sequence (just frame in filename)
             # - definition of input filepath
@@ -375,33 +377,39 @@ class ExtractReview(pyblish.api.InstancePlugin):

         # Change output's duration and start point if it should not
         # contain handles
+        start_sec = 0
         if temp_data["without_handles"] and temp_data["handles_are_set"]:
             # Set start time without handles
             # - check if handle_start is bigger than 0 to avoid zero division
             if temp_data["handle_start"] > 0:
                 start_sec = float(temp_data["handle_start"]) / temp_data["fps"]
-                ffmpeg_input_args.append("-ss {:0.2f}".format(start_sec))
+                ffmpeg_input_args.append("-ss {:0.10f}".format(start_sec))

             # Set output duration in seconds
-            duration_sec = float(output_frames_len / temp_data["fps"])
-            ffmpeg_output_args.append("-t {:0.2f}".format(duration_sec))
+            ffmpeg_output_args.append("-t {:0.10}".format(duration_seconds))

         # Set frame range of output when output is a sequence
-        elif temp_data["input_is_sequence"] or temp_data["output_is_sequence"]:
+        elif temp_data["output_is_sequence"]:
             ffmpeg_output_args.append("-frames:v {}".format(output_frames_len))

+        # Add duration of an input sequence if output is video
+        if (
+            temp_data["input_is_sequence"]
+            and not temp_data["output_is_sequence"]
+        ):
+            ffmpeg_input_args.append("-to {:0.10f}".format(
+                duration_seconds + start_sec
+            ))
+
         # Add video/image input path
         ffmpeg_input_args.append(
             "-i \"{}\"".format(temp_data["full_input_path"])
         )

         # Use shortest input
         ffmpeg_output_args.append("-shortest")

         # Add audio arguments if there are any. Skipped when output are images.
         if not temp_data["output_ext_is_image"] and temp_data["with_audio"]:
             audio_in_args, audio_filters, audio_out_args = self.audio_args(
-                instance, temp_data
+                instance, temp_data, duration_seconds
             )
             ffmpeg_input_args.extend(audio_in_args)
             ffmpeg_audio_filters.extend(audio_filters)
@@ -616,7 +624,7 @@ class ExtractReview(pyblish.api.InstancePlugin):
         self.log.debug("Input path {}".format(full_input_path))
         self.log.debug("Output path {}".format(full_output_path))

-    def audio_args(self, instance, temp_data):
+    def audio_args(self, instance, temp_data, duration_seconds):
         """Prepares FFMpeg arguments for audio inputs."""
         audio_in_args = []
         audio_filters = []
|
|||
audio_in_args.append(
|
||||
"-ss {}".format(offset_seconds)
|
||||
)
|
||||
|
||||
elif offset_seconds < 0:
|
||||
audio_in_args.append(
|
||||
"-itsoffset {}".format(abs(offset_seconds))
|
||||
)
|
||||
|
||||
# Audio duration is offset from `-ss`
|
||||
audio_duration = duration_seconds + offset_seconds
|
||||
|
||||
# Set audio duration
|
||||
audio_in_args.append("-to {:0.10f}".format(audio_duration))
|
||||
|
||||
# Add audio input path
|
||||
audio_in_args.append("-i \"{}\"".format(audio["filename"]))
|
||||
|
||||
# NOTE: These were changed from input to output arguments.
|
||||
|
|
|
|||
|
|
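(A rough numeric sketch of the duration arithmetic above. The frame values are made up; the only claim taken from the code is its own comment that the audio cut point `-to` is the review duration plus the `-ss` offset:)

fps = 24.0
output_frames_len = 96

duration_seconds = float(output_frames_len / fps)   # 4.0 s of output

offset_seconds = 0.5          # audio starts 0.5 s into its file (-ss 0.5)
audio_duration = duration_seconds + offset_seconds  # -to 4.5

# After seeking 0.5 s and cutting at 4.5 s, 4.0 s of audio remain,
# matching the 96-frame video output.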
@@ -305,7 +305,7 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
         self.log.info("Submitting Deadline job ...")

-        url = "{}/api/jobs".format(self.DEADLINE_REST_URL)
+        url = "{}/api/jobs".format(self.deadline_url)
         response = requests.post(url, json=payload, timeout=10)
         if not response.ok:
             raise Exception(response.text)
@@ -924,10 +924,13 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
         }

         if submission_type == "deadline":
-            self.DEADLINE_REST_URL = os.environ.get(
-                "DEADLINE_REST_URL", "http://localhost:8082"
-            )
-            assert self.DEADLINE_REST_URL, "Requires DEADLINE_REST_URL"
+            self.deadline_url = (
+                context.data["system_settings"]
+                ["modules"]
+                ["deadline"]
+                ["DEADLINE_REST_URL"]
+            )
+            assert self.deadline_url, "Requires DEADLINE_REST_URL"

             self._submit_deadline_post_job(instance, render_job, instances)
@@ -38,7 +38,7 @@ class ExtractPalette(pype.api.Extractor):
             os.path.basename(palette_file)
             .split(".plt")[0] + "_swatches.png"
         )
-        self.log.info(f"Temporary humbnail path {tmp_thumb_path}")
+        self.log.info(f"Temporary thumbnail path {tmp_thumb_path}")

         palette_version = str(instance.data.get("version")).zfill(3)
@@ -52,6 +52,11 @@ class ExtractPalette(pype.api.Extractor):
                 palette_version,
                 palette_file,
                 tmp_thumb_path)
+        except OSError as e:
+            # FIXME: this happens on Mac where PIL cannot access fonts
+            # for some reason.
+            self.log.warning("Thumbnail generation failed")
+            self.log.warning(e)
+        except ValueError:
+            self.log.error("Unsupported palette type for thumbnail.")
@@ -31,7 +31,11 @@ class ExtractTemplate(pype.api.Extractor):
             for backdrop in self.get_backdrops(dependency):
                 backdrops[backdrop["title"]["text"]] = backdrop
         unique_backdrops = [backdrops[x] for x in set(backdrops.keys())]
+
+        if not unique_backdrops:
+            self.log.error(("No backdrops detected for template. "
+                            "Please move template instance node onto "
+                            "some backdrop and try again."))
+            raise AssertionError("No backdrop detected")
         # Get non-connected nodes within backdrops.
         all_nodes = instance.context.data.get("allNodes")
         for node in [x for x in all_nodes if x not in dependencies]:
@@ -1,26 +0,0 @@ (deleted file with the old get_unique_layer_name helper)
-import re
-
-
-def get_unique_layer_name(layers, asset_name, subset_name):
-    """
-    Gets all layer names and if 'name' is present in them, increases
-    suffix by 1 (eg. creates unique layer name - for Loader)
-    Args:
-        layers (list): of namedtuples, expects 'name' field present
-        asset_name (string): in format asset_subset (Hero)
-        subset_name (string): (LOD)
-
-    Returns:
-        (string): name_00X (without version)
-    """
-    name = "{}_{}".format(asset_name, subset_name)
-    names = {}
-    for layer in layers:
-        layer_name = re.sub(r'_\d{3}$', '', layer.name)
-        if layer_name in names.keys():
-            names[layer_name] = names[layer_name] + 1
-        else:
-            names[layer_name] = 1
-    occurrences = names.get(name, 0)
-
-    return "{}_{:0>3d}".format(name, occurrences + 1)
@@ -9,6 +9,7 @@ from maya import cmds
 import maya.app.renderSetup.model.renderSetup as renderSetup

 from pype.hosts.maya import lib
+from pype.api import get_system_settings
 import avalon.maya
@@ -124,8 +125,11 @@ class CreateRender(avalon.maya.Creator):
         # get pools
         pools = []

-        deadline_url = os.environ.get("DEADLINE_REST_URL", None)
-        muster_url = os.environ.get("MUSTER_REST_URL", None)
+        system_settings = get_system_settings()["modules"]
+
+        deadline_url = system_settings["deadline"]["DEADLINE_REST_URL"]
+        muster_url = system_settings["muster"]["MUSTER_REST_URL"]

         if deadline_url and muster_url:
             self.log.error(
                 "Both Deadline and Muster are enabled. " "Cannot support both."
@@ -198,7 +202,7 @@ class CreateRender(avalon.maya.Creator):
         """Load Muster credentials.

         Load Muster credentials from file and set ``MUSTER_USER``,
-        ``MUSTER_PASSWORD``, ``MUSTER_REST_URL`` is loaded from presets.
+        ``MUSTER_PASSWORD``, ``MUSTER_REST_URL`` is loaded from settings.

         Raises:
             RuntimeError: If loaded credentials are invalid.
@@ -15,7 +15,7 @@ class CreateRenderSetup(avalon.maya.Creator):
         super(CreateRenderSetup, self).__init__(*args, **kwargs)

         # here we can pre-create renderSetup layers, possibly utilizing
-        # presets for it.
+        # settings for it.

         #  _____
         # /  __\__
@@ -102,10 +102,11 @@ class ExtractCameraMayaScene(pype.api.Extractor):
     def process(self, instance):
         """Plugin entry point."""
         # get settings
-        ext_mapping = (instance.context.data["presets"]["maya"]
-                       .get("ext_mapping"))  # noqa: E501
+        ext_mapping = (
+            instance.context.data["project_settings"]["maya"]["ext_mapping"]
+        )
         if ext_mapping:
-            self.log.info("Looking in presets for scene type ...")
+            self.log.info("Looking in settings for scene type ...")
             # use extension mapping for first family found
             for family in self.families:
                 try:
@@ -24,9 +24,11 @@ class ExtractMayaSceneRaw(pype.api.Extractor):
     def process(self, instance):
         """Plugin entry point."""
-        ext_mapping = instance.context.data["presets"]["maya"].get("ext_mapping")  # noqa: E501
+        ext_mapping = (
+            instance.context.data["project_settings"]["maya"]["ext_mapping"]
+        )
         if ext_mapping:
-            self.log.info("Looking in presets for scene type ...")
+            self.log.info("Looking in settings for scene type ...")
             # use extension mapping for first family found
             for family in self.families:
                 try:
@@ -31,9 +31,11 @@ class ExtractModel(pype.api.Extractor):
     def process(self, instance):
         """Plugin entry point."""
-        ext_mapping = instance.context.data["presets"]["maya"].get("ext_mapping")  # noqa: E501
+        ext_mapping = (
+            instance.context.data["project_settings"]["maya"]["ext_mapping"]
+        )
         if ext_mapping:
-            self.log.info("Looking in presets for scene type ...")
+            self.log.info("Looking in settings for scene type ...")
             # use extension mapping for first family found
             for family in self.families:
                 try:
@@ -43,7 +43,9 @@ class ExtractPlayblast(pype.api.Extractor):
         # get cameras
         camera = instance.data['review_camera']
-        capture_preset = instance.context.data['presets']['maya']['capture']
+        capture_preset = (
+            instance.context.data['project_settings']['maya']['capture']
+        )

         try:
             preset = lib.load_capture_preset(data=capture_preset)
@@ -18,9 +18,11 @@ class ExtractRig(pype.api.Extractor):
     def process(self, instance):
         """Plugin entry point."""
-        ext_mapping = instance.context.data["presets"]["maya"].get("ext_mapping")  # noqa: E501
+        ext_mapping = (
+            instance.context.data["project_settings"]["maya"]["ext_mapping"]
+        )
         if ext_mapping:
-            self.log.info("Looking in presets for scene type ...")
+            self.log.info("Looking in settings for scene type ...")
             # use extension mapping for first family found
             for family in self.families:
                 try:
@@ -33,7 +33,10 @@ class ExtractThumbnail(pype.api.Extractor):
         camera = instance.data['review_camera']

         capture_preset = ""
-        capture_preset = instance.context.data['presets']['maya']['capture']
+        capture_preset = (
+            instance.context.data["project_settings"]['maya']['capture']
+        )

         try:
             preset = lib.load_capture_preset(data=capture_preset)
         except:
@@ -101,9 +101,11 @@ class ExtractYetiRig(pype.api.Extractor):
     def process(self, instance):
         """Plugin entry point."""
-        ext_mapping = instance.context.data["presets"]["maya"].get("ext_mapping")  # noqa: E501
+        ext_mapping = (
+            instance.context.data["project_settings"]["maya"]["ext_mapping"]
+        )
         if ext_mapping:
-            self.log.info("Looking in presets for scene type ...")
+            self.log.info("Looking in settings for scene type ...")
             # use extension mapping for first family found
             for family in self.families:
                 try:
@@ -238,11 +238,7 @@ class MayaSubmitDeadline(pyblish.api.InstancePlugin):
     """Submit available render layers to Deadline.

     Renders are submitted to a Deadline Web Service as
-    supplied via the environment variable ``DEADLINE_REST_URL``.
-
-    Note:
-        If Deadline configuration is not detected, this plugin will
-        be disabled.
+    supplied via the settings key "DEADLINE_REST_URL".

     Attributes:
         use_published (bool): Use published scene to render instead of the
@@ -254,11 +250,6 @@ class MayaSubmitDeadline(pyblish.api.InstancePlugin):
     order = pyblish.api.IntegratorOrder + 0.1
     hosts = ["maya"]
     families = ["renderlayer"]
-    if not os.environ.get("DEADLINE_REST_URL"):
-        optional = False
-        active = False
-    else:
-        optional = True

     use_published = True
     tile_assembler_plugin = "PypeTileAssembler"
@@ -267,9 +258,16 @@ class MayaSubmitDeadline(pyblish.api.InstancePlugin):

     def process(self, instance):
         """Plugin entry point."""
         instance.data["toBeRenderedOn"] = "deadline"
+        context = instance.context

         self._instance = instance
-        self._deadline_url = os.environ.get(
-            "DEADLINE_REST_URL", "http://localhost:8082")
+        self._deadline_url = (
+            context.data["system_settings"]
+            ["modules"]
+            ["deadline"]
+            ["DEADLINE_REST_URL"]
+        )
+
+        assert self._deadline_url, "Requires DEADLINE_REST_URL"

-        context = instance.context
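The Deadline URL now comes from nested system settings in several plugins (here, in ValidateDeadlineConnection, and in the Nuke submitter below). A minimal sketch of a defensive version of that lookup under the same key layout; get_deadline_url is a hypothetical helper, not part of the codebase:

# Sketch only: chained .get() calls fail soft until the final check,
# mirroring the nested settings structure used in the diff above.
def get_deadline_url(context_data):
    modules = context_data.get("system_settings", {}).get("modules", {})
    url = modules.get("deadline", {}).get("DEADLINE_REST_URL")
    if not url:
        raise ValueError("Requires DEADLINE_REST_URL")
    return url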
@@ -153,7 +153,7 @@ class MayaSubmitMuster(pyblish.api.InstancePlugin):

     def _load_credentials(self):
         """
         Load Muster credentials from file and set `MUSTER_USER`,
-        `MUSTER_PASSWORD`, `MUSTER_REST_URL` is loaded from presets.
+        `MUSTER_PASSWORD`, `MUSTER_REST_URL` is loaded from settings.

         .. todo::
@@ -22,9 +22,12 @@ class ValidateAttributes(pyblish.api.ContextPlugin):

     actions = [pype.api.RepairContextAction]
     optional = True

+    attributes = None
+
     def process(self, context):
-        # Check for preset existence.
-        if not context.data["presets"]["maya"].get("attributes"):
+        if not self.attributes:
             return

         invalid = self.get_invalid(context, compute=True)
@@ -43,7 +46,6 @@ class ValidateAttributes(pyblish.api.ContextPlugin):

     @classmethod
     def get_invalid_attributes(cls, context):
-        presets = context.data["presets"]["maya"]["attributes"]
         invalid_attributes = []
         for instance in context:
             # Filter publisable instances.
@@ -53,23 +55,23 @@ class ValidateAttributes(pyblish.api.ContextPlugin):

             # Filter families.
             families = [instance.data["family"]]
             families += instance.data.get("families", [])
-            families = list(set(families) & set(presets.keys()))
+            families = list(set(families) & set(self.attributes.keys()))
             if not families:
                 continue

             # Get all attributes to validate.
             attributes = {}
             for family in families:
-                for preset in presets[family]:
+                for preset in self.attributes[family]:
                     [node_name, attribute_name] = preset.split(".")
                     try:
                         attributes[node_name].update(
-                            {attribute_name: presets[family][preset]}
+                            {attribute_name: self.attributes[family][preset]}
                         )
                     except KeyError:
                         attributes.update({
                             node_name: {
-                                attribute_name: presets[family][preset]
+                                attribute_name: self.attributes[family][preset]
                             }
                         })
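The try/except KeyError dance above collapses "node.attribute" keys into a per-node dict. A minimal standalone sketch of the same transformation; the preset contents are assumed example values:

# Sketch only: "node.attribute" keys grouped per node; setdefault is an
# equivalent, flatter form of the try/except pattern in the plugin.
presets = {"model": {"pCube1.visibility": True, "pCube1.castsShadows": False}}

attributes = {}
for family, family_presets in presets.items():
    for preset, value in family_presets.items():
        node_name, attribute_name = preset.split(".")
        attributes.setdefault(node_name, {})[attribute_name] = value

# -> {'pCube1': {'visibility': True, 'castsShadows': False}}
print(attributes)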
@@ -12,8 +12,6 @@ class ValidateDeadlineConnection(pyblish.api.ContextPlugin):

     order = pyblish.api.ValidatorOrder
     hosts = ["maya"]
     families = ["renderlayer"]
-    if not os.environ.get("DEADLINE_REST_URL"):
-        active = False

     def process(self, context):
@@ -21,14 +19,15 @@ class ValidateDeadlineConnection(pyblish.api.ContextPlugin):

         if not contextplugin_should_run(self, context):
             return

-        try:
-            DEADLINE_REST_URL = os.environ["DEADLINE_REST_URL"]
-        except KeyError:
-            self.log.error("Deadline REST API url not found.")
-            raise ValueError("Deadline REST API url not found.")
+        deadline_url = (
+            context.data["system_settings"]
+            ["modules"]
+            ["deadline"]
+            ["DEADLINE_REST_URL"]
+        )

         # Check response
-        response = self._requests_get(DEADLINE_REST_URL)
+        response = self._requests_get(deadline_url)
         assert response.ok, "Response must be ok"
         assert response.text.startswith("Deadline Web Service "), (
             "Web service did not respond with 'Deadline Web Service'"
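The health check boils down to a GET against the web service root. A minimal runnable sketch, with plain requests standing in for the plugin's _requests_get wrapper (the timeout value is an assumption):

# Sketch only: Deadline's web service answers its root URL with a
# banner starting "Deadline Web Service ", which the asserts verify.
import requests

def check_deadline(deadline_url):
    response = requests.get(deadline_url, timeout=10)
    assert response.ok, "Response must be ok"
    assert response.text.startswith("Deadline Web Service "), (
        "Web service did not respond with 'Deadline Web Service'"
    )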
@@ -58,7 +58,7 @@ class ValidateMusterConnection(pyblish.api.ContextPlugin):

     def _load_credentials(self):
         """
         Load Muster credentials from file and set `MUSTER_USER`,
-        `MUSTER_PASSWORD`, `MUSTER_REST_URL` is loaded from presets.
+        `MUSTER_PASSWORD`, `MUSTER_REST_URL` is loaded from settings.

         .. todo::
@@ -12,7 +12,7 @@ class NukeSubmitDeadline(pyblish.api.InstancePlugin):

     """Submit write to Deadline

     Renders are submitted to a Deadline Web Service as
-    supplied via the environment variable DEADLINE_REST_URL
+    supplied via settings key "DEADLINE_REST_URL".

     """
@@ -34,11 +34,15 @@ class NukeSubmitDeadline(pyblish.api.InstancePlugin):

         node = instance[0]
         context = instance.context

-        DEADLINE_REST_URL = os.environ.get("DEADLINE_REST_URL",
-                                           "http://localhost:8082")
-        assert DEADLINE_REST_URL, "Requires DEADLINE_REST_URL"
+        deadline_url = (
+            context.data["system_settings"]
+            ["modules"]
+            ["deadline"]
+            ["DEADLINE_REST_URL"]
+        )
+        assert deadline_url, "Requires DEADLINE_REST_URL"

-        self.deadline_url = "{}/api/jobs".format(DEADLINE_REST_URL)
+        self.deadline_url = "{}/api/jobs".format(deadline_url)
         self._comment = context.data.get("comment", "")
         self._ver = re.search(r"\d+\.\d+", context.data.get("hostVersion"))
         self._deadline_user = context.data.get(
@@ -9,8 +9,7 @@ class ValidateKnobs(pyblish.api.ContextPlugin):

     Knobs to validate and their values comes from the

-    Example for presets in config:
-    "presets/plugins/nuke/publish.json" preset, which needs this structure:
+    Controled by plugin settings that require json in following structure:
         "ValidateKnobs": {
             "enabled": true,
             "knobs": {
@@ -28,20 +27,6 @@ class ValidateKnobs(pyblish.api.ContextPlugin):

     optional = True

     def process(self, context):
-        nuke_presets = context.data["presets"].get("nuke")
-
-        if not nuke_presets:
-            return
-
-        publish_presets = nuke_presets.get("publish")
-
-        if not publish_presets:
-            return
-
-        plugin_preset = publish_presets.get("ValidateKnobs")
-
-        if not plugin_preset:
-            return
-
         invalid = self.get_invalid(context, compute=True)
         if invalid:
@@ -60,8 +45,7 @@ class ValidateKnobs(pyblish.api.ContextPlugin):

     @classmethod
     def get_invalid_knobs(cls, context):
         invalid_knobs = []
-        publish_presets = context.data["presets"]["nuke"]["publish"]
-        knobs_preset = publish_presets["ValidateKnobs"]["knobs"]

         for instance in context:
             # Filter publisable instances.
             if not instance.data["publish"]:
@@ -70,15 +54,15 @@ class ValidateKnobs(pyblish.api.ContextPlugin):

             # Filter families.
             families = [instance.data["family"]]
             families += instance.data.get("families", [])
-            families = list(set(families) & set(knobs_preset.keys()))
+            families = list(set(families) & set(self.knobs.keys()))
             if not families:
                 continue

             # Get all knobs to validate.
             knobs = {}
             for family in families:
-                for preset in knobs_preset[family]:
-                    knobs.update({preset: knobs_preset[family][preset]})
+                for preset in self.knobs[family]:
+                    knobs.update({preset: self.knobs[family][preset]})

             # Get invalid knobs.
             nodes = []
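The merge loop above flattens per-family knob settings into one dict, so a knob key defined by a later family silently overwrites an earlier one. A minimal sketch with assumed example values:

# Sketch only: later families win for duplicated knob keys; the knob
# names and values here are assumed examples, not real settings.
knobs_settings = {
    "render": {"tile_color": 0},
    "write": {"tile_color": 4278190335, "create_directories": True},
}

knobs = {}
for family in ["render", "write"]:
    for preset, value in knobs_settings.get(family, {}).items():
        knobs[preset] = value

print(knobs)  # "write" overrides tile_color from "render"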
@@ -104,6 +104,7 @@ class LoadImage(pipeline.Loader):

     def _remove_layers(self, layer_ids, layers=None):
         if not layer_ids:
+            self.log.warning("Got empty layer ids list.")
             return

         if layers is None:

@@ -117,6 +118,7 @@ class LoadImage(pipeline.Loader):

             layer_ids_to_remove.append(layer_id)

         if not layer_ids_to_remove:
+            self.log.warning("No layers to delete.")
             return

         george_script_lines = []
@@ -128,12 +130,14 @@ class LoadImage(pipeline.Loader):

     def remove(self, container):
         layer_ids = self.layer_ids_from_container(container)
+        self.log.warning("Layers to delete {}".format(layer_ids))
         self._remove_layers(layer_ids)

         current_containers = pipeline.ls()
         pop_idx = None
         for idx, cur_con in enumerate(current_containers):
             if cur_con["objectName"] == container["objectName"]:
+                cur_con_layer_ids = self.layer_ids_from_container(cur_con)
+                if cur_con_layer_ids == layer_ids:
                     pop_idx = idx
                     break
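Once pop_idx is found, the matched container is removed from the stored list and the list is written back. A minimal sketch of that bookkeeping; write_instances is a hypothetical persistence callback, not the loader's actual API:

# Sketch only: drop the matched container by index and persist the
# remaining list; returns the list unchanged when nothing matched.
def pop_container(containers, pop_idx, write_instances):
    if pop_idx is None:
        return containers
    containers.pop(pop_idx)
    write_instances(containers)
    return containers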
@@ -1,6 +1,8 @@

 import os
+import json

 import pyblish.api
 import avalon.api
 from avalon.tvpaint import pipeline, lib
@@ -10,26 +12,64 @@ class CollectWorkfileData(pyblish.api.ContextPlugin):

     hosts = ["tvpaint"]

     def process(self, context):
+        current_project_id = lib.execute_george("tv_projectcurrentid")
+        lib.execute_george("tv_projectselect {}".format(current_project_id))
+
+        # Collect and store current context to have reference
+        current_context = {
+            "project": avalon.api.Session["AVALON_PROJECT"],
+            "asset": avalon.api.Session["AVALON_ASSET"],
+            "task": avalon.api.Session["AVALON_TASK"]
+        }
+        context.data["previous_context"] = current_context
+        self.log.debug("Current context is: {}".format(current_context))
+
+        # Collect context from workfile metadata
+        self.log.info("Collecting workfile context")
+        workfile_context = pipeline.get_current_workfile_context()
+        if workfile_context:
+            # Change current context with context from workfile
+            key_map = (
+                ("AVALON_ASSET", "asset"),
+                ("AVALON_TASK", "task")
+            )
+            for env_key, key in key_map:
+                avalon.api.Session[env_key] = workfile_context[key]
+                os.environ[env_key] = workfile_context[key]
+        else:
+            # Handle older workfiles or workfiles without metadata
+            self.log.warning(
+                "Workfile does not contain information about context."
+                " Using current Session context."
+            )
+            workfile_context = current_context.copy()
+
+        context.data["workfile_context"] = workfile_context
+        self.log.info("Context changed to: {}".format(workfile_context))
+
         # Collect instances
         self.log.info("Collecting instance data from workfile")
         instance_data = pipeline.list_instances()
-        context.data["workfileInstances"] = instance_data
+        self.log.debug(
+            "Instance data:\"{}".format(json.dumps(instance_data, indent=4))
+        )
+        context.data["workfileInstances"] = instance_data

         # Collect information about layers
         self.log.info("Collecting layers data from workfile")
         layers_data = lib.layers_data()
-        context.data["layersData"] = layers_data
+        self.log.debug(
+            "Layers data:\"{}".format(json.dumps(layers_data, indent=4))
+        )
+        context.data["layersData"] = layers_data

         # Collect information about groups
         self.log.info("Collecting groups data from workfile")
         group_data = lib.groups_data()
-        context.data["groupsData"] = group_data
+        self.log.debug(
+            "Group data:\"{}".format(json.dumps(group_data, indent=4))
+        )
+        context.data["groupsData"] = group_data

         self.log.info("Collecting scene data from workfile")
         workfile_info_parts = lib.execute_george("tv_projectinfo").split(" ")
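The key_map loop above pushes asset and task from the workfile metadata back into both the Avalon Session and the process environment. A minimal standalone sketch, using a plain dict in place of avalon.api.Session so it runs anywhere; the context values are assumed examples:

# Sketch only: restore Session/environment keys from stored workfile
# metadata; "session" stands in for avalon.api.Session.
import os

session = {"AVALON_ASSET": "old_asset", "AVALON_TASK": "old_task"}
workfile_context = {"asset": "sh010", "task": "animation"}

key_map = (("AVALON_ASSET", "asset"), ("AVALON_TASK", "task"))
for env_key, key in key_map:
    session[env_key] = workfile_context[key]
    os.environ[env_key] = workfile_context[key]

print(session)  # both keys now mirror the workfile context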
@@ -0,0 +1,37 @@

import os
import pyblish.api


class ValidateWorkfileProjectName(pyblish.api.ContextPlugin):
    """Validate project name stored in workfile metadata.

    It is not possible to publish from a different project than is set in
    environment variable "AVALON_PROJECT".
    """

    label = "Validate Workfile Project Name"
    order = pyblish.api.ValidatorOrder

    def process(self, context):
        workfile_context = context.data["workfile_context"]
        workfile_project_name = workfile_context["project"]
        env_project_name = os.environ["AVALON_PROJECT"]
        if workfile_project_name == env_project_name:
            self.log.info((
                "Both workfile project and environment project are the same."
                " {}"
            ).format(env_project_name))
            return

        # Raise an error
        raise AssertionError((
            # Short message
            "Workfile from different Project ({})."
            # Description what's wrong
            " It is not possible to publish when TVPaint was launched in"
            " context of different project. Current context project is \"{}\"."
            " Launch TVPaint in context of project \"{}\" and then publish."
        ).format(
            workfile_project_name,
            env_project_name,
            workfile_project_name,
        ))
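The validator's core check compares the project stored in the workfile against the launch environment. A minimal runnable sketch of that comparison; the project names are assumed example values:

# Sketch only: publishing is blocked when the workfile's project does
# not match the AVALON_PROJECT the host was launched with.
import os

os.environ["AVALON_PROJECT"] = "projA"
workfile_project_name = "projB"

if workfile_project_name != os.environ["AVALON_PROJECT"]:
    raise AssertionError(
        "Workfile from different Project ({}).".format(workfile_project_name)
    )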
@@ -93,6 +93,10 @@

             "enabled": true,
             "note_with_intent_template": "",
             "note_labels": []
+        },
+        "ValidateFtrackAttributes": {
+            "enabled": false,
+            "ftrack_custom_attributes": {}
         }
     }
 }
@@ -94,8 +94,8 @@

         "deadline_department": "",
         "deadline_pool": "",
         "deadline_group": "",
-        "deadline_chunk_size": "",
-        "deadline_priority": "",
+        "deadline_chunk_size": 1,
+        "deadline_priority": 50,
         "aov_filter": {
             "maya": [
                 ".+(?:\\.|_)([Bb]eauty)(?:\\.|_).*"
@@ -111,7 +111,7 @@

         }
     },
     "tools": {
-        "Creator": {
+        "creator": {
             "families_smart_select": {
                 "Render": [
                     "light",
@@ -179,4 +179,4 @@

             }
         }
     }
 }
@@ -1,5 +1,5 @@

 {
-    "maya_capture": {
+    "capture": {
         "Codec": {
             "compression": "jpg",
             "format": "image",
@@ -107,9 +107,10 @@

             "overscan": 1.0
         }
     },
+    "ext_mapping": {},
     "publish": {
         "CollectMayaRender": {
-            "sync_workfile_version": true
+            "sync_workfile_version": false
         },
         "ValidateCameraAttributes": {
             "enabled": true,
@@ -134,6 +135,9 @@

         "ValidateMeshHasOverlappingUVs": {
             "enabled": false
         },
+        "ValidateAttributes": {
+            "enabled": false
+        },
         "ExtractCameraAlembic": {
             "enabled": true,
             "optional": true,
@@ -316,4 +320,4 @@

             "ValidateNoAnimation": false
         }
     }
 }
@@ -35,7 +35,7 @@

             ]
         }
     },
-    "ValidateNukeWriteKnobs": {
+    "ValidateKnobs": {
         "enabled": true,
         "knobs": {
             "render": {
@@ -87,4 +87,4 @@

         ]
     },
     "filters": {}
 }
@@ -123,4 +123,4 @@

             "help": "Script exported from matchmoving application"
         }
     }
 }
@@ -169,11 +169,11 @@

         "enabled": false,
         "workspace_name": "studio name"
     },
-    "Deadline": {
+    "deadline": {
         "enabled": true,
         "DEADLINE_REST_URL": "http://localhost:8082"
     },
-    "Muster": {
+    "muster": {
         "enabled": false,
         "MUSTER_REST_URL": "http://127.0.0.1:9890",
         "templates_mapping": {
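The module keys move from "Deadline"/"Muster" to lowercase here, which would orphan any stored overrides written under the old keys. A minimal sketch of a one-off migration; migrate_module_keys is a hypothetical helper, not part of this commit:

# Sketch only: rename legacy module keys in place, keeping any value
# already stored under the new lowercase key.
def migrate_module_keys(modules):
    for old_key, new_key in (("Deadline", "deadline"), ("Muster", "muster")):
        if old_key in modules and new_key not in modules:
            modules[new_key] = modules.pop(old_key)
    return modules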
@@ -6,261 +6,310 @@

"checkbox_key": "enabled",
"is_file": true,
"children": [
{
"type": "splitter"
},
{
"type": "label",
"label": "Additional Ftrack paths"
},
{
"type": "list",
"key": "ftrack_actions_path",
"label": "Action paths",
"object_type": "text"
},
{
"type": "list",
"key": "ftrack_events_path",
"label": "Event paths",
"object_type": "text"
},
{
"type": "splitter"
},
{
"type": "splitter"
},
{
"type": "label",
"label": "Additional Ftrack paths"
},
{
"type": "list",
"key": "ftrack_actions_path",
"label": "Action paths",
"object_type": "text"
},
{
"type": "list",
"key": "ftrack_events_path",
"label": "Event paths",
"object_type": "text"
},
{
"type": "splitter"
},
{
"type": "dict",
"key": "events",
"label": "Server Events",
"children": [
{
"type": "dict",
"key": "events",
"label": "Server Events",
"key": "sync_to_avalon",
"label": "Sync to avalon",
"checkbox_key": "enabled",
"children": [
{
"type": "dict",
"key": "sync_to_avalon",
"label": "Sync to avalon",
"checkbox_key": "enabled",
"children": [{
"type": "boolean",
"key": "enabled",
"label": "Enabled"
},
{
"type": "label",
"label": "Allow name and hierarchy change only if following statuses are on all children tasks"
},
{
"type": "list",
"key": "statuses_name_change",
"label": "Statuses",
"object_type": {
"type": "text",
"multiline": false
}
}
]
},
{
"type": "dict",
"key": "push_frame_values_to_task",
"label": "Sync Hierarchical and Entity Attributes",
"checkbox_key": "enabled",
"children": [{
"type": "boolean",
"key": "enabled",
"label": "Enabled"
}, {
"type": "list",
"key": "interest_entity_types",
"label": "Entity types of interest",
"object_type": {
"type": "text",
"multiline": false
}
}, {
"type": "list",
"key": "interest_attributess",
"label": "Attributes to sync",
"object_type": {
"type": "text",
"multiline": false
}
}]
},
{
"type": "dict",
"key": "thumbnail_updates",
"label": "Update Hierarchy thumbnails",
"checkbox_key": "enabled",
"children": [{
"type": "boolean",
"key": "enabled",
"label": "Enabled"
},{
"type": "label",
"label": "Push thumbnail from version, up through multiple hierarchy levels."
},{
"type": "number",
"key": "levels",
"label": "Levels"
}]
},
{
"type": "dict",
"key": "user_assignment",
"label": "Run script on user assignments",
"checkbox_key": "enabled",
"children": [{
"type": "boolean",
"key": "enabled",
"label": "Enabled"
}]
},
{
"type": "boolean",
"key": "enabled",
"label": "Enabled"
},
{
"type": "label",
"label": "Allow name and hierarchy change only if following statuses are on all children tasks"
},
{
"type": "list",
"key": "statuses_name_change",
"label": "Statuses",
"object_type":
{
"type": "dict",
"key": "status_update",
"label": "Update status on task action",
"checkbox_key": "enabled",
"children": [{
"type": "boolean",
"key": "enabled",
"label": "Enabled"
},
{
"key": "mapping",
"type": "dict-modifiable",
"object_type": {
"type": "list",
"object_type": "text"
}
}]
},
{
"type": "dict",
"key": "status_task_to_parent",
"label": "Sync status from Task to Parent",
"checkbox_key": "enabled",
"children": [{
"type": "boolean",
"key": "enabled",
"label": "Enabled"
},
{
"key": "parent_status_match_all_task_statuses",
"type": "dict-modifiable",
"label": "Change parent if all tasks match",
"object_type": {
"type": "list",
"object_type": "text"
}
},
{
"key": "parent_status_by_task_status",
"type": "dict-modifiable",
"label": "Change parent status if a single task matches",
"object_type": {
"type": "list",
"object_type": "text"
}
}]
},
{
"type": "dict",
"key": "status_task_to_version",
"label": "Sync status from Task to Version",
"checkbox_key": "enabled",
"children": [{
"type": "boolean",
"key": "enabled",
"label": "Enabled"
}, {
"type": "dict-modifiable",
"key": "mapping",
"object_type": {
"type": "list",
"object_type": "text"
}
}]
},
{
"type": "dict",
"key": "status_version_to_task",
"label": "Sync status from Version to Task",
"checkbox_key": "enabled",
"children": [{
"type": "boolean",
"key": "enabled",
"label": "Enabled"
}, {
"type": "dict-modifiable",
"key": "mapping",
"object_type": {
"type": "list",
"object_type": "text"
}
}]
},
{
"type": "dict",
"key": "first_version_status",
"label": "Set status on first created version",
"checkbox_key": "enabled",
"children": [{
"type": "boolean",
"key": "enabled",
"label": "Enabled"
},{
"type": "text",
"key": "status",
"label": "Status"
}
]
},
{
"type": "dict",
"key": "next_task_update",
"label": "Update status on next task",
"checkbox_key": "enabled",
"children": [{
"type": "boolean",
"key": "enabled",
"label": "Enabled"
},{
"type": "dict-modifiable",
"key": "mapping",
"object_type": {
"type": "text"
}
}]
"type": "text",
"multiline": false
}
]
}]
},
{
"type": "dict",
"collapsable": true,
"key": "publish",
"label": "Publish plugins",
"is_file": true,
"children": [{
"key": "push_frame_values_to_task",
"label": "Sync Hierarchical and Entity Attributes",
"checkbox_key": "enabled",
"children": [
{
"type": "boolean",
"key": "enabled",
"label": "Enabled"
},
{
"type": "list",
"key": "interest_entity_types",
"label": "Entity types of interest",
"object_type":
{
"type": "text",
"multiline": false
}
},
{
"type": "list",
"key": "interest_attributess",
"label": "Attributes to sync",
"object_type":
{
"type": "text",
"multiline": false
}
}]
},
{
"type": "dict",
"key": "thumbnail_updates",
"label": "Update Hierarchy thumbnails",
"checkbox_key": "enabled",
"children": [
{
"type": "boolean",
"key": "enabled",
"label": "Enabled"
},
{
"type": "label",
"label": "Push thumbnail from version, up through multiple hierarchy levels."
},
{
"type": "number",
"key": "levels",
"label": "Levels"
}]
},
{
"type": "dict",
"key": "user_assignment",
"label": "Run script on user assignments",
"checkbox_key": "enabled",
"children": [
{
"type": "boolean",
"key": "enabled",
"label": "Enabled"
}]
},
{
"type": "dict",
"key": "status_update",
"label": "Update status on task action",
"checkbox_key": "enabled",
"children": [
{
"type": "boolean",
"key": "enabled",
"label": "Enabled"
},
{
"key": "mapping",
"type": "dict-modifiable",
"object_type":
{
"type": "list",
"object_type": "text"
}
}]
},
{
"type": "dict",
"key": "status_task_to_parent",
"label": "Sync status from Task to Parent",
"checkbox_key": "enabled",
"children": [
{
"type": "boolean",
"key": "enabled",
"label": "Enabled"
},
{
"key": "parent_status_match_all_task_statuses",
"type": "dict-modifiable",
"label": "Change parent if all tasks match",
"object_type":
{
"type": "list",
"object_type": "text"
}
},
{
"key": "parent_status_by_task_status",
"type": "dict-modifiable",
"label": "Change parent status if a single task matches",
"object_type":
{
"type": "list",
"object_type": "text"
}
}]
},
{
"type": "dict",
"key": "status_task_to_version",
"label": "Sync status from Task to Version",
"checkbox_key": "enabled",
"children": [
{
"type": "boolean",
"key": "enabled",
"label": "Enabled"
},
{
"type": "dict-modifiable",
"key": "mapping",
"object_type":
{
"type": "list",
"object_type": "text"
}
}]
},
{
"type": "dict",
"key": "status_version_to_task",
"label": "Sync status from Version to Task",
"checkbox_key": "enabled",
"children": [
{
"type": "boolean",
"key": "enabled",
"label": "Enabled"
},
{
"type": "dict-modifiable",
"key": "mapping",
"object_type":
{
"type": "list",
"object_type": "text"
}
}]
},
{
"type": "dict",
"key": "first_version_status",
"label": "Set status on first created version",
"checkbox_key": "enabled",
"children": [
{
"type": "boolean",
"key": "enabled",
"label": "Enabled"
},
{
"type": "text",
"key": "status",
"label": "Status"
}]
},
{
"type": "dict",
"key": "next_task_update",
"label": "Update status on next task",
"checkbox_key": "enabled",
"children": [
{
"type": "boolean",
"key": "enabled",
"label": "Enabled"
},
{
"type": "dict-modifiable",
"key": "mapping",
"object_type":
{
"type": "text"
}
}]
}]
},
{
"type": "dict",
"collapsable": true,
"key": "publish",
"label": "Publish plugins",
"is_file": true,
"children": [
{
"type": "dict",
"collapsable": true,
"checkbox_key": "enabled",
"key": "IntegrateFtrackNote",
"label": "IntegrateFtrackNote",
"is_group": true,
"children": [{
"children": [
{
"type": "boolean",
"key": "enabled",
"label": "Enabled"
}, {
},
{
"type": "text",
"key": "note_with_intent_template",
"label": "Note with intent template"
}, {
},
{
"type": "list",
"object_type": "text",
"key": "note_labels",
"label": "Note labels"
}]
}]
}
]
},

{
"type": "dict",
"collapsable": true,
"checkbox_key": "enabled",
"key": "ValidateFtrackAttributes",
"label": "ValidateFtrackAttributes",
"is_group": true,
"children": [
{
"type": "boolean",
"key": "enabled",
"label": "Enabled"
},
{
"type": "raw-json",
"key": "ftrack_custom_attributes",
"label": "Custom attributes to validate"
}]
}

]
}]
}
@@ -57,8 +57,8 @@

             "type": "dict",
             "collapsable": true,
             "checkbox_key": "enabled",
-            "key": "ValidateNukeWriteKnobs",
-            "label": "ValidateNukeWriteKnobs",
+            "key": "ValidateKnobs",
+            "label": "ValidateKnobs",
             "is_group": true,
             "children": [{
                 "type": "boolean",
@@ -344,12 +344,12 @@

             "label": "Deadline Group"
         },
         {
-            "type": "text",
+            "type": "number",
             "key": "deadline_chunk_size",
             "label": "Deadline Chunk Size"
         },
         {
-            "type": "text",
+            "type": "number",
             "key": "deadline_priority",
             "label": "Deadline Priotity"
         },
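Switching these schema entries from "text" to "number" matches the defaults change earlier in this commit (chunk size 1, priority 50). A minimal sketch of why the typed schema matters downstream; the payload is an assumed example:

# Sketch only: with "type": "number" the stored value round-trips as an
# int, so submit code can use it directly instead of casting strings.
import json

payload = json.loads('{"deadline_chunk_size": 1, "deadline_priority": 50}')
assert isinstance(payload["deadline_chunk_size"], int)
assert isinstance(payload["deadline_priority"], int)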
@@ -7,7 +7,7 @@

         {
             "type": "dict",
             "collapsable": true,
-            "key": "Creator",
+            "key": "creator",
             "label": "Creator",
             "children": [
                 {
File diff suppressed because it is too large
@@ -123,6 +123,25 @@

                 "label": "Enabled"
             }]
         },
+        {
+            "type": "dict",
+            "collapsable": true,
+            "key": "ValidateAttributes",
+            "label": "ValidateAttributes",
+            "checkbox_key": "enabled",
+            "children": [
+                {
+                    "type": "boolean",
+                    "key": "enabled",
+                    "label": "Enabled"
+                },
+                {
+                    "type": "raw-json",
+                    "key": "attributes",
+                    "label": "Attributes"
+                }
+            ]
+        },
         {
             "type": "splitter"
         },
@@ -100,7 +100,7 @@

         ]
     }, {
         "type": "dict",
-        "key": "Deadline",
+        "key": "deadline",
         "label": "Deadline",
         "collapsable": true,
         "checkbox_key": "enabled",
@@ -115,7 +115,7 @@

         }]
     }, {
         "type": "dict",
-        "key": "Muster",
+        "key": "muster",
         "label": "Muster",
         "collapsable": true,
         "checkbox_key": "enabled",
@@ -126,7 +126,7 @@

     }, {
         "type": "text",
         "key": "MUSTER_REST_URL",
-        "label": "Muster Resl URL"
+        "label": "Muster Rest URL"
     }, {
         "type": "dict-modifiable",
         "object_type": {
@@ -1,13 +1,19 @@

+/* :root {
+    --border-color-: #464b54;
+}
+*/
+
+
 QWidget {
     color: #bfccd6;
-    background-color: #293742;
+    background-color: #282C34;
     font-size: 12px;
     border-radius: 0px;
 }

 QMenu {
     border: 1px solid #555555;
-    background-color: #1d272f;
+    background-color: #21252B;
 }

 QMenu::item {
@@ -26,24 +32,24 @@ QCheckBox::indicator {}

 QCheckBox::indicator:focus {}

 QLineEdit, QSpinBox, QDoubleSpinBox, QPlainTextEdit, QTextEdit {
-    border: 1px solid #aaaaaa;
+    border: 1px solid #464b54;
     border-radius: 3px;
-    background-color: #1d272f;
+    background-color: #21252B;
 }

 QLineEdit:disabled, QSpinBox:disabled, QDoubleSpinBox:disabled, QPlainTextEdit:disabled, QTextEdit:disabled, QPushButton:disabled {
-    background-color: #4e6474;
+    background-color: #464b54;
 }

 QLineEdit:focus, QSpinBox:focus, QDoubleSpinBox:focus, QPlainTextEdit:focus, QTextEdit:focus {
-    border: 1px solid #ffffff;
+    border: 1px solid #839caf;
 }

 QComboBox {
-    border: 1px solid #aaaaaa;
+    border: 1px solid #464b54;
     border-radius: 3px;
     padding: 2px 2px 4px 4px;
-    background: #1d272f;
+    background: #21252B;
 }

 QComboBox QAbstractItemView::item {
@@ -56,25 +62,25 @@ QToolButton {

 QLabel {
     background: transparent;
-    color: #7390a5;
+    color: #969b9e;
 }
-QLabel:hover {color: #839caf;}
+QLabel:hover {color: #b8c1c5;}

-QLabel[state="studio"] {color: #bfccd6;}
+QLabel[state="studio"] {color: #73C990;}
 QLabel[state="studio"]:hover {color: #ffffff;}
-QLabel[state="modified"] {color: #137cbd;}
-QLabel[state="modified"]:hover {color: #1798e8;}
-QLabel[state="overriden-modified"] {color: #137cbd;}
-QLabel[state="overriden-modified"]:hover {color: #1798e8;}
+QLabel[state="modified"] {color: #189aea;}
+QLabel[state="modified"]:hover {color: #46b1f3;}
+QLabel[state="overriden-modified"] {color: #189aea;}
+QLabel[state="overriden-modified"]:hover {color: #46b1f3;}
 QLabel[state="overriden"] {color: #ff8c1a;}
 QLabel[state="overriden"]:hover {color: #ffa64d;}
 QLabel[state="invalid"] {color: #ad2e2e;}
 QLabel[state="invalid"]:hover {color: #ad2e2e;}


-QWidget[input-state="studio"] {border-color: #bfccd6;}
-QWidget[input-state="modified"] {border-color: #137cbd;}
-QWidget[input-state="overriden-modified"] {border-color: #137cbd;}
+QWidget[input-state="studio"] {border-color: #858a94;}
+QWidget[input-state="modified"] {border-color: #189aea;}
+QWidget[input-state="overriden-modified"] {border-color: #189aea;}
 QWidget[input-state="overriden"] {border-color: #ff8c1a;}
 QWidget[input-state="invalid"] {border-color: #ad2e2e;}
@@ -84,7 +90,9 @@ QPushButton {

     padding: 5px;
 }
 QPushButton:hover {
-    background-color: #31424e;
+    background-color: #333840;
+    border: 1px solid #fff;
+    color: #fff;
 }
 QPushButton[btn-type="tool-item"] {
     border: 1px solid #bfccd6;
@@ -92,8 +100,8 @@ QPushButton[btn-type="tool-item"] {

 }

 QPushButton[btn-type="tool-item"]:hover {
-    border-color: #137cbd;
-    color: #137cbd;
+    border-color: #189aea;
+    color: #46b1f3;
     background-color: transparent;
 }
@@ -103,16 +111,16 @@ QPushButton[btn-type="tool-item-icon"] {

 }

 QPushButton[btn-type="expand-toggle"] {
-    background: #1d272f;
+    background: #21252B;
 }

 #GroupWidget {
-    border-bottom: 1px solid #1d272f;
+    border-bottom: 1px solid #21252B;
 }

 #ProjectListWidget QListView {
-    border: 1px solid #aaaaaa;
-    background: #1d272f;
+    border: 1px solid #464b54;
+    background: #21252B;
 }
 #ProjectListWidget QLabel {
     background: transparent;
@@ -123,8 +131,8 @@ QPushButton[btn-type="expand-toggle"] {

     font-size: 12px;
 }

-#DictKey[state="studio"] {border-color: #bfccd6;}
-#DictKey[state="modified"] {border-color: #137cbd;}
+#DictKey[state="studio"] {border-color: #464b54;}
+#DictKey[state="modified"] {border-color: #189aea;}
 #DictKey[state="overriden"] {border-color: #00f;}
 #DictKey[state="overriden-modified"] {border-color: #0f0;}
 #DictKey[state="invalid"] {border-color: #ad2e2e;}
@@ -141,9 +149,9 @@ QPushButton[btn-type="expand-toggle"] {

 }

 #SideLineWidget {
-    background-color: #31424e;
+    background-color: #333942;
     border-style: solid;
-    border-color: #3b4f5e;
+    border-color: #4e5254;
     border-left-width: 3px;
     border-bottom-width: 0px;
     border-right-width: 0px;
@@ -151,14 +159,14 @@ QPushButton[btn-type="expand-toggle"] {

 }

 #SideLineWidget:hover {
-    border-color: #58768d;
+    border-color: #7d8386;
 }

-#SideLineWidget[state="child-studio"] {border-color: #455c6e;}
-#SideLineWidget[state="child-studio"]:hover {border-color: #62839d;}
+#SideLineWidget[state="child-studio"] {border-color: #56a06f;}
+#SideLineWidget[state="child-studio"]:hover {border-color: #73C990;}

 #SideLineWidget[state="child-modified"] {border-color: #106aa2;}
-#SideLineWidget[state="child-modified"]:hover {border-color: #137cbd;}
+#SideLineWidget[state="child-modified"]:hover {border-color: #189aea;}

 #SideLineWidget[state="child-invalid"] {border-color: #ad2e2e;}
 #SideLineWidget[state="child-invalid"]:hover {border-color: #c93636;}
@@ -167,7 +175,7 @@ QPushButton[btn-type="expand-toggle"] {

 #SideLineWidget[state="child-overriden"]:hover {border-color: #ff8c1a;}

 #SideLineWidget[state="child-overriden-modified"] {border-color: #106aa2;}
-#SideLineWidget[state="child-overriden-modified"]:hover {border-color: #137cbd;}
+#SideLineWidget[state="child-overriden-modified"]:hover {border-color: #189aea;}

 #MainWidget {
     background: #141a1f;
@@ -177,12 +185,12 @@ QPushButton[btn-type="expand-toggle"] {

     background: transparent;
 }
 #DictAsWidgetBody[show_borders="1"] {
-    border: 2px solid #cccccc;
+    border: 1px solid #4e5254;
     border-radius: 5px;
 }

 #SplitterItem {
-    background-color: #1d272f;
+    background-color: #21252B;
 }

 QTabWidget::pane {
@@ -200,18 +208,18 @@ QTabBar::tab {

 }

 QTabBar::tab:selected {
-    background: #293742;
+    background: #282C34;
     border-color: #9B9B9B;
     border-bottom-color: #C2C7CB;
 }

 QTabBar::tab:!selected {
     margin-top: 2px;
-    background: #1d272f;
+    background: #21252B;
 }

 QTabBar::tab:!selected:hover {
-    background: #3b4f5e;
+    background: #333840;
 }
@@ -231,13 +239,13 @@ QTabBar::tab:only-one {

 QScrollBar:horizontal {
     height: 15px;
     margin: 3px 15px 3px 15px;
-    border: 1px transparent #1d272f;
+    border: 1px transparent #21252B;
     border-radius: 4px;
-    background-color: #1d272f;
+    background-color: #21252B;
 }

 QScrollBar::handle:horizontal {
-    background-color: #61839e;
+    background-color: #4B5362;
     min-width: 5px;
     border-radius: 4px;
 }
@@ -285,15 +293,15 @@ QScrollBar::add-page:horizontal, QScrollBar::sub-page:horizontal {

 }

 QScrollBar:vertical {
-    background-color: #1d272f;
+    background-color: #21252B;
     width: 15px;
     margin: 15px 3px 15px 3px;
-    border: 1px transparent #1d272f;
+    border: 1px transparent #21252B;
     border-radius: 4px;
 }

 QScrollBar::handle:vertical {
-    background-color: #61839e;
+    background-color: #4B5362;
     min-height: 5px;
     border-radius: 4px;
 }
@@ -412,7 +412,6 @@ class ProjectListWidget(QtWidgets.QWidget):

         self.setObjectName("ProjectListWidget")

         label_widget = QtWidgets.QLabel("Projects")
        label_widget.setProperty("state", "studio")
         project_list = ProjectListView(self)
         project_list.setModel(QtGui.QStandardItemModel())