[Automated] Merged develop into main

pypebot 2021-07-28 05:37:39 +02:00 committed by GitHub
commit 146ceb3020
39 changed files with 1369 additions and 108 deletions


@ -0,0 +1,28 @@
import subprocess
from openpype.lib import PreLaunchHook
class LaunchFoundryAppsWindows(PreLaunchHook):
"""Foundry applications have specific way how to launch them.
Nuke is executed "like" python process so it is required to pass
`CREATE_NEW_CONSOLE` flag on windows to trigger creation of new console.
At the same time the newly created console won't create it's own stdout
and stderr handlers so they should not be redirected to DEVNULL.
"""
# Should run as the last hook because it modifies subprocess kwargs
order = 1000
app_groups = ["nuke", "nukex", "hiero", "nukestudio"]
platforms = ["windows"]
def execute(self):
# Change `creationflags` to CREATE_NEW_CONSOLE
# - on Windows Nuke will create a new window using its own console
# Set `stdout` and `stderr` to None so the newly created console does not
# have its output redirected to DEVNULL in the build
self.launch_context.kwargs.update({
"creationflags": subprocess.CREATE_NEW_CONSOLE,
"stdout": None,
"stderr": None
})
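For context, a minimal sketch of the Popen call this hook effectively produces on Windows (the executable path is hypothetical):

import subprocess

args = ["C:/Program Files/Nuke12.2v3/Nuke12.2.exe", "--nukex"]
subprocess.Popen(
    args,
    creationflags=subprocess.CREATE_NEW_CONSOLE,  # open a new console window
    stdout=None,  # keep the parent's handles, no DEVNULL redirection
    stderr=None,
)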


@ -49,5 +49,7 @@ class NonPythonHostHook(PreLaunchHook):
if remainders:
self.launch_context.launch_args.extend(remainders)
# This must be set, otherwise it wouldn't be possible to catch the output
# when an OpenPype build is used.
self.launch_context.kwargs["stdout"] = subprocess.DEVNULL
self.launch_context.kwargs["stderr"] = subprocess.STDOUT
self.launch_context.kwargs["stderr"] = subprocess.DEVNULL


@ -1,44 +0,0 @@
import os
import subprocess
from openpype.lib import PreLaunchHook
class LaunchWithWindowsShell(PreLaunchHook):
"""Add shell command before executable.
Some hosts have issues when are launched directly from python in that case
it is possible to prepend shell executable which will trigger process
instead.
"""
# Should be the last hook because it must change launch arguments to a string
order = 1000
app_groups = ["nuke", "nukex", "hiero", "nukestudio"]
platforms = ["windows"]
def execute(self):
launch_args = self.launch_context.clear_launch_args(
self.launch_context.launch_args)
new_args = [
# Get comspec which is cmd.exe in most cases.
os.environ.get("COMSPEC", "cmd.exe"),
# NOTE change to "/k" if want to keep console opened
"/c",
# Convert arguments to a command line string
"\"{}\"".format(
subprocess.list2cmdline(launch_args)
)
]
# Convert list to string
# WARNING this only works when it is used as a string
args_string = " ".join(new_args)
self.log.info((
"Modified launch arguments to be launched with shell \"{}\"."
).format(args_string))
# Replace launch args with new one
self.launch_context.launch_args = args_string
# Change `creationflags` to CREATE_NEW_CONSOLE
self.launch_context.kwargs["creationflags"] = (
subprocess.CREATE_NEW_CONSOLE
)
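For reference, a sketch of the command string this removed hook produced (hypothetical path): with launch_args ["C:\\Program Files\\Nuke12.2v3\\Nuke12.2.exe"], subprocess.list2cmdline quotes the path because of the space, and the extra "\"{}\"" wrapping doubles the quotes:

cmd.exe /c ""C:\Program Files\Nuke12.2v3\Nuke12.2.exe""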


@ -1739,3 +1739,68 @@ def process_workfile_builder():
log.info("Opening last workfile...")
# open workfile
open_file(last_workfile_path)
def recreate_instance(origin_node, avalon_data=None):
"""Recreate input instance to different data
Args:
origin_node (nuke.Node): Nuke node to be recreating from
avalon_data (dict, optional): data to be used in new node avalon_data
Returns:
nuke.Node: newly created node
"""
knobs_wl = ["render", "publish", "review", "ypos",
"use_limit", "first", "last"]
# get data from avalon knobs
data = anlib.get_avalon_knob_data(
origin_node)
# add input data to avalon data
if avalon_data:
data.update(avalon_data)
# capture all node knobs allowed by the knobs_wl whitelist
knobs_data = {k: origin_node[k].value()
for k in origin_node.knobs()
for key in knobs_wl
if key in k}
# get node dependencies
inputs = origin_node.dependencies()
outputs = origin_node.dependent()
# remove the node
nuke.delete(origin_node)
# create new node
# get appropriate plugin class
creator_plugin = None
for Creator in api.discover(api.Creator):
if Creator.__name__ == data["creator"]:
creator_plugin = Creator
break
# create write node with creator
new_node_name = data["subset"]
new_node = creator_plugin(new_node_name, data["asset"]).process()
# white listed knobs to the new node
for _k, _v in knobs_data.items():
try:
print(_k, _v)
new_node[_k].setValue(_v)
except Exception as e:
print(e)
# connect to original inputs
for i, n in enumerate(inputs):
new_node.setInput(i, n)
# connect to outputs
if len(outputs) > 0:
for dn in outputs:
dn.setInput(0, new_node)
return new_node
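A short usage sketch (node and asset names are hypothetical) - this mirrors how the repair action later in this changeset calls it:

import nuke
origin_node = nuke.toNode("texturesMain_Write")
new_node = recreate_instance(
    origin_node, avalon_data={"asset": "sh010_chair"})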


@ -181,7 +181,8 @@ class CollectInstances(pyblish.api.InstancePlugin):
}
})
for subset, properities in self.subsets.items():
if properities["version"] == 0:
version = properities.get("version")
if version is not None and version == 0:
properities.pop("version")
# adding Review-able instance


@ -0,0 +1,456 @@
import os
import re
import pyblish.api
import json
from avalon.api import format_template_with_optional_keys
from openpype.lib import prepare_template_data
class CollectTextures(pyblish.api.ContextPlugin):
"""Collect workfile (and its resource_files) and textures.
Currently implements a use case with Mari and Substance Painter, where
one workfile is the main one (.mra - Mari) with possible additional
workfiles (.spp - Substance Painter).
Provides:
1 instance per workfile (with 'resources' filled if needed)
(workfile family)
1 instance per group of textures
(textures family)
"""
order = pyblish.api.CollectorOrder
label = "Collect Textures"
hosts = ["standalonepublisher"]
families = ["texture_batch"]
actions = []
# from presets
main_workfile_extensions = ['mra']
other_workfile_extensions = ['spp', 'psd']
texture_extensions = ["exr", "dpx", "jpg", "jpeg", "png", "tiff", "tga",
"gif", "svg"]
# additional families (ftrack etc.)
workfile_families = []
textures_families = []
color_space = ["linsRGB", "raw", "acesg"]
# currently implemented placeholders ["color_space"]
# describing patterns in file names split into regex groups
input_naming_patterns = {
# workfile: corridorMain_v001.mra >
# texture: corridorMain_aluminiumID_v001_baseColor_linsRGB_1001.exr
"workfile": r'^([^.]+)(_[^_.]*)?_v([0-9]{3,}).+',
"textures": r'^([^_.]+)_([^_.]+)_v([0-9]{3,})_([^_.]+)_({color_space})_(1[0-9]{3}).+', # noqa
}
# matching regex group position to 'input_naming_patterns'
input_naming_groups = {
"workfile": ('asset', 'filler', 'version'),
"textures": ('asset', 'shader', 'version', 'channel', 'color_space',
'udim')
}
workfile_subset_template = "textures{Subset}Workfile"
# implemented keys: ["color_space", "channel", "subset", "shader"]
texture_subset_template = "textures{Subset}_{Shader}_{Channel}"
def process(self, context):
self.context = context
resource_files = {}
workfile_files = {}
representations = {}
version_data = {}
asset_builds = set()
asset = None
for instance in context:
if not self.input_naming_patterns:
raise ValueError("Naming patterns are not configured. \n"
"Ask admin to provide naming conventions "
"for workfiles and textures.")
if not asset:
asset = instance.data["asset"] # selected from SP
parsed_subset = instance.data["subset"].replace(
instance.data["family"], '')
fill_pairs = {
"subset": parsed_subset
}
fill_pairs = prepare_template_data(fill_pairs)
workfile_subset = format_template_with_optional_keys(
fill_pairs, self.workfile_subset_template)
processed_instance = False
for repre in instance.data["representations"]:
ext = repre["ext"].replace('.', '')
asset_build = version = None
if isinstance(repre["files"], list):
repre_file = repre["files"][0]
else:
repre_file = repre["files"]
if ext in self.main_workfile_extensions or \
ext in self.other_workfile_extensions:
asset_build = self._get_asset_build(
repre_file,
self.input_naming_patterns["workfile"],
self.input_naming_groups["workfile"],
self.color_space
)
version = self._get_version(
repre_file,
self.input_naming_patterns["workfile"],
self.input_naming_groups["workfile"],
self.color_space
)
asset_builds.add((asset_build, version,
workfile_subset, 'workfile'))
processed_instance = True
if not representations.get(workfile_subset):
representations[workfile_subset] = []
if ext in self.main_workfile_extensions:
# workfiles can have only a single representation
# currently OP does not support different extensions in
# representation files
representations[workfile_subset] = [repre]
workfile_files[asset_build] = repre_file
if ext in self.other_workfile_extensions:
# add only if not added already from main
if not representations.get(workfile_subset):
representations[workfile_subset] = [repre]
# only overwrite if not present
if not workfile_files.get(asset_build):
workfile_files[asset_build] = repre_file
if not resource_files.get(workfile_subset):
resource_files[workfile_subset] = []
item = {
"files": [os.path.join(repre["stagingDir"],
repre["files"])],
"source": "standalone publisher"
}
resource_files[workfile_subset].append(item)
if ext in self.texture_extensions:
c_space = self._get_color_space(
repre_file,
self.color_space
)
channel = self._get_channel_name(
repre_file,
self.input_naming_patterns["textures"],
self.input_naming_groups["textures"],
self.color_space
)
shader = self._get_shader_name(
repre_file,
self.input_naming_patterns["textures"],
self.input_naming_groups["textures"],
self.color_space
)
formatting_data = {
"color_space": c_space or '', # None throws exception
"channel": channel or '',
"shader": shader or '',
"subset": parsed_subset or ''
}
fill_pairs = prepare_template_data(formatting_data)
subset = format_template_with_optional_keys(
fill_pairs, self.texture_subset_template)
asset_build = self._get_asset_build(
repre_file,
self.input_naming_patterns["textures"],
self.input_naming_groups["textures"],
self.color_space
)
version = self._get_version(
repre_file,
self.input_naming_patterns["textures"],
self.input_naming_groups["textures"],
self.color_space
)
if not representations.get(subset):
representations[subset] = []
representations[subset].append(repre)
ver_data = {
"color_space": c_space or '',
"channel_name": channel or '',
"shader_name": shader or ''
}
version_data[subset] = ver_data
asset_builds.add(
(asset_build, version, subset, "textures"))
processed_instance = True
if processed_instance:
self.context.remove(instance)
self._create_new_instances(context,
asset,
asset_builds,
resource_files,
representations,
version_data,
workfile_files)
def _create_new_instances(self, context, asset, asset_builds,
resource_files, representations,
version_data, workfile_files):
"""Prepare new instances from collected data.
Args:
context (ContextPlugin)
asset (string): selected asset from SP
asset_builds (set) of tuples
(asset_build, version, subset, family)
resource_files (list) of resource dicts - to store additional
files to main workfile
representations (list) of dicts - to store workfile info OR
all collected texture files, key is asset_build
version_data (dict) - prepared to store into version doc in DB
workfile_files (dict) - to store workfile to add to textures
key is asset_build
"""
# sort workfile first
asset_builds = sorted(asset_builds,
key=lambda tup: tup[3], reverse=True)
# workfile must have a version, textures might not
main_version = None
for asset_build, version, subset, family in asset_builds:
if not main_version:
main_version = version
new_instance = context.create_instance(subset)
new_instance.data.update(
{
"subset": subset,
"asset": asset,
"label": subset,
"name": subset,
"family": family,
"version": int(version or main_version or 1),
"asset_build": asset_build # remove in validator
}
)
workfile = workfile_files.get(asset_build)
if resource_files.get(subset):
# add resources only when workfile is main style
for ext in self.main_workfile_extensions:
if ext in workfile:
new_instance.data.update({
"resources": resource_files.get(subset)
})
break
# store origin
if family == 'workfile':
families = self.workfile_families
new_instance.data["source"] = "standalone publisher"
else:
families = self.textures_families
repre = representations.get(subset)[0]
new_instance.context.data["currentFile"] = os.path.join(
repre["stagingDir"], workfile or 'dummy.txt')
new_instance.data["families"] = families
# add data for version document
ver_data = version_data.get(subset)
if ver_data:
if workfile:
ver_data['workfile'] = workfile
new_instance.data.update(
{"versionData": ver_data}
)
upd_representations = representations.get(subset)
if upd_representations and family != 'workfile':
upd_representations = self._update_representations(
upd_representations)
new_instance.data["representations"] = upd_representations
self.log.debug("new instance - {}:: {}".format(
family,
json.dumps(new_instance.data, indent=4)))
def _get_asset_build(self, name,
input_naming_patterns, input_naming_groups,
color_spaces):
"""Loops through configured workfile patterns to find asset name.
Asset name used to bind workfile and its textures.
Args:
name (str): workfile name
input_naming_patterns (list):
[workfile_pattern] or [texture_pattern]
input_naming_groups (list)
ordinal position of regex groups matching to input_naming..
color_spaces (list) - predefined color spaces
"""
asset_name = "NOT_AVAIL"
return self._parse(name, input_naming_patterns, input_naming_groups,
color_spaces, 'asset') or asset_name
def _get_version(self, name, input_naming_patterns, input_naming_groups,
color_spaces):
found = self._parse(name, input_naming_patterns, input_naming_groups,
color_spaces, 'version')
if found:
return found.replace('v', '')
self.log.info("No version found in the name {}".format(name))
def _get_udim(self, name, input_naming_patterns, input_naming_groups,
color_spaces):
"""Parses from 'name' udim value."""
found = self._parse(name, input_naming_patterns, input_naming_groups,
color_spaces, 'udim')
if found:
return found
self.log.warning("Didn't find UDIM in {}".format(name))
def _get_color_space(self, name, color_spaces):
"""Looks for color_space from a list in a file name.
Color space seems not to be recognizable by regex pattern, set of
known space spaces must be provided.
"""
color_space = None
found = [cs for cs in color_spaces if
re.search("_{}_".format(cs), name)]
if not found:
self.log.warning("No color space found in {}".format(name))
else:
if len(found) > 1:
msg = "Multiple color spaces found in {}->{}".format(name,
found)
self.log.warning(msg)
color_space = found[0]
return color_space
def _get_shader_name(self, name, input_naming_patterns,
input_naming_groups, color_spaces):
"""Return parsed shader name.
Shader name is needed for overlapping udims (eg. udims might be
used for different materials, shader needed to not overwrite).
Unknown format of channel name and color spaces >> cs are known
list - 'color_space' used as a placeholder
"""
found = self._parse(name, input_naming_patterns, input_naming_groups,
color_spaces, 'shader')
if found:
return found
self.log.warning("Didn't find shader in {}".format(name))
def _get_channel_name(self, name, input_naming_patterns,
input_naming_groups, color_spaces):
"""Return parsed channel name.
Unknown format of channel name and color spaces >> cs are known
list - 'color_space' used as a placeholder
"""
found = self._parse(name, input_naming_patterns, input_naming_groups,
color_spaces, 'channel')
if found:
return found
self.log.warning("Didn't find channel in {}".format(name))
def _parse(self, name, input_naming_patterns, input_naming_groups,
color_spaces, key):
"""Universal way to parse 'name' with configurable regex groups.
Args:
name (str): workfile name
input_naming_patterns (list):
[workfile_pattern] or [texture_pattern]
input_naming_groups (list)
ordinal position of regex groups matching to input_naming..
color_spaces (list) - predefined color spaces
Raises:
ValueError - if broken 'input_naming_groups'
"""
for input_pattern in input_naming_patterns:
for cs in color_spaces:
pattern = input_pattern.replace('{color_space}', cs)
regex_result = re.findall(pattern, name)
if regex_result:
groups = list(input_naming_groups)
if key not in groups:
msg = "input_naming_groups must " +\
"have '{}' key".format(key)
raise ValueError(msg)
idx = groups.index(key)
try:
parsed_value = regex_result[0][idx]
return parsed_value
except IndexError:
self.log.warning("Wrong index, probably "
"wrong name {}".format(name))
def _update_representations(self, upd_representations):
"""Frames dont have sense for textures, add collected udims instead."""
udims = []
for repre in upd_representations:
repre.pop("frameStart", None)
repre.pop("frameEnd", None)
repre.pop("fps", None)
# ignore unique name from SP, use extension instead
# SP enforces unique name, here different subsets >> unique repres
repre["name"] = repre["ext"].replace('.', '')
files = repre.get("files", [])
if not isinstance(files, list):
files = [files]
for file_name in files:
udim = self._get_udim(file_name,
self.input_naming_patterns["textures"],
self.input_naming_groups["textures"],
self.color_space)
udims.append(udim)
repre["udim"] = udims # must be this way, used for filling path
return upd_representations
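A minimal sketch of how the texture subset name is composed, assuming prepare_template_data adds capitalized variants of the keys (as the {Subset} placeholder implies); the values are hypothetical:

from avalon.api import format_template_with_optional_keys
from openpype.lib import prepare_template_data

fill_pairs = prepare_template_data({
    "subset": "main",         # parsed from the SP instance
    "shader": "aluminiumID",  # parsed from the file name
    "channel": "baseColor",
})
subset = format_template_with_optional_keys(
    fill_pairs, "textures{Subset}_{Shader}_{Channel}")
# e.g. "texturesMain_AluminiumID_BaseColor"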


@ -0,0 +1,42 @@
import os
import pyblish.api
class ExtractResources(pyblish.api.InstancePlugin):
"""
Extracts files from instance.data["resources"].
These files are additional (textures etc.), currently not stored in
representations!
Expects collected 'resourcesDir' and 'resources' (a list of dicts with a
'files' key holding lists of source urls).
Provides filled 'transfers' (list of (source_url, target_url) tuples).
"""
label = "Extract Resources SP"
hosts = ["standalonepublisher"]
order = pyblish.api.ExtractorOrder
families = ["workfile"]
def process(self, instance):
if not instance.data.get("resources"):
self.log.info("No resources")
return
if not instance.data.get("transfers"):
instance.data["transfers"] = []
publish_dir = instance.data["resourcesDir"]
transfers = []
for resource in instance.data["resources"]:
for file_url in resource.get("files", []):
file_name = os.path.basename(file_url)
dest_url = os.path.join(publish_dir, file_name)
transfers.append((file_url, dest_url))
self.log.info("transfers:: {}".format(transfers))
instance.data["transfers"].extend(transfers)


@ -0,0 +1,43 @@
import os
import pyblish.api
class ExtractWorkfileUrl(pyblish.api.ContextPlugin):
"""
Modifies the 'workfile' field to contain a link to the published workfile.
Expects that the batch contains only a single workfile and matching
(multiple) textures.
"""
label = "Extract Workfile Url SP"
hosts = ["standalonepublisher"]
order = pyblish.api.ExtractorOrder
families = ["textures"]
def process(self, context):
filepath = None
# first loop for workfile
for instance in context:
if instance.data["family"] == 'workfile':
anatomy = context.data['anatomy']
template_data = instance.data.get("anatomyData")
rep_name = instance.data.get("representations")[0].get("name")
template_data["representation"] = rep_name
template_data["ext"] = rep_name
anatomy_filled = anatomy.format(template_data)
template_filled = anatomy_filled["publish"]["path"]
filepath = os.path.normpath(template_filled)
self.log.info("Using published scene for render {}".format(
filepath))
if not filepath:
self.log.info("Texture batch doesn't contain workfile.")
return
# then apply to all textures
for instance in context:
if instance.data["family"] == 'textures':
instance.data["versionData"]["workfile"] = filepath


@ -0,0 +1,22 @@
import pyblish.api
import openpype.api
class ValidateTextureBatch(pyblish.api.InstancePlugin):
"""Validates that some texture files are present."""
label = "Validate Texture Presence"
hosts = ["standalonepublisher"]
order = openpype.api.ValidateContentsOrder
families = ["workfile"]
optional = False
def process(self, instance):
present = False
for context_instance in instance.context:
if context_instance.data["family"] == "textures":
present = True
self.log.info("Some textures present.")
break
assert present, "No textures found in published batch!"


@ -0,0 +1,20 @@
import pyblish.api
import openpype.api
class ValidateTextureHasWorkfile(pyblish.api.InstancePlugin):
"""Validates that textures have appropriate workfile attached.
Workfile is optional, disable this Validator after Refresh if you are
sure it is not needed.
"""
label = "Validate Texture Has Workfile"
hosts = ["standalonepublisher"]
order = openpype.api.ValidateContentsOrder
families = ["textures"]
optional = True
def process(self, instance):
wfile = instance.data["versionData"].get("workfile")
assert wfile, "Textures are missing attached workfile"


@ -0,0 +1,50 @@
import pyblish.api
import openpype.api
class ValidateTextureBatchNaming(pyblish.api.InstancePlugin):
"""Validates that all instances had properly formatted name."""
label = "Validate Texture Batch Naming"
hosts = ["standalonepublisher"]
order = openpype.api.ValidateContentsOrder
families = ["workfile", "textures"]
optional = False
def process(self, instance):
file_name = instance.data["representations"][0]["files"]
if isinstance(file_name, list):
file_name = file_name[0]
msg = "Couldnt find asset name in '{}'\n".format(file_name) + \
"File name doesn't follow configured pattern.\n" + \
"Please rename the file."
assert "NOT_AVAIL" not in instance.data["asset_build"], msg
instance.data.pop("asset_build")
if instance.data["family"] == "textures":
file_name = instance.data["representations"][0]["files"][0]
self._check_proper_collected(instance.data["versionData"],
file_name)
def _check_proper_collected(self, versionData, file_name):
"""
Loop through collected versionData to check if name parsing was OK.
Args:
versionData (dict)
file_name (str)
Raises:
AssertionError
"""
missing_key_values = []
for key, value in versionData.items():
if not value:
missing_key_values.append(key)
msg = "Collected data {} doesn't contain values for {}".format(
versionData, missing_key_values) + "\n" + \
"Name of the texture file doesn't match expected pattern.\n" + \
"Please rename file(s) {}".format(file_name)
assert not missing_key_values, msg


@ -0,0 +1,38 @@
import pyblish.api
import openpype.api
class ValidateTextureBatchVersions(pyblish.api.InstancePlugin):
"""Validates that versions match in workfile and textures.
Workfile is optional, so if you are sure, you can disable this
validator after Refresh.
Validates that only a single version is published at a time.
"""
label = "Validate Texture Batch Versions"
hosts = ["standalonepublisher"]
order = openpype.api.ValidateContentsOrder
families = ["textures"]
optional = False
def process(self, instance):
wfile = instance.data["versionData"].get("workfile")
version_str = "v{:03d}".format(instance.data["version"])
if not wfile: # no matching workfile, do not check versions
self.log.info("No workfile present for textures")
return
msg = "Not matching version: texture v{:03d} - workfile {}"
assert version_str in wfile, \
msg.format(
instance.data["version"], wfile
)
present_versions = set()
for context_instance in instance.context:
present_versions.add(context_instance.data["version"])
assert len(present_versions) == 1, "Too many versions in a batch!"


@ -0,0 +1,29 @@
import pyblish.api
import openpype.api
class ValidateTextureBatchWorkfiles(pyblish.api.InstancePlugin):
"""Validates that textures workfile has collected resources (optional).
Collected recourses means secondary workfiles (in most cases).
"""
label = "Validate Texture Workfile Has Resources"
hosts = ["standalonepublisher"]
order = openpype.api.ValidateContentsOrder
families = ["workfile"]
optional = True
# from presets
main_workfile_extensions = ['mra']
def process(self, instance):
if instance.data["family"] == "workfile":
ext = instance.data["representations"][0]["ext"]
if ext not in self.main_workfile_extensions:
self.log.warning("Only secondary workfile present!")
return
msg = "No secondary workfiles present for workfile {}".\
format(instance.data["name"])
assert instance.data.get("resources"), msg


@ -155,6 +155,7 @@ class CollectWorkfileData(pyblish.api.ContextPlugin):
"sceneMarkInState": mark_in_state == "set",
"sceneMarkOut": int(mark_out_frame),
"sceneMarkOutState": mark_out_state == "set",
"sceneStartFrame": int(lib.execute_george("tv_startframe")),
"sceneBgColor": self._get_bg_color()
}
self.log.debug(


@ -49,6 +49,14 @@ class ExtractSequence(pyblish.api.Extractor):
family_lowered = instance.data["family"].lower()
mark_in = instance.context.data["sceneMarkIn"]
mark_out = instance.context.data["sceneMarkOut"]
# Scene start frame offsets the output files, so we need to offset the
# marks.
scene_start_frame = instance.context.data["sceneStartFrame"]
difference = scene_start_frame - mark_in
mark_in += difference
mark_out += difference
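# Example with assumed values: sceneStartFrame=5, sceneMarkIn=2,
# sceneMarkOut=10 -> difference=3, marks become 5..13, matching the
# numbering of the files TVPaint writes from its real start frame.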
# Frame start/end may be stored as float
frame_start = int(instance.data["frameStart"])
frame_end = int(instance.data["frameEnd"])
@ -98,7 +106,7 @@ class ExtractSequence(pyblish.api.Extractor):
self.log.warning((
"Lowering representation range to {} frames."
" Changed frame end {} -> {}"
).format(output_range + 1, mark_out, new_mark_out))
).format(output_range + 1, mark_out, new_output_frame_end))
output_frame_end = new_output_frame_end
# -------------------------------------------------------------------


@ -0,0 +1,27 @@
import pyblish.api
from avalon.tvpaint import lib
class RepairStartFrame(pyblish.api.Action):
"""Repair start frame."""
label = "Repair"
icon = "wrench"
on = "failed"
def process(self, context, plugin):
lib.execute_george("tv_startframe 0")
class ValidateStartFrame(pyblish.api.ContextPlugin):
"""Validate start frame being at frame 0."""
label = "Validate Start Frame"
order = pyblish.api.ValidatorOrder
hosts = ["tvpaint"]
actions = [RepairStartFrame]
optional = True
def process(self, context):
start_frame = lib.execute_george("tv_startframe")
assert int(start_frame) == 0, "Start frame has to be frame 0."


@ -1,4 +1,5 @@
import os
import sys
import re
import copy
import json
@ -708,6 +709,10 @@ class ApplicationLaunchContext:
)
self.kwargs["creationflags"] = flags
if not sys.stdout:
self.kwargs["stdout"] = subprocess.DEVNULL
self.kwargs["stderr"] = subprocess.DEVNULL
self.prelaunch_hooks = None
self.postlaunch_hooks = None


@ -1,6 +1,8 @@
import json
from avalon.api import AvalonMongoDB
from openpype.api import ProjectSettings
from openpype.lib import create_project
from openpype.modules.ftrack.lib import (
ServerAction,
@ -21,8 +23,24 @@ class PrepareProjectServer(ServerAction):
role_list = ["Pypeclub", "Administrator", "Project Manager"]
# Key to store info about triggering create folder structure
settings_key = "prepare_project"
item_splitter = {"type": "label", "value": "---"}
_keys_order = (
"fps",
"frameStart",
"frameEnd",
"handleStart",
"handleEnd",
"clipIn",
"clipOut",
"resolutionHeight",
"resolutionWidth",
"pixelAspect",
"applications",
"tools_env",
"library_project",
)
def discover(self, session, entities, event):
"""Show only on project."""
@ -47,13 +65,7 @@ class PrepareProjectServer(ServerAction):
project_entity = entities[0]
project_name = project_entity["full_name"]
try:
project_settings = ProjectSettings(project_name)
except ValueError:
return {
"message": "Project is not synchronized yet",
"success": False
}
project_settings = ProjectSettings(project_name)
project_anatom_settings = project_settings["project_anatomy"]
root_items = self.prepare_root_items(project_anatom_settings)
@ -78,14 +90,13 @@ class PrepareProjectServer(ServerAction):
items.extend(ca_items)
# This item will be last (before enumerators)
# - sets value of auto synchronization
auto_sync_name = "avalon_auto_sync"
# This item will be last before enumerators
# Set value of auto synchronization
auto_sync_value = project_entity["custom_attributes"].get(
CUST_ATTR_AUTO_SYNC, False
)
auto_sync_item = {
"name": auto_sync_name,
"name": CUST_ATTR_AUTO_SYNC,
"type": "boolean",
"value": auto_sync_value,
"label": "AutoSync to Avalon"
@ -199,7 +210,18 @@ class PrepareProjectServer(ServerAction):
str([key for key in attributes_to_set])
))
for key, in_data in attributes_to_set.items():
attribute_keys = set(attributes_to_set.keys())
keys_order = []
for key in self._keys_order:
if key in attribute_keys:
keys_order.append(key)
attribute_keys = attribute_keys - set(keys_order)
for key in sorted(attribute_keys):
keys_order.append(key)
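# Example with hypothetical attributes: preset keys from _keys_order
# come first (e.g. "fps", "frameStart"), any remaining custom
# attributes follow alphabetically (e.g. "customA", "customB").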
for key in keys_order:
in_data = attributes_to_set[key]
attr = in_data["object"]
# initial item definition
@ -225,7 +247,7 @@ class PrepareProjectServer(ServerAction):
multiselect_enumerators.append(self.item_splitter)
multiselect_enumerators.append({
"type": "label",
"value": in_data["label"]
"value": "<h3>{}</h3>".format(in_data["label"])
})
default = in_data["default"]
@ -286,10 +308,10 @@ class PrepareProjectServer(ServerAction):
return items, multiselect_enumerators
def launch(self, session, entities, event):
if not event['data'].get('values', {}):
in_data = event["data"].get("values")
if not in_data:
return
in_data = event['data']['values']
root_values = {}
root_key = "__root__"
@ -337,7 +359,27 @@ class PrepareProjectServer(ServerAction):
self.log.debug("Setting Custom Attribute values")
project_name = entities[0]["full_name"]
project_entity = entities[0]
project_name = project_entity["full_name"]
# Try to find project document
dbcon = AvalonMongoDB()
dbcon.install()
dbcon.Session["AVALON_PROJECT"] = project_name
project_doc = dbcon.find_one({
"type": "project"
})
# Create project if it is not available
# - creation is required to be able to set project anatomy and attributes
if not project_doc:
project_code = project_entity["name"]
self.log.info("Creating project \"{} [{}]\"".format(
project_name, project_code
))
create_project(project_name, project_code, dbcon=dbcon)
dbcon.uninstall()
project_settings = ProjectSettings(project_name)
project_anatomy_settings = project_settings["project_anatomy"]
project_anatomy_settings["roots"] = root_data
@ -352,10 +394,12 @@ class PrepareProjectServer(ServerAction):
project_settings.save()
entity = entities[0]
for key, value in custom_attribute_values.items():
entity["custom_attributes"][key] = value
self.log.debug("- Key \"{}\" set to \"{}\"".format(key, value))
# Change custom attributes on project
if custom_attribute_values:
for key, value in custom_attribute_values.items():
project_entity["custom_attributes"][key] = value
self.log.debug("- Key \"{}\" set to \"{}\"".format(key, value))
session.commit()
return True


@ -1259,7 +1259,7 @@ class SyncToAvalonEvent(BaseEvent):
self.process_session,
entity,
hier_attrs,
self.cust_attr_types_by_id
self.cust_attr_types_by_id.values()
)
for key, val in hier_values.items():
output[key] = val


@ -25,6 +25,8 @@ class PrepareProjectLocal(BaseAction):
settings_key = "prepare_project"
# Key to store info about triggering create folder structure
create_project_structure_key = "create_folder_structure"
create_project_structure_identifier = "create.project.structure"
item_splitter = {"type": "label", "value": "---"}
_keys_order = (
"fps",
@ -90,14 +92,12 @@ class PrepareProjectLocal(BaseAction):
items.extend(ca_items)
# This item will be last (before enumerators)
# - sets value of auto synchronization
auto_sync_name = "avalon_auto_sync"
# Set value of auto synchronization
auto_sync_value = project_entity["custom_attributes"].get(
CUST_ATTR_AUTO_SYNC, False
)
auto_sync_item = {
"name": auto_sync_name,
"name": CUST_ATTR_AUTO_SYNC,
"type": "boolean",
"value": auto_sync_value,
"label": "AutoSync to Avalon"
@ -105,6 +105,27 @@ class PrepareProjectLocal(BaseAction):
# Add autosync attribute
items.append(auto_sync_item)
# This item will be last before enumerators
# Ask if the user wants to trigger the Create Folder Structure action
create_project_structure_checked = (
project_settings
["project_settings"]
["ftrack"]
["user_handlers"]
["prepare_project"]
["create_project_structure_checked"]
).value
items.append({
"type": "label",
"value": "<h3>Want to create basic Folder Structure?</h3>"
})
items.append({
"name": self.create_project_structure_key,
"type": "boolean",
"value": create_project_structure_checked,
"label": "Check if Yes"
})
# Add enumerator items at the end
for item in multiselect_enumerators:
items.append(item)
@ -248,7 +269,7 @@ class PrepareProjectLocal(BaseAction):
multiselect_enumerators.append(self.item_splitter)
multiselect_enumerators.append({
"type": "label",
"value": in_data["label"]
"value": "<h3>{}</h3>".format(in_data["label"])
})
default = in_data["default"]
@ -309,10 +330,13 @@ class PrepareProjectLocal(BaseAction):
return items, multiselect_enumerators
def launch(self, session, entities, event):
if not event['data'].get('values', {}):
in_data = event["data"].get("values")
if not in_data:
return
in_data = event['data']['values']
create_project_structure_checked = in_data.pop(
self.create_project_structure_key
)
root_values = {}
root_key = "__root__"
@ -395,11 +419,18 @@ class PrepareProjectLocal(BaseAction):
project_settings.save()
entity = entities[0]
for key, value in custom_attribute_values.items():
entity["custom_attributes"][key] = value
self.log.debug("- Key \"{}\" set to \"{}\"".format(key, value))
# Change custom attributes on project
if custom_attribute_values:
for key, value in custom_attribute_values.items():
project_entity["custom_attributes"][key] = value
self.log.debug("- Key \"{}\" set to \"{}\"".format(key, value))
session.commit()
# Trigger create project structure action
if create_project_structure_checked:
self.trigger_action(
self.create_project_structure_identifier, event
)
return True


@ -380,7 +380,12 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
test_dest_files = list()
for i in [1, 2]:
template_data["frame"] = src_padding_exp % i
template_data["representation"] = repre['ext']
if not repre.get("udim"):
template_data["frame"] = src_padding_exp % i
else:
template_data["udim"] = src_padding_exp % i
anatomy_filled = anatomy.format(template_data)
template_filled = anatomy_filled[template_name]["path"]
if repre_context is None:
@ -388,7 +393,10 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
test_dest_files.append(
os.path.normpath(template_filled)
)
template_data["frame"] = repre_context["frame"]
if not repre.get("udim"):
template_data["frame"] = repre_context["frame"]
else:
template_data["udim"] = repre_context["udim"]
self.log.debug(
"test_dest_files: {}".format(str(test_dest_files)))
@ -453,7 +461,9 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
dst_start_frame = dst_padding
# Store used frame value to template data
template_data["frame"] = dst_start_frame
if repre.get("frame"):
template_data["frame"] = dst_start_frame
dst = "{0}{1}{2}".format(
dst_head,
dst_start_frame,
@ -476,6 +486,10 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
"Given file name is a full path"
)
template_data["representation"] = repre['ext']
# Store used frame value to template data
if repre.get("udim"):
template_data["udim"] = repre["udim"][0]
src = os.path.join(stagingdir, fname)
anatomy_filled = anatomy.format(template_data)
template_filled = anatomy_filled[template_name]["path"]
@ -488,6 +502,9 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
repre['published_path'] = dst
self.log.debug("__ dst: {}".format(dst))
if repre.get("udim"):
repre_context["udim"] = repre.get("udim") # store list
repre["publishedFiles"] = published_files
for key in self.db_representation_context_keys:
@ -1045,6 +1062,7 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
)
)
shutil.copy(file_url, new_name)
os.remove(file_url)
else:
self.log.debug(
"Renaming file {} to {}".format(


@ -92,15 +92,16 @@ class RepairSelectInvalidInstances(pyblish.api.Action):
context_asset = context.data["assetEntity"]["name"]
for instance in instances:
self.set_attribute(instance, context_asset)
if "nuke" in pyblish.api.registered_hosts():
import openpype.hosts.nuke.api as nuke_api
origin_node = instance[0]
nuke_api.lib.recreate_instance(
origin_node, avalon_data={"asset": context_asset}
)
else:
self.set_attribute(instance, context_asset)
def set_attribute(self, instance, context_asset):
if "nuke" in pyblish.api.registered_hosts():
import nuke
nuke.toNode(
instance.data.get("name")
)["avalon:asset"].setValue(context_asset)
if "maya" in pyblish.api.registered_hosts():
from maya import cmds
cmds.setAttr(


@ -17,7 +17,7 @@
},
"publish": {
"folder": "{root[work]}/{project[name]}/{hierarchy}/{asset}/publish/{family}/{subset}/{@version}",
"file": "{project[code]}_{asset}_{subset}_{@version}<_{output}><.{@frame}>.{ext}",
"file": "{project[code]}_{asset}_{subset}_{@version}<_{output}><.{@frame}><_{udim}>.{ext}",
"path": "{@folder}/{@file}",
"thumbnail": "{thumbnail_root}/{project[name]}/{_id}_{thumbnail_type}.{ext}"
},


@ -136,7 +136,8 @@
"Pypeclub",
"Administrator",
"Project manager"
]
],
"create_project_structure_checked": false
},
"clean_hierarchical_attr": {
"enabled": true,


@ -123,6 +123,16 @@
],
"help": "Process multiple Mov files and publish them for layout and comp."
},
"create_texture_batch": {
"name": "texture_batch",
"label": "Texture Batch",
"family": "texture_batch",
"icon": "image",
"defaults": [
"Main"
],
"help": "Texture files with UDIM together with worfile"
},
"__dynamic_keys_labels__": {
"create_workfile": "Workfile",
"create_model": "Model",
@ -134,10 +144,65 @@
"create_image": "Image",
"create_matchmove": "Matchmove",
"create_render": "Render",
"create_mov_batch": "Batch Mov"
"create_mov_batch": "Batch Mov",
"create_texture_batch": "Batch Texture"
}
},
"publish": {
"CollectTextures": {
"enabled": true,
"active": true,
"main_workfile_extensions": [
"mra"
],
"other_workfile_extensions": [
"spp",
"psd"
],
"texture_extensions": [
"exr",
"dpx",
"jpg",
"jpeg",
"png",
"tiff",
"tga",
"gif",
"svg"
],
"workfile_families": [],
"texture_families": [],
"color_space": [
"linsRGB",
"raw",
"acesg"
],
"input_naming_patterns": {
"workfile": [
"^([^.]+)(_[^_.]*)?_v([0-9]{3,}).+"
],
"textures": [
"^([^_.]+)_([^_.]+)_v([0-9]{3,})_([^_.]+)_({color_space})_(1[0-9]{3}).+"
]
},
"input_naming_groups": {
"workfile": [
"asset",
"filler",
"version"
],
"textures": [
"asset",
"shader",
"version",
"channel",
"color_space",
"udim"
]
},
"workfile_subset_template": "textures{Subset}Workfile",
"texture_subset_template": "textures{Subset}_{Shader}_{Channel}"
},
"ValidateSceneSettings": {
"enabled": true,
"optional": true,


@ -18,6 +18,11 @@
"optional": true,
"active": true
},
"ValidateStartFrame": {
"enabled": false,
"optional": true,
"active": true
},
"ValidateAssetName": {
"enabled": true,
"optional": true,


@ -1,5 +1,6 @@
from .dict_immutable_keys_entity import DictImmutableKeysEntity
from .lib import OverrideState
from .exceptions import EntitySchemaError
class AnatomyEntity(DictImmutableKeysEntity):
@ -23,3 +24,25 @@ class AnatomyEntity(DictImmutableKeysEntity):
if not child_obj.has_project_override:
child_obj.add_to_project_override()
return super(AnatomyEntity, self).on_child_change(child_obj)
def schema_validations(self):
non_group_children = []
for key, child_obj in self.non_gui_children.items():
if not child_obj.is_group:
non_group_children.append(key)
if non_group_children:
_non_group_children = [
"project_anatomy/{}".format(key)
for key in non_group_children
]
reason = (
"Anatomy must have all children as groups."
" Set 'is_group' to `true` on > {}"
).format(", ".join([
'"{}"'.format(item)
for item in _non_group_children
]))
raise EntitySchemaError(self, reason)
return super(AnatomyEntity, self).schema_validations()


@ -577,6 +577,15 @@ How output of the schema could look like on save:
}
```
## Anatomy
Anatomy represents data stored on the project document.
### anatomy
- entity works similarly to `dict`
- anatomy always has all keys overridden with overrides
- overrides are not applied, as all anatomy data must be available from the project document
- all children must be groups
## Proxy wrappers
- should wrap multiple inputs only visually
- these do not have a `"key"` key and do not allow the `"is_file"` or `"is_group"` modifiers to be enabled


@ -441,6 +441,18 @@
"key": "role_list",
"label": "Roles",
"object_type": "text"
},
{
"type": "separator"
},
{
"type": "label",
"label": "Check \"Create project structure\" by default"
},
{
"type": "boolean",
"key": "create_project_structure_checked",
"label": "Checked"
}
]
},


@ -56,6 +56,119 @@
"key": "publish",
"label": "Publish plugins",
"children": [
{
"type": "dict",
"collapsible": true,
"key": "CollectTextures",
"label": "Collect Textures",
"checkbox_key": "enabled",
"children": [
{
"type": "boolean",
"key": "enabled",
"label": "Enabled"
},
{
"type": "boolean",
"key": "active",
"label": "Active"
},
{
"type": "list",
"key": "main_workfile_extensions",
"object_type": "text",
"label": "Main workfile extensions"
},
{
"key": "other_workfile_extensions",
"label": "Support workfile extensions",
"type": "list",
"object_type": "text"
},
{
"type": "list",
"key": "texture_extensions",
"object_type": "text",
"label": "Texture extensions"
},
{
"type": "list",
"key": "workfile_families",
"object_type": "text",
"label": "Additional families for workfile"
},
{
"type": "list",
"key": "texture_families",
"object_type": "text",
"label": "Additional families for textures"
},
{
"type": "list",
"key": "color_space",
"object_type": "text",
"label": "Color spaces"
},
{
"type": "dict",
"collapsible": false,
"key": "input_naming_patterns",
"label": "Regex patterns for naming conventions",
"children": [
{
"type": "label",
"label": "Add regex groups matching expected name"
},
{
"type": "list",
"object_type": "text",
"key": "workfile",
"label": "Workfile naming pattern"
},
{
"type": "list",
"object_type": "text",
"key": "textures",
"label": "Textures naming pattern"
}
]
},
{
"type": "dict",
"collapsible": false,
"key": "input_naming_groups",
"label": "Group order for regex patterns",
"children": [
{
"type": "label",
"label": "Add names of matched groups in correct order. Available values: ('filler', 'asset', 'shader', 'version', 'channel', 'color_space', 'udim')"
},
{
"type": "list",
"object_type": "text",
"key": "workfile",
"label": "Workfile group positions"
},
{
"type": "list",
"object_type": "text",
"key": "textures",
"label": "Textures group positions"
}
]
},
{
"type": "text",
"key": "workfile_subset_template",
"label": "Subset name template for workfile"
},
{
"type": "text",
"key": "texture_subset_template",
"label": "Subset name template for textures"
}
]
},
{
"type": "dict",
"collapsible": true,


@ -52,6 +52,17 @@
}
]
},
{
"type": "schema_template",
"name": "template_publish_plugin",
"template_data": [
{
"key": "ValidateStartFrame",
"label": "Validate Scene Start Frame",
"docstring": "Validate first frame of scene is set to '0'."
}
]
},
{
"type": "schema_template",
"name": "template_publish_plugin",


@ -3,6 +3,7 @@
"key": "imageio",
"label": "Color Management and Output Formats",
"is_file": true,
"is_group": true,
"children": [
{
"key": "hiero",
@ -14,7 +15,6 @@
"type": "dict",
"label": "Workfile",
"collapsible": false,
"is_group": true,
"children": [
{
"type": "form",
@ -89,7 +89,6 @@
"type": "dict",
"label": "Colorspace on Inputs by regex detection",
"collapsible": true,
"is_group": true,
"children": [
{
"type": "list",
@ -124,7 +123,6 @@
"type": "dict",
"label": "Viewer",
"collapsible": false,
"is_group": true,
"children": [
{
"type": "text",
@ -138,7 +136,6 @@
"type": "dict",
"label": "Workfile",
"collapsible": false,
"is_group": true,
"children": [
{
"type": "form",
@ -236,7 +233,6 @@
"type": "dict",
"label": "Nodes",
"collapsible": true,
"is_group": true,
"children": [
{
"key": "requiredNodes",
@ -339,7 +335,6 @@
"type": "dict",
"label": "Colorspace on Inputs by regex detection",
"collapsible": true,
"is_group": true,
"children": [
{
"type": "list",


@ -315,14 +315,28 @@ class DuplicatedEnvGroups(Exception):
super(DuplicatedEnvGroups, self).__init__(msg)
def load_openpype_default_settings():
"""Load openpype default settings."""
return load_jsons_from_dir(DEFAULTS_DIR)
def reset_default_settings():
"""Reset cache of default settings. Can't be used now."""
global _DEFAULT_SETTINGS
_DEFAULT_SETTINGS = None
def get_default_settings():
"""Get default settings.
Todo:
Cache loaded defaults.
Returns:
dict: Loaded default settings.
"""
# TODO add cacher
return load_jsons_from_dir(DEFAULTS_DIR)
return load_openpype_default_settings()
# global _DEFAULT_SETTINGS
# if _DEFAULT_SETTINGS is None:
# _DEFAULT_SETTINGS = load_jsons_from_dir(DEFAULTS_DIR)
@ -868,6 +882,25 @@ def get_environments():
return find_environments(get_system_settings(False))
def get_general_environments():
"""Get general environments.
Function is implemented to be able to load general environments without
using `get_default_settings`.
"""
# Use only openpype defaults.
# - prevents using `get_system_settings` where `get_default_settings`
#   is used
default_values = load_openpype_default_settings()
studio_overrides = get_studio_system_settings_overrides()
result = apply_overrides(default_values, studio_overrides)
environments = result["general"]["environment"]
clear_metadata_from_settings(environments)
return environments
def clear_metadata_from_settings(values):
"""Remove all metadata keys from loaded settings."""
if isinstance(values, dict):


@ -211,7 +211,8 @@ class DropDataFrame(QtWidgets.QFrame):
folder_path = os.path.dirname(collection.head)
if file_base[-1] in ['.', '_']:
file_base = file_base[:-1]
file_ext = collection.tail
file_ext = os.path.splitext(
collection.format('{head}{padding}{tail}'))[1]
repr_name = file_ext.replace('.', '')
range = collection.format('{ranges}')

@ -1 +1 @@
Subproject commit d8be0bdb37961e32243f1de0eb9696e86acf7443
Subproject commit cfd4191e364b47de7364096f45d9d9d9a901692a


@ -208,14 +208,21 @@ def set_openpype_global_environments() -> None:
"""Set global OpenPype's environments."""
import acre
from openpype.settings import get_environments
try:
from openpype.settings import get_general_environments
all_env = get_environments()
general_env = get_general_environments()
except Exception:
# Backwards compatibility for OpenPype versions where
# `get_general_environments` does not exist yet
from openpype.settings import get_environments
all_env = get_environments()
general_env = all_env["global"]
# TODO Global environments will be stored in "general" settings so loading
# will be modified and can be done in igniter.
env = acre.merge(
acre.parse(all_env["global"]),
acre.parse(general_env),
dict(os.environ)
)
os.environ.clear()

Binary image file added (14 KiB) - not shown.


@ -0,0 +1,98 @@
---
id: settings_project_standalone
title: Project Standalone Publisher Settings
sidebar_label: Standalone Publisher
---
import Tabs from '@theme/Tabs';
import TabItem from '@theme/TabItem';
Project settings can have project specific values. Each new project uses the studio values defined in the **default** project, but these values can be modified or overridden per project.
:::warning Default studio values
Projects always use default project values unless they have a [project override](../admin_settings#project-overrides) (orange colour). Any changes in the default project may affect all existing projects.
:::
## Creator Plugins
Contains a list of implemented families to show in the middle menu of Standalone Publisher. Each plugin must contain:
- name
- label
- family
- icon
- default subset(s)
- help (additional short information about family)
![example of creator plugin](assets/standalone_creators.png)
## Publish plugins
### Collect Textures
Serves to collect all needed information about workfiles and the textures created from them. It allows publishing of the
main workfile (for example from Mari), additional workfiles (from Substance Painter) and exported textures.
Available configuration:
- Main workfile extension - only a single workfile can be the "main" one
- Support workfile extensions - additional workfiles will be published to the same folder as the "main" one, just under a `resources` subfolder
- Texture extensions - which formats are expected for textures
- Additional families for workfile - whether any family ('ftrack', 'review') should be added to the published workfile
- Additional families for textures - whether any family ('ftrack', 'review') should be added to the published textures
#### Naming conventions
Implementation tries to be flexible and cover multiple naming conventions for workfiles and textures.
##### Workfile naming pattern
Provide a regex matching pattern containing regex groups used to parse the workfile name for needed information (for example,
the asset build name).
Example:
- pattern: ```^([^.]+)(_[^_.]*)?_v([0-9]{3,}).+```
- with groups: ```["asset", "filler", "version"]```
parses `corridorMain_v001` into three groups:
- asset build (`corridorMain`)
- filler (in this case empty)
- version (`001`)
Advanced example (for texture files):
- pattern: ```^([^_.]+)_([^_.]+)_v([0-9]{3,})_([^_.]+)_({color_space})_(1[0-9]{3}).+```
- with groups: ```["asset", "shader", "version", "channel", "color_space", "udim"]```
parses `corridorMain_aluminiumID_v001_baseColor_linsRGB_1001.exr`:
- asset build (`corridorMain`)
- shader (`aluminiumID`)
- version (`001`)
- channel (`baseColor`)
- color_space (`linsRGB`)
- udim (`1001`)
In case of a different naming pattern, additional groups can be added or removed. The number of matching groups (`(...)`) must be the same as the number of items in `Group order for regex patterns`.
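A quick way to sanity-check a pattern against a file name (a sketch; the `{color_space}` placeholder is substituted with one of the configured color spaces, just like the collector does):

```python
import re

pattern = r'^([^_.]+)_([^_.]+)_v([0-9]{3,})_([^_.]+)_(linsRGB)_(1[0-9]{3}).+'
groups = ("asset", "shader", "version", "channel", "color_space", "udim")

name = "corridorMain_aluminiumID_v001_baseColor_linsRGB_1001.exr"
print(dict(zip(groups, re.findall(pattern, name)[0])))
# {'asset': 'corridorMain', 'shader': 'aluminiumID', 'version': '001',
#  'channel': 'baseColor', 'color_space': 'linsRGB', 'udim': '1001'}
```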
##### Workfile group positions
For each regex group set in the previous paragraph, its ordinal position is required (in case new groups need to be added etc.).
The number of groups added here must match the number of parsing groups from the `Workfile naming pattern`.
##### Output names
Output names of published workfiles and textures can be configured separately:
- Subset name template for workfile
- Subset name template for textures (implemented keys: ["color_space", "channel", "subset", "shader"])
### Validate Scene Settings
#### Check Frame Range for Extensions
Configure families, file extensions and tasks to validate that the DB settings (frame range) match the currently published values.
### ExtractThumbnailSP
Plugin responsible for generating thumbnails; configure appropriate values for your version of ffmpeg.


@ -65,7 +65,8 @@ module.exports = {
label: "Project Settings",
items: [
"project_settings/settings_project_global",
"project_settings/settings_project_nuke"
"project_settings/settings_project_nuke",
"project_settings/settings_project_standalone"
],
},
],