Merge branch 'develop' into enhancement/OP-5600_Houdini-manage-colorspaces-in-review-ROP

Mustafa Zarkash 2023-08-31 17:12:05 +03:00 committed by GitHub
commit 6191038dbd
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
137 changed files with 5628 additions and 951 deletions


@@ -35,6 +35,14 @@ body:
label: Version
description: What version are you running? Look to OpenPype Tray
options:
- 3.16.5-nightly.3
- 3.16.5-nightly.2
- 3.16.5-nightly.1
- 3.16.4
- 3.16.4-nightly.3
- 3.16.4-nightly.2
- 3.16.4-nightly.1
- 3.16.3
- 3.16.3-nightly.5
- 3.16.3-nightly.4
- 3.16.3-nightly.3
@@ -127,14 +135,6 @@ body:
- 3.14.8
- 3.14.8-nightly.4
- 3.14.8-nightly.3
- 3.14.8-nightly.2
- 3.14.8-nightly.1
- 3.14.7
- 3.14.7-nightly.8
- 3.14.7-nightly.7
- 3.14.7-nightly.6
- 3.14.7-nightly.5
- 3.14.7-nightly.4
validations:
required: true
- type: dropdown

File diff suppressed because it is too large.


@@ -62,7 +62,7 @@ development tools like [CMake](https://cmake.org/) and [Visual Studio](https://v
#### Clone repository:
```sh
git clone --recurse-submodules git@github.com:Pypeclub/OpenPype.git
git clone --recurse-submodules git@github.com:ynput/OpenPype.git
```
#### To build OpenPype:
@@ -144,6 +144,10 @@ sudo ./tools/docker_build.sh centos7
If all is successful, you'll find built OpenPype in `./build/` folder.
The Docker build can also be started from a Windows machine; just use `./tools/docker_build.ps1` instead of the shell script.
This can even be used to build a Linux build (with the argument `centos7` or `debian`)
#### Manual build
You will need [Python >= 3.9](https://www.python.org/downloads/) and [git](https://git-scm.com/downloads). You'll also need [curl](https://curl.se) on systems that don't have it preinstalled.


@@ -1074,7 +1074,7 @@ def convert_update_folder_to_v4(project_name, asset_id, update_data, con):
parent_id = None
tasks = None
new_data = {}
attribs = {}
attribs = full_update_data.pop("attrib", {})
if "type" in update_data:
new_update_data["active"] = update_data["type"] == "asset"
@@ -1113,6 +1113,9 @@ def convert_update_folder_to_v4(project_name, asset_id, update_data, con):
print("Folder has new data: {}".format(new_data))
new_update_data["data"] = new_data
if attribs:
new_update_data["attrib"] = attribs
if has_task_changes:
raise ValueError("Task changes of folder are not implemented")
@@ -1126,7 +1129,7 @@ def convert_update_subset_to_v4(project_name, subset_id, update_data, con):
full_update_data = _from_flat_dict(update_data)
data = full_update_data.get("data")
new_data = {}
attribs = {}
attribs = full_update_data.pop("attrib", {})
if data:
if "family" in data:
family = data.pop("family")
@@ -1148,9 +1151,6 @@ def convert_update_subset_to_v4(project_name, subset_id, update_data, con):
elif value is not REMOVED_VALUE:
new_data[key] = value
if attribs:
new_update_data["attribs"] = attribs
if "name" in update_data:
new_update_data["name"] = update_data["name"]
@@ -1165,6 +1165,9 @@ def convert_update_subset_to_v4(project_name, subset_id, update_data, con):
new_update_data["folderId"] = update_data["parent"]
flat_data = _to_flat_dict(new_update_data)
if attribs:
flat_data["attrib"] = attribs
if new_data:
print("Subset has new data: {}".format(new_data))
flat_data["data"] = new_data
@@ -1179,7 +1182,7 @@ def convert_update_version_to_v4(project_name, version_id, update_data, con):
full_update_data = _from_flat_dict(update_data)
data = full_update_data.get("data")
new_data = {}
attribs = {}
attribs = full_update_data.pop("attrib", {})
if data:
if "author" in data:
new_update_data["author"] = data.pop("author")
@@ -1196,9 +1199,6 @@ def convert_update_version_to_v4(project_name, version_id, update_data, con):
elif value is not REMOVED_VALUE:
new_data[key] = value
if attribs:
new_update_data["attribs"] = attribs
if "name" in update_data:
new_update_data["version"] = update_data["name"]
@@ -1213,6 +1213,9 @@ def convert_update_version_to_v4(project_name, version_id, update_data, con):
new_update_data["productId"] = update_data["parent"]
flat_data = _to_flat_dict(new_update_data)
if attribs:
flat_data["attrib"] = attribs
if new_data:
print("Version has new data: {}".format(new_data))
flat_data["data"] = new_data
@@ -1252,7 +1255,7 @@ def convert_update_representation_to_v4(
data = full_update_data.get("data")
new_data = {}
attribs = {}
attribs = full_update_data.pop("attrib", {})
if data:
for key, value in data.items():
if key in folder_attributes:
@@ -1309,6 +1312,9 @@ def convert_update_representation_to_v4(
new_update_data["files"] = new_files
flat_data = _to_flat_dict(new_update_data)
if attribs:
flat_data["attrib"] = attribs
if new_data:
print("Representation has new data: {}".format(new_data))
flat_data["data"] = new_data


@@ -83,10 +83,10 @@ def _get_subsets(
project_name,
subset_ids,
subset_names,
folder_ids,
names_by_folder_ids,
active,
fields
folder_ids=folder_ids,
names_by_folder_ids=names_by_folder_ids,
active=active,
fields=fields,
):
yield convert_v4_subset_to_v3(subset)


@@ -1,3 +1,11 @@
"""Cache of thumbnails downloaded from AYON server.
Thumbnails are cached to a predefined directory under appdirs.
This should be moved to the thumbnail logic in the pipeline, but because it
would overflow OpenPype logic it stays here for now.
"""
import os
import time
import collections
@@ -10,7 +18,7 @@ FileInfo = collections.namedtuple(
)
class ThumbnailCache:
class AYONThumbnailCache:
"""Cache of thumbnails on local storage.
Thumbnails are cached to a predefined directory under appdirs. Each project has
@@ -32,13 +40,14 @@ class ThumbnailCache:
# Lifetime of thumbnails (in seconds)
# - default 3 days
days_alive = 3 * 24 * 60 * 60
days_alive = 3
# Max size of thumbnail directory (in bytes)
# - default 2 Gb
max_filesize = 2 * 1024 * 1024 * 1024
def __init__(self, cleanup=True):
self._thumbnails_dir = None
self._days_alive_secs = self.days_alive * 24 * 60 * 60
if cleanup:
self.cleanup()
@@ -50,7 +59,8 @@ class ThumbnailCache:
"""
if self._thumbnails_dir is None:
directory = appdirs.user_data_dir("ayon", "ynput")
# TODO use generic function
directory = appdirs.user_data_dir("AYON", "Ynput")
self._thumbnails_dir = os.path.join(directory, "thumbnails")
return self._thumbnails_dir
@@ -121,7 +131,7 @@ class ThumbnailCache:
for filename in filenames:
path = os.path.join(root, filename)
modification_time = os.path.getmtime(path)
if current_time - modification_time > self.days_alive:
if current_time - modification_time > self._days_alive_secs:
os.remove(path)
def _max_size_cleanup(self, thumbnails_dir):
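
The change above makes the constant mean what its name says: `days_alive` now holds days, converted once to `_days_alive_secs` in `__init__`, which is the unit the modification-time comparison in the cleanup needs. A minimal sketch of the corrected expiry check (the path in the comment is hypothetical):

```python
import os
import time

days_alive = 3
days_alive_secs = days_alive * 24 * 60 * 60  # 259200 seconds

def is_expired(path):
    # Expired when the file's modification time is older than the lifetime,
    # e.g. is_expired("/tmp/thumbnails/project/thumb.png").
    return time.time() - os.path.getmtime(path) > days_alive_secs
```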


@@ -28,7 +28,6 @@ class RenderCreator(Creator):
create_allow_context_change = True
# Settings
default_variants = []
mark_for_review = True
def create(self, subset_name_from_ui, data, pre_create_data):
@@ -171,6 +170,10 @@
)
self.mark_for_review = plugin_settings["mark_for_review"]
self.default_variants = plugin_settings.get(
"default_variants",
plugin_settings.get("defaults") or []
)
def get_detail_description(self):
return """Creator for Render instances


@@ -22,9 +22,12 @@ log = logging.getLogger(__name__)
JSON_PREFIX = "JSON:::"
def get_asset_fps():
def get_asset_fps(asset_doc=None):
"""Return current asset fps."""
return get_current_project_asset()["data"].get("fps")
if asset_doc is None:
asset_doc = get_current_project_asset(fields=["data.fps"])
return asset_doc["data"]["fps"]
def set_id(node, unique_id, overwrite=False):
@@ -472,14 +475,19 @@ def maintained_selection():
def reset_framerange():
"""Set frame range to current asset"""
"""Set frame range and FPS to current asset"""
# Get asset data
project_name = get_current_project_name()
asset_name = get_current_asset_name()
# Get the asset document from the database for the current context asset
asset_doc = get_asset_by_name(project_name, asset_name)
asset_data = asset_doc["data"]
# Get FPS
fps = get_asset_fps(asset_doc)
# Get Start and End Frames
frame_start = asset_data.get("frameStart")
frame_end = asset_data.get("frameEnd")
@@ -493,6 +501,9 @@ def reset_framerange():
frame_start -= int(handle_start)
frame_end += int(handle_end)
# Set frame range and FPS
print("Setting scene FPS to {}".format(int(fps)))
set_scene_fps(fps)
hou.playbar.setFrameRange(frame_start, frame_end)
hou.playbar.setPlaybackRange(frame_start, frame_end)
hou.setFrame(frame_start)


@@ -25,7 +25,6 @@ from openpype.lib import (
emit_event,
)
from .lib import get_asset_fps
log = logging.getLogger("openpype.hosts.houdini")
@@ -385,11 +384,6 @@ def _set_context_settings():
None
"""
# Set new scene fps
fps = get_asset_fps()
print("Setting scene FPS to %i" % fps)
lib.set_scene_fps(fps)
lib.reset_framerange()


@@ -33,7 +33,7 @@ class CreateVDBCache(plugin.HoudiniCreator):
}
if self.selected_nodes:
parms["soppath"] = self.selected_nodes[0].path()
parms["soppath"] = self.get_sop_node_path(self.selected_nodes[0])
instance_node.setParms(parms)
@@ -42,3 +42,63 @@
hou.ropNodeTypeCategory(),
hou.sopNodeTypeCategory()
]
def get_sop_node_path(self, selected_node):
"""Get Sop Path of the selected node.
Although Houdini allows an ObjNode path in `sop_path` for the
ROP node, we prefer to set it to the SopNode path explicitly.
"""
# Allow sop level paths (e.g. /obj/geo1/box1)
if isinstance(selected_node, hou.SopNode):
self.log.debug(
"Valid SopNode selection, 'SOP Path' in ROP will"
" be set to '%s'.", selected_node.path()
)
return selected_node.path()
# Allow object level paths to Geometry nodes (e.g. /obj/geo1)
# but do not allow other object level nodes types like cameras, etc.
elif isinstance(selected_node, hou.ObjNode) and \
selected_node.type().name() == "geo":
# Try to find output node.
sop_node = self.get_obj_output(selected_node)
if sop_node:
self.log.debug(
"Valid ObjNode selection, 'SOP Path' in ROP will "
"be set to the child path '%s'.", sop_node.path()
)
return sop_node.path()
self.log.debug(
"Selection isn't valid. 'SOP Path' in ROP will be empty."
)
return ""
def get_obj_output(self, obj_node):
"""Try to find output node.
If any output nodes are present, return the output node with
the minimum 'outputidx'.
If no output nodes are present, return the node with the display flag.
If no nodes are present at all, return None.
"""
outputs = obj_node.subnetOutputs()
# if obj_node is empty
if not outputs:
return
# if obj_node has a single output child, whether it's a
# SOP output node or a node with the render flag
elif len(outputs) == 1:
return outputs[0]
# if there is more than one output node,
# return the one with the minimum 'outputidx'
else:
return min(outputs,
key=lambda node: node.evalParm('outputidx'))


@@ -2,7 +2,19 @@
<mainMenu>
<menuBar>
<subMenu id="openpype_menu">
<label>OpenPype</label>
<labelExpression><![CDATA[
import os
return os.environ.get("AVALON_LABEL") or "OpenPype"
]]></labelExpression>
<actionItem id="asset_name">
<labelExpression><![CDATA[
from openpype.pipeline import get_current_asset_name, get_current_task_name
label = "{}, {}".format(get_current_asset_name(), get_current_task_name())
return label
]]></labelExpression>
</actionItem>
<separatorItem/>
<scriptItem id="openpype_create">
<label>Create...</label>


@@ -22,10 +22,10 @@ from openpype.pipeline import (
LegacyCreator,
LoaderPlugin,
get_representation_path,
legacy_io,
)
from openpype.pipeline.load import LoadError
from openpype.client import get_asset_by_name
from openpype.pipeline.create import get_subset_name
from . import lib
from .lib import imprint, read
@@ -405,14 +405,21 @@ class RenderlayerCreator(NewCreator, MayaCreatorBase):
# No existing scene instance node for this layer. Note that
# this instance will not have the `instance_node` data yet
# until it's been saved/persisted at least once.
# TODO: Correctly define the subset name using templates
prefix = self.layer_instance_prefix or self.family
subset_name = "{}{}".format(prefix, layer.name())
project_name = self.create_context.get_current_project_name()
instance_data = {
"asset": legacy_io.Session["AVALON_ASSET"],
"task": legacy_io.Session["AVALON_TASK"],
"asset": self.create_context.get_current_asset_name(),
"task": self.create_context.get_current_task_name(),
"variant": layer.name(),
}
asset_doc = get_asset_by_name(project_name,
instance_data["asset"])
subset_name = self.get_subset_name(
layer.name(),
instance_data["task"],
asset_doc,
project_name)
instance = CreatedInstance(
family=self.family,
subset_name=subset_name,
@@ -519,10 +526,75 @@ class RenderlayerCreator(NewCreator, MayaCreatorBase):
if node and cmds.objExists(node):
cmds.delete(node)
def get_subset_name(
self,
variant,
task_name,
asset_doc,
project_name,
host_name=None,
instance=None
):
# creator.family is not 'render' as the subset name expects,
# so use the layer instance prefix instead
return get_subset_name(self.layer_instance_prefix,
variant,
task_name,
asset_doc,
project_name)
class Loader(LoaderPlugin):
hosts = ["maya"]
def get_custom_namespace_and_group(self, context, options, loader_key):
"""Queries Settings to get custom template for namespace and group.
Group template might be empty; this forces imported items not to be
wrapped in a separate group.
Args:
context (dict)
options (dict): artist modifiable options from dialog
loader_key (str): key to get separate configuration from Settings
('reference_loader'|'import_loader')
"""
options["attach_to_root"] = True
asset = context['asset']
subset = context['subset']
settings = get_project_settings(context['project']['name'])
custom_naming = settings['maya']['load'][loader_key]
if not custom_naming['namespace']:
raise LoadError("No namespace specified in "
"Maya ReferenceLoader settings")
elif not custom_naming['group_name']:
self.log.debug("No custom group_name, no group will be created.")
options["attach_to_root"] = False
formatting_data = {
"asset_name": asset['name'],
"asset_type": asset['type'],
"folder": {
"name": asset["name"],
},
"subset": subset['name'],
"family": (
subset['data'].get('family') or
subset['data']['families'][0]
)
}
custom_namespace = custom_naming['namespace'].format(
**formatting_data
)
custom_group_name = custom_naming['group_name'].format(
**formatting_data
)
return custom_group_name, custom_namespace, options
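
A hedged illustration of how the namespace and group templates from Settings resolve; both the template strings and the context values below are hypothetical:

```python
# Both the Settings templates and the context values here are hypothetical.
custom_naming = {
    "namespace": "{asset_name}_{subset}",
    "group_name": "{asset_name}_GRP",
}
formatting_data = {
    "asset_name": "hero",
    "asset_type": "asset",
    "folder": {"name": "hero"},
    "subset": "modelMain",
    "family": "model",
}

print(custom_naming["namespace"].format(**formatting_data))   # hero_modelMain
print(custom_naming["group_name"].format(**formatting_data))  # hero_GRP
```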
class ReferenceLoader(Loader):
"""A basic ReferenceLoader for Maya
@@ -565,42 +637,13 @@ class ReferenceLoader(Loader):
path = self.filepath_from_context(context)
assert os.path.exists(path), "%s does not exist." % path
asset = context['asset']
subset = context['subset']
settings = get_project_settings(context['project']['name'])
custom_naming = settings['maya']['load']['reference_loader']
loaded_containers = []
if not custom_naming['namespace']:
raise LoadError("No namespace specified in "
"Maya ReferenceLoader settings")
elif not custom_naming['group_name']:
self.log.debug("No custom group_name, no group will be created.")
options["attach_to_root"] = False
formatting_data = {
"asset_name": asset['name'],
"asset_type": asset['type'],
"folder": {
"name": asset["name"],
},
"subset": subset['name'],
"family": (
subset['data'].get('family') or
subset['data']['families'][0]
)
}
custom_namespace = custom_naming['namespace'].format(
**formatting_data
)
custom_group_name = custom_naming['group_name'].format(
**formatting_data
)
custom_group_name, custom_namespace, options = \
self.get_custom_namespace_and_group(context, options,
"reference_loader")
count = options.get("count") or 1
loaded_containers = []
for c in range(0, count):
namespace = lib.get_custom_namespace(custom_namespace)
group_name = "{}:{}".format(


@@ -2,6 +2,8 @@ from openpype.pipeline.create.creator_plugins import SubsetConvertorPlugin
from openpype.hosts.maya.api import plugin
from openpype.hosts.maya.api.lib import read
from openpype.client import get_asset_by_name
from maya import cmds
from maya.app.renderSetup.model import renderSetup
@@ -135,6 +137,18 @@ class MayaLegacyConvertor(SubsetConvertorPlugin,
# "rendering" family being converted to "renderlayer" family)
original_data["family"] = creator.family
# recreate the subset name, as otherwise it would be
# `renderingMain` instead of the correct `renderMain`
project_name = self.create_context.get_current_project_name()
asset_doc = get_asset_by_name(project_name,
original_data["asset"])
subset_name = creator.get_subset_name(
original_data["variant"],
data["task"],
asset_doc,
project_name)
original_data["subset"] = subset_name
# Convert to creator attributes when relevant
creator_attributes = {}
for key in list(original_data.keys()):


@@ -33,6 +33,13 @@ class AbcLoader(openpype.hosts.maya.api.plugin.ReferenceLoader):
suffix="_abc"
)
attach_to_root = options.get("attach_to_root", True)
group_name = options["group_name"]
# no group shall be created
if not attach_to_root:
group_name = namespace
# hero_001 (abc)
# asset_counter{optional}
path = self.filepath_from_context(context)
@@ -41,8 +48,8 @@ class AbcLoader(openpype.hosts.maya.api.plugin.ReferenceLoader):
nodes = cmds.file(file_url,
namespace=namespace,
sharedReferenceFile=False,
groupReference=True,
groupName=options['group_name'],
groupReference=attach_to_root,
groupName=group_name,
reference=True,
returnNewNodes=True)


@@ -5,8 +5,9 @@ import qargparse
from openpype.pipeline import load
from openpype.hosts.maya.api.lib import (
maintained_selection,
unique_namespace
get_custom_namespace
)
import openpype.hosts.maya.api.plugin
class SetFrameRangeLoader(load.LoaderPlugin):
@@ -83,7 +84,7 @@ class SetFrameRangeWithHandlesLoader(load.LoaderPlugin):
animationEndTime=end)
class ImportMayaLoader(load.LoaderPlugin):
class ImportMayaLoader(openpype.hosts.maya.api.plugin.Loader):
"""Import action for Maya (unmanaged)
Warning:
@@ -130,13 +131,14 @@ class ImportMayaLoader(load.LoaderPlugin):
if choice is False:
return
asset = context['asset']
custom_group_name, custom_namespace, options = \
self.get_custom_namespace_and_group(context, data,
"import_loader")
namespace = namespace or unique_namespace(
asset["name"] + "_",
prefix="_" if asset["name"][0].isdigit() else "",
suffix="_",
)
namespace = get_custom_namespace(custom_namespace)
if not options.get("attach_to_root", True):
custom_group_name = namespace
path = self.filepath_from_context(context)
with maintained_selection():
@@ -145,8 +147,9 @@ class ImportMayaLoader(load.LoaderPlugin):
preserveReferences=True,
namespace=namespace,
returnNewNodes=True,
groupReference=True,
groupName="{}:{}".format(namespace, name))
groupReference=options.get("attach_to_root",
True),
groupName=custom_group_name)
if data.get("clean_import", False):
remove_attributes = ["cbId"]


@@ -9,8 +9,7 @@ from openpype.hosts.maya.api.lib import (
maintained_selection,
get_container_members,
parent_nodes,
create_rig_animation_instance,
get_reference_node
create_rig_animation_instance
)


@@ -19,8 +19,15 @@ class YetiRigLoader(openpype.hosts.maya.api.plugin.ReferenceLoader):
def process_reference(
self, context, name=None, namespace=None, options=None
):
group_name = options['group_name']
path = self.filepath_from_context(context)
attach_to_root = options.get("attach_to_root", True)
group_name = options["group_name"]
# no group shall be created
if not attach_to_root:
group_name = namespace
with lib.maintained_selection():
file_url = self.prepare_root_value(
path, context["project"]["name"]
@@ -30,7 +37,7 @@ class YetiRigLoader(openpype.hosts.maya.api.plugin.ReferenceLoader):
namespace=namespace,
reference=True,
returnNewNodes=True,
groupReference=True,
groupReference=attach_to_root,
groupName=group_name
)


@@ -10,7 +10,6 @@ class CollectCurrentFile(pyblish.api.ContextPlugin):
order = pyblish.api.CollectorOrder - 0.4
label = "Maya Current File"
hosts = ['maya']
families = ["workfile"]
def process(self, context):
"""Inject the current working file"""


@@ -304,9 +304,9 @@ class CollectMayaRender(pyblish.api.InstancePlugin):
if self.sync_workfile_version:
data["version"] = context.data["version"]
for instance in context:
if instance.data['family'] == "workfile":
instance.data["version"] = context.data["version"]
for _instance in context:
if _instance.data['family'] == "workfile":
_instance.data["version"] = context.data["version"]
# Define nice label
label = "{0} ({1})".format(layer_name, instance.data["asset"])


@@ -2076,9 +2076,16 @@ class WorkfileSettings(object):
str(workfile_settings["OCIO_config"]))
else:
# set values to root
# OCIO config path is defined from prelaunch hook
self._root_node["colorManagement"].setValue("OCIO")
# print previous settings in case some were found in workfile
residual_path = self._root_node["customOCIOConfigPath"].value()
if residual_path:
log.info("Residual OCIO config path found: `{}`".format(
residual_path
))
# we don't need the key anymore
workfile_settings.pop("customOCIOConfigPath", None)
workfile_settings.pop("colorManagement", None)
@@ -2100,9 +2107,35 @@
# set ocio config path
if config_data:
current_ocio_path = os.getenv("OCIO")
if current_ocio_path != config_data["path"]:
message = """
log.info("OCIO config path found: `{}`".format(
config_data["path"]))
# check if there's a mismatch between environment and settings
correct_settings = self._is_settings_matching_environment(
config_data)
# if there's no mismatch between environment and settings
if correct_settings:
self._set_ocio_config_path_to_workfile(config_data)
def _is_settings_matching_environment(self, config_data):
""" Check if OCIO config path is different from environment
Args:
config_data (dict): OCIO config data from settings
Returns:
bool: True if settings are matching environment, False otherwise
"""
current_ocio_path = os.environ["OCIO"]
settings_ocio_path = config_data["path"]
# normalize all paths to forward slashes
current_ocio_path = current_ocio_path.replace("\\", "/")
settings_ocio_path = settings_ocio_path.replace("\\", "/")
if current_ocio_path != settings_ocio_path:
message = """
It seems like there's a mismatch between the OCIO config path set in your Nuke
settings and the actual path set in your OCIO environment.
@@ -2120,12 +2153,118 @@ Please note the paths for your reference:
Reopening Nuke should synchronize these paths and resolve any discrepancies.
"""
nuke.message(
message.format(
env_path=current_ocio_path,
settings_path=config_data["path"]
)
nuke.message(
message.format(
env_path=current_ocio_path,
settings_path=settings_ocio_path
)
)
return False
return True
def _set_ocio_config_path_to_workfile(self, config_data):
""" Set OCIO config path to workfile
The path is set into the Nuke workfile. It tries to replace the path
with an environment variable if possible; if not, it sets the path as-is.
It also saves the script to apply the change, but only if the script is
not an empty Untitled one.
Args:
config_data (dict): OCIO config data from settings
"""
# replace path with env var if possible
ocio_path = self._replace_ocio_path_with_env_var(config_data)
log.info("Setting OCIO config path to: `{}`".format(
ocio_path))
self._root_node["customOCIOConfigPath"].setValue(
ocio_path
)
self._root_node["OCIO_config"].setValue("custom")
# only save script if it's not empty
if self._root_node["name"].value() != "":
log.info("Saving script to apply OCIO config path change.")
nuke.scriptSave()
def _get_included_vars(self, config_template):
""" Get all environment variables included in template
Args:
config_template (str): OCIO config template from settings
Returns:
list: list of environment variables included in template
"""
# resolve all environments for whitelist variables
included_vars = [
"BUILTIN_OCIO_ROOT",
]
# include all project root related env vars
for env_var in os.environ:
if env_var.startswith("OPENPYPE_PROJECT_ROOT_"):
included_vars.append(env_var)
# use regex to find env var in template with format {ENV_VAR}
# this way we make sure only env vars used in the template are included
env_var_regex = r"\{([A-Z0-9_]+)\}"
env_var = re.findall(env_var_regex, config_template)
if env_var:
included_vars.append(env_var[0])
return included_vars
def _replace_ocio_path_with_env_var(self, config_data):
""" Replace OCIO config path with environment variable
The environment variable is added to the path as a TCL expression. The
TCL expression also replaces backslashes found in the path with forward
slashes for Windows-formatted values.
Args:
config_data (str): OCIO config dict from settings
Returns:
str: OCIO config path with environment variable TCL expression
"""
config_path = config_data["path"]
config_template = config_data["template"]
included_vars = self._get_included_vars(config_template)
# make sure we return original path if no env var is included
new_path = config_path
for env_var in included_vars:
env_path = os.getenv(env_var)
if not env_path:
continue
# it has to be a directory the current process can see
if not os.path.isdir(env_path):
continue
# make sure paths are in same format
env_path = env_path.replace("\\", "/")
path = config_path.replace("\\", "/")
# check if env_path is in path and replace the first match found
if env_path in path:
# with regsub we make sure path format of slashes is correct
resub_expr = (
"[regsub -all {{\\\\}} [getenv {}] \"/\"]").format(env_var)
new_path = path.replace(
env_path, resub_expr
)
break
return new_path
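
A worked example of the substitution above; the environment variable and paths are hypothetical, and the `os.path.isdir` check from the real code is skipped since the path is made up:

```python
import os

# Hypothetical env var and paths for illustration only.
os.environ["OPENPYPE_PROJECT_ROOT_WORK"] = "C:/pipeline"
env_var = "OPENPYPE_PROJECT_ROOT_WORK"
config_path = "C:/pipeline/ocio/config.ocio"

env_path = os.environ[env_var].replace("\\", "/")
resub_expr = (
    "[regsub -all {{\\\\}} [getenv {}] \"/\"]").format(env_var)

print(config_path.replace(env_path, resub_expr))
# [regsub -all {\\} [getenv OPENPYPE_PROJECT_ROOT_WORK] "/"]/ocio/config.ocio
```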
def set_writes_colorspace(self):
''' Adds correct colorspace to write node dict
@@ -2239,7 +2378,7 @@ Reopening Nuke should synchronize these paths and resolve any discrepancies.
knobs["to"]))
def set_colorspace(self):
''' Setting colorpace following presets
''' Setting colorspace following presets
'''
# get imageio
nuke_colorspace = get_nuke_imageio_settings()
@@ -2247,17 +2386,16 @@ Reopening Nuke should synchronize these paths and resolve any discrepancies.
log.info("Setting colorspace to workfile...")
try:
self.set_root_colorspace(nuke_colorspace)
except AttributeError:
msg = "set_colorspace(): missing `workfile` settings in template"
except AttributeError as _error:
msg = "Set Colorspace to workfile error: {}".format(_error)
nuke.message(msg)
log.info("Setting colorspace to viewers...")
try:
self.set_viewers_colorspace(nuke_colorspace["viewer"])
except AttributeError:
msg = "set_colorspace(): missing `viewer` settings in template"
except AttributeError as _error:
msg = "Set Colorspace to viewer error: {}".format(_error)
nuke.message(msg)
log.error(msg)
log.info("Setting colorspace to write nodes...")
try:


@@ -543,6 +543,9 @@ def list_instances(creator_id=None):
For SubsetManager
Args:
creator_id (Optional[str]): creator identifier
Returns:
(list) of dictionaries matching instances format
"""
@@ -575,10 +578,13 @@ def list_instances(creator_id=None):
if creator_id and instance_data["creator_identifier"] != creator_id:
continue
if instance_data["instance_id"] in instance_ids:
instance_id = instance_data.get("instance_id")
if not instance_id:
pass
elif instance_id in instance_ids:
instance_data.pop("instance_id")
else:
instance_ids.add(instance_data["instance_id"])
instance_ids.add(instance_id)
# node name could change, so update subset name data
_update_subset_name_data(instance_data, node)


@@ -327,6 +327,7 @@ class NukeWriteCreator(NukeCreator):
"frames": "Use existing frames"
}
if ("farm_rendering" in self.instance_attributes):
rendering_targets["frames_farm"] = "Use existing frames - farm"
rendering_targets["farm"] = "Farm rendering"
return EnumDef(


@@ -114,6 +114,11 @@ class NukePlaceholderPlugin(PlaceholderPlugin):
placeholder_data[key] = value
return placeholder_data
def delete_placeholder(self, placeholder):
"""Remove placeholder if building was successful"""
placeholder_node = nuke.toNode(placeholder.scene_identifier)
nuke.delete(placeholder_node)
class NukePlaceholderLoadPlugin(NukePlaceholderPlugin, PlaceholderLoadMixin):
identifier = "nuke.load"
@@ -276,14 +281,6 @@ class NukePlaceholderLoadPlugin(NukePlaceholderPlugin, PlaceholderLoadMixin):
placeholder.data["nb_children"] += 1
reset_selection()
# remove placeholders marked as delete
if (
placeholder.data.get("delete")
and not placeholder.data.get("keep_placeholder")
):
self.log.debug("Deleting node: {}".format(placeholder_node.name()))
nuke.delete(placeholder_node)
# go back to root group
nuke.root().begin()
@@ -690,14 +687,6 @@ class NukePlaceholderCreatePlugin(
placeholder.data["nb_children"] += 1
reset_selection()
# remove placeholders marked as delete
if (
placeholder.data.get("delete")
and not placeholder.data.get("keep_placeholder")
):
self.log.debug("Deleting node: {}".format(placeholder_node.name()))
nuke.delete(placeholder_node)
# go back to root group
nuke.root().begin()


@@ -96,7 +96,8 @@ class LoadImage(load.LoaderPlugin):
file = file.replace("\\", "/")
repr_cont = context["representation"]["context"]
representation = context["representation"]
repr_cont = representation["context"]
frame = repr_cont.get("frame")
if frame:
padding = len(frame)
@@ -104,16 +105,7 @@
frame,
format(frame_number, "0{}".format(padding)))
name_data = {
"asset": repr_cont["asset"],
"subset": repr_cont["subset"],
"representation": context["representation"]["name"],
"ext": repr_cont["representation"],
"id": context["representation"]["_id"],
"class_name": self.__class__.__name__
}
read_name = self.node_name_template.format(**name_data)
read_name = self._get_node_name(representation)
# Create the Loader with the filename path set
with viewer_update_and_undo_stop():
@@ -212,6 +204,8 @@ class LoadImage(load.LoaderPlugin):
last = first = int(frame_number)
# Set the global in to the start frame of the sequence
read_name = self._get_node_name(representation)
node["name"].setValue(read_name)
node["file"].setValue(file)
node["origfirst"].setValue(first)
node["first"].setValue(first)
@@ -250,3 +244,17 @@ class LoadImage(load.LoaderPlugin):
with viewer_update_and_undo_stop():
nuke.delete(node)
def _get_node_name(self, representation):
repre_cont = representation["context"]
name_data = {
"asset": repre_cont["asset"],
"subset": repre_cont["subset"],
"representation": representation["name"],
"ext": repre_cont["representation"],
"id": representation["_id"],
"class_name": self.__class__.__name__
}
return self.node_name_template.format(**name_data)
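
A hypothetical illustration of the `node_name_template` formatting performed by `_get_node_name`; both the template string and the representation values below are made up:

```python
# Both the template and the representation values here are hypothetical.
node_name_template = "{class_name}_{asset}_{subset}_{representation}"
name_data = {
    "asset": "sh010",
    "subset": "plateMain",
    "representation": "exr",
    "ext": "exr",
    "id": "0123456789abcdef01234567",
    "class_name": "LoadImage",
}

print(node_name_template.format(**name_data))  # LoadImage_sh010_plateMain_exr
```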


@@ -2,11 +2,13 @@ import nuke
import pyblish.api
class CollectInstanceData(pyblish.api.InstancePlugin):
"""Collect all nodes with Avalon knob."""
class CollectNukeInstanceData(pyblish.api.InstancePlugin):
"""Collect Nuke instance data
"""
order = pyblish.api.CollectorOrder - 0.49
label = "Collect Instance Data"
label = "Collect Nuke Instance Data"
hosts = ["nuke", "nukeassist"]
# presets
@@ -40,5 +42,14 @@ class CollectInstanceData(pyblish.api.InstancePlugin):
"pixelAspect": pixel_aspect
})
# add creator attributes to instance
creator_attributes = instance.data["creator_attributes"]
instance.data.update(creator_attributes)
# add review family if review activated on instance
if instance.data.get("review"):
instance.data["families"].append("review")
self.log.debug("Collected instance: {}".format(
instance.data))


@@ -5,7 +5,7 @@ import nuke
class CollectSlate(pyblish.api.InstancePlugin):
"""Check if SLATE node is in scene and connected to rendering tree"""
order = pyblish.api.CollectorOrder + 0.09
order = pyblish.api.CollectorOrder + 0.002
label = "Collect Slate Node"
hosts = ["nuke"]
families = ["render"]
@@ -13,10 +13,14 @@
def process(self, instance):
node = instance.data["transientData"]["node"]
slate = next((n for n in nuke.allNodes()
if "slate" in n.name().lower()
if not n["disable"].getValue()),
None)
slate = next(
(
n_ for n_ in nuke.allNodes()
if "slate" in n_.name().lower()
if not n_["disable"].getValue()
),
None
)
if slate:
# check if slate node is connected to write node tree


@@ -1,5 +1,4 @@
import os
from pprint import pformat
import nuke
import pyblish.api
from openpype.hosts.nuke import api as napi
@@ -15,30 +14,16 @@ class CollectNukeWrites(pyblish.api.InstancePlugin,
hosts = ["nuke", "nukeassist"]
families = ["render", "prerender", "image"]
# cache
_write_nodes = {}
_frame_ranges = {}
def process(self, instance):
self.log.debug(pformat(instance.data))
creator_attributes = instance.data["creator_attributes"]
instance.data.update(creator_attributes)
group_node = instance.data["transientData"]["node"]
render_target = instance.data["render_target"]
family = instance.data["family"]
families = instance.data["families"]
# add targeted family to families
instance.data["families"].append(
"{}.{}".format(family, render_target)
)
if instance.data.get("review"):
instance.data["families"].append("review")
child_nodes = napi.get_instance_group_node_childs(instance)
instance.data["transientData"]["childNodes"] = child_nodes
write_node = None
for x in child_nodes:
if x.Class() == "Write":
write_node = x
write_node = self._write_node_helper(instance)
if write_node is None:
self.log.warning(
@@ -48,113 +33,134 @@ class CollectNukeWrites(pyblish.api.InstancePlugin,
)
return
instance.data["writeNode"] = write_node
self.log.debug("checking instance: {}".format(instance))
# get colorspace and add to version data
colorspace = napi.get_colorspace_from_node(write_node)
# Determine defined file type
ext = write_node["file_type"].value()
if render_target == "frames":
self._set_existing_files_data(instance, colorspace)
# Get frame range
handle_start = instance.context.data["handleStart"]
handle_end = instance.context.data["handleEnd"]
first_frame = int(nuke.root()["first_frame"].getValue())
last_frame = int(nuke.root()["last_frame"].getValue())
frame_length = int(last_frame - first_frame + 1)
elif render_target == "frames_farm":
collected_frames = self._set_existing_files_data(
instance, colorspace)
if write_node["use_limit"].getValue():
first_frame = int(write_node["first"].getValue())
last_frame = int(write_node["last"].getValue())
self._set_expected_files(instance, collected_frames)
self._add_farm_instance_data(instance)
elif render_target == "farm":
self._add_farm_instance_data(instance)
# set additional instance data
self._set_additional_instance_data(instance, render_target, colorspace)
def _set_existing_files_data(self, instance, colorspace):
"""Set existing files data to instance data.
Args:
instance (pyblish.api.Instance): pyblish instance
colorspace (str): colorspace
Returns:
list: collected frames
"""
collected_frames = self._get_collected_frames(instance)
representation = self._get_existing_frames_representation(
instance, collected_frames
)
# inject colorspace data
self.set_representation_colorspace(
representation, instance.context,
colorspace=colorspace
)
instance.data["representations"].append(representation)
return collected_frames
def _set_expected_files(self, instance, collected_frames):
"""Set expected files to instance data.
Args:
instance (pyblish.api.Instance): pyblish instance
collected_frames (list): collected frames
"""
write_node = self._write_node_helper(instance)
write_file_path = nuke.filename(write_node)
output_dir = os.path.dirname(write_file_path)
# get colorspace and add to version data
colorspace = napi.get_colorspace_from_node(write_node)
instance.data["expectedFiles"] = [
os.path.join(output_dir, source_file)
for source_file in collected_frames
]
self.log.debug('output dir: {}'.format(output_dir))
def _get_frame_range_data(self, instance):
"""Get frame range data from instance.
if render_target == "frames":
representation = {
'name': ext,
'ext': ext,
"stagingDir": output_dir,
"tags": []
}
Args:
instance (pyblish.api.Instance): pyblish instance
# get file path knob
node_file_knob = write_node["file"]
# list file paths based on input frames
expected_paths = list(sorted({
node_file_knob.evaluate(frame)
for frame in range(first_frame, last_frame + 1)
}))
Returns:
tuple: first_frame, last_frame
"""
# convert only to base names
expected_filenames = [
os.path.basename(filepath)
for filepath in expected_paths
]
instance_name = instance.data["name"]
# make sure files are existing at folder
collected_frames = [
filename
for filename in os.listdir(output_dir)
if filename in expected_filenames
]
if self._frame_ranges.get(instance_name):
# return cached frame range
return self._frame_ranges[instance_name]
if collected_frames:
collected_frames_len = len(collected_frames)
frame_start_str = "%0{}d".format(
len(str(last_frame))) % first_frame
representation['frameStart'] = frame_start_str
write_node = self._write_node_helper(instance)
# in case slate is expected and not yet rendered
self.log.debug("_ frame_length: {}".format(frame_length))
self.log.debug("_ collected_frames_len: {}".format(
collected_frames_len))
# Get frame range from workfile
first_frame = int(nuke.root()["first_frame"].getValue())
last_frame = int(nuke.root()["last_frame"].getValue())
# this will only run if the slate frame is not already
# rendered from previous publishes
if (
"slate" in families
and frame_length == collected_frames_len
and family == "render"
):
frame_slate_str = (
"{{:0{}d}}".format(len(str(last_frame)))
).format(first_frame - 1)
# Get frame range from write node if activated
if write_node["use_limit"].getValue():
first_frame = int(write_node["first"].getValue())
last_frame = int(write_node["last"].getValue())
slate_frame = collected_frames[0].replace(
frame_start_str, frame_slate_str)
collected_frames.insert(0, slate_frame)
# add to cache
self._frame_ranges[instance_name] = (first_frame, last_frame)
if collected_frames_len == 1:
representation['files'] = collected_frames.pop()
else:
representation['files'] = collected_frames
return first_frame, last_frame
# inject colorspace data
self.set_representation_colorspace(
representation, instance.context,
colorspace=colorspace
)
def _set_additional_instance_data(
self, instance, render_target, colorspace
):
"""Set additional instance data.
instance.data["representations"].append(representation)
self.log.info("Publishing rendered frames ...")
Args:
instance (pyblish.api.Instance): pyblish instance
render_target (str): render target
colorspace (str): colorspace
"""
family = instance.data["family"]
elif render_target == "farm":
farm_keys = ["farm_chunk", "farm_priority", "farm_concurrency"]
for key in farm_keys:
# Skip if key is not in creator attributes
if key not in creator_attributes:
continue
# Add farm attributes to instance
instance.data[key] = creator_attributes[key]
# add targeted family to families
instance.data["families"].append(
"{}.{}".format(family, render_target)
)
self.log.debug("Appending render target to families: {}.{}".format(
family, render_target)
)
# Farm rendering
instance.data["transfer"] = False
instance.data["farm"] = True
self.log.info("Farm rendering ON ...")
write_node = self._write_node_helper(instance)
# Determine defined file type
ext = write_node["file_type"].value()
# get frame range data
handle_start = instance.context.data["handleStart"]
handle_end = instance.context.data["handleEnd"]
first_frame, last_frame = self._get_frame_range_data(instance)
# get output paths
write_file_path = nuke.filename(write_node)
output_dir = os.path.dirname(write_file_path)
# TODO: remove this when we have proper colorspace support
version_data = {
@@ -188,10 +194,6 @@ class CollectNukeWrites(pyblish.api.InstancePlugin,
"frameEndHandle": last_frame,
})
# make sure rendered sequence on farm will
# be used for extract review
if not instance.data.get("review"):
instance.data["useSequenceForReview"] = False
# TODO temporarily set stagingDir as persistent for backward
# compatibility. This is mainly focused on `renders`folders which
@@ -199,4 +201,201 @@ class CollectNukeWrites(pyblish.api.InstancePlugin,
# this logic should be removed and replaced with custom staging dir
instance.data["stagingDir_persistent"] = True
self.log.debug("instance.data: {}".format(pformat(instance.data)))
def _write_node_helper(self, instance):
"""Helper function to get write node from instance.
Also sets instance transient data with child nodes.
Args:
instance (pyblish.api.Instance): pyblish instance
Returns:
nuke.Node: write node
"""
instance_name = instance.data["name"]
if self._write_nodes.get(instance_name):
# return cached write node
return self._write_nodes[instance_name]
# get all child nodes from group node
child_nodes = napi.get_instance_group_node_childs(instance)
# set child nodes to instance transient data
instance.data["transientData"]["childNodes"] = child_nodes
write_node = None
for node_ in child_nodes:
if node_.Class() == "Write":
write_node = node_
if write_node:
# for slate frame extraction
instance.data["transientData"]["writeNode"] = write_node
# add to cache
self._write_nodes[instance_name] = write_node
return self._write_nodes[instance_name]
def _get_existing_frames_representation(
self,
instance,
collected_frames
):
"""Get existing frames representation.
Args:
instance (pyblish.api.Instance): pyblish instance
collected_frames (list): collected frames
Returns:
dict: representation
"""
first_frame, last_frame = self._get_frame_range_data(instance)
write_node = self._write_node_helper(instance)
write_file_path = nuke.filename(write_node)
output_dir = os.path.dirname(write_file_path)
# Determine defined file type
ext = write_node["file_type"].value()
representation = {
"name": ext,
"ext": ext,
"stagingDir": output_dir,
"tags": []
}
frame_start_str = self._get_frame_start_str(first_frame, last_frame)
representation['frameStart'] = frame_start_str
# set slate frame
collected_frames = self._add_slate_frame_to_collected_frames(
instance,
collected_frames,
first_frame,
last_frame
)
if len(collected_frames) == 1:
representation['files'] = collected_frames.pop()
else:
representation['files'] = collected_frames
return representation
def _get_frame_start_str(self, first_frame, last_frame):
"""Get frame start string.
Args:
first_frame (int): first frame
last_frame (int): last frame
Returns:
str: frame start string
"""
# convert first frame to string with padding
return (
"{{:0{}d}}".format(len(str(last_frame)))
).format(first_frame)
def _add_slate_frame_to_collected_frames(
self,
instance,
collected_frames,
first_frame,
last_frame
):
"""Add slate frame to collected frames.
Args:
instance (pyblish.api.Instance): pyblish instance
collected_frames (list): collected frames
first_frame (int): first frame
last_frame (int): last frame
Returns:
list: collected frames
"""
frame_start_str = self._get_frame_start_str(first_frame, last_frame)
frame_length = int(last_frame - first_frame + 1)
# this will only run if the slate frame is not already
# rendered from previous publishes
if (
"slate" in instance.data["families"]
and frame_length == len(collected_frames)
):
frame_slate_str = self._get_frame_start_str(
first_frame - 1,
last_frame
)
slate_frame = collected_frames[0].replace(
frame_start_str, frame_slate_str)
collected_frames.insert(0, slate_frame)
return collected_frames
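
A worked example of the padding helper and slate naming above, with hypothetical frame values and file names; the slate gets frame `first_frame - 1` rendered into the same padded pattern:

```python
# Hypothetical frame values and file names.
first_frame, last_frame = 1001, 1100
padding = len(str(last_frame))  # 4

frame_start_str = "{{:0{}d}}".format(padding).format(first_frame)      # "1001"
frame_slate_str = "{{:0{}d}}".format(padding).format(first_frame - 1)  # "1000"

collected_frames = ["render.1001.exr", "render.1002.exr"]
slate_frame = collected_frames[0].replace(frame_start_str, frame_slate_str)
print(slate_frame)  # render.1000.exr
```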
def _add_farm_instance_data(self, instance):
"""Add farm publishing related instance data.
Args:
instance (pyblish.api.Instance): pyblish instance
"""
# make sure rendered sequence on farm will
# be used for extract review
if not instance.data.get("review"):
instance.data["useSequenceForReview"] = False
# Farm rendering
instance.data.update({
"transfer": False,
"farm": True # to skip integrate
})
self.log.info("Farm rendering ON ...")
def _get_collected_frames(self, instance):
"""Get collected frames.
Args:
instance (pyblish.api.Instance): pyblish instance
Returns:
list: collected frames
"""
first_frame, last_frame = self._get_frame_range_data(instance)
write_node = self._write_node_helper(instance)
write_file_path = nuke.filename(write_node)
output_dir = os.path.dirname(write_file_path)
# get file path knob
node_file_knob = write_node["file"]
# list file paths based on input frames
expected_paths = list(sorted({
node_file_knob.evaluate(frame)
for frame in range(first_frame, last_frame + 1)
}))
# convert only to base names
expected_filenames = {
os.path.basename(filepath)
for filepath in expected_paths
}
# make sure files are existing at folder
collected_frames = [
filename
for filename in os.listdir(output_dir)
if filename in expected_filenames
]
return collected_frames


@@ -11,9 +11,9 @@ from openpype.hosts.nuke.api.lib import maintained_selection
class ExtractCamera(publish.Extractor):
""" 3D camera exctractor
""" 3D camera extractor
"""
label = 'Exctract Camera'
label = 'Extract Camera'
order = pyblish.api.ExtractorOrder
families = ["camera"]
hosts = ["nuke"]


@@ -11,9 +11,9 @@ from openpype.hosts.nuke.api.lib import (
class ExtractModel(publish.Extractor):
""" 3D model exctractor
""" 3D model extractor
"""
label = 'Exctract Model'
label = 'Extract Model'
order = pyblish.api.ExtractorOrder
families = ["model"]
hosts = ["nuke"]


@@ -249,7 +249,7 @@ class ExtractSlateFrame(publish.Extractor):
# Add file to representation files
# - get write node
write_node = instance.data["writeNode"]
write_node = instance.data["transientData"]["writeNode"]
# - evaluate filepaths for first frame and slate frame
first_filename = os.path.basename(
write_node["file"].evaluate(first_frame))


@@ -54,6 +54,7 @@ class ExtractThumbnail(publish.Extractor):
def render_thumbnail(self, instance, output_name=None, **kwargs):
first_frame = instance.data["frameStartHandle"]
last_frame = instance.data["frameEndHandle"]
colorspace = instance.data["colorspace"]
# find frame range and define middle thumb frame
mid_frame = int((last_frame - first_frame) / 2)
@@ -112,8 +113,8 @@ class ExtractThumbnail(publish.Extractor):
if self.use_rendered and os.path.isfile(path_render):
# check if file exist otherwise connect to write node
rnode = nuke.createNode("Read")
rnode["file"].setValue(path_render)
rnode["colorspace"].setValue(colorspace)
# turn it raw if none of the baking options is ON
if all([


@@ -1,3 +1,5 @@
from collections import defaultdict
import pyblish.api
from openpype.pipeline.publish import get_errored_instances_from_context
from openpype.hosts.nuke.api.lib import (
@@ -87,6 +89,11 @@ class ValidateNukeWriteNode(
correct_data
))
# Collect the values of same-named knobs in a list.
values_by_name = defaultdict(list)
for knob_data in correct_data["knobs"]:
values_by_name[knob_data["name"]].append(knob_data["value"])
for knob_data in correct_data["knobs"]:
knob_type = knob_data["type"]
self.log.debug("__ knob_type: {}".format(
@@ -105,28 +112,33 @@
)
key = knob_data["name"]
value = knob_data["value"]
values = values_by_name[key]
node_value = write_node[key].value()
# fix type differences
if type(node_value) in (int, float):
try:
if isinstance(value, list):
value = color_gui_to_int(value)
else:
value = float(value)
node_value = float(node_value)
except ValueError:
value = str(value)
else:
value = str(value)
node_value = str(node_value)
fixed_values = []
for value in values:
if type(node_value) in (int, float):
try:
self.log.debug("__ key: {} | value: {}".format(
key, value
if isinstance(value, list):
value = color_gui_to_int(value)
else:
value = float(value)
node_value = float(node_value)
except ValueError:
value = str(value)
else:
value = str(value)
node_value = str(node_value)
fixed_values.append(value)
self.log.debug("__ key: {} | values: {}".format(
key, fixed_values
))
if (
node_value != value
node_value not in fixed_values
and key != "file"
and key != "tile_color"
):
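
A small illustration of the `defaultdict` grouping above, with made-up knob settings; a node value now passes validation when it matches any of the collected values for that knob name:

```python
from collections import defaultdict

# Made-up knob settings; the same knob name may appear with several
# acceptable values.
knobs = [
    {"name": "file_type", "value": "exr"},
    {"name": "file_type", "value": "png"},
    {"name": "channels", "value": "rgb"},
]

values_by_name = defaultdict(list)
for knob_data in knobs:
    values_by_name[knob_data["name"]].append(knob_data["value"])

print(values_by_name["file_type"])           # ['exr', 'png']
print("png" in values_by_name["file_type"])  # True
```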


@@ -18,6 +18,7 @@ Provides:
import pyblish.api
from openpype.client import get_last_version_by_subset_name
from openpype.pipeline.version_start import get_versioning_start
class CollectPublishedVersion(pyblish.api.ContextPlugin):
@@ -47,9 +48,17 @@ class CollectPublishedVersion(pyblish.api.ContextPlugin):
version_doc = get_last_version_by_subset_name(project_name,
workfile_subset_name,
asset_id)
version_int = 1
if version_doc:
version_int += int(version_doc["name"])
version_int = int(version_doc["name"]) + 1
else:
version_int = get_versioning_start(
project_name,
"photoshop",
task_name=context.data["task"],
task_type=context.data["taskType"],
project_settings=context.data["project_settings"]
)
self.log.debug(f"Setting {version_int} to context.")
context.data["version"] = version_int


@@ -233,7 +233,7 @@ def get_layers_pre_post_behavior(layer_ids, communicator=None):
Pre and post behaviors are an enumeration of possible values:
- "none"
- "repeat" / "loop"
- "repeat"
- "pingpong"
- "hold"
@@ -242,7 +242,7 @@
{
0: {
"pre": "none",
"post": "loop"
"post": "repeat"
}
}
```


@@ -77,13 +77,15 @@ def _calculate_pre_behavior_copy(
for frame_idx in range(range_start, layer_frame_start):
output_idx_by_frame_idx[frame_idx] = first_exposure_frame
elif pre_beh in ("loop", "repeat"):
elif pre_beh == "repeat":
# Loop backwards from last frame of layer
for frame_idx in reversed(range(range_start, layer_frame_start)):
eq_frame_idx_offset = (
(layer_frame_end - frame_idx) % frame_count
)
eq_frame_idx = layer_frame_end - eq_frame_idx_offset
eq_frame_idx = layer_frame_start + (
layer_frame_end - eq_frame_idx_offset
)
output_idx_by_frame_idx[frame_idx] = eq_frame_idx
elif pre_beh == "pingpong":
@@ -139,10 +141,10 @@
for frame_idx in range(layer_frame_end + 1, range_end + 1):
output_idx_by_frame_idx[frame_idx] = last_exposure_frame
elif post_beh in ("loop", "repeat"):
elif post_beh == "repeat":
# Loop forward from the frame after the layer's last frame
for frame_idx in range(layer_frame_end + 1, range_end + 1):
eq_frame_idx = frame_idx % frame_count
eq_frame_idx = layer_frame_start + (frame_idx % frame_count)
output_idx_by_frame_idx[frame_idx] = eq_frame_idx
elif post_beh == "pingpong":


@@ -18,6 +18,7 @@ from openpype.hosts.tvpaint.api.lib import (
from openpype.hosts.tvpaint.api.pipeline import (
get_current_workfile_context,
)
from openpype.pipeline.version_start import get_versioning_start
class LoadWorkfile(plugin.Loader):
@@ -95,7 +96,13 @@
)[1]
if version is None:
version = 1
version = get_versioning_start(
project_name,
"tvpaint",
task_name=task_name,
task_type=data["task"]["type"],
family="workfile"
)
else:
version += 1


@@ -76,11 +76,16 @@ class AnimationAlembicLoader(plugin.Loader):
asset_name = "{}_{}".format(asset, name)
else:
asset_name = "{}".format(name)
version = context.get('version').get('name')
version = context.get('version')
# Check if version is hero version and use different name
if not version.get("name") and version.get('type') == "hero_version":
name_version = f"{name}_hero"
else:
name_version = f"{name}_v{version.get('name'):03d}"
tools = unreal.AssetToolsHelpers().get_asset_tools()
asset_dir, container_name = tools.create_unique_asset_name(
f"{root}/{asset}/{name}_v{version:03d}", suffix="")
f"{root}/{asset}/{name_version}", suffix="")
container_name += suffix
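
A hypothetical illustration of the branch above (repeated across the Unreal loaders in this commit): hero versions carry no `name`, so the asset directory gets a `_hero` suffix instead of a padded version number:

```python
# Hypothetical version documents illustrating the branch above.
name = "animationMain"
hero_version = {"type": "hero_version"}          # hero versions have no "name"
regular_version = {"type": "version", "name": 3}

for version in (hero_version, regular_version):
    if not version.get("name") and version.get("type") == "hero_version":
        name_version = f"{name}_hero"
    else:
        name_version = f"{name}_v{version.get('name'):03d}"
    print(name_version)
# animationMain_hero
# animationMain_v003
```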


@@ -78,11 +78,16 @@ class SkeletalMeshAlembicLoader(plugin.Loader):
asset_name = "{}_{}".format(asset, name)
else:
asset_name = "{}".format(name)
version = context.get('version').get('name')
version = context.get('version')
# Check if version is hero version and use different name
if not version.get("name") and version.get('type') == "hero_version":
name_version = f"{name}_hero"
else:
name_version = f"{name}_v{version.get('name'):03d}"
tools = unreal.AssetToolsHelpers().get_asset_tools()
asset_dir, container_name = tools.create_unique_asset_name(
f"{root}/{asset}/{name}_v{version:03d}", suffix="")
f"{root}/{asset}/{name_version}", suffix="")
container_name += suffix


@@ -52,11 +52,16 @@ class SkeletalMeshFBXLoader(plugin.Loader):
asset_name = "{}_{}".format(asset, name)
else:
asset_name = "{}".format(name)
version = context.get('version').get('name')
version = context.get('version')
# Check if version is hero version and use different name
if not version.get("name") and version.get('type') == "hero_version":
name_version = f"{name}_hero"
else:
name_version = f"{name}_v{version.get('name'):03d}"
tools = unreal.AssetToolsHelpers().get_asset_tools()
asset_dir, container_name = tools.create_unique_asset_name(
f"{root}/{asset}/{name}_v{version:03d}", suffix="")
f"{root}/{asset}/{name_version}", suffix="")
container_name += suffix


@@ -79,11 +79,13 @@ class StaticMeshAlembicLoader(plugin.Loader):
root = "/Game/Ayon/Assets"
asset = context.get('asset').get('name')
suffix = "_CON"
if asset:
asset_name = "{}_{}".format(asset, name)
asset_name = f"{asset}_{name}" if asset else f"{name}"
version = context.get('version')
# Check if version is hero version and use different name
if not version.get("name") and version.get('type') == "hero_version":
name_version = f"{name}_hero"
else:
asset_name = "{}".format(name)
version = context.get('version').get('name')
name_version = f"{name}_v{version.get('name'):03d}"
default_conversion = False
if options.get("default_conversion"):
@@ -91,7 +93,7 @@ class StaticMeshAlembicLoader(plugin.Loader):
tools = unreal.AssetToolsHelpers().get_asset_tools()
asset_dir, container_name = tools.create_unique_asset_name(
f"{root}/{asset}/{name}_v{version:03d}", suffix="")
f"{root}/{asset}/{name_version}", suffix="")
container_name += suffix


@@ -78,10 +78,16 @@ class StaticMeshFBXLoader(plugin.Loader):
asset_name = "{}_{}".format(asset, name)
else:
asset_name = "{}".format(name)
version = context.get('version')
# Check if version is hero version and use different name
if not version.get("name") and version.get('type') == "hero_version":
name_version = f"{name}_hero"
else:
name_version = f"{name}_v{version.get('name'):03d}"
tools = unreal.AssetToolsHelpers().get_asset_tools()
asset_dir, container_name = tools.create_unique_asset_name(
f"{root}/{asset}/{name}", suffix=""
f"{root}/{asset}/{name_version}", suffix=""
)
container_name += suffix


@@ -1,4 +1,6 @@
import clique
import os
import re
import pyblish.api
@@ -21,7 +23,19 @@ class ValidateSequenceFrames(pyblish.api.InstancePlugin):
representations = instance.data.get("representations")
for repr in representations:
data = instance.data.get("assetEntity", {}).get("data", {})
patterns = [clique.PATTERNS["frames"]]
repr_files = repr["files"]
if isinstance(repr_files, str):
continue
ext = repr.get("ext")
if not ext:
_, ext = os.path.splitext(repr_files[0])
elif not ext.startswith("."):
ext = ".{}".format(ext)
pattern = r"\D?(?P<index>(?P<padding>0*)\d+){}$".format(
re.escape(ext))
patterns = [pattern]
collections, remainder = clique.assemble(
repr["files"], minimum_items=1, patterns=patterns)
@@ -30,6 +44,10 @@ class ValidateSequenceFrames(pyblish.api.InstancePlugin):
collection = collections[0]
frames = list(collection.indexes)
if instance.data.get("slate"):
# Slate is not part of the frame range
frames = frames[1:]
current_range = (frames[0], frames[-1])
required_range = (data["clipIn"],
data["clipOut"])


@@ -25,6 +25,7 @@ from openpype.lib import (
)
from openpype.pipeline.create import get_subset_name
from openpype_modules.webpublisher.lib import parse_json
from openpype.pipeline.version_start import get_versioning_start
class CollectPublishedFiles(pyblish.api.ContextPlugin):
@@ -103,7 +104,13 @@ class CollectPublishedFiles(pyblish.api.ContextPlugin):
project_settings=context.data["project_settings"]
)
version = self._get_next_version(
project_name, asset_doc, subset_name
project_name,
asset_doc,
task_name,
task_type,
family,
subset_name,
context
)
next_versions.append(version)
@@ -141,8 +148,9 @@ class CollectPublishedFiles(pyblish.api.ContextPlugin):
try:
no_of_frames = self._get_number_of_frames(file_url)
if no_of_frames:
frame_end = int(frame_start) + \
math.ceil(no_of_frames)
frame_end = (
int(frame_start) + math.ceil(no_of_frames)
)
frame_end = math.ceil(frame_end) - 1
instance.data["frameEnd"] = frame_end
self.log.debug("frameEnd:: {}".format(
@@ -270,7 +278,16 @@ class CollectPublishedFiles(pyblish.api.ContextPlugin):
config["families"],
config["tags"])
def _get_next_version(self, project_name, asset_doc, subset_name):
def _get_next_version(
self,
project_name,
asset_doc,
task_name,
task_type,
family,
subset_name,
context
):
"""Returns version number or 1 for 'asset' and 'subset'"""
version_doc = get_last_version_by_subset_name(
@@ -279,9 +296,19 @@
asset_doc["_id"],
fields=["name"]
)
version = 1
if version_doc:
version += int(version_doc["name"])
version = int(version_doc["name"]) + 1
else:
version = get_versioning_start(
project_name,
"webpublisher",
task_name=task_name,
task_type=task_type,
family=family,
subset=subset_name,
project_settings=context.data["project_settings"]
)
return version
def _get_number_of_frames(self, file_url):


@@ -280,13 +280,14 @@ class BatchPublishEndpoint(WebpublishApiEndpoint):
for key, value in add_args.items():
# Skip key values where value is None
if value is not None:
args.append("--{}".format(key))
# Extend list into arguments (targets can be a list)
if isinstance(value, (tuple, list)):
args.extend(value)
else:
args.append(value)
if value is None:
continue
arg_key = "--{}".format(key)
if not isinstance(value, (tuple, list)):
value = [value]
for item in value:
args += [arg_key, item]
log.info("args:: {}".format(args))
if add_to_queue:

View file
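The rewritten loop above flattens scalar and list values into repeated `--key value` pairs. A standalone sketch with hypothetical arguments:

```python
add_args = {
    "targets": ["farm", "local"],  # list value -> repeated flag
    "user": "artist",              # scalar value -> single flag
    "comment": None,               # None -> skipped entirely
}

args = []
for key, value in add_args.items():
    if value is None:
        continue
    arg_key = "--{}".format(key)
    if not isinstance(value, (tuple, list)):
        value = [value]
    for item in value:
        args += [arg_key, item]

print(args)
# ['--targets', 'farm', '--targets', 'local', '--user', 'artist']
```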

@ -3,6 +3,7 @@ import os
import re
import copy
import inspect
import collections
import logging
import weakref
from uuid import uuid4
@ -340,8 +341,8 @@ class EventSystem(object):
event.emit()
return event
def emit_event(self, event):
"""Emit event object.
def _process_event(self, event):
"""Process event topic and trigger callbacks.
Args:
event (Event): Prepared event with topic and data.
@ -356,6 +357,91 @@ class EventSystem(object):
for callback in invalid_callbacks:
self._registered_callbacks.remove(callback)
def emit_event(self, event):
"""Emit event object.
Args:
event (Event): Prepared event with topic and data.
"""
self._process_event(event)
class QueuedEventSystem(EventSystem):
"""Events are automatically processed in queue.
If a callback triggers another event, that event is not processed until
all callbacks of the previous event are processed.
Allows implementing a custom event processing loop by changing 'auto_execute'.
Note:
This probably should be default behavior of 'EventSystem'. Changing it
now could cause problems in existing code.
Args:
auto_execute (Optional[bool]): If 'True', events are processed
automatically. Custom loop calling 'process_next_event'
must be implemented when set to 'False'.
"""
def __init__(self, auto_execute=True):
super(QueuedEventSystem, self).__init__()
self._event_queue = collections.deque()
self._current_event = None
self._auto_execute = auto_execute
def __len__(self):
return self.count()
def count(self):
"""Get number of events in queue.
Returns:
int: Number of events in queue.
"""
return len(self._event_queue)
def process_next_event(self):
"""Process next event in queue.
Should be used only if 'auto_execute' is set to 'False'. Only single
event is processed.
Returns:
Union[Event, None]: Processed event.
"""
if self._current_event is not None:
raise ValueError("An event is already in progress.")
if not self._event_queue:
return None
event = self._event_queue.popleft()
self._current_event = event
self._process_event(event)
self._current_event = None
return event
def emit_event(self, event):
"""Emit event object.
Args:
event (Event): Prepared event with topic and data.
"""
if not self._auto_execute or self._current_event is not None:
self._event_queue.append(event)
return
self._event_queue.append(event)
while self._event_queue:
event = self._event_queue.popleft()
self._current_event = event
self._process_event(event)
self._current_event = None
class GlobalEventSystem:
"""Event system living in global scope of process.

View file
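A hedged usage sketch of `QueuedEventSystem` with `auto_execute=False`, assuming the event system's `emit(topic, data, source)` and `add_callback` helpers from the same module; it shows that an event emitted from a callback is queued instead of processed re-entrantly (topic names are made up):

```python
from openpype.lib.events import QueuedEventSystem

system = QueuedEventSystem(auto_execute=False)

def on_first(event):
    # Emitted while another event is in progress -> only enqueued
    system.emit("second.topic", {}, "demo")

system.add_callback("first.topic", on_first)
system.emit("first.topic", {}, "demo")

# Custom loop, as the docstring suggests for 'auto_execute=False'
while system.count():
    processed = system.process_next_event()
    print(processed.topic)
# first.topic
# second.topic
```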

@ -373,10 +373,12 @@ def _load_ayon_addons(openpype_modules, modules_key, log):
addons_info = _get_ayon_addons_information()
if not addons_info:
return v3_addons_to_skip
addons_dir = os.path.join(
appdirs.user_data_dir("AYON", "Ynput"),
"addons"
)
addons_dir = os.environ.get("AYON_ADDONS_DIR")
if not addons_dir:
addons_dir = os.path.join(
appdirs.user_data_dir("AYON", "Ynput"),
"addons"
)
if not os.path.exists(addons_dir):
log.warning("Addons directory does not exists. Path \"{}\"".format(
addons_dir

View file

@ -8,6 +8,7 @@ attribute or using default server if that attribute doesn't exists.
from maya import cmds
import pyblish.api
from openpype.pipeline.publish import KnownPublishError
class CollectDeadlineServerFromInstance(pyblish.api.InstancePlugin):
@ -81,13 +82,14 @@ class CollectDeadlineServerFromInstance(pyblish.api.InstancePlugin):
if k in default_servers
}
msg = (
"\"{}\" server on instance is not enabled in project settings."
" Enabled project servers:\n{}".format(
instance_server, project_enabled_servers
if instance_server not in project_enabled_servers:
msg = (
"\"{}\" server on instance is not enabled in project settings."
" Enabled project servers:\n{}".format(
instance_server, project_enabled_servers
)
)
)
assert instance_server in project_enabled_servers, msg
raise KnownPublishError(msg)
self.log.debug("Using project approved server.")
return project_enabled_servers[instance_server]

View file

@ -1,31 +1,31 @@
<?xml version="1.0" encoding="UTF-8"?>
<root>
<error id="main">
<title>Scene setting</title>
<title>Deadline Pools</title>
<description>
## Invalid Deadline pools found
## Invalid Deadline pools found
Configured pools don't match what is set in Deadline.
Configured pools don't match available pools in Deadline.
{invalid_value_str}
### How to repair?
### How to repair?
If your instance had deadline pools set on creation, remove or
change them.
If your instance had deadline pools set on creation, remove or
change them.
In other cases inform admin to change them in Settings.
In other cases inform admin to change them in Settings.
Available deadline pools:
{pools_str}
Available deadline pools {pools_str}.
</description>
<detail>
### __Detailed Info__
### __Detailed Info__
This error is shown when deadline pool is not on Deadline anymore. It
could happen in case of republish old workfile which was created with
previous deadline pools,
or someone changed pools on Deadline side, but didn't modify Openpype
Settings.
This error is shown when a configured pool is not available on Deadline.
It can happen when publishing old workfiles which were created with
previous Deadline pools, or when someone changed the available pools in
Deadline but didn't update the OpenPype Settings to match.
</detail>
</error>
</root>

View file

@ -12,7 +12,9 @@ from openpype.pipeline import (
legacy_io,
OpenPypePyblishPluginMixin
)
from openpype.settings import get_project_settings
from openpype.pipeline.publish.lib import (
replace_with_published_scene_path
)
from openpype.hosts.max.api.lib import (
get_current_renderer,
get_multipass_setting
@ -247,7 +249,8 @@ class MaxSubmitDeadline(abstract_submit_deadline.AbstractSubmitDeadline,
if instance.data["renderer"] == "Redshift_Renderer":
self.log.debug("Using Redshift...published scene wont be used..")
replace_in_path = False
return replace_in_path
return replace_with_published_scene_path(
instance, replace_in_path)
@staticmethod
def _iter_expected_files(exp):

View file

@ -90,7 +90,6 @@ class NukeSubmitDeadline(pyblish.api.InstancePlugin,
if not instance.data.get("farm"):
self.log.debug("Skipping local instance.")
return
instance.data["attributeValues"] = self.get_attr_values_from_data(
instance.data)
@ -123,13 +122,10 @@ class NukeSubmitDeadline(pyblish.api.InstancePlugin,
render_path = instance.data['path']
script_path = context.data["currentFile"]
for item in context:
if "workfile" in item.data["families"]:
msg = "Workfile (scene) must be published along"
assert item.data["publish"] is True, msg
template_data = item.data.get("anatomyData")
rep = item.data.get("representations")[0].get("name")
for item_ in context:
if "workfile" in item_.data["family"]:
template_data = item_.data.get("anatomyData")
rep = item_.data.get("representations")[0].get("name")
template_data["representation"] = rep
template_data["ext"] = rep
template_data["comment"] = None
@ -141,19 +137,24 @@ class NukeSubmitDeadline(pyblish.api.InstancePlugin,
"Using published scene for render {}".format(script_path)
)
response = self.payload_submit(
instance,
script_path,
render_path,
node.name(),
submit_frame_start,
submit_frame_end
)
# Store output dir for unified publisher (filesequence)
instance.data["deadlineSubmissionJob"] = response.json()
instance.data["outputDir"] = os.path.dirname(
render_path).replace("\\", "/")
instance.data["publishJobState"] = "Suspended"
# only add main rendering job if target is not frames_farm
r_job_response_json = None
if instance.data["render_target"] != "frames_farm":
r_job_response = self.payload_submit(
instance,
script_path,
render_path,
node.name(),
submit_frame_start,
submit_frame_end
)
r_job_response_json = r_job_response.json()
instance.data["deadlineSubmissionJob"] = r_job_response_json
# Store output dir for unified publisher (filesequence)
instance.data["outputDir"] = os.path.dirname(
render_path).replace("\\", "/")
instance.data["publishJobState"] = "Suspended"
if instance.data.get("bakingNukeScripts"):
for baking_script in instance.data["bakingNukeScripts"]:
@ -161,18 +162,20 @@ class NukeSubmitDeadline(pyblish.api.InstancePlugin,
script_path = baking_script["bakeScriptPath"]
exe_node_name = baking_script["bakeWriteNodeName"]
resp = self.payload_submit(
b_job_response = self.payload_submit(
instance,
script_path,
render_path,
exe_node_name,
submit_frame_start,
submit_frame_end,
response.json()
r_job_response_json,
baking_submission=True
)
# Store output dir for unified publisher (filesequence)
instance.data["deadlineSubmissionJob"] = resp.json()
instance.data["deadlineSubmissionJob"] = b_job_response.json()
instance.data["publishJobState"] = "Suspended"
# add to list of job Id
@ -180,7 +183,7 @@ class NukeSubmitDeadline(pyblish.api.InstancePlugin,
instance.data["bakingSubmissionJobs"] = []
instance.data["bakingSubmissionJobs"].append(
resp.json()["_id"])
b_job_response.json()["_id"])
# redefinition of families
if "render" in instance.data["family"]:
@ -199,15 +202,35 @@ class NukeSubmitDeadline(pyblish.api.InstancePlugin,
exe_node_name,
start_frame,
end_frame,
response_data=None
response_data=None,
baking_submission=False,
):
"""Submit payload to Deadline
Args:
instance (pyblish.api.Instance): pyblish instance
script_path (str): path to nuke script
render_path (str): path to rendered images
exe_node_name (str): name of the node to render
start_frame (int): start frame
end_frame (int): end frame
response_data (Optional[dict]): response data from
previous submission
baking_submission (Optional[bool]): whether this is a baking submission
Returns:
requests.Response
"""
render_dir = os.path.normpath(os.path.dirname(render_path))
batch_name = os.path.basename(script_path)
jobname = "%s - %s" % (batch_name, instance.name)
# batch name
src_filepath = instance.context.data["currentFile"]
batch_name = os.path.basename(src_filepath)
job_name = os.path.basename(render_path)
if is_in_tests():
batch_name += datetime.now().strftime("%d%m%Y%H%M%S")
output_filename_0 = self.preview_fname(render_path)
if not response_data:
@ -228,11 +251,8 @@ class NukeSubmitDeadline(pyblish.api.InstancePlugin,
# Top-level group name
"BatchName": batch_name,
# Asset dependency to wait for at least the scene file to sync.
# "AssetDependency0": script_path,
# Job name, as seen in Monitor
"Name": jobname,
"Name": job_name,
# Arbitrary username, for visualisation in Monitor
"UserName": self._deadline_user,
@ -294,12 +314,17 @@ class NukeSubmitDeadline(pyblish.api.InstancePlugin,
"AuxFiles": []
}
if response_data.get("_id"):
# TODO: rewrite for baking with sequences
if baking_submission:
payload["JobInfo"].update({
"JobType": "Normal",
"ChunkSize": 99999999
})
if response_data.get("_id"):
payload["JobInfo"].update({
"BatchName": response_data["Props"]["Batch"],
"JobDependency0": response_data["_id"],
"ChunkSize": 99999999
})
# Include critical environment variables with submission

View file

@ -3,7 +3,7 @@
import os
import json
import re
from copy import copy, deepcopy
from copy import deepcopy
import requests
import clique
@ -16,6 +16,7 @@ from openpype.client import (
from openpype.pipeline import publish, legacy_io
from openpype.lib import EnumDef, is_running_from_build
from openpype.tests.lib import is_in_tests
from openpype.pipeline.version_start import get_versioning_start
from openpype.pipeline.farm.pyblish_functions import (
create_skeleton_instance,
@ -97,7 +98,8 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin,
hosts = ["fusion", "max", "maya", "nuke", "houdini",
"celaction", "aftereffects", "harmony"]
families = ["render.farm", "prerender.farm",
families = ["render.farm", "render.frames_farm",
"prerender.farm", "prerender.frames_farm",
"renderlayer", "imagesequence",
"vrayscene", "maxrender",
"arnold_rop", "mantra_rop",
@ -120,7 +122,9 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin,
"FTRACK_SERVER",
"AVALON_APP_NAME",
"OPENPYPE_USERNAME",
"OPENPYPE_SG_USER"
"OPENPYPE_SG_USER",
"KITSU_LOGIN",
"KITSU_PWD"
]
# custom deadline attributes
@ -210,7 +214,7 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin,
environment["OPENPYPE_PUBLISH_JOB"] = "1"
environment["OPENPYPE_RENDER_JOB"] = "0"
environment["OPENPYPE_REMOTE_PUBLISH"] = "0"
deadline_plugin = "Openpype"
deadline_plugin = "OpenPype"
# Add OpenPype version if we are running from build.
if is_running_from_build():
self.environ_keys.append("OPENPYPE_VERSION")
@ -298,7 +302,7 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin,
payload["JobInfo"]["JobDependency{}".format(
job_index)] = assembly_id # noqa: E501
job_index += 1
else:
elif job.get("_id"):
payload["JobInfo"]["JobDependency0"] = job["_id"]
for index, (key_, value_) in enumerate(environment.items()):
@ -474,6 +478,7 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin,
"FTRACK_SERVER": os.environ.get("FTRACK_SERVER"),
}
deadline_publish_job_id = None
if submission_type == "deadline":
# get default deadline webservice url from deadline module
self.deadline_url = instance.context.data["defaultDeadline"]
@ -566,7 +571,15 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin,
if version:
version = int(version["name"]) + 1
else:
version = 1
version = get_versioning_start(
project_name,
template_data["app"],
task_name=template_data["task"]["name"],
task_type=template_data["task"]["type"],
family="render",
subset=subset,
project_settings=context.data["project_settings"]
)
host_name = context.data["hostName"]
task_info = template_data.get("task") or {}

View file

@ -1,8 +1,7 @@
import os
import requests
import pyblish.api
from openpype_modules.deadline.abstract_submit_deadline import requests_get
class ValidateDeadlineConnection(pyblish.api.InstancePlugin):
"""Validate Deadline Web Service is running"""
@ -10,7 +9,10 @@ class ValidateDeadlineConnection(pyblish.api.InstancePlugin):
label = "Validate Deadline Web Service"
order = pyblish.api.ValidatorOrder
hosts = ["maya", "nuke"]
families = ["renderlayer"]
families = ["renderlayer", "render"]
# cache
responses = {}
def process(self, instance):
# get default deadline webservice url from deadline module
@ -18,28 +20,16 @@ class ValidateDeadlineConnection(pyblish.api.InstancePlugin):
# if custom one is set in instance, use that
if instance.data.get("deadlineUrl"):
deadline_url = instance.data.get("deadlineUrl")
self.log.info(
"We have deadline URL on instance {}".format(
deadline_url))
self.log.debug(
"We have deadline URL on instance {}".format(deadline_url)
)
assert deadline_url, "Requires Deadline Webservice URL"
# Check response
response = self._requests_get(deadline_url)
if deadline_url not in self.responses:
self.responses[deadline_url] = requests_get(deadline_url)
response = self.responses[deadline_url]
assert response.ok, "Response must be ok"
assert response.text.startswith("Deadline Web Service "), (
"Web service did not respond with 'Deadline Web Service'"
)
def _requests_get(self, *args, **kwargs):
""" Wrapper for requests, disabling SSL certificate validation if
DONT_VERIFY_SSL environment variable is found. This is useful when
Deadline or Muster server are running with self-signed certificates
and their certificate is not added to trusted certificates on
client machines.
WARNING: disabling SSL certificate validation is defeating one line
of defense SSL is providing and it is not recommended.
"""
if 'verify' not in kwargs:
kwargs['verify'] = False if os.getenv("OPENPYPE_DONT_VERIFY_SSL", True) else True # noqa
return requests.get(*args, **kwargs)

View file
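The removed `_requests_get` wrapper is superseded by the shared `requests_get` helper imported from `abstract_submit_deadline`. A minimal sketch of the intent documented in the removed docstring (not a byte-for-byte copy of the old implementation):

```python
import os
import requests

def requests_get(*args, **kwargs):
    # Skip certificate validation when OPENPYPE_DONT_VERIFY_SSL is set,
    # useful for self-signed Deadline certificates. This removes one
    # line of SSL defense and is not recommended on untrusted networks.
    if "verify" not in kwargs:
        kwargs["verify"] = not os.getenv("OPENPYPE_DONT_VERIFY_SSL")
    return requests.get(*args, **kwargs)
```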

@ -19,38 +19,64 @@ class ValidateDeadlinePools(OptionalPyblishPluginMixin,
order = pyblish.api.ValidatorOrder
families = ["rendering",
"render.farm",
"render.frames_farm",
"renderFarm",
"renderlayer",
"maxrender"]
optional = True
# cache
pools_per_url = {}
def process(self, instance):
if not self.is_active(instance.data):
return
if not instance.data.get("farm"):
self.log.debug("Skipping local instance.")
return
# get default deadline webservice url from deadline module
deadline_url = instance.context.data["defaultDeadline"]
self.log.info("deadline_url::{}".format(deadline_url))
pools = DeadlineModule.get_deadline_pools(deadline_url, log=self.log)
self.log.info("pools::{}".format(pools))
formatting_data = {
"pools_str": ",".join(pools)
}
deadline_url = self.get_deadline_url(instance)
pools = self.get_pools(deadline_url)
invalid_pools = {}
primary_pool = instance.data.get("primaryPool")
if primary_pool and primary_pool not in pools:
msg = "Configured primary '{}' not present on Deadline".format(
instance.data["primaryPool"])
formatting_data["invalid_value_str"] = msg
raise PublishXmlValidationError(self, msg,
formatting_data=formatting_data)
invalid_pools["primary"] = primary_pool
secondary_pool = instance.data.get("secondaryPool")
if secondary_pool and secondary_pool not in pools:
msg = "Configured secondary '{}' not present on Deadline".format(
instance.data["secondaryPool"])
formatting_data["invalid_value_str"] = msg
raise PublishXmlValidationError(self, msg,
formatting_data=formatting_data)
invalid_pools["secondary"] = secondary_pool
if invalid_pools:
message = "\n".join(
"{} pool '{}' not available on Deadline".format(key.title(),
pool)
for key, pool in invalid_pools.items()
)
raise PublishXmlValidationError(
plugin=self,
message=message,
formatting_data={"pools_str": ", ".join(pools)}
)
def get_deadline_url(self, instance):
# get default deadline webservice url from deadline module
deadline_url = instance.context.data["defaultDeadline"]
if instance.data.get("deadlineUrl"):
# if custom one is set in instance, use that
deadline_url = instance.data.get("deadlineUrl")
return deadline_url
def get_pools(self, deadline_url):
if deadline_url not in self.pools_per_url:
self.log.debug(
"Querying available pools for Deadline url: {}".format(
deadline_url)
)
pools = DeadlineModule.get_deadline_pools(deadline_url,
log=self.log)
self.log.info("Available pools: {}".format(pools))
self.pools_per_url[deadline_url] = pools
return self.pools_per_url[deadline_url]

View file
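Both Deadline validators above now memoize web-service responses in a class-level dict, so repeated instances within one publish query each URL only once. The idiom in isolation (query function and pool names are stand-ins):

```python
class PoolsCache(object):
    # Class-level cache shared by all instances in the process
    pools_per_url = {}

    def get_pools(self, deadline_url):
        if deadline_url not in self.pools_per_url:
            # Stand-in for DeadlineModule.get_deadline_pools(...)
            self.pools_per_url[deadline_url] = self._query(deadline_url)
        return self.pools_per_url[deadline_url]

    def _query(self, deadline_url):
        print("querying {}".format(deadline_url))
        return ["general", "comp"]


cache = PoolsCache()
cache.get_pools("http://deadline:8082")  # prints 'querying ...'
cache.get_pools("http://deadline:8082")  # served from cache, no query
```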

@ -20,8 +20,19 @@ class ValidateExpectedFiles(pyblish.api.InstancePlugin):
allow_user_override = True
def process(self, instance):
self.instance = instance
frame_list = self._get_frame_list(instance.data["render_job_id"])
"""Process all the nodes in the instance"""
# get dependency jobs ids for retrieving frame list
dependent_job_ids = self._get_dependent_job_ids(instance)
if not dependent_job_ids:
self.log.warning("No dependent jobs found for instance: {}"
"".format(instance))
return
# get list of frames from dependent jobs
frame_list = self._get_dependent_jobs_frames(
instance, dependent_job_ids)
for repre in instance.data["representations"]:
expected_files = self._get_expected_files(repre)
@ -78,26 +89,45 @@ class ValidateExpectedFiles(pyblish.api.InstancePlugin):
)
)
def _get_frame_list(self, original_job_id):
def _get_dependent_job_ids(self, instance):
"""Returns list of dependent job ids from instance metadata.json
Args:
instance (pyblish.api.Instance): pyblish instance
Returns:
(list): list of dependent job ids
"""
dependent_job_ids = []
# job_id collected from metadata.json
original_job_id = instance.data["render_job_id"]
dependent_job_ids_env = os.environ.get("RENDER_JOB_IDS")
if dependent_job_ids_env:
dependent_job_ids = dependent_job_ids_env.split(',')
elif original_job_id:
dependent_job_ids = [original_job_id]
return dependent_job_ids
def _get_dependent_jobs_frames(self, instance, dependent_job_ids):
"""Returns list of frame ranges from all render job.
Render job might be re-submitted so job_id in metadata.json could be
invalid. GlobalJobPreload injects current job id to RENDER_JOB_IDS.
Args:
original_job_id (str)
instance (pyblish.api.Instance): pyblish instance
dependent_job_ids (list): list of dependent job ids
Returns:
(list)
"""
all_frame_lists = []
render_job_ids = os.environ.get("RENDER_JOB_IDS")
if render_job_ids:
render_job_ids = render_job_ids.split(',')
else: # fallback
render_job_ids = [original_job_id]
for job_id in render_job_ids:
job_info = self._get_job_info(job_id)
for job_id in dependent_job_ids:
job_info = self._get_job_info(instance, job_id)
frame_list = job_info["Props"].get("Frames")
if frame_list:
all_frame_lists.extend(frame_list.split(','))
@ -152,18 +182,25 @@ class ValidateExpectedFiles(pyblish.api.InstancePlugin):
return file_name_template, frame_placeholder
def _get_job_info(self, job_id):
def _get_job_info(self, instance, job_id):
"""Calls DL for actual job info for 'job_id'
Might be different from the job info saved in metadata.json if a user
manually changes the job before or during rendering.
Args:
instance (pyblish.api.Instance): pyblish instance
job_id (str): Deadline job id
Returns:
(dict): Job info from Deadline
"""
# get default deadline webservice url from deadline module
deadline_url = self.instance.context.data["defaultDeadline"]
deadline_url = instance.context.data["defaultDeadline"]
# if custom one is set in instance, use that
if self.instance.data.get("deadlineUrl"):
deadline_url = self.instance.data.get("deadlineUrl")
if instance.data.get("deadlineUrl"):
deadline_url = instance.data.get("deadlineUrl")
assert deadline_url, "Requires Deadline Webservice URL"
url = "{}/api/jobs?JobID={}".format(deadline_url, job_id)

View file
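The new `_get_dependent_job_ids` prefers the ids injected by GlobalJobPreload over the possibly stale job id from metadata.json. The fallback order, extracted into a standalone sketch:

```python
import os

def get_dependent_job_ids(render_job_id):
    # GlobalJobPreload injects current job ids into RENDER_JOB_IDS;
    # fall back to the job id stored in metadata.json.
    ids_env = os.environ.get("RENDER_JOB_IDS")
    if ids_env:
        return ids_env.split(",")
    if render_job_id:
        return [render_job_id]
    return []

os.environ["RENDER_JOB_IDS"] = "123,456"
print(get_dependent_job_ids("789"))  # ['123', '456'] - env wins
```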

@ -38,6 +38,7 @@ class AyonDeadlinePlugin(DeadlinePlugin):
for publish process.
"""
def __init__(self):
super().__init__()
self.InitializeProcessCallback += self.InitializeProcess
self.RenderExecutableCallback += self.RenderExecutable
self.RenderArgumentCallback += self.RenderArgument
@ -90,7 +91,13 @@ class AyonDeadlinePlugin(DeadlinePlugin):
# clean '\ ' for MacOS pasting
if platform.system().lower() == "darwin":
exe_list = exe_list.replace("\\ ", " ")
exe = FileUtils.SearchFileList(exe_list)
expanded_paths = []
for path in exe_list.split(";"):
if path.startswith("~"):
path = os.path.expanduser(path)
expanded_paths.append(path)
exe = FileUtils.SearchFileList(";".join(expanded_paths))
if exe == "":
self.FailRender(

View file

@ -547,7 +547,14 @@ def get_ayon_executable():
# clean '\ ' for MacOS pasting
if platform.system().lower() == "darwin":
exe_list = exe_list.replace("\\ ", " ")
return exe_list
# Expand user paths
expanded_paths = []
for path in exe_list.split(";"):
if path.startswith("~"):
path = os.path.expanduser(path)
expanded_paths.append(path)
return ";".join(expanded_paths)
def inject_render_job_id(deadlinePlugin):

View file
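Both Deadline plugins above apply the same user-path expansion to the semicolon-separated executable list. In isolation (paths are hypothetical):

```python
import os

exe_list = "~/ayon/ayon_console;/usr/local/bin/ayon_console"

expanded_paths = []
for path in exe_list.split(";"):
    if path.startswith("~"):
        path = os.path.expanduser(path)
    expanded_paths.append(path)

print(";".join(expanded_paths))
# e.g. '/home/artist/ayon/ayon_console;/usr/local/bin/ayon_console'
```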

@ -8,13 +8,14 @@ from Deadline.Scripting import *
def GetDeadlinePlugin():
return HarmonyOpenPypePlugin()
def CleanupDeadlinePlugin( deadlinePlugin ):
deadlinePlugin.Cleanup()
class HarmonyOpenPypePlugin( DeadlinePlugin ):
def __init__( self ):
super().__init__()
self.InitializeProcessCallback += self.InitializeProcess
self.RenderExecutableCallback += self.RenderExecutable
self.RenderArgumentCallback += self.RenderArgument
@ -24,11 +25,11 @@ class HarmonyOpenPypePlugin( DeadlinePlugin ):
print("Cleanup")
for stdoutHandler in self.StdoutHandlers:
del stdoutHandler.HandleCallback
del self.InitializeProcessCallback
del self.RenderExecutableCallback
del self.RenderArgumentCallback
def CheckExitCode( self, exitCode ):
print("check code")
if exitCode != 0:
@ -36,20 +37,20 @@ class HarmonyOpenPypePlugin( DeadlinePlugin ):
self.LogInfo( "Renderer reported an error with error code 100. This will be ignored, since the option to ignore it is specified in the Job Properties." )
else:
self.FailRender( "Renderer returned non-zero error code %d. Check the renderer's output." % exitCode )
def InitializeProcess( self ):
self.PluginType = PluginType.Simple
self.StdoutHandling = True
self.PopupHandling = True
self.AddStdoutHandlerCallback( "Rendered frame ([0-9]+)" ).HandleCallback += self.HandleStdoutProgress
def HandleStdoutProgress( self ):
startFrame = self.GetStartFrame()
endFrame = self.GetEndFrame()
if( endFrame - startFrame + 1 != 0 ):
self.SetProgress( 100 * ( int(self.GetRegexMatch(1)) - startFrame + 1 ) / ( endFrame - startFrame + 1 ) )
def RenderExecutable( self ):
version = int( self.GetPluginInfoEntry( "Version" ) )
exe = ""
@ -58,7 +59,7 @@ class HarmonyOpenPypePlugin( DeadlinePlugin ):
if( exe == "" ):
self.FailRender( "Harmony render executable was not found in the configured separated list \"" + exeList + "\". The path to the render executable can be configured from the Plugin Configuration in the Deadline Monitor." )
return exe
def RenderArgument( self ):
renderArguments = "-batch"
@ -72,20 +73,20 @@ class HarmonyOpenPypePlugin( DeadlinePlugin ):
resolutionX = self.GetIntegerPluginInfoEntryWithDefault( "ResolutionX", -1 )
resolutionY = self.GetIntegerPluginInfoEntryWithDefault( "ResolutionY", -1 )
fov = self.GetFloatPluginInfoEntryWithDefault( "FieldOfView", -1 )
if resolutionX > 0 and resolutionY > 0 and fov > 0:
renderArguments += " -res " + str( resolutionX ) + " " + str( resolutionY ) + " " + str( fov )
camera = self.GetPluginInfoEntryWithDefault( "Camera", "" )
if not camera == "":
renderArguments += " -camera " + camera
startFrame = str( self.GetStartFrame() )
endFrame = str( self.GetEndFrame() )
renderArguments += " -frames " + startFrame + " " + endFrame
if not self.GetBooleanPluginInfoEntryWithDefault( "IsDatabase", False ):
sceneFilename = self.GetPluginInfoEntryWithDefault( "SceneFile", self.GetDataFilename() )
sceneFilename = RepositoryUtils.CheckPathMapping( sceneFilename )
@ -99,12 +100,12 @@ class HarmonyOpenPypePlugin( DeadlinePlugin ):
renderArguments += " -scene " + scene
version = self.GetPluginInfoEntryWithDefault( "SceneVersion", "" )
renderArguments += " -version " + version
#tempSceneDirectory = self.CreateTempDirectory( "thread" + str(self.GetThreadNumber()) )
#preRenderScript =
#preRenderScript =
rendernodeNum = 0
scriptBuilder = StringBuilder()
while True:
nodeName = self.GetPluginInfoEntryWithDefault( "Output" + str( rendernodeNum ) + "Node", "" )
if nodeName == "":
@ -115,35 +116,35 @@ class HarmonyOpenPypePlugin( DeadlinePlugin ):
nodeLeadingZero = self.GetPluginInfoEntryWithDefault( "Output" + str( rendernodeNum ) + "LeadingZero", "" )
nodeFormat = self.GetPluginInfoEntryWithDefault( "Output" + str( rendernodeNum ) + "Format", "" )
nodeStartFrame = self.GetPluginInfoEntryWithDefault( "Output" + str( rendernodeNum ) + "StartFrame", "" )
if not nodePath == "":
scriptBuilder.AppendLine("node.setTextAttr( \"" + nodeName + "\", \"drawingName\", 1, \"" + nodePath + "\" );")
if not nodeLeadingZero == "":
scriptBuilder.AppendLine("node.setTextAttr( \"" + nodeName + "\", \"leadingZeros\", 1, \"" + nodeLeadingZero + "\" );")
if not nodeFormat == "":
scriptBuilder.AppendLine("node.setTextAttr( \"" + nodeName + "\", \"drawingType\", 1, \"" + nodeFormat + "\" );")
if not nodeStartFrame == "":
scriptBuilder.AppendLine("node.setTextAttr( \"" + nodeName + "\", \"start\", 1, \"" + nodeStartFrame + "\" );")
if nodeType == "Movie":
nodePath = self.GetPluginInfoEntryWithDefault( "Output" + str( rendernodeNum ) + "Path", "" )
if not nodePath == "":
scriptBuilder.AppendLine("node.setTextAttr( \"" + nodeName + "\", \"moviePath\", 1, \"" + nodePath + "\" );")
rendernodeNum += 1
tempDirectory = self.CreateTempDirectory( "thread" + str(self.GetThreadNumber()) )
preRenderScriptName = Path.Combine( tempDirectory, "preRenderScript.txt" )
File.WriteAllText( preRenderScriptName, scriptBuilder.ToString() )
preRenderInlineScript = self.GetPluginInfoEntryWithDefault( "PreRenderInlineScript", "" )
if preRenderInlineScript:
renderArguments += " -preRenderInlineScript \"" + preRenderInlineScript +"\""
renderArguments += " -preRenderScript \"" + preRenderScriptName +"\""
return renderArguments

View file

@ -38,6 +38,7 @@ class OpenPypeDeadlinePlugin(DeadlinePlugin):
for publish process.
"""
def __init__(self):
super().__init__()
self.InitializeProcessCallback += self.InitializeProcess
self.RenderExecutableCallback += self.RenderExecutable
self.RenderArgumentCallback += self.RenderArgument
@ -107,7 +108,7 @@ class OpenPypeDeadlinePlugin(DeadlinePlugin):
"Scanning for compatible requested "
f"version {requested_version}"))
dir_list = self.GetConfigEntry("OpenPypeInstallationDirs")
# clean '\ ' for MacOS pasting
if platform.system().lower() == "darwin":
dir_list = dir_list.replace("\\ ", " ")

View file

@ -249,6 +249,7 @@ class OpenPypeTileAssembler(DeadlinePlugin):
def __init__(self):
"""Init."""
super().__init__()
self.InitializeProcessCallback += self.initialize_process
self.RenderExecutableCallback += self.render_executable
self.RenderArgumentCallback += self.render_argument

View file

@ -11,10 +11,8 @@ Provides:
"""
import os
import sys
import collections
import six
import pyblish.api
import clique
@ -355,7 +353,7 @@ class IntegrateFtrackApi(pyblish.api.InstancePlugin):
status_name = asset_version_data.pop("status_name", None)
# Try query asset version by criteria (asset id and version)
version = asset_version_data.get("version") or 0
version = asset_version_data.get("version") or "0"
asset_version_entity = self._query_asset_version(
session, version, asset_id
)

View file

@ -116,6 +116,18 @@ class CopyLastPublishedWorkfile(PreLaunchHook):
"task": {"name": task_name, "type": task_type}
}
# Add version filter
workfile_version = self.launch_context.data.get("workfile_version", -1)
if workfile_version > 0 and workfile_version not in {None, "last"}:
context_filters["version"] = self.launch_context.data[
"workfile_version"
]
# Only one version will be matched
version_index = 0
else:
version_index = workfile_version
workfile_representations = list(get_representations(
project_name,
context_filters=context_filters
@ -133,9 +145,10 @@ class CopyLastPublishedWorkfile(PreLaunchHook):
lambda r: r["context"].get("version") is not None,
workfile_representations
)
workfile_representation = max(
# Get workfile version
workfile_representation = sorted(
filtered_repres, key=lambda r: r["context"]["version"]
)
)[version_index]
# Copy file and substitute path
last_published_workfile_path = download_last_published_workfile(

View file
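A sketch of the version selection added above, using hypothetical representation documents: a positive `workfile_version` is matched by the query itself, while `-1` (or another negative index) picks from the sorted list:

```python
workfile_version = -1  # hook input: -1 = last, -2 = one before last, ...

context_filters = {}
if workfile_version > 0 and workfile_version not in {None, "last"}:
    context_filters["version"] = workfile_version
    version_index = 0   # the query matches exactly one version
else:
    version_index = workfile_version  # negative index into sorted list

# Hypothetical docs, as returned by get_representations()
repres = [
    {"context": {"version": 1}},
    {"context": {"version": 3}},
    {"context": {"version": 2}},
]
picked = sorted(repres, key=lambda r: r["context"]["version"])[version_index]
print(picked["context"]["version"])  # 3 -> the last published version
```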

@ -94,7 +94,7 @@ from .context_tools import (
get_current_host_name,
get_current_project_name,
get_current_asset_name,
get_current_task_name,
get_current_task_name
)
install = install_host
uninstall = uninstall_host

View file

@ -21,6 +21,7 @@ from openpype.client import (
from openpype.lib.events import emit_event
from openpype.modules import load_modules, ModulesManager
from openpype.settings import get_project_settings
from openpype.tests.lib import is_in_tests
from .publish.lib import filter_pyblish_plugins
from .anatomy import Anatomy
@ -35,7 +36,7 @@ from . import (
register_inventory_action_path,
register_creator_plugin_path,
deregister_loader_plugin_path,
deregister_inventory_action_path,
deregister_inventory_action_path
)
@ -142,6 +143,10 @@ def install_host(host):
else:
pyblish.api.register_target("local")
if is_in_tests():
print("Registering pyblish target: automated")
pyblish.api.register_target("automated")
project_name = os.environ.get("AVALON_PROJECT")
host_name = os.environ.get("AVALON_APP")

View file

@ -2,6 +2,7 @@ from .constants import (
SUBSET_NAME_ALLOWED_SYMBOLS,
DEFAULT_SUBSET_TEMPLATE,
PRE_CREATE_THUMBNAIL_KEY,
DEFAULT_VARIANT_VALUE,
)
from .utils import (
@ -50,6 +51,7 @@ __all__ = (
"SUBSET_NAME_ALLOWED_SYMBOLS",
"DEFAULT_SUBSET_TEMPLATE",
"PRE_CREATE_THUMBNAIL_KEY",
"DEFAULT_VARIANT_VALUE",
"get_last_versions_for_instances",
"get_next_versions_for_instances",

View file

@ -1,10 +1,12 @@
SUBSET_NAME_ALLOWED_SYMBOLS = "a-zA-Z0-9_."
DEFAULT_SUBSET_TEMPLATE = "{family}{Variant}"
PRE_CREATE_THUMBNAIL_KEY = "thumbnail_source"
DEFAULT_VARIANT_VALUE = "Main"
__all__ = (
"SUBSET_NAME_ALLOWED_SYMBOLS",
"DEFAULT_SUBSET_TEMPLATE",
"PRE_CREATE_THUMBNAIL_KEY",
"DEFAULT_VARIANT_VALUE",
)

View file

@ -1,4 +1,3 @@
import os
import copy
import collections
@ -20,6 +19,7 @@ from openpype.pipeline.plugin_discover import (
deregister_plugin_path
)
from .constants import DEFAULT_VARIANT_VALUE
from .subset_name import get_subset_name
from .utils import get_next_versions_for_instances
from .legacy_create import LegacyCreator
@ -517,7 +517,7 @@ class Creator(BaseCreator):
default_variants = []
# Default variant used in 'get_default_variant'
default_variant = None
_default_variant = None
# Short description of family
# - may not be used if `get_description` is overriden
@ -543,6 +543,21 @@ class Creator(BaseCreator):
# - similar to instance attribute definitions
pre_create_attr_defs = []
def __init__(self, *args, **kwargs):
cls = self.__class__
# Fix backwards compatibility for plugins which override
# 'default_variant' attribute directly
if not isinstance(cls.default_variant, property):
# Move value from 'default_variant' to '_default_variant'
self._default_variant = self.default_variant
# Create property 'default_variant' on the class
cls.default_variant = property(
cls._get_default_variant_wrap,
cls._set_default_variant_wrap
)
super(Creator, self).__init__(*args, **kwargs)
@property
def show_order(self):
"""Order in which is creator shown in UI.
@ -595,10 +610,10 @@ class Creator(BaseCreator):
def get_default_variants(self):
"""Default variant values for UI tooltips.
Replacement of `defatults` attribute. Using method gives ability to
have some "logic" other than attribute values.
Replacement of `default_variants` attribute. Using a method gives
the ability to apply logic beyond static attribute values.
By default returns `default_variants` value.
By default, returns `default_variants` value.
Returns:
List[str]: Whisper variants for user input.
@ -606,17 +621,63 @@ class Creator(BaseCreator):
return copy.deepcopy(self.default_variants)
def get_default_variant(self):
def get_default_variant(self, only_explicit=False):
"""Default variant value that will be used to prefill variant input.
This is for user input and the value may not be present in the result
of `get_default_variants`.
Can return `None`; in that case the first element of
`get_default_variants` should be used.
Note:
This method does not allow to have empty string as
default variant.
Args:
only_explicit (Optional[bool]): If True, only explicit default
variant from '_default_variant' will be returned.
Returns:
str: Variant value.
"""
return self.default_variant
if only_explicit or self._default_variant:
return self._default_variant
for variant in self.get_default_variants():
return variant
return DEFAULT_VARIANT_VALUE
def _get_default_variant_wrap(self):
"""Default variant value that will be used to prefill variant input.
Wrapper for 'get_default_variant'.
Notes:
This method is wrapper for 'get_default_variant'
for 'default_variant' property, so creator can override
the method.
Returns:
str: Variant value.
"""
return self.get_default_variant()
def _set_default_variant_wrap(self, variant):
"""Set default variant value.
This method is needed for automated settings overrides which are
changing attributes based on keys in settings.
Args:
variant (str): New default variant value.
"""
self._default_variant = variant
default_variant = property(
_get_default_variant_wrap,
_set_default_variant_wrap
)
def get_pre_create_attr_defs(self):
"""Plugin attribute definitions needed for creation.

View file
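The `__init__` above converts a plain `default_variant` class attribute into a property on first instantiation, so legacy creators that assign the attribute directly keep working while new code goes through `get_default_variant`. A stripped-down sketch of the pattern ('Main' stands in for `DEFAULT_VARIANT_VALUE`):

```python
class Base(object):
    _default_variant = None
    default_variant = None  # legacy plugins may override this directly

    def __init__(self):
        cls = self.__class__
        if not isinstance(cls.default_variant, property):
            # Move legacy value aside and replace attr with a property
            self._default_variant = self.default_variant
            cls.default_variant = property(
                lambda self: self._default_variant or "Main",
                lambda self, value: setattr(
                    self, "_default_variant", value),
            )


class LegacyPlugin(Base):
    default_variant = "Hero"  # old-style override still works


plugin = LegacyPlugin()
print(plugin.default_variant)   # 'Hero' via the generated property
plugin.default_variant = "Alt"  # setter updates '_default_variant'
print(plugin.default_variant)   # 'Alt'
```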

@ -116,8 +116,8 @@ def get_time_data_from_instance_or_context(instance):
instance.context.data.get("fps")),
handle_start=(instance.data.get("handleStart") or
instance.context.data.get("handleStart")), # noqa: E501
handle_end=(instance.data.get("handleStart") or
instance.context.data.get("handleStart"))
handle_end=(instance.data.get("handleEnd") or
instance.context.data.get("handleEnd"))
)
@ -139,7 +139,7 @@ def get_transferable_representations(instance):
to_transfer = []
for representation in instance.data.get("representations", []):
if "publish_on_farm" not in representation.get("tags"):
if "publish_on_farm" not in representation.get("tags", []):
continue
trans_rep = representation.copy()
@ -265,8 +265,7 @@ def create_skeleton_instance(
instance_skeleton_data[v] = instance.data.get(v)
representations = get_transferable_representations(instance)
instance_skeleton_data["representations"] = []
instance_skeleton_data["representations"] += representations
instance_skeleton_data["representations"] = representations
persistent = instance.data.get("stagingDir_persistent") is True
instance_skeleton_data["stagingDir_persistent"] = persistent
@ -568,9 +567,15 @@ def _create_instances_for_aov(instance, skeleton, aov_filter, additional_data,
col = list(cols[0])
# create subset name `familyTaskSubset_AOV`
group_name = 'render{}{}{}{}'.format(
task[0].upper(), task[1:],
subset[0].upper(), subset[1:])
# TODO refactor/remove me
family = skeleton["family"]
if not subset.startswith(family):
group_name = '{}{}{}{}{}'.format(
family,
task[0].upper(), task[1:],
subset[0].upper(), subset[1:])
else:
group_name = subset
# if there are multiple cameras, we need to add camera name
if isinstance(col, (list, tuple)):

View file

@ -464,9 +464,8 @@ def apply_plugin_settings_automatically(plugin, settings, logger=None):
for option, value in settings.items():
if logger:
logger.debug("Plugin {} - Attr: {} -> {}".format(
option, value, plugin.__name__
))
logger.debug("Plugin %s - Attr: %s -> %s",
plugin.__name__, option, value)
setattr(plugin, option, value)

View file

@ -3,6 +3,7 @@ import copy
import logging
from openpype import AYON_SERVER_ENABLED
from openpype.lib import Logger
from openpype.client import get_project
from . import legacy_io
from .anatomy import Anatomy
@ -11,13 +12,13 @@ from .plugin_discover import (
register_plugin,
register_plugin_path,
)
log = logging.getLogger(__name__)
def get_thumbnail_binary(thumbnail_entity, thumbnail_type, dbcon=None):
if not thumbnail_entity:
return
log = Logger.get_logger(__name__)
resolvers = discover_thumbnail_resolvers()
resolvers = sorted(resolvers, key=lambda cls: cls.priority)
if dbcon is None:
@ -133,6 +134,16 @@ class BinaryThumbnail(ThumbnailResolver):
class ServerThumbnailResolver(ThumbnailResolver):
_cache = None
@classmethod
def _get_cache(cls):
if cls._cache is None:
from openpype.client.server.thumbnails import AYONThumbnailCache
cls._cache = AYONThumbnailCache()
return cls._cache
def process(self, thumbnail_entity, thumbnail_type):
if not AYON_SERVER_ENABLED:
return None
@ -142,20 +153,40 @@ class ServerThumbnailResolver(ThumbnailResolver):
if not entity_type or not entity_id:
return None
from openpype.client.server.server_api import get_server_api_connection
import ayon_api
project_name = self.dbcon.active_project()
thumbnail_id = thumbnail_entity["_id"]
con = get_server_api_connection()
filepath = con.get_thumbnail(
project_name, entity_type, entity_id, thumbnail_id
)
content = None
cache = self._get_cache()
filepath = cache.get_thumbnail_filepath(project_name, thumbnail_id)
if filepath:
with open(filepath, "rb") as stream:
content = stream.read()
return stream.read()
return content
# This is the new way thumbnails can be received from the server
# - output is a 'ThumbnailContent' object
if hasattr(ayon_api, "get_thumbnail_by_id"):
result = ayon_api.get_thumbnail_by_id(thumbnail_id)
if result.is_valid:
filepath = cache.store_thumbnail(
project_name,
thumbnail_id,
result.content,
result.content_type
)
else:
# Backwards compatibility for ayon api where 'get_thumbnail_by_id'
# is not implemented and output is filepath
filepath = ayon_api.get_thumbnail(
project_name, entity_type, entity_id, thumbnail_id
)
if not filepath:
return None
with open(filepath, "rb") as stream:
return stream.read()
# Thumbnail resolvers

View file

@ -0,0 +1,37 @@
from openpype.lib.profiles_filtering import filter_profiles
from openpype.settings import get_project_settings
def get_versioning_start(
project_name,
host_name,
task_name=None,
task_type=None,
family=None,
subset=None,
project_settings=None,
):
"""Get anatomy versioning start"""
if not project_settings:
project_settings = get_project_settings(project_name)
version_start = 1
settings = project_settings["global"]
profiles = settings.get("version_start_category", {}).get("profiles", [])
if not profiles:
return version_start
filtering_criteria = {
"host_names": host_name,
"families": family,
"task_names": task_name,
"task_types": task_type,
"subsets": subset
}
profile = filter_profiles(profiles, filtering_criteria)
if profile is None:
return version_start
return profile["version_start"]

View file
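A usage sketch of the new helper, with a hypothetical profile shaped after the `version_start_category` settings schema added later in this commit (empty lists act as wildcards in `filter_profiles`):

```python
from openpype.pipeline.version_start import get_versioning_start

# Hypothetical settings: any Nuke render product starts at v0.
project_settings = {
    "global": {
        "version_start_category": {
            "profiles": [
                {
                    "host_names": ["nuke"],
                    "task_types": [],
                    "task_names": [],
                    "families": ["render"],
                    "subsets": [],
                    "version_start": 0,
                }
            ]
        }
    }
}

version = get_versioning_start(
    "demo_project",
    "nuke",
    task_name="comp",
    task_type="Compositing",
    family="render",
    subset="renderMain",
    project_settings=project_settings,
)
print(version)  # 0 - matched profile wins; otherwise falls back to 1
```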

@ -10,7 +10,7 @@ from openpype.lib import (
Logger,
StringTemplate,
)
from openpype.pipeline import Anatomy
from openpype.pipeline import version_start, Anatomy
from openpype.pipeline.template_data import get_template_data
@ -316,7 +316,13 @@ def get_last_workfile(
)
if filename is None:
data = copy.deepcopy(fill_data)
data["version"] = 1
data["version"] = version_start.get_versioning_start(
data["project"]["name"],
data["app"],
task_name=data["task"]["name"],
task_type=data["task"]["type"],
family="workfile"
)
data.pop("comment", None)
if not data.get("ext"):
data["ext"] = extensions[0]

View file

@ -1612,7 +1612,7 @@ class PlaceholderLoadMixin(object):
pass
def delete_placeholder(self, placeholder, failed):
def delete_placeholder(self, placeholder):
"""Called when all item population is done."""
self.log.debug("Clean up of placeholder is not implemented.")
@ -1781,6 +1781,17 @@ class PlaceholderCreateMixin(object):
self.post_placeholder_process(placeholder, failed)
if failed:
self.log.debug(
"Placeholder cleanup skipped due to failed placeholder "
"population."
)
return
if not placeholder.data.get("keep_placeholder", True):
self.delete_placeholder(placeholder)
def create_failed(self, placeholder, creator_data):
if hasattr(placeholder, "create_failed"):
placeholder.create_failed(creator_data)
@ -1800,9 +1811,12 @@ class PlaceholderCreateMixin(object):
representation.
failed (bool): Loading of representation failed.
"""
pass
def delete_placeholder(self, placeholder):
"""Called when all item population is done."""
self.log.debug("Clean up of placeholder is not implemented.")
def _before_instance_create(self, placeholder):
"""Can be overriden. Is called before instance is created."""

View file

@ -0,0 +1,125 @@
import os
import platform
import subprocess
from string import Formatter
from openpype.client import (
get_project,
get_asset_by_name,
)
from openpype.pipeline import (
Anatomy,
LauncherAction,
)
from openpype.pipeline.template_data import get_template_data
class OpenTaskPath(LauncherAction):
name = "open_task_path"
label = "Explore here"
icon = "folder-open"
order = 500
def is_compatible(self, session):
"""Return whether the action is compatible with the session"""
return bool(session.get("AVALON_ASSET"))
def process(self, session, **kwargs):
from qtpy import QtCore, QtWidgets
project_name = session["AVALON_PROJECT"]
asset_name = session["AVALON_ASSET"]
task_name = session.get("AVALON_TASK", None)
path = self._get_workdir(project_name, asset_name, task_name)
if not path:
return
app = QtWidgets.QApplication.instance()
ctrl_pressed = QtCore.Qt.ControlModifier & app.keyboardModifiers()
if ctrl_pressed:
# Copy path to clipboard
self.copy_path_to_clipboard(path)
else:
self.open_in_explorer(path)
def _find_first_filled_path(self, path):
if not path:
return ""
fields = set()
for item in Formatter().parse(path):
_, field_name, format_spec, conversion = item
if not field_name:
continue
conversion = "!{}".format(conversion) if conversion else ""
format_spec = ":{}".format(format_spec) if format_spec else ""
orig_key = "{{{}{}{}}}".format(
field_name, conversion, format_spec)
fields.add(orig_key)
for field in fields:
path = path.split(field, 1)[0]
return path
def _get_workdir(self, project_name, asset_name, task_name):
project = get_project(project_name)
asset = get_asset_by_name(project_name, asset_name)
data = get_template_data(project, asset, task_name)
anatomy = Anatomy(project_name)
workdir = anatomy.templates_obj["work"]["folder"].format(data)
# Remove any potential un-formatted parts of the path
valid_workdir = self._find_first_filled_path(workdir)
# Path is not filled at all
if not valid_workdir:
raise AssertionError("Failed to calculate workdir.")
# Normalize
valid_workdir = os.path.normpath(valid_workdir)
if os.path.exists(valid_workdir):
return valid_workdir
# If task was selected, try to find asset path only to asset
if not task_name:
raise AssertionError("Folder does not exist.")
data.pop("task", None)
workdir = anatomy.templates_obj["work"]["folder"].format(data)
valid_workdir = self._find_first_filled_path(workdir)
if valid_workdir:
# Normalize
valid_workdir = os.path.normpath(valid_workdir)
if os.path.exists(valid_workdir):
return valid_workdir
raise AssertionError("Folder does not exist.")
@staticmethod
def open_in_explorer(path):
platform_name = platform.system().lower()
if platform_name == "windows":
args = ["start", path]
elif platform_name == "darwin":
args = ["open", "-na", path]
elif platform_name == "linux":
args = ["xdg-open", path]
else:
raise RuntimeError(f"Unknown platform {platform.system()}")
# Make sure path is converted correctly for 'os.system'
os.system(subprocess.list2cmdline(args))
@staticmethod
def copy_path_to_clipboard(path):
from qtpy import QtWidgets
path = path.replace("\\", "/")
print(f"Copied to clipboard: {path}")
app = QtWidgets.QApplication.instance()
assert app, "Must have running QApplication instance"
# Set to Clipboard
clipboard = QtWidgets.QApplication.clipboard()
clipboard.setText(os.path.normpath(path))

View file
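The `_find_first_filled_path` helper above truncates a partially formatted template at its first unresolved field. Extracted as a standalone sketch:

```python
from string import Formatter

def find_first_filled_path(path):
    # Collect all unresolved template fields, then cut the path at the
    # first one so only the fully formatted prefix remains.
    if not path:
        return ""
    fields = set()
    for _, field_name, format_spec, conversion in Formatter().parse(path):
        if not field_name:
            continue
        conversion = "!{}".format(conversion) if conversion else ""
        format_spec = ":{}".format(format_spec) if format_spec else ""
        fields.add("{{{}{}{}}}".format(field_name, conversion, format_spec))
    for field in fields:
        path = path.split(field, 1)[0]
    return path

# Task was not selected, so '{task[name]}' stayed unformatted:
print(find_first_filled_path("/projects/demo/sh010/work/{task[name]}"))
# '/projects/demo/sh010/work/'
```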

@ -32,6 +32,7 @@ from openpype.client import (
get_subsets,
get_last_versions
)
from openpype.pipeline.version_start import get_versioning_start
class CollectAnatomyInstanceData(pyblish.api.ContextPlugin):
@ -187,25 +188,13 @@ class CollectAnatomyInstanceData(pyblish.api.ContextPlugin):
project_task_types = project_doc["config"]["tasks"]
for instance in context:
if self.follow_workfile_version:
version_number = context.data('version')
else:
version_number = instance.data.get("version")
# If version is not specified for instance or context
if version_number is None:
# TODO we should be able to change default version by studio
# preferences (like start with version number `0`)
version_number = 1
# use latest version (+1) if already any exist
latest_version = instance.data["latestVersion"]
if latest_version is not None:
version_number += int(latest_version)
anatomy_updates = {
"asset": instance.data["asset"],
"folder": {
"name": instance.data["asset"],
},
"family": instance.data["family"],
"subset": instance.data["subset"],
"version": version_number
}
# Hierarchy
@ -225,6 +214,7 @@ class CollectAnatomyInstanceData(pyblish.api.ContextPlugin):
anatomy_updates["parent"] = parent_name
# Task
task_type = None
task_name = instance.data.get("task")
if task_name:
asset_tasks = asset_doc["data"]["tasks"]
@ -240,6 +230,30 @@ class CollectAnatomyInstanceData(pyblish.api.ContextPlugin):
"short": task_code
}
# Define version
if self.follow_workfile_version:
version_number = context.data('version')
else:
version_number = instance.data.get("version")
# use latest version (+1) if already any exist
if version_number is None:
latest_version = instance.data["latestVersion"]
if latest_version is not None:
version_number = int(latest_version) + 1
# If version is not specified for instance or context
if version_number is None:
version_number = get_versioning_start(
context.data["projectName"],
instance.context.data["hostName"],
task_name=task_name,
task_type=task_type,
family=instance.data["family"],
subset=instance.data["subset"]
)
anatomy_updates["version"] = version_number
# Additional data
resolution_width = instance.data.get("resolutionWidth")
if resolution_width:

View file

@ -53,8 +53,8 @@ class ExtractBurnin(publish.Extractor):
"flame",
"houdini",
"max",
"blender"
# "resolve"
"blender",
"unreal"
]
optional = True

View file

@ -128,7 +128,7 @@ class ExtractThumbnailFromSource(pyblish.api.InstancePlugin):
if thumbnail_created:
return full_output_path
self.log.warning("Thumbanil has not been created.")
self.log.warning("Thumbnail has not been created.")
def _instance_has_thumbnail(self, instance):
if "representations" not in instance.data:
@ -147,6 +147,7 @@ class ExtractThumbnailFromSource(pyblish.api.InstancePlugin):
oiio_cmd = get_oiio_tool_args(
"oiiotool",
"-a", src_path,
"--ch", "R,G,B",
"-o", dst_path
)
self.log.info("Running: {}".format(" ".join(oiio_cmd)))

View file

@ -142,6 +142,12 @@ class IntegrateHeroVersion(pyblish.api.InstancePlugin):
))
return
if AYON_SERVER_ENABLED and src_version_entity["name"] == 0:
self.log.debug(
"Version 0 cannot have hero version. Skipping."
)
return
all_copied_files = []
transfers = instance.data.get("transfers", list())
for _src, dst in transfers:

View file

@ -7,12 +7,12 @@ from openpype.pipeline.publish import (
class ValidatePublishDir(pyblish.api.InstancePlugin):
"""Validates if 'publishDir' is a project directory
"""Validates if files are being published into a project directory
'publishDir' is collected based on publish templates. In specific cases
('source' template) source folder of items is used as a 'publishDir', this
validates if it is inside any project dir for the project.
(eg. files are not published from local folder, unaccessible for studio'
In specific cases ('source' template - in place publishing) source folder
of published items is used as a regular `publish` dir.
This validates if it is inside any project dir for the project.
(e.g. files are not published from a local folder, inaccessible for the studio)
"""
@ -44,6 +44,8 @@ class ValidatePublishDir(pyblish.api.InstancePlugin):
anatomy = instance.context.data["anatomy"]
# original_dirname must be convertable to rootless path
# in other case it is path inside of root folder for the project
success, _ = anatomy.find_root_template_from_path(original_dirname)
formatting_data = {
@ -56,11 +58,12 @@ class ValidatePublishDir(pyblish.api.InstancePlugin):
formatting_data=formatting_data)
def _get_template_name_from_instance(self, instance):
"""Find template which will be used during integration."""
project_name = instance.context.data["projectName"]
host_name = instance.context.data["hostName"]
anatomy_data = instance.data["anatomyData"]
family = anatomy_data["family"]
family = self.family_mapping.get("family") or family
family = self.family_mapping.get(family) or family
task_info = anatomy_data.get("task") or {}
return get_publish_template_name(

View file

@ -25,16 +25,16 @@ class ValidateVersion(pyblish.api.InstancePlugin):
# TODO: Remove full non-html version upon drop of old publisher
msg = (
"Version '{0}' from instance '{1}' that you are "
" trying to publish is lower or equal to an existing version "
" in the database. Version in database: '{2}'."
"trying to publish is lower or equal to an existing version "
"in the database. Version in database: '{2}'."
"Please version up your workfile to a higher version number "
"than: '{2}'."
).format(version, instance.data["name"], latest_version)
msg_html = (
"Version <b>{0}</b> from instance <b>{1}</b> that you are "
" trying to publish is lower or equal to an existing version "
" in the database. Version in database: <b>{2}</b>.<br><br>"
"trying to publish is lower or equal to an existing version "
"in the database. Version in database: <b>{2}</b>.<br><br>"
"Please version up your workfile to a higher version number "
"than: <b>{2}</b>."
).format(version, instance.data["name"], latest_version)

View file

@ -19,6 +19,7 @@ from openpype.pipeline import (
)
from openpype.pipeline.context_tools import get_workdir_from_session
from openpype.pipeline.version_start import get_versioning_start
log = logging.getLogger("Update Slap Comp")
@ -26,9 +27,6 @@ log = logging.getLogger("Update Slap Comp")
def _format_version_folder(folder):
"""Format a version folder based on the filepath
Assumption here is made that, if the path does not exists the folder
will be "v001"
Args:
folder: file path to a folder
@ -36,9 +34,13 @@ def _format_version_folder(folder):
str: new version folder name
"""
new_version = 1
new_version = get_versioning_start(
get_current_project_name(),
"fusion",
family="workfile"
)
if os.path.isdir(folder):
re_version = re.compile("v\d+$")
re_version = re.compile(r"v\d+$")
versions = [i for i in os.listdir(folder) if os.path.isdir(i)
and re_version.match(i)]
if versions:

View file

@ -301,6 +301,10 @@ def convert_system_settings(ayon_settings, default_settings, addon_versions):
if "core" in ayon_settings:
_convert_general(ayon_settings, output, default_settings)
for key, value in ayon_settings.items():
if key not in output:
output[key] = value
for key, value in default_settings.items():
if key not in output:
output[key] = value
@ -602,6 +606,13 @@ def _convert_maya_project_settings(ayon_settings, output):
.replace("{product[name]}", "{subset}")
)
if ayon_maya_load.get("import_loader"):
import_loader = ayon_maya_load["import_loader"]
import_loader["namespace"] = (
import_loader["namespace"]
.replace("{product[name]}", "{subset}")
)
output["maya"] = ayon_maya
@ -1265,6 +1276,10 @@ def convert_project_settings(ayon_settings, default_settings):
_convert_global_project_settings(ayon_settings, output, default_settings)
for key, value in ayon_settings.items():
if key not in output:
output[key] = value
for key, value in default_settings.items():
if key not in output:
output[key] = value

View file

@ -12,7 +12,7 @@
},
"create": {
"RenderCreator": {
"defaults": [
"default_variants": [
"Main"
],
"mark_for_review": true

View file

@ -1,4 +1,7 @@
{
"version_start_category": {
"profiles": []
},
"imageio": {
"activate_global_color_management": false,
"ocio_config": {

View file

@ -14,48 +14,70 @@
"create": {
"CreateArnoldAss": {
"enabled": true,
"default_variants": [],
"default_variants": [
"Main"
],
"ext": ".ass"
},
"CreateAlembicCamera": {
"enabled": true,
"defaults": []
"default_variants": [
"Main"
]
},
"CreateCompositeSequence": {
"enabled": true,
"defaults": []
"default_variants": [
"Main"
]
},
"CreatePointCache": {
"enabled": true,
"defaults": []
"default_variants": [
"Main"
]
},
"CreateRedshiftROP": {
"enabled": true,
"defaults": []
"default_variants": [
"Main"
]
},
"CreateRemotePublish": {
"enabled": true,
"defaults": []
"default_variants": [
"Main"
]
},
"CreateVDBCache": {
"enabled": true,
"defaults": []
"default_variants": [
"Main"
]
},
"CreateUSD": {
"enabled": false,
"defaults": []
"default_variants": [
"Main"
]
},
"CreateUSDModel": {
"enabled": false,
"defaults": []
"default_variants": [
"Main"
]
},
"USDCreateShadingWorkspace": {
"enabled": false,
"defaults": []
"default_variants": [
"Main"
]
},
"CreateUSDRender": {
"enabled": false,
"defaults": []
"default_variants": [
"Main"
]
}
},
"publish": {

View file

@ -527,7 +527,7 @@
},
"CreateRender": {
"enabled": true,
"defaults": [
"default_variants": [
"Main"
]
},
@ -547,7 +547,9 @@
},
"CreateUnrealSkeletalMesh": {
"enabled": true,
"default_variants": [],
"default_variants": [
"Main"
],
"joint_hints": "jnt_org"
},
"CreateMultiverseLook": {
@ -627,55 +629,55 @@
},
"CreateMultiverseUsd": {
"enabled": true,
"defaults": [
"default_variants": [
"Main"
]
},
"CreateMultiverseUsdComp": {
"enabled": true,
"defaults": [
"default_variants": [
"Main"
]
},
"CreateMultiverseUsdOver": {
"enabled": true,
"defaults": [
"default_variants": [
"Main"
]
},
"CreateAssembly": {
"enabled": true,
"defaults": [
"default_variants": [
"Main"
]
},
"CreateCamera": {
"enabled": true,
"defaults": [
"default_variants": [
"Main"
]
},
"CreateLayout": {
"enabled": true,
"defaults": [
"default_variants": [
"Main"
]
},
"CreateMayaScene": {
"enabled": true,
"defaults": [
"default_variants": [
"Main"
]
},
"CreateRenderSetup": {
"enabled": true,
"defaults": [
"default_variants": [
"Main"
]
},
"CreateRig": {
"enabled": true,
"defaults": [
"default_variants": [
"Main",
"Sim",
"Cloth"
@ -683,20 +685,20 @@
},
"CreateSetDress": {
"enabled": true,
"defaults": [
"default_variants": [
"Main",
"Anim"
]
},
"CreateVRayScene": {
"enabled": true,
"defaults": [
"default_variants": [
"Main"
]
},
"CreateYetiRig": {
"enabled": true,
"defaults": [
"default_variants": [
"Main"
]
}
@ -1463,6 +1465,10 @@
"namespace": "{asset_name}_{subset}_##_",
"group_name": "_GRP",
"display_handle": true
},
"import_loader": {
"namespace": "{asset_name}_{subset}_##_",
"group_name": "_GRP"
}
},
"workfile_build": {

View file

@ -256,6 +256,23 @@
"allow_multiple_items": true,
"allow_version_control": false,
"extensions": []
},
{
"family": "audio",
"identifier": "",
"label": "Audio ",
"icon": "fa5s.file-audio",
"default_variants": [
"Main"
],
"description": "Audio product",
"detailed_description": "Audio files for review or final delivery",
"allow_sequences": false,
"allow_multiple_items": false,
"allow_version_control": false,
"extensions": [
".wav"
]
}
],
"editorial_creators": {

View file

@ -32,7 +32,7 @@
"children": [
{
"type": "list",
"key": "defaults",
"key": "default_variants",
"label": "Default Variants",
"object_type": "text",
"docstring": "Fill default variant(s) (like 'Main' or 'Default') used in subset name creation."

View file

@ -5,6 +5,61 @@
"label": "Global",
"is_file": true,
"children": [
{
"type": "dict",
"key": "version_start_category",
"label": "Version Start",
"collapsible": true,
"collapsible_key": true,
"children": [
{
"type": "list",
"collapsible": true,
"key": "profiles",
"label": "Profiles",
"object_type": {
"type": "dict",
"children": [
{
"key": "host_names",
"label": "Host names",
"type": "hosts-enum",
"multiselection": true
},
{
"key": "task_types",
"label": "Task types",
"type": "task-types-enum"
},
{
"key": "task_names",
"label": "Task names",
"type": "list",
"object_type": "text"
},
{
"key": "families",
"label": "Families",
"type": "list",
"object_type": "text"
},
{
"key": "subsets",
"label": "Subset names",
"type": "list",
"object_type": "text"
},
{
"key": "version_start",
"label": "Version Start",
"type": "number",
"minimum": 0
}
]
}
}
]
},
{
"key": "imageio",
"type": "dict",

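The schema above only declares the settings UI; resolving which profile applies at publish time happens elsewhere. A minimal sketch of the matching idea, assuming first-match-wins and that an empty filter list means "match anything" (`resolve_version_start` is a hypothetical illustration, not the actual OpenPype implementation):

```python
def _matches(filter_values, value):
    # An empty filter list means "match anything".
    return not filter_values or value in filter_values

def resolve_version_start(profiles, host_name, task_type, task_name,
                          family, subset, default=1):
    """Return 'version_start' of the first matching profile, else default."""
    for profile in profiles:
        if (_matches(profile.get("host_names"), host_name)
                and _matches(profile.get("task_types"), task_type)
                and _matches(profile.get("task_names"), task_name)
                and _matches(profile.get("families"), family)
                and _matches(profile.get("subsets"), subset)):
            return profile.get("version_start", default)
    return default
```
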
View file

@ -19,7 +19,7 @@
{
"type": "list",
"key": "default_variants",
"label": "Default Subsets",
"label": "Default Variants",
"object_type": "text"
},
{
@ -39,51 +39,51 @@
]
},
{
"type": "schema_template",
"name": "template_create_plugin",
"template_data": [
{
"key": "CreateAlembicCamera",
"label": "Create Alembic Camera"
},
{
"key": "CreateCompositeSequence",
"label": "Create Composite (Image Sequence)"
},
{
"key": "CreatePointCache",
"label": "Create Point Cache"
},
{
"key": "CreateRedshiftROP",
"label": "Create Redshift ROP"
},
{
"key": "CreateRemotePublish",
"label": "Create Remote Publish"
},
{
"key": "CreateVDBCache",
"label": "Create VDB Cache"
},
{
"key": "CreateUSD",
"label": "Create USD"
},
{
"key": "CreateUSDModel",
"label": "Create USD Model"
},
{
"key": "USDCreateShadingWorkspace",
"label": "Create USD Shading Workspace"
},
{
"key": "CreateUSDRender",
"label": "Create USD Render"
}
]
}
{
"type": "schema_template",
"name": "template_create_plugin",
"template_data": [
{
"key": "CreateAlembicCamera",
"label": "Create Alembic Camera"
},
{
"key": "CreateCompositeSequence",
"label": "Create Composite (Image Sequence)"
},
{
"key": "CreatePointCache",
"label": "Create Point Cache"
},
{
"key": "CreateRedshiftROP",
"label": "Create Redshift ROP"
},
{
"key": "CreateRemotePublish",
"label": "Create Remote Publish"
},
{
"key": "CreateVDBCache",
"label": "Create VDB Cache"
},
{
"key": "CreateUSD",
"label": "Create USD"
},
{
"key": "CreateUSDModel",
"label": "Create USD Model"
},
{
"key": "USDCreateShadingWorkspace",
"label": "Create USD Shading Workspace"
},
{
"key": "CreateUSDRender",
"label": "Create USD Render"
}
]
}
]
}

View file

@ -29,14 +29,20 @@
{
"type": "list",
"key": "default_variants",
"label": "Default Subsets",
"label": "Default Variants",
"object_type": "text"
}
]
},
{
"type": "schema",
"name": "schema_maya_create_render"
{
"type": "schema_template",
"name": "template_create_plugin",
"template_data": [
{
"key": "CreateRender",
"label": "Create Render"
}
]
},
{
"type": "dict",
@ -53,7 +59,7 @@
{
"type": "list",
"key": "default_variants",
"label": "Default Subsets",
"label": "Default Variants",
"object_type": "text"
},
{
@ -85,7 +91,7 @@
{
"type": "list",
"key": "default_variants",
"label": "Default Subsets",
"label": "Default Variants",
"object_type": "text"
},
{
@ -148,7 +154,7 @@
{
"type": "list",
"key": "default_variants",
"label": "Default Subsets",
"label": "Default Variants",
"object_type": "text"
}
]
@ -178,7 +184,7 @@
{
"type": "list",
"key": "default_variants",
"label": "Default Subsets",
"label": "Default Variants",
"object_type": "text"
}
]
@ -213,7 +219,7 @@
{
"type": "list",
"key": "default_variants",
"label": "Default Subsets",
"label": "Default Variants",
"object_type": "text"
}
]
@ -243,7 +249,7 @@
{
"type": "list",
"key": "default_variants",
"label": "Default Subsets",
"label": "Default Variants",
"object_type": "text"
}
]
@ -263,7 +269,7 @@
{
"type": "list",
"key": "default_variants",
"label": "Default Subsets",
"label": "Default Variants",
"object_type": "text"
},
{
@ -288,7 +294,7 @@
{
"type": "list",
"key": "default_variants",
"label": "Default Subsets",
"label": "Default Variants",
"object_type": "text"
},
{
@ -390,7 +396,7 @@
{
"type": "list",
"key": "default_variants",
"label": "Default Subsets",
"label": "Default Variants",
"object_type": "text"
}
]

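The `template_create_plugin` template stamps out the same enabled/default-variants block for every entry in `template_data`, which is why the dedicated `schema_maya_create_render` file below could be deleted in favor of a plain `CreateRender` entry. A minimal sketch of how such expansion could work — the real settings framework is more involved:

```python
def expand_create_plugin_template(template_data):
    """Expand template entries into per-plugin schema dicts (simplified)."""
    schemas = []
    for entry in template_data:
        schemas.append({
            "type": "dict",
            "collapsible": True,
            "key": entry["key"],
            "label": entry["label"],
            "checkbox_key": "enabled",
            "children": [
                {"type": "boolean", "key": "enabled", "label": "Enabled"},
                {
                    "type": "list",
                    "key": "default_variants",
                    "label": "Default Variants",
                    "object_type": "text",
                },
            ],
        })
    return schemas

# One template entry now covers what the deleted file declared by hand:
print(expand_create_plugin_template(
    [{"key": "CreateRender", "label": "Create Render"}]
)[0]["key"])  # -> CreateRender
```
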
View file

@ -1,20 +0,0 @@
{
"type": "dict",
"collapsible": true,
"key": "CreateRender",
"label": "Create Render",
"checkbox_key": "enabled",
"children": [
{
"type": "boolean",
"key": "enabled",
"label": "Enabled"
},
{
"type": "list",
"key": "defaults",
"label": "Default Subsets",
"object_type": "text"
}
]
}

View file

@ -121,6 +121,28 @@
"label": "Display Handle On Load References"
}
]
},
{
"type": "dict",
"collapsible": true,
"key": "import_loader",
"label": "Import Loader",
"children": [
{
"type": "text",
"label": "Namespace",
"key": "namespace"
},
{
"type": "text",
"label": "Group name",
"key": "group_name"
},
{
"type": "label",
"label": "Here's a link to the doc where you can find explanations about customing the naming of referenced assets: https://openpype.io/docs/admin_hosts_maya#load-plugins"
}
]
}
]
}

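The new `import_loader` options reuse the same template tokens as the reference loader above: `{asset_name}` and `{subset}` are format keys, and the trailing `##` is typically padded out to a unique number per load. A minimal sketch of turning the template into a concrete namespace, assuming `##` marks the unique-counter digits (a hypothetical helper, not the Maya host code):

```python
import re

def format_namespace(template, asset_name, subset, counter):
    """Fill template keys and replace runs of '#' with a padded counter."""
    namespace = template.format(asset_name=asset_name, subset=subset)
    return re.sub(
        r"#+",
        lambda match: str(counter).zfill(len(match.group())),
        namespace,
    )

print(format_namespace("{asset_name}_{subset}_##_", "hero", "modelMain", 1))
# -> hero_modelMain_01_
```
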
View file

@ -13,8 +13,8 @@
},
{
"type": "list",
"key": "defaults",
"label": "Default Subsets",
"key": "default_variants",
"label": "Default Variants",
"object_type": "text"
}
]

View file

@ -343,6 +343,7 @@ class TextAttrWidget(_BaseAttrDefWidget):
return self._input_widget.text()
def set_value(self, value, multivalue=False):
block_signals = False
if multivalue:
set_value = set(value)
if None in set_value:
@ -352,13 +353,18 @@ class TextAttrWidget(_BaseAttrDefWidget):
if len(set_value) == 1:
value = tuple(set_value)[0]
else:
block_signals = True
value = "< Multiselection >"
if value != self.current_value():
if block_signals:
self._input_widget.blockSignals(True)
if self.multiline:
self._input_widget.setPlainText(value)
else:
self._input_widget.setText(value)
if block_signals:
self._input_widget.blockSignals(False)
class BoolAttrWidget(_BaseAttrDefWidget):
@ -391,7 +397,9 @@ class BoolAttrWidget(_BaseAttrDefWidget):
set_value.add(self.attr_def.default)
if len(set_value) > 1:
self._input_widget.blockSignals(True)
self._input_widget.setCheckState(QtCore.Qt.PartiallyChecked)
self._input_widget.blockSignals(False)
return
value = tuple(set_value)[0]

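The widget changes above guard the `< Multiselection >` placeholder (and the partially-checked state) with `blockSignals`, so writing the placeholder programmatically does not fire value-changed callbacks as if the user had typed it. A minimal standalone sketch of the pattern with a plain `QLineEdit` — PySide2 is assumed here, but any Qt binding behaves the same:

```python
from PySide2 import QtWidgets

app = QtWidgets.QApplication([])

line_edit = QtWidgets.QLineEdit()
line_edit.textChanged.connect(lambda text: print("changed:", text))

# Programmatic placeholder update: block signals so listeners don't
# mistake "< Multiselection >" for a real user edit.
line_edit.blockSignals(True)
line_edit.setText("< Multiselection >")
line_edit.blockSignals(False)

line_edit.setText("Main")  # a real update still notifies listeners
```
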
View file

@ -6,6 +6,7 @@ from openpype import AYON_SERVER_ENABLED
from openpype.pipeline.create import (
SUBSET_NAME_ALLOWED_SYMBOLS,
PRE_CREATE_THUMBNAIL_KEY,
DEFAULT_VARIANT_VALUE,
TaskNotSetError,
)
@ -626,7 +627,7 @@ class CreateWidget(QtWidgets.QWidget):
default_variants = creator_item.default_variants
if not default_variants:
default_variants = ["Main"]
default_variants = [DEFAULT_VARIANT_VALUE]
default_variant = creator_item.default_variant
if not default_variant:
@ -642,7 +643,7 @@ class CreateWidget(QtWidgets.QWidget):
elif variant:
self.variant_hints_menu.addAction(variant)
variant_text = default_variant or "Main"
variant_text = default_variant or DEFAULT_VARIANT_VALUE
# Make sure subset name is updated to new plugin
if variant_text == self.variant_input.text():
self._on_variant_change()

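Replacing the literal `"Main"` with `DEFAULT_VARIANT_VALUE` keeps the fallback variant defined in a single place. A minimal sketch of the pattern — the constant's value is assumed to be `"Main"`, matching the literals it replaces:

```python
# Single source of truth for the fallback variant name
# (assumed to be "Main", matching the literals it replaces).
DEFAULT_VARIANT_VALUE = "Main"

def get_variant_text(default_variant=None):
    return default_variant or DEFAULT_VARIANT_VALUE

print(get_variant_text())        # -> Main
print(get_variant_text("Anim"))  # -> Anim
```
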
Binary file not shown (added; 12 KiB).

Binary file not shown (added; 3.1 KiB).

Binary file not shown (added; 6.4 KiB).

Binary file not shown (added; 11 KiB).
View file

@ -168,7 +168,7 @@ class OverviewWidget(QtWidgets.QFrame):
def make_sure_animation_is_finished(self):
if self._change_anim.state() == QtCore.QAbstractAnimation.Running:
self._change_anim.stop()
self._on_change_anim_finished()
self._on_change_anim_finished()
def set_state(self, new_state, animate):
if new_state == self._current_state:

Some files were not shown because too many files have changed in this diff.