diff --git a/.all-contributorsrc b/.all-contributorsrc
index b30f3b2499..60812cdb3c 100644
--- a/.all-contributorsrc
+++ b/.all-contributorsrc
@@ -1,6 +1,6 @@
{
"projectName": "OpenPype",
- "projectOwner": "pypeclub",
+ "projectOwner": "ynput",
"repoType": "github",
"repoHost": "https://github.com",
"files": [
@@ -319,8 +319,18 @@
"code",
"doc"
]
+ },
+ {
+ "login": "movalex",
+ "name": "Alexey Bogomolov",
+ "avatar_url": "https://avatars.githubusercontent.com/u/11698866?v=4",
+ "profile": "http://abogomolov.com",
+ "contributions": [
+ "code"
+ ]
}
],
"contributorsPerLine": 7,
- "skipCi": true
+ "skipCi": true,
+ "commitType": "docs"
}
diff --git a/.github/ISSUE_TEMPLATE/bug_report.yml b/.github/ISSUE_TEMPLATE/bug_report.yml
index 0036e121b7..aa5b8decdc 100644
--- a/.github/ISSUE_TEMPLATE/bug_report.yml
+++ b/.github/ISSUE_TEMPLATE/bug_report.yml
@@ -35,6 +35,7 @@ body:
label: Version
description: What version are you running? Look to OpenPype Tray
options:
+ - 3.15.9
- 3.15.9-nightly.2
- 3.15.9-nightly.1
- 3.15.8
@@ -134,7 +135,6 @@ body:
- 3.14.3-nightly.1
- 3.14.2
- 3.14.2-nightly.5
- - 3.14.2-nightly.4
validations:
required: true
- type: dropdown
diff --git a/README.md b/README.md
index 514ffb62c0..8757e3db92 100644
--- a/README.md
+++ b/README.md
@@ -1,6 +1,6 @@
-[](#contributors-)
+[](#contributors-)
OpenPype
====
@@ -303,41 +303,44 @@ Thanks goes to these wonderful people ([emoji key](https://allcontributors.org/d
diff --git a/openpype/hosts/blender/hooks/pre_add_run_python_script_arg.py b/openpype/hosts/blender/hooks/pre_add_run_python_script_arg.py
new file mode 100644
index 0000000000..559e9ae0ce
--- /dev/null
+++ b/openpype/hosts/blender/hooks/pre_add_run_python_script_arg.py
@@ -0,0 +1,55 @@
+from pathlib import Path
+
+from openpype.lib import PreLaunchHook
+
+
+class AddPythonScriptToLaunchArgs(PreLaunchHook):
+ """Add python script to be executed before Blender launch."""
+
+ # Append after file argument
+ order = 15
+ app_groups = [
+ "blender",
+ ]
+
+ def execute(self):
+ if not self.launch_context.data.get("python_scripts"):
+ return
+
+        # Add the Python script paths to the launch arguments
+ for python_script_path in self.launch_context.data["python_scripts"]:
+ self.log.info(
+ f"Adding python script {python_script_path} to launch"
+ )
+ # Test script path exists
+ python_script_path = Path(python_script_path)
+ if not python_script_path.exists():
+ self.log.warning(
+ f"Python script {python_script_path} doesn't exist. "
+ "Skipped..."
+ )
+ continue
+
+ if "--" in self.launch_context.launch_args:
+ # Insert before separator
+ separator_index = self.launch_context.launch_args.index("--")
+ self.launch_context.launch_args.insert(
+ separator_index,
+ "-P",
+ )
+ self.launch_context.launch_args.insert(
+ separator_index + 1,
+ python_script_path.as_posix(),
+ )
+ else:
+ self.launch_context.launch_args.extend(
+ ["-P", python_script_path.as_posix()]
+ )
+
+ # Ensure separator
+ if "--" not in self.launch_context.launch_args:
+ self.launch_context.launch_args.append("--")
+
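+        # Illustrative final argument layout (script names are examples only):
+        #   blender ... workfile.blend -P script.py -- <script_args...>
+        # Blender runs each "-P" script at startup; arguments after "--"
+        # are left for the scripts themselves to parse.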
+ self.launch_context.launch_args.extend(
+ [*self.launch_context.data.get("script_args", [])]
+ )
diff --git a/openpype/hosts/hiero/plugins/load/load_clip.py b/openpype/hosts/hiero/plugins/load/load_clip.py
index 77844d2448..c9bebfa8b2 100644
--- a/openpype/hosts/hiero/plugins/load/load_clip.py
+++ b/openpype/hosts/hiero/plugins/load/load_clip.py
@@ -41,8 +41,8 @@ class LoadClip(phiero.SequenceLoader):
clip_name_template = "{asset}_{subset}_{representation}"
+ @classmethod
def apply_settings(cls, project_settings, system_settings):
-
plugin_type_settings = (
project_settings
.get("hiero", {})
diff --git a/openpype/hosts/max/api/colorspace.py b/openpype/hosts/max/api/colorspace.py
new file mode 100644
index 0000000000..fafee4ee04
--- /dev/null
+++ b/openpype/hosts/max/api/colorspace.py
@@ -0,0 +1,50 @@
+import attr
+from pymxs import runtime as rt
+
+
+@attr.s
+class LayerMetadata(object):
+ """Data class for Render Layer metadata."""
+ frameStart = attr.ib()
+ frameEnd = attr.ib()
+
+
+@attr.s
+class RenderProduct(object):
+ """Getting Colorspace as
+ Specific Render Product Parameter for submitting
+ publish job.
+ """
+ colorspace = attr.ib() # colorspace
+ view = attr.ib()
+ productName = attr.ib(default=None)
+
+
+class ARenderProduct(object):
+
+ def __init__(self):
+ """Constructor."""
+ # Initialize
+ self.layer_data = self._get_layer_data()
+ self.layer_data.products = self.get_colorspace_data()
+
+ def _get_layer_data(self):
+ return LayerMetadata(
+ frameStart=int(rt.rendStart),
+ frameEnd=int(rt.rendEnd),
+ )
+
+ def get_colorspace_data(self):
+ """To be implemented by renderer class.
+ This should return a list of RenderProducts.
+ Returns:
+ list: List of RenderProduct
+ """
+ colorspace_data = [
+ RenderProduct(
+ colorspace="sRGB",
+ view="ACES 1.0",
+ productName=""
+ )
+ ]
+ return colorspace_data
diff --git a/openpype/hosts/max/api/lib.py b/openpype/hosts/max/api/lib.py
index d9213863b1..e2af0720ec 100644
--- a/openpype/hosts/max/api/lib.py
+++ b/openpype/hosts/max/api/lib.py
@@ -128,7 +128,14 @@ def get_all_children(parent, node_type=None):
def get_current_renderer():
- """get current renderer"""
+ """
+ Notes:
+ Get current renderer for Max
+
+ Returns:
+ "{Current Renderer}:{Current Renderer}"
+ e.g. "Redshift_Renderer:Redshift_Renderer"
+ """
return rt.renderers.production
diff --git a/openpype/hosts/max/api/lib_renderproducts.py b/openpype/hosts/max/api/lib_renderproducts.py
index 8224d589ad..94b0aeb913 100644
--- a/openpype/hosts/max/api/lib_renderproducts.py
+++ b/openpype/hosts/max/api/lib_renderproducts.py
@@ -3,94 +3,126 @@
# arnold
# https://help.autodesk.com/view/ARNOL/ENU/?guid=arnold_for_3ds_max_ax_maxscript_commands_ax_renderview_commands_html
import os
+
from pymxs import runtime as rt
-from openpype.hosts.max.api.lib import (
- get_current_renderer,
- get_default_render_folder
-)
-from openpype.pipeline.context_tools import get_current_project_asset
-from openpype.settings import get_project_settings
+
+from openpype.hosts.max.api.lib import get_current_renderer
from openpype.pipeline import legacy_io
+from openpype.settings import get_project_settings
class RenderProducts(object):
def __init__(self, project_settings=None):
- self._project_settings = project_settings
- if not self._project_settings:
- self._project_settings = get_project_settings(
- legacy_io.Session["AVALON_PROJECT"]
- )
+ self._project_settings = project_settings or get_project_settings(
+ legacy_io.Session["AVALON_PROJECT"])
+
+ def get_beauty(self, container):
+ render_dir = os.path.dirname(rt.rendOutputFilename)
+
+ output_file = os.path.join(render_dir, container)
- def render_product(self, container):
- folder = rt.maxFilePath
- file = rt.maxFileName
- folder = folder.replace("\\", "/")
setting = self._project_settings
- render_folder = get_default_render_folder(setting)
- filename, ext = os.path.splitext(file)
+ img_fmt = setting["max"]["RenderSettings"]["image_format"] # noqa
- output_file = os.path.join(folder,
- render_folder,
- filename,
+ start_frame = int(rt.rendStart)
+ end_frame = int(rt.rendEnd) + 1
+
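+        # Illustrative shape of the returned mapping (paths are examples):
+        #   {"beauty": ["<render_dir>/<container>.0001.<img_fmt>",
+        #               "<render_dir>/<container>.0002.<img_fmt>", ...]}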
+ return {
+ "beauty": self.get_expected_beauty(
+ output_file, start_frame, end_frame, img_fmt
+ )
+ }
+
+ def get_aovs(self, container):
+ render_dir = os.path.dirname(rt.rendOutputFilename)
+
+ output_file = os.path.join(render_dir,
container)
- context = get_current_project_asset()
- # TODO: change the frame range follows the current render setting
- startFrame = int(rt.rendStart)
- endFrame = int(rt.rendEnd) + 1
-
- img_fmt = self._project_settings["max"]["RenderSettings"]["image_format"] # noqa
- full_render_list = self.beauty_render_product(output_file,
- startFrame,
- endFrame,
- img_fmt)
+ setting = self._project_settings
+ img_fmt = setting["max"]["RenderSettings"]["image_format"] # noqa
+ start_frame = int(rt.rendStart)
+ end_frame = int(rt.rendEnd) + 1
renderer_class = get_current_renderer()
renderer = str(renderer_class).split(":")[0]
-
-
- if renderer == "VUE_File_Renderer":
- return full_render_list
+ render_dict = {}
if renderer in [
"ART_Renderer",
- "Redshift_Renderer",
"V_Ray_6_Hotfix_3",
"V_Ray_GPU_6_Hotfix_3",
"Default_Scanline_Renderer",
"Quicksilver_Hardware_Renderer",
]:
- render_elem_list = self.render_elements_product(output_file,
- startFrame,
- endFrame,
- img_fmt)
- if render_elem_list:
- full_render_list.extend(iter(render_elem_list))
- return full_render_list
+ render_name = self.get_render_elements_name()
+ if render_name:
+ for name in render_name:
+ render_dict.update({
+ name: self.get_expected_render_elements(
+ output_file, name, start_frame,
+ end_frame, img_fmt)
+ })
+ elif renderer == "Redshift_Renderer":
+ render_name = self.get_render_elements_name()
+ if render_name:
+ rs_aov_files = rt.Execute("renderers.current.separateAovFiles")
+ # this doesn't work, always returns False
+ # rs_AovFiles = rt.RedShift_Renderer().separateAovFiles
+ if img_fmt == "exr" and not rs_aov_files:
+ for name in render_name:
+ if name == "RsCryptomatte":
+ render_dict.update({
+ name: self.get_expected_render_elements(
+ output_file, name, start_frame,
+ end_frame, img_fmt)
+ })
+ else:
+ for name in render_name:
+ render_dict.update({
+ name: self.get_expected_render_elements(
+ output_file, name, start_frame,
+ end_frame, img_fmt)
+ })
- if renderer == "Arnold":
- aov_list = self.arnold_render_product(output_file,
- startFrame,
- endFrame,
- img_fmt)
- if aov_list:
- full_render_list.extend(iter(aov_list))
- return full_render_list
+ elif renderer == "Arnold":
+ render_name = self.get_arnold_product_name()
+ if render_name:
+ for name in render_name:
+ render_dict.update({
+ name: self.get_expected_arnold_product(
+ output_file, name, start_frame, end_frame, img_fmt)
+ })
+ elif renderer in [
+ "V_Ray_6_Hotfix_3",
+ "V_Ray_GPU_6_Hotfix_3"
+ ]:
+ if img_fmt != "exr":
+ render_name = self.get_render_elements_name()
+ if render_name:
+ for name in render_name:
+ render_dict.update({
+ name: self.get_expected_render_elements(
+ output_file, name, start_frame,
+ end_frame, img_fmt) # noqa
+ })
- def beauty_render_product(self, folder, startFrame, endFrame, fmt):
+ return render_dict
+
+ def get_expected_beauty(self, folder, start_frame, end_frame, fmt):
beauty_frame_range = []
- for f in range(startFrame, endFrame):
- beauty_output = f"{folder}.{f}.{fmt}"
+ for f in range(start_frame, end_frame):
+ frame = "%04d" % f
+ beauty_output = f"{folder}.{frame}.{fmt}"
beauty_output = beauty_output.replace("\\", "/")
beauty_frame_range.append(beauty_output)
return beauty_frame_range
- # TODO: Get the arnold render product
- def arnold_render_product(self, folder, startFrame, endFrame, fmt):
- """Get all the Arnold AOVs"""
- aovs = []
+ def get_arnold_product_name(self):
+ """Get all the Arnold AOVs name"""
+ aov_name = []
amw = rt.MaxtoAOps.AOVsManagerWindow()
aov_mgr = rt.renderers.current.AOVManager
@@ -100,34 +132,51 @@ class RenderProducts(object):
return
for i in range(aov_group_num):
# get the specific AOV group
- for aov in aov_mgr.drivers[i].aov_list:
- for f in range(startFrame, endFrame):
- render_element = f"{folder}_{aov.name}.{f}.{fmt}"
- render_element = render_element.replace("\\", "/")
- aovs.append(render_element)
-
+ aov_name.extend(aov.name for aov in aov_mgr.drivers[i].aov_list)
# close the AOVs manager window
amw.close()
- return aovs
+ return aov_name
- def render_elements_product(self, folder, startFrame, endFrame, fmt):
- """Get all the render element output files. """
- render_dirname = []
+ def get_expected_arnold_product(self, folder, name,
+ start_frame, end_frame, fmt):
+ """Get all the expected Arnold AOVs"""
+ aov_list = []
+ for f in range(start_frame, end_frame):
+ frame = "%04d" % f
+ render_element = f"{folder}_{name}.{frame}.{fmt}"
+ render_element = render_element.replace("\\", "/")
+ aov_list.append(render_element)
+ return aov_list
+
+ def get_render_elements_name(self):
+ """Get all the render element names for general """
+ render_name = []
render_elem = rt.maxOps.GetCurRenderElementMgr()
render_elem_num = render_elem.NumRenderElements()
+ if render_elem_num < 1:
+ return
# get render elements from the renders
for i in range(render_elem_num):
renderlayer_name = render_elem.GetRenderElement(i)
- target, renderpass = str(renderlayer_name).split(":")
if renderlayer_name.enabled:
- for f in range(startFrame, endFrame):
- render_element = f"{folder}_{renderpass}.{f}.{fmt}"
- render_element = render_element.replace("\\", "/")
- render_dirname.append(render_element)
+ target, renderpass = str(renderlayer_name).split(":")
+ render_name.append(renderpass)
- return render_dirname
+ return render_name
+
+ def get_expected_render_elements(self, folder, name,
+ start_frame, end_frame, fmt):
+ """Get all the expected render element output files. """
+ render_elements = []
+ for f in range(start_frame, end_frame):
+ frame = "%04d" % f
+ render_element = f"{folder}_{name}.{frame}.{fmt}"
+ render_element = render_element.replace("\\", "/")
+ render_elements.append(render_element)
+
+ return render_elements
def image_format(self):
return self._project_settings["max"]["RenderSettings"]["image_format"] # noqa
diff --git a/openpype/hosts/max/plugins/create/create_redshift_proxy.py b/openpype/hosts/max/plugins/create/create_redshift_proxy.py
new file mode 100644
index 0000000000..698ea82b69
--- /dev/null
+++ b/openpype/hosts/max/plugins/create/create_redshift_proxy.py
@@ -0,0 +1,18 @@
+# -*- coding: utf-8 -*-
+"""Creator plugin for creating camera."""
+from openpype.hosts.max.api import plugin
+from openpype.pipeline import CreatedInstance
+
+
+class CreateRedshiftProxy(plugin.MaxCreator):
+ identifier = "io.openpype.creators.max.redshiftproxy"
+ label = "Redshift Proxy"
+ family = "redshiftproxy"
+ icon = "gear"
+
+ def create(self, subset_name, instance_data, pre_create_data):
+
+ _ = super(CreateRedshiftProxy, self).create(
+ subset_name,
+ instance_data,
+ pre_create_data) # type: CreatedInstance
diff --git a/openpype/hosts/max/plugins/create/create_render.py b/openpype/hosts/max/plugins/create/create_render.py
index 68ae5eac72..5ad895b86e 100644
--- a/openpype/hosts/max/plugins/create/create_render.py
+++ b/openpype/hosts/max/plugins/create/create_render.py
@@ -1,5 +1,6 @@
# -*- coding: utf-8 -*-
"""Creator plugin for creating camera."""
+import os
from openpype.hosts.max.api import plugin
from openpype.pipeline import CreatedInstance
from openpype.hosts.max.api.lib_rendersettings import RenderSettings
@@ -14,6 +15,10 @@ class CreateRender(plugin.MaxCreator):
def create(self, subset_name, instance_data, pre_create_data):
from pymxs import runtime as rt
sel_obj = list(rt.selection)
+ file = rt.maxFileName
+ filename, _ = os.path.splitext(file)
+ instance_data["AssetName"] = filename
+
instance = super(CreateRender, self).create(
subset_name,
instance_data,
diff --git a/openpype/hosts/max/plugins/load/load_model.py b/openpype/hosts/max/plugins/load/load_model.py
index 95ee014e07..5f1ae3378e 100644
--- a/openpype/hosts/max/plugins/load/load_model.py
+++ b/openpype/hosts/max/plugins/load/load_model.py
@@ -1,8 +1,5 @@
-
import os
-from openpype.pipeline import (
- load, get_representation_path
-)
+from openpype.pipeline import load, get_representation_path
from openpype.hosts.max.api.pipeline import containerise
from openpype.hosts.max.api import lib
from openpype.hosts.max.api.lib import maintained_selection
@@ -24,24 +21,20 @@ class ModelAbcLoader(load.LoaderPlugin):
file_path = os.path.normpath(self.fname)
abc_before = {
- c for c in rt.rootNode.Children
+ c
+ for c in rt.rootNode.Children
if rt.classOf(c) == rt.AlembicContainer
}
- abc_import_cmd = (f"""
-AlembicImport.ImportToRoot = false
-AlembicImport.CustomAttributes = true
-AlembicImport.UVs = true
-AlembicImport.VertexColors = true
-
-importFile @"{file_path}" #noPrompt
- """)
-
- self.log.debug(f"Executing command: {abc_import_cmd}")
- rt.execute(abc_import_cmd)
+ rt.AlembicImport.ImportToRoot = False
+ rt.AlembicImport.CustomAttributes = True
+ rt.AlembicImport.UVs = True
+ rt.AlembicImport.VertexColors = True
+ rt.importFile(file_path, rt.name("noPrompt"))
abc_after = {
- c for c in rt.rootNode.Children
+ c
+ for c in rt.rootNode.Children
if rt.classOf(c) == rt.AlembicContainer
}
@@ -54,10 +47,12 @@ importFile @"{file_path}" #noPrompt
abc_container = abc_containers.pop()
return containerise(
- name, [abc_container], context, loader=self.__class__.__name__)
+ name, [abc_container], context, loader=self.__class__.__name__
+ )
def update(self, container, representation):
from pymxs import runtime as rt
+
path = get_representation_path(representation)
node = rt.getNodeByName(container["instance_node"])
rt.select(node.Children)
@@ -76,9 +71,10 @@ importFile @"{file_path}" #noPrompt
with maintained_selection():
rt.select(node)
- lib.imprint(container["instance_node"], {
- "representation": str(representation["_id"])
- })
+ lib.imprint(
+ container["instance_node"],
+ {"representation": str(representation["_id"])},
+ )
def switch(self, container, representation):
self.update(container, representation)
diff --git a/openpype/hosts/max/plugins/load/load_model_fbx.py b/openpype/hosts/max/plugins/load/load_model_fbx.py
index 01e6acae12..61101c482d 100644
--- a/openpype/hosts/max/plugins/load/load_model_fbx.py
+++ b/openpype/hosts/max/plugins/load/load_model_fbx.py
@@ -1,8 +1,5 @@
import os
-from openpype.pipeline import (
- load,
- get_representation_path
-)
+from openpype.pipeline import load, get_representation_path
from openpype.hosts.max.api.pipeline import containerise
from openpype.hosts.max.api import lib
from openpype.hosts.max.api.lib import maintained_selection
@@ -24,10 +21,7 @@ class FbxModelLoader(load.LoaderPlugin):
rt.FBXImporterSetParam("Animation", False)
rt.FBXImporterSetParam("Cameras", False)
rt.FBXImporterSetParam("Preserveinstances", True)
- rt.importFile(
- filepath,
- rt.name("noPrompt"),
- using=rt.FBXIMP)
+ rt.importFile(filepath, rt.name("noPrompt"), using=rt.FBXIMP)
container = rt.getNodeByName(f"{name}")
if not container:
@@ -38,7 +32,8 @@ class FbxModelLoader(load.LoaderPlugin):
selection.Parent = container
return containerise(
- name, [container], context, loader=self.__class__.__name__)
+ name, [container], context, loader=self.__class__.__name__
+ )
def update(self, container, representation):
from pymxs import runtime as rt
@@ -46,24 +41,21 @@ class FbxModelLoader(load.LoaderPlugin):
path = get_representation_path(representation)
node = rt.getNodeByName(container["instance_node"])
rt.select(node.Children)
- fbx_reimport_cmd = (
- f"""
-FBXImporterSetParam "Animation" false
-FBXImporterSetParam "Cameras" false
-FBXImporterSetParam "AxisConversionMethod" true
-FbxExporterSetParam "UpAxis" "Y"
-FbxExporterSetParam "Preserveinstances" true
-importFile @"{path}" #noPrompt using:FBXIMP
- """)
- rt.execute(fbx_reimport_cmd)
+ rt.FBXImporterSetParam("Animation", False)
+ rt.FBXImporterSetParam("Cameras", False)
+ rt.FBXImporterSetParam("AxisConversionMethod", True)
+ rt.FBXImporterSetParam("UpAxis", "Y")
+ rt.FBXImporterSetParam("Preserveinstances", True)
+ rt.importFile(path, rt.name("noPrompt"), using=rt.FBXIMP)
with maintained_selection():
rt.select(node)
- lib.imprint(container["instance_node"], {
- "representation": str(representation["_id"])
- })
+ lib.imprint(
+ container["instance_node"],
+ {"representation": str(representation["_id"])},
+ )
def switch(self, container, representation):
self.update(container, representation)
diff --git a/openpype/hosts/max/plugins/load/load_pointcache.py b/openpype/hosts/max/plugins/load/load_pointcache.py
index b3e12adc7b..5fb9772f87 100644
--- a/openpype/hosts/max/plugins/load/load_pointcache.py
+++ b/openpype/hosts/max/plugins/load/load_pointcache.py
@@ -5,9 +5,7 @@ Because of limited api, alembics can be only loaded, but not easily updated.
"""
import os
-from openpype.pipeline import (
- load, get_representation_path
-)
+from openpype.pipeline import load, get_representation_path
from openpype.hosts.max.api.pipeline import containerise
from openpype.hosts.max.api import lib
@@ -15,9 +13,7 @@ from openpype.hosts.max.api import lib
class AbcLoader(load.LoaderPlugin):
"""Alembic loader."""
- families = ["camera",
- "animation",
- "pointcache"]
+ families = ["camera", "animation", "pointcache"]
label = "Load Alembic"
representations = ["abc"]
order = -10
@@ -30,21 +26,17 @@ class AbcLoader(load.LoaderPlugin):
file_path = os.path.normpath(self.fname)
abc_before = {
- c for c in rt.rootNode.Children
+ c
+ for c in rt.rootNode.Children
if rt.classOf(c) == rt.AlembicContainer
}
- abc_export_cmd = (f"""
-AlembicImport.ImportToRoot = false
-
-importFile @"{file_path}" #noPrompt
- """)
-
- self.log.debug(f"Executing command: {abc_export_cmd}")
- rt.execute(abc_export_cmd)
+ rt.AlembicImport.ImportToRoot = False
+ rt.importFile(file_path, rt.name("noPrompt"))
abc_after = {
- c for c in rt.rootNode.Children
+ c
+ for c in rt.rootNode.Children
if rt.classOf(c) == rt.AlembicContainer
}
@@ -57,7 +49,8 @@ importFile @"{file_path}" #noPrompt
abc_container = abc_containers.pop()
return containerise(
- name, [abc_container], context, loader=self.__class__.__name__)
+ name, [abc_container], context, loader=self.__class__.__name__
+ )
def update(self, container, representation):
from pymxs import runtime as rt
@@ -69,9 +62,10 @@ importFile @"{file_path}" #noPrompt
for alembic_object in alembic_objects:
alembic_object.source = path
- lib.imprint(container["instance_node"], {
- "representation": str(representation["_id"])
- })
+ lib.imprint(
+ container["instance_node"],
+ {"representation": str(representation["_id"])},
+ )
def switch(self, container, representation):
self.update(container, representation)
diff --git a/openpype/hosts/max/plugins/load/load_redshift_proxy.py b/openpype/hosts/max/plugins/load/load_redshift_proxy.py
new file mode 100644
index 0000000000..31692f6367
--- /dev/null
+++ b/openpype/hosts/max/plugins/load/load_redshift_proxy.py
@@ -0,0 +1,63 @@
+import os
+import clique
+
+from openpype.pipeline import (
+ load,
+ get_representation_path
+)
+from openpype.hosts.max.api.pipeline import containerise
+from openpype.hosts.max.api import lib
+
+
+class RedshiftProxyLoader(load.LoaderPlugin):
+ """Load rs files with Redshift Proxy"""
+
+ label = "Load Redshift Proxy"
+ families = ["redshiftproxy"]
+ representations = ["rs"]
+ order = -9
+ icon = "code-fork"
+ color = "white"
+
+ def load(self, context, name=None, namespace=None, data=None):
+ from pymxs import runtime as rt
+
+ filepath = self.filepath_from_context(context)
+ rs_proxy = rt.RedshiftProxy()
+ rs_proxy.file = filepath
+ files_in_folder = os.listdir(os.path.dirname(filepath))
+ collections, remainder = clique.assemble(files_in_folder)
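+        # clique.assemble() splits the file names into frame collections and
+        # a remainder; any collection found implies the proxy is a sequence.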
+ if collections:
+ rs_proxy.is_sequence = True
+
+ container = rt.container()
+ container.name = name
+ rs_proxy.Parent = container
+
+ asset = rt.getNodeByName(name)
+
+ return containerise(
+ name, [asset], context, loader=self.__class__.__name__)
+
+ def update(self, container, representation):
+ from pymxs import runtime as rt
+
+ path = get_representation_path(representation)
+ node = rt.getNodeByName(container["instance_node"])
+ for children in node.Children:
+ children_node = rt.getNodeByName(children.name)
+ for proxy in children_node.Children:
+ proxy.file = path
+
+ lib.imprint(container["instance_node"], {
+ "representation": str(representation["_id"])
+ })
+
+ def switch(self, container, representation):
+ self.update(container, representation)
+
+ def remove(self, container):
+ from pymxs import runtime as rt
+
+ node = rt.getNodeByName(container["instance_node"])
+ rt.delete(node)
diff --git a/openpype/hosts/max/plugins/publish/collect_render.py b/openpype/hosts/max/plugins/publish/collect_render.py
index 00e00a8eb5..db5c84fad9 100644
--- a/openpype/hosts/max/plugins/publish/collect_render.py
+++ b/openpype/hosts/max/plugins/publish/collect_render.py
@@ -5,7 +5,8 @@ import pyblish.api
from pymxs import runtime as rt
from openpype.pipeline import get_current_asset_name
-from openpype.hosts.max.api.lib import get_max_version
+from openpype.hosts.max.api import colorspace
+from openpype.hosts.max.api.lib import get_max_version, get_current_renderer
from openpype.hosts.max.api.lib_renderproducts import RenderProducts
from openpype.client import get_last_version_by_subset_name
@@ -28,8 +29,16 @@ class CollectRender(pyblish.api.InstancePlugin):
context.data['currentFile'] = current_file
asset = get_current_asset_name()
- render_layer_files = RenderProducts().render_product(instance.name)
+ files_by_aov = RenderProducts().get_beauty(instance.name)
folder = folder.replace("\\", "/")
+ aovs = RenderProducts().get_aovs(instance.name)
+ files_by_aov.update(aovs)
+
+ if "expectedFiles" not in instance.data:
+ instance.data["expectedFiles"] = list()
+ instance.data["files"] = list()
+ instance.data["expectedFiles"].append(files_by_aov)
+ instance.data["files"].append(files_by_aov)
img_format = RenderProducts().image_format()
project_name = context.data["projectName"]
@@ -38,7 +47,6 @@ class CollectRender(pyblish.api.InstancePlugin):
version_doc = get_last_version_by_subset_name(project_name,
instance.name,
asset_id)
-
self.log.debug("version_doc: {0}".format(version_doc))
version_int = 1
if version_doc:
@@ -46,22 +54,42 @@ class CollectRender(pyblish.api.InstancePlugin):
self.log.debug(f"Setting {version_int} to context.")
context.data["version"] = version_int
- # setup the plugin as 3dsmax for the internal renderer
+        # OCIO config is not supported in most of the 3ds Max renderers,
+        # so these values are currently hard-coded.
+        # TODO: add options for Redshift/V-Ray OCIO config
+ instance.data["colorspaceConfig"] = ""
+ instance.data["colorspaceDisplay"] = "sRGB"
+ instance.data["colorspaceView"] = "ACES 1.0 SDR-video"
+ instance.data["renderProducts"] = colorspace.ARenderProduct()
+ instance.data["publishJobState"] = "Suspended"
+ instance.data["attachTo"] = []
+ renderer_class = get_current_renderer()
+ renderer = str(renderer_class).split(":")[0]
+ # also need to get the render dir for conversion
data = {
- "subset": instance.name,
"asset": asset,
+ "subset": str(instance.name),
"publish": True,
"maxversion": str(get_max_version()),
"imageFormat": img_format,
"family": 'maxrender',
"families": ['maxrender'],
+ "renderer": renderer,
"source": filepath,
- "expectedFiles": render_layer_files,
"plugin": "3dsmax",
"frameStart": int(rt.rendStart),
"frameEnd": int(rt.rendEnd),
"version": version_int,
"farm": True
}
- self.log.info("data: {0}".format(data))
instance.data.update(data)
+
+ # TODO: this should be unified with maya and its "multipart" flag
+ # on instance.
+ if renderer == "Redshift_Renderer":
+ instance.data.update(
+ {"separateAovFiles": rt.Execute(
+ "renderers.current.separateAovFiles")})
+
+ self.log.info("data: {0}".format(data))
diff --git a/openpype/hosts/max/plugins/publish/extract_redshift_proxy.py b/openpype/hosts/max/plugins/publish/extract_redshift_proxy.py
new file mode 100644
index 0000000000..3b44099609
--- /dev/null
+++ b/openpype/hosts/max/plugins/publish/extract_redshift_proxy.py
@@ -0,0 +1,62 @@
+import os
+import pyblish.api
+from openpype.pipeline import publish
+from pymxs import runtime as rt
+from openpype.hosts.max.api import maintained_selection
+
+
+class ExtractRedshiftProxy(publish.Extractor):
+ """
+ Extract Redshift Proxy with rsProxy
+ """
+
+ order = pyblish.api.ExtractorOrder - 0.1
+ label = "Extract RedShift Proxy"
+ hosts = ["max"]
+ families = ["redshiftproxy"]
+
+ def process(self, instance):
+ container = instance.data["instance_node"]
+ start = int(instance.context.data.get("frameStart"))
+ end = int(instance.context.data.get("frameEnd"))
+
+ self.log.info("Extracting Redshift Proxy...")
+ stagingdir = self.staging_dir(instance)
+ rs_filename = "{name}.rs".format(**instance.data)
+ rs_filepath = os.path.join(stagingdir, rs_filename)
+ rs_filepath = rs_filepath.replace("\\", "/")
+
+ rs_filenames = self.get_rsfiles(instance, start, end)
+
+ with maintained_selection():
+ # select and export
+ con = rt.getNodeByName(container)
+ rt.select(con.Children)
+ # Redshift rsProxy command
+ # rsProxy fp selected compress connectivity startFrame endFrame
+ # camera warnExisting transformPivotToOrigin
+ rt.rsProxy(rs_filepath, 1, 0, 0, start, end, 0, 1, 1)
+
+ self.log.info("Performing Extraction ...")
+
+ if "representations" not in instance.data:
+ instance.data["representations"] = []
+
+ representation = {
+ 'name': 'rs',
+ 'ext': 'rs',
+ 'files': rs_filenames if len(rs_filenames) > 1 else rs_filenames[0], # noqa
+ "stagingDir": stagingdir,
+ }
+ instance.data["representations"].append(representation)
+ self.log.info("Extracted instance '%s' to: %s" % (instance.name,
+ stagingdir))
+
+ def get_rsfiles(self, instance, startFrame, endFrame):
+ rs_filenames = []
+ rs_name = instance.data["name"]
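+        # One frame-padded .rs file per frame, e.g. "<name>.0001.rs"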
+ for frame in range(startFrame, endFrame + 1):
+ rs_filename = "%s.%04d.rs" % (rs_name, frame)
+ rs_filenames.append(rs_filename)
+
+ return rs_filenames
diff --git a/openpype/hosts/max/plugins/publish/save_scene.py b/openpype/hosts/max/plugins/publish/save_scene.py
new file mode 100644
index 0000000000..a40788ab41
--- /dev/null
+++ b/openpype/hosts/max/plugins/publish/save_scene.py
@@ -0,0 +1,21 @@
+import pyblish.api
+import os
+
+
+class SaveCurrentScene(pyblish.api.ContextPlugin):
+ """Save current scene
+
+ """
+
+ label = "Save current file"
+ order = pyblish.api.ExtractorOrder - 0.49
+ hosts = ["max"]
+ families = ["maxrender", "workfile"]
+
+ def process(self, context):
+ from pymxs import runtime as rt
+ folder = rt.maxFilePath
+ file = rt.maxFileName
+ current = os.path.join(folder, file)
+ assert context.data["currentFile"] == current
+ rt.saveMaxFile(current)
diff --git a/openpype/hosts/max/plugins/publish/validate_deadline_publish.py b/openpype/hosts/max/plugins/publish/validate_deadline_publish.py
new file mode 100644
index 0000000000..b2f0e863f4
--- /dev/null
+++ b/openpype/hosts/max/plugins/publish/validate_deadline_publish.py
@@ -0,0 +1,43 @@
+import os
+import pyblish.api
+from pymxs import runtime as rt
+from openpype.pipeline.publish import (
+ RepairAction,
+ ValidateContentsOrder,
+ PublishValidationError,
+ OptionalPyblishPluginMixin
+)
+from openpype.hosts.max.api.lib_rendersettings import RenderSettings
+
+
+class ValidateDeadlinePublish(pyblish.api.InstancePlugin,
+ OptionalPyblishPluginMixin):
+ """Validates Render File Directory is
+ not the same in every submission
+ """
+
+ order = ValidateContentsOrder
+ families = ["maxrender"]
+ hosts = ["max"]
+ label = "Render Output for Deadline"
+ optional = True
+ actions = [RepairAction]
+
+ def process(self, instance):
+ if not self.is_active(instance.data):
+ return
+ file = rt.maxFileName
+ filename, ext = os.path.splitext(file)
+ if filename not in rt.rendOutputFilename:
+ raise PublishValidationError(
+ "Render output folder "
+ "doesn't match the max scene name! "
+ "Use Repair action to "
+ "fix the folder file path.."
+ )
+
+ @classmethod
+ def repair(cls, instance):
+ container = instance.data.get("instance_node")
+ RenderSettings().render_output(container)
+ cls.log.debug("Reset the render output folder...")
diff --git a/openpype/hosts/max/plugins/publish/validate_renderer_redshift_proxy.py b/openpype/hosts/max/plugins/publish/validate_renderer_redshift_proxy.py
new file mode 100644
index 0000000000..bc82f82f3b
--- /dev/null
+++ b/openpype/hosts/max/plugins/publish/validate_renderer_redshift_proxy.py
@@ -0,0 +1,54 @@
+# -*- coding: utf-8 -*-
+import pyblish.api
+from openpype.pipeline import PublishValidationError
+from pymxs import runtime as rt
+from openpype.pipeline.publish import RepairAction
+from openpype.hosts.max.api.lib import get_current_renderer
+
+
+class ValidateRendererRedshiftProxy(pyblish.api.InstancePlugin):
+ """
+ Validates Redshift as the current renderer for creating
+ Redshift Proxy
+ """
+
+ order = pyblish.api.ValidatorOrder
+ families = ["redshiftproxy"]
+ hosts = ["max"]
+ label = "Redshift Renderer"
+ actions = [RepairAction]
+
+ def process(self, instance):
+ invalid = self.get_redshift_renderer(instance)
+ if invalid:
+ raise PublishValidationError("Please install Redshift for 3dsMax"
+ " before using the Redshift proxy instance") # noqa
+ invalid = self.get_current_renderer(instance)
+ if invalid:
+ raise PublishValidationError("The Redshift proxy extraction"
+ "discontinued since the current renderer is not Redshift") # noqa
+
+ def get_redshift_renderer(self, instance):
+ invalid = list()
+ max_renderers_list = str(rt.RendererClass.classes)
+ if "Redshift_Renderer" not in max_renderers_list:
+ invalid.append(max_renderers_list)
+
+ return invalid
+
+ def get_current_renderer(self, instance):
+ invalid = list()
+ renderer_class = get_current_renderer()
+ current_renderer = str(renderer_class).split(":")[0]
+ if current_renderer != "Redshift_Renderer":
+ invalid.append(current_renderer)
+
+ return invalid
+
+ @classmethod
+ def repair(cls, instance):
+ for Renderer in rt.RendererClass.classes:
+ renderer = Renderer()
+ if "Redshift_Renderer" in str(renderer):
+ rt.renderers.production = renderer
+ break
diff --git a/openpype/hosts/maya/plugins/create/create_render.py b/openpype/hosts/maya/plugins/create/create_render.py
index 387b7321b9..4681175808 100644
--- a/openpype/hosts/maya/plugins/create/create_render.py
+++ b/openpype/hosts/maya/plugins/create/create_render.py
@@ -181,16 +181,34 @@ class CreateRender(plugin.Creator):
primary_pool = pool_setting["primary_pool"]
sorted_pools = self._set_default_pool(list(pools), primary_pool)
- cmds.addAttr(self.instance, longName="primaryPool",
- attributeType="enum",
- enumName=":".join(sorted_pools))
+ cmds.addAttr(
+ self.instance,
+ longName="primaryPool",
+ attributeType="enum",
+ enumName=":".join(sorted_pools)
+ )
+ cmds.setAttr(
+ "{}.primaryPool".format(self.instance),
+ 0,
+ keyable=False,
+ channelBox=True
+ )
pools = ["-"] + pools
secondary_pool = pool_setting["secondary_pool"]
sorted_pools = self._set_default_pool(list(pools), secondary_pool)
- cmds.addAttr("{}.secondaryPool".format(self.instance),
- attributeType="enum",
- enumName=":".join(sorted_pools))
+ cmds.addAttr(
+ self.instance,
+ longName="secondaryPool",
+ attributeType="enum",
+ enumName=":".join(sorted_pools)
+ )
+ cmds.setAttr(
+ "{}.secondaryPool".format(self.instance),
+ 0,
+ keyable=False,
+ channelBox=True
+ )
def _create_render_settings(self):
"""Create instance settings."""
@@ -260,6 +278,12 @@ class CreateRender(plugin.Creator):
default_priority)
self.data["tile_priority"] = tile_priority
+ strict_error_checking = maya_submit_dl.get("strict_error_checking",
+ True)
+ self.data["strict_error_checking"] = strict_error_checking
+
+ # Pool attributes should be last since they will be recreated when
+ # the deadline server changes.
pool_setting = (self._project_settings["deadline"]
["publish"]
["CollectDeadlinePools"])
@@ -272,9 +296,6 @@ class CreateRender(plugin.Creator):
secondary_pool = pool_setting["secondary_pool"]
self.data["secondaryPool"] = self._set_default_pool(pool_names,
secondary_pool)
- strict_error_checking = maya_submit_dl.get("strict_error_checking",
- True)
- self.data["strict_error_checking"] = strict_error_checking
if muster_enabled:
self.log.info(">>> Loading Muster credentials ...")
diff --git a/openpype/hosts/maya/plugins/load/load_reference.py b/openpype/hosts/maya/plugins/load/load_reference.py
index f4a4a44344..74ca27ff3c 100644
--- a/openpype/hosts/maya/plugins/load/load_reference.py
+++ b/openpype/hosts/maya/plugins/load/load_reference.py
@@ -33,7 +33,7 @@ def preserve_modelpanel_cameras(container, log=None):
panel_cameras = {}
for panel in cmds.getPanel(type="modelPanel"):
cam = cmds.ls(cmds.modelPanel(panel, query=True, camera=True),
- long=True)
+ long=True)[0]
# Often but not always maya returns the transform from the
# modelPanel as opposed to the camera shape, so we convert it
diff --git a/openpype/hosts/maya/plugins/publish/collect_render.py b/openpype/hosts/maya/plugins/publish/collect_render.py
index 7c47f17acb..babd494758 100644
--- a/openpype/hosts/maya/plugins/publish/collect_render.py
+++ b/openpype/hosts/maya/plugins/publish/collect_render.py
@@ -336,7 +336,7 @@ class CollectMayaRender(pyblish.api.ContextPlugin):
context.data["system_settings"]["modules"]["deadline"]
)
if deadline_settings["enabled"]:
- data["deadlineUrl"] = render_instance.data.get("deadlineUrl")
+ data["deadlineUrl"] = render_instance.data["deadlineUrl"]
if self.sync_workfile_version:
data["version"] = context.data["version"]
diff --git a/openpype/hosts/nuke/startup/__init__.py b/openpype/hosts/nuke/startup/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/openpype/hosts/nuke/startup/frame_setting_for_read_nodes.py b/openpype/hosts/nuke/startup/frame_setting_for_read_nodes.py
new file mode 100644
index 0000000000..f0cbabe20f
--- /dev/null
+++ b/openpype/hosts/nuke/startup/frame_setting_for_read_nodes.py
@@ -0,0 +1,47 @@
+""" OpenPype custom script for resetting read nodes start frame values """
+
+import nuke
+import nukescripts
+
+
+class FrameSettingsPanel(nukescripts.PythonPanel):
+ """ Frame Settings Panel """
+ def __init__(self):
+ nukescripts.PythonPanel.__init__(self, "Set Frame Start (Read Node)")
+
+ # create knobs
+ self.frame = nuke.Int_Knob(
+ 'frame', 'Frame Number')
+ self.selected = nuke.Boolean_Knob("selection")
+ # add knobs to panel
+ self.addKnob(self.selected)
+ self.addKnob(self.frame)
+
+ # set values
+ self.selected.setValue(False)
+ self.frame.setValue(nuke.root().firstFrame())
+
+ def process(self):
+ """ Process the panel values. """
+ # get values
+ frame = self.frame.value()
+ if self.selected.value():
+ # selected nodes processing
+ if not nuke.selectedNodes():
+ return
+ for rn_ in nuke.selectedNodes():
+ if rn_.Class() != "Read":
+ continue
+ rn_["frame_mode"].setValue("start_at")
+ rn_["frame"].setValue(str(frame))
+ else:
+ # all nodes processing
+ for rn_ in nuke.allNodes(filter="Read"):
+ rn_["frame_mode"].setValue("start_at")
+ rn_["frame"].setValue(str(frame))
+
+
+def main():
+ p_ = FrameSettingsPanel()
+ if p_.showModalDialog():
+ print(p_.process())
diff --git a/openpype/hosts/resolve/api/workio.py b/openpype/hosts/resolve/api/workio.py
index 5ce73eea53..5966fa6a43 100644
--- a/openpype/hosts/resolve/api/workio.py
+++ b/openpype/hosts/resolve/api/workio.py
@@ -43,18 +43,22 @@ def open_file(filepath):
"""
Loading project
"""
+
+ from . import bmdvr
+
pm = get_project_manager()
+ page = bmdvr.GetCurrentPage()
+ if page is not None:
+        # Save the current project only if Resolve has an active page;
+        # otherwise we consider Resolve to be in a pre-launch state
+        # (no open UI yet).
+ project = pm.GetCurrentProject()
+ print(f"Saving current project: {project}")
+ pm.SaveProject()
+
file = os.path.basename(filepath)
fname, _ = os.path.splitext(file)
dname, _ = fname.split("_v")
-
- # deal with current project
- project = pm.GetCurrentProject()
- log.info(f"Test `pm`: {pm}")
- pm.SaveProject()
-
try:
- log.info(f"Test `dname`: {dname}")
if not set_project_manager_to_folder_name(dname):
raise
# load project from input path
@@ -72,6 +76,7 @@ def open_file(filepath):
return False
return True
+
def current_file():
pm = get_project_manager()
current_dir = os.getenv("AVALON_WORKDIR")
diff --git a/openpype/hosts/resolve/hooks/pre_resolve_launch_last_workfile.py b/openpype/hosts/resolve/hooks/pre_resolve_launch_last_workfile.py
new file mode 100644
index 0000000000..0e27ddb8c3
--- /dev/null
+++ b/openpype/hosts/resolve/hooks/pre_resolve_launch_last_workfile.py
@@ -0,0 +1,45 @@
+import os
+
+from openpype.lib import PreLaunchHook
+import openpype.hosts.resolve
+
+
+class ResolveLaunchLastWorkfile(PreLaunchHook):
+ """Special hook to open last workfile for Resolve.
+
+    Checks 'start_last_workfile'; if it is set to False, the last workfile
+    is not opened. This property is set explicitly in the Launcher.
+ """
+
+ # Execute after workfile template copy
+ order = 10
+ app_groups = ["resolve"]
+
+ def execute(self):
+ if not self.data.get("start_last_workfile"):
+ self.log.info("It is set to not start last workfile on start.")
+ return
+
+ last_workfile = self.data.get("last_workfile_path")
+ if not last_workfile:
+ self.log.warning("Last workfile was not collected.")
+ return
+
+ if not os.path.exists(last_workfile):
+ self.log.info("Current context does not have any workfile yet.")
+ return
+
+ # Add path to launch environment for the startup script to pick up
+ self.log.info(f"Setting OPENPYPE_RESOLVE_OPEN_ON_LAUNCH to launch "
+ f"last workfile: {last_workfile}")
+ key = "OPENPYPE_RESOLVE_OPEN_ON_LAUNCH"
+ self.launch_context.env[key] = last_workfile
+
+ # Set the openpype prelaunch startup script path for easy access
+ # in the LUA .scriptlib code
+ op_resolve_root = os.path.dirname(openpype.hosts.resolve.__file__)
+ script_path = os.path.join(op_resolve_root, "startup.py")
+ key = "OPENPYPE_RESOLVE_STARTUP_SCRIPT"
+ self.launch_context.env[key] = script_path
+ self.log.info("Setting OPENPYPE_RESOLVE_STARTUP_SCRIPT to: "
+ f"{script_path}")
diff --git a/openpype/hosts/resolve/startup.py b/openpype/hosts/resolve/startup.py
new file mode 100644
index 0000000000..79a64e0fbf
--- /dev/null
+++ b/openpype/hosts/resolve/startup.py
@@ -0,0 +1,62 @@
+"""This script is used as a startup script in Resolve through a .scriptlib file
+
+It triggers directly after the launch of Resolve, so it should stay optimized
+for fast performance: the Resolve UI is already interactive while this runs,
+and nothing prevents the user from continuing manually before any of this
+logic finishes. For that reason we also delay imports as much as possible.
+
+This code runs in a separate process to the main Resolve process.
+
+"""
+import os
+
+import openpype.hosts.resolve.api
+
+
+def ensure_installed_host():
+ """Install resolve host with openpype and return the registered host.
+
+ This function can be called multiple times without triggering an
+ additional install.
+ """
+ from openpype.pipeline import install_host, registered_host
+ host = registered_host()
+ if host:
+ return host
+
+ install_host(openpype.hosts.resolve.api)
+ return registered_host()
+
+
+def launch_menu():
+ print("Launching Resolve OpenPype menu..")
+ ensure_installed_host()
+ openpype.hosts.resolve.api.launch_pype_menu()
+
+
+def open_file(path):
+ # Avoid the need to "install" the host
+ host = ensure_installed_host()
+ host.open_file(path)
+
+
+def main():
+ # Open last workfile
+ workfile_path = os.environ.get("OPENPYPE_RESOLVE_OPEN_ON_LAUNCH")
+ if workfile_path:
+ open_file(workfile_path)
+ else:
+ print("No last workfile set to open. Skipping..")
+
+ # Launch OpenPype menu
+ from openpype.settings import get_project_settings
+ from openpype.pipeline.context_tools import get_current_project_name
+ project_name = get_current_project_name()
+ settings = get_project_settings(project_name)
+ if settings.get("resolve", {}).get("launch_openpype_menu_on_start", True):
+ launch_menu()
+
+
+if __name__ == "__main__":
+ main()
diff --git a/openpype/hosts/resolve/utility_scripts/openpype_startup.scriptlib b/openpype/hosts/resolve/utility_scripts/openpype_startup.scriptlib
new file mode 100644
index 0000000000..ec9b30a18d
--- /dev/null
+++ b/openpype/hosts/resolve/utility_scripts/openpype_startup.scriptlib
@@ -0,0 +1,21 @@
+-- Run OpenPype's Python launch script for Resolve
+function file_exists(name)
+ local f = io.open(name, "r")
+ return f ~= nil and io.close(f)
+end
+
+
+openpype_startup_script = os.getenv("OPENPYPE_RESOLVE_STARTUP_SCRIPT")
+if openpype_startup_script ~= nil then
+ script = fusion:MapPath(openpype_startup_script)
+
+ if file_exists(script) then
+ -- We must use RunScript to ensure it runs in a separate
+ -- process to Resolve itself to avoid a deadlock for
+ -- certain imports of OpenPype libraries or Qt
+ print("Running launch script: " .. script)
+ fusion:RunScript(script)
+ else
+ print("Launch script not found at: " .. script)
+ end
+end
\ No newline at end of file
diff --git a/openpype/hosts/resolve/utils.py b/openpype/hosts/resolve/utils.py
index 9a161f4865..5e3003862f 100644
--- a/openpype/hosts/resolve/utils.py
+++ b/openpype/hosts/resolve/utils.py
@@ -29,6 +29,9 @@ def setup(env):
log.info("Utility Scripts Dir: `{}`".format(util_scripts_paths))
log.info("Utility Scripts: `{}`".format(scripts))
+ # Make sure scripts dir exists
+ os.makedirs(util_scripts_dir, exist_ok=True)
+
# make sure no script file is in folder
for script in os.listdir(util_scripts_dir):
path = os.path.join(util_scripts_dir, script)
@@ -50,6 +53,14 @@ def setup(env):
src = os.path.join(directory, script)
dst = os.path.join(util_scripts_dir, script)
+
+ # TODO: Make this a less hacky workaround
+ if script == "openpype_startup.scriptlib":
+ # Handle special case for scriptlib that needs to be a folder
+ # up from the Comp folder in the Fusion scripts
+ dst = os.path.join(os.path.dirname(util_scripts_dir),
+ script)
+
log.info("Copying `{}` to `{}`...".format(src, dst))
if os.path.isdir(src):
shutil.copytree(
diff --git a/openpype/lib/project_backpack.py b/openpype/lib/project_backpack.py
index 07107ec011..674eaa3b91 100644
--- a/openpype/lib/project_backpack.py
+++ b/openpype/lib/project_backpack.py
@@ -113,26 +113,29 @@ def pack_project(
project_name
))
- roots = project_doc["config"]["roots"]
- # Determine root directory of project
- source_root = None
- source_root_name = None
- for root_name, root_value in roots.items():
- if source_root is not None:
- raise ValueError(
- "Packaging is supported only for single root projects"
- )
- source_root = root_value
- source_root_name = root_name
+ root_path = None
+    source_root = None
+ project_source_path = None
+ if not only_documents:
+ roots = project_doc["config"]["roots"]
+ # Determine root directory of project
+ source_root_name = None
+ for root_name, root_value in roots.items():
+ if source_root is not None:
+ raise ValueError(
+ "Packaging is supported only for single root projects"
+ )
+ source_root = root_value
+ source_root_name = root_name
- root_path = source_root[platform.system().lower()]
- print("Using root \"{}\" with path \"{}\"".format(
- source_root_name, root_path
- ))
+ root_path = source_root[platform.system().lower()]
+ print("Using root \"{}\" with path \"{}\"".format(
+ source_root_name, root_path
+ ))
- project_source_path = os.path.join(root_path, project_name)
- if not os.path.exists(project_source_path):
- raise ValueError("Didn't find source of project files")
+ project_source_path = os.path.join(root_path, project_name)
+ if not os.path.exists(project_source_path):
+ raise ValueError("Didn't find source of project files")
# Determine zip filepath where data will be stored
if not destination_dir:
@@ -273,8 +276,7 @@ def unpack_project(
low_platform = platform.system().lower()
project_name = metadata["project_name"]
- source_root = metadata["root"]
- root_path = source_root[low_platform]
+ root_path = metadata["root"].get(low_platform)
# Drop existing collection
replace_project_documents(project_name, docs, database_name)
diff --git a/openpype/modules/deadline/abstract_submit_deadline.py b/openpype/modules/deadline/abstract_submit_deadline.py
index 558a637e4b..7938c27233 100644
--- a/openpype/modules/deadline/abstract_submit_deadline.py
+++ b/openpype/modules/deadline/abstract_submit_deadline.py
@@ -582,7 +582,6 @@ class AbstractSubmitDeadline(pyblish.api.InstancePlugin):
metadata_folder = metadata_folder.replace(orig_scene,
new_scene)
instance.data["publishRenderMetadataFolder"] = metadata_folder
-
self.log.info("Scene name was switched {} -> {}".format(
orig_scene, new_scene
))
diff --git a/openpype/modules/deadline/plugins/publish/collect_deadline_server_from_instance.py b/openpype/modules/deadline/plugins/publish/collect_deadline_server_from_instance.py
index 9981bead3e..2de6073e29 100644
--- a/openpype/modules/deadline/plugins/publish/collect_deadline_server_from_instance.py
+++ b/openpype/modules/deadline/plugins/publish/collect_deadline_server_from_instance.py
@@ -5,23 +5,26 @@ This is resolving index of server lists stored in `deadlineServers` instance
attribute or using default server if that attribute doesn't exists.
"""
+from maya import cmds
+
import pyblish.api
class CollectDeadlineServerFromInstance(pyblish.api.InstancePlugin):
"""Collect Deadline Webservice URL from instance."""
- order = pyblish.api.CollectorOrder + 0.415
+ # Run before collect_render.
+ order = pyblish.api.CollectorOrder + 0.005
label = "Deadline Webservice from the Instance"
families = ["rendering", "renderlayer"]
+ hosts = ["maya"]
def process(self, instance):
instance.data["deadlineUrl"] = self._collect_deadline_url(instance)
self.log.info(
"Using {} for submission.".format(instance.data["deadlineUrl"]))
- @staticmethod
- def _collect_deadline_url(render_instance):
+ def _collect_deadline_url(self, render_instance):
# type: (pyblish.api.Instance) -> str
"""Get Deadline Webservice URL from render instance.
@@ -49,8 +52,16 @@ class CollectDeadlineServerFromInstance(pyblish.api.InstancePlugin):
default_server = render_instance.context.data["defaultDeadline"]
instance_server = render_instance.data.get("deadlineServers")
if not instance_server:
+ self.log.debug("Using default server.")
return default_server
+        # Get instance server as a string.
+ if isinstance(instance_server, int):
+ instance_server = cmds.getAttr(
+ "{}.deadlineServers".format(render_instance.data["objset"]),
+ asString=True
+ )
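+            # The enum attribute stores an index; asString=True returns the
+            # enum label, i.e. the Deadline server name.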
+
default_servers = deadline_settings["deadline_urls"]
project_servers = (
render_instance.context.data
@@ -58,15 +69,23 @@ class CollectDeadlineServerFromInstance(pyblish.api.InstancePlugin):
["deadline"]
["deadline_servers"]
)
- deadline_servers = {
+ if not project_servers:
+ self.log.debug("Not project servers found. Using default servers.")
+ return default_servers[instance_server]
+
+ project_enabled_servers = {
k: default_servers[k]
for k in project_servers
if k in default_servers
}
- # This is Maya specific and may not reflect real selection of deadline
- # url as dictionary keys in Python 2 are not ordered
- return deadline_servers[
- list(deadline_servers.keys())[
- int(render_instance.data.get("deadlineServers"))
- ]
- ]
+
+ msg = (
+ "\"{}\" server on instance is not enabled in project settings."
+ " Enabled project servers:\n{}".format(
+ instance_server, project_enabled_servers
+ )
+ )
+ assert instance_server in project_enabled_servers, msg
+
+ self.log.debug("Using project approved server.")
+ return project_enabled_servers[instance_server]
diff --git a/openpype/modules/deadline/plugins/publish/collect_default_deadline_server.py b/openpype/modules/deadline/plugins/publish/collect_default_deadline_server.py
index cb2b0cf156..1a0d615dc3 100644
--- a/openpype/modules/deadline/plugins/publish/collect_default_deadline_server.py
+++ b/openpype/modules/deadline/plugins/publish/collect_default_deadline_server.py
@@ -17,7 +17,8 @@ class CollectDefaultDeadlineServer(pyblish.api.ContextPlugin):
`CollectDeadlineServerFromInstance`.
"""
- order = pyblish.api.CollectorOrder + 0.410
+ # Run before collect_deadline_server_instance.
+ order = pyblish.api.CollectorOrder + 0.0025
label = "Default Deadline Webservice"
pass_mongo_url = False
diff --git a/openpype/modules/deadline/plugins/publish/submit_max_deadline.py b/openpype/modules/deadline/plugins/publish/submit_max_deadline.py
index c728b6b9c7..b6a30e36b7 100644
--- a/openpype/modules/deadline/plugins/publish/submit_max_deadline.py
+++ b/openpype/modules/deadline/plugins/publish/submit_max_deadline.py
@@ -78,7 +78,7 @@ class MaxSubmitDeadline(abstract_submit_deadline.AbstractSubmitDeadline,
job_info.BatchName = src_filename
job_info.Plugin = instance.data["plugin"]
job_info.UserName = context.data.get("deadlineUser", getpass.getuser())
-
+ job_info.EnableAutoTimeout = True
# Deadline requires integers in frame range
frames = "{start}-{end}".format(
start=int(instance.data["frameStart"]),
@@ -133,7 +133,8 @@ class MaxSubmitDeadline(abstract_submit_deadline.AbstractSubmitDeadline,
# Add list of expected files to job
# ---------------------------------
exp = instance.data.get("expectedFiles")
- for filepath in exp:
+
+ for filepath in self._iter_expected_files(exp):
job_info.OutputDirectory += os.path.dirname(filepath)
job_info.OutputFilename += os.path.basename(filepath)
@@ -162,10 +163,11 @@ class MaxSubmitDeadline(abstract_submit_deadline.AbstractSubmitDeadline,
instance = self._instance
filepath = self.scene_path
- expected_files = instance.data["expectedFiles"]
- if not expected_files:
+ files = instance.data["expectedFiles"]
+ if not files:
raise RuntimeError("No Render Elements found!")
- output_dir = os.path.dirname(expected_files[0])
+ first_file = next(self._iter_expected_files(files))
+ output_dir = os.path.dirname(first_file)
instance.data["outputDir"] = output_dir
instance.data["toBeRenderedOn"] = "deadline"
@@ -196,25 +198,22 @@ class MaxSubmitDeadline(abstract_submit_deadline.AbstractSubmitDeadline,
else:
plugin_data["DisableMultipass"] = 1
- expected_files = instance.data.get("expectedFiles")
- if not expected_files:
+ files = instance.data.get("expectedFiles")
+ if not files:
raise RuntimeError("No render elements found")
- old_output_dir = os.path.dirname(expected_files[0])
+ first_file = next(self._iter_expected_files(files))
+ old_output_dir = os.path.dirname(first_file)
output_beauty = RenderSettings().get_render_output(instance.name,
old_output_dir)
- filepath = self.from_published_scene()
-
- def _clean_name(path):
- return os.path.splitext(os.path.basename(path))[0]
-
- new_scene = _clean_name(filepath)
- orig_scene = _clean_name(instance.context.data["currentFile"])
-
- output_beauty = output_beauty.replace(orig_scene, new_scene)
- output_beauty = output_beauty.replace("\\", "/")
- plugin_data["RenderOutput"] = output_beauty
-
+ rgb_bname = os.path.basename(output_beauty)
+            output_dir = os.path.dirname(first_file)
+            beauty_name = f"{output_dir}/{rgb_bname}"
+ beauty_name = beauty_name.replace("\\", "/")
+ plugin_data["RenderOutput"] = beauty_name
+            # 3ds Max ships localized builds, so force the submission
+            # language to English (ENU)
+ plugin_data["Language"] = "ENU"
renderer_class = get_current_renderer()
+
renderer = str(renderer_class).split(":")[0]
if renderer in [
"ART_Renderer",
@@ -226,14 +225,37 @@ class MaxSubmitDeadline(abstract_submit_deadline.AbstractSubmitDeadline,
]:
render_elem_list = RenderSettings().get_render_element()
for i, element in enumerate(render_elem_list):
- element = element.replace(orig_scene, new_scene)
- plugin_data["RenderElementOutputFilename%d" % i] = element # noqa
+ elem_bname = os.path.basename(element)
+ new_elem = f"{dir}/{elem_bname}"
+ new_elem = new_elem.replace("/", "\\")
+ plugin_data["RenderElementOutputFilename%d" % i] = new_elem # noqa
+
+ if renderer == "Redshift_Renderer":
+ plugin_data["redshift_SeparateAovFiles"] = instance.data.get(
+ "separateAovFiles")
self.log.debug("plugin data:{}".format(plugin_data))
plugin_info.update(plugin_data)
return job_info, plugin_info
+ def from_published_scene(self, replace_in_path=True):
+ instance = self._instance
+ if instance.data["renderer"] == "Redshift_Renderer":
+ self.log.debug("Using Redshift...published scene wont be used..")
+ replace_in_path = False
+ return replace_in_path
+
+ @staticmethod
+ def _iter_expected_files(exp):
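+        # "expectedFiles" is either a flat list of file paths or a single
+        # dict of {aov_name: [paths]} (see the Max collect_render plugin).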
+ if isinstance(exp[0], dict):
+ for _aov, files in exp[0].items():
+ for file in files:
+ yield file
+ else:
+ for file in exp:
+ yield file
+
@classmethod
def get_attribute_defs(cls):
defs = super(MaxSubmitDeadline, cls).get_attribute_defs()
diff --git a/openpype/modules/deadline/plugins/publish/submit_publish_job.py b/openpype/modules/deadline/plugins/publish/submit_publish_job.py
index f646551a07..e41be7000b 100644
--- a/openpype/modules/deadline/plugins/publish/submit_publish_job.py
+++ b/openpype/modules/deadline/plugins/publish/submit_publish_job.py
@@ -1089,6 +1089,10 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
deadline_publish_job_id = \
self._submit_deadline_post_job(instance, render_job, instances)
+ # Inject deadline url to instances.
+ for inst in instances:
+ inst["deadlineUrl"] = self.deadline_url
+
# publish job file
publish_job = {
"asset": asset,
diff --git a/openpype/plugins/publish/collect_frames_fix.py b/openpype/plugins/publish/collect_frames_fix.py
index 837738eb06..86e727b053 100644
--- a/openpype/plugins/publish/collect_frames_fix.py
+++ b/openpype/plugins/publish/collect_frames_fix.py
@@ -35,41 +35,47 @@ class CollectFramesFixDef(
rewrite_version = attribute_values.get("rewrite_version")
- if frames_to_fix:
- instance.data["frames_to_fix"] = frames_to_fix
+ if not frames_to_fix:
+ return
- subset_name = instance.data["subset"]
- asset_name = instance.data["asset"]
+ instance.data["frames_to_fix"] = frames_to_fix
- project_entity = instance.data["projectEntity"]
- project_name = project_entity["name"]
+ subset_name = instance.data["subset"]
+ asset_name = instance.data["asset"]
- version = get_last_version_by_subset_name(project_name,
- subset_name,
- asset_name=asset_name)
- if not version:
- self.log.warning("No last version found, "
- "re-render not possible")
- return
+ project_entity = instance.data["projectEntity"]
+ project_name = project_entity["name"]
- representations = get_representations(project_name,
- version_ids=[version["_id"]])
- published_files = []
- for repre in representations:
- if repre["context"]["family"] not in self.families:
- continue
+ version = get_last_version_by_subset_name(
+ project_name,
+ subset_name,
+ asset_name=asset_name
+ )
+ if not version:
+ self.log.warning(
+ "No last version found, re-render not possible"
+ )
+ return
- for file_info in repre.get("files"):
- published_files.append(file_info["path"])
+ representations = get_representations(
+ project_name, version_ids=[version["_id"]]
+ )
+ published_files = []
+ for repre in representations:
+ if repre["context"]["family"] not in self.families:
+ continue
- instance.data["last_version_published_files"] = published_files
- self.log.debug("last_version_published_files::{}".format(
- instance.data["last_version_published_files"]))
+ for file_info in repre.get("files"):
+ published_files.append(file_info["path"])
- if rewrite_version:
- instance.data["version"] = version["name"]
- # limits triggering version validator
- instance.data.pop("latestVersion")
+ instance.data["last_version_published_files"] = published_files
+ self.log.debug("last_version_published_files::{}".format(
+ instance.data["last_version_published_files"]))
+
+ if self.rewrite_version_enable and rewrite_version:
+ instance.data["version"] = version["name"]
+            # Limits triggering of the version validator.
+ instance.data.pop("latestVersion")
@classmethod
def get_attribute_defs(cls):
diff --git a/openpype/settings/defaults/project_settings/nuke.json b/openpype/settings/defaults/project_settings/nuke.json
index f01bdf7d50..3f8be4c872 100644
--- a/openpype/settings/defaults/project_settings/nuke.json
+++ b/openpype/settings/defaults/project_settings/nuke.json
@@ -222,6 +222,13 @@
"title": "OpenPype Docs",
"command": "import webbrowser;webbrowser.open(url='https://openpype.io/docs/artist_hosts_nuke_tut')",
"tooltip": "Open the OpenPype Nuke user doc page"
+ },
+ {
+ "type": "action",
+ "sourcetype": "python",
+ "title": "Set Frame Start (Read Node)",
+ "command": "from openpype.hosts.nuke.startup.frame_setting_for_read_nodes import main;main();",
+ "tooltip": "Set frame start for read node(s)"
}
]
},
diff --git a/openpype/settings/defaults/project_settings/resolve.json b/openpype/settings/defaults/project_settings/resolve.json
index 264f3bd902..56efa78e89 100644
--- a/openpype/settings/defaults/project_settings/resolve.json
+++ b/openpype/settings/defaults/project_settings/resolve.json
@@ -1,4 +1,5 @@
{
+ "launch_openpype_menu_on_start": false,
"imageio": {
"ocio_config": {
"enabled": false,
diff --git a/openpype/settings/entities/schemas/projects_schema/schema_project_resolve.json b/openpype/settings/entities/schemas/projects_schema/schema_project_resolve.json
index b326f22394..6f98bdd3bd 100644
--- a/openpype/settings/entities/schemas/projects_schema/schema_project_resolve.json
+++ b/openpype/settings/entities/schemas/projects_schema/schema_project_resolve.json
@@ -5,6 +5,11 @@
"label": "DaVinci Resolve",
"is_file": true,
"children": [
+ {
+ "type": "boolean",
+ "key": "launch_openpype_menu_on_start",
+ "label": "Launch OpenPype menu on start of Resolve"
+ },
{
"key": "imageio",
"type": "dict",
diff --git a/openpype/tools/tray/pype_tray.py b/openpype/tools/tray/pype_tray.py
index 2f3b5251f9..fdc0a8094d 100644
--- a/openpype/tools/tray/pype_tray.py
+++ b/openpype/tools/tray/pype_tray.py
@@ -633,10 +633,10 @@ class TrayManager:
# Create a copy of sys.argv
additional_args = list(sys.argv)
- # Check last argument from `get_openpype_execute_args`
- # - when running from code it is the same as first from sys.argv
- if args[-1] == additional_args[0]:
- additional_args.pop(0)
+ # Remove first argument from 'sys.argv'
+ # - when running from code the first argument is 'start.py'
+ # - when running from build the first argument is executable
+ additional_args.pop(0)
cleanup_additional_args = False
if use_expected_version:
@@ -663,7 +663,6 @@ class TrayManager:
additional_args = _additional_args
args.extend(additional_args)
-
run_detached_process(args, env=envs)
self.exit()
diff --git a/website/docs/artist_hosts_3dsmax.md b/website/docs/artist_hosts_3dsmax.md
index 12c1f40181..fffab8ca5d 100644
--- a/website/docs/artist_hosts_3dsmax.md
+++ b/website/docs/artist_hosts_3dsmax.md
@@ -30,7 +30,7 @@ By clicking the icon ```OpenPype Menu``` rolls out.
Choose ```OpenPype Menu > Launcher``` to open the ```Launcher``` window.
-When opened you can **choose** the **project** to work in from the list. Then choose the particular **asset** you want to work on then choose **task**
+When opened, you can **choose** the **project** to work in from the list, then choose the particular **asset** you want to work on, then choose the **task**
and finally **run 3dsmax by its icon** in the tools.

@@ -65,13 +65,13 @@ If not any workfile present simply hit ```Save As``` and keep ```Subversion``` e

-OpenPype correctly names it and add version to the workfile. This basically happens whenever user trigger ```Save As``` action. Resulting into incremental version numbers like
+OpenPype correctly names it and adds a version to the workfile. This happens whenever the user triggers the ```Save As``` action, resulting in incremental version numbers like
```workfileName_v001```
```workfileName_v002```
- etc.
+ etc.
This means the user does not have to guess the correct naming or handle the other bookkeeping needed to keep everything in order and managed.
@@ -105,13 +105,13 @@ Before proceeding further please check [Glossary](artist_concepts.md) and [What
### Intro
-Current OpenPype integration (ver 3.15.0) supports only ```PointCache``` and ```Camera``` families now.
+The current OpenPype integration (ver 3.15.0) supports only the ```PointCache```, ```Camera```, ```Geometry``` and ```Redshift Proxy``` families.
**Pointcache** family being basically any geometry outputted as Alembic cache (.abc) format
**Camera** family being 3dsmax Camera object with/without animation outputted as native .max, FBX, Alembic format
-
+**Redshift Proxy** family being a Redshift Proxy object with/without animation outputted in the .rs format (Redshift's own proxy format)
---
:::note Work in progress
@@ -119,7 +119,3 @@ This part of documentation is still work in progress.
:::
## ...to be added
-
-
-
-
diff --git a/website/docs/dev_blender.md b/website/docs/dev_blender.md
new file mode 100644
index 0000000000..bed0e4a09d
--- /dev/null
+++ b/website/docs/dev_blender.md
@@ -0,0 +1,61 @@
+---
+id: dev_blender
+title: Blender integration
+sidebar_label: Blender integration
+toc_max_heading_level: 4
+---
+
+## Run python script at launch
+In case you need to execute a Python script when Blender is started (aka [`-P`](https://docs.blender.org/manual/en/latest/advanced/command_line/arguments.html#python-options)), for example to programmatically modify a Blender file to conform it, you can create an OpenPype hook as follows:
+
+```python
+from openpype.hosts.blender.hooks import pre_add_run_python_script_arg
+from openpype.lib import PreLaunchHook
+
+
+class MyHook(PreLaunchHook):
+ """Add python script to be executed before Blender launch."""
+
+ order = pre_add_run_python_script_arg.AddPythonScriptToLaunchArgs.order - 1
+ app_groups = [
+ "blender",
+ ]
+
+ def execute(self):
+ self.launch_context.data.setdefault("python_scripts", []).append(
+ "/path/to/my_script.py"
+ )
+```
+
+You can write a bare Python script, just as you would run it in Blender's [Text Editor](https://docs.blender.org/manual/en/latest/editors/text_editor.html).
+
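+For illustration, a minimal script (a hypothetical example, not shipped with OpenPype) could use the `bpy` API to tweak the opened file and save it:
+
+```python
+import bpy
+
+# Hypothetical example: make sure every mesh object is rendered,
+# then save the change back to the opened .blend file.
+for obj in bpy.data.objects:
+    if obj.type == "MESH":
+        obj.hide_render = False
+
+bpy.ops.wm.save_mainfile()
+```
+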
+### Python script with arguments
+#### Adding arguments
+If you need to pass arguments to your script, add them to `self.launch_context.data["script_args"]`:
+
+```python
+self.launch_context.data.setdefault("script_args", []).extend(
+    ["--my-arg", "value"]
+)
+```
+
+#### Parsing arguments
+Blender passes everything after the `--` separator through to your script untouched, so you can parse those trailing arguments from `sys.argv` with [argparse](https://docs.python.org/3/library/argparse.html):
+
+```python
+import argparse
+import sys
+
+parser = argparse.ArgumentParser(
+ description="Parsing arguments for my_script.py"
+)
+parser.add_argument(
+ "--my-arg",
+ nargs="?",
+ help="My argument",
+)
+args, unknown = parser.parse_known_args(
+ sys.argv[sys.argv.index("--") + 1 :]
+)
+print(args.my_arg)
+```
diff --git a/website/docs/project_settings/assets/global_extract_review_letter_box_settings.png b/website/docs/project_settings/assets/global_extract_review_letter_box_settings.png
index 80e00702e6..76dd9b372a 100644
Binary files a/website/docs/project_settings/assets/global_extract_review_letter_box_settings.png and b/website/docs/project_settings/assets/global_extract_review_letter_box_settings.png differ
diff --git a/website/docs/project_settings/settings_project_global.md b/website/docs/project_settings/settings_project_global.md
index 7bd24a5773..5ddf247d98 100644
--- a/website/docs/project_settings/settings_project_global.md
+++ b/website/docs/project_settings/settings_project_global.md
@@ -170,12 +170,10 @@ A profile may generate multiple outputs from a single input. Each output must de
- **`Letter Box`**
- **Enabled** - Enable letter boxes
- - **Ratio** - Ratio of letter boxes
- - **Type** - **Letterbox** (horizontal bars) or **Pillarbox** (vertical bars)
+    - **Ratio** - Ratio of the letter box. The box type is derived from the output image dimensions: if the letter box ratio is greater than the image ratio, a _letterbox_ (horizontal bars) is applied, otherwise a _pillarbox_ (vertical bars) is rendered (see the sketch below the images).
- **Fill color** - Fill color of boxes (RGBA: 0-255)
- **Line Thickness** - Line thickness on the edge of box (set to `0` to turn off)
- - **Fill color** - Line color on the edge of box (RGBA: 0-255)
- - **Example**
+ - **Line color** - Line color on the edge of box (RGBA: 0-255)


diff --git a/website/sidebars.js b/website/sidebars.js
index 4874782197..267cc7f6d7 100644
--- a/website/sidebars.js
+++ b/website/sidebars.js
@@ -180,6 +180,7 @@ module.exports = {
]
},
"dev_deadline",
+ "dev_blender",
"dev_colorspace"
]
};