Merge branch 'develop' into bugfix/OP-3022-Look-publishing-and-srgb-colorspace-in-Maya-2022
29
.github/workflows/nightly_merge.yml
vendored
Normal file
|
|
@ -0,0 +1,29 @@
|
|||
name: Dev -> Main
|
||||
|
||||
on:
|
||||
schedule:
|
||||
- cron: '21 3 * * 3,6'
|
||||
workflow_dispatch:
|
||||
|
||||
jobs:
|
||||
develop-to-main:
|
||||
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
steps:
|
||||
- name: 🚛 Checkout Code
|
||||
uses: actions/checkout@v2
|
||||
|
||||
- name: 🔨 Merge develop to main
|
||||
uses: everlytic/branch-merge@1.1.0
|
||||
with:
|
||||
github_token: ${{ secrets.YNPUT_BOT_TOKEN }}
|
||||
source_ref: 'develop'
|
||||
target_branch: 'main'
|
||||
commit_message_template: '[Automated] Merged {source_ref} into {target_branch}'
|
||||
|
||||
- name: Invoke pre-release workflow
|
||||
uses: benc-uk/workflow-dispatch@v1
|
||||
with:
|
||||
workflow: Nightly Prerelease
|
||||
token: ${{ secrets.YNPUT_BOT_TOKEN }}
|
||||
67
.github/workflows/prerelease.yml
vendored
Normal file
|
|
@ -0,0 +1,67 @@
|
|||
name: Nightly Prerelease
|
||||
|
||||
on:
|
||||
workflow_dispatch:
|
||||
|
||||
|
||||
jobs:
|
||||
create_nightly:
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
steps:
|
||||
- name: 🚛 Checkout Code
|
||||
uses: actions/checkout@v2
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Set up Python
|
||||
uses: actions/setup-python@v2
|
||||
with:
|
||||
python-version: 3.9
|
||||
|
||||
- name: Install Python requirements
|
||||
run: pip install gitpython semver PyGithub
|
||||
|
||||
- name: 🔎 Determine next version type
|
||||
id: version_type
|
||||
run: |
|
||||
TYPE=$(python ./tools/ci_tools.py --bump --github_token ${{ secrets.YNPUT_BOT_TOKEN }})
|
||||
echo "type=${TYPE}" >> $GITHUB_OUTPUT
|
||||
|
||||
- name: 💉 Inject new version into files
|
||||
id: version
|
||||
if: steps.version_type.outputs.type != 'skip'
|
||||
run: |
|
||||
NEW_VERSION_TAG=$(python ./tools/ci_tools.py --nightly --github_token ${{ secrets.YNPUT_BOT_TOKEN }})
|
||||
echo "next_tag=${NEW_VERSION_TAG}" >> $GITHUB_OUTPUT
|
||||
|
||||
- name: 💾 Commit and Tag
|
||||
id: git_commit
|
||||
if: steps.version_type.outputs.type != 'skip'
|
||||
run: |
|
||||
git config user.email ${{ secrets.CI_EMAIL }}
|
||||
git config user.name ${{ secrets.CI_USER }}
|
||||
git checkout main
|
||||
git pull
|
||||
git add .
|
||||
git commit -m "[Automated] Bump version"
|
||||
tag_name="CI/${{ steps.version.outputs.next_tag }}"
|
||||
echo $tag_name
|
||||
git tag -a $tag_name -m "nightly build"
|
||||
|
||||
- name: Push to protected main branch
|
||||
uses: CasperWA/push-protected@v2.10.0
|
||||
with:
|
||||
token: ${{ secrets.YNPUT_BOT_TOKEN }}
|
||||
branch: main
|
||||
tags: true
|
||||
unprotect_reviews: true
|
||||
|
||||
- name: 🔨 Merge main back to develop
|
||||
uses: everlytic/branch-merge@1.1.0
|
||||
if: steps.version_type.outputs.type != 'skip'
|
||||
with:
|
||||
github_token: ${{ secrets.YNPUT_BOT_TOKEN }}
|
||||
source_ref: 'main'
|
||||
target_branch: 'develop'
|
||||
commit_message_template: '[Automated] Merged {source_ref} into {target_branch}'
|
||||
|
|
@ -210,7 +210,8 @@ def switch_item(container,
|
|||
if any(not x for x in [asset_name, subset_name, representation_name]):
|
||||
repre_id = container["representation"]
|
||||
representation = get_representation_by_id(project_name, repre_id)
|
||||
repre_parent_docs = get_representation_parents(representation)
|
||||
repre_parent_docs = get_representation_parents(
|
||||
project_name, representation)
|
||||
if repre_parent_docs:
|
||||
version, subset, asset, _ = repre_parent_docs
|
||||
else:
|
||||
|
|
|
|||
|
|
@ -36,7 +36,7 @@ class FusionPrelaunch(PreLaunchHook):
|
|||
"Make sure the environment in fusion settings has "
|
||||
"'FUSION_PYTHON3_HOME' set correctly and make sure "
|
||||
"Python 3 is installed in the given path."
|
||||
f"\n\nPYTHON36: {fusion_python3_home}"
|
||||
f"\n\nPYTHON PATH: {fusion_python3_home}"
|
||||
)
|
||||
|
||||
self.log.info(f"Setting {py3_var}: '{py3_dir}'...")
|
||||
|
|
|
|||
|
|
@ -80,6 +80,7 @@ class CollectInstances(pyblish.api.ContextPlugin):
|
|||
"outputDir": os.path.dirname(path),
|
||||
"ext": ext, # todo: should be redundant
|
||||
"label": label,
|
||||
"task": context.data["task"],
|
||||
"frameStart": context.data["frameStart"],
|
||||
"frameEnd": context.data["frameEnd"],
|
||||
"frameStartHandle": context.data["frameStartHandle"],
|
||||
|
|
|
|||
|
|
@ -1,6 +1,4 @@
|
|||
import os
|
||||
from pprint import pformat
|
||||
|
||||
import pyblish.api
|
||||
from openpype.hosts.fusion.api import comp_lock_and_undo_chunk
|
||||
|
||||
|
|
@ -23,23 +21,53 @@ class Fusionlocal(pyblish.api.InstancePlugin):
|
|||
# This plug-in runs only once and thus assumes all instances
|
||||
# currently will render the same frame range
|
||||
context = instance.context
|
||||
key = "__hasRun{}".format(self.__class__.__name__)
|
||||
key = f"__hasRun{self.__class__.__name__}"
|
||||
if context.data.get(key, False):
|
||||
return
|
||||
else:
|
||||
context.data[key] = True
|
||||
|
||||
current_comp = context.data["currentComp"]
|
||||
context.data[key] = True
|
||||
|
||||
self.render_once(context)
|
||||
|
||||
frame_start = context.data["frameStartHandle"]
|
||||
frame_end = context.data["frameEndHandle"]
|
||||
path = instance.data["path"]
|
||||
output_dir = instance.data["outputDir"]
|
||||
|
||||
ext = os.path.splitext(os.path.basename(path))[-1]
|
||||
basename = os.path.basename(path)
|
||||
head, ext = os.path.splitext(basename)
|
||||
files = [
|
||||
f"{head}{str(frame).zfill(4)}{ext}"
|
||||
for frame in range(frame_start, frame_end + 1)
|
||||
]
|
||||
repre = {
|
||||
'name': ext[1:],
|
||||
'ext': ext[1:],
|
||||
'frameStart': f"%0{len(str(frame_end))}d" % frame_start,
|
||||
'files': files,
|
||||
"stagingDir": output_dir,
|
||||
}
|
||||
|
||||
if "representations" not in instance.data:
|
||||
instance.data["representations"] = []
|
||||
instance.data["representations"].append(repre)
|
||||
|
||||
# review representation
|
||||
repre_preview = repre.copy()
|
||||
repre_preview["name"] = repre_preview["ext"] = "mp4"
|
||||
repre_preview["tags"] = ["review", "ftrackreview", "delete"]
|
||||
instance.data["representations"].append(repre_preview)
|
||||
|
||||
def render_once(self, context):
|
||||
"""Render context comp only once, even with more render instances"""
|
||||
|
||||
current_comp = context.data["currentComp"]
|
||||
frame_start = context.data["frameStartHandle"]
|
||||
frame_end = context.data["frameEndHandle"]
|
||||
|
||||
self.log.info("Starting render")
|
||||
self.log.info("Start frame: {}".format(frame_start))
|
||||
self.log.info("End frame: {}".format(frame_end))
|
||||
self.log.info(f"Start frame: {frame_start}")
|
||||
self.log.info(f"End frame: {frame_end}")
|
||||
|
||||
with comp_lock_and_undo_chunk(current_comp):
|
||||
result = current_comp.Render({
|
||||
|
|
@ -48,26 +76,5 @@ class Fusionlocal(pyblish.api.InstancePlugin):
|
|||
"Wait": True
|
||||
})
|
||||
|
||||
if "representations" not in instance.data:
|
||||
instance.data["representations"] = []
|
||||
|
||||
collected_frames = os.listdir(output_dir)
|
||||
repre = {
|
||||
'name': ext[1:],
|
||||
'ext': ext[1:],
|
||||
'frameStart': "%0{}d".format(len(str(frame_end))) % frame_start,
|
||||
'files': collected_frames,
|
||||
"stagingDir": output_dir,
|
||||
}
|
||||
instance.data["representations"].append(repre)
|
||||
|
||||
# review representation
|
||||
repre_preview = repre.copy()
|
||||
repre_preview["name"] = repre_preview["ext"] = "mp4"
|
||||
repre_preview["tags"] = ["review", "preview", "ftrackreview", "delete"]
|
||||
instance.data["representations"].append(repre_preview)
|
||||
|
||||
self.log.debug(f"_ instance.data: {pformat(instance.data)}")
|
||||
|
||||
if not result:
|
||||
raise RuntimeError("Comp render failed")
|
||||
|
|
|
|||
|
|
@ -126,10 +126,6 @@ def check_inventory():
|
|||
|
||||
def application_launch(event):
|
||||
"""Event that is executed after Harmony is launched."""
|
||||
# FIXME: This is breaking server <-> client communication.
|
||||
# It is now moved so it it manually called.
|
||||
# ensure_scene_settings()
|
||||
# check_inventory()
|
||||
# fills OPENPYPE_HARMONY_JS
|
||||
pype_harmony_path = Path(__file__).parent.parent / "js" / "PypeHarmony.js"
|
||||
pype_harmony_js = pype_harmony_path.read_text()
|
||||
|
|
@ -146,6 +142,9 @@ def application_launch(event):
|
|||
harmony.send({"script": script})
|
||||
inject_avalon_js()
|
||||
|
||||
ensure_scene_settings()
|
||||
check_inventory()
|
||||
|
||||
|
||||
def export_template(backdrops, nodes, filepath):
|
||||
"""Export Template to file.
|
||||
|
|
|
|||
|
|
@ -120,3 +120,51 @@ def get_all_children(parent, node_type=None):
|
|||
|
||||
return ([x for x in child_list if rt.superClassOf(x) == node_type]
|
||||
if node_type else child_list)
|
||||
|
||||
|
||||
def get_current_renderer():
|
||||
"""get current renderer"""
|
||||
return rt.renderers.production
|
||||
|
||||
|
||||
def get_default_render_folder(project_setting=None):
|
||||
return (project_setting["max"]
|
||||
["RenderSettings"]
|
||||
["default_render_image_folder"])
|
||||
|
||||
|
||||
def set_framerange(start_frame, end_frame):
|
||||
"""
|
||||
Note:
|
||||
Frame range can be specified in different types. Possible values are:
|
||||
* `1` - Single frame.
|
||||
* `2` - Active time segment ( animationRange ).
|
||||
* `3` - User specified Range.
|
||||
* `4` - User specified Frame pickup string (for example `1,3,5-12`).
|
||||
|
||||
Todo:
|
||||
Current type is hard-coded, there should be a custom setting for this.
|
||||
"""
|
||||
rt.rendTimeType = 4
|
||||
if start_frame is not None and end_frame is not None:
|
||||
frame_range = "{0}-{1}".format(start_frame, end_frame)
|
||||
rt.rendPickupFrames = frame_range
|
||||
|
||||
|
||||
def get_multipass_setting(project_setting=None):
|
||||
return (project_setting["max"]
|
||||
["RenderSettings"]
|
||||
["multipass"])
|
||||
|
||||
|
||||
def get_max_version():
|
||||
"""
|
||||
Args:
|
||||
get max version date for deadline
|
||||
|
||||
Returns:
|
||||
#(25000, 62, 0, 25, 0, 0, 997, 2023, "")
|
||||
max_info[7] = max version date
|
||||
"""
|
||||
max_info = rt.maxversion()
|
||||
return max_info[7]
|
||||
|
|
|
|||
114
openpype/hosts/max/api/lib_renderproducts.py
Normal file
|
|
@ -0,0 +1,114 @@
|
|||
# Render Element Example : For scanline render, VRay
|
||||
# https://help.autodesk.com/view/MAXDEV/2022/ENU/?guid=GUID-E8F75D47-B998-4800-A3A5-610E22913CFC
|
||||
# arnold
|
||||
# https://help.autodesk.com/view/ARNOL/ENU/?guid=arnold_for_3ds_max_ax_maxscript_commands_ax_renderview_commands_html
|
||||
import os
|
||||
from pymxs import runtime as rt
|
||||
from openpype.hosts.max.api.lib import (
|
||||
get_current_renderer,
|
||||
get_default_render_folder
|
||||
)
|
||||
from openpype.settings import get_project_settings
|
||||
from openpype.pipeline import legacy_io
|
||||
|
||||
|
||||
class RenderProducts(object):
|
||||
|
||||
def __init__(self, project_settings=None):
|
||||
self._project_settings = project_settings
|
||||
if not self._project_settings:
|
||||
self._project_settings = get_project_settings(
|
||||
legacy_io.Session["AVALON_PROJECT"]
|
||||
)
|
||||
|
||||
def render_product(self, container):
|
||||
folder = rt.maxFilePath
|
||||
file = rt.maxFileName
|
||||
folder = folder.replace("\\", "/")
|
||||
setting = self._project_settings
|
||||
render_folder = get_default_render_folder(setting)
|
||||
filename, ext = os.path.splitext(file)
|
||||
|
||||
output_file = os.path.join(folder,
|
||||
render_folder,
|
||||
filename,
|
||||
container)
|
||||
|
||||
img_fmt = self._project_settings["max"]["RenderSettings"]["image_format"] # noqa
|
||||
full_render_list = []
|
||||
beauty = self.beauty_render_product(output_file, img_fmt)
|
||||
full_render_list.append(beauty)
|
||||
|
||||
renderer_class = get_current_renderer()
|
||||
renderer = str(renderer_class).split(":")[0]
|
||||
|
||||
if renderer == "VUE_File_Renderer":
|
||||
return full_render_list
|
||||
|
||||
if renderer in [
|
||||
"ART_Renderer",
|
||||
"Redshift_Renderer",
|
||||
"V_Ray_6_Hotfix_3",
|
||||
"V_Ray_GPU_6_Hotfix_3",
|
||||
"Default_Scanline_Renderer",
|
||||
"Quicksilver_Hardware_Renderer",
|
||||
]:
|
||||
render_elem_list = self.render_elements_product(output_file,
|
||||
img_fmt)
|
||||
if render_elem_list:
|
||||
full_render_list.extend(iter(render_elem_list))
|
||||
return full_render_list
|
||||
|
||||
if renderer == "Arnold":
|
||||
aov_list = self.arnold_render_product(output_file,
|
||||
img_fmt)
|
||||
if aov_list:
|
||||
full_render_list.extend(iter(aov_list))
|
||||
return full_render_list
|
||||
|
||||
def beauty_render_product(self, folder, fmt):
|
||||
beauty_output = f"{folder}.####.{fmt}"
|
||||
beauty_output = beauty_output.replace("\\", "/")
|
||||
return beauty_output
|
||||
|
||||
# TODO: Get the arnold render product
|
||||
def arnold_render_product(self, folder, fmt):
|
||||
"""Get all the Arnold AOVs"""
|
||||
aovs = []
|
||||
|
||||
amw = rt.MaxtoAOps.AOVsManagerWindow()
|
||||
aov_mgr = rt.renderers.current.AOVManager
|
||||
# Check if there is any aov group set in AOV manager
|
||||
aov_group_num = len(aov_mgr.drivers)
|
||||
if aov_group_num < 1:
|
||||
return
|
||||
for i in range(aov_group_num):
|
||||
# get the specific AOV group
|
||||
for aov in aov_mgr.drivers[i].aov_list:
|
||||
render_element = f"{folder}_{aov.name}.####.{fmt}"
|
||||
render_element = render_element.replace("\\", "/")
|
||||
aovs.append(render_element)
|
||||
# close the AOVs manager window
|
||||
amw.close()
|
||||
|
||||
return aovs
|
||||
|
||||
def render_elements_product(self, folder, fmt):
|
||||
"""Get all the render element output files. """
|
||||
render_dirname = []
|
||||
|
||||
render_elem = rt.maxOps.GetCurRenderElementMgr()
|
||||
render_elem_num = render_elem.NumRenderElements()
|
||||
# get render elements from the renders
|
||||
for i in range(render_elem_num):
|
||||
renderlayer_name = render_elem.GetRenderElement(i)
|
||||
target, renderpass = str(renderlayer_name).split(":")
|
||||
if renderlayer_name.enabled:
|
||||
render_element = f"{folder}_{renderpass}.####.{fmt}"
|
||||
render_element = render_element.replace("\\", "/")
|
||||
render_dirname.append(render_element)
|
||||
|
||||
return render_dirname
|
||||
|
||||
def image_format(self):
|
||||
return self._project_settings["max"]["RenderSettings"]["image_format"] # noqa
|
||||
168
openpype/hosts/max/api/lib_rendersettings.py
Normal file
|
|
@ -0,0 +1,168 @@
|
|||
import os
|
||||
from pymxs import runtime as rt
|
||||
from openpype.lib import Logger
|
||||
from openpype.settings import get_project_settings
|
||||
from openpype.pipeline import legacy_io
|
||||
from openpype.pipeline.context_tools import get_current_project_asset
|
||||
|
||||
from openpype.hosts.max.api.lib import (
|
||||
set_framerange,
|
||||
get_current_renderer,
|
||||
get_default_render_folder
|
||||
)
|
||||
|
||||
|
||||
class RenderSettings(object):
|
||||
|
||||
log = Logger.get_logger("RenderSettings")
|
||||
|
||||
_aov_chars = {
|
||||
"dot": ".",
|
||||
"dash": "-",
|
||||
"underscore": "_"
|
||||
}
|
||||
|
||||
def __init__(self, project_settings=None):
|
||||
"""
|
||||
Set up the naming convention for the render
|
||||
elements for the deadline submission
|
||||
"""
|
||||
|
||||
self._project_settings = project_settings
|
||||
if not self._project_settings:
|
||||
self._project_settings = get_project_settings(
|
||||
legacy_io.Session["AVALON_PROJECT"]
|
||||
)
|
||||
|
||||
def set_render_camera(self, selection):
|
||||
for sel in selection:
|
||||
# to avoid Attribute Error from pymxs wrapper
|
||||
found = False
|
||||
if rt.classOf(sel) in rt.Camera.classes:
|
||||
found = True
|
||||
rt.viewport.setCamera(sel)
|
||||
break
|
||||
if not found:
|
||||
raise RuntimeError("Camera not found")
|
||||
|
||||
def render_output(self, container):
|
||||
folder = rt.maxFilePath
|
||||
# hard-coded, should be customized in the setting
|
||||
file = rt.maxFileName
|
||||
folder = folder.replace("\\", "/")
|
||||
# hard-coded, set the renderoutput path
|
||||
setting = self._project_settings
|
||||
render_folder = get_default_render_folder(setting)
|
||||
filename, ext = os.path.splitext(file)
|
||||
output_dir = os.path.join(folder,
|
||||
render_folder,
|
||||
filename)
|
||||
if not os.path.exists(output_dir):
|
||||
os.makedirs(output_dir)
|
||||
# hard-coded, should be customized in the setting
|
||||
context = get_current_project_asset()
|
||||
|
||||
# get project resolution
|
||||
width = context["data"].get("resolutionWidth")
|
||||
height = context["data"].get("resolutionHeight")
|
||||
# Set Frame Range
|
||||
frame_start = context["data"].get("frame_start")
|
||||
frame_end = context["data"].get("frame_end")
|
||||
set_framerange(frame_start, frame_end)
|
||||
# get the production render
|
||||
renderer_class = get_current_renderer()
|
||||
renderer = str(renderer_class).split(":")[0]
|
||||
|
||||
img_fmt = self._project_settings["max"]["RenderSettings"]["image_format"] # noqa
|
||||
output = os.path.join(output_dir, container)
|
||||
try:
|
||||
aov_separator = self._aov_chars[(
|
||||
self._project_settings["maya"]
|
||||
["RenderSettings"]
|
||||
["aov_separator"]
|
||||
)]
|
||||
except KeyError:
|
||||
aov_separator = "."
|
||||
output_filename = "{0}..{1}".format(output, img_fmt)
|
||||
output_filename = output_filename.replace("{aov_separator}",
|
||||
aov_separator)
|
||||
rt.rendOutputFilename = output_filename
|
||||
if renderer == "VUE_File_Renderer":
|
||||
return
|
||||
# TODO: Finish the arnold render setup
|
||||
if renderer == "Arnold":
|
||||
self.arnold_setup()
|
||||
|
||||
if renderer in [
|
||||
"ART_Renderer",
|
||||
"Redshift_Renderer",
|
||||
"V_Ray_6_Hotfix_3",
|
||||
"V_Ray_GPU_6_Hotfix_3",
|
||||
"Default_Scanline_Renderer",
|
||||
"Quicksilver_Hardware_Renderer",
|
||||
]:
|
||||
self.render_element_layer(output, width, height, img_fmt)
|
||||
|
||||
rt.rendSaveFile = True
|
||||
|
||||
def arnold_setup(self):
|
||||
# get Arnold RenderView run in the background
|
||||
# for setting up renderable camera
|
||||
arv = rt.MAXToAOps.ArnoldRenderView()
|
||||
render_camera = rt.viewport.GetCamera()
|
||||
arv.setOption("Camera", str(render_camera))
|
||||
|
||||
# TODO: add AOVs and extension
|
||||
img_fmt = self._project_settings["max"]["RenderSettings"]["image_format"] # noqa
|
||||
setup_cmd = (
|
||||
f"""
|
||||
amw = MaxtoAOps.AOVsManagerWindow()
|
||||
amw.close()
|
||||
aovmgr = renderers.current.AOVManager
|
||||
aovmgr.drivers = #()
|
||||
img_fmt = "{img_fmt}"
|
||||
if img_fmt == "png" then driver = ArnoldPNGDriver()
|
||||
if img_fmt == "jpg" then driver = ArnoldJPEGDriver()
|
||||
if img_fmt == "exr" then driver = ArnoldEXRDriver()
|
||||
if img_fmt == "tif" then driver = ArnoldTIFFDriver()
|
||||
if img_fmt == "tiff" then driver = ArnoldTIFFDriver()
|
||||
append aovmgr.drivers driver
|
||||
aovmgr.drivers[1].aov_list = #()
|
||||
""")
|
||||
|
||||
rt.execute(setup_cmd)
|
||||
arv.close()
|
||||
|
||||
def render_element_layer(self, dir, width, height, ext):
|
||||
"""For Renderers with render elements"""
|
||||
rt.renderWidth = width
|
||||
rt.renderHeight = height
|
||||
render_elem = rt.maxOps.GetCurRenderElementMgr()
|
||||
render_elem_num = render_elem.NumRenderElements()
|
||||
if render_elem_num < 0:
|
||||
return
|
||||
|
||||
for i in range(render_elem_num):
|
||||
renderlayer_name = render_elem.GetRenderElement(i)
|
||||
target, renderpass = str(renderlayer_name).split(":")
|
||||
aov_name = "{0}_{1}..{2}".format(dir, renderpass, ext)
|
||||
render_elem.SetRenderElementFileName(i, aov_name)
|
||||
|
||||
def get_render_output(self, container, output_dir):
|
||||
output = os.path.join(output_dir, container)
|
||||
img_fmt = self._project_settings["max"]["RenderSettings"]["image_format"] # noqa
|
||||
output_filename = "{0}..{1}".format(output, img_fmt)
|
||||
return output_filename
|
||||
|
||||
def get_render_element(self):
|
||||
orig_render_elem = []
|
||||
render_elem = rt.maxOps.GetCurRenderElementMgr()
|
||||
render_elem_num = render_elem.NumRenderElements()
|
||||
if render_elem_num < 0:
|
||||
return
|
||||
|
||||
for i in range(render_elem_num):
|
||||
render_element = render_elem.GetRenderElementFilename(i)
|
||||
orig_render_elem.append(render_element)
|
||||
|
||||
return orig_render_elem
|
||||
33
openpype/hosts/max/plugins/create/create_render.py
Normal file
|
|
@ -0,0 +1,33 @@
|
|||
# -*- coding: utf-8 -*-
|
||||
"""Creator plugin for creating camera."""
|
||||
from openpype.hosts.max.api import plugin
|
||||
from openpype.pipeline import CreatedInstance
|
||||
from openpype.hosts.max.api.lib_rendersettings import RenderSettings
|
||||
|
||||
|
||||
class CreateRender(plugin.MaxCreator):
|
||||
identifier = "io.openpype.creators.max.render"
|
||||
label = "Render"
|
||||
family = "maxrender"
|
||||
icon = "gear"
|
||||
|
||||
def create(self, subset_name, instance_data, pre_create_data):
|
||||
from pymxs import runtime as rt
|
||||
sel_obj = list(rt.selection)
|
||||
instance = super(CreateRender, self).create(
|
||||
subset_name,
|
||||
instance_data,
|
||||
pre_create_data) # type: CreatedInstance
|
||||
container_name = instance.data.get("instance_node")
|
||||
container = rt.getNodeByName(container_name)
|
||||
# TODO: Disable "Add to Containers?" Panel
|
||||
# parent the selected cameras into the container
|
||||
for obj in sel_obj:
|
||||
obj.parent = container
|
||||
# for additional work on the node:
|
||||
# instance_node = rt.getNodeByName(instance.get("instance_node"))
|
||||
|
||||
# set viewport camera for rendering(mandatory for deadline)
|
||||
RenderSettings().set_render_camera(sel_obj)
|
||||
# set output paths for rendering(mandatory for deadline)
|
||||
RenderSettings().render_output(container_name)
|
||||
67
openpype/hosts/max/plugins/publish/collect_render.py
Normal file
|
|
@ -0,0 +1,67 @@
|
|||
# -*- coding: utf-8 -*-
|
||||
"""Collect Render"""
|
||||
import os
|
||||
import pyblish.api
|
||||
|
||||
from pymxs import runtime as rt
|
||||
from openpype.pipeline import get_current_asset_name
|
||||
from openpype.hosts.max.api.lib import get_max_version
|
||||
from openpype.hosts.max.api.lib_renderproducts import RenderProducts
|
||||
from openpype.client import get_last_version_by_subset_name
|
||||
|
||||
|
||||
class CollectRender(pyblish.api.InstancePlugin):
|
||||
"""Collect Render for Deadline"""
|
||||
|
||||
order = pyblish.api.CollectorOrder + 0.01
|
||||
label = "Collect 3dsmax Render Layers"
|
||||
hosts = ['max']
|
||||
families = ["maxrender"]
|
||||
|
||||
def process(self, instance):
|
||||
context = instance.context
|
||||
folder = rt.maxFilePath
|
||||
file = rt.maxFileName
|
||||
current_file = os.path.join(folder, file)
|
||||
filepath = current_file.replace("\\", "/")
|
||||
|
||||
context.data['currentFile'] = current_file
|
||||
asset = get_current_asset_name()
|
||||
|
||||
render_layer_files = RenderProducts().render_product(instance.name)
|
||||
folder = folder.replace("\\", "/")
|
||||
|
||||
img_format = RenderProducts().image_format()
|
||||
project_name = context.data["projectName"]
|
||||
asset_doc = context.data["assetEntity"]
|
||||
asset_id = asset_doc["_id"]
|
||||
version_doc = get_last_version_by_subset_name(project_name,
|
||||
instance.name,
|
||||
asset_id)
|
||||
|
||||
self.log.debug("version_doc: {0}".format(version_doc))
|
||||
version_int = 1
|
||||
if version_doc:
|
||||
version_int += int(version_doc["name"])
|
||||
|
||||
self.log.debug(f"Setting {version_int} to context.")
|
||||
context.data["version"] = version_int
|
||||
|
||||
# setup the plugin as 3dsmax for the internal renderer
|
||||
data = {
|
||||
"subset": instance.name,
|
||||
"asset": asset,
|
||||
"publish": True,
|
||||
"maxversion": str(get_max_version()),
|
||||
"imageFormat": img_format,
|
||||
"family": 'maxrender',
|
||||
"families": ['maxrender'],
|
||||
"source": filepath,
|
||||
"expectedFiles": render_layer_files,
|
||||
"plugin": "3dsmax",
|
||||
"frameStart": context.data['frameStart'],
|
||||
"frameEnd": context.data['frameEnd'],
|
||||
"version": version_int
|
||||
}
|
||||
self.log.info("data: {0}".format(data))
|
||||
instance.data.update(data)
|
||||
|
|
@ -4,7 +4,6 @@ from maya import cmds
|
|||
|
||||
from openpype.client import get_asset_by_name, get_project
|
||||
from openpype.pipeline import legacy_io
|
||||
from . import lib
|
||||
|
||||
|
||||
class ToolWindows:
|
||||
|
|
@ -58,54 +57,6 @@ def edit_shader_definitions():
|
|||
window.show()
|
||||
|
||||
|
||||
def reset_frame_range():
|
||||
"""Set frame range to current asset"""
|
||||
|
||||
fps = lib.convert_to_maya_fps(
|
||||
float(legacy_io.Session.get("AVALON_FPS", 25))
|
||||
)
|
||||
lib.set_scene_fps(fps)
|
||||
|
||||
# Set frame start/end
|
||||
project_name = legacy_io.active_project()
|
||||
asset_name = legacy_io.Session["AVALON_ASSET"]
|
||||
asset = get_asset_by_name(project_name, asset_name)
|
||||
|
||||
frame_start = asset["data"].get("frameStart")
|
||||
frame_end = asset["data"].get("frameEnd")
|
||||
# Backwards compatibility
|
||||
if frame_start is None or frame_end is None:
|
||||
frame_start = asset["data"].get("edit_in")
|
||||
frame_end = asset["data"].get("edit_out")
|
||||
|
||||
if frame_start is None or frame_end is None:
|
||||
cmds.warning("No edit information found for %s" % asset_name)
|
||||
return
|
||||
|
||||
handles = asset["data"].get("handles") or 0
|
||||
handle_start = asset["data"].get("handleStart")
|
||||
if handle_start is None:
|
||||
handle_start = handles
|
||||
|
||||
handle_end = asset["data"].get("handleEnd")
|
||||
if handle_end is None:
|
||||
handle_end = handles
|
||||
|
||||
frame_start -= int(handle_start)
|
||||
frame_end += int(handle_end)
|
||||
|
||||
cmds.playbackOptions(minTime=frame_start)
|
||||
cmds.playbackOptions(maxTime=frame_end)
|
||||
cmds.playbackOptions(animationStartTime=frame_start)
|
||||
cmds.playbackOptions(animationEndTime=frame_end)
|
||||
cmds.playbackOptions(minTime=frame_start)
|
||||
cmds.playbackOptions(maxTime=frame_end)
|
||||
cmds.currentTime(frame_start)
|
||||
|
||||
cmds.setAttr("defaultRenderGlobals.startFrame", frame_start)
|
||||
cmds.setAttr("defaultRenderGlobals.endFrame", frame_end)
|
||||
|
||||
|
||||
def _resolution_from_document(doc):
|
||||
if not doc or "data" not in doc:
|
||||
print("Entered document is not valid. \"{}\"".format(str(doc)))
|
||||
|
|
|
|||
|
|
@ -43,7 +43,6 @@ from openpype.pipeline import (
|
|||
registered_host,
|
||||
)
|
||||
from openpype.pipeline.context_tools import get_current_project_asset
|
||||
from .commands import reset_frame_range
|
||||
|
||||
|
||||
self = sys.modules[__name__]
|
||||
|
|
@ -2007,7 +2006,7 @@ def set_scene_fps(fps, update=True):
|
|||
'48000': '48000fps'
|
||||
}
|
||||
|
||||
unit = fps_mapping.get(str(fps), None)
|
||||
unit = fps_mapping.get(str(convert_to_maya_fps(fps)), None)
|
||||
if unit is None:
|
||||
raise ValueError("Unsupported FPS value: `%s`" % fps)
|
||||
|
||||
|
|
@ -2074,6 +2073,54 @@ def set_scene_resolution(width, height, pixelAspect):
|
|||
cmds.setAttr("%s.pixelAspect" % control_node, pixelAspect)
|
||||
|
||||
|
||||
def reset_frame_range():
|
||||
"""Set frame range to current asset"""
|
||||
|
||||
fps = convert_to_maya_fps(
|
||||
float(legacy_io.Session.get("AVALON_FPS", 25))
|
||||
)
|
||||
set_scene_fps(fps)
|
||||
|
||||
# Set frame start/end
|
||||
project_name = legacy_io.active_project()
|
||||
asset_name = legacy_io.Session["AVALON_ASSET"]
|
||||
asset = get_asset_by_name(project_name, asset_name)
|
||||
|
||||
frame_start = asset["data"].get("frameStart")
|
||||
frame_end = asset["data"].get("frameEnd")
|
||||
# Backwards compatibility
|
||||
if frame_start is None or frame_end is None:
|
||||
frame_start = asset["data"].get("edit_in")
|
||||
frame_end = asset["data"].get("edit_out")
|
||||
|
||||
if frame_start is None or frame_end is None:
|
||||
cmds.warning("No edit information found for %s" % asset_name)
|
||||
return
|
||||
|
||||
handles = asset["data"].get("handles") or 0
|
||||
handle_start = asset["data"].get("handleStart")
|
||||
if handle_start is None:
|
||||
handle_start = handles
|
||||
|
||||
handle_end = asset["data"].get("handleEnd")
|
||||
if handle_end is None:
|
||||
handle_end = handles
|
||||
|
||||
frame_start -= int(handle_start)
|
||||
frame_end += int(handle_end)
|
||||
|
||||
cmds.playbackOptions(minTime=frame_start)
|
||||
cmds.playbackOptions(maxTime=frame_end)
|
||||
cmds.playbackOptions(animationStartTime=frame_start)
|
||||
cmds.playbackOptions(animationEndTime=frame_end)
|
||||
cmds.playbackOptions(minTime=frame_start)
|
||||
cmds.playbackOptions(maxTime=frame_end)
|
||||
cmds.currentTime(frame_start)
|
||||
|
||||
cmds.setAttr("defaultRenderGlobals.startFrame", frame_start)
|
||||
cmds.setAttr("defaultRenderGlobals.endFrame", frame_end)
|
||||
|
||||
|
||||
def reset_scene_resolution():
|
||||
"""Apply the scene resolution from the project definition
|
||||
|
||||
|
|
@ -3416,11 +3463,11 @@ def convert_to_maya_fps(fps):
|
|||
# If input fps is a whole number we'll return.
|
||||
if float(fps).is_integer():
|
||||
# Validate fps is part of Maya's fps selection.
|
||||
if fps not in int_framerates:
|
||||
if int(fps) not in int_framerates:
|
||||
raise ValueError(
|
||||
"Framerate \"{}\" is not supported in Maya".format(fps)
|
||||
)
|
||||
return fps
|
||||
return int(fps)
|
||||
else:
|
||||
# Differences to supported float frame rates.
|
||||
differences = []
|
||||
|
|
|
|||
|
|
@ -797,6 +797,11 @@ class RenderProductsVray(ARenderProducts):
|
|||
if default_ext in {"exr (multichannel)", "exr (deep)"}:
|
||||
default_ext = "exr"
|
||||
|
||||
# Define multipart.
|
||||
multipart = False
|
||||
if image_format_str == "exr (multichannel)":
|
||||
multipart = True
|
||||
|
||||
products = []
|
||||
|
||||
# add beauty as default when not disabled
|
||||
|
|
@ -804,23 +809,28 @@ class RenderProductsVray(ARenderProducts):
|
|||
if not dont_save_rgb:
|
||||
for camera in cameras:
|
||||
products.append(
|
||||
RenderProduct(productName="",
|
||||
ext=default_ext,
|
||||
camera=camera))
|
||||
RenderProduct(
|
||||
productName="",
|
||||
ext=default_ext,
|
||||
camera=camera,
|
||||
multipart=multipart
|
||||
)
|
||||
)
|
||||
|
||||
# separate alpha file
|
||||
separate_alpha = self._get_attr("vraySettings.separateAlpha")
|
||||
if separate_alpha:
|
||||
for camera in cameras:
|
||||
products.append(
|
||||
RenderProduct(productName="Alpha",
|
||||
ext=default_ext,
|
||||
camera=camera)
|
||||
RenderProduct(
|
||||
productName="Alpha",
|
||||
ext=default_ext,
|
||||
camera=camera,
|
||||
multipart=multipart
|
||||
)
|
||||
)
|
||||
|
||||
if image_format_str == "exr (multichannel)":
|
||||
if multipart:
|
||||
# AOVs are merged in m-channel file, only main layer is rendered
|
||||
self.multipart = True
|
||||
return products
|
||||
|
||||
# handle aovs from references
|
||||
|
|
|
|||
|
|
@ -14,7 +14,7 @@ from openpype.settings import (
|
|||
from openpype.pipeline import legacy_io
|
||||
from openpype.pipeline import CreatorError
|
||||
from openpype.pipeline.context_tools import get_current_project_asset
|
||||
from openpype.hosts.maya.api.commands import reset_frame_range
|
||||
from openpype.hosts.maya.api.lib import reset_frame_range
|
||||
|
||||
|
||||
class RenderSettings(object):
|
||||
|
|
|
|||
|
|
@ -12,7 +12,6 @@ from openpype.pipeline.workfile import BuildWorkfile
|
|||
from openpype.tools.utils import host_tools
|
||||
from openpype.hosts.maya.api import lib, lib_rendersettings
|
||||
from .lib import get_main_window, IS_HEADLESS
|
||||
from .commands import reset_frame_range
|
||||
|
||||
from .workfile_template_builder import (
|
||||
create_placeholder,
|
||||
|
|
@ -113,7 +112,7 @@ def install():
|
|||
|
||||
cmds.menuItem(
|
||||
"Reset Frame Range",
|
||||
command=lambda *args: reset_frame_range()
|
||||
command=lambda *args: lib.reset_frame_range()
|
||||
)
|
||||
|
||||
cmds.menuItem(
|
||||
|
|
|
|||
|
|
@ -1,4 +1,5 @@
|
|||
import os
|
||||
import json
|
||||
|
||||
import clique
|
||||
import capture
|
||||
|
|
@ -44,10 +45,6 @@ class ExtractPlayblast(publish.Extractor):
|
|||
# get cameras
|
||||
camera = instance.data['review_camera']
|
||||
|
||||
override_viewport_options = (
|
||||
self.capture_preset['Viewport Options']
|
||||
['override_viewport_options']
|
||||
)
|
||||
preset = lib.load_capture_preset(data=self.capture_preset)
|
||||
# Grab capture presets from the project settings
|
||||
capture_presets = self.capture_preset
|
||||
|
|
@ -119,6 +116,27 @@ class ExtractPlayblast(publish.Extractor):
|
|||
pan_zoom = cmds.getAttr("{}.panZoomEnabled".format(preset["camera"]))
|
||||
cmds.setAttr("{}.panZoomEnabled".format(preset["camera"]), False)
|
||||
|
||||
# Need to explicitly enable some viewport changes so the viewport is
|
||||
# refreshed ahead of playblasting.
|
||||
panel = cmds.getPanel(withFocus=True)
|
||||
keys = [
|
||||
"useDefaultMaterial",
|
||||
"wireframeOnShaded",
|
||||
"xray",
|
||||
"jointXray",
|
||||
"backfaceCulling"
|
||||
]
|
||||
viewport_defaults = {}
|
||||
for key in keys:
|
||||
viewport_defaults[key] = cmds.modelEditor(
|
||||
panel, query=True, **{key: True}
|
||||
)
|
||||
if preset["viewport_options"][key]:
|
||||
cmds.modelEditor(panel, edit=True, **{key: True})
|
||||
|
||||
override_viewport_options = (
|
||||
capture_presets['Viewport Options']['override_viewport_options']
|
||||
)
|
||||
with lib.maintained_time():
|
||||
filename = preset.get("filename", "%TEMP%")
|
||||
|
||||
|
|
@ -127,18 +145,26 @@ class ExtractPlayblast(publish.Extractor):
|
|||
# playblast and viewer
|
||||
preset['viewer'] = False
|
||||
|
||||
self.log.info('using viewport preset: {}'.format(preset))
|
||||
|
||||
# Update preset with current panel setting
|
||||
# if override_viewport_options is turned off
|
||||
if not override_viewport_options:
|
||||
panel = cmds.getPanel(withFocus=True)
|
||||
panel = cmds.getPanel(withFocus=True) or ""
|
||||
if not override_viewport_options and "modelPanel" in panel:
|
||||
panel_preset = capture.parse_active_view()
|
||||
panel_preset.pop("camera")
|
||||
preset.update(panel_preset)
|
||||
cmds.setFocus(panel)
|
||||
|
||||
self.log.info(
|
||||
"Using preset:\n{}".format(
|
||||
json.dumps(preset, sort_keys=True, indent=4)
|
||||
)
|
||||
)
|
||||
|
||||
path = capture.capture(log=self.log, **preset)
|
||||
|
||||
# Restoring viewport options.
|
||||
cmds.modelEditor(panel, edit=True, **viewport_defaults)
|
||||
|
||||
cmds.setAttr("{}.panZoomEnabled".format(preset["camera"]), pan_zoom)
|
||||
|
||||
self.log.debug("playblast path {}".format(path))
|
||||
|
|
|
|||
|
|
@ -134,8 +134,8 @@ class ExtractThumbnail(publish.Extractor):
|
|||
|
||||
# Update preset with current panel setting
|
||||
# if override_viewport_options is turned off
|
||||
if not override_viewport_options:
|
||||
panel = cmds.getPanel(withFocus=True)
|
||||
panel = cmds.getPanel(withFocus=True) or ""
|
||||
if not override_viewport_options and "modelPanel" in panel:
|
||||
panel_preset = capture.parse_active_view()
|
||||
preset.update(panel_preset)
|
||||
cmds.setFocus(panel)
|
||||
|
|
|
|||
|
|
@ -4,7 +4,6 @@ import pyblish.api
|
|||
|
||||
import openpype.hosts.maya.api.lib as mayalib
|
||||
from openpype.pipeline.context_tools import get_current_project_asset
|
||||
from math import ceil
|
||||
from openpype.pipeline.publish import (
|
||||
RepairContextAction,
|
||||
ValidateSceneOrder,
|
||||
|
|
|
|||
|
|
@ -220,8 +220,20 @@ class LoadClip(plugin.NukeLoader):
|
|||
dict: altered representation data
|
||||
"""
|
||||
representation = deepcopy(representation)
|
||||
frame = representation["context"]["frame"]
|
||||
representation["context"]["frame"] = "#" * len(str(frame))
|
||||
context = representation["context"]
|
||||
template = representation["data"]["template"]
|
||||
if (
|
||||
"{originalBasename}" in template
|
||||
and "frame" in context
|
||||
):
|
||||
frame = context["frame"]
|
||||
hashed_frame = "#" * len(str(frame))
|
||||
origin_basename = context["originalBasename"]
|
||||
context["originalBasename"] = origin_basename.replace(
|
||||
frame, hashed_frame
|
||||
)
|
||||
|
||||
representation["context"]["frame"] = hashed_frame
|
||||
return representation
|
||||
|
||||
def update(self, container, representation):
|
||||
|
|
|
|||
|
|
@ -309,8 +309,6 @@ class QtTVPaintRpc(BaseTVPaintRpc):
|
|||
self.add_methods(
|
||||
(route_name, self.workfiles_tool),
|
||||
(route_name, self.loader_tool),
|
||||
(route_name, self.creator_tool),
|
||||
(route_name, self.subset_manager_tool),
|
||||
(route_name, self.publish_tool),
|
||||
(route_name, self.scene_inventory_tool),
|
||||
(route_name, self.library_loader_tool),
|
||||
|
|
@ -330,21 +328,9 @@ class QtTVPaintRpc(BaseTVPaintRpc):
|
|||
self._execute_in_main_thread(item)
|
||||
return
|
||||
|
||||
async def creator_tool(self):
|
||||
log.info("Triggering Creator tool")
|
||||
item = MainThreadItem(self.tools_helper.show_creator)
|
||||
await self._async_execute_in_main_thread(item, wait=False)
|
||||
|
||||
async def subset_manager_tool(self):
|
||||
log.info("Triggering Subset Manager tool")
|
||||
item = MainThreadItem(self.tools_helper.show_subset_manager)
|
||||
# Do not wait for result of callback
|
||||
self._execute_in_main_thread(item, wait=False)
|
||||
return
|
||||
|
||||
async def publish_tool(self):
|
||||
log.info("Triggering Publish tool")
|
||||
item = MainThreadItem(self.tools_helper.show_publish)
|
||||
item = MainThreadItem(self.tools_helper.show_publisher_tool)
|
||||
self._execute_in_main_thread(item)
|
||||
return
|
||||
|
||||
|
|
@ -859,10 +845,6 @@ class QtCommunicator(BaseCommunicator):
|
|||
"callback": "loader_tool",
|
||||
"label": "Load",
|
||||
"help": "Open loader tool"
|
||||
}, {
|
||||
"callback": "creator_tool",
|
||||
"label": "Create",
|
||||
"help": "Open creator tool"
|
||||
}, {
|
||||
"callback": "scene_inventory_tool",
|
||||
"label": "Scene inventory",
|
||||
|
|
@ -875,10 +857,6 @@ class QtCommunicator(BaseCommunicator):
|
|||
"callback": "library_loader_tool",
|
||||
"label": "Library",
|
||||
"help": "Open library loader tool"
|
||||
}, {
|
||||
"callback": "subset_manager_tool",
|
||||
"label": "Subset Manager",
|
||||
"help": "Open subset manager tool"
|
||||
}, {
|
||||
"callback": "experimental_tools",
|
||||
"label": "Experimental tools",
|
||||
|
|
|
|||
|
|
@ -202,8 +202,9 @@ def get_groups_data(communicator=None):
|
|||
# Variable containing full path to output file
|
||||
"output_path = \"{}\"".format(output_filepath),
|
||||
"empty = 0",
|
||||
# Loop over 100 groups
|
||||
"FOR idx = 1 TO 100",
|
||||
# Loop over 26 groups which is ATM maximum possible (in 11.7)
|
||||
# - ref: https://www.tvpaint.com/forum/viewtopic.php?t=13880
|
||||
"FOR idx = 1 TO 26",
|
||||
# Receive information about groups
|
||||
"tv_layercolor \"getcolor\" 0 idx",
|
||||
"PARSE result clip_id group_index c_red c_green c_blue group_name",
|
||||
|
|
|
|||
|
|
@ -8,7 +8,7 @@ import requests
|
|||
import pyblish.api
|
||||
|
||||
from openpype.client import get_project, get_asset_by_name
|
||||
from openpype.host import HostBase, IWorkfileHost, ILoadHost
|
||||
from openpype.host import HostBase, IWorkfileHost, ILoadHost, IPublishHost
|
||||
from openpype.hosts.tvpaint import TVPAINT_ROOT_DIR
|
||||
from openpype.settings import get_current_project_settings
|
||||
from openpype.lib import register_event_callback
|
||||
|
|
@ -18,6 +18,7 @@ from openpype.pipeline import (
|
|||
register_creator_plugin_path,
|
||||
AVALON_CONTAINER_ID,
|
||||
)
|
||||
from openpype.pipeline.context_tools import get_global_context
|
||||
|
||||
from .lib import (
|
||||
execute_george,
|
||||
|
|
@ -29,6 +30,7 @@ log = logging.getLogger(__name__)
|
|||
|
||||
METADATA_SECTION = "avalon"
|
||||
SECTION_NAME_CONTEXT = "context"
|
||||
SECTION_NAME_CREATE_CONTEXT = "create_context"
|
||||
SECTION_NAME_INSTANCES = "instances"
|
||||
SECTION_NAME_CONTAINERS = "containers"
|
||||
# Maximum length of metadata chunk string
|
||||
|
|
@ -58,7 +60,7 @@ instances=2
|
|||
"""
|
||||
|
||||
|
||||
class TVPaintHost(HostBase, IWorkfileHost, ILoadHost):
|
||||
class TVPaintHost(HostBase, IWorkfileHost, ILoadHost, IPublishHost):
|
||||
name = "tvpaint"
|
||||
|
||||
def install(self):
|
||||
|
|
@ -85,14 +87,63 @@ class TVPaintHost(HostBase, IWorkfileHost, ILoadHost):
|
|||
registered_callbacks = (
|
||||
pyblish.api.registered_callbacks().get("instanceToggled") or []
|
||||
)
|
||||
if self.on_instance_toggle not in registered_callbacks:
|
||||
pyblish.api.register_callback(
|
||||
"instanceToggled", self.on_instance_toggle
|
||||
)
|
||||
|
||||
register_event_callback("application.launched", self.initial_launch)
|
||||
register_event_callback("application.exit", self.application_exit)
|
||||
|
||||
def get_current_project_name(self):
|
||||
"""
|
||||
Returns:
|
||||
Union[str, None]: Current project name.
|
||||
"""
|
||||
|
||||
return self.get_current_context().get("project_name")
|
||||
|
||||
def get_current_asset_name(self):
|
||||
"""
|
||||
Returns:
|
||||
Union[str, None]: Current asset name.
|
||||
"""
|
||||
|
||||
return self.get_current_context().get("asset_name")
|
||||
|
||||
def get_current_task_name(self):
|
||||
"""
|
||||
Returns:
|
||||
Union[str, None]: Current task name.
|
||||
"""
|
||||
|
||||
return self.get_current_context().get("task_name")
|
||||
|
||||
def get_current_context(self):
|
||||
context = get_current_workfile_context()
|
||||
if not context:
|
||||
return get_global_context()
|
||||
|
||||
if "project_name" in context:
|
||||
return context
|
||||
# This is legacy way how context was stored
|
||||
return {
|
||||
"project_name": context.get("project"),
|
||||
"asset_name": context.get("asset"),
|
||||
"task_name": context.get("task")
|
||||
}
|
||||
|
||||
# --- Create ---
|
||||
def get_context_data(self):
|
||||
return get_workfile_metadata(SECTION_NAME_CREATE_CONTEXT, {})
|
||||
|
||||
def update_context_data(self, data, changes):
|
||||
return write_workfile_metadata(SECTION_NAME_CREATE_CONTEXT, data)
|
||||
|
||||
def list_instances(self):
|
||||
"""List all created instances from current workfile."""
|
||||
return list_instances()
|
||||
|
||||
def write_instances(self, data):
|
||||
return write_instances(data)
|
||||
|
||||
# --- Workfile ---
|
||||
def open_workfile(self, filepath):
|
||||
george_script = "tv_LoadProject '\"'\"{}\"'\"'".format(
|
||||
filepath.replace("\\", "/")
|
||||
|
|
@ -102,11 +153,7 @@ class TVPaintHost(HostBase, IWorkfileHost, ILoadHost):
|
|||
def save_workfile(self, filepath=None):
|
||||
if not filepath:
|
||||
filepath = self.get_current_workfile()
|
||||
context = {
|
||||
"project": legacy_io.Session["AVALON_PROJECT"],
|
||||
"asset": legacy_io.Session["AVALON_ASSET"],
|
||||
"task": legacy_io.Session["AVALON_TASK"]
|
||||
}
|
||||
context = get_global_context()
|
||||
save_current_workfile_context(context)
|
||||
|
||||
# Execute george script to save workfile.
|
||||
|
|
@ -125,6 +172,7 @@ class TVPaintHost(HostBase, IWorkfileHost, ILoadHost):
|
|||
def get_workfile_extensions(self):
|
||||
return [".tvpp"]
|
||||
|
||||
# --- Load ---
|
||||
def get_containers(self):
|
||||
return get_containers()
|
||||
|
||||
|
|
@ -137,27 +185,15 @@ class TVPaintHost(HostBase, IWorkfileHost, ILoadHost):
|
|||
return
|
||||
|
||||
log.info("Setting up project...")
|
||||
set_context_settings()
|
||||
|
||||
def remove_instance(self, instance):
|
||||
"""Remove instance from current workfile metadata.
|
||||
|
||||
Implementation for Subset manager tool.
|
||||
"""
|
||||
|
||||
current_instances = get_workfile_metadata(SECTION_NAME_INSTANCES)
|
||||
instance_id = instance.get("uuid")
|
||||
found_idx = None
|
||||
if instance_id:
|
||||
for idx, _inst in enumerate(current_instances):
|
||||
if _inst["uuid"] == instance_id:
|
||||
found_idx = idx
|
||||
break
|
||||
|
||||
if found_idx is None:
|
||||
global_context = get_global_context()
|
||||
project_name = global_context.get("project_name")
|
||||
asset_name = global_context.get("aset_name")
|
||||
if not project_name or not asset_name:
|
||||
return
|
||||
current_instances.pop(found_idx)
|
||||
write_instances(current_instances)
|
||||
|
||||
asset_doc = get_asset_by_name(project_name, asset_name)
|
||||
|
||||
set_context_settings(project_name, asset_doc)
|
||||
|
||||
def application_exit(self):
|
||||
"""Logic related to TimerManager.
|
||||
|
|
@ -177,34 +213,6 @@ class TVPaintHost(HostBase, IWorkfileHost, ILoadHost):
|
|||
rest_api_url = "{}/timers_manager/stop_timer".format(webserver_url)
|
||||
requests.post(rest_api_url)
|
||||
|
||||
def on_instance_toggle(self, instance, old_value, new_value):
|
||||
"""Update instance data in workfile on publish toggle."""
|
||||
# Review may not have real instance in wokrfile metadata
|
||||
if not instance.data.get("uuid"):
|
||||
return
|
||||
|
||||
instance_id = instance.data["uuid"]
|
||||
found_idx = None
|
||||
current_instances = list_instances()
|
||||
for idx, workfile_instance in enumerate(current_instances):
|
||||
if workfile_instance["uuid"] == instance_id:
|
||||
found_idx = idx
|
||||
break
|
||||
|
||||
if found_idx is None:
|
||||
return
|
||||
|
||||
if "active" in current_instances[found_idx]:
|
||||
current_instances[found_idx]["active"] = new_value
|
||||
self.write_instances(current_instances)
|
||||
|
||||
def list_instances(self):
|
||||
"""List all created instances from current workfile."""
|
||||
return list_instances()
|
||||
|
||||
def write_instances(self, data):
|
||||
return write_instances(data)
|
||||
|
||||
|
||||
def containerise(
|
||||
name, namespace, members, context, loader, current_containers=None
|
||||
|
|
@ -462,40 +470,17 @@ def get_containers():
|
|||
return output
|
||||
|
||||
|
||||
def set_context_settings(asset_doc=None):
|
||||
def set_context_settings(project_name, asset_doc):
|
||||
"""Set workfile settings by asset document data.
|
||||
|
||||
Change fps, resolution and frame start/end.
|
||||
"""
|
||||
|
||||
project_name = legacy_io.active_project()
|
||||
if asset_doc is None:
|
||||
asset_name = legacy_io.Session["AVALON_ASSET"]
|
||||
# Use current session asset if not passed
|
||||
asset_doc = get_asset_by_name(project_name, asset_name)
|
||||
|
||||
project_doc = get_project(project_name)
|
||||
|
||||
framerate = asset_doc["data"].get("fps")
|
||||
if framerate is None:
|
||||
framerate = project_doc["data"].get("fps")
|
||||
|
||||
if framerate is not None:
|
||||
execute_george(
|
||||
"tv_framerate {} \"timestretch\"".format(framerate)
|
||||
)
|
||||
else:
|
||||
print("Framerate was not found!")
|
||||
|
||||
width_key = "resolutionWidth"
|
||||
height_key = "resolutionHeight"
|
||||
|
||||
width = asset_doc["data"].get(width_key)
|
||||
height = asset_doc["data"].get(height_key)
|
||||
if width is None or height is None:
|
||||
width = project_doc["data"].get(width_key)
|
||||
height = project_doc["data"].get(height_key)
|
||||
|
||||
if width is None or height is None:
|
||||
print("Resolution was not found!")
|
||||
else:
|
||||
|
|
@ -503,6 +488,15 @@ def set_context_settings(asset_doc=None):
|
|||
"tv_resizepage {} {} 0".format(width, height)
|
||||
)
|
||||
|
||||
framerate = asset_doc["data"].get("fps")
|
||||
|
||||
if framerate is not None:
|
||||
execute_george(
|
||||
"tv_framerate {} \"timestretch\"".format(framerate)
|
||||
)
|
||||
else:
|
||||
print("Framerate was not found!")
|
||||
|
||||
frame_start = asset_doc["data"].get("frameStart")
|
||||
frame_end = asset_doc["data"].get("frameEnd")
|
||||
|
||||
|
|
|
|||
|
|
@ -1,80 +1,142 @@
|
|||
import re
|
||||
import uuid
|
||||
|
||||
from openpype.pipeline import (
|
||||
LegacyCreator,
|
||||
LoaderPlugin,
|
||||
registered_host,
|
||||
from openpype.pipeline import LoaderPlugin
|
||||
from openpype.pipeline.create import (
|
||||
CreatedInstance,
|
||||
get_subset_name,
|
||||
AutoCreator,
|
||||
Creator,
|
||||
)
|
||||
from openpype.pipeline.create.creator_plugins import cache_and_get_instances
|
||||
|
||||
from .lib import get_layers_data
|
||||
from .pipeline import get_current_workfile_context
|
||||
|
||||
|
||||
class Creator(LegacyCreator):
|
||||
def __init__(self, *args, **kwargs):
|
||||
super(Creator, self).__init__(*args, **kwargs)
|
||||
# Add unified identifier created with `uuid` module
|
||||
self.data["uuid"] = str(uuid.uuid4())
|
||||
SHARED_DATA_KEY = "openpype.tvpaint.instances"
|
||||
|
||||
@classmethod
|
||||
def get_dynamic_data(cls, *args, **kwargs):
|
||||
dynamic_data = super(Creator, cls).get_dynamic_data(*args, **kwargs)
|
||||
|
||||
# Change asset and name by current workfile context
|
||||
workfile_context = get_current_workfile_context()
|
||||
asset_name = workfile_context.get("asset")
|
||||
task_name = workfile_context.get("task")
|
||||
if "asset" not in dynamic_data and asset_name:
|
||||
dynamic_data["asset"] = asset_name
|
||||
class TVPaintCreatorCommon:
|
||||
@property
|
||||
def subset_template_family_filter(self):
|
||||
return self.family
|
||||
|
||||
if "task" not in dynamic_data and task_name:
|
||||
dynamic_data["task"] = task_name
|
||||
return dynamic_data
|
||||
|
||||
@staticmethod
|
||||
def are_instances_same(instance_1, instance_2):
|
||||
"""Compare instances but skip keys with unique values.
|
||||
|
||||
During compare are skipped keys that will be 100% sure
|
||||
different on new instance, like "id".
|
||||
|
||||
Returns:
|
||||
bool: True if instances are same.
|
||||
"""
|
||||
if (
|
||||
not isinstance(instance_1, dict)
|
||||
or not isinstance(instance_2, dict)
|
||||
):
|
||||
return instance_1 == instance_2
|
||||
|
||||
checked_keys = set()
|
||||
checked_keys.add("id")
|
||||
for key, value in instance_1.items():
|
||||
if key not in checked_keys:
|
||||
if key not in instance_2:
|
||||
return False
|
||||
if value != instance_2[key]:
|
||||
return False
|
||||
checked_keys.add(key)
|
||||
|
||||
for key in instance_2.keys():
|
||||
if key not in checked_keys:
|
||||
return False
|
||||
return True
|
||||
|
||||
def write_instances(self, data):
|
||||
self.log.debug(
|
||||
"Storing instance data to workfile. {}".format(str(data))
|
||||
def _cache_and_get_instances(self):
|
||||
return cache_and_get_instances(
|
||||
self, SHARED_DATA_KEY, self.host.list_instances
|
||||
)
|
||||
host = registered_host()
|
||||
return host.write_instances(data)
|
||||
|
||||
def process(self):
|
||||
host = registered_host()
|
||||
data = host.list_instances()
|
||||
data.append(self.data)
|
||||
self.write_instances(data)
|
||||
def _collect_create_instances(self):
|
||||
instances_by_identifier = self._cache_and_get_instances()
|
||||
for instance_data in instances_by_identifier[self.identifier]:
|
||||
instance = CreatedInstance.from_existing(instance_data, self)
|
||||
self._add_instance_to_context(instance)
|
||||
|
||||
def _update_create_instances(self, update_list):
|
||||
if not update_list:
|
||||
return
|
||||
|
||||
cur_instances = self.host.list_instances()
|
||||
cur_instances_by_id = {}
|
||||
for instance_data in cur_instances:
|
||||
instance_id = instance_data.get("instance_id")
|
||||
if instance_id:
|
||||
cur_instances_by_id[instance_id] = instance_data
|
||||
|
||||
for instance, changes in update_list:
|
||||
instance_data = changes.new_value
|
||||
cur_instance_data = cur_instances_by_id.get(instance.id)
|
||||
if cur_instance_data is None:
|
||||
cur_instances.append(instance_data)
|
||||
continue
|
||||
for key in set(cur_instance_data) - set(instance_data):
|
||||
cur_instance_data.pop(key)
|
||||
cur_instance_data.update(instance_data)
|
||||
self.host.write_instances(cur_instances)
|
||||
|
||||
def _custom_get_subset_name(
|
||||
self,
|
||||
variant,
|
||||
task_name,
|
||||
asset_doc,
|
||||
project_name,
|
||||
host_name=None,
|
||||
instance=None
|
||||
):
|
||||
dynamic_data = self.get_dynamic_data(
|
||||
variant, task_name, asset_doc, project_name, host_name, instance
|
||||
)
|
||||
|
||||
return get_subset_name(
|
||||
self.family,
|
||||
variant,
|
||||
task_name,
|
||||
asset_doc,
|
||||
project_name,
|
||||
host_name,
|
||||
dynamic_data=dynamic_data,
|
||||
project_settings=self.project_settings,
|
||||
family_filter=self.subset_template_family_filter
|
||||
)
|
||||
|
||||
|
||||
class TVPaintCreator(Creator, TVPaintCreatorCommon):
|
||||
def collect_instances(self):
|
||||
self._collect_create_instances()
|
||||
|
||||
def update_instances(self, update_list):
|
||||
self._update_create_instances(update_list)
|
||||
|
||||
def remove_instances(self, instances):
|
||||
ids_to_remove = {
|
||||
instance.id
|
||||
for instance in instances
|
||||
}
|
||||
cur_instances = self.host.list_instances()
|
||||
changed = False
|
||||
new_instances = []
|
||||
for instance_data in cur_instances:
|
||||
if instance_data.get("instance_id") in ids_to_remove:
|
||||
changed = True
|
||||
else:
|
||||
new_instances.append(instance_data)
|
||||
|
||||
if changed:
|
||||
self.host.write_instances(new_instances)
|
||||
|
||||
for instance in instances:
|
||||
self._remove_instance_from_context(instance)
|
||||
|
||||
def get_dynamic_data(self, *args, **kwargs):
|
||||
# Change asset and name by current workfile context
|
||||
create_context = self.create_context
|
||||
asset_name = create_context.get_current_asset_name()
|
||||
task_name = create_context.get_current_task_name()
|
||||
output = {}
|
||||
if asset_name:
|
||||
output["asset"] = asset_name
|
||||
if task_name:
|
||||
output["task"] = task_name
|
||||
return output
|
||||
|
||||
def get_subset_name(self, *args, **kwargs):
|
||||
return self._custom_get_subset_name(*args, **kwargs)
|
||||
|
||||
def _store_new_instance(self, new_instance):
|
||||
instances_data = self.host.list_instances()
|
||||
instances_data.append(new_instance.data_to_store())
|
||||
self.host.write_instances(instances_data)
|
||||
self._add_instance_to_context(new_instance)
|
||||
|
||||
|
||||
class TVPaintAutoCreator(AutoCreator, TVPaintCreatorCommon):
|
||||
def collect_instances(self):
|
||||
self._collect_create_instances()
|
||||
|
||||
def update_instances(self, update_list):
|
||||
self._update_create_instances(update_list)
|
||||
|
||||
def get_subset_name(self, *args, **kwargs):
|
||||
return self._custom_get_subset_name(*args, **kwargs)
|
||||
|
||||
|
||||
class Loader(LoaderPlugin):
|
||||
|
|
|
|||
150
openpype/hosts/tvpaint/plugins/create/convert_legacy.py
Normal file
|
|
@ -0,0 +1,150 @@
|
|||
import collections
|
||||
|
||||
from openpype.pipeline.create.creator_plugins import (
|
||||
SubsetConvertorPlugin,
|
||||
cache_and_get_instances,
|
||||
)
|
||||
from openpype.hosts.tvpaint.api.plugin import SHARED_DATA_KEY
|
||||
from openpype.hosts.tvpaint.api.lib import get_groups_data
|
||||
|
||||
|
||||
class TVPaintLegacyConverted(SubsetConvertorPlugin):
|
||||
"""Conversion of legacy instances in scene to new creators.
|
||||
|
||||
This convertor handles only instances created by core creators.
|
||||
|
||||
All instances that would be created using auto-creators are removed as at
|
||||
the moment of finding them would there already be existing instances.
|
||||
"""
|
||||
|
||||
identifier = "tvpaint.legacy.converter"
|
||||
|
||||
def find_instances(self):
|
||||
instances_by_identifier = cache_and_get_instances(
|
||||
self, SHARED_DATA_KEY, self.host.list_instances
|
||||
)
|
||||
if instances_by_identifier[None]:
|
||||
self.add_convertor_item("Convert legacy instances")
|
||||
|
||||
def convert(self):
|
||||
current_instances = self.host.list_instances()
|
||||
to_convert = collections.defaultdict(list)
|
||||
converted = False
|
||||
for instance in current_instances:
|
||||
if instance.get("creator_identifier") is not None:
|
||||
continue
|
||||
converted = True
|
||||
|
||||
family = instance.get("family")
|
||||
if family in (
|
||||
"renderLayer",
|
||||
"renderPass",
|
||||
"renderScene",
|
||||
"review",
|
||||
"workfile",
|
||||
):
|
||||
to_convert[family].append(instance)
|
||||
else:
|
||||
instance["keep"] = False
|
||||
|
||||
# Skip if nothing was changed
|
||||
if not converted:
|
||||
self.remove_convertor_item()
|
||||
return
|
||||
|
||||
self._convert_render_layers(
|
||||
to_convert["renderLayer"], current_instances)
|
||||
self._convert_render_passes(
|
||||
to_convert["renderpass"], current_instances)
|
||||
self._convert_render_scenes(
|
||||
to_convert["renderScene"], current_instances)
|
||||
self._convert_workfiles(
|
||||
to_convert["workfile"], current_instances)
|
||||
self._convert_reviews(
|
||||
to_convert["review"], current_instances)
|
||||
|
||||
new_instances = [
|
||||
instance
|
||||
for instance in current_instances
|
||||
if instance.get("keep") is not False
|
||||
]
|
||||
self.host.write_instances(new_instances)
|
||||
# remove legacy item if all is fine
|
||||
self.remove_convertor_item()
|
||||
|
||||
def _convert_render_layers(self, render_layers, current_instances):
|
||||
if not render_layers:
|
||||
return
|
||||
|
||||
# Look for possible existing render layers in scene
|
||||
render_layers_by_group_id = {}
|
||||
for instance in current_instances:
|
||||
if instance.get("creator_identifier") == "render.layer":
|
||||
group_id = instance["creator_identifier"]["group_id"]
|
||||
render_layers_by_group_id[group_id] = instance
|
||||
|
||||
groups_by_id = {
|
||||
group["group_id"]: group
|
||||
for group in get_groups_data()
|
||||
}
|
||||
for render_layer in render_layers:
|
||||
group_id = render_layer.pop("group_id")
|
||||
# Just remove legacy instance if group is already occupied
|
||||
if group_id in render_layers_by_group_id:
|
||||
render_layer["keep"] = False
|
||||
continue
|
||||
# Add identifier
|
||||
render_layer["creator_identifier"] = "render.layer"
|
||||
# Change 'uuid' to 'instance_id'
|
||||
render_layer["instance_id"] = render_layer.pop("uuid")
|
||||
# Fill creator attributes
|
||||
render_layer["creator_attributes"] = {
|
||||
"group_id": group_id
|
||||
}
|
||||
render_layer["family"] = "render"
|
||||
group = groups_by_id[group_id]
|
||||
# Use group name for variant
|
||||
group["variant"] = group["name"]
|
||||
|
||||
def _convert_render_passes(self, render_passes, current_instances):
|
||||
if not render_passes:
|
||||
return
|
||||
|
||||
# Render passes must have available render layers so we look for render
|
||||
# layers first
|
||||
# - '_convert_render_layers' must be called before this method
|
||||
render_layers_by_group_id = {}
|
||||
for instance in current_instances:
|
||||
if instance.get("creator_identifier") == "render.layer":
|
||||
group_id = instance["creator_identifier"]["group_id"]
|
||||
render_layers_by_group_id[group_id] = instance
|
||||
|
||||
for render_pass in render_passes:
|
||||
group_id = render_pass.pop("group_id")
|
||||
render_layer = render_layers_by_group_id.get(group_id)
|
||||
if not render_layer:
|
||||
render_pass["keep"] = False
|
||||
continue
|
||||
|
||||
render_pass["creator_identifier"] = "render.pass"
|
||||
render_pass["instance_id"] = render_pass.pop("uuid")
|
||||
render_pass["family"] = "render"
|
||||
|
||||
render_pass["creator_attributes"] = {
|
||||
"render_layer_instance_id": render_layer["instance_id"]
|
||||
}
|
||||
render_pass["variant"] = render_pass.pop("pass")
|
||||
render_pass.pop("renderlayer")
|
||||
|
||||
# Rest of instances are just marked for deletion
|
||||
def _convert_render_scenes(self, render_scenes, current_instances):
|
||||
for render_scene in render_scenes:
|
||||
render_scene["keep"] = False
|
||||
|
||||
def _convert_workfiles(self, workfiles, current_instances):
|
||||
for render_scene in workfiles:
|
||||
render_scene["keep"] = False
|
||||
|
||||
def _convert_reviews(self, reviews, current_instances):
|
||||
for render_scene in reviews:
|
||||
render_scene["keep"] = False
|
||||
1115
openpype/hosts/tvpaint/plugins/create/create_render.py
Normal file
|
|
@ -1,231 +0,0 @@
|
|||
from openpype.lib import prepare_template_data
|
||||
from openpype.pipeline import CreatorError
|
||||
from openpype.hosts.tvpaint.api import (
|
||||
plugin,
|
||||
CommunicationWrapper
|
||||
)
|
||||
from openpype.hosts.tvpaint.api.lib import (
|
||||
get_layers_data,
|
||||
get_groups_data,
|
||||
execute_george_through_file,
|
||||
)
|
||||
from openpype.hosts.tvpaint.api.pipeline import list_instances
|
||||
|
||||
|
||||
class CreateRenderlayer(plugin.Creator):
|
||||
"""Mark layer group as one instance."""
|
||||
name = "render_layer"
|
||||
label = "RenderLayer"
|
||||
family = "renderLayer"
|
||||
icon = "cube"
|
||||
defaults = ["Main"]
|
||||
|
||||
rename_group = True
|
||||
render_pass = "beauty"
|
||||
|
||||
rename_script_template = (
|
||||
"tv_layercolor \"setcolor\""
|
||||
" {clip_id} {group_id} {r} {g} {b} \"{name}\""
|
||||
)
|
||||
|
||||
dynamic_subset_keys = [
|
||||
"renderpass", "renderlayer", "render_pass", "render_layer", "group"
|
||||
]
|
||||
|
||||
@classmethod
|
||||
def get_dynamic_data(
|
||||
cls, variant, task_name, asset_id, project_name, host_name
|
||||
):
|
||||
dynamic_data = super(CreateRenderlayer, cls).get_dynamic_data(
|
||||
variant, task_name, asset_id, project_name, host_name
|
||||
)
|
||||
# Use render pass name from creator's plugin
|
||||
dynamic_data["renderpass"] = cls.render_pass
|
||||
# Add variant to render layer
|
||||
dynamic_data["renderlayer"] = variant
|
||||
# Change family for subset name fill
|
||||
dynamic_data["family"] = "render"
|
||||
|
||||
# TODO remove - Backwards compatibility for old subset name templates
|
||||
# - added 2022/04/28
|
||||
dynamic_data["render_pass"] = dynamic_data["renderpass"]
|
||||
dynamic_data["render_layer"] = dynamic_data["renderlayer"]
|
||||
|
||||
return dynamic_data
|
||||
|
||||
@classmethod
|
||||
def get_default_variant(cls):
|
||||
"""Default value for variant in Creator tool.
|
||||
|
||||
Method checks if TVPaint implementation is running and tries to find
|
||||
selected layers from TVPaint. If only one is selected it's name is
|
||||
returned.
|
||||
|
||||
Returns:
|
||||
str: Default variant name for Creator tool.
|
||||
"""
|
||||
# Validate that communication is initialized
|
||||
if CommunicationWrapper.communicator:
|
||||
# Get currently selected layers
|
||||
layers_data = get_layers_data()
|
||||
|
||||
selected_layers = [
|
||||
layer
|
||||
for layer in layers_data
|
||||
if layer["selected"]
|
||||
]
|
||||
# Return layer name if only one is selected
|
||||
if len(selected_layers) == 1:
|
||||
return selected_layers[0]["name"]
|
||||
|
||||
# Use defaults
|
||||
if cls.defaults:
|
||||
return cls.defaults[0]
|
||||
return None
|
||||
|
||||
def process(self):
    """Create/replace the beauty (render layer) instance for selection.

    Validates that the current selection belongs to exactly one non
    default TVPaint group, fills the 'group' dynamic key in the subset
    name, stores the instance into workfile metadata and optionally
    renames the TVPaint group to the variant name.

    Raises:
        CreatorError: Nothing selected, selection spans multiple groups,
            or selection is in the default (id 0) group.
        AssertionError: Group id from layers has no matching group.
    """
    self.log.debug("Query data from workfile.")
    instances = list_instances()
    layers_data = get_layers_data()

    self.log.debug("Checking for selection groups.")
    # Collect group ids from selection
    group_ids = set()
    for layer in layers_data:
        if layer["selected"]:
            group_ids.add(layer["group_id"])

    # Raise if there is no selection
    if not group_ids:
        raise CreatorError("Nothing is selected.")

    # This creator should run only on one group
    if len(group_ids) > 1:
        raise CreatorError("More than one group is in selection.")

    group_id = tuple(group_ids)[0]
    # If group id is `0` it is `default` group which is invalid
    if group_id == 0:
        raise CreatorError(
            "Selection is not in group. Can't mark selection as Beauty."
        )

    self.log.debug(f"Selected group id is \"{group_id}\".")
    self.data["group_id"] = group_id

    group_data = get_groups_data()
    group_name = None
    for group in group_data:
        if group["group_id"] == group_id:
            group_name = group["name"]
            break

    if group_name is None:
        raise AssertionError(
            "Couldn't find group by id \"{}\"".format(group_id)
        )

    subset_name_fill_data = {
        "group": group_name
    }

    family = self.family = self.data["family"]

    # Fill dynamic key 'group'
    subset_name = self.data["subset"].format(
        **prepare_template_data(subset_name_fill_data)
    )
    self.data["subset"] = subset_name

    # Check for instances of same group
    existing_instance = None
    existing_instance_idx = None
    # Check if subset name is not already taken
    same_subset_instance = None
    same_subset_instance_idx = None
    for idx, instance in enumerate(instances):
        if instance["family"] == family:
            if instance["group_id"] == group_id:
                existing_instance = instance
                existing_instance_idx = idx
            elif instance["subset"] == subset_name:
                same_subset_instance = instance
                same_subset_instance_idx = idx

        if (
            same_subset_instance_idx is not None
            and existing_instance_idx is not None
        ):
            break

    if same_subset_instance_idx is not None:
        if not self._ask_user_subset_override(same_subset_instance):
            return
        instances.pop(same_subset_instance_idx)
        # BUGFIX: popping shifts indices of all following items. Keep
        # 'existing_instance_idx' pointing at the same instance so the
        # replacement below does not overwrite an unrelated instance.
        if (
            existing_instance_idx is not None
            and existing_instance_idx > same_subset_instance_idx
        ):
            existing_instance_idx -= 1

    if existing_instance is not None:
        self.log.info(
            f"Beauty instance for group id {group_id} already exists"
            ", overriding"
        )
        instances[existing_instance_idx] = self.data
    else:
        instances.append(self.data)

    self.write_instances(instances)

    if not self.rename_group:
        self.log.info("Group rename function is turned off. Skipping")
        return

    self.log.debug("Querying groups data from workfile.")
    groups_data = get_groups_data()

    self.log.debug("Changing name of the group.")
    selected_group = None
    for group_data in groups_data:
        if group_data["group_id"] == group_id:
            selected_group = group_data

    # Rename TVPaint group (keep color same)
    # - groups can't contain spaces
    new_group_name = self.data["variant"].replace(" ", "_")
    rename_script = self.rename_script_template.format(
        clip_id=selected_group["clip_id"],
        group_id=selected_group["group_id"],
        r=selected_group["red"],
        g=selected_group["green"],
        b=selected_group["blue"],
        name=new_group_name
    )
    execute_george_through_file(rename_script)

    self.log.info(
        f"Name of group with index {group_id}"
        f" was changed to \"{new_group_name}\"."
    )
|
||||
|
||||
def _ask_user_subset_override(self, instance):
    """Ask user whether a colliding subset instance should be replaced.

    Shows a stay-on-top Yes/No message box (so it is not hidden behind
    the TVPaint window).

    Args:
        instance (dict): Existing instance data with the same subset
            name (only its "subset" key is used for the message).

    Returns:
        bool: True when the user confirmed the override.
    """
    # Local import - Qt is only needed when the dialog is shown
    from qtpy import QtCore
    from qtpy.QtWidgets import QMessageBox

    # BUGFIX: grammar typo in user-facing title ("exist" -> "exists")
    title = "Subset \"{}\" already exists".format(instance["subset"])
    text = (
        "Instance with subset name \"{}\" already exists."
        "\n\nDo you want to override existing?"
    ).format(instance["subset"])

    dialog = QMessageBox()
    dialog.setWindowFlags(
        dialog.windowFlags()
        | QtCore.Qt.WindowStaysOnTopHint
    )
    dialog.setWindowTitle(title)
    dialog.setText(text)
    dialog.setStandardButtons(QMessageBox.Yes | QMessageBox.No)
    dialog.setDefaultButton(QMessageBox.Yes)
    # 'exec_' blocks until a button is clicked and returns the clicked
    # standard button - same value 'result()' would return afterwards.
    return dialog.exec_() == QMessageBox.Yes
|
||||
|
|
@ -1,167 +0,0 @@
|
|||
from openpype.pipeline import CreatorError
|
||||
from openpype.lib import prepare_template_data
|
||||
from openpype.hosts.tvpaint.api import (
|
||||
plugin,
|
||||
CommunicationWrapper
|
||||
)
|
||||
from openpype.hosts.tvpaint.api.lib import get_layers_data
|
||||
from openpype.hosts.tvpaint.api.pipeline import list_instances
|
||||
|
||||
|
||||
class CreateRenderPass(plugin.Creator):
    """Render pass is combination of one or more layers from same group.

    Requirement to create Render Pass is to have already created beauty
    instance. Beauty instance is used as base for subset name.
    """
    name = "render_pass"
    label = "RenderPass"
    family = "renderPass"
    icon = "cube"
    # Default variant values offered in the Creator tool
    defaults = ["Main"]

    # Keys usable as dynamic fills in the subset name template.
    # 'render_pass'/'render_layer' are backwards-compatible aliases.
    dynamic_subset_keys = [
        "renderpass", "renderlayer", "render_pass", "render_layer"
    ]

    @classmethod
    def get_dynamic_data(
        cls, variant, task_name, asset_id, project_name, host_name
    ):
        """Fill dynamic keys used by the subset name template.

        Adds 'renderpass' (from the variant) and forces family to
        'render' for subset name resolution.
        """
        dynamic_data = super(CreateRenderPass, cls).get_dynamic_data(
            variant, task_name, asset_id, project_name, host_name
        )
        dynamic_data["renderpass"] = variant
        dynamic_data["family"] = "render"

        # TODO remove - Backwards compatibility for old subset name templates
        # - added 2022/04/28
        dynamic_data["render_pass"] = dynamic_data["renderpass"]

        return dynamic_data

    @classmethod
    def get_default_variant(cls):
        """Default value for variant in Creator tool.

        Method checks if TVPaint implementation is running and tries to find
        selected layers from TVPaint. If only one is selected it's name is
        returned.

        Returns:
            str: Default variant name for Creator tool.
        """
        # Validate that communication is initialized
        if CommunicationWrapper.communicator:
            # Get currently selected layers
            layers_data = get_layers_data()

            selected_layers = [
                layer
                for layer in layers_data
                if layer["selected"]
            ]
            # Return layer name if only one is selected
            if len(selected_layers) == 1:
                return selected_layers[0]["name"]

        # Use defaults
        if cls.defaults:
            return cls.defaults[0]
        return None

    def process(self):
        """Create/replace render pass instance for the selected layers.

        Requires the selection to be in exactly one group which already
        has a beauty ('renderLayer') instance; the beauty instance's
        variant is used to fill the 'renderlayer' key in the subset name.

        Raises:
            CreatorError: Nothing selected, selection spans multiple
                groups, or the beauty instance does not exist yet.
        """
        self.log.debug("Query data from workfile.")
        instances = list_instances()
        layers_data = get_layers_data()

        self.log.debug("Checking selection.")
        # Get all selected layers and their group ids
        group_ids = set()
        selected_layers = []
        for layer in layers_data:
            if layer["selected"]:
                selected_layers.append(layer)
                group_ids.add(layer["group_id"])

        # Raise if nothing is selected
        if not selected_layers:
            raise CreatorError("Nothing is selected.")

        # Raise if layers from multiple groups are selected
        if len(group_ids) != 1:
            raise CreatorError("More than one group is in selection.")

        group_id = tuple(group_ids)[0]
        self.log.debug(f"Selected group id is \"{group_id}\".")

        # Find beauty instance for selected layers
        beauty_instance = None
        for instance in instances:
            if (
                instance["family"] == "renderLayer"
                and instance["group_id"] == group_id
            ):
                beauty_instance = instance
                break

        # Beauty is required for this creator so raise if was not found
        if beauty_instance is None:
            raise CreatorError("Beauty pass does not exist yet.")

        subset_name = self.data["subset"]

        subset_name_fill_data = {}

        # Backwards compatibility
        # - beauty may be created with older creator where variant was not
        #   stored
        if "variant" not in beauty_instance:
            render_layer = beauty_instance["name"]
        else:
            render_layer = beauty_instance["variant"]

        subset_name_fill_data["renderlayer"] = render_layer
        subset_name_fill_data["render_layer"] = render_layer

        # Format dynamic keys in subset name
        new_subset_name = subset_name.format(
            **prepare_template_data(subset_name_fill_data)
        )
        self.data["subset"] = new_subset_name
        self.log.info(f"New subset name is \"{new_subset_name}\".")

        family = self.data["family"]
        variant = self.data["variant"]

        self.data["group_id"] = group_id
        self.data["pass"] = variant
        self.data["renderlayer"] = render_layer

        # Collect selected layer ids to be stored into instance
        layer_names = [layer["name"] for layer in selected_layers]
        self.data["layer_names"] = layer_names

        # Check if same instance already exists
        existing_instance = None
        existing_instance_idx = None
        for idx, instance in enumerate(instances):
            if (
                instance["family"] == family
                and instance["group_id"] == group_id
                and instance["pass"] == variant
            ):
                existing_instance = instance
                existing_instance_idx = idx
                break

        if existing_instance is not None:
            self.log.info(
                f"Render pass instance for group id {group_id}"
                f" and name \"{variant}\" already exists, overriding."
            )
            instances[existing_instance_idx] = self.data
        else:
            instances.append(self.data)

        self.write_instances(instances)
|
||||
76
openpype/hosts/tvpaint/plugins/create/create_review.py
Normal file
|
|
@ -0,0 +1,76 @@
|
|||
from openpype.client import get_asset_by_name
|
||||
from openpype.pipeline import CreatedInstance
|
||||
from openpype.hosts.tvpaint.api.plugin import TVPaintAutoCreator
|
||||
|
||||
|
||||
class TVPaintReviewCreator(TVPaintAutoCreator):
    """Auto-creator maintaining a single 'review' instance per workfile."""

    family = "review"
    identifier = "scene.review"
    label = "Review"
    icon = "ei.video"

    # Settings
    active_on_create = True

    def apply_settings(self, project_settings, system_settings):
        """Load creator options from project settings."""
        plugin_settings = (
            project_settings["tvpaint"]["create"]["create_review"]
        )
        self.default_variant = plugin_settings["default_variant"]
        self.default_variants = plugin_settings["default_variants"]
        self.active_on_create = plugin_settings["active_on_create"]

    def create(self):
        """Create the review instance or update an existing one.

        If no instance with this creator's identifier exists it is
        created and stored into workfile metadata. An existing instance
        is only updated (asset/task/subset) when the current context
        differs from the stored one.
        """
        # Find instance previously created by this creator (if any)
        existing_instance = None
        for instance in self.create_context.instances:
            if instance.creator_identifier == self.identifier:
                existing_instance = instance
                break

        create_context = self.create_context
        host_name = create_context.host_name
        project_name = create_context.get_current_project_name()
        asset_name = create_context.get_current_asset_name()
        task_name = create_context.get_current_task_name()

        if existing_instance is None:
            asset_doc = get_asset_by_name(project_name, asset_name)
            subset_name = self.get_subset_name(
                self.default_variant,
                task_name,
                asset_doc,
                project_name,
                host_name
            )
            data = {
                "asset": asset_name,
                "task": task_name,
                "variant": self.default_variant
            }
            # Respect settings-driven default "active" state
            if not self.active_on_create:
                data["active"] = False

            new_instance = CreatedInstance(
                self.family, subset_name, data, self
            )
            # Persist new instance into workfile metadata
            instances_data = self.host.list_instances()
            instances_data.append(new_instance.data_to_store())
            self.host.write_instances(instances_data)
            self._add_instance_to_context(new_instance)

        elif (
            existing_instance["asset"] != asset_name
            or existing_instance["task"] != task_name
        ):
            # Context changed since creation -> re-resolve subset name
            asset_doc = get_asset_by_name(project_name, asset_name)
            subset_name = self.get_subset_name(
                existing_instance["variant"],
                task_name,
                asset_doc,
                project_name,
                host_name,
                existing_instance
            )
            existing_instance["asset"] = asset_name
            existing_instance["task"] = task_name
            existing_instance["subset"] = subset_name
|
||||
70
openpype/hosts/tvpaint/plugins/create/create_workfile.py
Normal file
|
|
@ -0,0 +1,70 @@
|
|||
from openpype.client import get_asset_by_name
|
||||
from openpype.pipeline import CreatedInstance
|
||||
from openpype.hosts.tvpaint.api.plugin import TVPaintAutoCreator
|
||||
|
||||
|
||||
class TVPaintWorkfileCreator(TVPaintAutoCreator):
    """Auto-creator maintaining a single 'workfile' instance per workfile."""

    family = "workfile"
    identifier = "workfile"
    label = "Workfile"
    icon = "fa.file-o"

    def apply_settings(self, project_settings, system_settings):
        """Load creator options from project settings."""
        plugin_settings = (
            project_settings["tvpaint"]["create"]["create_workfile"]
        )
        self.default_variant = plugin_settings["default_variant"]
        self.default_variants = plugin_settings["default_variants"]

    def create(self):
        """Create the workfile instance or update an existing one.

        If no instance with this creator's identifier exists it is
        created and stored into workfile metadata. An existing instance
        is only updated (asset/task/subset) when the current context
        differs from the stored one.
        """
        # Find instance previously created by this creator (if any)
        existing_instance = None
        for instance in self.create_context.instances:
            if instance.creator_identifier == self.identifier:
                existing_instance = instance
                break

        create_context = self.create_context
        host_name = create_context.host_name
        project_name = create_context.get_current_project_name()
        asset_name = create_context.get_current_asset_name()
        task_name = create_context.get_current_task_name()

        if existing_instance is None:
            asset_doc = get_asset_by_name(project_name, asset_name)
            subset_name = self.get_subset_name(
                self.default_variant,
                task_name,
                asset_doc,
                project_name,
                host_name
            )
            data = {
                "asset": asset_name,
                "task": task_name,
                "variant": self.default_variant
            }

            new_instance = CreatedInstance(
                self.family, subset_name, data, self
            )
            # Persist new instance into workfile metadata
            instances_data = self.host.list_instances()
            instances_data.append(new_instance.data_to_store())
            self.host.write_instances(instances_data)
            self._add_instance_to_context(new_instance)

        elif (
            existing_instance["asset"] != asset_name
            or existing_instance["task"] != task_name
        ):
            # Context changed since creation -> re-resolve subset name
            asset_doc = get_asset_by_name(project_name, asset_name)
            subset_name = self.get_subset_name(
                existing_instance["variant"],
                task_name,
                asset_doc,
                project_name,
                host_name,
                existing_instance
            )
            existing_instance["asset"] = asset_name
            existing_instance["task"] = task_name
            existing_instance["subset"] = subset_name
|
||||
|
|
@ -1,37 +1,34 @@
|
|||
import pyblish.api
|
||||
|
||||
|
||||
class CollectOutputFrameRange(pyblish.api.ContextPlugin):
|
||||
class CollectOutputFrameRange(pyblish.api.InstancePlugin):
|
||||
"""Collect frame start/end from context.
|
||||
|
||||
When instances are collected context does not contain `frameStart` and
|
||||
`frameEnd` keys yet. They are collected in global plugin
|
||||
`CollectContextEntities`.
|
||||
"""
|
||||
|
||||
label = "Collect output frame range"
|
||||
order = pyblish.api.CollectorOrder
|
||||
order = pyblish.api.CollectorOrder + 0.4999
|
||||
hosts = ["tvpaint"]
|
||||
families = ["review", "render"]
|
||||
|
||||
def process(self, context):
|
||||
for instance in context:
|
||||
frame_start = instance.data.get("frameStart")
|
||||
frame_end = instance.data.get("frameEnd")
|
||||
if frame_start is not None and frame_end is not None:
|
||||
self.log.debug(
|
||||
"Instance {} already has set frames {}-{}".format(
|
||||
str(instance), frame_start, frame_end
|
||||
)
|
||||
)
|
||||
return
|
||||
def process(self, instance):
|
||||
asset_doc = instance.data.get("assetEntity")
|
||||
if not asset_doc:
|
||||
return
|
||||
|
||||
frame_start = context.data.get("frameStart")
|
||||
frame_end = context.data.get("frameEnd")
|
||||
context = instance.context
|
||||
|
||||
instance.data["frameStart"] = frame_start
|
||||
instance.data["frameEnd"] = frame_end
|
||||
|
||||
self.log.info(
|
||||
"Set frames {}-{} on instance {} ".format(
|
||||
frame_start, frame_end, str(instance)
|
||||
)
|
||||
frame_start = asset_doc["data"]["frameStart"]
|
||||
frame_end = frame_start + (
|
||||
context.data["sceneMarkOut"] - context.data["sceneMarkIn"]
|
||||
)
|
||||
instance.data["frameStart"] = frame_start
|
||||
instance.data["frameEnd"] = frame_end
|
||||
self.log.info(
|
||||
"Set frames {}-{} on instance {} ".format(
|
||||
frame_start, frame_end, instance.data["subset"]
|
||||
)
|
||||
)
|
||||
|
|
|
|||
|
|
@ -1,280 +0,0 @@
|
|||
import json
|
||||
import copy
|
||||
import pyblish.api
|
||||
|
||||
from openpype.client import get_asset_by_name
|
||||
from openpype.pipeline import legacy_io
|
||||
from openpype.pipeline.create import get_subset_name
|
||||
|
||||
|
||||
class CollectInstances(pyblish.api.ContextPlugin):
    """Convert workfile metadata instances into pyblish instances.

    Reads 'workfileInstances' from context, filters known families
    ('review', 'renderPass', 'renderLayer'), injects a fake review
    instance when none is stored, and creates pyblish instances with
    resolved labels, families and layer lists.
    """

    label = "Collect Instances"
    order = pyblish.api.CollectorOrder - 0.4
    hosts = ["tvpaint"]

    def process(self, context):
        workfile_instances = context.data["workfileInstances"]

        self.log.debug("Collected ({}) instances:\n{}".format(
            len(workfile_instances),
            json.dumps(workfile_instances, indent=4)
        ))

        filtered_instance_data = []
        # Backwards compatibility for workfiles that already have review
        # instance in metadata.
        review_instance_exist = False
        for instance_data in workfile_instances:
            family = instance_data["family"]
            if family == "review":
                review_instance_exist = True

            elif family not in ("renderPass", "renderLayer"):
                self.log.info("Unknown family \"{}\". Skipping {}".format(
                    family, json.dumps(instance_data, indent=4)
                ))
                continue

            filtered_instance_data.append(instance_data)

        # Fake review instance if review was not found in metadata families
        if not review_instance_exist:
            filtered_instance_data.append(
                self._create_review_instance_data(context)
            )

        for instance_data in filtered_instance_data:
            instance_data["fps"] = context.data["sceneFps"]

            # Conversion from older instances
            # - change 'render_layer' to 'renderlayer'
            # NOTE(review): key "instance_data" looks suspicious here -
            #   likely meant "renderlayer"; confirm against git history.
            render_layer = instance_data.get("instance_data")
            if not render_layer:
                # Render Layer has only variant
                if instance_data["family"] == "renderLayer":
                    render_layer = instance_data.get("variant")

                # Backwards compatibility for renderPasses
                elif "render_layer" in instance_data:
                    render_layer = instance_data["render_layer"]

            if render_layer:
                instance_data["renderlayer"] = render_layer

            # Store workfile instance data to instance data
            instance_data["originData"] = copy.deepcopy(instance_data)
            # Global instance data modifications
            # Fill families
            family = instance_data["family"]
            families = [family]
            if family != "review":
                families.append("review")
            # Add `review` family for thumbnail integration
            instance_data["families"] = families

            # Instance name
            subset_name = instance_data["subset"]
            name = instance_data.get("name", subset_name)
            instance_data["name"] = name
            # Label shows 1-based mark in/out frame range
            instance_data["label"] = "{} [{}-{}]".format(
                name,
                context.data["sceneMarkIn"] + 1,
                context.data["sceneMarkOut"] + 1
            )

            active = instance_data.get("active", True)
            instance_data["active"] = active
            instance_data["publish"] = active
            # Add representations key
            instance_data["representations"] = []

            # Different instance creation based on family
            instance = None
            if family == "review":
                # Change subset name of review instance

                # Project name from workfile context
                project_name = context.data["workfile_context"]["project"]

                # Collect asset doc to get asset id
                # - not sure if it's good idea to require asset id in
                #   get_subset_name?
                asset_name = context.data["workfile_context"]["asset"]
                asset_doc = get_asset_by_name(project_name, asset_name)

                # Host name from environment variable
                host_name = context.data["hostName"]
                # Use empty variant value
                variant = ""
                task_name = legacy_io.Session["AVALON_TASK"]
                new_subset_name = get_subset_name(
                    family,
                    variant,
                    task_name,
                    asset_doc,
                    project_name,
                    host_name,
                    project_settings=context.data["project_settings"]
                )
                instance_data["subset"] = new_subset_name

                instance = context.create_instance(**instance_data)

                instance.data["layers"] = copy.deepcopy(
                    context.data["layersData"]
                )

            elif family == "renderLayer":
                instance = self.create_render_layer_instance(
                    context, instance_data
                )
            elif family == "renderPass":
                instance = self.create_render_pass_instance(
                    context, instance_data
                )

            if instance is None:
                continue

            # Publish only when at least one of the instance layers is
            # visible in the workfile
            any_visible = False
            for layer in instance.data["layers"]:
                if layer["visible"]:
                    any_visible = True
                    break

            instance.data["publish"] = any_visible

            self.log.debug("Created instance: {}\n{}".format(
                instance, json.dumps(instance.data, indent=4)
            ))

    def _create_review_instance_data(self, context):
        """Fake review instance data."""

        return {
            "family": "review",
            "asset": context.data["asset"],
            # Dummy subset name
            "subset": "reviewMain"
        }

    def create_render_layer_instance(self, context, instance_data):
        """Create pyblish instance for a renderLayer (beauty) item.

        Returns:
            pyblish instance or None when the layer group is empty.
        """
        name = instance_data["name"]
        # Change label
        subset_name = instance_data["subset"]

        # Backwards compatibility
        # - subset names were not stored as final subset names during creation
        if "variant" not in instance_data:
            instance_data["label"] = "{}_Beauty".format(name)

            # Change subset name
            # Final family of an instance will be `render`
            new_family = "render"
            task_name = legacy_io.Session["AVALON_TASK"]
            new_subset_name = "{}{}_{}_Beauty".format(
                new_family, task_name.capitalize(), name
            )
            instance_data["subset"] = new_subset_name
            self.log.debug("Changed subset name \"{}\"->\"{}\"".format(
                subset_name, new_subset_name
            ))

        # Get all layers for the layer
        layers_data = context.data["layersData"]
        group_id = instance_data["group_id"]
        group_layers = []
        for layer in layers_data:
            if layer["group_id"] == group_id:
                group_layers.append(layer)

        if not group_layers:
            # Should be handled here?
            self.log.warning((
                f"Group with id {group_id} does not contain any layers."
                f" Instance \"{name}\" not created."
            ))
            return None

        instance_data["layers"] = group_layers

        return context.create_instance(**instance_data)

    def create_render_pass_instance(self, context, instance_data):
        """Create pyblish instance for a renderPass item.

        Resolves the pass layers by name (or legacy layer ids) and skips
        creation when none of the stored layers exist anymore.

        Returns:
            pyblish instance or None when no stored layer exists.

        Raises:
            ValueError: Legacy layer-id metadata cannot be resolved.
        """
        pass_name = instance_data["pass"]
        self.log.info(
            "Creating render pass instance. \"{}\"".format(pass_name)
        )
        # Change label
        render_layer = instance_data["renderlayer"]

        # Backwards compatibility
        # - subset names were not stored as final subset names during creation
        if "variant" not in instance_data:
            instance_data["label"] = "{}_{}".format(render_layer, pass_name)
            # Change subset name
            # Final family of an instance will be `render`
            new_family = "render"
            old_subset_name = instance_data["subset"]
            task_name = legacy_io.Session["AVALON_TASK"]
            new_subset_name = "{}{}_{}_{}".format(
                new_family, task_name.capitalize(), render_layer, pass_name
            )
            instance_data["subset"] = new_subset_name
            self.log.debug("Changed subset name \"{}\"->\"{}\"".format(
                old_subset_name, new_subset_name
            ))

        layers_data = context.data["layersData"]
        layers_by_name = {
            layer["name"]: layer
            for layer in layers_data
        }

        if "layer_names" in instance_data:
            layer_names = instance_data["layer_names"]
        else:
            # Backwards compatibility
            # - not 100% working as it was found out that layer ids can't be
            #   used as unified identifier across multiple workstations
            layers_by_id = {
                layer["layer_id"]: layer
                for layer in layers_data
            }
            layer_ids = instance_data["layer_ids"]
            layer_names = []
            for layer_id in layer_ids:
                layer = layers_by_id.get(layer_id)
                if layer:
                    layer_names.append(layer["name"])

            if not layer_names:
                raise ValueError((
                    "Metadata contain old way of storing layers information."
                    " It is not possible to identify layers to publish with"
                    " these data. Please remove Render Pass instances with"
                    " Subset manager and use Creator tool to recreate them."
                ))

        render_pass_layers = []
        for layer_name in layer_names:
            layer = layers_by_name.get(layer_name)
            # NOTE This is kind of validation before validators?
            if not layer:
                self.log.warning(
                    f"Layer with name {layer_name} was not found."
                )
                continue

            render_pass_layers.append(layer)

        if not render_pass_layers:
            name = instance_data["name"]
            self.log.warning(
                f"None of the layers from the RenderPass \"{name}\""
                " exist anymore. Instance not created."
            )
            return None

        instance_data["layers"] = render_pass_layers
        return context.create_instance(**instance_data)
|
||||
|
|
@ -0,0 +1,109 @@
|
|||
import copy
|
||||
import pyblish.api
|
||||
from openpype.lib import prepare_template_data
|
||||
|
||||
|
||||
class CollectRenderInstances(pyblish.api.InstancePlugin):
    """Fill render/review instance data based on creator identifier.

    Dispatches per creator identifier ('render.layer', 'render.pass',
    'render.scene', 'scene.review'), collects the TVPaint layers the
    instance renders, resolves dynamic subset name keys and sets the
    instance label with 1-based mark in/out range.
    """

    label = "Collect Render Instances"
    order = pyblish.api.CollectorOrder - 0.4
    hosts = ["tvpaint"]
    families = ["render", "review"]

    def process(self, instance):
        context = instance.context
        creator_identifier = instance.data["creator_identifier"]
        if creator_identifier == "render.layer":
            self._collect_data_for_render_layer(instance)

        elif creator_identifier == "render.pass":
            self._collect_data_for_render_pass(instance)

        elif creator_identifier == "render.scene":
            self._collect_data_for_render_scene(instance)

        else:
            # Review gets only layers; other identifiers are ignored.
            # Label/name update below is skipped in both cases.
            if creator_identifier == "scene.review":
                self._collect_data_for_review(instance)
            return

        subset_name = instance.data["subset"]
        instance.data["name"] = subset_name
        # Label shows 1-based mark in/out frame range
        instance.data["label"] = "{} [{}-{}]".format(
            subset_name,
            context.data["sceneMarkIn"] + 1,
            context.data["sceneMarkOut"] + 1
        )

    def _collect_data_for_render_layer(self, instance):
        """Collect layers of the instance's group; add review family."""
        instance.data["families"].append("renderLayer")
        creator_attributes = instance.data["creator_attributes"]
        group_id = creator_attributes["group_id"]
        if creator_attributes["mark_for_review"]:
            instance.data["families"].append("review")

        layers_data = instance.context.data["layersData"]
        instance.data["layers"] = [
            copy.deepcopy(layer)
            for layer in layers_data
            if layer["group_id"] == group_id
        ]

    def _collect_data_for_render_pass(self, instance):
        """Collect named layers and resolve 'renderlayer' subset key."""
        instance.data["families"].append("renderPass")

        layer_names = set(instance.data["layer_names"])
        layers_data = instance.context.data["layersData"]

        creator_attributes = instance.data["creator_attributes"]
        if creator_attributes["mark_for_review"]:
            instance.data["families"].append("review")

        instance.data["layers"] = [
            copy.deepcopy(layer)
            for layer in layers_data
            if layer["name"] in layer_names
        ]

        # Find the render layer instance this pass belongs to
        render_layer_data = None
        render_layer_id = creator_attributes["render_layer_instance_id"]
        for in_data in instance.context.data["workfileInstances"]:
            if (
                in_data["creator_identifier"] == "render.layer"
                and in_data["instance_id"] == render_layer_id
            ):
                render_layer_data = in_data
                break

        instance.data["renderLayerData"] = copy.deepcopy(render_layer_data)
        # Invalid state
        if render_layer_data is None:
            return
        render_layer_name = render_layer_data["variant"]
        subset_name = instance.data["subset"]
        instance.data["subset"] = subset_name.format(
            **prepare_template_data({"renderlayer": render_layer_name})
        )

    def _collect_data_for_render_scene(self, instance):
        """Collect all scene layers; resolve 'renderpass' subset key."""
        instance.data["families"].append("renderScene")

        creator_attributes = instance.data["creator_attributes"]
        if creator_attributes["mark_for_review"]:
            instance.data["families"].append("review")

        instance.data["layers"] = copy.deepcopy(
            instance.context.data["layersData"]
        )

        render_pass_name = (
            instance.data["creator_attributes"]["render_pass_name"]
        )
        subset_name = instance.data["subset"]
        instance.data["subset"] = subset_name.format(
            **prepare_template_data({"renderpass": render_pass_name})
        )

    def _collect_data_for_review(self, instance):
        """Review renders all layers of the scene."""
        instance.data["layers"] = copy.deepcopy(
            instance.context.data["layersData"]
        )
|
||||
|
|
@ -1,114 +0,0 @@
|
|||
import json
|
||||
import copy
|
||||
import pyblish.api
|
||||
|
||||
from openpype.client import get_asset_by_name
|
||||
from openpype.pipeline.create import get_subset_name
|
||||
|
||||
|
||||
class CollectRenderScene(pyblish.api.ContextPlugin):
|
||||
"""Collect instance which renders whole scene in PNG.
|
||||
|
||||
Creates instance with family 'renderScene' which will have all layers
|
||||
to render which will be composite into one result. The instance is not
|
||||
collected from scene.
|
||||
|
||||
Scene will be rendered with all visible layers similar way like review is.
|
||||
|
||||
Instance is disabled if there are any created instances of 'renderLayer'
|
||||
or 'renderPass'. That is because it is expected that this instance is
|
||||
used as lazy publish of TVPaint file.
|
||||
|
||||
Subset name is created similar way like 'renderLayer' family. It can use
|
||||
`renderPass` and `renderLayer` keys which can be set using settings and
|
||||
`variant` is filled using `renderPass` value.
|
||||
"""
|
||||
label = "Collect Render Scene"
|
||||
order = pyblish.api.CollectorOrder - 0.39
|
||||
hosts = ["tvpaint"]
|
||||
|
||||
# Value of 'render_pass' in subset name template
|
||||
render_pass = "beauty"
|
||||
|
||||
# Settings attributes
|
||||
enabled = False
|
||||
# Value of 'render_layer' and 'variant' in subset name template
|
||||
render_layer = "Main"
|
||||
|
||||
def process(self, context):
|
||||
# Check if there are created instances of renderPass and renderLayer
|
||||
# - that will define if renderScene instance is enabled after
|
||||
# collection
|
||||
any_created_instance = False
|
||||
for instance in context:
|
||||
family = instance.data["family"]
|
||||
if family in ("renderPass", "renderLayer"):
|
||||
any_created_instance = True
|
||||
break
|
||||
|
||||
# Global instance data modifications
|
||||
# Fill families
|
||||
family = "renderScene"
|
||||
# Add `review` family for thumbnail integration
|
||||
families = [family, "review"]
|
||||
|
||||
# Collect asset doc to get asset id
|
||||
# - not sure if it's good idea to require asset id in
|
||||
# get_subset_name?
|
||||
workfile_context = context.data["workfile_context"]
|
||||
# Project name from workfile context
|
||||
project_name = context.data["workfile_context"]["project"]
|
||||
asset_name = workfile_context["asset"]
|
||||
asset_doc = get_asset_by_name(project_name, asset_name)
|
||||
|
||||
# Host name from environment variable
|
||||
host_name = context.data["hostName"]
|
||||
# Variant is using render pass name
|
||||
variant = self.render_layer
|
||||
dynamic_data = {
|
||||
"renderlayer": self.render_layer,
|
||||
"renderpass": self.render_pass,
|
||||
}
|
||||
# TODO remove - Backwards compatibility for old subset name templates
|
||||
# - added 2022/04/28
|
||||
dynamic_data["render_layer"] = dynamic_data["renderlayer"]
|
||||
dynamic_data["render_pass"] = dynamic_data["renderpass"]
|
||||
|
||||
task_name = workfile_context["task"]
|
||||
subset_name = get_subset_name(
|
||||
"render",
|
||||
variant,
|
||||
task_name,
|
||||
asset_doc,
|
||||
project_name,
|
||||
host_name,
|
||||
dynamic_data=dynamic_data,
|
||||
project_settings=context.data["project_settings"]
|
||||
)
|
||||
|
||||
instance_data = {
|
||||
"family": family,
|
||||
"families": families,
|
||||
"fps": context.data["sceneFps"],
|
||||
"subset": subset_name,
|
||||
"name": subset_name,
|
||||
"label": "{} [{}-{}]".format(
|
||||
subset_name,
|
||||
context.data["sceneMarkIn"] + 1,
|
||||
context.data["sceneMarkOut"] + 1
|
||||
),
|
||||
"active": not any_created_instance,
|
||||
"publish": not any_created_instance,
|
||||
"representations": [],
|
||||
"layers": copy.deepcopy(context.data["layersData"]),
|
||||
"asset": asset_name,
|
||||
"task": task_name,
|
||||
# Add render layer to instance data
|
||||
"renderlayer": self.render_layer
|
||||
}
|
||||
|
||||
instance = context.create_instance(**instance_data)
|
||||
|
||||
self.log.debug("Created instance: {}\n{}".format(
|
||||
instance, json.dumps(instance.data, indent=4)
|
||||
))
|
||||
|
|
@ -2,17 +2,15 @@ import os
|
|||
import json
|
||||
import pyblish.api
|
||||
|
||||
from openpype.client import get_asset_by_name
|
||||
from openpype.pipeline import legacy_io
|
||||
from openpype.pipeline.create import get_subset_name
|
||||
|
||||
|
||||
class CollectWorkfile(pyblish.api.ContextPlugin):
|
||||
class CollectWorkfile(pyblish.api.InstancePlugin):
|
||||
label = "Collect Workfile"
|
||||
order = pyblish.api.CollectorOrder - 0.4
|
||||
hosts = ["tvpaint"]
|
||||
families = ["workfile"]
|
||||
|
||||
def process(self, context):
|
||||
def process(self, instance):
|
||||
context = instance.context
|
||||
current_file = context.data["currentFile"]
|
||||
|
||||
self.log.info(
|
||||
|
|
@ -21,49 +19,14 @@ class CollectWorkfile(pyblish.api.ContextPlugin):
|
|||
|
||||
dirpath, filename = os.path.split(current_file)
|
||||
basename, ext = os.path.splitext(filename)
|
||||
instance = context.create_instance(name=basename)
|
||||
|
||||
# Project name from workfile context
|
||||
project_name = context.data["workfile_context"]["project"]
|
||||
|
||||
# Get subset name of workfile instance
|
||||
# Collect asset doc to get asset id
|
||||
# - not sure if it's good idea to require asset id in
|
||||
# get_subset_name?
|
||||
family = "workfile"
|
||||
asset_name = context.data["workfile_context"]["asset"]
|
||||
asset_doc = get_asset_by_name(project_name, asset_name)
|
||||
|
||||
# Host name from environment variable
|
||||
host_name = os.environ["AVALON_APP"]
|
||||
# Use empty variant value
|
||||
variant = ""
|
||||
task_name = legacy_io.Session["AVALON_TASK"]
|
||||
subset_name = get_subset_name(
|
||||
family,
|
||||
variant,
|
||||
task_name,
|
||||
asset_doc,
|
||||
project_name,
|
||||
host_name,
|
||||
project_settings=context.data["project_settings"]
|
||||
)
|
||||
|
||||
# Create Workfile instance
|
||||
instance.data.update({
|
||||
"subset": subset_name,
|
||||
"asset": context.data["asset"],
|
||||
"label": subset_name,
|
||||
"publish": True,
|
||||
"family": "workfile",
|
||||
"families": ["workfile"],
|
||||
"representations": [{
|
||||
"name": ext.lstrip("."),
|
||||
"ext": ext.lstrip("."),
|
||||
"files": filename,
|
||||
"stagingDir": dirpath
|
||||
}]
|
||||
instance.data["representations"].append({
|
||||
"name": ext.lstrip("."),
|
||||
"ext": ext.lstrip("."),
|
||||
"files": filename,
|
||||
"stagingDir": dirpath
|
||||
})
|
||||
|
||||
self.log.info("Collected workfile instance: {}".format(
|
||||
json.dumps(instance.data, indent=4)
|
||||
))
|
||||
|
|
|
|||
|
|
@ -65,9 +65,9 @@ class CollectWorkfileData(pyblish.api.ContextPlugin):
|
|||
|
||||
# Collect and store current context to have reference
|
||||
current_context = {
|
||||
"project": legacy_io.Session["AVALON_PROJECT"],
|
||||
"asset": legacy_io.Session["AVALON_ASSET"],
|
||||
"task": legacy_io.Session["AVALON_TASK"]
|
||||
"project_name": context.data["projectName"],
|
||||
"asset_name": context.data["asset"],
|
||||
"task_name": context.data["task"]
|
||||
}
|
||||
context.data["previous_context"] = current_context
|
||||
self.log.debug("Current context is: {}".format(current_context))
|
||||
|
|
@ -76,25 +76,31 @@ class CollectWorkfileData(pyblish.api.ContextPlugin):
|
|||
self.log.info("Collecting workfile context")
|
||||
|
||||
workfile_context = get_current_workfile_context()
|
||||
if "project" in workfile_context:
|
||||
workfile_context = {
|
||||
"project_name": workfile_context.get("project"),
|
||||
"asset_name": workfile_context.get("asset"),
|
||||
"task_name": workfile_context.get("task"),
|
||||
}
|
||||
# Store workfile context to pyblish context
|
||||
context.data["workfile_context"] = workfile_context
|
||||
if workfile_context:
|
||||
# Change current context with context from workfile
|
||||
key_map = (
|
||||
("AVALON_ASSET", "asset"),
|
||||
("AVALON_TASK", "task")
|
||||
("AVALON_ASSET", "asset_name"),
|
||||
("AVALON_TASK", "task_name")
|
||||
)
|
||||
for env_key, key in key_map:
|
||||
legacy_io.Session[env_key] = workfile_context[key]
|
||||
os.environ[env_key] = workfile_context[key]
|
||||
self.log.info("Context changed to: {}".format(workfile_context))
|
||||
|
||||
asset_name = workfile_context["asset"]
|
||||
task_name = workfile_context["task"]
|
||||
asset_name = workfile_context["asset_name"]
|
||||
task_name = workfile_context["task_name"]
|
||||
|
||||
else:
|
||||
asset_name = current_context["asset"]
|
||||
task_name = current_context["task"]
|
||||
asset_name = current_context["asset_name"]
|
||||
task_name = current_context["task_name"]
|
||||
# Handle older workfiles or workfiles without metadata
|
||||
self.log.warning((
|
||||
"Workfile does not contain information about context."
|
||||
|
|
@ -103,6 +109,7 @@ class CollectWorkfileData(pyblish.api.ContextPlugin):
|
|||
|
||||
# Store context asset name
|
||||
context.data["asset"] = asset_name
|
||||
context.data["task"] = task_name
|
||||
self.log.info(
|
||||
"Context is set to Asset: \"{}\" and Task: \"{}\"".format(
|
||||
asset_name, task_name
|
||||
|
|
|
|||
|
|
@ -6,6 +6,7 @@ from PIL import Image
|
|||
|
||||
import pyblish.api
|
||||
|
||||
from openpype.pipeline.publish import KnownPublishError
|
||||
from openpype.hosts.tvpaint.api.lib import (
|
||||
execute_george,
|
||||
execute_george_through_file,
|
||||
|
|
@ -24,8 +25,7 @@ from openpype.hosts.tvpaint.lib import (
|
|||
class ExtractSequence(pyblish.api.Extractor):
|
||||
label = "Extract Sequence"
|
||||
hosts = ["tvpaint"]
|
||||
families = ["review", "renderPass", "renderLayer", "renderScene"]
|
||||
families_to_review = ["review"]
|
||||
families = ["review", "render"]
|
||||
|
||||
# Modifiable with settings
|
||||
review_bg = [255, 255, 255, 255]
|
||||
|
|
@ -136,7 +136,7 @@ class ExtractSequence(pyblish.api.Extractor):
|
|||
|
||||
# Fill tags and new families from project settings
|
||||
tags = []
|
||||
if family_lowered in self.families_to_review:
|
||||
if family_lowered == "review":
|
||||
tags.append("review")
|
||||
|
||||
# Sequence of one frame
|
||||
|
|
@ -162,10 +162,6 @@ class ExtractSequence(pyblish.api.Extractor):
|
|||
|
||||
instance.data["representations"].append(new_repre)
|
||||
|
||||
if family_lowered in ("renderpass", "renderlayer", "renderscene"):
|
||||
# Change family to render
|
||||
instance.data["family"] = "render"
|
||||
|
||||
if not thumbnail_fullpath:
|
||||
return
|
||||
|
||||
|
|
@ -259,7 +255,7 @@ class ExtractSequence(pyblish.api.Extractor):
|
|||
output_filepaths_by_frame_idx[frame_idx] = filepath
|
||||
|
||||
if not os.path.exists(filepath):
|
||||
raise AssertionError(
|
||||
raise KnownPublishError(
|
||||
"Output was not rendered. File was not found {}".format(
|
||||
filepath
|
||||
)
|
||||
|
|
|
|||
|
|
@ -0,0 +1,18 @@
|
|||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<root>
|
||||
<error id="main">
|
||||
<title>Overused Color group</title>
|
||||
<description>## One Color group is used by multiple Render Layers
|
||||
|
||||
Single color group used by multiple Render Layers would cause clashes of rendered TVPaint layers. The same layers would be used for output files of both groups.
|
||||
|
||||
### Missing layer names
|
||||
|
||||
{groups_information}
|
||||
|
||||
### How to repair?
|
||||
|
||||
Refresh, go to 'Publish' tab and go through Render Layers and change their groups to not clash each other. If you reach limit of TVPaint color groups there is nothing you can do about it to fix the issue.
|
||||
</description>
|
||||
</error>
|
||||
</root>
|
||||
|
|
@ -20,6 +20,9 @@ class ValidateLayersGroup(pyblish.api.InstancePlugin):
|
|||
duplicated_layer_names = []
|
||||
for layer_name in layer_names:
|
||||
layers = layers_by_name.get(layer_name)
|
||||
# It is not job of this validator to handle missing layers
|
||||
if layers is None:
|
||||
continue
|
||||
if len(layers) > 1:
|
||||
duplicated_layer_names.append(layer_name)
|
||||
|
||||
|
|
|
|||
|
|
@ -8,11 +8,16 @@ class ValidateLayersVisiblity(pyblish.api.InstancePlugin):
|
|||
|
||||
label = "Validate Layers Visibility"
|
||||
order = pyblish.api.ValidatorOrder
|
||||
families = ["review", "renderPass", "renderLayer", "renderScene"]
|
||||
families = ["review", "render"]
|
||||
|
||||
def process(self, instance):
|
||||
layers = instance.data["layers"]
|
||||
# Instance have empty layers
|
||||
# - it is not job of this validator to check that
|
||||
if not layers:
|
||||
return
|
||||
layer_names = set()
|
||||
for layer in instance.data["layers"]:
|
||||
for layer in layers:
|
||||
layer_names.add(layer["name"])
|
||||
if layer["visible"]:
|
||||
return
|
||||
|
|
|
|||
|
|
@ -0,0 +1,74 @@
|
|||
import collections
|
||||
import pyblish.api
|
||||
from openpype.pipeline import PublishXmlValidationError
|
||||
|
||||
|
||||
class ValidateRenderLayerGroups(pyblish.api.ContextPlugin):
|
||||
"""Validate group ids of renderLayer subsets.
|
||||
|
||||
Validate that there are not 2 render layers using the same group.
|
||||
"""
|
||||
|
||||
label = "Validate Render Layers Group"
|
||||
order = pyblish.api.ValidatorOrder + 0.1
|
||||
|
||||
def process(self, context):
|
||||
# Prepare layers
|
||||
render_layers_by_group_id = collections.defaultdict(list)
|
||||
for instance in context:
|
||||
families = instance.data.get("families")
|
||||
if not families or "renderLayer" not in families:
|
||||
continue
|
||||
|
||||
group_id = instance.data["creator_attributes"]["group_id"]
|
||||
render_layers_by_group_id[group_id].append(instance)
|
||||
|
||||
duplicated_instances = []
|
||||
for group_id, instances in render_layers_by_group_id.items():
|
||||
if len(instances) > 1:
|
||||
duplicated_instances.append((group_id, instances))
|
||||
|
||||
if not duplicated_instances:
|
||||
return
|
||||
|
||||
# Exception message preparations
|
||||
groups_data = context.data["groupsData"]
|
||||
groups_by_id = {
|
||||
group["group_id"]: group
|
||||
for group in groups_data
|
||||
}
|
||||
|
||||
per_group_msgs = []
|
||||
groups_information_lines = []
|
||||
for group_id, instances in duplicated_instances:
|
||||
group = groups_by_id[group_id]
|
||||
group_label = "Group \"{}\" ({})".format(
|
||||
group["name"],
|
||||
group["group_id"],
|
||||
)
|
||||
line_join_subset_names = "\n".join([
|
||||
f" - {instance['subset']}"
|
||||
for instance in instances
|
||||
])
|
||||
joined_subset_names = ", ".join([
|
||||
f"\"{instance['subset']}\""
|
||||
for instance in instances
|
||||
])
|
||||
per_group_msgs.append(
|
||||
"{} < {} >".format(group_label, joined_subset_names)
|
||||
)
|
||||
groups_information_lines.append(
|
||||
"<b>{}</b>\n{}".format(group_label, line_join_subset_names)
|
||||
)
|
||||
|
||||
# Raise an error
|
||||
raise PublishXmlValidationError(
|
||||
self,
|
||||
(
|
||||
"More than one Render Layer is using the same TVPaint"
|
||||
" group color. {}"
|
||||
).format(" | ".join(per_group_msgs)),
|
||||
formatting_data={
|
||||
"groups_information": "\n".join(groups_information_lines)
|
||||
}
|
||||
)
|
||||
|
|
@ -85,6 +85,5 @@ class ValidateLayersGroup(pyblish.api.InstancePlugin):
|
|||
),
|
||||
"expected_group": correct_group["name"],
|
||||
"layer_names": ", ".join(invalid_layer_names)
|
||||
|
||||
}
|
||||
)
|
||||
|
|
|
|||
|
|
@ -42,7 +42,7 @@ class ValidateProjectSettings(pyblish.api.ContextPlugin):
|
|||
"expected_width": expected_data["resolutionWidth"],
|
||||
"expected_height": expected_data["resolutionHeight"],
|
||||
"current_width": scene_data["resolutionWidth"],
|
||||
"current_height": scene_data["resolutionWidth"],
|
||||
"current_height": scene_data["resolutionHeight"],
|
||||
"expected_pixel_ratio": expected_data["pixelAspect"],
|
||||
"current_pixel_ratio": scene_data["pixelAspect"]
|
||||
}
|
||||
|
|
|
|||
|
|
@ -1,5 +1,9 @@
|
|||
import pyblish.api
|
||||
from openpype.pipeline import PublishXmlValidationError, registered_host
|
||||
from openpype.pipeline import (
|
||||
PublishXmlValidationError,
|
||||
PublishValidationError,
|
||||
registered_host,
|
||||
)
|
||||
|
||||
|
||||
class ValidateWorkfileMetadataRepair(pyblish.api.Action):
|
||||
|
|
@ -27,13 +31,18 @@ class ValidateWorkfileMetadata(pyblish.api.ContextPlugin):
|
|||
|
||||
actions = [ValidateWorkfileMetadataRepair]
|
||||
|
||||
required_keys = {"project", "asset", "task"}
|
||||
required_keys = {"project_name", "asset_name", "task_name"}
|
||||
|
||||
def process(self, context):
|
||||
workfile_context = context.data["workfile_context"]
|
||||
if not workfile_context:
|
||||
raise AssertionError(
|
||||
"Current workfile is missing whole metadata about context."
|
||||
raise PublishValidationError(
|
||||
"Current workfile is missing whole metadata about context.",
|
||||
"Missing context",
|
||||
(
|
||||
"Current workfile is missing metadata about task."
|
||||
" To fix this issue save the file using Workfiles tool."
|
||||
)
|
||||
)
|
||||
|
||||
missing_keys = []
|
||||
|
|
|
|||
|
|
@ -1,4 +1,3 @@
|
|||
import os
|
||||
import pyblish.api
|
||||
from openpype.pipeline import PublishXmlValidationError
|
||||
|
||||
|
|
@ -16,15 +15,15 @@ class ValidateWorkfileProjectName(pyblish.api.ContextPlugin):
|
|||
def process(self, context):
|
||||
workfile_context = context.data.get("workfile_context")
|
||||
# If workfile context is missing than project is matching to
|
||||
# `AVALON_PROJECT` value for 100%
|
||||
# global project
|
||||
if not workfile_context:
|
||||
self.log.info(
|
||||
"Workfile context (\"workfile_context\") is not filled."
|
||||
)
|
||||
return
|
||||
|
||||
workfile_project_name = workfile_context["project"]
|
||||
env_project_name = os.environ["AVALON_PROJECT"]
|
||||
workfile_project_name = workfile_context["project_name"]
|
||||
env_project_name = context.data["projectName"]
|
||||
if workfile_project_name == env_project_name:
|
||||
self.log.info((
|
||||
"Both workfile project and environment project are same. {}"
|
||||
|
|
|
|||
|
|
@ -81,11 +81,14 @@ def run_subprocess(*args, **kwargs):
|
|||
|
||||
Entered arguments and keyword arguments are passed to subprocess Popen.
|
||||
|
||||
On windows are 'creationflags' filled with flags that should cause ignore
|
||||
creation of new window.
|
||||
|
||||
Args:
|
||||
*args: Variable length arument list passed to Popen.
|
||||
*args: Variable length argument list passed to Popen.
|
||||
**kwargs : Arbitrary keyword arguments passed to Popen. Is possible to
|
||||
pass `logging.Logger` object under "logger" if want to use
|
||||
different than lib's logger.
|
||||
pass `logging.Logger` object under "logger" to use custom logger
|
||||
for output.
|
||||
|
||||
Returns:
|
||||
str: Full output of subprocess concatenated stdout and stderr.
|
||||
|
|
@ -95,6 +98,17 @@ def run_subprocess(*args, **kwargs):
|
|||
return code.
|
||||
"""
|
||||
|
||||
# Modify creation flags on windows to hide console window if in UI mode
|
||||
if (
|
||||
platform.system().lower() == "windows"
|
||||
and "creationflags" not in kwargs
|
||||
):
|
||||
kwargs["creationflags"] = (
|
||||
subprocess.CREATE_NEW_PROCESS_GROUP
|
||||
| getattr(subprocess, "DETACHED_PROCESS", 0)
|
||||
| getattr(subprocess, "CREATE_NO_WINDOW", 0)
|
||||
)
|
||||
|
||||
# Get environents from kwarg or use current process environments if were
|
||||
# not passed.
|
||||
env = kwargs.get("env") or os.environ
|
||||
|
|
@ -107,10 +121,10 @@ def run_subprocess(*args, **kwargs):
|
|||
logger = Logger.get_logger("run_subprocess")
|
||||
|
||||
# set overrides
|
||||
kwargs['stdout'] = kwargs.get('stdout', subprocess.PIPE)
|
||||
kwargs['stderr'] = kwargs.get('stderr', subprocess.PIPE)
|
||||
kwargs['stdin'] = kwargs.get('stdin', subprocess.PIPE)
|
||||
kwargs['env'] = filtered_env
|
||||
kwargs["stdout"] = kwargs.get("stdout", subprocess.PIPE)
|
||||
kwargs["stderr"] = kwargs.get("stderr", subprocess.PIPE)
|
||||
kwargs["stdin"] = kwargs.get("stdin", subprocess.PIPE)
|
||||
kwargs["env"] = filtered_env
|
||||
|
||||
proc = subprocess.Popen(*args, **kwargs)
|
||||
|
||||
|
|
|
|||
|
|
@ -5,6 +5,7 @@ import json
|
|||
import collections
|
||||
import tempfile
|
||||
import subprocess
|
||||
import platform
|
||||
|
||||
import xml.etree.ElementTree
|
||||
|
||||
|
|
@ -745,11 +746,18 @@ def get_ffprobe_data(path_to_file, logger=None):
|
|||
logger.debug("FFprobe command: {}".format(
|
||||
subprocess.list2cmdline(args)
|
||||
))
|
||||
popen = subprocess.Popen(
|
||||
args,
|
||||
stdout=subprocess.PIPE,
|
||||
stderr=subprocess.PIPE
|
||||
)
|
||||
kwargs = {
|
||||
"stdout": subprocess.PIPE,
|
||||
"stderr": subprocess.PIPE,
|
||||
}
|
||||
if platform.system().lower() == "windows":
|
||||
kwargs["creationflags"] = (
|
||||
subprocess.CREATE_NEW_PROCESS_GROUP
|
||||
| getattr(subprocess, "DETACHED_PROCESS", 0)
|
||||
| getattr(subprocess, "CREATE_NO_WINDOW", 0)
|
||||
)
|
||||
|
||||
popen = subprocess.Popen(args, **kwargs)
|
||||
|
||||
popen_stdout, popen_stderr = popen.communicate()
|
||||
if popen_stdout:
|
||||
|
|
|
|||
|
|
@ -12,6 +12,7 @@ from openpype.pipeline import legacy_io
|
|||
from openpype_modules.deadline import abstract_submit_deadline
|
||||
from openpype_modules.deadline.abstract_submit_deadline import DeadlineJobInfo
|
||||
from openpype.tests.lib import is_in_tests
|
||||
from openpype.lib import is_running_from_build
|
||||
|
||||
|
||||
@attr.s
|
||||
|
|
@ -87,9 +88,13 @@ class AfterEffectsSubmitDeadline(
|
|||
"AVALON_APP_NAME",
|
||||
"OPENPYPE_DEV",
|
||||
"OPENPYPE_LOG_NO_COLORS",
|
||||
"OPENPYPE_VERSION",
|
||||
"IS_TEST"
|
||||
]
|
||||
|
||||
# Add OpenPype version if we are running from build.
|
||||
if is_running_from_build():
|
||||
keys.append("OPENPYPE_VERSION")
|
||||
|
||||
# Add mongo url if it's enabled
|
||||
if self._instance.context.data.get("deadlinePassMongoUrl"):
|
||||
keys.append("OPENPYPE_MONGO")
|
||||
|
|
|
|||
|
|
@ -14,6 +14,7 @@ from openpype.pipeline import legacy_io
|
|||
from openpype_modules.deadline import abstract_submit_deadline
|
||||
from openpype_modules.deadline.abstract_submit_deadline import DeadlineJobInfo
|
||||
from openpype.tests.lib import is_in_tests
|
||||
from openpype.lib import is_running_from_build
|
||||
|
||||
|
||||
class _ZipFile(ZipFile):
|
||||
|
|
@ -279,10 +280,14 @@ class HarmonySubmitDeadline(
|
|||
"AVALON_TASK",
|
||||
"AVALON_APP_NAME",
|
||||
"OPENPYPE_DEV",
|
||||
"OPENPYPE_LOG_NO_COLORS",
|
||||
"OPENPYPE_VERSION",
|
||||
"OPENPYPE_LOG_NO_COLORS"
|
||||
"IS_TEST"
|
||||
]
|
||||
|
||||
# Add OpenPype version if we are running from build.
|
||||
if is_running_from_build():
|
||||
keys.append("OPENPYPE_VERSION")
|
||||
|
||||
# Add mongo url if it's enabled
|
||||
if self._instance.context.data.get("deadlinePassMongoUrl"):
|
||||
keys.append("OPENPYPE_MONGO")
|
||||
|
|
|
|||
|
|
@ -9,6 +9,7 @@ import pyblish.api
|
|||
|
||||
from openpype.pipeline import legacy_io
|
||||
from openpype.tests.lib import is_in_tests
|
||||
from openpype.lib import is_running_from_build
|
||||
|
||||
|
||||
class HoudiniSubmitPublishDeadline(pyblish.api.ContextPlugin):
|
||||
|
|
@ -133,9 +134,13 @@ class HoudiniSubmitPublishDeadline(pyblish.api.ContextPlugin):
|
|||
# Submit along the current Avalon tool setup that we launched
|
||||
# this application with so the Render Slave can build its own
|
||||
# similar environment using it, e.g. "houdini17.5;pluginx2.3"
|
||||
"AVALON_TOOLS",
|
||||
"OPENPYPE_VERSION"
|
||||
"AVALON_TOOLS"
|
||||
]
|
||||
|
||||
# Add OpenPype version if we are running from build.
|
||||
if is_running_from_build():
|
||||
keys.append("OPENPYPE_VERSION")
|
||||
|
||||
# Add mongo url if it's enabled
|
||||
if context.data.get("deadlinePassMongoUrl"):
|
||||
keys.append("OPENPYPE_MONGO")
|
||||
|
|
|
|||
|
|
@ -10,6 +10,7 @@ import pyblish.api
|
|||
|
||||
from openpype.pipeline import legacy_io
|
||||
from openpype.tests.lib import is_in_tests
|
||||
from openpype.lib import is_running_from_build
|
||||
|
||||
|
||||
class HoudiniSubmitRenderDeadline(pyblish.api.InstancePlugin):
|
||||
|
|
@ -105,9 +106,13 @@ class HoudiniSubmitRenderDeadline(pyblish.api.InstancePlugin):
|
|||
# Submit along the current Avalon tool setup that we launched
|
||||
# this application with so the Render Slave can build its own
|
||||
# similar environment using it, e.g. "maya2018;vray4.x;yeti3.1.9"
|
||||
"AVALON_TOOLS",
|
||||
"OPENPYPE_VERSION"
|
||||
"AVALON_TOOLS"
|
||||
]
|
||||
|
||||
# Add OpenPype version if we are running from build.
|
||||
if is_running_from_build():
|
||||
keys.append("OPENPYPE_VERSION")
|
||||
|
||||
# Add mongo url if it's enabled
|
||||
if context.data.get("deadlinePassMongoUrl"):
|
||||
keys.append("OPENPYPE_MONGO")
|
||||
|
|
|
|||
218
openpype/modules/deadline/plugins/publish/submit_max_deadline.py
Normal file
|
|
@ -0,0 +1,218 @@
|
|||
import os
|
||||
import getpass
|
||||
import copy
|
||||
|
||||
import attr
|
||||
from openpype.pipeline import legacy_io
|
||||
from openpype.settings import get_project_settings
|
||||
from openpype.hosts.max.api.lib import (
|
||||
get_current_renderer,
|
||||
get_multipass_setting
|
||||
)
|
||||
from openpype.hosts.max.api.lib_rendersettings import RenderSettings
|
||||
from openpype_modules.deadline import abstract_submit_deadline
|
||||
from openpype_modules.deadline.abstract_submit_deadline import DeadlineJobInfo
|
||||
|
||||
|
||||
@attr.s
|
||||
class MaxPluginInfo(object):
|
||||
SceneFile = attr.ib(default=None) # Input
|
||||
Version = attr.ib(default=None) # Mandatory for Deadline
|
||||
SaveFile = attr.ib(default=True)
|
||||
IgnoreInputs = attr.ib(default=True)
|
||||
|
||||
|
||||
class MaxSubmitDeadline(abstract_submit_deadline.AbstractSubmitDeadline):
|
||||
|
||||
label = "Submit Render to Deadline"
|
||||
hosts = ["max"]
|
||||
families = ["maxrender"]
|
||||
targets = ["local"]
|
||||
|
||||
use_published = True
|
||||
priority = 50
|
||||
tile_priority = 50
|
||||
chunk_size = 1
|
||||
jobInfo = {}
|
||||
pluginInfo = {}
|
||||
group = None
|
||||
deadline_pool = None
|
||||
deadline_pool_secondary = None
|
||||
framePerTask = 1
|
||||
|
||||
def get_job_info(self):
|
||||
job_info = DeadlineJobInfo(Plugin="3dsmax")
|
||||
|
||||
# todo: test whether this works for existing production cases
|
||||
# where custom jobInfo was stored in the project settings
|
||||
job_info.update(self.jobInfo)
|
||||
|
||||
instance = self._instance
|
||||
context = instance.context
|
||||
|
||||
# Always use the original work file name for the Job name even when
|
||||
# rendering is done from the published Work File. The original work
|
||||
# file name is clearer because it can also have subversion strings,
|
||||
# etc. which are stripped for the published file.
|
||||
src_filepath = context.data["currentFile"]
|
||||
src_filename = os.path.basename(src_filepath)
|
||||
|
||||
job_info.Name = "%s - %s" % (src_filename, instance.name)
|
||||
job_info.BatchName = src_filename
|
||||
job_info.Plugin = instance.data["plugin"]
|
||||
job_info.UserName = context.data.get("deadlineUser", getpass.getuser())
|
||||
|
||||
# Deadline requires integers in frame range
|
||||
frames = "{start}-{end}".format(
|
||||
start=int(instance.data["frameStart"]),
|
||||
end=int(instance.data["frameEnd"])
|
||||
)
|
||||
job_info.Frames = frames
|
||||
|
||||
job_info.Pool = instance.data.get("primaryPool")
|
||||
job_info.SecondaryPool = instance.data.get("secondaryPool")
|
||||
job_info.ChunkSize = instance.data.get("chunkSize", 1)
|
||||
job_info.Comment = context.data.get("comment")
|
||||
job_info.Priority = instance.data.get("priority", self.priority)
|
||||
job_info.FramesPerTask = instance.data.get("framesPerTask", 1)
|
||||
|
||||
if self.group:
|
||||
job_info.Group = self.group
|
||||
|
||||
# Add options from RenderGlobals
|
||||
render_globals = instance.data.get("renderGlobals", {})
|
||||
job_info.update(render_globals)
|
||||
|
||||
keys = [
|
||||
"FTRACK_API_KEY",
|
||||
"FTRACK_API_USER",
|
||||
"FTRACK_SERVER",
|
||||
"OPENPYPE_SG_USER",
|
||||
"AVALON_PROJECT",
|
||||
"AVALON_ASSET",
|
||||
"AVALON_TASK",
|
||||
"AVALON_APP_NAME",
|
||||
"OPENPYPE_DEV",
|
||||
"OPENPYPE_VERSION",
|
||||
"IS_TEST"
|
||||
]
|
||||
# Add mongo url if it's enabled
|
||||
if self._instance.context.data.get("deadlinePassMongoUrl"):
|
||||
keys.append("OPENPYPE_MONGO")
|
||||
|
||||
environment = dict({key: os.environ[key] for key in keys
|
||||
if key in os.environ}, **legacy_io.Session)
|
||||
|
||||
for key in keys:
|
||||
value = environment.get(key)
|
||||
if not value:
|
||||
continue
|
||||
job_info.EnvironmentKeyValue[key] = value
|
||||
|
||||
# to recognize job from PYPE for turning Event On/Off
|
||||
job_info.EnvironmentKeyValue["OPENPYPE_RENDER_JOB"] = "1"
|
||||
job_info.EnvironmentKeyValue["OPENPYPE_LOG_NO_COLORS"] = "1"
|
||||
|
||||
# Add list of expected files to job
|
||||
# ---------------------------------
|
||||
exp = instance.data.get("expectedFiles")
|
||||
for filepath in exp:
|
||||
job_info.OutputDirectory += os.path.dirname(filepath)
|
||||
job_info.OutputFilename += os.path.basename(filepath)
|
||||
|
||||
return job_info
|
||||
|
||||
def get_plugin_info(self):
|
||||
instance = self._instance
|
||||
|
||||
plugin_info = MaxPluginInfo(
|
||||
SceneFile=self.scene_path,
|
||||
Version=instance.data["maxversion"],
|
||||
SaveFile=True,
|
||||
IgnoreInputs=True
|
||||
)
|
||||
|
||||
plugin_payload = attr.asdict(plugin_info)
|
||||
|
||||
# Patching with pluginInfo from settings
|
||||
for key, value in self.pluginInfo.items():
|
||||
plugin_payload[key] = value
|
||||
|
||||
return plugin_payload
|
||||
|
||||
def process_submission(self):
|
||||
|
||||
instance = self._instance
|
||||
filepath = self.scene_path
|
||||
|
||||
expected_files = instance.data["expectedFiles"]
|
||||
if not expected_files:
|
||||
raise RuntimeError("No Render Elements found!")
|
||||
output_dir = os.path.dirname(expected_files[0])
|
||||
instance.data["outputDir"] = output_dir
|
||||
instance.data["toBeRenderedOn"] = "deadline"
|
||||
|
||||
filename = os.path.basename(filepath)
|
||||
|
||||
payload_data = {
|
||||
"filename": filename,
|
||||
"dirname": output_dir
|
||||
}
|
||||
|
||||
self.log.debug("Submitting 3dsMax render..")
|
||||
payload = self._use_published_name(payload_data)
|
||||
job_info, plugin_info = payload
|
||||
self.submit(self.assemble_payload(job_info, plugin_info))
|
||||
|
||||
def _use_published_name(self, data):
|
||||
instance = self._instance
|
||||
job_info = copy.deepcopy(self.job_info)
|
||||
plugin_info = copy.deepcopy(self.plugin_info)
|
||||
plugin_data = {}
|
||||
project_setting = get_project_settings(
|
||||
legacy_io.Session["AVALON_PROJECT"]
|
||||
)
|
||||
|
||||
multipass = get_multipass_setting(project_setting)
|
||||
if multipass:
|
||||
plugin_data["DisableMultipass"] = 0
|
||||
else:
|
||||
plugin_data["DisableMultipass"] = 1
|
||||
|
||||
expected_files = instance.data.get("expectedFiles")
|
||||
if not expected_files:
|
||||
raise RuntimeError("No render elements found")
|
||||
old_output_dir = os.path.dirname(expected_files[0])
|
||||
output_beauty = RenderSettings().get_render_output(instance.name,
|
||||
old_output_dir)
|
||||
filepath = self.from_published_scene()
|
||||
|
||||
def _clean_name(path):
|
||||
return os.path.splitext(os.path.basename(path))[0]
|
||||
|
||||
new_scene = _clean_name(filepath)
|
||||
orig_scene = _clean_name(instance.context.data["currentFile"])
|
||||
|
||||
output_beauty = output_beauty.replace(orig_scene, new_scene)
|
||||
output_beauty = output_beauty.replace("\\", "/")
|
||||
plugin_data["RenderOutput"] = output_beauty
|
||||
|
||||
renderer_class = get_current_renderer()
|
||||
renderer = str(renderer_class).split(":")[0]
|
||||
if renderer in [
|
||||
"ART_Renderer",
|
||||
"Redshift_Renderer",
|
||||
"V_Ray_6_Hotfix_3",
|
||||
"V_Ray_GPU_6_Hotfix_3",
|
||||
"Default_Scanline_Renderer",
|
||||
"Quicksilver_Hardware_Renderer",
|
||||
]:
|
||||
render_elem_list = RenderSettings().get_render_element()
|
||||
for i, element in enumerate(render_elem_list):
|
||||
element = element.replace(orig_scene, new_scene)
|
||||
plugin_data["RenderElementOutputFilename%d" % i] = element # noqa
|
||||
|
||||
self.log.debug("plugin data:{}".format(plugin_data))
|
||||
plugin_info.update(plugin_data)
|
||||
|
||||
return job_info, plugin_info
|
||||
|
|
@ -38,6 +38,7 @@ from openpype.hosts.maya.api.lib import get_attr_in_layer
|
|||
from openpype_modules.deadline import abstract_submit_deadline
|
||||
from openpype_modules.deadline.abstract_submit_deadline import DeadlineJobInfo
|
||||
from openpype.tests.lib import is_in_tests
|
||||
from openpype.lib import is_running_from_build
|
||||
|
||||
|
||||
def _validate_deadline_bool_value(instance, attribute, value):
|
||||
|
|
@ -165,10 +166,14 @@ class MayaSubmitDeadline(abstract_submit_deadline.AbstractSubmitDeadline):
|
|||
"AVALON_ASSET",
|
||||
"AVALON_TASK",
|
||||
"AVALON_APP_NAME",
|
||||
"OPENPYPE_DEV",
|
||||
"OPENPYPE_VERSION",
|
||||
"OPENPYPE_DEV"
|
||||
"IS_TEST"
|
||||
]
|
||||
|
||||
# Add OpenPype version if we are running from build.
|
||||
if is_running_from_build():
|
||||
keys.append("OPENPYPE_VERSION")
|
||||
|
||||
# Add mongo url if it's enabled
|
||||
if self._instance.context.data.get("deadlinePassMongoUrl"):
|
||||
keys.append("OPENPYPE_MONGO")
|
||||
|
|
|
|||
|
|
@ -7,6 +7,7 @@ from maya import cmds
|
|||
from openpype.pipeline import legacy_io, PublishXmlValidationError
|
||||
from openpype.settings import get_project_settings
|
||||
from openpype.tests.lib import is_in_tests
|
||||
from openpype.lib import is_running_from_build
|
||||
|
||||
import pyblish.api
|
||||
|
||||
|
|
@ -104,9 +105,13 @@ class MayaSubmitRemotePublishDeadline(pyblish.api.InstancePlugin):
|
|||
keys = [
|
||||
"FTRACK_API_USER",
|
||||
"FTRACK_API_KEY",
|
||||
"FTRACK_SERVER",
|
||||
"OPENPYPE_VERSION"
|
||||
"FTRACK_SERVER"
|
||||
]
|
||||
|
||||
# Add OpenPype version if we are running from build.
|
||||
if is_running_from_build():
|
||||
keys.append("OPENPYPE_VERSION")
|
||||
|
||||
environment = dict({key: os.environ[key] for key in keys
|
||||
if key in os.environ}, **legacy_io.Session)
|
||||
|
||||
|
|
|
|||
|
|
@ -10,6 +10,7 @@ import pyblish.api
|
|||
import nuke
|
||||
from openpype.pipeline import legacy_io
|
||||
from openpype.tests.lib import is_in_tests
|
||||
from openpype.lib import is_running_from_build
|
||||
|
||||
|
||||
class NukeSubmitDeadline(pyblish.api.InstancePlugin):
|
||||
|
|
@ -265,9 +266,13 @@ class NukeSubmitDeadline(pyblish.api.InstancePlugin):
|
|||
"PYBLISHPLUGINPATH",
|
||||
"NUKE_PATH",
|
||||
"TOOL_ENV",
|
||||
"FOUNDRY_LICENSE",
|
||||
"OPENPYPE_VERSION"
|
||||
"FOUNDRY_LICENSE"
|
||||
]
|
||||
|
||||
# Add OpenPype version if we are running from build.
|
||||
if is_running_from_build():
|
||||
keys.append("OPENPYPE_VERSION")
|
||||
|
||||
# Add mongo url if it's enabled
|
||||
if instance.context.data.get("deadlinePassMongoUrl"):
|
||||
keys.append("OPENPYPE_MONGO")
|
||||
|
|
|
|||
|
|
@ -20,6 +20,7 @@ from openpype.pipeline import (
|
|||
)
|
||||
from openpype.tests.lib import is_in_tests
|
||||
from openpype.pipeline.farm.patterning import match_aov_pattern
|
||||
from openpype.lib import is_running_from_build
|
||||
|
||||
|
||||
def get_resources(project_name, version, extension=None):
|
||||
|
|
@ -117,15 +118,17 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
|
|||
deadline_plugin = "OpenPype"
|
||||
targets = ["local"]
|
||||
|
||||
hosts = ["fusion", "maya", "nuke", "celaction", "aftereffects", "harmony"]
|
||||
hosts = ["fusion", "max", "maya", "nuke",
|
||||
"celaction", "aftereffects", "harmony"]
|
||||
|
||||
families = ["render.farm", "prerender.farm",
|
||||
"renderlayer", "imagesequence", "vrayscene"]
|
||||
"renderlayer", "imagesequence", "maxrender", "vrayscene"]
|
||||
|
||||
aov_filter = {"maya": [r".*([Bb]eauty).*"],
|
||||
"aftereffects": [r".*"], # for everything from AE
|
||||
"harmony": [r".*"], # for everything from AE
|
||||
"celaction": [r".*"]}
|
||||
"celaction": [r".*"],
|
||||
"max": [r".*"]}
|
||||
|
||||
environ_job_filter = [
|
||||
"OPENPYPE_METADATA_FILE"
|
||||
|
|
@ -136,10 +139,13 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
|
|||
"FTRACK_API_KEY",
|
||||
"FTRACK_SERVER",
|
||||
"AVALON_APP_NAME",
|
||||
"OPENPYPE_USERNAME",
|
||||
"OPENPYPE_VERSION"
|
||||
"OPENPYPE_USERNAME"
|
||||
]
|
||||
|
||||
# Add OpenPype version if we are running from build.
|
||||
if is_running_from_build():
|
||||
environ_keys.append("OPENPYPE_VERSION")
|
||||
|
||||
# custom deadline attributes
|
||||
deadline_department = ""
|
||||
deadline_pool = ""
|
||||
|
|
@ -292,8 +298,8 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
|
|||
"Group": self.deadline_group,
|
||||
"Pool": instance.data.get("primaryPool"),
|
||||
"SecondaryPool": instance.data.get("secondaryPool"),
|
||||
|
||||
"OutputDirectory0": output_dir
|
||||
# ensure the outputdirectory with correct slashes
|
||||
"OutputDirectory0": output_dir.replace("\\", "/")
|
||||
},
|
||||
"PluginInfo": {
|
||||
"Version": self.plugin_pype_version,
|
||||
|
|
@ -514,6 +520,7 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
|
|||
# toggle preview on if multipart is on
|
||||
|
||||
if instance_data.get("multipartExr"):
|
||||
self.log.debug("Adding preview tag because its multipartExr")
|
||||
preview = True
|
||||
self.log.debug("preview:{}".format(preview))
|
||||
new_instance = deepcopy(instance_data)
|
||||
|
|
@ -593,6 +600,9 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
|
|||
if instance["useSequenceForReview"]:
|
||||
# toggle preview on if multipart is on
|
||||
if instance.get("multipartExr", False):
|
||||
self.log.debug(
|
||||
"Adding preview tag because its multipartExr"
|
||||
)
|
||||
preview = True
|
||||
else:
|
||||
render_file_name = list(collection)[0]
|
||||
|
|
@ -700,8 +710,14 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
|
|||
if preview:
|
||||
if "ftrack" not in families:
|
||||
if os.environ.get("FTRACK_SERVER"):
|
||||
self.log.debug(
|
||||
"Adding \"ftrack\" to families because of preview tag."
|
||||
)
|
||||
families.append("ftrack")
|
||||
if "review" not in families:
|
||||
self.log.debug(
|
||||
"Adding \"review\" to families because of preview tag."
|
||||
)
|
||||
families.append("review")
|
||||
instance["families"] = families
|
||||
|
||||
|
|
@ -960,6 +976,7 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
|
|||
'''
|
||||
|
||||
render_job = None
|
||||
submission_type = ""
|
||||
if instance.data.get("toBeRenderedOn") == "deadline":
|
||||
render_job = data.pop("deadlineSubmissionJob", None)
|
||||
submission_type = "deadline"
|
||||
|
|
|
|||
|
|
@ -1390,6 +1390,8 @@ class CreateContext:
|
|||
self.autocreators = {}
|
||||
# Manual creators
|
||||
self.manual_creators = {}
|
||||
# Creators that are disabled
|
||||
self.disabled_creators = {}
|
||||
|
||||
self.convertors_plugins = {}
|
||||
self.convertor_items_by_id = {}
|
||||
|
|
@ -1667,6 +1669,7 @@ class CreateContext:
|
|||
|
||||
# Discover and prepare creators
|
||||
creators = {}
|
||||
disabled_creators = {}
|
||||
autocreators = {}
|
||||
manual_creators = {}
|
||||
report = discover_creator_plugins(return_report=True)
|
||||
|
|
@ -1703,6 +1706,9 @@ class CreateContext:
|
|||
self,
|
||||
self.headless
|
||||
)
|
||||
if not creator.enabled:
|
||||
disabled_creators[creator_identifier] = creator
|
||||
continue
|
||||
creators[creator_identifier] = creator
|
||||
if isinstance(creator, AutoCreator):
|
||||
autocreators[creator_identifier] = creator
|
||||
|
|
@ -1713,6 +1719,7 @@ class CreateContext:
|
|||
self.manual_creators = manual_creators
|
||||
|
||||
self.creators = creators
|
||||
self.disabled_creators = disabled_creators
|
||||
|
||||
def _reset_convertor_plugins(self):
|
||||
convertors_plugins = {}
|
||||
|
|
|
|||
|
|
@ -79,6 +79,10 @@ class SubsetConvertorPlugin(object):
|
|||
self._log = Logger.get_logger(self.__class__.__name__)
|
||||
return self._log
|
||||
|
||||
@property
|
||||
def host(self):
|
||||
return self._create_context.host
|
||||
|
||||
@abstractproperty
|
||||
def identifier(self):
|
||||
"""Converted identifier.
|
||||
|
|
|
|||
|
|
@ -70,7 +70,8 @@ def get_subset_name(
|
|||
host_name=None,
|
||||
default_template=None,
|
||||
dynamic_data=None,
|
||||
project_settings=None
|
||||
project_settings=None,
|
||||
family_filter=None,
|
||||
):
|
||||
"""Calculate subset name based on passed context and OpenPype settings.
|
||||
|
||||
|
|
@ -82,23 +83,35 @@ def get_subset_name(
|
|||
That's main reason why so many arguments are required to calculate subset
|
||||
name.
|
||||
|
||||
Option to pass family filter was added for special cases when creator or
|
||||
automated publishing require special subset name template which would be
|
||||
hard to maintain using its family value.
|
||||
Why not just pass the right family? -> Family is also used as fill
|
||||
value and for filtering of publish plugins.
|
||||
|
||||
Todos:
|
||||
Find better filtering options to avoid requirement of
|
||||
argument 'family_filter'.
|
||||
|
||||
Args:
|
||||
family (str): Instance family.
|
||||
variant (str): In most of the cases it is user input during creation.
|
||||
task_name (str): Task name on which context is instance created.
|
||||
asset_doc (dict): Queried asset document with its tasks in data.
|
||||
Used to get task type.
|
||||
project_name (str): Name of project on which is instance created.
|
||||
Important for project settings that are loaded.
|
||||
host_name (str): One of filtering criteria for template profile
|
||||
filters.
|
||||
default_template (str): Default template if any profile does not match
|
||||
passed context. Constant 'DEFAULT_SUBSET_TEMPLATE' is used if
|
||||
is not passed.
|
||||
dynamic_data (dict): Dynamic data specific for a creator which creates
|
||||
instance.
|
||||
project_settings (Union[Dict[str, Any], None]): Prepared settings for
|
||||
project. Settings are queried if not passed.
|
||||
project_name (Optional[str]): Name of project on which is instance
|
||||
created. Important for project settings that are loaded.
|
||||
host_name (Optional[str]): One of filtering criteria for template
|
||||
profile filters.
|
||||
default_template (Optional[str]): Default template if any profile does
|
||||
not match passed context. Constant 'DEFAULT_SUBSET_TEMPLATE'
|
||||
is used if is not passed.
|
||||
dynamic_data (Optional[Dict[str, Any]]): Dynamic data specific for
|
||||
a creator which creates instance.
|
||||
project_settings (Optional[Union[Dict[str, Any]]]): Prepared settings
|
||||
for project. Settings are queried if not passed.
|
||||
family_filter (Optional[str]): Use different family for subset template
|
||||
filtering. Value of 'family' is used when not passed.
|
||||
"""
|
||||
|
||||
if not family:
|
||||
|
|
@ -119,7 +132,7 @@ def get_subset_name(
|
|||
|
||||
template = get_subset_name_template(
|
||||
project_name,
|
||||
family,
|
||||
family_filter or family,
|
||||
task_name,
|
||||
task_type,
|
||||
host_name,
|
||||
|
|
|
|||
|
|
@ -14,16 +14,19 @@ from openpype.pipeline.editorial import (
|
|||
range_from_frames,
|
||||
make_sequence_collection
|
||||
)
|
||||
|
||||
from openpype.pipeline.publish import (
|
||||
get_publish_template_name
|
||||
)
|
||||
|
||||
class CollectOtioSubsetResources(pyblish.api.InstancePlugin):
|
||||
"""Get Resources for a subset version"""
|
||||
|
||||
label = "Collect OTIO Subset Resources"
|
||||
order = pyblish.api.CollectorOrder - 0.077
|
||||
order = pyblish.api.CollectorOrder + 0.491
|
||||
families = ["clip"]
|
||||
hosts = ["resolve", "hiero", "flame"]
|
||||
|
||||
|
||||
def process(self, instance):
|
||||
|
||||
if "audio" in instance.data["family"]:
|
||||
|
|
@ -35,14 +38,21 @@ class CollectOtioSubsetResources(pyblish.api.InstancePlugin):
|
|||
if not instance.data.get("versionData"):
|
||||
instance.data["versionData"] = {}
|
||||
|
||||
template_name = self.get_template_name(instance)
|
||||
anatomy = instance.context.data["anatomy"]
|
||||
publish_template_category = anatomy.templates[template_name]
|
||||
template = os.path.normpath(publish_template_category["path"])
|
||||
self.log.debug(
|
||||
">> template: {}".format(template))
|
||||
|
||||
handle_start = instance.data["handleStart"]
|
||||
handle_end = instance.data["handleEnd"]
|
||||
|
||||
# get basic variables
|
||||
otio_clip = instance.data["otioClip"]
|
||||
otio_avalable_range = otio_clip.available_range()
|
||||
media_fps = otio_avalable_range.start_time.rate
|
||||
available_duration = otio_avalable_range.duration.value
|
||||
otio_available_range = otio_clip.available_range()
|
||||
media_fps = otio_available_range.start_time.rate
|
||||
available_duration = otio_available_range.duration.value
|
||||
|
||||
# get available range trimmed with processed retimes
|
||||
retimed_attributes = get_media_range_with_retimes(
|
||||
|
|
@ -84,6 +94,11 @@ class CollectOtioSubsetResources(pyblish.api.InstancePlugin):
|
|||
frame_start = instance.data["frameStart"]
|
||||
frame_end = frame_start + (media_out - media_in)
|
||||
|
||||
# Fit start /end frame to media in /out
|
||||
if "{originalBasename}" in template:
|
||||
frame_start = media_in
|
||||
frame_end = media_out
|
||||
|
||||
# add to version data start and end range data
|
||||
# for loader plugins to be correctly displayed and loaded
|
||||
instance.data["versionData"].update({
|
||||
|
|
@ -153,7 +168,6 @@ class CollectOtioSubsetResources(pyblish.api.InstancePlugin):
|
|||
repre = self._create_representation(
|
||||
frame_start, frame_end, collection=collection)
|
||||
|
||||
instance.data["originalBasename"] = collection.format("{head}")
|
||||
else:
|
||||
_trim = False
|
||||
dirname, filename = os.path.split(media_ref.target_url)
|
||||
|
|
@ -168,8 +182,6 @@ class CollectOtioSubsetResources(pyblish.api.InstancePlugin):
|
|||
repre = self._create_representation(
|
||||
frame_start, frame_end, file=filename, trim=_trim)
|
||||
|
||||
instance.data["originalBasename"] = os.path.splitext(filename)[0]
|
||||
|
||||
instance.data["originalDirname"] = self.staging_dir
|
||||
|
||||
if repre:
|
||||
|
|
@ -225,3 +237,26 @@ class CollectOtioSubsetResources(pyblish.api.InstancePlugin):
|
|||
if kwargs.get("trim") is True:
|
||||
representation_data["tags"] = ["trim"]
|
||||
return representation_data
|
||||
|
||||
def get_template_name(self, instance):
|
||||
"""Return anatomy template name to use for integration"""
|
||||
|
||||
# Anatomy data is pre-filled by Collectors
|
||||
context = instance.context
|
||||
project_name = context.data["projectName"]
|
||||
|
||||
# Task can be optional in anatomy data
|
||||
host_name = context.data["hostName"]
|
||||
family = instance.data["family"]
|
||||
anatomy_data = instance.data["anatomyData"]
|
||||
task_info = anatomy_data.get("task") or {}
|
||||
|
||||
return get_publish_template_name(
|
||||
project_name,
|
||||
host_name,
|
||||
family,
|
||||
task_name=task_info.get("name"),
|
||||
task_type=task_info.get("type"),
|
||||
project_settings=context.data["project_settings"],
|
||||
logger=self.log
|
||||
)
|
||||
|
|
|
|||
|
|
@ -52,7 +52,16 @@ def _get_ffprobe_data(source):
|
|||
"-show_streams",
|
||||
source
|
||||
]
|
||||
proc = subprocess.Popen(command, stdout=subprocess.PIPE)
|
||||
kwargs = {
|
||||
"stdout": subprocess.PIPE,
|
||||
}
|
||||
if platform.system().lower() == "windows":
|
||||
kwargs["creationflags"] = (
|
||||
subprocess.CREATE_NEW_PROCESS_GROUP
|
||||
| getattr(subprocess, "DETACHED_PROCESS", 0)
|
||||
| getattr(subprocess, "CREATE_NO_WINDOW", 0)
|
||||
)
|
||||
proc = subprocess.Popen(command, **kwargs)
|
||||
out = proc.communicate()[0]
|
||||
if proc.returncode != 0:
|
||||
raise RuntimeError("Failed to run: %s" % command)
|
||||
|
|
@ -331,12 +340,18 @@ class ModifiedBurnins(ffmpeg_burnins.Burnins):
|
|||
)
|
||||
print("Launching command: {}".format(command))
|
||||
|
||||
proc = subprocess.Popen(
|
||||
command,
|
||||
stdout=subprocess.PIPE,
|
||||
stderr=subprocess.PIPE,
|
||||
shell=True
|
||||
)
|
||||
kwargs = {
|
||||
"stdout": subprocess.PIPE,
|
||||
"stderr": subprocess.PIPE,
|
||||
"shell": True,
|
||||
}
|
||||
if platform.system().lower() == "windows":
|
||||
kwargs["creationflags"] = (
|
||||
subprocess.CREATE_NEW_PROCESS_GROUP
|
||||
| getattr(subprocess, "DETACHED_PROCESS", 0)
|
||||
| getattr(subprocess, "CREATE_NO_WINDOW", 0)
|
||||
)
|
||||
proc = subprocess.Popen(command, **kwargs)
|
||||
|
||||
_stdout, _stderr = proc.communicate()
|
||||
if _stdout:
|
||||
|
|
|
|||
|
|
@ -36,6 +36,18 @@
|
|||
"scene_patches": [],
|
||||
"strict_error_checking": true
|
||||
},
|
||||
"MaxSubmitDeadline": {
|
||||
"enabled": true,
|
||||
"optional": false,
|
||||
"active": true,
|
||||
"use_published": true,
|
||||
"priority": 50,
|
||||
"chunk_size": 10,
|
||||
"group": "none",
|
||||
"deadline_pool": "",
|
||||
"deadline_pool_secondary": "",
|
||||
"framePerTask": 1
|
||||
},
|
||||
"NukeSubmitDeadline": {
|
||||
"enabled": true,
|
||||
"optional": false,
|
||||
|
|
@ -103,8 +115,11 @@
|
|||
],
|
||||
"harmony": [
|
||||
".*"
|
||||
],
|
||||
"max": [
|
||||
".*"
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -324,7 +324,8 @@
|
|||
"animation",
|
||||
"look",
|
||||
"rig",
|
||||
"camera"
|
||||
"camera",
|
||||
"renderlayer"
|
||||
],
|
||||
"task_types": [],
|
||||
"tasks": [],
|
||||
|
|
|
|||
8
openpype/settings/defaults/project_settings/max.json
Normal file
|
|
@ -0,0 +1,8 @@
|
|||
{
|
||||
"RenderSettings": {
|
||||
"default_render_image_folder": "renders/3dsmax",
|
||||
"aov_separator": "underscore",
|
||||
"image_format": "exr",
|
||||
"multipass": true
|
||||
}
|
||||
}
|
||||
|
|
@ -820,6 +820,11 @@
|
|||
"twoSidedLighting": true,
|
||||
"lineAAEnable": true,
|
||||
"multiSample": 8,
|
||||
"useDefaultMaterial": false,
|
||||
"wireframeOnShaded": false,
|
||||
"xray": false,
|
||||
"jointXray": false,
|
||||
"backfaceCulling": false,
|
||||
"ssaoEnable": false,
|
||||
"ssaoAmount": 1,
|
||||
"ssaoRadius": 16,
|
||||
|
|
|
|||
|
|
@ -10,11 +10,45 @@
|
|||
}
|
||||
},
|
||||
"stop_timer_on_application_exit": false,
|
||||
"publish": {
|
||||
"CollectRenderScene": {
|
||||
"enabled": false,
|
||||
"render_layer": "Main"
|
||||
"create": {
|
||||
"create_workfile": {
|
||||
"enabled": true,
|
||||
"default_variant": "Main",
|
||||
"default_variants": []
|
||||
},
|
||||
"create_review": {
|
||||
"enabled": true,
|
||||
"active_on_create": true,
|
||||
"default_variant": "Main",
|
||||
"default_variants": []
|
||||
},
|
||||
"create_render_scene": {
|
||||
"enabled": true,
|
||||
"active_on_create": false,
|
||||
"mark_for_review": true,
|
||||
"default_pass_name": "beauty",
|
||||
"default_variant": "Main",
|
||||
"default_variants": []
|
||||
},
|
||||
"create_render_layer": {
|
||||
"mark_for_review": true,
|
||||
"default_pass_name": "beauty",
|
||||
"default_variant": "Main",
|
||||
"default_variants": []
|
||||
},
|
||||
"create_render_pass": {
|
||||
"mark_for_review": true,
|
||||
"default_variant": "Main",
|
||||
"default_variants": []
|
||||
},
|
||||
"auto_detect_render": {
|
||||
"allow_group_rename": true,
|
||||
"group_name_template": "L{group_index}",
|
||||
"group_idx_offset": 10,
|
||||
"group_idx_padding": 3
|
||||
}
|
||||
},
|
||||
"publish": {
|
||||
"ExtractSequence": {
|
||||
"review_bg": [
|
||||
255,
|
||||
|
|
|
|||
|
|
@ -82,6 +82,10 @@
|
|||
"type": "schema",
|
||||
"name": "schema_project_slack"
|
||||
},
|
||||
{
|
||||
"type": "schema",
|
||||
"name": "schema_project_max"
|
||||
},
|
||||
{
|
||||
"type": "schema",
|
||||
"name": "schema_project_maya"
|
||||
|
|
|
|||
|
|
@ -204,6 +204,65 @@
|
|||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"type": "dict",
|
||||
"collapsible": true,
|
||||
"key": "MaxSubmitDeadline",
|
||||
"label": "3dsMax Submit to Deadline",
|
||||
"checkbox_key": "enabled",
|
||||
"children": [
|
||||
{
|
||||
"type": "boolean",
|
||||
"key": "enabled",
|
||||
"label": "Enabled"
|
||||
},
|
||||
{
|
||||
"type": "boolean",
|
||||
"key": "optional",
|
||||
"label": "Optional"
|
||||
},
|
||||
{
|
||||
"type": "boolean",
|
||||
"key": "active",
|
||||
"label": "Active"
|
||||
},
|
||||
{
|
||||
"type": "boolean",
|
||||
"key": "use_published",
|
||||
"label": "Use Published scene"
|
||||
},
|
||||
{
|
||||
"type": "number",
|
||||
"key": "priority",
|
||||
"label": "Priority"
|
||||
},
|
||||
{
|
||||
"type": "number",
|
||||
"key": "chunk_size",
|
||||
"label": "Chunk Size"
|
||||
},
|
||||
{
|
||||
"type": "text",
|
||||
"key": "group",
|
||||
"label": "Group Name"
|
||||
},
|
||||
{
|
||||
"type": "text",
|
||||
"key": "deadline_pool",
|
||||
"label": "Deadline pool"
|
||||
},
|
||||
{
|
||||
"type": "text",
|
||||
"key": "deadline_pool_secondary",
|
||||
"label": "Deadline pool (secondary)"
|
||||
},
|
||||
{
|
||||
"type": "number",
|
||||
"key": "framePerTask",
|
||||
"label": "Frame Per Task"
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"type": "dict",
|
||||
"collapsible": true,
|
||||
|
|
|
|||
|
|
@ -0,0 +1,56 @@
|
|||
{
|
||||
"type": "dict",
|
||||
"collapsible": true,
|
||||
"key": "max",
|
||||
"label": "Max",
|
||||
"is_file": true,
|
||||
"children": [
|
||||
{
|
||||
"type": "dict",
|
||||
"collapsible": true,
|
||||
"key": "RenderSettings",
|
||||
"label": "Render Settings",
|
||||
"children": [
|
||||
{
|
||||
"type": "text",
|
||||
"key": "default_render_image_folder",
|
||||
"label": "Default render image folder"
|
||||
},
|
||||
{
|
||||
"key": "aov_separator",
|
||||
"label": "AOV Separator character",
|
||||
"type": "enum",
|
||||
"multiselection": false,
|
||||
"default": "underscore",
|
||||
"enum_items": [
|
||||
{"dash": "- (dash)"},
|
||||
{"underscore": "_ (underscore)"},
|
||||
{"dot": ". (dot)"}
|
||||
]
|
||||
},
|
||||
{
|
||||
"key": "image_format",
|
||||
"label": "Output Image Format",
|
||||
"type": "enum",
|
||||
"multiselection": false,
|
||||
"defaults": "exr",
|
||||
"enum_items": [
|
||||
{"bmp": "bmp"},
|
||||
{"exr": "exr"},
|
||||
{"tif": "tif"},
|
||||
{"tiff": "tiff"},
|
||||
{"jpg": "jpg"},
|
||||
{"png": "png"},
|
||||
{"tga": "tga"},
|
||||
{"dds": "dds"}
|
||||
]
|
||||
},
|
||||
{
|
||||
"type": "boolean",
|
||||
"key": "multipass",
|
||||
"label": "multipass"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
|
|
@ -30,14 +30,14 @@
|
|||
{
|
||||
"type": "dict",
|
||||
"collapsible": true,
|
||||
"key": "publish",
|
||||
"label": "Publish plugins",
|
||||
"key": "create",
|
||||
"label": "Create plugins",
|
||||
"children": [
|
||||
{
|
||||
"type": "dict",
|
||||
"collapsible": true,
|
||||
"key": "CollectRenderScene",
|
||||
"label": "Collect Render Scene",
|
||||
"key": "create_workfile",
|
||||
"label": "Create Workfile",
|
||||
"is_group": true,
|
||||
"checkbox_key": "enabled",
|
||||
"children": [
|
||||
|
|
@ -47,16 +47,200 @@
|
|||
"label": "Enabled"
|
||||
},
|
||||
{
|
||||
"type": "label",
|
||||
"label": "It is possible to fill <b>'render_layer'</b> or <b>'variant'</b> in subset name template with custom value.<br/>- value of <b>'render_pass'</b> is always \"beauty\"."
|
||||
"type": "text",
|
||||
"key": "default_variant",
|
||||
"label": "Default variant"
|
||||
},
|
||||
{
|
||||
"type": "text",
|
||||
"key": "render_layer",
|
||||
"label": "Render Layer"
|
||||
"type": "list",
|
||||
"key": "default_variants",
|
||||
"label": "Default variants",
|
||||
"object_type": {
|
||||
"type": "text"
|
||||
}
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"type": "dict",
|
||||
"collapsible": true,
|
||||
"key": "create_review",
|
||||
"label": "Create Review",
|
||||
"is_group": true,
|
||||
"checkbox_key": "enabled",
|
||||
"children": [
|
||||
{
|
||||
"type": "boolean",
|
||||
"key": "enabled",
|
||||
"label": "Enabled"
|
||||
},
|
||||
{
|
||||
"type": "boolean",
|
||||
"key": "active_on_create",
|
||||
"label": "Active by default"
|
||||
},
|
||||
{
|
||||
"type": "text",
|
||||
"key": "default_variant",
|
||||
"label": "Default variant"
|
||||
},
|
||||
{
|
||||
"type": "list",
|
||||
"key": "default_variants",
|
||||
"label": "Default variants",
|
||||
"object_type": {
|
||||
"type": "text"
|
||||
}
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"type": "dict",
|
||||
"collapsible": true,
|
||||
"key": "create_render_scene",
|
||||
"label": "Create Render Scene",
|
||||
"is_group": true,
|
||||
"checkbox_key": "enabled",
|
||||
"children": [
|
||||
{
|
||||
"type": "boolean",
|
||||
"key": "enabled",
|
||||
"label": "Enabled"
|
||||
},
|
||||
{
|
||||
"type": "boolean",
|
||||
"key": "active_on_create",
|
||||
"label": "Active by default"
|
||||
},
|
||||
{
|
||||
"type": "boolean",
|
||||
"key": "mark_for_review",
|
||||
"label": "Review by default"
|
||||
},
|
||||
{
|
||||
"type": "text",
|
||||
"key": "default_pass_name",
|
||||
"label": "Default beauty pass"
|
||||
},
|
||||
{
|
||||
"type": "text",
|
||||
"key": "default_variant",
|
||||
"label": "Default variant"
|
||||
},
|
||||
{
|
||||
"type": "list",
|
||||
"key": "default_variants",
|
||||
"label": "Default variants",
|
||||
"object_type": {
|
||||
"type": "text"
|
||||
}
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"type": "dict",
|
||||
"collapsible": true,
|
||||
"key": "create_render_layer",
|
||||
"label": "Create Render Layer",
|
||||
"is_group": true,
|
||||
"children": [
|
||||
{
|
||||
"type": "boolean",
|
||||
"key": "mark_for_review",
|
||||
"label": "Review by default"
|
||||
},
|
||||
{
|
||||
"type": "text",
|
||||
"key": "default_pass_name",
|
||||
"label": "Default beauty pass"
|
||||
},
|
||||
{
|
||||
"type": "text",
|
||||
"key": "default_variant",
|
||||
"label": "Default variant"
|
||||
},
|
||||
{
|
||||
"type": "list",
|
||||
"key": "default_variants",
|
||||
"label": "Default variants",
|
||||
"object_type": {
|
||||
"type": "text"
|
||||
}
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"type": "dict",
|
||||
"collapsible": true,
|
||||
"key": "create_render_pass",
|
||||
"label": "Create Render Pass",
|
||||
"is_group": true,
|
||||
"children": [
|
||||
{
|
||||
"type": "boolean",
|
||||
"key": "mark_for_review",
|
||||
"label": "Review by default"
|
||||
},
|
||||
{
|
||||
"type": "text",
|
||||
"key": "default_variant",
|
||||
"label": "Default variant"
|
||||
},
|
||||
{
|
||||
"type": "list",
|
||||
"key": "default_variants",
|
||||
"label": "Default variants",
|
||||
"object_type": {
|
||||
"type": "text"
|
||||
}
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"type": "dict",
|
||||
"collapsible": true,
|
||||
"key": "auto_detect_render",
|
||||
"label": "Auto-Detect Create Render",
|
||||
"is_group": true,
|
||||
"children": [
|
||||
{
|
||||
"type": "label",
|
||||
"label": "The creator tries to auto-detect Render Layers and Render Passes in scene. For Render Layers is used group name as a variant and for Render Passes is used TVPaint layer name.<br/><br/>Group names can be renamed by their used order in scene. The renaming template where can be used <b>{group_index}</b> formatting key which is filled by \"used position index of group\".<br/>- Template: <b>L{group_index}</b><br/>- Group offset: <b>10</b><br/>- Group padding: <b>3</b><br/>Would create group names \"<b>L010</b>\", \"<b>L020</b>\", ..."
|
||||
},
|
||||
{
|
||||
"type": "boolean",
|
||||
"key": "allow_group_rename",
|
||||
"label": "Allow group rename"
|
||||
},
|
||||
{
|
||||
"type": "text",
|
||||
"key": "group_name_template",
|
||||
"label": "Group name template"
|
||||
},
|
||||
{
|
||||
"key": "group_idx_offset",
|
||||
"label": "Group index Offset",
|
||||
"type": "number",
|
||||
"decimal": 0,
|
||||
"minimum": 1
|
||||
},
|
||||
{
|
||||
"key": "group_idx_padding",
|
||||
"type": "number",
|
||||
"label": "Group index Padding",
|
||||
"decimal": 0,
|
||||
"minimum": 1
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"type": "dict",
|
||||
"collapsible": true,
|
||||
"key": "publish",
|
||||
"label": "Publish plugins",
|
||||
"children": [
|
||||
{
|
||||
"type": "dict",
|
||||
"collapsible": true,
|
||||
|
|
|
|||
|
|
@ -184,6 +184,10 @@
|
|||
{
|
||||
"type": "splitter"
|
||||
},
|
||||
{
|
||||
"type": "label",
|
||||
"label": "<b>Display</b>"
|
||||
},
|
||||
{
|
||||
"type":"boolean",
|
||||
"key": "renderDepthOfField",
|
||||
|
|
@ -221,6 +225,31 @@
|
|||
{
|
||||
"type": "splitter"
|
||||
},
|
||||
{
|
||||
"type": "boolean",
|
||||
"key": "useDefaultMaterial",
|
||||
"label": "Use Default Material"
|
||||
},
|
||||
{
|
||||
"type": "boolean",
|
||||
"key": "wireframeOnShaded",
|
||||
"label": "Wireframe On Shaded"
|
||||
},
|
||||
{
|
||||
"type": "boolean",
|
||||
"key": "xray",
|
||||
"label": "X-Ray"
|
||||
},
|
||||
{
|
||||
"type": "boolean",
|
||||
"key": "jointXray",
|
||||
"label": "X-Ray Joints"
|
||||
},
|
||||
{
|
||||
"type": "boolean",
|
||||
"key": "backfaceCulling",
|
||||
"label": "Backface Culling"
|
||||
},
|
||||
{
|
||||
"type": "boolean",
|
||||
"key": "ssaoEnable",
|
||||
|
|
|
|||
|
|
@ -385,6 +385,7 @@ class InstanceCardWidget(CardWidget):
|
|||
|
||||
self._last_subset_name = None
|
||||
self._last_variant = None
|
||||
self._last_label = None
|
||||
|
||||
icon_widget = IconValuePixmapLabel(group_icon, self)
|
||||
icon_widget.setObjectName("FamilyIconLabel")
|
||||
|
|
@ -462,14 +463,17 @@ class InstanceCardWidget(CardWidget):
|
|||
def _update_subset_name(self):
|
||||
variant = self.instance["variant"]
|
||||
subset_name = self.instance["subset"]
|
||||
label = self.instance.label
|
||||
if (
|
||||
variant == self._last_variant
|
||||
and subset_name == self._last_subset_name
|
||||
and label == self._last_label
|
||||
):
|
||||
return
|
||||
|
||||
self._last_variant = variant
|
||||
self._last_subset_name = subset_name
|
||||
self._last_label = label
|
||||
# Make `variant` bold
|
||||
label = html_escape(self.instance.label)
|
||||
found_parts = set(re.findall(variant, label, re.IGNORECASE))
|
||||
|
|
|
|||
|
|
@ -1,3 +1,3 @@
|
|||
# -*- coding: utf-8 -*-
|
||||
"""Package declaring Pype version."""
|
||||
__version__ = "3.15.1"
|
||||
__version__ = "3.15.2-nightly.1"
|
||||
|
|
|
|||
|
|
@ -6,89 +6,77 @@ sidebar_label: TVPaint
|
|||
|
||||
- [Work Files](artist_tools_workfiles)
|
||||
- [Load](artist_tools_loader)
|
||||
- [Create](artist_tools_creator)
|
||||
- [Subset Manager](artist_tools_subset_manager)
|
||||
- [Scene Inventory](artist_tools_inventory)
|
||||
- [Publish](artist_tools_publisher)
|
||||
- [Library](artist_tools_library)
|
||||
|
||||
|
||||
## Setup
|
||||
When you launch TVPaint with OpenPype for the very first time it is necessary to do some additional steps. Right after the TVPaint launching a few system windows will pop up.
|
||||
When you launch TVPaint with OpenPype for the very first time it is necessary to do some additional steps. Right after the TVPaint launching a few system windows will pop up.
|
||||
|
||||

|
||||
|
||||
Choose `Replace the file in the destination`. Then another window shows up.
|
||||
Choose `Replace the file in the destination`. Then another window shows up.
|
||||
|
||||

|
||||
|
||||
Click on `Continue`.
|
||||
|
||||
After opening TVPaint go to the menu bar: `Windows → Plugins → OpenPype`.
|
||||
After opening TVPaint go to the menu bar: `Windows → Plugins → OpenPype`.
|
||||
|
||||

|
||||
|
||||
Another TVPaint window pop up. Please press `Yes`. This window will be presented in every single TVPaint launching. Unfortunately, there is no other way how to workaround it.
|
||||
Another TVPaint window pop up. Please press `Yes`. This window will be presented in every single TVPaint launching. Unfortunately, there is no other way how to workaround it.
|
||||
|
||||

|
||||
|
||||
Now OpenPype Tools menu is in your TVPaint work area.
|
||||
Now OpenPype Tools menu is in your TVPaint work area.
|
||||
|
||||

|
||||
|
||||
You can start your work.
|
||||
You can start your work.
|
||||
|
||||
---
|
||||
|
||||
## Usage
|
||||
In TVPaint you can find the Tools in OpenPype menu extension. The OpenPype Tools menu should be available in your work area. However, sometimes it happens that the Tools menu is hidden. You can display the extension panel by going to `Windows -> Plugins -> OpenPype`.
|
||||
|
||||
|
||||
## Create
|
||||
In TVPaint you can create and publish **[Reviews](#review)**, **[Workfile](#workfile)**, **[Render Passes](#render-pass)** and **[Render Layers](#render-layer)**.
|
||||
|
||||
You have the possibility to organize your layers by using `Color group`.
|
||||
|
||||
On the bottom left corner of your timeline, you will note a `Color group` button.
|
||||
|
||||

|
||||
|
||||
It allows you to choose a group by checking one of the colors of the color list.
|
||||
|
||||

|
||||
|
||||
The timeline's animation layer can be marked by the color you pick from your Color group. Layers in the timeline with the same color are gathered into a group represents one render layer.
|
||||
|
||||

|
||||
## Create & Publish
|
||||
To be able to publish, you have to mark what should be published. The marking part is called **Create**. In TVPaint you can create and publish **[Reviews](#review)**, **[Workfile](#workfile)**, **[Render Layers](#render-layer)** and **[Render Passes](#render-pass)**.
|
||||
|
||||
:::important
|
||||
OpenPype specifically never tries to guess what you want to publish from the scene. Therefore, you have to tell OpenPype what you want to publish. There are three ways how to publish render from the scene.
|
||||
TVPaint integration tries to not guess what you want to publish from the scene. Therefore, you should tell what you want to publish.
|
||||
:::
|
||||
|
||||
When you want to publish `review` or `render layer` or `render pass`, open the `Creator` through the Tools menu `Create` button.
|
||||

|
||||
|
||||
### Review
|
||||
`Review` renders the whole file as is and sends the resulting QuickTime to Ftrack.
|
||||
- Is automatically created during publishing.
|
||||
`Review` will render all visible layers and create a reviewable output.
|
||||
- Is automatically created without any manual work.
|
||||
- You can disable the created instance if you want to skip review.
|
||||
|
||||
### Workfile
|
||||
`Workfile` stores the source workfile as is during publishing (e.g. for backup).
|
||||
- Is automatically created during publishing.
|
||||
`Workfile` integrate the source TVPaint file during publishing. Publishing of workfile is useful for backups.
|
||||
- Is automatically created without any manual work.
|
||||
- You can disable the created instance if you want to skip review.
|
||||
|
||||
### Render Layer
|
||||
|
||||
<div class="row markdown">
|
||||
<div class="col col--6 markdown">
|
||||
|
||||
Render Layer bakes all the animation layers of one particular color group together.
|
||||
|
||||
Render Layer bakes all the animation layers of one particular color group together.
|
||||
- In the **Create** tab, pick `Render Layer`
|
||||
- Fill `variant`, type in the name that the final published RenderLayer should have according to the naming convention in your studio. *(L10, BG, Hero, etc.)*
|
||||
- Color group will be renamed to the **variant** value
|
||||
- Choose color group from combobox
|
||||
- or select a layer of a particular color and set combobox to **<Use selection>**
|
||||
- Hit `Create` button
|
||||
|
||||
- Choose any amount of animation layers that need to be rendered together and assign them a color group.
|
||||
- Select any layer of a particular color
|
||||
- Go to `Creator` and choose `RenderLayer`.
|
||||
- In the `Subset`, type in the name that the final published RenderLayer should have according to the naming convention in your studio. *(L10, BG, Hero, etc.)*
|
||||
- Press `Create`
|
||||
- When you run [publish](#publish), the whole color group will be rendered together and published as a single `RenderLayer`
|
||||
After creating a RenderLayer, choose any amount of animation layers that need to be rendered together and assign them the color group.
|
||||
|
||||
You can change `variant` later in **Publish** tab.
|
||||
|
||||
</div>
|
||||
<div class="col col--6 markdown">
|
||||
|
|
@ -97,27 +85,45 @@ Render Layer bakes all the animation layers of one particular color group togeth
|
|||
|
||||
</div>
|
||||
</div>
|
||||
<br/>
|
||||
|
||||
**How to mark TVPaint layer to a group**
|
||||
|
||||
In the bottom left corner of your timeline, you will note a **Color group** button.
|
||||
|
||||

|
||||
|
||||
It allows you to choose a group by checking one of the colors of the color list.
|
||||
|
||||

|
||||
|
||||
The timeline's animation layers can be marked by the color you pick from your Color group. Layers in the timeline with the same color are gathered into a group that represents one render layer.
|
||||
|
||||

|
||||
|
||||
|
||||
### Render Pass
|
||||
|
||||
Render Passes are smaller individual elements of a Render Layer. A `character` render layer might
|
||||
Render Passes are smaller individual elements of a [Render Layer](artist_hosts_tvpaint.md#render-layer). A `character` render layer might
|
||||
consist of multiple render passes such as `Line`, `Color` and `Shadow`.
|
||||
|
||||
Render Passes are specific because they have to belong to a particular Render Layer. You have to select to which Render Layer the pass belongs. Try to refresh if you don't see a specific Render Layer in the options.
|
||||
|
||||
<div class="row markdown">
|
||||
<div class="col col--6 markdown">
|
||||
Render Passes are specific because they have to belong to a particular layer. If you try to create a render pass and did not create any render layers before, an error message will pop up.
|
||||
|
||||
When you want to create `RenderPass`
|
||||
- choose one or several animation layers within one color group that you want to publish
|
||||
- In the Creator, pick `RenderPass`
|
||||
- Fill the `Subset` with the name of your pass, e.g. `Color`.
|
||||
When you want to create Render Pass
|
||||
- choose one or several TVPaint layers.
|
||||
- in the **Create** tab, pick `Render Pass`.
|
||||
- fill the `variant` with desired name of pass, e.g. `Color`.
|
||||
- select the Render Layer you want the Render Pass to belong to from the combobox.
|
||||
- if you don't see the new Render Layer, try refreshing first.
|
||||
- Press `Create`
|
||||
|
||||
After creating a Render Pass, select the TVPaint layers that should be marked with the color group of the Render Layer.
|
||||
|
||||
You can change `variant` or Render Layer later in **Publish** tab.
|
||||
|
||||
</div>
|
||||
<div class="col col--6 markdown">
|
||||
|
||||
|
|
@ -126,52 +132,26 @@ When you want to create `RenderPass`
|
|||
</div>
|
||||
</div>
|
||||
|
||||
:::warning
|
||||
You cannot change TVPaint layer name once you mark it as part of Render Pass. You would have to remove created Render Pass and create it again with new TVPaint layer name.
|
||||
:::
|
||||
|
||||
<br></br>
|
||||
|
||||
In this example, OpenPype will render the selected animation layers within the given color group. I.e. the layers *L020_colour_fx*, *L020_colour_mouth*, and *L020_colour_eye* will be rendered as one pass belonging to the yellow RenderLayer.
|
||||
In this example, OpenPype will render the selected animation layers within the given color group. I.e. the layers *L020_colour_fx*, *L020_colour_mouth*, and *L020_colour_eye* will be rendered as one pass belonging to the yellow RenderLayer.
|
||||
|
||||

|
||||
|
||||
|
||||
:::note
|
||||
You can check your RenderPasses and RenderLayers in [Subset Manager](#subset-manager) or you can start publishing. The publisher will show you a collection of all instances on the left side.
|
||||
:::
|
||||
|
||||
|
||||
---
|
||||
|
||||
## Publish
|
||||
|
||||
<div class="row markdown">
|
||||
<div class="col col--6 markdown">
|
||||
|
||||
Now that you have created the required instances, you can publish them via `Publish` tool.
|
||||
- Click on `Publish` in OpenPype Tools menu.
|
||||
- wait until all instances are collected.
|
||||
- You can check on the left side whether all your instances have been created and are ready for publishing.
|
||||
Now that you have created the required instances, you can publish them.
|
||||
- Fill the comment on the bottom of the window.
|
||||
- Press the `Play` button to publish
|
||||
|
||||
</div>
|
||||
<div class="col col--6 markdown">
|
||||
|
||||

|
||||
|
||||
</div>
|
||||
</div>
|
||||
|
||||
Once the `Publisher` turns green, your renders have been published.
|
||||
- Double check enabled instance and their context.
|
||||
- Press `Publish`.
|
||||
- Wait to finish.
|
||||
- Once the `Publisher` turns green, your renders have been published.
|
||||
|
||||
---
|
||||
|
||||
## Subset Manager
|
||||
All created instances (render layers, passes, and reviews) will be shown as a simple list. If you don't want to publish some, right click on the item in the list and select `Remove instance`.
|
||||
|
||||

|
||||
|
||||
---
|
||||
|
||||
## Load
|
||||
## Load
|
||||
When you want to load existing published work you can reach the `Loader` through the OpenPype Tools `Load` button.
|
||||
|
||||
The supported families for TVPaint are:
|
||||
|
|
@ -192,4 +172,4 @@ Scene Inventory shows you everything that you have loaded into your scene using
|
|||
|
||||

|
||||
|
||||
You can switch to a previous version of the file or update it to the latest or delete items.
|
||||
You can switch to a previous version of the file or update it to the latest or delete items.
|
||||
|
|
|
|||
|
Before Width: | Height: | Size: 28 KiB After Width: | Height: | Size: 170 KiB |
|
Before Width: | Height: | Size: 36 KiB After Width: | Height: | Size: 130 KiB |
|
Before Width: | Height: | Size: 3.5 KiB After Width: | Height: | Size: 6.5 KiB |
BIN
website/docs/assets/tvp_publisher.png
Normal file
|
After Width: | Height: | Size: 196 KiB |