Merge branch 'bugfix/OP-4993_Anchored-publishing-of-sources-with-padding-hashes-duplication' into bugfix/OP-4914_Anchored-publishing-issues

This commit is contained in:
Jakub Jezek 2023-02-24 16:45:34 +01:00
commit 3f89a0d3e5
No known key found for this signature in database
GPG key ID: 730D7C02726179A7
132 changed files with 5433 additions and 3030 deletions

View file

@ -0,0 +1,47 @@
name: Milestone Release [trigger]
on:
workflow_dispatch:
inputs:
milestone:
required: true
release-type:
type: choice
description: What release should be created
options:
- release
- pre-release
milestone:
types: closed
jobs:
milestone-title:
runs-on: ubuntu-latest
outputs:
milestone: ${{ steps.milestoneTitle.outputs.value }}
steps:
- name: Switch input milestone
uses: haya14busa/action-cond@v1
id: milestoneTitle
with:
cond: ${{ inputs.milestone == '' }}
if_true: ${{ github.event.milestone.title }}
if_false: ${{ inputs.milestone }}
- name: Print resulting milestone
run: |
echo "${{ steps.milestoneTitle.outputs.value }}"
call-ci-tools-milestone-release:
needs: milestone-title
uses: ynput/ci-tools/.github/workflows/milestone_release_ref.yml@main
with:
milestone: ${{ needs.milestone-title.outputs.milestone }}
repo-owner: ${{ github.event.repository.owner.login }}
repo-name: ${{ github.event.repository.name }}
version-py-path: "./openpype/version.py"
pyproject-path: "./pyproject.toml"
secrets:
token: ${{ secrets.YNPUT_BOT_TOKEN }}
user_email: ${{ secrets.CI_EMAIL }}
user_name: ${{ secrets.CI_USER }}

View file

@ -1,76 +0,0 @@
name: Stable Release
on:
release:
types:
- prereleased
jobs:
create_release:
runs-on: ubuntu-latest
if: github.actor != 'pypebot'
steps:
- name: 🚛 Checkout Code
uses: actions/checkout@v2
with:
fetch-depth: 0
- name: Set up Python
uses: actions/setup-python@v2
with:
python-version: 3.9
- name: Install Python requirements
run: pip install gitpython semver PyGithub
- name: 💉 Inject new version into files
id: version
run: |
NEW_VERSION=$(python ./tools/ci_tools.py --finalize ${GITHUB_REF#refs/*/})
LAST_VERSION=$(python ./tools/ci_tools.py --lastversion release)
echo "current_version=${GITHUB_REF#refs/*/}" >> $GITHUB_OUTPUT
echo "last_release=${LAST_VERSION}" >> $GITHUB_OUTPUT
echo "release_tag=${NEW_VERSION}" >> $GITHUB_OUTPUT
- name: 💾 Commit and Tag
id: git_commit
if: steps.version.outputs.release_tag != 'skip'
run: |
git config user.email ${{ secrets.CI_EMAIL }}
git config user.name ${{ secrets.CI_USER }}
git add .
git commit -m "[Automated] Release"
tag_name="${{ steps.version.outputs.release_tag }}"
git tag -a $tag_name -m "stable release"
- name: 🔏 Push to protected main branch
if: steps.version.outputs.release_tag != 'skip'
uses: CasperWA/push-protected@v2.10.0
with:
token: ${{ secrets.YNPUT_BOT_TOKEN }}
branch: main
tags: true
unprotect_reviews: true
- name: 🚀 Github Release
if: steps.version.outputs.release_tag != 'skip'
uses: ncipollo/release-action@v1
with:
tag: ${{ steps.version.outputs.release_tag }}
token: ${{ secrets.YNPUT_BOT_TOKEN }}
- name: ☠ Delete Pre-release
if: steps.version.outputs.release_tag != 'skip'
uses: cb80/delrel@latest
with:
tag: "${{ steps.version.outputs.current_version }}"
- name: 🔁 Merge main back to develop
if: steps.version.outputs.release_tag != 'skip'
uses: everlytic/branch-merge@1.1.0
with:
github_token: ${{ secrets.YNPUT_BOT_TOKEN }}
source_ref: 'main'
target_branch: 'develop'
commit_message_template: '[Automated] Merged release {source_ref} into {target_branch}'

File diff suppressed because it is too large

View file

@ -8,7 +8,6 @@ OpenPype
[![documentation](https://github.com/pypeclub/pype/actions/workflows/documentation.yml/badge.svg)](https://github.com/pypeclub/pype/actions/workflows/documentation.yml) ![GitHub VFX Platform](https://img.shields.io/badge/vfx%20platform-2022-lightgrey?labelColor=303846)
this
Introduction
------------

View file

@ -6,8 +6,7 @@ from openpype.hosts.aftereffects import api
from openpype.pipeline import (
Creator,
CreatedInstance,
CreatorError,
legacy_io,
CreatorError
)
from openpype.hosts.aftereffects.api.pipeline import cache_and_get_instances
from openpype.lib import prepare_template_data
@ -127,7 +126,7 @@ class RenderCreator(Creator):
subset_change = _changes.get("subset")
if subset_change:
api.get_stub().rename_item(created_inst.data["members"][0],
subset_change[1])
subset_change.new_value)
def remove_instances(self, instances):
for instance in instances:
@ -195,7 +194,7 @@ class RenderCreator(Creator):
instance_data.pop("uuid")
if not instance_data.get("task"):
instance_data["task"] = legacy_io.Session.get("AVALON_TASK")
instance_data["task"] = self.create_context.get_current_task_name()
if not instance_data.get("creator_attributes"):
is_old_farm = instance_data["family"] != "renderLocal"

View file

@ -2,8 +2,7 @@ import openpype.hosts.aftereffects.api as api
from openpype.client import get_asset_by_name
from openpype.pipeline import (
AutoCreator,
CreatedInstance,
legacy_io,
CreatedInstance
)
from openpype.hosts.aftereffects.api.pipeline import cache_and_get_instances
@ -38,10 +37,11 @@ class AEWorkfileCreator(AutoCreator):
existing_instance = instance
break
project_name = legacy_io.Session["AVALON_PROJECT"]
asset_name = legacy_io.Session["AVALON_ASSET"]
task_name = legacy_io.Session["AVALON_TASK"]
host_name = legacy_io.Session["AVALON_APP"]
context = self.create_context
project_name = context.get_current_project_name()
asset_name = context.get_current_asset_name()
task_name = context.get_current_task_name()
host_name = context.host_name
if existing_instance is None:
asset_doc = get_asset_by_name(project_name, asset_name)

View file

@ -143,6 +143,9 @@ class ExtractSubsetResources(publish.Extractor):
# create staging dir path
staging_dir = self.staging_dir(instance)
# append staging dir for later cleanup
instance.context.data["cleanupFullPaths"].append(staging_dir)
# add default preset type for thumbnail and reviewable video
# update them with settings and override in case the same
# are found in there
@ -548,30 +551,3 @@ class ExtractSubsetResources(publish.Extractor):
"Path `{}` is containing more that one clip".format(path)
)
return clips[0]
def staging_dir(self, instance):
"""Provide a temporary directory in which to store extracted files
Upon calling this method the staging directory is stored inside
the instance.data['stagingDir']
"""
staging_dir = instance.data.get('stagingDir', None)
openpype_temp_dir = os.getenv("OPENPYPE_TEMP_DIR")
if not staging_dir:
if openpype_temp_dir and os.path.exists(openpype_temp_dir):
staging_dir = os.path.normpath(
tempfile.mkdtemp(
prefix="pyblish_tmp_",
dir=openpype_temp_dir
)
)
else:
staging_dir = os.path.normpath(
tempfile.mkdtemp(prefix="pyblish_tmp_")
)
instance.data['stagingDir'] = staging_dir
instance.context.data["cleanupFullPaths"].append(staging_dir)
return staging_dir

View file

@ -210,7 +210,8 @@ def switch_item(container,
if any(not x for x in [asset_name, subset_name, representation_name]):
repre_id = container["representation"]
representation = get_representation_by_id(project_name, repre_id)
repre_parent_docs = get_representation_parents(representation)
repre_parent_docs = get_representation_parents(
project_name, representation)
if repre_parent_docs:
version, subset, asset, _ = repre_parent_docs
else:

View file

@ -36,7 +36,7 @@ class FusionPrelaunch(PreLaunchHook):
"Make sure the environment in fusion settings has "
"'FUSION_PYTHON3_HOME' set correctly and make sure "
"Python 3 is installed in the given path."
f"\n\nPYTHON36: {fusion_python3_home}"
f"\n\nPYTHON PATH: {fusion_python3_home}"
)
self.log.info(f"Setting {py3_var}: '{py3_dir}'...")

View file

@ -80,6 +80,7 @@ class CollectInstances(pyblish.api.ContextPlugin):
"outputDir": os.path.dirname(path),
"ext": ext, # todo: should be redundant
"label": label,
"task": context.data["task"],
"frameStart": context.data["frameStart"],
"frameEnd": context.data["frameEnd"],
"frameStartHandle": context.data["frameStartHandle"],

View file

@ -1,6 +1,4 @@
import os
from pprint import pformat
import pyblish.api
from openpype.hosts.fusion.api import comp_lock_and_undo_chunk
@ -23,23 +21,53 @@ class Fusionlocal(pyblish.api.InstancePlugin):
# This plug-in runs only once and thus assumes all instances
# currently will render the same frame range
context = instance.context
key = "__hasRun{}".format(self.__class__.__name__)
key = f"__hasRun{self.__class__.__name__}"
if context.data.get(key, False):
return
else:
context.data[key] = True
current_comp = context.data["currentComp"]
context.data[key] = True
self.render_once(context)
frame_start = context.data["frameStartHandle"]
frame_end = context.data["frameEndHandle"]
path = instance.data["path"]
output_dir = instance.data["outputDir"]
ext = os.path.splitext(os.path.basename(path))[-1]
basename = os.path.basename(path)
head, ext = os.path.splitext(basename)
files = [
f"{head}{str(frame).zfill(4)}{ext}"
for frame in range(frame_start, frame_end + 1)
]
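# e.g. for head "beauty" and frames 990-1010 this yields
# ["beauty0990.exr", ..., "beauty1010.exr"] (fixed 4-digit padding)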
repre = {
'name': ext[1:],
'ext': ext[1:],
'frameStart': f"%0{len(str(frame_end))}d" % frame_start,
'files': files,
"stagingDir": output_dir,
}
if "representations" not in instance.data:
instance.data["representations"] = []
instance.data["representations"].append(repre)
# review representation
repre_preview = repre.copy()
repre_preview["name"] = repre_preview["ext"] = "mp4"
repre_preview["tags"] = ["review", "ftrackreview", "delete"]
instance.data["representations"].append(repre_preview)
def render_once(self, context):
"""Render context comp only once, even with more render instances"""
current_comp = context.data["currentComp"]
frame_start = context.data["frameStartHandle"]
frame_end = context.data["frameEndHandle"]
self.log.info("Starting render")
self.log.info("Start frame: {}".format(frame_start))
self.log.info("End frame: {}".format(frame_end))
self.log.info(f"Start frame: {frame_start}")
self.log.info(f"End frame: {frame_end}")
with comp_lock_and_undo_chunk(current_comp):
result = current_comp.Render({
@ -48,26 +76,5 @@ class Fusionlocal(pyblish.api.InstancePlugin):
"Wait": True
})
if "representations" not in instance.data:
instance.data["representations"] = []
collected_frames = os.listdir(output_dir)
repre = {
'name': ext[1:],
'ext': ext[1:],
'frameStart': "%0{}d".format(len(str(frame_end))) % frame_start,
'files': collected_frames,
"stagingDir": output_dir,
}
instance.data["representations"].append(repre)
# review representation
repre_preview = repre.copy()
repre_preview["name"] = repre_preview["ext"] = "mp4"
repre_preview["tags"] = ["review", "preview", "ftrackreview", "delete"]
instance.data["representations"].append(repre_preview)
self.log.debug(f"_ instance.data: {pformat(instance.data)}")
if not result:
raise RuntimeError("Comp render failed")

View file

@ -126,10 +126,6 @@ def check_inventory():
def application_launch(event):
"""Event that is executed after Harmony is launched."""
# FIXME: This is breaking server <-> client communication.
# It is now moved so it is manually called.
# ensure_scene_settings()
# check_inventory()
# fills OPENPYPE_HARMONY_JS
pype_harmony_path = Path(__file__).parent.parent / "js" / "PypeHarmony.js"
pype_harmony_js = pype_harmony_path.read_text()
@ -146,6 +142,9 @@ def application_launch(event):
harmony.send({"script": script})
inject_avalon_js()
ensure_scene_settings()
check_inventory()
def export_template(backdrops, nodes, filepath):
"""Export Template to file.

View file

@ -120,3 +120,51 @@ def get_all_children(parent, node_type=None):
return ([x for x in child_list if rt.superClassOf(x) == node_type]
if node_type else child_list)
def get_current_renderer():
"""get current renderer"""
return rt.renderers.production
def get_default_render_folder(project_setting=None):
return (project_setting["max"]
["RenderSettings"]
["default_render_image_folder"])
def set_framerange(start_frame, end_frame):
"""
Note:
Frame range can be specified in different types. Possible values are:
* `1` - Single frame.
* `2` - Active time segment ( animationRange ).
* `3` - User specified Range.
* `4` - User specified Frame pickup string (for example `1,3,5-12`).
Todo:
Current type is hard-coded, there should be a custom setting for this.
"""
rt.rendTimeType = 4
if start_frame is not None and end_frame is not None:
frame_range = "{0}-{1}".format(start_frame, end_frame)
rt.rendPickupFrames = frame_range
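A minimal usage sketch of the helper above (frame values assumed, not from this commit):

    # Sketch: render frames 1001-1100 via a pickup string.
    set_framerange(1001, 1100)
    # equivalent to:
    #   rt.rendTimeType = 4               # user-specified frame pickup
    #   rt.rendPickupFrames = "1001-1100"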
def get_multipass_setting(project_setting=None):
return (project_setting["max"]
["RenderSettings"]
["multipass"])
def get_max_version():
    """Get the 3ds Max release year (used for Deadline).

    Returns:
        int: the release year, e.g. ``rt.maxversion()`` returns
        #(25000, 62, 0, 25, 0, 0, 997, 2023, "") where index 7
        (zero-based) is the version year.
    """
max_info = rt.maxversion()
return max_info[7]

View file

@ -0,0 +1,114 @@
# Render Element Example : For scanline render, VRay
# https://help.autodesk.com/view/MAXDEV/2022/ENU/?guid=GUID-E8F75D47-B998-4800-A3A5-610E22913CFC
# arnold
# https://help.autodesk.com/view/ARNOL/ENU/?guid=arnold_for_3ds_max_ax_maxscript_commands_ax_renderview_commands_html
import os
from pymxs import runtime as rt
from openpype.hosts.max.api.lib import (
get_current_renderer,
get_default_render_folder
)
from openpype.settings import get_project_settings
from openpype.pipeline import legacy_io
class RenderProducts(object):
def __init__(self, project_settings=None):
self._project_settings = project_settings
if not self._project_settings:
self._project_settings = get_project_settings(
legacy_io.Session["AVALON_PROJECT"]
)
def render_product(self, container):
folder = rt.maxFilePath
file = rt.maxFileName
folder = folder.replace("\\", "/")
setting = self._project_settings
render_folder = get_default_render_folder(setting)
filename, ext = os.path.splitext(file)
output_file = os.path.join(folder,
render_folder,
filename,
container)
img_fmt = self._project_settings["max"]["RenderSettings"]["image_format"] # noqa
full_render_list = []
beauty = self.beauty_render_product(output_file, img_fmt)
full_render_list.append(beauty)
renderer_class = get_current_renderer()
renderer = str(renderer_class).split(":")[0]
if renderer == "VUE_File_Renderer":
return full_render_list
if renderer in [
"ART_Renderer",
"Redshift_Renderer",
"V_Ray_6_Hotfix_3",
"V_Ray_GPU_6_Hotfix_3",
"Default_Scanline_Renderer",
"Quicksilver_Hardware_Renderer",
]:
render_elem_list = self.render_elements_product(output_file,
img_fmt)
if render_elem_list:
full_render_list.extend(iter(render_elem_list))
return full_render_list
if renderer == "Arnold":
aov_list = self.arnold_render_product(output_file,
img_fmt)
if aov_list:
full_render_list.extend(iter(aov_list))
return full_render_list
def beauty_render_product(self, folder, fmt):
beauty_output = f"{folder}.####.{fmt}"
beauty_output = beauty_output.replace("\\", "/")
return beauty_output
# TODO: Get the arnold render product
def arnold_render_product(self, folder, fmt):
"""Get all the Arnold AOVs"""
aovs = []
amw = rt.MaxtoAOps.AOVsManagerWindow()
aov_mgr = rt.renderers.current.AOVManager
# Check if there is any aov group set in AOV manager
aov_group_num = len(aov_mgr.drivers)
if aov_group_num < 1:
return
for i in range(aov_group_num):
# get the specific AOV group
for aov in aov_mgr.drivers[i].aov_list:
render_element = f"{folder}_{aov.name}.####.{fmt}"
render_element = render_element.replace("\\", "/")
aovs.append(render_element)
# close the AOVs manager window
amw.close()
return aovs
def render_elements_product(self, folder, fmt):
"""Get all the render element output files. """
render_dirname = []
render_elem = rt.maxOps.GetCurRenderElementMgr()
render_elem_num = render_elem.NumRenderElements()
# get render elements from the renders
for i in range(render_elem_num):
renderlayer_name = render_elem.GetRenderElement(i)
target, renderpass = str(renderlayer_name).split(":")
if renderlayer_name.enabled:
render_element = f"{folder}_{renderpass}.####.{fmt}"
render_element = render_element.replace("\\", "/")
render_dirname.append(render_element)
return render_dirname
def image_format(self):
return self._project_settings["max"]["RenderSettings"]["image_format"] # noqa
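A usage sketch for RenderProducts above; the scene name, render folder and image format are assumed example values:

    # Assuming a scene saved as "shot010.max", render folder "renders"
    # and image format "exr", the beauty product resolves to e.g.
    #   <maxFilePath>/renders/shot010/maxrenderMain.####.exr
    products = RenderProducts().render_product("maxrenderMain")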

View file

@ -0,0 +1,168 @@
import os
from pymxs import runtime as rt
from openpype.lib import Logger
from openpype.settings import get_project_settings
from openpype.pipeline import legacy_io
from openpype.pipeline.context_tools import get_current_project_asset
from openpype.hosts.max.api.lib import (
set_framerange,
get_current_renderer,
get_default_render_folder
)
class RenderSettings(object):
log = Logger.get_logger("RenderSettings")
_aov_chars = {
"dot": ".",
"dash": "-",
"underscore": "_"
}
def __init__(self, project_settings=None):
"""
Set up the naming convention for the render
elements for the deadline submission
"""
self._project_settings = project_settings
if not self._project_settings:
self._project_settings = get_project_settings(
legacy_io.Session["AVALON_PROJECT"]
)
    def set_render_camera(self, selection):
        # to avoid an AttributeError from the pymxs wrapper
        # (initialized before the loop so an empty selection
        # raises RuntimeError instead of NameError)
        found = False
        for sel in selection:
            if rt.classOf(sel) in rt.Camera.classes:
                found = True
                rt.viewport.setCamera(sel)
                break
        if not found:
            raise RuntimeError("Camera not found")
def render_output(self, container):
folder = rt.maxFilePath
# hard-coded, should be customized in the setting
file = rt.maxFileName
folder = folder.replace("\\", "/")
# hard-coded, set the renderoutput path
setting = self._project_settings
render_folder = get_default_render_folder(setting)
filename, ext = os.path.splitext(file)
output_dir = os.path.join(folder,
render_folder,
filename)
if not os.path.exists(output_dir):
os.makedirs(output_dir)
# hard-coded, should be customized in the setting
context = get_current_project_asset()
# get project resolution
width = context["data"].get("resolutionWidth")
height = context["data"].get("resolutionHeight")
# Set Frame Range
frame_start = context["data"].get("frame_start")
frame_end = context["data"].get("frame_end")
set_framerange(frame_start, frame_end)
# get the production render
renderer_class = get_current_renderer()
renderer = str(renderer_class).split(":")[0]
img_fmt = self._project_settings["max"]["RenderSettings"]["image_format"] # noqa
output = os.path.join(output_dir, container)
try:
aov_separator = self._aov_chars[(
self._project_settings["maya"]
["RenderSettings"]
["aov_separator"]
)]
except KeyError:
aov_separator = "."
output_filename = "{0}..{1}".format(output, img_fmt)
output_filename = output_filename.replace("{aov_separator}",
aov_separator)
rt.rendOutputFilename = output_filename
if renderer == "VUE_File_Renderer":
return
# TODO: Finish the arnold render setup
if renderer == "Arnold":
self.arnold_setup()
if renderer in [
"ART_Renderer",
"Redshift_Renderer",
"V_Ray_6_Hotfix_3",
"V_Ray_GPU_6_Hotfix_3",
"Default_Scanline_Renderer",
"Quicksilver_Hardware_Renderer",
]:
self.render_element_layer(output, width, height, img_fmt)
rt.rendSaveFile = True
def arnold_setup(self):
# get Arnold RenderView run in the background
# for setting up renderable camera
arv = rt.MAXToAOps.ArnoldRenderView()
render_camera = rt.viewport.GetCamera()
arv.setOption("Camera", str(render_camera))
# TODO: add AOVs and extension
img_fmt = self._project_settings["max"]["RenderSettings"]["image_format"] # noqa
setup_cmd = (
f"""
amw = MaxtoAOps.AOVsManagerWindow()
amw.close()
aovmgr = renderers.current.AOVManager
aovmgr.drivers = #()
img_fmt = "{img_fmt}"
if img_fmt == "png" then driver = ArnoldPNGDriver()
if img_fmt == "jpg" then driver = ArnoldJPEGDriver()
if img_fmt == "exr" then driver = ArnoldEXRDriver()
if img_fmt == "tif" then driver = ArnoldTIFFDriver()
if img_fmt == "tiff" then driver = ArnoldTIFFDriver()
append aovmgr.drivers driver
aovmgr.drivers[1].aov_list = #()
""")
rt.execute(setup_cmd)
arv.close()
def render_element_layer(self, dir, width, height, ext):
"""For Renderers with render elements"""
rt.renderWidth = width
rt.renderHeight = height
render_elem = rt.maxOps.GetCurRenderElementMgr()
render_elem_num = render_elem.NumRenderElements()
if render_elem_num < 0:
return
for i in range(render_elem_num):
renderlayer_name = render_elem.GetRenderElement(i)
target, renderpass = str(renderlayer_name).split(":")
aov_name = "{0}_{1}..{2}".format(dir, renderpass, ext)
render_elem.SetRenderElementFileName(i, aov_name)
def get_render_output(self, container, output_dir):
output = os.path.join(output_dir, container)
img_fmt = self._project_settings["max"]["RenderSettings"]["image_format"] # noqa
output_filename = "{0}..{1}".format(output, img_fmt)
return output_filename
def get_render_element(self):
orig_render_elem = []
render_elem = rt.maxOps.GetCurRenderElementMgr()
render_elem_num = render_elem.NumRenderElements()
if render_elem_num < 0:
return
for i in range(render_elem_num):
render_element = render_elem.GetRenderElementFilename(i)
orig_render_elem.append(render_element)
return orig_render_elem

View file

@ -0,0 +1,33 @@
# -*- coding: utf-8 -*-
"""Creator plugin for creating camera."""
from openpype.hosts.max.api import plugin
from openpype.pipeline import CreatedInstance
from openpype.hosts.max.api.lib_rendersettings import RenderSettings
class CreateRender(plugin.MaxCreator):
identifier = "io.openpype.creators.max.render"
label = "Render"
family = "maxrender"
icon = "gear"
def create(self, subset_name, instance_data, pre_create_data):
from pymxs import runtime as rt
sel_obj = list(rt.selection)
instance = super(CreateRender, self).create(
subset_name,
instance_data,
pre_create_data) # type: CreatedInstance
container_name = instance.data.get("instance_node")
container = rt.getNodeByName(container_name)
# TODO: Disable "Add to Containers?" Panel
# parent the selected cameras into the container
for obj in sel_obj:
obj.parent = container
# for additional work on the node:
# instance_node = rt.getNodeByName(instance.get("instance_node"))
# set viewport camera for rendering(mandatory for deadline)
RenderSettings().set_render_camera(sel_obj)
# set output paths for rendering(mandatory for deadline)
RenderSettings().render_output(container_name)

View file

@ -0,0 +1,67 @@
# -*- coding: utf-8 -*-
"""Collect Render"""
import os
import pyblish.api
from pymxs import runtime as rt
from openpype.pipeline import get_current_asset_name
from openpype.hosts.max.api.lib import get_max_version
from openpype.hosts.max.api.lib_renderproducts import RenderProducts
from openpype.client import get_last_version_by_subset_name
class CollectRender(pyblish.api.InstancePlugin):
"""Collect Render for Deadline"""
order = pyblish.api.CollectorOrder + 0.01
label = "Collect 3dsmax Render Layers"
hosts = ['max']
families = ["maxrender"]
def process(self, instance):
context = instance.context
folder = rt.maxFilePath
file = rt.maxFileName
current_file = os.path.join(folder, file)
filepath = current_file.replace("\\", "/")
context.data['currentFile'] = current_file
asset = get_current_asset_name()
render_layer_files = RenderProducts().render_product(instance.name)
folder = folder.replace("\\", "/")
img_format = RenderProducts().image_format()
project_name = context.data["projectName"]
asset_doc = context.data["assetEntity"]
asset_id = asset_doc["_id"]
version_doc = get_last_version_by_subset_name(project_name,
instance.name,
asset_id)
self.log.debug("version_doc: {0}".format(version_doc))
version_int = 1
if version_doc:
version_int += int(version_doc["name"])
self.log.debug(f"Setting {version_int} to context.")
context.data["version"] = version_int
# setup the plugin as 3dsmax for the internal renderer
data = {
"subset": instance.name,
"asset": asset,
"publish": True,
"maxversion": str(get_max_version()),
"imageFormat": img_format,
"family": 'maxrender',
"families": ['maxrender'],
"source": filepath,
"expectedFiles": render_layer_files,
"plugin": "3dsmax",
"frameStart": context.data['frameStart'],
"frameEnd": context.data['frameEnd'],
"version": version_int
}
self.log.info("data: {0}".format(data))
instance.data.update(data)

View file

@ -57,68 +57,6 @@ def edit_shader_definitions():
window.show()
def reset_frame_range():
"""Set frame range to current asset"""
# Set FPS first
fps = {15: 'game',
24: 'film',
25: 'pal',
30: 'ntsc',
48: 'show',
50: 'palf',
60: 'ntscf',
23.98: '23.976fps',
23.976: '23.976fps',
29.97: '29.97fps',
47.952: '47.952fps',
47.95: '47.952fps',
59.94: '59.94fps',
44100: '44100fps',
48000: '48000fps'
}.get(float(legacy_io.Session.get("AVALON_FPS", 25)), "pal")
cmds.currentUnit(time=fps)
# Set frame start/end
project_name = legacy_io.active_project()
asset_name = legacy_io.Session["AVALON_ASSET"]
asset = get_asset_by_name(project_name, asset_name)
frame_start = asset["data"].get("frameStart")
frame_end = asset["data"].get("frameEnd")
# Backwards compatibility
if frame_start is None or frame_end is None:
frame_start = asset["data"].get("edit_in")
frame_end = asset["data"].get("edit_out")
if frame_start is None or frame_end is None:
cmds.warning("No edit information found for %s" % asset_name)
return
handles = asset["data"].get("handles") or 0
handle_start = asset["data"].get("handleStart")
if handle_start is None:
handle_start = handles
handle_end = asset["data"].get("handleEnd")
if handle_end is None:
handle_end = handles
frame_start -= int(handle_start)
frame_end += int(handle_end)
cmds.playbackOptions(minTime=frame_start)
cmds.playbackOptions(maxTime=frame_end)
cmds.playbackOptions(animationStartTime=frame_start)
cmds.playbackOptions(animationEndTime=frame_end)
cmds.playbackOptions(minTime=frame_start)
cmds.playbackOptions(maxTime=frame_end)
cmds.currentTime(frame_start)
cmds.setAttr("defaultRenderGlobals.startFrame", frame_start)
cmds.setAttr("defaultRenderGlobals.endFrame", frame_end)
def _resolution_from_document(doc):
if not doc or "data" not in doc:
print("Entered document is not valid. \"{}\"".format(str(doc)))

View file

@ -34,7 +34,6 @@ from openpype.pipeline import (
registered_host,
)
from openpype.pipeline.context_tools import get_current_project_asset
from .commands import reset_frame_range
self = sys.modules[__name__]
@ -1970,8 +1969,6 @@ def get_id_from_sibling(node, history_only=True):
return first_id
# Project settings
def set_scene_fps(fps, update=True):
"""Set FPS from project configuration
@ -1984,30 +1981,23 @@ def set_scene_fps(fps, update=True):
"""
fps_mapping = {'15': 'game',
'24': 'film',
'25': 'pal',
'30': 'ntsc',
'48': 'show',
'50': 'palf',
'60': 'ntscf',
'23.98': '23.976fps',
'23.976': '23.976fps',
'29.97': '29.97fps',
'47.952': '47.952fps',
'47.95': '47.952fps',
'59.94': '59.94fps',
'44100': '44100fps',
'48000': '48000fps'}
fps_mapping = {
'15': 'game',
'24': 'film',
'25': 'pal',
'30': 'ntsc',
'48': 'show',
'50': 'palf',
'60': 'ntscf',
'23.976023976023978': '23.976fps',
'29.97002997002997': '29.97fps',
'47.952047952047955': '47.952fps',
'59.94005994005994': '59.94fps',
'44100': '44100fps',
'48000': '48000fps'
}
# pull from mapping
# this should convert float string to float and int to int
# so 25.0 is converted to 25, but 23.98 will be still float.
dec, ipart = math.modf(fps)
if dec == 0.0:
fps = int(ipart)
unit = fps_mapping.get(str(fps), None)
unit = fps_mapping.get(str(convert_to_maya_fps(fps)), None)
if unit is None:
raise ValueError("Unsupported FPS value: `%s`" % fps)
@ -2074,6 +2064,54 @@ def set_scene_resolution(width, height, pixelAspect):
cmds.setAttr("%s.pixelAspect" % control_node, pixelAspect)
def reset_frame_range():
"""Set frame range to current asset"""
fps = convert_to_maya_fps(
float(legacy_io.Session.get("AVALON_FPS", 25))
)
set_scene_fps(fps)
# Set frame start/end
project_name = legacy_io.active_project()
asset_name = legacy_io.Session["AVALON_ASSET"]
asset = get_asset_by_name(project_name, asset_name)
frame_start = asset["data"].get("frameStart")
frame_end = asset["data"].get("frameEnd")
# Backwards compatibility
if frame_start is None or frame_end is None:
frame_start = asset["data"].get("edit_in")
frame_end = asset["data"].get("edit_out")
if frame_start is None or frame_end is None:
cmds.warning("No edit information found for %s" % asset_name)
return
handles = asset["data"].get("handles") or 0
handle_start = asset["data"].get("handleStart")
if handle_start is None:
handle_start = handles
handle_end = asset["data"].get("handleEnd")
if handle_end is None:
handle_end = handles
frame_start -= int(handle_start)
frame_end += int(handle_end)
cmds.playbackOptions(minTime=frame_start)
cmds.playbackOptions(maxTime=frame_end)
cmds.playbackOptions(animationStartTime=frame_start)
cmds.playbackOptions(animationEndTime=frame_end)
cmds.playbackOptions(minTime=frame_start)
cmds.playbackOptions(maxTime=frame_end)
cmds.currentTime(frame_start)
cmds.setAttr("defaultRenderGlobals.startFrame", frame_start)
cmds.setAttr("defaultRenderGlobals.endFrame", frame_end)
def reset_scene_resolution():
"""Apply the scene resolution from the project definition
@ -2125,7 +2163,9 @@ def set_context_settings():
asset_data = asset_doc.get("data", {})
# Set project fps
fps = asset_data.get("fps", project_data.get("fps", 25))
fps = convert_to_maya_fps(
asset_data.get("fps", project_data.get("fps", 25))
)
legacy_io.Session["AVALON_FPS"] = str(fps)
set_scene_fps(fps)
@ -2147,15 +2187,12 @@ def validate_fps():
"""
fps = get_current_project_asset(fields=["data.fps"])["data"]["fps"]
# TODO(antirotor): This is hack as for framerates having multiple
# decimal places. FTrack is ceiling decimal values on
# fps to two decimal places but Maya 2019+ is reporting those fps
# with much higher resolution. As we currently cannot fix Ftrack
# rounding, we have to round those numbers coming from Maya.
current_fps = float_round(mel.eval('currentTimeUnitToFPS()'), 2)
expected_fps = convert_to_maya_fps(
get_current_project_asset(fields=["data.fps"])["data"]["fps"]
)
current_fps = mel.eval('currentTimeUnitToFPS()')
fps_match = current_fps == fps
fps_match = current_fps == expected_fps
if not fps_match and not IS_HEADLESS:
from openpype.widgets import popup
@ -2164,14 +2201,19 @@ def validate_fps():
dialog = popup.PopupUpdateKeys(parent=parent)
dialog.setModal(True)
dialog.setWindowTitle("Maya scene does not match project FPS")
dialog.setMessage("Scene %i FPS does not match project %i FPS" %
(current_fps, fps))
dialog.setMessage(
"Scene {} FPS does not match project {} FPS".format(
current_fps, expected_fps
)
)
dialog.setButtonText("Fix")
# Set new text for button (add optional argument for the popup?)
toggle = dialog.widgets["toggle"]
update = toggle.isChecked()
dialog.on_clicked_state.connect(lambda: set_scene_fps(fps, update))
dialog.on_clicked_state.connect(
lambda: set_scene_fps(expected_fps, update)
)
dialog.show()
@ -3356,6 +3398,88 @@ def get_attribute_input(attr):
return connections[0] if connections else None
def convert_to_maya_fps(fps):
"""Convert any fps to supported Maya framerates."""
float_framerates = [
23.976023976023978,
# WTF is 29.97 df vs fps?
29.97002997002997,
47.952047952047955,
59.94005994005994
]
# 44100 fps evaluates as 41000.0. Why? Omitting for now.
int_framerates = [
2,
3,
4,
5,
6,
8,
10,
12,
15,
16,
20,
24,
25,
30,
40,
48,
50,
60,
75,
80,
90,
100,
120,
125,
150,
200,
240,
250,
300,
375,
400,
500,
600,
750,
1200,
1500,
2000,
3000,
6000,
48000
]
# If input fps is a whole number we'll return.
if float(fps).is_integer():
# Validate fps is part of Maya's fps selection.
if int(fps) not in int_framerates:
raise ValueError(
"Framerate \"{}\" is not supported in Maya".format(fps)
)
return int(fps)
else:
# Differences to supported float frame rates.
differences = []
for i in float_framerates:
differences.append(abs(i - fps))
# Validate difference does not stray too far from supported framerates.
min_difference = min(differences)
min_index = differences.index(min_difference)
supported_framerate = float_framerates[min_index]
if min_difference > 0.1:
raise ValueError(
"Framerate \"{}\" strays too far from any supported framerate"
" in Maya. Closest supported framerate is \"{}\"".format(
fps, supported_framerate
)
)
return supported_framerate
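Illustrative calls for convert_to_maya_fps (input values assumed), covering both branches described above:

    convert_to_maya_fps(25.0)   # -> 25, whole number in int_framerates
    convert_to_maya_fps(23.98)  # -> 23.976023976023978, within 0.1
    convert_to_maya_fps(23.5)   # raises ValueError, strays too far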
def write_xgen_file(data, filepath):
"""Overwrites data in .xgen files.

View file

@ -797,6 +797,11 @@ class RenderProductsVray(ARenderProducts):
if default_ext in {"exr (multichannel)", "exr (deep)"}:
default_ext = "exr"
# Define multipart.
multipart = False
if image_format_str == "exr (multichannel)":
multipart = True
products = []
# add beauty as default when not disabled
@ -804,23 +809,28 @@ class RenderProductsVray(ARenderProducts):
if not dont_save_rgb:
for camera in cameras:
products.append(
RenderProduct(productName="",
ext=default_ext,
camera=camera))
RenderProduct(
productName="",
ext=default_ext,
camera=camera,
multipart=multipart
)
)
# separate alpha file
separate_alpha = self._get_attr("vraySettings.separateAlpha")
if separate_alpha:
for camera in cameras:
products.append(
RenderProduct(productName="Alpha",
ext=default_ext,
camera=camera)
RenderProduct(
productName="Alpha",
ext=default_ext,
camera=camera,
multipart=multipart
)
)
if image_format_str == "exr (multichannel)":
if multipart:
# AOVs are merged in m-channel file, only main layer is rendered
self.multipart = True
return products
# handle aovs from references

View file

@ -14,7 +14,7 @@ from openpype.settings import (
from openpype.pipeline import legacy_io
from openpype.pipeline import CreatorError
from openpype.pipeline.context_tools import get_current_project_asset
from openpype.hosts.maya.api.commands import reset_frame_range
from openpype.hosts.maya.api.lib import reset_frame_range
class RenderSettings(object):

View file

@ -12,7 +12,6 @@ from openpype.pipeline.workfile import BuildWorkfile
from openpype.tools.utils import host_tools
from openpype.hosts.maya.api import lib, lib_rendersettings
from .lib import get_main_window, IS_HEADLESS
from .commands import reset_frame_range
from .workfile_template_builder import (
create_placeholder,
@ -113,7 +112,7 @@ def install():
cmds.menuItem(
"Reset Frame Range",
command=lambda *args: reset_frame_range()
command=lambda *args: lib.reset_frame_range()
)
cmds.menuItem(

View file

@ -2,7 +2,7 @@ import json
from maya import cmds
from openpype.pipeline import registered_host
from openpype.pipeline import registered_host, get_current_asset_name
from openpype.pipeline.workfile.workfile_template_builder import (
TemplateAlreadyImported,
AbstractTemplateBuilder,
@ -41,10 +41,27 @@ class MayaTemplateBuilder(AbstractTemplateBuilder):
))
cmds.sets(name=PLACEHOLDER_SET, empty=True)
cmds.file(path, i=True, returnNewNodes=True)
new_nodes = cmds.file(path, i=True, returnNewNodes=True)
cmds.setAttr(PLACEHOLDER_SET + ".hiddenInOutliner", True)
imported_sets = cmds.ls(new_nodes, set=True)
if not imported_sets:
return True
# update imported sets information
asset_name = get_current_asset_name()
for node in imported_sets:
if not cmds.attributeQuery("id", node=node, exists=True):
continue
if cmds.getAttr("{}.id".format(node)) != "pyblish.avalon.instance":
continue
if not cmds.attributeQuery("asset", node=node, exists=True):
continue
cmds.setAttr(
"{}.asset".format(node), asset_name, type="string")
return True

View file

@ -6,7 +6,7 @@ from openpype.hosts.maya.api import (
from maya import cmds
class CreateAss(plugin.Creator):
class CreateArnoldSceneSource(plugin.Creator):
"""Arnold Scene Source"""
name = "ass"
@ -29,7 +29,7 @@ class CreateAss(plugin.Creator):
maskOperator = False
def __init__(self, *args, **kwargs):
super(CreateAss, self).__init__(*args, **kwargs)
super(CreateArnoldSceneSource, self).__init__(*args, **kwargs)
# Add animation data
self.data.update(lib.collect_animation_data())
@ -52,7 +52,7 @@ class CreateAss(plugin.Creator):
self.data["maskOperator"] = self.maskOperator
def process(self):
instance = super(CreateAss, self).process()
instance = super(CreateArnoldSceneSource, self).process()
nodes = []
@ -61,6 +61,6 @@ class CreateAss(plugin.Creator):
cmds.sets(nodes, rm=instance)
assContent = cmds.sets(name="content_SET")
assProxy = cmds.sets(name="proxy_SET", empty=True)
assContent = cmds.sets(name=instance + "_content_SET")
assProxy = cmds.sets(name=instance + "_proxy_SET", empty=True)
cmds.sets([assContent, assProxy], forceElement=instance)
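        # e.g. an instance set named "assMain" yields "assMain_content_SET"
        # and "assMain_proxy_SET" (matched via endswith() in the collector)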

View file

@ -1,3 +1,5 @@
from maya import cmds
from openpype.hosts.maya.api import (
lib,
plugin
@ -37,3 +39,9 @@ class CreatePointCache(plugin.Creator):
# Default to not send to farm.
self.data["farm"] = False
self.data["priority"] = 50
def process(self):
instance = super(CreatePointCache, self).process()
assProxy = cmds.sets(name=instance + "_proxy_SET", empty=True)
cmds.sets(assProxy, forceElement=instance)

View file

@ -1,132 +0,0 @@
import os
from openpype.pipeline import (
legacy_io,
load,
get_representation_path
)
from openpype.settings import get_project_settings
class AlembicStandinLoader(load.LoaderPlugin):
"""Load Alembic as Arnold Standin"""
families = ["animation", "model", "proxyAbc", "pointcache"]
representations = ["abc"]
label = "Import Alembic as Arnold Standin"
order = -5
icon = "code-fork"
color = "orange"
def load(self, context, name, namespace, options):
import maya.cmds as cmds
import mtoa.ui.arnoldmenu
from openpype.hosts.maya.api.pipeline import containerise
from openpype.hosts.maya.api.lib import unique_namespace
version = context["version"]
version_data = version.get("data", {})
family = version["data"]["families"]
self.log.info("version_data: {}\n".format(version_data))
self.log.info("family: {}\n".format(family))
frameStart = version_data.get("frameStart", None)
asset = context["asset"]["name"]
namespace = namespace or unique_namespace(
asset + "_",
prefix="_" if asset[0].isdigit() else "",
suffix="_",
)
# Root group
label = "{}:{}".format(namespace, name)
root = cmds.group(name=label, empty=True)
settings = get_project_settings(os.environ['AVALON_PROJECT'])
colors = settings["maya"]["load"]["colors"]
fps = legacy_io.Session["AVALON_FPS"]
c = colors.get(family[0])
if c is not None:
r = (float(c[0]) / 255)
g = (float(c[1]) / 255)
b = (float(c[2]) / 255)
cmds.setAttr(root + ".useOutlinerColor", 1)
cmds.setAttr(root + ".outlinerColor",
r, g, b)
transform_name = label + "_ABC"
standinShape = cmds.ls(mtoa.ui.arnoldmenu.createStandIn())[0]
standin = cmds.listRelatives(standinShape, parent=True,
typ="transform")
standin = cmds.rename(standin, transform_name)
standinShape = cmds.listRelatives(standin, children=True)[0]
cmds.parent(standin, root)
# Set the standin filepath
cmds.setAttr(standinShape + ".dso", self.fname, type="string")
cmds.setAttr(standinShape + ".abcFPS", float(fps))
if frameStart is None:
cmds.setAttr(standinShape + ".useFrameExtension", 0)
elif "model" in family:
cmds.setAttr(standinShape + ".useFrameExtension", 0)
else:
cmds.setAttr(standinShape + ".useFrameExtension", 1)
nodes = [root, standin]
self[:] = nodes
return containerise(
name=name,
namespace=namespace,
nodes=nodes,
context=context,
loader=self.__class__.__name__)
def update(self, container, representation):
import pymel.core as pm
path = get_representation_path(representation)
fps = legacy_io.Session["AVALON_FPS"]
# Update the standin
standins = list()
members = pm.sets(container['objectName'], query=True)
self.log.info("container:{}".format(container))
for member in members:
shape = member.getShape()
if (shape and shape.type() == "aiStandIn"):
standins.append(shape)
for standin in standins:
standin.dso.set(path)
standin.abcFPS.set(float(fps))
if "modelMain" in container['objectName']:
standin.useFrameExtension.set(0)
else:
standin.useFrameExtension.set(1)
container = pm.PyNode(container["objectName"])
container.representation.set(str(representation["_id"]))
def switch(self, container, representation):
self.update(container, representation)
def remove(self, container):
import maya.cmds as cmds
members = cmds.sets(container['objectName'], query=True)
cmds.lockNode(members, lock=False)
cmds.delete([container['objectName']] + members)
# Clean up the namespace
try:
cmds.namespace(removeNamespace=container['namespace'],
deleteNamespaceContent=True)
except RuntimeError:
pass

View file

@ -0,0 +1,218 @@
import os
import clique
import maya.cmds as cmds
import mtoa.ui.arnoldmenu
from openpype.settings import get_project_settings
from openpype.pipeline import (
load,
get_representation_path
)
from openpype.hosts.maya.api.lib import (
unique_namespace, get_attribute_input, maintained_selection
)
from openpype.hosts.maya.api.pipeline import containerise
def is_sequence(files):
sequence = False
collections, remainder = clique.assemble(files)
if collections:
sequence = True
return sequence
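A quick illustration of the helper above (file names assumed); clique only assembles two or more indexed files into a collection:

    is_sequence(["model.1001.ass", "model.1002.ass"])  # True
    is_sequence(["model.ass"])                         # False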
class ArnoldStandinLoader(load.LoaderPlugin):
"""Load as Arnold standin"""
families = ["ass", "animation", "model", "proxyAbc", "pointcache"]
representations = ["ass", "abc"]
label = "Load as Arnold standin"
order = -5
icon = "code-fork"
color = "orange"
def load(self, context, name, namespace, options):
version = context['version']
version_data = version.get("data", {})
self.log.info("version_data: {}\n".format(version_data))
asset = context['asset']['name']
namespace = namespace or unique_namespace(
asset + "_",
prefix="_" if asset[0].isdigit() else "",
suffix="_",
)
# Root group
label = "{}:{}".format(namespace, name)
root = cmds.group(name=label, empty=True)
# Set color.
settings = get_project_settings(context["project"]["name"])
color = settings['maya']['load']['colors'].get('ass')
if color is not None:
cmds.setAttr(root + ".useOutlinerColor", True)
cmds.setAttr(
root + ".outlinerColor", color[0], color[1], color[2]
)
with maintained_selection():
# Create transform with shape
transform_name = label + "_standin"
standin_shape = mtoa.ui.arnoldmenu.createStandIn()
standin = cmds.listRelatives(standin_shape, parent=True)[0]
standin = cmds.rename(standin, transform_name)
standin_shape = cmds.listRelatives(standin, shapes=True)[0]
cmds.parent(standin, root)
# Set the standin filepath
path, operator = self._setup_proxy(
standin_shape, self.fname, namespace
)
cmds.setAttr(standin_shape + ".dso", path, type="string")
sequence = is_sequence(os.listdir(os.path.dirname(self.fname)))
cmds.setAttr(standin_shape + ".useFrameExtension", sequence)
nodes = [root, standin]
if operator is not None:
nodes.append(operator)
self[:] = nodes
return containerise(
name=name,
namespace=namespace,
nodes=nodes,
context=context,
loader=self.__class__.__name__)
def get_next_free_multi_index(self, attr_name):
"""Find the next unconnected multi index at the input attribute."""
for index in range(10000000):
connection_info = cmds.connectionInfo(
"{}[{}]".format(attr_name, index),
sourceFromDestination=True
)
if len(connection_info or []) == 0:
return index
def _get_proxy_path(self, path):
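        # Illustrative: "model.1001.ass" ->
        #   ("model_proxy.1001.ass", "<dir>/model_proxy.1001.ass")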
basename_split = os.path.basename(path).split(".")
proxy_basename = (
basename_split[0] + "_proxy." + ".".join(basename_split[1:])
)
proxy_path = "/".join([os.path.dirname(path), proxy_basename])
return proxy_basename, proxy_path
def _setup_proxy(self, shape, path, namespace):
proxy_basename, proxy_path = self._get_proxy_path(path)
options_node = "defaultArnoldRenderOptions"
merge_operator = get_attribute_input(options_node + ".operator")
if merge_operator is None:
merge_operator = cmds.createNode("aiMerge")
cmds.connectAttr(
merge_operator + ".message", options_node + ".operator"
)
merge_operator = merge_operator.split(".")[0]
string_replace_operator = cmds.createNode(
"aiStringReplace", name=namespace + ":string_replace_operator"
)
node_type = "alembic" if path.endswith(".abc") else "procedural"
cmds.setAttr(
string_replace_operator + ".selection",
"*.(@node=='{}')".format(node_type),
type="string"
)
cmds.setAttr(
string_replace_operator + ".match",
proxy_basename,
type="string"
)
cmds.setAttr(
string_replace_operator + ".replace",
os.path.basename(path),
type="string"
)
cmds.connectAttr(
string_replace_operator + ".out",
"{}.inputs[{}]".format(
merge_operator,
self.get_next_free_multi_index(merge_operator + ".inputs")
)
)
# We setup the string operator no matter whether there is a proxy or
# not. This makes it easier to update since the string operator will
# always be created. Return original path to use for standin.
if not os.path.exists(proxy_path):
return path, string_replace_operator
return proxy_path, string_replace_operator
def update(self, container, representation):
# Update the standin
members = cmds.sets(container['objectName'], query=True)
for member in members:
if cmds.nodeType(member) == "aiStringReplace":
string_replace_operator = member
shapes = cmds.listRelatives(member, shapes=True)
if not shapes:
continue
if cmds.nodeType(shapes[0]) == "aiStandIn":
standin = shapes[0]
path = get_representation_path(representation)
proxy_basename, proxy_path = self._get_proxy_path(path)
        # Whether there is a proxy or not, we still update the string
        # operator. If no proxy exists, the string operator won't replace
        # anything.
cmds.setAttr(
string_replace_operator + ".match",
"resources/" + proxy_basename,
type="string"
)
cmds.setAttr(
string_replace_operator + ".replace",
os.path.basename(path),
type="string"
)
dso_path = path
if os.path.exists(proxy_path):
dso_path = proxy_path
cmds.setAttr(standin + ".dso", dso_path, type="string")
sequence = is_sequence(os.listdir(os.path.dirname(path)))
cmds.setAttr(standin + ".useFrameExtension", sequence)
cmds.setAttr(
container["objectName"] + ".representation",
str(representation["_id"]),
type="string"
)
def switch(self, container, representation):
self.update(container, representation)
def remove(self, container):
members = cmds.sets(container['objectName'], query=True)
cmds.lockNode(members, lock=False)
cmds.delete([container['objectName']] + members)
# Clean up the namespace
try:
cmds.namespace(removeNamespace=container['namespace'],
deleteNamespaceContent=True)
except RuntimeError:
pass

View file

@ -1,290 +0,0 @@
import os
import clique
from openpype.settings import get_project_settings
from openpype.pipeline import (
load,
get_representation_path
)
import openpype.hosts.maya.api.plugin
from openpype.hosts.maya.api.plugin import get_reference_node
from openpype.hosts.maya.api.lib import (
maintained_selection,
unique_namespace
)
from openpype.hosts.maya.api.pipeline import containerise
class AssProxyLoader(openpype.hosts.maya.api.plugin.ReferenceLoader):
"""Load Arnold Proxy as reference"""
families = ["ass"]
representations = ["ass"]
label = "Reference .ASS standin with Proxy"
order = -10
icon = "code-fork"
color = "orange"
def process_reference(self, context, name, namespace, options):
import maya.cmds as cmds
import pymel.core as pm
version = context['version']
version_data = version.get("data", {})
self.log.info("version_data: {}\n".format(version_data))
frameStart = version_data.get("frameStart", None)
try:
family = context["representation"]["context"]["family"]
except ValueError:
family = "ass"
with maintained_selection():
groupName = "{}:{}".format(namespace, name)
path = self.fname
proxyPath_base = os.path.splitext(path)[0]
if frameStart is not None:
proxyPath_base = os.path.splitext(proxyPath_base)[0]
publish_folder = os.path.split(path)[0]
files_in_folder = os.listdir(publish_folder)
collections, remainder = clique.assemble(files_in_folder)
if collections:
hashes = collections[0].padding * '#'
coll = collections[0].format('{head}[index]{tail}')
filename = coll.replace('[index]', hashes)
path = os.path.join(publish_folder, filename)
proxyPath = proxyPath_base + ".ma"
project_name = context["project"]["name"]
file_url = self.prepare_root_value(proxyPath,
project_name)
nodes = cmds.file(file_url,
namespace=namespace,
reference=True,
returnNewNodes=True,
groupReference=True,
groupName=groupName)
cmds.makeIdentity(groupName, apply=False, rotate=True,
translate=True, scale=True)
# Set attributes
proxyShape = pm.ls(nodes, type="mesh")[0]
proxyShape.aiTranslator.set('procedural')
proxyShape.dso.set(path)
proxyShape.aiOverrideShaders.set(0)
settings = get_project_settings(project_name)
colors = settings['maya']['load']['colors']
c = colors.get(family)
if c is not None:
cmds.setAttr(groupName + ".useOutlinerColor", 1)
cmds.setAttr(groupName + ".outlinerColor",
(float(c[0])/255),
(float(c[1])/255),
(float(c[2])/255)
)
self[:] = nodes
return nodes
def switch(self, container, representation):
self.update(container, representation)
def update(self, container, representation):
from maya import cmds
import pymel.core as pm
node = container["objectName"]
representation["context"].pop("frame", None)
path = get_representation_path(representation)
print(path)
# path = self.fname
print(self.fname)
proxyPath = os.path.splitext(path)[0] + ".ma"
print(proxyPath)
# Get reference node from container members
members = cmds.sets(node, query=True, nodesOnly=True)
reference_node = get_reference_node(members)
assert os.path.exists(proxyPath), "%s does not exist." % proxyPath
try:
file_url = self.prepare_root_value(proxyPath,
representation["context"]
["project"]
["name"])
content = cmds.file(file_url,
loadReference=reference_node,
type="mayaAscii",
returnNewNodes=True)
# Set attributes
proxyShape = pm.ls(content, type="mesh")[0]
proxyShape.aiTranslator.set('procedural')
proxyShape.dso.set(path)
proxyShape.aiOverrideShaders.set(0)
except RuntimeError as exc:
# When changing a reference to a file that has load errors the
# command will raise an error even if the file is still loaded
# correctly (e.g. when raising errors on Arnold attributes)
# When the file is loaded and has content, we consider it's fine.
if not cmds.referenceQuery(reference_node, isLoaded=True):
raise
content = cmds.referenceQuery(reference_node,
nodes=True,
dagPath=True)
if not content:
raise
self.log.warning("Ignoring file read error:\n%s", exc)
# Add new nodes of the reference to the container
cmds.sets(content, forceElement=node)
# Remove any placeHolderList attribute entries from the set that
# are remaining from nodes being removed from the referenced file.
members = cmds.sets(node, query=True)
invalid = [x for x in members if ".placeHolderList" in x]
if invalid:
cmds.sets(invalid, remove=node)
# Update metadata
cmds.setAttr("{}.representation".format(node),
str(representation["_id"]),
type="string")
class AssStandinLoader(load.LoaderPlugin):
"""Load .ASS file as standin"""
families = ["ass"]
representations = ["ass"]
label = "Load .ASS file as standin"
order = -5
icon = "code-fork"
color = "orange"
def load(self, context, name, namespace, options):
import maya.cmds as cmds
import mtoa.ui.arnoldmenu
import pymel.core as pm
version = context['version']
version_data = version.get("data", {})
self.log.info("version_data: {}\n".format(version_data))
frameStart = version_data.get("frameStart", None)
asset = context['asset']['name']
namespace = namespace or unique_namespace(
asset + "_",
prefix="_" if asset[0].isdigit() else "",
suffix="_",
)
# cmds.loadPlugin("gpuCache", quiet=True)
# Root group
label = "{}:{}".format(namespace, name)
root = pm.group(name=label, empty=True)
settings = get_project_settings(os.environ['AVALON_PROJECT'])
colors = settings['maya']['load']['colors']
c = colors.get('ass')
if c is not None:
cmds.setAttr(root + ".useOutlinerColor", 1)
cmds.setAttr(root + ".outlinerColor",
c[0], c[1], c[2])
# Create transform with shape
transform_name = label + "_ASS"
# transform = pm.createNode("transform", name=transform_name,
# parent=root)
standinShape = pm.PyNode(mtoa.ui.arnoldmenu.createStandIn())
standin = standinShape.getParent()
standin.rename(transform_name)
pm.parent(standin, root)
# Set the standin filepath
standinShape.dso.set(self.fname)
if frameStart is not None:
standinShape.useFrameExtension.set(1)
nodes = [root, standin]
self[:] = nodes
return containerise(
name=name,
namespace=namespace,
nodes=nodes,
context=context,
loader=self.__class__.__name__)
def update(self, container, representation):
import pymel.core as pm
path = get_representation_path(representation)
files_in_path = os.listdir(os.path.split(path)[0])
sequence = 0
collections, remainder = clique.assemble(files_in_path)
if collections:
sequence = 1
# Update the standin
standins = list()
members = pm.sets(container['objectName'], query=True)
for member in members:
shape = member.getShape()
if (shape and shape.type() == "aiStandIn"):
standins.append(shape)
for standin in standins:
standin.dso.set(path)
standin.useFrameExtension.set(sequence)
container = pm.PyNode(container["objectName"])
container.representation.set(str(representation["_id"]))
def switch(self, container, representation):
self.update(container, representation)
def remove(self, container):
import maya.cmds as cmds
members = cmds.sets(container['objectName'], query=True)
cmds.lockNode(members, lock=False)
cmds.delete([container['objectName']] + members)
# Clean up the namespace
try:
cmds.namespace(removeNamespace=container['namespace'],
deleteNamespaceContent=True)
except RuntimeError:
pass

View file

@ -1,19 +1,18 @@
from maya import cmds
from openpype.pipeline.publish import KnownPublishError
import pyblish.api
class CollectAssData(pyblish.api.InstancePlugin):
"""Collect Ass data."""
class CollectArnoldSceneSource(pyblish.api.InstancePlugin):
"""Collect Arnold Scene Source data."""
# Offset to be after renderable camera collection.
order = pyblish.api.CollectorOrder + 0.2
label = 'Collect Ass'
label = "Collect Arnold Scene Source"
families = ["ass"]
def process(self, instance):
objsets = instance.data['setMembers']
objsets = instance.data["setMembers"]
for objset in objsets:
objset = str(objset)
@ -21,15 +20,12 @@ class CollectAssData(pyblish.api.InstancePlugin):
if members is None:
self.log.warning("Skipped empty instance: \"%s\" " % objset)
continue
if "content_SET" in objset:
instance.data['setMembers'] = members
self.log.debug('content members: {}'.format(members))
elif objset.startswith("proxy_SET"):
if len(members) != 1:
msg = "You have multiple proxy meshes, please only use one"
raise KnownPublishError(msg)
instance.data['proxy'] = members
self.log.debug('proxy members: {}'.format(members))
if objset.endswith("content_SET"):
instance.data["setMembers"] = cmds.ls(members, long=True)
self.log.debug("content members: {}".format(members))
elif objset.endswith("proxy_SET"):
instance.data["proxy"] = cmds.ls(members, long=True)
self.log.debug("proxy members: {}".format(members))
# Use camera in object set if present else default to render globals
# camera.

View file

@ -1,3 +1,5 @@
from maya import cmds
import pyblish.api
@ -12,3 +14,31 @@ class CollectPointcache(pyblish.api.InstancePlugin):
def process(self, instance):
if instance.data.get("farm"):
instance.data["families"].append("publish.farm")
proxy_set = None
for node in instance.data["setMembers"]:
if cmds.nodeType(node) != "objectSet":
continue
members = cmds.sets(node, query=True)
if members is None:
self.log.warning("Skipped empty objectset: \"%s\" " % node)
continue
if node.endswith("proxy_SET"):
proxy_set = node
instance.data["proxy"] = []
instance.data["proxyRoots"] = []
for member in members:
instance.data["proxy"].extend(cmds.ls(member, long=True))
instance.data["proxyRoots"].extend(
cmds.ls(member, long=True)
)
instance.data["proxy"].extend(
cmds.listRelatives(member, shapes=True, fullPath=True)
)
self.log.debug(
"proxy members: {}".format(instance.data["proxy"])
)
if proxy_set:
instance.remove(proxy_set)
instance.data["setMembers"].remove(proxy_set)

View file

@ -42,7 +42,6 @@ Provides:
import re
import os
import platform
import json
from maya import cmds
import maya.app.renderSetup.model.renderSetup as renderSetup
@ -320,7 +319,8 @@ class CollectMayaRender(pyblish.api.ContextPlugin):
"renderSetupIncludeLights"
),
"strict_error_checking": render_instance.data.get(
"strict_error_checking")
"strict_error_checking", True
)
}
# Collect Deadline url if Deadline module is enabled

View file

@ -0,0 +1,160 @@
import os
from maya import cmds
import arnold
from openpype.pipeline import publish
from openpype.hosts.maya.api.lib import (
maintained_selection, attribute_values, delete_after
)
class ExtractArnoldSceneSource(publish.Extractor):
"""Extract the content of the instance to an Arnold Scene Source file."""
label = "Extract Arnold Scene Source"
hosts = ["maya"]
families = ["ass"]
asciiAss = False
def process(self, instance):
staging_dir = self.staging_dir(instance)
filename = "{}.ass".format(instance.name)
file_path = os.path.join(staging_dir, filename)
# Mask
mask = arnold.AI_NODE_ALL
node_types = {
"options": arnold.AI_NODE_OPTIONS,
"camera": arnold.AI_NODE_CAMERA,
"light": arnold.AI_NODE_LIGHT,
"shape": arnold.AI_NODE_SHAPE,
"shader": arnold.AI_NODE_SHADER,
"override": arnold.AI_NODE_OVERRIDE,
"driver": arnold.AI_NODE_DRIVER,
"filter": arnold.AI_NODE_FILTER,
"color_manager": arnold.AI_NODE_COLOR_MANAGER,
"operator": arnold.AI_NODE_OPERATOR
}
for key in node_types.keys():
if instance.data.get("mask" + key.title()):
mask = mask ^ node_types[key]
# Motion blur
attribute_data = {
"defaultArnoldRenderOptions.motion_blur_enable": instance.data.get(
"motionBlur", True
),
"defaultArnoldRenderOptions.motion_steps": instance.data.get(
"motionBlurKeys", 2
),
"defaultArnoldRenderOptions.motion_frames": instance.data.get(
"motionBlurLength", 0.5
)
}
# Write out .ass file
kwargs = {
"filename": file_path,
"startFrame": instance.data.get("frameStartHandle", 1),
"endFrame": instance.data.get("frameEndHandle", 1),
"frameStep": instance.data.get("step", 1),
"selected": True,
"asciiAss": self.asciiAss,
"shadowLinks": True,
"lightLinks": True,
"boundingBox": True,
"expandProcedurals": instance.data.get("expandProcedurals", False),
"camera": instance.data["camera"],
"mask": mask
}
filenames = self._extract(
instance.data["setMembers"], attribute_data, kwargs
)
if "representations" not in instance.data:
instance.data["representations"] = []
representation = {
"name": "ass",
"ext": "ass",
"files": filenames if len(filenames) > 1 else filenames[0],
"stagingDir": staging_dir,
"frameStart": kwargs["startFrame"]
}
instance.data["representations"].append(representation)
self.log.info(
"Extracted instance {} to: {}".format(instance.name, staging_dir)
)
# Extract proxy.
if not instance.data.get("proxy", []):
return
kwargs["filename"] = file_path.replace(".ass", "_proxy.ass")
filenames = self._extract(
instance.data["proxy"], attribute_data, kwargs
)
representation = {
"name": "proxy",
"ext": "ass",
"files": filenames if len(filenames) > 1 else filenames[0],
"stagingDir": staging_dir,
"frameStart": kwargs["startFrame"],
"outputName": "proxy"
}
instance.data["representations"].append(representation)
def _extract(self, nodes, attribute_data, kwargs):
self.log.info("Writing: " + kwargs["filename"])
filenames = []
# Duplicating nodes so they are direct children of the world. This
# makes the hierarchy of any exported ass file the same.
with delete_after() as delete_bin:
duplicate_nodes = []
for node in nodes:
duplicate_transform = cmds.duplicate(node)[0]
# Discard the children.
shapes = cmds.listRelatives(duplicate_transform, shapes=True)
children = cmds.listRelatives(
duplicate_transform, children=True
)
cmds.delete(set(children) - set(shapes))
duplicate_transform = cmds.parent(
duplicate_transform, world=True
)[0]
cmds.rename(duplicate_transform, node.split("|")[-1])
duplicate_transform = "|" + node.split("|")[-1]
duplicate_nodes.append(duplicate_transform)
delete_bin.append(duplicate_transform)
with attribute_values(attribute_data):
with maintained_selection():
self.log.info(
"Writing: {}".format(duplicate_nodes)
)
cmds.select(duplicate_nodes, noExpand=True)
self.log.info(
"Extracting ass sequence with: {}".format(kwargs)
)
exported_files = cmds.arnoldExportAss(**kwargs)
for file in exported_files:
filenames.append(os.path.split(file)[1])
self.log.info("Exported: {}".format(filenames))
return filenames
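
The mask computation in this extractor starts from arnold.AI_NODE_ALL and XORs out the flag of every node type the user chose to mask. A minimal standalone sketch of that bit trick, using made-up flag values in place of the arnold.AI_NODE_* constants:

# Made-up flag values standing in for arnold.AI_NODE_* constants.
AI_NODE_CAMERA = 0x02
AI_NODE_LIGHT = 0x04
AI_NODE_SHAPE = 0x08
AI_NODE_ALL = 0xFF  # all bits set

mask = AI_NODE_ALL
for flag in (AI_NODE_CAMERA, AI_NODE_LIGHT):
    # XOR clears a bit that is known to be set; the equivalent
    # "mask &= ~flag" would be safe even if the bit were already clear.
    mask ^= flag

assert not mask & AI_NODE_CAMERA  # cameras excluded from the export
assert mask & AI_NODE_SHAPE      # shapes still included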

View file

@ -1,106 +0,0 @@
import os
from maya import cmds
import arnold
from openpype.pipeline import publish
from openpype.hosts.maya.api.lib import maintained_selection, attribute_values
class ExtractAssStandin(publish.Extractor):
"""Extract the content of the instance to a ass file"""
label = "Arnold Scene Source (.ass)"
hosts = ["maya"]
families = ["ass"]
asciiAss = False
def process(self, instance):
staging_dir = self.staging_dir(instance)
filename = "{}.ass".format(instance.name)
filenames = []
file_path = os.path.join(staging_dir, filename)
# Mask
mask = arnold.AI_NODE_ALL
node_types = {
"options": arnold.AI_NODE_OPTIONS,
"camera": arnold.AI_NODE_CAMERA,
"light": arnold.AI_NODE_LIGHT,
"shape": arnold.AI_NODE_SHAPE,
"shader": arnold.AI_NODE_SHADER,
"override": arnold.AI_NODE_OVERRIDE,
"driver": arnold.AI_NODE_DRIVER,
"filter": arnold.AI_NODE_FILTER,
"color_manager": arnold.AI_NODE_COLOR_MANAGER,
"operator": arnold.AI_NODE_OPERATOR
}
for key in node_types.keys():
if instance.data.get("mask" + key.title()):
mask = mask ^ node_types[key]
# Motion blur
values = {
"defaultArnoldRenderOptions.motion_blur_enable": instance.data.get(
"motionBlur", True
),
"defaultArnoldRenderOptions.motion_steps": instance.data.get(
"motionBlurKeys", 2
),
"defaultArnoldRenderOptions.motion_frames": instance.data.get(
"motionBlurLength", 0.5
)
}
# Write out .ass file
kwargs = {
"filename": file_path,
"startFrame": instance.data.get("frameStartHandle", 1),
"endFrame": instance.data.get("frameEndHandle", 1),
"frameStep": instance.data.get("step", 1),
"selected": True,
"asciiAss": self.asciiAss,
"shadowLinks": True,
"lightLinks": True,
"boundingBox": True,
"expandProcedurals": instance.data.get("expandProcedurals", False),
"camera": instance.data["camera"],
"mask": mask
}
self.log.info("Writing: '%s'" % file_path)
with attribute_values(values):
with maintained_selection():
self.log.info(
"Writing: {}".format(instance.data["setMembers"])
)
cmds.select(instance.data["setMembers"], noExpand=True)
self.log.info(
"Extracting ass sequence with: {}".format(kwargs)
)
exported_files = cmds.arnoldExportAss(**kwargs)
for file in exported_files:
filenames.append(os.path.split(file)[1])
self.log.info("Exported: {}".format(filenames))
if "representations" not in instance.data:
instance.data["representations"] = []
representation = {
'name': 'ass',
'ext': 'ass',
'files': filenames if len(filenames) > 1 else filenames[0],
"stagingDir": staging_dir,
'frameStart': kwargs["startFrame"]
}
instance.data["representations"].append(representation)
self.log.info("Extracted instance '%s' to: %s"
% (instance.name, staging_dir))

View file

@ -1,81 +0,0 @@
import os
import contextlib
from maya import cmds
from openpype.pipeline import publish
from openpype.hosts.maya.api.lib import maintained_selection
class ExtractAssProxy(publish.Extractor):
"""Extract proxy model as Maya Ascii to use as arnold standin
"""
order = publish.Extractor.order + 0.2
label = "Ass Proxy (Maya ASCII)"
hosts = ["maya"]
families = ["ass"]
def process(self, instance):
@contextlib.contextmanager
def unparent(root):
"""Temporarily unparent `root`"""
parent = cmds.listRelatives(root, parent=True)
if parent:
cmds.parent(root, world=True)
yield
self.log.info("{} - {}".format(root, parent))
cmds.parent(root, parent)
else:
yield
# Define extract output file path
stagingdir = self.staging_dir(instance)
filename = "{0}.ma".format(instance.name)
path = os.path.join(stagingdir, filename)
# Perform extraction
self.log.info("Performing extraction..")
# Get only the shape contents we need in such a way that we avoid
# taking along intermediateObjects
proxy = instance.data.get('proxy', None)
if not proxy:
self.log.info("no proxy mesh")
return
members = cmds.ls(proxy,
dag=True,
transforms=True,
noIntermediate=True)
self.log.info(members)
with maintained_selection():
with unparent(members[0]):
cmds.select(members, noExpand=True)
cmds.file(path,
force=True,
typ="mayaAscii",
exportSelected=True,
preserveReferences=False,
channels=False,
constraints=False,
expressions=False,
constructionHistory=False)
if "representations" not in instance.data:
instance.data["representations"] = []
representation = {
'name': 'ma',
'ext': 'ma',
'files': filename,
"stagingDir": stagingdir
}
instance.data["representations"].append(representation)
self.log.info("Extracted instance '%s' to: %s" % (instance.name, path))

View file

@ -1,4 +1,5 @@
import os
import json
import clique
import capture
@ -44,10 +45,6 @@ class ExtractPlayblast(publish.Extractor):
# get cameras
camera = instance.data['review_camera']
override_viewport_options = (
self.capture_preset['Viewport Options']
['override_viewport_options']
)
preset = lib.load_capture_preset(data=self.capture_preset)
# Grab capture presets from the project settings
capture_presets = self.capture_preset
@ -119,6 +116,27 @@ class ExtractPlayblast(publish.Extractor):
pan_zoom = cmds.getAttr("{}.panZoomEnabled".format(preset["camera"]))
cmds.setAttr("{}.panZoomEnabled".format(preset["camera"]), False)
# Need to explicitly enable some viewport changes so the viewport is
# refreshed ahead of playblasting.
panel = cmds.getPanel(withFocus=True)
keys = [
"useDefaultMaterial",
"wireframeOnShaded",
"xray",
"jointXray",
"backfaceCulling"
]
viewport_defaults = {}
for key in keys:
viewport_defaults[key] = cmds.modelEditor(
panel, query=True, **{key: True}
)
if preset["viewport_options"][key]:
cmds.modelEditor(panel, edit=True, **{key: True})
override_viewport_options = (
capture_presets['Viewport Options']['override_viewport_options']
)
with lib.maintained_time():
filename = preset.get("filename", "%TEMP%")
@ -127,18 +145,26 @@ class ExtractPlayblast(publish.Extractor):
# playblast and viewer
preset['viewer'] = False
self.log.info('using viewport preset: {}'.format(preset))
# Update preset with current panel setting
# if override_viewport_options is turned off
if not override_viewport_options:
panel = cmds.getPanel(withFocus=True)
panel = cmds.getPanel(withFocus=True) or ""
if not override_viewport_options and "modelPanel" in panel:
panel_preset = capture.parse_active_view()
panel_preset.pop("camera")
preset.update(panel_preset)
cmds.setFocus(panel)
self.log.info(
"Using preset:\n{}".format(
json.dumps(preset, sort_keys=True, indent=4)
)
)
path = capture.capture(log=self.log, **preset)
# Restoring viewport options.
cmds.modelEditor(panel, edit=True, **viewport_defaults)
cmds.setAttr("{}.panZoomEnabled".format(preset["camera"]), pan_zoom)
self.log.debug("playblast path {}".format(path))

View file

@ -1,4 +1,5 @@
import os
import copy
from maya import cmds
@ -9,6 +10,7 @@ from openpype.hosts.maya.api.lib import (
maintained_selection,
iter_visible_nodes_in_range
)
from openpype.lib import StringTemplate
class ExtractAlembic(publish.Extractor):
@ -23,9 +25,7 @@ class ExtractAlembic(publish.Extractor):
label = "Extract Pointcache (Alembic)"
hosts = ["maya"]
families = ["pointcache",
"model",
"vrayproxy"]
families = ["pointcache", "model", "vrayproxy"]
targets = ["local", "remote"]
def process(self, instance):
@ -87,6 +87,7 @@ class ExtractAlembic(publish.Extractor):
end=end))
suspend = not instance.data.get("refresh", False)
self.log.info(nodes)
with suspended_refresh(suspend=suspend):
with maintained_selection():
cmds.select(nodes, noExpand=True)
@ -101,9 +102,9 @@ class ExtractAlembic(publish.Extractor):
instance.data["representations"] = []
representation = {
'name': 'abc',
'ext': 'abc',
'files': filename,
"name": "abc",
"ext": "abc",
"files": filename,
"stagingDir": dirname
}
instance.data["representations"].append(representation)
@ -112,6 +113,48 @@ class ExtractAlembic(publish.Extractor):
self.log.info("Extracted {} to {}".format(instance, dirname))
# Extract proxy.
if not instance.data.get("proxy"):
return
path = path.replace(".abc", "_proxy.abc")
if not instance.data.get("includeParentHierarchy", True):
# Set the root nodes if we don't want to include parents
# The roots are to be considered the ones that are the actual
# direct members of the set
options["root"] = instance.data["proxyRoots"]
with suspended_refresh(suspend=suspend):
with maintained_selection():
cmds.select(instance.data["proxy"])
extract_alembic(
file=path,
startFrame=start,
endFrame=end,
**options
)
template_data = copy.deepcopy(instance.data["anatomyData"])
template_data.update({"ext": "abc"})
templates = instance.context.data["anatomy"].templates["publish"]
published_filename_without_extension = StringTemplate(
templates["file"]
).format(template_data).replace(".abc", "_proxy")
transfers = []
destination = os.path.join(
instance.data["resourcesDir"],
filename.replace(
filename.split(".")[0],
published_filename_without_extension
)
)
transfers.append((path, destination))
for source, destination in transfers:
self.log.debug("Transfer: {} > {}".format(source, destination))
instance.data["transfers"] = transfers
def get_members_and_roots(self, instance):
return instance[:], instance.data.get("setMembers")
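
The proxy transfer above builds the destination by formatting the publish file template and swapping the staging file's base name for the published name plus a "_proxy" suffix. A rough sketch of that string handling, with plain str.format standing in for OpenPype's StringTemplate and an invented template and anatomy data:

import os

# Hypothetical publish file template and anatomy data.
template = "{asset}_{subset}_v{version:0>3}.{ext}"
template_data = {"asset": "sh010", "subset": "pointcacheMain",
                 "version": 7, "ext": "abc"}

published_without_ext = template.format(**template_data).replace(
    ".abc", "_proxy")
filename = "pointcacheMain.abc"  # staging file name
destination = os.path.join(
    "/publish/resources",  # stand-in for instance.data["resourcesDir"]
    filename.replace(filename.split(".")[0], published_without_ext)
)
# destination == "/publish/resources/sh010_pointcacheMain_v007_proxy.abc"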

View file

@ -134,8 +134,8 @@ class ExtractThumbnail(publish.Extractor):
# Update preset with current panel setting
# if override_viewport_options is turned off
if not override_viewport_options:
panel = cmds.getPanel(withFocus=True)
panel = cmds.getPanel(withFocus=True) or ""
if not override_viewport_options and "modelPanel" in panel:
panel_preset = capture.parse_active_view()
preset.update(panel_preset)
cmds.setFocus(panel)

View file

@ -0,0 +1,106 @@
import maya.cmds as cmds
import pyblish.api
from openpype.pipeline.publish import (
ValidateContentsOrder, PublishValidationError
)
class ValidateArnoldSceneSource(pyblish.api.InstancePlugin):
"""Validate Arnold Scene Source.
We require at least one root node/parent for the meshes. This is to ensure
we can duplicate the nodes and preserve their names.
If using proxies, the content and proxy nodes need to share the same names
and must not be parented to the world. This ends up requiring at least two
groups: one with the content nodes and another with the proxy nodes.
"""
order = ValidateContentsOrder
hosts = ["maya"]
families = ["ass"]
label = "Validate Arnold Scene Source"
def _get_nodes_data(self, nodes):
ungrouped_nodes = []
nodes_by_name = {}
parents = []
for node in nodes:
node_split = node.split("|")
if len(node_split) == 2:
ungrouped_nodes.append(node)
parent = "|".join(node_split[:-1])
if parent:
parents.append(parent)
nodes_by_name[node_split[-1]] = node
for shape in cmds.listRelatives(node, shapes=True):
nodes_by_name[shape.split("|")[-1]] = shape
return ungrouped_nodes, nodes_by_name, parents
def process(self, instance):
ungrouped_nodes = []
nodes, content_nodes_by_name, content_parents = self._get_nodes_data(
instance.data["setMembers"]
)
ungrouped_nodes.extend(nodes)
nodes, proxy_nodes_by_name, proxy_parents = self._get_nodes_data(
instance.data.get("proxy", [])
)
ungrouped_nodes.extend(nodes)
# Validate against nodes directly parented to world.
if ungrouped_nodes:
raise PublishValidationError(
"Found nodes parented to the world: {}\n"
"All nodes need to be grouped.".format(ungrouped_nodes)
)
# Proxy validation.
if not instance.data.get("proxy", []):
return
# Validate for content and proxy nodes amount being the same.
if len(instance.data["setMembers"]) != len(instance.data["proxy"]):
raise PublishValidationError(
"Amount of content nodes ({}) and proxy nodes ({}) needs to "
"be the same.".format(
len(instance.data["setMembers"]),
len(instance.data["proxy"])
)
)
# Validate against content and proxy nodes sharing same parent.
if list(set(content_parents) & set(proxy_parents)):
raise PublishValidationError(
"Content and proxy nodes cannot share the same parent."
)
# Validate for content and proxy nodes sharing same names.
sorted_content_names = sorted(content_nodes_by_name.keys())
sorted_proxy_names = sorted(proxy_nodes_by_name.keys())
odd_content_names = list(
set(sorted_content_names) - set(sorted_proxy_names)
)
odd_content_nodes = [
content_nodes_by_name[x] for x in odd_content_names
]
odd_proxy_names = list(
set(sorted_proxy_names) - set(sorted_content_names)
)
odd_proxy_nodes = [
proxy_nodes_by_name[x] for x in odd_proxy_names
]
if not sorted_content_names == sorted_proxy_names:
raise PublishValidationError(
"Content and proxy nodes need to share the same names.\n"
"Content nodes not matching: {}\n"
"Proxy nodes not matching: {}".format(
odd_content_nodes, odd_proxy_nodes
)
)
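
In plain Python, the naming rule this validator enforces can be sketched like this (hypothetical DAG paths; leaf names must match one-to-one while the parents differ):

def leaf_names(nodes):
    # Maya long names use "|" as the path separator.
    return {node.rsplit("|", 1)[-1] for node in nodes}

content = ["|content_GRP|bob_GEO", "|content_GRP|rock_GEO"]
proxy = ["|proxy_GRP|bob_GEO", "|proxy_GRP|rock_GEO"]

assert len(content) == len(proxy)
assert leaf_names(content) == leaf_names(proxy)
# Parents must differ, otherwise the duplicate leaf names would clash.
assert {n.rsplit("|", 1)[0] for n in content}.isdisjoint(
    {n.rsplit("|", 1)[0] for n in proxy})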

View file

@ -4,7 +4,6 @@ import pyblish.api
import openpype.hosts.maya.api.lib as mayalib
from openpype.pipeline.context_tools import get_current_project_asset
from math import ceil
from openpype.pipeline.publish import (
RepairContextAction,
ValidateSceneOrder,
@ -33,18 +32,11 @@ class ValidateMayaUnits(pyblish.api.ContextPlugin):
linearunits = context.data.get('linearUnits')
angularunits = context.data.get('angularUnits')
# TODO(antirotor): This is hack as for framerates having multiple
# decimal places. FTrack is ceiling decimal values on
# fps to two decimal places but Maya 2019+ is reporting those fps
# with much higher resolution. As we currently cannot fix Ftrack
# rounding, we have to round those numbers coming from Maya.
# NOTE: this must be revisited yet again as it seems that Ftrack is
# now flooring the value?
fps = mayalib.float_round(context.data.get('fps'), 2, ceil)
fps = context.data.get('fps')
# TODO repace query with using 'context.data["assetEntity"]'
asset_doc = get_current_project_asset()
asset_fps = asset_doc["data"]["fps"]
asset_fps = mayalib.convert_to_maya_fps(asset_doc["data"]["fps"])
self.log.info('Units (linear): {0}'.format(linearunits))
self.log.info('Units (angular): {0}'.format(angularunits))

View file

@ -193,7 +193,7 @@ class ImageCreator(Creator):
instance_data.pop("uuid")
if not instance_data.get("task"):
instance_data["task"] = legacy_io.Session.get("AVALON_TASK")
instance_data["task"] = self.create_context.get_current_task_name()
if not instance_data.get("variant"):
instance_data["variant"] = ''

View file

@ -2,8 +2,7 @@ import openpype.hosts.photoshop.api as api
from openpype.client import get_asset_by_name
from openpype.pipeline import (
AutoCreator,
CreatedInstance,
legacy_io
CreatedInstance
)
from openpype.hosts.photoshop.api.pipeline import cache_and_get_instances
@ -38,10 +37,11 @@ class PSWorkfileCreator(AutoCreator):
existing_instance = instance
break
project_name = legacy_io.Session["AVALON_PROJECT"]
asset_name = legacy_io.Session["AVALON_ASSET"]
task_name = legacy_io.Session["AVALON_TASK"]
host_name = legacy_io.Session["AVALON_APP"]
context = self.create_context
project_name = context.get_current_project_name()
asset_name = context.get_current_asset_name()
task_name = context.get_current_task_name()
host_name = context.host_name
if existing_instance is None:
asset_doc = get_asset_by_name(project_name, asset_name)
subset_name = self.get_subset_name(

View file

@ -309,8 +309,6 @@ class QtTVPaintRpc(BaseTVPaintRpc):
self.add_methods(
(route_name, self.workfiles_tool),
(route_name, self.loader_tool),
(route_name, self.creator_tool),
(route_name, self.subset_manager_tool),
(route_name, self.publish_tool),
(route_name, self.scene_inventory_tool),
(route_name, self.library_loader_tool),
@ -330,21 +328,9 @@ class QtTVPaintRpc(BaseTVPaintRpc):
self._execute_in_main_thread(item)
return
async def creator_tool(self):
log.info("Triggering Creator tool")
item = MainThreadItem(self.tools_helper.show_creator)
await self._async_execute_in_main_thread(item, wait=False)
async def subset_manager_tool(self):
log.info("Triggering Subset Manager tool")
item = MainThreadItem(self.tools_helper.show_subset_manager)
# Do not wait for result of callback
self._execute_in_main_thread(item, wait=False)
return
async def publish_tool(self):
log.info("Triggering Publish tool")
item = MainThreadItem(self.tools_helper.show_publish)
item = MainThreadItem(self.tools_helper.show_publisher_tool)
self._execute_in_main_thread(item)
return
@ -859,10 +845,6 @@ class QtCommunicator(BaseCommunicator):
"callback": "loader_tool",
"label": "Load",
"help": "Open loader tool"
}, {
"callback": "creator_tool",
"label": "Create",
"help": "Open creator tool"
}, {
"callback": "scene_inventory_tool",
"label": "Scene inventory",
@ -875,10 +857,6 @@ class QtCommunicator(BaseCommunicator):
"callback": "library_loader_tool",
"label": "Library",
"help": "Open library loader tool"
}, {
"callback": "subset_manager_tool",
"label": "Subset Manager",
"help": "Open subset manager tool"
}, {
"callback": "experimental_tools",
"label": "Experimental tools",

View file

@ -202,8 +202,9 @@ def get_groups_data(communicator=None):
# Variable containing full path to output file
"output_path = \"{}\"".format(output_filepath),
"empty = 0",
# Loop over 100 groups
"FOR idx = 1 TO 100",
# Loop over 26 groups which is ATM maximum possible (in 11.7)
# - ref: https://www.tvpaint.com/forum/viewtopic.php?t=13880
"FOR idx = 1 TO 26",
# Receive information about groups
"tv_layercolor \"getcolor\" 0 idx",
"PARSE result clip_id group_index c_red c_green c_blue group_name",

View file

@ -8,7 +8,7 @@ import requests
import pyblish.api
from openpype.client import get_project, get_asset_by_name
from openpype.host import HostBase, IWorkfileHost, ILoadHost
from openpype.host import HostBase, IWorkfileHost, ILoadHost, IPublishHost
from openpype.hosts.tvpaint import TVPAINT_ROOT_DIR
from openpype.settings import get_current_project_settings
from openpype.lib import register_event_callback
@ -18,6 +18,7 @@ from openpype.pipeline import (
register_creator_plugin_path,
AVALON_CONTAINER_ID,
)
from openpype.pipeline.context_tools import get_global_context
from .lib import (
execute_george,
@ -29,6 +30,7 @@ log = logging.getLogger(__name__)
METADATA_SECTION = "avalon"
SECTION_NAME_CONTEXT = "context"
SECTION_NAME_CREATE_CONTEXT = "create_context"
SECTION_NAME_INSTANCES = "instances"
SECTION_NAME_CONTAINERS = "containers"
# Maximum length of metadata chunk string
@ -58,7 +60,7 @@ instances=2
"""
class TVPaintHost(HostBase, IWorkfileHost, ILoadHost):
class TVPaintHost(HostBase, IWorkfileHost, ILoadHost, IPublishHost):
name = "tvpaint"
def install(self):
@ -85,14 +87,63 @@ class TVPaintHost(HostBase, IWorkfileHost, ILoadHost):
registered_callbacks = (
pyblish.api.registered_callbacks().get("instanceToggled") or []
)
if self.on_instance_toggle not in registered_callbacks:
pyblish.api.register_callback(
"instanceToggled", self.on_instance_toggle
)
register_event_callback("application.launched", self.initial_launch)
register_event_callback("application.exit", self.application_exit)
def get_current_project_name(self):
"""
Returns:
Union[str, None]: Current project name.
"""
return self.get_current_context().get("project_name")
def get_current_asset_name(self):
"""
Returns:
Union[str, None]: Current asset name.
"""
return self.get_current_context().get("asset_name")
def get_current_task_name(self):
"""
Returns:
Union[str, None]: Current task name.
"""
return self.get_current_context().get("task_name")
def get_current_context(self):
context = get_current_workfile_context()
if not context:
return get_global_context()
if "project_name" in context:
return context
# This is the legacy way the context was stored
return {
"project_name": context.get("project"),
"asset_name": context.get("asset"),
"task_name": context.get("task")
}
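
As an illustration of the remapping above (made-up values), a workfile that stored the legacy keys is returned with the current key names:

legacy = {"project": "demo", "asset": "sh010", "task": "animation"}
current = {
    "project_name": legacy["project"],
    "asset_name": legacy["asset"],
    "task_name": legacy["task"],
}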
# --- Create ---
def get_context_data(self):
return get_workfile_metadata(SECTION_NAME_CREATE_CONTEXT, {})
def update_context_data(self, data, changes):
return write_workfile_metadata(SECTION_NAME_CREATE_CONTEXT, data)
def list_instances(self):
"""List all created instances from current workfile."""
return list_instances()
def write_instances(self, data):
return write_instances(data)
# --- Workfile ---
def open_workfile(self, filepath):
george_script = "tv_LoadProject '\"'\"{}\"'\"'".format(
filepath.replace("\\", "/")
@ -102,11 +153,7 @@ class TVPaintHost(HostBase, IWorkfileHost, ILoadHost):
def save_workfile(self, filepath=None):
if not filepath:
filepath = self.get_current_workfile()
context = {
"project": legacy_io.Session["AVALON_PROJECT"],
"asset": legacy_io.Session["AVALON_ASSET"],
"task": legacy_io.Session["AVALON_TASK"]
}
context = get_global_context()
save_current_workfile_context(context)
# Execute george script to save workfile.
@ -125,6 +172,7 @@ class TVPaintHost(HostBase, IWorkfileHost, ILoadHost):
def get_workfile_extensions(self):
return [".tvpp"]
# --- Load ---
def get_containers(self):
return get_containers()
@ -137,27 +185,15 @@ class TVPaintHost(HostBase, IWorkfileHost, ILoadHost):
return
log.info("Setting up project...")
set_context_settings()
def remove_instance(self, instance):
"""Remove instance from current workfile metadata.
Implementation for Subset manager tool.
"""
current_instances = get_workfile_metadata(SECTION_NAME_INSTANCES)
instance_id = instance.get("uuid")
found_idx = None
if instance_id:
for idx, _inst in enumerate(current_instances):
if _inst["uuid"] == instance_id:
found_idx = idx
break
if found_idx is None:
global_context = get_global_context()
project_name = global_context.get("project_name")
asset_name = global_context.get("aset_name")
if not project_name or not asset_name:
return
current_instances.pop(found_idx)
write_instances(current_instances)
asset_doc = get_asset_by_name(project_name, asset_name)
set_context_settings(project_name, asset_doc)
def application_exit(self):
"""Logic related to TimerManager.
@ -177,34 +213,6 @@ class TVPaintHost(HostBase, IWorkfileHost, ILoadHost):
rest_api_url = "{}/timers_manager/stop_timer".format(webserver_url)
requests.post(rest_api_url)
def on_instance_toggle(self, instance, old_value, new_value):
"""Update instance data in workfile on publish toggle."""
# Review may not have a real instance in workfile metadata
if not instance.data.get("uuid"):
return
instance_id = instance.data["uuid"]
found_idx = None
current_instances = list_instances()
for idx, workfile_instance in enumerate(current_instances):
if workfile_instance["uuid"] == instance_id:
found_idx = idx
break
if found_idx is None:
return
if "active" in current_instances[found_idx]:
current_instances[found_idx]["active"] = new_value
self.write_instances(current_instances)
def list_instances(self):
"""List all created instances from current workfile."""
return list_instances()
def write_instances(self, data):
return write_instances(data)
def containerise(
name, namespace, members, context, loader, current_containers=None
@ -462,40 +470,17 @@ def get_containers():
return output
def set_context_settings(asset_doc=None):
def set_context_settings(project_name, asset_doc):
"""Set workfile settings by asset document data.
Change fps, resolution and frame start/end.
"""
project_name = legacy_io.active_project()
if asset_doc is None:
asset_name = legacy_io.Session["AVALON_ASSET"]
# Use current session asset if not passed
asset_doc = get_asset_by_name(project_name, asset_name)
project_doc = get_project(project_name)
framerate = asset_doc["data"].get("fps")
if framerate is None:
framerate = project_doc["data"].get("fps")
if framerate is not None:
execute_george(
"tv_framerate {} \"timestretch\"".format(framerate)
)
else:
print("Framerate was not found!")
width_key = "resolutionWidth"
height_key = "resolutionHeight"
width = asset_doc["data"].get(width_key)
height = asset_doc["data"].get(height_key)
if width is None or height is None:
width = project_doc["data"].get(width_key)
height = project_doc["data"].get(height_key)
if width is None or height is None:
print("Resolution was not found!")
else:
@ -503,6 +488,15 @@ def set_context_settings(asset_doc=None):
"tv_resizepage {} {} 0".format(width, height)
)
framerate = asset_doc["data"].get("fps")
if framerate is not None:
execute_george(
"tv_framerate {} \"timestretch\"".format(framerate)
)
else:
print("Framerate was not found!")
frame_start = asset_doc["data"].get("frameStart")
frame_end = asset_doc["data"].get("frameEnd")

View file

@ -1,80 +1,142 @@
import re
import uuid
from openpype.pipeline import (
LegacyCreator,
LoaderPlugin,
registered_host,
from openpype.pipeline import LoaderPlugin
from openpype.pipeline.create import (
CreatedInstance,
get_subset_name,
AutoCreator,
Creator,
)
from openpype.pipeline.create.creator_plugins import cache_and_get_instances
from .lib import get_layers_data
from .pipeline import get_current_workfile_context
class Creator(LegacyCreator):
def __init__(self, *args, **kwargs):
super(Creator, self).__init__(*args, **kwargs)
# Add unified identifier created with `uuid` module
self.data["uuid"] = str(uuid.uuid4())
SHARED_DATA_KEY = "openpype.tvpaint.instances"
@classmethod
def get_dynamic_data(cls, *args, **kwargs):
dynamic_data = super(Creator, cls).get_dynamic_data(*args, **kwargs)
# Change asset and name by current workfile context
workfile_context = get_current_workfile_context()
asset_name = workfile_context.get("asset")
task_name = workfile_context.get("task")
if "asset" not in dynamic_data and asset_name:
dynamic_data["asset"] = asset_name
class TVPaintCreatorCommon:
@property
def subset_template_family_filter(self):
return self.family
if "task" not in dynamic_data and task_name:
dynamic_data["task"] = task_name
return dynamic_data
@staticmethod
def are_instances_same(instance_1, instance_2):
"""Compare instances but skip keys with unique values.
Keys that are guaranteed to differ on a new instance, such as "id",
are skipped during the comparison.
Returns:
bool: True if instances are same.
"""
if (
not isinstance(instance_1, dict)
or not isinstance(instance_2, dict)
):
return instance_1 == instance_2
checked_keys = set()
checked_keys.add("id")
for key, value in instance_1.items():
if key not in checked_keys:
if key not in instance_2:
return False
if value != instance_2[key]:
return False
checked_keys.add(key)
for key in instance_2.keys():
if key not in checked_keys:
return False
return True
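
A quick usage sketch of the comparison above (assuming the TVPaintCreatorCommon class is importable; the dicts are made up): two instances that differ only in "id" compare as the same, while any other differing key does not:

a = {"id": "pyblish.avalon.instance", "subset": "reviewMain", "active": True}
b = {"id": "another-id", "subset": "reviewMain", "active": True}
assert TVPaintCreatorCommon.are_instances_same(a, b)

b["active"] = False
assert not TVPaintCreatorCommon.are_instances_same(a, b)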
def write_instances(self, data):
self.log.debug(
"Storing instance data to workfile. {}".format(str(data))
def _cache_and_get_instances(self):
return cache_and_get_instances(
self, SHARED_DATA_KEY, self.host.list_instances
)
host = registered_host()
return host.write_instances(data)
def process(self):
host = registered_host()
data = host.list_instances()
data.append(self.data)
self.write_instances(data)
def _collect_create_instances(self):
instances_by_identifier = self._cache_and_get_instances()
for instance_data in instances_by_identifier[self.identifier]:
instance = CreatedInstance.from_existing(instance_data, self)
self._add_instance_to_context(instance)
def _update_create_instances(self, update_list):
if not update_list:
return
cur_instances = self.host.list_instances()
cur_instances_by_id = {}
for instance_data in cur_instances:
instance_id = instance_data.get("instance_id")
if instance_id:
cur_instances_by_id[instance_id] = instance_data
for instance, changes in update_list:
instance_data = changes.new_value
cur_instance_data = cur_instances_by_id.get(instance.id)
if cur_instance_data is None:
cur_instances.append(instance_data)
continue
for key in set(cur_instance_data) - set(instance_data):
cur_instance_data.pop(key)
cur_instance_data.update(instance_data)
self.host.write_instances(cur_instances)
def _custom_get_subset_name(
self,
variant,
task_name,
asset_doc,
project_name,
host_name=None,
instance=None
):
dynamic_data = self.get_dynamic_data(
variant, task_name, asset_doc, project_name, host_name, instance
)
return get_subset_name(
self.family,
variant,
task_name,
asset_doc,
project_name,
host_name,
dynamic_data=dynamic_data,
project_settings=self.project_settings,
family_filter=self.subset_template_family_filter
)
class TVPaintCreator(Creator, TVPaintCreatorCommon):
def collect_instances(self):
self._collect_create_instances()
def update_instances(self, update_list):
self._update_create_instances(update_list)
def remove_instances(self, instances):
ids_to_remove = {
instance.id
for instance in instances
}
cur_instances = self.host.list_instances()
changed = False
new_instances = []
for instance_data in cur_instances:
if instance_data.get("instance_id") in ids_to_remove:
changed = True
else:
new_instances.append(instance_data)
if changed:
self.host.write_instances(new_instances)
for instance in instances:
self._remove_instance_from_context(instance)
def get_dynamic_data(self, *args, **kwargs):
# Change asset and name by current workfile context
create_context = self.create_context
asset_name = create_context.get_current_asset_name()
task_name = create_context.get_current_task_name()
output = {}
if asset_name:
output["asset"] = asset_name
if task_name:
output["task"] = task_name
return output
def get_subset_name(self, *args, **kwargs):
return self._custom_get_subset_name(*args, **kwargs)
def _store_new_instance(self, new_instance):
instances_data = self.host.list_instances()
instances_data.append(new_instance.data_to_store())
self.host.write_instances(instances_data)
self._add_instance_to_context(new_instance)
class TVPaintAutoCreator(AutoCreator, TVPaintCreatorCommon):
def collect_instances(self):
self._collect_create_instances()
def update_instances(self, update_list):
self._update_create_instances(update_list)
def get_subset_name(self, *args, **kwargs):
return self._custom_get_subset_name(*args, **kwargs)
class Loader(LoaderPlugin):

View file

@ -0,0 +1,150 @@
import collections
from openpype.pipeline.create.creator_plugins import (
SubsetConvertorPlugin,
cache_and_get_instances,
)
from openpype.hosts.tvpaint.api.plugin import SHARED_DATA_KEY
from openpype.hosts.tvpaint.api.lib import get_groups_data
class TVPaintLegacyConverted(SubsetConvertorPlugin):
"""Conversion of legacy instances in scene to new creators.
This convertor handles only instances created by core creators.
All instances that would be created using auto-creators are removed as at
the moment of finding them would there already be existing instances.
"""
identifier = "tvpaint.legacy.converter"
def find_instances(self):
instances_by_identifier = cache_and_get_instances(
self, SHARED_DATA_KEY, self.host.list_instances
)
if instances_by_identifier[None]:
self.add_convertor_item("Convert legacy instances")
def convert(self):
current_instances = self.host.list_instances()
to_convert = collections.defaultdict(list)
converted = False
for instance in current_instances:
if instance.get("creator_identifier") is not None:
continue
converted = True
family = instance.get("family")
if family in (
"renderLayer",
"renderPass",
"renderScene",
"review",
"workfile",
):
to_convert[family].append(instance)
else:
instance["keep"] = False
# Skip if nothing was changed
if not converted:
self.remove_convertor_item()
return
self._convert_render_layers(
to_convert["renderLayer"], current_instances)
self._convert_render_passes(
to_convert["renderpass"], current_instances)
self._convert_render_scenes(
to_convert["renderScene"], current_instances)
self._convert_workfiles(
to_convert["workfile"], current_instances)
self._convert_reviews(
to_convert["review"], current_instances)
new_instances = [
instance
for instance in current_instances
if instance.get("keep") is not False
]
self.host.write_instances(new_instances)
# remove legacy item if all is fine
self.remove_convertor_item()
def _convert_render_layers(self, render_layers, current_instances):
if not render_layers:
return
# Look for possible existing render layers in scene
render_layers_by_group_id = {}
for instance in current_instances:
if instance.get("creator_identifier") == "render.layer":
group_id = instance["creator_identifier"]["group_id"]
render_layers_by_group_id[group_id] = instance
groups_by_id = {
group["group_id"]: group
for group in get_groups_data()
}
for render_layer in render_layers:
group_id = render_layer.pop("group_id")
# Just remove legacy instance if group is already occupied
if group_id in render_layers_by_group_id:
render_layer["keep"] = False
continue
# Add identifier
render_layer["creator_identifier"] = "render.layer"
# Change 'uuid' to 'instance_id'
render_layer["instance_id"] = render_layer.pop("uuid")
# Fill creator attributes
render_layer["creator_attributes"] = {
"group_id": group_id
}
render_layer["family"] = "render"
group = groups_by_id[group_id]
# Use group name for variant
group["variant"] = group["name"]
def _convert_render_passes(self, render_passes, current_instances):
if not render_passes:
return
# Render passes must have available render layers so we look for render
# layers first
# - '_convert_render_layers' must be called before this method
render_layers_by_group_id = {}
for instance in current_instances:
if instance.get("creator_identifier") == "render.layer":
group_id = instance["creator_identifier"]["group_id"]
render_layers_by_group_id[group_id] = instance
for render_pass in render_passes:
group_id = render_pass.pop("group_id")
render_layer = render_layers_by_group_id.get(group_id)
if not render_layer:
render_pass["keep"] = False
continue
render_pass["creator_identifier"] = "render.pass"
render_pass["instance_id"] = render_pass.pop("uuid")
render_pass["family"] = "render"
render_pass["creator_attributes"] = {
"render_layer_instance_id": render_layer["instance_id"]
}
render_pass["variant"] = render_pass.pop("pass")
render_pass.pop("renderlayer")
# Rest of instances are just marked for deletion
def _convert_render_scenes(self, render_scenes, current_instances):
for render_scene in render_scenes:
render_scene["keep"] = False
def _convert_workfiles(self, workfiles, current_instances):
for render_scene in workfiles:
render_scene["keep"] = False
def _convert_reviews(self, reviews, current_instances):
for render_scene in reviews:
render_scene["keep"] = False

View file

@ -0,0 +1,739 @@
"""Render Layer and Passes creators.
Render layer is the main part and is represented by a group in TVPaint. All
TVPaint layers marked with that group color are part of the render layer. To be
more specific about some parts of the layer it is possible to create sub-sets
of the layer, which are named passes. A render pass consists of layers in the
same color group as the render layer but defines a more specific part.
For example the render layer could be 'Bob', which consists of 5 TVPaint
layers.
- Bob has a 'head' which consists of 2 TVPaint layers -> Render pass 'head'
- Bob has a 'body' which consists of 1 TVPaint layer -> Render pass 'body'
- Bob has an 'arm' which consists of 1 TVPaint layer -> Render pass 'arm'
- The last layer does not belong to any render pass at all
Bob will be rendered as the 'beauty' of Bob (all visible layers in the group).
His head will be rendered too, but without any other parts. The same goes for
the body and arm.
What is this good for? Compositing has more control over how the renders are
used. Transforms can be applied to each render pass without the need to modify
and re-render them using TVPaint.
The workflow may hit issues when blending modes other than the default 'color'
blend mode are used. In that case it is not recommended to use this workflow at
all, as other blend modes may affect all layers in the clip, which cannot be
reproduced per pass.
There is a special case for simple publishing of the scene, called
'render.scene'. That will use all visible layers and render them as one big
sequence.
Todos:
Add option to extract marked layers and passes as json output format for
AfterEffects.
"""
import collections
from openpype.client import get_asset_by_name
from openpype.lib import (
prepare_template_data,
EnumDef,
TextDef,
BoolDef,
)
from openpype.pipeline.create import (
CreatedInstance,
CreatorError,
)
from openpype.hosts.tvpaint.api.plugin import (
TVPaintCreator,
TVPaintAutoCreator,
)
from openpype.hosts.tvpaint.api.lib import (
get_layers_data,
get_groups_data,
execute_george_through_file,
)
RENDER_LAYER_DETAILED_DESCRIPTIONS = (
"""Render Layer is "a group of TVPaint layers"
Be aware that a Render Layer <b>is not</b> a TVPaint layer.
All TVPaint layers in the scene with the color group id are rendered in the
beauty pass. To create sub passes use the Render Pass creator, which is
dependent on the existence of a render layer instance.
The group can represent an asset (tree) or a different part of the scene that
consists of one or more TVPaint layers that can be used as a single item
during compositing (for example).
In some cases it may be needed to have sub parts of the layer. For example
'Bob' could be a Render Layer which has 'Arm', 'Head' and 'Body' as Render
Passes.
"""
)
RENDER_PASS_DETAILED_DESCRIPTIONS = (
"""Render Pass is sub part of Render Layer.
Render Pass can consist of one or more TVPaint layers. Render Layers must
belong to a Render Layer. Marker TVPaint layers will change it's group color
to match group color of Render Layer.
"""
)
class CreateRenderlayer(TVPaintCreator):
"""Mark layer group as Render layer instance.
All TVPaint layers in the scene with the color group id are rendered in the
beauty pass. To create sub passes use the Render Pass creator, which is
dependent on the existence of a render layer instance.
"""
label = "Render Layer"
family = "render"
subset_template_family_filter = "renderLayer"
identifier = "render.layer"
icon = "fa5.images"
# George script to change color group
rename_script_template = (
"tv_layercolor \"setcolor\""
" {clip_id} {group_id} {r} {g} {b} \"{name}\""
)
# Order to be executed before Render Pass creator
order = 90
description = "Mark TVPaint color group as one Render Layer."
detailed_description = RENDER_LAYER_DETAILED_DESCRIPTIONS
# Settings
# - Default render pass name for beauty
default_pass_name = "beauty"
# - Mark by default instance for review
mark_for_review = True
def apply_settings(self, project_settings, system_settings):
plugin_settings = (
project_settings["tvpaint"]["create"]["create_render_layer"]
)
self.default_variant = plugin_settings["default_variant"]
self.default_variants = plugin_settings["default_variants"]
self.default_pass_name = plugin_settings["default_pass_name"]
self.mark_for_review = plugin_settings["mark_for_review"]
def get_dynamic_data(
self, variant, task_name, asset_doc, project_name, host_name, instance
):
dynamic_data = super().get_dynamic_data(
variant, task_name, asset_doc, project_name, host_name, instance
)
dynamic_data["renderpass"] = self.default_pass_name
dynamic_data["renderlayer"] = variant
return dynamic_data
def _get_selected_group_ids(self):
return {
layer["group_id"]
for layer in get_layers_data()
if layer["selected"]
}
def create(self, subset_name, instance_data, pre_create_data):
self.log.debug("Query data from workfile.")
group_name = instance_data["variant"]
group_id = pre_create_data.get("group_id")
# This creator should run only on one group
if group_id is None or group_id == -1:
selected_groups = self._get_selected_group_ids()
selected_groups.discard(0)
if len(selected_groups) > 1:
raise CreatorError("You have selected more than one group")
if len(selected_groups) == 0:
raise CreatorError("You don't have selected any group")
group_id = tuple(selected_groups)[0]
self.log.debug("Querying groups data from workfile.")
groups_data = get_groups_data()
group_item = None
for group_data in groups_data:
if group_data["group_id"] == group_id:
group_item = group_data
for instance in self.create_context.instances:
if (
instance.creator_identifier == self.identifier
and instance["creator_attributes"]["group_id"] == group_id
):
raise CreatorError((
f"Group \"{group_item.get('name')}\" is already used"
f" by another render layer \"{instance['subset']}\""
))
self.log.debug(f"Selected group id is \"{group_id}\".")
if "creator_attributes" not in instance_data:
instance_data["creator_attributes"] = {}
creator_attributes = instance_data["creator_attributes"]
mark_for_review = pre_create_data.get("mark_for_review")
if mark_for_review is None:
mark_for_review = self.mark_for_review
creator_attributes["group_id"] = group_id
creator_attributes["mark_for_review"] = mark_for_review
self.log.info(f"Subset name is {subset_name}")
new_instance = CreatedInstance(
self.family,
subset_name,
instance_data,
self
)
self._store_new_instance(new_instance)
if not group_id or group_item["name"] == group_name:
return new_instance
self.log.debug("Changing name of the group.")
# Rename TVPaint group (keep color same)
# - groups can't contain spaces
rename_script = self.rename_script_template.format(
clip_id=group_item["clip_id"],
group_id=group_item["group_id"],
r=group_item["red"],
g=group_item["green"],
b=group_item["blue"],
name=group_name
)
execute_george_through_file(rename_script)
self.log.info((
f"Name of group with index {group_id}"
f" was changed to \"{group_name}\"."
))
return new_instance
def _get_groups_enum(self):
groups_enum = []
empty_groups = []
for group in get_groups_data():
group_name = group["name"]
item = {
"label": group_name,
"value": group["group_id"]
}
# TVPaint has a defined number of color groups available, but
# the count is not consistent across versions. It is not possible
# to know how many groups there are.
if group_name and group_name != "0":
if empty_groups:
groups_enum.extend(empty_groups)
empty_groups = []
groups_enum.append(item)
else:
empty_groups.append(item)
return groups_enum
def get_pre_create_attr_defs(self):
groups_enum = self._get_groups_enum()
groups_enum.insert(0, {"label": "<Use selection>", "value": -1})
return [
EnumDef(
"group_id",
label="Group",
items=groups_enum
),
BoolDef(
"mark_for_review",
label="Review",
default=self.mark_for_review
)
]
def get_instance_attr_defs(self):
groups_enum = self._get_groups_enum()
return [
EnumDef(
"group_id",
label="Group",
items=groups_enum
),
BoolDef(
"mark_for_review",
label="Review",
default=self.mark_for_review
)
]
def update_instances(self, update_list):
self._update_color_groups()
self._update_renderpass_groups()
super().update_instances(update_list)
def _update_color_groups(self):
render_layer_instances = []
for instance in self.create_context.instances:
if instance.creator_identifier == self.identifier:
render_layer_instances.append(instance)
if not render_layer_instances:
return
groups_by_id = {
group["group_id"]: group
for group in get_groups_data()
}
grg_script_lines = []
for instance in render_layer_instances:
group_id = instance["creator_attributes"]["group_id"]
variant = instance["variant"]
group = groups_by_id[group_id]
if group["name"] == variant:
continue
grg_script_lines.append(self.rename_script_template.format(
clip_id=group["clip_id"],
group_id=group["group_id"],
r=group["red"],
g=group["green"],
b=group["blue"],
name=variant
))
if grg_script_lines:
execute_george_through_file("\n".join(grg_script_lines))
def _update_renderpass_groups(self):
render_layer_instances = {}
render_pass_instances = collections.defaultdict(list)
for instance in self.create_context.instances:
if instance.creator_identifier == CreateRenderPass.identifier:
render_layer_id = (
instance["creator_attributes"]["render_layer_instance_id"]
)
render_pass_instances[render_layer_id].append(instance)
elif instance.creator_identifier == self.identifier:
render_layer_instances[instance.id] = instance
if not render_pass_instances or not render_layer_instances:
return
layers_data = get_layers_data()
layers_by_name = collections.defaultdict(list)
for layer in layers_data:
layers_by_name[layer["name"]].append(layer)
george_lines = []
for render_layer_id, instances in render_pass_instances.items():
render_layer_inst = render_layer_instances.get(render_layer_id)
if render_layer_inst is None:
continue
group_id = render_layer_inst["creator_attributes"]["group_id"]
layer_names = set()
for instance in instances:
layer_names |= set(instance["layer_names"])
for layer_name in layer_names:
george_lines.extend(
f"tv_layercolor \"set\" {layer['layer_id']} {group_id}"
for layer in layers_by_name[layer_name]
if layer["group_id"] != group_id
)
if george_lines:
execute_george_through_file("\n".join(george_lines))
class CreateRenderPass(TVPaintCreator):
family = "render"
subset_template_family_filter = "renderPass"
identifier = "render.pass"
label = "Render Pass"
icon = "fa5.image"
description = "Mark selected TVPaint layers as pass of Render Layer."
detailed_description = RENDER_PASS_DETAILED_DESCRIPTIONS
order = CreateRenderlayer.order + 10
# Settings
mark_for_review = True
def apply_settings(self, project_settings, system_settings):
plugin_settings = (
project_settings["tvpaint"]["create"]["create_render_pass"]
)
self.default_variant = plugin_settings["default_variant"]
self.default_variants = plugin_settings["default_variants"]
self.mark_for_review = plugin_settings["mark_for_review"]
def collect_instances(self):
instances_by_identifier = self._cache_and_get_instances()
render_layers = {
instance_data["instance_id"]: {
"variant": instance_data["variant"],
"template_data": prepare_template_data({
"renderlayer": instance_data["variant"]
})
}
for instance_data in (
instances_by_identifier[CreateRenderlayer.identifier]
)
}
for instance_data in instances_by_identifier[self.identifier]:
render_layer_instance_id = (
instance_data
.get("creator_attributes", {})
.get("render_layer_instance_id")
)
render_layer_info = render_layers.get(render_layer_instance_id)
self.update_instance_labels(
instance_data,
render_layer_info["variant"],
render_layer_info["template_data"]
)
instance = CreatedInstance.from_existing(instance_data, self)
self._add_instance_to_context(instance)
def get_dynamic_data(
self, variant, task_name, asset_doc, project_name, host_name, instance
):
dynamic_data = super().get_dynamic_data(
variant, task_name, asset_doc, project_name, host_name, instance
)
dynamic_data["renderpass"] = variant
dynamic_data["renderlayer"] = "{renderlayer}"
return dynamic_data
def update_instance_labels(
self, instance, render_layer_variant, render_layer_data=None
):
old_label = instance.get("label")
old_group = instance.get("group")
new_label = None
new_group = None
if render_layer_variant is not None:
if render_layer_data is None:
render_layer_data = prepare_template_data({
"renderlayer": render_layer_variant
})
try:
new_label = instance["subset"].format(**render_layer_data)
except (KeyError, ValueError):
pass
new_group = f"{self.get_group_label()} ({render_layer_variant})"
instance["label"] = new_label
instance["group"] = new_group
return old_group != new_group or old_label != new_label
def create(self, subset_name, instance_data, pre_create_data):
render_layer_instance_id = pre_create_data.get(
"render_layer_instance_id"
)
if not render_layer_instance_id:
raise CreatorError("Missing RenderLayer instance")
render_layer_instance = self.create_context.instances_by_id.get(
render_layer_instance_id
)
if render_layer_instance is None:
raise CreatorError((
"RenderLayer instance was not found"
f" by id \"{render_layer_instance_id}\""
))
group_id = render_layer_instance["creator_attributes"]["group_id"]
self.log.debug("Query data from workfile.")
layers_data = get_layers_data()
self.log.debug("Checking selection.")
# Get all selected layers and their group ids
marked_layer_names = pre_create_data.get("layer_names")
if marked_layer_names is not None:
layers_by_name = {layer["name"]: layer for layer in layers_data}
marked_layers = []
for layer_name in marked_layer_names:
layer = layers_by_name.get(layer_name)
if layer is None:
raise CreatorError(
f"Layer with name \"{layer_name}\" was not found")
marked_layers.append(layer)
else:
marked_layers = [
layer
for layer in layers_data
if layer["selected"]
]
# Raise if nothing is selected
if not marked_layers:
raise CreatorError(
"Nothing is selected. Please select layers.")
marked_layer_names = {layer["name"] for layer in marked_layers}
instances_to_remove = []
for instance in self.create_context.instances:
if instance.creator_identifier != self.identifier:
continue
cur_layer_names = set(instance["layer_names"])
if not cur_layer_names.intersection(marked_layer_names):
continue
new_layer_names = cur_layer_names - marked_layer_names
if new_layer_names:
instance["layer_names"] = list(new_layer_names)
else:
instances_to_remove.append(instance)
render_layer = render_layer_instance["variant"]
subset_name_fill_data = {"renderlayer": render_layer}
# Format dynamic keys in subset name
label = subset_name
try:
label = label.format(
**prepare_template_data(subset_name_fill_data)
)
except (KeyError, ValueError):
pass
self.log.info(f"New subset name is \"{label}\".")
instance_data["label"] = label
instance_data["group"] = f"{self.get_group_label()} ({render_layer})"
instance_data["layer_names"] = list(marked_layer_names)
if "creator_attributes" not in instance_data:
instance_data["creator_attributes"] = {}
creator_attributes = instance_data["creator_attributes"]
mark_for_review = pre_create_data.get("mark_for_review")
if mark_for_review is None:
mark_for_review = self.mark_for_review
creator_attributes["mark_for_review"] = mark_for_review
creator_attributes["render_layer_instance_id"] = (
render_layer_instance_id
)
new_instance = CreatedInstance(
self.family,
subset_name,
instance_data,
self
)
instances_data = self._remove_and_filter_instances(
instances_to_remove
)
instances_data.append(new_instance.data_to_store())
self.host.write_instances(instances_data)
self._add_instance_to_context(new_instance)
self._change_layers_group(marked_layers, group_id)
return new_instance
def _change_layers_group(self, layers, group_id):
filtered_layers = [
layer
for layer in layers
if layer["group_id"] != group_id
]
if filtered_layers:
self.log.info((
"Changing group of "
f"{','.join([l['name'] for l in filtered_layers])}"
f" to {group_id}"
))
george_lines = [
f"tv_layercolor \"set\" {layer['layer_id']} {group_id}"
for layer in filtered_layers
]
execute_george_through_file("\n".join(george_lines))
def _remove_and_filter_instances(self, instances_to_remove):
instances_data = self.host.list_instances()
if not instances_to_remove:
return instances_data
removed_ids = set()
for instance in instances_to_remove:
removed_ids.add(instance.id)
self._remove_instance_from_context(instance)
return [
instance_data
for instance_data in instances_data
if instance_data.get("instance_id") not in removed_ids
]
def get_pre_create_attr_defs(self):
render_layers = [
{
"value": instance.id,
"label": instance.label
}
for instance in self.create_context.instances
if instance.creator_identifier == CreateRenderlayer.identifier
]
if not render_layers:
render_layers.append({"value": None, "label": "N/A"})
return [
EnumDef(
"render_layer_instance_id",
label="Render Layer",
items=render_layers
),
BoolDef(
"mark_for_review",
label="Review",
default=self.mark_for_review
)
]
def get_instance_attr_defs(self):
return self.get_pre_create_attr_defs()
class TVPaintSceneRenderCreator(TVPaintAutoCreator):
family = "render"
subset_template_family_filter = "renderScene"
identifier = "render.scene"
label = "Scene Render"
icon = "fa.file-image-o"
# Settings
default_pass_name = "beauty"
mark_for_review = True
active_on_create = False
def apply_settings(self, project_settings, system_settings):
plugin_settings = (
project_settings["tvpaint"]["create"]["create_render_scene"]
)
self.default_variant = plugin_settings["default_variant"]
self.default_variants = plugin_settings["default_variants"]
self.mark_for_review = plugin_settings["mark_for_review"]
self.active_on_create = plugin_settings["active_on_create"]
self.default_pass_name = plugin_settings["default_pass_name"]
def get_dynamic_data(self, variant, *args, **kwargs):
dynamic_data = super().get_dynamic_data(variant, *args, **kwargs)
dynamic_data["renderpass"] = "{renderpass}"
dynamic_data["renderlayer"] = variant
return dynamic_data
def _create_new_instance(self):
create_context = self.create_context
host_name = create_context.host_name
project_name = create_context.get_current_project_name()
asset_name = create_context.get_current_asset_name()
task_name = create_context.get_current_task_name()
asset_doc = get_asset_by_name(project_name, asset_name)
subset_name = self.get_subset_name(
self.default_variant,
task_name,
asset_doc,
project_name,
host_name
)
data = {
"asset": asset_name,
"task": task_name,
"variant": self.default_variant,
"creator_attributes": {
"render_pass_name": self.default_pass_name,
"mark_for_review": True
},
"label": self._get_label(
subset_name,
self.default_pass_name
)
}
if not self.active_on_create:
data["active"] = False
new_instance = CreatedInstance(
self.family, subset_name, data, self
)
instances_data = self.host.list_instances()
instances_data.append(new_instance.data_to_store())
self.host.write_instances(instances_data)
self._add_instance_to_context(new_instance)
return new_instance
def create(self):
existing_instance = None
for instance in self.create_context.instances:
if instance.creator_identifier == self.identifier:
existing_instance = instance
break
if existing_instance is None:
return self._create_new_instance()
create_context = self.create_context
host_name = create_context.host_name
project_name = create_context.get_current_project_name()
asset_name = create_context.get_current_asset_name()
task_name = create_context.get_current_task_name()
if (
existing_instance["asset"] != asset_name
or existing_instance["task"] != task_name
):
asset_doc = get_asset_by_name(project_name, asset_name)
subset_name = self.get_subset_name(
existing_instance["variant"],
task_name,
asset_doc,
project_name,
host_name,
existing_instance
)
existing_instance["asset"] = asset_name
existing_instance["task"] = task_name
existing_instance["subset"] = subset_name
existing_instance["label"] = self._get_label(
existing_instance["subset"],
existing_instance["creator_attributes"]["render_pass_name"]
)
def _get_label(self, subset_name, render_pass_name):
try:
subset_name = subset_name.format(**prepare_template_data({
"renderpass": render_pass_name
}))
except (KeyError, ValueError):
pass
return subset_name
def get_instance_attr_defs(self):
return [
TextDef(
"render_pass_name",
label="Pass Name",
default=self.default_pass_name,
tooltip=(
"Value is calculated during publishing and UI will update"
" label after refresh."
)
),
BoolDef(
"mark_for_review",
label="Review",
default=self.mark_for_review
)
]

View file

@ -1,231 +0,0 @@
from openpype.lib import prepare_template_data
from openpype.pipeline import CreatorError
from openpype.hosts.tvpaint.api import (
plugin,
CommunicationWrapper
)
from openpype.hosts.tvpaint.api.lib import (
get_layers_data,
get_groups_data,
execute_george_through_file,
)
from openpype.hosts.tvpaint.api.pipeline import list_instances
class CreateRenderlayer(plugin.Creator):
"""Mark layer group as one instance."""
name = "render_layer"
label = "RenderLayer"
family = "renderLayer"
icon = "cube"
defaults = ["Main"]
rename_group = True
render_pass = "beauty"
rename_script_template = (
"tv_layercolor \"setcolor\""
" {clip_id} {group_id} {r} {g} {b} \"{name}\""
)
dynamic_subset_keys = [
"renderpass", "renderlayer", "render_pass", "render_layer", "group"
]
@classmethod
def get_dynamic_data(
cls, variant, task_name, asset_id, project_name, host_name
):
dynamic_data = super(CreateRenderlayer, cls).get_dynamic_data(
variant, task_name, asset_id, project_name, host_name
)
# Use render pass name from creator's plugin
dynamic_data["renderpass"] = cls.render_pass
# Add variant to render layer
dynamic_data["renderlayer"] = variant
# Change family for subset name fill
dynamic_data["family"] = "render"
# TODO remove - Backwards compatibility for old subset name templates
# - added 2022/04/28
dynamic_data["render_pass"] = dynamic_data["renderpass"]
dynamic_data["render_layer"] = dynamic_data["renderlayer"]
return dynamic_data
@classmethod
def get_default_variant(cls):
"""Default value for variant in Creator tool.
Method checks if the TVPaint implementation is running and tries to find
selected layers in TVPaint. If only one is selected, its name is
returned.
Returns:
str: Default variant name for Creator tool.
"""
# Validate that communication is initialized
if CommunicationWrapper.communicator:
# Get currently selected layers
layers_data = get_layers_data()
selected_layers = [
layer
for layer in layers_data
if layer["selected"]
]
# Return layer name if only one is selected
if len(selected_layers) == 1:
return selected_layers[0]["name"]
# Use defaults
if cls.defaults:
return cls.defaults[0]
return None
def process(self):
self.log.debug("Query data from workfile.")
instances = list_instances()
layers_data = get_layers_data()
self.log.debug("Checking for selection groups.")
# Collect group ids from selection
group_ids = set()
for layer in layers_data:
if layer["selected"]:
group_ids.add(layer["group_id"])
# Raise if there is no selection
if not group_ids:
raise CreatorError("Nothing is selected.")
# This creator should run only on one group
if len(group_ids) > 1:
raise CreatorError("More than one group is in selection.")
group_id = tuple(group_ids)[0]
# If group id is `0` it is `default` group which is invalid
if group_id == 0:
raise CreatorError(
"Selection is not in group. Can't mark selection as Beauty."
)
self.log.debug(f"Selected group id is \"{group_id}\".")
self.data["group_id"] = group_id
group_data = get_groups_data()
group_name = None
for group in group_data:
if group["group_id"] == group_id:
group_name = group["name"]
break
if group_name is None:
raise AssertionError(
"Couldn't find group by id \"{}\"".format(group_id)
)
subset_name_fill_data = {
"group": group_name
}
family = self.family = self.data["family"]
# Fill dynamic key 'group'
subset_name = self.data["subset"].format(
**prepare_template_data(subset_name_fill_data)
)
self.data["subset"] = subset_name
# Check for instances of same group
existing_instance = None
existing_instance_idx = None
# Check if subset name is not already taken
same_subset_instance = None
same_subset_instance_idx = None
for idx, instance in enumerate(instances):
if instance["family"] == family:
if instance["group_id"] == group_id:
existing_instance = instance
existing_instance_idx = idx
elif instance["subset"] == subset_name:
same_subset_instance = instance
same_subset_instance_idx = idx
if (
same_subset_instance_idx is not None
and existing_instance_idx is not None
):
break
if same_subset_instance_idx is not None:
if self._ask_user_subset_override(same_subset_instance):
instances.pop(same_subset_instance_idx)
else:
return
if existing_instance is not None:
self.log.info(
f"Beauty instance for group id {group_id} already exists"
", overriding"
)
instances[existing_instance_idx] = self.data
else:
instances.append(self.data)
self.write_instances(instances)
if not self.rename_group:
self.log.info("Group rename function is turned off. Skipping")
return
self.log.debug("Querying groups data from workfile.")
groups_data = get_groups_data()
self.log.debug("Changing name of the group.")
selected_group = None
for group_data in groups_data:
if group_data["group_id"] == group_id:
selected_group = group_data
# Rename TVPaint group (keep color same)
# - groups can't contain spaces
new_group_name = self.data["variant"].replace(" ", "_")
rename_script = self.rename_script_template.format(
clip_id=selected_group["clip_id"],
group_id=selected_group["group_id"],
r=selected_group["red"],
g=selected_group["green"],
b=selected_group["blue"],
name=new_group_name
)
execute_george_through_file(rename_script)
self.log.info(
f"Name of group with index {group_id}"
f" was changed to \"{new_group_name}\"."
)
def _ask_user_subset_override(self, instance):
from qtpy import QtCore
from qtpy.QtWidgets import QMessageBox
title = "Subset \"{}\" already exist".format(instance["subset"])
text = (
"Instance with subset name \"{}\" already exists."
"\n\nDo you want to override existing?"
).format(instance["subset"])
dialog = QMessageBox()
dialog.setWindowFlags(
dialog.windowFlags()
| QtCore.Qt.WindowStaysOnTopHint
)
dialog.setWindowTitle(title)
dialog.setText(text)
dialog.setStandardButtons(QMessageBox.Yes | QMessageBox.No)
dialog.setDefaultButton(QMessageBox.Yes)
dialog.exec_()
if dialog.result() == QMessageBox.Yes:
return True
return False


@ -1,167 +0,0 @@
from openpype.pipeline import CreatorError
from openpype.lib import prepare_template_data
from openpype.hosts.tvpaint.api import (
plugin,
CommunicationWrapper
)
from openpype.hosts.tvpaint.api.lib import get_layers_data
from openpype.hosts.tvpaint.api.pipeline import list_instances
class CreateRenderPass(plugin.Creator):
"""Render pass is combination of one or more layers from same group.
Requirement to create Render Pass is to have already created beauty
instance. Beauty instance is used as base for subset name.
"""
name = "render_pass"
label = "RenderPass"
family = "renderPass"
icon = "cube"
defaults = ["Main"]
dynamic_subset_keys = [
"renderpass", "renderlayer", "render_pass", "render_layer"
]
@classmethod
def get_dynamic_data(
cls, variant, task_name, asset_id, project_name, host_name
):
dynamic_data = super(CreateRenderPass, cls).get_dynamic_data(
variant, task_name, asset_id, project_name, host_name
)
dynamic_data["renderpass"] = variant
dynamic_data["family"] = "render"
# TODO remove - Backwards compatibility for old subset name templates
# - added 2022/04/28
dynamic_data["render_pass"] = dynamic_data["renderpass"]
return dynamic_data
@classmethod
def get_default_variant(cls):
"""Default value for variant in Creator tool.
Method checks if the TVPaint implementation is running and tries to find
selected layers in TVPaint. If only one is selected, its name is
returned.
Returns:
str: Default variant name for Creator tool.
"""
# Validate that communication is initialized
if CommunicationWrapper.communicator:
# Get currently selected layers
layers_data = get_layers_data()
selected_layers = [
layer
for layer in layers_data
if layer["selected"]
]
# Return layer name if only one is selected
if len(selected_layers) == 1:
return selected_layers[0]["name"]
# Use defaults
if cls.defaults:
return cls.defaults[0]
return None
def process(self):
self.log.debug("Query data from workfile.")
instances = list_instances()
layers_data = get_layers_data()
self.log.debug("Checking selection.")
# Get all selected layers and their group ids
group_ids = set()
selected_layers = []
for layer in layers_data:
if layer["selected"]:
selected_layers.append(layer)
group_ids.add(layer["group_id"])
# Raise if nothing is selected
if not selected_layers:
raise CreatorError("Nothing is selected.")
# Raise if layers from multiple groups are selected
if len(group_ids) != 1:
raise CreatorError("More than one group is in selection.")
group_id = tuple(group_ids)[0]
self.log.debug(f"Selected group id is \"{group_id}\".")
# Find beauty instance for selected layers
beauty_instance = None
for instance in instances:
if (
instance["family"] == "renderLayer"
and instance["group_id"] == group_id
):
beauty_instance = instance
break
# Beauty is required for this creator so raise if was not found
if beauty_instance is None:
raise CreatorError("Beauty pass does not exist yet.")
subset_name = self.data["subset"]
subset_name_fill_data = {}
# Backwards compatibility
# - beauty may be created with older creator where variant was not
# stored
if "variant" not in beauty_instance:
render_layer = beauty_instance["name"]
else:
render_layer = beauty_instance["variant"]
subset_name_fill_data["renderlayer"] = render_layer
subset_name_fill_data["render_layer"] = render_layer
# Format dynamic keys in subset name
new_subset_name = subset_name.format(
**prepare_template_data(subset_name_fill_data)
)
self.data["subset"] = new_subset_name
self.log.info(f"New subset name is \"{new_subset_name}\".")
family = self.data["family"]
variant = self.data["variant"]
self.data["group_id"] = group_id
self.data["pass"] = variant
self.data["renderlayer"] = render_layer
# Collect selected layer ids to be stored into instance
layer_names = [layer["name"] for layer in selected_layers]
self.data["layer_names"] = layer_names
# Check if same instance already exists
existing_instance = None
existing_instance_idx = None
for idx, instance in enumerate(instances):
if (
instance["family"] == family
and instance["group_id"] == group_id
and instance["pass"] == variant
):
existing_instance = instance
existing_instance_idx = idx
break
if existing_instance is not None:
self.log.info(
f"Render pass instance for group id {group_id}"
f" and name \"{variant}\" already exists, overriding."
)
instances[existing_instance_idx] = self.data
else:
instances.append(self.data)
self.write_instances(instances)


@ -0,0 +1,76 @@
from openpype.client import get_asset_by_name
from openpype.pipeline import CreatedInstance
from openpype.hosts.tvpaint.api.plugin import TVPaintAutoCreator
class TVPaintReviewCreator(TVPaintAutoCreator):
family = "review"
identifier = "scene.review"
label = "Review"
icon = "ei.video"
# Settings
active_on_create = True
def apply_settings(self, project_settings, system_settings):
plugin_settings = (
project_settings["tvpaint"]["create"]["create_review"]
)
self.default_variant = plugin_settings["default_variant"]
self.default_variants = plugin_settings["default_variants"]
self.active_on_create = plugin_settings["active_on_create"]
def create(self):
existing_instance = None
for instance in self.create_context.instances:
if instance.creator_identifier == self.identifier:
existing_instance = instance
break
create_context = self.create_context
host_name = create_context.host_name
project_name = create_context.get_current_project_name()
asset_name = create_context.get_current_asset_name()
task_name = create_context.get_current_task_name()
if existing_instance is None:
asset_doc = get_asset_by_name(project_name, asset_name)
subset_name = self.get_subset_name(
self.default_variant,
task_name,
asset_doc,
project_name,
host_name
)
data = {
"asset": asset_name,
"task": task_name,
"variant": self.default_variant
}
if not self.active_on_create:
data["active"] = False
new_instance = CreatedInstance(
self.family, subset_name, data, self
)
instances_data = self.host.list_instances()
instances_data.append(new_instance.data_to_store())
self.host.write_instances(instances_data)
self._add_instance_to_context(new_instance)
elif (
existing_instance["asset"] != asset_name
or existing_instance["task"] != task_name
):
asset_doc = get_asset_by_name(project_name, asset_name)
subset_name = self.get_subset_name(
existing_instance["variant"],
task_name,
asset_doc,
project_name,
host_name,
existing_instance
)
existing_instance["asset"] = asset_name
existing_instance["task"] = task_name
existing_instance["subset"] = subset_name


@ -0,0 +1,70 @@
from openpype.client import get_asset_by_name
from openpype.pipeline import CreatedInstance
from openpype.hosts.tvpaint.api.plugin import TVPaintAutoCreator
class TVPaintWorkfileCreator(TVPaintAutoCreator):
family = "workfile"
identifier = "workfile"
label = "Workfile"
icon = "fa.file-o"
def apply_settings(self, project_settings, system_settings):
plugin_settings = (
project_settings["tvpaint"]["create"]["create_workfile"]
)
self.default_variant = plugin_settings["default_variant"]
self.default_variants = plugin_settings["default_variants"]
def create(self):
existing_instance = None
for instance in self.create_context.instances:
if instance.creator_identifier == self.identifier:
existing_instance = instance
break
create_context = self.create_context
host_name = create_context.host_name
project_name = create_context.get_current_project_name()
asset_name = create_context.get_current_asset_name()
task_name = create_context.get_current_task_name()
if existing_instance is None:
asset_doc = get_asset_by_name(project_name, asset_name)
subset_name = self.get_subset_name(
self.default_variant,
task_name,
asset_doc,
project_name,
host_name
)
data = {
"asset": asset_name,
"task": task_name,
"variant": self.default_variant
}
new_instance = CreatedInstance(
self.family, subset_name, data, self
)
instances_data = self.host.list_instances()
instances_data.append(new_instance.data_to_store())
self.host.write_instances(instances_data)
self._add_instance_to_context(new_instance)
elif (
existing_instance["asset"] != asset_name
or existing_instance["task"] != task_name
):
asset_doc = get_asset_by_name(project_name, asset_name)
subset_name = self.get_subset_name(
existing_instance["variant"],
task_name,
asset_doc,
project_name,
host_name,
existing_instance
)
existing_instance["asset"] = asset_name
existing_instance["task"] = task_name
existing_instance["subset"] = subset_name


@ -1,37 +1,34 @@
import pyblish.api
class CollectOutputFrameRange(pyblish.api.ContextPlugin):
class CollectOutputFrameRange(pyblish.api.InstancePlugin):
"""Collect frame start/end from context.
When instances are collected the context does not contain `frameStart` and
`frameEnd` keys yet. They are collected by the global plugin
`CollectContextEntities`.
"""
label = "Collect output frame range"
order = pyblish.api.CollectorOrder
order = pyblish.api.CollectorOrder + 0.4999
hosts = ["tvpaint"]
families = ["review", "render"]
def process(self, context):
for instance in context:
frame_start = instance.data.get("frameStart")
frame_end = instance.data.get("frameEnd")
if frame_start is not None and frame_end is not None:
self.log.debug(
"Instance {} already has set frames {}-{}".format(
str(instance), frame_start, frame_end
)
)
return
def process(self, instance):
asset_doc = instance.data.get("assetEntity")
if not asset_doc:
return
frame_start = context.data.get("frameStart")
frame_end = context.data.get("frameEnd")
context = instance.context
instance.data["frameStart"] = frame_start
instance.data["frameEnd"] = frame_end
self.log.info(
"Set frames {}-{} on instance {} ".format(
frame_start, frame_end, str(instance)
)
frame_start = asset_doc["data"]["frameStart"]
frame_end = frame_start + (
context.data["sceneMarkOut"] - context.data["sceneMarkIn"]
)
instance.data["frameStart"] = frame_start
instance.data["frameEnd"] = frame_end
self.log.info(
"Set frames {}-{} on instance {} ".format(
frame_start, frame_end, instance.data["subset"]
)
)
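For example, with the asset's `frameStart` set to 1001 and scene marks in/out at 10 and 40, the instance is assigned frames 1001-1031:

frame_start = 1001                   # asset_doc["data"]["frameStart"]
frame_end = frame_start + (40 - 10)  # sceneMarkOut - sceneMarkIn
# -> the mark range is mapped onto the asset's start frame: 1001-1031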


@ -1,280 +0,0 @@
import json
import copy
import pyblish.api
from openpype.client import get_asset_by_name
from openpype.pipeline import legacy_io
from openpype.pipeline.create import get_subset_name
class CollectInstances(pyblish.api.ContextPlugin):
label = "Collect Instances"
order = pyblish.api.CollectorOrder - 0.4
hosts = ["tvpaint"]
def process(self, context):
workfile_instances = context.data["workfileInstances"]
self.log.debug("Collected ({}) instances:\n{}".format(
len(workfile_instances),
json.dumps(workfile_instances, indent=4)
))
filtered_instance_data = []
# Backwards compatibility for workfiles that already have review
# instance in metadata.
review_instance_exist = False
for instance_data in workfile_instances:
family = instance_data["family"]
if family == "review":
review_instance_exist = True
elif family not in ("renderPass", "renderLayer"):
self.log.info("Unknown family \"{}\". Skipping {}".format(
family, json.dumps(instance_data, indent=4)
))
continue
filtered_instance_data.append(instance_data)
# Fake review instance if review was not found in metadata families
if not review_instance_exist:
filtered_instance_data.append(
self._create_review_instance_data(context)
)
for instance_data in filtered_instance_data:
instance_data["fps"] = context.data["sceneFps"]
# Conversion from older instances
# - change 'render_layer' to 'renderlayer'
render_layer = instance_data.get("renderlayer")
if not render_layer:
# Render Layer has only variant
if instance_data["family"] == "renderLayer":
render_layer = instance_data.get("variant")
# Backwards compatibility for renderPasses
elif "render_layer" in instance_data:
render_layer = instance_data["render_layer"]
if render_layer:
instance_data["renderlayer"] = render_layer
# Store workfile instance data to instance data
instance_data["originData"] = copy.deepcopy(instance_data)
# Global instance data modifications
# Fill families
family = instance_data["family"]
families = [family]
if family != "review":
families.append("review")
# Add `review` family for thumbnail integration
instance_data["families"] = families
# Instance name
subset_name = instance_data["subset"]
name = instance_data.get("name", subset_name)
instance_data["name"] = name
instance_data["label"] = "{} [{}-{}]".format(
name,
context.data["sceneMarkIn"] + 1,
context.data["sceneMarkOut"] + 1
)
active = instance_data.get("active", True)
instance_data["active"] = active
instance_data["publish"] = active
# Add representations key
instance_data["representations"] = []
# Different instance creation based on family
instance = None
if family == "review":
# Change subset name of review instance
# Project name from workfile context
project_name = context.data["workfile_context"]["project"]
# Collect asset doc to get asset id
# - not sure if it's a good idea to require asset id in
# get_subset_name?
asset_name = context.data["workfile_context"]["asset"]
asset_doc = get_asset_by_name(project_name, asset_name)
# Host name from environment variable
host_name = context.data["hostName"]
# Use empty variant value
variant = ""
task_name = legacy_io.Session["AVALON_TASK"]
new_subset_name = get_subset_name(
family,
variant,
task_name,
asset_doc,
project_name,
host_name,
project_settings=context.data["project_settings"]
)
instance_data["subset"] = new_subset_name
instance = context.create_instance(**instance_data)
instance.data["layers"] = copy.deepcopy(
context.data["layersData"]
)
elif family == "renderLayer":
instance = self.create_render_layer_instance(
context, instance_data
)
elif family == "renderPass":
instance = self.create_render_pass_instance(
context, instance_data
)
if instance is None:
continue
any_visible = False
for layer in instance.data["layers"]:
if layer["visible"]:
any_visible = True
break
instance.data["publish"] = any_visible
self.log.debug("Created instance: {}\n{}".format(
instance, json.dumps(instance.data, indent=4)
))
def _create_review_instance_data(self, context):
"""Fake review instance data."""
return {
"family": "review",
"asset": context.data["asset"],
# Dummy subset name
"subset": "reviewMain"
}
def create_render_layer_instance(self, context, instance_data):
name = instance_data["name"]
# Change label
subset_name = instance_data["subset"]
# Backwards compatibility
# - subset names were not stored as final subset names during creation
if "variant" not in instance_data:
instance_data["label"] = "{}_Beauty".format(name)
# Change subset name
# Final family of an instance will be `render`
new_family = "render"
task_name = legacy_io.Session["AVALON_TASK"]
new_subset_name = "{}{}_{}_Beauty".format(
new_family, task_name.capitalize(), name
)
instance_data["subset"] = new_subset_name
self.log.debug("Changed subset name \"{}\"->\"{}\"".format(
subset_name, new_subset_name
))
# Get all layers for the layer
layers_data = context.data["layersData"]
group_id = instance_data["group_id"]
group_layers = []
for layer in layers_data:
if layer["group_id"] == group_id:
group_layers.append(layer)
if not group_layers:
# Should be handled here?
self.log.warning((
f"Group with id {group_id} does not contain any layers."
f" Instance \"{name}\" not created."
))
return None
instance_data["layers"] = group_layers
return context.create_instance(**instance_data)
def create_render_pass_instance(self, context, instance_data):
pass_name = instance_data["pass"]
self.log.info(
"Creating render pass instance. \"{}\"".format(pass_name)
)
# Change label
render_layer = instance_data["renderlayer"]
# Backwards compatibility
# - subset names were not stored as final subset names during creation
if "variant" not in instance_data:
instance_data["label"] = "{}_{}".format(render_layer, pass_name)
# Change subset name
# Final family of an instance will be `render`
new_family = "render"
old_subset_name = instance_data["subset"]
task_name = legacy_io.Session["AVALON_TASK"]
new_subset_name = "{}{}_{}_{}".format(
new_family, task_name.capitalize(), render_layer, pass_name
)
instance_data["subset"] = new_subset_name
self.log.debug("Changed subset name \"{}\"->\"{}\"".format(
old_subset_name, new_subset_name
))
layers_data = context.data["layersData"]
layers_by_name = {
layer["name"]: layer
for layer in layers_data
}
if "layer_names" in instance_data:
layer_names = instance_data["layer_names"]
else:
# Backwards compatibility
# - not 100% working as it was found out that layer ids can't be
# used as a unified identifier across multiple workstations
layers_by_id = {
layer["layer_id"]: layer
for layer in layers_data
}
layer_ids = instance_data["layer_ids"]
layer_names = []
for layer_id in layer_ids:
layer = layers_by_id.get(layer_id)
if layer:
layer_names.append(layer["name"])
if not layer_names:
raise ValueError((
"Metadata contain old way of storing layers information."
" It is not possible to identify layers to publish with"
" these data. Please remove Render Pass instances with"
" Subset manager and use Creator tool to recreate them."
))
render_pass_layers = []
for layer_name in layer_names:
layer = layers_by_name.get(layer_name)
# NOTE This is kind of validation before validators?
if not layer:
self.log.warning(
f"Layer with name {layer_name} was not found."
)
continue
render_pass_layers.append(layer)
if not render_pass_layers:
name = instance_data["name"]
self.log.warning(
f"None of the layers from the RenderPass \"{name}\""
" exist anymore. Instance not created."
)
return None
instance_data["layers"] = render_pass_layers
return context.create_instance(**instance_data)


@ -0,0 +1,109 @@
import copy
import pyblish.api
from openpype.lib import prepare_template_data
class CollectRenderInstances(pyblish.api.InstancePlugin):
label = "Collect Render Instances"
order = pyblish.api.CollectorOrder - 0.4
hosts = ["tvpaint"]
families = ["render", "review"]
def process(self, instance):
context = instance.context
creator_identifier = instance.data["creator_identifier"]
if creator_identifier == "render.layer":
self._collect_data_for_render_layer(instance)
elif creator_identifier == "render.pass":
self._collect_data_for_render_pass(instance)
elif creator_identifier == "render.scene":
self._collect_data_for_render_scene(instance)
else:
if creator_identifier == "scene.review":
self._collect_data_for_review(instance)
return
subset_name = instance.data["subset"]
instance.data["name"] = subset_name
instance.data["label"] = "{} [{}-{}]".format(
subset_name,
context.data["sceneMarkIn"] + 1,
context.data["sceneMarkOut"] + 1
)
def _collect_data_for_render_layer(self, instance):
instance.data["families"].append("renderLayer")
creator_attributes = instance.data["creator_attributes"]
group_id = creator_attributes["group_id"]
if creator_attributes["mark_for_review"]:
instance.data["families"].append("review")
layers_data = instance.context.data["layersData"]
instance.data["layers"] = [
copy.deepcopy(layer)
for layer in layers_data
if layer["group_id"] == group_id
]
def _collect_data_for_render_pass(self, instance):
instance.data["families"].append("renderPass")
layer_names = set(instance.data["layer_names"])
layers_data = instance.context.data["layersData"]
creator_attributes = instance.data["creator_attributes"]
if creator_attributes["mark_for_review"]:
instance.data["families"].append("review")
instance.data["layers"] = [
copy.deepcopy(layer)
for layer in layers_data
if layer["name"] in layer_names
]
render_layer_data = None
render_layer_id = creator_attributes["render_layer_instance_id"]
for in_data in instance.context.data["workfileInstances"]:
if (
in_data["creator_identifier"] == "render.layer"
and in_data["instance_id"] == render_layer_id
):
render_layer_data = in_data
break
instance.data["renderLayerData"] = copy.deepcopy(render_layer_data)
# Invalid state
if render_layer_data is None:
return
render_layer_name = render_layer_data["variant"]
subset_name = instance.data["subset"]
instance.data["subset"] = subset_name.format(
**prepare_template_data({"renderlayer": render_layer_name})
)
def _collect_data_for_render_scene(self, instance):
instance.data["families"].append("renderScene")
creator_attributes = instance.data["creator_attributes"]
if creator_attributes["mark_for_review"]:
instance.data["families"].append("review")
instance.data["layers"] = copy.deepcopy(
instance.context.data["layersData"]
)
render_pass_name = (
instance.data["creator_attributes"]["render_pass_name"]
)
subset_name = instance.data["subset"]
instance.data["subset"] = subset_name.format(
**prepare_template_data({"renderpass": render_pass_name})
)
def _collect_data_for_review(self, instance):
instance.data["layers"] = copy.deepcopy(
instance.context.data["layersData"]
)


@ -1,114 +0,0 @@
import json
import copy
import pyblish.api
from openpype.client import get_asset_by_name
from openpype.pipeline.create import get_subset_name
class CollectRenderScene(pyblish.api.ContextPlugin):
"""Collect instance which renders whole scene in PNG.
Creates instance with family 'renderScene' which will have all layers
to render which will be composite into one result. The instance is not
collected from scene.
Scene will be rendered with all visible layers similar way like review is.
Instance is disabled if there are any created instances of 'renderLayer'
or 'renderPass'. That is because it is expected that this instance is
used as lazy publish of TVPaint file.
Subset name is created similar way like 'renderLayer' family. It can use
`renderPass` and `renderLayer` keys which can be set using settings and
`variant` is filled using `renderPass` value.
"""
label = "Collect Render Scene"
order = pyblish.api.CollectorOrder - 0.39
hosts = ["tvpaint"]
# Value of 'render_pass' in subset name template
render_pass = "beauty"
# Settings attributes
enabled = False
# Value of 'render_layer' and 'variant' in subset name template
render_layer = "Main"
def process(self, context):
# Check if there are created instances of renderPass and renderLayer
# - that will define if renderScene instance is enabled after
# collection
any_created_instance = False
for instance in context:
family = instance.data["family"]
if family in ("renderPass", "renderLayer"):
any_created_instance = True
break
# Global instance data modifications
# Fill families
family = "renderScene"
# Add `review` family for thumbnail integration
families = [family, "review"]
# Collect asset doc to get asset id
# - not sure if it's a good idea to require asset id in
# get_subset_name?
workfile_context = context.data["workfile_context"]
# Project name from workfile context
project_name = context.data["workfile_context"]["project"]
asset_name = workfile_context["asset"]
asset_doc = get_asset_by_name(project_name, asset_name)
# Host name from environment variable
host_name = context.data["hostName"]
# Variant is using render layer name
variant = self.render_layer
dynamic_data = {
"renderlayer": self.render_layer,
"renderpass": self.render_pass,
}
# TODO remove - Backwards compatibility for old subset name templates
# - added 2022/04/28
dynamic_data["render_layer"] = dynamic_data["renderlayer"]
dynamic_data["render_pass"] = dynamic_data["renderpass"]
task_name = workfile_context["task"]
subset_name = get_subset_name(
"render",
variant,
task_name,
asset_doc,
project_name,
host_name,
dynamic_data=dynamic_data,
project_settings=context.data["project_settings"]
)
instance_data = {
"family": family,
"families": families,
"fps": context.data["sceneFps"],
"subset": subset_name,
"name": subset_name,
"label": "{} [{}-{}]".format(
subset_name,
context.data["sceneMarkIn"] + 1,
context.data["sceneMarkOut"] + 1
),
"active": not any_created_instance,
"publish": not any_created_instance,
"representations": [],
"layers": copy.deepcopy(context.data["layersData"]),
"asset": asset_name,
"task": task_name,
# Add render layer to instance data
"renderlayer": self.render_layer
}
instance = context.create_instance(**instance_data)
self.log.debug("Created instance: {}\n{}".format(
instance, json.dumps(instance.data, indent=4)
))


@ -2,17 +2,15 @@ import os
import json
import pyblish.api
from openpype.client import get_asset_by_name
from openpype.pipeline import legacy_io
from openpype.pipeline.create import get_subset_name
class CollectWorkfile(pyblish.api.ContextPlugin):
class CollectWorkfile(pyblish.api.InstancePlugin):
label = "Collect Workfile"
order = pyblish.api.CollectorOrder - 0.4
hosts = ["tvpaint"]
families = ["workfile"]
def process(self, context):
def process(self, instance):
context = instance.context
current_file = context.data["currentFile"]
self.log.info(
@ -21,49 +19,14 @@ class CollectWorkfile(pyblish.api.ContextPlugin):
dirpath, filename = os.path.split(current_file)
basename, ext = os.path.splitext(filename)
instance = context.create_instance(name=basename)
# Project name from workfile context
project_name = context.data["workfile_context"]["project"]
# Get subset name of workfile instance
# Collect asset doc to get asset id
# - not sure if it's a good idea to require asset id in
# get_subset_name?
family = "workfile"
asset_name = context.data["workfile_context"]["asset"]
asset_doc = get_asset_by_name(project_name, asset_name)
# Host name from environment variable
host_name = os.environ["AVALON_APP"]
# Use empty variant value
variant = ""
task_name = legacy_io.Session["AVALON_TASK"]
subset_name = get_subset_name(
family,
variant,
task_name,
asset_doc,
project_name,
host_name,
project_settings=context.data["project_settings"]
)
# Create Workfile instance
instance.data.update({
"subset": subset_name,
"asset": context.data["asset"],
"label": subset_name,
"publish": True,
"family": "workfile",
"families": ["workfile"],
"representations": [{
"name": ext.lstrip("."),
"ext": ext.lstrip("."),
"files": filename,
"stagingDir": dirpath
}]
instance.data["representations"].append({
"name": ext.lstrip("."),
"ext": ext.lstrip("."),
"files": filename,
"stagingDir": dirpath
})
self.log.info("Collected workfile instance: {}".format(
json.dumps(instance.data, indent=4)
))
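For a workfile saved as, say, "sh010_compositing_v003.tvpp" (file name and staging path are made up for illustration), the appended representation would look like:

{
    "name": "tvpp",
    "ext": "tvpp",
    "files": "sh010_compositing_v003.tvpp",
    "stagingDir": "C:/projects/demo/sh010/work/compositing"
}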


@ -65,9 +65,9 @@ class CollectWorkfileData(pyblish.api.ContextPlugin):
# Collect and store current context to have reference
current_context = {
"project": legacy_io.Session["AVALON_PROJECT"],
"asset": legacy_io.Session["AVALON_ASSET"],
"task": legacy_io.Session["AVALON_TASK"]
"project_name": context.data["projectName"],
"asset_name": context.data["asset"],
"task_name": context.data["task"]
}
context.data["previous_context"] = current_context
self.log.debug("Current context is: {}".format(current_context))
@ -76,25 +76,31 @@ class CollectWorkfileData(pyblish.api.ContextPlugin):
self.log.info("Collecting workfile context")
workfile_context = get_current_workfile_context()
if "project" in workfile_context:
workfile_context = {
"project_name": workfile_context.get("project"),
"asset_name": workfile_context.get("asset"),
"task_name": workfile_context.get("task"),
}
# Store workfile context to pyblish context
context.data["workfile_context"] = workfile_context
if workfile_context:
# Change current context with context from workfile
key_map = (
("AVALON_ASSET", "asset"),
("AVALON_TASK", "task")
("AVALON_ASSET", "asset_name"),
("AVALON_TASK", "task_name")
)
for env_key, key in key_map:
legacy_io.Session[env_key] = workfile_context[key]
os.environ[env_key] = workfile_context[key]
self.log.info("Context changed to: {}".format(workfile_context))
asset_name = workfile_context["asset"]
task_name = workfile_context["task"]
asset_name = workfile_context["asset_name"]
task_name = workfile_context["task_name"]
else:
asset_name = current_context["asset"]
task_name = current_context["task"]
asset_name = current_context["asset_name"]
task_name = current_context["task_name"]
# Handle older workfiles or workfiles without metadata
self.log.warning((
"Workfile does not contain information about context."
@ -103,6 +109,7 @@ class CollectWorkfileData(pyblish.api.ContextPlugin):
# Store context asset name
context.data["asset"] = asset_name
context.data["task"] = task_name
self.log.info(
"Context is set to Asset: \"{}\" and Task: \"{}\"".format(
asset_name, task_name


@ -6,6 +6,7 @@ from PIL import Image
import pyblish.api
from openpype.pipeline.publish import KnownPublishError
from openpype.hosts.tvpaint.api.lib import (
execute_george,
execute_george_through_file,
@ -24,8 +25,7 @@ from openpype.hosts.tvpaint.lib import (
class ExtractSequence(pyblish.api.Extractor):
label = "Extract Sequence"
hosts = ["tvpaint"]
families = ["review", "renderPass", "renderLayer", "renderScene"]
families_to_review = ["review"]
families = ["review", "render"]
# Modifiable with settings
review_bg = [255, 255, 255, 255]
@ -136,7 +136,7 @@ class ExtractSequence(pyblish.api.Extractor):
# Fill tags and new families from project settings
tags = []
if family_lowered in self.families_to_review:
if family_lowered == "review":
tags.append("review")
# Sequence of one frame
@ -162,10 +162,6 @@ class ExtractSequence(pyblish.api.Extractor):
instance.data["representations"].append(new_repre)
if family_lowered in ("renderpass", "renderlayer", "renderscene"):
# Change family to render
instance.data["family"] = "render"
if not thumbnail_fullpath:
return
@ -259,7 +255,7 @@ class ExtractSequence(pyblish.api.Extractor):
output_filepaths_by_frame_idx[frame_idx] = filepath
if not os.path.exists(filepath):
raise AssertionError(
raise KnownPublishError(
"Output was not rendered. File was not found {}".format(
filepath
)


@ -0,0 +1,18 @@
<?xml version="1.0" encoding="UTF-8"?>
<root>
<error id="main">
<title>Overused Color group</title>
<description>## One Color group is used by multiple Render Layers
A single color group used by multiple Render Layers would cause clashes of rendered TVPaint layers: the same layers would be used for the output files of both groups.
### Conflicting groups
{groups_information}
### How to repair?
Refresh, go to the 'Publish' tab, go through the Render Layers and change their groups so they do not clash. If you have reached the limit of TVPaint color groups, there is unfortunately no way to fix the issue.
</description>
</error>
</root>


@ -20,6 +20,9 @@ class ValidateLayersGroup(pyblish.api.InstancePlugin):
duplicated_layer_names = []
for layer_name in layer_names:
layers = layers_by_name.get(layer_name)
# It is not the job of this validator to handle missing layers
if layers is None:
continue
if len(layers) > 1:
duplicated_layer_names.append(layer_name)


@ -8,11 +8,16 @@ class ValidateLayersVisiblity(pyblish.api.InstancePlugin):
label = "Validate Layers Visibility"
order = pyblish.api.ValidatorOrder
families = ["review", "renderPass", "renderLayer", "renderScene"]
families = ["review", "render"]
def process(self, instance):
layers = instance.data["layers"]
# Instance has empty layers
# - it is not the job of this validator to check that
if not layers:
return
layer_names = set()
for layer in instance.data["layers"]:
for layer in layers:
layer_names.add(layer["name"])
if layer["visible"]:
return


@ -0,0 +1,74 @@
import collections
import pyblish.api
from openpype.pipeline import PublishXmlValidationError
class ValidateRenderLayerGroups(pyblish.api.ContextPlugin):
"""Validate group ids of renderLayer subsets.
Validate that no two render layers use the same group.
"""
label = "Validate Render Layers Group"
order = pyblish.api.ValidatorOrder + 0.1
def process(self, context):
# Prepare layers
render_layers_by_group_id = collections.defaultdict(list)
for instance in context:
families = instance.data.get("families")
if not families or "renderLayer" not in families:
continue
group_id = instance.data["creator_attributes"]["group_id"]
render_layers_by_group_id[group_id].append(instance)
duplicated_instances = []
for group_id, instances in render_layers_by_group_id.items():
if len(instances) > 1:
duplicated_instances.append((group_id, instances))
if not duplicated_instances:
return
# Exception message preparations
groups_data = context.data["groupsData"]
groups_by_id = {
group["group_id"]: group
for group in groups_data
}
per_group_msgs = []
groups_information_lines = []
for group_id, instances in duplicated_instances:
group = groups_by_id[group_id]
group_label = "Group \"{}\" ({})".format(
group["name"],
group["group_id"],
)
line_join_subset_names = "\n".join([
f" - {instance['subset']}"
for instance in instances
])
joined_subset_names = ", ".join([
f"\"{instance['subset']}\""
for instance in instances
])
per_group_msgs.append(
"{} < {} >".format(group_label, joined_subset_names)
)
groups_information_lines.append(
"<b>{}</b>\n{}".format(group_label, line_join_subset_names)
)
# Raise an error
raise PublishXmlValidationError(
self,
(
"More than one Render Layer is using the same TVPaint"
" group color. {}"
).format(" | ".join(per_group_msgs)),
formatting_data={
"groups_information": "\n".join(groups_information_lines)
}
)
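The detection above is the usual defaultdict grouping idiom; a standalone sketch with made-up subset names:

import collections

by_group = collections.defaultdict(list)
for subset, group_id in [("renderMain", 2), ("renderFx", 2), ("renderBg", 3)]:
    by_group[group_id].append(subset)

duplicated = {g: names for g, names in by_group.items() if len(names) > 1}
# -> {2: ['renderMain', 'renderFx']}: both render layers share group 2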


@ -85,6 +85,5 @@ class ValidateLayersGroup(pyblish.api.InstancePlugin):
),
"expected_group": correct_group["name"],
"layer_names": ", ".join(invalid_layer_names)
}
)


@ -42,7 +42,7 @@ class ValidateProjectSettings(pyblish.api.ContextPlugin):
"expected_width": expected_data["resolutionWidth"],
"expected_height": expected_data["resolutionHeight"],
"current_width": scene_data["resolutionWidth"],
"current_height": scene_data["resolutionWidth"],
"current_height": scene_data["resolutionHeight"],
"expected_pixel_ratio": expected_data["pixelAspect"],
"current_pixel_ratio": scene_data["pixelAspect"]
}


@ -1,5 +1,9 @@
import pyblish.api
from openpype.pipeline import PublishXmlValidationError, registered_host
from openpype.pipeline import (
PublishXmlValidationError,
PublishValidationError,
registered_host,
)
class ValidateWorkfileMetadataRepair(pyblish.api.Action):
@ -27,13 +31,18 @@ class ValidateWorkfileMetadata(pyblish.api.ContextPlugin):
actions = [ValidateWorkfileMetadataRepair]
required_keys = {"project", "asset", "task"}
required_keys = {"project_name", "asset_name", "task_name"}
def process(self, context):
workfile_context = context.data["workfile_context"]
if not workfile_context:
raise AssertionError(
"Current workfile is missing whole metadata about context."
raise PublishValidationError(
"Current workfile is missing whole metadata about context.",
"Missing context",
(
"Current workfile is missing metadata about task."
" To fix this issue save the file using Workfiles tool."
)
)
missing_keys = []


@ -1,4 +1,3 @@
import os
import pyblish.api
from openpype.pipeline import PublishXmlValidationError
@ -16,15 +15,15 @@ class ValidateWorkfileProjectName(pyblish.api.ContextPlugin):
def process(self, context):
workfile_context = context.data.get("workfile_context")
# If workfile context is missing then the project matches the
# `AVALON_PROJECT` value for 100%
# global project
if not workfile_context:
self.log.info(
"Workfile context (\"workfile_context\") is not filled."
)
return
workfile_project_name = workfile_context["project"]
env_project_name = os.environ["AVALON_PROJECT"]
workfile_project_name = workfile_context["project_name"]
env_project_name = context.data["projectName"]
if workfile_project_name == env_project_name:
self.log.info((
"Both workfile project and environment project are same. {}"


@ -82,9 +82,6 @@ from .mongo import (
validate_mongo_connection,
OpenPypeMongoConnection
)
from .anatomy import (
Anatomy
)
from .dateutils import (
get_datetime_data,
@ -119,36 +116,19 @@ from .transcoding import (
)
from .avalon_context import (
CURRENT_DOC_SCHEMAS,
PROJECT_NAME_ALLOWED_SYMBOLS,
PROJECT_NAME_REGEX,
create_project,
is_latest,
any_outdated,
get_asset,
get_linked_assets,
get_latest_version,
get_system_general_anatomy_data,
get_workfile_template_key,
get_workfile_template_key_from_context,
get_workdir_data,
get_workdir,
get_workdir_with_workdir_data,
get_last_workfile_with_version,
get_last_workfile,
create_workfile_doc,
save_workfile_data_to_doc,
get_workfile_doc,
BuildWorkfile,
get_creator_by_name,
get_custom_workfile_template,
change_timer_to_current_context,
get_custom_workfile_template_by_context,
get_custom_workfile_template_by_string_context,
get_custom_workfile_template
@ -186,8 +166,6 @@ from .plugin_tools import (
get_subset_name,
get_subset_name_with_asset_doc,
prepare_template_data,
filter_pyblish_plugins,
set_plugin_attributes_from_settings,
source_hash,
)
@ -278,34 +256,17 @@ __all__ = [
"convert_ffprobe_fps_to_float",
"CURRENT_DOC_SCHEMAS",
"PROJECT_NAME_ALLOWED_SYMBOLS",
"PROJECT_NAME_REGEX",
"create_project",
"is_latest",
"any_outdated",
"get_asset",
"get_linked_assets",
"get_latest_version",
"get_system_general_anatomy_data",
"get_workfile_template_key",
"get_workfile_template_key_from_context",
"get_workdir_data",
"get_workdir",
"get_workdir_with_workdir_data",
"get_last_workfile_with_version",
"get_last_workfile",
"create_workfile_doc",
"save_workfile_data_to_doc",
"get_workfile_doc",
"BuildWorkfile",
"get_creator_by_name",
"change_timer_to_current_context",
"get_custom_workfile_template_by_context",
"get_custom_workfile_template_by_string_context",
"get_custom_workfile_template",
@ -338,8 +299,6 @@ __all__ = [
"TaskNotSetError",
"get_subset_name",
"get_subset_name_with_asset_doc",
"filter_pyblish_plugins",
"set_plugin_attributes_from_settings",
"source_hash",
"format_file_size",
@ -358,8 +317,6 @@ __all__ = [
"terminal",
"Anatomy",
"get_datetime_data",
"get_formatted_current_time",


@ -1,38 +0,0 @@
"""Code related to project Anatomy was moved
to 'openpype.pipeline.anatomy'; please change your imports as soon as
possible. The file will probably be removed in OpenPype 3.14.*
"""
import warnings
import functools
class AnatomyDeprecatedWarning(DeprecationWarning):
pass
def anatomy_deprecated(func):
"""Mark functions as deprecated.
It will result in a warning being emitted when the function is used.
"""
@functools.wraps(func)
def new_func(*args, **kwargs):
warnings.simplefilter("always", AnatomyDeprecatedWarning)
warnings.warn(
(
"Deprecated import of 'Anatomy'."
" Class was moved to 'openpype.pipeline.anatomy'."
" Please change your imports of Anatomy in codebase."
),
category=AnatomyDeprecatedWarning
)
return func(*args, **kwargs)
return new_func
@anatomy_deprecated
def Anatomy(*args, **kwargs):
from openpype.pipeline.anatomy import Anatomy
return Anatomy(*args, **kwargs)
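A short usage sketch of the shim above: the old import path keeps working (project name is made up), but every call emits the deprecation warning.

from openpype.lib.anatomy import Anatomy  # deprecated location

anatomy = Anatomy("demo_project")  # warns with AnatomyDeprecatedWarning and
                                   # returns openpype.pipeline.anatomy.Anatomy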


@ -1,6 +1,5 @@
"""Should be used only inside of hosts."""
import os
import copy
import platform
import logging
import functools
@ -10,17 +9,12 @@ import six
from openpype.client import (
get_project,
get_assets,
get_asset_by_name,
get_last_version_by_subset_name,
get_workfile_info,
)
from openpype.client.operations import (
CURRENT_ASSET_DOC_SCHEMA,
CURRENT_PROJECT_SCHEMA,
CURRENT_PROJECT_CONFIG_SCHEMA,
PROJECT_NAME_ALLOWED_SYMBOLS,
PROJECT_NAME_REGEX,
)
from .profiles_filtering import filter_profiles
from .path_templates import StringTemplate
@ -128,70 +122,6 @@ def with_pipeline_io(func):
return wrapped
@deprecated("openpype.pipeline.context_tools.is_representation_from_latest")
def is_latest(representation):
"""Return whether the representation is from latest version
Args:
representation (dict): The representation document from the database.
Returns:
bool: Whether the representation is of latest version.
Deprecated:
Function will be removed after release version 3.15.*
"""
from openpype.pipeline.context_tools import is_representation_from_latest
return is_representation_from_latest(representation)
@deprecated("openpype.pipeline.load.any_outdated_containers")
def any_outdated():
"""Return whether the current scene has any outdated content.
Deprecated:
Function will be removed after release version 3.15.*
"""
from openpype.pipeline.load import any_outdated_containers
return any_outdated_containers()
@deprecated("openpype.pipeline.context_tools.get_current_project_asset")
def get_asset(asset_name=None):
""" Returning asset document from database by its name.
Doesn't count with duplicities on asset names!
Args:
asset_name (str)
Returns:
(MongoDB document)
Deprecated:
Function will be removed after release version 3.15.*
"""
from openpype.pipeline.context_tools import get_current_project_asset
return get_current_project_asset(asset_name=asset_name)
@deprecated("openpype.pipeline.template_data.get_general_template_data")
def get_system_general_anatomy_data(system_settings=None):
"""
Deprecated:
Function will be removed after release version 3.15.*
"""
from openpype.pipeline.template_data import get_general_template_data
return get_general_template_data(system_settings)
@deprecated("openpype.client.get_linked_asset_ids")
def get_linked_asset_ids(asset_doc):
"""Return linked asset ids for `asset_doc` from DB
@ -214,66 +144,6 @@ def get_linked_asset_ids(asset_doc):
return get_linked_asset_ids(project_name, asset_doc=asset_doc)
@deprecated("openpype.client.get_linked_assets")
def get_linked_assets(asset_doc):
"""Return linked assets for `asset_doc` from DB
Args:
asset_doc (dict): Asset document from DB
Returns:
(list) Asset documents of input links for passed asset doc.
Deprecated:
Function will be removed after release version 3.15.*
"""
from openpype.pipeline import legacy_io
from openpype.client import get_linked_assets
project_name = legacy_io.active_project()
return get_linked_assets(project_name, asset_doc=asset_doc)
@deprecated("openpype.client.get_last_version_by_subset_name")
def get_latest_version(asset_name, subset_name, dbcon=None, project_name=None):
"""Retrieve latest version from `asset_name`, and `subset_name`.
Do not use if you want to query more than 5 latest versions as this method
query 3 times to mongo for each call. For those cases is better to use
more efficient way, e.g. with help of aggregations.
Args:
asset_name (str): Name of asset.
subset_name (str): Name of subset.
dbcon (AvalonMongoDB, optional): Avalon Mongo connection with Session.
project_name (str, optional): Find latest version in specific project.
Returns:
None: If asset, subset or version were not found.
dict: Last version document for the entered asset and subset.
Deprecated:
Function will be removed after release version 3.15.*
"""
if not project_name:
if not dbcon:
from openpype.pipeline import legacy_io
log.debug("Using `legacy_io` for query.")
dbcon = legacy_io
# Make sure is installed
dbcon.install()
project_name = dbcon.active_project()
return get_last_version_by_subset_name(
project_name, subset_name, asset_name=asset_name
)
@deprecated(
"openpype.pipeline.workfile.get_workfile_template_key_from_context")
def get_workfile_template_key_from_context(
@ -361,142 +231,6 @@ def get_workfile_template_key(
)
@deprecated("openpype.pipeline.template_data.get_template_data")
def get_workdir_data(project_doc, asset_doc, task_name, host_name):
"""Prepare data for workdir template filling from entered information.
Args:
project_doc (dict): Mongo document of project from MongoDB.
asset_doc (dict): Mongo document of asset from MongoDB.
task_name (str): Task name for which workdir data are prepared.
host_name (str): Host which is used for the workdir. This is required
because the workdir template may contain the `{app}` key.
Returns:
dict: Data prepared for filling workdir template.
Deprecated:
Function will be removed after release version 3.15.*
"""
from openpype.pipeline.template_data import get_template_data
return get_template_data(
project_doc, asset_doc, task_name, host_name
)
@deprecated("openpype.pipeline.workfile.get_workdir_with_workdir_data")
def get_workdir_with_workdir_data(
workdir_data, anatomy=None, project_name=None, template_key=None
):
"""Fill workdir path from entered data and project's anatomy.
It is possible to pass only the project's name instead of the project's
anatomy, but one of them **must** be entered. It is preferred to enter
anatomy if it is available, as initialization of a new Anatomy object may
be time consuming.
Args:
workdir_data (dict): Data to fill workdir template.
anatomy (Anatomy): Anatomy object for specific project. Optional if
`project_name` is entered.
project_name (str): Project's name. Optional if `anatomy` is entered,
otherwise an Anatomy object is created using the project name.
template_key (str): Key of work templates in anatomy templates. If not
passed `get_workfile_template_key_from_context` is used to get it.
dbcon(AvalonMongoDB): Mongo connection. Required only if 'template_key'
and 'project_name' are not passed.
Returns:
TemplateResult: Workdir path.
Raises:
ValueError: When both `anatomy` and `project_name` are set to None.
Deprecated:
Function will be removed after release version 3.15.*
"""
if not anatomy and not project_name:
raise ValueError((
"Missing required arguments one of `project_name` or `anatomy`"
" must be entered."
))
if not project_name:
project_name = anatomy.project_name
from openpype.pipeline.workfile import get_workdir_with_workdir_data
return get_workdir_with_workdir_data(
workdir_data, project_name, anatomy, template_key
)
@deprecated("openpype.pipeline.workfile.get_workdir_with_workdir_data")
def get_workdir(
project_doc,
asset_doc,
task_name,
host_name,
anatomy=None,
template_key=None
):
"""Fill workdir path from entered data and project's anatomy.
Args:
project_doc (dict): Mongo document of project from MongoDB.
asset_doc (dict): Mongo document of asset from MongoDB.
task_name (str): Task name for which workdir data are prepared.
host_name (str): Host which is used for the workdir. This is required
because the workdir template may contain the `{app}` key. In `Session`
it is stored under the `AVALON_APP` key.
anatomy (Anatomy): Optional argument. Anatomy object is created using
project name from `project_doc`. It is preferred to pass this
argument as initialization of a new Anatomy object may be time
consuming.
template_key (str): Key of work templates in anatomy templates. Default
value is defined in `get_workdir_with_workdir_data`.
Returns:
TemplateResult: Workdir path.
Deprecated:
Function will be removed after release version 3.15.*
"""
from openpype.pipeline.workfile import get_workdir
# Output is TemplateResult object which contain useful data
return get_workdir(
project_doc,
asset_doc,
task_name,
host_name,
anatomy,
template_key
)
@deprecated("openpype.pipeline.context_tools.get_template_data_from_session")
def template_data_from_session(session=None):
""" Return dictionary with template from session keys.
Args:
session (dict, Optional): The Session to use. If not provided use the
currently active global Session.
Returns:
dict: All available data from session.
Deprecated:
Function will be removed after release version 3.15.*
"""
from openpype.pipeline.context_tools import get_template_data_from_session
return get_template_data_from_session(session)
@deprecated("openpype.pipeline.context_tools.compute_session_changes")
def compute_session_changes(
session, task=None, asset=None, app=None, template_key=None
@ -588,133 +322,6 @@ def update_current_task(task=None, asset=None, app=None, template_key=None):
return change_current_context(asset, task, template_key)
@deprecated("openpype.client.get_workfile_info")
def get_workfile_doc(asset_id, task_name, filename, dbcon=None):
"""Return workfile document for entered context.
Do not use this method to get more than one document. In such cases use
a custom query, as this will return documents from the database one by one.
Args:
asset_id (ObjectId): Mongo ID of an asset under which workfile belongs.
task_name (str): Name of task under which the workfile belongs.
filename (str): Name of a workfile.
dbcon (AvalonMongoDB): Optionally enter avalon AvalonMongoDB object and
`legacy_io` is used if not entered.
Returns:
dict: Workfile document or None.
Deprecated:
Function will be removed after release version 3.15.*
"""
# Use legacy_io if dbcon is not entered
if not dbcon:
from openpype.pipeline import legacy_io
dbcon = legacy_io
project_name = dbcon.active_project()
return get_workfile_info(project_name, asset_id, task_name, filename)
@deprecated
def create_workfile_doc(asset_doc, task_name, filename, workdir, dbcon=None):
"""Creates or replace workfile document in mongo.
Do not use this method to update data. This method will remove all
additional data from the existing document.
Args:
asset_doc (dict): Document of asset under which workfile belongs.
task_name (str): Name of the task to which the workfile is related.
filename (str): Filename of workfile.
workdir (str): Path to directory where `filename` is located.
dbcon (AvalonMongoDB): Optionally enter avalon AvalonMongoDB object and
`legacy_io` is used if not entered.
"""
from openpype.pipeline import Anatomy
from openpype.pipeline.template_data import get_template_data
# Use legacy_io if dbcon is not entered
if not dbcon:
from openpype.pipeline import legacy_io
dbcon = legacy_io
# Filter of workfile document
doc_filter = {
"type": "workfile",
"parent": asset_doc["_id"],
"task_name": task_name,
"filename": filename
}
# Document data are copy of filter
doc_data = copy.deepcopy(doc_filter)
# Prepare project for workdir data
project_name = dbcon.active_project()
project_doc = get_project(project_name)
workdir_data = get_template_data(
project_doc, asset_doc, task_name, dbcon.Session["AVALON_APP"]
)
# Prepare anatomy
anatomy = Anatomy(project_name)
# Get workdir path (result is anatomy.TemplateResult)
template_workdir = get_workdir_with_workdir_data(
workdir_data, anatomy
)
template_workdir_path = str(template_workdir).replace("\\", "/")
# Replace backslashes in the workdir path where the workfile is located
mod_workdir = workdir.replace("\\", "/")
# Replace workdir from templates with rootless workdir
rootless_workdir = mod_workdir.replace(
template_workdir_path,
template_workdir.rootless.replace("\\", "/")
)
doc_data["schema"] = "pype:workfile-1.0"
doc_data["files"] = ["/".join([rootles_workdir, filename])]
doc_data["data"] = {}
dbcon.replace_one(
doc_filter,
doc_data,
upsert=True
)
@deprecated
def save_workfile_data_to_doc(workfile_doc, data, dbcon=None):
if not workfile_doc:
# TODO add log message
return
if not data:
return
# Use legacy_io if dbcon is not entered
if not dbcon:
from openpype.pipeline import legacy_io
dbcon = legacy_io
# Convert data to mongo modification keys/values
# - this is a naive implementation which does not expect nested
# dictionaries
set_data = {}
for key, value in data.items():
new_key = "data.{}".format(key)
set_data[new_key] = value
# Update workfile document with data
dbcon.update_one(
{"_id": workfile_doc["_id"]},
{"$set": set_data}
)
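For illustration, the naive flattening above turns data = {"comment": "wip", "version": 3} into keys under the 'data' namespace:

set_data = {"data.comment": "wip", "data.version": 3}
# dbcon.update_one({"_id": workfile_doc["_id"]}, {"$set": set_data})
# A nested dict value would be replaced wholesale rather than merged.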
@deprecated("openpype.pipeline.workfile.BuildWorkfile")
def BuildWorkfile():
"""Build workfile class was moved to workfile pipeline.
@ -747,38 +354,6 @@ def get_creator_by_name(creator_name, case_sensitive=False):
return get_legacy_creator_by_name(creator_name, case_sensitive)
@deprecated
def change_timer_to_current_context():
"""Called after context change to change timers.
Deprecated:
This method is specific to the TimersManager module so please use the
functionality from there. Function will be removed after release
version 3.15.*
"""
from openpype.pipeline import legacy_io
webserver_url = os.environ.get("OPENPYPE_WEBSERVER_URL")
if not webserver_url:
log.warning("Couldn't find webserver url")
return
rest_api_url = "{}/timers_manager/start_timer".format(webserver_url)
try:
import requests
except Exception:
log.warning("Couldn't start timer")
return
data = {
"project_name": legacy_io.Session["AVALON_PROJECT"],
"asset_name": legacy_io.Session["AVALON_ASSET"],
"task_name": legacy_io.Session["AVALON_TASK"]
}
requests.post(rest_api_url, json=data)
def _get_task_context_data_for_anatomy(
project_doc, asset_doc, task_name, anatomy=None
):
@ -800,6 +375,8 @@ def _get_task_context_data_for_anatomy(
dict: With Anatomy context data.
"""
from openpype.pipeline.template_data import get_general_template_data
if anatomy is None:
from openpype.pipeline import Anatomy
anatomy = Anatomy(project_doc["name"])
@ -840,7 +417,7 @@ def _get_task_context_data_for_anatomy(
}
}
system_general_data = get_system_general_anatomy_data()
system_general_data = get_general_template_data()
data.update(system_general_data)
return data

View file

@@ -8,7 +8,6 @@ import warnings
import functools
from openpype.client import get_asset_by_id
from openpype.settings import get_project_settings
log = logging.getLogger(__name__)
@@ -101,8 +100,6 @@ def get_subset_name_with_asset_doc(
is not passed.
dynamic_data (dict): Dynamic data specific for a creator which creates
instance.
dbcon (AvalonMongoDB): Mongo connection to be able query asset document
if 'asset_doc' is not passed.
"""
from openpype.pipeline.create import get_subset_name
@@ -202,122 +199,6 @@ def prepare_template_data(fill_pairs):
return fill_data
@deprecated("openpype.pipeline.publish.lib.filter_pyblish_plugins")
def filter_pyblish_plugins(plugins):
"""Filter pyblish plugins by presets.
    This serves as a plugin filter/modifier for pyblish. It will load plugin
    definitions from presets and filter out those that need to be excluded.
Args:
plugins (dict): Dictionary of plugins produced by :mod:`pyblish-base`
`discover()` method.
Deprecated:
Function will be removed after release version 3.15.*
"""
from openpype.pipeline.publish.lib import filter_pyblish_plugins
filter_pyblish_plugins(plugins)
@deprecated
def set_plugin_attributes_from_settings(
plugins, superclass, host_name=None, project_name=None
):
"""Change attribute values on Avalon plugins by project settings.
    This function should be used only in a host context. It modifies the
    behavior of plugins.
Args:
        plugins (list): Plugins discovered by the original avalon discover
            method.
        superclass (object): Superclass of plugin type (e.g. Creator, Loader).
        host_name (str): Name of host for which plugins are loaded.
            Value from environment `AVALON_APP` is used if not entered.
project_name (str): Name of project for which settings will be loaded.
Value from environment `AVALON_PROJECT` is used if not entered.
Deprecated:
Function will be removed after release version 3.15.*
"""
# Function is not used anymore
from openpype.pipeline import LegacyCreator, LoaderPlugin
# determine host application to use for finding presets
if host_name is None:
host_name = os.environ.get("AVALON_APP")
if project_name is None:
project_name = os.environ.get("AVALON_PROJECT")
# map plugin superclass to preset json. Currently supported is load and
# create (LoaderPlugin and LegacyCreator)
plugin_type = None
if superclass is LoaderPlugin or issubclass(superclass, LoaderPlugin):
plugin_type = "load"
elif superclass is LegacyCreator or issubclass(superclass, LegacyCreator):
plugin_type = "create"
if not host_name or not project_name or plugin_type is None:
msg = "Skipped attributes override from settings."
if not host_name:
msg += " Host name is not defined."
if not project_name:
msg += " Project name is not defined."
if plugin_type is None:
msg += " Plugin type is unsupported for class {}.".format(
superclass.__name__
)
print(msg)
return
print(">>> Finding presets for {}:{} ...".format(host_name, plugin_type))
project_settings = get_project_settings(project_name)
plugin_type_settings = (
project_settings
.get(host_name, {})
.get(plugin_type, {})
)
global_type_settings = (
project_settings
.get("global", {})
.get(plugin_type, {})
)
if not global_type_settings and not plugin_type_settings:
return
for plugin in plugins:
plugin_name = plugin.__name__
plugin_settings = None
# Look for plugin settings in host specific settings
if plugin_name in plugin_type_settings:
plugin_settings = plugin_type_settings[plugin_name]
# Look for plugin settings in global settings
elif plugin_name in global_type_settings:
plugin_settings = global_type_settings[plugin_name]
if not plugin_settings:
continue
print(">>> We have preset for {}".format(plugin_name))
for option, value in plugin_settings.items():
if option == "enabled" and value is False:
setattr(plugin, "active", False)
print(" - is disabled by preset")
else:
setattr(plugin, option, value)
print(" - setting `{}`: `{}`".format(option, value))
def source_hash(filepath, *args):
"""Generate simple identifier for a source file.
This is used to identify whether a source file has previously been

View file

@@ -12,6 +12,7 @@ from openpype.pipeline import legacy_io
from openpype_modules.deadline import abstract_submit_deadline
from openpype_modules.deadline.abstract_submit_deadline import DeadlineJobInfo
from openpype.tests.lib import is_in_tests
from openpype.lib import is_running_from_build
@attr.s
@@ -87,9 +88,13 @@ class AfterEffectsSubmitDeadline(
"AVALON_APP_NAME",
"OPENPYPE_DEV",
"OPENPYPE_LOG_NO_COLORS",
"OPENPYPE_VERSION",
"IS_TEST"
]
# Add OpenPype version if we are running from build.
if is_running_from_build():
keys.append("OPENPYPE_VERSION")
# Add mongo url if it's enabled
if self._instance.context.data.get("deadlinePassMongoUrl"):
keys.append("OPENPYPE_MONGO")
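The same conditional recurs in the submitters below; a condensed sketch of the pattern, assuming a boolean flag and a session mapping as stand-ins for 'is_running_from_build()' and 'legacy_io.Session':

import os

def build_farm_environment(keys, running_from_build, session):
    # Only pin OPENPYPE_VERSION on the farm when running from a frozen
    # build; source checkouts let workers resolve their own version.
    keys = list(keys)
    if running_from_build:
        keys.append("OPENPYPE_VERSION")
    environment = {key: os.environ[key] for key in keys if key in os.environ}
    environment.update(session)
    return environment

print(build_farm_environment(["AVALON_PROJECT"], False, {"AVALON_TASK": "comp"}))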

View file

@@ -14,6 +14,7 @@ from openpype.pipeline import legacy_io
from openpype_modules.deadline import abstract_submit_deadline
from openpype_modules.deadline.abstract_submit_deadline import DeadlineJobInfo
from openpype.tests.lib import is_in_tests
from openpype.lib import is_running_from_build
class _ZipFile(ZipFile):
@@ -279,10 +280,14 @@ class HarmonySubmitDeadline(
"AVALON_TASK",
"AVALON_APP_NAME",
"OPENPYPE_DEV",
"OPENPYPE_LOG_NO_COLORS",
"OPENPYPE_VERSION",
"OPENPYPE_LOG_NO_COLORS"
"IS_TEST"
]
# Add OpenPype version if we are running from build.
if is_running_from_build():
keys.append("OPENPYPE_VERSION")
# Add mongo url if it's enabled
if self._instance.context.data.get("deadlinePassMongoUrl"):
keys.append("OPENPYPE_MONGO")

View file

@@ -9,6 +9,7 @@ import pyblish.api
from openpype.pipeline import legacy_io
from openpype.tests.lib import is_in_tests
from openpype.lib import is_running_from_build
class HoudiniSubmitPublishDeadline(pyblish.api.ContextPlugin):
@@ -133,9 +134,13 @@ class HoudiniSubmitPublishDeadline(pyblish.api.ContextPlugin):
# Submit along the current Avalon tool setup that we launched
# this application with so the Render Slave can build its own
# similar environment using it, e.g. "houdini17.5;pluginx2.3"
"AVALON_TOOLS",
"OPENPYPE_VERSION"
"AVALON_TOOLS"
]
# Add OpenPype version if we are running from build.
if is_running_from_build():
keys.append("OPENPYPE_VERSION")
# Add mongo url if it's enabled
if context.data.get("deadlinePassMongoUrl"):
keys.append("OPENPYPE_MONGO")

View file

@@ -10,6 +10,7 @@ import pyblish.api
from openpype.pipeline import legacy_io
from openpype.tests.lib import is_in_tests
from openpype.lib import is_running_from_build
class HoudiniSubmitRenderDeadline(pyblish.api.InstancePlugin):
@@ -105,9 +106,13 @@ class HoudiniSubmitRenderDeadline(pyblish.api.InstancePlugin):
# Submit along the current Avalon tool setup that we launched
# this application with so the Render Slave can build its own
# similar environment using it, e.g. "maya2018;vray4.x;yeti3.1.9"
"AVALON_TOOLS",
"OPENPYPE_VERSION"
"AVALON_TOOLS"
]
# Add OpenPype version if we are running from build.
if is_running_from_build():
keys.append("OPENPYPE_VERSION")
# Add mongo url if it's enabled
if context.data.get("deadlinePassMongoUrl"):
keys.append("OPENPYPE_MONGO")

View file

@@ -0,0 +1,218 @@
import os
import getpass
import copy
import attr
from openpype.pipeline import legacy_io
from openpype.settings import get_project_settings
from openpype.hosts.max.api.lib import (
get_current_renderer,
get_multipass_setting
)
from openpype.hosts.max.api.lib_rendersettings import RenderSettings
from openpype_modules.deadline import abstract_submit_deadline
from openpype_modules.deadline.abstract_submit_deadline import DeadlineJobInfo
@attr.s
class MaxPluginInfo(object):
SceneFile = attr.ib(default=None) # Input
Version = attr.ib(default=None) # Mandatory for Deadline
SaveFile = attr.ib(default=True)
IgnoreInputs = attr.ib(default=True)
class MaxSubmitDeadline(abstract_submit_deadline.AbstractSubmitDeadline):
label = "Submit Render to Deadline"
hosts = ["max"]
families = ["maxrender"]
targets = ["local"]
use_published = True
priority = 50
tile_priority = 50
chunk_size = 1
jobInfo = {}
pluginInfo = {}
group = None
deadline_pool = None
deadline_pool_secondary = None
framePerTask = 1
def get_job_info(self):
job_info = DeadlineJobInfo(Plugin="3dsmax")
# todo: test whether this works for existing production cases
# where custom jobInfo was stored in the project settings
job_info.update(self.jobInfo)
instance = self._instance
context = instance.context
# Always use the original work file name for the Job name even when
# rendering is done from the published Work File. The original work
# file name is clearer because it can also have subversion strings,
# etc. which are stripped for the published file.
src_filepath = context.data["currentFile"]
src_filename = os.path.basename(src_filepath)
job_info.Name = "%s - %s" % (src_filename, instance.name)
job_info.BatchName = src_filename
job_info.Plugin = instance.data["plugin"]
job_info.UserName = context.data.get("deadlineUser", getpass.getuser())
# Deadline requires integers in frame range
frames = "{start}-{end}".format(
start=int(instance.data["frameStart"]),
end=int(instance.data["frameEnd"])
)
job_info.Frames = frames
job_info.Pool = instance.data.get("primaryPool")
job_info.SecondaryPool = instance.data.get("secondaryPool")
job_info.ChunkSize = instance.data.get("chunkSize", 1)
job_info.Comment = context.data.get("comment")
job_info.Priority = instance.data.get("priority", self.priority)
job_info.FramesPerTask = instance.data.get("framesPerTask", 1)
if self.group:
job_info.Group = self.group
# Add options from RenderGlobals
render_globals = instance.data.get("renderGlobals", {})
job_info.update(render_globals)
keys = [
"FTRACK_API_KEY",
"FTRACK_API_USER",
"FTRACK_SERVER",
"OPENPYPE_SG_USER",
"AVALON_PROJECT",
"AVALON_ASSET",
"AVALON_TASK",
"AVALON_APP_NAME",
"OPENPYPE_DEV",
"OPENPYPE_VERSION",
"IS_TEST"
]
# Add mongo url if it's enabled
if self._instance.context.data.get("deadlinePassMongoUrl"):
keys.append("OPENPYPE_MONGO")
environment = dict({key: os.environ[key] for key in keys
if key in os.environ}, **legacy_io.Session)
for key in keys:
value = environment.get(key)
if not value:
continue
job_info.EnvironmentKeyValue[key] = value
# to recognize job from PYPE for turning Event On/Off
job_info.EnvironmentKeyValue["OPENPYPE_RENDER_JOB"] = "1"
job_info.EnvironmentKeyValue["OPENPYPE_LOG_NO_COLORS"] = "1"
# Add list of expected files to job
# ---------------------------------
exp = instance.data.get("expectedFiles")
for filepath in exp:
job_info.OutputDirectory += os.path.dirname(filepath)
job_info.OutputFilename += os.path.basename(filepath)
return job_info
def get_plugin_info(self):
instance = self._instance
plugin_info = MaxPluginInfo(
SceneFile=self.scene_path,
Version=instance.data["maxversion"],
SaveFile=True,
IgnoreInputs=True
)
plugin_payload = attr.asdict(plugin_info)
# Patching with pluginInfo from settings
for key, value in self.pluginInfo.items():
plugin_payload[key] = value
return plugin_payload
def process_submission(self):
instance = self._instance
filepath = self.scene_path
expected_files = instance.data["expectedFiles"]
if not expected_files:
raise RuntimeError("No Render Elements found!")
output_dir = os.path.dirname(expected_files[0])
instance.data["outputDir"] = output_dir
instance.data["toBeRenderedOn"] = "deadline"
filename = os.path.basename(filepath)
payload_data = {
"filename": filename,
"dirname": output_dir
}
self.log.debug("Submitting 3dsMax render..")
payload = self._use_published_name(payload_data)
job_info, plugin_info = payload
self.submit(self.assemble_payload(job_info, plugin_info))
def _use_published_name(self, data):
instance = self._instance
job_info = copy.deepcopy(self.job_info)
plugin_info = copy.deepcopy(self.plugin_info)
plugin_data = {}
project_setting = get_project_settings(
legacy_io.Session["AVALON_PROJECT"]
)
multipass = get_multipass_setting(project_setting)
if multipass:
plugin_data["DisableMultipass"] = 0
else:
plugin_data["DisableMultipass"] = 1
expected_files = instance.data.get("expectedFiles")
if not expected_files:
raise RuntimeError("No render elements found")
old_output_dir = os.path.dirname(expected_files[0])
output_beauty = RenderSettings().get_render_output(instance.name,
old_output_dir)
filepath = self.from_published_scene()
def _clean_name(path):
return os.path.splitext(os.path.basename(path))[0]
new_scene = _clean_name(filepath)
orig_scene = _clean_name(instance.context.data["currentFile"])
output_beauty = output_beauty.replace(orig_scene, new_scene)
output_beauty = output_beauty.replace("\\", "/")
plugin_data["RenderOutput"] = output_beauty
renderer_class = get_current_renderer()
renderer = str(renderer_class).split(":")[0]
if renderer in [
"ART_Renderer",
"Redshift_Renderer",
"V_Ray_6_Hotfix_3",
"V_Ray_GPU_6_Hotfix_3",
"Default_Scanline_Renderer",
"Quicksilver_Hardware_Renderer",
]:
render_elem_list = RenderSettings().get_render_element()
for i, element in enumerate(render_elem_list):
element = element.replace(orig_scene, new_scene)
plugin_data["RenderElementOutputFilename%d" % i] = element # noqa
self.log.debug("plugin data:{}".format(plugin_data))
plugin_info.update(plugin_data)
return job_info, plugin_info
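A tiny sketch of the scene-name swap done in '_use_published_name' above, with hypothetical paths:

import os

def _clean_name(path):
    return os.path.splitext(os.path.basename(path))[0]

orig_scene = _clean_name("C:/work/sh010_v001.max")
new_scene = _clean_name("P:/publish/sh010_v001_published.max")
output_beauty = "renders/3dsmax/sh010_v001/beauty.exr"
output_beauty = output_beauty.replace(orig_scene, new_scene).replace("\\", "/")
print(output_beauty)  # renders/3dsmax/sh010_v001_published/beauty.exr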

View file

@@ -38,6 +38,7 @@ from openpype.hosts.maya.api.lib import get_attr_in_layer
from openpype_modules.deadline import abstract_submit_deadline
from openpype_modules.deadline.abstract_submit_deadline import DeadlineJobInfo
from openpype.tests.lib import is_in_tests
from openpype.lib import is_running_from_build
def _validate_deadline_bool_value(instance, attribute, value):
@@ -165,10 +166,14 @@ class MayaSubmitDeadline(abstract_submit_deadline.AbstractSubmitDeadline):
"AVALON_ASSET",
"AVALON_TASK",
"AVALON_APP_NAME",
"OPENPYPE_DEV",
"OPENPYPE_VERSION",
"OPENPYPE_DEV"
"IS_TEST"
]
# Add OpenPype version if we are running from build.
if is_running_from_build():
keys.append("OPENPYPE_VERSION")
# Add mongo url if it's enabled
if self._instance.context.data.get("deadlinePassMongoUrl"):
keys.append("OPENPYPE_MONGO")

View file

@@ -7,6 +7,7 @@ from maya import cmds
from openpype.pipeline import legacy_io, PublishXmlValidationError
from openpype.settings import get_project_settings
from openpype.tests.lib import is_in_tests
from openpype.lib import is_running_from_build
import pyblish.api
@@ -104,9 +105,13 @@ class MayaSubmitRemotePublishDeadline(pyblish.api.InstancePlugin):
keys = [
"FTRACK_API_USER",
"FTRACK_API_KEY",
"FTRACK_SERVER",
"OPENPYPE_VERSION"
"FTRACK_SERVER"
]
# Add OpenPype version if we are running from build.
if is_running_from_build():
keys.append("OPENPYPE_VERSION")
environment = dict({key: os.environ[key] for key in keys
if key in os.environ}, **legacy_io.Session)

View file

@@ -10,6 +10,7 @@ import pyblish.api
import nuke
from openpype.pipeline import legacy_io
from openpype.tests.lib import is_in_tests
from openpype.lib import is_running_from_build
class NukeSubmitDeadline(pyblish.api.InstancePlugin):
@@ -265,9 +266,13 @@ class NukeSubmitDeadline(pyblish.api.InstancePlugin):
"PYBLISHPLUGINPATH",
"NUKE_PATH",
"TOOL_ENV",
"FOUNDRY_LICENSE",
"OPENPYPE_VERSION"
"FOUNDRY_LICENSE"
]
# Add OpenPype version if we are running from build.
if is_running_from_build():
keys.append("OPENPYPE_VERSION")
# Add mongo url if it's enabled
if instance.context.data.get("deadlinePassMongoUrl"):
keys.append("OPENPYPE_MONGO")

View file

@@ -20,6 +20,7 @@ from openpype.pipeline import (
)
from openpype.tests.lib import is_in_tests
from openpype.pipeline.farm.patterning import match_aov_pattern
from openpype.lib import is_running_from_build
def get_resources(project_name, version, extension=None):
@@ -117,15 +118,17 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
deadline_plugin = "OpenPype"
targets = ["local"]
hosts = ["fusion", "maya", "nuke", "celaction", "aftereffects", "harmony"]
hosts = ["fusion", "max", "maya", "nuke",
"celaction", "aftereffects", "harmony"]
families = ["render.farm", "prerender.farm",
"renderlayer", "imagesequence", "vrayscene"]
"renderlayer", "imagesequence", "maxrender", "vrayscene"]
aov_filter = {"maya": [r".*([Bb]eauty).*"],
"aftereffects": [r".*"], # for everything from AE
"harmony": [r".*"], # for everything from AE
"celaction": [r".*"]}
"celaction": [r".*"],
"max": [r".*"]}
environ_job_filter = [
"OPENPYPE_METADATA_FILE"
@@ -136,10 +139,13 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
"FTRACK_API_KEY",
"FTRACK_SERVER",
"AVALON_APP_NAME",
"OPENPYPE_USERNAME",
"OPENPYPE_VERSION"
"OPENPYPE_USERNAME"
]
# Add OpenPype version if we are running from build.
if is_running_from_build():
environ_keys.append("OPENPYPE_VERSION")
# custom deadline attributes
deadline_department = ""
deadline_pool = ""
@@ -292,8 +298,8 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
"Group": self.deadline_group,
"Pool": instance.data.get("primaryPool"),
"SecondaryPool": instance.data.get("secondaryPool"),
"OutputDirectory0": output_dir
            # ensure the output directory uses forward slashes
"OutputDirectory0": output_dir.replace("\\", "/")
},
"PluginInfo": {
"Version": self.plugin_pype_version,
@@ -514,6 +520,7 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
# toggle preview on if multipart is on
if instance_data.get("multipartExr"):
self.log.debug("Adding preview tag because its multipartExr")
preview = True
self.log.debug("preview:{}".format(preview))
new_instance = deepcopy(instance_data)
@@ -593,6 +600,9 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
if instance["useSequenceForReview"]:
# toggle preview on if multipart is on
if instance.get("multipartExr", False):
self.log.debug(
"Adding preview tag because its multipartExr"
)
preview = True
else:
render_file_name = list(collection)[0]
@@ -700,8 +710,14 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
if preview:
if "ftrack" not in families:
if os.environ.get("FTRACK_SERVER"):
self.log.debug(
"Adding \"ftrack\" to families because of preview tag."
)
families.append("ftrack")
if "review" not in families:
self.log.debug(
"Adding \"review\" to families because of preview tag."
)
families.append("review")
instance["families"] = families
@@ -960,6 +976,7 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
'''
render_job = None
submission_type = ""
if instance.data.get("toBeRenderedOn") == "deadline":
render_job = data.pop("deadlineSubmissionJob", None)
submission_type = "deadline"

View file

@@ -201,19 +201,21 @@ def get_openpype_versions(dir_list):
print(">>> Getting OpenPype executable ...")
openpype_versions = []
install_dir = DirectoryUtils.SearchDirectoryList(dir_list)
if install_dir:
print("--- Looking for OpenPype at: {}".format(install_dir))
sub_dirs = [
f.path for f in os.scandir(install_dir)
if f.is_dir()
]
for subdir in sub_dirs:
version = get_openpype_version_from_path(subdir)
if not version:
continue
print(" - found: {} - {}".format(version, subdir))
openpype_versions.append((version, subdir))
# special case of multiple install dirs
    for single_dir_list in dir_list.split(","):
        install_dir = DirectoryUtils.SearchDirectoryList(single_dir_list)
if install_dir:
print("--- Looking for OpenPype at: {}".format(install_dir))
sub_dirs = [
f.path for f in os.scandir(install_dir)
if f.is_dir()
]
for subdir in sub_dirs:
version = get_openpype_version_from_path(subdir)
if not version:
continue
print(" - found: {} - {}".format(version, subdir))
openpype_versions.append((version, subdir))
return openpype_versions
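A standalone sketch of the new comma-separated handling; the directory values are hypothetical and Deadline's DirectoryUtils.SearchDirectoryList() is stubbed with a plain existence check:

import os

dir_list = "C:/Program Files/OpenPype,D:/pipeline/OpenPype"
for single_dir_list in dir_list.split(","):
    # Stand-in for DirectoryUtils.SearchDirectoryList(single_dir_list).
    install_dir = single_dir_list if os.path.isdir(single_dir_list) else None
    if install_dir:
        print("--- Looking for OpenPype at: {}".format(install_dir))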

View file

@@ -107,20 +107,23 @@ class OpenPypeDeadlinePlugin(DeadlinePlugin):
"Scanning for compatible requested "
f"version {requested_version}"))
dir_list = self.GetConfigEntry("OpenPypeInstallationDirs")
# clean '\ ' for MacOS pasting
if platform.system().lower() == "darwin":
dir_list = dir_list.replace("\\ ", " ")
install_dir = DirectoryUtils.SearchDirectoryList(dir_list)
if install_dir:
sub_dirs = [
f.path for f in os.scandir(install_dir)
if f.is_dir()
]
for subdir in sub_dirs:
version = self.get_openpype_version_from_path(subdir)
if not version:
continue
openpype_versions.append((version, subdir))
        for single_dir_list in dir_list.split(","):
            install_dir = DirectoryUtils.SearchDirectoryList(single_dir_list)
if install_dir:
sub_dirs = [
f.path for f in os.scandir(install_dir)
if f.is_dir()
]
for subdir in sub_dirs:
version = self.get_openpype_version_from_path(subdir)
if not version:
continue
openpype_versions.append((version, subdir))
exe_list = self.GetConfigEntry("OpenPypeExecutable")
# clean '\ ' for MacOS pasting

View file

@@ -3,6 +3,7 @@ import json
import copy
import pyblish.api
from openpype.pipeline.publish import get_publish_repre_path
from openpype.lib.openpype_version import get_openpype_version
from openpype.lib.transcoding import (
get_ffprobe_streams,
@@ -55,6 +56,7 @@ class IntegrateFtrackInstance(pyblish.api.InstancePlugin):
"reference": "reference"
}
keep_first_subset_name_for_review = True
upload_reviewable_with_origin_name = False
asset_versions_status_profiles = []
additional_metadata_keys = []
@@ -153,7 +155,7 @@
if not review_representations or has_movie_review:
for repre in thumbnail_representations:
repre_path = self._get_repre_path(instance, repre, False)
repre_path = get_publish_repre_path(instance, repre, False)
if not repre_path:
self.log.warning(
"Published path is not set and source was removed."
@@ -210,7 +212,7 @@
"from {}".format(repre))
continue
repre_path = self._get_repre_path(instance, repre, False)
repre_path = get_publish_repre_path(instance, repre, False)
if not repre_path:
self.log.warning(
"Published path is not set and source was removed."
@@ -293,6 +295,13 @@
)
# Add item to component list
component_list.append(review_item)
if self.upload_reviewable_with_origin_name:
origin_name_component = copy.deepcopy(review_item)
filename = os.path.basename(repre_path)
origin_name_component["component_data"]["name"] = (
os.path.splitext(filename)[0]
)
component_list.append(origin_name_component)
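A small sketch of the origin-name duplication above, with a hypothetical review item and path; the extra ftrack component is named after the source file instead of the normalized review name:

import copy
import os

review_item = {"component_data": {"name": "ftrackreview-mp4"}}
repre_path = "/tmp/sh010_comp_v001.mov"
origin_name_component = copy.deepcopy(review_item)
origin_name_component["component_data"]["name"] = (
    os.path.splitext(os.path.basename(repre_path))[0]
)
print(origin_name_component["component_data"]["name"])  # sh010_comp_v001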
# Duplicate thumbnail component for all not first reviews
if first_thumbnail_component is not None:
@@ -324,7 +333,7 @@
# Add others representations as component
for repre in other_representations:
published_path = self._get_repre_path(instance, repre, True)
published_path = get_publish_repre_path(instance, repre, True)
if not published_path:
continue
# Create copy of base comp item and append it
@@ -364,51 +373,6 @@
def _collect_additional_metadata(self, streams):
pass
def _get_repre_path(self, instance, repre, only_published):
"""Get representation path that can be used for integration.
When 'only_published' is set to true the validation of path is not
relevant. In that case we just need what is set in 'published_path'
as "reference". The reference is not used to get or upload the file but
for reference where the file was published.
Args:
instance (pyblish.Instance): Processed instance object. Used
for source of staging dir if representation does not have
filled it.
repre (dict): Representation on instance which could be and
could not be integrated with main integrator.
only_published (bool): Care only about published paths and
ignore if filepath is not existing anymore.
Returns:
str: Path to representation file.
None: Path is not filled or does not exists.
"""
published_path = repre.get("published_path")
if published_path:
published_path = os.path.normpath(published_path)
if os.path.exists(published_path):
return published_path
if only_published:
return published_path
comp_files = repre["files"]
if isinstance(comp_files, (tuple, list, set)):
filename = comp_files[0]
else:
filename = comp_files
staging_dir = repre.get("stagingDir")
if not staging_dir:
staging_dir = instance.data["stagingDir"]
src_path = os.path.normpath(os.path.join(staging_dir, filename))
if os.path.exists(src_path):
return src_path
return None
def _get_asset_version_status_name(self, instance):
if not self.asset_versions_status_profiles:
return None

View file

@@ -1,6 +1,8 @@
import os
import pyblish.api
from openpype.pipeline.publish import get_publish_repre_path
class IntegrateShotgridPublish(pyblish.api.InstancePlugin):
"""
@@ -22,7 +24,9 @@ class IntegrateShotgridPublish(pyblish.api.InstancePlugin):
for representation in instance.data.get("representations", []):
local_path = representation.get("published_path")
local_path = get_publish_repre_path(
instance, representation, False
)
code = os.path.basename(local_path)
if representation.get("tags", []):

View file

@@ -1,6 +1,7 @@
import os
import pyblish.api
from openpype.pipeline.publish import get_publish_repre_path
class IntegrateShotgridVersion(pyblish.api.InstancePlugin):
"""Integrate Shotgrid Version"""
@@ -41,8 +42,9 @@ class IntegrateShotgridVersion(pyblish.api.InstancePlugin):
data_to_update["sg_status_list"] = status
for representation in instance.data.get("representations", []):
local_path = representation.get("published_path")
code = os.path.basename(local_path)
local_path = get_publish_repre_path(
instance, representation, False
)
if "shotgridreview" in representation.get("tags", []):

View file

@@ -8,6 +8,7 @@ from abc import ABCMeta, abstractmethod
import time
from openpype.client import OpenPypeMongoConnection
from openpype.pipeline.publish import get_publish_repre_path
from openpype.lib.plugin_tools import prepare_template_data
@@ -167,9 +168,8 @@ class IntegrateSlackAPI(pyblish.api.InstancePlugin):
thumbnail_path = None
for repre in instance.data.get("representations", []):
if repre.get('thumbnail') or "thumbnail" in repre.get('tags', []):
repre_thumbnail_path = (
repre.get("published_path") or
os.path.join(repre["stagingDir"], repre["files"])
repre_thumbnail_path = get_publish_repre_path(
instance, repre, False
)
if os.path.exists(repre_thumbnail_path):
thumbnail_path = repre_thumbnail_path
@@ -184,9 +184,8 @@
if (repre.get("review")
or "review" in tags
or "burnin" in tags):
repre_review_path = (
repre.get("published_path") or
os.path.join(repre["stagingDir"], repre["files"])
repre_review_path = get_publish_repre_path(
instance, repre, False
)
if os.path.exists(repre_review_path):
review_path = repre_review_path

View file

@@ -8,7 +8,10 @@ import inspect
from uuid import uuid4
from contextlib import contextmanager
from openpype.client import get_assets
import pyblish.logic
import pyblish.api
from openpype.client import get_assets, get_asset_by_name
from openpype.settings import (
get_system_settings,
get_project_settings
@@ -17,13 +20,11 @@ from openpype.lib.attribute_definitions import (
UnknownDef,
serialize_attr_defs,
deserialize_attr_defs,
get_default_values,
)
from openpype.host import IPublishHost
from openpype.pipeline import legacy_io
from openpype.pipeline.mongodb import (
AvalonMongoDB,
session_data_from_environment,
)
from openpype.pipeline.plugin_discover import DiscoverResult
from .creator_plugins import (
Creator,
@@ -1338,8 +1339,6 @@ class CreateContext:
Args:
host(ModuleType): Host implementation which handles implementation and
global metadata.
dbcon(AvalonMongoDB): Connection to mongo with context (at least
project).
headless(bool): Context is created out of UI (Current not used).
reset(bool): Reset context on initialization.
discover_publish_plugins(bool): Discover publish plugins during reset
@@ -1347,16 +1346,8 @@
"""
def __init__(
self, host, dbcon=None, headless=False, reset=True,
discover_publish_plugins=True
self, host, headless=False, reset=True, discover_publish_plugins=True
):
# Create conncetion if is not passed
if dbcon is None:
session = session_data_from_environment(True)
dbcon = AvalonMongoDB(session)
dbcon.install()
self.dbcon = dbcon
self.host = host
# Prepare attribute for logger (Created on demand in `log` property)
@@ -1380,6 +1371,10 @@
" Missing methods: {}"
).format(joined_methods))
self._current_project_name = None
self._current_asset_name = None
self._current_task_name = None
self._host_is_valid = host_is_valid
# Currently unused variable
self.headless = headless
@@ -1387,6 +1382,8 @@
# Instances by their ID
self._instances_by_id = {}
self.creator_discover_result = None
self.convertor_discover_result = None
# Discovered creators
self.creators = {}
# Prepare categories of creators
@@ -1499,11 +1496,20 @@
@property
def host_name(self):
if hasattr(self.host, "name"):
return self.host.name
return os.environ["AVALON_APP"]
@property
def project_name(self):
return self.dbcon.active_project()
def get_current_project_name(self):
return self._current_project_name
def get_current_asset_name(self):
return self._current_asset_name
def get_current_task_name(self):
return self._current_task_name
project_name = property(get_current_project_name)
@property
def log(self):
@@ -1520,7 +1526,7 @@
self.reset_preparation()
self.reset_avalon_context()
self.reset_current_context()
self.reset_plugins(discover_publish_plugins)
self.reset_context_data()
@@ -1567,14 +1573,22 @@
self._collection_shared_data = None
self.refresh_thumbnails()
def reset_avalon_context(self):
"""Give ability to reset avalon context.
def reset_current_context(self):
"""Refresh current context.
Reset is based on optional host implementation of `get_current_context`
function or using `legacy_io.Session`.
Some hosts have ability to change context file without using workfiles
tool but that change is not propagated to
tool but that change is not propagated to 'legacy_io.Session'
nor 'os.environ'.
Todos:
UI: Current context should be also checked on save - compare
initial values vs. current values.
Related to UI checks: Current workfile can be also considered
as current context information as that's where the metadata
            are stored. We should store the workfile (if it is available) too.
"""
project_name = asset_name = task_name = None
@@ -1592,12 +1606,9 @@
if not task_name:
task_name = legacy_io.Session.get("AVALON_TASK")
if project_name:
self.dbcon.Session["AVALON_PROJECT"] = project_name
if asset_name:
self.dbcon.Session["AVALON_ASSET"] = asset_name
if task_name:
self.dbcon.Session["AVALON_TASK"] = task_name
self._current_project_name = project_name
self._current_asset_name = asset_name
self._current_task_name = task_name
def reset_plugins(self, discover_publish_plugins=True):
"""Reload plugins.
@@ -1611,18 +1622,15 @@
self._reset_convertor_plugins()
def _reset_publish_plugins(self, discover_publish_plugins):
import pyblish.logic
from openpype.pipeline import OpenPypePyblishPluginMixin
from openpype.pipeline.publish import (
publish_plugins_discover,
DiscoverResult
publish_plugins_discover
)
# Reset publish plugins
self._attr_plugins_by_family = {}
discover_result = DiscoverResult()
discover_result = DiscoverResult(pyblish.api.Plugin)
plugins_with_defs = []
plugins_by_targets = []
plugins_mismatch_targets = []
@@ -1661,7 +1669,9 @@
creators = {}
autocreators = {}
manual_creators = {}
for creator_class in discover_creator_plugins():
report = discover_creator_plugins(return_report=True)
self.creator_discover_result = report
for creator_class in report.plugins:
if inspect.isabstract(creator_class):
self.log.info(
"Skipping abstract Creator {}".format(str(creator_class))
@@ -1706,7 +1716,9 @@
def _reset_convertor_plugins(self):
convertors_plugins = {}
for convertor_class in discover_convertor_plugins():
report = discover_convertor_plugins(return_report=True)
self.convertor_discover_result = report
for convertor_class in report.plugins:
if inspect.isabstract(convertor_class):
self.log.info(
"Skipping abstract Creator {}".format(str(convertor_class))
@@ -1792,40 +1804,128 @@
with self.bulk_instances_collection():
self._bulk_instances_to_process.append(instance)
def create(self, identifier, *args, **kwargs):
"""Wrapper for creators to trigger created.
def _get_creator_in_create(self, identifier):
"""Creator by identifier with unified error.
Different types of creators may expect different arguments thus the
hints for args are blind.
        Helper method to get a creator by identifier, raising the same error
        when the creator is not available.
Args:
identifier (str): Creator's identifier.
*args (Tuple[Any]): Arguments for create method.
**kwargs (Dict[Any, Any]): Keyword argument for create method.
identifier (str): Identifier of creator plugin.
Returns:
BaseCreator: Creator found by identifier.
Raises:
CreatorError: When identifier is not known.
"""
error_message = "Failed to run Creator with identifier \"{}\". {}"
creator = self.creators.get(identifier)
label = getattr(creator, "label", None)
failed = False
add_traceback = False
exc_info = None
try:
# Fake CreatorError (Could be maybe specific exception?)
if creator is None:
# Fake CreatorError (Could be maybe specific exception?)
if creator is None:
raise CreatorError(
"Creator {} was not found".format(identifier)
)
return creator
def create(
self,
creator_identifier,
variant,
asset_doc=None,
task_name=None,
pre_create_data=None
):
"""Trigger create of plugins with standartized arguments.
Arguments 'asset_doc' and 'task_name' use current context as default
values. If only 'task_name' is provided it will be overriden by
task name from current context. If 'task_name' is not provided
when 'asset_doc' is, it is considered that task name is not specified,
which can lead to error if subset name template requires task name.
Args:
creator_identifier (str): Identifier of creator plugin.
variant (str): Variant used for subset name.
asset_doc (Dict[str, Any]): Asset document which define context of
creation (possible context of created instance/s).
task_name (str): Name of task to which is context related.
pre_create_data (Dict[str, Any]): Pre-create attribute values.
Returns:
Any: Output of triggered creator's 'create' method.
Raises:
CreatorError: If creator was not found or asset is empty.
"""
creator = self._get_creator_in_create(creator_identifier)
project_name = self.project_name
if asset_doc is None:
asset_name = self.get_current_asset_name()
asset_doc = get_asset_by_name(project_name, asset_name)
task_name = self.get_current_task_name()
if asset_doc is None:
raise CreatorError(
"Creator {} was not found".format(identifier)
"Asset with name {} was not found".format(asset_name)
)
creator.create(*args, **kwargs)
if pre_create_data is None:
pre_create_data = {}
precreate_attr_defs = creator.get_pre_create_attr_defs() or []
# Create default values of precreate data
_pre_create_data = get_default_values(precreate_attr_defs)
# Update passed precreate data to default values
# TODO validate types
_pre_create_data.update(pre_create_data)
subset_name = creator.get_subset_name(
variant,
task_name,
asset_doc,
project_name,
self.host_name
)
instance_data = {
"asset": asset_doc["name"],
"task": task_name,
"family": creator.family,
"variant": variant
}
return creator.create(
subset_name,
instance_data,
_pre_create_data
)
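Hypothetical usage of the standardized call; 'create_context' is an initialized CreateContext and the identifier, variant and pre-create values are examples, not guaranteed to exist in a given host:

instance = create_context.create(
    "io.openpype.creators.maya.model",   # creator_identifier (example)
    "Main",                              # variant
    pre_create_data={"use_selection": True},
)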
def _create_with_unified_error(
self, identifier, creator, *args, **kwargs
):
error_message = "Failed to run Creator with identifier \"{}\". {}"
label = None
add_traceback = False
result = None
fail_info = None
success = False
try:
# Try to get creator and his label
if creator is None:
creator = self._get_creator_in_create(identifier)
label = getattr(creator, "label", label)
# Run create
result = creator.create(*args, **kwargs)
success = True
except CreatorError:
failed = True
exc_info = sys.exc_info()
self.log.warning(error_message.format(identifier, exc_info[1]))
except:
failed = True
add_traceback = True
exc_info = sys.exc_info()
self.log.warning(
@@ -1833,12 +1933,35 @@
exc_info=True
)
if failed:
raise CreatorsCreateFailed([
prepare_failed_creator_operation_info(
identifier, label, exc_info, add_traceback
)
])
if not success:
fail_info = prepare_failed_creator_operation_info(
identifier, label, exc_info, add_traceback
)
return result, fail_info
def create_with_unified_error(self, identifier, *args, **kwargs):
"""Trigger create but raise only one error if anything fails.
        Added to raise a unified exception. Captures any possible issue and
        re-raises it with unified information.
Args:
identifier (str): Identifier of creator.
*args (Tuple[Any]): Arguments for create method.
**kwargs (Dict[Any, Any]): Keyword argument for create method.
Raises:
            CreatorsCreateFailed: When creation fails for any reason. If
                anything goes wrong this is the only exception the method
                should raise.
"""
result, fail_info = self._create_with_unified_error(
identifier, None, *args, **kwargs
)
if fail_info is not None:
raise CreatorsCreateFailed([fail_info])
return result
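Hypothetical usage of the wrapper; the 'failed_info' attribute name on the raised exception is an assumption based on how the failure info is prepared above:

try:
    create_context.create_with_unified_error(
        "io.openpype.creators.maya.model", "Main"
    )
except CreatorsCreateFailed as exc:
    for fail_info in exc.failed_info:
        print("Creator failed:", fail_info)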
def _remove_instance(self, instance):
self._instances_by_id.pop(instance.id, None)
@@ -1968,38 +2091,12 @@
Reset instances if any autocreator executed properly.
"""
error_message = "Failed to run AutoCreator with identifier \"{}\". {}"
failed_info = []
for creator in self.sorted_autocreators:
identifier = creator.identifier
label = creator.label
failed = False
add_traceback = False
try:
creator.create()
except CreatorError:
failed = True
exc_info = sys.exc_info()
self.log.warning(error_message.format(identifier, exc_info[1]))
# Use bare except because some hosts raise their exceptions that
# do not inherit from python's `BaseException`
except:
failed = True
add_traceback = True
exc_info = sys.exc_info()
self.log.warning(
error_message.format(identifier, ""),
exc_info=True
)
if failed:
failed_info.append(
prepare_failed_creator_operation_info(
identifier, label, exc_info, add_traceback
)
)
_, fail_info = self._create_with_unified_error(identifier, creator)
if fail_info is not None:
failed_info.append(fail_info)
if failed_info:
raise CreatorsCreateFailed(failed_info)

View file

@@ -79,6 +79,10 @@ class SubsetConvertorPlugin(object):
self._log = Logger.get_logger(self.__class__.__name__)
return self._log
@property
def host(self):
return self._create_context.host
@abstractproperty
def identifier(self):
"""Converted identifier.
@@ -153,6 +157,12 @@ class BaseCreator:
Single object should be used for multiple instances instead of single
instance per one creator object. Do not store temp data or mid-process data
to `self` if it's not Plugin specific.
Args:
project_settings (Dict[str, Any]): Project settings.
system_settings (Dict[str, Any]): System settings.
create_context (CreateContext): Context which initialized creator.
headless (bool): Running in headless mode.
"""
# Label shown in UI
@@ -605,12 +615,12 @@ class AutoCreator(BaseCreator):
pass
def discover_creator_plugins():
return discover(BaseCreator)
def discover_creator_plugins(*args, **kwargs):
return discover(BaseCreator, *args, **kwargs)
def discover_convertor_plugins():
return discover(SubsetConvertorPlugin)
def discover_convertor_plugins(*args, **kwargs):
return discover(SubsetConvertorPlugin, *args, **kwargs)
def discover_legacy_creator_plugins():

View file

@@ -70,7 +70,8 @@ def get_subset_name(
host_name=None,
default_template=None,
dynamic_data=None,
project_settings=None
project_settings=None,
family_filter=None,
):
"""Calculate subset name based on passed context and OpenPype settings.
@@ -82,23 +83,35 @@
    That's the main reason why so many arguments are required to calculate
    the subset name.
    The option to pass a family filter was added for special cases where a
    creator or automated publishing requires a special subset name template
    which would be hard to maintain using its real family value.
Why not just pass the right family? -> Family is also used as fill
value and for filtering of publish plugins.
Todos:
Find better filtering options to avoid requirement of
argument 'family_filter'.
Args:
family (str): Instance family.
variant (str): In most of the cases it is user input during creation.
task_name (str): Task name on which context is instance created.
asset_doc (dict): Queried asset document with its tasks in data.
Used to get task type.
project_name (str): Name of project on which is instance created.
Important for project settings that are loaded.
host_name (str): One of filtering criteria for template profile
filters.
default_template (str): Default template if any profile does not match
passed context. Constant 'DEFAULT_SUBSET_TEMPLATE' is used if
is not passed.
dynamic_data (dict): Dynamic data specific for a creator which creates
instance.
project_settings (Union[Dict[str, Any], None]): Prepared settings for
project. Settings are queried if not passed.
        project_name (Optional[str]): Name of project in which the instance
            is created. Important for project settings that are loaded.
        host_name (Optional[str]): One of filtering criteria for template
            profile filters.
        default_template (Optional[str]): Default template if no profile
            matches the passed context. Constant 'DEFAULT_SUBSET_TEMPLATE'
            is used if it is not passed.
        dynamic_data (Optional[Dict[str, Any]]): Dynamic data specific for
            a creator which creates the instance.
        project_settings (Optional[Dict[str, Any]]): Prepared settings
            for project. Settings are queried if not passed.
family_filter (Optional[str]): Use different family for subset template
filtering. Value of 'family' is used when not passed.
"""
if not family:
@@ -119,7 +132,7 @@
template = get_subset_name_template(
project_name,
family,
family_filter or family,
task_name,
task_type,
host_name,
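A sketch of the call shape with 'family_filter'; the asset document and names are hypothetical and a real call still requires reachable project settings:

asset_doc = {
    "name": "sh010",
    "data": {"tasks": {"lighting": {"type": "Lighting"}}},
}
subset_name = get_subset_name(
    "render",            # family used as fill value and for publish filtering
    "Main",              # variant
    "lighting",          # task_name
    asset_doc,
    project_name="demo_project",
    host_name="maya",
    family_filter="renderLocal",  # only template profile matching uses this
)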

View file

@@ -28,7 +28,6 @@ from openpype.lib import (
TemplateUnsolved,
)
from openpype.pipeline import (
schema,
legacy_io,
Anatomy,
)
@@ -643,7 +642,10 @@ def get_representation_path(representation, root=None, dbcon=None):
def path_from_config():
try:
version_, subset, asset, project = dbcon.parenthood(representation)
project_name = dbcon.active_project()
version_, subset, asset, project = get_representation_parents(
project_name, representation
)
except ValueError:
log.debug(
"Representation %s wasn't found in database, "

View file

@@ -135,11 +135,12 @@ class PluginDiscoverContext(object):
allow_duplicates (bool): Validate class name duplications.
ignore_classes (list): List of classes that will be ignored
and not added to result.
return_report (bool): Output will be full report if set to 'True'.
Returns:
DiscoverResult: Object holding succesfully discovered plugins,
ignored plugins, plugins with missing abstract implementation
and duplicated plugin.
            Union[DiscoverResult, list[Any]]: Object holding successfully
                discovered plugins, ignored plugins, plugins with missing
                abstract implementation and duplicated plugins.
"""
if not ignore_classes:
@@ -268,9 +269,34 @@ class _GlobalDiscover:
return cls._context
def discover(superclass, allow_duplicates=True):
def discover(
superclass,
allow_duplicates=True,
ignore_classes=None,
return_report=False
):
"""Find and return subclasses of `superclass`
Args:
superclass (type): Class which determines discovered subclasses.
allow_duplicates (bool): Validate class name duplications.
ignore_classes (list): List of classes that will be ignored
and not added to result.
return_report (bool): Output will be full report if set to 'True'.
Returns:
        Union[DiscoverResult, list[Any]]: Object holding successfully
            discovered plugins, ignored plugins, plugins with missing
            abstract implementation and duplicated plugins.
"""
context = _GlobalDiscover.get_context()
return context.discover(superclass, allow_duplicates)
return context.discover(
superclass,
allow_duplicates,
ignore_classes,
return_report
)
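A sketch of the two output modes; 'BaseCreator' stands for any plugin superclass passed to discover():

plugins = discover(BaseCreator)                      # plain list, as before
report = discover(BaseCreator, return_report=True)   # DiscoverResult
for plugin_class in report.plugins:
    print(plugin_class.__name__)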
def get_last_discovered_plugins(superclass):

View file

@@ -25,7 +25,6 @@ from .publish_plugins import (
from .lib import (
get_publish_template_name,
DiscoverResult,
publish_plugins_discover,
load_help_content_from_plugin,
load_help_content_from_filepath,
@@ -36,6 +35,7 @@ from .lib import (
filter_instances_for_context_plugin,
context_plugin_should_run,
get_instance_staging_dir,
get_publish_repre_path,
)
from .abstract_expected_files import ExpectedFiles
@@ -68,7 +68,6 @@ __all__ = (
"get_publish_template_name",
"DiscoverResult",
"publish_plugins_discover",
"load_help_content_from_plugin",
"load_help_content_from_filepath",
@@ -79,6 +78,7 @@ __all__ = (
"filter_instances_for_context_plugin",
"context_plugin_should_run",
"get_instance_staging_dir",
"get_publish_repre_path",
"ExpectedFiles",

View file

@@ -10,11 +10,18 @@ import six
import pyblish.plugin
import pyblish.api
from openpype.lib import Logger, filter_profiles
from openpype.lib import (
Logger,
filter_profiles
)
from openpype.settings import (
get_project_settings,
get_system_settings,
)
from openpype.pipeline import (
tempdir
)
from openpype.pipeline.plugin_discover import DiscoverResult
from .contants import (
DEFAULT_PUBLISH_TEMPLATE,
@@ -196,28 +203,6 @@
return template or default_template
class DiscoverResult:
"""Hold result of publish plugins discovery.
Stores discovered plugins duplicated plugins and file paths which
crashed on execution of file.
"""
def __init__(self):
self.plugins = []
self.crashed_file_paths = {}
self.duplicated_plugins = []
def __iter__(self):
for plugin in self.plugins:
yield plugin
def __getitem__(self, item):
return self.plugins[item]
def __setitem__(self, item, value):
self.plugins[item] = value
class HelpContent:
def __init__(self, title, description, detail=None):
self.title = title
@@ -285,7 +270,7 @@ def publish_plugins_discover(paths=None):
"""
# The only difference with `pyblish.api.discover`
result = DiscoverResult()
result = DiscoverResult(pyblish.api.Plugin)
plugins = dict()
plugin_names = []
@@ -595,7 +580,7 @@ def context_plugin_should_run(plugin, context):
Args:
plugin (pyblish.api.Plugin): Plugin with filters.
context (pyblish.api.Context): Pyblish context with insances.
context (pyblish.api.Context): Pyblish context with instances.
Returns:
bool: Context plugin should run based on valid instances.
@@ -609,12 +594,21 @@
def get_instance_staging_dir(instance):
"""Unified way how staging dir is stored and created on instances.
First check if 'stagingDir' is already set in instance data. If there is
not create new in tempdir.
First check if 'stagingDir' is already set in instance data.
    In case it already is, a new tempdir will not be created.
    It also supports `OPENPYPE_TMPDIR`, so a studio can define its own temp
    shared repository per project or even per more granular context.
    Template formatting is also supported, with optional keys. The folder is
    created in case it doesn't exist.
Available anatomy formatting keys:
- root[work | <root name key>]
- project[name | code]
Note:
Staging dir does not have to be necessarily in tempdir so be carefull
about it's usage.
Staging dir does not have to be necessarily in tempdir so be careful
about its usage.
Args:
instance (pyblish.lib.Instance): Instance for which we want to get
@@ -623,12 +617,73 @@ def get_instance_staging_dir(instance):
Returns:
str: Path to staging dir of instance.
"""
staging_dir = instance.data.get('stagingDir')
if staging_dir:
return staging_dir
staging_dir = instance.data.get("stagingDir")
if not staging_dir:
anatomy = instance.context.data.get("anatomy")
# get customized tempdir path from `OPENPYPE_TMPDIR` env var
custom_temp_dir = tempdir.create_custom_tempdir(
anatomy.project_name, anatomy)
if custom_temp_dir:
staging_dir = os.path.normpath(
tempfile.mkdtemp(
prefix="pyblish_tmp_",
dir=custom_temp_dir
)
)
else:
staging_dir = os.path.normpath(
tempfile.mkdtemp(prefix="pyblish_tmp_")
)
instance.data["stagingDir"] = staging_dir
instance.data['stagingDir'] = staging_dir
return staging_dir
def get_publish_repre_path(instance, repre, only_published=False):
"""Get representation path that can be used for integration.
    When 'only_published' is set to true, the validation of the path is not
    relevant. In that case we just need what is set in 'published_path'
    as a "reference". The reference is not used to get or upload the file,
    only to record where the file was published.
    Args:
        instance (pyblish.Instance): Processed instance object. Used as
            source of the staging dir if the representation does not have
            it filled.
        repre (dict): Representation on instance which may or may not be
            integrated with the main integrator.
        only_published (bool): Care only about published paths and
            ignore whether the filepath still exists.
    Returns:
        str: Path to representation file.
        None: Path is not filled or does not exist.
"""
published_path = repre.get("published_path")
if published_path:
published_path = os.path.normpath(published_path)
if os.path.exists(published_path):
return published_path
if only_published:
return published_path
comp_files = repre["files"]
if isinstance(comp_files, (tuple, list, set)):
filename = comp_files[0]
else:
filename = comp_files
staging_dir = repre.get("stagingDir")
if not staging_dir:
staging_dir = get_instance_staging_dir(instance)
src_path = os.path.normpath(os.path.join(staging_dir, filename))
if os.path.exists(src_path):
return src_path
return None
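Hypothetical representation dicts illustrating the lookup order above: an existing 'published_path' wins, otherwise the file is resolved against the staging directory:

repre = {
    "published_path": "/projects/demo/publish/render_v001.exr",
    "files": "render_v001.exr",
    "stagingDir": "/tmp/pyblish_tmp_abc123",
}
# get_publish_repre_path(instance, repre, only_published=False) returns
# "/projects/demo/publish/render_v001.exr" when it exists on disk, else
# "/tmp/pyblish_tmp_abc123/render_v001.exr" when that exists, else None.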

View file

@@ -0,0 +1,59 @@
"""
Temporary folder operations
"""
import os
from openpype.lib import StringTemplate
from openpype.pipeline import Anatomy
def create_custom_tempdir(project_name, anatomy=None):
""" Create custom tempdir
    Template path formatting supports:
- optional key formatting
- available keys:
- root[work | <root name key>]
- project[name | code]
Args:
project_name (str): project name
        anatomy (Optional[openpype.pipeline.Anatomy]): Anatomy object
Returns:
str | None: formatted path or None
"""
openpype_tempdir = os.getenv("OPENPYPE_TMPDIR")
if not openpype_tempdir:
return
custom_tempdir = None
if "{" in openpype_tempdir:
if anatomy is None:
anatomy = Anatomy(project_name)
        # create base format data
data = {
"root": anatomy.roots,
"project": {
"name": anatomy.project_name,
"code": anatomy.project_code,
}
}
# path is anatomy template
custom_tempdir = StringTemplate.format_template(
openpype_tempdir, data).normalized()
else:
# path is absolute
custom_tempdir = openpype_tempdir
    # create the dir path if it doesn't exist
    if not os.path.exists(custom_tempdir):
        try:
            # create it if it doesn't exist
os.makedirs(custom_tempdir)
except IOError as error:
raise IOError(
"Path couldn't be created: {}".format(error))
return custom_tempdir
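An example of the template this helper accepts; the root and project values are hypothetical:

import os

os.environ["OPENPYPE_TMPDIR"] = "{root[work]}/{project[name]}/tmp"
# With anatomy roots {"work": "P:/projects"} and project "demo_project",
# create_custom_tempdir("demo_project") would resolve to
# "P:/projects/demo_project/tmp" and create the folder when missing.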

View file

@@ -32,7 +32,7 @@ class CollectFromCreateContext(pyblish.api.ContextPlugin):
thumbnail_paths_by_instance_id.get(None)
)
project_name = create_context.project_name
project_name = create_context.get_current_project_name()
if project_name:
context.data["projectName"] = project_name
@@ -53,11 +53,15 @@
context.data.update(create_context.context_data_to_store())
context.data["newPublishing"] = True
# Update context data
for key in ("AVALON_PROJECT", "AVALON_ASSET", "AVALON_TASK"):
value = create_context.dbcon.Session.get(key)
if value is not None:
legacy_io.Session[key] = value
os.environ[key] = value
asset_name = create_context.get_current_asset_name()
task_name = create_context.get_current_task_name()
for key, value in (
("AVALON_PROJECT", project_name),
("AVALON_ASSET", asset_name),
("AVALON_TASK", task_name)
):
legacy_io.Session[key] = value
os.environ[key] = value
def create_instance(
self,

View file

@@ -55,7 +55,7 @@
},
"source": {
"folder": "{root[work]}/{originalDirname}",
"file": "{originalBasename}<.{@frame}><_{udim}>.{ext}",
"file": "{originalBasename}.{ext}",
"path": "{@folder}/{@file}"
},
"__dynamic_keys_labels__": {
@@ -66,4 +66,4 @@
"source": "source"
}
}
}
}

View file

@@ -36,6 +36,18 @@
"scene_patches": [],
"strict_error_checking": true
},
"MaxSubmitDeadline": {
"enabled": true,
"optional": false,
"active": true,
"use_published": true,
"priority": 50,
"chunk_size": 10,
"group": "none",
"deadline_pool": "",
"deadline_pool_secondary": "",
"framePerTask": 1
},
"NukeSubmitDeadline": {
"enabled": true,
"optional": false,
@@ -103,8 +115,11 @@
],
"harmony": [
".*"
],
"max": [
".*"
]
}
}
}
}
}

View file

@@ -324,7 +324,8 @@
"animation",
"look",
"rig",
"camera"
"camera",
"renderlayer"
],
"task_types": [],
"tasks": [],
@@ -488,7 +489,8 @@
},
"keep_first_subset_name_for_review": true,
"asset_versions_status_profiles": [],
"additional_metadata_keys": []
"additional_metadata_keys": [],
"upload_reviewable_with_origin_name": false
},
"IntegrateFtrackFarmStatus": {
"farm_status_profiles": []

View file

@@ -0,0 +1,8 @@
{
"RenderSettings": {
"default_render_image_folder": "renders/3dsmax",
"aov_separator": "underscore",
"image_format": "exr",
"multipass": true
}
}

View file

@@ -815,6 +815,11 @@
"twoSidedLighting": true,
"lineAAEnable": true,
"multiSample": 8,
"useDefaultMaterial": false,
"wireframeOnShaded": false,
"xray": false,
"jointXray": false,
"backfaceCulling": false,
"ssaoEnable": false,
"ssaoAmount": 1,
"ssaoRadius": 16,

View file

@@ -246,6 +246,7 @@
"sourcetype": "python",
"title": "Gizmo Note",
"command": "nuke.nodes.StickyNote(label='You can create your own toolbar menu in the Nuke GizmoMenu of OpenPype')",
"icon": "",
"shortcut": ""
}
]

Some files were not shown because too many files have changed in this diff