[Automated] Merged develop into main

commit ebb072700e
ynbot 2023-07-17 17:21:20 +02:00, committed by GitHub
66 changed files with 2929 additions and 2470 deletions

@@ -36,6 +36,7 @@ body:
description: What version are you running? Look to OpenPype Tray
options:
- 3.16.0
- 3.16.0-nightly.2
- 3.16.0-nightly.1
- 3.15.12
- 3.15.12-nightly.4
@@ -135,6 +136,10 @@ body:
- 3.14.5
- 3.14.5-nightly.3
- 3.14.5-nightly.2
- 3.14.5-nightly.1
- 3.14.4
- 3.14.4-nightly.4
validations:
required: true
- type: dropdown

@@ -1320,7 +1320,9 @@ def convert_update_representation_to_v4(
return flat_data
def convert_update_workfile_info_to_v4(update_data):
def convert_update_workfile_info_to_v4(
project_name, workfile_id, update_data, con
):
return {
key: value
for key, value in update_data.items()

@@ -0,0 +1,51 @@
"""Create a Blender scene asset."""
import bpy
from openpype.pipeline import get_current_task_name
from openpype.hosts.blender.api import plugin, lib, ops
from openpype.hosts.blender.api.pipeline import AVALON_INSTANCES
class CreateBlendScene(plugin.Creator):
"""Generic group of assets"""
name = "blendScene"
label = "Blender Scene"
family = "blendScene"
icon = "cubes"
def process(self):
""" Run the creator on Blender main thread"""
mti = ops.MainThreadItem(self._process)
ops.execute_in_main_thread(mti)
def _process(self):
# Get Instance Container or create it if it does not exist
instances = bpy.data.collections.get(AVALON_INSTANCES)
if not instances:
instances = bpy.data.collections.new(name=AVALON_INSTANCES)
bpy.context.scene.collection.children.link(instances)
# Create instance object
asset = self.data["asset"]
subset = self.data["subset"]
name = plugin.asset_name(asset, subset)
asset_group = bpy.data.objects.new(name=name, object_data=None)
asset_group.empty_display_type = 'SINGLE_ARROW'
instances.objects.link(asset_group)
self.data['task'] = get_current_task_name()
lib.imprint(asset_group, self.data)
# Add selected objects to instance
if (self.options or {}).get("useSelection"):
bpy.context.view_layer.objects.active = asset_group
selected = lib.get_selection()
for obj in selected:
if obj.parent in selected:
obj.select_set(False)
continue
selected.append(asset_group)
bpy.ops.object.parent_set(keep_transform=True)
return asset_group
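
For context, the process()/_process() split above is the pattern these creators use to defer work to Blender's main thread, since bpy data cannot be safely modified from other threads. A minimal sketch of the same pattern, reusing the ops helpers this file imports (the wrapped function is illustrative):

from openpype.hosts.blender.api import ops

def make_something():
    # Anything that touches bpy data should run on Blender's main thread.
    print("running on the main thread")

# Wrap the callable and hand it to the main thread, mirroring process().
mti = ops.MainThreadItem(make_something)
ops.execute_in_main_thread(mti)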

@@ -43,7 +43,9 @@ class CreateCamera(plugin.Creator):
bpy.context.view_layer.objects.active = asset_group
selected = lib.get_selection()
for obj in selected:
obj.select_set(True)
if obj.parent in selected:
obj.select_set(False)
continue
selected.append(asset_group)
bpy.ops.object.parent_set(keep_transform=True)
else:

@@ -42,7 +42,9 @@ class CreateLayout(plugin.Creator):
bpy.context.view_layer.objects.active = asset_group
selected = lib.get_selection()
for obj in selected:
obj.select_set(True)
if obj.parent in selected:
obj.select_set(False)
continue
selected.append(asset_group)
bpy.ops.object.parent_set(keep_transform=True)

@@ -42,7 +42,9 @@ class CreateModel(plugin.Creator):
bpy.context.view_layer.objects.active = asset_group
selected = lib.get_selection()
for obj in selected:
obj.select_set(True)
if obj.parent in selected:
obj.select_set(False)
continue
selected.append(asset_group)
bpy.ops.object.parent_set(keep_transform=True)

@@ -42,7 +42,9 @@ class CreateRig(plugin.Creator):
bpy.context.view_layer.objects.active = asset_group
selected = lib.get_selection()
for obj in selected:
obj.select_set(True)
if obj.parent in selected:
obj.select_set(False)
continue
selected.append(asset_group)
bpy.ops.object.parent_set(keep_transform=True)

@@ -0,0 +1,257 @@
from typing import Dict, List, Optional
from pathlib import Path
import bpy
from openpype.pipeline import (
legacy_create,
get_representation_path,
AVALON_CONTAINER_ID,
)
from openpype.pipeline.create import get_legacy_creator_by_name
from openpype.hosts.blender.api import plugin
from openpype.hosts.blender.api.lib import imprint
from openpype.hosts.blender.api.pipeline import (
AVALON_CONTAINERS,
AVALON_PROPERTY,
)
class BlendLoader(plugin.AssetLoader):
"""Load assets from a .blend file."""
families = ["model", "rig", "layout", "camera", "blendScene"]
representations = ["blend"]
label = "Append Blend"
icon = "code-fork"
color = "orange"
@staticmethod
def _get_asset_container(objects):
empties = [obj for obj in objects if obj.type == 'EMPTY']
for empty in empties:
if empty.get(AVALON_PROPERTY):
return empty
return None
@staticmethod
def get_all_container_parents(asset_group):
parent_containers = []
parent = asset_group.parent
while parent:
if parent.get(AVALON_PROPERTY):
parent_containers.append(parent)
parent = parent.parent
return parent_containers
def _post_process_layout(self, container, asset, representation):
rigs = [
obj for obj in container.children_recursive
if (
obj.type == 'EMPTY' and
obj.get(AVALON_PROPERTY) and
obj.get(AVALON_PROPERTY).get('family') == 'rig'
)
]
for rig in rigs:
creator_plugin = get_legacy_creator_by_name("CreateAnimation")
legacy_create(
creator_plugin,
name=rig.name.split(':')[-1] + "_animation",
asset=asset,
options={
"useSelection": False,
"asset_group": rig
},
data={
"dependencies": representation
}
)
def _process_data(self, libpath, group_name):
# Append all the data from the .blend file
with bpy.data.libraries.load(
libpath, link=False, relative=False
) as (data_from, data_to):
for attr in dir(data_to):
setattr(data_to, attr, getattr(data_from, attr))
members = []
# Rename the object to add the asset name
for attr in dir(data_to):
for data in getattr(data_to, attr):
data.name = f"{group_name}:{data.name}"
members.append(data)
container = self._get_asset_container(data_to.objects)
assert container, "No asset group found"
container.name = group_name
container.empty_display_type = 'SINGLE_ARROW'
# Link the container to the scene collection
bpy.context.scene.collection.objects.link(container)
# Link all the container children to the collection
for obj in container.children_recursive:
bpy.context.scene.collection.objects.link(obj)
# Remove the library from the blend file
library = bpy.data.libraries.get(bpy.path.basename(libpath))
bpy.data.libraries.remove(library)
return container, members
def process_asset(
self, context: dict, name: str, namespace: Optional[str] = None,
options: Optional[Dict] = None
) -> Optional[List]:
"""
Arguments:
name: Use pre-defined name
namespace: Use pre-defined namespace
context: Full parenthood of representation to load
options: Additional settings dictionary
"""
libpath = self.fname
asset = context["asset"]["name"]
subset = context["subset"]["name"]
try:
family = context["representation"]["context"]["family"]
except KeyError:
family = "model"
representation = str(context["representation"]["_id"])
asset_name = plugin.asset_name(asset, subset)
unique_number = plugin.get_unique_number(asset, subset)
group_name = plugin.asset_name(asset, subset, unique_number)
namespace = namespace or f"{asset}_{unique_number}"
avalon_container = bpy.data.collections.get(AVALON_CONTAINERS)
if not avalon_container:
avalon_container = bpy.data.collections.new(name=AVALON_CONTAINERS)
bpy.context.scene.collection.children.link(avalon_container)
container, members = self._process_data(libpath, group_name)
if family == "layout":
self._post_process_layout(container, asset, representation)
avalon_container.objects.link(container)
data = {
"schema": "openpype:container-2.0",
"id": AVALON_CONTAINER_ID,
"name": name,
"namespace": namespace or '',
"loader": str(self.__class__.__name__),
"representation": str(context["representation"]["_id"]),
"libpath": libpath,
"asset_name": asset_name,
"parent": str(context["representation"]["parent"]),
"family": context["representation"]["context"]["family"],
"objectName": group_name,
"members": members,
}
container[AVALON_PROPERTY] = data
objects = [
obj for obj in bpy.data.objects
if obj.name.startswith(f"{group_name}:")
]
self[:] = objects
return objects
def exec_update(self, container: Dict, representation: Dict):
"""
Update the loaded asset.
"""
group_name = container["objectName"]
asset_group = bpy.data.objects.get(group_name)
libpath = Path(get_representation_path(representation)).as_posix()
assert asset_group, (
f"The asset is not loaded: {container['objectName']}"
)
transform = asset_group.matrix_basis.copy()
old_data = dict(asset_group.get(AVALON_PROPERTY))
parent = asset_group.parent
self.exec_remove(container)
asset_group, members = self._process_data(libpath, group_name)
avalon_container = bpy.data.collections.get(AVALON_CONTAINERS)
avalon_container.objects.link(asset_group)
asset_group.matrix_basis = transform
asset_group.parent = parent
# Restore the old data, but reset members, as they don't exist anymore
# This avoids a crash, because the memory addresses of those members
# are not valid anymore
old_data["members"] = []
asset_group[AVALON_PROPERTY] = old_data
new_data = {
"libpath": libpath,
"representation": str(representation["_id"]),
"parent": str(representation["parent"]),
"members": members,
}
imprint(asset_group, new_data)
# We need to update all the parent container members
parent_containers = self.get_all_container_parents(asset_group)
for parent_container in parent_containers:
parent_members = parent_container[AVALON_PROPERTY]["members"]
parent_container[AVALON_PROPERTY]["members"] = (
parent_members + members)
def exec_remove(self, container: Dict) -> bool:
"""
Remove an existing container from a Blender scene.
"""
group_name = container["objectName"]
asset_group = bpy.data.objects.get(group_name)
attrs = [
attr for attr in dir(bpy.data)
if isinstance(
getattr(bpy.data, attr),
bpy.types.bpy_prop_collection
)
]
members = asset_group.get(AVALON_PROPERTY).get("members", [])
# We need to update all the parent container members
parent_containers = self.get_all_container_parents(asset_group)
for parent in parent_containers:
parent.get(AVALON_PROPERTY)["members"] = list(filter(
lambda i: i not in members,
parent.get(AVALON_PROPERTY)["members"]))
for attr in attrs:
for data in getattr(bpy.data, attr):
if data in members:
# Skip the asset group
if data == asset_group:
continue
getattr(bpy.data, attr).remove(data)
bpy.data.objects.remove(asset_group)
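
For reference, _process_data() above is built on Blender's library-load context manager. A minimal standalone sketch of that API, assuming a placeholder .blend path:

import bpy

# Appending (link=False) pulls real copies of the datablocks into the
# current file; assigning names to data_to marks them for transfer.
with bpy.data.libraries.load(
    "/path/to/scene.blend", link=False
) as (data_from, data_to):
    data_to.objects = data_from.objects  # append every object

# After the context exits, data_to.objects holds the appended objects.
for obj in data_to.objects:
    bpy.context.scene.collection.objects.link(obj)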

@@ -1,256 +0,0 @@
"""Load a camera asset in Blender."""
import logging
from pathlib import Path
from pprint import pformat
from typing import Dict, List, Optional
import bpy
from openpype.pipeline import (
get_representation_path,
AVALON_CONTAINER_ID,
)
from openpype.hosts.blender.api import plugin
from openpype.hosts.blender.api.pipeline import (
AVALON_CONTAINERS,
AVALON_PROPERTY,
)
logger = logging.getLogger("openpype").getChild(
"blender").getChild("load_camera")
class BlendCameraLoader(plugin.AssetLoader):
"""Load a camera from a .blend file.
Warning:
Loading the same asset more than once is not properly supported at the
moment.
"""
families = ["camera"]
representations = ["blend"]
label = "Link Camera (Blend)"
icon = "code-fork"
color = "orange"
def _remove(self, asset_group):
objects = list(asset_group.children)
for obj in objects:
if obj.type == 'CAMERA':
bpy.data.cameras.remove(obj.data)
def _process(self, libpath, asset_group, group_name):
with bpy.data.libraries.load(
libpath, link=True, relative=False
) as (data_from, data_to):
data_to.objects = data_from.objects
parent = bpy.context.scene.collection
empties = [obj for obj in data_to.objects if obj.type == 'EMPTY']
container = None
for empty in empties:
if empty.get(AVALON_PROPERTY):
container = empty
break
assert container, "No asset group found"
# Children must be linked before parents,
# otherwise the hierarchy will break
objects = []
nodes = list(container.children)
for obj in nodes:
obj.parent = asset_group
for obj in nodes:
objects.append(obj)
nodes.extend(list(obj.children))
objects.reverse()
for obj in objects:
parent.objects.link(obj)
for obj in objects:
local_obj = plugin.prepare_data(obj, group_name)
if local_obj.type != 'EMPTY':
plugin.prepare_data(local_obj.data, group_name)
if not local_obj.get(AVALON_PROPERTY):
local_obj[AVALON_PROPERTY] = dict()
avalon_info = local_obj[AVALON_PROPERTY]
avalon_info.update({"container_name": group_name})
objects.reverse()
bpy.data.orphans_purge(do_local_ids=False)
plugin.deselect_all()
return objects
def process_asset(
self, context: dict, name: str, namespace: Optional[str] = None,
options: Optional[Dict] = None
) -> Optional[List]:
"""
Arguments:
name: Use pre-defined name
namespace: Use pre-defined namespace
context: Full parenthood of representation to load
options: Additional settings dictionary
"""
libpath = self.filepath_from_context(context)
asset = context["asset"]["name"]
subset = context["subset"]["name"]
asset_name = plugin.asset_name(asset, subset)
unique_number = plugin.get_unique_number(asset, subset)
group_name = plugin.asset_name(asset, subset, unique_number)
namespace = namespace or f"{asset}_{unique_number}"
avalon_container = bpy.data.collections.get(AVALON_CONTAINERS)
if not avalon_container:
avalon_container = bpy.data.collections.new(name=AVALON_CONTAINERS)
bpy.context.scene.collection.children.link(avalon_container)
asset_group = bpy.data.objects.new(group_name, object_data=None)
asset_group.empty_display_type = 'SINGLE_ARROW'
avalon_container.objects.link(asset_group)
objects = self._process(libpath, asset_group, group_name)
bpy.context.scene.collection.objects.link(asset_group)
asset_group[AVALON_PROPERTY] = {
"schema": "openpype:container-2.0",
"id": AVALON_CONTAINER_ID,
"name": name,
"namespace": namespace or '',
"loader": str(self.__class__.__name__),
"representation": str(context["representation"]["_id"]),
"libpath": libpath,
"asset_name": asset_name,
"parent": str(context["representation"]["parent"]),
"family": context["representation"]["context"]["family"],
"objectName": group_name
}
self[:] = objects
return objects
def exec_update(self, container: Dict, representation: Dict):
"""Update the loaded asset.
This will remove all children of the asset group, load the new ones
and add them as children of the group.
"""
object_name = container["objectName"]
asset_group = bpy.data.objects.get(object_name)
libpath = Path(get_representation_path(representation))
extension = libpath.suffix.lower()
self.log.info(
"Container: %s\nRepresentation: %s",
pformat(container, indent=2),
pformat(representation, indent=2),
)
assert asset_group, (
f"The asset is not loaded: {container['objectName']}"
)
assert libpath, (
"No existing library file found for {container['objectName']}"
)
assert libpath.is_file(), (
f"The file doesn't exist: {libpath}"
)
assert extension in plugin.VALID_EXTENSIONS, (
f"Unsupported file: {libpath}"
)
metadata = asset_group.get(AVALON_PROPERTY)
group_libpath = metadata["libpath"]
normalized_group_libpath = (
str(Path(bpy.path.abspath(group_libpath)).resolve())
)
normalized_libpath = (
str(Path(bpy.path.abspath(str(libpath))).resolve())
)
self.log.debug(
"normalized_group_libpath:\n %s\nnormalized_libpath:\n %s",
normalized_group_libpath,
normalized_libpath,
)
if normalized_group_libpath == normalized_libpath:
self.log.info("Library already loaded, not updating...")
return
# Check how many assets use the same library
count = 0
for obj in bpy.data.collections.get(AVALON_CONTAINERS).objects:
if obj.get(AVALON_PROPERTY).get('libpath') == group_libpath:
count += 1
mat = asset_group.matrix_basis.copy()
self._remove(asset_group)
# If it is the last object to use that library, remove it
if count == 1:
library = bpy.data.libraries.get(bpy.path.basename(group_libpath))
if library:
bpy.data.libraries.remove(library)
self._process(str(libpath), asset_group, object_name)
asset_group.matrix_basis = mat
metadata["libpath"] = str(libpath)
metadata["representation"] = str(representation["_id"])
metadata["parent"] = str(representation["parent"])
def exec_remove(self, container: Dict) -> bool:
"""Remove an existing container from a Blender scene.
Arguments:
container (openpype:container-1.0): Container to remove,
from `host.ls()`.
Returns:
bool: Whether the container was deleted.
"""
object_name = container["objectName"]
asset_group = bpy.data.objects.get(object_name)
libpath = asset_group.get(AVALON_PROPERTY).get('libpath')
# Check how many assets use the same library
count = 0
for obj in bpy.data.collections.get(AVALON_CONTAINERS).objects:
if obj.get(AVALON_PROPERTY).get('libpath') == libpath:
count += 1
if not asset_group:
return False
self._remove(asset_group)
bpy.data.objects.remove(asset_group)
# If it is the last object to use that library, remove it
if count == 1:
library = bpy.data.libraries.get(bpy.path.basename(libpath))
bpy.data.libraries.remove(library)
return True

@@ -1,469 +0,0 @@
"""Load a layout in Blender."""
from pathlib import Path
from pprint import pformat
from typing import Dict, List, Optional
import bpy
from openpype.pipeline import (
legacy_create,
get_representation_path,
AVALON_CONTAINER_ID,
)
from openpype.pipeline.create import get_legacy_creator_by_name
from openpype.hosts.blender.api import plugin
from openpype.hosts.blender.api.pipeline import (
AVALON_CONTAINERS,
AVALON_PROPERTY,
)
class BlendLayoutLoader(plugin.AssetLoader):
"""Load layout from a .blend file."""
families = ["layout"]
representations = ["blend"]
label = "Link Layout"
icon = "code-fork"
color = "orange"
def _remove(self, asset_group):
objects = list(asset_group.children)
for obj in objects:
if obj.type == 'MESH':
for material_slot in list(obj.material_slots):
if material_slot.material:
bpy.data.materials.remove(material_slot.material)
bpy.data.meshes.remove(obj.data)
elif obj.type == 'ARMATURE':
objects.extend(obj.children)
bpy.data.armatures.remove(obj.data)
elif obj.type == 'CURVE':
bpy.data.curves.remove(obj.data)
elif obj.type == 'EMPTY':
objects.extend(obj.children)
bpy.data.objects.remove(obj)
def _remove_asset_and_library(self, asset_group):
if not asset_group.get(AVALON_PROPERTY):
return
libpath = asset_group.get(AVALON_PROPERTY).get('libpath')
if not libpath:
return
# Check how many assets use the same library
count = 0
for obj in bpy.data.collections.get(AVALON_CONTAINERS).all_objects:
if obj.get(AVALON_PROPERTY).get('libpath') == libpath:
count += 1
self._remove(asset_group)
bpy.data.objects.remove(asset_group)
# If it is the last object to use that library, remove it
if count == 1:
library = bpy.data.libraries.get(bpy.path.basename(libpath))
if library:
bpy.data.libraries.remove(library)
def _process(
self, libpath, asset_group, group_name, asset, representation,
actions, anim_instances
):
with bpy.data.libraries.load(
libpath, link=True, relative=False
) as (data_from, data_to):
data_to.objects = data_from.objects
parent = bpy.context.scene.collection
empties = [obj for obj in data_to.objects if obj.type == 'EMPTY']
container = None
for empty in empties:
if (empty.get(AVALON_PROPERTY) and
empty.get(AVALON_PROPERTY).get('family') == 'layout'):
container = empty
break
assert container, "No asset group found"
# Children must be linked before parents,
# otherwise the hierarchy will break
objects = []
nodes = list(container.children)
allowed_types = ['ARMATURE', 'MESH', 'EMPTY']
for obj in nodes:
if obj.type in allowed_types:
obj.parent = asset_group
for obj in nodes:
if obj.type in allowed_types:
objects.append(obj)
nodes.extend(list(obj.children))
objects.reverse()
constraints = []
armatures = [obj for obj in objects if obj.type == 'ARMATURE']
for armature in armatures:
for bone in armature.pose.bones:
for constraint in bone.constraints:
if hasattr(constraint, 'target'):
constraints.append(constraint)
for obj in objects:
parent.objects.link(obj)
for obj in objects:
local_obj = plugin.prepare_data(obj)
action = None
if actions:
action = actions.get(local_obj.name, None)
if local_obj.type == 'MESH':
plugin.prepare_data(local_obj.data)
if obj != local_obj:
for constraint in constraints:
if constraint.target == obj:
constraint.target = local_obj
for material_slot in local_obj.material_slots:
if material_slot.material:
plugin.prepare_data(material_slot.material)
elif local_obj.type == 'ARMATURE':
plugin.prepare_data(local_obj.data)
if action:
if local_obj.animation_data is None:
local_obj.animation_data_create()
local_obj.animation_data.action = action
elif (local_obj.animation_data and
local_obj.animation_data.action):
plugin.prepare_data(
local_obj.animation_data.action)
# Link the drivers to the local object
if local_obj.data.animation_data:
for d in local_obj.data.animation_data.drivers:
for v in d.driver.variables:
for t in v.targets:
t.id = local_obj
elif local_obj.type == 'EMPTY':
if (not anim_instances or
(anim_instances and
local_obj.name not in anim_instances.keys())):
avalon = local_obj.get(AVALON_PROPERTY)
if avalon and avalon.get('family') == 'rig':
creator_plugin = get_legacy_creator_by_name(
"CreateAnimation")
if not creator_plugin:
raise ValueError(
"Creator plugin \"CreateAnimation\" was "
"not found.")
legacy_create(
creator_plugin,
name=local_obj.name.split(':')[-1] + "_animation",
asset=asset,
options={"useSelection": False,
"asset_group": local_obj},
data={"dependencies": representation}
)
if not local_obj.get(AVALON_PROPERTY):
local_obj[AVALON_PROPERTY] = dict()
avalon_info = local_obj[AVALON_PROPERTY]
avalon_info.update({"container_name": group_name})
objects.reverse()
armatures = [
obj for obj in bpy.data.objects
if obj.type == 'ARMATURE' and obj.library is None]
arm_act = {}
# The armatures with an animation need to be at the center of the
# scene to be hooked correctly by the curves modifiers.
for armature in armatures:
if armature.animation_data and armature.animation_data.action:
arm_act[armature] = armature.animation_data.action
armature.animation_data.action = None
armature.location = (0.0, 0.0, 0.0)
for bone in armature.pose.bones:
bone.location = (0.0, 0.0, 0.0)
bone.rotation_euler = (0.0, 0.0, 0.0)
curves = [obj for obj in data_to.objects if obj.type == 'CURVE']
for curve in curves:
curve_name = curve.name.split(':')[0]
curve_obj = bpy.data.objects.get(curve_name)
local_obj = plugin.prepare_data(curve)
plugin.prepare_data(local_obj.data)
# Curves need to reset the hook, but to do that they need to be
# in the view layer.
parent.objects.link(local_obj)
plugin.deselect_all()
local_obj.select_set(True)
bpy.context.view_layer.objects.active = local_obj
if local_obj.library is None:
bpy.ops.object.mode_set(mode='EDIT')
bpy.ops.object.hook_reset()
bpy.ops.object.mode_set(mode='OBJECT')
parent.objects.unlink(local_obj)
local_obj.use_fake_user = True
for mod in local_obj.modifiers:
mod.object = bpy.data.objects.get(f"{mod.object.name}")
if not local_obj.get(AVALON_PROPERTY):
local_obj[AVALON_PROPERTY] = dict()
avalon_info = local_obj[AVALON_PROPERTY]
avalon_info.update({"container_name": group_name})
local_obj.parent = curve_obj
objects.append(local_obj)
for armature in armatures:
if arm_act.get(armature):
armature.animation_data.action = arm_act[armature]
while bpy.data.orphans_purge(do_local_ids=False):
pass
plugin.deselect_all()
return objects
def process_asset(
self, context: dict, name: str, namespace: Optional[str] = None,
options: Optional[Dict] = None
) -> Optional[List]:
"""
Arguments:
name: Use pre-defined name
namespace: Use pre-defined namespace
context: Full parenthood of representation to load
options: Additional settings dictionary
"""
libpath = self.filepath_from_context(context)
asset = context["asset"]["name"]
subset = context["subset"]["name"]
representation = str(context["representation"]["_id"])
asset_name = plugin.asset_name(asset, subset)
unique_number = plugin.get_unique_number(asset, subset)
group_name = plugin.asset_name(asset, subset, unique_number)
namespace = namespace or f"{asset}_{unique_number}"
avalon_container = bpy.data.collections.get(AVALON_CONTAINERS)
if not avalon_container:
avalon_container = bpy.data.collections.new(name=AVALON_CONTAINERS)
bpy.context.scene.collection.children.link(avalon_container)
asset_group = bpy.data.objects.new(group_name, object_data=None)
asset_group.empty_display_type = 'SINGLE_ARROW'
avalon_container.objects.link(asset_group)
objects = self._process(
libpath, asset_group, group_name, asset, representation,
None, None)
for child in asset_group.children:
if child.get(AVALON_PROPERTY):
avalon_container.objects.link(child)
bpy.context.scene.collection.objects.link(asset_group)
asset_group[AVALON_PROPERTY] = {
"schema": "openpype:container-2.0",
"id": AVALON_CONTAINER_ID,
"name": name,
"namespace": namespace or '',
"loader": str(self.__class__.__name__),
"representation": str(context["representation"]["_id"]),
"libpath": libpath,
"asset_name": asset_name,
"parent": str(context["representation"]["parent"]),
"family": context["representation"]["context"]["family"],
"objectName": group_name
}
self[:] = objects
return objects
def update(self, container: Dict, representation: Dict):
"""Update the loaded asset.
This will remove all objects of the current collection, load the new
ones and add them to the collection.
If the objects of the collection are used in another collection they
will not be removed, only unlinked. Normally this should not be the
case though.
Warning:
No nested collections are supported at the moment!
"""
object_name = container["objectName"]
asset_group = bpy.data.objects.get(object_name)
libpath = Path(get_representation_path(representation))
extension = libpath.suffix.lower()
self.log.info(
"Container: %s\nRepresentation: %s",
pformat(container, indent=2),
pformat(representation, indent=2),
)
assert asset_group, (
f"The asset is not loaded: {container['objectName']}"
)
assert libpath, (
"No existing library file found for {container['objectName']}"
)
assert libpath.is_file(), (
f"The file doesn't exist: {libpath}"
)
assert extension in plugin.VALID_EXTENSIONS, (
f"Unsupported file: {libpath}"
)
metadata = asset_group.get(AVALON_PROPERTY)
group_libpath = metadata["libpath"]
normalized_group_libpath = (
str(Path(bpy.path.abspath(group_libpath)).resolve())
)
normalized_libpath = (
str(Path(bpy.path.abspath(str(libpath))).resolve())
)
self.log.debug(
"normalized_group_libpath:\n %s\nnormalized_libpath:\n %s",
normalized_group_libpath,
normalized_libpath,
)
if normalized_group_libpath == normalized_libpath:
self.log.info("Library already loaded, not updating...")
return
actions = {}
anim_instances = {}
for obj in asset_group.children:
obj_meta = obj.get(AVALON_PROPERTY)
if obj_meta.get('family') == 'rig':
# Get animation instance
collections = list(obj.users_collection)
for c in collections:
avalon = c.get(AVALON_PROPERTY)
if avalon and avalon.get('family') == 'animation':
anim_instances[obj.name] = c.name
break
# Get armature's action
rig = None
for child in obj.children:
if child.type == 'ARMATURE':
rig = child
break
if not rig:
raise Exception("No armature in the rig asset group.")
if rig.animation_data and rig.animation_data.action:
instance_name = obj_meta.get('instance_name')
actions[instance_name] = rig.animation_data.action
mat = asset_group.matrix_basis.copy()
# Remove the children of the asset_group first
for child in list(asset_group.children):
self._remove_asset_and_library(child)
# Check how many assets use the same library
count = 0
for obj in bpy.data.collections.get(AVALON_CONTAINERS).objects:
if obj.get(AVALON_PROPERTY).get('libpath') == group_libpath:
count += 1
self._remove(asset_group)
# If it is the last object to use that library, remove it
if count == 1:
library = bpy.data.libraries.get(bpy.path.basename(group_libpath))
if library:
bpy.data.libraries.remove(library)
asset = container.get("asset_name").split("_")[0]
self._process(
str(libpath), asset_group, object_name, asset,
str(representation.get("_id")), actions, anim_instances
)
# Link the new objects to the animation collection
for inst in anim_instances.keys():
try:
obj = bpy.data.objects[inst]
bpy.data.collections[anim_instances[inst]].objects.link(obj)
except KeyError:
self.log.info(f"Object {inst} does not exist anymore.")
coll = bpy.data.collections.get(anim_instances[inst])
if (coll):
bpy.data.collections.remove(coll)
avalon_container = bpy.data.collections.get(AVALON_CONTAINERS)
for child in asset_group.children:
if child.get(AVALON_PROPERTY):
avalon_container.objects.link(child)
asset_group.matrix_basis = mat
metadata["libpath"] = str(libpath)
metadata["representation"] = str(representation["_id"])
def exec_remove(self, container: Dict) -> bool:
"""Remove an existing container from a Blender scene.
Arguments:
container (openpype:container-1.0): Container to remove,
from `host.ls()`.
Returns:
bool: Whether the container was deleted.
Warning:
No nested collections are supported at the moment!
"""
object_name = container["objectName"]
asset_group = bpy.data.objects.get(object_name)
if not asset_group:
return False
# Remove the children of the asset_group first
for child in list(asset_group.children):
self._remove_asset_and_library(child)
self._remove_asset_and_library(asset_group)
return True

@@ -1,296 +0,0 @@
"""Load a model asset in Blender."""
from pathlib import Path
from pprint import pformat
from typing import Dict, List, Optional
import bpy
from openpype.pipeline import (
get_representation_path,
AVALON_CONTAINER_ID,
)
from openpype.hosts.blender.api import plugin
from openpype.hosts.blender.api.pipeline import (
AVALON_CONTAINERS,
AVALON_PROPERTY,
)
class BlendModelLoader(plugin.AssetLoader):
"""Load models from a .blend file.
Because they come from a .blend file, we can simply link the collection that
contains the model. There is no further need to 'containerise' it.
"""
families = ["model"]
representations = ["blend"]
label = "Link Model"
icon = "code-fork"
color = "orange"
def _remove(self, asset_group):
objects = list(asset_group.children)
for obj in objects:
if obj.type == 'MESH':
for material_slot in list(obj.material_slots):
bpy.data.materials.remove(material_slot.material)
bpy.data.meshes.remove(obj.data)
elif obj.type == 'EMPTY':
objects.extend(obj.children)
bpy.data.objects.remove(obj)
def _process(self, libpath, asset_group, group_name):
with bpy.data.libraries.load(
libpath, link=True, relative=False
) as (data_from, data_to):
data_to.objects = data_from.objects
parent = bpy.context.scene.collection
empties = [obj for obj in data_to.objects if obj.type == 'EMPTY']
container = None
for empty in empties:
if empty.get(AVALON_PROPERTY):
container = empty
break
assert container, "No asset group found"
# Children must be linked before parents,
# otherwise the hierarchy will break
objects = []
nodes = list(container.children)
for obj in nodes:
obj.parent = asset_group
for obj in nodes:
objects.append(obj)
nodes.extend(list(obj.children))
objects.reverse()
for obj in objects:
parent.objects.link(obj)
for obj in objects:
local_obj = plugin.prepare_data(obj, group_name)
if local_obj.type != 'EMPTY':
plugin.prepare_data(local_obj.data, group_name)
for material_slot in local_obj.material_slots:
if material_slot.material:
plugin.prepare_data(material_slot.material, group_name)
if not local_obj.get(AVALON_PROPERTY):
local_obj[AVALON_PROPERTY] = dict()
avalon_info = local_obj[AVALON_PROPERTY]
avalon_info.update({"container_name": group_name})
objects.reverse()
bpy.data.orphans_purge(do_local_ids=False)
plugin.deselect_all()
return objects
def process_asset(
self, context: dict, name: str, namespace: Optional[str] = None,
options: Optional[Dict] = None
) -> Optional[List]:
"""
Arguments:
name: Use pre-defined name
namespace: Use pre-defined namespace
context: Full parenthood of representation to load
options: Additional settings dictionary
"""
libpath = self.filepath_from_context(context)
asset = context["asset"]["name"]
subset = context["subset"]["name"]
asset_name = plugin.asset_name(asset, subset)
unique_number = plugin.get_unique_number(asset, subset)
group_name = plugin.asset_name(asset, subset, unique_number)
namespace = namespace or f"{asset}_{unique_number}"
avalon_container = bpy.data.collections.get(AVALON_CONTAINERS)
if not avalon_container:
avalon_container = bpy.data.collections.new(name=AVALON_CONTAINERS)
bpy.context.scene.collection.children.link(avalon_container)
asset_group = bpy.data.objects.new(group_name, object_data=None)
asset_group.empty_display_type = 'SINGLE_ARROW'
avalon_container.objects.link(asset_group)
plugin.deselect_all()
if options is not None:
parent = options.get('parent')
transform = options.get('transform')
if parent and transform:
location = transform.get('translation')
rotation = transform.get('rotation')
scale = transform.get('scale')
asset_group.location = (
location.get('x'),
location.get('y'),
location.get('z')
)
asset_group.rotation_euler = (
rotation.get('x'),
rotation.get('y'),
rotation.get('z')
)
asset_group.scale = (
scale.get('x'),
scale.get('y'),
scale.get('z')
)
bpy.context.view_layer.objects.active = parent
asset_group.select_set(True)
bpy.ops.object.parent_set(keep_transform=True)
plugin.deselect_all()
objects = self._process(libpath, asset_group, group_name)
bpy.context.scene.collection.objects.link(asset_group)
asset_group[AVALON_PROPERTY] = {
"schema": "openpype:container-2.0",
"id": AVALON_CONTAINER_ID,
"name": name,
"namespace": namespace or '',
"loader": str(self.__class__.__name__),
"representation": str(context["representation"]["_id"]),
"libpath": libpath,
"asset_name": asset_name,
"parent": str(context["representation"]["parent"]),
"family": context["representation"]["context"]["family"],
"objectName": group_name
}
self[:] = objects
return objects
def exec_update(self, container: Dict, representation: Dict):
"""Update the loaded asset.
This will remove all objects of the current collection, load the new
ones and add them to the collection.
If the objects of the collection are used in another collection they
will not be removed, only unlinked. Normally this should not be the
case though.
"""
object_name = container["objectName"]
asset_group = bpy.data.objects.get(object_name)
libpath = Path(get_representation_path(representation))
extension = libpath.suffix.lower()
self.log.info(
"Container: %s\nRepresentation: %s",
pformat(container, indent=2),
pformat(representation, indent=2),
)
assert asset_group, (
f"The asset is not loaded: {container['objectName']}"
)
assert libpath, (
"No existing library file found for {container['objectName']}"
)
assert libpath.is_file(), (
f"The file doesn't exist: {libpath}"
)
assert extension in plugin.VALID_EXTENSIONS, (
f"Unsupported file: {libpath}"
)
metadata = asset_group.get(AVALON_PROPERTY)
group_libpath = metadata["libpath"]
normalized_group_libpath = (
str(Path(bpy.path.abspath(group_libpath)).resolve())
)
normalized_libpath = (
str(Path(bpy.path.abspath(str(libpath))).resolve())
)
self.log.debug(
"normalized_group_libpath:\n %s\nnormalized_libpath:\n %s",
normalized_group_libpath,
normalized_libpath,
)
if normalized_group_libpath == normalized_libpath:
self.log.info("Library already loaded, not updating...")
return
# Check how many assets use the same library
count = 0
for obj in bpy.data.collections.get(AVALON_CONTAINERS).objects:
if obj.get(AVALON_PROPERTY).get('libpath') == group_libpath:
count += 1
mat = asset_group.matrix_basis.copy()
self._remove(asset_group)
# If it is the last object to use that library, remove it
if count == 1:
library = bpy.data.libraries.get(bpy.path.basename(group_libpath))
if library:
bpy.data.libraries.remove(library)
self._process(str(libpath), asset_group, object_name)
asset_group.matrix_basis = mat
metadata["libpath"] = str(libpath)
metadata["representation"] = str(representation["_id"])
metadata["parent"] = str(representation["parent"])
def exec_remove(self, container: Dict) -> bool:
"""Remove an existing container from a Blender scene.
Arguments:
container (openpype:container-1.0): Container to remove,
from `host.ls()`.
Returns:
bool: Whether the container was deleted.
"""
object_name = container["objectName"]
asset_group = bpy.data.objects.get(object_name)
libpath = asset_group.get(AVALON_PROPERTY).get('libpath')
# Check how many assets use the same library
count = 0
for obj in bpy.data.collections.get(AVALON_CONTAINERS).objects:
if obj.get(AVALON_PROPERTY).get('libpath') == libpath:
count += 1
if not asset_group:
return False
self._remove(asset_group)
bpy.data.objects.remove(asset_group)
# If it is the last object to use that library, remove it
if count == 1:
library = bpy.data.libraries.get(bpy.path.basename(libpath))
bpy.data.libraries.remove(library)
return True

@@ -1,417 +0,0 @@
"""Load a rig asset in Blender."""
from pathlib import Path
from pprint import pformat
from typing import Dict, List, Optional
import bpy
from openpype.pipeline import (
legacy_create,
get_representation_path,
AVALON_CONTAINER_ID,
)
from openpype.pipeline.create import get_legacy_creator_by_name
from openpype.hosts.blender.api import (
plugin,
get_selection,
)
from openpype.hosts.blender.api.pipeline import (
AVALON_CONTAINERS,
AVALON_PROPERTY,
)
class BlendRigLoader(plugin.AssetLoader):
"""Load rigs from a .blend file."""
families = ["rig"]
representations = ["blend"]
label = "Link Rig"
icon = "code-fork"
color = "orange"
def _remove(self, asset_group):
objects = list(asset_group.children)
for obj in objects:
if obj.type == 'MESH':
for material_slot in list(obj.material_slots):
if material_slot.material:
bpy.data.materials.remove(material_slot.material)
bpy.data.meshes.remove(obj.data)
elif obj.type == 'ARMATURE':
objects.extend(obj.children)
bpy.data.armatures.remove(obj.data)
elif obj.type == 'CURVE':
bpy.data.curves.remove(obj.data)
elif obj.type == 'EMPTY':
objects.extend(obj.children)
bpy.data.objects.remove(obj)
def _process(self, libpath, asset_group, group_name, action):
with bpy.data.libraries.load(
libpath, link=True, relative=False
) as (data_from, data_to):
data_to.objects = data_from.objects
parent = bpy.context.scene.collection
empties = [obj for obj in data_to.objects if obj.type == 'EMPTY']
container = None
for empty in empties:
if empty.get(AVALON_PROPERTY):
container = empty
break
assert container, "No asset group found"
# Children must be linked before parents,
# otherwise the hierarchy will break
objects = []
nodes = list(container.children)
allowed_types = ['ARMATURE', 'MESH']
for obj in nodes:
if obj.type in allowed_types:
obj.parent = asset_group
for obj in nodes:
if obj.type in allowed_types:
objects.append(obj)
nodes.extend(list(obj.children))
objects.reverse()
constraints = []
armatures = [obj for obj in objects if obj.type == 'ARMATURE']
for armature in armatures:
for bone in armature.pose.bones:
for constraint in bone.constraints:
if hasattr(constraint, 'target'):
constraints.append(constraint)
for obj in objects:
parent.objects.link(obj)
for obj in objects:
local_obj = plugin.prepare_data(obj, group_name)
if local_obj.type == 'MESH':
plugin.prepare_data(local_obj.data, group_name)
if obj != local_obj:
for constraint in constraints:
if constraint.target == obj:
constraint.target = local_obj
for material_slot in local_obj.material_slots:
if material_slot.material:
plugin.prepare_data(material_slot.material, group_name)
elif local_obj.type == 'ARMATURE':
plugin.prepare_data(local_obj.data, group_name)
if action is not None:
if local_obj.animation_data is None:
local_obj.animation_data_create()
local_obj.animation_data.action = action
elif (local_obj.animation_data and
local_obj.animation_data.action is not None):
plugin.prepare_data(
local_obj.animation_data.action, group_name)
# Link the drivers to the local object
if local_obj.data.animation_data:
for d in local_obj.data.animation_data.drivers:
for v in d.driver.variables:
for t in v.targets:
t.id = local_obj
if not local_obj.get(AVALON_PROPERTY):
local_obj[AVALON_PROPERTY] = dict()
avalon_info = local_obj[AVALON_PROPERTY]
avalon_info.update({"container_name": group_name})
objects.reverse()
curves = [obj for obj in data_to.objects if obj.type == 'CURVE']
for curve in curves:
local_obj = plugin.prepare_data(curve, group_name)
plugin.prepare_data(local_obj.data, group_name)
local_obj.use_fake_user = True
for mod in local_obj.modifiers:
mod_target_name = mod.object.name
mod.object = bpy.data.objects.get(
f"{group_name}:{mod_target_name}")
if not local_obj.get(AVALON_PROPERTY):
local_obj[AVALON_PROPERTY] = dict()
avalon_info = local_obj[AVALON_PROPERTY]
avalon_info.update({"container_name": group_name})
local_obj.parent = asset_group
objects.append(local_obj)
while bpy.data.orphans_purge(do_local_ids=False):
pass
plugin.deselect_all()
return objects
def process_asset(
self, context: dict, name: str, namespace: Optional[str] = None,
options: Optional[Dict] = None
) -> Optional[List]:
"""
Arguments:
name: Use pre-defined name
namespace: Use pre-defined namespace
context: Full parenthood of representation to load
options: Additional settings dictionary
"""
libpath = self.filepath_from_context(context)
asset = context["asset"]["name"]
subset = context["subset"]["name"]
asset_name = plugin.asset_name(asset, subset)
unique_number = plugin.get_unique_number(asset, subset)
group_name = plugin.asset_name(asset, subset, unique_number)
namespace = namespace or f"{asset}_{unique_number}"
avalon_container = bpy.data.collections.get(AVALON_CONTAINERS)
if not avalon_container:
avalon_container = bpy.data.collections.new(name=AVALON_CONTAINERS)
bpy.context.scene.collection.children.link(avalon_container)
asset_group = bpy.data.objects.new(group_name, object_data=None)
asset_group.empty_display_type = 'SINGLE_ARROW'
avalon_container.objects.link(asset_group)
action = None
plugin.deselect_all()
create_animation = False
anim_file = None
if options is not None:
parent = options.get('parent')
transform = options.get('transform')
action = options.get('action')
create_animation = options.get('create_animation')
anim_file = options.get('animation_file')
if parent and transform:
location = transform.get('translation')
rotation = transform.get('rotation')
scale = transform.get('scale')
asset_group.location = (
location.get('x'),
location.get('y'),
location.get('z')
)
asset_group.rotation_euler = (
rotation.get('x'),
rotation.get('y'),
rotation.get('z')
)
asset_group.scale = (
scale.get('x'),
scale.get('y'),
scale.get('z')
)
bpy.context.view_layer.objects.active = parent
asset_group.select_set(True)
bpy.ops.object.parent_set(keep_transform=True)
plugin.deselect_all()
objects = self._process(libpath, asset_group, group_name, action)
if create_animation:
creator_plugin = get_legacy_creator_by_name("CreateAnimation")
if not creator_plugin:
raise ValueError("Creator plugin \"CreateAnimation\" was "
"not found.")
asset_group.select_set(True)
animation_asset = options.get('animation_asset')
legacy_create(
creator_plugin,
name=namespace + "_animation",
# name=f"{unique_number}_{subset}_animation",
asset=animation_asset,
options={"useSelection": False, "asset_group": asset_group},
data={"dependencies": str(context["representation"]["_id"])}
)
plugin.deselect_all()
if anim_file:
bpy.ops.import_scene.fbx(filepath=anim_file, anim_offset=0.0)
imported = get_selection()
armature = [
o for o in asset_group.children if o.type == 'ARMATURE'][0]
imported_group = [
o for o in imported if o.type == 'EMPTY'][0]
for obj in imported:
if obj.type == 'ARMATURE':
if not armature.animation_data:
armature.animation_data_create()
armature.animation_data.action = obj.animation_data.action
self._remove(imported_group)
bpy.data.objects.remove(imported_group)
bpy.context.scene.collection.objects.link(asset_group)
asset_group[AVALON_PROPERTY] = {
"schema": "openpype:container-2.0",
"id": AVALON_CONTAINER_ID,
"name": name,
"namespace": namespace or '',
"loader": str(self.__class__.__name__),
"representation": str(context["representation"]["_id"]),
"libpath": libpath,
"asset_name": asset_name,
"parent": str(context["representation"]["parent"]),
"family": context["representation"]["context"]["family"],
"objectName": group_name
}
self[:] = objects
return objects
def exec_update(self, container: Dict, representation: Dict):
"""Update the loaded asset.
This will remove all children of the asset group, load the new ones
and add them as children of the group.
"""
object_name = container["objectName"]
asset_group = bpy.data.objects.get(object_name)
libpath = Path(get_representation_path(representation))
extension = libpath.suffix.lower()
self.log.info(
"Container: %s\nRepresentation: %s",
pformat(container, indent=2),
pformat(representation, indent=2),
)
assert asset_group, (
f"The asset is not loaded: {container['objectName']}"
)
assert libpath, (
"No existing library file found for {container['objectName']}"
)
assert libpath.is_file(), (
f"The file doesn't exist: {libpath}"
)
assert extension in plugin.VALID_EXTENSIONS, (
f"Unsupported file: {libpath}"
)
metadata = asset_group.get(AVALON_PROPERTY)
group_libpath = metadata["libpath"]
normalized_group_libpath = (
str(Path(bpy.path.abspath(group_libpath)).resolve())
)
normalized_libpath = (
str(Path(bpy.path.abspath(str(libpath))).resolve())
)
self.log.debug(
"normalized_group_libpath:\n %s\nnormalized_libpath:\n %s",
normalized_group_libpath,
normalized_libpath,
)
if normalized_group_libpath == normalized_libpath:
self.log.info("Library already loaded, not updating...")
return
# Check how many assets use the same library
count = 0
for obj in bpy.data.collections.get(AVALON_CONTAINERS).objects:
if obj.get(AVALON_PROPERTY).get('libpath') == group_libpath:
count += 1
# Get the armature of the rig
objects = asset_group.children
armature = [obj for obj in objects if obj.type == 'ARMATURE'][0]
action = None
if armature.animation_data and armature.animation_data.action:
action = armature.animation_data.action
mat = asset_group.matrix_basis.copy()
self._remove(asset_group)
# If it is the last object to use that library, remove it
if count == 1:
library = bpy.data.libraries.get(bpy.path.basename(group_libpath))
bpy.data.libraries.remove(library)
self._process(str(libpath), asset_group, object_name, action)
asset_group.matrix_basis = mat
metadata["libpath"] = str(libpath)
metadata["representation"] = str(representation["_id"])
metadata["parent"] = str(representation["parent"])
def exec_remove(self, container: Dict) -> bool:
"""Remove an existing asset group from a Blender scene.
Arguments:
container (openpype:container-1.0): Container to remove,
from `host.ls()`.
Returns:
bool: Whether the asset group was deleted.
"""
object_name = container["objectName"]
asset_group = bpy.data.objects.get(object_name)
libpath = asset_group.get(AVALON_PROPERTY).get('libpath')
# Check how many assets use the same library
count = 0
for obj in bpy.data.collections.get(AVALON_CONTAINERS).objects:
if obj.get(AVALON_PROPERTY).get('libpath') == libpath:
count += 1
if not asset_group:
return False
self._remove(asset_group)
bpy.data.objects.remove(asset_group)
# If it is the last object to use that library, remove it
if count == 1:
library = bpy.data.libraries.get(bpy.path.basename(libpath))
bpy.data.libraries.remove(library)
return True

@@ -10,7 +10,7 @@ class ExtractBlend(publish.Extractor):
label = "Extract Blend"
hosts = ["blender"]
families = ["model", "camera", "rig", "action", "layout"]
families = ["model", "camera", "rig", "action", "layout", "blendScene"]
optional = True
def process(self, instance):

@@ -9,7 +9,7 @@ class IncrementWorkfileVersion(pyblish.api.ContextPlugin):
label = "Increment Workfile Version"
optional = True
hosts = ["blender"]
families = ["animation", "model", "rig", "action", "layout"]
families = ["animation", "model", "rig", "action", "layout", "blendScene"]
def process(self, context):

@@ -69,6 +69,8 @@ class HoudiniLegacyConvertor(SubsetConvertorPlugin):
"creator_identifier": self.family_to_id[family],
"instance_node": subset.path()
}
if family == "pointcache":
data["families"] = ["abc"]
self.log.info("Converting {} to {}".format(
subset.path(), self.family_to_id[family]))
imprint(subset, data)

@@ -0,0 +1,92 @@
# -*- coding: utf-8 -*-
"""Creator plugin for creating pointcache bgeo files."""
from openpype.hosts.houdini.api import plugin
from openpype.pipeline import CreatedInstance, CreatorError
from openpype.lib import EnumDef
class CreateBGEO(plugin.HoudiniCreator):
"""BGEO pointcache creator."""
identifier = "io.openpype.creators.houdini.bgeo"
label = "BGEO PointCache"
family = "pointcache"
icon = "gears"
def create(self, subset_name, instance_data, pre_create_data):
import hou
instance_data.pop("active", None)
instance_data.update({"node_type": "geometry"})
instance = super(CreateBGEO, self).create(
subset_name,
instance_data,
pre_create_data) # type: CreatedInstance
instance_node = hou.node(instance.get("instance_node"))
file_path = "{}{}".format(
hou.text.expandString("$HIP/pyblish/"),
"{}.$F4.{}".format(
subset_name,
pre_create_data.get("bgeo_type") or "bgeo.sc")
)
parms = {
"sopoutput": file_path
}
instance_node.parm("trange").set(1)
if self.selected_nodes:
# if selection is on SOP level, use it
if isinstance(self.selected_nodes[0], hou.SopNode):
parms["soppath"] = self.selected_nodes[0].path()
else:
# try to find output node with the lowest index
outputs = [
child for child in self.selected_nodes[0].children()
if child.type().name() == "output"
]
if not outputs:
instance_node.setParms(parms)
raise CreatorError((
"Missing output node in SOP level for the selection. "
"Please select correct SOP path in created instance."
))
outputs.sort(key=lambda output: output.evalParm("outputidx"))
parms["soppath"] = outputs[0].path()
instance_node.setParms(parms)
def get_pre_create_attr_defs(self):
attrs = super().get_pre_create_attr_defs()
bgeo_enum = [
{
"value": "bgeo",
"label": "uncompressed bgeo (.bgeo)"
},
{
"value": "bgeosc",
"label": "BLOSC compressed bgeo (.bgeosc)"
},
{
"value": "bgeo.sc",
"label": "BLOSC compressed bgeo (.bgeo.sc)"
},
{
"value": "bgeo.gz",
"label": "GZ compressed bgeo (.bgeo.gz)"
},
{
"value": "bgeo.lzma",
"label": "LZMA compressed bgeo (.bgeo.lzma)"
},
{
"value": "bgeo.bz2",
"label": "BZip2 compressed bgeo (.bgeo.bz2)"
}
]
return attrs + [
EnumDef("bgeo_type", bgeo_enum, label="BGEO Options"),
]
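
The creator above writes to a $HIP-relative path containing a $F4 frame token and sets trange so the ROP renders a frame range rather than a single frame. A small sketch of those Houdini pieces outside OpenPype (node placement and names assumed):

import hou

# A bare geometry ROP configured like the created instance node above.
rop = hou.node("/out").createNode("geometry")
rop.parm("trange").set(1)  # 1 = "Render Frame Range"
rop.parm("sopoutput").set("$HIP/pyblish/subset.$F4.bgeo.sc")

# $F4 stays literal in the parameter and expands per frame at render
# time; expandString resolves the variables at the current frame.
print(hou.text.expandString("$HIP/pyblish/subset.$F4.bgeo.sc"))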

@@ -13,7 +13,8 @@ class CollectFrames(pyblish.api.InstancePlugin):
order = pyblish.api.CollectorOrder + 0.01
label = "Collect Frames"
families = ["vdbcache", "imagesequence", "ass", "redshiftproxy", "review"]
families = ["vdbcache", "imagesequence", "ass",
"redshiftproxy", "review", "bgeo"]
def process(self, instance):
@@ -32,9 +33,9 @@ class CollectFrames(pyblish.api.InstancePlugin):
output = output_parm.eval()
_, ext = lib.splitext(
output,
allowed_multidot_extensions=[".ass.gz"]
)
output, allowed_multidot_extensions=[
".ass.gz", ".bgeo.sc", ".bgeo.gz",
".bgeo.lzma", ".bgeo.bz2"])
file_name = os.path.basename(output)
result = file_name
@@ -76,7 +77,7 @@ class CollectFrames(pyblish.api.InstancePlugin):
frame = match.group(1)
padding = len(frame)
# Get the parts of the filename surrounding the frame number
# Get the parts of the filename surrounding the frame number,
# so we can put our own frame numbers in.
span = match.span(1)
prefix = match.string[: span[0]]
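
The prefix/suffix bookkeeping at the end of this hunk is what lets the collector stamp arbitrary frame numbers into a matched filename. A self-contained sketch of the technique, with an illustrative regex rather than the plugin's own:

import re

file_name = "subset.1001.bgeo.sc"
match = re.search(r"\.(\d+)\.", file_name)

frame = match.group(1)            # "1001"
padding = len(frame)              # 4
span = match.span(1)              # character span of the frame number
prefix = match.string[:span[0]]   # "subset."
suffix = match.string[span[1]:]   # ".bgeo.sc"

# Rebuild the name for any other frame with the same zero padding.
print(prefix + str(1002).zfill(padding) + suffix)  # subset.1002.bgeo.sc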

@@ -0,0 +1,21 @@
"""Collector for pointcache types.
This will add an additional family to the pointcache instance based on
the creator_identifier parameter.
"""
import pyblish.api
class CollectPointcacheType(pyblish.api.InstancePlugin):
"""Collect data type for pointcache instance."""
order = pyblish.api.CollectorOrder
hosts = ["houdini"]
families = ["pointcache"]
label = "Collect type of pointcache"
def process(self, instance):
if instance.data["creator_identifier"] == "io.openpype.creators.houdini.bgeo": # noqa: E501
instance.data["families"] += ["bgeo"]
elif instance.data["creator_identifier"] == "io.openpype.creators.houdini.alembic": # noqa: E501
instance.data["families"] += ["abc"]

@@ -13,7 +13,7 @@ class ExtractAlembic(publish.Extractor):
order = pyblish.api.ExtractorOrder
label = "Extract Alembic"
hosts = ["houdini"]
families = ["pointcache", "camera"]
families = ["abc", "camera"]
def process(self, instance):

@@ -0,0 +1,53 @@
import os
import pyblish.api
from openpype.pipeline import publish
from openpype.hosts.houdini.api.lib import render_rop
from openpype.hosts.houdini.api import lib
import hou
class ExtractBGEO(publish.Extractor):
order = pyblish.api.ExtractorOrder
label = "Extract BGEO"
hosts = ["houdini"]
families = ["bgeo"]
def process(self, instance):
ropnode = hou.node(instance.data["instance_node"])
# Get the filename from the sopoutput parameter
output = ropnode.evalParm("sopoutput")
staging_dir, file_name = os.path.split(output)
instance.data["stagingDir"] = staging_dir
# We run the render
self.log.info("Writing bgeo files '{}' to '{}'.".format(
file_name, staging_dir))
# write files
render_rop(ropnode)
output = instance.data["frames"]
_, ext = lib.splitext(
output[0], allowed_multidot_extensions=[
".ass.gz", ".bgeo.sc", ".bgeo.gz",
".bgeo.lzma", ".bgeo.bz2"])
if "representations" not in instance.data:
instance.data["representations"] = []
representation = {
"name": "bgeo",
"ext": ext.lstrip("."),
"files": output,
"stagingDir": staging_dir,
"frameStart": instance.data["frameStart"],
"frameEnd": instance.data["frameEnd"]
}
instance.data["representations"].append(representation)

@@ -17,7 +17,7 @@ class ValidateAbcPrimitiveToDetail(pyblish.api.InstancePlugin):
"""
order = pyblish.api.ValidatorOrder + 0.1
families = ["pointcache"]
families = ["abc"]
hosts = ["houdini"]
label = "Validate Primitive to Detail (Abc)"

@@ -18,7 +18,7 @@ class ValidateAlembicROPFaceSets(pyblish.api.InstancePlugin):
"""
order = pyblish.api.ValidatorOrder + 0.1
families = ["pointcache"]
families = ["abc"]
hosts = ["houdini"]
label = "Validate Alembic ROP Face Sets"

@@ -14,7 +14,7 @@ class ValidateAlembicInputNode(pyblish.api.InstancePlugin):
"""
order = pyblish.api.ValidatorOrder + 0.1
families = ["pointcache"]
families = ["abc"]
hosts = ["houdini"]
label = "Validate Input Node (Abc)"

@@ -0,0 +1,26 @@
# -*- coding: utf-8 -*-
"""Validator plugin for SOP Path in bgeo isntance."""
import pyblish.api
from openpype.pipeline import PublishValidationError
class ValidateNoSOPPath(pyblish.api.InstancePlugin):
"""Validate if SOP Path in BGEO instance exists."""
order = pyblish.api.ValidatorOrder
families = ["bgeo"]
label = "Validate BGEO SOP Path"
def process(self, instance):
import hou
node = hou.node(instance.data.get("instance_node"))
sop_path = node.evalParm("soppath")
if not sop_path:
raise PublishValidationError(
("Empty SOP Path ('soppath' parameter) found in "
f"the BGEO instance Geometry - {node.path()}"))
if not isinstance(hou.node(sop_path), hou.SopNode):
raise PublishValidationError(
"SOP path is not pointing to valid SOP node.")

View file

@@ -19,12 +19,11 @@ class ValidateFileExtension(pyblish.api.InstancePlugin):
"""
order = pyblish.api.ValidatorOrder
families = ["pointcache", "camera", "vdbcache"]
families = ["camera", "vdbcache"]
hosts = ["houdini"]
label = "Output File Extension"
family_extensions = {
"pointcache": ".abc",
"camera": ".abc",
"vdbcache": ".vdb",
}

@@ -24,7 +24,7 @@ class ValidatePrimitiveHierarchyPaths(pyblish.api.InstancePlugin):
"""
order = ValidateContentsOrder + 0.1
families = ["pointcache"]
families = ["abc"]
hosts = ["houdini"]
label = "Validate Prims Hierarchy Path"
actions = [AddDefaultPathAction]

@@ -295,6 +295,13 @@ class CollectMayaRender(pyblish.api.InstancePlugin):
"colorspaceView": colorspace_data["view"],
}
rr_settings = (
context.data["system_settings"]["modules"]["royalrender"]
)
if rr_settings["enabled"]:
data["rrPathName"] = instance.data.get("rrPathName")
self.log.info(data["rrPathName"])
if self.sync_workfile_version:
data["version"] = context.data["version"]
for instance in context:

@@ -25,7 +25,8 @@ from .lib import (
select_nodes,
duplicate_node,
node_tempfile,
get_main_window
get_main_window,
WorkfileSettings,
)
PLACEHOLDER_SET = "PLACEHOLDERS_SET"
@@ -955,6 +956,9 @@ def build_workfile_template(*args, **kwargs):
builder = NukeTemplateBuilder(registered_host())
builder.build_template(*args, **kwargs)
# set all settings to shot context default
WorkfileSettings().set_context_settings()
def update_workfile_template(*args):
builder = NukeTemplateBuilder(registered_host())

@@ -22,6 +22,9 @@ from openpype.pipeline.publish import (
KnownPublishError,
OpenPypePyblishPluginMixin
)
from openpype.pipeline.publish.lib import (
replace_with_published_scene_path
)
JSONDecodeError = getattr(json.decoder, "JSONDecodeError", ValueError)
@@ -525,72 +528,8 @@ class AbstractSubmitDeadline(pyblish.api.InstancePlugin,
published.
"""
instance = self._instance
workfile_instance = self._get_workfile_instance(instance.context)
if workfile_instance is None:
return
# determine published path from Anatomy.
template_data = workfile_instance.data.get("anatomyData")
rep = workfile_instance.data["representations"][0]
template_data["representation"] = rep.get("name")
template_data["ext"] = rep.get("ext")
template_data["comment"] = None
anatomy = instance.context.data['anatomy']
template_obj = anatomy.templates_obj["publish"]["path"]
template_filled = template_obj.format_strict(template_data)
file_path = os.path.normpath(template_filled)
self.log.info("Using published scene for render {}".format(file_path))
if not os.path.exists(file_path):
self.log.error("published scene does not exist!")
raise
if not replace_in_path:
return file_path
# now we need to switch scene in expected files
# because <scene> token will now point to published
# scene file and that might differ from current one
def _clean_name(path):
return os.path.splitext(os.path.basename(path))[0]
new_scene = _clean_name(file_path)
orig_scene = _clean_name(instance.context.data["currentFile"])
expected_files = instance.data.get("expectedFiles")
if isinstance(expected_files[0], dict):
# we have aovs and we need to iterate over them
new_exp = {}
for aov, files in expected_files[0].items():
replaced_files = []
for f in files:
replaced_files.append(
str(f).replace(orig_scene, new_scene)
)
new_exp[aov] = replaced_files
# [] might be too much here, TODO
instance.data["expectedFiles"] = [new_exp]
else:
new_exp = []
for f in expected_files:
new_exp.append(
str(f).replace(orig_scene, new_scene)
)
instance.data["expectedFiles"] = new_exp
metadata_folder = instance.data.get("publishRenderMetadataFolder")
if metadata_folder:
metadata_folder = metadata_folder.replace(orig_scene,
new_scene)
instance.data["publishRenderMetadataFolder"] = metadata_folder
self.log.info("Scene name was switched {} -> {}".format(
orig_scene, new_scene
))
return file_path
return replace_with_published_scene_path(
self._instance, replace_in_path=replace_in_path)
def assemble_payload(
self, job_info=None, plugin_info=None, aux_files=None):
@ -651,22 +590,3 @@ class AbstractSubmitDeadline(pyblish.api.InstancePlugin,
self._instance.data["deadlineSubmissionJob"] = result
return result["_id"]
@staticmethod
def _get_workfile_instance(context):
"""Find workfile instance in context"""
for instance in context:
is_workfile = (
"workfile" in instance.data.get("families", []) or
instance.data["family"] == "workfile"
)
if not is_workfile:
continue
# test if there is instance of workfile waiting
# to be published.
assert instance.data.get("publish", True) is True, (
"Workfile (scene) must be published along")
return instance
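The heart of the removed logic (now centralized in `replace_with_published_scene_path`) is a basename swap over expected files; a minimal sketch with hypothetical paths:

```python
import os

def _clean_name(path):
    return os.path.splitext(os.path.basename(path))[0]

current_scene = "/work/sh010_comp_v001_wip.ma"     # hypothetical work file
published_scene = "/publish/sh010_comp_v001.ma"    # hypothetical published path
expected = ["/render/sh010_comp_v001_wip.0001.exr"]

orig, new = _clean_name(current_scene), _clean_name(published_scene)
print([f.replace(orig, new) for f in expected])
# ['/render/sh010_comp_v001.0001.exr']
```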

View file

@ -47,6 +47,7 @@ from openpype_modules.deadline import abstract_submit_deadline
from openpype_modules.deadline.abstract_submit_deadline import DeadlineJobInfo
from openpype.tests.lib import is_in_tests
from openpype.lib import is_running_from_build
from openpype.pipeline.farm.tools import iter_expected_files
def _validate_deadline_bool_value(instance, attribute, value):
@ -238,7 +239,7 @@ class MayaSubmitDeadline(abstract_submit_deadline.AbstractSubmitDeadline,
# Add list of expected files to job
# ---------------------------------
exp = instance.data.get("expectedFiles")
for filepath in self._iter_expected_files(exp):
for filepath in iter_expected_files(exp):
job_info.OutputDirectory += os.path.dirname(filepath)
job_info.OutputFilename += os.path.basename(filepath)
@ -296,7 +297,7 @@ class MayaSubmitDeadline(abstract_submit_deadline.AbstractSubmitDeadline,
# TODO: Avoid the need for this logic here, needed for submit publish
# Store output dir for unified publisher (filesequence)
expected_files = instance.data["expectedFiles"]
first_file = next(self._iter_expected_files(expected_files))
first_file = next(iter_expected_files(expected_files))
output_dir = os.path.dirname(first_file)
instance.data["outputDir"] = output_dir
instance.data["toBeRenderedOn"] = "deadline"
@ -815,16 +816,6 @@ class MayaSubmitDeadline(abstract_submit_deadline.AbstractSubmitDeadline,
end=int(self._instance.data["frameEndHandle"]),
)
@staticmethod
def _iter_expected_files(exp):
if isinstance(exp[0], dict):
for _aov, files in exp[0].items():
for file in files:
yield file
else:
for file in exp:
yield file
@classmethod
def get_attribute_defs(cls):
defs = super(MayaSubmitDeadline, cls).get_attribute_defs()
@ -863,7 +854,6 @@ class MayaSubmitDeadline(abstract_submit_deadline.AbstractSubmitDeadline,
return defs
def _format_tiles(
filename,
index,
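The removed `_iter_expected_files` now lives in `openpype.pipeline.farm.tools.iter_expected_files`; its behavior, sketched here with assumed data, is to flatten both shapes `expectedFiles` can take:

```python
def iter_expected_files(exp):
    # dict form: single {aov_name: [files]} entry; flat form: plain [files]
    if isinstance(exp[0], dict):
        for _aov, files in exp[0].items():
            for file in files:
                yield file
    else:
        for file in exp:
            yield file

aov_files = [{"beauty": ["/r/b.0001.exr"], "Z": ["/r/z.0001.exr"]}]
print(list(iter_expected_files(aov_files)))
# ['/r/b.0001.exr', '/r/z.0001.exr']
```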

View file

@ -1,6 +1,5 @@
# -*- coding: utf-8 -*-
"""Submit publishing job to farm."""
import os
import json
import re
@ -12,47 +11,22 @@ import pyblish.api
from openpype.client import (
get_last_version_by_subset_name,
get_representations,
)
from openpype.pipeline import (
get_representation_path,
legacy_io,
)
from openpype.pipeline.publish import OpenPypePyblishPluginMixin
from openpype.pipeline import publish
from openpype.lib import EnumDef
from openpype.tests.lib import is_in_tests
from openpype.pipeline.farm.patterning import match_aov_pattern
from openpype.lib import is_running_from_build
from openpype.pipeline import publish
def get_resources(project_name, version, extension=None):
"""Get the files from the specific version."""
# TODO this function seems to be weird
# - it's looking for representation with one extension or first (any)
# representation from a version?
# - not sure how this should work, maybe it does for specific use cases
# but probably can't be used for all resources from 2D workflows
extensions = None
if extension:
extensions = [extension]
repre_docs = list(get_representations(
project_name, version_ids=[version["_id"]], extensions=extensions
))
assert repre_docs, "This is a bug"
representation = repre_docs[0]
directory = get_representation_path(representation)
print("Source: ", directory)
resources = sorted(
[
os.path.normpath(os.path.join(directory, fname))
for fname in os.listdir(directory)
]
)
return resources
from openpype.pipeline.farm.pyblish_functions import (
create_skeleton_instance,
create_instances_for_aov,
attach_instances_to_subset,
prepare_representations,
create_metadata_path
)
def get_resource_files(resources, frame_range=None):
@ -83,7 +57,8 @@ def get_resource_files(resources, frame_range=None):
class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin,
OpenPypePyblishPluginMixin):
publish.OpenPypePyblishPluginMixin,
publish.ColormanagedPyblishPluginMixin):
"""Process Job submitted on farm.
These jobs are dependent on a deadline or muster job
@ -185,36 +160,6 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin,
# poor man's exclusion
skip_integration_repre_list = []
def _create_metadata_path(self, instance):
ins_data = instance.data
# Ensure output dir exists
output_dir = ins_data.get(
"publishRenderMetadataFolder", ins_data["outputDir"])
try:
if not os.path.isdir(output_dir):
os.makedirs(output_dir)
except OSError:
# directory is not available
self.log.warning("Path is unreachable: `{}`".format(output_dir))
metadata_filename = "{}_metadata.json".format(ins_data["subset"])
metadata_path = os.path.join(output_dir, metadata_filename)
# Convert output dir to `{root}/rest/of/path/...` with Anatomy
success, rootless_mtdt_p = self.anatomy.find_root_template_from_path(
metadata_path)
if not success:
# `rootless_path` is not set to `output_dir` if none of roots match
self.log.warning((
"Could not find root path for remapping \"{}\"."
" This may cause issues on farm."
).format(output_dir))
rootless_mtdt_p = metadata_path
return metadata_path, rootless_mtdt_p
def _submit_deadline_post_job(self, instance, job, instances):
"""Submit publish job to Deadline.
@ -229,6 +174,8 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin,
subset = data["subset"]
job_name = "Publish - {subset}".format(subset=subset)
anatomy = instance.context.data['anatomy']
# instance.data.get("subset") != instances[0]["subset"]
# 'Main' vs 'renderMain'
override_version = None
@ -237,7 +184,7 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin,
override_version = instance_version
output_dir = self._get_publish_folder(
instance.context.data['anatomy'],
anatomy,
deepcopy(instance.data["anatomyData"]),
instance.data.get("asset"),
instances[0]["subset"],
@ -248,7 +195,7 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin,
# Transfer the environment from the original job to this dependent
# job so they use the same environment
metadata_path, rootless_metadata_path = \
self._create_metadata_path(instance)
create_metadata_path(instance, anatomy)
environment = {
"AVALON_PROJECT": instance.context.data["projectName"],
@ -335,13 +282,15 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin,
self.log.info("Adding tile assembly jobs as dependencies...")
job_index = 0
for assembly_id in instance.data.get("assemblySubmissionJobs"):
payload["JobInfo"]["JobDependency{}".format(job_index)] = assembly_id # noqa: E501
payload["JobInfo"]["JobDependency{}".format(
job_index)] = assembly_id # noqa: E501
job_index += 1
elif instance.data.get("bakingSubmissionJobs"):
self.log.info("Adding baking submission jobs as dependencies...")
job_index = 0
for assembly_id in instance.data["bakingSubmissionJobs"]:
payload["JobInfo"]["JobDependency{}".format(job_index)] = assembly_id # noqa: E501
payload["JobInfo"]["JobDependency{}".format(
job_index)] = assembly_id # noqa: E501
job_index += 1
else:
payload["JobInfo"]["JobDependency0"] = job["_id"]
@ -369,413 +318,13 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin,
return deadline_publish_job_id
def _copy_extend_frames(self, instance, representation):
"""Copy existing frames from latest version.
This will copy all existing frames from subset's latest version back
to render directory and rename them to what renderer is expecting.
Arguments:
instance (pyblish.plugin.Instance): instance to get required
data from
representation (dict): representation to operate on
"""
import speedcopy
self.log.info("Preparing to copy ...")
start = instance.data.get("frameStart")
end = instance.data.get("frameEnd")
# get latest version of subset
# this will stop if subset wasn't published yet
project_name = self.context.data["projectName"]
version = get_last_version_by_subset_name(
project_name,
instance.data.get("subset"),
asset_name=instance.data.get("asset")
)
# get its files based on extension
subset_resources = get_resources(
project_name, version, representation.get("ext")
)
r_col, _ = clique.assemble(subset_resources)
# if override remove all frames we are expecting to be rendered
# so we'll copy only those missing from current render
if instance.data.get("overrideExistingFrame"):
for frame in range(start, end + 1):
if frame not in r_col.indexes:
continue
r_col.indexes.remove(frame)
# now we need to translate published names from representation
# back. This is tricky, right now we'll just use same naming
# and only switch frame numbers
resource_files = []
r_filename = os.path.basename(
representation.get("files")[0]) # first file
op = re.search(self.R_FRAME_NUMBER, r_filename)
assert op is not None, "padding string wasn't found"
pre = r_filename[:op.start("frame")]
post = r_filename[op.end("frame"):]
for frame in list(r_col):
fn = re.search(self.R_FRAME_NUMBER, frame)
# silencing linter as we need to compare to True, not to
# type
assert fn is not None, "padding string wasn't found"
# list of tuples (source, destination)
staging = representation.get("stagingDir")
staging = self.anatomy.fill_root(staging)
resource_files.append(
(frame,
os.path.join(staging,
"{}{}{}".format(pre,
fn.group("frame"),
post)))
)
# test if destination dir exists and create it if not
output_dir = os.path.dirname(representation.get("files")[0])
if not os.path.isdir(output_dir):
os.makedirs(output_dir)
# copy files
for source in resource_files:
speedcopy.copy(source[0], source[1])
self.log.info(" > {}".format(source[1]))
self.log.info(
"Finished copying %i files" % len(resource_files))
def _create_instances_for_aov(
self, instance_data, exp_files, additional_data, do_not_add_review
):
"""Create instance for each AOV found.
This will create a new instance for every AOV it can detect in the
expected files list.
Arguments:
instance_data (pyblish.plugin.Instance): skeleton data for instance
(those needed) later by collector
exp_files (list): list of expected files divided by aovs
additional_data (dict):
do_not_add_review (bool): explicitly skip review
Returns:
list of instances
"""
task = self.context.data["task"]
host_name = self.context.data["hostName"]
subset = instance_data["subset"]
cameras = instance_data.get("cameras", [])
instances = []
# go through aovs in expected files
for aov, files in exp_files[0].items():
cols, rem = clique.assemble(files)
# we shouldn't have any remainders. And if we do, it should
# be just one item for single frame renders.
if not cols and rem:
assert len(rem) == 1, ("Found multiple non related files "
"to render, don't know what to do "
"with them.")
col = rem[0]
ext = os.path.splitext(col)[1].lstrip(".")
else:
# but we really expect only one collection.
# Nothing else makes sense.
assert len(cols) == 1, "only one image sequence type is expected" # noqa: E501
ext = cols[0].tail.lstrip(".")
col = list(cols[0])
self.log.debug(col)
# create subset name `familyTaskSubset_AOV`
group_name = 'render{}{}{}{}'.format(
task[0].upper(), task[1:],
subset[0].upper(), subset[1:])
cam = [c for c in cameras if c in col.head]
if cam:
if aov:
subset_name = '{}_{}_{}'.format(group_name, cam, aov)
else:
subset_name = '{}_{}'.format(group_name, cam)
else:
if aov:
subset_name = '{}_{}'.format(group_name, aov)
else:
subset_name = '{}'.format(group_name)
if isinstance(col, (list, tuple)):
staging = os.path.dirname(col[0])
else:
staging = os.path.dirname(col)
success, rootless_staging_dir = (
self.anatomy.find_root_template_from_path(staging)
)
if success:
staging = rootless_staging_dir
else:
self.log.warning((
"Could not find root path for remapping \"{}\"."
" This may cause issues on farm."
).format(staging))
self.log.info("Creating data for: {}".format(subset_name))
if isinstance(col, list):
render_file_name = os.path.basename(col[0])
else:
render_file_name = os.path.basename(col)
aov_patterns = self.aov_filter
preview = match_aov_pattern(
host_name, aov_patterns, render_file_name
)
# toggle preview on if multipart is on
if instance_data.get("multipartExr"):
self.log.debug("Adding preview tag because its multipartExr")
preview = True
self.log.debug("preview:{}".format(preview))
new_instance = deepcopy(instance_data)
new_instance["subset"] = subset_name
new_instance["subsetGroup"] = group_name
preview = preview and not do_not_add_review
if preview:
new_instance["review"] = True
# create representation
if isinstance(col, (list, tuple)):
files = [os.path.basename(f) for f in col]
else:
files = os.path.basename(col)
# Copy render product "colorspace" data to representation.
colorspace = ""
products = additional_data["renderProducts"].layer_data.products
for product in products:
if product.productName == aov:
colorspace = product.colorspace
break
rep = {
"name": ext,
"ext": ext,
"files": files,
"frameStart": int(instance_data.get("frameStartHandle")),
"frameEnd": int(instance_data.get("frameEndHandle")),
# If expectedFile are absolute, we need only filenames
"stagingDir": staging,
"fps": new_instance.get("fps"),
"tags": ["review"] if preview else [],
"colorspaceData": {
"colorspace": colorspace,
"config": {
"path": additional_data["colorspaceConfig"],
"template": additional_data["colorspaceTemplate"]
},
"display": additional_data["display"],
"view": additional_data["view"]
}
}
# support conversion from tiled to scanline
if instance_data.get("convertToScanline"):
self.log.info("Adding scanline conversion.")
rep["tags"].append("toScanline")
# poor man's exclusion
if ext in self.skip_integration_repre_list:
rep["tags"].append("delete")
self._solve_families(new_instance, preview)
new_instance["representations"] = [rep]
# if extending frames from existing version, copy files from there
# into our destination directory
if new_instance.get("extendFrames", False):
self._copy_extend_frames(new_instance, rep)
instances.append(new_instance)
self.log.debug("instances:{}".format(instances))
return instances
def _get_representations(self, instance_data, exp_files,
do_not_add_review):
"""Create representations for file sequences.
This will return representations of expected files if they are not
in hierarchy of aovs. There should be only one sequence of files for
most cases, but if not - we create representation from each of them.
Arguments:
instance_data (dict): instance.data for which we are
setting representations
exp_files (list): list of expected files
do_not_add_review (bool): explicitly skip review
Returns:
list of representations
"""
representations = []
host_name = self.context.data["hostName"]
collections, remainders = clique.assemble(exp_files)
# create representation for every collected sequence
for collection in collections:
ext = collection.tail.lstrip(".")
preview = False
# TODO 'useSequenceForReview' is temporary solution which does
# not work for 100% of cases. We must be able to tell what
# expected files contains more explicitly and from what
# should be review made.
# - "review" tag is never added when is set to 'False'
if instance_data["useSequenceForReview"]:
# toggle preview on if multipart is on
if instance_data.get("multipartExr", False):
self.log.debug(
"Adding preview tag because its multipartExr"
)
preview = True
else:
render_file_name = list(collection)[0]
# if filtered aov name is found in filename, toggle it for
# preview video rendering
preview = match_aov_pattern(
host_name, self.aov_filter, render_file_name
)
staging = os.path.dirname(list(collection)[0])
success, rootless_staging_dir = (
self.anatomy.find_root_template_from_path(staging)
)
if success:
staging = rootless_staging_dir
else:
self.log.warning((
"Could not find root path for remapping \"{}\"."
" This may cause issues on farm."
).format(staging))
frame_start = int(instance_data.get("frameStartHandle"))
if instance_data.get("slate"):
frame_start -= 1
preview = preview and not do_not_add_review
rep = {
"name": ext,
"ext": ext,
"files": [os.path.basename(f) for f in list(collection)],
"frameStart": frame_start,
"frameEnd": int(instance_data.get("frameEndHandle")),
# If expectedFile are absolute, we need only filenames
"stagingDir": staging,
"fps": instance_data.get("fps"),
"tags": ["review"] if preview else [],
}
# poor man's exclusion
if ext in self.skip_integration_repre_list:
rep["tags"].append("delete")
if instance_data.get("multipartExr", False):
rep["tags"].append("multipartExr")
# support conversion from tiled to scanline
if instance_data.get("convertToScanline"):
self.log.info("Adding scanline conversion.")
rep["tags"].append("toScanline")
representations.append(rep)
self._solve_families(instance_data, preview)
# add remainders as representations
for remainder in remainders:
ext = remainder.split(".")[-1]
staging = os.path.dirname(remainder)
success, rootless_staging_dir = (
self.anatomy.find_root_template_from_path(staging)
)
if success:
staging = rootless_staging_dir
else:
self.log.warning((
"Could not find root path for remapping \"{}\"."
" This may cause issues on farm."
).format(staging))
rep = {
"name": ext,
"ext": ext,
"files": os.path.basename(remainder),
"stagingDir": staging,
}
preview = match_aov_pattern(
host_name, self.aov_filter, remainder
)
preview = preview and not do_not_add_review
if preview:
rep.update({
"fps": instance_data.get("fps"),
"tags": ["review"]
})
self._solve_families(instance_data, preview)
already_there = False
for repre in instance_data.get("representations", []):
# might be added explicitly before by publish_on_farm
already_there = repre.get("files") == rep["files"]
if already_there:
self.log.debug("repre {} already_there".format(repre))
break
if not already_there:
representations.append(rep)
for rep in representations:
# inject colorspace data
self.set_representation_colorspace(
rep, self.context,
colorspace=instance_data["colorspace"]
)
return representations
def _solve_families(self, instance, preview=False):
families = instance.get("families")
# if we have one representation with preview tag
# flag whole instance for review and for ftrack
if preview:
if "ftrack" not in families:
if os.environ.get("FTRACK_SERVER"):
self.log.debug(
"Adding \"ftrack\" to families because of preview tag."
)
families.append("ftrack")
if "review" not in families:
self.log.debug(
"Adding \"review\" to families because of preview tag."
)
families.append("review")
instance["families"] = families
def process(self, instance):
# type: (pyblish.api.Instance) -> None
"""Process plugin.
Detect type of renderfarm submission and create and post dependent job
in case of Deadline. It creates json file with metadata needed for
Detect type of render farm submission and create and post dependent
job in case of Deadline. It creates json file with metadata needed for
publishing in directory of render.
Args:
@ -786,151 +335,14 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin,
self.log.debug("Skipping local instance.")
return
data = instance.data.copy()
context = instance.context
self.context = context
self.anatomy = instance.context.data["anatomy"]
asset = data.get("asset") or context.data["asset"]
subset = data.get("subset")
start = instance.data.get("frameStart")
if start is None:
start = context.data["frameStart"]
end = instance.data.get("frameEnd")
if end is None:
end = context.data["frameEnd"]
handle_start = instance.data.get("handleStart")
if handle_start is None:
handle_start = context.data["handleStart"]
handle_end = instance.data.get("handleEnd")
if handle_end is None:
handle_end = context.data["handleEnd"]
fps = instance.data.get("fps")
if fps is None:
fps = context.data["fps"]
if data.get("extendFrames", False):
start, end = self._extend_frames(
asset,
subset,
start,
end,
data["overrideExistingFrame"])
try:
source = data["source"]
except KeyError:
source = context.data["currentFile"]
success, rootless_path = (
self.anatomy.find_root_template_from_path(source)
)
if success:
source = rootless_path
else:
# `rootless_path` is not set to `source` if none of roots match
self.log.warning((
"Could not find root path for remapping \"{}\"."
" This may cause issues."
).format(source))
family = "render"
if ("prerender" in instance.data["families"] or
"prerender.farm" in instance.data["families"]):
family = "prerender"
families = [family]
# pass review to families if marked as review
do_not_add_review = False
if data.get("review"):
families.append("review")
elif data.get("review") == False:
self.log.debug("Instance has review explicitly disabled.")
do_not_add_review = True
instance_skeleton_data = {
"family": family,
"subset": subset,
"families": families,
"asset": asset,
"frameStart": start,
"frameEnd": end,
"handleStart": handle_start,
"handleEnd": handle_end,
"frameStartHandle": start - handle_start,
"frameEndHandle": end + handle_end,
"comment": instance.data["comment"],
"fps": fps,
"source": source,
"extendFrames": data.get("extendFrames"),
"overrideExistingFrame": data.get("overrideExistingFrame"),
"pixelAspect": data.get("pixelAspect", 1),
"resolutionWidth": data.get("resolutionWidth", 1920),
"resolutionHeight": data.get("resolutionHeight", 1080),
"multipartExr": data.get("multipartExr", False),
"jobBatchName": data.get("jobBatchName", ""),
"useSequenceForReview": data.get("useSequenceForReview", True),
# map inputVersions `ObjectId` -> `str` so json supports it
"inputVersions": list(map(str, data.get("inputVersions", []))),
"colorspace": instance.data.get("colorspace")
}
# skip locking version if we are creating v01
instance_version = instance.data.get("version") # take this if exists
if instance_version != 1:
instance_skeleton_data["version"] = instance_version
# transfer specific families from original instance to new render
for item in self.families_transfer:
if item in instance.data.get("families", []):
instance_skeleton_data["families"] += [item]
# transfer specific properties from original instance based on
# mapping dictionary `instance_transfer`
for key, values in self.instance_transfer.items():
if key in instance.data.get("families", []):
for v in values:
instance_skeleton_data[v] = instance.data.get(v)
# look into instance data if representations are not having any
# which are having tag `publish_on_farm` and include them
for repre in instance.data.get("representations", []):
staging_dir = repre.get("stagingDir")
if staging_dir:
success, rootless_staging_dir = (
self.anatomy.find_root_template_from_path(
staging_dir
)
)
if success:
repre["stagingDir"] = rootless_staging_dir
else:
self.log.warning((
"Could not find root path for remapping \"{}\"."
" This may cause issues on farm."
).format(staging_dir))
repre["stagingDir"] = staging_dir
if "publish_on_farm" in repre.get("tags"):
# create representations attribute if not there
if "representations" not in instance_skeleton_data.keys():
instance_skeleton_data["representations"] = []
instance_skeleton_data["representations"].append(repre)
instances = None
assert data.get("expectedFiles"), ("Submission from old Pype version"
" - missing expectedFiles")
anatomy = instance.context.data["anatomy"]
instance_skeleton_data = create_skeleton_instance(
instance, families_transfer=self.families_transfer,
instance_transfer=self.instance_transfer)
"""
if content of `expectedFiles` are dictionaries, we will handle
it as list of AOVs, creating instance from every one of them.
if content of `expectedFiles` list are dictionaries, we will handle
it as list of AOVs, creating instance for every one of them.
Example:
--------
@ -952,7 +364,7 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin,
This will create instances for `beauty` and `Z` subset
adding those files to their respective representations.
If we've got only list of files, we collect all filesequences.
If we have only list of files, we collect all file sequences.
More than one probably doesn't make sense, but we'll handle it
like creating one instance with multiple representations.
@ -969,58 +381,26 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin,
This will result in one instance with two representations:
`foo` and `xxx`
"""
do_not_add_review = False
if instance.data.get("review") is False:
self.log.debug("Instance has review explicitly disabled.")
do_not_add_review = True
self.log.info(data.get("expectedFiles"))
if isinstance(data.get("expectedFiles")[0], dict):
# we cannot attach AOVs to other subsets as we consider every
# AOV subset of its own.
additional_data = {
"renderProducts": instance.data["renderProducts"],
"colorspaceConfig": instance.data["colorspaceConfig"],
"display": instance.data["colorspaceDisplay"],
"view": instance.data["colorspaceView"]
}
# Get templated path from absolute config path.
anatomy = instance.context.data["anatomy"]
colorspaceTemplate = instance.data["colorspaceConfig"]
success, rootless_staging_dir = (
anatomy.find_root_template_from_path(colorspaceTemplate)
)
if success:
colorspaceTemplate = rootless_staging_dir
else:
self.log.warning((
"Could not find root path for remapping \"{}\"."
" This may cause issues on farm."
).format(colorspaceTemplate))
additional_data["colorspaceTemplate"] = colorspaceTemplate
if len(data.get("attachTo")) > 0:
assert len(data.get("expectedFiles")[0].keys()) == 1, (
"attaching multiple AOVs or renderable cameras to "
"subset is not supported")
# create instances for every AOV we found in expected files.
# note: this is done for every AOV and every render camera (if
# there are multiple renderable cameras in scene)
instances = self._create_instances_for_aov(
instance_skeleton_data,
data.get("expectedFiles"),
additional_data,
do_not_add_review
)
self.log.info("got {} instance{}".format(
len(instances),
"s" if len(instances) > 1 else ""))
if isinstance(instance.data.get("expectedFiles")[0], dict):
instances = create_instances_for_aov(
instance, instance_skeleton_data,
self.aov_filter, self.skip_integration_repre_list,
do_not_add_review)
else:
representations = self._get_representations(
representations = prepare_representations(
instance_skeleton_data,
data.get("expectedFiles"),
do_not_add_review
instance.data.get("expectedFiles"),
anatomy,
self.aov_filter,
self.skip_integration_repre_list,
do_not_add_review,
instance.context,
self
)
if "representations" not in instance_skeleton_data.keys():
@ -1030,25 +410,11 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin,
instance_skeleton_data["representations"] += representations
instances = [instance_skeleton_data]
# if we are attaching to other subsets, create copy of existing
# instances, change data to match its subset and replace
# existing instances with modified data
# attach instances to subset
if instance.data.get("attachTo"):
self.log.info("Attaching render to subset:")
new_instances = []
for at in instance.data.get("attachTo"):
for i in instances:
new_i = copy(i)
new_i["version"] = at.get("version")
new_i["subset"] = at.get("subset")
new_i["family"] = at.get("family")
new_i["append"] = True
# don't set subsetGroup if we are attaching
new_i.pop("subsetGroup")
new_instances.append(new_i)
self.log.info(" - {} / v{}".format(
at.get("subset"), at.get("version")))
instances = new_instances
instances = attach_instances_to_subset(
instance.data.get("attachTo"), instances
)
r''' SUBMiT PUBLiSH JOB 2 D34DLiN3
____
@ -1063,11 +429,11 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin,
render_job = None
submission_type = ""
if instance.data.get("toBeRenderedOn") == "deadline":
render_job = data.pop("deadlineSubmissionJob", None)
render_job = instance.data.pop("deadlineSubmissionJob", None)
submission_type = "deadline"
if instance.data.get("toBeRenderedOn") == "muster":
render_job = data.pop("musterSubmissionJob", None)
render_job = instance.data.pop("musterSubmissionJob", None)
submission_type = "muster"
if not render_job and instance.data.get("tileRendering") is False:
@ -1089,10 +455,11 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin,
render_job["Props"]["Batch"] = instance.data.get(
"jobBatchName")
else:
render_job["Props"]["Batch"] = os.path.splitext(
os.path.basename(context.data.get("currentFile")))[0]
batch = os.path.splitext(os.path.basename(
instance.context.data.get("currentFile")))[0]
render_job["Props"]["Batch"] = batch
# User is deadline user
render_job["Props"]["User"] = context.data.get(
render_job["Props"]["User"] = instance.context.data.get(
"deadlineUser", getpass.getuser())
render_job["Props"]["Env"] = {
@ -1118,15 +485,15 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin,
# publish job file
publish_job = {
"asset": asset,
"frameStart": start,
"frameEnd": end,
"fps": context.data.get("fps", None),
"source": source,
"user": context.data["user"],
"version": context.data["version"], # this is workfile version
"intent": context.data.get("intent"),
"comment": context.data.get("comment"),
"asset": instance_skeleton_data["asset"],
"frameStart": instance_skeleton_data["frameStart"],
"frameEnd": instance_skeleton_data["frameEnd"],
"fps": instance_skeleton_data["fps"],
"source": instance_skeleton_data["source"],
"user": instance.context.data["user"],
"version": instance.context.data["version"], # workfile version
"intent": instance.context.data.get("intent"),
"comment": instance.context.data.get("comment"),
"job": render_job or None,
"session": legacy_io.Session.copy(),
"instances": instances
@ -1136,7 +503,7 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin,
publish_job["deadline_publish_job_id"] = deadline_publish_job_id
# add audio to metadata file if available
audio_file = context.data.get("audioFile")
audio_file = instance.context.data.get("audioFile")
if audio_file and os.path.isfile(audio_file):
publish_job.update({"audio": audio_file})
@ -1149,54 +516,12 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin,
}
publish_job.update({"ftrack": ftrack})
metadata_path, rootless_metadata_path = self._create_metadata_path(
instance)
metadata_path, rootless_metadata_path = \
create_metadata_path(instance, anatomy)
self.log.info("Writing json file: {}".format(metadata_path))
with open(metadata_path, "w") as f:
json.dump(publish_job, f, indent=4, sort_keys=True)
def _extend_frames(self, asset, subset, start, end):
"""Get latest version of asset nad update frame range.
Based on minimum and maximuma values.
Arguments:
asset (str): asset name
subset (str): subset name
start (int): start frame
end (int): end frame
Returns:
(int, int): updated frame start/end
"""
# Frame comparison
prev_start = None
prev_end = None
project_name = self.context.data["projectName"]
version = get_last_version_by_subset_name(
project_name,
subset,
asset_name=asset
)
# Set prev start / end frames for comparison
if not prev_start and not prev_end:
prev_start = version["data"]["frameStart"]
prev_end = version["data"]["frameEnd"]
updated_start = min(start, prev_start)
updated_end = max(end, prev_end)
self.log.info(
"Updating start / end frame : "
"{} - {}".format(updated_start, updated_end)
)
return updated_start, updated_end
def _get_publish_folder(self, anatomy, template_data,
asset, subset,
family='render', version=None):
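To make the two `expectedFiles` shapes from the docstring concrete, an assumed-data illustration of the dispatch above:

```python
aov_form = [{
    "beauty": ["/render/sh010_beauty.0001.exr"],
    "Z": ["/render/sh010_Z.0001.exr"],
}]
flat_form = ["/render/sh010.0001.exr", "/render/sh010.0002.exr"]

for expected in (aov_form, flat_form):
    if isinstance(expected[0], dict):
        print("AOV mode -> create_instances_for_aov, one instance per AOV")
    else:
        print("sequence mode -> prepare_representations, one instance")
```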

View file

@ -3,10 +3,10 @@
import sys
import os
from openpype.settings import get_project_settings
from openpype.lib.local_settings import OpenPypeSettingsRegistry
from openpype.lib import Logger, run_subprocess
from .rr_job import RRJob, SubmitFile, SubmitterParameter
from openpype.lib.vendor_bin_utils import find_tool_in_custom_paths
class Api:
@ -15,69 +15,57 @@ class Api:
RR_SUBMIT_CONSOLE = 1
RR_SUBMIT_API = 2
def __init__(self, settings, project=None):
def __init__(self, rr_path=None):
self.log = Logger.get_logger("RoyalRender")
self._settings = settings
self._initialize_rr(project)
self._rr_path = rr_path
os.environ["RR_ROOT"] = rr_path
def _initialize_rr(self, project=None):
# type: (str) -> None
"""Initialize RR Path.
@staticmethod
def get_rr_bin_path(rr_root, tool_name=None):
# type: (str, str) -> str
"""Get path to RR bin folder.
Args:
project (str, Optional): Project name to set RR api in
context.
tool_name (str): Name of RR executable you want.
rr_root (str): Custom RR root if needed.
Returns:
str: Path to the tool based on current platform.
"""
if project:
project_settings = get_project_settings(project)
rr_path = (
project_settings
["royalrender"]
["rr_paths"]
)
else:
rr_path = (
self._settings
["modules"]
["royalrender"]
["rr_path"]
["default"]
)
os.environ["RR_ROOT"] = rr_path
self._rr_path = rr_path
def _get_rr_bin_path(self, rr_root=None):
# type: (str) -> str
"""Get path to RR bin folder."""
rr_root = rr_root or self._rr_path
is_64bit_python = sys.maxsize > 2 ** 32
rr_bin_path = ""
rr_bin_parts = [rr_root, "bin"]
if sys.platform.lower() == "win32":
rr_bin_path = "/bin/win64"
if not is_64bit_python:
# we are using 32bit python
rr_bin_path = "/bin/win"
rr_bin_path = rr_bin_path.replace(
"/", os.path.sep
)
rr_bin_parts.append("win")
if sys.platform.lower() == "darwin":
rr_bin_path = "/bin/mac64"
if not is_64bit_python:
rr_bin_path = "/bin/mac"
rr_bin_parts.append("mac")
if sys.platform.lower() == "linux":
rr_bin_path = "/bin/lx64"
if sys.platform.lower().startswith("linux"):
rr_bin_parts.append("lx")
return os.path.join(rr_root, rr_bin_path)
rr_bin_path = os.sep.join(rr_bin_parts)
paths_to_check = []
# if we use 64bit python, append 64bit specific path first
if is_64bit_python:
if not tool_name:
return rr_bin_path + "64"
paths_to_check.append(rr_bin_path + "64")
# otherwise use 32bit
if not tool_name:
return rr_bin_path
paths_to_check.append(rr_bin_path)
return find_tool_in_custom_paths(paths_to_check, tool_name)
def _initialize_module_path(self):
# type: () -> None
"""Set RR modules for Python."""
# default for linux
rr_bin = self._get_rr_bin_path()
rr_bin = self.get_rr_bin_path(self._rr_path)
rr_module_path = os.path.join(rr_bin, "lx64/lib")
if sys.platform.lower() == "win32":
@ -91,51 +79,46 @@ class Api:
sys.path.append(os.path.join(self._rr_path, rr_module_path))
def create_submission(self, jobs, submitter_attributes, file_name=None):
# type: (list[RRJob], list[SubmitterParameter], str) -> SubmitFile
@staticmethod
def create_submission(jobs, submitter_attributes):
# type: (list[RRJob], list[SubmitterParameter]) -> SubmitFile
"""Create jobs submission file.
Args:
jobs (list): List of :class:`RRJob`
submitter_attributes (list): List of submitter attributes
:class:`SubmitterParameter` for whole submission batch.
file_name (str), optional): File path to write data to.
Returns:
str: XML data of job submission files.
"""
raise NotImplementedError
return SubmitFile(SubmitterParameters=submitter_attributes, Jobs=jobs)
def submit_file(self, file, mode=RR_SUBMIT_CONSOLE):
# type: (SubmitFile, int) -> None
if mode == self.RR_SUBMIT_CONSOLE:
self._submit_using_console(file)
return
# RR v7 supports only Python 2.7 so we bail out in fear
# RR v7 supports only Python 2.7, so we bail out in fear
# until there is support for Python 3 😰
raise NotImplementedError(
"Submission via RoyalRender API is not supported yet")
# self._submit_using_api(file)
def _submit_using_console(self, file):
# type: (SubmitFile) -> bool
rr_console = os.path.join(
self._get_rr_bin_path(),
"rrSubmitterconsole"
)
def _submit_using_console(self, job_file):
# type: (SubmitFile) -> None
rr_start_local = self.get_rr_bin_path(
self._rr_path, "rrStartLocal")
if sys.platform.lower() == "darwin":
if "/bin/mac64" in rr_console:
rr_console = rr_console.replace("/bin/mac64", "/bin/mac")
self.log.info("rr_console: {}".format(rr_start_local))
if sys.platform.lower() == "win32":
if "/bin/win64" in rr_console:
rr_console = rr_console.replace("/bin/win64", "/bin/win")
rr_console += ".exe"
args = [rr_console, file]
run_subprocess(" ".join(args), logger=self.log)
args = [rr_start_local, "rrSubmitterconsole", job_file]
self.log.info("Executing: {}".format(" ".join(args)))
env = os.environ
env["RR_ROOT"] = self._rr_path
run_subprocess(args, logger=self.log, env=env)
def _submit_using_api(self, file):
# type: (SubmitFile) -> None
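A rough, hypothetical-path illustration of what the new `get_rr_bin_path` builds when no `tool_name` is given: `<rr_root>/bin/<win|mac|lx>` plus a `64` suffix for 64-bit Python:

```python
import os
import sys

def sketch_rr_bin_path(rr_root):
    parts = [rr_root, "bin"]
    if sys.platform.lower() == "win32":
        parts.append("win")
    elif sys.platform.lower() == "darwin":
        parts.append("mac")
    elif sys.platform.lower().startswith("linux"):
        parts.append("lx")
    base = os.sep.join(parts)
    return base + "64" if sys.maxsize > 2 ** 32 else base

print(sketch_rr_bin_path("/opt/royalrender"))
# e.g. /opt/royalrender/bin/lx64 on 64-bit Linux Python
```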

View file

@ -0,0 +1,304 @@
# -*- coding: utf-8 -*-
"""Submitting render job to RoyalRender."""
import os
import re
import platform
from datetime import datetime
import pyblish.api
from openpype.tests.lib import is_in_tests
from openpype.pipeline.publish.lib import get_published_workfile_instance
from openpype.pipeline.publish import KnownPublishError
from openpype.modules.royalrender.api import Api as rrApi
from openpype.modules.royalrender.rr_job import (
RRJob, CustomAttribute, get_rr_platform)
from openpype.lib import (
is_running_from_build,
BoolDef,
NumberDef,
)
from openpype.pipeline import OpenPypePyblishPluginMixin
class BaseCreateRoyalRenderJob(pyblish.api.InstancePlugin,
OpenPypePyblishPluginMixin):
"""Creates separate rendering job for Royal Render"""
label = "Create Nuke Render job in RR"
order = pyblish.api.IntegratorOrder + 0.1
hosts = ["nuke"]
families = ["render", "prerender"]
targets = ["local"]
optional = True
priority = 50
chunk_size = 1
concurrent_tasks = 1
use_gpu = True
use_published = True
@classmethod
def get_attribute_defs(cls):
return [
NumberDef(
"priority",
label="Priority",
default=cls.priority,
decimals=0
),
NumberDef(
"chunk",
label="Frames Per Task",
default=cls.chunk_size,
decimals=0,
minimum=1,
maximum=1000
),
NumberDef(
"concurrency",
label="Concurrency",
default=cls.concurrent_tasks,
decimals=0,
minimum=1,
maximum=10
),
BoolDef(
"use_gpu",
default=cls.use_gpu,
label="Use GPU"
),
BoolDef(
"suspend_publish",
default=False,
label="Suspend publish"
),
BoolDef(
"use_published",
default=cls.use_published,
label="Use published workfile"
)
]
def __init__(self, *args, **kwargs):
self._rr_root = None
self.scene_path = None
self.job = None
self.submission_parameters = None
self.rr_api = None
def process(self, instance):
if not instance.data.get("farm"):
self.log.info("Skipping local instance.")
return
instance.data["attributeValues"] = self.get_attr_values_from_data(
instance.data)
# add suspend_publish attributeValue to instance data
instance.data["suspend_publish"] = instance.data["attributeValues"][
"suspend_publish"]
context = instance.context
self._rr_root = self._resolve_rr_path(context, instance.data.get(
"rrPathName")) # noqa
self.log.debug(self._rr_root)
if not self._rr_root:
raise KnownPublishError(
("Missing RoyalRender root. "
"You need to configure RoyalRender module."))
self.rr_api = rrApi(self._rr_root)
self.scene_path = context.data["currentFile"]
if self.use_published:
published_workfile = get_published_workfile_instance(context)
# fallback if nothing was set
if published_workfile is None:
self.log.warning("Falling back to workfile")
file_path = context.data["currentFile"]
else:
workfile_repre = published_workfile.data["representations"][0]
file_path = workfile_repre["published_path"]
self.scene_path = file_path
self.log.info(
"Using published scene for render {}".format(self.scene_path)
)
if not instance.data.get("expectedFiles"):
instance.data["expectedFiles"] = []
if not instance.data.get("rrJobs"):
instance.data["rrJobs"] = []
def get_job(self, instance, script_path, render_path, node_name):
"""Get RR job based on current instance.
Args:
instance (pyblish.api.Instance): Source instance.
script_path (str): Path to Nuke script.
render_path (str): Output path.
node_name (str): Name of the render node.
Returns:
RRJob: RoyalRender Job instance.
"""
start_frame = int(instance.data["frameStartHandle"])
end_frame = int(instance.data["frameEndHandle"])
batch_name = os.path.basename(script_path)
jobname = "%s - %s" % (batch_name, instance.name)
if is_in_tests():
batch_name += datetime.now().strftime("%d%m%Y%H%M%S")
render_dir = os.path.normpath(os.path.dirname(render_path))
output_filename_0 = self.pad_file_name(render_path, str(start_frame))
file_name, file_ext = os.path.splitext(
os.path.basename(output_filename_0))
custom_attributes = []
if is_running_from_build():
custom_attributes = [
CustomAttribute(
name="OpenPypeVersion",
value=os.environ.get("OPENPYPE_VERSION"))
]
# this will append expected files to instance as needed.
expected_files = self.expected_files(
instance, render_path, start_frame, end_frame)
instance.data["expectedFiles"].extend(expected_files)
job = RRJob(
Software="",
Renderer="",
SeqStart=int(start_frame),
SeqEnd=int(end_frame),
SeqStep=int(instance.data.get("byFrameStep", 1)),
SeqFileOffset=0,
Version=0,
SceneName=script_path,
IsActive=True,
ImageDir=render_dir.replace("\\", "/"),
ImageFilename=file_name,
ImageExtension=file_ext,
ImagePreNumberLetter="",
ImageSingleOutputFile=False,
SceneOS=get_rr_platform(),
Layer=node_name,
SceneDatabaseDir=script_path,
CustomSHotName=jobname,
CompanyProjectName=instance.context.data["projectName"],
ImageWidth=instance.data["resolutionWidth"],
ImageHeight=instance.data["resolutionHeight"],
CustomAttributes=custom_attributes
)
return job
def update_job_with_host_specific(self, instance, job):
"""Host specific mapping for RRJob"""
raise NotImplementedError
@staticmethod
def _resolve_rr_path(context, rr_path_name):
# type: (pyblish.api.Context, str) -> str
rr_settings = (
context.data
["system_settings"]
["modules"]
["royalrender"]
)
try:
default_servers = rr_settings["rr_paths"]
project_servers = (
context.data
["project_settings"]
["royalrender"]
["rr_paths"]
)
rr_servers = {
k: default_servers[k]
for k in project_servers
if k in default_servers
}
except (AttributeError, KeyError):
# Handle situation where we had only one url for royal render.
return context.data["defaultRRPath"][platform.system().lower()]
return rr_servers[rr_path_name][platform.system().lower()]
def expected_files(self, instance, path, start_frame, end_frame):
"""Get expected files.
This function generates expected files from the provided
path and start/end frames.
It was taken from the Deadline module, but this should
probably be handled better in a collector to support more
flexible scenarios.
Args:
instance (Instance)
path (str): Output path.
start_frame (int): Start frame.
end_frame (int): End frame.
Returns:
list: List of expected files.
"""
dir_name = os.path.dirname(path)
file = os.path.basename(path)
expected_files = []
if "#" in file:
pparts = file.split("#")
padding = "%0{}d".format(len(pparts) - 1)
file = pparts[0] + padding + pparts[-1]
if "%" not in file:
expected_files.append(path)
return expected_files
if instance.data.get("slate"):
start_frame -= 1
expected_files.extend(
os.path.join(dir_name, (file % i)).replace("\\", "/")
for i in range(start_frame, (end_frame + 1))
)
return expected_files
def pad_file_name(self, path, first_frame):
"""Return output file path with #### for padding.
RR requires the path to be formatted with # in place of numbers.
For example `/path/to/render.####.png`
Args:
path (str): path to rendered image
first_frame (str): from representation to cleanly replace with #
padding
Returns:
str
"""
self.log.debug("pad_file_name path: `{}`".format(path))
if "%" in path:
search_results = re.search(r"(%0)(\d)(d.)", path).groups()
self.log.debug("_ search_results: `{}`".format(search_results))
return int(search_results[1])
if "#" in path:
self.log.debug("already padded: `{}`".format(path))
return path
if first_frame:
padding = len(first_frame)
path = path.replace(first_frame, "#" * padding)
return path
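A worked example of the two padding conventions handled above (hypothetical file names):

```python
# '#'-padded name -> printf padding used to expand expected frames:
file = "shot010.####.exr"
pparts = file.split("#")
padding = "%0{}d".format(len(pparts) - 1)      # '%04d'
printf_name = pparts[0] + padding + pparts[-1]
print([printf_name % i for i in range(1001, 1003)])
# ['shot010.1001.exr', 'shot010.1002.exr']

# pad_file_name() goes the other way: first frame back to '#' padding
first_frame = "1001"
print("shot010.1001.exr".replace(first_frame, "#" * len(first_frame)))
# shot010.####.exr
```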

View file

@ -1,23 +0,0 @@
# -*- coding: utf-8 -*-
"""Collect default Deadline server."""
import pyblish.api
class CollectDefaultRRPath(pyblish.api.ContextPlugin):
"""Collect default Royal Render path."""
order = pyblish.api.CollectorOrder
label = "Default Royal Render Path"
def process(self, context):
try:
rr_module = context.data.get(
"openPypeModules")["royalrender"]
except AttributeError:
msg = "Cannot get OpenPype Royal Render module."
self.log.error(msg)
raise AssertionError(msg)
# get default deadline webservice url from deadline module
self.log.debug(rr_module.rr_paths)
context.data["defaultRRPath"] = rr_module.rr_paths["default"] # noqa: E501

View file

@ -5,29 +5,31 @@ import pyblish.api
class CollectRRPathFromInstance(pyblish.api.InstancePlugin):
"""Collect RR Path from instance."""
order = pyblish.api.CollectorOrder + 0.01
label = "Royal Render Path from the Instance"
families = ["rendering"]
order = pyblish.api.CollectorOrder
label = "Collect Royal Render path name from the Instance"
families = ["render", "prerender", "renderlayer"]
def process(self, instance):
instance.data["rrPath"] = self._collect_rr_path(instance)
instance.data["rrPathName"] = self._collect_rr_path_name(instance)
self.log.info(
"Using {} for submission.".format(instance.data["rrPath"]))
"Using '{}' for submission.".format(instance.data["rrPathName"]))
@staticmethod
def _collect_rr_path(render_instance):
def _collect_rr_path_name(instance):
# type: (pyblish.api.Instance) -> str
"""Get Royal Render path from render instance."""
"""Get Royal Render pat name from render instance."""
rr_settings = (
render_instance.context.data
instance.context.data
["system_settings"]
["modules"]
["royalrender"]
)
if not instance.data.get("rrPaths"):
return "default"
try:
default_servers = rr_settings["rr_paths"]
project_servers = (
render_instance.context.data
instance.context.data
["project_settings"]
["royalrender"]
["rr_paths"]
@ -40,10 +42,6 @@ class CollectRRPathFromInstance(pyblish.api.InstancePlugin):
except (AttributeError, KeyError):
# Handle situation where we had only one url for royal render.
return render_instance.context.data["defaultRRPath"]
return rr_settings["rr_paths"]["default"]
return rr_servers[
list(rr_servers.keys())[
int(render_instance.data.get("rrPaths"))
]
]
return list(rr_servers.keys())[int(instance.data.get("rrPaths"))]
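A hedged sketch of the resolution above with assumed settings: without an explicit `rrPaths` value the instance falls back to the `default` key, otherwise the configured key is picked by index:

```python
default_servers = {"default": "/opt/rr", "mounted": "/mnt/rr"}  # assumed settings
project_servers = ["default", "mounted"]
rr_servers = {
    k: default_servers[k] for k in project_servers if k in default_servers
}

rr_paths = 1  # hypothetical instance.data.get("rrPaths") value
if not rr_paths:
    print("default")
else:
    print(list(rr_servers.keys())[int(rr_paths)])  # 'mounted'
```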

View file

@ -71,7 +71,7 @@ class CollectSequencesFromJob(pyblish.api.ContextPlugin):
"""Gather file sequences from job directory.
When "OPENPYPE_PUBLISH_DATA" environment variable is set these paths
(folders or .json files) are parsed for image sequences. Otherwise the
(folders or .json files) are parsed for image sequences. Otherwise, the
current working directory is searched for file sequences.
"""

View file

@ -0,0 +1,42 @@
# -*- coding: utf-8 -*-
"""Submitting render job to RoyalRender."""
import os
from maya.OpenMaya import MGlobal
from openpype.modules.royalrender import lib
from openpype.pipeline.farm.tools import iter_expected_files
class CreateMayaRoyalRenderJob(lib.BaseCreateRoyalRenderJob):
label = "Create Maya Render job in RR"
hosts = ["maya"]
families = ["renderlayer"]
def update_job_with_host_specific(self, instance, job):
job.Software = "Maya"
job.Version = "{0:.2f}".format(MGlobal.apiVersion() / 10000)
if instance.data.get("cameras"):
job.Camera = instance.data["cameras"][0].replace("'", '"')
workspace = instance.context.data["workspaceDir"]
job.SceneDatabaseDir = workspace
return job
def process(self, instance):
"""Plugin entry point."""
super(CreateMayaRoyalRenderJob, self).process(instance)
expected_files = instance.data["expectedFiles"]
first_file_path = next(iter_expected_files(expected_files))
output_dir = os.path.dirname(first_file_path)
instance.data["outputDir"] = output_dir
layer = instance.data["setMembers"] # type: str
layer_name = layer.removeprefix("rs_")
job = self.get_job(instance, self.scene_path, first_file_path,
layer_name)
job = self.update_job_with_host_specific(instance, job)
instance.data["rrJobs"].append(job)

View file

@ -0,0 +1,69 @@
# -*- coding: utf-8 -*-
"""Submitting render job to RoyalRender."""
import re
from openpype.modules.royalrender import lib
class CreateNukeRoyalRenderJob(lib.BaseCreateRoyalRenderJob):
"""Creates separate rendering job for Royal Render"""
label = "Create Nuke Render job in RR"
hosts = ["nuke"]
families = ["render", "prerender"]
def process(self, instance):
super(CreateNukeRoyalRenderJob, self).process(instance)
# redefinition of families
if "render" in instance.data["family"]:
instance.data["family"] = "write"
instance.data["families"].insert(0, "render2d")
elif "prerender" in instance.data["family"]:
instance.data["family"] = "write"
instance.data["families"].insert(0, "prerender")
jobs = self.create_jobs(instance)
for job in jobs:
job = self.update_job_with_host_specific(instance, job)
instance.data["rrJobs"].append(job)
def update_job_with_host_specific(self, instance, job):
nuke_version = re.search(
r"\d+\.\d+", instance.context.data.get("hostVersion"))
job.Software = "Nuke"
job.Version = nuke_version.group()
return job
def create_jobs(self, instance):
"""Nuke creates multiple RR jobs - for baking etc."""
# get output path
render_path = instance.data['path']
script_path = self.scene_path
node = instance.data["transientData"]["node"]
# main job
jobs = [
self.get_job(
instance,
script_path,
render_path,
node.name()
)
]
for baking_script in instance.data.get("bakingNukeScripts", []):
render_path = baking_script["bakeRenderPath"]
script_path = baking_script["bakeScriptPath"]
exe_node_name = baking_script["bakeWriteNodeName"]
jobs.append(self.get_job(
instance,
script_path,
render_path,
exe_node_name
))
return jobs
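The resulting job list, illustrated with assumed instance data (one main write-node job plus one per baking script):

```python
instance_data = {
    "path": "/renders/main.%04d.exr",
    "bakingNukeScripts": [{
        "bakeRenderPath": "/renders/bake.%04d.mov",
        "bakeScriptPath": "/work/bake.nk",
        "bakeWriteNodeName": "Write_bake",
    }],
}

# (script_path, render_path, node_name) triples fed to get_job()
jobs = [("/work/main.nk", instance_data["path"], "Write_main")]
jobs.extend(
    (b["bakeScriptPath"], b["bakeRenderPath"], b["bakeWriteNodeName"])
    for b in instance_data["bakingNukeScripts"]
)
print(len(jobs), "RR jobs")  # 2 RR jobs
```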

View file

@ -0,0 +1,286 @@
# -*- coding: utf-8 -*-
"""Create publishing job on RoyalRender."""
import os
import attr
import json
import re
import pyblish.api
from openpype.modules.royalrender.rr_job import (
RRJob,
RREnvList,
get_rr_platform
)
from openpype.pipeline.publish import KnownPublishError
from openpype.pipeline import (
legacy_io,
)
from openpype.pipeline.farm.pyblish_functions import (
create_skeleton_instance,
create_instances_for_aov,
attach_instances_to_subset,
prepare_representations,
create_metadata_path
)
from openpype.pipeline import publish
class CreatePublishRoyalRenderJob(pyblish.api.InstancePlugin,
publish.ColormanagedPyblishPluginMixin):
"""Creates job which publishes rendered files to publish area.
Job waits until all rendering jobs are finished, triggers `publish` command
where it reads from a prepared .json file with metadata about what should
be published, renames prepared images and publishes them.
When triggered it produces a .log file next to the .json file in the work area.
"""
label = "Create publish job in RR"
order = pyblish.api.IntegratorOrder + 0.2
icon = "tractor"
targets = ["local"]
hosts = ["fusion", "maya", "nuke", "celaction", "aftereffects", "harmony"]
families = ["render.farm", "prerender.farm",
"renderlayer", "imagesequence", "vrayscene"]
aov_filter = {"maya": [r".*([Bb]eauty).*"],
"aftereffects": [r".*"], # for everything from AE
"harmony": [r".*"], # for everything from AE
"celaction": [r".*"]}
skip_integration_repre_list = []
# mapping of instance properties to be transferred to new instance
# for every specified family
instance_transfer = {
"slate": ["slateFrames", "slate"],
"review": ["lutPath"],
"render2d": ["bakingNukeScripts", "version"],
"renderlayer": ["convertToScanline"]
}
# list of family names to transfer to new family if present
families_transfer = ["render3d", "render2d", "ftrack", "slate"]
environ_job_filter = [
"OPENPYPE_METADATA_FILE"
]
environ_keys = [
"FTRACK_API_USER",
"FTRACK_API_KEY",
"FTRACK_SERVER",
"AVALON_APP_NAME",
"OPENPYPE_USERNAME",
"OPENPYPE_SG_USER",
"OPENPYPE_MONGO"
]
priority = 50
def process(self, instance):
context = instance.context
self.context = context
self.anatomy = instance.context.data["anatomy"]
if not instance.data.get("farm"):
self.log.info("Skipping local instance.")
return
instance_skeleton_data = create_skeleton_instance(
instance,
families_transfer=self.families_transfer,
instance_transfer=self.instance_transfer)
do_not_add_review = False
if instance.data.get("review") is False:
self.log.debug("Instance has review explicitly disabled.")
do_not_add_review = True
if isinstance(instance.data.get("expectedFiles")[0], dict):
instances = create_instances_for_aov(
instance, instance_skeleton_data,
self.aov_filter, self.skip_integration_repre_list,
do_not_add_review)
else:
representations = prepare_representations(
instance_skeleton_data,
instance.data.get("expectedFiles"),
self.anatomy,
self.aov_filter,
self.skip_integration_repre_list,
do_not_add_review,
instance.context,
self
)
if "representations" not in instance_skeleton_data.keys():
instance_skeleton_data["representations"] = []
# add representation
instance_skeleton_data["representations"] += representations
instances = [instance_skeleton_data]
# attach instances to subset
if instance.data.get("attachTo"):
instances = attach_instances_to_subset(
instance.data.get("attachTo"), instances
)
self.log.info("Creating RoyalRender Publish job ...")
if not instance.data.get("rrJobs"):
self.log.error(("There is no prior RoyalRender "
"job on the instance."))
raise KnownPublishError(
"Can't create publish job without prior rendering jobs first")
rr_job = self.get_job(instance, instances)
instance.data["rrJobs"].append(rr_job)
# publish job file
publish_job = {
"asset": instance_skeleton_data["asset"],
"frameStart": instance_skeleton_data["frameStart"],
"frameEnd": instance_skeleton_data["frameEnd"],
"fps": instance_skeleton_data["fps"],
"source": instance_skeleton_data["source"],
"user": instance.context.data["user"],
"version": instance.context.data["version"], # workfile version
"intent": instance.context.data.get("intent"),
"comment": instance.context.data.get("comment"),
"job": attr.asdict(rr_job),
"session": legacy_io.Session.copy(),
"instances": instances
}
metadata_path, rootless_metadata_path = \
create_metadata_path(instance, self.anatomy)
self.log.info("Writing json file: {}".format(metadata_path))
with open(metadata_path, "w") as f:
json.dump(publish_job, f, indent=4, sort_keys=True)
def get_job(self, instance, instances):
"""Create RR publishing job.
Based on the provided original instance and additional instances,
create a publishing job and return it to be submitted to the farm.
Args:
instance (Instance): Original instance.
instances (list of Instance): List of instances to
be published on farm.
Returns:
RRJob: RoyalRender publish job.
"""
data = instance.data.copy()
subset = data["subset"]
jobname = "Publish - {subset}".format(subset=subset)
# Transfer the environment from the original job to this dependent
# job, so they use the same environment
metadata_path, rootless_metadata_path = \
create_metadata_path(instance, self.anatomy)
anatomy_data = instance.context.data["anatomyData"]
environment = RREnvList({
"AVALON_PROJECT": anatomy_data["project"]["name"],
"AVALON_ASSET": anatomy_data["asset"],
"AVALON_TASK": anatomy_data["task"]["name"],
"OPENPYPE_USERNAME": anatomy_data["user"]
})
# add environments from self.environ_keys
for env_key in self.environ_keys:
if os.getenv(env_key):
environment[env_key] = os.environ[env_key]
# pass environment keys from self.environ_job_filter
# and collect all pre_ids to wait for
job_environ = {}
jobs_pre_ids = []
for job in instance.data["rrJobs"]: # type: RRJob
if job.rrEnvList:
job_environ.update(
dict(RREnvList.parse(job.rrEnvList))
)
jobs_pre_ids.append(job.PreID)
for env_j_key in self.environ_job_filter:
if job_environ.get(env_j_key):
environment[env_j_key] = job_environ[env_j_key]
priority = self.priority or instance.data.get("priority", 50)
# rr requires absolute path or all jobs won't show up in rControl
abs_metadata_path = self.anatomy.fill_root(rootless_metadata_path)
# the command line is set in E01__OpenPype__PublishJob.cfg; here we
# only add additional logging
args = [
">", os.path.join(os.path.dirname(abs_metadata_path),
"rr_out.log"),
"2>&1"
]
job = RRJob(
Software="OpenPype",
Renderer="Once",
SeqStart=1,
SeqEnd=1,
SeqStep=1,
SeqFileOffset=0,
Version=self._sanitize_version(os.environ.get("OPENPYPE_VERSION")),
SceneName=abs_metadata_path,
# command line arguments
CustomAddCmdFlags=" ".join(args),
IsActive=True,
ImageFilename="execOnce.file",
ImageDir="<SceneFolder>",
ImageExtension="",
ImagePreNumberLetter="",
SceneOS=get_rr_platform(),
rrEnvList=environment.serialize(),
Priority=priority,
CustomSHotName=jobname,
CompanyProjectName=instance.context.data["projectName"]
)
# add assembly jobs as dependencies
if instance.data.get("tileRendering"):
self.log.info("Adding tile assembly jobs as dependencies...")
job.WaitForPreIDs += instance.data.get("assemblySubmissionJobs")
elif instance.data.get("bakingSubmissionJobs"):
self.log.info("Adding baking submission jobs as dependencies...")
job.WaitForPreIDs += instance.data["bakingSubmissionJobs"]
else:
job.WaitForPreIDs += jobs_pre_ids
return job
def _sanitize_version(self, version):
"""Returns version in format MAJOR.MINORPATCH
3.15.7-nightly.2 >> 3.157
"""
VERSION_REGEX = re.compile(
r"(?P<major>0|[1-9]\d*)"
r"\.(?P<minor>0|[1-9]\d*)"
r"\.(?P<patch>0|[1-9]\d*)"
r"(?:-(?P<prerelease>[a-zA-Z\d\-.]*))?"
r"(?:\+(?P<buildmetadata>[a-zA-Z\d\-.]*))?"
)
valid_parts = VERSION_REGEX.findall(version)
if len(valid_parts) != 1:
# Return the version string unchanged if it is not a valid version
return version
# Unpack found version
major, minor, patch, pre, post = valid_parts[0]
return "{}.{}{}".format(major, minor, patch)

View file

@ -0,0 +1,131 @@
# -*- coding: utf-8 -*-
"""Submit jobs to RoyalRender."""
import tempfile
import platform
import pyblish.api
from openpype.modules.royalrender.api import (
RRJob,
Api as rrApi,
SubmitterParameter
)
from openpype.pipeline.publish import KnownPublishError
class SubmitJobsToRoyalRender(pyblish.api.ContextPlugin):
"""Find all jobs, create submission XML and submit it to RoyalRender."""
label = "Submit jobs to RoyalRender"
order = pyblish.api.IntegratorOrder + 0.3
targets = ["local"]
def __init__(self):
super(SubmitJobsToRoyalRender, self).__init__()
self._rr_root = None
self._rr_api = None
self._submission_parameters = []
def process(self, context):
rr_settings = (
context.data
["system_settings"]
["modules"]
["royalrender"]
)
if rr_settings["enabled"] is not True:
self.log.warning("RoyalRender modules is disabled.")
return
# iterate over all instances and try to find RRJobs
jobs = []
instance_rr_path = None
for instance in context:
if isinstance(instance.data.get("rrJob"), RRJob):
jobs.append(instance.data.get("rrJob"))
if instance.data.get("rrJobs"):
if all(
isinstance(job, RRJob)
for job in instance.data.get("rrJobs")):
jobs += instance.data.get("rrJobs")
if instance.data.get("rrPathName"):
instance_rr_path = instance.data["rrPathName"]
if jobs:
self._rr_root = self._resolve_rr_path(context, instance_rr_path)
if not self._rr_root:
raise KnownPublishError(
("Missing RoyalRender root. "
"You need to configure RoyalRender module."))
self._rr_api = rrApi(self._rr_root)
self._submission_parameters = self.get_submission_parameters()
self.process_submission(jobs)
return
self.log.info("No RoyalRender jobs found")
def process_submission(self, jobs):
# type: ([RRJob]) -> None
idx_pre_id = 0
for job in jobs:
job.PreID = idx_pre_id
if idx_pre_id > 0:
job.WaitForPreIDs.append(idx_pre_id - 1)
idx_pre_id += 1
submission = rrApi.create_submission(
jobs,
self._submission_parameters)
xml = tempfile.NamedTemporaryFile(suffix=".xml", delete=False)
with open(xml.name, "w") as f:
f.write(submission.serialize())
self.log.info("submitting job(s) file: {}".format(xml.name))
self._rr_api.submit_file(file=xml.name)
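# Illustration of the PreID chaining in process_submission above,
# assuming three jobs were collected:
#   jobs[0].PreID == 0, WaitForPreIDs == []
#   jobs[1].PreID == 1, WaitForPreIDs == [0]
#   jobs[2].PreID == 2, WaitForPreIDs == [1]
# so RR processes the jobs strictly in submission order.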
def create_file(self, name, ext, contents=None):
temp = tempfile.NamedTemporaryFile(
dir=tempfile.gettempdir(),
suffix=ext,
prefix=name + '.',
delete=False,
)
if contents:
with open(temp.name, 'w') as f:
f.write(contents)
return temp.name
def get_submission_parameters(self):
return [SubmitterParameter("RequiredMemory", "0")]
@staticmethod
def _resolve_rr_path(context, rr_path_name):
# type: (pyblish.api.Context, str) -> str
rr_settings = (
context.data
["system_settings"]
["modules"]
["royalrender"]
)
try:
default_servers = rr_settings["rr_paths"]
project_servers = (
context.data
["project_settings"]
["royalrender"]
["rr_paths"]
)
rr_servers = {
k: default_servers[k]
for k in project_servers
if k in default_servers
}
except (AttributeError, KeyError):
# Handle the situation where we had only one URL for Royal Render.
return context.data["defaultRRPath"][platform.system().lower()]
return rr_servers[rr_path_name][platform.system().lower()]
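# Illustrative resolution, assuming the default settings shipped with the
# module (see "rr_paths" in the system settings defaults):
#   rr_settings["rr_paths"] == {"default": {"windows": "C:\\RR8",
#                                           "darwin": "/Volumes/share/RR8",
#                                           "linux": "/mnt/studio/RR8"}}
#   _resolve_rr_path(context, "default") on Linux -> "/mnt/studio/RR8"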

View file

@ -1,5 +1,6 @@
# -*- coding: utf-8 -*-
"""Python wrapper for RoyalRender XML job file."""
import sys
from xml.dom import minidom as md
import attr
from collections import namedtuple, OrderedDict
@ -8,8 +9,36 @@ from collections import namedtuple, OrderedDict
CustomAttribute = namedtuple("CustomAttribute", ["name", "value"])
def get_rr_platform():
# type: () -> str
"""Returns name of platform used in rr jobs."""
if sys.platform.lower() in ["win32", "win64"]:
return "windows"
elif sys.platform.lower() == "darwin":
return "mac"
else:
return "linux"
class RREnvList(dict):
def serialize(self):
# <rrEnvList>VariableA=ValueA~~~VariableB=ValueB</rrEnvList>
return "~~~".join(
["{}={}".format(k, v) for k, v in sorted(self.items())])
@staticmethod
def parse(data):
# type: (str) -> RREnvList
"""Parse rrEnvList string and return it as RREnvList object."""
out = RREnvList()
for var in data.split("~~~"):
k, v = var.split("=")
out[k] = v
return out
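# Round-trip sketch of the serialized format (illustrative values):
#   RREnvList({"AVALON_PROJECT": "demo", "AVALON_TASK": "comp"}).serialize()
#   -> "AVALON_PROJECT=demo~~~AVALON_TASK=comp"
# Note that parse() assumes neither keys nor values contain "=" or "~~~".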
@attr.s
class RRJob:
class RRJob(object):
"""Mapping of Royal Render job file to a data class."""
# Required
@ -35,7 +64,7 @@ class RRJob:
# Is the job enabled for submission?
# enabled by default
IsActive = attr.ib() # type: str
IsActive = attr.ib() # type: bool
# Sequence settings of this job
SeqStart = attr.ib() # type: int
@ -60,7 +89,7 @@ class RRJob:
# If you render a single file, e.g. Quicktime or Avi, then you have to
# set this value. Videos have to be rendered at once on one client.
ImageSingleOutputFile = attr.ib(default="false") # type: str
ImageSingleOutputFile = attr.ib(default=False) # type: bool
# Semi-Required (required for some render applications)
# -----------------------------------------------------
@ -87,7 +116,7 @@ class RRJob:
# Frame Padding of the frame number in the rendered filename.
# Some render config files are setting the padding at render time.
ImageFramePadding = attr.ib(default=None) # type: str
ImageFramePadding = attr.ib(default=None) # type: int
# Some render applications support overriding the image format at
# the render commandline.
@ -108,7 +137,7 @@ class RRJob:
# jobs send from this machine. If a job with the PreID was found, then
# this jobs waits for the other job. Note: This flag can be used multiple
# times to wait for multiple jobs.
WaitForPreID = attr.ib(default=None) # type: int
WaitForPreIDs = attr.ib(factory=list) # type: list
# List of submitter options per job
# list item must be of `SubmitterParameter` type
@ -120,6 +149,9 @@ class RRJob:
# list item must be of `CustomAttribute` named tuple
CustomAttributes = attr.ib(factory=list) # type: list
# This is used to hold command line arguments for Execute job
CustomAddCmdFlags = attr.ib(default=None) # type: str
# Additional information for subsequent publish script and
# for better display in rrControl
UserName = attr.ib(default=None) # type: str
@ -129,6 +161,7 @@ class RRJob:
CustomUserInfo = attr.ib(default=None) # type: str
SubmitMachine = attr.ib(default=None) # type: str
Color_ID = attr.ib(default=2) # type: int
CompanyProjectName = attr.ib(default=None) # type: str
RequiredLicenses = attr.ib(default=None) # type: str
@ -137,6 +170,10 @@ class RRJob:
TotalFrames = attr.ib(default=None) # type: int
Tiled = attr.ib(default=None) # type: str
# Environment
# only used in RR 8.3 and newer
rrEnvList = attr.ib(default=None) # type: str
class SubmitterParameter:
"""Wrapper for Submitter Parameters."""
@ -160,7 +197,7 @@ class SubmitterParameter:
@attr.s
class SubmitFile:
class SubmitFile(object):
"""Class wrapping Royal Render submission XML file."""
# Syntax version of the submission file.
@ -169,11 +206,11 @@ class SubmitFile:
# Delete submission file after processing
DeleteXML = attr.ib(default=1) # type: int
# List of submitter options per job
# List of the submitter options per job.
# list item must be of `SubmitterParameter` type
SubmitterParameters = attr.ib(factory=list) # type: list
# List of job is submission batch.
# List of the jobs in submission batch.
# list item must be of type `RRJob`
Jobs = attr.ib(factory=list) # type: list
@ -225,7 +262,7 @@ class SubmitFile:
# <SubmitterParameter>foo=bar~baz~goo</SubmitterParameter>
self._process_submitter_parameters(
self.SubmitterParameters, root, job_file)
root.appendChild(job_file)
for job in self.Jobs: # type: RRJob
if not isinstance(job, RRJob):
raise AttributeError(
@ -241,16 +278,28 @@ class SubmitFile:
job, dict_factory=OrderedDict, filter=filter_data)
serialized_job.pop("CustomAttributes")
serialized_job.pop("SubmitterParameters")
# we are handling `WaitForPreIDs` separately.
wait_pre_ids = serialized_job.pop("WaitForPreIDs", [])
for custom_attr in job_custom_attributes: # type: CustomAttribute
serialized_job["Custom{}".format(
custom_attr.name)] = custom_attr.value
for item, value in serialized_job.items():
xml_attr = root.create(item)
xml_attr = root.createElement(item)
xml_attr.appendChild(
root.createTextNode(value)
root.createTextNode(str(value))
)
xml_job.appendChild(xml_attr)
# WaitForPreID - can be used multiple times
for pre_id in wait_pre_ids:
xml_attr = root.createElement("WaitForPreID")
xml_attr.appendChild(
root.createTextNode(str(pre_id))
)
xml_job.appendChild(xml_attr)
job_file.appendChild(xml_job)
return root.toprettyxml(indent="\t")

Binary file not shown.

After

Width:  |  Height:  |  Size: 2 KiB

View file

@ -0,0 +1,71 @@
IconApp= E01__OpenPype.png
Name= OpenPype
rendererName= Once
Version= 1
Version_Minor= 0
Type=Execute
TYPEv9=Execute
ExecuteJobType=Once
################################# [Windows] [Linux] [Osx] ##################################
CommandLine=<envFileExecute <rrEnvFile>>
CommandLine=<rrEnvLine>
::win CommandLine= set "CUDA_VISIBLE_DEVICES=<GpuListC>"
::lx CommandLine= setenv CUDA_VISIBLE_DEVICES <GpuListC>
::osx CommandLine= setenv CUDA_VISIBLE_DEVICES <GpuListC>
CommandLine=
<SetEnvGlobal>
CommandLine=
<SetEnvSoft>
CommandLine=
<ResetExitCode>
CommandLine= "<Exe>" --headless publish <Scene>
--targets royalrender
--targets farm
<AdditionalCommandlineParam>
<CustomFlags>
CommandLine=
<CheckExitCode>
################################## Render Settings ##################################
################################## Submitter Settings ##################################
StartMultipleInstances= 0~0
SceneFileExtension= *.json
AllowImageNameChange= 0
AllowImageDirChange= 0
SequenceDivide= 0~1
PPSequenceCheck=0~0
PPCreateSmallVideo=0~0
PPCreateFullVideo=0~0
AllowLocalRenderOut= 0~0
################################## Client Settings ##################################
IconApp=E01__OpenPype.png
licenseFailLine=
errorSearchLine=
permanentErrorSearchLine =
Frozen_MinCoreUsage=0.3
Frozen_Minutes=30

View file

@ -0,0 +1,2 @@
IconApp= E01__OpenPype.png
Name= OpenPype

View file

@ -0,0 +1,12 @@
[Windows]
Executable= openpype_console.exe
Path= OS; <ProgramFiles(x86)>\OpenPype\*\openpype_console.exe
Path= 32; <ProgramFiles(x86)>\OpenPype\*\openpype_console.exe
[Linux]
Executable= openpype_console
Path= OS; /opt/openpype/*/openpype_console
[Mac]
Executable= openpype_console
Path= OS; /Applications/OpenPype*/Content/MacOS/openpype_console

View file

@ -0,0 +1,11 @@
PrePostType= pre
CommandLine=
<ResetExitCode>
CommandLine= <OsxApp "<rrBin64>rrPythonconsole" > "<RR_DIR>render_apps/_prepost_scripts/PreOpenPypeInjectEnvironments.py"
CommandLine=
<CheckExitCode> <FN>
CommandLine= "<RenderAppPath:OpenPype>"
CommandLine=
<CheckExitCode> <FN>

View file

@ -0,0 +1,4 @@
# -*- coding: utf-8 -*-
import os
os.environ["OPENYPYPE_TESTVAR"] = "OpenPype was here"

View file

@ -0,0 +1,881 @@
import copy
import attr
import pyblish.api
import os
import clique
from copy import deepcopy
import re
import warnings
from openpype.pipeline import (
get_current_project_name,
get_representation_path,
Anatomy,
)
from openpype.client import (
get_last_version_by_subset_name,
get_representations
)
from openpype.lib import Logger
from openpype.pipeline.publish import KnownPublishError
from openpype.pipeline.farm.patterning import match_aov_pattern
@attr.s
class TimeData(object):
"""Structure used to handle time related data."""
start = attr.ib(type=int)
end = attr.ib(type=int)
fps = attr.ib()
step = attr.ib(default=1, type=int)
handle_start = attr.ib(default=0, type=int)
handle_end = attr.ib(default=0, type=int)
def remap_source(path, anatomy):
"""Try to remap path to rootless path.
Args:
path (str): Path to be remapped to rootless.
anatomy (Anatomy): Anatomy object to handle remapping
itself.
Returns:
str: Remapped path.
Throws:
ValueError: if the root cannot be found.
"""
success, rootless_path = (
anatomy.find_root_template_from_path(path)
)
if success:
source = rootless_path
else:
raise ValueError(
"Root from template path cannot be found: {}".format(path))
return source
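# Illustrative behaviour, assuming a root named "work" mounted at
# /mnt/projects:
#   remap_source("/mnt/projects/demo/scene.ma", anatomy)
#   -> "{root[work]}/demo/scene.ma"
# A path outside any configured root raises ValueError.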
def extend_frames(asset, subset, start, end):
"""Get latest version of asset nad update frame range.
Based on minimum and maximum values.
Arguments:
asset (str): asset name
subset (str): subset name
start (int): start frame
end (int): end frame
Returns:
(int, int): update frame start/end
"""
project_name = get_current_project_name()
version = get_last_version_by_subset_name(
project_name,
subset,
asset_name=asset
)
# Previous start / end frames for comparison
prev_start = version["data"]["frameStart"]
prev_end = version["data"]["frameEnd"]
updated_start = min(start, prev_start)
updated_end = max(end, prev_end)
return updated_start, updated_end
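# Illustrative: if the latest published version covered frames 1001-1050
# and the current render covers 1020-1080, the extended range returned is
# (1001, 1080).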
def get_time_data_from_instance_or_context(instance):
"""Get time data from instance (or context).
If time data is not found on instance, data from context will be used.
Args:
instance (pyblish.api.Instance): Source instance.
Returns:
TimeData: dataclass holding time information.
"""
return TimeData(
start=(instance.data.get("frameStart") or
instance.context.data.get("frameStart")),
end=(instance.data.get("frameEnd") or
instance.context.data.get("frameEnd")),
fps=(instance.data.get("fps") or
instance.context.data.get("fps")),
handle_start=(instance.data.get("handleStart") or
instance.context.data.get("handleStart")), # noqa: E501
handle_end=(instance.data.get("handleEnd") or
instance.context.data.get("handleEnd"))
)
def get_transferable_representations(instance):
"""Transfer representations from original instance.
This will get all representations on the original instance that
are flagged with `publish_on_farm` and return them to be included
on skeleton instance if needed.
Args:
instance (pyblish.api.Instance): Original instance to be processed.
Return:
list of dicts: List of transferable representations.
"""
anatomy = instance.context.data["anatomy"] # type: Anatomy
to_transfer = []
for representation in instance.data.get("representations", []):
if "publish_on_farm" not in representation.get("tags"):
continue
trans_rep = representation.copy()
staging_dir = trans_rep.get("stagingDir")
if staging_dir:
try:
trans_rep["stagingDir"] = remap_source(staging_dir, anatomy)
except ValueError:
log = Logger.get_logger("farm_publishing")
log.warning(
("Could not find root path for remapping \"{}\". "
"This may cause issues on farm.").format(staging_dir))
to_transfer.append(trans_rep)
return to_transfer
def create_skeleton_instance(
instance, families_transfer=None, instance_transfer=None):
# type: (pyblish.api.Instance, list, dict) -> dict
"""Create skeleton instance from original instance data.
This will create dictionary containing skeleton
- common - data used for publishing rendered instances.
This skeleton instance is then extended with additional data
and serialized to be processed by farm job.
Args:
instance (pyblish.api.Instance): Original instance to
be used as a source of data.
families_transfer (list): List of family names to transfer
from the original instance to the skeleton.
instance_transfer (dict): Dict with keys as families and
values as a list of property names to transfer to the
new skeleton.
Returns:
dict: Dictionary with skeleton instance data.
"""
# list of family names to transfer to new family if present
context = instance.context
data = instance.data.copy()
anatomy = instance.context.data["anatomy"] # type: Anatomy
# get time related data from instance (or context)
time_data = get_time_data_from_instance_or_context(instance)
if data.get("extendFrames", False):
time_data.start, time_data.end = extend_frames(
data["asset"],
data["subset"],
time_data.start,
time_data.end,
)
source = data.get("source") or context.data.get("currentFile")
success, rootless_path = (
anatomy.find_root_template_from_path(source)
)
if success:
source = rootless_path
else:
# `rootless_path` is not set to `source` if none of roots match
log = Logger.get_logger("farm_publishing")
log.warning(("Could not find root path for remapping \"{}\". "
"This may cause issues.").format(source))
family = ("render"
if "prerender" not in instance.data["families"]
else "prerender")
families = [family]
# pass review to families if marked as review
if data.get("review"):
families.append("review")
instance_skeleton_data = {
"family": family,
"subset": data["subset"],
"families": families,
"asset": data["asset"],
"frameStart": time_data.start,
"frameEnd": time_data.end,
"handleStart": time_data.handle_start,
"handleEnd": time_data.handle_end,
"frameStartHandle": time_data.start - time_data.handle_start,
"frameEndHandle": time_data.end + time_data.handle_end,
"comment": data.get("comment"),
"fps": time_data.fps,
"source": source,
"extendFrames": data.get("extendFrames"),
"overrideExistingFrame": data.get("overrideExistingFrame"),
"pixelAspect": data.get("pixelAspect", 1),
"resolutionWidth": data.get("resolutionWidth", 1920),
"resolutionHeight": data.get("resolutionHeight", 1080),
"multipartExr": data.get("multipartExr", False),
"jobBatchName": data.get("jobBatchName", ""),
"useSequenceForReview": data.get("useSequenceForReview", True),
# map inputVersions `ObjectId` -> `str` so json supports it
"inputVersions": list(map(str, data.get("inputVersions", []))),
"colorspace": data.get("colorspace")
}
# skip locking version if we are creating v01
instance_version = data.get("version") # take this if exists
if instance_version != 1:
instance_skeleton_data["version"] = instance_version
# transfer specific families from original instance to new render
for item in families_transfer:
if item in instance.data.get("families", []):
instance_skeleton_data["families"] += [item]
# transfer specific properties from original instance based on
# mapping dictionary `instance_transfer`
for key, values in instance_transfer.items():
if key in instance.data.get("families", []):
for v in values:
instance_skeleton_data[v] = instance.data.get(v)
representations = get_transferable_representations(instance)
instance_skeleton_data["representations"] = []
instance_skeleton_data["representations"] += representations
return instance_skeleton_data
def _add_review_families(families):
"""Adds review flag to families.
Handles situation when new instances are created which should have review
in families. In that case they should have 'ftrack' too.
TODO: This is ugly and needs to be refactored. Ftrack family should be
added in different way (based on if the module is enabled?)
"""
# if we have one representation with preview tag
# flag whole instance for review and for ftrack
if "ftrack" not in families and os.environ.get("FTRACK_SERVER"):
families.append("ftrack")
if "review" not in families:
families.append("review")
return families
def prepare_representations(skeleton_data, exp_files, anatomy, aov_filter,
skip_integration_repre_list,
do_not_add_review,
context,
color_managed_plugin):
"""Create representations for file sequences.
This will return representations of expected files if they are not
in a hierarchy of AOVs. In most cases there should be only one
sequence of files, but if not, we create a representation from each.
Arguments:
skeleton_data (dict): instance data for which we are
setting representations
exp_files (list): list of expected files
anatomy (Anatomy):
aov_filter (dict): add review for specific aov names
skip_integration_repre_list (list): exclude specific extensions,
do_not_add_review (bool): explicitly skip review
color_managed_plugin (publish.ColormanagedPyblishPluginMixin)
Returns:
list of representations
"""
representations = []
host_name = os.environ.get("AVALON_APP", "")
collections, remainders = clique.assemble(exp_files)
log = Logger.get_logger("farm_publishing")
# create representation for every collected sequence
for collection in collections:
ext = collection.tail.lstrip(".")
preview = False
# TODO 'useSequenceForReview' is a temporary solution which does
# not work in 100% of cases. We must be able to tell more explicitly
# what the expected files contain and from which of them the review
# should be made.
# - the "review" tag is never added when it is set to 'False'
if skeleton_data["useSequenceForReview"]:
# toggle preview on if multipart is on
if skeleton_data.get("multipartExr", False):
log.debug(
"Adding preview tag because its multipartExr"
)
preview = True
else:
render_file_name = list(collection)[0]
# if filtered aov name is found in filename, toggle it for
# preview video rendering
preview = match_aov_pattern(
host_name, aov_filter, render_file_name
)
staging = os.path.dirname(list(collection)[0])
success, rootless_staging_dir = (
anatomy.find_root_template_from_path(staging)
)
if success:
staging = rootless_staging_dir
else:
log.warning((
"Could not find root path for remapping \"{}\"."
" This may cause issues on farm."
).format(staging))
frame_start = int(skeleton_data.get("frameStartHandle"))
if skeleton_data.get("slate"):
frame_start -= 1
# explicitly disable review by user
preview = preview and not do_not_add_review
rep = {
"name": ext,
"ext": ext,
"files": [os.path.basename(f) for f in list(collection)],
"frameStart": frame_start,
"frameEnd": int(skeleton_data.get("frameEndHandle")),
# If expectedFiles are absolute, we need only filenames
"stagingDir": staging,
"fps": skeleton_data.get("fps"),
"tags": ["review"] if preview else [],
}
# poor man's exclusion
if ext in skip_integration_repre_list:
rep["tags"].append("delete")
if skeleton_data.get("multipartExr", False):
rep["tags"].append("multipartExr")
# support conversion from tiled to scanline
if skeleton_data.get("convertToScanline"):
log.info("Adding scanline conversion.")
rep["tags"].append("toScanline")
representations.append(rep)
if preview:
skeleton_data["families"] = _add_review_families(
skeleton_data["families"])
# add remainders as representations
for remainder in remainders:
ext = remainder.split(".")[-1]
staging = os.path.dirname(remainder)
success, rootless_staging_dir = (
anatomy.find_root_template_from_path(staging)
)
if success:
staging = rootless_staging_dir
else:
log.warning((
"Could not find root path for remapping \"{}\"."
" This may cause issues on farm."
).format(staging))
rep = {
"name": ext,
"ext": ext,
"files": os.path.basename(remainder),
"stagingDir": staging,
}
preview = match_aov_pattern(
host_name, aov_filter, remainder
)
preview = preview and not do_not_add_review
if preview:
rep.update({
"fps": skeleton_data.get("fps"),
"tags": ["review"]
})
skeleton_data["families"] = \
_add_review_families(skeleton_data["families"])
already_there = False
for repre in skeleton_data.get("representations", []):
# might be added explicitly before by publish_on_farm
already_there = repre.get("files") == rep["files"]
if already_there:
log.debug("repre {} already_there".format(repre))
break
if not already_there:
representations.append(rep)
for rep in representations:
# inject colorspace data
color_managed_plugin.set_representation_colorspace(
rep, context,
colorspace=skeleton_data["colorspace"]
)
return representations
def create_instances_for_aov(instance, skeleton, aov_filter,
skip_integration_repre_list,
do_not_add_review):
"""Create instances from AOVs.
This will create new pyblish.api.Instances by going over expected
files defined on original instance.
Args:
instance (pyblish.api.Instance): Original instance.
skeleton (dict): Skeleton instance data.
aov_filter (dict): add review for specific aov names
skip_integration_repre_list (list): list of extensions that
shouldn't be published
do_not_add_review (bool): explicitly disable review
Returns:
list of pyblish.api.Instance: Instances created from
expected files.
"""
# we cannot attach AOVs to other subsets as we consider every
# AOV a subset of its own.
log = Logger.get_logger("farm_publishing")
additional_color_data = {
"renderProducts": instance.data["renderProducts"],
"colorspaceConfig": instance.data["colorspaceConfig"],
"display": instance.data["colorspaceDisplay"],
"view": instance.data["colorspaceView"]
}
# Get templated path from absolute config path.
anatomy = instance.context.data["anatomy"]
colorspace_template = instance.data["colorspaceConfig"]
try:
additional_color_data["colorspaceTemplate"] = remap_source(
colorspace_template, anatomy)
except ValueError as e:
log.warning(e)
additional_color_data["colorspaceTemplate"] = colorspace_template
# if there are subset to attach to and more than one AOV,
# we cannot proceed.
if (
len(instance.data.get("attachTo", [])) > 0
and len(instance.data.get("expectedFiles")[0].keys()) != 1
):
raise KnownPublishError(
"attaching multiple AOVs or renderable cameras to "
"subset is not supported yet.")
# create instances for every AOV we found in expected files.
# NOTE: this is done for every AOV and every render camera (if
# there are multiple renderable cameras in scene)
return _create_instances_for_aov(
instance,
skeleton,
aov_filter,
additional_color_data,
skip_integration_repre_list,
do_not_add_review
)
def _create_instances_for_aov(instance, skeleton, aov_filter, additional_data,
skip_integration_repre_list, do_not_add_review):
"""Create instance for each AOV found.
This will create new instance for every AOV it can detect in expected
files list.
Args:
instance (pyblish.api.Instance): Original instance.
skeleton (dict): Skeleton data for the instance, needed later
by the collector.
additional_data (dict): Additional colorspace data for the
created representations.
skip_integration_repre_list (list): list of extensions that shouldn't
be published
do_not_add_review (bool): explicitly disable review
Returns:
list of instances
Throws:
ValueError:
"""
# TODO: this needs to take the task from the context or instance
task = os.environ["AVALON_TASK"]
anatomy = instance.context.data["anatomy"]
subset = skeleton["subset"]
cameras = instance.data.get("cameras", [])
exp_files = instance.data["expectedFiles"]
log = Logger.get_logger("farm_publishing")
instances = []
# go through AOVs in expected files
for aov, files in exp_files[0].items():
cols, rem = clique.assemble(files)
# we shouldn't have any remainders, and if we do, it should
# be just one item for single frame renders.
if not cols and rem:
if len(rem) != 1:
raise ValueError("Found multiple non related files "
"to render, don't know what to do "
"with them.")
col = rem[0]
ext = os.path.splitext(col)[1].lstrip(".")
else:
# but we really expect only one collection.
# Nothing else makes sense.
if len(cols) != 1:
raise ValueError("Only one image sequence type is expected.") # noqa: E501
ext = cols[0].tail.lstrip(".")
col = list(cols[0])
# create subset name `familyTaskSubset_AOV`
group_name = 'render{}{}{}{}'.format(
task[0].upper(), task[1:],
subset[0].upper(), subset[1:])
# if there are multiple cameras, we need to add camera name
if isinstance(col, (list, tuple)):
cam = next((c for c in cameras if c in col[0]), None)
else:
# in case of single frame
cam = next((c for c in cameras if c in col), None)
if cam:
if aov:
subset_name = '{}_{}_{}'.format(group_name, cam, aov)
else:
subset_name = '{}_{}'.format(group_name, cam)
else:
if aov:
subset_name = '{}_{}'.format(group_name, aov)
else:
subset_name = '{}'.format(group_name)
if isinstance(col, (list, tuple)):
staging = os.path.dirname(col[0])
else:
staging = os.path.dirname(col)
try:
staging = remap_source(staging, anatomy)
except ValueError as e:
log.warning(e)
log.info("Creating data for: {}".format(subset_name))
app = os.environ.get("AVALON_APP", "")
if isinstance(col, list):
render_file_name = os.path.basename(col[0])
else:
render_file_name = os.path.basename(col)
aov_patterns = aov_filter
preview = match_aov_pattern(app, aov_patterns, render_file_name)
# toggle preview on if multipart is on
if instance.data.get("multipartExr"):
log.debug("Adding preview tag because its multipartExr")
preview = True
new_instance = deepcopy(skeleton)
new_instance["subset"] = subset_name
new_instance["subsetGroup"] = group_name
# explicitly disable review by user
preview = preview and not do_not_add_review
if preview:
new_instance["review"] = True
# create representation
if isinstance(col, (list, tuple)):
files = [os.path.basename(f) for f in col]
else:
files = os.path.basename(col)
# Copy render product "colorspace" data to representation.
colorspace = ""
products = additional_data["renderProducts"].layer_data.products
for product in products:
if product.productName == aov:
colorspace = product.colorspace
break
rep = {
"name": ext,
"ext": ext,
"files": files,
"frameStart": int(skeleton["frameStartHandle"]),
"frameEnd": int(skeleton["frameEndHandle"]),
# If expectedFiles are absolute, we need only filenames
"stagingDir": staging,
"fps": new_instance.get("fps"),
"tags": ["review"] if preview else [],
"colorspaceData": {
"colorspace": colorspace,
"config": {
"path": additional_data["colorspaceConfig"],
"template": additional_data["colorspaceTemplate"]
},
"display": additional_data["display"],
"view": additional_data["view"]
}
}
# support conversion from tiled to scanline
if instance.data.get("convertToScanline"):
log.info("Adding scanline conversion.")
rep["tags"].append("toScanline")
# poor man's exclusion
if ext in skip_integration_repre_list:
rep["tags"].append("delete")
if preview:
new_instance["families"] = _add_review_families(
new_instance["families"])
new_instance["representations"] = [rep]
# if extending frames from existing version, copy files from there
# into our destination directory
if new_instance.get("extendFrames", False):
copy_extend_frames(new_instance, rep)
instances.append(new_instance)
log.debug("instances:{}".format(instances))
return instances
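# Sketch of the "expectedFiles" layout consumed above, with AOVs
# (hypothetical paths):
#   instance.data["expectedFiles"] = [{
#       "beauty": ["/renders/sh010_beauty.1001.exr", ...],
#       "cryptomatte": ["/renders/sh010_cryptomatte.1001.exr", ...],
#   }]
# One new instance is created per AOV key (and per renderable camera).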
def get_resources(project_name, version, extension=None):
"""Get the files from the specific version.
This will return all get all files from representation.
Todo:
This is a really weird function and its use is
highly controversial. First, it probably won't work
at all in the final release of AYON; second, the logic isn't sound.
It should try to find representation matching the current one -
because it is used to pull out files from previous version to
be included in this one.
.. deprecated:: 3.15.5
This won't work in AYON and even the logic must be refactored.
Args:
project_name (str): Name of the project.
version (dict): Version document.
extension (str): extension used to filter
representations.
Returns:
list: of files
"""
warnings.warn((
"This won't work in AYON and even "
"the logic must be refactored."), DeprecationWarning)
extensions = []
if extension:
extensions = [extension]
# there is a `context_filter` argument that probably won't work in
# the final release of AYON, so we'd rather not use it
repre_docs = list(get_representations(
project_name, version_ids=[version["_id"]]))
filtered = []
for doc in repre_docs:
if doc["context"]["ext"] in extensions:
filtered.append(doc)
representation = filtered[0]
directory = get_representation_path(representation)
print("Source: ", directory)
resources = sorted(
[
os.path.normpath(os.path.join(directory, file_name))
for file_name in os.listdir(directory)
]
)
return resources
def copy_extend_frames(instance, representation):
"""Copy existing frames from latest version.
This will copy all existing frames from subset's latest version back
to render directory and rename them to what renderer is expecting.
Arguments:
instance (pyblish.plugin.Instance): instance to get required
data from
representation (dict): representation to operate on
"""
import speedcopy
R_FRAME_NUMBER = re.compile(
r".+\.(?P<frame>[0-9]+)\..+")
log = Logger.get_logger("farm_publishing")
log.info("Preparing to copy ...")
start = instance.data.get("frameStart")
end = instance.data.get("frameEnd")
project_name = instance.context.data["project"]
anatomy = instance.context.data["anatomy"] # type: Anatomy
# get latest version of subset
# this will stop if subset wasn't published yet
version = get_last_version_by_subset_name(
project_name,
instance.data.get("subset"),
asset_name=instance.data.get("asset")
)
# get its files based on extension
subset_resources = get_resources(
project_name, version, representation.get("ext")
)
r_col, _ = clique.assemble(subset_resources)
# if override remove all frames we are expecting to be rendered,
# so we'll copy only those missing from current render
if instance.data.get("overrideExistingFrame"):
for frame in range(start, end + 1):
if frame not in r_col.indexes:
continue
r_col.indexes.remove(frame)
# now we need to translate published names from representation
# back. This is tricky, right now we'll just use same naming
# and only switch frame numbers
resource_files = []
r_filename = os.path.basename(
representation.get("files")[0]) # first file
op = re.search(R_FRAME_NUMBER, r_filename)
assert op is not None, "padding string wasn't found"
pre = r_filename[:op.start("frame")]
post = r_filename[op.end("frame"):]
for frame in list(r_col):
fn = re.search(R_FRAME_NUMBER, frame)
# make sure the frame-number pattern was found in the file name
assert fn is not None, "padding string wasn't found"
# list of tuples (source, destination)
staging = representation.get("stagingDir")
staging = anatomy.fill_root(staging)
resource_files.append(
(frame, os.path.join(
staging, "{}{}{}".format(pre, fn["frame"], post)))
)
# test if destination dir exists and create it if not
output_dir = os.path.dirname(representation.get("files")[0])
if not os.path.isdir(output_dir):
os.makedirs(output_dir)
# copy files
for source in resource_files:
speedcopy.copy(source[0], source[1])
log.info(" > {}".format(source[1]))
log.info("Finished copying %i files" % len(resource_files))
def attach_instances_to_subset(attach_to, instances):
"""Attach instance to subset.
If we are attaching to other subsets, create copy of existing
instances, change data to match its subset and replace
existing instances with modified data.
Args:
attach_to (list): List of instances to attach to.
instances (list): List of instances to attach.
Returns:
list: List of attached instances.
"""
new_instances = []
for attach_instance in attach_to:
for i in instances:
new_inst = copy.deepcopy(i)
new_inst["version"] = attach_instance.get("version")
new_inst["subset"] = attach_instance.get("subset")
new_inst["family"] = attach_instance.get("family")
new_inst["append"] = True
# don't set subsetGroup if we are attaching
new_inst.pop("subsetGroup")
new_instances.append(new_inst)
return new_instances
def create_metadata_path(instance, anatomy):
ins_data = instance.data
# Ensure output dir exists
output_dir = ins_data.get(
"publishRenderMetadataFolder", ins_data["outputDir"])
log = Logger.get_logger("farm_publishing")
try:
if not os.path.isdir(output_dir):
os.makedirs(output_dir)
except OSError:
# directory is not available
log.warning("Path is unreachable: `{}`".format(output_dir))
metadata_filename = "{}_metadata.json".format(ins_data["subset"])
metadata_path = os.path.join(output_dir, metadata_filename)
# Convert output dir to `{root}/rest/of/path/...` with Anatomy
success, rootless_mtdt_p = anatomy.find_root_template_from_path(
metadata_path)
if not success:
# `rootless_path` is not set to `output_dir` if none of roots match
log.warning((
"Could not find root path for remapping \"{}\"."
" This may cause issues on farm."
).format(output_dir))
rootless_mtdt_p = metadata_path
return metadata_path, rootless_mtdt_p
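# Illustrative return value (hypothetical roots and subset name):
#   ("/mnt/projects/demo/render/renderCompMain_metadata.json",
#    "{root[work]}/demo/render/renderCompMain_metadata.json")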

View file

@ -0,0 +1,24 @@
import pyblish.api
from openpype.pipeline import Anatomy
from typing import Tuple, Union, List
class TimeData:
start: int
end: int
fps: Union[float, int]
step: int
handle_start: int
handle_end: int
def __init__(self, start: int, end: int, fps: Union[float, int], step: int, handle_start: int, handle_end: int):
...
...
def remap_source(source: str, anatomy: Anatomy): ...
def extend_frames(asset: str, subset: str, start: int, end: int) -> Tuple[int, int]: ...
def get_time_data_from_instance_or_context(instance: pyblish.api.Instance) -> TimeData: ...
def get_transferable_representations(instance: pyblish.api.Instance) -> list: ...
def create_skeleton_instance(instance: pyblish.api.Instance, families_transfer: list = ..., instance_transfer: dict = ...) -> dict: ...
def create_instances_for_aov(instance: pyblish.api.Instance, skeleton: dict, aov_filter: dict) -> List[pyblish.api.Instance]: ...
def attach_instances_to_subset(attach_to: list, instances: list) -> list: ...

View file

@ -0,0 +1,112 @@
import os
def get_published_workfile_instance(context):
"""Find workfile instance in context"""
for i in context:
is_workfile = (
"workfile" in i.data.get("families", []) or
i.data["family"] == "workfile"
)
if not is_workfile:
continue
# test if there is instance of workfile waiting
# to be published.
if i.data["publish"] is not True:
continue
return i
def from_published_scene(instance, replace_in_path=True):
"""Switch work scene for published scene.
If rendering/exporting from published scenes is enabled, this will
replace paths from working scene to published scene.
Args:
instance (pyblish.api.Instance): Instance data to process.
replace_in_path (bool): if True, it will try to find
old scene name in path of expected files and replace it
with name of published scene.
Returns:
str: Published scene path.
None: if no published scene is found.
Note:
Published scene path is actually determined from the project Anatomy,
because at the time this plugin is running the scene may still be
unpublished.
"""
workfile_instance = get_published_workfile_instance(instance.context)
if workfile_instance is None:
return
# determine published path from Anatomy.
template_data = workfile_instance.data.get("anatomyData")
rep = workfile_instance.data["representations"][0]
template_data["representation"] = rep.get("name")
template_data["ext"] = rep.get("ext")
template_data["comment"] = None
anatomy = instance.context.data['anatomy']
template_obj = anatomy.templates_obj["publish"]["path"]
template_filled = template_obj.format_strict(template_data)
file_path = os.path.normpath(template_filled)
if not os.path.exists(file_path):
raise FileNotFoundError(
"Published scene does not exist: {}".format(file_path))
if not replace_in_path:
return file_path
# now we need to switch scene in expected files
# because <scene> token will now point to published
# scene file and that might differ from current one
def _clean_name(path):
return os.path.splitext(os.path.basename(path))[0]
new_scene = _clean_name(file_path)
orig_scene = _clean_name(instance.context.data["currentFile"])
expected_files = instance.data.get("expectedFiles")
if isinstance(expected_files[0], dict):
# we have aovs and we need to iterate over them
new_exp = {}
for aov, files in expected_files[0].items():
replaced_files = []
for f in files:
replaced_files.append(
str(f).replace(orig_scene, new_scene)
)
new_exp[aov] = replaced_files
# [] might be too much here, TODO
instance.data["expectedFiles"] = [new_exp]
else:
new_exp = []
for f in expected_files:
new_exp.append(
str(f).replace(orig_scene, new_scene)
)
instance.data["expectedFiles"] = new_exp
metadata_folder = instance.data.get("publishRenderMetadataFolder")
if metadata_folder:
metadata_folder = metadata_folder.replace(orig_scene,
new_scene)
instance.data["publishRenderMetadataFolder"] = metadata_folder
return file_path
def iter_expected_files(exp):
if isinstance(exp[0], dict):
for _aov, files in exp[0].items():
for file in files:
yield file
else:
for file in exp:
yield file
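# Works for both layouts of "expectedFiles" (illustrative file names):
#   [{"beauty": ["b.1001.exr"], "crypto": ["c.1001.exr"]}]
#       -> yields "b.1001.exr", "c.1001.exr"
#   ["b.1001.exr", "b.1002.exr"]
#       -> yields "b.1001.exr", "b.1002.exr"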

View file

@ -869,6 +869,110 @@ def _validate_transient_template(project_name, template_name, anatomy):
).format(template_name, project_name))
def get_published_workfile_instance(context):
"""Find workfile instance in context"""
for i in context:
is_workfile = (
"workfile" in i.data.get("families", []) or
i.data["family"] == "workfile"
)
if not is_workfile:
continue
# test if there is instance of workfile waiting
# to be published.
if not i.data.get("publish", True):
continue
return i
def replace_with_published_scene_path(instance, replace_in_path=True):
"""Switch work scene path for published scene.
If rendering/exporting from published scenes is enabled, this will
replace paths from working scene to published scene.
This only works if publish contains workfile instance!
Args:
instance (pyblish.api.Instance): Pyblish instance.
replace_in_path (bool): if True, it will try to find
old scene name in path of expected files and replace it
with name of published scene.
Returns:
str: Published scene path.
None: if no published scene is found.
Note:
Published scene path is actually determined from the project Anatomy,
because at the time this plugin is running the scene may not be
published yet.
"""
log = Logger.get_logger("published_workfile")
workfile_instance = get_published_workfile_instance(instance.context)
if workfile_instance is None:
return
# determine published path from Anatomy.
template_data = workfile_instance.data.get("anatomyData")
rep = workfile_instance.data["representations"][0]
template_data["representation"] = rep.get("name")
template_data["ext"] = rep.get("ext")
template_data["comment"] = None
anatomy = instance.context.data['anatomy']
anatomy_filled = anatomy.format(template_data)
template_filled = anatomy_filled["publish"]["path"]
file_path = os.path.normpath(template_filled)
log.info("Using published scene for render {}".format(file_path))
if not os.path.exists(file_path):
log.error("published scene does not exist!")
raise
if not replace_in_path:
return file_path
# now we need to switch scene in expected files
# because <scene> token will now point to published
# scene file and that might differ from current one
def _clean_name(path):
return os.path.splitext(os.path.basename(path))[0]
new_scene = _clean_name(file_path)
orig_scene = _clean_name(instance.context.data["currentFile"])
expected_files = instance.data.get("expectedFiles")
if isinstance(expected_files[0], dict):
# we have aovs and we need to iterate over them
new_exp = {}
for aov, files in expected_files[0].items():
replaced_files = []
for f in files:
replaced_files.append(
str(f).replace(orig_scene, new_scene)
)
new_exp[aov] = replaced_files
# [] might be too much here, TODO
instance.data["expectedFiles"] = [new_exp]
else:
new_exp = []
for f in expected_files:
new_exp.append(
str(f).replace(orig_scene, new_scene)
)
instance.data["expectedFiles"] = new_exp
metadata_folder = instance.data.get("publishRenderMetadataFolder")
if metadata_folder:
metadata_folder = metadata_folder.replace(orig_scene,
new_scene)
instance.data["publishRenderMetadataFolder"] = metadata_folder
log.info("Scene name was switched {} -> {}".format(
orig_scene, new_scene
))
return file_path
def add_repre_files_for_cleanup(instance, repre):
""" Explicitly mark repre files to be deleted.

View file

@ -137,7 +137,8 @@ class IntegrateAsset(pyblish.api.InstancePlugin):
"mvUsdOverride",
"simpleUnrealTexture",
"online",
"uasset"
"uasset",
"blendScene"
]
default_template_name = "publish"

View file

@ -54,7 +54,8 @@
"camera",
"rig",
"action",
"layout"
"layout",
"blendScene"
]
},
"ExtractFBX": {

View file

@ -1,4 +1,7 @@
{
"rr_paths": [
"default"
],
"publish": {
"CollectSequencesFromJob": {
"review": true

View file

@ -185,9 +185,9 @@
"enabled": false,
"rr_paths": {
"default": {
"windows": "",
"darwin": "",
"linux": ""
"windows": "C:\\RR8",
"darwin": "/Volumes/share/RR8",
"linux": "/mnt/studio/RR8"
}
}
},

View file

@ -107,7 +107,8 @@ from .enum_entity import (
TaskTypeEnumEntity,
DeadlineUrlEnumEntity,
AnatomyTemplatesEnumEntity,
ShotgridUrlEnumEntity
ShotgridUrlEnumEntity,
RoyalRenderRootEnumEntity
)
from .list_entity import ListEntity
@ -170,6 +171,7 @@ __all__ = (
"TaskTypeEnumEntity",
"DeadlineUrlEnumEntity",
"ShotgridUrlEnumEntity",
"RoyalRenderRootEnumEntity",
"AnatomyTemplatesEnumEntity",
"ListEntity",

View file

@ -1,3 +1,5 @@
import abc
import six
import copy
from .input_entities import InputEntity
from .exceptions import EntitySchemaError
@ -477,8 +479,9 @@ class TaskTypeEnumEntity(BaseEnumEntity):
self.set(value_on_not_set)
class DeadlineUrlEnumEntity(BaseEnumEntity):
schema_types = ["deadline_url-enum"]
@six.add_metaclass(abc.ABCMeta)
class FarmRootEnumEntity(BaseEnumEntity):
schema_types = []
def _item_initialization(self):
self.multiselection = self.schema_data.get("multiselection", True)
@ -496,22 +499,8 @@ class DeadlineUrlEnumEntity(BaseEnumEntity):
# GUI attribute
self.placeholder = self.schema_data.get("placeholder")
def _get_enum_values(self):
deadline_urls_entity = self.get_entity_from_path(
"system_settings/modules/deadline/deadline_urls"
)
valid_keys = set()
enum_items_list = []
for server_name, url_entity in deadline_urls_entity.items():
enum_items_list.append(
{server_name: "{}: {}".format(server_name, url_entity.value)}
)
valid_keys.add(server_name)
return enum_items_list, valid_keys
def set_override_state(self, *args, **kwargs):
super(DeadlineUrlEnumEntity, self).set_override_state(*args, **kwargs)
super(FarmRootEnumEntity, self).set_override_state(*args, **kwargs)
self.enum_items, self.valid_keys = self._get_enum_values()
if self.multiselection:
@ -528,22 +517,50 @@ class DeadlineUrlEnumEntity(BaseEnumEntity):
elif self._current_value not in self.valid_keys:
self._current_value = tuple(self.valid_keys)[0]
@abc.abstractmethod
def _get_enum_values(self):
pass
class ShotgridUrlEnumEntity(BaseEnumEntity):
class DeadlineUrlEnumEntity(FarmRootEnumEntity):
schema_types = ["deadline_url-enum"]
def _get_enum_values(self):
deadline_urls_entity = self.get_entity_from_path(
"system_settings/modules/deadline/deadline_urls"
)
valid_keys = set()
enum_items_list = []
for server_name, url_entity in deadline_urls_entity.items():
enum_items_list.append(
{server_name: "{}: {}".format(server_name, url_entity.value)}
)
valid_keys.add(server_name)
return enum_items_list, valid_keys
class RoyalRenderRootEnumEntity(FarmRootEnumEntity):
schema_types = ["rr_root-enum"]
def _get_enum_values(self):
rr_root_entity = self.get_entity_from_path(
"system_settings/modules/royalrender/rr_paths"
)
valid_keys = set()
enum_items_list = []
for server_name, url_entity in rr_root_entity.items():
enum_items_list.append(
{server_name: "{}: {}".format(server_name, url_entity.value)}
)
valid_keys.add(server_name)
return enum_items_list, valid_keys
class ShotgridUrlEnumEntity(FarmRootEnumEntity):
schema_types = ["shotgrid_url-enum"]
def _item_initialization(self):
self.multiselection = False
self.enum_items = []
self.valid_keys = set()
self.valid_value_types = (STRING_TYPE,)
self.value_on_not_set = ""
# GUI attribute
self.placeholder = self.schema_data.get("placeholder")
def _get_enum_values(self):
shotgrid_settings = self.get_entity_from_path(
"system_settings/modules/shotgrid/shotgrid_settings"
@ -562,16 +579,6 @@ class ShotgridUrlEnumEntity(BaseEnumEntity):
valid_keys.add(server_name)
return enum_items_list, valid_keys
def set_override_state(self, *args, **kwargs):
super(ShotgridUrlEnumEntity, self).set_override_state(*args, **kwargs)
self.enum_items, self.valid_keys = self._get_enum_values()
if not self.valid_keys:
self._current_value = ""
elif self._current_value not in self.valid_keys:
self._current_value = tuple(self.valid_keys)[0]
class AnatomyTemplatesEnumEntity(BaseEnumEntity):
schema_types = ["anatomy-templates-enum"]

View file

@ -5,6 +5,12 @@
"collapsible": true,
"is_file": true,
"children": [
{
"type": "rr_root-enum",
"key": "rr_paths",
"label": "Royal Render Roots",
"multiselect": true
},
{
"type": "dict",
"collapsible": true,

View file

@ -1,10 +0,0 @@
# -*- coding: utf-8 -*-
"""Test suite for User Settings."""
# import pytest
# from openpype.modules import ModulesManager
def test_rr_job():
# manager = ModulesManager()
# rr_module = manager.modules_by_name["royalrender"]
...

View file

@ -102,6 +102,10 @@ workstation that should be submitting render jobs to muster via OpenPype.
**`templates mapping`** - you can customize Muster templates to match your existing setup here.
### Royal Render
**`Royal Render Root Paths`** - multi-platform paths to the Royal Render installation.
### Clockify
**`Workspace Name`** - name of the clockify workspace where you would like to be sending all the timelogs.

View file

@ -132,3 +132,25 @@ switch versions between different hda types.
When you load an HDA, it will install its type in your hip file and add the published version as its definition file. When
you switch versions via the Scene Manager, it will add its definition and set it as preferred.
## Publishing and loading BGEO caches
There is simple support for publishing and loading **BGEO** files in all supported compression variants.
### Creating BGEO instances
Select the SOP node to be exported as BGEO. If your selection is at the object level, OpenPype will try to find an `output` node inside it; the one with the lowest index will be used:
![BGEO output node](assets/houdini_bgeo_output_node.png)
Then you can open Publisher, in Create you select **BGEO PointCache**:
![BGEO Publisher](assets/houdini_bgeo-publisher.png)
You can select the compression type and whether the current selection should be connected to the ROP's SOP path parameter. Publishing will produce a sequence of files based on your timeline settings.
### Loading BGEO
Select your published BGEO subsets in the Loader, right-click and load them in:
![BGEO Publisher](assets/houdini_bgeo-loading.png)

Binary file not shown.

After

Width:  |  Height:  |  Size: 121 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 92 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 11 KiB

View file

@ -0,0 +1,37 @@
---
id: module_royalrender
title: Royal Render Administration
sidebar_label: Royal Render
---
import Tabs from '@theme/Tabs';
import TabItem from '@theme/TabItem';
## Preparation
For [Royal Render](https://www.royalrender.de/) support you need to set a few things up in both OpenPype and Royal Render itself:
1. Deploy the OpenPype executable to all nodes of the Royal Render farm. See [Install & Run](admin_use.md).
2. Enable Royal Render Module in the [OpenPype Admin Settings](admin_settings_system.md#royal-render).
3. Point OpenPype to your Royal Render installation in the [OpenPype Admin Settings](admin_settings_system.md#royal-render).
4. Install our custom plugin and scripts to your RR repository. It should be as simple as copying the content of `openpype/modules/royalrender/rr_root` to `path/to/your/royalrender/repository`; see the sketch below.
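
A minimal sketch of step 4 using Python's `shutil`; both paths are placeholders that you need to adjust to your deployment and your RR repository location:

```python
import shutil

# Placeholder paths - adjust to your OpenPype checkout and RR repository.
shutil.copytree(
    "openpype/modules/royalrender/rr_root",
    "/path/to/your/royalrender/repository",
    dirs_exist_ok=True,  # requires Python 3.8+
)
```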
## Configuration
OpenPype integration for Royal Render consists of pointing RR to the location of the OpenPype executable. That is done by copying `_install_paths/OpenPype.cfg` to
the RR root folder. This file contains reasonable defaults; they can be changed in this file or by modifying the Render apps in `rrControl`.
## Debugging
The current implementation uses a dynamically built `.xml` file which is stored in a temporary folder accessible by RR. In case of unforeseen issues, it might make
sense to take this OpenPype-built file and run it via the `*__rrServerConsole` executable from the command line.
## Known issues
Currently, environment variables set in OpenPype are not propagated into render jobs on RR. For now, it is the studio's responsibility to synchronize environment variables from OpenPype across all render nodes.

View file

@ -111,6 +111,7 @@ module.exports = {
"module_site_sync",
"module_deadline",
"module_muster",
"module_royalrender",
"module_clockify",
"module_slack"
],