Mirror of https://github.com/ynput/ayon-core.git (synced 2025-12-24 21:04:40 +01:00)

Commit e4e9ce8c26: Merge branch 'release/2.7.0' of github.com:pypeclub/pype into hotfix/PYPE-781_pass-multipart-exr-flag

56 changed files with 2744 additions and 570 deletions

.flake8 (2 changes)

@@ -1,5 +1,7 @@
[flake8]
# ignore = D203
ignore = BLK100, W504, W503
max-line-length = 79
exclude =
    .git,
    __pycache__,

.hound.yml (new file, 4 lines)

@@ -0,0 +1,4 @@
flake8:
  enabled: true
  config_file: .flake8

@@ -1,16 +1,10 @@
import logging
from pathlib import Path
import os

import bpy
import sys
import traceback

from avalon import api as avalon
from pyblish import api as pyblish

from .plugin import AssetLoader

logger = logging.getLogger("pype.blender")

PARENT_DIR = os.path.dirname(__file__)
PACKAGE_DIR = os.path.dirname(PARENT_DIR)
PLUGINS_DIR = os.path.join(PACKAGE_DIR, "plugins")
@@ -19,9 +13,16 @@ PUBLISH_PATH = os.path.join(PLUGINS_DIR, "blender", "publish")
LOAD_PATH = os.path.join(PLUGINS_DIR, "blender", "load")
CREATE_PATH = os.path.join(PLUGINS_DIR, "blender", "create")

ORIGINAL_EXCEPTHOOK = sys.excepthook


def pype_excepthook_handler(*args):
    traceback.print_exception(*args)


def install():
    """Install Blender configuration for Avalon."""
    sys.excepthook = pype_excepthook_handler
    pyblish.register_plugin_path(str(PUBLISH_PATH))
    avalon.register_plugin_path(avalon.Loader, str(LOAD_PATH))
    avalon.register_plugin_path(avalon.Creator, str(CREATE_PATH))
@@ -29,6 +30,7 @@ def install():

def uninstall():
    """Uninstall Blender configuration for Avalon."""
    sys.excepthook = ORIGINAL_EXCEPTHOOK
    pyblish.deregister_plugin_path(str(PUBLISH_PATH))
    avalon.deregister_plugin_path(avalon.Loader, str(LOAD_PATH))
    avalon.deregister_plugin_path(avalon.Creator, str(CREATE_PATH))

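The install/uninstall pair above also swaps Blender's global exception hook so uncaught errors print a full traceback, and restores the original afterwards. The same pattern in isolation, a sketch using only the standard library:

    import sys
    import traceback

    ORIGINAL_EXCEPTHOOK = sys.excepthook

    def pype_excepthook_handler(*args):
        # args is (exc_type, exc_value, exc_traceback), as sys passes them.
        traceback.print_exception(*args)

    sys.excepthook = pype_excepthook_handler   # what install() does
    # ... any uncaught exception is now printed in full ...
    sys.excepthook = ORIGINAL_EXCEPTHOOK       # what uninstall() does
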
@@ -10,14 +10,43 @@ from avalon import api
VALID_EXTENSIONS = [".blend"]


def model_name(asset: str, subset: str, namespace: Optional[str] = None) -> str:
    """Return a consistent name for a model asset."""
def asset_name(
    asset: str, subset: str, namespace: Optional[str] = None
) -> str:
    """Return a consistent name for an asset."""
    name = f"{asset}_{subset}"
    if namespace:
        name = f"{namespace}:{name}"
    return name

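For reference, the renamed helper builds the names the rest of this diff relies on (values here are illustrative):

    asset_name("hero", "modelMain")                      # -> "hero_modelMain"
    asset_name("hero", "rigMain", namespace="shot010")   # -> "shot010:hero_rigMain"
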
def create_blender_context(active: Optional[bpy.types.Object] = None,
                           selected: Optional[bpy.types.Object] = None,):
    """Create a new Blender context. If an object is passed as
    parameter, it is set as selected and active.
    """

    if not isinstance(selected, list):
        selected = [selected]

    for win in bpy.context.window_manager.windows:
        for area in win.screen.areas:
            if area.type == 'VIEW_3D':
                for region in area.regions:
                    if region.type == 'WINDOW':
                        override_context = {
                            'window': win,
                            'screen': win.screen,
                            'area': area,
                            'region': region,
                            'scene': bpy.context.scene,
                            'active_object': active,
                            'selected_objects': selected
                        }
                        return override_context
    raise Exception("Could not create a custom Blender context.")

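The returned dict is meant to be passed as the override argument that Blender operators accept; extract_abc.py later in this diff uses it exactly that way. A sketch (the object lookup and file path are illustrative only):

    import bpy

    obj = bpy.context.scene.objects[0]          # assumes at least one object
    ctx = create_blender_context(active=obj, selected=[obj])
    bpy.ops.wm.alembic_export(ctx, filepath="/tmp/demo.abc", start=1, end=1)
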
class AssetLoader(api.Loader):
    """A basic AssetLoader for Blender

@@ -67,7 +96,8 @@ class AssetLoader(api.Loader):
            assert obj.library, f"'{obj.name}' is not linked."
            libraries.add(obj.library)

        assert len(libraries) == 1, "'{container.name}' contains objects from more then 1 library."
        assert len(
            libraries) == 1, "'{container.name}' contains objects from more then 1 library."

        return list(libraries)[0]
@@ -122,7 +152,7 @@ class AssetLoader(api.Loader):

        asset = context["asset"]["name"]
        subset = context["subset"]["name"]
        instance_name = model_name(asset, subset, namespace)
        instance_name = asset_name(asset, subset, namespace)

        return self._get_instance_collection(instance_name, nodes)

@@ -286,7 +286,9 @@ class AppAction(BaseHandler):

        # Run SW if was found executable
        if execfile is not None:
            avalonlib.launch(executable=execfile, args=[], environment=env)
            popen = avalonlib.launch(
                executable=execfile, args=[], environment=env
            )
        else:
            return {
                'success': False,

@@ -229,6 +229,8 @@ def is_latest(representation):
    """

    version = io.find_one({"_id": representation['parent']})
    if version["type"] == "master_version":
        return True

    # Get highest version under the parent
    highest_version = io.find_one({

@@ -215,14 +215,14 @@ def script_name():

def add_button_write_to_read(node):
    name = "createReadNode"
    label = "Create Read"
    label = "[ Create Read ]"
    value = "import write_to_read;write_to_read.write_to_read(nuke.thisNode())"
    k = nuke.PyScript_Knob(name, label, value)
    k.setFlag(0x1000)
    node.addKnob(k)


def create_write_node(name, data, input=None, prenodes=None):
def create_write_node(name, data, input=None, prenodes=None, review=True):
    ''' Creating write node which is group node

    Arguments:
@@ -231,6 +231,7 @@ def create_write_node(name, data, input=None, prenodes=None):
        input (node): selected node to connect to
        prenodes (list, optional): list of lists, definitions for nodes
            to be created before write
        review (bool): adding review knob

    Example:
        prenodes = [(

@@ -389,15 +390,8 @@ def create_write_node(name, data, input=None, prenodes=None):

    add_rendering_knobs(GN)

    # adding write to read button
    add_button_write_to_read(GN)

    divider = nuke.Text_Knob('')
    GN.addKnob(divider)

    # set tile color
    tile_color = _data.get("tile_color", "0xff0000ff")
    GN["tile_color"].setValue(tile_color)
    if review:
        add_review_knob(GN)

    # add render button
    lnk = nuke.Link_Knob("Render")
@@ -405,9 +399,20 @@ def create_write_node(name, data, input=None, prenodes=None):
    lnk.setName("Render")
    GN.addKnob(lnk)

    divider = nuke.Text_Knob('')
    GN.addKnob(divider)

    # adding write to read button
    add_button_write_to_read(GN)

    # Deadline tab.
    add_deadline_tab(GN)

    # set tile color
    tile_color = _data.get("tile_color", "0xff0000ff")
    GN["tile_color"].setValue(tile_color)

    return GN

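A sketch of how a caller would use the new review flag (the node names and data keys here are illustrative; only the signature comes from the diff):

    data = {"tile_color": "0xff0000ff"}          # plus whatever write settings apply
    gn = create_write_node("renderMain", data)   # review knob added by default
    gn_util = create_write_node("renderUtil", data, review=False)  # opt out
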
@@ -429,6 +434,17 @@ def add_rendering_knobs(node):
    knob = nuke.Boolean_Knob("render_farm", "Render on Farm")
    knob.setValue(False)
    node.addKnob(knob)
    return node


def add_review_knob(node):
    ''' Adds additional review knob to given node

    Arguments:
        node (obj): nuke node object to be fixed

    Return:
        node (obj): with added knob
    '''
    if "review" not in node.knobs():
        knob = nuke.Boolean_Knob("review", "Review")
        knob.setValue(True)

@@ -1573,10 +1589,9 @@ class ExporterReviewMov(ExporterReview):
        self.nodes = {}

        # deal with now lut defined in viewer lut
        if hasattr(klass, "viewer_lut_raw"):
            self.viewer_lut_raw = klass.viewer_lut_raw
        else:
            self.viewer_lut_raw = False
        self.viewer_lut_raw = klass.viewer_lut_raw
        self.bake_colorspace_fallback = klass.bake_colorspace_fallback
        self.bake_colorspace_main = klass.bake_colorspace_main

        self.name = name or "baked"
        self.ext = ext or "mov"

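Dropping the hasattr branch assumes every preset class now defines viewer_lut_raw; if the guarded behaviour were ever wanted back, the removed four lines collapse to one. A sketch:

    # Drop-in equivalent of the removed hasattr/else block:
    self.viewer_lut_raw = getattr(klass, "viewer_lut_raw", False)
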
@@ -1637,8 +1652,26 @@ class ExporterReviewMov(ExporterReview):
        self.log.debug("ViewProcess... `{}`".format(self._temp_nodes))

        if not self.viewer_lut_raw:
            # OCIODisplay node
            dag_node = nuke.createNode("OCIODisplay")
            colorspaces = [
                self.bake_colorspace_main, self.bake_colorspace_fallback
            ]

            if any(colorspaces):
                # OCIOColorSpace with controled output
                dag_node = nuke.createNode("OCIOColorSpace")
                for c in colorspaces:
                    test = dag_node["out_colorspace"].setValue(str(c))
                    if test:
                        self.log.info(
                            "Baking in colorspace... `{}`".format(c))
                        break

                if not test:
                    dag_node = nuke.createNode("OCIODisplay")
            else:
                # OCIODisplay
                dag_node = nuke.createNode("OCIODisplay")

        # connect
        dag_node.setInput(0, self.previous_node)
        self._temp_nodes.append(dag_node)

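The branch above tries the main baking colorspace first, then the fallback, and reverts to a plain OCIODisplay when neither can be set. The selection order as a standalone sketch (not code from the commit):

    def pick_bake_colorspace(main, fallback):
        """Return the first truthy colorspace, or None to use OCIODisplay."""
        for colorspace in (main, fallback):
            if colorspace:
                return colorspace
        return None
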
pype/plugins/blender/create/create_action.py (new file, 40 lines)

@@ -0,0 +1,40 @@
"""Create an animation asset."""

import bpy

from avalon import api
from avalon.blender import Creator, lib
import pype.blender.plugin


class CreateAction(Creator):
    """Action output for character rigs"""

    name = "actionMain"
    label = "Action"
    family = "action"
    icon = "male"

    def process(self):

        asset = self.data["asset"]
        subset = self.data["subset"]
        name = pype.blender.plugin.asset_name(asset, subset)
        collection = bpy.data.collections.new(name=name)
        bpy.context.scene.collection.children.link(collection)
        self.data['task'] = api.Session.get('AVALON_TASK')
        lib.imprint(collection, self.data)

        if (self.options or {}).get("useSelection"):
            for obj in lib.get_selection():
                if (obj.animation_data is not None
                        and obj.animation_data.action is not None):

                    empty_obj = bpy.data.objects.new(name=name,
                                                     object_data=None)
                    empty_obj.animation_data_create()
                    empty_obj.animation_data.action = obj.animation_data.action
                    empty_obj.animation_data.action.name = name
                    collection.objects.link(empty_obj)

        return collection

pype/plugins/blender/create/create_animation.py (new file, 52 lines)

@@ -0,0 +1,52 @@
"""Create an animation asset."""

import bpy

from avalon import api
from avalon.blender import Creator, lib
import pype.blender.plugin


class CreateAnimation(Creator):
    """Animation output for character rigs"""

    name = "animationMain"
    label = "Animation"
    family = "animation"
    icon = "male"

    def process(self):

        asset = self.data["asset"]
        subset = self.data["subset"]
        name = pype.blender.plugin.asset_name(asset, subset)
        collection = bpy.data.collections.new(name=name)
        bpy.context.scene.collection.children.link(collection)
        self.data['task'] = api.Session.get('AVALON_TASK')
        lib.imprint(collection, self.data)

        # Add the rig object and all the children meshes to
        # a set and link them all at the end to avoid duplicates.
        # Blender crashes if trying to link an object that is already linked.
        # This links automatically the children meshes if they were not
        # selected, and doesn't link them twice if they, insted,
        # were manually selected by the user.
        objects_to_link = set()

        if (self.options or {}).get("useSelection"):

            for obj in lib.get_selection():

                objects_to_link.add(obj)

                if obj.type == 'ARMATURE':

                    for subobj in obj.children:

                        objects_to_link.add(subobj)

        for obj in objects_to_link:

            collection.objects.link(obj)

        return collection

@@ -4,6 +4,7 @@ import bpy

from avalon import api
from avalon.blender import Creator, lib
import pype.blender.plugin


class CreateModel(Creator):
@@ -15,11 +16,10 @@ class CreateModel(Creator):
    icon = "cube"

    def process(self):
        import pype.blender

        asset = self.data["asset"]
        subset = self.data["subset"]
        name = pype.blender.plugin.model_name(asset, subset)
        name = pype.blender.plugin.asset_name(asset, subset)
        collection = bpy.data.collections.new(name=name)
        bpy.context.scene.collection.children.link(collection)
        self.data['task'] = api.Session.get('AVALON_TASK')

pype/plugins/blender/create/create_rig.py (new file, 52 lines)

@@ -0,0 +1,52 @@
"""Create a rig asset."""

import bpy

from avalon import api
from avalon.blender import Creator, lib
import pype.blender.plugin


class CreateRig(Creator):
    """Artist-friendly rig with controls to direct motion"""

    name = "rigMain"
    label = "Rig"
    family = "rig"
    icon = "wheelchair"

    def process(self):

        asset = self.data["asset"]
        subset = self.data["subset"]
        name = pype.blender.plugin.asset_name(asset, subset)
        collection = bpy.data.collections.new(name=name)
        bpy.context.scene.collection.children.link(collection)
        self.data['task'] = api.Session.get('AVALON_TASK')
        lib.imprint(collection, self.data)

        # Add the rig object and all the children meshes to
        # a set and link them all at the end to avoid duplicates.
        # Blender crashes if trying to link an object that is already linked.
        # This links automatically the children meshes if they were not
        # selected, and doesn't link them twice if they, insted,
        # were manually selected by the user.
        objects_to_link = set()

        if (self.options or {}).get("useSelection"):

            for obj in lib.get_selection():

                objects_to_link.add(obj)

                if obj.type == 'ARMATURE':

                    for subobj in obj.children:

                        objects_to_link.add(subobj)

        for obj in objects_to_link:

            collection.objects.link(obj)

        return collection

pype/plugins/blender/load/load_action.py (new file, 304 lines)

@@ -0,0 +1,304 @@
"""Load an action in Blender."""

import logging
from pathlib import Path
from pprint import pformat
from typing import Dict, List, Optional

from avalon import api, blender
import bpy
import pype.blender.plugin

logger = logging.getLogger("pype").getChild("blender").getChild("load_action")


class BlendActionLoader(pype.blender.plugin.AssetLoader):
    """Load action from a .blend file.

    Warning:
        Loading the same asset more then once is not properly supported at the
        moment.
    """

    families = ["action"]
    representations = ["blend"]

    label = "Link Action"
    icon = "code-fork"
    color = "orange"

    def process_asset(
        self, context: dict, name: str, namespace: Optional[str] = None,
        options: Optional[Dict] = None
    ) -> Optional[List]:
        """
        Arguments:
            name: Use pre-defined name
            namespace: Use pre-defined namespace
            context: Full parenthood of representation to load
            options: Additional settings dictionary
        """

        libpath = self.fname
        asset = context["asset"]["name"]
        subset = context["subset"]["name"]
        lib_container = pype.blender.plugin.asset_name(asset, subset)
        container_name = pype.blender.plugin.asset_name(
            asset, subset, namespace
        )

        container = bpy.data.collections.new(lib_container)
        container.name = container_name
        blender.pipeline.containerise_existing(
            container,
            name,
            namespace,
            context,
            self.__class__.__name__,
        )

        container_metadata = container.get(
            blender.pipeline.AVALON_PROPERTY)

        container_metadata["libpath"] = libpath
        container_metadata["lib_container"] = lib_container

        relative = bpy.context.preferences.filepaths.use_relative_paths
        with bpy.data.libraries.load(
            libpath, link=True, relative=relative
        ) as (_, data_to):
            data_to.collections = [lib_container]

        collection = bpy.context.scene.collection

        collection.children.link(bpy.data.collections[lib_container])

        animation_container = collection.children[lib_container].make_local()

        objects_list = []

        # Link meshes first, then armatures.
        # The armature is unparented for all the non-local meshes,
        # when it is made local.
        for obj in animation_container.objects:

            obj = obj.make_local()

            anim_data = obj.animation_data

            if anim_data is not None and anim_data.action is not None:

                anim_data.action.make_local()

            if not obj.get(blender.pipeline.AVALON_PROPERTY):

                obj[blender.pipeline.AVALON_PROPERTY] = dict()

            avalon_info = obj[blender.pipeline.AVALON_PROPERTY]
            avalon_info.update({"container_name": container_name})

            objects_list.append(obj)

        animation_container.pop(blender.pipeline.AVALON_PROPERTY)

        # Save the list of objects in the metadata container
        container_metadata["objects"] = objects_list

        bpy.ops.object.select_all(action='DESELECT')

        nodes = list(container.objects)
        nodes.append(container)
        self[:] = nodes
        return nodes

    def update(self, container: Dict, representation: Dict):
        """Update the loaded asset.

        This will remove all objects of the current collection, load the new
        ones and add them to the collection.
        If the objects of the collection are used in another collection they
        will not be removed, only unlinked. Normally this should not be the
        case though.

        Warning:
            No nested collections are supported at the moment!
        """

        collection = bpy.data.collections.get(
            container["objectName"]
        )

        libpath = Path(api.get_representation_path(representation))
        extension = libpath.suffix.lower()

        logger.info(
            "Container: %s\nRepresentation: %s",
            pformat(container, indent=2),
            pformat(representation, indent=2),
        )

        assert collection, (
            f"The asset is not loaded: {container['objectName']}"
        )
        assert not (collection.children), (
            "Nested collections are not supported."
        )
        assert libpath, (
            "No existing library file found for {container['objectName']}"
        )
        assert libpath.is_file(), (
            f"The file doesn't exist: {libpath}"
        )
        assert extension in pype.blender.plugin.VALID_EXTENSIONS, (
            f"Unsupported file: {libpath}"
        )

        collection_metadata = collection.get(
            blender.pipeline.AVALON_PROPERTY)

        collection_libpath = collection_metadata["libpath"]
        normalized_collection_libpath = (
            str(Path(bpy.path.abspath(collection_libpath)).resolve())
        )
        normalized_libpath = (
            str(Path(bpy.path.abspath(str(libpath))).resolve())
        )
        logger.debug(
            "normalized_collection_libpath:\n  %s\nnormalized_libpath:\n  %s",
            normalized_collection_libpath,
            normalized_libpath,
        )
        if normalized_collection_libpath == normalized_libpath:
            logger.info("Library already loaded, not updating...")
            return

        strips = []

        for obj in collection_metadata["objects"]:

            # Get all the strips that use the action
            arm_objs = [
                arm for arm in bpy.data.objects if arm.type == 'ARMATURE']

            for armature_obj in arm_objs:

                if armature_obj.animation_data is not None:

                    for track in armature_obj.animation_data.nla_tracks:

                        for strip in track.strips:

                            if strip.action == obj.animation_data.action:

                                strips.append(strip)

            bpy.data.actions.remove(obj.animation_data.action)
            bpy.data.objects.remove(obj)

        lib_container = collection_metadata["lib_container"]

        bpy.data.collections.remove(bpy.data.collections[lib_container])

        relative = bpy.context.preferences.filepaths.use_relative_paths
        with bpy.data.libraries.load(
            str(libpath), link=True, relative=relative
        ) as (_, data_to):
            data_to.collections = [lib_container]

        scene = bpy.context.scene

        scene.collection.children.link(bpy.data.collections[lib_container])

        anim_container = scene.collection.children[lib_container].make_local()

        objects_list = []

        # Link meshes first, then armatures.
        # The armature is unparented for all the non-local meshes,
        # when it is made local.
        for obj in anim_container.objects:

            obj = obj.make_local()

            anim_data = obj.animation_data

            if anim_data is not None and anim_data.action is not None:

                anim_data.action.make_local()

                for strip in strips:

                    strip.action = anim_data.action
                    strip.action_frame_end = anim_data.action.frame_range[1]

            if not obj.get(blender.pipeline.AVALON_PROPERTY):

                obj[blender.pipeline.AVALON_PROPERTY] = dict()

            avalon_info = obj[blender.pipeline.AVALON_PROPERTY]
            avalon_info.update({"container_name": collection.name})

            objects_list.append(obj)

        anim_container.pop(blender.pipeline.AVALON_PROPERTY)

        # Save the list of objects in the metadata container
        collection_metadata["objects"] = objects_list
        collection_metadata["libpath"] = str(libpath)
        collection_metadata["representation"] = str(representation["_id"])

        bpy.ops.object.select_all(action='DESELECT')

    def remove(self, container: Dict) -> bool:
        """Remove an existing container from a Blender scene.

        Arguments:
            container (avalon-core:container-1.0): Container to remove,
                from `host.ls()`.

        Returns:
            bool: Whether the container was deleted.

        Warning:
            No nested collections are supported at the moment!
        """

        collection = bpy.data.collections.get(
            container["objectName"]
        )
        if not collection:
            return False
        assert not (collection.children), (
            "Nested collections are not supported."
        )

        collection_metadata = collection.get(
            blender.pipeline.AVALON_PROPERTY)
        objects = collection_metadata["objects"]
        lib_container = collection_metadata["lib_container"]

        for obj in objects:

            # Get all the strips that use the action
            arm_objs = [
                arm for arm in bpy.data.objects if arm.type == 'ARMATURE']

            for armature_obj in arm_objs:

                if armature_obj.animation_data is not None:

                    for track in armature_obj.animation_data.nla_tracks:

                        for strip in track.strips:

                            if strip.action == obj.animation_data.action:

                                track.strips.remove(strip)

            bpy.data.actions.remove(obj.animation_data.action)
            bpy.data.objects.remove(obj)

        bpy.data.collections.remove(bpy.data.collections[lib_container])
        bpy.data.collections.remove(collection)

        return True

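These loader classes are not called directly; avalon's pipeline instantiates them. A sketch of the intended lifecycle, assuming avalon-core's standard entry points (the representation id is illustrative):

    from avalon import api

    # Runs BlendActionLoader.process_asset() and containerises the result.
    container = api.load(BlendActionLoader, "5e9c1f58...")  # representation id

    # Later, api.update(container, version=-1) should route to .update(),
    # and api.remove(container) to .remove() above.
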
pype/plugins/blender/load/load_animation.py (new file, 256 lines)

@@ -0,0 +1,256 @@
"""Load an animation in Blender."""

import logging
from pathlib import Path
from pprint import pformat
from typing import Dict, List, Optional

from avalon import api, blender
import bpy
import pype.blender.plugin


logger = logging.getLogger("pype").getChild(
    "blender").getChild("load_animation")


class BlendAnimationLoader(pype.blender.plugin.AssetLoader):
    """Load animations from a .blend file.

    Warning:
        Loading the same asset more then once is not properly supported at the
        moment.
    """

    families = ["animation"]
    representations = ["blend"]

    label = "Link Animation"
    icon = "code-fork"
    color = "orange"

    @staticmethod
    def _remove(self, objects, lib_container):

        for obj in objects:

            if obj.type == 'ARMATURE':
                bpy.data.armatures.remove(obj.data)
            elif obj.type == 'MESH':
                bpy.data.meshes.remove(obj.data)

        bpy.data.collections.remove(bpy.data.collections[lib_container])

    @staticmethod
    def _process(self, libpath, lib_container, container_name):

        relative = bpy.context.preferences.filepaths.use_relative_paths
        with bpy.data.libraries.load(
            libpath, link=True, relative=relative
        ) as (_, data_to):
            data_to.collections = [lib_container]

        scene = bpy.context.scene

        scene.collection.children.link(bpy.data.collections[lib_container])

        anim_container = scene.collection.children[lib_container].make_local()

        meshes = [obj for obj in anim_container.objects if obj.type == 'MESH']
        armatures = [
            obj for obj in anim_container.objects if obj.type == 'ARMATURE']

        # Should check if there is only an armature?

        objects_list = []

        # Link meshes first, then armatures.
        # The armature is unparented for all the non-local meshes,
        # when it is made local.
        for obj in meshes + armatures:

            obj = obj.make_local()

            obj.data.make_local()

            anim_data = obj.animation_data

            if anim_data is not None and anim_data.action is not None:

                anim_data.action.make_local()

            if not obj.get(blender.pipeline.AVALON_PROPERTY):

                obj[blender.pipeline.AVALON_PROPERTY] = dict()

            avalon_info = obj[blender.pipeline.AVALON_PROPERTY]
            avalon_info.update({"container_name": container_name})

            objects_list.append(obj)

        anim_container.pop(blender.pipeline.AVALON_PROPERTY)

        bpy.ops.object.select_all(action='DESELECT')

        return objects_list

    def process_asset(
        self, context: dict, name: str, namespace: Optional[str] = None,
        options: Optional[Dict] = None
    ) -> Optional[List]:
        """
        Arguments:
            name: Use pre-defined name
            namespace: Use pre-defined namespace
            context: Full parenthood of representation to load
            options: Additional settings dictionary
        """

        libpath = self.fname
        asset = context["asset"]["name"]
        subset = context["subset"]["name"]
        lib_container = pype.blender.plugin.asset_name(asset, subset)
        container_name = pype.blender.plugin.asset_name(
            asset, subset, namespace
        )

        container = bpy.data.collections.new(lib_container)
        container.name = container_name
        blender.pipeline.containerise_existing(
            container,
            name,
            namespace,
            context,
            self.__class__.__name__,
        )

        container_metadata = container.get(
            blender.pipeline.AVALON_PROPERTY)

        container_metadata["libpath"] = libpath
        container_metadata["lib_container"] = lib_container

        objects_list = self._process(
            self, libpath, lib_container, container_name)

        # Save the list of objects in the metadata container
        container_metadata["objects"] = objects_list

        nodes = list(container.objects)
        nodes.append(container)
        self[:] = nodes
        return nodes

    def update(self, container: Dict, representation: Dict):
        """Update the loaded asset.

        This will remove all objects of the current collection, load the new
        ones and add them to the collection.
        If the objects of the collection are used in another collection they
        will not be removed, only unlinked. Normally this should not be the
        case though.

        Warning:
            No nested collections are supported at the moment!
        """

        collection = bpy.data.collections.get(
            container["objectName"]
        )

        libpath = Path(api.get_representation_path(representation))
        extension = libpath.suffix.lower()

        logger.info(
            "Container: %s\nRepresentation: %s",
            pformat(container, indent=2),
            pformat(representation, indent=2),
        )

        assert collection, (
            f"The asset is not loaded: {container['objectName']}"
        )
        assert not (collection.children), (
            "Nested collections are not supported."
        )
        assert libpath, (
            "No existing library file found for {container['objectName']}"
        )
        assert libpath.is_file(), (
            f"The file doesn't exist: {libpath}"
        )
        assert extension in pype.blender.plugin.VALID_EXTENSIONS, (
            f"Unsupported file: {libpath}"
        )

        collection_metadata = collection.get(
            blender.pipeline.AVALON_PROPERTY)

        collection_libpath = collection_metadata["libpath"]
        normalized_collection_libpath = (
            str(Path(bpy.path.abspath(collection_libpath)).resolve())
        )
        normalized_libpath = (
            str(Path(bpy.path.abspath(str(libpath))).resolve())
        )
        logger.debug(
            "normalized_collection_libpath:\n  %s\nnormalized_libpath:\n  %s",
            normalized_collection_libpath,
            normalized_libpath,
        )
        if normalized_collection_libpath == normalized_libpath:
            logger.info("Library already loaded, not updating...")
            return

        objects = collection_metadata["objects"]
        lib_container = collection_metadata["lib_container"]

        # Get the armature of the rig
        armatures = [obj for obj in objects if obj.type == 'ARMATURE']
        assert(len(armatures) == 1)

        self._remove(self, objects, lib_container)

        objects_list = self._process(
            self, str(libpath), lib_container, collection.name)

        # Save the list of objects in the metadata container
        collection_metadata["objects"] = objects_list
        collection_metadata["libpath"] = str(libpath)
        collection_metadata["representation"] = str(representation["_id"])

        bpy.ops.object.select_all(action='DESELECT')

    def remove(self, container: Dict) -> bool:
        """Remove an existing container from a Blender scene.

        Arguments:
            container (avalon-core:container-1.0): Container to remove,
                from `host.ls()`.

        Returns:
            bool: Whether the container was deleted.

        Warning:
            No nested collections are supported at the moment!
        """

        collection = bpy.data.collections.get(
            container["objectName"]
        )
        if not collection:
            return False
        assert not (collection.children), (
            "Nested collections are not supported."
        )

        collection_metadata = collection.get(
            blender.pipeline.AVALON_PROPERTY)
        objects = collection_metadata["objects"]
        lib_container = collection_metadata["lib_container"]

        self._remove(self, objects, lib_container)

        bpy.data.collections.remove(collection)

        return True

@@ -5,15 +5,14 @@ from pathlib import Path
from pprint import pformat
from typing import Dict, List, Optional

import avalon.blender.pipeline
from avalon import api, blender
import bpy
import pype.blender
from avalon import api
import pype.blender.plugin

logger = logging.getLogger("pype").getChild("blender").getChild("load_model")


class BlendModelLoader(pype.blender.AssetLoader):
class BlendModelLoader(pype.blender.plugin.AssetLoader):
    """Load models from a .blend file.

    Because they come from a .blend file we can simply link the collection that
@@ -32,34 +31,55 @@ class BlendModelLoader(pype.blender.AssetLoader):
    color = "orange"

    @staticmethod
    def _get_lib_collection(name: str, libpath: Path) -> Optional[bpy.types.Collection]:
        """Find the collection(s) with name, loaded from libpath.
    def _remove(self, objects, lib_container):

        Note:
            It is assumed that only 1 matching collection is found.
        """
        for collection in bpy.data.collections:
            if collection.name != name:
                continue
            if collection.library is None:
                continue
            if not collection.library.filepath:
                continue
            collection_lib_path = str(Path(bpy.path.abspath(collection.library.filepath)).resolve())
            normalized_libpath = str(Path(bpy.path.abspath(str(libpath))).resolve())
            if collection_lib_path == normalized_libpath:
                return collection
        return None
        for obj in objects:

            bpy.data.meshes.remove(obj.data)

        bpy.data.collections.remove(bpy.data.collections[lib_container])

    @staticmethod
    def _collection_contains_object(
        collection: bpy.types.Collection, object: bpy.types.Object
    ) -> bool:
        """Check if the collection contains the object."""
        for obj in collection.objects:
            if obj == object:
                return True
        return False
    def _process(self, libpath, lib_container, container_name):

        relative = bpy.context.preferences.filepaths.use_relative_paths
        with bpy.data.libraries.load(
            libpath, link=True, relative=relative
        ) as (_, data_to):
            data_to.collections = [lib_container]

        scene = bpy.context.scene

        scene.collection.children.link(bpy.data.collections[lib_container])

        model_container = scene.collection.children[lib_container].make_local()

        objects_list = []

        for obj in model_container.objects:

            obj = obj.make_local()

            obj.data.make_local()

            for material_slot in obj.material_slots:

                material_slot.material.make_local()

            if not obj.get(blender.pipeline.AVALON_PROPERTY):

                obj[blender.pipeline.AVALON_PROPERTY] = dict()

            avalon_info = obj[blender.pipeline.AVALON_PROPERTY]
            avalon_info.update({"container_name": container_name})

            objects_list.append(obj)

        model_container.pop(blender.pipeline.AVALON_PROPERTY)

        bpy.ops.object.select_all(action='DESELECT')

        return objects_list

    def process_asset(
        self, context: dict, name: str, namespace: Optional[str] = None,
@@ -76,42 +96,35 @@ class BlendModelLoader(pype.blender.AssetLoader):
        libpath = self.fname
        asset = context["asset"]["name"]
        subset = context["subset"]["name"]
        lib_container = pype.blender.plugin.model_name(asset, subset)
        container_name = pype.blender.plugin.model_name(
        lib_container = pype.blender.plugin.asset_name(asset, subset)
        container_name = pype.blender.plugin.asset_name(
            asset, subset, namespace
        )
        relative = bpy.context.preferences.filepaths.use_relative_paths

        with bpy.data.libraries.load(
            libpath, link=True, relative=relative
        ) as (_, data_to):
            data_to.collections = [lib_container]

        scene = bpy.context.scene
        instance_empty = bpy.data.objects.new(
            container_name, None
        )
        if not instance_empty.get("avalon"):
            instance_empty["avalon"] = dict()
        avalon_info = instance_empty["avalon"]
        avalon_info.update({"container_name": container_name})
        scene.collection.objects.link(instance_empty)
        instance_empty.instance_type = 'COLLECTION'
        container = bpy.data.collections[lib_container]
        container.name = container_name
        instance_empty.instance_collection = container
        container.make_local()
        avalon.blender.pipeline.containerise_existing(
            container,
        collection = bpy.data.collections.new(lib_container)
        collection.name = container_name
        blender.pipeline.containerise_existing(
            collection,
            name,
            namespace,
            context,
            self.__class__.__name__,
        )

        nodes = list(container.objects)
        nodes.append(container)
        nodes.append(instance_empty)
        container_metadata = collection.get(
            blender.pipeline.AVALON_PROPERTY)

        container_metadata["libpath"] = libpath
        container_metadata["lib_container"] = lib_container

        objects_list = self._process(
            self, libpath, lib_container, container_name)

        # Save the list of objects in the metadata container
        container_metadata["objects"] = objects_list

        nodes = list(collection.objects)
        nodes.append(collection)
        self[:] = nodes
        return nodes

@@ -154,9 +167,13 @@ class BlendModelLoader(pype.blender.AssetLoader):
        assert extension in pype.blender.plugin.VALID_EXTENSIONS, (
            f"Unsupported file: {libpath}"
        )
        collection_libpath = (
            self._get_library_from_container(collection).filepath
        )

        collection_metadata = collection.get(
            blender.pipeline.AVALON_PROPERTY)
        collection_libpath = collection_metadata["libpath"]
        objects = collection_metadata["objects"]
        lib_container = collection_metadata["lib_container"]

        normalized_collection_libpath = (
            str(Path(bpy.path.abspath(collection_libpath)).resolve())
        )
@@ -171,58 +188,16 @@ class BlendModelLoader(pype.blender.AssetLoader):
        if normalized_collection_libpath == normalized_libpath:
            logger.info("Library already loaded, not updating...")
            return
        # Let Blender's garbage collection take care of removing the library
        # itself after removing the objects.
        objects_to_remove = set()
        collection_objects = list()
        collection_objects[:] = collection.objects
        for obj in collection_objects:
            # Unlink every object
            collection.objects.unlink(obj)
            remove_obj = True
            for coll in [
                coll for coll in bpy.data.collections
                if coll != collection
            ]:
                if (
                    coll.objects and
                    self._collection_contains_object(coll, obj)
                ):
                    remove_obj = False
            if remove_obj:
                objects_to_remove.add(obj)

        for obj in objects_to_remove:
            # Only delete objects that are not used elsewhere
            bpy.data.objects.remove(obj)
        self._remove(self, objects, lib_container)

        instance_empties = [
            obj for obj in collection.users_dupli_group
            if obj.name in collection.name
        ]
        if instance_empties:
            instance_empty = instance_empties[0]
            container_name = instance_empty["avalon"]["container_name"]
        objects_list = self._process(
            self, str(libpath), lib_container, collection.name)

        relative = bpy.context.preferences.filepaths.use_relative_paths
        with bpy.data.libraries.load(
            str(libpath), link=True, relative=relative
        ) as (_, data_to):
            data_to.collections = [container_name]

        new_collection = self._get_lib_collection(container_name, libpath)
        if new_collection is None:
            raise ValueError(
                "A matching collection '{container_name}' "
                "should have been found in: {libpath}"
            )

        for obj in new_collection.objects:
            collection.objects.link(obj)
        bpy.data.collections.remove(new_collection)
        # Update the representation on the collection
        avalon_prop = collection[avalon.blender.pipeline.AVALON_PROPERTY]
        avalon_prop["representation"] = str(representation["_id"])
        # Save the list of objects in the metadata container
        collection_metadata["objects"] = objects_list
        collection_metadata["libpath"] = str(libpath)
        collection_metadata["representation"] = str(representation["_id"])

    def remove(self, container: Dict) -> bool:
        """Remove an existing container from a Blender scene.

@@ -245,16 +220,20 @@ class BlendModelLoader(pype.blender.AssetLoader):
        assert not (collection.children), (
            "Nested collections are not supported."
        )
        instance_parents = list(collection.users_dupli_group)
        instance_objects = list(collection.objects)
        for obj in instance_objects + instance_parents:
            bpy.data.objects.remove(obj)

        collection_metadata = collection.get(
            blender.pipeline.AVALON_PROPERTY)
        objects = collection_metadata["objects"]
        lib_container = collection_metadata["lib_container"]

        self._remove(self, objects, lib_container)

        bpy.data.collections.remove(collection)

        return True


class CacheModelLoader(pype.blender.AssetLoader):
class CacheModelLoader(pype.blender.plugin.AssetLoader):
    """Load cache models.

    Stores the imported asset in a collection named after the asset.

@@ -281,7 +260,8 @@ class CacheModelLoader(pype.blender.AssetLoader):
            context: Full parenthood of representation to load
            options: Additional settings dictionary
        """
        raise NotImplementedError("Loading of Alembic files is not yet implemented.")
        raise NotImplementedError(
            "Loading of Alembic files is not yet implemented.")
        # TODO (jasper): implement Alembic import.

        libpath = self.fname
@@ -289,7 +269,7 @@ class CacheModelLoader(pype.blender.AssetLoader):
        subset = context["subset"]["name"]
        # TODO (jasper): evaluate use of namespace which is 'alien' to Blender.
        lib_container = container_name = (
            pype.blender.plugin.model_name(asset, subset, namespace)
            pype.blender.plugin.asset_name(asset, subset, namespace)
        )
        relative = bpy.context.preferences.filepaths.use_relative_paths

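Every loader in this commit guards update() with the same idempotence check: normalize both library paths, then compare. As a standalone sketch:

    from pathlib import Path
    import bpy

    def same_library(stored: str, incoming: str) -> bool:
        # bpy.path.abspath expands Blender's '//' relative prefix;
        # Path.resolve() then normalizes the rest before comparing.
        return (Path(bpy.path.abspath(stored)).resolve()
                == Path(bpy.path.abspath(incoming)).resolve())
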
pype/plugins/blender/load/load_rig.py (new file, 256 lines)

@@ -0,0 +1,256 @@
"""Load a rig asset in Blender."""

import logging
from pathlib import Path
from pprint import pformat
from typing import Dict, List, Optional

from avalon import api, blender
import bpy
import pype.blender.plugin

logger = logging.getLogger("pype").getChild("blender").getChild("load_model")


class BlendRigLoader(pype.blender.plugin.AssetLoader):
    """Load rigs from a .blend file.

    Because they come from a .blend file we can simply link the collection that
    contains the model. There is no further need to 'containerise' it.

    Warning:
        Loading the same asset more then once is not properly supported at the
        moment.
    """

    families = ["rig"]
    representations = ["blend"]

    label = "Link Rig"
    icon = "code-fork"
    color = "orange"

    @staticmethod
    def _remove(self, objects, lib_container):

        for obj in objects:

            if obj.type == 'ARMATURE':
                bpy.data.armatures.remove(obj.data)
            elif obj.type == 'MESH':
                bpy.data.meshes.remove(obj.data)

        bpy.data.collections.remove(bpy.data.collections[lib_container])

    @staticmethod
    def _process(self, libpath, lib_container, container_name, action):

        relative = bpy.context.preferences.filepaths.use_relative_paths
        with bpy.data.libraries.load(
            libpath, link=True, relative=relative
        ) as (_, data_to):
            data_to.collections = [lib_container]

        scene = bpy.context.scene

        scene.collection.children.link(bpy.data.collections[lib_container])

        rig_container = scene.collection.children[lib_container].make_local()

        meshes = [obj for obj in rig_container.objects if obj.type == 'MESH']
        armatures = [
            obj for obj in rig_container.objects if obj.type == 'ARMATURE']

        objects_list = []

        assert(len(armatures) == 1)

        # Link meshes first, then armatures.
        # The armature is unparented for all the non-local meshes,
        # when it is made local.
        for obj in meshes + armatures:

            obj = obj.make_local()

            obj.data.make_local()

            if not obj.get(blender.pipeline.AVALON_PROPERTY):

                obj[blender.pipeline.AVALON_PROPERTY] = dict()

            avalon_info = obj[blender.pipeline.AVALON_PROPERTY]
            avalon_info.update({"container_name": container_name})

            if obj.type == 'ARMATURE' and action is not None:

                obj.animation_data.action = action

            objects_list.append(obj)

        rig_container.pop(blender.pipeline.AVALON_PROPERTY)

        bpy.ops.object.select_all(action='DESELECT')

        return objects_list

    def process_asset(
        self, context: dict, name: str, namespace: Optional[str] = None,
        options: Optional[Dict] = None
    ) -> Optional[List]:
        """
        Arguments:
            name: Use pre-defined name
            namespace: Use pre-defined namespace
            context: Full parenthood of representation to load
            options: Additional settings dictionary
        """

        libpath = self.fname
        asset = context["asset"]["name"]
        subset = context["subset"]["name"]
        lib_container = pype.blender.plugin.asset_name(asset, subset)
        container_name = pype.blender.plugin.asset_name(
            asset, subset, namespace
        )

        container = bpy.data.collections.new(lib_container)
        container.name = container_name
        blender.pipeline.containerise_existing(
            container,
            name,
            namespace,
            context,
            self.__class__.__name__,
        )

        container_metadata = container.get(
            blender.pipeline.AVALON_PROPERTY)

        container_metadata["libpath"] = libpath
        container_metadata["lib_container"] = lib_container

        objects_list = self._process(
            self, libpath, lib_container, container_name, None)

        # Save the list of objects in the metadata container
        container_metadata["objects"] = objects_list

        nodes = list(container.objects)
        nodes.append(container)
        self[:] = nodes
        return nodes

    def update(self, container: Dict, representation: Dict):
        """Update the loaded asset.

        This will remove all objects of the current collection, load the new
        ones and add them to the collection.
        If the objects of the collection are used in another collection they
        will not be removed, only unlinked. Normally this should not be the
        case though.

        Warning:
            No nested collections are supported at the moment!
        """

        collection = bpy.data.collections.get(
            container["objectName"]
        )

        libpath = Path(api.get_representation_path(representation))
        extension = libpath.suffix.lower()

        logger.info(
            "Container: %s\nRepresentation: %s",
            pformat(container, indent=2),
            pformat(representation, indent=2),
        )

        assert collection, (
            f"The asset is not loaded: {container['objectName']}"
        )
        assert not (collection.children), (
            "Nested collections are not supported."
        )
        assert libpath, (
            "No existing library file found for {container['objectName']}"
        )
        assert libpath.is_file(), (
            f"The file doesn't exist: {libpath}"
        )
        assert extension in pype.blender.plugin.VALID_EXTENSIONS, (
            f"Unsupported file: {libpath}"
        )

        collection_metadata = collection.get(
            blender.pipeline.AVALON_PROPERTY)
        collection_libpath = collection_metadata["libpath"]
        objects = collection_metadata["objects"]
        lib_container = collection_metadata["lib_container"]

        normalized_collection_libpath = (
            str(Path(bpy.path.abspath(collection_libpath)).resolve())
        )
        normalized_libpath = (
            str(Path(bpy.path.abspath(str(libpath))).resolve())
        )
        logger.debug(
            "normalized_collection_libpath:\n  %s\nnormalized_libpath:\n  %s",
            normalized_collection_libpath,
            normalized_libpath,
        )
        if normalized_collection_libpath == normalized_libpath:
            logger.info("Library already loaded, not updating...")
            return

        # Get the armature of the rig
        armatures = [obj for obj in objects if obj.type == 'ARMATURE']
        assert(len(armatures) == 1)

        action = armatures[0].animation_data.action

        self._remove(self, objects, lib_container)

        objects_list = self._process(
            self, str(libpath), lib_container, collection.name, action)

        # Save the list of objects in the metadata container
        collection_metadata["objects"] = objects_list
        collection_metadata["libpath"] = str(libpath)
        collection_metadata["representation"] = str(representation["_id"])

        bpy.ops.object.select_all(action='DESELECT')

    def remove(self, container: Dict) -> bool:
        """Remove an existing container from a Blender scene.

        Arguments:
            container (avalon-core:container-1.0): Container to remove,
                from `host.ls()`.

        Returns:
            bool: Whether the container was deleted.

        Warning:
            No nested collections are supported at the moment!
        """

        collection = bpy.data.collections.get(
            container["objectName"]
        )
        if not collection:
            return False
        assert not (collection.children), (
            "Nested collections are not supported."
        )

        collection_metadata = collection.get(
            blender.pipeline.AVALON_PROPERTY)
        objects = collection_metadata["objects"]
        lib_container = collection_metadata["lib_container"]

        self._remove(self, objects, lib_container)

        bpy.data.collections.remove(collection)

        return True

@@ -14,3 +14,6 @@ class CollectBlenderCurrentFile(pyblish.api.ContextPlugin):
        """Inject the current working file"""
        current_file = bpy.data.filepath
        context.data['currentFile'] = current_file

        assert current_file != '', "Current file is empty. " \
            "Save the file before continuing."

@@ -1,22 +1,21 @@
import typing
from typing import Generator

import bpy
import json

import avalon.api
import pyblish.api
from avalon.blender.pipeline import AVALON_PROPERTY


class CollectModel(pyblish.api.ContextPlugin):
class CollectInstances(pyblish.api.ContextPlugin):
    """Collect the data of a model."""

    hosts = ["blender"]
    label = "Collect Model"
    label = "Collect Instances"
    order = pyblish.api.CollectorOrder

    @staticmethod
    def get_model_collections() -> Generator:
    def get_collections() -> Generator:
        """Return all 'model' collections.

        Check if the family is 'model' and if it doesn't have the
@@ -25,13 +24,13 @@ class CollectModel(pyblish.api.ContextPlugin):
        """
        for collection in bpy.data.collections:
            avalon_prop = collection.get(AVALON_PROPERTY) or dict()
            if (avalon_prop.get('family') == 'model'
                    and not avalon_prop.get('representation')):
            if avalon_prop.get('id') == 'pyblish.avalon.instance':
                yield collection

    def process(self, context):
        """Collect the models from the current Blender scene."""
        collections = self.get_model_collections()
        collections = self.get_collections()

        for collection in collections:
            avalon_prop = collection[AVALON_PROPERTY]
            asset = avalon_prop['asset']
@@ -50,4 +49,6 @@ class CollectModel(pyblish.api.ContextPlugin):
        members = list(collection.objects)
        members.append(collection)
        instance[:] = members
        self.log.debug(instance.data)
        self.log.debug(json.dumps(instance.data, indent=4))
        for obj in instance:
            self.log.debug(obj)

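Concretely, a collection becomes collectible once it carries the instance marker. Assuming AVALON_PROPERTY is the usual "avalon" key, the imprinted data looks roughly like this (values illustrative):

    coll = bpy.data.collections.new("hero_modelMain")
    coll["avalon"] = {
        "id": "pyblish.avalon.instance",   # the key the collector now tests
        "family": "model",                 # any family is picked up now
        "asset": "hero",
        "subset": "modelMain",
    }
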
95
pype/plugins/blender/publish/extract_abc.py
Normal file
95
pype/plugins/blender/publish/extract_abc.py
Normal file
|
|
@@ -0,0 +1,95 @@
import os

import pype.api
import pype.blender.plugin

import bpy


class ExtractABC(pype.api.Extractor):
    """Extract as ABC."""

    label = "Extract ABC"
    hosts = ["blender"]
    families = ["model"]
    optional = True

    def process(self, instance):
        # Define extract output file path

        stagingdir = self.staging_dir(instance)
        filename = f"{instance.name}.abc"
        filepath = os.path.join(stagingdir, filename)

        context = bpy.context
        scene = context.scene
        view_layer = context.view_layer

        # Perform extraction
        self.log.info("Performing extraction..")

        collections = [
            obj for obj in instance if type(obj) is bpy.types.Collection]

        assert len(collections) == 1, "There should be one and only one " \
            "collection collected for this asset"

        old_active_layer_collection = view_layer.active_layer_collection

        layers = view_layer.layer_collection.children

        # Get the layer collection from the collection we need to export.
        # This is needed because in Blender you can only set the active
        # collection with the layer collection, and there is no way to get
        # the layer collection from the collection
        # (but there is the vice versa).
        layer_collections = [
            layer for layer in layers if layer.collection == collections[0]]

        assert len(layer_collections) == 1

        view_layer.active_layer_collection = layer_collections[0]

        old_scale = scene.unit_settings.scale_length

        selected = list()

        for obj in instance:
            try:
                obj.select_set(True)
                selected.append(obj)
            except Exception:
                continue

        new_context = pype.blender.plugin.create_blender_context(
            active=selected[0], selected=selected)

        # We set the scale of the scene for the export
        scene.unit_settings.scale_length = 0.01

        self.log.info(new_context)

        # We export the abc
        bpy.ops.wm.alembic_export(
            new_context,
            filepath=filepath,
            start=1,
            end=1
        )

        view_layer.active_layer_collection = old_active_layer_collection

        scene.unit_settings.scale_length = old_scale

        if "representations" not in instance.data:
            instance.data["representations"] = []

        representation = {
            'name': 'abc',
            'ext': 'abc',
            'files': filename,
            "stagingDir": stagingdir,
        }
        instance.data["representations"].append(representation)

        self.log.info("Extracted instance '%s' to: %s",
                      instance.name, representation)
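
The comment above notes that Blender only maps layer collections to collections, not the reverse, and the code only searches the view layer's direct children. A recursive lookup (a sketch, not part of this commit; find_layer_collection is a hypothetical helper) would also handle nested collections:

    import bpy

    def find_layer_collection(layer_collection, collection):
        # Depth-first search: Blender only exposes collection lookup
        # through the view layer tree, so we walk it manually.
        if layer_collection.collection == collection:
            return layer_collection
        for child in layer_collection.children:
            found = find_layer_collection(child, collection)
            if found:
                return found
        return None

    # Usage sketch:
    # view_layer = bpy.context.view_layer
    # layer_col = find_layer_collection(
    #     view_layer.layer_collection, my_collection)
    # if layer_col:
    #     view_layer.active_layer_collection = layer_col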
@@ -1,47 +1,47 @@
import os
import avalon.blender.workio

import pype.api


class ExtractModel(pype.api.Extractor):
    """Extract as model."""

    label = "Model"
    hosts = ["blender"]
    families = ["model"]
    optional = True

    def process(self, instance):
        # Define extract output file path

        stagingdir = self.staging_dir(instance)
        filename = f"{instance.name}.blend"
        filepath = os.path.join(stagingdir, filename)

        # Perform extraction
        self.log.info("Performing extraction..")

        # Just save the file to a temporary location. At least for now it's no
        # problem to have (possibly) extra stuff in the file.
        avalon.blender.workio.save_file(filepath, copy=True)
        #
        # # Store reference for integration
        # if "files" not in instance.data:
        #     instance.data["files"] = list()
        #
        # # instance.data["files"].append(filename)

        if "representations" not in instance.data:
            instance.data["representations"] = []

        representation = {
            'name': 'blend',
            'ext': 'blend',
            'files': filename,
            "stagingDir": stagingdir,
        }
        instance.data["representations"].append(representation)


        self.log.info("Extracted instance '%s' to: %s", instance.name, representation)

import os
import avalon.blender.workio

import pype.api


class ExtractBlend(pype.api.Extractor):
    """Extract a blend file."""

    label = "Extract Blend"
    hosts = ["blender"]
    families = ["animation", "model", "rig", "action"]
    optional = True

    def process(self, instance):
        # Define extract output file path

        stagingdir = self.staging_dir(instance)
        filename = f"{instance.name}.blend"
        filepath = os.path.join(stagingdir, filename)

        # Perform extraction
        self.log.info("Performing extraction..")

        # Just save the file to a temporary location. At least for now it's no
        # problem to have (possibly) extra stuff in the file.
        avalon.blender.workio.save_file(filepath, copy=True)
        #
        # # Store reference for integration
        # if "files" not in instance.data:
        #     instance.data["files"] = list()
        #
        # # instance.data["files"].append(filename)

        if "representations" not in instance.data:
            instance.data["representations"] = []

        representation = {
            'name': 'blend',
            'ext': 'blend',
            'files': filename,
            "stagingDir": stagingdir,
        }
        instance.data["representations"].append(representation)

        self.log.info("Extracted instance '%s' to: %s",
                      instance.name, representation)
81
pype/plugins/blender/publish/extract_fbx.py
Normal file
@@ -0,0 +1,81 @@
import os

import pype.api

import bpy


class ExtractFBX(pype.api.Extractor):
    """Extract as FBX."""

    label = "Extract FBX"
    hosts = ["blender"]
    families = ["model", "rig"]
    optional = True

    def process(self, instance):
        # Define extract output file path

        stagingdir = self.staging_dir(instance)
        filename = f"{instance.name}.fbx"
        filepath = os.path.join(stagingdir, filename)

        context = bpy.context
        scene = context.scene
        view_layer = context.view_layer

        # Perform extraction
        self.log.info("Performing extraction..")

        collections = [
            obj for obj in instance if type(obj) is bpy.types.Collection]

        assert len(collections) == 1, "There should be one and only one " \
            "collection collected for this asset"

        old_active_layer_collection = view_layer.active_layer_collection

        layers = view_layer.layer_collection.children

        # Get the layer collection from the collection we need to export.
        # This is needed because in Blender you can only set the active
        # collection with the layer collection, and there is no way to get
        # the layer collection from the collection
        # (but there is the vice versa).
        layer_collections = [
            layer for layer in layers if layer.collection == collections[0]]

        assert len(layer_collections) == 1

        view_layer.active_layer_collection = layer_collections[0]

        old_scale = scene.unit_settings.scale_length

        # We set the scale of the scene for the export
        scene.unit_settings.scale_length = 0.01

        # We export the fbx
        bpy.ops.export_scene.fbx(
            filepath=filepath,
            use_active_collection=True,
            mesh_smooth_type='FACE',
            add_leaf_bones=False
        )

        view_layer.active_layer_collection = old_active_layer_collection

        scene.unit_settings.scale_length = old_scale

        if "representations" not in instance.data:
            instance.data["representations"] = []

        representation = {
            'name': 'fbx',
            'ext': 'fbx',
            'files': filename,
            "stagingDir": stagingdir,
        }
        instance.data["representations"].append(representation)

        self.log.info("Extracted instance '%s' to: %s",
                      instance.name, representation)
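
Both the ABC and FBX extractors save scene.unit_settings.scale_length and the active layer collection, export, then restore them by hand; if the export raises, the scene is left modified. A context-manager sketch (illustrative only, not part of this commit) would make the restore exception-safe:

    import contextlib

    import bpy

    @contextlib.contextmanager
    def temporary_scale(scale):
        # Restore the original unit scale even if the export fails.
        scene = bpy.context.scene
        old_scale = scene.unit_settings.scale_length
        scene.unit_settings.scale_length = scale
        try:
            yield
        finally:
            scene.unit_settings.scale_length = old_scale

    # Usage sketch:
    # with temporary_scale(0.01):
    #     bpy.ops.export_scene.fbx(filepath="/tmp/out.fbx")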
139
pype/plugins/blender/publish/extract_fbx_animation.py
Normal file
@@ -0,0 +1,139 @@
import os

import pype.api

import bpy
import bpy_extras
import bpy_extras.anim_utils


class ExtractAnimationFBX(pype.api.Extractor):
    """Extract as animation."""

    label = "Extract FBX"
    hosts = ["blender"]
    families = ["animation"]
    optional = True

    def process(self, instance):
        # Define extract output file path

        stagingdir = self.staging_dir(instance)
        filename = f"{instance.name}.fbx"
        filepath = os.path.join(stagingdir, filename)

        context = bpy.context
        scene = context.scene
        view_layer = context.view_layer

        # Perform extraction
        self.log.info("Performing extraction..")

        collections = [
            obj for obj in instance if type(obj) is bpy.types.Collection]

        assert len(collections) == 1, "There should be one and only one " \
            "collection collected for this asset"

        old_active_layer_collection = view_layer.active_layer_collection

        layers = view_layer.layer_collection.children

        # Get the layer collection from the collection we need to export.
        # This is needed because in Blender you can only set the active
        # collection with the layer collection, and there is no way to get
        # the layer collection from the collection
        # (but there is the vice versa).
        layer_collections = [
            layer for layer in layers if layer.collection == collections[0]]

        assert len(layer_collections) == 1

        view_layer.active_layer_collection = layer_collections[0]

        old_scale = scene.unit_settings.scale_length

        # We set the scale of the scene for the export
        scene.unit_settings.scale_length = 0.01

        armatures = [
            obj for obj in collections[0].objects if obj.type == 'ARMATURE']

        object_action_pairs = []
        original_actions = []

        starting_frames = []
        ending_frames = []

        # For each armature, we make a copy of the current action
        for obj in armatures:

            curr_action = None
            copy_action = None

            if obj.animation_data and obj.animation_data.action:

                curr_action = obj.animation_data.action
                copy_action = curr_action.copy()

                curr_frame_range = curr_action.frame_range

                starting_frames.append(curr_frame_range[0])
                ending_frames.append(curr_frame_range[1])

            object_action_pairs.append((obj, copy_action))
            original_actions.append(curr_action)

        # We compute the starting and ending frames
        min_frame = min(starting_frames)
        max_frame = max(ending_frames)

        # We bake the copy of the current action for each object
        # (range end is exclusive, hence the + 1)
        bpy_extras.anim_utils.bake_action_objects(
            object_action_pairs,
            frames=range(int(min_frame), int(max_frame) + 1),
            do_object=False,
            do_clean=False
        )

        # We export the fbx
        bpy.ops.export_scene.fbx(
            filepath=filepath,
            use_active_collection=True,
            bake_anim_use_nla_strips=False,
            bake_anim_use_all_actions=False,
            add_leaf_bones=False
        )

        view_layer.active_layer_collection = old_active_layer_collection

        scene.unit_settings.scale_length = old_scale

        # We delete the baked action and set the original one back
        for i in range(0, len(object_action_pairs)):

            pair = object_action_pairs[i]
            action = original_actions[i]

            if action:

                pair[0].animation_data.action = action

            if pair[1]:

                pair[1].user_clear()
                bpy.data.actions.remove(pair[1])

        if "representations" not in instance.data:
            instance.data["representations"] = []

        representation = {
            'name': 'fbx',
            'ext': 'fbx',
            'files': filename,
            "stagingDir": stagingdir,
        }
        instance.data["representations"].append(representation)

        self.log.info("Extracted instance '%s' to: %s",
                      instance.name, representation)
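
Note that the original assignments were inverted (max_frame = min(starting_frames) and min_frame = max(ending_frames)), which yields an empty bake range whenever the actions' end frames lie after their start frames; the corrected version above takes the union of all action ranges. A quick worked example with two hypothetical armature actions:

    starting_frames = [1001.0, 995.0]
    ending_frames = [1100.0, 1090.0]

    min_frame = min(starting_frames)   # 995.0
    max_frame = max(ending_frames)     # 1100.0

    # range end is exclusive, hence the + 1 in the bake call
    frames = range(int(min_frame), int(max_frame) + 1)   # 995 .. 1100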
@@ -35,12 +35,15 @@ class ValidateMeshHasUvs(pyblish.api.InstancePlugin):
        invalid = []
        # TODO (jasper): only check objects in the collection that will be
        # published?
        for obj in [
            obj for obj in bpy.data.objects if obj.type == 'MESH'
        ]:
            # Make sure we are in object mode.
            bpy.ops.object.mode_set(mode='OBJECT')
            if not cls.has_uvs(obj):
                invalid.append(obj)
            obj for obj in instance]:
            try:
                if obj.type == 'MESH':
                    # Make sure we are in object mode.
                    bpy.ops.object.mode_set(mode='OBJECT')
                    if not cls.has_uvs(obj):
                        invalid.append(obj)
            except Exception:
                continue
        return invalid

    def process(self, instance):
@@ -22,6 +22,7 @@ class IntegrateFtrackInstance(pyblish.api.InstancePlugin):
        'setdress': 'setdress',
        'pointcache': 'cache',
        'render': 'render',
        'render2d': 'render',
        'nukescript': 'comp',
        'write': 'render',
        'review': 'mov',
@@ -127,7 +128,10 @@ class IntegrateFtrackInstance(pyblish.api.InstancePlugin):

        # Add custom attributes for AssetVersion
        assetversion_cust_attrs = {}
        intent_val = instance.context.data.get("intent", {}).get("value")
        intent_val = instance.context.data.get("intent")
        if intent_val and isinstance(intent_val, dict):
            intent_val = intent_val.get("value")

        if intent_val:
            assetversion_cust_attrs["intent"] = intent_val

@@ -71,8 +71,13 @@ class IntegrateFtrackNote(pyblish.api.InstancePlugin):

        session = instance.context.data["ftrackSession"]

        intent_val = instance.context.data.get("intent", {}).get("value")
        intent_label = instance.context.data.get("intent", {}).get("label")
        intent = instance.context.data.get("intent")
        if intent and isinstance(intent, dict):
            intent_val = intent.get("value")
            intent_label = intent.get("label")
        else:
            intent_val = intent_label = intent

        final_label = None
        if intent_val:
            final_label = self.get_intent_label(session, intent_val)
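
Several plugins in this changeset repeat the same normalization: the context's "intent" value may be missing, a plain string from older publishes, or a dict with "value"/"label" keys. A small helper sketch (not part of this commit; get_intent is a hypothetical name) capturing the pattern:

    def get_intent(context_data, key="value"):
        # "intent" may be absent, a plain string, or a dict with
        # "value"/"label" keys; normalize all three cases.
        intent = context_data.get("intent")
        if isinstance(intent, dict):
            return intent.get(key)
        return intent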
@@ -15,7 +15,7 @@ import pyblish.api
class CollectAvalonEntities(pyblish.api.ContextPlugin):
    """Collect Anatomy into Context"""

    order = pyblish.api.CollectorOrder
    order = pyblish.api.CollectorOrder - 0.02
    label = "Collect Avalon Entities"

    def process(self, context):
@@ -47,7 +47,16 @@ class CollectAvalonEntities(pyblish.api.ContextPlugin):
        context.data["assetEntity"] = asset_entity

        data = asset_entity['data']

        context.data["frameStart"] = data.get("frameStart")
        context.data["frameEnd"] = data.get("frameEnd")

        handles = int(data.get("handles") or 0)
        context.data["handles"] = handles
        context.data["handleStart"] = int(data.get("handleStart", handles))
        context.data["handleEnd"] = int(data.get("handleEnd", handles))

        frame_start_h = data.get("frameStart") - context.data["handleStart"]
        frame_end_h = data.get("frameEnd") + context.data["handleEnd"]
        context.data["frameStartHandle"] = frame_start_h
        context.data["frameEndHandle"] = frame_end_h
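
A worked example of the handle math above, with hypothetical asset data:

    data = {"frameStart": 1001, "frameEnd": 1100,
            "handleStart": 8, "handleEnd": 8}

    frame_start_h = data["frameStart"] - data["handleStart"]   # 993
    frame_end_h = data["frameEnd"] + data["handleEnd"]         # 1108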
@@ -13,7 +13,7 @@ class CollectRenderedFiles(pyblish.api.ContextPlugin):
    `PYPE_PUBLISH_DATA`. Those files _MUST_ share same context.

    """
    order = pyblish.api.CollectorOrder - 0.0001
    order = pyblish.api.CollectorOrder - 0.1
    targets = ["filesequence"]
    label = "Collect rendered frames"

@@ -54,9 +54,12 @@ class ExtractBurnin(pype.api.Extractor):
            "comment": instance.context.data.get("comment", "")
        })

        intent = instance.context.data.get("intent", {}).get("label")
        if intent:
            prep_data["intent"] = intent
        intent_label = instance.context.data.get("intent")
        if intent_label and isinstance(intent_label, dict):
            intent_label = intent_label.get("label")

        if intent_label:
            prep_data["intent"] = intent_label

        # get anatomy project
        anatomy = instance.context.data['anatomy']
@@ -11,7 +11,9 @@ class ExtractReviewSlate(pype.api.Extractor):

    label = "Review with Slate frame"
    order = pyblish.api.ExtractorOrder + 0.031
    families = ["slate"]
    families = ["slate", "review"]
    match = pyblish.api.Subset

    hosts = ["nuke", "maya", "shell"]
    optional = True

@@ -34,7 +36,8 @@ class ExtractReviewSlate(pype.api.Extractor):
        fps = inst_data.get("fps")

        # defining image ratios
        resolution_ratio = (float(resolution_width) * pixel_aspect) / resolution_height
        resolution_ratio = ((float(resolution_width) * pixel_aspect) /
                            resolution_height)
        delivery_ratio = float(to_width) / float(to_height)
        self.log.debug("__ resolution_ratio: `{}`".format(resolution_ratio))
        self.log.debug("__ delivery_ratio: `{}`".format(delivery_ratio))

@@ -89,7 +92,7 @@ class ExtractReviewSlate(pype.api.Extractor):
            input_args.extend([
                "-r {}".format(fps),
                "-t 0.04"]
            )
            )

            # output args
            codec_args = repre["_profile"].get('codec', [])

@@ -111,7 +114,7 @@ class ExtractReviewSlate(pype.api.Extractor):
                self.log.debug("lower than delivery")
                width_scale = int(to_width * scale_factor)
                width_half_pad = int((
                    to_width - width_scale)/2)
                    to_width - width_scale) / 2)
                height_scale = to_height
                height_half_pad = 0
            else:

@@ -124,7 +127,7 @@ class ExtractReviewSlate(pype.api.Extractor):
                height_scale = int(
                    resolution_height * scale_factor)
                height_half_pad = int(
                    (to_height - height_scale)/2)
                    (to_height - height_scale) / 2)

                self.log.debug(
                    "__ width_scale: `{}`".format(width_scale))

@@ -135,8 +138,10 @@ class ExtractReviewSlate(pype.api.Extractor):
            self.log.debug(
                "__ height_half_pad: `{}`".format(height_half_pad))

            scaling_arg = "scale={0}x{1}:flags=lanczos,pad={2}:{3}:{4}:{5}:black,setsar=1".format(
                width_scale, height_scale, to_width, to_height, width_half_pad, height_half_pad
            scaling_arg = ("scale={0}x{1}:flags=lanczos,"
                           "pad={2}:{3}:{4}:{5}:black,setsar=1").format(
                width_scale, height_scale, to_width, to_height,
                width_half_pad, height_half_pad
            )

            vf_back = self.add_video_filter_args(
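
For intuition, a hypothetical example of the filter string built above (numbers are illustrative; it assumes the branch where the source is wider than the delivery format, so the height is padded): fitting a 2048x858 source into 1920x1080 with a scale factor of 1920/2048 = 0.9375:

    to_width, to_height = 1920, 1080
    resolution_width, resolution_height = 2048, 858

    scale_factor = to_width / resolution_width                    # 0.9375
    width_scale, width_half_pad = to_width, 0
    height_scale = int(resolution_height * scale_factor)          # 804
    height_half_pad = int((to_height - height_scale) / 2)         # 138

    scaling_arg = ("scale={0}x{1}:flags=lanczos,"
                   "pad={2}:{3}:{4}:{5}:black,setsar=1").format(
        width_scale, height_scale, to_width, to_height,
        width_half_pad, height_half_pad
    )
    # scale=1920x804:flags=lanczos,pad=1920:1080:0:138:black,setsar=1

In ffmpeg's pad filter the arguments are width:height:x:y, so the scaled image is centered 138 pixels down on a black 1920x1080 canvas, and setsar=1 forces square pixels.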
604
pype/plugins/global/publish/integrate_master_version.py
Normal file
@@ -0,0 +1,604 @@
import os
import copy
import clique
import errno
import shutil

from pymongo import InsertOne, ReplaceOne
import pyblish.api
from avalon import api, io, schema
from avalon.vendor import filelink


class IntegrateMasterVersion(pyblish.api.InstancePlugin):
    label = "Integrate Master Version"
    # Must happen after IntegrateNew
    order = pyblish.api.IntegratorOrder + 0.1

    optional = True

    families = [
        "model",
        "rig",
        "setdress",
        "look",
        "pointcache",
        "animation"
    ]

    # Can specify representation names that will be ignored (lower case)
    ignored_representation_names = []
    db_representation_context_keys = [
        "project", "asset", "task", "subset", "representation",
        "family", "hierarchy", "task", "username"
    ]
    # TODO add family filtering
    # QUESTION/TODO this process should happen on server if crashed due to
    # permissions error on files (files were used or user didn't have perms)
    # *but all other plugins must be successfully completed

    def process(self, instance):
        self.log.debug(
            "--- Integration of Master version for subset `{}` begins.".format(
                instance.data.get("subset", str(instance))
            )
        )
        published_repres = instance.data.get("published_representations")
        if not published_repres:
            self.log.debug(
                "*** There are no published representations on the instance."
            )
            return

        project_name = api.Session["AVALON_PROJECT"]

        # TODO raise error if master not set?
        anatomy = instance.context.data["anatomy"]
        if "master" not in anatomy.templates:
            self.log.warning("!!! Anatomy does not have set `master` key!")
            return

        if "path" not in anatomy.templates["master"]:
            self.log.warning((
                "!!! There is not set `path` template in `master` anatomy"
                " for project \"{}\"."
            ).format(project_name))
            return

        master_template = anatomy.templates["master"]["path"]
        self.log.debug("`Master` template check was successful. `{}`".format(
            master_template
        ))

        master_publish_dir = self.get_publish_dir(instance)

        src_version_entity = instance.data.get("versionEntity")
        filtered_repre_ids = []
        for repre_id, repre_info in published_repres.items():
            repre = repre_info["representation"]
            if repre["name"].lower() in self.ignored_representation_names:
                self.log.debug(
                    "Filtering representation with name: `{}`".format(
                        repre["name"].lower()
                    )
                )
                filtered_repre_ids.append(repre_id)

        for repre_id in filtered_repre_ids:
            published_repres.pop(repre_id, None)

        if not published_repres:
            self.log.debug(
                "*** All published representations were filtered by name."
            )
            return

        if src_version_entity is None:
            self.log.debug((
                "Published version entity was not sent in representation data."
                " Querying entity from database."
            ))
            src_version_entity = (
                self.version_from_representations(published_repres)
            )

        if not src_version_entity:
            self.log.warning((
                "!!! Can't find origin version in database."
                " Skipping Master version publish."
            ))
            return

        all_copied_files = []
        transfers = instance.data.get("transfers", list())
        for _src, dst in transfers:
            dst = os.path.normpath(dst)
            if dst not in all_copied_files:
                all_copied_files.append(dst)

        hardlinks = instance.data.get("hardlinks", list())
        for _src, dst in hardlinks:
            dst = os.path.normpath(dst)
            if dst not in all_copied_files:
                all_copied_files.append(dst)

        all_repre_file_paths = []
        for repre_info in published_repres.values():
            published_files = repre_info.get("published_files") or []
            for file_path in published_files:
                file_path = os.path.normpath(file_path)
                if file_path not in all_repre_file_paths:
                    all_repre_file_paths.append(file_path)

        # TODO this is not best practice of getting resources for publish
        # WARNING due to this we must remove all files from master publish dir
        instance_publish_dir = os.path.normpath(
            instance.data["publishDir"]
        )
        other_file_paths_mapping = []
        for file_path in all_copied_files:
            # Check if it is from publishDir
            if not file_path.startswith(instance_publish_dir):
                continue

            if file_path in all_repre_file_paths:
                continue

            dst_filepath = file_path.replace(
                instance_publish_dir, master_publish_dir
            )
            other_file_paths_mapping.append((file_path, dst_filepath))

        # Current version
        old_version, old_repres = (
            self.current_master_ents(src_version_entity)
        )

        old_repres_by_name = {
            repre["name"].lower(): repre for repre in old_repres
        }

        if old_version:
            new_version_id = old_version["_id"]
        else:
            new_version_id = io.ObjectId()

        new_master_version = {
            "_id": new_version_id,
            "version_id": src_version_entity["_id"],
            "parent": src_version_entity["parent"],
            "type": "master_version",
            "schema": "pype:master_version-1.0"
        }
        schema.validate(new_master_version)

        # Don't make changes in database until everything is O.K.
        bulk_writes = []

        if old_version:
            self.log.debug("Replacing old master version.")
            bulk_writes.append(
                ReplaceOne(
                    {"_id": new_master_version["_id"]},
                    new_master_version
                )
            )
        else:
            self.log.debug("Creating first master version.")
            bulk_writes.append(
                InsertOne(new_master_version)
            )

        # Separate old representations into `to replace` and `to delete`
        old_repres_to_replace = {}
        old_repres_to_delete = {}
        for repre_info in published_repres.values():
            repre = repre_info["representation"]
            repre_name_low = repre["name"].lower()
            if repre_name_low in old_repres_by_name:
                old_repres_to_replace[repre_name_low] = (
                    old_repres_by_name.pop(repre_name_low)
                )

        if old_repres_by_name:
            old_repres_to_delete = old_repres_by_name

        archived_repres = list(io.find({
            # Check what is type of archived representation
            "type": "archived_representation",
            "parent": new_version_id
        }))
        archived_repres_by_name = {}
        for repre in archived_repres:
            repre_name_low = repre["name"].lower()
            archived_repres_by_name[repre_name_low] = repre

        backup_master_publish_dir = None
        if os.path.exists(master_publish_dir):
            backup_master_publish_dir = master_publish_dir + ".BACKUP"
            max_idx = 10
            idx = 0
            _backup_master_publish_dir = backup_master_publish_dir
            while os.path.exists(_backup_master_publish_dir):
                self.log.debug((
                    "Backup folder already exists."
                    " Trying to remove \"{}\""
                ).format(_backup_master_publish_dir))

                try:
                    shutil.rmtree(_backup_master_publish_dir)
                    backup_master_publish_dir = _backup_master_publish_dir
                    break
                except Exception:
                    self.log.info((
                        "Could not remove previous backup folder."
                        " Trying to add index to folder name"
                    ))

                _backup_master_publish_dir = (
                    backup_master_publish_dir + str(idx)
                )
                if not os.path.exists(_backup_master_publish_dir):
                    backup_master_publish_dir = _backup_master_publish_dir
                    break

                if idx > max_idx:
                    raise AssertionError((
                        "Backup folders are fully occupied to max index \"{}\""
                    ).format(max_idx))

                idx += 1

            self.log.debug("Backup folder path is \"{}\"".format(
                backup_master_publish_dir
            ))
            try:
                os.rename(master_publish_dir, backup_master_publish_dir)
            except PermissionError:
                raise AssertionError((
                    "Could not create master version because it is not"
                    " possible to replace current master files."
                ))
        try:
            src_to_dst_file_paths = []
            for repre_info in published_repres.values():

                # Skip if new repre does not have published repre files
                published_files = repre_info["published_files"]
                if len(published_files) == 0:
                    continue

                # Prepare anatomy data
                anatomy_data = repre_info["anatomy_data"]
                anatomy_data.pop("version", None)

                # Get filled path to repre context
                anatomy_filled = anatomy.format(anatomy_data)
                template_filled = anatomy_filled["master"]["path"]

                repre_data = {
                    "path": str(template_filled),
                    "template": master_template
                }
                repre_context = template_filled.used_values
                for key in self.db_representation_context_keys:
                    if (
                        key in repre_context or
                        key not in anatomy_data
                    ):
                        continue

                    repre_context[key] = anatomy_data[key]

                # Prepare new repre
                repre = copy.deepcopy(repre_info["representation"])
                repre["parent"] = new_master_version["_id"]
                repre["context"] = repre_context
                repre["data"] = repre_data
                repre.pop("_id", None)

                schema.validate(repre)

                repre_name_low = repre["name"].lower()
                # Replace current representation
                if repre_name_low in old_repres_to_replace:
                    old_repre = old_repres_to_replace.pop(repre_name_low)
                    repre["_id"] = old_repre["_id"]
                    bulk_writes.append(
                        ReplaceOne(
                            {"_id": old_repre["_id"]},
                            repre
                        )
                    )

                # Unarchive representation
                elif repre_name_low in archived_repres_by_name:
                    archived_repre = archived_repres_by_name.pop(
                        repre_name_low
                    )
                    old_id = archived_repre["old_id"]
                    repre["_id"] = old_id
                    bulk_writes.append(
                        ReplaceOne(
                            {"old_id": old_id},
                            repre
                        )
                    )

                # Create representation
                else:
                    repre["_id"] = io.ObjectId()
                    bulk_writes.append(
                        InsertOne(repre)
                    )

                # Prepare paths of source and destination files
                if len(published_files) == 1:
                    src_to_dst_file_paths.append(
                        (published_files[0], template_filled)
                    )
                    continue

                collections, remainders = clique.assemble(published_files)
                if remainders or not collections or len(collections) > 1:
                    raise Exception((
                        "Integrity error. Files of published representation "
                        "are a combination of frame collections and single "
                        "files. Collections: `{}` Single files: `{}`"
                    ).format(str(collections), str(remainders)))

                src_col = collections[0]

                # Get head and tail for collection
                frame_splitter = "_-_FRAME_SPLIT_-_"
                anatomy_data["frame"] = frame_splitter
                _anatomy_filled = anatomy.format(anatomy_data)
                _template_filled = _anatomy_filled["master"]["path"]
                head, tail = _template_filled.split(frame_splitter)
                padding = (
                    anatomy.templates["render"]["padding"]
                )

                dst_col = clique.Collection(
                    head=head, padding=padding, tail=tail
                )
                dst_col.indexes.clear()
                dst_col.indexes.update(src_col.indexes)
                for src_file, dst_file in zip(src_col, dst_col):
                    src_to_dst_file_paths.append(
                        (src_file, dst_file)
                    )

            self.path_checks = []

            # Copy(hardlink) paths of source and destination files
            # TODO should we *only* create hardlinks?
            # TODO should we keep files for deletion until this is successful?
            for src_path, dst_path in src_to_dst_file_paths:
                self.copy_file(src_path, dst_path)

            for src_path, dst_path in other_file_paths_mapping:
                self.copy_file(src_path, dst_path)

            # Archive not replaced old representations
            for repre_name_low, repre in old_repres_to_delete.items():
                # Replace archived representation (This is backup)
                # - should not happen to have both repre and archived repre
                if repre_name_low in archived_repres_by_name:
                    archived_repre = archived_repres_by_name.pop(
                        repre_name_low
                    )
                    repre["old_id"] = repre["_id"]
                    repre["_id"] = archived_repre["_id"]
                    repre["type"] = archived_repre["type"]
                    bulk_writes.append(
                        ReplaceOne(
                            {"_id": archived_repre["_id"]},
                            repre
                        )
                    )

                else:
                    repre["old_id"] = repre["_id"]
                    repre["_id"] = io.ObjectId()
                    repre["type"] = "archived_representation"
                    bulk_writes.append(
                        InsertOne(repre)
                    )

            if bulk_writes:
                io._database[io.Session["AVALON_PROJECT"]].bulk_write(
                    bulk_writes
                )

            # Remove backed-up previous master
            if (
                backup_master_publish_dir is not None and
                os.path.exists(backup_master_publish_dir)
            ):
                shutil.rmtree(backup_master_publish_dir)

        except Exception:
            if (
                backup_master_publish_dir is not None and
                os.path.exists(backup_master_publish_dir)
            ):
                os.rename(backup_master_publish_dir, master_publish_dir)
            self.log.error((
                "!!! Creating of Master version failed."
                " Previous master version may have lost some data!"
            ))
            raise

        self.log.debug((
            "--- Master version integration for subset `{}`"
            " seems to be successful."
        ).format(
            instance.data.get("subset", str(instance))
        ))

    def get_all_files_from_path(self, path):
        files = []
        for (dir_path, dir_names, file_names) in os.walk(path):
            for file_name in file_names:
                _path = os.path.join(dir_path, file_name)
                files.append(_path)
        return files

    def get_publish_dir(self, instance):
        anatomy = instance.context.data["anatomy"]
        template_data = copy.deepcopy(instance.data["anatomyData"])

        if "folder" in anatomy.templates["master"]:
            anatomy_filled = anatomy.format(template_data)
            publish_folder = anatomy_filled["master"]["folder"]
        else:
            # This is for cases of Deprecated anatomy without `folder`
            # TODO remove when all clients have solved this issue
            template_data.update({
                "frame": "FRAME_TEMP",
                "representation": "TEMP"
            })
            anatomy_filled = anatomy.format(template_data)
            # solve deprecated situation when `folder` key is not underneath
            # `publish` anatomy
            project_name = api.Session["AVALON_PROJECT"]
            self.log.warning((
                "Deprecation warning: Anatomy does not have set `folder`"
                " key underneath `publish` (in global or for project `{}`)."
            ).format(project_name))

            file_path = anatomy_filled["master"]["path"]
            # Directory
            publish_folder = os.path.dirname(file_path)

        publish_folder = os.path.normpath(publish_folder)

        self.log.debug("Master publish dir: \"{}\"".format(publish_folder))

        return publish_folder

    def copy_file(self, src_path, dst_path):
        # TODO check drives if are the same to check if we can hardlink
        dst_path = self.path_root_check(dst_path)
        src_path = self.path_root_check(src_path)

        dirname = os.path.dirname(dst_path)

        try:
            os.makedirs(dirname)
            self.log.debug("Folder(s) created: \"{}\"".format(dirname))
        except OSError as exc:
            if exc.errno != errno.EEXIST:
                self.log.error("An unexpected error occurred.", exc_info=True)
                raise

            self.log.debug("Folder already exists: \"{}\"".format(dirname))

        self.log.debug("Copying file \"{}\" to \"{}\"".format(
            src_path, dst_path
        ))

        # First try hardlink and copy if paths are cross drive
        try:
            filelink.create(src_path, dst_path, filelink.HARDLINK)
            # Return when successful
            return

        except OSError as exc:
            # re-raise exception if different than cross drive path
            if exc.errno != errno.EXDEV:
                raise

        shutil.copy(src_path, dst_path)

    def path_root_check(self, path):
        normalized_path = os.path.normpath(path)
        forward_slash_path = normalized_path.replace("\\", "/")

        drive, _path = os.path.splitdrive(normalized_path)
        if os.path.exists(drive + "/"):
            key = "drive_check{}".format(drive)
            if key not in self.path_checks:
                self.log.debug(
                    "Drive \"{}\" exists. Nothing to change.".format(drive)
                )
                self.path_checks.append(key)

            return normalized_path

        path_env_key = "PYPE_STUDIO_PROJECTS_PATH"
        mount_env_key = "PYPE_STUDIO_PROJECTS_MOUNT"
        missing_envs = []
        if path_env_key not in os.environ:
            missing_envs.append(path_env_key)

        if mount_env_key not in os.environ:
            missing_envs.append(mount_env_key)

        if missing_envs:
            key = "missing_envs"
            if key not in self.path_checks:
                self.path_checks.append(key)
                _add_s = ""
                if len(missing_envs) > 1:
                    _add_s = "s"

                self.log.warning((
                    "Can't replace MOUNT drive path to UNC path due to missing"
                    " environment variable{}: `{}`. This may cause issues"
                    " during publishing process."
                ).format(_add_s, ", ".join(missing_envs)))

            return normalized_path

        unc_root = os.environ[path_env_key].replace("\\", "/")
        mount_root = os.environ[mount_env_key].replace("\\", "/")

        # --- Remove slashes at the end of mount and unc roots ---
        while unc_root.endswith("/"):
            unc_root = unc_root[:-1]

        while mount_root.endswith("/"):
            mount_root = mount_root[:-1]
        # ---

        if forward_slash_path.startswith(unc_root):
            self.log.debug((
                "Path already starts with UNC root: \"{}\""
            ).format(unc_root))
            return normalized_path

        if not forward_slash_path.startswith(mount_root):
            self.log.warning((
                "Path does not start with MOUNT root \"{}\" "
                "set in environment variable \"{}\""
            ).format(mount_root, mount_env_key))
            return normalized_path

        # Replace Mount root with Unc root
        path = unc_root + forward_slash_path[len(mount_root):]

        return os.path.normpath(path)

    def version_from_representations(self, repres):
        for repre in repres:
            version = io.find_one({"_id": repre["parent"]})
            if version:
                return version

    def current_master_ents(self, version):
        master_version = io.find_one({
            "parent": version["parent"],
            "type": "master_version"
        })

        if not master_version:
            return (None, [])

        master_repres = list(io.find({
            "parent": master_version["_id"],
            "type": "representation"
        }))
        return (master_version, master_repres)
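
The plugin collects all InsertOne/ReplaceOne operations first and commits them in a single bulk_write, so the database is only touched once the file copies succeed. A minimal standalone sketch of that pattern (assuming a local MongoDB; the "test.versions" collection is hypothetical):

    from pymongo import MongoClient, InsertOne, ReplaceOne

    client = MongoClient("mongodb://localhost:27017")
    collection = client["test"]["versions"]

    ops = [
        InsertOne({"_id": 1, "type": "master_version"}),
        ReplaceOne({"_id": 2}, {"_id": 2, "type": "master_version"}),
    ]
    # Nothing is written until bulk_write is called, mirroring the
    # plugin's "collect first, commit last" approach.
    result = collection.bulk_write(ops)
    print(result.inserted_count, result.modified_count)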
@@ -64,6 +64,7 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
        "scene",
        "vrayproxy",
        "render",
        "prerender",
        "imagesequence",
        "review",
        "rendersetup",

@@ -82,12 +83,14 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
        "source",
        "assembly",
        "textures",
        "action"
    ]
    exclude_families = ["clip"]
    db_representation_context_keys = [
        "project", "asset", "task", "subset", "version", "representation",
        "family", "hierarchy", "task", "username"
    ]
    default_template_name = "publish"

    def process(self, instance):


@@ -162,6 +165,7 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
        )

        subset = self.get_subset(asset_entity, instance)
        instance.data["subsetEntity"] = subset

        version_number = instance.data["version"]
        self.log.debug("Next version: v{}".format(version_number))

@@ -236,6 +240,9 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
            bulk_writes
        )

        version = io.find_one({"_id": version_id})
        instance.data["versionEntity"] = version

        existing_repres = list(io.find({
            "parent": version_id,
            "type": "archived_representation"

@@ -243,9 +250,12 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):

        instance.data['version'] = version['name']

        intent = context.data.get("intent")
        if intent is not None:
            anatomy_data["intent"] = intent
        intent_value = instance.context.data.get("intent")
        if intent_value and isinstance(intent_value, dict):
            intent_value = intent_value.get("value")

        if intent_value:
            anatomy_data["intent"] = intent_value

        anatomy = instance.context.data['anatomy']


@@ -253,15 +263,18 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
        # Each should be a single representation (as such, a single extension)
        representations = []
        destination_list = []
        template_name = 'publish'

        if 'transfers' not in instance.data:
            instance.data['transfers'] = []

        published_representations = {}
        for idx, repre in enumerate(instance.data["representations"]):
            published_files = []

            # create template data for Anatomy
            template_data = copy.deepcopy(anatomy_data)
            if intent is not None:
                template_data["intent"] = intent
            if intent_value is not None:
                template_data["intent"] = intent_value

            resolution_width = repre.get("resolutionWidth")
            resolution_height = repre.get("resolutionHeight")

@@ -277,8 +290,10 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
            files = repre['files']
            if repre.get('stagingDir'):
                stagingdir = repre['stagingDir']
            if repre.get('anatomy_template'):
                template_name = repre['anatomy_template']

            template_name = (
                repre.get('anatomy_template') or self.default_template_name
            )
            if repre.get("outputName"):
                template_data["output"] = repre['outputName']


@@ -365,14 +380,19 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
                    self.log.debug("source: {}".format(src))
                    instance.data["transfers"].append([src, dst])

                    published_files.append(dst)

                    # for adding first frame into db
                    if not dst_start_frame:
                        dst_start_frame = dst_padding

                # Store used frame value to template data
                template_data["frame"] = dst_start_frame
                dst = "{0}{1}{2}".format(
                    dst_head,
                    dst_start_frame,
                    dst_tail).replace("..", ".")
                    dst_tail
                ).replace("..", ".")
                repre['published_path'] = self.unc_convert(dst)

            else:

@@ -400,9 +420,12 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):

                instance.data["transfers"].append([src, dst])

                published_files.append(dst)
                repre['published_path'] = self.unc_convert(dst)
                self.log.debug("__ dst: {}".format(dst))

            repre["publishedFiles"] = published_files

            for key in self.db_representation_context_keys:
                value = template_data.get(key)
                if not value:

@@ -449,6 +472,11 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
            self.log.debug("__ destination_list: {}".format(destination_list))
            instance.data['destination_list'] = destination_list
            representations.append(representation)
            published_representations[repre_id] = {
                "representation": representation,
                "anatomy_data": template_data,
                "published_files": published_files
            }
        self.log.debug("__ representations: {}".format(representations))

        # Remove old representations if there are any (before insertion of new)

@@ -463,7 +491,9 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
            self.log.debug("__ represNAME: {}".format(rep['name']))
            self.log.debug("__ represPATH: {}".format(rep['published_path']))
        io.insert_many(representations)
        instance.data["published_representations"] = representations
        instance.data["published_representations"] = (
            published_representations
        )
        # self.log.debug("Representation: {}".format(representations))
        self.log.info("Registered {} items".format(len(representations)))


@@ -653,9 +683,12 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
            "fps": context.data.get(
                "fps", instance.data.get("fps"))}

        intent = context.data.get("intent")
        if intent is not None:
            version_data["intent"] = intent
        intent_value = instance.context.data.get("intent")
        if intent_value and isinstance(intent_value, dict):
            intent_value = intent_value.get("value")

        if intent_value:
            version_data["intent"] = intent_value

        # Include optional data if present in
        optionals = [
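
After this change, instance.data["published_representations"] maps representation ids to a per-representation dict rather than a plain list. A sketch of the shape downstream plugins (such as the master version integrator above) can expect, with entirely hypothetical values:

    published_representations = {
        "5f3c0a...repre-id": {
            "representation": {"name": "exr", "parent": "version-id"},
            "anatomy_data": {"subset": "renderMain", "version": 1},
            "published_files": [
                "/projects/demo/publish/renderMain_v001.1001.exr",
            ],
        },
    }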
@@ -18,17 +18,23 @@ class IntegrateThumbnails(pyblish.api.InstancePlugin):
    order = pyblish.api.IntegratorOrder + 0.01
    families = ["review"]

    required_context_keys = [
        "project", "asset", "task", "subset", "version"
    ]

    def process(self, instance):

        if not os.environ.get("AVALON_THUMBNAIL_ROOT"):
            self.log.info("AVALON_THUMBNAIL_ROOT is not set."
                          " Skipping thumbnail integration.")
            self.log.warning(
                "AVALON_THUMBNAIL_ROOT is not set."
                " Skipping thumbnail integration."
            )
            return

        published_repres = instance.data.get("published_representations")
        if not published_repres:
            self.log.debug(
                "There are not published representation ids on the instance."
                "There are no published representations on the instance."
            )
            return


@@ -36,21 +42,22 @@ class IntegrateThumbnails(pyblish.api.InstancePlugin):

        anatomy = instance.context.data["anatomy"]
        if "publish" not in anatomy.templates:
            raise AssertionError("Anatomy does not have set publish key!")
            self.log.warning("Anatomy is missing the \"publish\" key!")
            return

        if "thumbnail" not in anatomy.templates["publish"]:
            raise AssertionError((
                "There is not set \"thumbnail\" template for project \"{}\""
            self.log.warning((
                "There is no \"thumbnail\" template set for the project \"{}\""
            ).format(project_name))

        thumbnail_template = anatomy.templates["publish"]["thumbnail"]

        io.install()
            return

        thumb_repre = None
        for repre in published_repres:
        thumb_repre_anatomy_data = None
        for repre_info in published_repres.values():
            repre = repre_info["representation"]
            if repre["name"].lower() == "thumbnail":
                thumb_repre = repre
                thumb_repre_anatomy_data = repre_info["anatomy_data"]
                break

        if not thumb_repre:

@@ -59,6 +66,10 @@ class IntegrateThumbnails(pyblish.api.InstancePlugin):
            )
            return

        io.install()

        thumbnail_template = anatomy.templates["publish"]["thumbnail"]

        version = io.find_one({"_id": thumb_repre["parent"]})
        if not version:
            raise AssertionError(

@@ -80,7 +91,7 @@ class IntegrateThumbnails(pyblish.api.InstancePlugin):
        thumbnail_id = ObjectId()

        # Prepare anatomy template fill data
        template_data = copy.deepcopy(thumb_repre["context"])
        template_data = copy.deepcopy(thumb_repre_anatomy_data)
        template_data.update({
            "_id": str(thumbnail_id),
            "thumbnail_root": os.environ.get("AVALON_THUMBNAIL_ROOT"),

@@ -89,15 +100,9 @@ class IntegrateThumbnails(pyblish.api.InstancePlugin):
        })

        anatomy_filled = anatomy.format(template_data)
        final_path = anatomy_filled.get("publish", {}).get("thumbnail")
        if not final_path:
            raise AssertionError((
                "Anatomy template was not filled with entered data"
                "\nTemplate: {} "
                "\nData: {}"
            ).format(thumbnail_template, str(template_data)))
        template_filled = anatomy_filled["publish"]["thumbnail"]

        dst_full_path = os.path.normpath(final_path)
        dst_full_path = os.path.normpath(str(template_filled))
        self.log.debug(
            "Copying file .. {} -> {}".format(src_full_path, dst_full_path)
        )

@@ -115,13 +120,20 @@ class IntegrateThumbnails(pyblish.api.InstancePlugin):
        template_data.pop("_id")
        template_data.pop("thumbnail_root")

        repre_context = template_filled.used_values
        for key in self.required_context_keys:
            value = template_data.get(key)
            if not value:
                continue
            repre_context[key] = template_data[key]

        thumbnail_entity = {
            "_id": thumbnail_id,
            "type": "thumbnail",
            "schema": "pype:thumbnail-1.0",
            "data": {
                "template": thumbnail_template,
                "template_data": template_data
                "template_data": repre_context
            }
        }
        # Create thumbnail entity
@@ -141,7 +141,7 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):

    hosts = ["fusion", "maya", "nuke"]

    families = ["render.farm", "renderlayer", "imagesequence"]
    families = ["render.farm", "prerender", "renderlayer", "imagesequence"]

    aov_filter = {"maya": ["beauty"]}


@@ -168,9 +168,9 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
    instance_transfer = {
        "slate": ["slateFrame"],
        "review": ["lutPath"],
        "render.farm": ["bakeScriptPath", "bakeRenderPath",
                        "bakeWriteNodeName", "version"]
    }
        "render2d": ["bakeScriptPath", "bakeRenderPath",
                     "bakeWriteNodeName", "version"]
    }

    # list of family names to transfer to new family if present
    families_transfer = ["render3d", "render2d", "ftrack", "slate"]

@@ -222,9 +222,10 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):

        # Transfer the environment from the original job to this dependent
        # job so they use the same environment

        environment = job["Props"].get("Env", {})
        environment["PYPE_METADATA_FILE"] = metadata_path
        environment["AVALON_PROJECT"] = api.Session.get("AVALON_PROJECT")

        i = 0
        for index, key in enumerate(environment):
            if key.upper() in self.enviro_filter:

@@ -276,7 +277,7 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
        # if override remove all frames we are expecting to be rendered
        # so we'll copy only those missing from current render
        if instance.data.get("overrideExistingFrame"):
            for frame in range(start, end+1):
            for frame in range(start, end + 1):
                if frame not in r_col.indexes:
                    continue
                r_col.indexes.remove(frame)

@@ -348,10 +349,11 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
            assert len(cols) == 1, "only one image sequence type is expected"

            # create subset name `familyTaskSubset_AOV`
            subset_name = 'render{}{}{}{}_{}'.format(
            group_name = 'render{}{}{}{}'.format(
                task[0].upper(), task[1:],
                subset[0].upper(), subset[1:],
                aov)
                subset[0].upper(), subset[1:])

            subset_name = '{}_{}'.format(group_name, aov)

            staging = os.path.dirname(list(cols[0])[0])


@@ -366,6 +368,7 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):

            new_instance = copy(instance_data)
            new_instance["subset"] = subset_name
            new_instance["subsetGroup"] = group_name

            ext = cols[0].tail.lstrip(".")


@@ -587,11 +590,23 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
            "multipartExr": data.get("multipartExr", False)
        }

        if "prerender" in instance.data["families"]:
            instance_skeleton_data.update({
                "family": "prerender",
                "families": []})

        # transfer specific families from original instance to new render
        for item in self.families_transfer:
            if item in instance.data.get("families", []):
                instance_skeleton_data["families"] += [item]

        if "render.farm" in instance.data["families"]:
            instance_skeleton_data.update({
                "family": "render2d",
                "families": ["render"] + [f for f in instance.data["families"]
                                          if "render.farm" not in f]
            })

        # transfer specific properties from original instance based on
        # mapping dictionary `instance_transfer`
        for key, values in self.instance_transfer.items():
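
A worked example of the new group/subset naming above, using hypothetical task, subset and AOV names:

    task, subset, aov = "compositing", "Main", "beauty"

    group_name = 'render{}{}{}{}'.format(
        task[0].upper(), task[1:],
        subset[0].upper(), subset[1:])
    subset_name = '{}_{}'.format(group_name, aov)

    # group_name  == "renderCompositingMain"
    # subset_name == "renderCompositingMain_beauty"

Splitting the AOV suffix out into subsetGroup lets all AOV subsets of one render layer be grouped together in the loader UI.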
@@ -1,16 +0,0 @@
from maya import cmds

import pyblish.api


class CollectMayaCurrentFile(pyblish.api.ContextPlugin):
    """Inject the current working file into context"""

    order = pyblish.api.CollectorOrder - 0.5
    label = "Maya Current File"
    hosts = ['maya']

    def process(self, context):
        """Inject the current working file"""
        current_file = cmds.file(query=True, sceneName=True)
        context.data['currentFile'] = current_file
@@ -1,6 +1,7 @@
from maya import cmds

import pyblish.api
import json


class CollectInstances(pyblish.api.ContextPlugin):

@@ -32,6 +33,13 @@ class CollectInstances(pyblish.api.ContextPlugin):
        objectset = cmds.ls("*.id", long=True, type="objectSet",
                            recursive=True, objectsOnly=True)

        ctx_frame_start = context.data['frameStart']
        ctx_frame_end = context.data['frameEnd']
        ctx_handle_start = context.data['handleStart']
        ctx_handle_end = context.data['handleEnd']
        ctx_frame_start_handle = context.data['frameStartHandle']
        ctx_frame_end_handle = context.data['frameEndHandle']

        context.data['objectsets'] = objectset
        for objset in objectset:


@@ -108,14 +116,36 @@ class CollectInstances(pyblish.api.ContextPlugin):
            label = "{0} ({1})".format(name,
                                       data["asset"])

            if "handles" in data:
                data["handleStart"] = data["handles"]
                data["handleEnd"] = data["handles"]

            # Append start frame and end frame to label if present
            if "frameStart" in data and "frameEnd" in data:
                data["frameStartHandle"] = data["frameStart"] - data["handleStart"]  # noqa: E501
                data["frameEndHandle"] = data["frameEnd"] + data["handleEnd"]

                # if frame range on maya set is the same as full shot range
                # adjust the values to match the asset data
                if (ctx_frame_start_handle == data["frameStart"]
                        and ctx_frame_end_handle == data["frameEnd"]):  # noqa: W503, E501
                    data["frameStartHandle"] = ctx_frame_start_handle
                    data["frameEndHandle"] = ctx_frame_end_handle
                    data["frameStart"] = ctx_frame_start
                    data["frameEnd"] = ctx_frame_end
                    data["handleStart"] = ctx_handle_start
                    data["handleEnd"] = ctx_handle_end

                # if there are user values on start and end frame not matching
                # the asset, use them

                else:
                    if "handles" in data:
                        data["handleStart"] = data["handles"]
                        data["handleEnd"] = data["handles"]
                    else:
                        data["handleStart"] = 0
                        data["handleEnd"] = 0

                    data["frameStartHandle"] = data["frameStart"] - data["handleStart"]  # noqa: E501
                    data["frameEndHandle"] = data["frameEnd"] + data["handleEnd"]  # noqa: E501

                if "handles" in data:
                    data.pop('handles')

                label += " [{0}-{1}]".format(int(data["frameStartHandle"]),
                                             int(data["frameEndHandle"]))

@@ -127,7 +157,8 @@ class CollectInstances(pyblish.api.ContextPlugin):
            # Produce diagnostic message for any graphical
            # user interface interested in visualising it.
            self.log.info("Found: \"%s\" " % instance.data["name"])
            self.log.debug("DATA: \"%s\" " % instance.data)
            self.log.debug(
                "DATA: {} ".format(json.dumps(instance.data, indent=4)))

    def sort_by_family(instance):
        """Sort by family"""
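
The condition above originally read `if "frameStart" and "frameEnd" in data:` (corrected in the listing); that expression only tests membership for the second key, since the non-empty string "frameStart" is always truthy. A two-line demonstration:

    data = {"frameEnd": 1100}

    # Parses as: ("frameStart") and ("frameEnd" in data)
    print(bool("frameStart" and "frameEnd" in data))      # True
    print("frameStart" in data and "frameEnd" in data)    # False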
@ -41,6 +41,7 @@ import re
|
|||
import os
|
||||
import types
|
||||
import six
|
||||
import json
|
||||
from abc import ABCMeta, abstractmethod
|
||||
|
||||
from maya import cmds
|
||||
|
|
@ -214,6 +215,28 @@ class CollectMayaRender(pyblish.api.ContextPlugin):
|
|||
full_paths.append(full_path)
|
||||
aov_dict["beauty"] = full_paths
|
||||
|
||||
frame_start_render = int(self.get_render_attribute(
|
||||
"startFrame", layer=layer_name))
|
||||
frame_end_render = int(self.get_render_attribute(
|
||||
"endFrame", layer=layer_name))
|
||||
|
||||
if (int(context.data['frameStartHandle']) == frame_start_render
|
||||
and int(context.data['frameEndHandle']) == frame_end_render): # noqa: W503, E501
|
||||
|
||||
handle_start = context.data['handleStart']
|
||||
handle_end = context.data['handleEnd']
|
||||
frame_start = context.data['frameStart']
|
||||
frame_end = context.data['frameEnd']
|
||||
frame_start_handle = context.data['frameStartHandle']
|
||||
frame_end_handle = context.data['frameEndHandle']
|
||||
else:
|
||||
handle_start = 0
|
||||
handle_end = 0
|
||||
frame_start = frame_start_render
|
||||
frame_end = frame_end_render
|
||||
frame_start_handle = frame_start_render
|
||||
frame_end_handle = frame_end_render
|
||||
|
||||
full_exp_files.append(aov_dict)
|
||||
self.log.info(full_exp_files)
|
||||
self.log.info("collecting layer: {}".format(layer_name))
|
||||
|
|
@@ -224,30 +247,18 @@ class CollectMayaRender(pyblish.api.ContextPlugin):
                "setMembers": layer_name,
                "multipartExr": exf.multipart,
                "publish": True,
                "frameStart": int(
                    context.data["assetEntity"]["data"]["frameStart"]
                ),
                "frameEnd": int(
                    context.data["assetEntity"]["data"]["frameEnd"]
                ),
                "frameStartHandle": int(
                    self.get_render_attribute("startFrame", layer=layer_name)
                ),
                "frameEndHandle": int(
                    self.get_render_attribute("endFrame", layer=layer_name)
                ),
                "handleStart": handle_start,
                "handleEnd": handle_end,
                "frameStart": frame_start,
                "frameEnd": frame_end,
                "frameStartHandle": frame_start_handle,
                "frameEndHandle": frame_end_handle,
                "byFrameStep": int(
                    self.get_render_attribute("byFrameStep", layer=layer_name)
                ),
                "renderer": self.get_render_attribute(
                    "currentRenderer", layer=layer_name
                ),
                "handleStart": int(
                    context.data["assetEntity"]["data"]["handleStart"]
                ),
                "handleEnd": int(
                    context.data["assetEntity"]["data"]["handleEnd"]
                ),
                    self.get_render_attribute("byFrameStep",
                                              layer=layer_name)),
                "renderer": self.get_render_attribute("currentRenderer",
                                                      layer=layer_name),
                # instance subset
                "family": "renderlayer",
                "families": ["renderlayer"],

@@ -290,7 +301,7 @@ class CollectMayaRender(pyblish.api.ContextPlugin):
            instance = context.create_instance(expected_layer_name)
            instance.data["label"] = label
            instance.data.update(data)
            pass
            self.log.debug("data: {}".format(json.dumps(data, indent=4)))

    def parse_options(self, render_globals):
        """Get all overrides with a value, skip those without

@@ -665,7 +676,7 @@ class ExpectedFilesArnold(AExpectedFiles):
        try:
            if not (
                cmds.getAttr("defaultArnoldRenderOptions.aovMode")
                and not cmds.getAttr("defaultArnoldDriver.mergeAOVs")
                and not cmds.getAttr("defaultArnoldDriver.mergeAOVs")  # noqa: W503, E501
            ):
                # AOVs are merged in multi-channel file
                self.multipart = True
@@ -751,7 +762,7 @@ class ExpectedFilesVray(AExpectedFiles):
        # really? do we set it in vray just by selecting multichannel exr?
        if (
            cmds.getAttr("vraySettings.imageFormatStr")
            == "exr (multichannel)"
            == "exr (multichannel)"  # noqa: W503
        ):
            # AOVs are merged in multi-channel file
            self.multipart = True

@@ -786,8 +797,7 @@ class ExpectedFilesVray(AExpectedFiles):
            if enabled:
                # todo: find how vray set format for AOVs
                enabled_aovs.append(
                    (self._get_vray_aov_name(aov), default_ext)
                )
                    (self._get_vray_aov_name(aov), default_ext))
        return enabled_aovs

    def _get_vray_aov_name(self, node):

@@ -9,13 +9,14 @@ from pype.maya import lib
class CollectMayaScene(pyblish.api.ContextPlugin):
    """Inject the current working file into context"""

    order = pyblish.api.CollectorOrder - 0.1
    order = pyblish.api.CollectorOrder - 0.01
    label = "Maya Workfile"
    hosts = ['maya']

    def process(self, context):
        """Inject the current working file"""
        current_file = context.data['currentFile']
        current_file = cmds.file(query=True, sceneName=True)
        context.data['currentFile'] = current_file

        folder, file = os.path.split(current_file)
        filename, ext = os.path.splitext(file)

@@ -24,9 +25,6 @@ class CollectMayaScene(pyblish.api.ContextPlugin):

        data = {}

        for key, value in lib.collect_animation_data().items():
            data[key] = value

        # create instance
        instance = context.create_instance(name=filename)
        subset = 'workfile' + task.capitalize()

@@ -38,7 +36,11 @@ class CollectMayaScene(pyblish.api.ContextPlugin):
            "publish": True,
            "family": 'workfile',
            "families": ['workfile'],
            "setMembers": [current_file]
            "setMembers": [current_file],
            "frameStart": context.data['frameStart'],
            "frameEnd": context.data['frameEnd'],
            "handleStart": context.data['handleStart'],
            "handleEnd": context.data['handleEnd']
        })

        data['representations'] = [{

@@ -94,11 +94,6 @@ class ExtractCameraMayaAscii(pype.api.Extractor):
        step = instance.data.get("step", 1.0)
        bake_to_worldspace = instance.data("bakeToWorldSpace", True)

        # TODO: Implement a bake to non-world space
        # Currently it will always bake the resulting camera to world-space
        # and it does not allow to include the parent hierarchy, even though
        # with `bakeToWorldSpace` set to False it should include its
        # hierarchy to be correct with the family implementation.
        if not bake_to_worldspace:
            self.log.warning("Camera (Maya Ascii) export only supports world"
                             "space baked camera extractions. The disabled "

@@ -113,7 +108,7 @@ class ExtractCameraMayaAscii(pype.api.Extractor):
                              framerange[1] + handles]

        # validate required settings
        assert len(cameras) == 1, "Not a single camera found in extraction"
        assert len(cameras) == 1, "Single camera must be found in extraction"
        assert isinstance(step, float), "Step must be a float value"
        camera = cameras[0]
        transform = cmds.listRelatives(camera, parent=True, fullPath=True)

@@ -124,21 +119,24 @@ class ExtractCameraMayaAscii(pype.api.Extractor):
        path = os.path.join(dir_path, filename)

        # Perform extraction
        self.log.info("Performing camera bakes for: {0}".format(transform))
        with avalon.maya.maintained_selection():
            with lib.evaluation("off"):
                with avalon.maya.suspended_refresh():
                    baked = lib.bake_to_world_space(
                        transform,
                        frame_range=range_with_handles,
                        step=step
                    )
                    baked_shapes = cmds.ls(baked,
                                           type="camera",
                                           dag=True,
                                           shapes=True,
                                           long=True)
                    if bake_to_worldspace:
                        self.log.info(
                            "Performing camera bakes: {}".format(transform))
                        baked = lib.bake_to_world_space(
                            transform,
                            frame_range=range_with_handles,
                            step=step
                        )
                        baked_shapes = cmds.ls(baked,
                                               type="camera",
                                               dag=True,
                                               shapes=True,
                                               long=True)
                    else:
                        baked_shapes = cameras
                    # Fix PLN-178: Don't allow background color to be non-black
                    for cam in baked_shapes:
                        attrs = {"backgroundColorR": 0.0,

@@ -164,7 +162,8 @@ class ExtractCameraMayaAscii(pype.api.Extractor):
                                 expressions=False)

        # Delete the baked hierarchy
        cmds.delete(baked)
        if bake_to_worldspace:
            cmds.delete(baked)

        massage_ma_file(path)

@@ -25,12 +25,8 @@ class ExtractAlembic(pype.api.Extractor):
        nodes = instance[:]

        # Collect the start and end including handles
        start = instance.data.get("frameStart", 1)
        end = instance.data.get("frameEnd", 1)
        handles = instance.data.get("handles", 0)
        if handles:
            start -= handles
            end += handles
        start = float(instance.data.get("frameStartHandle", 1))
        end = float(instance.data.get("frameEndHandle", 1))

        attrs = instance.data.get("attr", "").split(";")
        attrs = [value for value in attrs if value.strip()]
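The extractor now consumes the precomputed per-side handle values instead of re-deriving the range from a single symmetric `handles` count. Both forms agree when handles were symmetric, which this small standalone check illustrates (values are made up):

    data = {"frameStart": 1001, "frameEnd": 1100, "handles": 5,
            "frameStartHandle": 996, "frameEndHandle": 1105}

    # Old, symmetric derivation.
    old_start = data["frameStart"] - data["handles"]
    old_end = data["frameEnd"] + data["handles"]

    # New: read the precomputed handle-inclusive range directly.
    new_start = float(data["frameStartHandle"])
    new_end = float(data["frameEndHandle"])

    assert (old_start, old_end) == (int(new_start), int(new_end))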
@@ -1,18 +1,19 @@
import pyblish.api
import pype.api

from maya import cmds


class ValidateFrameRange(pyblish.api.InstancePlugin):
    """Valides the frame ranges.

    Checks the `startFrame`, `endFrame` and `handles` data.
    This does NOT ensure there's actual data present.
    This is an optional validator checking if the frame range on the
    instance matches the one of the asset. It also validates the render
    frame range of render layers.

    This validates:
    - `startFrame` is lower than or equal to the `endFrame`.
    - must have both the `startFrame` and `endFrame` data.
    - The `handles` value is not lower than zero.
    Repair action will change everything to match the asset.

    This can be turned off by artist to allow custom ranges.
    """

    label = "Validate Frame Range"

@@ -21,25 +22,66 @@ class ValidateFrameRange(pyblish.api.InstancePlugin):
                "pointcache",
                "camera",
                "renderlayer",
                "colorbleed.vrayproxy"]
                "review",
                "yeticache"]
    optional = True
    actions = [pype.api.RepairAction]

    def process(self, instance):
        context = instance.context

        start = instance.data.get("frameStart", None)
        end = instance.data.get("frameEnd", None)
        handles = instance.data.get("handles", None)
        frame_start_handle = int(context.data.get("frameStartHandle"))
        frame_end_handle = int(context.data.get("frameEndHandle"))
        handles = int(context.data.get("handles"))
        handle_start = int(context.data.get("handleStart"))
        handle_end = int(context.data.get("handleEnd"))
        frame_start = int(context.data.get("frameStart"))
        frame_end = int(context.data.get("frameEnd"))

        # Check if any of the values are present
        if any(value is None for value in [start, end]):
            raise ValueError("No time values for this instance. "
                             "(Missing `startFrame` or `endFrame`)")
        inst_start = int(instance.data.get("frameStartHandle"))
        inst_end = int(instance.data.get("frameEndHandle"))

        self.log.info("Comparing start (%s) and end (%s)" % (start, end))
        if start > end:
            raise RuntimeError("The start frame is a higher value "
                               "than the end frame: "
                               "{0}>{1}".format(start, end))
        # basic sanity checks
        assert frame_start_handle <= frame_end_handle, (
            "start frame is higher than end frame")

        if handles is not None:
            if handles < 0.0:
                raise RuntimeError("Handles are set to a negative value")
        assert handles >= 0, ("handles cannot have negative values")

        # compare with data on instance
        errors = []

        if(inst_start != frame_start_handle):
            errors.append("Instance start frame [ {} ] doesn't "
                          "match the one set on instance [ {} ]: "
                          "{}/{}/{}/{} (handle/start/end/handle)".format(
                              inst_start,
                              frame_start_handle,
                              handle_start, frame_start, frame_end, handle_end
                          ))

        if(inst_end != frame_end_handle):
            errors.append("Instance end frame [ {} ] doesn't "
                          "match the one set on instance [ {} ]: "
                          "{}/{}/{}/{} (handle/start/end/handle)".format(
                              inst_end,
                              frame_end_handle,
                              handle_start, frame_start, frame_end, handle_end
                          ))

        for e in errors:
            self.log.error(e)

        assert len(errors) == 0, ("Frame range settings are incorrect")

    @classmethod
    def repair(cls, instance):
        """
        Repair instance container to match asset data.
        """
        cmds.setAttr(
            "{}.frameStart".format(instance.data["name"]),
            instance.context.data.get("frameStartHandle"))

        cmds.setAttr(
            "{}.frameEnd".format(instance.data["name"]),
            instance.context.data.get("frameEndHandle"))
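The core comparison the validator performs is simple enough to state as a pure function; a sketch under the assumption that both ranges are already resolved to integers (only the variable names mirror the plugin):

    def frame_range_errors(inst_start, inst_end, ctx_start, ctx_end):
        # Collect one error per mismatching end of the range.
        errors = []
        if inst_start != ctx_start:
            errors.append("start {} != {}".format(inst_start, ctx_start))
        if inst_end != ctx_end:
            errors.append("end {} != {}".format(inst_end, ctx_end))
        return errors

    assert frame_range_errors(993, 1108, 993, 1108) == []
    assert len(frame_range_errors(1001, 1108, 993, 1108)) == 1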
@@ -13,13 +13,17 @@ class ValidateRenderSettings(pyblish.api.InstancePlugin):
    """Validates the global render settings

    * File Name Prefix must start with: `maya/<Scene>`
        all other token are customizable but sane values are:
        all other token are customizable but sane values for Arnold are:

        `maya/<Scene>/<RenderLayer>/<RenderLayer>_<RenderPass>`

        <Camera> token is supported also, usefull for multiple renderable
        <Camera> token is supported also, useful for multiple renderable
        cameras per render layer.

        For Redshift omit <RenderPass> token. Redshift will append it
        automatically if AOVs are enabled and if you use Multipart EXR
        it doesn't make much sense.

    * Frame Padding must be:
        * default: 4

@@ -127,8 +131,13 @@ class ValidateRenderSettings(pyblish.api.InstancePlugin):
            # no vray checks implemented yet
            pass
        elif renderer == "redshift":
            # no redshift check implemented yet
            pass
            if re.search(cls.R_AOV_TOKEN, prefix):
                invalid = True
                cls.log.error("Do not use AOV token [ {} ] - "
                              "Redshift automatically append AOV name and "
                              "it doesn't make much sense with "
                              "Multipart EXR".format(prefix))

        elif renderer == "renderman":
            file_prefix = cmds.getAttr("rmanGlobals.imageFileFormat")
            dir_prefix = cmds.getAttr("rmanGlobals.imageOutputDir")

@@ -143,8 +152,8 @@ class ValidateRenderSettings(pyblish.api.InstancePlugin):
                              dir_prefix))

        else:
            multichannel = cmds.getAttr("defaultArnoldDriver.mergeAOVs")
            if multichannel:
            multipart = cmds.getAttr("defaultArnoldDriver.mergeAOVs")
            if multipart:
                if re.search(cls.R_AOV_TOKEN, prefix):
                    invalid = True
                    cls.log.error("Wrong image prefix [ {} ] - "

@@ -1,103 +1,10 @@
from collections import OrderedDict
from pype.nuke import plugin
from pype.nuke import (
    plugin,
    lib as pnlib)
import nuke


class CreateWriteRender(plugin.PypeCreator):
    # change this to template preset
    name = "WriteRender"
    label = "Create Write Render"
    hosts = ["nuke"]
    n_class = "write"
    family = "render"
    icon = "sign-out"
    defaults = ["Main", "Mask"]

    def __init__(self, *args, **kwargs):
        super(CreateWriteRender, self).__init__(*args, **kwargs)

        data = OrderedDict()

        data["family"] = self.family
        data["families"] = self.n_class

        for k, v in self.data.items():
            if k not in data.keys():
                data.update({k: v})

        self.data = data
        self.nodes = nuke.selectedNodes()
        self.log.debug("_ self.data: '{}'".format(self.data))

    def process(self):
        from pype.nuke import lib as pnlib

        inputs = []
        outputs = []
        instance = nuke.toNode(self.data["subset"])
        selected_node = None

        # use selection
        if (self.options or {}).get("useSelection"):
            nodes = self.nodes

            if not (len(nodes) < 2):
                msg = ("Select only one node. The node you want to connect to, "
                       "or tick off `Use selection`")
                log.error(msg)
                nuke.message(msg)

            selected_node = nodes[0]
            inputs = [selected_node]
            outputs = selected_node.dependent()

            if instance:
                if (instance.name() in selected_node.name()):
                    selected_node = instance.dependencies()[0]

        # if node already exist
        if instance:
            # collect input / outputs
            inputs = instance.dependencies()
            outputs = instance.dependent()
            selected_node = inputs[0]
            # remove old one
            nuke.delete(instance)

        # recreate new
        write_data = {
            "class": self.n_class,
            "families": [self.family],
            "avalon": self.data
        }

        if self.presets.get('fpath_template'):
            self.log.info("Adding template path from preset")
            write_data.update(
                {"fpath_template": self.presets["fpath_template"]}
            )
        else:
            self.log.info("Adding template path from plugin")
            write_data.update({
                "fpath_template": "{work}/renders/nuke/{subset}/{subset}.{frame}.{ext}"})

        write_node = pnlib.create_write_node(
            self.data["subset"],
            write_data,
            input=selected_node)

        # relinking to collected connections
        for i, input in enumerate(inputs):
            write_node.setInput(i, input)

        write_node.autoplace()

        for output in outputs:
            output.setInput(0, write_node)

        return write_node


class CreateWritePrerender(plugin.PypeCreator):
    # change this to template preset
    name = "WritePrerender"

@@ -125,8 +32,6 @@ class CreateWritePrerender(plugin.PypeCreator):
        self.log.debug("_ self.data: '{}'".format(self.data))

    def process(self):
        from pype.nuke import lib as pnlib

        inputs = []
        outputs = []
        instance = nuke.toNode(self.data["subset"])

@@ -137,8 +42,9 @@ class CreateWritePrerender(plugin.PypeCreator):
            nodes = self.nodes

            if not (len(nodes) < 2):
                msg = ("Select only one node. The node you want to connect to, "
                       "or tick off `Use selection`")
                msg = ("Select only one node. The node "
                       "you want to connect to, "
                       "or tick off `Use selection`")
                self.log.error(msg)
                nuke.message(msg)

@@ -174,13 +80,15 @@ class CreateWritePrerender(plugin.PypeCreator):
        else:
            self.log.info("Adding template path from plugin")
            write_data.update({
                "fpath_template": "{work}/prerenders/nuke/{subset}/{subset}.{frame}.{ext}"})
                "fpath_template": ("{work}/prerenders/nuke/{subset}"
                                   "/{subset}.{frame}.{ext}")})

        write_node = pnlib.create_write_node(
            self.data["subset"],
            write_data,
            input=selected_node,
            prenodes=[])
            prenodes=[],
            review=False)

        # relinking to collected connections
        for i, input in enumerate(inputs):

101
pype/plugins/nuke/create/create_write_render.py
Normal file
101
pype/plugins/nuke/create/create_write_render.py
Normal file
@@ -0,0 +1,101 @@
from collections import OrderedDict
from pype.nuke import (
    plugin,
    lib as pnlib)
import nuke


class CreateWriteRender(plugin.PypeCreator):
    # change this to template preset
    name = "WriteRender"
    label = "Create Write Render"
    hosts = ["nuke"]
    n_class = "write"
    family = "render"
    icon = "sign-out"
    defaults = ["Main", "Mask"]

    def __init__(self, *args, **kwargs):
        super(CreateWriteRender, self).__init__(*args, **kwargs)

        data = OrderedDict()

        data["family"] = self.family
        data["families"] = self.n_class

        for k, v in self.data.items():
            if k not in data.keys():
                data.update({k: v})

        self.data = data
        self.nodes = nuke.selectedNodes()
        self.log.debug("_ self.data: '{}'".format(self.data))

    def process(self):

        inputs = []
        outputs = []
        instance = nuke.toNode(self.data["subset"])
        selected_node = None

        # use selection
        if (self.options or {}).get("useSelection"):
            nodes = self.nodes

            if not (len(nodes) < 2):
                msg = ("Select only one node. "
                       "The node you want to connect to, "
                       "or tick off `Use selection`")
                self.log.error(msg)
                nuke.message(msg)

            selected_node = nodes[0]
            inputs = [selected_node]
            outputs = selected_node.dependent()

            if instance:
                if (instance.name() in selected_node.name()):
                    selected_node = instance.dependencies()[0]

        # if node already exist
        if instance:
            # collect input / outputs
            inputs = instance.dependencies()
            outputs = instance.dependent()
            selected_node = inputs[0]
            # remove old one
            nuke.delete(instance)

        # recreate new
        write_data = {
            "class": self.n_class,
            "families": [self.family],
            "avalon": self.data
        }

        if self.presets.get('fpath_template'):
            self.log.info("Adding template path from preset")
            write_data.update(
                {"fpath_template": self.presets["fpath_template"]}
            )
        else:
            self.log.info("Adding template path from plugin")
            write_data.update({
                "fpath_template": ("{work}/renders/nuke/{subset}"
                                   "/{subset}.{frame}.{ext}")})

        write_node = pnlib.create_write_node(
            self.data["subset"],
            write_data,
            input=selected_node)

        # relinking to collected connections
        for i, input in enumerate(inputs):
            write_node.setInput(i, input)

        write_node.autoplace()

        for output in outputs:
            output.setInput(0, write_node)

        return write_node

@@ -92,6 +92,7 @@ class LoadMov(api.Loader):
        "source",
        "plate",
        "render",
        "prerender",
        "review"] + presets["families"]

    representations = [

@@ -70,7 +70,7 @@ def loader_shift(node, frame, relative=True):
class LoadSequence(api.Loader):
    """Load image sequence into Nuke"""

    families = ["render2d", "source", "plate", "render"]
    families = ["render2d", "source", "plate", "render", "prerender"]
    representations = ["exr", "dpx", "jpg", "jpeg", "png"]

    label = "Load sequence"

@@ -87,7 +87,7 @@ class LoadSequence(api.Loader):
        version = context['version']
        version_data = version.get("data", {})
        repr_id = context["representation"]["_id"]

        self.log.info("version_data: {}\n".format(version_data))
        self.log.debug(
            "Representation id `{}` ".format(repr_id))

@@ -52,6 +52,7 @@ class CollectNukeInstances(pyblish.api.ContextPlugin):

            # establish families
            family = avalon_knob_data["family"]
            families_ak = avalon_knob_data.get("families")
            families = list()

            # except disabled nodes but exclude backdrops in test

@@ -68,16 +69,14 @@ class CollectNukeInstances(pyblish.api.ContextPlugin):
            # Add all nodes in group instances.
            if node.Class() == "Group":
                # only alter families for render family
                if ("render" in family):
                    # check if node is not disabled
                    families.append(avalon_knob_data["families"])
                if "write" in families_ak:
                    if node["render"].value():
                        self.log.info("flagged for render")
                        add_family = "render.local"
                        add_family = "{}.local".format(family)
                        # dealing with local/farm rendering
                        if node["render_farm"].value():
                            self.log.info("adding render farm family")
                            add_family = "render.farm"
                            add_family = "{}.farm".format(family)
                            instance.data["transfer"] = False
                        families.append(add_family)
                    else:

@@ -89,9 +88,7 @@ class CollectNukeInstances(pyblish.api.ContextPlugin):
                    instance.append(i)
                node.end()

            family = avalon_knob_data["family"]
            families = list()
            families_ak = avalon_knob_data.get("families")
            self.log.debug("__ families: `{}`".format(families))

            if families_ak:
                families.append(families_ak)

@@ -104,22 +101,6 @@ class CollectNukeInstances(pyblish.api.ContextPlugin):
            resolution_height = format.height()
            pixel_aspect = format.pixelAspect()

            if node.Class() not in "Read":
                if "render" not in node.knobs().keys():
                    pass
                elif node["render"].value():
                    self.log.info("flagged for render")
                    add_family = "render.local"
                    # dealing with local/farm rendering
                    if node["render_farm"].value():
                        self.log.info("adding render farm family")
                        add_family = "render.farm"
                        instance.data["transfer"] = False
                    families.append(add_family)
                else:
                    # add family into families
                    families.insert(0, family)

            instance.data.update({
                "subset": subset,
                "asset": os.environ["AVALON_ASSET"],

@@ -8,7 +8,7 @@ class CollectSlate(pyblish.api.InstancePlugin):
    order = pyblish.api.CollectorOrder + 0.09
    label = "Collect Slate Node"
    hosts = ["nuke"]
    families = ["write"]
    families = ["render", "render.local", "render.farm"]

    def process(self, instance):
        node = instance[0]

@@ -1,7 +1,6 @@
import os
import nuke
import pyblish.api
import pype.api as pype


@pyblish.api.log

@@ -13,9 +12,11 @@ class CollectNukeWrites(pyblish.api.InstancePlugin):
    hosts = ["nuke", "nukeassist"]
    families = ["write"]

    # preset attributes
    sync_workfile_version = True

    def process(self, instance):
        # adding 2d focused rendering
        instance.data["families"].append("render2d")
        families = instance.data["families"]

        node = None
        for x in instance:

@@ -53,10 +54,13 @@ class CollectNukeWrites(pyblish.api.InstancePlugin):
        output_dir = os.path.dirname(path)
        self.log.debug('output dir: {}'.format(output_dir))

        # get version to instance for integration
        instance.data['version'] = instance.context.data["version"]
        if not next((f for f in families
                     if "prerender" in f),
                    None) and self.sync_workfile_version:
            # get version to instance for integration
            instance.data['version'] = instance.context.data["version"]

            self.log.debug('Write Version: %s' % instance.data('version'))
        self.log.debug('Write Version: %s' % instance.data('version'))

        # create label
        name = node.name()
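The guard introduced above makes workfile-version syncing conditional: prerender instances are skipped, and syncing can be disabled through the `sync_workfile_version` preset attribute. The same predicate as a standalone sketch, with plain lists in place of pyblish data:

    def should_sync_version(families, sync_workfile_version):
        # Any family containing "prerender" opts the instance out.
        has_prerender = next(
            (f for f in families if "prerender" in f), None)
        return has_prerender is None and sync_workfile_version

    assert should_sync_version(["write", "render.local"], True)
    assert not should_sync_version(["write", "prerender.local"], True)
    assert not should_sync_version(["write", "render.local"], False)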
@@ -67,7 +71,8 @@ class CollectNukeWrites(pyblish.api.InstancePlugin):
            int(last_frame)
        )

        if 'render' in instance.data['families']:
        if [fm for fm in families
                if fm in ["render", "prerender"]]:
            if "representations" not in instance.data:
                instance.data["representations"] = list()

@@ -95,7 +100,8 @@ class CollectNukeWrites(pyblish.api.InstancePlugin):
            # this will only run if slate frame is not already
            # rendered from previous publishes
            if "slate" in instance.data["families"] \
                    and (frame_length == collected_frames_len):
                    and (frame_length == collected_frames_len) \
                    and ("prerender" not in instance.data["families"]):
                frame_slate_str = "%0{}d".format(
                    len(str(last_frame))) % (first_frame - 1)
                slate_frame = collected_frames[0].replace(

@@ -124,6 +130,7 @@ class CollectNukeWrites(pyblish.api.InstancePlugin):
        deadlinePriority = group_node["deadlinePriority"].value()

        families = [f for f in instance.data["families"] if "write" not in f]

        instance.data.update({
            "versionData": version_data,
            "path": path,

@@ -144,4 +151,12 @@ class CollectNukeWrites(pyblish.api.InstancePlugin):
            "deadlinePriority": deadlinePriority
        })

        if "prerender" in families:
            instance.data.update({
                "family": "prerender",
                "families": []
            })

        self.log.debug("families: {}".format(families))

        self.log.debug("instance.data: {}".format(instance.data))

@@ -17,9 +17,11 @@ class NukeRenderLocal(pype.api.Extractor):
    order = pyblish.api.ExtractorOrder
    label = "Render Local"
    hosts = ["nuke"]
    families = ["render.local"]
    families = ["render.local", "prerender.local"]

    def process(self, instance):
        families = instance.data["families"]

        node = None
        for x in instance:
            if x.Class() == "Write":

@@ -30,7 +32,7 @@ class NukeRenderLocal(pype.api.Extractor):
        first_frame = instance.data.get("frameStartHandle", None)

        # exception for slate workflow
        if "slate" in instance.data["families"]:
        if "slate" in families:
            first_frame -= 1

        last_frame = instance.data.get("frameEndHandle", None)

@@ -53,7 +55,7 @@ class NukeRenderLocal(pype.api.Extractor):
        )

        # exception for slate workflow
        if "slate" in instance.data["families"]:
        if "slate" in families:
            first_frame += 1

        path = node['file'].value()

@@ -79,8 +81,16 @@ class NukeRenderLocal(pype.api.Extractor):
            out_dir
        ))

        instance.data['family'] = 'render'
        instance.data['families'].append('render')
        # redefinition of families
        if "render.local" in families:
            instance.data['family'] = 'render2d'
            families.remove('render.local')
            families.insert(0, "render")
        elif "prerender.local" in families:
            instance.data['family'] = 'prerender'
            families.remove('prerender.local')
            families.insert(0, "prerender")
        instance.data["families"] = families

        collections, remainder = clique.assemble(collected_frames)
        self.log.info('collections: {}'.format(str(collections)))
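After a successful local render the `.local` marker family is swapped for the final publish family, which is what the block above does. The same reshuffle as a standalone sketch, with a plain dict in place of the pyblish instance:

    def promote_local_family(data):
        families = data["families"]
        if "render.local" in families:
            data["family"] = "render2d"
            families.remove("render.local")
            families.insert(0, "render")
        elif "prerender.local" in families:
            data["family"] = "prerender"
            families.remove("prerender.local")
            families.insert(0, "prerender")
        data["families"] = families
        return data

    data = promote_local_family({"families": ["prerender.local", "slate"]})
    assert data["family"] == "prerender"
    assert data["families"] == ["prerender", "slate"]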
@@ -15,9 +15,14 @@ class ExtractReviewDataMov(pype.api.Extractor):
    order = pyblish.api.ExtractorOrder + 0.01
    label = "Extract Review Data Mov"

    families = ["review", "render", "render.local"]
    families = ["review"]
    hosts = ["nuke"]

    # presets
    viewer_lut_raw = None
    bake_colorspace_fallback = None
    bake_colorspace_main = None

    def process(self, instance):
        families = instance.data["families"]
        self.log.info("Creating staging dir...")

@@ -157,11 +157,13 @@ class ExtractSlateFrame(pype.api.Extractor):
            return

        comment = instance.context.data.get("comment")
        intent = instance.context.data.get("intent", {}).get("value", "")
        intent_value = instance.context.data.get("intent")
        if intent_value and isinstance(intent_value, dict):
            intent_value = intent_value.get("value")

        try:
            node["f_submission_note"].setValue(comment)
            node["f_submitting_for"].setValue(intent)
            node["f_submitting_for"].setValue(intent_value or "")
        except NameError:
            return
        instance.data.pop("slateNode")
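The rewritten lookup tolerates `intent` being absent, a plain string, or a dict carrying a `value` key, where the old chained `.get(...).get(...)` would raise on a string. A self-contained sketch of that behaviour:

    def resolve_intent(context_data):
        intent_value = context_data.get("intent")
        if intent_value and isinstance(intent_value, dict):
            intent_value = intent_value.get("value")
        return intent_value or ""

    assert resolve_intent({}) == ""
    assert resolve_intent({"intent": "WIP"}) == "WIP"
    assert resolve_intent({"intent": {"value": "Final"}}) == "Final"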
@@ -9,6 +9,7 @@ class IncrementScriptVersion(pyblish.api.ContextPlugin):
    order = pyblish.api.IntegratorOrder + 0.9
    label = "Increment Script Version"
    optional = True
    families = ["workfile", "render", "render.local", "render.farm"]
    hosts = ['nuke']

    def process(self, context):

@@ -16,19 +17,7 @@ class IncrementScriptVersion(pyblish.api.ContextPlugin):
        assert all(result["success"] for result in context.data["results"]), (
            "Publishing not successful so version is not increased.")

        instances = context[:]

        prerender_check = list()
        families_check = list()
        for instance in instances:
            if ("prerender" in str(instance)) and instance.data.get("families", None):
                prerender_check.append(instance)
            if instance.data.get("families", None):
                families_check.append(True)

        if len(prerender_check) != len(families_check):
            from pype.lib import version_up
            path = context.data["currentFile"]
            nuke.scriptSaveAs(version_up(path))
            self.log.info('Incrementing script version')
        from pype.lib import version_up
        path = context.data["currentFile"]
        nuke.scriptSaveAs(version_up(path))
        self.log.info('Incrementing script version')
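The ad-hoc prerender counting is gone; the plugin-level `families` filter now decides which instances trigger the increment, so the body can save and bump unconditionally. `version_up` itself lives in pype.lib; as an illustrative stand-in (not the real implementation), bumping a `_v###` token can look like this:

    import re

    def version_up_sketch(path):
        # Bump the trailing _v### token, keeping its zero padding.
        def bump(match):
            number = match.group(1)
            return "_v" + str(int(number) + 1).zfill(len(number))
        return re.sub(r"_v(\d+)", bump, path)

    assert version_up_sketch("shot010_comp_v001.nk") == "shot010_comp_v002.nk"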
@@ -19,7 +19,7 @@ class NukeSubmitDeadline(pyblish.api.InstancePlugin):
    label = "Submit to Deadline"
    order = pyblish.api.IntegratorOrder + 0.1
    hosts = ["nuke", "nukestudio"]
    families = ["render.farm"]
    families = ["render.farm", "prerender.farm"]
    optional = True

    deadline_priority = 50

@@ -28,6 +28,7 @@ class NukeSubmitDeadline(pyblish.api.InstancePlugin):
    deadline_chunk_size = 1

    def process(self, instance):
        families = instance.data["families"]

        node = instance[0]
        context = instance.context

@@ -82,6 +83,15 @@ class NukeSubmitDeadline(pyblish.api.InstancePlugin):
        instance.data["deadlineSubmissionJob"] = resp.json()
        instance.data["publishJobState"] = "Suspended"

        # redefinition of families
        if "render.farm" in families:
            instance.data['family'] = 'write'
            families.insert(0, "render2d")
        elif "prerender.farm" in families:
            instance.data['family'] = 'write'
            families.insert(0, "prerender")
        instance.data["families"] = families

    def payload_submit(self,
                       instance,
                       script_path,

@@ -28,7 +28,7 @@ class ValidateRenderedFrames(pyblish.api.InstancePlugin):
    """ Validates file output. """

    order = pyblish.api.ValidatorOrder + 0.1
    families = ["render"]
    families = ["render", "prerender"]

    label = "Validate rendered frame"
    hosts = ["nuke", "nukestudio"]

@@ -36,7 +36,8 @@ TIMECODE = (
MISSING_KEY_VALUE = "N/A"
CURRENT_FRAME_KEY = "{current_frame}"
CURRENT_FRAME_SPLITTER = "_-_CURRENT_FRAME_-_"
TIME_CODE_KEY = "{timecode}"
TIMECODE_KEY = "{timecode}"
SOURCE_TIMECODE_KEY = "{source_timecode}"


def _streams(source):

@@ -188,10 +189,13 @@ class ModifiedBurnins(ffmpeg_burnins.Burnins):
        if not options.get("fps"):
            options["fps"] = self.frame_rate

        options["timecode"] = ffmpeg_burnins._frames_to_timecode(
            frame_start_tc,
            self.frame_rate
        )
        if isinstance(frame_start_tc, str):
            options["timecode"] = frame_start_tc
        else:
            options["timecode"] = ffmpeg_burnins._frames_to_timecode(
                frame_start_tc,
                self.frame_rate
            )

        self._add_burnin(text, align, options, TIMECODE)
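The new branch lets `frame_start_tc` arrive either as an already-formatted timecode string, passed through untouched, or as a numeric start frame, converted via the frame rate. A sketch of the same dispatch, with a simplified converter standing in for the private ffmpeg_burnins helper (integer frame rates only):

    def resolve_timecode(frame_start_tc, frame_rate):
        # String input is assumed to be a ready-made timecode.
        if isinstance(frame_start_tc, str):
            return frame_start_tc
        # Numeric input: convert frames to HH:MM:SS:FF.
        fps = int(frame_rate)
        total_seconds, frames = divmod(int(frame_start_tc), fps)
        minutes, seconds = divmod(total_seconds, 60)
        hours, minutes = divmod(minutes, 60)
        return "{:02d}:{:02d}:{:02d}:{:02d}".format(
            hours, minutes, seconds, frames)

    assert resolve_timecode("01:00:00:00", 24) == "01:00:00:00"
    assert resolve_timecode(86400, 24) == "01:00:00:00"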
@@ -296,7 +300,7 @@ class ModifiedBurnins(ffmpeg_burnins.Burnins):
            args=args,
            overwrite=overwrite
        )
        print(command)
        # print(command)

        proc = subprocess.Popen(command, shell=True)
        proc.communicate()

@@ -412,7 +416,14 @@ def burnins_from_data(
        data[CURRENT_FRAME_KEY[1:-1]] = CURRENT_FRAME_SPLITTER

    if frame_start_tc is not None:
        data[TIME_CODE_KEY[1:-1]] = TIME_CODE_KEY
        data[TIMECODE_KEY[1:-1]] = TIMECODE_KEY

    source_timecode = stream.get("timecode")
    if source_timecode is None:
        source_timecode = stream.get("tags", {}).get("timecode")

    if source_timecode is not None:
        data[SOURCE_TIMECODE_KEY[1:-1]] = SOURCE_TIMECODE_KEY

    for align_text, value in presets.get('burnins', {}).items():
        if not value:

@@ -425,8 +436,6 @@ def burnins_from_data(
                " (Make sure you have new burnin presets)."
            ).format(str(type(value)), str(value)))

        has_timecode = TIME_CODE_KEY in value

        align = None
        align_text = align_text.strip().lower()
        if align_text == "top_left":

@@ -442,6 +451,7 @@ def burnins_from_data(
        elif align_text == "bottom_right":
            align = ModifiedBurnins.BOTTOM_RIGHT

        has_timecode = TIMECODE_KEY in value
        # Replace with missing key value if frame_start_tc is not set
        if frame_start_tc is None and has_timecode:
            has_timecode = False

@@ -449,7 +459,13 @@ def burnins_from_data(
                "`frame_start` and `frame_start_tc`"
                " are not set in entered data."
            )
            value = value.replace(TIME_CODE_KEY, MISSING_KEY_VALUE)
            value = value.replace(TIMECODE_KEY, MISSING_KEY_VALUE)

        has_source_timecode = SOURCE_TIMECODE_KEY in value
        if source_timecode is None and has_source_timecode:
            has_source_timecode = False
            log.warning("Source does not have set timecode value.")
            value = value.replace(SOURCE_TIMECODE_KEY, MISSING_KEY_VALUE)

        key_pattern = re.compile(r"(\{.*?[^{0]*\})")

@@ -465,10 +481,20 @@ def burnins_from_data(
                value = value.replace(key, MISSING_KEY_VALUE)

        # Handle timecode differently
        if has_source_timecode:
            args = [align, frame_start, frame_end, source_timecode]
            if not value.startswith(SOURCE_TIMECODE_KEY):
                value_items = value.split(SOURCE_TIMECODE_KEY)
                text = value_items[0].format(**data)
                args.append(text)

            burnin.add_timecode(*args)
            continue

        if has_timecode:
            args = [align, frame_start, frame_end, frame_start_tc]
            if not value.startswith(TIME_CODE_KEY):
                value_items = value.split(TIME_CODE_KEY)
            if not value.startswith(TIMECODE_KEY):
                value_items = value.split(TIMECODE_KEY)
            text = value_items[0].format(**data)
            args.append(text)

@@ -25,18 +25,6 @@ log.setLevel(logging.DEBUG)

error_format = "Failed {plugin.__name__}: {error} -- {error.traceback}"


def _load_json(path):
    assert os.path.isfile(path), ("path to json file doesn't exist")
    data = None
    with open(path, "r") as json_file:
        try:
            data = json.load(json_file)
        except Exception as exc:
            log.error(
                "Error loading json: "
                "{} - Exception: {}".format(path, exc)
            )
    return data


def __main__():
    parser = argparse.ArgumentParser()

@@ -90,12 +78,6 @@ def __main__():

    paths = kwargs.paths or [os.environ.get("PYPE_METADATA_FILE")] or [os.getcwd()]  # noqa

    for path in paths:
        data = _load_json(path)
        log.info("Setting session using data from file")
        os.environ["AVALON_PROJECT"] = data["session"]["AVALON_PROJECT"]
        break

    args = [
        os.path.join(pype_root, pype_command),
        "publish",

44
schema/master_version-1.0.json
Normal file
44
schema/master_version-1.0.json
Normal file
@@ -0,0 +1,44 @@
{
    "$schema": "http://json-schema.org/draft-04/schema#",

    "title": "pype:master_version-1.0",
    "description": "Master version of asset",

    "type": "object",

    "additionalProperties": true,

    "required": [
        "version_id",
        "schema",
        "type",
        "parent"
    ],

    "properties": {
        "_id": {
            "description": "Document's id (database will create its own if not entered)",
            "example": "ObjectId(592c33475f8c1b064c4d1696)"
        },
        "version_id": {
            "description": "The version ID from which it was created",
            "example": "ObjectId(592c33475f8c1b064c4d1695)"
        },
        "schema": {
            "description": "The schema associated with this document",
            "type": "string",
            "enum": ["avalon-core:master_version-1.0", "pype:master_version-1.0"],
            "example": "pype:master_version-1.0"
        },
        "type": {
            "description": "The type of document",
            "type": "string",
            "enum": ["master_version"],
            "example": "master_version"
        },
        "parent": {
            "description": "Unique identifier to parent document",
            "example": "ObjectId(592c33475f8c1b064c4d1697)"
        }
    }
}
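To sanity-check a document against the new schema, the third-party `jsonschema` package works out of the box; a sketch assuming it is installed and run from the repository root (the ObjectId strings are placeholders taken from the schema's own examples):

    import json
    import jsonschema  # third-party: pip install jsonschema

    with open("schema/master_version-1.0.json") as schema_file:
        schema = json.load(schema_file)

    document = {
        "version_id": "ObjectId(592c33475f8c1b064c4d1695)",
        "schema": "pype:master_version-1.0",
        "type": "master_version",
        "parent": "ObjectId(592c33475f8c1b064c4d1697)",
    }

    # Raises jsonschema.ValidationError when a required key is missing
    # or `schema`/`type` fall outside their enums.
    jsonschema.validate(document, schema)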