mirror of https://github.com/ynput/ayon-core.git (synced 2025-12-24 21:04:40 +01:00)

Merge branch 'develop' into feature/PYPE-762_multi_root

commit 58e48660f4
65 changed files with 4658 additions and 361 deletions

.flake8 (3 changed lines)
@@ -1,7 +1,6 @@
 [flake8]
 # ignore = D203
-ignore = BLK100
-ignore = W504
+ignore = BLK100, W504, W503
 max-line-length = 79
 exclude =
     .git,

pype/blender/plugin.py (changed)
@@ -10,14 +10,43 @@ from avalon import api
 VALID_EXTENSIONS = [".blend"]


-def model_name(asset: str, subset: str, namespace: Optional[str] = None) -> str:
-    """Return a consistent name for a model asset."""
+def asset_name(
+    asset: str, subset: str, namespace: Optional[str] = None
+) -> str:
+    """Return a consistent name for an asset."""
     name = f"{asset}_{subset}"
     if namespace:
         name = f"{namespace}:{name}"
     return name


+def create_blender_context(active: Optional[bpy.types.Object] = None,
+                           selected: Optional[bpy.types.Object] = None,):
+    """Create a new Blender context. If an object is passed as
+    parameter, it is set as selected and active.
+    """
+
+    if not isinstance(selected, list):
+        selected = [selected]
+
+    for win in bpy.context.window_manager.windows:
+        for area in win.screen.areas:
+            if area.type == 'VIEW_3D':
+                for region in area.regions:
+                    if region.type == 'WINDOW':
+                        override_context = {
+                            'window': win,
+                            'screen': win.screen,
+                            'area': area,
+                            'region': region,
+                            'scene': bpy.context.scene,
+                            'active_object': active,
+                            'selected_objects': selected
+                        }
+                        return override_context
+    raise Exception("Could not create a custom Blender context.")
+
+
 class AssetLoader(api.Loader):
     """A basic AssetLoader for Blender
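For orientation, a quick sketch (not part of the commit) of how the two helpers above behave. The object handed to the operator call is hypothetical, and the dict-style operator override shown follows the pre-3.2 Blender convention:

    import bpy
    from pype.blender.plugin import asset_name, create_blender_context

    # asset_name joins asset and subset, optionally prefixed by a namespace:
    assert asset_name("hero", "modelMain") == "hero_modelMain"
    assert asset_name("hero", "modelMain", "shot010") == "shot010:hero_modelMain"

    # The returned override dict is meant to be handed to operator calls
    # that need a valid 3D-viewport context:
    ctx = create_blender_context(active=bpy.context.object)
    bpy.ops.object.delete(ctx)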
@@ -67,7 +96,8 @@ class AssetLoader(api.Loader):
             assert obj.library, f"'{obj.name}' is not linked."
             libraries.add(obj.library)

-        assert len(libraries) == 1, "'{container.name}' contains objects from more than 1 library."
+        assert len(
+            libraries) == 1, "'{container.name}' contains objects from more than 1 library."

         return list(libraries)[0]
@@ -122,7 +152,7 @@ class AssetLoader(api.Loader):
         asset = context["asset"]["name"]
         subset = context["subset"]["name"]
-        instance_name = model_name(asset, subset, namespace)
+        instance_name = asset_name(asset, subset, namespace)

         return self._get_instance_collection(instance_name, nodes)
@@ -240,6 +240,19 @@ class AppAction(BaseHandler):

         # Full path to executable launcher
         execfile = None

+        if application.get("launch_hook"):
+            hook = application.get("launch_hook")
+            self.log.info("launching hook: {}".format(hook))
+            ret_val = pypelib.execute_hook(
+                application.get("launch_hook"), env=env)
+            if not ret_val:
+                return {
+                    'success': False,
+                    'message': "Hook didn't finish successfully {0}"
+                    .format(self.label)
+                }
+
         if sys.platform == "win32":
             for ext in os.environ["PATHEXT"].split(os.pathsep):
                 fpath = os.path.join(path.strip('"'), self.executable + ext)

pype/hooks/unreal/unreal_prelaunch.py (new file, 83 lines)
@@ -0,0 +1,83 @@
import logging
import os

from pype.lib import PypeHook
from pype.unreal import lib as unreal_lib
from pypeapp import Logger

log = logging.getLogger(__name__)


class UnrealPrelaunch(PypeHook):
    """
    This hook will check if the current workfile path contains an Unreal
    project. If not, it initializes one and finally passes the project
    path to the Unreal launcher shell script through an environment
    variable.
    """

    def __init__(self, logger=None):
        if not logger:
            self.log = Logger().get_logger(self.__class__.__name__)
        else:
            self.log = logger

        self.signature = "( {} )".format(self.__class__.__name__)

    def execute(self, *args, env: dict = None) -> bool:
        if not env:
            env = os.environ
        asset = env["AVALON_ASSET"]
        task = env["AVALON_TASK"]
        workdir = env["AVALON_WORKDIR"]
        engine_version = env["AVALON_APP_NAME"].split("_")[-1]
        project_name = f"{asset}_{task}"

        # Unreal is sensitive about project names longer than 20 chars
        if len(project_name) > 20:
            self.log.warning((f"Project name exceeds 20 characters "
                              f"({project_name})!"))

        # Unreal doesn't accept non-alphabetic characters at the start
        # of the project name. This is because the project name is used
        # in various places inside the C++ code, where variable names
        # cannot start with a non-alpha character. We prepend 'P' to the
        # project name to solve it.
        # 😱
        if not project_name[:1].isalpha():
            self.log.warning(f"Project name doesn't start with an "
                             f"alphabetic character ({project_name}). "
                             f"Prepending 'P'")
            project_name = f"P{project_name}"

        project_path = os.path.join(workdir, project_name)

        self.log.info((f"{self.signature} requested UE4 version: "
                       f"[ {engine_version} ]"))

        detected = unreal_lib.get_engine_versions()
        detected_str = ', '.join(detected.keys()) or 'none'
        self.log.info((f"{self.signature} detected UE4 versions: "
                       f"[ {detected_str} ]"))
        del detected_str
        engine_version = ".".join(engine_version.split(".")[:2])
        if engine_version not in detected.keys():
            self.log.error((f"{self.signature} requested version not "
                            f"detected [ {engine_version} ]"))
            return False

        os.makedirs(project_path, exist_ok=True)

        project_file = os.path.join(project_path, f"{project_name}.uproject")
        engine_path = detected[engine_version]
        if not os.path.isfile(project_file):
            self.log.info((f"{self.signature} creating unreal "
                           f"project [ {project_name} ]"))
            if env.get("AVALON_UNREAL_PLUGIN"):
                os.environ["AVALON_UNREAL_PLUGIN"] = env.get("AVALON_UNREAL_PLUGIN")  # noqa: E501
            unreal_lib.create_unreal_project(project_name,
                                             engine_version,
                                             project_path,
                                             engine_path=engine_path)

        env["PYPE_UNREAL_PROJECT_FILE"] = project_file
        env["AVALON_CURRENT_UNREAL_ENGINE"] = engine_path
        return True
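A hedged sketch (not part of the commit) of how this hook would be triggered through pype.lib.execute_hook; the exact hook path string is an assumption based on the path form documented for execute_hook in pype/lib.py below:

    import os
    from pype.lib import execute_hook

    # The path encodes module location and class name in one string.
    ok = execute_hook(
        "pype/hooks/unreal/unreal_prelaunch.py/UnrealPrelaunch",
        env=dict(os.environ),
    )
    if not ok:
        raise RuntimeError("Unreal prelaunch hook failed")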

pype/lib.py (79 changed lines)
@@ -1,10 +1,15 @@
 import os
 import sys
+import types
 import re
 import logging
 import itertools
 import contextlib
 import subprocess
+import inspect
+from abc import ABCMeta, abstractmethod
+
+import six

 from avalon import io
 import avalon.api
@@ -177,7 +182,8 @@ def modified_environ(*remove, **update):
     is sure to work in all situations.

     :param remove: Environment variables to remove.
-    :param update: Dictionary of environment variables and values to add/update.
+    :param update: Dictionary of environment variables
+                   and values to add/update.
     """
     env = os.environ
     update = update or {}
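A minimal usage sketch based on the docstring above (illustrative variable values):

    import os
    from pype.lib import modified_environ

    # 'TMPDIR' is removed and 'AVALON_TASK' set only inside the block;
    # the previous environment is restored on exit, even on error.
    with modified_environ('TMPDIR', AVALON_TASK="animation"):
        assert os.environ["AVALON_TASK"] == "animation"
        assert "TMPDIR" not in os.environ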
@@ -403,8 +409,8 @@ def switch_item(container,
         "parent": version["_id"]}
     )

-    assert representation, ("Could not find representation in the database with"
-                            " the name '%s'" % representation_name)
+    assert representation, ("Could not find representation in the database "
+                            "with the name '%s'" % representation_name)

     avalon.api.switch(container, representation)
@@ -537,7 +543,9 @@ def get_subsets(asset_name,
     """
     Query subsets with filter on name.

-    The method will return all found subsets and its defined version and subsets. Version could be specified with number. Representation can be filtered.
+    The method will return all found subsets with their defined version.
+    The version can be specified by number, and representations can be
+    filtered.

     Arguments:
         asset_name (str): asset (shot) name
@@ -554,8 +562,8 @@ def get_subsets(asset_name,
     asset_io = io.find_one({"type": "asset", "name": asset_name})

     # check if anything returned
-    assert asset_io, "Asset not existing. \
-        Check correct name: `{}`".format(asset_name)
+    assert asset_io, (
+        "Asset not existing. Check correct name: `{}`").format(asset_name)

     # create subsets query filter
     filter_query = {"type": "subset", "parent": asset_io["_id"]}
@@ -569,7 +577,9 @@ def get_subsets(asset_name,
     # query all assets
     subsets = [s for s in io.find(filter_query)]

-    assert subsets, "No subsets found. Check correct filter. Try this for start `r'.*'`: asset: `{}`".format(asset_name)
+    assert subsets, ("No subsets found. Check correct filter. "
+                     "Try this for start `r'.*'`: "
+                     "asset: `{}`").format(asset_name)

     output_dict = {}
     # Process subsets
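An illustrative call of get_subsets (hedged: only asset_name is visible in these hunks; the remaining keyword names are assumed from the BuildWorkfile reader signature later in this diff):

    from pype.lib import get_subsets

    subsets = get_subsets(
        "sh010",
        regex_filter=r"render.*",
        representations=["exr", "mov"],
    )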
@@ -643,3 +653,58 @@ class CustomNone:
     def __repr__(self):
         """Representation of custom None."""
         return "<CustomNone-{}>".format(str(self.identifier))
+
+
+def execute_hook(hook, *args, **kwargs):
+    """
+    This will load the hook file, instantiate the class and call its
+    `execute` method. The hook must be in the form:
+
+    `$PYPE_ROOT/repos/pype/path/to/hook.py/HookClass`
+
+    This will load `hook.py`, instantiate HookClass and then call
+    `execute(*args, **kwargs)`.
+
+    :param hook: path to hook class
+    :type hook: str
+    """
+
+    class_name = hook.split("/")[-1]
+
+    abspath = os.path.join(os.getenv('PYPE_ROOT'),
+                           'repos', 'pype', *hook.split("/")[:-1])
+
+    mod_name, mod_ext = os.path.splitext(os.path.basename(abspath))
+
+    if not mod_ext == ".py":
+        return False
+
+    module = types.ModuleType(mod_name)
+    module.__file__ = abspath
+
+    try:
+        with open(abspath) as f:
+            six.exec_(f.read(), module.__dict__)
+
+        sys.modules[abspath] = module
+
+    except Exception as exp:
+        log.exception("loading hook failed: {}".format(exp),
+                      exc_info=True)
+        return False
+
+    obj = getattr(module, class_name)
+    hook_obj = obj()
+    ret_val = hook_obj.execute(*args, **kwargs)
+    return ret_val
+
+
+@six.add_metaclass(ABCMeta)
+class PypeHook:
+
+    def __init__(self):
+        pass
+
+    @abstractmethod
+    def execute(self, *args, **kwargs):
+        pass
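For reference, the minimal shape of a hook that execute_hook can load and run; the class name here is hypothetical:

    from pype.lib import PypeHook

    class ExamplePrelaunch(PypeHook):
        def execute(self, *args, **kwargs) -> bool:
            # A falsy return value signals failure to the caller
            # (see the launch_hook handling in AppAction above).
            return True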

pype/nuke/lib.py (changed)
@@ -28,7 +28,7 @@ self = sys.modules[__name__]
 self._project = None


-def onScriptLoad():
+def on_script_load():
     ''' Callback for ffmpeg support
     '''
     if nuke.env['LINUX']:
@@ -39,7 +39,7 @@ def onScriptLoad():
         nuke.tcl('load movWriter')


-def checkInventoryVersions():
+def check_inventory_versions():
     """
     Actual version identifier of loaded containers
@@ -180,8 +180,8 @@ def format_anatomy(data):
         padding = int(anatomy.templates['render']['padding'])
     except KeyError as e:
         msg = ("`padding` key is not in `render` "
-            "Anatomy template. Please, add it there and restart "
-            "the pipeline (padding: \"4\"): `{}`").format(e)
+               "Anatomy template. Please, add it there and restart "
+               "the pipeline (padding: \"4\"): `{}`").format(e)

         log.error(msg)
         nuke.message(msg)
@@ -700,7 +700,8 @@ class WorkfileSettings(object):
     def set_reads_colorspace(self, reads):
         """ Setting colorspace to Read nodes

-        Looping through all read nodes and tries to set colorspace based on regex rules in presets
+        Looping through all read nodes and tries to set colorspace based
+        on regex rules in presets
         """
         changes = dict()
         for n in nuke.allNodes():
@@ -872,10 +873,10 @@ class WorkfileSettings(object):
         if any(x for x in data.values() if x is None):
             msg = ("Missing set shot attributes in DB."
-                "\nContact your supervisor!."
-                "\n\nWidth: `{width}`"
-                "\nHeight: `{height}`"
-                "\nPixel Asspect: `{pixel_aspect}`").format(**data)
+                   "\nContact your supervisor!"
+                   "\n\nWidth: `{width}`"
+                   "\nHeight: `{height}`"
+                   "\nPixel Aspect: `{pixel_aspect}`").format(**data)
             log.error(msg)
             nuke.message(msg)
@@ -894,8 +895,9 @@ class WorkfileSettings(object):
             )
         except Exception as e:
             bbox = None
-            msg = ("{}:{} \nFormat:Crop need to be set with dots, example: "
-                   "0.0.1920.1080, /nSetting to default").format(__name__, e)
+            msg = ("{}:{} \nFormat:Crop needs to be set with dots, "
+                   "example: 0.0.1920.1080, "
+                   "\nSetting to default").format(__name__, e)
             log.error(msg)
             nuke.message(msg)
@@ -1036,7 +1038,8 @@ class BuildWorkfile(WorkfileSettings):
         """
         Building first version of workfile.

-        Settings are taken from presets and db. It will add all subsets in last version for defined representaions
+        Settings are taken from presets and db. It will add all subsets
+        in the last version for the defined representations

         Arguments:
             variable (type): description
@@ -1133,7 +1136,7 @@ class BuildWorkfile(WorkfileSettings):
                     regex_filter=None,
                     version=None,
                     representations=["exr", "dpx", "lutJson", "mov",
-                                     "preview", "png"]):
+                                     "preview", "png", "jpeg", "jpg"]):
         """
         A short description.
@@ -1263,8 +1266,6 @@ class BuildWorkfile(WorkfileSettings):
             representation (dict): avalon db entity

         """
         context = representation["context"]

-        loader_name = "LoadLuts"
-
         loader_plugin = None

pype/plugins/blender/create/create_action.py (new file, 40 lines)
@@ -0,0 +1,40 @@
"""Create an action asset."""

import bpy

from avalon import api
from avalon.blender import Creator, lib
import pype.blender.plugin


class CreateAction(Creator):
    """Action output for character rigs"""

    name = "actionMain"
    label = "Action"
    family = "action"
    icon = "male"

    def process(self):
        asset = self.data["asset"]
        subset = self.data["subset"]
        name = pype.blender.plugin.asset_name(asset, subset)
        collection = bpy.data.collections.new(name=name)
        bpy.context.scene.collection.children.link(collection)
        self.data['task'] = api.Session.get('AVALON_TASK')
        lib.imprint(collection, self.data)

        if (self.options or {}).get("useSelection"):
            for obj in lib.get_selection():
                if (obj.animation_data is not None
                        and obj.animation_data.action is not None):

                    empty_obj = bpy.data.objects.new(name=name,
                                                     object_data=None)
                    empty_obj.animation_data_create()
                    empty_obj.animation_data.action = obj.animation_data.action
                    empty_obj.animation_data.action.name = name
                    collection.objects.link(empty_obj)

        return collection

pype/plugins/blender/create/create_animation.py (new file, 52 lines)
@@ -0,0 +1,52 @@
"""Create an animation asset."""

import bpy

from avalon import api
from avalon.blender import Creator, lib
import pype.blender.plugin


class CreateAnimation(Creator):
    """Animation output for character rigs"""

    name = "animationMain"
    label = "Animation"
    family = "animation"
    icon = "male"

    def process(self):
        asset = self.data["asset"]
        subset = self.data["subset"]
        name = pype.blender.plugin.asset_name(asset, subset)
        collection = bpy.data.collections.new(name=name)
        bpy.context.scene.collection.children.link(collection)
        self.data['task'] = api.Session.get('AVALON_TASK')
        lib.imprint(collection, self.data)

        # Add the rig object and all the children meshes to
        # a set, and link them all at the end to avoid duplicates.
        # Blender crashes if trying to link an object that is already linked.
        # This automatically links the children meshes if they were not
        # selected, and doesn't link them twice if they, instead,
        # were manually selected by the user.
        objects_to_link = set()

        if (self.options or {}).get("useSelection"):
            for obj in lib.get_selection():
                objects_to_link.add(obj)

                if obj.type == 'ARMATURE':
                    for subobj in obj.children:
                        objects_to_link.add(subobj)

        for obj in objects_to_link:
            collection.objects.link(obj)

        return collection

pype/plugins/blender/create/create_model.py (changed)

@@ -4,6 +4,7 @@ import bpy
 from avalon import api
 from avalon.blender import Creator, lib
+import pype.blender.plugin


 class CreateModel(Creator):

@@ -15,11 +16,10 @@ class CreateModel(Creator):
     icon = "cube"

     def process(self):
-        import pype.blender

         asset = self.data["asset"]
         subset = self.data["subset"]
-        name = pype.blender.plugin.model_name(asset, subset)
+        name = pype.blender.plugin.asset_name(asset, subset)
         collection = bpy.data.collections.new(name=name)
         bpy.context.scene.collection.children.link(collection)
         self.data['task'] = api.Session.get('AVALON_TASK')

pype/plugins/blender/create/create_rig.py (new file, 52 lines)
@@ -0,0 +1,52 @@
"""Create a rig asset."""

import bpy

from avalon import api
from avalon.blender import Creator, lib
import pype.blender.plugin


class CreateRig(Creator):
    """Artist-friendly rig with controls to direct motion"""

    name = "rigMain"
    label = "Rig"
    family = "rig"
    icon = "wheelchair"

    def process(self):
        asset = self.data["asset"]
        subset = self.data["subset"]
        name = pype.blender.plugin.asset_name(asset, subset)
        collection = bpy.data.collections.new(name=name)
        bpy.context.scene.collection.children.link(collection)
        self.data['task'] = api.Session.get('AVALON_TASK')
        lib.imprint(collection, self.data)

        # Add the rig object and all the children meshes to
        # a set, and link them all at the end to avoid duplicates.
        # Blender crashes if trying to link an object that is already linked.
        # This automatically links the children meshes if they were not
        # selected, and doesn't link them twice if they, instead,
        # were manually selected by the user.
        objects_to_link = set()

        if (self.options or {}).get("useSelection"):
            for obj in lib.get_selection():
                objects_to_link.add(obj)

                if obj.type == 'ARMATURE':
                    for subobj in obj.children:
                        objects_to_link.add(subobj)

        for obj in objects_to_link:
            collection.objects.link(obj)

        return collection

pype/plugins/blender/load/load_action.py (new file, 304 lines)
@@ -0,0 +1,304 @@
"""Load an action in Blender."""

import logging
from pathlib import Path
from pprint import pformat
from typing import Dict, List, Optional

from avalon import api, blender
import bpy
import pype.blender.plugin

logger = logging.getLogger("pype").getChild("blender").getChild("load_action")


class BlendActionLoader(pype.blender.plugin.AssetLoader):
    """Load action from a .blend file.

    Warning:
        Loading the same asset more than once is not properly supported at
        the moment.
    """

    families = ["action"]
    representations = ["blend"]

    label = "Link Action"
    icon = "code-fork"
    color = "orange"

    def process_asset(
        self, context: dict, name: str, namespace: Optional[str] = None,
        options: Optional[Dict] = None
    ) -> Optional[List]:
        """
        Arguments:
            name: Use pre-defined name
            namespace: Use pre-defined namespace
            context: Full parenthood of representation to load
            options: Additional settings dictionary
        """

        libpath = self.fname
        asset = context["asset"]["name"]
        subset = context["subset"]["name"]
        lib_container = pype.blender.plugin.asset_name(asset, subset)
        container_name = pype.blender.plugin.asset_name(
            asset, subset, namespace
        )

        container = bpy.data.collections.new(lib_container)
        container.name = container_name
        blender.pipeline.containerise_existing(
            container,
            name,
            namespace,
            context,
            self.__class__.__name__,
        )

        container_metadata = container.get(
            blender.pipeline.AVALON_PROPERTY)

        container_metadata["libpath"] = libpath
        container_metadata["lib_container"] = lib_container

        relative = bpy.context.preferences.filepaths.use_relative_paths
        with bpy.data.libraries.load(
            libpath, link=True, relative=relative
        ) as (_, data_to):
            data_to.collections = [lib_container]

        collection = bpy.context.scene.collection

        collection.children.link(bpy.data.collections[lib_container])

        animation_container = collection.children[lib_container].make_local()

        objects_list = []

        # Link meshes first, then armatures.
        # The armature is unparented for all the non-local meshes,
        # when it is made local.
        for obj in animation_container.objects:
            obj = obj.make_local()

            anim_data = obj.animation_data

            if anim_data is not None and anim_data.action is not None:
                anim_data.action.make_local()

            if not obj.get(blender.pipeline.AVALON_PROPERTY):
                obj[blender.pipeline.AVALON_PROPERTY] = dict()

            avalon_info = obj[blender.pipeline.AVALON_PROPERTY]
            avalon_info.update({"container_name": container_name})

            objects_list.append(obj)

        animation_container.pop(blender.pipeline.AVALON_PROPERTY)

        # Save the list of objects in the metadata container
        container_metadata["objects"] = objects_list

        bpy.ops.object.select_all(action='DESELECT')

        nodes = list(container.objects)
        nodes.append(container)
        self[:] = nodes
        return nodes

    def update(self, container: Dict, representation: Dict):
        """Update the loaded asset.

        This will remove all objects of the current collection, load the new
        ones and add them to the collection.
        If the objects of the collection are used in another collection they
        will not be removed, only unlinked. Normally this should not be the
        case though.

        Warning:
            No nested collections are supported at the moment!
        """

        collection = bpy.data.collections.get(
            container["objectName"]
        )

        libpath = Path(api.get_representation_path(representation))
        extension = libpath.suffix.lower()

        logger.info(
            "Container: %s\nRepresentation: %s",
            pformat(container, indent=2),
            pformat(representation, indent=2),
        )

        assert collection, (
            f"The asset is not loaded: {container['objectName']}"
        )
        assert not (collection.children), (
            "Nested collections are not supported."
        )
        assert libpath, (
            "No existing library file found for {container['objectName']}"
        )
        assert libpath.is_file(), (
            f"The file doesn't exist: {libpath}"
        )
        assert extension in pype.blender.plugin.VALID_EXTENSIONS, (
            f"Unsupported file: {libpath}"
        )

        collection_metadata = collection.get(
            blender.pipeline.AVALON_PROPERTY)

        collection_libpath = collection_metadata["libpath"]
        normalized_collection_libpath = (
            str(Path(bpy.path.abspath(collection_libpath)).resolve())
        )
        normalized_libpath = (
            str(Path(bpy.path.abspath(str(libpath))).resolve())
        )
        logger.debug(
            "normalized_collection_libpath:\n  %s\nnormalized_libpath:\n  %s",
            normalized_collection_libpath,
            normalized_libpath,
        )
        if normalized_collection_libpath == normalized_libpath:
            logger.info("Library already loaded, not updating...")
            return

        strips = []

        for obj in collection_metadata["objects"]:
            # Get all the strips that use the action
            arm_objs = [
                arm for arm in bpy.data.objects if arm.type == 'ARMATURE']

            for armature_obj in arm_objs:
                if armature_obj.animation_data is not None:
                    for track in armature_obj.animation_data.nla_tracks:
                        for strip in track.strips:
                            if strip.action == obj.animation_data.action:
                                strips.append(strip)

            bpy.data.actions.remove(obj.animation_data.action)
            bpy.data.objects.remove(obj)

        lib_container = collection_metadata["lib_container"]

        bpy.data.collections.remove(bpy.data.collections[lib_container])

        relative = bpy.context.preferences.filepaths.use_relative_paths
        with bpy.data.libraries.load(
            str(libpath), link=True, relative=relative
        ) as (_, data_to):
            data_to.collections = [lib_container]

        scene = bpy.context.scene

        scene.collection.children.link(bpy.data.collections[lib_container])

        anim_container = scene.collection.children[lib_container].make_local()

        objects_list = []

        # Link meshes first, then armatures.
        # The armature is unparented for all the non-local meshes,
        # when it is made local.
        for obj in anim_container.objects:
            obj = obj.make_local()

            anim_data = obj.animation_data

            if anim_data is not None and anim_data.action is not None:
                anim_data.action.make_local()

                for strip in strips:
                    strip.action = anim_data.action
                    strip.action_frame_end = anim_data.action.frame_range[1]

            if not obj.get(blender.pipeline.AVALON_PROPERTY):
                obj[blender.pipeline.AVALON_PROPERTY] = dict()

            avalon_info = obj[blender.pipeline.AVALON_PROPERTY]
            avalon_info.update({"container_name": collection.name})

            objects_list.append(obj)

        anim_container.pop(blender.pipeline.AVALON_PROPERTY)

        # Save the list of objects in the metadata container
        collection_metadata["objects"] = objects_list
        collection_metadata["libpath"] = str(libpath)
        collection_metadata["representation"] = str(representation["_id"])

        bpy.ops.object.select_all(action='DESELECT')

    def remove(self, container: Dict) -> bool:
        """Remove an existing container from a Blender scene.

        Arguments:
            container (avalon-core:container-1.0): Container to remove,
                from `host.ls()`.

        Returns:
            bool: Whether the container was deleted.

        Warning:
            No nested collections are supported at the moment!
        """

        collection = bpy.data.collections.get(
            container["objectName"]
        )
        if not collection:
            return False
        assert not (collection.children), (
            "Nested collections are not supported."
        )

        collection_metadata = collection.get(
            blender.pipeline.AVALON_PROPERTY)
        objects = collection_metadata["objects"]
        lib_container = collection_metadata["lib_container"]

        for obj in objects:
            # Get all the strips that use the action
            arm_objs = [
                arm for arm in bpy.data.objects if arm.type == 'ARMATURE']

            for armature_obj in arm_objs:
                if armature_obj.animation_data is not None:
                    for track in armature_obj.animation_data.nla_tracks:
                        for strip in track.strips:
                            if strip.action == obj.animation_data.action:
                                track.strips.remove(strip)

            bpy.data.actions.remove(obj.animation_data.action)
            bpy.data.objects.remove(obj)

        bpy.data.collections.remove(bpy.data.collections[lib_container])
        bpy.data.collections.remove(collection)

        return True

pype/plugins/blender/load/load_animation.py (new file, 256 lines)
@@ -0,0 +1,256 @@
"""Load an animation in Blender."""

import logging
from pathlib import Path
from pprint import pformat
from typing import Dict, List, Optional

from avalon import api, blender
import bpy
import pype.blender.plugin


logger = logging.getLogger("pype").getChild(
    "blender").getChild("load_animation")


class BlendAnimationLoader(pype.blender.plugin.AssetLoader):
    """Load animations from a .blend file.

    Warning:
        Loading the same asset more than once is not properly supported at
        the moment.
    """

    families = ["animation"]
    representations = ["blend"]

    label = "Link Animation"
    icon = "code-fork"
    color = "orange"

    @staticmethod
    def _remove(self, objects, lib_container):
        for obj in objects:
            if obj.type == 'ARMATURE':
                bpy.data.armatures.remove(obj.data)
            elif obj.type == 'MESH':
                bpy.data.meshes.remove(obj.data)

        bpy.data.collections.remove(bpy.data.collections[lib_container])

    @staticmethod
    def _process(self, libpath, lib_container, container_name):
        relative = bpy.context.preferences.filepaths.use_relative_paths
        with bpy.data.libraries.load(
            libpath, link=True, relative=relative
        ) as (_, data_to):
            data_to.collections = [lib_container]

        scene = bpy.context.scene

        scene.collection.children.link(bpy.data.collections[lib_container])

        anim_container = scene.collection.children[lib_container].make_local()

        meshes = [obj for obj in anim_container.objects if obj.type == 'MESH']
        armatures = [
            obj for obj in anim_container.objects if obj.type == 'ARMATURE']

        # Should check if there is only an armature?

        objects_list = []

        # Link meshes first, then armatures.
        # The armature is unparented for all the non-local meshes,
        # when it is made local.
        for obj in meshes + armatures:
            obj = obj.make_local()

            obj.data.make_local()

            anim_data = obj.animation_data

            if anim_data is not None and anim_data.action is not None:
                anim_data.action.make_local()

            if not obj.get(blender.pipeline.AVALON_PROPERTY):
                obj[blender.pipeline.AVALON_PROPERTY] = dict()

            avalon_info = obj[blender.pipeline.AVALON_PROPERTY]
            avalon_info.update({"container_name": container_name})

            objects_list.append(obj)

        anim_container.pop(blender.pipeline.AVALON_PROPERTY)

        bpy.ops.object.select_all(action='DESELECT')

        return objects_list

    def process_asset(
        self, context: dict, name: str, namespace: Optional[str] = None,
        options: Optional[Dict] = None
    ) -> Optional[List]:
        """
        Arguments:
            name: Use pre-defined name
            namespace: Use pre-defined namespace
            context: Full parenthood of representation to load
            options: Additional settings dictionary
        """

        libpath = self.fname
        asset = context["asset"]["name"]
        subset = context["subset"]["name"]
        lib_container = pype.blender.plugin.asset_name(asset, subset)
        container_name = pype.blender.plugin.asset_name(
            asset, subset, namespace
        )

        container = bpy.data.collections.new(lib_container)
        container.name = container_name
        blender.pipeline.containerise_existing(
            container,
            name,
            namespace,
            context,
            self.__class__.__name__,
        )

        container_metadata = container.get(
            blender.pipeline.AVALON_PROPERTY)

        container_metadata["libpath"] = libpath
        container_metadata["lib_container"] = lib_container

        objects_list = self._process(
            self, libpath, lib_container, container_name)

        # Save the list of objects in the metadata container
        container_metadata["objects"] = objects_list

        nodes = list(container.objects)
        nodes.append(container)
        self[:] = nodes
        return nodes

    def update(self, container: Dict, representation: Dict):
        """Update the loaded asset.

        This will remove all objects of the current collection, load the new
        ones and add them to the collection.
        If the objects of the collection are used in another collection they
        will not be removed, only unlinked. Normally this should not be the
        case though.

        Warning:
            No nested collections are supported at the moment!
        """

        collection = bpy.data.collections.get(
            container["objectName"]
        )

        libpath = Path(api.get_representation_path(representation))
        extension = libpath.suffix.lower()

        logger.info(
            "Container: %s\nRepresentation: %s",
            pformat(container, indent=2),
            pformat(representation, indent=2),
        )

        assert collection, (
            f"The asset is not loaded: {container['objectName']}"
        )
        assert not (collection.children), (
            "Nested collections are not supported."
        )
        assert libpath, (
            "No existing library file found for {container['objectName']}"
        )
        assert libpath.is_file(), (
            f"The file doesn't exist: {libpath}"
        )
        assert extension in pype.blender.plugin.VALID_EXTENSIONS, (
            f"Unsupported file: {libpath}"
        )

        collection_metadata = collection.get(
            blender.pipeline.AVALON_PROPERTY)

        collection_libpath = collection_metadata["libpath"]
        normalized_collection_libpath = (
            str(Path(bpy.path.abspath(collection_libpath)).resolve())
        )
        normalized_libpath = (
            str(Path(bpy.path.abspath(str(libpath))).resolve())
        )
        logger.debug(
            "normalized_collection_libpath:\n  %s\nnormalized_libpath:\n  %s",
            normalized_collection_libpath,
            normalized_libpath,
        )
        if normalized_collection_libpath == normalized_libpath:
            logger.info("Library already loaded, not updating...")
            return

        objects = collection_metadata["objects"]
        lib_container = collection_metadata["lib_container"]

        # Get the armature of the rig
        armatures = [obj for obj in objects if obj.type == 'ARMATURE']
        assert(len(armatures) == 1)

        self._remove(self, objects, lib_container)

        objects_list = self._process(
            self, str(libpath), lib_container, collection.name)

        # Save the list of objects in the metadata container
        collection_metadata["objects"] = objects_list
        collection_metadata["libpath"] = str(libpath)
        collection_metadata["representation"] = str(representation["_id"])

        bpy.ops.object.select_all(action='DESELECT')

    def remove(self, container: Dict) -> bool:
        """Remove an existing container from a Blender scene.

        Arguments:
            container (avalon-core:container-1.0): Container to remove,
                from `host.ls()`.

        Returns:
            bool: Whether the container was deleted.

        Warning:
            No nested collections are supported at the moment!
        """

        collection = bpy.data.collections.get(
            container["objectName"]
        )
        if not collection:
            return False
        assert not (collection.children), (
            "Nested collections are not supported."
        )

        collection_metadata = collection.get(
            blender.pipeline.AVALON_PROPERTY)
        objects = collection_metadata["objects"]
        lib_container = collection_metadata["lib_container"]

        self._remove(self, objects, lib_container)

        bpy.data.collections.remove(collection)

        return True

pype/plugins/blender/load/load_model.py (changed)

@@ -5,15 +5,14 @@ from pathlib import Path
 from pprint import pformat
 from typing import Dict, List, Optional

-import avalon.blender.pipeline
+from avalon import api, blender
 import bpy
-import pype.blender
-from avalon import api
+import pype.blender.plugin

 logger = logging.getLogger("pype").getChild("blender").getChild("load_model")


-class BlendModelLoader(pype.blender.AssetLoader):
+class BlendModelLoader(pype.blender.plugin.AssetLoader):
     """Load models from a .blend file.

     Because they come from a .blend file we can simply link the collection that

@@ -32,34 +31,55 @@ class BlendModelLoader(pype.blender.AssetLoader):
     color = "orange"

     @staticmethod
-    def _get_lib_collection(name: str, libpath: Path) -> Optional[bpy.types.Collection]:
-        """Find the collection(s) with name, loaded from libpath.
-
-        Note:
-            It is assumed that only 1 matching collection is found.
-        """
-        for collection in bpy.data.collections:
-            if collection.name != name:
-                continue
-            if collection.library is None:
-                continue
-            if not collection.library.filepath:
-                continue
-            collection_lib_path = str(Path(bpy.path.abspath(collection.library.filepath)).resolve())
-            normalized_libpath = str(Path(bpy.path.abspath(str(libpath))).resolve())
-            if collection_lib_path == normalized_libpath:
-                return collection
-        return None
+    def _remove(self, objects, lib_container):
+        for obj in objects:
+            bpy.data.meshes.remove(obj.data)
+
+        bpy.data.collections.remove(bpy.data.collections[lib_container])

     @staticmethod
-    def _collection_contains_object(
-        collection: bpy.types.Collection, object: bpy.types.Object
-    ) -> bool:
-        """Check if the collection contains the object."""
-        for obj in collection.objects:
-            if obj == object:
-                return True
-        return False
+    def _process(self, libpath, lib_container, container_name):
+        relative = bpy.context.preferences.filepaths.use_relative_paths
+        with bpy.data.libraries.load(
+            libpath, link=True, relative=relative
+        ) as (_, data_to):
+            data_to.collections = [lib_container]
+
+        scene = bpy.context.scene
+
+        scene.collection.children.link(bpy.data.collections[lib_container])
+
+        model_container = scene.collection.children[lib_container].make_local()
+
+        objects_list = []
+
+        for obj in model_container.objects:
+            obj = obj.make_local()
+
+            obj.data.make_local()
+
+            for material_slot in obj.material_slots:
+                material_slot.material.make_local()
+
+            if not obj.get(blender.pipeline.AVALON_PROPERTY):
+                obj[blender.pipeline.AVALON_PROPERTY] = dict()
+
+            avalon_info = obj[blender.pipeline.AVALON_PROPERTY]
+            avalon_info.update({"container_name": container_name})
+
+            objects_list.append(obj)
+
+        model_container.pop(blender.pipeline.AVALON_PROPERTY)
+
+        bpy.ops.object.select_all(action='DESELECT')
+
+        return objects_list

     def process_asset(
         self, context: dict, name: str, namespace: Optional[str] = None,

@@ -76,42 +96,35 @@ class BlendModelLoader(pype.blender.AssetLoader):
         libpath = self.fname
         asset = context["asset"]["name"]
         subset = context["subset"]["name"]
-        lib_container = pype.blender.plugin.model_name(asset, subset)
-        container_name = pype.blender.plugin.model_name(
+        lib_container = pype.blender.plugin.asset_name(asset, subset)
+        container_name = pype.blender.plugin.asset_name(
             asset, subset, namespace
         )
-        relative = bpy.context.preferences.filepaths.use_relative_paths
-        with bpy.data.libraries.load(
-            libpath, link=True, relative=relative
-        ) as (_, data_to):
-            data_to.collections = [lib_container]
-
-        scene = bpy.context.scene
-        instance_empty = bpy.data.objects.new(
-            container_name, None
-        )
-        if not instance_empty.get("avalon"):
-            instance_empty["avalon"] = dict()
-        avalon_info = instance_empty["avalon"]
-        avalon_info.update({"container_name": container_name})
-        scene.collection.objects.link(instance_empty)
-        instance_empty.instance_type = 'COLLECTION'
-        container = bpy.data.collections[lib_container]
-        container.name = container_name
-        instance_empty.instance_collection = container
-        container.make_local()
-        avalon.blender.pipeline.containerise_existing(
-            container,
+        collection = bpy.data.collections.new(lib_container)
+        collection.name = container_name
+        blender.pipeline.containerise_existing(
+            collection,
             name,
             namespace,
             context,
             self.__class__.__name__,
         )

-        nodes = list(container.objects)
-        nodes.append(container)
-        nodes.append(instance_empty)
+        container_metadata = collection.get(
+            blender.pipeline.AVALON_PROPERTY)
+
+        container_metadata["libpath"] = libpath
+        container_metadata["lib_container"] = lib_container
+
+        objects_list = self._process(
+            self, libpath, lib_container, container_name)
+
+        # Save the list of objects in the metadata container
+        container_metadata["objects"] = objects_list
+
+        nodes = list(collection.objects)
+        nodes.append(collection)
         self[:] = nodes
         return nodes

@@ -154,9 +167,13 @@ class BlendModelLoader(pype.blender.AssetLoader):
         assert extension in pype.blender.plugin.VALID_EXTENSIONS, (
             f"Unsupported file: {libpath}"
         )
-        collection_libpath = (
-            self._get_library_from_container(collection).filepath
-        )

+        collection_metadata = collection.get(
+            blender.pipeline.AVALON_PROPERTY)
+        collection_libpath = collection_metadata["libpath"]
+        objects = collection_metadata["objects"]
+        lib_container = collection_metadata["lib_container"]
+
         normalized_collection_libpath = (
             str(Path(bpy.path.abspath(collection_libpath)).resolve())
         )

@@ -171,58 +188,16 @@ class BlendModelLoader(pype.blender.AssetLoader):
         if normalized_collection_libpath == normalized_libpath:
             logger.info("Library already loaded, not updating...")
             return
-        # Let Blender's garbage collection take care of removing the library
-        # itself after removing the objects.
-        objects_to_remove = set()
-        collection_objects = list()
-        collection_objects[:] = collection.objects
-        for obj in collection_objects:
-            # Unlink every object
-            collection.objects.unlink(obj)
-            remove_obj = True
-            for coll in [
-                coll for coll in bpy.data.collections
-                if coll != collection
-            ]:
-                if (
-                    coll.objects and
-                    self._collection_contains_object(coll, obj)
-                ):
-                    remove_obj = False
-            if remove_obj:
-                objects_to_remove.add(obj)
-
-        for obj in objects_to_remove:
-            # Only delete objects that are not used elsewhere
-            bpy.data.objects.remove(obj)
-
-        instance_empties = [
-            obj for obj in collection.users_dupli_group
-            if obj.name in collection.name
-        ]
-        if instance_empties:
-            instance_empty = instance_empties[0]
-            container_name = instance_empty["avalon"]["container_name"]
-
-        relative = bpy.context.preferences.filepaths.use_relative_paths
-        with bpy.data.libraries.load(
-            str(libpath), link=True, relative=relative
-        ) as (_, data_to):
-            data_to.collections = [container_name]
-
-        new_collection = self._get_lib_collection(container_name, libpath)
-        if new_collection is None:
-            raise ValueError(
-                "A matching collection '{container_name}' "
-                "should have been found in: {libpath}"
-            )
-
-        for obj in new_collection.objects:
-            collection.objects.link(obj)
-        bpy.data.collections.remove(new_collection)
-        # Update the representation on the collection
-        avalon_prop = collection[avalon.blender.pipeline.AVALON_PROPERTY]
-        avalon_prop["representation"] = str(representation["_id"])
+        self._remove(self, objects, lib_container)
+
+        objects_list = self._process(
+            self, str(libpath), lib_container, collection.name)
+
+        # Save the list of objects in the metadata container
+        collection_metadata["objects"] = objects_list
+        collection_metadata["libpath"] = str(libpath)
+        collection_metadata["representation"] = str(representation["_id"])

     def remove(self, container: Dict) -> bool:
         """Remove an existing container from a Blender scene.

@@ -245,16 +220,20 @@ class BlendModelLoader(pype.blender.AssetLoader):
         assert not (collection.children), (
             "Nested collections are not supported."
         )
-        instance_parents = list(collection.users_dupli_group)
-        instance_objects = list(collection.objects)
-        for obj in instance_objects + instance_parents:
-            bpy.data.objects.remove(obj)

+        collection_metadata = collection.get(
+            blender.pipeline.AVALON_PROPERTY)
+        objects = collection_metadata["objects"]
+        lib_container = collection_metadata["lib_container"]
+
+        self._remove(self, objects, lib_container)
+
         bpy.data.collections.remove(collection)

         return True


-class CacheModelLoader(pype.blender.AssetLoader):
+class CacheModelLoader(pype.blender.plugin.AssetLoader):
     """Load cache models.

     Stores the imported asset in a collection named after the asset.

@@ -281,7 +260,8 @@ class CacheModelLoader(pype.blender.AssetLoader):
             context: Full parenthood of representation to load
             options: Additional settings dictionary
         """
-        raise NotImplementedError("Loading of Alembic files is not yet implemented.")
+        raise NotImplementedError(
+            "Loading of Alembic files is not yet implemented.")
         # TODO (jasper): implement Alembic import.

         libpath = self.fname

@@ -289,7 +269,7 @@ class CacheModelLoader(pype.blender.AssetLoader):
         subset = context["subset"]["name"]
         # TODO (jasper): evaluate use of namespace which is 'alien' to Blender.
         lib_container = container_name = (
-            pype.blender.plugin.model_name(asset, subset, namespace)
+            pype.blender.plugin.asset_name(asset, subset, namespace)
         )
         relative = bpy.context.preferences.filepaths.use_relative_paths

pype/plugins/blender/load/load_rig.py (new file, 256 lines)
@@ -0,0 +1,256 @@
"""Load a rig asset in Blender."""

import logging
from pathlib import Path
from pprint import pformat
from typing import Dict, List, Optional

from avalon import api, blender
import bpy
import pype.blender.plugin

logger = logging.getLogger("pype").getChild("blender").getChild("load_model")


class BlendRigLoader(pype.blender.plugin.AssetLoader):
    """Load rigs from a .blend file.

    Because they come from a .blend file we can simply link the collection
    that contains the model. There is no further need to 'containerise' it.

    Warning:
        Loading the same asset more than once is not properly supported at
        the moment.
    """

    families = ["rig"]
    representations = ["blend"]

    label = "Link Rig"
    icon = "code-fork"
    color = "orange"

    @staticmethod
    def _remove(self, objects, lib_container):
        for obj in objects:
            if obj.type == 'ARMATURE':
                bpy.data.armatures.remove(obj.data)
            elif obj.type == 'MESH':
                bpy.data.meshes.remove(obj.data)

        bpy.data.collections.remove(bpy.data.collections[lib_container])

    @staticmethod
    def _process(self, libpath, lib_container, container_name, action):
        relative = bpy.context.preferences.filepaths.use_relative_paths
        with bpy.data.libraries.load(
            libpath, link=True, relative=relative
        ) as (_, data_to):
            data_to.collections = [lib_container]

        scene = bpy.context.scene

        scene.collection.children.link(bpy.data.collections[lib_container])

        rig_container = scene.collection.children[lib_container].make_local()

        meshes = [obj for obj in rig_container.objects if obj.type == 'MESH']
        armatures = [
            obj for obj in rig_container.objects if obj.type == 'ARMATURE']

        objects_list = []

        assert(len(armatures) == 1)

        # Link meshes first, then armatures.
        # The armature is unparented for all the non-local meshes,
        # when it is made local.
        for obj in meshes + armatures:
            obj = obj.make_local()

            obj.data.make_local()

            if not obj.get(blender.pipeline.AVALON_PROPERTY):
                obj[blender.pipeline.AVALON_PROPERTY] = dict()

            avalon_info = obj[blender.pipeline.AVALON_PROPERTY]
            avalon_info.update({"container_name": container_name})

            if obj.type == 'ARMATURE' and action is not None:
                obj.animation_data.action = action

            objects_list.append(obj)

        rig_container.pop(blender.pipeline.AVALON_PROPERTY)

        bpy.ops.object.select_all(action='DESELECT')

        return objects_list

    def process_asset(
        self, context: dict, name: str, namespace: Optional[str] = None,
        options: Optional[Dict] = None
    ) -> Optional[List]:
        """
        Arguments:
            name: Use pre-defined name
            namespace: Use pre-defined namespace
            context: Full parenthood of representation to load
            options: Additional settings dictionary
        """

        libpath = self.fname
        asset = context["asset"]["name"]
        subset = context["subset"]["name"]
        lib_container = pype.blender.plugin.asset_name(asset, subset)
        container_name = pype.blender.plugin.asset_name(
            asset, subset, namespace
        )

        container = bpy.data.collections.new(lib_container)
        container.name = container_name
        blender.pipeline.containerise_existing(
            container,
            name,
            namespace,
            context,
            self.__class__.__name__,
        )

        container_metadata = container.get(
            blender.pipeline.AVALON_PROPERTY)

        container_metadata["libpath"] = libpath
        container_metadata["lib_container"] = lib_container

        objects_list = self._process(
            self, libpath, lib_container, container_name, None)

        # Save the list of objects in the metadata container
        container_metadata["objects"] = objects_list

        nodes = list(container.objects)
        nodes.append(container)
        self[:] = nodes
        return nodes

    def update(self, container: Dict, representation: Dict):
        """Update the loaded asset.

        This will remove all objects of the current collection, load the new
        ones and add them to the collection.
        If the objects of the collection are used in another collection they
        will not be removed, only unlinked. Normally this should not be the
        case though.

        Warning:
            No nested collections are supported at the moment!
        """

        collection = bpy.data.collections.get(
            container["objectName"]
        )

        libpath = Path(api.get_representation_path(representation))
        extension = libpath.suffix.lower()

        logger.info(
            "Container: %s\nRepresentation: %s",
            pformat(container, indent=2),
            pformat(representation, indent=2),
        )

        assert collection, (
            f"The asset is not loaded: {container['objectName']}"
        )
        assert not (collection.children), (
            "Nested collections are not supported."
        )
        assert libpath, (
            "No existing library file found for {container['objectName']}"
        )
        assert libpath.is_file(), (
            f"The file doesn't exist: {libpath}"
        )
        assert extension in pype.blender.plugin.VALID_EXTENSIONS, (
            f"Unsupported file: {libpath}"
        )

        collection_metadata = collection.get(
            blender.pipeline.AVALON_PROPERTY)
        collection_libpath = collection_metadata["libpath"]
        objects = collection_metadata["objects"]
        lib_container = collection_metadata["lib_container"]

        normalized_collection_libpath = (
            str(Path(bpy.path.abspath(collection_libpath)).resolve())
        )
        normalized_libpath = (
            str(Path(bpy.path.abspath(str(libpath))).resolve())
        )
        logger.debug(
            "normalized_collection_libpath:\n  %s\nnormalized_libpath:\n  %s",
            normalized_collection_libpath,
            normalized_libpath,
        )
        if normalized_collection_libpath == normalized_libpath:
            logger.info("Library already loaded, not updating...")
            return

        # Get the armature of the rig
        armatures = [obj for obj in objects if obj.type == 'ARMATURE']
        assert(len(armatures) == 1)

        action = armatures[0].animation_data.action

        self._remove(self, objects, lib_container)

        objects_list = self._process(
            self, str(libpath), lib_container, collection.name, action)

        # Save the list of objects in the metadata container
        collection_metadata["objects"] = objects_list
        collection_metadata["libpath"] = str(libpath)
        collection_metadata["representation"] = str(representation["_id"])

        bpy.ops.object.select_all(action='DESELECT')

    def remove(self, container: Dict) -> bool:
        """Remove an existing container from a Blender scene.

        Arguments:
            container (avalon-core:container-1.0): Container to remove,
                from `host.ls()`.

        Returns:
            bool: Whether the container was deleted.

        Warning:
            No nested collections are supported at the moment!
        """

        collection = bpy.data.collections.get(
            container["objectName"]
        )
        if not collection:
            return False
        assert not (collection.children), (
            "Nested collections are not supported."
        )

        collection_metadata = collection.get(
            blender.pipeline.AVALON_PROPERTY)
        objects = collection_metadata["objects"]
        lib_container = collection_metadata["lib_container"]

        self._remove(self, objects, lib_container)

        bpy.data.collections.remove(collection)

        return True
|
|
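Note: update() and remove() above both read the same metadata imprinted on the collection at load time. A minimal sketch of the dictionary they expect (key names taken from the accesses above; the values are illustrative, not part of this commit):

    # Sketch only - the AVALON_PROPERTY group read by update()/remove().
    collection[blender.pipeline.AVALON_PROPERTY] = {
        "libpath": "/projects/show/publish/rig/v001/rig.blend",
        "lib_container": "rigMain_container",
        "objects": [],  # filled with the objects returned by _process()
        "representation": "<representation id>",
    }
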
@@ -14,3 +14,6 @@ class CollectBlenderCurrentFile(pyblish.api.ContextPlugin):
        """Inject the current working file"""
        current_file = bpy.data.filepath
        context.data['currentFile'] = current_file

        assert current_file != '', "Current file is empty. " \
            "Save the file before continuing."

@@ -1,22 +1,21 @@
import typing
from typing import Generator

import bpy
import json

import avalon.api
import pyblish.api
from avalon.blender.pipeline import AVALON_PROPERTY


class CollectModel(pyblish.api.ContextPlugin):
class CollectInstances(pyblish.api.ContextPlugin):
    """Collect the data of a model."""

    hosts = ["blender"]
    label = "Collect Model"
    label = "Collect Instances"
    order = pyblish.api.CollectorOrder

    @staticmethod
    def get_model_collections() -> Generator:
    def get_collections() -> Generator:
        """Return all 'model' collections.

        Check if the family is 'model' and if it doesn't have the

@@ -25,13 +24,13 @@ class CollectModel(pyblish.api.ContextPlugin):
        """
        for collection in bpy.data.collections:
            avalon_prop = collection.get(AVALON_PROPERTY) or dict()
            if (avalon_prop.get('family') == 'model'
                    and not avalon_prop.get('representation')):
            if avalon_prop.get('id') == 'pyblish.avalon.instance':
                yield collection

    def process(self, context):
        """Collect the models from the current Blender scene."""
        collections = self.get_model_collections()
        collections = self.get_collections()

        for collection in collections:
            avalon_prop = collection[AVALON_PROPERTY]
            asset = avalon_prop['asset']

@@ -50,4 +49,6 @@ class CollectModel(pyblish.api.ContextPlugin):
            members = list(collection.objects)
            members.append(collection)
            instance[:] = members
            self.log.debug(instance.data)
            self.log.debug(json.dumps(instance.data, indent=4))
            for obj in instance:
                self.log.debug(obj)

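The `id` check above is the key change: the collector now picks up any collection imprinted as a publish instance instead of filtering on the model family. A hypothetical property payload that the new get_collections() would yield:

    # Hypothetical example, values illustrative:
    collection[AVALON_PROPERTY] = {
        "id": "pyblish.avalon.instance",
        "family": "model",
        "asset": "characterA",
        "subset": "modelMain",
    }
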
95
pype/plugins/blender/publish/extract_abc.py
Normal file

@@ -0,0 +1,95 @@
import os

import pype.api
import pype.blender.plugin

import bpy


class ExtractABC(pype.api.Extractor):
    """Extract as ABC."""

    label = "Extract ABC"
    hosts = ["blender"]
    families = ["model"]
    optional = True

    def process(self, instance):
        # Define extract output file path

        stagingdir = self.staging_dir(instance)
        filename = f"{instance.name}.abc"
        filepath = os.path.join(stagingdir, filename)

        context = bpy.context
        scene = context.scene
        view_layer = context.view_layer

        # Perform extraction
        self.log.info("Performing extraction..")

        collections = [
            obj for obj in instance if type(obj) is bpy.types.Collection]

        assert len(collections) == 1, "There should be one and only one " \
            "collection collected for this asset"

        old_active_layer_collection = view_layer.active_layer_collection

        layers = view_layer.layer_collection.children

        # Get the layer collection from the collection we need to export.
        # This is needed because in Blender you can only set the active
        # collection with the layer collection, and there is no way to get
        # the layer collection from the collection
        # (but there is the vice versa).
        layer_collections = [
            layer for layer in layers if layer.collection == collections[0]]

        assert len(layer_collections) == 1

        view_layer.active_layer_collection = layer_collections[0]

        old_scale = scene.unit_settings.scale_length

        selected = list()

        for obj in instance:
            try:
                obj.select_set(True)
                selected.append(obj)
            except Exception:
                continue

        new_context = pype.blender.plugin.create_blender_context(
            active=selected[0], selected=selected)

        # We set the scale of the scene for the export
        scene.unit_settings.scale_length = 0.01

        self.log.info(new_context)

        # We export the abc
        bpy.ops.wm.alembic_export(
            new_context,
            filepath=filepath,
            start=1,
            end=1
        )

        view_layer.active_layer_collection = old_active_layer_collection

        scene.unit_settings.scale_length = old_scale

        if "representations" not in instance.data:
            instance.data["representations"] = []

        representation = {
            'name': 'abc',
            'ext': 'abc',
            'files': filename,
            "stagingDir": stagingdir,
        }
        instance.data["representations"].append(representation)

        self.log.info("Extracted instance '%s' to: %s",
                      instance.name, representation)

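The layer-collection comment above describes a one-way mapping in Blender's API. For reference, a small sketch of the same lookup done recursively (a hypothetical helper, not part of this commit), which would also cover nested collections:

    def find_layer_collection(layer_coll, coll):
        # Depth-first search from a view layer's root layer_collection;
        # Blender only exposes layer_collection.collection, not the reverse.
        if layer_coll.collection == coll:
            return layer_coll
        for child in layer_coll.children:
            found = find_layer_collection(child, coll)
            if found:
                return found
        return None
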
@@ -1,47 +1,47 @@
import os
import avalon.blender.workio

import pype.api


class ExtractModel(pype.api.Extractor):
    """Extract as model."""

    label = "Model"
    hosts = ["blender"]
    families = ["model"]
    optional = True

    def process(self, instance):
        # Define extract output file path

        stagingdir = self.staging_dir(instance)
        filename = f"{instance.name}.blend"
        filepath = os.path.join(stagingdir, filename)

        # Perform extraction
        self.log.info("Performing extraction..")

        # Just save the file to a temporary location. At least for now it's no
        # problem to have (possibly) extra stuff in the file.
        avalon.blender.workio.save_file(filepath, copy=True)
        #
        # # Store reference for integration
        # if "files" not in instance.data:
        #     instance.data["files"] = list()
        #
        # # instance.data["files"].append(filename)

        if "representations" not in instance.data:
            instance.data["representations"] = []

        representation = {
            'name': 'blend',
            'ext': 'blend',
            'files': filename,
            "stagingDir": stagingdir,
        }
        instance.data["representations"].append(representation)


        self.log.info("Extracted instance '%s' to: %s", instance.name, representation)
import os
import avalon.blender.workio

import pype.api


class ExtractBlend(pype.api.Extractor):
    """Extract a blend file."""

    label = "Extract Blend"
    hosts = ["blender"]
    families = ["animation", "model", "rig", "action"]
    optional = True

    def process(self, instance):
        # Define extract output file path

        stagingdir = self.staging_dir(instance)
        filename = f"{instance.name}.blend"
        filepath = os.path.join(stagingdir, filename)

        # Perform extraction
        self.log.info("Performing extraction..")

        # Just save the file to a temporary location. At least for now it's no
        # problem to have (possibly) extra stuff in the file.
        avalon.blender.workio.save_file(filepath, copy=True)
        #
        # # Store reference for integration
        # if "files" not in instance.data:
        #     instance.data["files"] = list()
        #
        # # instance.data["files"].append(filename)

        if "representations" not in instance.data:
            instance.data["representations"] = []

        representation = {
            'name': 'blend',
            'ext': 'blend',
            'files': filename,
            "stagingDir": stagingdir,
        }
        instance.data["representations"].append(representation)

        self.log.info("Extracted instance '%s' to: %s",
                      instance.name, representation)

81
pype/plugins/blender/publish/extract_fbx.py
Normal file

@@ -0,0 +1,81 @@
import os

import pype.api

import bpy


class ExtractFBX(pype.api.Extractor):
    """Extract as FBX."""

    label = "Extract FBX"
    hosts = ["blender"]
    families = ["model", "rig"]
    optional = True

    def process(self, instance):
        # Define extract output file path

        stagingdir = self.staging_dir(instance)
        filename = f"{instance.name}.fbx"
        filepath = os.path.join(stagingdir, filename)

        context = bpy.context
        scene = context.scene
        view_layer = context.view_layer

        # Perform extraction
        self.log.info("Performing extraction..")

        collections = [
            obj for obj in instance if type(obj) is bpy.types.Collection]

        assert len(collections) == 1, "There should be one and only one " \
            "collection collected for this asset"

        old_active_layer_collection = view_layer.active_layer_collection

        layers = view_layer.layer_collection.children

        # Get the layer collection from the collection we need to export.
        # This is needed because in Blender you can only set the active
        # collection with the layer collection, and there is no way to get
        # the layer collection from the collection
        # (but there is the vice versa).
        layer_collections = [
            layer for layer in layers if layer.collection == collections[0]]

        assert len(layer_collections) == 1

        view_layer.active_layer_collection = layer_collections[0]

        old_scale = scene.unit_settings.scale_length

        # We set the scale of the scene for the export
        scene.unit_settings.scale_length = 0.01

        # We export the fbx
        bpy.ops.export_scene.fbx(
            filepath=filepath,
            use_active_collection=True,
            mesh_smooth_type='FACE',
            add_leaf_bones=False
        )

        view_layer.active_layer_collection = old_active_layer_collection

        scene.unit_settings.scale_length = old_scale

        if "representations" not in instance.data:
            instance.data["representations"] = []

        representation = {
            'name': 'fbx',
            'ext': 'fbx',
            'files': filename,
            "stagingDir": stagingdir,
        }
        instance.data["representations"].append(representation)

        self.log.info("Extracted instance '%s' to: %s",
                      instance.name, representation)

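The extractors above all wrap the export in the same save-and-restore of `scene.unit_settings.scale_length` (0.01 maps Blender's meter units onto centimeter-based targets such as Unreal). A sketch of that pattern as a context manager - a hypothetical convenience, not what the plugins actually use:

    from contextlib import contextmanager

    @contextmanager
    def temporary_scale(scene, scale=0.01):
        # Hypothetical wrapper around the inline save/restore used above.
        old_scale = scene.unit_settings.scale_length
        scene.unit_settings.scale_length = scale
        try:
            yield
        finally:
            scene.unit_settings.scale_length = old_scale
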
139
pype/plugins/blender/publish/extract_fbx_animation.py
Normal file

@@ -0,0 +1,139 @@
import os

import pype.api

import bpy
import bpy_extras
import bpy_extras.anim_utils


class ExtractAnimationFBX(pype.api.Extractor):
    """Extract as animation."""

    label = "Extract FBX"
    hosts = ["blender"]
    families = ["animation"]
    optional = True

    def process(self, instance):
        # Define extract output file path

        stagingdir = self.staging_dir(instance)
        filename = f"{instance.name}.fbx"
        filepath = os.path.join(stagingdir, filename)

        context = bpy.context
        scene = context.scene
        view_layer = context.view_layer

        # Perform extraction
        self.log.info("Performing extraction..")

        collections = [
            obj for obj in instance if type(obj) is bpy.types.Collection]

        assert len(collections) == 1, "There should be one and only one " \
            "collection collected for this asset"

        old_active_layer_collection = view_layer.active_layer_collection

        layers = view_layer.layer_collection.children

        # Get the layer collection from the collection we need to export.
        # This is needed because in Blender you can only set the active
        # collection with the layer collection, and there is no way to get
        # the layer collection from the collection
        # (but there is the vice versa).
        layer_collections = [
            layer for layer in layers if layer.collection == collections[0]]

        assert len(layer_collections) == 1

        view_layer.active_layer_collection = layer_collections[0]

        old_scale = scene.unit_settings.scale_length

        # We set the scale of the scene for the export
        scene.unit_settings.scale_length = 0.01

        armatures = [
            obj for obj in collections[0].objects if obj.type == 'ARMATURE']

        object_action_pairs = []
        original_actions = []

        starting_frames = []
        ending_frames = []

        # For each armature, we make a copy of the current action
        for obj in armatures:

            curr_action = None
            copy_action = None

            if obj.animation_data and obj.animation_data.action:

                curr_action = obj.animation_data.action
                copy_action = curr_action.copy()

                curr_frame_range = curr_action.frame_range

                starting_frames.append(curr_frame_range[0])
                ending_frames.append(curr_frame_range[1])

            object_action_pairs.append((obj, copy_action))
            original_actions.append(curr_action)

        # We compute the starting and ending frames
        min_frame = min(starting_frames)
        max_frame = max(ending_frames)

        # We bake the copy of the current action for each object
        bpy_extras.anim_utils.bake_action_objects(
            object_action_pairs,
            frames=range(int(min_frame), int(max_frame)),
            do_object=False,
            do_clean=False
        )

        # We export the fbx
        bpy.ops.export_scene.fbx(
            filepath=filepath,
            use_active_collection=True,
            bake_anim_use_nla_strips=False,
            bake_anim_use_all_actions=False,
            add_leaf_bones=False
        )

        view_layer.active_layer_collection = old_active_layer_collection

        scene.unit_settings.scale_length = old_scale

        # We delete the baked action and set the original one back
        for i in range(0, len(object_action_pairs)):

            pair = object_action_pairs[i]
            action = original_actions[i]

            if action:

                pair[0].animation_data.action = action

            if pair[1]:

                pair[1].user_clear()
                bpy.data.actions.remove(pair[1])

        if "representations" not in instance.data:
            instance.data["representations"] = []

        representation = {
            'name': 'fbx',
            'ext': 'fbx',
            'files': filename,
            "stagingDir": stagingdir,
        }
        instance.data["representations"].append(representation)

        self.log.info("Extracted instance '%s' to: %s",
                      instance.name, representation)

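To make the bake-and-restore loop above easier to follow, the data it builds looks roughly like this (names from the code; contents illustrative):

    # object_action_pairs = [(armature, copy_of_its_action_or_None), ...]
    # original_actions    = [original_action_or_None, ...]  # same order
    # Baking writes keys into the copies only; afterwards each original
    # action is reassigned and every copy is removed from bpy.data.actions.
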
@@ -35,12 +35,15 @@ class ValidateMeshHasUvs(pyblish.api.InstancePlugin):
        invalid = []
        # TODO (jasper): only check objects in the collection that will be published?
        for obj in [
            obj for obj in bpy.data.objects if obj.type == 'MESH'
        ]:
            # Make sure we are in object mode.
            bpy.ops.object.mode_set(mode='OBJECT')
            if not cls.has_uvs(obj):
                invalid.append(obj)
            obj for obj in instance]:
            try:
                if obj.type == 'MESH':
                    # Make sure we are in object mode.
                    bpy.ops.object.mode_set(mode='OBJECT')
                    if not cls.has_uvs(obj):
                        invalid.append(obj)
            except Exception:
                continue
        return invalid

    def process(self, instance):

@@ -15,7 +15,7 @@ import pyblish.api
class CollectAvalonEntities(pyblish.api.ContextPlugin):
    """Collect Anatomy into Context"""

    order = pyblish.api.CollectorOrder
    order = pyblish.api.CollectorOrder - 0.02
    label = "Collect Avalon Entities"

    def process(self, context):

@@ -47,7 +47,16 @@ class CollectAvalonEntities(pyblish.api.ContextPlugin):
        context.data["assetEntity"] = asset_entity

        data = asset_entity['data']

        context.data["frameStart"] = data.get("frameStart")
        context.data["frameEnd"] = data.get("frameEnd")

        handles = int(data.get("handles") or 0)
        context.data["handles"] = handles
        context.data["handleStart"] = int(data.get("handleStart", handles))
        context.data["handleEnd"] = int(data.get("handleEnd", handles))

        frame_start_h = data.get("frameStart") - context.data["handleStart"]
        frame_end_h = data.get("frameEnd") + context.data["handleEnd"]
        context.data["frameStartHandle"] = frame_start_h
        context.data["frameEndHandle"] = frame_end_h

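The handle arithmetic above reduces to two offsets; a worked example with assumed values:

    # Example (illustrative numbers):
    #   frameStart = 1001, frameEnd = 1100, handleStart = handleEnd = 10
    #   frameStartHandle = 1001 - 10 = 991
    #   frameEndHandle   = 1100 + 10 = 1110
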
@@ -13,7 +13,7 @@ class CollectRenderedFiles(pyblish.api.ContextPlugin):
    `PYPE_PUBLISH_DATA`. Those files _MUST_ share same context.

    """
    order = pyblish.api.CollectorOrder - 0.0001
    order = pyblish.api.CollectorOrder - 0.1
    targets = ["filesequence"]
    label = "Collect rendered frames"

@@ -16,6 +16,9 @@ class CollectSceneVersion(pyblish.api.ContextPlugin):
        if "standalonepublisher" in context.data.get("host", []):
            return

        if "unreal" in pyblish.api.registered_hosts():
            return

        filename = os.path.basename(context.data.get('currentFile'))

        if '<shell>' in filename:

@@ -81,7 +81,9 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
                "image"
                "source",
                "assembly",
                "textures"
                "fbx",
                "textures",
                "action"
                ]
    exclude_families = ["clip"]
    db_representation_context_keys = [

@@ -231,14 +231,12 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):

        # Transfer the environment from the original job to this dependent
        # job so they use the same environment

        metadata_filename = "{}_metadata.json".format(subset)
        metadata_path = os.path.join(rootless_path, metadata_filename)
        # TODO: replace root in metadata_path with {root[root_name]}

        environment = job["Props"].get("Env", {})
        environment["PYPE_METADATA_FILE"] = metadata_path
        environment["AVALON_PROJECT"] = pyblish.api.Session["AVALON_PROJECT"]
        environment["AVALON_PROJECT"] = api.Session["AVALON_PROJECT"]

        i = 0
        for index, key in enumerate(environment):

11
pype/plugins/maya/create/create_unreal_staticmesh.py
Normal file

@@ -0,0 +1,11 @@
import avalon.maya


class CreateUnrealStaticMesh(avalon.maya.Creator):
    name = "staticMeshMain"
    label = "Unreal - Static Mesh"
    family = "unrealStaticMesh"
    icon = "cube"

    def __init__(self, *args, **kwargs):
        super(CreateUnrealStaticMesh, self).__init__(*args, **kwargs)

@@ -16,7 +16,7 @@ class LookLoader(pype.maya.plugin.ReferenceLoader):
    icon = "code-fork"
    color = "orange"

    def process_reference(self, context, name, namespace, data):
    def process_reference(self, context, name, namespace, options):
        """
        Load and try to assign Lookdev to nodes based on relationship data
        Args:

@@ -1,16 +0,0 @@
from maya import cmds

import pyblish.api


class CollectMayaCurrentFile(pyblish.api.ContextPlugin):
    """Inject the current working file into context"""

    order = pyblish.api.CollectorOrder - 0.5
    label = "Maya Current File"
    hosts = ['maya']

    def process(self, context):
        """Inject the current working file"""
        current_file = cmds.file(query=True, sceneName=True)
        context.data['currentFile'] = current_file

@@ -1,6 +1,7 @@
from maya import cmds

import pyblish.api
import json


class CollectInstances(pyblish.api.ContextPlugin):

@@ -32,6 +33,13 @@ class CollectInstances(pyblish.api.ContextPlugin):
        objectset = cmds.ls("*.id", long=True, type="objectSet",
                            recursive=True, objectsOnly=True)

        ctx_frame_start = context.data['frameStart']
        ctx_frame_end = context.data['frameEnd']
        ctx_handle_start = context.data['handleStart']
        ctx_handle_end = context.data['handleEnd']
        ctx_frame_start_handle = context.data['frameStartHandle']
        ctx_frame_end_handle = context.data['frameEndHandle']

        context.data['objectsets'] = objectset
        for objset in objectset:

@@ -108,14 +116,36 @@ class CollectInstances(pyblish.api.ContextPlugin):
            label = "{0} ({1})".format(name,
                                       data["asset"])

            if "handles" in data:
                data["handleStart"] = data["handles"]
                data["handleEnd"] = data["handles"]

            # Append start frame and end frame to label if present
            if "frameStart" in data and "frameEnd" in data:
                data["frameStartHandle"] = data["frameStart"] - data["handleStart"]
                data["frameEndHandle"] = data["frameEnd"] + data["handleEnd"]

                # if frame range on maya set is the same as full shot range
                # adjust the values to match the asset data
                if (ctx_frame_start_handle == data["frameStart"]
                        and ctx_frame_end_handle == data["frameEnd"]):  # noqa: W503, E501
                    data["frameStartHandle"] = ctx_frame_start_handle
                    data["frameEndHandle"] = ctx_frame_end_handle
                    data["frameStart"] = ctx_frame_start
                    data["frameEnd"] = ctx_frame_end
                    data["handleStart"] = ctx_handle_start
                    data["handleEnd"] = ctx_handle_end

                # if there are user values on start and end frame not
                # matching the asset, use them
                else:
                    if "handles" in data:
                        data["handleStart"] = data["handles"]
                        data["handleEnd"] = data["handles"]
                    else:
                        data["handleStart"] = 0
                        data["handleEnd"] = 0

                    data["frameStartHandle"] = data["frameStart"] - data["handleStart"]  # noqa: E501
                    data["frameEndHandle"] = data["frameEnd"] + data["handleEnd"]  # noqa: E501

                if "handles" in data:
                    data.pop('handles')

                label += " [{0}-{1}]".format(int(data["frameStartHandle"]),
                                             int(data["frameEndHandle"]))

@@ -127,7 +157,8 @@ class CollectInstances(pyblish.api.ContextPlugin):
            # Produce diagnostic message for any graphical
            # user interface interested in visualising it.
            self.log.info("Found: \"%s\" " % instance.data["name"])
            self.log.debug("DATA: \"%s\" " % instance.data)
            self.log.debug(
                "DATA: {} ".format(json.dumps(instance.data, indent=4)))

def sort_by_family(instance):
    """Sort by family"""

@@ -41,6 +41,7 @@ import re
import os
import types
import six
import json
from abc import ABCMeta, abstractmethod

from maya import cmds

@@ -202,6 +203,28 @@ class CollectMayaRender(pyblish.api.ContextPlugin):
                full_paths.append(full_path)
            aov_dict["beauty"] = full_paths

            frame_start_render = int(self.get_render_attribute(
                "startFrame", layer=layer_name))
            frame_end_render = int(self.get_render_attribute(
                "endFrame", layer=layer_name))

            if (int(context.data['frameStartHandle']) == frame_start_render
                    and int(context.data['frameEndHandle']) == frame_end_render):  # noqa: W503, E501

                handle_start = context.data['handleStart']
                handle_end = context.data['handleEnd']
                frame_start = context.data['frameStart']
                frame_end = context.data['frameEnd']
                frame_start_handle = context.data['frameStartHandle']
                frame_end_handle = context.data['frameEndHandle']
            else:
                handle_start = 0
                handle_end = 0
                frame_start = frame_start_render
                frame_end = frame_end_render
                frame_start_handle = frame_start_render
                frame_end_handle = frame_end_render

            full_exp_files.append(aov_dict)
            self.log.info(full_exp_files)
            self.log.info("collecting layer: {}".format(layer_name))

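In other words, a layer only inherits the asset's handles when its render range exactly equals the asset range including handles; any custom range is treated as handle-less. A worked example with assumed values:

    # Asset: frameStartHandle=991, frameEndHandle=1110, handles 10/10
    #   render range 991-1110  -> handles kept (10/10), frames 1001-1100
    #   render range 1001-1100 -> treated as custom: handles 0/0,
    #                             frameStart/frameEnd = 1001/1100
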
@@ -211,24 +234,18 @@ class CollectMayaRender(pyblish.api.ContextPlugin):
                "attachTo": attachTo,
                "setMembers": layer_name,
                "publish": True,
                "frameStart": int(
                    context.data["assetEntity"]['data']['frameStart']),
                "frameEnd": int(
                    context.data["assetEntity"]['data']['frameEnd']),
                "frameStartHandle": int(
                    self.get_render_attribute("startFrame", layer=layer_name)),
                "frameEndHandle": int(
                    self.get_render_attribute("endFrame", layer=layer_name)),

                "handleStart": handle_start,
                "handleEnd": handle_end,
                "frameStart": frame_start,
                "frameEnd": frame_end,
                "frameStartHandle": frame_start_handle,
                "frameEndHandle": frame_end_handle,
                "byFrameStep": int(
                    self.get_render_attribute("byFrameStep",
                                              layer=layer_name)),
                "renderer": self.get_render_attribute("currentRenderer",
                                                      layer=layer_name),
                "handleStart": int(
                    context.data["assetEntity"]['data']['handleStart']),
                "handleEnd": int(
                    context.data["assetEntity"]['data']['handleEnd']),

                # instance subset
                "family": "renderlayer",
                "families": ["renderlayer"],

@@ -271,7 +288,7 @@ class CollectMayaRender(pyblish.api.ContextPlugin):
            instance = context.create_instance(expected_layer_name)
            instance.data["label"] = label
            instance.data.update(data)
            pass
            self.log.debug("data: {}".format(json.dumps(data, indent=4)))

    def parse_options(self, render_globals):
        """Get all overrides with a value, skip those without

@@ -489,7 +506,7 @@ class AExpectedFiles:
                expected_files.append(
                    '{}.{}.{}'.format(file_prefix,
                                      str(frame).rjust(
                                        layer_data["padding"], "0"),
                                          layer_data["padding"], "0"),
                                      layer_data["defaultExt"]))
        return expected_files

@@ -625,7 +642,7 @@ class ExpectedFilesArnold(AExpectedFiles):
        enabled_aovs = []
        try:
            if not (cmds.getAttr('defaultArnoldRenderOptions.aovMode')
                    and not cmds.getAttr('defaultArnoldDriver.mergeAOVs')):
                    and not cmds.getAttr('defaultArnoldDriver.mergeAOVs')):  # noqa: W503, E501
                # AOVs are merged in multi-channel file
                return enabled_aovs
        except ValueError:

@@ -746,10 +763,7 @@ class ExpectedFilesVray(AExpectedFiles):
            if enabled:
                # todo: find how vray set format for AOVs
                enabled_aovs.append(
                    (
                        self._get_vray_aov_name(aov),
                        default_ext)
                )
                    (self._get_vray_aov_name(aov), default_ext))
        return enabled_aovs

    def _get_vray_aov_name(self, node):

@@ -9,13 +9,14 @@ from pype.maya import lib
class CollectMayaScene(pyblish.api.ContextPlugin):
    """Inject the current working file into context"""

    order = pyblish.api.CollectorOrder - 0.1
    order = pyblish.api.CollectorOrder - 0.01
    label = "Maya Workfile"
    hosts = ['maya']

    def process(self, context):
        """Inject the current working file"""
        current_file = context.data['currentFile']
        current_file = cmds.file(query=True, sceneName=True)
        context.data['currentFile'] = current_file

        folder, file = os.path.split(current_file)
        filename, ext = os.path.splitext(file)

@@ -24,9 +25,6 @@ class CollectMayaScene(pyblish.api.ContextPlugin):

        data = {}

        for key, value in lib.collect_animation_data().items():
            data[key] = value

        # create instance
        instance = context.create_instance(name=filename)
        subset = 'workfile' + task.capitalize()

@@ -38,7 +36,11 @@ class CollectMayaScene(pyblish.api.ContextPlugin):
            "publish": True,
            "family": 'workfile',
            "families": ['workfile'],
            "setMembers": [current_file]
            "setMembers": [current_file],
            "frameStart": context.data['frameStart'],
            "frameEnd": context.data['frameEnd'],
            "handleStart": context.data['handleStart'],
            "handleEnd": context.data['handleEnd']
        })

        data['representations'] = [{

33
pype/plugins/maya/publish/collect_unreal_staticmesh.py
Normal file

@@ -0,0 +1,33 @@
# -*- coding: utf-8 -*-
from maya import cmds
import pyblish.api


class CollectUnrealStaticMesh(pyblish.api.InstancePlugin):
    """Collect unreal static mesh

    Ensures that only a single frame (the current frame) is extracted. This
    also sets correct FBX options for later extraction.

    Note:
        This is a workaround so that the `pype.model` family can use the
        same pointcache extractor implementation as animation and pointcaches.
        This always enforces the "current" frame to be published.

    """

    order = pyblish.api.CollectorOrder + 0.2
    label = "Collect Model Data"
    families = ["unrealStaticMesh"]

    def process(self, instance):
        # add fbx family to trigger fbx extractor
        instance.data["families"].append("fbx")
        # set fbx overrides on instance
        instance.data["smoothingGroups"] = True
        instance.data["smoothMesh"] = True
        instance.data["triangulate"] = True

        frame = cmds.currentTime(query=True)
        instance.data["frameStart"] = frame
        instance.data["frameEnd"] = frame

@@ -212,12 +212,11 @@ class ExtractFBX(pype.api.Extractor):
            instance.data["representations"] = []

        representation = {
            'name': 'mov',
            'ext': 'mov',
            'name': 'fbx',
            'ext': 'fbx',
            'files': filename,
            "stagingDir": stagingDir,
        }
        instance.data["representations"].append(representation)


        self.log.info("Extract FBX successful to: {0}".format(path))

@@ -25,12 +25,8 @@ class ExtractAlembic(pype.api.Extractor):
        nodes = instance[:]

        # Collect the start and end including handles
        start = instance.data.get("frameStart", 1)
        end = instance.data.get("frameEnd", 1)
        handles = instance.data.get("handles", 0)
        if handles:
            start -= handles
            end += handles
        start = float(instance.data.get("frameStartHandle", 1))
        end = float(instance.data.get("frameEndHandle", 1))

        attrs = instance.data.get("attr", "").split(";")
        attrs = [value for value in attrs if value.strip()]

@@ -1,18 +1,19 @@
import pyblish.api
import pype.api

from maya import cmds


class ValidateFrameRange(pyblish.api.InstancePlugin):
    """Validates the frame ranges.

    Checks the `startFrame`, `endFrame` and `handles` data.
    This does NOT ensure there's actual data present.
    This is an optional validator that checks whether the frame range on the
    instance matches the one of the asset. It also validates the render
    frame range of render layers.

    This validates:
        - `startFrame` is lower than or equal to the `endFrame`.
        - must have both the `startFrame` and `endFrame` data.
        - The `handles` value is not lower than zero.
    The repair action will change everything to match the asset.

    This can be turned off by the artist to allow custom ranges.
    """

    label = "Validate Frame Range"

@@ -21,25 +22,66 @@ class ValidateFrameRange(pyblish.api.InstancePlugin):
                "pointcache",
                "camera",
                "renderlayer",
                "colorbleed.vrayproxy"]
                "review",
                "yeticache"]
    optional = True
    actions = [pype.api.RepairAction]

    def process(self, instance):
        context = instance.context

        start = instance.data.get("frameStart", None)
        end = instance.data.get("frameEnd", None)
        handles = instance.data.get("handles", None)
        frame_start_handle = int(context.data.get("frameStartHandle"))
        frame_end_handle = int(context.data.get("frameEndHandle"))
        handles = int(context.data.get("handles"))
        handle_start = int(context.data.get("handleStart"))
        handle_end = int(context.data.get("handleEnd"))
        frame_start = int(context.data.get("frameStart"))
        frame_end = int(context.data.get("frameEnd"))

        # Check if any of the values are present
        if any(value is None for value in [start, end]):
            raise ValueError("No time values for this instance. "
                             "(Missing `startFrame` or `endFrame`)")
        inst_start = int(instance.data.get("frameStartHandle"))
        inst_end = int(instance.data.get("frameEndHandle"))

        self.log.info("Comparing start (%s) and end (%s)" % (start, end))
        if start > end:
            raise RuntimeError("The start frame is a higher value "
                               "than the end frame: "
                               "{0}>{1}".format(start, end))
        # basic sanity checks
        assert frame_start_handle <= frame_end_handle, (
            "start frame is higher than end frame")

        if handles is not None:
            if handles < 0.0:
                raise RuntimeError("Handles are set to a negative value")
        assert handles >= 0, ("handles cannot have negative values")

        # compare with data on instance
        errors = []

        if (inst_start != frame_start_handle):
            errors.append("Instance start frame [ {} ] doesn't "
                          "match the one set on the asset [ {} ]: "
                          "{}/{}/{}/{} (handle/start/end/handle)".format(
                              inst_start,
                              frame_start_handle,
                              handle_start, frame_start, frame_end, handle_end
                          ))

        if (inst_end != frame_end_handle):
            errors.append("Instance end frame [ {} ] doesn't "
                          "match the one set on the asset [ {} ]: "
                          "{}/{}/{}/{} (handle/start/end/handle)".format(
                              inst_end,
                              frame_end_handle,
                              handle_start, frame_start, frame_end, handle_end
                          ))

        for e in errors:
            self.log.error(e)

        assert len(errors) == 0, ("Frame range settings are incorrect")

    @classmethod
    def repair(cls, instance):
        """
        Repair instance container to match asset data.
        """
        cmds.setAttr(
            "{}.frameStart".format(instance.data["name"]),
            instance.context.data.get("frameStartHandle"))

        cmds.setAttr(
            "{}.frameEnd".format(instance.data["name"]),
            instance.context.data.get("frameEndHandle"))

@@ -13,13 +13,17 @@ class ValidateRenderSettings(pyblish.api.InstancePlugin):
    """Validates the global render settings

    * File Name Prefix must start with: `maya/<Scene>`
        all other token are customizable but sane values are:
        all other tokens are customizable but sane values for Arnold are:

        `maya/<Scene>/<RenderLayer>/<RenderLayer>_<RenderPass>`

        <Camera> token is supported also, usefull for multiple renderable
        <Camera> token is supported also, useful for multiple renderable
        cameras per render layer.

        For Redshift omit the <RenderPass> token. Redshift will append it
        automatically if AOVs are enabled, and if you use Multipart EXR
        it doesn't make much sense.

    * Frame Padding must be:
        * default: 4

@@ -127,8 +131,13 @@ class ValidateRenderSettings(pyblish.api.InstancePlugin):
            # no vray checks implemented yet
            pass
        elif renderer == "redshift":
            # no redshift check implemented yet
            pass
            if re.search(cls.R_AOV_TOKEN, prefix):
                invalid = True
                cls.log.error("Do not use AOV token [ {} ] - "
                              "Redshift automatically appends the AOV name "
                              "and it doesn't make much sense with "
                              "Multipart EXR".format(prefix))

        elif renderer == "renderman":
            file_prefix = cmds.getAttr("rmanGlobals.imageFileFormat")
            dir_prefix = cmds.getAttr("rmanGlobals.imageOutputDir")

@@ -143,8 +152,8 @@ class ValidateRenderSettings(pyblish.api.InstancePlugin):
                              dir_prefix))

        else:
            multichannel = cmds.getAttr("defaultArnoldDriver.mergeAOVs")
            if multichannel:
            multipart = cmds.getAttr("defaultArnoldDriver.mergeAOVs")
            if multipart:
                if re.search(cls.R_AOV_TOKEN, prefix):
                    invalid = True
                    cls.log.error("Wrong image prefix [ {} ] - "

@@ -0,0 +1,33 @@
# -*- coding: utf-8 -*-

from maya import cmds
import pyblish.api
import pype.api
import pype.maya.action


class ValidateUnrealMeshTriangulated(pyblish.api.InstancePlugin):
    """Validate if mesh is made of triangles for Unreal Engine"""

    order = pype.api.ValidateMeshOrder
    hosts = ["maya"]
    families = ["unrealStaticMesh"]
    category = "geometry"
    label = "Mesh is Triangulated"
    actions = [pype.maya.action.SelectInvalidAction]

    @classmethod
    def get_invalid(cls, instance):
        invalid = []
        meshes = cmds.ls(instance, type="mesh", long=True)
        for mesh in meshes:
            faces = cmds.polyEvaluate(mesh, f=True)
            tris = cmds.polyEvaluate(mesh, t=True)
            if faces != tris:
                invalid.append(mesh)

        return invalid

    def process(self, instance):
        invalid = self.get_invalid(instance)
        assert len(invalid) == 0, (
            "Found meshes that are not triangulated")

120
pype/plugins/maya/publish/validate_unreal_staticmesh_naming.py
Normal file

@@ -0,0 +1,120 @@
# -*- coding: utf-8 -*-

from maya import cmds
import pyblish.api
import pype.api
import pype.maya.action
import re


class ValidateUnrealStaticmeshName(pyblish.api.InstancePlugin):
    """Validate name of Unreal Static Mesh

    Unreal's naming convention states that a staticMesh should start with
    the `SM` prefix - SM_[Name]_## (e.g. SM_cube_01). This plugin also
    validates other types of meshes - collision meshes:

    UBX_[RenderMeshName]_##:
        Boxes are created with the Box objects type in Max or with the
        Cube polygonal primitive in Maya. You cannot move the vertices
        around or deform it in any way to make it something other than a
        rectangular prism, or else it will not work.

    UCP_[RenderMeshName]_##:
        Capsules are created with the Capsule object type. The capsule
        does not need to have many segments (8 is a good number) at all
        because it is converted into a true capsule for collision. Like
        boxes, you should not move the individual vertices around.

    USP_[RenderMeshName]_##:
        Spheres are created with the Sphere object type. The sphere does
        not need to have many segments (8 is a good number) at all
        because it is converted into a true sphere for collision. Like
        boxes, you should not move the individual vertices around.

    UCX_[RenderMeshName]_##:
        Convex objects can be any completely closed convex 3D shape. For
        example, a box can also be a convex object.

    This validator also checks if collision mesh [RenderMeshName] matches one
    of SM_[RenderMeshName].

    """
    optional = True
    order = pype.api.ValidateContentsOrder
    hosts = ["maya"]
    families = ["unrealStaticMesh"]
    label = "Unreal StaticMesh Name"
    actions = [pype.maya.action.SelectInvalidAction]
    regex_mesh = r"SM_(?P<renderName>.*)_(\d{2})"
    regex_collision = r"((UBX)|(UCP)|(USP)|(UCX))_(?P<renderName>.*)_(\d{2})"

    @classmethod
    def get_invalid(cls, instance):

        # find out if supplied transform is a group or not
        def is_group(groupName):
            try:
                children = cmds.listRelatives(groupName, children=True)
                for child in children:
                    if not cmds.ls(child, transforms=True):
                        return False
                return True
            except Exception:
                return False

        invalid = []
        content_instance = instance.data.get("setMembers", None)
        if not content_instance:
            cls.log.error("Instance has no nodes!")
            return True

        descendants = cmds.listRelatives(content_instance,
                                         allDescendents=True,
                                         fullPath=True) or []

        descendants = cmds.ls(descendants, noIntermediate=True, long=True)
        trns = cmds.ls(descendants, long=False, type=('transform'))

        # filter out groups
        filtered = [node for node in trns if not is_group(node)]

        # compile regex for testing names
        sm_r = re.compile(cls.regex_mesh)
        cl_r = re.compile(cls.regex_collision)

        sm_names = []
        col_names = []
        for obj in filtered:
            sm_m = sm_r.match(obj)
            if sm_m is None:
                # test if it matches a collision mesh
                cl_m = cl_r.match(obj)
                if cl_m is None:
                    cls.log.error("invalid mesh name on: {}".format(obj))
                    invalid.append(obj)
                else:
                    col_names.append((cl_m.group("renderName"), obj))
            else:
                sm_names.append(sm_m.group("renderName"))

        for c_mesh in col_names:
            if c_mesh[0] not in sm_names:
                cls.log.error(("collision name {} doesn't match any "
                               "static mesh names.").format(c_mesh[0]))
                invalid.append(c_mesh[1])

        return invalid

    def process(self, instance):

        invalid = self.get_invalid(instance)

        if invalid:
            raise RuntimeError("Model naming is invalid. See log.")

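A quick sanity check of the two patterns above against illustrative names:

    # SM_Chair_01   -> matches regex_mesh,      renderName == "Chair"
    # UBX_Chair_01  -> matches regex_collision, renderName == "Chair"
    # chair_geo     -> matches neither, reported as invalid
    # UCX_Table_01  -> valid collision name, but flagged unless some
    #                  SM_Table_## mesh exists in the same instance
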
25
pype/plugins/maya/publish/validate_unreal_up_axis.py
Normal file

@@ -0,0 +1,25 @@
# -*- coding: utf-8 -*-

from maya import cmds
import pyblish.api
import pype.api


class ValidateUnrealUpAxis(pyblish.api.ContextPlugin):
    """Validate if Z is set as up axis in Maya"""

    optional = True
    order = pype.api.ValidateContentsOrder
    hosts = ["maya"]
    families = ["unrealStaticMesh"]
    label = "Unreal Up-Axis check"
    actions = [pype.api.RepairAction]

    def process(self, context):
        assert cmds.upAxis(q=True, axis=True) == "z", (
            "Invalid axis set as up axis"
        )

    @classmethod
    def repair(cls, instance):
        cmds.upAxis(axis="z", rotateView=True)

@@ -74,17 +74,14 @@ class ValidateScript(pyblish.api.InstancePlugin):
        if "handleEnd" in asset_attributes:
            handle_end = asset_attributes["handleEnd"]

        # Set frame range with handles
        # asset_attributes["frameStart"] -= handle_start
        # asset_attributes["frameEnd"] += handle_end
        if len(str(asset_attributes["fps"])) > 4:
            asset_attributes["fps"] = float("{0:.8f}".format(asset_attributes["fps"]))
            asset_attributes["fps"] = float("{0:.4f}".format(
                asset_attributes["fps"]))

        # Get values from nukescript
        script_attributes = {
            "handleStart": ctx_data["handleStart"],
            "handleEnd": ctx_data["handleEnd"],
            "fps": ctx_data["fps"],
            "fps": float("{0:.4f}".format(ctx_data["fps"])),
            "frameStart": ctx_data["frameStart"],
            "frameEnd": ctx_data["frameEnd"],
            "resolutionWidth": ctx_data["resolutionWidth"],

@@ -47,6 +47,16 @@ class CollectClips(api.ContextPlugin):
            track = item.parent()
            source = item.source().mediaSource()
            source_path = source.firstpath()
            file_head = source.filenameHead()
            file_info = next((f for f in source.fileinfos()), None)
            source_first_frame = file_info.startFrame()
            is_sequence = False

            if not source.singleFile():
                self.log.info("Sequence of files")
                is_sequence = True
                source_path = file_info.filename()

            effects = [f for f in item.linkedItems()
                       if f.isEnabled()
                       if isinstance(f, hiero.core.EffectTrackItem)]

@@ -78,12 +88,6 @@ class CollectClips(api.ContextPlugin):
                )
            )

            try:
                head, padding, ext = os.path.basename(source_path).split(".")
                source_first_frame = int(padding)
            except Exception:
                source_first_frame = 0

            data.update({
                "name": "{0}_{1}".format(track.name(), item.name()),
                "item": item,

@@ -91,6 +95,8 @@ class CollectClips(api.ContextPlugin):
                "timecodeStart": str(source.timecodeStart()),
                "timelineTimecodeStart": str(sequence.timecodeStart()),
                "sourcePath": source_path,
                "sourceFileHead": file_head,
                "isSequence": is_sequence,
                "track": track.name(),
                "trackIndex": track_index,
                "sourceFirst": source_first_frame,

@@ -101,8 +107,9 @@ class CollectClips(api.ContextPlugin):
                                 int(item.sourceIn())) + 1,
                "clipIn": int(item.timelineIn()),
                "clipOut": int(item.timelineOut()),
                "clipDuration": (int(item.timelineOut()) -
                                 int(item.timelineIn())) + 1,
                "clipDuration": (
                    int(item.timelineOut()) - int(
                        item.timelineIn())) + 1,
                "asset": asset,
                "family": "clip",
                "families": [],

@@ -147,22 +147,15 @@ class CollectPlatesData(api.InstancePlugin):
            "version": version
        })

        source_first_frame = instance.data.get("sourceFirst")
        source_file_head = instance.data.get("sourceFileHead")

        try:
            basename, ext = os.path.splitext(source_file)
            head, padding = os.path.splitext(basename)
            ext = ext[1:]
            padding = padding[1:]
            self.log.debug("_ padding: `{}`".format(padding))
            # head, padding, ext = source_file.split('.')
            source_first_frame = int(padding)
            padding = len(padding)
            file = "{head}.%0{padding}d.{ext}".format(
                head=head,
                padding=padding,
                ext=ext
            )

        if instance.data.get("isSequence", False):
            self.log.info("Is sequence of files")
            file = os.path.basename(source_file)
            ext = os.path.splitext(file)[-1][1:]
            self.log.debug("source_file_head: `{}`".format(source_file_head))
            head = source_file_head[:-1]
            start_frame = int(source_first_frame + instance.data["sourceInH"])
            duration = int(
                instance.data["sourceOutH"] - instance.data["sourceInH"])

@@ -170,10 +163,10 @@ class CollectPlatesData(api.InstancePlugin):
            self.log.debug("start_frame: `{}`".format(start_frame))
            self.log.debug("end_frame: `{}`".format(end_frame))
            files = [file % i for i in range(start_frame, (end_frame + 1), 1)]
        except Exception as e:
            self.log.warning("Exception in file: {}".format(e))
            head, ext = os.path.splitext(source_file)
            ext = ext[1:]
        else:
            self.log.info("Is single file")
            ext = os.path.splitext(source_file)[-1][1:]
            head = source_file_head
            files = source_file
            start_frame = instance.data["sourceInH"]
            end_frame = instance.data["sourceOutH"]

33
pype/plugins/unreal/create/create_staticmeshfbx.py
Normal file

@@ -0,0 +1,33 @@
import unreal
from pype.unreal.plugin import Creator
from avalon.unreal import (
    instantiate,
)


class CreateStaticMeshFBX(Creator):
    """Static FBX geometry"""

    name = "unrealStaticMeshMain"
    label = "Unreal - Static Mesh"
    family = "unrealStaticMesh"
    icon = "cube"
    asset_types = ["StaticMesh"]

    root = "/Game"
    suffix = "_INS"

    def __init__(self, *args, **kwargs):
        super(CreateStaticMeshFBX, self).__init__(*args, **kwargs)

    def process(self):

        name = self.data["subset"]

        selection = []
        if (self.options or {}).get("useSelection"):
            sel_objects = unreal.EditorUtilityLibrary.get_selected_assets()
            selection = [a.get_path_name() for a in sel_objects]

        unreal.log("selection: {}".format(selection))
        instantiate(self.root, name, self.data, selection, self.suffix)

101
pype/plugins/unreal/load/load_staticmeshfbx.py
Normal file

@@ -0,0 +1,101 @@
from avalon import api
from avalon import unreal as avalon_unreal
import unreal


class StaticMeshFBXLoader(api.Loader):
    """Load Unreal StaticMesh from FBX"""

    families = ["unrealStaticMesh"]
    label = "Import FBX Static Mesh"
    representations = ["fbx"]
    icon = "cube"
    color = "orange"

    def load(self, context, name, namespace, data):
        """
        Load and containerise representation into Content Browser.

        This is a two step process. First, the FBX is imported to a
        temporary path, then `containerise()` is called on it - this moves
        all content to a new directory, creates an AssetContainer there and
        imprints it with metadata. This marks the path as a container.

        Args:
            context (dict): application context
            name (str): subset name
            namespace (str): in Unreal this is basically path to container.
                             This is not passed here, so namespace is set
                             by `containerise()` because only then we know
                             the real path.
            data (dict): Those would be data to be imprinted. This is not used
                         now, data are imprinted by `containerise()`.

        Returns:
            list(str): list of container content
        """

        tools = unreal.AssetToolsHelpers().get_asset_tools()
        temp_dir, temp_name = tools.create_unique_asset_name(
            "/Game/{}".format(name), "_TMP"
        )

        unreal.EditorAssetLibrary.make_directory(temp_dir)

        task = unreal.AssetImportTask()

        task.filename = self.fname
        task.destination_path = temp_dir
        task.destination_name = name
        task.replace_existing = False
        task.automated = True
        task.save = True

        # set import options here
        task.options = unreal.FbxImportUI()
        task.options.import_animations = False

        unreal.AssetToolsHelpers.get_asset_tools().import_asset_tasks([task])  # noqa: E501

        imported_assets = unreal.EditorAssetLibrary.list_assets(
            temp_dir, recursive=True, include_folder=True
        )
        new_dir = avalon_unreal.containerise(
            name, namespace, imported_assets, context, self.__class__.__name__)

        asset_content = unreal.EditorAssetLibrary.list_assets(
            new_dir, recursive=True, include_folder=True
        )

        unreal.EditorAssetLibrary.delete_directory(temp_dir)

        return asset_content

    def update(self, container, representation):
        node = container["objectName"]
        source_path = api.get_representation_path(representation)
        destination_path = container["namespace"]

        task = unreal.AssetImportTask()

        task.filename = source_path
        task.destination_path = destination_path
        # strip suffix
        task.destination_name = node[:-4]
        task.replace_existing = True
        task.automated = True
        task.save = True

        task.options = unreal.FbxImportUI()
        task.options.import_animations = False

        # do import fbx and replace existing data
        unreal.AssetToolsHelpers.get_asset_tools().import_asset_tasks([task])
        container_path = "{}/{}".format(container["namespace"],
                                        container["objectName"])
        # update metadata
        avalon_unreal.imprint(
            container_path, {"_id": str(representation["_id"])})

    def remove(self, container):
        unreal.EditorAssetLibrary.delete_directory(container["namespace"])

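For orientation, the two-step import in load() shuffles content roughly like this (paths are illustrative, not produced by this commit):

    # /Game/chairMain_TMP/...      temporary import target, deleted at the end
    # /Game/<namespace>/chairMain  final location created by containerise(),
    #                              imprinted with the container metadata
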
59
pype/plugins/unreal/publish/collect_instances.py
Normal file

@@ -0,0 +1,59 @@
import unreal

import pyblish.api


class CollectInstances(pyblish.api.ContextPlugin):
    """Gather instances by AvalonPublishInstance class

    This collector finds all paths containing an `AvalonPublishInstance`
    class asset.

    Identifier:
        id (str): "pyblish.avalon.instance"

    """

    label = "Collect Instances"
    order = pyblish.api.CollectorOrder
    hosts = ["unreal"]

    def process(self, context):

        ar = unreal.AssetRegistryHelpers.get_asset_registry()
        instance_containers = ar.get_assets_by_class(
            "AvalonPublishInstance", True)

        for container_data in instance_containers:
            asset = container_data.get_asset()
            data = unreal.EditorAssetLibrary.get_metadata_tag_values(asset)
            data["objectName"] = container_data.asset_name
            # convert to strings
            data = {str(key): str(value) for (key, value) in data.items()}
            assert data.get("family"), (
                "instance has no family"
            )

            # content of container
            members = unreal.EditorAssetLibrary.list_assets(
                asset.get_path_name(), recursive=True, include_folder=True
            )
            self.log.debug(members)
            self.log.debug(asset.get_path_name())
            # remove instance container
            members.remove(asset.get_path_name())
            self.log.info("Creating instance for {}".format(asset.get_name()))

            instance = context.create_instance(asset.get_name())
            instance[:] = members

            # Store the exact members of the object set
            instance.data["setMembers"] = members
            instance.data["families"] = [data.get("family")]

            label = "{0} ({1})".format(asset.get_name()[:-4],
                                       data["asset"])

            instance.data["label"] = label

            instance.data.update(data)

@@ -36,7 +36,8 @@ TIMECODE = (
MISSING_KEY_VALUE = "N/A"
CURRENT_FRAME_KEY = "{current_frame}"
CURRENT_FRAME_SPLITTER = "_-_CURRENT_FRAME_-_"
TIME_CODE_KEY = "{timecode}"
TIMECODE_KEY = "{timecode}"
SOURCE_TIMECODE_KEY = "{source_timecode}"


def _streams(source):

@@ -188,10 +189,13 @@ class ModifiedBurnins(ffmpeg_burnins.Burnins):
        if not options.get("fps"):
            options["fps"] = self.frame_rate

        options["timecode"] = ffmpeg_burnins._frames_to_timecode(
            frame_start_tc,
            self.frame_rate
        )
        if isinstance(frame_start_tc, str):
            options["timecode"] = frame_start_tc
        else:
            options["timecode"] = ffmpeg_burnins._frames_to_timecode(
                frame_start_tc,
                self.frame_rate
            )

        self._add_burnin(text, align, options, TIMECODE)

@@ -412,7 +416,14 @@ def burnins_from_data(
        data[CURRENT_FRAME_KEY[1:-1]] = CURRENT_FRAME_SPLITTER

    if frame_start_tc is not None:
        data[TIME_CODE_KEY[1:-1]] = TIME_CODE_KEY
        data[TIMECODE_KEY[1:-1]] = TIMECODE_KEY

    source_timecode = stream.get("timecode")
    if source_timecode is None:
        source_timecode = stream.get("tags", {}).get("timecode")

    if source_timecode is not None:
        data[SOURCE_TIMECODE_KEY[1:-1]] = SOURCE_TIMECODE_KEY

    for align_text, value in presets.get('burnins', {}).items():
        if not value:

@@ -425,8 +436,6 @@ def burnins_from_data(
                " (Make sure you have new burnin presets)."
            ).format(str(type(value)), str(value)))

        has_timecode = TIME_CODE_KEY in value

        align = None
        align_text = align_text.strip().lower()
        if align_text == "top_left":

@ -442,6 +451,7 @@ def burnins_from_data(
        elif align_text == "bottom_right":
            align = ModifiedBurnins.BOTTOM_RIGHT

        has_timecode = TIMECODE_KEY in value
        # Replace with missing key value if frame_start_tc is not set
        if frame_start_tc is None and has_timecode:
            has_timecode = False

@ -449,7 +459,13 @@ def burnins_from_data(
                "`frame_start` and `frame_start_tc`"
                " are not set in entered data."
            )
            value = value.replace(TIME_CODE_KEY, MISSING_KEY_VALUE)
            value = value.replace(TIMECODE_KEY, MISSING_KEY_VALUE)

        has_source_timecode = SOURCE_TIMECODE_KEY in value
        if source_timecode is None and has_source_timecode:
            has_source_timecode = False
            log.warning("Source does not have set timecode value.")
            value = value.replace(SOURCE_TIMECODE_KEY, MISSING_KEY_VALUE)

        key_pattern = re.compile(r"(\{.*?[^{0]*\})")

@ -465,10 +481,20 @@ def burnins_from_data(
            value = value.replace(key, MISSING_KEY_VALUE)

        # Handle timecode differently
        if has_source_timecode:
            args = [align, frame_start, frame_end, source_timecode]
            if not value.startswith(SOURCE_TIMECODE_KEY):
                value_items = value.split(SOURCE_TIMECODE_KEY)
                text = value_items[0].format(**data)
                args.append(text)

            burnin.add_timecode(*args)
            continue

        if has_timecode:
            args = [align, frame_start, frame_end, frame_start_tc]
            if not value.startswith(TIME_CODE_KEY):
                value_items = value.split(TIME_CODE_KEY)
            if not value.startswith(TIMECODE_KEY):
                value_items = value.split(TIMECODE_KEY)
                text = value_items[0].format(**data)
                args.append(text)

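For context, a minimal sketch of the burnin preset shape implied by the loop above; the align keys come from the `align_text` checks, and the value strings may mix regular data keys with the special timecode/frame keys ("{asset}" is an assumed data key):

presets = {
    "burnins": {
        "top_left": "{asset}",
        "bottom_left": "{current_frame}",
        "bottom_right": "{timecode}"
    }
}
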
@ -25,18 +25,6 @@ log.setLevel(logging.DEBUG)
error_format = "Failed {plugin.__name__}: {error} -- {error.traceback}"


def _load_json(path):
    assert os.path.isfile(path), ("path to json file doesn't exist")
    data = None
    with open(path, "r") as json_file:
        try:
            data = json.load(json_file)
        except Exception as exc:
            log.error(
                "Error loading json: "
                "{} - Exception: {}".format(path, exc)
            )
    return data


def __main__():
    parser = argparse.ArgumentParser()

2
pype/scripts/slates/__init__.py
Normal file

@ -0,0 +1,2 @@
from . import slate_base
from .slate_base import api

18
pype/scripts/slates/__main__.py
Normal file

@ -0,0 +1,18 @@
import sys
import json
from slate_base import api


def main(in_args=None):
    data_arg = in_args[-1]
    in_data = json.loads(data_arg)
    api.create_slates(
        in_data["fill_data"],
        in_data.get("slate_name"),
        in_data.get("slate_data"),
        in_data.get("data_output_json")
    )


if __name__ == "__main__":
    main(sys.argv)

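A minimal invocation sketch for this entry point: `main` parses the last command-line argument as JSON, so a caller can serialize the payload with `json.dumps`. The preset name and paths below are assumptions for illustration:

import json
import subprocess
import sys

payload = {
    "fill_data": {"project": {"name": "Testing project"}},
    "slate_name": "example_HD",                 # hypothetical preset name
    "data_output_json": "/tmp/slate_data.json"  # hypothetical output path
}
# The JSON blob must be the last argument, because main() reads in_args[-1].
subprocess.run([sys.executable, "pype/scripts/slates", json.dumps(payload)])
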
0
pype/scripts/slates/slate_base/__init__.py
Normal file

15
pype/scripts/slates/slate_base/api.py
Normal file

@ -0,0 +1,15 @@
from .font_factory import FontFactory
from .base import BaseObj, load_default_style
from .main_frame import MainFrame
from .layer import Layer
from .items import (
    BaseItem,
    ItemImage,
    ItemRectangle,
    ItemPlaceHolder,
    ItemText,
    ItemTable,
    TableField
)
from .lib import create_slates
from .example import example

373
pype/scripts/slates/slate_base/base.py
Normal file

@ -0,0 +1,373 @@
import os
import re
import logging
import copy
import json
from uuid import uuid4


def load_default_style():
    cur_folder = os.path.dirname(os.path.abspath(__file__))
    default_json_path = os.path.join(cur_folder, "default_style.json")
    with open(default_json_path, "r") as _file:
        data = _file.read()
    return json.loads(data)


class BaseObj:
    """Base Object for slates."""

    obj_type = None
    available_parents = []
    all_style_keys = [
        "font-family", "font-size", "font-color", "font-bold", "font-italic",
        "bg-color", "bg-alter-color",
        "alignment-horizontal", "alignment-vertical",
        "padding", "padding-left", "padding-right",
        "padding-top", "padding-bottom",
        "margin", "margin-left", "margin-right",
        "margin-top", "margin-bottom", "width", "height",
        "fill", "word-wrap", "ellide", "max-lines"
    ]
    fill_data_regex = r"{[^}]+}"

    def __init__(self, parent, style={}, name=None, pos_x=None, pos_y=None):
        if not self.obj_type:
            raise NotImplementedError(
                "Class doesn't have set object type <{}>".format(
                    self.__class__.__name__
                )
            )

        parent_obj_type = None
        if parent:
            parent_obj_type = parent.obj_type

        if parent_obj_type not in self.available_parents:
            expected_parents = ", ".join(self.available_parents)
            raise Exception((
                "Invalid parent <{}> for <{}>. Expected <{}>"
            ).format(
                parent.__class__.__name__, self.obj_type, expected_parents
            ))

        self.parent = parent
        self._style = style

        self.id = uuid4()
        self.name = name
        self.items = {}

        self._pos_x = pos_x or 0
        self._pos_y = pos_y or 0

        log_parts = []
        module = self.__class__.__module__
        if module and module != "__main__":
            log_parts.append(module)
        log_parts.append(self.__class__.__name__)
        self.log = logging.getLogger(".".join(log_parts))

        if parent:
            parent.add_item(self)

    def fill_data_format(self):
        return

    @property
    def fill_data(self):
        return self.parent.fill_data

    @property
    def main_style(self):
        return load_default_style()

    def height(self):
        raise NotImplementedError(
            "Attribute `height` is not implemented for <{}>".format(
                self.__class__.__name__
            )
        )

    def width(self):
        raise NotImplementedError(
            "Attribute `width` is not implemented for <{}>".format(
                self.__class__.__name__
            )
        )

    def collect_data(self):
        return None

    def find_item(self, obj_type=None, name=None):
        obj_type_fits = False
        name_fits = False
        if obj_type is None or self.obj_type == obj_type:
            obj_type_fits = True

        if name is None or self.name == name:
            name_fits = True

        output = []
        if obj_type_fits and name_fits:
            output.append(self)

        if not self.items:
            return output

        for item in self.items.values():
            output.extend(
                item.find_item(obj_type=obj_type, name=name)
            )
        return output

    @property
    def full_style(self):
        if self.parent is not None:
            style = dict(val for val in self.parent.full_style.items())
        else:
            style = self.main_style

        for key, value in self._style.items():
            if key in self.all_style_keys:
                # TODO which variant is right?
                style[self.obj_type][key] = value
                # style["*"][key] = value
            else:
                if key not in style:
                    style[key] = {}

                if isinstance(style[key], dict):
                    style[key].update(value)
                else:
                    style[key] = value

        return style

    def get_style_for_obj_type(self, obj_type, style=None):
        if not style:
            style = copy.deepcopy(self.full_style)

        base = style.get("*") or {}
        obj_specific = style.get(obj_type) or {}
        name_specific = {}
        if self.name:
            name = str(self.name)
            if not name.startswith("#"):
                name = "#" + name
            name_specific = style.get(name) or {}

        if obj_type == "table-item":
            col_regex = r"table-item-col\[([\d\-, ]+)*\]"
            row_regex = r"table-item-row\[([\d\-, ]+)*\]"
            field_regex = (
                r"table-item-field\[(([ ]+)?\d+([ ]+)?:([ ]+)?\d+([ ]+)?)*\]"
            )
            # STRICT field regex (spaces not allowed)
            # field_regex = r"table-item-field\[(\d+:\d+)*\]"

            def get_indexes_from_regex_match(result, field=False):
                group = result.group(1)
                indexes = []
                if field:
                    return [
                        int(part.strip()) for part in group.strip().split(":")
                    ]

                parts = group.strip().split(",")
                for part in parts:
                    part = part.strip()
                    if "-" not in part:
                        indexes.append(int(part))
                        continue

                    sub_parts = [
                        int(sub.strip()) for sub in part.split("-")
                    ]
                    if len(sub_parts) != 2:
                        # TODO logging
                        self.log.warning("Invalid range '{}'".format(part))
                        continue

                    for idx in range(sub_parts[0], sub_parts[1]+1):
                        indexes.append(idx)
                return indexes

            for key, value in style.items():
                if not key.startswith(obj_type):
                    continue

                result = re.search(col_regex, key)
                if result:
                    indexes = get_indexes_from_regex_match(result)
                    if self.col_idx in indexes:
                        obj_specific.update(value)
                    continue

                result = re.search(row_regex, key)
                if result:
                    indexes = get_indexes_from_regex_match(result)
                    if self.row_idx in indexes:
                        obj_specific.update(value)
                    continue

                result = re.search(field_regex, key)
                if result:
                    row_idx, col_idx = get_indexes_from_regex_match(
                        result, True
                    )
                    if self.col_idx == col_idx and self.row_idx == row_idx:
                        obj_specific.update(value)

        output = {}
        output.update(base)
        output.update(obj_specific)
        output.update(name_specific)

        return output

    @property
    def style(self):
        return self.get_style_for_obj_type(self.obj_type)

    @property
    def item_pos_x(self):
        if self.parent.obj_type == "main_frame":
            return int(self._pos_x)
        return 0

    @property
    def item_pos_y(self):
        if self.parent.obj_type == "main_frame":
            return int(self._pos_y)
        return 0

    @property
    def content_pos_x(self):
        pos_x = self.item_pos_x
        margin = self.style["margin"]
        margin_left = self.style.get("margin-left") or margin

        pos_x += margin_left

        return pos_x

    @property
    def content_pos_y(self):
        pos_y = self.item_pos_y
        margin = self.style["margin"]
        margin_top = self.style.get("margin-top") or margin
        return pos_y + margin_top

    @property
    def value_pos_x(self):
        pos_x = int(self.content_pos_x)
        padding = self.style["padding"]
        padding_left = self.style.get("padding-left")
        if padding_left is None:
            padding_left = padding

        pos_x += padding_left

        return pos_x

    @property
    def value_pos_y(self):
        pos_y = int(self.content_pos_y)
        padding = self.style["padding"]
        padding_top = self.style.get("padding-top")
        if padding_top is None:
            padding_top = padding

        pos_y += padding_top

        return pos_y

    @property
    def value_pos_start(self):
        return (self.value_pos_x, self.value_pos_y)

    @property
    def value_pos_end(self):
        pos_x, pos_y = self.value_pos_start
        pos_x += self.width()
        pos_y += self.height()
        return (pos_x, pos_y)

    @property
    def content_pos_start(self):
        return (self.content_pos_x, self.content_pos_y)

    @property
    def content_pos_end(self):
        pos_x, pos_y = self.content_pos_start
        pos_x += self.content_width()
        pos_y += self.content_height()
        return (pos_x, pos_y)

    def value_width(self):
        raise NotImplementedError(
            "Attribute `value_width` is not implemented for <{}>".format(
                self.__class__.__name__
            )
        )

    def value_height(self):
        raise NotImplementedError(
            "Attribute `value_height` is not implemented for <{}>".format(
                self.__class__.__name__
            )
        )

    def content_width(self):
        width = self.value_width()
        padding = self.style["padding"]
        padding_left = self.style.get("padding-left")
        if padding_left is None:
            padding_left = padding

        padding_right = self.style.get("padding-right")
        if padding_right is None:
            padding_right = padding

        return width + padding_left + padding_right

    def content_height(self):
        height = self.value_height()
        padding = self.style["padding"]
        padding_top = self.style.get("padding-top")
        if padding_top is None:
            padding_top = padding

        padding_bottom = self.style.get("padding-bottom")
        if padding_bottom is None:
            padding_bottom = padding

        return height + padding_top + padding_bottom

    def width(self):
        width = self.content_width()

        margin = self.style["margin"]
        margin_left = self.style.get("margin-left") or margin
        margin_right = self.style.get("margin-right") or margin

        return width + margin_left + margin_right

    def height(self):
        height = self.content_height()

        margin = self.style["margin"]
        margin_top = self.style.get("margin-top") or margin
        margin_bottom = self.style.get("margin-bottom") or margin

        return height + margin_bottom + margin_top

    def add_item(self, item):
        self.items[item.id] = item
        item.fill_data_format()

    def reset(self):
        for item in self.items.values():
            item.reset()

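For context, a sketch of how the cascade in `get_style_for_obj_type` resolves, assuming a hypothetical rectangle item named "colorbar":

style = {
    "*": {"font-size": 26, "bg-color": "#0077ff"},  # applied first
    "rectangle": {"bg-color": "#E9324B"},           # then the obj_type block
    "#colorbar": {"bg-color": "#9932CC"}            # "#<name>" wins last
}
# The resolved "bg-color" for this item would be "#9932CC"; "font-size"
# stays 26 because nothing more specific overrides it.
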
58
pype/scripts/slates/slate_base/default_style.json
Normal file

@ -0,0 +1,58 @@
{
    "*": {
        "font-family": "arial",
        "font-size": 26,
        "font-color": "#ffffff",
        "font-bold": false,
        "font-italic": false,
        "bg-color": "#0077ff",
        "alignment-horizontal": "left",
        "alignment-vertical": "top",
        "word-wrap": true,
        "ellide": true,
        "max-lines": null
    },
    "layer": {
        "padding": 0,
        "margin": 0
    },
    "rectangle": {
        "padding": 0,
        "margin": 0,
        "fill": true
    },
    "image": {
        "padding": 0,
        "margin": 0,
        "fill": true
    },
    "placeholder": {
        "padding": 0,
        "margin": 0,
        "fill": true
    },
    "main_frame": {
        "padding": 0,
        "margin": 0,
        "bg-color": "#252525"
    },
    "table": {
        "padding": 0,
        "margin": 0,
        "bg-color": "transparent"
    },
    "table-item": {
        "padding": 0,
        "margin": 0,
        "bg-color": "#212121",
        "bg-alter-color": "#272727",
        "font-color": "#dcdcdc",
        "font-bold": false,
        "font-italic": false,
        "alignment-horizontal": "left",
        "alignment-vertical": "top",
        "word-wrap": false,
        "ellide": true,
        "max-lines": 1
    }
}

254
pype/scripts/slates/slate_base/example.py
Normal file

@ -0,0 +1,254 @@
# import sys
# sys.path.append(r"PATH/TO/PILLOW/PACKAGE")

from . import api


def example():
    """Example data to demonstrate function.

    It is required to fill "destination_path", "thumbnail_path"
    and "color_bar_path" in `example_fill_data` to be able to execute.
    """

    example_fill_data = {
        "destination_path": "PATH/TO/OUTPUT/FILE",
        "project": {
            "name": "Testing project"
        },
        "intent": "WIP",
        "version_name": "seq01_sh0100_compositing_v01",
        "date": "2019-08-09",
        "shot_type": "2d comp",
        "submission_note": (
            "Lorem ipsum dolor sit amet, consectetuer adipiscing elit."
            " Aenean commodo ligula eget dolor. Aenean massa."
            " Cum sociis natoque penatibus et magnis dis parturient montes,"
            " nascetur ridiculus mus. Donec quam felis, ultricies nec,"
            " pellentesque eu, pretium quis, sem. Nulla consequat massa quis"
            " enim. Donec pede justo, fringilla vel,"
            " aliquet nec, vulputate eget, arcu."
        ),
        "thumbnail_path": "PATH/TO/THUMBNAIL/FILE",
        "color_bar_path": "PATH/TO/COLOR/BAR/FILE",
        "vendor": "Our Studio",
        "shot_name": "sh0100",
        "frame_start": 1001,
        "frame_end": 1004,
        "duration": 3
    }

    example_presets = {"example_HD": {
        "width": 1920,
        "height": 1080,
        "destination_path": "{destination_path}",
        "style": {
            "*": {
                "font-family": "arial",
                "font-color": "#ffffff",
                "font-bold": False,
                "font-italic": False,
                "bg-color": "#0077ff",
                "alignment-horizontal": "left",
                "alignment-vertical": "top"
            },
            "layer": {
                "padding": 0,
                "margin": 0
            },
            "rectangle": {
                "padding": 0,
                "margin": 0,
                "bg-color": "#E9324B",
                "fill": True
            },
            "main_frame": {
                "padding": 0,
                "margin": 0,
                "bg-color": "#252525"
            },
            "table": {
                "padding": 0,
                "margin": 0,
                "bg-color": "transparent"
            },
            "table-item": {
                "padding": 5,
                "padding-bottom": 10,
                "margin": 0,
                "bg-color": "#212121",
                "bg-alter-color": "#272727",
                "font-color": "#dcdcdc",
                "font-bold": False,
                "font-italic": False,
                "alignment-horizontal": "left",
                "alignment-vertical": "top",
                "word-wrap": False,
                "ellide": True,
                "max-lines": 1
            },
            "table-item-col[0]": {
                "font-size": 20,
                "font-color": "#898989",
                "font-bold": True,
                "ellide": False,
                "word-wrap": True,
                "max-lines": None
            },
            "table-item-col[1]": {
                "font-size": 40,
                "padding-left": 10
            },
            "#colorbar": {
                "bg-color": "#9932CC"
            }
        },
        "items": [{
            "type": "layer",
            "direction": 1,
            "name": "MainLayer",
            "style": {
                "#MainLayer": {
                    "width": 1094,
                    "height": 1000,
                    "margin": 25,
                    "padding": 0
                },
                "#LeftSide": {
                    "margin-right": 25
                }
            },
            "items": [{
                "type": "layer",
                "name": "LeftSide",
                "items": [{
                    "type": "layer",
                    "direction": 1,
                    "style": {
                        "table-item": {
                            "bg-color": "transparent",
                            "padding-bottom": 20
                        },
                        "table-item-col[0]": {
                            "font-size": 20,
                            "font-color": "#898989",
                            "alignment-horizontal": "right"
                        },
                        "table-item-col[1]": {
                            "alignment-horizontal": "left",
                            "font-bold": True,
                            "font-size": 40
                        }
                    },
                    "items": [{
                        "type": "table",
                        "values": [
                            ["Show:", "{project[name]}"]
                        ],
                        "style": {
                            "table-item-field[0:0]": {
                                "width": 150
                            },
                            "table-item-field[0:1]": {
                                "width": 580
                            }
                        }
                    }, {
                        "type": "table",
                        "values": [
                            ["Submitting For:", "{intent}"]
                        ],
                        "style": {
                            "table-item-field[0:0]": {
                                "width": 160
                            },
                            "table-item-field[0:1]": {
                                "width": 218,
                                "alignment-horizontal": "right"
                            }
                        }
                    }]
                }, {
                    "type": "rectangle",
                    "style": {
                        "bg-color": "#bc1015",
                        "width": 1108,
                        "height": 5,
                        "fill": True
                    }
                }, {
                    "type": "table",
                    "use_alternate_color": True,
                    "values": [
                        ["Version name:", "{version_name}"],
                        ["Date:", "{date}"],
                        ["Shot Types:", "{shot_type}"],
                        ["Submission Note:", "{submission_note}"]
                    ],
                    "style": {
                        "table-item": {
                            "padding-bottom": 20
                        },
                        "table-item-field[0:1]": {
                            "font-bold": True
                        },
                        "table-item-field[3:0]": {
                            "word-wrap": True,
                            "ellide": True,
                            "max-lines": 4
                        },
                        "table-item-col[0]": {
                            "alignment-horizontal": "right",
                            "width": 150
                        },
                        "table-item-col[1]": {
                            "alignment-horizontal": "left",
                            "width": 958
                        }
                    }
                }]
            }, {
                "type": "layer",
                "name": "RightSide",
                "items": [{
                    "type": "placeholder",
                    "name": "thumbnail",
                    "path": "{thumbnail_path}",
                    "style": {
                        "width": 730,
                        "height": 412
                    }
                }, {
                    "type": "placeholder",
                    "name": "colorbar",
                    "path": "{color_bar_path}",
                    "return_data": True,
                    "style": {
                        "width": 730,
                        "height": 55
                    }
                }, {
                    "type": "table",
                    "use_alternate_color": True,
                    "values": [
                        ["Vendor:", "{vendor}"],
                        ["Shot Name:", "{shot_name}"],
                        ["Frames:", "{frame_start} - {frame_end} ({duration})"]
                    ],
                    "style": {
                        "table-item-col[0]": {
                            "alignment-horizontal": "left",
                            "width": 200
                        },
                        "table-item-col[1]": {
                            "alignment-horizontal": "right",
                            "width": 530,
                            "font-size": 30
                        }
                    }
                }]
            }]
        }]
    }}

    api.create_slates(example_fill_data, "example_HD", example_presets)

93
pype/scripts/slates/slate_base/font_factory.py
Normal file

@ -0,0 +1,93 @@
import os
import sys
import collections

from PIL import ImageFont


class FontFactory:
    fonts = None
    default = None

    @classmethod
    def get_font(cls, family, font_size=None, italic=False, bold=False):
        if cls.fonts is None:
            cls.load_fonts()

        styles = []
        if bold:
            styles.append("Bold")

        if italic:
            styles.append("Italic")

        if not styles:
            styles.append("Regular")

        style = " ".join(styles)
        family = family.lower()
        family_styles = cls.fonts.get(family)
        if not family_styles:
            return cls.default

        font = family_styles.get(style)
        if font:
            if font_size:
                font = font.font_variant(size=font_size)
            return font

        # Return first found
        for font in family_styles.values():
            if font_size:
                font = font.font_variant(size=font_size)
            return font

        return cls.default

    @classmethod
    def load_fonts(cls):

        cls.default = ImageFont.load_default()

        available_font_ext = [".ttf", ".ttc"]
        dirs = []
        if sys.platform == "win32":
            # check the windows font repository
            # NOTE: must use uppercase WINDIR, to work around bugs in
            # 1.5.2's os.environ.get()
            windir = os.environ.get("WINDIR")
            if windir:
                dirs.append(os.path.join(windir, "fonts"))

        elif sys.platform in ("linux", "linux2"):
            lindirs = os.environ.get("XDG_DATA_DIRS", "")
            if not lindirs:
                # According to the freedesktop spec, XDG_DATA_DIRS should
                # default to /usr/share
                lindirs = "/usr/share"
            dirs += [
                os.path.join(lindir, "fonts") for lindir in lindirs.split(":")
            ]

        elif sys.platform == "darwin":
            dirs += [
                "/Library/Fonts",
                "/System/Library/Fonts",
                os.path.expanduser("~/Library/Fonts")
            ]

        available_fonts = collections.defaultdict(dict)
        for directory in dirs:
            for walkroot, walkdir, walkfilenames in os.walk(directory):
                for walkfilename in walkfilenames:
                    ext = os.path.splitext(walkfilename)[1]
                    if ext.lower() not in available_font_ext:
                        continue

                    fontpath = os.path.join(walkroot, walkfilename)
                    font_obj = ImageFont.truetype(fontpath)
                    family = font_obj.font.family.lower()
                    style = font_obj.font.style
                    available_fonts[family][style] = font_obj

        cls.fonts = available_fonts

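A minimal usage sketch of the factory above; font availability is machine-dependent, so "arial" is an assumption:

font = FontFactory.get_font("arial", font_size=26, bold=True)
# Falls back to PIL's built-in default font when the family is not found.
text_width, text_height = font.getsize("Example slate text")
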
667
pype/scripts/slates/slate_base/items.py
Normal file

@ -0,0 +1,667 @@
import os
import re
from PIL import Image

from .base import BaseObj
from .font_factory import FontFactory


class BaseItem(BaseObj):
    available_parents = ["main_frame", "layer"]

    @property
    def item_pos_x(self):
        if self.parent.obj_type == "main_frame":
            return self._pos_x
        return self.parent.child_pos_x(self.id)

    @property
    def item_pos_y(self):
        if self.parent.obj_type == "main_frame":
            return self._pos_y
        return self.parent.child_pos_y(self.id)

    def add_item(self, *args, **kwargs):
        raise Exception("Can't add item to an item, use layers instead.")

    def draw(self, image, drawer):
        raise NotImplementedError(
            "Method `draw` is not implemented for <{}>".format(
                self.__class__.__name__
            )
        )


class ItemImage(BaseItem):
    obj_type = "image"

    def __init__(self, image_path, *args, **kwargs):
        self.image_path = image_path
        super(ItemImage, self).__init__(*args, **kwargs)

    def fill_data_format(self):
        if re.match(self.fill_data_regex, self.image_path):
            self.image_path = self.image_path.format(**self.fill_data)

    def draw(self, image, drawer):
        source_image = Image.open(os.path.normpath(self.image_path))
        paste_image = source_image.resize(
            (self.value_width(), self.value_height()),
            Image.ANTIALIAS
        )
        image.paste(
            paste_image,
            (self.value_pos_x, self.value_pos_y)
        )

    def value_width(self):
        return int(self.style["width"])

    def value_height(self):
        return int(self.style["height"])


class ItemRectangle(BaseItem):
    obj_type = "rectangle"

    def draw(self, image, drawer):
        bg_color = self.style["bg-color"]
        fill = self.style.get("fill", False)
        kwargs = {}
        if fill:
            kwargs["fill"] = bg_color
        else:
            kwargs["outline"] = bg_color

        start_pos_x = self.value_pos_x
        start_pos_y = self.value_pos_y
        end_pos_x = start_pos_x + self.value_width()
        end_pos_y = start_pos_y + self.value_height()
        drawer.rectangle(
            (
                (start_pos_x, start_pos_y),
                (end_pos_x, end_pos_y)
            ),
            **kwargs
        )

    def value_width(self):
        return int(self.style["width"])

    def value_height(self):
        return int(self.style["height"])


class ItemPlaceHolder(BaseItem):
    obj_type = "placeholder"

    def __init__(self, image_path, *args, **kwargs):
        self.image_path = image_path
        super(ItemPlaceHolder, self).__init__(*args, **kwargs)

    def fill_data_format(self):
        if re.match(self.fill_data_regex, self.image_path):
            self.image_path = self.image_path.format(**self.fill_data)

    def draw(self, image, drawer):
        bg_color = self.style["bg-color"]

        kwargs = {}
        if bg_color != "transparent":
            kwargs["fill"] = bg_color

        start_pos_x = self.value_pos_x
        start_pos_y = self.value_pos_y
        end_pos_x = start_pos_x + self.value_width()
        end_pos_y = start_pos_y + self.value_height()

        drawer.rectangle(
            (
                (start_pos_x, start_pos_y),
                (end_pos_x, end_pos_y)
            ),
            **kwargs
        )

    def value_width(self):
        return int(self.style["width"])

    def value_height(self):
        return int(self.style["height"])

    def collect_data(self):
        return {
            "pos_x": self.value_pos_x,
            "pos_y": self.value_pos_y,
            "width": self.value_width(),
            "height": self.value_height(),
            "path": self.image_path
        }


class ItemText(BaseItem):
    obj_type = "text"

    def __init__(self, value, *args, **kwargs):
        self.value = value
        super(ItemText, self).__init__(*args, **kwargs)

    def draw(self, image, drawer):
        bg_color = self.style["bg-color"]
        if bg_color and bg_color.lower() != "transparent":
            # TODO border outline styles
            drawer.rectangle(
                (self.content_pos_start, self.content_pos_end),
                fill=bg_color,
                outline=None
            )

        font_color = self.style["font-color"]
        font_family = self.style["font-family"]
        font_size = self.style["font-size"]
        font_bold = self.style.get("font-bold", False)
        font_italic = self.style.get("font-italic", False)

        font = FontFactory.get_font(
            font_family, font_size, font_italic, font_bold
        )
        drawer.text(
            self.value_pos_start,
            self.value,
            font=font,
            fill=font_color
        )

    def value_width(self):
        font_family = self.style["font-family"]
        font_size = self.style["font-size"]
        font_bold = self.style.get("font-bold", False)
        font_italic = self.style.get("font-italic", False)

        font = FontFactory.get_font(
            font_family, font_size, font_italic, font_bold
        )
        width = font.getsize(self.value)[0]
        return int(width)

    def value_height(self):
        font_family = self.style["font-family"]
        font_size = self.style["font-size"]
        font_bold = self.style.get("font-bold", False)
        font_italic = self.style.get("font-italic", False)

        font = FontFactory.get_font(
            font_family, font_size, font_italic, font_bold
        )
        height = font.getsize(self.value)[1]
        return int(height)


class ItemTable(BaseItem):

    obj_type = "table"

    def __init__(self, values, use_alternate_color=False, *args, **kwargs):

        self.values_by_cords = None
        self.prepare_values(values)

        super(ItemTable, self).__init__(*args, **kwargs)
        self.size_values = None
        self.calculate_sizes()

        self.use_alternate_color = use_alternate_color

    def add_item(self, item):
        if item.obj_type == "table-item":
            return
        super(ItemTable, self).add_item(item)

    def fill_data_format(self):
        for item in self.values:
            item.fill_data_format()

    def prepare_values(self, _values):
        values = []
        values_by_cords = []
        row_count = 0
        col_count = 0
        for row in _values:
            row_count += 1
            if len(row) > col_count:
                col_count = len(row)

        for row_idx in range(row_count):
            values_by_cords.append([])
            for col_idx in range(col_count):
                values_by_cords[row_idx].append([])
                if col_idx <= len(_values[row_idx]) - 1:
                    col = _values[row_idx][col_idx]
                else:
                    col = ""

                col_item = TableField(row_idx, col_idx, col, parent=self)
                values_by_cords[row_idx][col_idx] = col_item
                values.append(col_item)

        self.values = values
        self.values_by_cords = values_by_cords

    def calculate_sizes(self):
        row_heights = []
        col_widths = []
        for row_idx, row in enumerate(self.values_by_cords):
            row_heights.append(0)
            for col_idx, col_item in enumerate(row):
                if len(col_widths) < col_idx + 1:
                    col_widths.append(0)

                _width = col_widths[col_idx]
                item_width = col_item.width()
                if _width < item_width:
                    col_widths[col_idx] = item_width

                _height = row_heights[row_idx]
                item_height = col_item.height()
                if _height < item_height:
                    row_heights[row_idx] = item_height

        self.size_values = (row_heights, col_widths)

    def draw(self, image, drawer):
        bg_color = self.style["bg-color"]
        if bg_color and bg_color.lower() != "transparent":
            # TODO border outline styles
            drawer.rectangle(
                (self.content_pos_start, self.content_pos_end),
                fill=bg_color,
                outline=None
            )

        for value in self.values:
            value.draw(image, drawer)

    def value_width(self):
        row_heights, col_widths = self.size_values
        width = 0
        for _width in col_widths:
            width += _width

        if width != 0:
            width -= 1
        return width

    def value_height(self):
        row_heights, col_widths = self.size_values
        height = 0
        for _height in row_heights:
            height += _height

        if height != 0:
            height -= 1
        return height

    def content_pos_info_by_cord(self, row_idx, col_idx):
        row_heights, col_widths = self.size_values
        pos_x = int(self.value_pos_x)
        pos_y = int(self.value_pos_y)
        width = 0
        height = 0
        for idx, value in enumerate(col_widths):
            if col_idx == idx:
                width = value
                break
            pos_x += value

        for idx, value in enumerate(row_heights):
            if row_idx == idx:
                height = value
                break
            pos_y += value

        return (pos_x, pos_y, width, height)


class TableField(BaseItem):

    obj_type = "table-item"
    available_parents = ["table"]
    ellide_text = "..."

    def __init__(self, row_idx, col_idx, value, *args, **kwargs):
        super(TableField, self).__init__(*args, **kwargs)
        self.row_idx = row_idx
        self.col_idx = col_idx
        self.value = value

    def recalculate_by_width(self, value, max_width):
        padding = self.style["padding"]
        padding_left = self.style.get("padding-left")
        if padding_left is None:
            padding_left = padding

        padding_right = self.style.get("padding-right")
        if padding_right is None:
            padding_right = padding

        max_width -= (padding_left + padding_right)

        if not value:
            return ""

        word_wrap = self.style.get("word-wrap")
        ellide = self.style.get("ellide")
        max_lines = self.style.get("max-lines")

        font_family = self.style["font-family"]
        font_size = self.style["font-size"]
        font_bold = self.style.get("font-bold", False)
        font_italic = self.style.get("font-italic", False)

        font = FontFactory.get_font(
            font_family, font_size, font_italic, font_bold
        )
        val_width = font.getsize(value)[0]
        if val_width <= max_width:
            return value

        if not ellide and not word_wrap:
            # TODO logging
            self.log.warning((
                "Can't draw text because it is too long with"
                " `word-wrap` and `ellide` turned off <{}>"
            ).format(value))
            return ""

        elif ellide and not word_wrap:
            max_lines = 1

        words = [word for word in value.split()]
        words_len = len(words)
        lines = []
        last_index = None
        while True:
            start_index = 0
            if last_index is not None:
                start_index = int(last_index) + 1

            line = ""
            for idx in range(start_index, words_len):
                _word = words[idx]
                connector = " "
                if line == "":
                    connector = ""

                _line = connector.join([line, _word])
                _line_width = font.getsize(_line)[0]
                if _line_width > max_width:
                    break
                line = _line
                last_index = idx

            if line:
                lines.append(line)

                if last_index == words_len - 1:
                    break

            elif last_index is None:
                add_message = ""
                if ellide:
                    add_message = " String was shortened to `{}`."
                line = ""
                for idx, char in enumerate(words[idx]):
                    _line = line + char + self.ellide_text
                    _line_width = font.getsize(_line)[0]
                    if _line_width > max_width:
                        if idx == 0:
                            line = _line
                        break
                    line = line + char

                lines.append(line)
                # TODO logging
                self.log.warning((
                    "Font size is too big.{} <{}>"
                ).format(add_message, value))
                break

        output = ""
        if not lines:
            return output

        over_max_lines = (max_lines and len(lines) > max_lines)
        if not over_max_lines:
            return "\n".join([line for line in lines])

        lines = [lines[idx] for idx in range(max_lines)]
        if not ellide:
            return "\n".join(lines)

        last_line = lines[-1]
        last_line_width = font.getsize(last_line + self.ellide_text)[0]
        if last_line_width <= max_width:
            lines[-1] += self.ellide_text
            return "\n".join([line for line in lines])

        last_line_words = last_line.split()
        if len(last_line_words) == 1:
            if max_lines > 1:
                # TODO try previous line?
                lines[-1] = self.ellide_text
                return "\n".join([line for line in lines])

            line = ""
            for idx, word in enumerate(last_line_words):
                _line = line + word + self.ellide_text
                _line_width = font.getsize(_line)[0]
                if _line_width > max_width:
                    if idx == 0:
                        line = _line
                    break
                line = _line
            lines[-1] = line

            return "\n".join([line for line in lines])

        line = ""
        for idx, _word in enumerate(last_line_words):
            connector = " "
            if line == "":
                connector = ""

            _line = connector.join([line, _word + self.ellide_text])
            _line_width = font.getsize(_line)[0]

            if _line_width <= max_width:
                line = connector.join([line, _word])
                continue

            if idx != 0:
                line += self.ellide_text
                break

            if max_lines != 1:
                # TODO try previous line?
                line = self.ellide_text
                break

            for idx, char in enumerate(_word):
                _line = line + char + self.ellide_text
                _line_width = font.getsize(_line)[0]
                if _line_width > max_width:
                    if idx == 0:
                        line = _line
                    break
                line = line + char
            break

        lines[-1] = line

        return "\n".join([line for line in lines])

    def fill_data_format(self):
        value = self.value
        if re.match(self.fill_data_regex, value):
            value = value.format(**self.fill_data)

        self.orig_value = value

        max_width = self.style.get("max-width")
        max_width = self.style.get("width") or max_width
        if max_width:
            value = self.recalculate_by_width(value, max_width)

        self.value = value

    def content_width(self):
        width = self.style.get("width")
        if width:
            return int(width)
        return super(TableField, self).content_width()

    def content_height(self):
        return super(TableField, self).content_height()

    def value_width(self):
        if not self.value:
            return 0

        font_family = self.style["font-family"]
        font_size = self.style["font-size"]
        font_bold = self.style.get("font-bold", False)
        font_italic = self.style.get("font-italic", False)

        font = FontFactory.get_font(
            font_family, font_size, font_italic, font_bold
        )
        width = font.getsize_multiline(self.value)[0] + 1

        min_width = self.style.get("min-width")
        if min_width and min_width > width:
            width = min_width

        return int(width)

    def value_height(self):
        if not self.value:
            return 0

        height = self.style.get("height")
        if height:
            return int(height)

        font_family = self.style["font-family"]
        font_size = self.style["font-size"]
        font_bold = self.style.get("font-bold", False)
        font_italic = self.style.get("font-italic", False)

        font = FontFactory.get_font(
            font_family, font_size, font_italic, font_bold
        )
        height = font.getsize_multiline(self.value)[1] + 1

        min_height = self.style.get("min-height")
        if min_height and min_height > height:
            height = min_height

        return int(height)

    @property
    def item_pos_x(self):
        pos_x, pos_y, width, height = (
            self.parent.content_pos_info_by_cord(self.row_idx, self.col_idx)
        )
        return pos_x

    @property
    def item_pos_y(self):
        pos_x, pos_y, width, height = (
            self.parent.content_pos_info_by_cord(self.row_idx, self.col_idx)
        )
        return pos_y

    @property
    def value_pos_x(self):
        pos_x, pos_y, width, height = (
            self.parent.content_pos_info_by_cord(self.row_idx, self.col_idx)
        )
        alignment_hor = self.style["alignment-horizontal"].lower()
        if alignment_hor in ["center", "centre"]:
            pos_x += (width - self.value_width()) / 2

        elif alignment_hor == "right":
            pos_x += width - self.value_width()

        else:
            padding = self.style["padding"]
            padding_left = self.style.get("padding-left")
            if padding_left is None:
                padding_left = padding

            pos_x += padding_left

        return int(pos_x)

    @property
    def value_pos_y(self):
        pos_x, pos_y, width, height = (
            self.parent.content_pos_info_by_cord(self.row_idx, self.col_idx)
        )

        alignment_ver = self.style["alignment-vertical"].lower()
        if alignment_ver in ["center", "centre"]:
            pos_y += (height - self.value_height()) / 2

        elif alignment_ver == "bottom":
            pos_y += height - self.value_height()

        else:
            padding = self.style["padding"]
            padding_top = self.style.get("padding-top")
            if padding_top is None:
                padding_top = padding

            pos_y += padding_top

        return int(pos_y)

    def draw(self, image, drawer):
        pos_x, pos_y, width, height = (
            self.parent.content_pos_info_by_cord(self.row_idx, self.col_idx)
        )
        pos_start = (pos_x, pos_y)
        pos_end = (pos_x + width, pos_y + height)
        bg_color = self.style["bg-color"]
        if self.parent.use_alternate_color and (self.row_idx % 2) == 1:
            bg_color = self.style["bg-alter-color"]

        if bg_color and bg_color.lower() != "transparent":
            # TODO border outline styles
            drawer.rectangle(
                (pos_start, pos_end),
                fill=bg_color,
                outline=None
            )

        font_color = self.style["font-color"]
        font_family = self.style["font-family"]
        font_size = self.style["font-size"]
        font_bold = self.style.get("font-bold", False)
        font_italic = self.style.get("font-italic", False)

        font = FontFactory.get_font(
            font_family, font_size, font_italic, font_bold
        )

        alignment_hor = self.style["alignment-horizontal"].lower()
        if alignment_hor == "centre":
            alignment_hor = "center"

        drawer.multiline_text(
            self.value_pos_start,
            self.value,
            font=font,
            fill=font_color,
            align=alignment_hor
        )

139
pype/scripts/slates/slate_base/layer.py
Normal file

@ -0,0 +1,139 @@
from .base import BaseObj


class Layer(BaseObj):
    obj_type = "layer"
    available_parents = ["main_frame", "layer"]

    # Direction can be 0=vertical / 1=horizontal
    def __init__(self, direction=0, *args, **kwargs):
        super(Layer, self).__init__(*args, **kwargs)
        self._direction = direction

    @property
    def item_pos_x(self):
        if self.parent.obj_type == self.obj_type:
            pos_x = self.parent.child_pos_x(self.id)
        elif self.parent.obj_type == "main_frame":
            pos_x = self._pos_x
        else:
            pos_x = self.parent.value_pos_x
        return int(pos_x)

    @property
    def item_pos_y(self):
        if self.parent.obj_type == self.obj_type:
            pos_y = self.parent.child_pos_y(self.id)
        elif self.parent.obj_type == "main_frame":
            pos_y = self._pos_y
        else:
            pos_y = self.parent.value_pos_y

        return int(pos_y)

    @property
    def direction(self):
        if self._direction not in (0, 1):
            self.log.warning((
                "Direction of Layer must be 0 or 1 "
                "(0 is vertical / 1 is horizontal)! Setting to 0."
            ))
            return 0
        return self._direction

    def child_pos_x(self, item_id):
        pos_x = self.value_pos_x
        alignment_hor = self.style["alignment-horizontal"].lower()

        item = None
        for id, _item in self.items.items():
            if item_id == id:
                item = _item
                break

        if self.direction == 1:
            for id, _item in self.items.items():
                if item_id == id:
                    break

                pos_x += _item.width()
                if _item.obj_type not in ["image", "placeholder"]:
                    pos_x += 1

        else:
            if alignment_hor in ["center", "centre"]:
                pos_x += (self.content_width() - item.content_width()) / 2

            elif alignment_hor == "right":
                pos_x += self.content_width() - item.content_width()

            else:
                margin = self.style["margin"]
                margin_left = self.style.get("margin-left") or margin
                pos_x += margin_left

        return int(pos_x)

    def child_pos_y(self, item_id):
        pos_y = self.value_pos_y
        alignment_ver = self.style["alignment-vertical"].lower()

        item = None
        for id, _item in self.items.items():
            if item_id == id:
                item = _item
                break

        if self.direction != 1:
            for id, item in self.items.items():
                if item_id == id:
                    break
                pos_y += item.height()
                if item.obj_type not in ["image", "placeholder"]:
                    pos_y += 1

        else:
            if alignment_ver in ["center", "centre"]:
                pos_y += (self.content_height() - item.content_height()) / 2

            elif alignment_ver == "bottom":
                pos_y += self.content_height() - item.content_height()

        return int(pos_y)

    def value_height(self):
        height = 0
        for item in self.items.values():
            if self.direction == 1:
                if height > item.height():
                    continue
                # times 1 because won't get object pointer but number
                height = item.height()
            else:
                height += item.height()

        # TODO this is not right
        min_height = self.style.get("min-height")
        if min_height and min_height > height:
            return min_height
        return height

    def value_width(self):
        width = 0
        for item in self.items.values():
            if self.direction == 0:
                if width > item.width():
                    continue
                # times 1 because won't get object pointer but number
                width = item.width()
            else:
                width += item.width()

        min_width = self.style.get("min-width")
        if min_width and min_width > width:
            return min_width
        return width

    def draw(self, image, drawer):
        for item in self.items.values():
            item.draw(image, drawer)

152
pype/scripts/slates/slate_base/lib.py
Normal file

@ -0,0 +1,152 @@
import os
import json
import logging
try:
    from queue import Queue
except Exception:
    from Queue import Queue

from .main_frame import MainFrame
from .layer import Layer
from .items import (
    ItemTable, ItemImage, ItemRectangle, ItemPlaceHolder
)

try:
    from pypeapp.config import get_presets
except Exception:
    get_presets = dict

log = logging.getLogger(__name__)


RequiredSlateKeys = ["width", "height", "destination_path"]


# TODO proper documentation
def create_slates(
    fill_data, slate_name=None, slate_data=None, data_output_json=None
):
    """Implementation for command-line execution.

    Data for slates are by default taken from presets, which requires
    entering `slate_name`. If `slate_data` is entered, it is used instead.

    `data_output_json` should be a path to a json file where collected data
    will be stored.
    """
    if slate_data is None and slate_name is None:
        raise TypeError(
            "`create_slates` expects to enter data for slates or name"
            " of slate preset."
        )

    elif slate_data is None:
        slate_presets = (
            get_presets()
            .get("tools", {})
            .get("slates")
        ) or {}
        slate_data = slate_presets.get(slate_name)
        if slate_data is None:
            raise ValueError(
                "Preset name \"{}\" was not found in slate presets.".format(
                    slate_name
                )
            )

    missing_keys = []
    for key in RequiredSlateKeys:
        if key not in slate_data:
            missing_keys.append("`{}`".format(key))

    if missing_keys:
        log.error("Slate data of <{}> miss required keys: {}".format(
            slate_name, ", ".join(missing_keys)
        ))
        return False

    width = slate_data["width"]
    height = slate_data["height"]
    dst_path = slate_data["destination_path"]
    style = slate_data.get("style") or {}

    main = MainFrame(width, height, dst_path, fill_data, style=style)

    load_queue = Queue()
    for item in slate_data["items"]:
        load_queue.put((item, main))

    while not load_queue.empty():
        item_data, parent = load_queue.get()

        item_type = item_data["type"].lower()
        item_style = item_data.get("style", {})
        item_name = item_data.get("name")

        pos_x = item_data.get("pos_x")
        pos_y = item_data.get("pos_y")
        if parent.obj_type != "main_frame":
            if pos_x or pos_y:
                # TODO logging
                log.warning((
                    "You have specified `pos_x` and `pos_y` but they won't"
                    " be used. They apply only if the parent of an item is"
                    " `main_frame`."
                ))
            pos_x = None
            pos_y = None

        kwargs = {
            "parent": parent,
            "style": item_style,
            "name": item_name,
            "pos_x": pos_x,
            "pos_y": pos_y
        }

        if item_type == "layer":
            direction = item_data.get("direction", 0)
            item_obj = Layer(direction, **kwargs)
            for item in item_data.get("items", []):
                load_queue.put((item, item_obj))

        elif item_type == "table":
            use_alternate_color = item_data.get("use_alternate_color", False)
            values = item_data.get("values") or []
            ItemTable(values, use_alternate_color, **kwargs)

        elif item_type == "image":
            path = item_data["path"]
            ItemImage(path, **kwargs)

        elif item_type == "rectangle":
            ItemRectangle(**kwargs)

        elif item_type == "placeholder":
            path = item_data["path"]
            ItemPlaceHolder(path, **kwargs)

        else:
            # TODO logging
            log.warning(
                "Not implemented object type `{}` - skipping".format(item_type)
            )

    main.draw()
    log.debug("Slate creation finished")

    if not data_output_json:
        return

    if not data_output_json.endswith(".json"):
        raise ValueError("Output path must be .json file.")

    data_output_json_dir = os.path.dirname(data_output_json)
    if not os.path.exists(data_output_json_dir):
        log.info("Creating folder \"{}\"".format(data_output_json_dir))
        os.makedirs(data_output_json_dir)

    output_data = main.collect_data()
    with open(data_output_json, "w") as json_file:
        json_file.write(json.dumps(output_data, indent=4))

    log.info("Metadata collected in \"{}\".".format(data_output_json))

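For context, the illustrative shape of the file written to `data_output_json`; the keys follow `MainFrame.collect_data` and `ItemPlaceHolder.collect_data` from this changeset, the values are made up:

# {
#     "width": 1920,
#     "height": 1080,
#     "slate_path": "/path/to/slate.png",
#     "placeholders": [
#         {
#             "pos_x": 1140, "pos_y": 25,
#             "width": 730, "height": 412,
#             "path": "/path/to/thumbnail.png"
#         }
#     ]
# }
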
77
pype/scripts/slates/slate_base/main_frame.py
Normal file

@ -0,0 +1,77 @@
import os
import re
from PIL import Image, ImageDraw

from .base import BaseObj


class MainFrame(BaseObj):

    obj_type = "main_frame"
    available_parents = [None]

    def __init__(
        self, width, height, destination_path, fill_data={}, *args, **kwargs
    ):
        kwargs["parent"] = None
        super(MainFrame, self).__init__(*args, **kwargs)
        self._width = width
        self._height = height
        self.dst_path = destination_path
        self._fill_data = fill_data
        self.fill_data_format()

    def fill_data_format(self):
        if re.match(self.fill_data_regex, self.dst_path):
            self.dst_path = self.dst_path.format(**self.fill_data)

    @property
    def fill_data(self):
        return self._fill_data

    def value_width(self):
        width = 0
        for item in self.items.values():
            width += item.width()
        return width

    def value_height(self):
        height = 0
        for item in self.items.values():
            height += item.height()
        return height

    def width(self):
        return self._width

    def height(self):
        return self._height

    def draw(self, path=None):
        dir_path = os.path.dirname(self.dst_path)
        if not os.path.exists(dir_path):
            os.makedirs(dir_path)

        bg_color = self.style["bg-color"]
        image = Image.new("RGB", (self.width(), self.height()), color=bg_color)
        drawer = ImageDraw.Draw(image)
        for item in self.items.values():
            item.draw(image, drawer)

        image.save(self.dst_path)
        self.reset()

    def collect_data(self):
        output = {}
        output["width"] = self.width()
        output["height"] = self.height()
        output["slate_path"] = self.dst_path

        placeholders = self.find_item(obj_type="placeholder")
        placeholders_data = []
        for placeholder in placeholders:
            placeholders_data.append(placeholder.collect_data())

        output["placeholders"] = placeholders_data

        return output

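A minimal end-to-end sketch of the slate objects above, assuming Pillow is importable; the destination path is a placeholder and styles not given here fall back to default_style.json:

main = MainFrame(1920, 1080, "/tmp/slate.png", fill_data={})
layer = Layer(0, parent=main)  # direction 0 stacks children vertically
ItemRectangle(parent=layer, style={"width": 400, "height": 120})
main.draw()  # renders the slate, saves it to dst_path and resets the tree
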
45
pype/unreal/__init__.py
Normal file

@ -0,0 +1,45 @@
import os
|
||||
import logging
|
||||
|
||||
from avalon import api as avalon
|
||||
from pyblish import api as pyblish
|
||||
|
||||
logger = logging.getLogger("pype.unreal")
|
||||
|
||||
PARENT_DIR = os.path.dirname(__file__)
|
||||
PACKAGE_DIR = os.path.dirname(PARENT_DIR)
|
||||
PLUGINS_DIR = os.path.join(PACKAGE_DIR, "plugins")
|
||||
|
||||
PUBLISH_PATH = os.path.join(PLUGINS_DIR, "unreal", "publish")
|
||||
LOAD_PATH = os.path.join(PLUGINS_DIR, "unreal", "load")
|
||||
CREATE_PATH = os.path.join(PLUGINS_DIR, "unreal", "create")
|
||||
|
||||
|
||||
def install():
|
||||
"""Install Unreal configuration for Avalon."""
|
||||
print("-=" * 40)
|
||||
logo = '''.
|
||||
.
|
||||
____________
|
||||
/ \\ __ \\
|
||||
\\ \\ \\/_\\ \\
|
||||
\\ \\ _____/ ______
|
||||
\\ \\ \\___// \\ \\
|
||||
\\ \\____\\ \\ \\_____\\
|
||||
\\/_____/ \\/______/ PYPE Club .
|
||||
.
|
||||
'''
|
||||
print(logo)
|
||||
print("installing Pype for Unreal ...")
|
||||
print("-=" * 40)
|
||||
logger.info("installing Pype for Unreal")
|
||||
pyblish.register_plugin_path(str(PUBLISH_PATH))
|
||||
avalon.register_plugin_path(avalon.Loader, str(LOAD_PATH))
|
||||
avalon.register_plugin_path(avalon.Creator, str(CREATE_PATH))
|
||||
|
||||
|
||||
def uninstall():
|
||||
"""Uninstall Unreal configuration for Avalon."""
|
||||
pyblish.deregister_plugin_path(str(PUBLISH_PATH))
|
||||
avalon.deregister_plugin_path(avalon.Loader, str(LOAD_PATH))
|
||||
avalon.deregister_plugin_path(avalon.Creator, str(CREATE_PATH))
|
||||
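For context, a host module like this is normally activated through Avalon's `api.install`, which imports the module and calls its `install()` function; the call site below is an illustrative assumption, not part of this diff.

# Hypothetical sketch: registering the Unreal host with Avalon.
from avalon import api
import pype.unreal

api.install(pype.unreal)   # runs pype.unreal.install() shown above
# ... work inside the host ...
api.uninstall()            # runs pype.unreal.uninstall()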
425
pype/unreal/lib.py
Normal file
@ -0,0 +1,425 @@
import sys
import os
import platform
import json
from distutils import dir_util
import subprocess
from pypeapp import config


def get_engine_versions():
    """
    Try to detect the location and versions of installed Unreal Engine.
    The location can be overridden by the `UNREAL_ENGINE_LOCATION`
    environment variable.

    Returns:

        dict: dictionary with version as a key and dir as value.

    Example:

        >>> get_engine_versions()
        {
            "4.23": "C:/Epic Games/UE_4.23",
            "4.24": "C:/Epic Games/UE_4.24"
        }
    """
    engine_locations = {}
    try:
        root, dirs, files = next(os.walk(os.environ["UNREAL_ENGINE_LOCATION"]))

        for dir in dirs:
            if dir.startswith("UE_"):
                ver = dir.split("_")[1]
                engine_locations[ver] = os.path.join(root, dir)
    except KeyError:
        # environment variable not set
        pass
    except (OSError, StopIteration):
        # specified directory doesn't exist (os.walk yields nothing)
        pass

    # if we've got something, terminate the autodetection process
    if engine_locations:
        return engine_locations

    # else kick in platform specific detection
    if platform.system().lower() == "windows":
        return _win_get_engine_versions()
    elif platform.system().lower() == "linux":
        # On Linux there is no installer and getting Unreal Engine involves
        # a git clone, so we depend on `UNREAL_ENGINE_LOCATION`.
        pass
    elif platform.system().lower() == "darwin":
        return _darwin_get_engine_version()

    return {}


def _win_get_engine_versions():
    """
    If engines are installed via the Epic Games Launcher, there is
    `%PROGRAMDATA%/Epic/UnrealEngineLauncher/LauncherInstalled.dat`.
    This JSON file lists installed applications; Unreal Engine entries
    are marked with `"AppName": "UE_X.XX"`, like `UE_4.24`.
    """
    install_json_path = os.path.join(
        os.environ.get("PROGRAMDATA"),
        "Epic",
        "UnrealEngineLauncher",
        "LauncherInstalled.dat",
    )

    return _parse_launcher_locations(install_json_path)


def _darwin_get_engine_version() -> dict:
    """
    It works the same as on Windows, just the JSON file location differs.
    """
    install_json_path = os.path.join(
        os.environ.get("HOME"),
        "Library",
        "Application Support",
        "Epic",
        "UnrealEngineLauncher",
        "LauncherInstalled.dat",
    )

    return _parse_launcher_locations(install_json_path)


def _parse_launcher_locations(install_json_path: str) -> dict:
    """
    Parse engine locations from the launcher JSON file.

    :param install_json_path: path to `LauncherInstalled.dat`
    :type install_json_path: str
    :returns: dict with Unreal Engine versions as keys and paths to
              those engine installations as values.
    :rtype: dict
    """
    engine_locations = {}
    if os.path.isfile(install_json_path):
        with open(install_json_path, "r") as ilf:
            try:
                install_data = json.load(ilf)
            except json.JSONDecodeError:
                raise Exception(
                    "Invalid `LauncherInstalled.dat` file. "
                    "Cannot determine Unreal Engine location."
                )

        for installation in install_data.get("InstallationList", []):
            if installation.get("AppName", "").startswith("UE_"):
                ver = installation.get("AppName").split("_")[1]
                engine_locations[ver] = installation.get("InstallLocation")

    return engine_locations

def create_unreal_project(project_name: str,
                          ue_version: str,
                          pr_dir: str,
                          engine_path: str,
                          dev_mode: bool = False) -> None:
    """
    Create a `.uproject` file at the specified location. As there is no
    known way to create a project via the command line, this is the
    easiest option. An Unreal project file is basically a JSON file. If
    the `AVALON_UNREAL_PLUGIN` environment variable is set, we assume it
    points to the Avalon Integration Plugin, copy its content to the
    project folder and enable the plugin.

    :param project_name: project name
    :type project_name: str
    :param ue_version: Unreal Engine version (like 4.23)
    :type ue_version: str
    :param pr_dir: path to directory where the project will be created
    :type pr_dir: str
    :param engine_path: path to Unreal Engine installation
    :type engine_path: str
    :param dev_mode: flag to trigger a C++ style Unreal project needing
                     Visual Studio and other tools to compile plugins
                     from sources. This is triggered automatically if the
                     `Binaries` directory is not found in the plugin
                     folder, as that indicates a source-only distribution
                     of the plugin. Dev mode is also set by the preset
                     file `unreal/project_setup.json` in **PYPE_CONFIG**.
    :type dev_mode: bool
    :returns: None
    """
    preset = config.get_presets()["unreal"]["project_setup"]

    # defined up front because the UnrealEnginePython branch below needs
    # it even when the Avalon plugin itself is not copied
    plugins_path = os.path.join(pr_dir, "Plugins")

    if os.path.isdir(os.environ.get("AVALON_UNREAL_PLUGIN", "")):
        # copy plugin to the correct path under the project
        avalon_plugin_path = os.path.join(plugins_path, "Avalon")
        if not os.path.isdir(avalon_plugin_path):
            os.makedirs(avalon_plugin_path, exist_ok=True)
            # reset distutils' cache of created directories so that
            # copy_tree works on repeated runs
            dir_util._path_created = {}
            dir_util.copy_tree(os.environ.get("AVALON_UNREAL_PLUGIN"),
                               avalon_plugin_path)

        if (not os.path.isdir(os.path.join(avalon_plugin_path, "Binaries"))
                or not os.path.isdir(
                    os.path.join(avalon_plugin_path, "Intermediate"))):
            dev_mode = True

    # data for project file
    data = {
        "FileVersion": 3,
        "EngineAssociation": ue_version,
        "Category": "",
        "Description": "",
        "Plugins": [
            {"Name": "PythonScriptPlugin", "Enabled": True},
            {"Name": "EditorScriptingUtilities", "Enabled": True},
            {"Name": "Avalon", "Enabled": True}
        ]
    }

    if preset["install_unreal_python_engine"]:
        # If `PYPE_UNREAL_ENGINE_PYTHON_PLUGIN` is set, copy it from
        # there to support offline installation.
        # Otherwise clone UnrealEnginePython to the Plugins directory:
        # https://github.com/20tab/UnrealEnginePython.git
        uep_path = os.path.join(plugins_path, "UnrealEnginePython")
        if os.environ.get("PYPE_UNREAL_ENGINE_PYTHON_PLUGIN"):
            os.makedirs(uep_path, exist_ok=True)
            dir_util._path_created = {}
            dir_util.copy_tree(
                os.environ.get("PYPE_UNREAL_ENGINE_PYTHON_PLUGIN"),
                uep_path)
        else:
            # WARNING: this will trigger dev_mode, because we need to
            # compile this plugin.
            dev_mode = True
            import git
            git.Repo.clone_from(
                "https://github.com/20tab/UnrealEnginePython.git",
                uep_path)

        data["Plugins"].append(
            {"Name": "UnrealEnginePython", "Enabled": True})

        if (not os.path.isdir(os.path.join(uep_path, "Binaries"))
                or not os.path.isdir(
                    os.path.join(uep_path, "Intermediate"))):
            dev_mode = True

    if dev_mode or preset["dev_mode"]:
        # this will add a project module and the source files needed to
        # make it a C++ project and to (hopefully) make Unreal Editor
        # compile all sources at start

        data["Modules"] = [{
            "Name": project_name,
            "Type": "Runtime",
            "LoadingPhase": "Default",
            "AdditionalDependencies": ["Engine"],
        }]

        if preset["install_unreal_python_engine"]:
            # now we need to fix the python path in
            # `UnrealEnginePython.Build.cs` to point to our python
            with open(os.path.join(
                    uep_path, "Source",
                    "UnrealEnginePython",
                    "UnrealEnginePython.Build.cs"), mode="r") as f:
                build_file = f.read()

            fix = build_file.replace(
                'private string pythonHome = "";',
                'private string pythonHome = "{}";'.format(
                    sys.base_prefix.replace("\\", "/")))

            with open(os.path.join(
                    uep_path, "Source",
                    "UnrealEnginePython",
                    "UnrealEnginePython.Build.cs"), mode="w") as f:
                f.write(fix)

    # write project file
    project_file = os.path.join(pr_dir, "{}.uproject".format(project_name))
    with open(project_file, mode="w") as pf:
        json.dump(data, pf, indent=4)

    # ensure we have PySide installed in engine
    # TODO: make it work for other platforms 🍎 🐧
    if platform.system().lower() == "windows":
        python_path = os.path.join(engine_path, "Engine", "Binaries",
                                   "ThirdParty", "Python", "Win64",
                                   "python.exe")

        subprocess.run([python_path, "-m",
                        "pip", "install", "pyside"])

    if dev_mode or preset["dev_mode"]:
        _prepare_cpp_project(project_file, engine_path)


def _prepare_cpp_project(project_file: str, engine_path: str) -> None:
    """
    Add the source files needed for the project to be rebuilt along
    with the Avalon integration plugin.

    There seems to be no automated way to do this from the command line,
    though a generator might be able to create at least the target and
    build files. This needs more research, as manually writing those
    files is rather hackish. :skull_and_crossbones:

    :param project_file: path to the .uproject file
    :type project_file: str
    :param engine_path: path to the Unreal Engine associated with project
    :type engine_path: str
    """

    project_name = os.path.splitext(os.path.basename(project_file))[0]
    project_dir = os.path.dirname(project_file)
    targets_dir = os.path.join(project_dir, "Source")
    sources_dir = os.path.join(targets_dir, project_name)

    os.makedirs(sources_dir, exist_ok=True)
    os.makedirs(os.path.join(project_dir, "Content"), exist_ok=True)

    module_target = '''
using UnrealBuildTool;
using System.Collections.Generic;

public class {0}Target : TargetRules
{{
    public {0}Target( TargetInfo Target) : base(Target)
    {{
        Type = TargetType.Game;
        ExtraModuleNames.AddRange( new string[] {{ "{0}" }} );
    }}
}}
'''.format(project_name)

    editor_module_target = '''
using UnrealBuildTool;
using System.Collections.Generic;

public class {0}EditorTarget : TargetRules
{{
    public {0}EditorTarget( TargetInfo Target) : base(Target)
    {{
        Type = TargetType.Editor;

        ExtraModuleNames.AddRange( new string[] {{ "{0}" }} );
    }}
}}
'''.format(project_name)

    module_build = '''
using UnrealBuildTool;
public class {0} : ModuleRules
{{
    public {0}(ReadOnlyTargetRules Target) : base(Target)
    {{
        PCHUsage = PCHUsageMode.UseExplicitOrSharedPCHs;
        PublicDependencyModuleNames.AddRange(new string[] {{ "Core",
            "CoreUObject", "Engine", "InputCore" }});
        PrivateDependencyModuleNames.AddRange(new string[] {{ }});
    }}
}}
'''.format(project_name)

    module_cpp = '''
#include "{0}.h"
#include "Modules/ModuleManager.h"

IMPLEMENT_PRIMARY_GAME_MODULE( FDefaultGameModuleImpl, {0}, "{0}" );
'''.format(project_name)

    module_header = '''
#pragma once
#include "CoreMinimal.h"
'''

    game_mode_cpp = '''
#include "{0}GameModeBase.h"
'''.format(project_name)

    game_mode_h = '''
#pragma once

#include "CoreMinimal.h"
#include "GameFramework/GameModeBase.h"
#include "{0}GameModeBase.generated.h"

UCLASS()
class {1}_API A{0}GameModeBase : public AGameModeBase
{{
    GENERATED_BODY()
}};
'''.format(project_name, project_name.upper())

    with open(os.path.join(
            targets_dir, f"{project_name}.Target.cs"), mode="w") as f:
        f.write(module_target)

    with open(os.path.join(
            targets_dir, f"{project_name}Editor.Target.cs"), mode="w") as f:
        f.write(editor_module_target)

    with open(os.path.join(
            sources_dir, f"{project_name}.Build.cs"), mode="w") as f:
        f.write(module_build)

    with open(os.path.join(
            sources_dir, f"{project_name}.cpp"), mode="w") as f:
        f.write(module_cpp)

    with open(os.path.join(
            sources_dir, f"{project_name}.h"), mode="w") as f:
        f.write(module_header)

    with open(os.path.join(
            sources_dir, f"{project_name}GameModeBase.cpp"), mode="w") as f:
        f.write(game_mode_cpp)

    with open(os.path.join(
            sources_dir, f"{project_name}GameModeBase.h"), mode="w") as f:
        f.write(game_mode_h)

    if platform.system().lower() == "windows":
        u_build_tool = (f"{engine_path}/Engine/Binaries/DotNET/"
                        "UnrealBuildTool.exe")
        u_header_tool = (f"{engine_path}/Engine/Binaries/Win64/"
                         "UnrealHeaderTool.exe")
    elif platform.system().lower() == "linux":
        # WARNING: there is no UnrealBuildTool on Linux?
        u_build_tool = ""
        u_header_tool = ""
    elif platform.system().lower() == "darwin":
        # WARNING: there is no UnrealBuildTool on Mac?
        u_build_tool = ""
        u_header_tool = ""

    u_build_tool = u_build_tool.replace("\\", "/")
    u_header_tool = u_header_tool.replace("\\", "/")

    command1 = [u_build_tool, "-projectfiles", f"-project={project_file}",
                "-progress"]

    subprocess.run(command1)

    command2 = [u_build_tool, f"-ModuleWithSuffix={project_name},3555",
                "Win64", "Development", "-TargetType=Editor",
                f'-Project="{project_file}"', f'"{project_file}"',
                "-IgnoreJunk"]

    subprocess.run(command2)

    """
    uhtmanifest = os.path.join(os.path.dirname(project_file),
                               f"{project_name}.uhtmanifest")

    command3 = [u_header_tool, f'"{project_file}"', f'"{uhtmanifest}"',
                "-Unattended", "-WarningsAsErrors", "-installed"]

    subprocess.run(command3)
    """
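Putting the module together, a caller would first detect the installed engines and then bootstrap a project. A hedged sketch follows; the chosen version, paths and project name are assumptions, and `config.get_presets()` must provide the `unreal/project_setup` preset for `create_unreal_project` to work.

# Hypothetical sketch: detect engines, then create a project.
from pype.unreal.lib import get_engine_versions, create_unreal_project

engines = get_engine_versions()
# e.g. {"4.24": "C:/Epic Games/UE_4.24"}; on Windows/Mac this is parsed
# from LauncherInstalled.dat, whose relevant shape is roughly:
# {"InstallationList": [{"AppName": "UE_4.24",
#                        "InstallLocation": "C:/Epic Games/UE_4.24"}]}
if "4.24" in engines:
    create_unreal_project(
        project_name="PypeDemo",
        ue_version="4.24",
        pr_dir="C:/projects/PypeDemo",
        engine_path=engines["4.24"],
        dev_mode=False,
    )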
11
pype/unreal/plugin.py
Normal file
@ -0,0 +1,11 @@
from avalon import api


class Creator(api.Creator):
    """This serves as skeleton for future Pype specific functionality"""
    pass


class Loader(api.Loader):
    """This serves as skeleton for future Pype specific functionality"""
    pass
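A concrete plugin would subclass these skeletons exactly as it would the Avalon base classes; the loader below is a made-up illustration, not part of the commit.

# Hypothetical sketch: an Unreal loader built on the Pype skeleton class.
from pype.unreal.plugin import Loader


class StaticMeshLoader(Loader):
    """Illustrative only; a real loader implements Unreal-side import."""
    families = ["model"]
    representations = ["fbx"]

    def load(self, context, name=None, namespace=None, options=None):
        # Unreal-specific import logic would go here.
        pass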
BIN
res/app_icons/ue4.png
Normal file
Binary file not shown.
After Width: | Height: | Size: 45 KiB
@ -4,8 +4,8 @@ import KnobScripter

from pype.nuke.lib import (
    writes_version_sync,
    onScriptLoad,
    checkInventoryVersions
    on_script_load,
    check_inventory_versions
)

import nuke

@ -15,9 +15,9 @@ log = Logger().get_logger(__name__, "nuke")


# nuke.addOnScriptSave(writes_version_sync)
nuke.addOnScriptSave(onScriptLoad)
nuke.addOnScriptLoad(checkInventoryVersions)
nuke.addOnScriptSave(checkInventoryVersions)
nuke.addOnScriptSave(on_script_load)
nuke.addOnScriptLoad(check_inventory_versions)
nuke.addOnScriptSave(check_inventory_versions)
# nuke.addOnScriptSave(writes_version_sync)

log.info('Automatic syncing of write file knob to script version')