Merge branch 'develop' into 3.0/bugfix/hiero-functionality-issues

This commit is contained in:
Jakub Jezek 2021-04-06 13:23:14 +02:00
commit e8d00b5f3c
No known key found for this signature in database
GPG key ID: C4B96E101D2A47F3
1716 changed files with 24822 additions and 58435 deletions

150
openpype/__init__.py Normal file
View file

@@ -0,0 +1,150 @@
# -*- coding: utf-8 -*-
"""Pype module."""
import os
import platform
import functools
import logging
from .settings import get_project_settings
from .lib import (
Anatomy,
filter_pyblish_plugins,
change_timer_to_current_context
)
pyblish = avalon = _original_discover = None
log = logging.getLogger(__name__)
PACKAGE_DIR = os.path.dirname(os.path.abspath(__file__))
PLUGINS_DIR = os.path.join(PACKAGE_DIR, "plugins")
# Global plugin paths
PUBLISH_PATH = os.path.join(PLUGINS_DIR, "publish")
LOAD_PATH = os.path.join(PLUGINS_DIR, "load")
def import_wrapper(func):
"""Wrap module imports to specific functions."""
@functools.wraps(func)
def decorated(*args, **kwargs):
global pyblish
global avalon
global _original_discover
if pyblish is None:
from pyblish import api as pyblish
from avalon import api as avalon
# we are monkey patching `avalon.api.discover()` to allow us to
# load plugin presets on plugins being discovered by avalon.
# A little bit of hacking, but it allows us to add our own features
# without the need to modify upstream code.
_original_discover = avalon.discover
return func(*args, **kwargs)
return decorated
@import_wrapper
def patched_discover(superclass):
"""Patch `avalon.api.discover()`.
Monkey patched version of :func:`avalon.api.discover()`. It allows
us to load presets on plugins being discovered.
"""
# run original discover and get plugins
plugins = _original_discover(superclass)
# determine host application to use for finding presets
if avalon.registered_host() is None:
return plugins
host = avalon.registered_host().__name__.split(".")[-1]
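# e.g. a registered host module "avalon.nuke" yields host name "nuke"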
# map plugin superclass to preset type. Currently supported are load and
# create (avalon.api.Loader and avalon.api.Creator)
plugin_type = "undefined"
if superclass.__name__.split(".")[-1] == "Loader":
plugin_type = "load"
elif superclass.__name__.split(".")[-1] == "Creator":
plugin_type = "create"
print(">>> Finding presets for {}:{} ...".format(host, plugin_type))
try:
settings = (
get_project_settings(os.environ['AVALON_PROJECT'])
[host][plugin_type]
)
except KeyError:
print("*** no presets found.")
else:
for plugin in plugins:
if plugin.__name__ in settings:
print(">>> We have preset for {}".format(plugin.__name__))
for option, value in settings[plugin.__name__].items():
if option == "enabled" and value is False:
setattr(plugin, "active", False)
print(" - is disabled by preset")
else:
setattr(plugin, option, value)
print(" - setting `{}`: `{}`".format(option, value))
return plugins
@import_wrapper
def install():
"""Install Pype to Avalon."""
log.info("Registering global plug-ins..")
pyblish.register_plugin_path(PUBLISH_PATH)
pyblish.register_discovery_filter(filter_pyblish_plugins)
avalon.register_plugin_path(avalon.Loader, LOAD_PATH)
project_name = os.environ.get("AVALON_PROJECT")
# Register studio specific plugins
if project_name:
anatomy = Anatomy(project_name)
anatomy.set_root_environments()
avalon.register_root(anatomy.roots)
project_settings = get_project_settings(project_name)
platform_name = platform.system().lower()
project_plugins = (
project_settings
.get("global", {})
.get("project_plugins", {})
.get(platform_name)
) or []
for path in project_plugins:
if not path or not os.path.exists(path):
continue
pyblish.register_plugin_path(path)
avalon.register_plugin_path(avalon.Loader, path)
avalon.register_plugin_path(avalon.Creator, path)
avalon.register_plugin_path(avalon.InventoryAction, path)
# apply monkey patched discover to original one
log.info("Patching discovery")
avalon.discover = patched_discover
avalon.on("taskChanged", _on_task_change)
def _on_task_change(*args):
change_timer_to_current_context()
@import_wrapper
def uninstall():
"""Uninstall Pype from Avalon."""
log.info("Deregistering global plug-ins..")
pyblish.deregister_plugin_path(PUBLISH_PATH)
pyblish.deregister_discovery_filter(filter_pyblish_plugins)
avalon.deregister_plugin_path(avalon.Loader, LOAD_PATH)
log.info("Global plug-ins unregistred")
# restore original discover
avalon.discover = _original_discover

14
openpype/__main__.py Normal file
View file

@@ -0,0 +1,14 @@
# -*- coding: utf-8 -*-
"""Main entry point for Pype command."""
from . import cli
import sys
import traceback
if __name__ == '__main__':
try:
cli.main(obj={}, prog_name="pype")
except Exception:
exc_info = sys.exc_info()
print("!!! Pype crashed:")
traceback.print_exception(*exc_info)
sys.exit(1)

90
openpype/action.py Normal file
View file

@@ -0,0 +1,90 @@
# absolute_import is needed to counter the "module has no cmds" error in Maya
from __future__ import absolute_import
import pyblish.api
def get_errored_instances_from_context(context):
instances = list()
for result in context.data["results"]:
if result["instance"] is None:
# When instance is None we are on the "context" result
continue
if result["error"]:
instances.append(result["instance"])
return instances
def get_errored_plugins_from_data(context):
"""Get all failed validation plugins
Args:
context (object):
Returns:
list of plugins which failed during validation
"""
plugins = list()
results = context.data.get("results", [])
for result in results:
if result["success"] is True:
continue
plugins.append(result["plugin"])
return plugins
class RepairAction(pyblish.api.Action):
"""Repairs the action
To process the repairing this requires a static `repair(instance)` method
is available on the plugin.
"""
label = "Repair"
on = "failed" # This action is only available on a failed plug-in
icon = "wrench" # Icon from Awesome Icon
def process(self, context, plugin):
if not hasattr(plugin, "repair"):
raise RuntimeError("Plug-in does not have repair method.")
# Get the errored instances
self.log.info("Finding failed instances..")
errored_instances = get_errored_instances_from_context(context)
# Apply pyblish.logic to get the instances for the plug-in
instances = pyblish.api.instances_by_plugin(errored_instances, plugin)
for instance in instances:
plugin.repair(instance)
class RepairContextAction(pyblish.api.Action):
"""Repairs the action
To process the repairing this requires a static `repair(instance)` method
is available on the plugin.
"""
label = "Repair Context"
on = "failed" # This action is only available on a failed plug-in
def process(self, context, plugin):
if not hasattr(plugin, "repair"):
raise RuntimeError("Plug-in does not have repair method.")
# Get the errored plugins
self.log.info("Finding failed plugins..")
errored_plugins = get_errored_plugins_from_data(context)
# Check whether this plug-in is among the errored plugins
if plugin in errored_plugins:
self.log.info("Attempting fix ...")
plugin.repair(context)

124
openpype/api.py Normal file
View file

@@ -0,0 +1,124 @@
from .settings import (
get_system_settings,
get_project_settings,
get_current_project_settings,
get_anatomy_settings,
get_environments,
SystemSettings,
ProjectSettings
)
from .lib import (
PypeLogger,
Anatomy,
config,
execute,
run_subprocess,
version_up,
get_asset,
get_hierarchy,
get_version_from_path,
get_last_version_from_path,
get_app_environments_for_context,
source_hash,
get_latest_version,
get_global_environments,
get_local_site_id,
change_openpype_mongo_url
)
from .lib.mongo import (
decompose_url,
compose_url,
get_default_components
)
from .lib.applications import (
ApplicationManager
)
from .lib.avalon_context import (
BuildWorkfile
)
from . import resources
from .plugin import (
PypeCreatorMixin,
Creator,
Extractor,
ValidatePipelineOrder,
ValidateContentsOrder,
ValidateSceneOrder,
ValidateMeshOrder,
ValidationException
)
# temporary fix, might
from .action import (
get_errored_instances_from_context,
RepairAction,
RepairContextAction
)
# for backward compatibility with Pype 2
Logger = PypeLogger
__all__ = [
"get_system_settings",
"get_project_settings",
"get_current_project_settings",
"get_anatomy_settings",
"get_environments",
"SystemSettings",
"PypeLogger",
"Logger",
"Anatomy",
"config",
"execute",
"decompose_url",
"compose_url",
"get_default_components",
"ApplicationManager",
"BuildWorkfile",
# Resources
"resources",
# Pype creator mixin
"PypeCreatorMixin",
"Creator",
# plugin classes
"Extractor",
# ordering
"ValidatePipelineOrder",
"ValidateContentsOrder",
"ValidateSceneOrder",
"ValidateMeshOrder",
# action
"get_errored_instances_from_context",
"RepairAction",
"RepairContextAction",
"ValidationException",
# get contextual data
"version_up",
"get_hierarchy",
"get_asset",
"get_version_from_path",
"get_last_version_from_path",
"get_app_environments_for_context",
"source_hash",
"run_subprocess",
"get_latest_version",
"get_global_environments",
"get_local_site_id",
"change_openpype_mongo_url"
]

257
openpype/cli.py Normal file
View file

@@ -0,0 +1,257 @@
# -*- coding: utf-8 -*-
"""Package for handling pype command line arguments."""
import os
import sys
import click
# import sys
from .pype_commands import PypeCommands
@click.group(invoke_without_command=True)
@click.pass_context
@click.option("--use-version",
expose_value=False, help="use specified version")
@click.option("--use-staging", is_flag=True,
expose_value=False, help="use staging variants")
def main(ctx):
"""Pype is main command serving as entry point to pipeline system.
It wraps different commands together.
"""
if ctx.invoked_subcommand is None:
ctx.invoke(tray)
@main.command()
@click.option("-d", "--dev", is_flag=True, help="Settings in Dev mode")
def settings(dev=False):
"""Show Pype Settings UI."""
PypeCommands().launch_settings_gui(dev)
@main.command()
def standalonepublisher():
"""Show Pype Standalone publisher UI."""
PypeCommands().launch_standalone_publisher()
@main.command()
@click.option("-d", "--debug",
is_flag=True, help=("Run pype tray in debug mode"))
def tray(debug=False):
"""Launch pype tray.
The default action of the pype command is to launch the tray widget to
control basic aspects of pype. See documentation for more information.
Running pype with `--debug` will print a lot of information useful for
debugging to the console.
"""
PypeCommands().launch_tray(debug)
@main.command()
@click.option("-d", "--debug", is_flag=True, help="Print debug messages")
@click.option("--ftrack-url", envvar="FTRACK_SERVER",
help="Ftrack server url")
@click.option("--ftrack-user", envvar="FTRACK_API_USER",
help="Ftrack api user")
@click.option("--ftrack-api-key", envvar="FTRACK_API_KEY",
help="Ftrack api key")
@click.option("--ftrack-events-path",
envvar="FTRACK_EVENTS_PATH",
help=("path to ftrack event handlers"))
@click.option("--no-stored-credentials", is_flag=True,
help="don't use stored credentials")
@click.option("--store-credentials", is_flag=True,
help="store provided credentials")
@click.option("--legacy", is_flag=True,
help="run event server without mongo storing")
@click.option("--clockify-api-key", envvar="CLOCKIFY_API_KEY",
help="Clockify API key.")
@click.option("--clockify-workspace", envvar="CLOCKIFY_WORKSPACE",
help="Clockify workspace")
def eventserver(debug,
ftrack_url,
ftrack_user,
ftrack_api_key,
ftrack_events_path,
no_stored_credentials,
store_credentials,
legacy,
clockify_api_key,
clockify_workspace):
"""Launch ftrack event server.
This should ideally be run by a system service (such as systemd or upstart
on Linux, or a Windows service).
You have to either set the proper environment variables to provide the URL
and credentials, or use the options to specify them. If you use
--store-credentials, the provided credentials will be stored for later use.
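Example (illustrative values):
pype eventserver --ftrack-url https://studio.ftrackapp.com
--ftrack-user admin --ftrack-api-key xxxx --store-credentials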
"""
if debug:
os.environ['OPENPYPE_DEBUG'] = "3"
PypeCommands().launch_eventservercli(
ftrack_url,
ftrack_user,
ftrack_api_key,
ftrack_events_path,
no_stored_credentials,
store_credentials,
legacy,
clockify_api_key,
clockify_workspace
)
@main.command()
@click.argument("output_json_path")
@click.option("--project", help="Project name", default=None)
@click.option("--asset", help="Asset name", default=None)
@click.option("--task", help="Task name", default=None)
@click.option("--app", help="Application name", default=None)
def extractenvironments(output_json_path, project, asset, task, app):
"""Extract environment variables for entered context to a json file.
The entered output filepath will be created if it does not exist.
All context options must be passed, otherwise only pype's global
environments will be extracted.
Context options are "project", "asset", "task", "app"
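Example (illustrative values):
pype extractenvironments /tmp/env.json --project Foo --asset sh010
--task compositing --app maya_2020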
"""
PypeCommands.extractenvironments(
output_json_path, project, asset, task, app
)
@main.command()
@click.argument("paths", nargs=-1)
@click.option("-d", "--debug", is_flag=True, help="Print debug messages")
def publish(debug, paths):
"""Start CLI publishing.
Publish collects json files from the paths provided as arguments.
More than one path is allowed.
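Example (illustrative path):
pype publish /path/to/publish_metadata.json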
"""
if debug:
os.environ['OPENPYPE_DEBUG'] = '3'
PypeCommands.publish(list(paths))
@main.command()
@click.option("-d", "--debug", is_flag=True, help="Print debug messages")
@click.option("-p", "--project", required=True,
help="name of project asset is under")
@click.option("-a", "--asset", required=True,
help="name of asset to which we want to copy textures")
@click.option("--path", required=True,
help="path where textures are found",
type=click.Path(exists=True))
def texturecopy(debug, project, asset, path):
"""Copy specified textures to provided asset path.
It validates that the project and asset exist. Then it uses speedcopy to
copy all textures found in all directories under --path to the destination
folder, determined by the texture template in anatomy. It will use the
source filename and automatically raise the version number of the directory.
Results are copied without the directory structure, so they end up flat.
Nothing is written to the database.
"""
if debug:
os.environ['OPENPYPE_DEBUG'] = '3'
PypeCommands().texture_copy(project, asset, path)
@main.command(context_settings={"ignore_unknown_options": True})
@click.option("--app", help="Registered application name")
@click.option("--project", help="Project name",
default=lambda: os.environ.get('AVALON_PROJECT', ''))
@click.option("--asset", help="Asset name",
default=lambda: os.environ.get('AVALON_ASSET', ''))
@click.option("--task", help="Task name",
default=lambda: os.environ.get('AVALON_TASK', ''))
@click.option("--tools", help="List of tools to add")
@click.option("--user", help="Pype user name",
default=lambda: os.environ.get('OPENPYPE_USERNAME', ''))
@click.option("-fs",
"--ftrack-server",
help="Registered application name",
default=lambda: os.environ.get('FTRACK_SERVER', ''))
@click.option("-fu",
"--ftrack-user",
help="Registered application name",
default=lambda: os.environ.get('FTRACK_API_USER', ''))
@click.option("-fk",
"--ftrack-key",
help="Registered application name",
default=lambda: os.environ.get('FTRACK_API_KEY', ''))
@click.argument('arguments', nargs=-1)
def launch(app, project, asset, task,
ftrack_server, ftrack_user, ftrack_key, tools, arguments, user):
"""Launch registered application name in Pype context.
You can define applications in pype-config toml files. Project, asset name
and task name must be provided (even if they are not used by app itself).
Optionally you can specify ftrack credentials if needed.
ARGUMENTS are passed to launched application.
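Example (illustrative values):
pype launch --app maya_2020 --project Foo --asset sh010 --task animation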
"""
# TODO: this needs to switch for Settings
if ftrack_server:
os.environ["FTRACK_SERVER"] = ftrack_server
if ftrack_user:
os.environ["FTRACK_API_USER"] = ftrack_user
if ftrack_key:
os.environ["FTRACK_API_KEY"] = ftrack_key
if user:
os.environ["OPENPYPE_USERNAME"] = user
# test required
if not project or not asset or not task:
print("!!! Missing required arguments")
return
PypeCommands().run_application(app, project, asset, task, tools, arguments)
@main.command()
@click.option("-p", "--path", help="Path to zip file", default=None)
def generate_zip(path):
"""Generate Pype zip from current sources.
If PATH is not provided, the zip file will be created in the user data dir.
"""
PypeCommands().generate_zip(path)
@main.command(
context_settings=dict(
ignore_unknown_options=True,
allow_extra_args=True))
@click.argument("script", required=True, type=click.Path(exists=True))
def run(script):
"""Run python script in Pype context."""
import runpy
if not script:
print("Error: missing path to script file.")
else:
args = sys.argv
args.remove("run")
args.remove(script)
sys.argv = args
args_string = " ".join(args[1:])
print(f"... running: {script} {args_string}")
runpy.run_path(script, run_name="__main__")

View file

@@ -0,0 +1,29 @@
import os
from openpype.lib import PreLaunchHook
class AddLastWorkfileToLaunchArgs(PreLaunchHook):
"""Add last workfile path to launch arguments.
This is not possible to do the same way for all applications.
"""
order = 0
app_groups = ["maya", "nuke", "nukex", "hiero", "nukestudio"]
def execute(self):
if not self.data.get("start_last_workfile"):
self.log.info("It is set to not start last workfile on start.")
return
last_workfile = self.data.get("last_workfile_path")
if not last_workfile:
self.log.warning("Last workfile was not collected.")
return
if not os.path.exists(last_workfile):
self.log.info("Current context does not have any workfile yet.")
return
# Add path to workfile to arguments
self.launch_context.launch_args.append(last_workfile)

View file

@@ -0,0 +1,93 @@
from openpype.api import Anatomy
from openpype.lib import (
PreLaunchHook,
EnvironmentPrepData,
prepare_host_environments,
prepare_context_environments
)
import avalon.api
class GlobalHostDataHook(PreLaunchHook):
order = -100
def execute(self):
"""Prepare global objects to `data` that will be used for sure."""
if not self.application.is_host:
self.log.info(
"Skipped hook {}. Application is not marked as host.".format(
self.__class__.__name__
)
)
return
self.prepare_global_data()
if not self.data.get("asset_doc"):
return
app = self.launch_context.application
temp_data = EnvironmentPrepData({
"project_name": self.data["project_name"],
"asset_name": self.data["asset_name"],
"task_name": self.data["task_name"],
"app": app,
"dbcon": self.data["dbcon"],
"project_doc": self.data["project_doc"],
"asset_doc": self.data["asset_doc"],
"anatomy": self.data["anatomy"],
"env": self.launch_context.env,
"log": self.log
})
prepare_host_environments(temp_data)
prepare_context_environments(temp_data)
temp_data.pop("log")
self.data.update(temp_data)
def prepare_global_data(self):
"""Prepare global objects to `data` that will be used for sure."""
# Mongo documents
project_name = self.data.get("project_name")
if not project_name:
self.log.info(
"Skipping global data preparation."
" Key `project_name` was not found in launch context."
)
return
self.log.debug("Project name is set to \"{}\"".format(project_name))
# Anatomy
self.data["anatomy"] = Anatomy(project_name)
# Mongo connection
dbcon = avalon.api.AvalonMongoDB()
dbcon.Session["AVALON_PROJECT"] = project_name
dbcon.install()
self.data["dbcon"] = dbcon
# Project document
project_doc = dbcon.find_one({"type": "project"})
self.data["project_doc"] = project_doc
asset_name = self.data.get("asset_name")
if not asset_name:
self.log.warning(
"Asset name was not set. Skipping asset document query."
)
return
asset_doc = dbcon.find_one({
"type": "asset",
"name": asset_name
})
self.data["asset_doc"] = asset_doc

View file

@@ -0,0 +1,47 @@
import os
from openpype.lib import (
PreLaunchHook,
get_pype_execute_args
)
from openpype import PACKAGE_DIR as OPENPYPE_DIR
class NonPythonHostHook(PreLaunchHook):
"""Launch arguments preparation.
Non-python host implementations do not launch the host directly but use a
python script which launches the host. For these cases it is necessary to
prepend the python (or openpype) executable and script path before the application's.
"""
app_groups = ["harmony", "photoshop", "aftereffects"]
def execute(self):
# Pop executable
executable_path = self.launch_context.launch_args.pop(0)
# Pop rest of launch arguments - There should not be other arguments!
remainders = []
while self.launch_context.launch_args:
remainders.append(self.launch_context.launch_args.pop(0))
script_path = os.path.join(
OPENPYPE_DIR,
"scripts",
"non_python_host_launch.py"
)
new_launch_args = get_pype_execute_args(
"run", script_path, executable_path
)
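# Resulting arguments look like (illustrative):
# ["openpype", "run", ".../non_python_host_launch.py", "C:/.../Photoshop.exe"]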
# Add workfile path if exists
workfile_path = self.data["last_workfile_path"]
if os.path.exists(workfile_path):
new_launch_args.append(workfile_path)
# Append as a whole list as these arguments should not be separated
self.launch_context.launch_args.append(new_launch_args)
if remainders:
self.launch_context.launch_args.extend(remainders)

View file

@@ -0,0 +1,34 @@
import os
from openpype.lib import PreLaunchHook
class PrePython2Vendor(PreLaunchHook):
"""Prepend python 2 dependencies for py2 hosts."""
# WARNING This hook will probably be deprecated in OpenPype 3 - kept for testing
order = 10
app_groups = ["hiero", "nuke", "nukex"]
def execute(self):
# Prepare vendor dir path
self.log.info("adding global python 2 vendor")
pype_root = os.getenv("OPENPYPE_ROOT")
python_2_vendor = os.path.join(
pype_root,
"openpype",
"vendor",
"python",
"python_2"
)
# Add Python 2 modules
python_paths = [
python_2_vendor
]
# Load PYTHONPATH from current launch context
python_path = self.launch_context.env.get("PYTHONPATH")
if python_path:
python_paths.append(python_path)
# Set new PYTHONPATH to launch context environments
self.launch_context.env["PYTHONPATH"] = os.pathsep.join(python_paths)

View file

@@ -0,0 +1,24 @@
import os
from openpype.lib import PreLaunchHook
class LaunchWithWindowsShell(PreLaunchHook):
"""Add shell command before executable.
Some hosts have issues when they are launched directly from python. In that
case it is possible to prepend a shell executable which will trigger the
process instead.
"""
order = 10
app_groups = ["resolve", "nuke", "nukex", "hiero", "nukestudio"]
platforms = ["windows"]
def execute(self):
# Get comspec which is cmd.exe in most cases.
comspec = os.environ.get("COMSPEC", "cmd.exe")
# Add comspec to arguments list with "/c" (run the command, then exit)
new_args = [comspec, "/c"]
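# e.g. ["cmd.exe", "/c", "C:/Program Files/Nuke12.2/Nuke12.2.exe", ...] (illustrative)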
new_args.extend(self.launch_context.launch_args)
# Replace launch args with new one
self.launch_context.launch_args = new_args

View file

@@ -0,0 +1,83 @@
import os
import sys
import logging
from avalon import io
from avalon import api as avalon
from avalon.vendor import Qt
from openpype import lib
import pyblish.api as pyblish
import openpype.hosts.aftereffects
log = logging.getLogger("pype.hosts.aftereffects")
HOST_DIR = os.path.dirname(os.path.abspath(openpype.hosts.aftereffects.__file__))
PLUGINS_DIR = os.path.join(HOST_DIR, "plugins")
PUBLISH_PATH = os.path.join(PLUGINS_DIR, "publish")
LOAD_PATH = os.path.join(PLUGINS_DIR, "load")
CREATE_PATH = os.path.join(PLUGINS_DIR, "create")
INVENTORY_PATH = os.path.join(PLUGINS_DIR, "inventory")
def check_inventory():
if not lib.any_outdated():
return
host = pyblish.registered_host()
outdated_containers = []
for container in host.ls():
representation = container['representation']
representation_doc = io.find_one(
{
"_id": io.ObjectId(representation),
"type": "representation"
},
projection={"parent": True}
)
if representation_doc and not lib.is_latest(representation_doc):
outdated_containers.append(container)
# Warn about outdated containers.
print("Starting new QApplication..")
app = Qt.QtWidgets.QApplication(sys.argv)
message_box = Qt.QtWidgets.QMessageBox()
message_box.setIcon(Qt.QtWidgets.QMessageBox.Warning)
msg = "There are outdated containers in the scene."
message_box.setText(msg)
message_box.exec_()
# Garbage collect QApplication.
del app
def application_launch():
check_inventory()
def install():
print("Installing Pype config...")
pyblish.register_plugin_path(PUBLISH_PATH)
avalon.register_plugin_path(avalon.Loader, LOAD_PATH)
avalon.register_plugin_path(avalon.Creator, CREATE_PATH)
log.info(PUBLISH_PATH)
pyblish.register_callback(
"instanceToggled", on_pyblish_instance_toggled
)
avalon.on("application.launched", application_launch)
def uninstall():
pyblish.deregister_plugin_path(PUBLISH_PATH)
avalon.deregister_plugin_path(avalon.Loader, LOAD_PATH)
avalon.deregister_plugin_path(avalon.Creator, CREATE_PATH)
def on_pyblish_instance_toggled(instance, old_value, new_value):
"""Toggle layer visibility on instance toggles."""
instance[0].Visible = new_value

View file

@@ -0,0 +1,58 @@
import openpype.api
from avalon.vendor import Qt
from avalon import aftereffects
import logging
log = logging.getLogger(__name__)
class CreateRender(openpype.api.Creator):
"""Render folder for publish.
Creates subsets in the format 'familyTaskSubsetname',
e.g. 'renderCompositingMain'.
Creates only a single instance from a composition at a time.
"""
name = "renderDefault"
label = "Render on Farm"
family = "render"
def process(self):
stub = aftereffects.stub() # only after After Effects is up
if (self.options or {}).get("useSelection"):
items = stub.get_selected_items(comps=True,
folders=False,
footages=False)
if len(items) > 1:
self._show_msg("Please select only single composition at time.")
return False
if not items:
self._show_msg("Nothing to create. Select composition " +
"if 'useSelection' or create at least " +
"one composition.")
return False
existing_subsets = [instance['subset'].lower()
for instance in aftereffects.list_instances()]
item = items.pop()
if self.name.lower() in existing_subsets:
txt = "Instance with name \"{}\" already exists.".format(self.name)
self._show_msg(txt)
return False
self.data["members"] = [item.id]
self.data["uuid"] = item.id # for SubsetManager
stub.imprint(item, self.data)
stub.set_label_color(item.id, 14) # Cyan options 0 - 16
stub.rename_item(item.id, stub.PUBLISH_ICON + self.data["subset"])
def _show_msg(self, txt):
msg = Qt.QtWidgets.QMessageBox()
msg.setIcon(Qt.QtWidgets.QMessageBox.Warning)
msg.setText(txt)
msg.exec_()

View file

@@ -0,0 +1,100 @@
import re
from avalon import api, aftereffects
from openpype.lib import get_background_layers, get_unique_layer_name
stub = aftereffects.stub()
class BackgroundLoader(api.Loader):
"""
Load images from the Background family.
Creates a separate folder for each background, with all images imported
from the background json AND an automatically created composition with
layers, one layer per image.
For each load a container is created and stored in the project (.aep)
metadata
"""
families = ["background"]
representations = ["json"]
def load(self, context, name=None, namespace=None, data=None):
items = stub.get_items(comps=True)
existing_items = [layer.name for layer in items]
comp_name = get_unique_layer_name(
existing_items,
"{}_{}".format(context["asset"]["name"], name))
layers = get_background_layers(self.fname)
comp = stub.import_background(None, stub.LOADED_ICON + comp_name,
layers)
if not comp:
self.log.warning(
"Import background failed.")
self.log.warning("Check host app for alert error.")
return
self[:] = [comp]
namespace = namespace or comp_name
return aftereffects.containerise(
name,
namespace,
comp,
context,
self.__class__.__name__
)
def update(self, container, representation):
""" Switch asset or change version """
context = representation.get("context", {})
_ = container.pop("layer")
# without iterator number (_001, 002...)
namespace_from_container = re.sub(r'_\d{3}$', '',
container["namespace"])
comp_name = "{}_{}".format(context["asset"], context["subset"])
# switching assets
if namespace_from_container != comp_name:
items = stub.get_items(comps=True)
existing_items = [layer.name for layer in items]
comp_name = get_unique_layer_name(
existing_items,
"{}_{}".format(context["asset"], context["subset"]))
else: # switching version - keep same name
comp_name = container["namespace"]
path = api.get_representation_path(representation)
layers = get_background_layers(path)
comp = stub.reload_background(container["members"][1],
stub.LOADED_ICON + comp_name,
layers)
# update container
container["representation"] = str(representation["_id"])
container["name"] = context["subset"]
container["namespace"] = comp_name
container["members"] = comp.members
stub.imprint(comp, container)
def remove(self, container):
"""
Removes element from scene: deletes layer + removes from file
metadata.
Args:
container (dict): container to be removed - used to get layer_id
"""
print("!!!! container:: {}".format(container))
layer = container.pop("layer")
stub.imprint(layer, {})
stub.delete_item(layer.id)
def switch(self, container, representation):
self.update(container, representation)

View file

@@ -0,0 +1,109 @@
from avalon import api, aftereffects
from openpype import lib
import re
stub = aftereffects.stub()
class FileLoader(api.Loader):
"""Load images
Stores the imported asset in a container named after the asset.
"""
label = "Load file"
families = ["image",
"plate",
"render",
"prerender",
"review",
"audio"]
representations = ["*"]
def load(self, context, name=None, namespace=None, data=None):
layers = stub.get_items(comps=True, folders=True, footages=True)
existing_layers = [layer.name for layer in layers]
comp_name = lib.get_unique_layer_name(
existing_layers, "{}_{}".format(context["asset"]["name"], name))
import_options = {}
file = self.fname
repr_cont = context["representation"]["context"]
if "#" not in file:
frame = repr_cont.get("frame")
if frame:
padding = len(frame)
file = file.replace(frame, "#" * padding)
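# e.g. "plate.0001.exr" becomes "plate.####.exr" (illustrative name)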
import_options['sequence'] = True
if not file:
repr_id = context["representation"]["_id"]
self.log.warning(
"Representation id `{}` is failing to load".format(repr_id))
return
file = file.replace("\\", "/")
if '.psd' in file:
import_options['ImportAsType'] = 'ImportAsType.COMP'
comp = stub.import_file(self.fname, stub.LOADED_ICON + comp_name,
import_options)
if not comp:
self.log.warning(
"Representation id `{}` is failing to load".format(file))
self.log.warning("Check host app for alert error.")
return
self[:] = [comp]
namespace = namespace or comp_name
return aftereffects.containerise(
name,
namespace,
comp,
context,
self.__class__.__name__
)
def update(self, container, representation):
""" Switch asset or change version """
layer = container.pop("layer")
context = representation.get("context", {})
namespace_from_container = re.sub(r'_\d{3}$', '',
container["namespace"])
layer_name = "{}_{}".format(context["asset"], context["subset"])
# switching assets
if namespace_from_container != layer_name:
layers = stub.get_items(comps=True)
existing_layers = [layer.name for layer in layers]
layer_name = lib.get_unique_layer_name(
existing_layers,
"{}_{}".format(context["asset"], context["subset"]))
else: # switching version - keep same name
layer_name = container["namespace"]
path = api.get_representation_path(representation)
# with aftereffects.maintained_selection(): # TODO
stub.replace_item(layer.id, path, stub.LOADED_ICON + layer_name)
stub.imprint(
layer, {"representation": str(representation["_id"]),
"name": context["subset"],
"namespace": layer_name}
)
def remove(self, container):
"""
Removes element from scene: deletes layer + removes from Headline
Args:
container (dict): container to be removed - used to get layer_id
"""
layer = container.pop("layer")
stub.imprint(layer, {})
stub.delete_item(layer.id)
def switch(self, container, representation):
self.update(container, representation)

View file

@@ -0,0 +1,21 @@
import pyblish.api
from avalon import aftereffects
class AddPublishHighlight(pyblish.api.InstancePlugin):
"""
Revert the rendered comp name back and add the publish highlight
"""
label = "Add render highlight"
order = pyblish.api.IntegratorOrder + 8.0
hosts = ["aftereffects"]
families = ["render.farm"]
optional = True
def process(self, instance):
stub = aftereffects.stub()
item = instance.data
# comp name contains highlight icon
stub.rename_item(item["comp_id"], item["comp_name"])

View file

@@ -0,0 +1,27 @@
import os
import pyblish.api
from avalon import aftereffects
class CollectAudio(pyblish.api.ContextPlugin):
"""Inject audio file url for rendered composition into context.
Needs to run AFTER 'collect_render'. Uses the collected comp_id to check
if there is an AVLayer in this composition
"""
order = pyblish.api.CollectorOrder + 0.499
label = "Collect Audio"
hosts = ["aftereffects"]
def process(self, context):
for instance in context:
if instance.data["family"] == 'render.farm':
comp_id = instance.data["comp_id"]
if not comp_id:
self.log.debug("No comp_id filled in instance")
return
context.data["audioFile"] = os.path.normpath(
aftereffects.stub().get_audio_url(comp_id)
).replace("\\", "/")

View file

@@ -0,0 +1,18 @@
import os
import pyblish.api
from avalon import aftereffects
class CollectCurrentFile(pyblish.api.ContextPlugin):
"""Inject the current working file into context"""
order = pyblish.api.CollectorOrder - 0.5
label = "Current File"
hosts = ["aftereffects"]
def process(self, context):
context.data["currentFile"] = os.path.normpath(
aftereffects.stub().get_active_document_full_name()
).replace("\\", "/")

View file

@@ -0,0 +1,165 @@
from openpype.lib import abstract_collect_render
from openpype.lib.abstract_collect_render import RenderInstance
import pyblish.api
import attr
import os
from avalon import aftereffects
@attr.s
class AERenderInstance(RenderInstance):
# extend generic, composition name is needed
comp_name = attr.ib(default=None)
comp_id = attr.ib(default=None)
class CollectAERender(abstract_collect_render.AbstractCollectRender):
order = pyblish.api.CollectorOrder + 0.498
label = "Collect After Effects Render Layers"
hosts = ["aftereffects"]
padding_width = 6
rendered_extension = 'png'
stub = aftereffects.stub()
def get_instances(self, context):
instances = []
current_file = context.data["currentFile"]
version = context.data["version"]
asset_entity = context.data["assetEntity"]
project_entity = context.data["projectEntity"]
compositions = self.stub.get_items(True)
compositions_by_id = {item.id: item for item in compositions}
for inst in self.stub.get_metadata():
schema = inst.get('schema')
# skip loaded asset containers
if schema and 'container' in schema:
continue
if not inst["members"]:
raise ValueError("Couldn't find id, unable to publish. " +
"Please recreate instance.")
item_id = inst["members"][0]
work_area_info = self.stub.get_work_area(int(item_id))
frameStart = work_area_info.workAreaStart
frameEnd = round(work_area_info.workAreaStart +
float(work_area_info.workAreaDuration) *
float(work_area_info.frameRate)) - 1
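# e.g. start 0, duration 2.0 s at 25 fps -> frameEnd 49 (inclusive)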
if inst["family"] == "render" and inst["active"]:
instance = AERenderInstance(
family="render.farm", # other way integrate would catch it
families=["render.farm"],
version=version,
time="",
source=current_file,
label="{} - farm".format(inst["subset"]),
subset=inst["subset"],
asset=context.data["assetEntity"]["name"],
attachTo=False,
setMembers='',
publish=True,
renderer='aerender',
name=inst["subset"],
resolutionWidth=asset_entity["data"].get(
"resolutionWidth",
project_entity["data"]["resolutionWidth"]),
resolutionHeight=asset_entity["data"].get(
"resolutionHeight",
project_entity["data"]["resolutionHeight"]),
pixelAspect=1,
tileRendering=False,
tilesX=0,
tilesY=0,
frameStart=frameStart,
frameEnd=frameEnd,
frameStep=1,
toBeRenderedOn='deadline'
)
comp = compositions_by_id.get(int(item_id))
if not comp:
raise ValueError("There is no composition for item {}".
format(item_id))
instance.comp_name = comp.name
instance.comp_id = item_id
instance._anatomy = context.data["anatomy"]
instance.anatomyData = context.data["anatomyData"]
instance.outputDir = self._get_output_dir(instance)
instances.append(instance)
self.log.debug("instances::{}".format(instances))
return instances
def get_expected_files(self, render_instance):
"""
Returns list of rendered files that should be created by
Deadline. These are not published directly, they are the source
for the later 'submit_publish_job'.
Args:
render_instance (RenderInstance): to pull anatomy and parts used
in url
Returns:
(list): absolute paths to the rendered files
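e.g. ".../renders/aftereffects/workfile/sh010_renderMain_v001.000001.png"
(illustrative path, following the pattern built below)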
"""
start = render_instance.frameStart
end = render_instance.frameEnd
# pull file name from Render Queue Output module
render_q = self.stub.get_render_info()
if not render_q:
raise ValueError("No file extension set in Render Queue")
_, ext = os.path.splitext(os.path.basename(render_q.file_name))
base_dir = self._get_output_dir(render_instance)
expected_files = []
if "#" not in render_q.file_name: # single frame (mov)W
path = os.path.join(base_dir, "{}_{}_{}.{}".format(
render_instance.asset,
render_instance.subset,
"v{:03d}".format(render_instance.version),
ext.replace('.', '')
))
expected_files.append(path)
else:
for frame in range(start, end + 1):
path = os.path.join(base_dir, "{}_{}_{}.{}.{}".format(
render_instance.asset,
render_instance.subset,
"v{:03d}".format(render_instance.version),
str(frame).zfill(self.padding_width),
ext.replace('.', '')
))
expected_files.append(path)
return expected_files
def _get_output_dir(self, render_instance):
"""
Returns dir path of rendered files, used in submit_publish_job
for metadata.json location.
Should be in a separate folder inside the work area.
Args:
render_instance (RenderInstance):
Returns:
(str): absolute path to rendered files
"""
# render to folder of workfile
base_dir = os.path.dirname(render_instance.source)
file_name, _ = os.path.splitext(
os.path.basename(render_instance.source))
base_dir = os.path.join(base_dir, 'renders', 'aftereffects', file_name)
# for submit_publish_job
return base_dir

View file

@@ -0,0 +1,70 @@
import os
from avalon import api
import pyblish.api
class CollectWorkfile(pyblish.api.ContextPlugin):
""" Adds the AE render instances """
label = "Collect After Effects Workfile Instance"
order = pyblish.api.CollectorOrder + 0.1
def process(self, context):
task = api.Session["AVALON_TASK"]
current_file = context.data["currentFile"]
staging_dir = os.path.dirname(current_file)
scene_file = os.path.basename(current_file)
version = context.data["version"]
asset_entity = context.data["assetEntity"]
project_entity = context.data["projectEntity"]
shared_instance_data = {
"asset": asset_entity["name"],
"frameStart": asset_entity["data"]["frameStart"],
"frameEnd": asset_entity["data"]["frameEnd"],
"handleStart": asset_entity["data"]["handleStart"],
"handleEnd": asset_entity["data"]["handleEnd"],
"fps": asset_entity["data"]["fps"],
"resolutionWidth": asset_entity["data"].get(
"resolutionWidth",
project_entity["data"]["resolutionWidth"]),
"resolutionHeight": asset_entity["data"].get(
"resolutionHeight",
project_entity["data"]["resolutionHeight"]),
"pixelAspect": 1,
"step": 1,
"version": version
}
# workfile instance
family = "workfile"
subset = family + task.capitalize()
# Create instance
instance = context.create_instance(subset)
# creating instance data
instance.data.update({
"subset": subset,
"label": scene_file,
"family": family,
"families": [family, "ftrack"],
"representations": list()
})
# adding basic script data
instance.data.update(shared_instance_data)
# creating representation
representation = {
'name': 'aep',
'ext': 'aep',
'files': scene_file,
"stagingDir": staging_dir,
}
instance.data["representations"].append(representation)
self.log.info('Publishing After Effects workfile')
for i in context:
self.log.debug(f"{i.data['families']}")

View file

@@ -0,0 +1,15 @@
import openpype.api
from avalon import aftereffects
class ExtractSaveScene(openpype.api.Extractor):
"""Save scene before extraction."""
order = openpype.api.Extractor.order - 0.48
label = "Extract Save Scene"
hosts = ["aftereffects"]
families = ["workfile"]
def process(self, instance):
stub = aftereffects.stub()
stub.save()

View file

@@ -0,0 +1,30 @@
import pyblish.api
from openpype.action import get_errored_plugins_from_data
from openpype.lib import version_up
from avalon import aftereffects
class IncrementWorkfile(pyblish.api.InstancePlugin):
"""Increment the current workfile.
Saves the current scene with an increased version number.
"""
label = "Increment Workfile"
order = pyblish.api.IntegratorOrder + 9.0
hosts = ["aftereffects"]
families = ["workfile"]
optional = True
def process(self, instance):
errored_plugins = get_errored_plugins_from_data(instance.context)
if errored_plugins:
raise RuntimeError(
"Skipping incrementing current file because publishing failed."
)
scene_path = version_up(instance.context.data["currentFile"])
aftereffects.stub().saveAs(scene_path, True)
self.log.info("Incremented workfile to: {}".format(scene_path))

View file

@@ -0,0 +1,23 @@
import openpype.api
from avalon import aftereffects
class RemovePublishHighlight(openpype.api.Extractor):
"""Clean utf characters which are not working in DL
Published compositions are marked with unicode icon which causes
problems on specific render environments. Clean it first, sent to
rendering, add it later back to avoid confusion.
"""
order = openpype.api.Extractor.order - 0.49 # just before save
label = "Clean render comp"
hosts = ["aftereffects"]
families = ["render.farm"]
def process(self, instance):
stub = aftereffects.stub()
self.log.debug("instance::{}".format(instance.data))
item = instance.data
comp_name = item["comp_name"].replace(stub.PUBLISH_ICON, '')
stub.rename_item(item["comp_id"], comp_name)

Binary file not shown.

View file

@@ -0,0 +1,63 @@
import os
import sys
import traceback
import bpy
from avalon import api as avalon
from pyblish import api as pyblish
import openpype.hosts.blender
HOST_DIR = os.path.dirname(os.path.abspath(openpype.hosts.blender.__file__))
PLUGINS_DIR = os.path.join(HOST_DIR, "plugins")
PUBLISH_PATH = os.path.join(PLUGINS_DIR, "publish")
LOAD_PATH = os.path.join(PLUGINS_DIR, "load")
CREATE_PATH = os.path.join(PLUGINS_DIR, "create")
INVENTORY_PATH = os.path.join(PLUGINS_DIR, "inventory")
ORIGINAL_EXCEPTHOOK = sys.excepthook
def pype_excepthook_handler(*args):
traceback.print_exception(*args)
def install():
"""Install Blender configuration for Avalon."""
sys.excepthook = pype_excepthook_handler
pyblish.register_plugin_path(str(PUBLISH_PATH))
avalon.register_plugin_path(avalon.Loader, str(LOAD_PATH))
avalon.register_plugin_path(avalon.Creator, str(CREATE_PATH))
avalon.on("new", on_new)
avalon.on("open", on_open)
def uninstall():
"""Uninstall Blender configuration for Avalon."""
sys.excepthook = ORIGINAL_EXCEPTHOOK
pyblish.deregister_plugin_path(str(PUBLISH_PATH))
avalon.deregister_plugin_path(avalon.Loader, str(LOAD_PATH))
avalon.deregister_plugin_path(avalon.Creator, str(CREATE_PATH))
def set_start_end_frames():
from avalon import io
asset_name = io.Session["AVALON_ASSET"]
asset_doc = io.find_one({
"type": "asset",
"name": asset_name
})
bpy.context.scene.frame_start = asset_doc["data"]["frameStart"]
bpy.context.scene.frame_end = asset_doc["data"]["frameEnd"]
def on_new(arg1, arg2):
set_start_end_frames()
def on_open(arg1, arg2):
set_start_end_frames()

View file

@@ -0,0 +1,47 @@
import bpy
import pyblish.api
from openpype.api import get_errored_instances_from_context
class SelectInvalidAction(pyblish.api.Action):
"""Select invalid objects in Blender when a publish plug-in failed."""
label = "Select Invalid"
on = "failed"
icon = "search"
def process(self, context, plugin):
errored_instances = get_errored_instances_from_context(context)
instances = pyblish.api.instances_by_plugin(errored_instances, plugin)
# Get the invalid nodes for the plug-ins
self.log.info("Finding invalid nodes...")
invalid = list()
for instance in instances:
invalid_nodes = plugin.get_invalid(instance)
if invalid_nodes:
if isinstance(invalid_nodes, (list, tuple)):
invalid.extend(invalid_nodes)
else:
self.log.warning(
"Failed plug-in doens't have any selectable objects."
)
bpy.ops.object.select_all(action='DESELECT')
# Make sure every node is only processed once
invalid = list(set(invalid))
if not invalid:
self.log.info("No invalid nodes found.")
return
invalid_names = [obj.name for obj in invalid]
self.log.info(
"Selecting invalid objects: %s", ", ".join(invalid_names)
)
# Select the objects and also make the last one the active object.
for obj in invalid:
obj.select_set(True)
bpy.context.view_layer.objects.active = invalid[-1]

View file

@@ -0,0 +1,233 @@
"""Shared functionality for pipeline plugins for Blender."""
from pathlib import Path
from typing import Dict, List, Optional
import bpy
from avalon import api
import avalon.blender
from openpype.api import PypeCreatorMixin
VALID_EXTENSIONS = [".blend", ".json"]
def asset_name(
asset: str, subset: str, namespace: Optional[str] = None
) -> str:
"""Return a consistent name for an asset."""
name = f"{asset}"
if namespace:
name = f"{name}_{namespace}"
name = f"{name}_{subset}"
return name
def get_unique_number(
asset: str, subset: str
) -> str:
"""Return a unique number based on the asset name."""
avalon_containers = [
c for c in bpy.data.collections
if c.name == 'AVALON_CONTAINERS'
]
containers = []
# First, add the children of avalon containers
for c in avalon_containers:
containers.extend(c.children)
# then keep looping to include all the children
for c in containers:
containers.extend(c.children)
container_names = [
c.name for c in containers
]
count = 1
name = f"{asset}_{count:0>2}_{subset}_CON"
while name in container_names:
count += 1
name = f"{asset}_{count:0>2}_{subset}_CON"
return f"{count:0>2}"
def prepare_data(data, container_name):
name = data.name
local_data = data.make_local()
local_data.name = f"{name}:{container_name}"
return local_data
def create_blender_context(active: Optional[bpy.types.Object] = None,
selected: Optional[bpy.types.Object] = None,):
"""Create a new Blender context. If an object is passed as
parameter, it is set as selected and active.
"""
if not isinstance(selected, list):
selected = [selected]
override_context = bpy.context.copy()
for win in bpy.context.window_manager.windows:
for area in win.screen.areas:
if area.type == 'VIEW_3D':
for region in area.regions:
if region.type == 'WINDOW':
override_context['window'] = win
override_context['screen'] = win.screen
override_context['area'] = area
override_context['region'] = region
override_context['scene'] = bpy.context.scene
override_context['active_object'] = active
override_context['selected_objects'] = selected
return override_context
raise Exception("Could not create a custom Blender context.")
def get_parent_collection(collection):
"""Get the parent of the input collection"""
check_list = [bpy.context.scene.collection]
for c in check_list:
if collection.name in c.children.keys():
return c
check_list.extend(c.children)
return None
def get_local_collection_with_name(name):
for collection in bpy.data.collections:
if collection.name == name and collection.library is None:
return collection
return None
class Creator(PypeCreatorMixin, avalon.blender.Creator):
pass
class AssetLoader(api.Loader):
"""A basic AssetLoader for Blender
This will implement the basic logic for linking/appending assets
into another Blender scene.
The `update` method should be implemented by a sub-class, because
it's different for different types (e.g. model, rig, animation,
etc.).
"""
@staticmethod
def _get_instance_empty(instance_name: str, nodes: List) -> Optional[bpy.types.Object]:
"""Get the 'instance empty' that holds the collection instance."""
for node in nodes:
if not isinstance(node, bpy.types.Object):
continue
if (node.type == 'EMPTY' and node.instance_type == 'COLLECTION'
and node.instance_collection and node.name == instance_name):
return node
return None
@staticmethod
def _get_instance_collection(instance_name: str, nodes: List) -> Optional[bpy.types.Collection]:
"""Get the 'instance collection' (container) for this asset."""
for node in nodes:
if not isinstance(node, bpy.types.Collection):
continue
if node.name == instance_name:
return node
return None
@staticmethod
def _get_library_from_container(container: bpy.types.Collection) -> bpy.types.Library:
"""Find the library file from the container.
It traverses the objects in this collection, checks that there is only
1 library from which the objects come, and returns that library.
Warning:
No nested collections are supported at the moment!
"""
assert not container.children, "Nested collections are not supported."
assert container.objects, "The collection doesn't contain any objects."
libraries = set()
for obj in container.objects:
assert obj.library, f"'{obj.name}' is not linked."
libraries.add(obj.library)
assert len(
libraries) == 1, f"'{container.name}' contains objects from more than 1 library."
return list(libraries)[0]
def process_asset(self,
context: dict,
name: str,
namespace: Optional[str] = None,
options: Optional[Dict] = None):
"""Must be implemented by a sub-class"""
raise NotImplementedError("Must be implemented by a sub-class")
def load(self,
context: dict,
name: Optional[str] = None,
namespace: Optional[str] = None,
options: Optional[Dict] = None) -> Optional[bpy.types.Collection]:
"""Load asset via database
Arguments:
context: Full parenthood of representation to load
name: Use pre-defined name
namespace: Use pre-defined namespace
options: Additional settings dictionary
"""
# TODO (jasper): make it possible to add the asset several times by
# just re-using the collection
assert Path(self.fname).exists(), f"{self.fname} doesn't exist."
asset = context["asset"]["name"]
subset = context["subset"]["name"]
unique_number = get_unique_number(
asset, subset
)
namespace = namespace or f"{asset}_{unique_number}"
name = name or asset_name(
asset, subset, unique_number
)
nodes = self.process_asset(
context=context,
name=name,
namespace=namespace,
options=options,
)
# Only containerise if anything was loaded by the Loader.
if not nodes:
return None
# Only containerise if it's not already a collection from a .blend file.
# representation = context["representation"]["name"]
# if representation != "blend":
# from avalon.blender.pipeline import containerise
# return containerise(
# name=name,
# namespace=namespace,
# nodes=nodes,
# context=context,
# loader=self.__class__.__name__,
# )
asset = context["asset"]["name"]
subset = context["subset"]["name"]
instance_name = asset_name(asset, subset, unique_number) + '_CON'
return self._get_instance_collection(instance_name, nodes)
def update(self, container: Dict, representation: Dict):
"""Must be implemented by a sub-class"""
raise NotImplementedError("Must be implemented by a sub-class")
def remove(self, container: Dict) -> bool:
"""Must be implemented by a sub-class"""
raise NotImplementedError("Must be implemented by a sub-class")

View file

@@ -0,0 +1,148 @@
import os
import subprocess
from openpype.lib import PreLaunchHook
class InstallPySideToBlender(PreLaunchHook):
"""Install Qt binding to blender's python packages.
Prelaunch hook does 2 things:
1.) Blender's python packages are pushed to the beginning of PYTHONPATH.
2.) Checks if blender has PySide2 installed and tries to install it if not.
The pipeline implementation requires a Qt binding to be installed in
blender's python packages.
The prelaunch hook can only work on Windows right now.
"""
app_groups = ["blender"]
platforms = ["windows"]
def execute(self):
# Prelaunch hook is not crucial
try:
self.inner_execute()
except Exception:
self.log.warning(
"Processing of {} crashed.".format(self.__class__.__name__),
exc_info=True
)
def inner_execute(self):
# Get blender's python directory
executable = self.launch_context.executable.executable_path
# Blender installation contains a subfolder named after its version where
# python binaries are stored.
version_subfolder = self.launch_context.app_name.split("_")[1]
python_dir = os.path.join(
os.path.dirname(executable),
version_subfolder,
"python"
)
# Change PYTHONPATH to contain blender's packages as first
python_paths = [
os.path.join(python_dir, "lib"),
os.path.join(python_dir, "lib", "site-packages"),
]
python_path = self.launch_context.env.get("PYTHONPATH") or ""
for path in python_path.split(os.pathsep):
if path:
python_paths.append(path)
self.launch_context.env["PYTHONPATH"] = os.pathsep.join(python_paths)
# Get blender's python executable
python_executable = os.path.join(python_dir, "bin", "python.exe")
if not os.path.exists(python_executable):
self.log.warning(
"Couldn't find python executable for blender. {}".format(
executable
)
)
return
# Check if PySide2 is installed and skip if yes
if self.is_pyside_installed(python_executable):
return
# Install PySide2 in blender's python
self.install_pyside_windows(python_executable)
def install_pyside_windows(self, python_executable):
"""Install PySide2 python module to blender's python.
Installation requires administrator rights, which is why the "pywin32"
module is used; it can execute commands and ask for
administrator rights.
"""
try:
import win32api
import win32con
import win32process
import win32event
import pywintypes
from win32comext.shell.shell import ShellExecuteEx
from win32comext.shell import shellcon
except Exception:
self.log.warning("Couldn't import \"pywin32\" modules")
return
try:
# Parameters
# - use "-m pip" as module pip to install PySide2 and argument
# "--ignore-installed" is to force install module to blender's
# site-packages and make sure it is binary compatible
parameters = "-m pip install --ignore-installed PySide2"
# Execute command and ask for administrator's rights
process_info = ShellExecuteEx(
nShow=win32con.SW_SHOWNORMAL,
fMask=shellcon.SEE_MASK_NOCLOSEPROCESS,
lpVerb="runas",
lpFile=python_executable,
lpParameters=parameters,
lpDirectory=os.path.dirname(python_executable)
)
process_handle = process_info["hProcess"]
obj = win32event.WaitForSingleObject(
process_handle, win32event.INFINITE
)
returncode = win32process.GetExitCodeProcess(process_handle)
if returncode == 0:
self.log.info(
"Successfully installed PySide2 module to blender."
)
return
except pywintypes.error:
pass
self.log.warning("Failed to instal PySide2 module to blender.")
def is_pyside_installed(self, python_executable):
"""Check if PySide2 module is in blender's pip list.
Check that PySide2 is installed directly in blender's site-packages.
It is possible that it is installed in user's site-packages but that
may be incompatible with blender's python.
"""
# Get pip list from blender's python executable
args = [python_executable, "-m", "pip", "list"]
process = subprocess.Popen(args, stdout=subprocess.PIPE)
stdout, _ = process.communicate()
lines = stdout.decode().split("\r\n")
# The second line contains dashes that define the maximum length of the
# module name column; the second group of dashes does the same for the version.
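# Example `pip list` output being parsed (illustrative):
#   Package    Version
#   ---------- -------
#   PySide2    5.15.2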
package_dashes, *_ = lines[1].split(" ")
package_len = len(package_dashes)
# Go through the printed lines starting at line 3
for idx in range(2, len(lines)):
line = lines[idx]
if not line:
continue
package_name = line[0:package_len].strip()
if package_name.lower() == "pyside2":
return True
return False

View file

@@ -0,0 +1,40 @@
"""Create an animation asset."""
import bpy
from avalon import api
import openpype.hosts.blender.api.plugin
from avalon.blender import lib
class CreateAction(openpype.hosts.blender.api.plugin.Creator):
"""Action output for character rigs"""
name = "actionMain"
label = "Action"
family = "action"
icon = "male"
def process(self):
asset = self.data["asset"]
subset = self.data["subset"]
name = openpype.hosts.blender.api.plugin.asset_name(asset, subset)
collection = bpy.data.collections.new(name=name)
bpy.context.scene.collection.children.link(collection)
self.data['task'] = api.Session.get('AVALON_TASK')
lib.imprint(collection, self.data)
if (self.options or {}).get("useSelection"):
for obj in lib.get_selection():
if (obj.animation_data is not None
and obj.animation_data.action is not None):
empty_obj = bpy.data.objects.new(name=name,
object_data=None)
empty_obj.animation_data_create()
empty_obj.animation_data.action = obj.animation_data.action
empty_obj.animation_data.action.name = name
collection.objects.link(empty_obj)
return collection

View file

@@ -0,0 +1,30 @@
"""Create an animation asset."""
import bpy
from avalon import api, blender
import openpype.hosts.blender.api.plugin
class CreateAnimation(openpype.hosts.blender.api.plugin.Creator):
"""Animation output for character rigs"""
name = "animationMain"
label = "Animation"
family = "animation"
icon = "male"
def process(self):
asset = self.data["asset"]
subset = self.data["subset"]
name = openpype.hosts.blender.api.plugin.asset_name(asset, subset)
collection = bpy.data.collections.new(name=name)
bpy.context.scene.collection.children.link(collection)
self.data['task'] = api.Session.get('AVALON_TASK')
blender.lib.imprint(collection, self.data)
if (self.options or {}).get("useSelection"):
for obj in blender.lib.get_selection():
collection.objects.link(obj)
return collection

View file

@@ -0,0 +1,32 @@
"""Create a camera asset."""
import bpy
from avalon import api
from avalon.blender import lib
import openpype.hosts.blender.api.plugin
class CreateCamera(openpype.hosts.blender.api.plugin.Creator):
"""Polygonal static geometry"""
name = "cameraMain"
label = "Camera"
family = "camera"
icon = "video-camera"
def process(self):
asset = self.data["asset"]
subset = self.data["subset"]
name = openpype.hosts.blender.api.plugin.asset_name(asset, subset)
collection = bpy.data.collections.new(name=name)
bpy.context.scene.collection.children.link(collection)
self.data['task'] = api.Session.get('AVALON_TASK')
lib.imprint(collection, self.data)
if (self.options or {}).get("useSelection"):
for obj in lib.get_selection():
collection.objects.link(obj)
return collection

@ -0,0 +1,40 @@
"""Create a layout asset."""
import bpy
from avalon import api
from avalon.blender import lib
import openpype.hosts.blender.api.plugin
class CreateLayout(openpype.hosts.blender.api.plugin.Creator):
"""Layout output for character rigs"""
name = "layoutMain"
label = "Layout"
family = "layout"
icon = "cubes"
def process(self):
asset = self.data["asset"]
subset = self.data["subset"]
name = openpype.hosts.blender.api.plugin.asset_name(asset, subset)
collection = bpy.data.collections.new(name=name)
bpy.context.scene.collection.children.link(collection)
self.data['task'] = api.Session.get('AVALON_TASK')
lib.imprint(collection, self.data)
# Collect the collections of the selected objects in a set and link
# them all at the end to avoid duplicates: Blender crashes when trying
# to link a collection that is already linked. Linking the whole
# collection also picks up children meshes that were not selected.
if (self.options or {}).get("useSelection"):
objects_to_link = set(
obj.users_collection[0] for obj in lib.get_selection()
)
for obj_collection in objects_to_link:
collection.children.link(obj_collection)
return collection

@ -0,0 +1,32 @@
"""Create a model asset."""
import bpy
from avalon import api
from avalon.blender import lib
import openpype.hosts.blender.api.plugin
class CreateModel(openpype.hosts.blender.api.plugin.Creator):
"""Polygonal static geometry"""
name = "modelMain"
label = "Model"
family = "model"
icon = "cube"
def process(self):
asset = self.data["asset"]
subset = self.data["subset"]
name = openpype.hosts.blender.api.plugin.asset_name(asset, subset)
collection = bpy.data.collections.new(name=name)
bpy.context.scene.collection.children.link(collection)
self.data['task'] = api.Session.get('AVALON_TASK')
lib.imprint(collection, self.data)
if (self.options or {}).get("useSelection"):
for obj in lib.get_selection():
collection.objects.link(obj)
return collection

@ -0,0 +1,41 @@
"""Create a rig asset."""
import bpy
from avalon import api
from avalon.blender import lib
import openpype.hosts.blender.api.plugin
class CreateRig(openpype.hosts.blender.api.plugin.Creator):
"""Artist-friendly rig with controls to direct motion"""
name = "rigMain"
label = "Rig"
family = "rig"
icon = "wheelchair"
def process(self):
asset = self.data["asset"]
subset = self.data["subset"]
name = openpype.hosts.blender.api.plugin.asset_name(asset, subset)
collection = bpy.data.collections.new(name=name)
bpy.context.scene.collection.children.link(collection)
self.data['task'] = api.Session.get('AVALON_TASK')
lib.imprint(collection, self.data)
# Add the rig object and the collections of its children meshes.
# Blender crashes when trying to link an object that is already
# linked. Linking the children collections picks up the children
# meshes automatically if they were not selected, and doesn't link
# them twice if they were, instead, manually selected by the user.
if (self.options or {}).get("useSelection"):
for obj in lib.get_selection():
for child in obj.users_collection[0].children:
collection.children.link(child)
collection.objects.link(obj)
return collection

@ -0,0 +1,25 @@
import bpy
from avalon import api, blender
import openpype.hosts.blender.api.plugin
class CreateSetDress(openpype.hosts.blender.api.plugin.Creator):
"""A grouped package of loaded content"""
name = "setdressMain"
label = "Set Dress"
family = "setdress"
icon = "cubes"
defaults = ["Main", "Anim"]
def process(self):
asset = self.data["asset"]
subset = self.data["subset"]
name = openpype.hosts.blender.api.plugin.asset_name(asset, subset)
collection = bpy.data.collections.new(name=name)
bpy.context.scene.collection.children.link(collection)
self.data['task'] = api.Session.get('AVALON_TASK')
blender.lib.imprint(collection, self.data)
return collection

@ -0,0 +1,292 @@
"""Load an action in Blender."""
import logging
from pathlib import Path
from pprint import pformat
from typing import Dict, List, Optional
from avalon import api, blender
import bpy
import openpype.hosts.blender.api.plugin
logger = logging.getLogger("openpype").getChild("blender").getChild("load_action")
class BlendActionLoader(openpype.hosts.blender.api.plugin.AssetLoader):
"""Load action from a .blend file.
Warning:
Loading the same asset more than once is not properly supported at the
moment.
"""
families = ["action"]
representations = ["blend"]
label = "Link Action"
icon = "code-fork"
color = "orange"
def process_asset(
self, context: dict, name: str, namespace: Optional[str] = None,
options: Optional[Dict] = None
) -> Optional[List]:
"""
Arguments:
name: Use pre-defined name
namespace: Use pre-defined namespace
context: Full parenthood of representation to load
options: Additional settings dictionary
"""
libpath = self.fname
asset = context["asset"]["name"]
subset = context["subset"]["name"]
lib_container = openpype.hosts.blender.api.plugin.asset_name(asset, subset)
container_name = openpype.hosts.blender.api.plugin.asset_name(
asset, subset, namespace
)
container = bpy.data.collections.new(lib_container)
container.name = container_name
blender.pipeline.containerise_existing(
container,
name,
namespace,
context,
self.__class__.__name__,
)
container_metadata = container.get(
blender.pipeline.AVALON_PROPERTY)
container_metadata["libpath"] = libpath
container_metadata["lib_container"] = lib_container
relative = bpy.context.preferences.filepaths.use_relative_paths
with bpy.data.libraries.load(
libpath, link=True, relative=relative
) as (_, data_to):
data_to.collections = [lib_container]
collection = bpy.context.scene.collection
collection.children.link(bpy.data.collections[lib_container])
animation_container = collection.children[lib_container].make_local()
objects_list = []
# Link meshes first, then armatures.
# The armature is unparented for all the non-local meshes,
# when it is made local.
for obj in animation_container.objects:
obj = obj.make_local()
anim_data = obj.animation_data
if anim_data is not None and anim_data.action is not None:
anim_data.action.make_local()
if not obj.get(blender.pipeline.AVALON_PROPERTY):
obj[blender.pipeline.AVALON_PROPERTY] = dict()
avalon_info = obj[blender.pipeline.AVALON_PROPERTY]
avalon_info.update({"container_name": container_name})
objects_list.append(obj)
animation_container.pop(blender.pipeline.AVALON_PROPERTY)
# Save the list of objects in the metadata container
container_metadata["objects"] = objects_list
bpy.ops.object.select_all(action='DESELECT')
nodes = list(container.objects)
nodes.append(container)
self[:] = nodes
return nodes
def update(self, container: Dict, representation: Dict):
"""Update the loaded asset.
This will remove all objects of the current collection, load the new
ones and add them to the collection.
If the objects of the collection are used in another collection they
will not be removed, only unlinked. Normally this should not be the
case though.
Warning:
No nested collections are supported at the moment!
"""
collection = bpy.data.collections.get(
container["objectName"]
)
libpath = Path(api.get_representation_path(representation))
extension = libpath.suffix.lower()
logger.info(
"Container: %s\nRepresentation: %s",
pformat(container, indent=2),
pformat(representation, indent=2),
)
assert collection, (
f"The asset is not loaded: {container['objectName']}"
)
assert not (collection.children), (
"Nested collections are not supported."
)
assert libpath, (
"No existing library file found for {container['objectName']}"
)
assert libpath.is_file(), (
f"The file doesn't exist: {libpath}"
)
assert extension in openpype.hosts.blender.api.plugin.VALID_EXTENSIONS, (
f"Unsupported file: {libpath}"
)
collection_metadata = collection.get(
blender.pipeline.AVALON_PROPERTY)
collection_libpath = collection_metadata["libpath"]
normalized_collection_libpath = (
str(Path(bpy.path.abspath(collection_libpath)).resolve())
)
normalized_libpath = (
str(Path(bpy.path.abspath(str(libpath))).resolve())
)
logger.debug(
"normalized_collection_libpath:\n %s\nnormalized_libpath:\n %s",
normalized_collection_libpath,
normalized_libpath,
)
if normalized_collection_libpath == normalized_libpath:
logger.info("Library already loaded, not updating...")
return
strips = []
for obj in list(collection_metadata["objects"]):
# Get all the strips that use the action
arm_objs = [
arm for arm in bpy.data.objects if arm.type == 'ARMATURE']
for armature_obj in arm_objs:
if armature_obj.animation_data is not None:
for track in armature_obj.animation_data.nla_tracks:
for strip in track.strips:
if strip.action == obj.animation_data.action:
strips.append(strip)
bpy.data.actions.remove(obj.animation_data.action)
bpy.data.objects.remove(obj)
lib_container = collection_metadata["lib_container"]
bpy.data.collections.remove(bpy.data.collections[lib_container])
relative = bpy.context.preferences.filepaths.use_relative_paths
with bpy.data.libraries.load(
str(libpath), link=True, relative=relative
) as (_, data_to):
data_to.collections = [lib_container]
scene = bpy.context.scene
scene.collection.children.link(bpy.data.collections[lib_container])
anim_container = scene.collection.children[lib_container].make_local()
objects_list = []
# Link meshes first, then armatures.
# The armature is unparented for all the non-local meshes,
# when it is made local.
for obj in anim_container.objects:
obj = obj.make_local()
anim_data = obj.animation_data
if anim_data is not None and anim_data.action is not None:
anim_data.action.make_local()
for strip in strips:
strip.action = anim_data.action
strip.action_frame_end = anim_data.action.frame_range[1]
if not obj.get(blender.pipeline.AVALON_PROPERTY):
obj[blender.pipeline.AVALON_PROPERTY] = dict()
avalon_info = obj[blender.pipeline.AVALON_PROPERTY]
avalon_info.update({"container_name": collection.name})
objects_list.append(obj)
anim_container.pop(blender.pipeline.AVALON_PROPERTY)
# Save the list of objects in the metadata container
collection_metadata["objects"] = objects_list
collection_metadata["libpath"] = str(libpath)
collection_metadata["representation"] = str(representation["_id"])
bpy.ops.object.select_all(action='DESELECT')
def remove(self, container: Dict) -> bool:
"""Remove an existing container from a Blender scene.
Arguments:
container (openpype:container-1.0): Container to remove,
from `host.ls()`.
Returns:
bool: Whether the container was deleted.
Warning:
No nested collections are supported at the moment!
"""
collection = bpy.data.collections.get(
container["objectName"]
)
if not collection:
return False
assert not (collection.children), (
"Nested collections are not supported."
)
collection_metadata = collection.get(
blender.pipeline.AVALON_PROPERTY)
objects = collection_metadata["objects"]
lib_container = collection_metadata["lib_container"]
for obj in list(objects):
# Get all the strips that use the action
arm_objs = [
arm for arm in bpy.data.objects if arm.type == 'ARMATURE']
for armature_obj in arm_objs:
if armature_obj.animation_data is not None:
for track in armature_obj.animation_data.nla_tracks:
for strip in track.strips:
if strip.action == obj.animation_data.action:
track.strips.remove(strip)
bpy.data.actions.remove(obj.animation_data.action)
bpy.data.objects.remove(obj)
bpy.data.collections.remove(bpy.data.collections[lib_container])
bpy.data.collections.remove(collection)
return True
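# Sketch (an assumption, shared by all loaders in this commit): the
# repeated "already loaded" check boils down to comparing both library
# paths after normalisation, because the stored libpath may be
# relative to the current .blend file:
# def is_same_library(stored_libpath, libpath):
#     return (
#         Path(bpy.path.abspath(stored_libpath)).resolve()
#         == Path(bpy.path.abspath(str(libpath))).resolve()
#     )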

@ -0,0 +1,248 @@
"""Load an animation in Blender."""
import logging
from pathlib import Path
from pprint import pformat
from typing import Dict, List, Optional
from avalon import api, blender
import bpy
import openpype.hosts.blender.api.plugin
logger = logging.getLogger("openpype").getChild(
"blender").getChild("load_animation")
class BlendAnimationLoader(openpype.hosts.blender.api.plugin.AssetLoader):
"""Load animations from a .blend file.
Warning:
Loading the same asset more than once is not properly supported at the
moment.
"""
families = ["animation"]
representations = ["blend"]
label = "Link Animation"
icon = "code-fork"
color = "orange"
def _remove(self, objects, lib_container):
for obj in list(objects):
if obj.type == 'ARMATURE':
bpy.data.armatures.remove(obj.data)
elif obj.type == 'MESH':
bpy.data.meshes.remove(obj.data)
bpy.data.collections.remove(bpy.data.collections[lib_container])
def _process(self, libpath, lib_container, container_name):
relative = bpy.context.preferences.filepaths.use_relative_paths
with bpy.data.libraries.load(
libpath, link=True, relative=relative
) as (_, data_to):
data_to.collections = [lib_container]
scene = bpy.context.scene
scene.collection.children.link(bpy.data.collections[lib_container])
anim_container = scene.collection.children[lib_container].make_local()
meshes = [obj for obj in anim_container.objects if obj.type == 'MESH']
armatures = [
obj for obj in anim_container.objects if obj.type == 'ARMATURE']
# Should we check that there is only one armature?
objects_list = []
# Link meshes first, then armatures.
# The armature is unparented for all the non-local meshes,
# when it is made local.
for obj in meshes + armatures:
obj = obj.make_local()
obj.data.make_local()
anim_data = obj.animation_data
if anim_data is not None and anim_data.action is not None:
anim_data.action.make_local()
if not obj.get(blender.pipeline.AVALON_PROPERTY):
obj[blender.pipeline.AVALON_PROPERTY] = dict()
avalon_info = obj[blender.pipeline.AVALON_PROPERTY]
avalon_info.update({"container_name": container_name})
objects_list.append(obj)
anim_container.pop(blender.pipeline.AVALON_PROPERTY)
bpy.ops.object.select_all(action='DESELECT')
return objects_list
def process_asset(
self, context: dict, name: str, namespace: Optional[str] = None,
options: Optional[Dict] = None
) -> Optional[List]:
"""
Arguments:
name: Use pre-defined name
namespace: Use pre-defined namespace
context: Full parenthood of representation to load
options: Additional settings dictionary
"""
libpath = self.fname
asset = context["asset"]["name"]
subset = context["subset"]["name"]
lib_container = openpype.hosts.blender.api.plugin.asset_name(asset, subset)
container_name = openpype.hosts.blender.api.plugin.asset_name(
asset, subset, namespace
)
container = bpy.data.collections.new(lib_container)
container.name = container_name
blender.pipeline.containerise_existing(
container,
name,
namespace,
context,
self.__class__.__name__,
)
container_metadata = container.get(
blender.pipeline.AVALON_PROPERTY)
container_metadata["libpath"] = libpath
container_metadata["lib_container"] = lib_container
objects_list = self._process(
libpath, lib_container, container_name)
# Save the list of objects in the metadata container
container_metadata["objects"] = objects_list
nodes = list(container.objects)
nodes.append(container)
self[:] = nodes
return nodes
def update(self, container: Dict, representation: Dict):
"""Update the loaded asset.
This will remove all objects of the current collection, load the new
ones and add them to the collection.
If the objects of the collection are used in another collection they
will not be removed, only unlinked. Normally this should not be the
case though.
Warning:
No nested collections are supported at the moment!
"""
collection = bpy.data.collections.get(
container["objectName"]
)
libpath = Path(api.get_representation_path(representation))
extension = libpath.suffix.lower()
logger.info(
"Container: %s\nRepresentation: %s",
pformat(container, indent=2),
pformat(representation, indent=2),
)
assert collection, (
f"The asset is not loaded: {container['objectName']}"
)
assert not (collection.children), (
"Nested collections are not supported."
)
assert libpath, (
"No existing library file found for {container['objectName']}"
)
assert libpath.is_file(), (
f"The file doesn't exist: {libpath}"
)
assert extension in openpype.hosts.blender.api.plugin.VALID_EXTENSIONS, (
f"Unsupported file: {libpath}"
)
collection_metadata = collection.get(
blender.pipeline.AVALON_PROPERTY)
collection_libpath = collection_metadata["libpath"]
normalized_collection_libpath = (
str(Path(bpy.path.abspath(collection_libpath)).resolve())
)
normalized_libpath = (
str(Path(bpy.path.abspath(str(libpath))).resolve())
)
logger.debug(
"normalized_collection_libpath:\n %s\nnormalized_libpath:\n %s",
normalized_collection_libpath,
normalized_libpath,
)
if normalized_collection_libpath == normalized_libpath:
logger.info("Library already loaded, not updating...")
return
objects = collection_metadata["objects"]
lib_container = collection_metadata["lib_container"]
self._remove(objects, lib_container)
objects_list = self._process(
str(libpath), lib_container, collection.name)
# Save the list of objects in the metadata container
collection_metadata["objects"] = objects_list
collection_metadata["libpath"] = str(libpath)
collection_metadata["representation"] = str(representation["_id"])
bpy.ops.object.select_all(action='DESELECT')
def remove(self, container: Dict) -> bool:
"""Remove an existing container from a Blender scene.
Arguments:
container (openpype:container-1.0): Container to remove,
from `host.ls()`.
Returns:
bool: Whether the container was deleted.
Warning:
No nested collections are supported at the moment!
"""
collection = bpy.data.collections.get(
container["objectName"]
)
if not collection:
return False
assert not (collection.children), (
"Nested collections are not supported."
)
collection_metadata = collection.get(
blender.pipeline.AVALON_PROPERTY)
objects = collection_metadata["objects"]
lib_container = collection_metadata["lib_container"]
self._remove(objects, lib_container)
bpy.data.collections.remove(collection)
return True

@ -0,0 +1,247 @@
"""Load a camera asset in Blender."""
import logging
from pathlib import Path
from pprint import pformat
from typing import Dict, List, Optional
from avalon import api, blender
import bpy
import openpype.hosts.blender.api.plugin
logger = logging.getLogger("openpype").getChild("blender").getChild("load_camera")
class BlendCameraLoader(openpype.hosts.blender.api.plugin.AssetLoader):
"""Load a camera from a .blend file.
Warning:
Loading the same asset more than once is not properly supported at the
moment.
"""
families = ["camera"]
representations = ["blend"]
label = "Link Camera"
icon = "code-fork"
color = "orange"
def _remove(self, objects, lib_container):
for obj in list(objects):
bpy.data.cameras.remove(obj.data)
bpy.data.collections.remove(bpy.data.collections[lib_container])
def _process(self, libpath, lib_container, container_name, actions):
relative = bpy.context.preferences.filepaths.use_relative_paths
with bpy.data.libraries.load(
libpath, link=True, relative=relative
) as (_, data_to):
data_to.collections = [lib_container]
scene = bpy.context.scene
scene.collection.children.link(bpy.data.collections[lib_container])
camera_container = scene.collection.children[lib_container].make_local()
objects_list = []
for obj in camera_container.objects:
local_obj = obj.make_local()
local_obj.data.make_local()
if not local_obj.get(blender.pipeline.AVALON_PROPERTY):
local_obj[blender.pipeline.AVALON_PROPERTY] = dict()
avalon_info = local_obj[blender.pipeline.AVALON_PROPERTY]
avalon_info.update({"container_name": container_name})
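# `actions` is a pair: actions[0] is the object-level action and
# actions[1] is the camera-data action, as collected in `update()`.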
if actions[0] is not None:
if local_obj.animation_data is None:
local_obj.animation_data_create()
local_obj.animation_data.action = actions[0]
if actions[1] is not None:
if local_obj.data.animation_data is None:
local_obj.data.animation_data_create()
local_obj.data.animation_data.action = actions[1]
objects_list.append(local_obj)
camera_container.pop(blender.pipeline.AVALON_PROPERTY)
bpy.ops.object.select_all(action='DESELECT')
return objects_list
def process_asset(
self, context: dict, name: str, namespace: Optional[str] = None,
options: Optional[Dict] = None
) -> Optional[List]:
"""
Arguments:
name: Use pre-defined name
namespace: Use pre-defined namespace
context: Full parenthood of representation to load
options: Additional settings dictionary
"""
libpath = self.fname
asset = context["asset"]["name"]
subset = context["subset"]["name"]
lib_container = openpype.hosts.blender.api.plugin.asset_name(asset, subset)
container_name = openpype.hosts.blender.api.plugin.asset_name(
asset, subset, namespace
)
container = bpy.data.collections.new(lib_container)
container.name = container_name
blender.pipeline.containerise_existing(
container,
name,
namespace,
context,
self.__class__.__name__,
)
container_metadata = container.get(
blender.pipeline.AVALON_PROPERTY)
container_metadata["libpath"] = libpath
container_metadata["lib_container"] = lib_container
objects_list = self._process(
libpath, lib_container, container_name, (None, None))
# Save the list of objects in the metadata container
container_metadata["objects"] = objects_list
nodes = list(container.objects)
nodes.append(container)
self[:] = nodes
return nodes
def update(self, container: Dict, representation: Dict):
"""Update the loaded asset.
This will remove all objects of the current collection, load the new
ones and add them to the collection.
If the objects of the collection are used in another collection they
will not be removed, only unlinked. Normally this should not be the
case though.
Warning:
No nested collections are supported at the moment!
"""
collection = bpy.data.collections.get(
container["objectName"]
)
libpath = Path(api.get_representation_path(representation))
extension = libpath.suffix.lower()
logger.info(
"Container: %s\nRepresentation: %s",
pformat(container, indent=2),
pformat(representation, indent=2),
)
assert collection, (
f"The asset is not loaded: {container['objectName']}"
)
assert not (collection.children), (
"Nested collections are not supported."
)
assert libpath, (
"No existing library file found for {container['objectName']}"
)
assert libpath.is_file(), (
f"The file doesn't exist: {libpath}"
)
assert extension in openpype.hosts.blender.api.plugin.VALID_EXTENSIONS, (
f"Unsupported file: {libpath}"
)
collection_metadata = collection.get(
blender.pipeline.AVALON_PROPERTY)
collection_libpath = collection_metadata["libpath"]
objects = collection_metadata["objects"]
lib_container = collection_metadata["lib_container"]
normalized_collection_libpath = (
str(Path(bpy.path.abspath(collection_libpath)).resolve())
)
normalized_libpath = (
str(Path(bpy.path.abspath(str(libpath))).resolve())
)
logger.debug(
"normalized_collection_libpath:\n %s\nnormalized_libpath:\n %s",
normalized_collection_libpath,
normalized_libpath,
)
if normalized_collection_libpath == normalized_libpath:
logger.info("Library already loaded, not updating...")
return
camera = objects[0]
camera_action = None
camera_data_action = None
if camera.animation_data and camera.animation_data.action:
camera_action = camera.animation_data.action
if camera.data.animation_data and camera.data.animation_data.action:
camera_data_action = camera.data.animation_data.action
actions = (camera_action, camera_data_action)
self._remove(objects, lib_container)
objects_list = self._process(
str(libpath), lib_container, collection.name, actions)
# Save the list of objects in the metadata container
collection_metadata["objects"] = objects_list
collection_metadata["libpath"] = str(libpath)
collection_metadata["representation"] = str(representation["_id"])
bpy.ops.object.select_all(action='DESELECT')
def remove(self, container: Dict) -> bool:
"""Remove an existing container from a Blender scene.
Arguments:
container (openpype:container-1.0): Container to remove,
from `host.ls()`.
Returns:
bool: Whether the container was deleted.
Warning:
No nested collections are supported at the moment!
"""
collection = bpy.data.collections.get(
container["objectName"]
)
if not collection:
return False
assert not (collection.children), (
"Nested collections are not supported."
)
collection_metadata = collection.get(
blender.pipeline.AVALON_PROPERTY)
objects = collection_metadata["objects"]
lib_container = collection_metadata["lib_container"]
self._remove(objects, lib_container)
bpy.data.collections.remove(collection)
return True

@ -0,0 +1,674 @@
"""Load a layout in Blender."""
import json
import math
import logging
from pathlib import Path
from pprint import pformat
from typing import Dict, List, Optional
from avalon import api, blender, pipeline
import bpy
import openpype.hosts.blender.api.plugin as plugin
from openpype.lib import get_creator_by_name
class BlendLayoutLoader(plugin.AssetLoader):
"""Load layout from a .blend file."""
families = ["layout"]
representations = ["blend"]
label = "Link Layout"
icon = "code-fork"
color = "orange"
def _remove(self, objects, obj_container):
for obj in list(objects):
if obj.type == 'ARMATURE':
bpy.data.armatures.remove(obj.data)
elif obj.type == 'MESH':
bpy.data.meshes.remove(obj.data)
elif obj.type == 'CAMERA':
bpy.data.cameras.remove(obj.data)
elif obj.type == 'CURVE':
bpy.data.curves.remove(obj.data)
for element_container in obj_container.children:
for child in element_container.children:
bpy.data.collections.remove(child)
bpy.data.collections.remove(element_container)
bpy.data.collections.remove(obj_container)
def _process(self, libpath, lib_container, container_name, actions):
relative = bpy.context.preferences.filepaths.use_relative_paths
with bpy.data.libraries.load(
libpath, link=True, relative=relative
) as (_, data_to):
data_to.collections = [lib_container]
scene = bpy.context.scene
scene.collection.children.link(bpy.data.collections[lib_container])
layout_container = scene.collection.children[lib_container].make_local()
layout_container.name = container_name
objects_local_types = ['MESH', 'CAMERA', 'CURVE']
objects = []
armatures = []
containers = list(layout_container.children)
for container in layout_container.children:
if container.name == blender.pipeline.AVALON_CONTAINERS:
containers.remove(container)
for container in containers:
container.make_local()
objects.extend([
obj for obj in container.objects
if obj.type in objects_local_types
])
armatures.extend([
obj for obj in container.objects
if obj.type == 'ARMATURE'
])
containers.extend(list(container.children))
# Link meshes first, then armatures.
# The armature is unparented for all the non-local meshes,
# when it is made local.
for obj in objects + armatures:
local_obj = obj.make_local()
if obj.data:
obj.data.make_local()
if not local_obj.get(blender.pipeline.AVALON_PROPERTY):
local_obj[blender.pipeline.AVALON_PROPERTY] = dict()
avalon_info = local_obj[blender.pipeline.AVALON_PROPERTY]
avalon_info.update({"container_name": container_name})
action = actions.get(local_obj.name, None)
if local_obj.type == 'ARMATURE' and action is not None:
local_obj.animation_data.action = action
layout_container.pop(blender.pipeline.AVALON_PROPERTY)
bpy.ops.object.select_all(action='DESELECT')
return layout_container
def process_asset(
self, context: dict, name: str, namespace: Optional[str] = None,
options: Optional[Dict] = None
) -> Optional[List]:
"""
Arguments:
name: Use pre-defined name
namespace: Use pre-defined namespace
context: Full parenthood of representation to load
options: Additional settings dictionary
"""
libpath = self.fname
asset = context["asset"]["name"]
subset = context["subset"]["name"]
lib_container = plugin.asset_name(
asset, subset
)
unique_number = plugin.get_unique_number(
asset, subset
)
namespace = namespace or f"{asset}_{unique_number}"
container_name = plugin.asset_name(
asset, subset, unique_number
)
container = bpy.data.collections.new(lib_container)
container.name = container_name
blender.pipeline.containerise_existing(
container,
name,
namespace,
context,
self.__class__.__name__,
)
container_metadata = container.get(
blender.pipeline.AVALON_PROPERTY)
container_metadata["libpath"] = libpath
container_metadata["lib_container"] = lib_container
obj_container = self._process(
libpath, lib_container, container_name, {})
container_metadata["obj_container"] = obj_container
# Save the list of objects in the metadata container
container_metadata["objects"] = obj_container.all_objects
nodes = [container]
self[:] = nodes
return nodes
def update(self, container: Dict, representation: Dict):
"""Update the loaded asset.
This will remove all objects of the current collection, load the new
ones and add them to the collection.
If the objects of the collection are used in another collection they
will not be removed, only unlinked. Normally this should not be the
case though.
Warning:
No nested collections are supported at the moment!
"""
collection = bpy.data.collections.get(
container["objectName"]
)
libpath = Path(api.get_representation_path(representation))
extension = libpath.suffix.lower()
self.log.info(
"Container: %s\nRepresentation: %s",
pformat(container, indent=2),
pformat(representation, indent=2),
)
assert collection, (
f"The asset is not loaded: {container['objectName']}"
)
assert not (collection.children), (
"Nested collections are not supported."
)
assert libpath, (
"No existing library file found for {container['objectName']}"
)
assert libpath.is_file(), (
f"The file doesn't exist: {libpath}"
)
assert extension in plugin.VALID_EXTENSIONS, (
f"Unsupported file: {libpath}"
)
collection_metadata = collection.get(
blender.pipeline.AVALON_PROPERTY)
collection_libpath = collection_metadata["libpath"]
objects = collection_metadata["objects"]
lib_container = collection_metadata["lib_container"]
obj_container = collection_metadata["obj_container"]
normalized_collection_libpath = (
str(Path(bpy.path.abspath(collection_libpath)).resolve())
)
normalized_libpath = (
str(Path(bpy.path.abspath(str(libpath))).resolve())
)
self.log.debug(
"normalized_collection_libpath:\n %s\nnormalized_libpath:\n %s",
normalized_collection_libpath,
normalized_libpath,
)
if normalized_collection_libpath == normalized_libpath:
self.log.info("Library already loaded, not updating...")
return
actions = {}
for obj in objects:
if obj.type == 'ARMATURE':
if obj.animation_data and obj.animation_data.action:
actions[obj.name] = obj.animation_data.action
self._remove(objects, obj_container)
obj_container = self._process(
str(libpath), lib_container, collection.name, actions)
# Save the list of objects in the metadata container
collection_metadata["obj_container"] = obj_container
collection_metadata["objects"] = obj_container.all_objects
collection_metadata["libpath"] = str(libpath)
collection_metadata["representation"] = str(representation["_id"])
bpy.ops.object.select_all(action='DESELECT')
def remove(self, container: Dict) -> bool:
"""Remove an existing container from a Blender scene.
Arguments:
container (openpype:container-1.0): Container to remove,
from `host.ls()`.
Returns:
bool: Whether the container was deleted.
Warning:
No nested collections are supported at the moment!
"""
collection = bpy.data.collections.get(
container["objectName"]
)
if not collection:
return False
assert not (collection.children), (
"Nested collections are not supported."
)
collection_metadata = collection.get(
blender.pipeline.AVALON_PROPERTY)
objects = collection_metadata["objects"]
obj_container = collection_metadata["obj_container"]
self._remove(objects, obj_container)
bpy.data.collections.remove(collection)
return True
class UnrealLayoutLoader(plugin.AssetLoader):
"""Load layout published from Unreal."""
families = ["layout"]
representations = ["json"]
label = "Link Layout"
icon = "code-fork"
color = "orange"
def _remove_objects(self, objects):
for obj in list(objects):
if obj.type == 'ARMATURE':
bpy.data.armatures.remove(obj.data)
elif obj.type == 'MESH':
bpy.data.meshes.remove(obj.data)
elif obj.type == 'CAMERA':
bpy.data.cameras.remove(obj.data)
elif obj.type == 'CURVE':
bpy.data.curves.remove(obj.data)
else:
self.log.error(
f"Object {obj.name} of type {obj.type} not recognized.")
def _remove_collections(self, collection):
if collection.children:
for child in collection.children:
self._remove_collections(child)
bpy.data.collections.remove(child)
def _remove(self, layout_container):
layout_container_metadata = layout_container.get(
blender.pipeline.AVALON_PROPERTY)
if layout_container.children:
for child in layout_container.children:
child_container = child.get(blender.pipeline.AVALON_PROPERTY)
child_container['objectName'] = child.name
api.remove(child_container)
for c in bpy.data.collections:
metadata = c.get('avalon')
if metadata and metadata.get('id') == 'pyblish.avalon.instance':
# Remove the animation instance that was created for this
# layout: its 'dependencies' point to the layout representation.
if (metadata.get('dependencies')
== layout_container_metadata.get('representation')):
for child in c.children:
bpy.data.collections.remove(child)
bpy.data.collections.remove(c)
break
def _get_loader(self, loaders, family):
name = ""
if family == 'rig':
name = "BlendRigLoader"
elif family == 'model':
name = "BlendModelLoader"
if name == "":
return None
for loader in loaders:
if loader.__name__ == name:
return loader
return None
def set_transform(self, obj, transform):
location = transform.get('translation')
rotation = transform.get('rotation')
scale = transform.get('scale')
# The Y location is negated because Unreal and Blender have mirrored
# Y axes; the Y and Z rotations are negated for the same reason.
obj.location = (
location.get('x'),
-location.get('y'),
location.get('z')
)
obj.rotation_euler = (
rotation.get('x'),
-rotation.get('y'),
-rotation.get('z')
)
obj.scale = (
scale.get('x'),
scale.get('y'),
scale.get('z')
)
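# Usage sketch (hypothetical values): a transform entry, as produced
# by the Unreal layout export, is assumed to look like this:
# transform = {
#     'translation': {'x': 1.0, 'y': 2.0, 'z': 0.5},
#     'rotation': {'x': 0.0, 'y': 0.0, 'z': 1.57},
#     'scale': {'x': 1.0, 'y': 1.0, 'z': 1.0},
# }
# self.set_transform(obj, transform)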
def _process(
self, libpath, layout_container, container_name, representation,
actions, parent
):
with open(libpath, "r") as fp:
data = json.load(fp)
scene = bpy.context.scene
layout_collection = bpy.data.collections.new(container_name)
scene.collection.children.link(layout_collection)
all_loaders = api.discover(api.Loader)
avalon_container = bpy.data.collections.get(
blender.pipeline.AVALON_CONTAINERS)
for element in data:
reference = element.get('reference')
family = element.get('family')
loaders = api.loaders_from_representation(all_loaders, reference)
loader = self._get_loader(loaders, family)
if not loader:
continue
instance_name = element.get('instance_name')
element_container = api.load(
loader,
reference,
namespace=instance_name
)
if not element_container:
continue
avalon_container.children.unlink(element_container)
layout_container.children.link(element_container)
element_metadata = element_container.get(
blender.pipeline.AVALON_PROPERTY)
# Unlink the object's collection from the scene collection and
# link it in the layout collection
element_collection = element_metadata.get('obj_container')
scene.collection.children.unlink(element_collection)
layout_collection.children.link(element_collection)
objects = element_metadata.get('objects')
element_metadata['instance_name'] = instance_name
objects_to_transform = []
creator_plugin = get_creator_by_name(self.animation_creator_name)
if not creator_plugin:
raise ValueError("Creator plugin \"{}\" was not found.".format(
self.animation_creator_name
))
if family == 'rig':
for o in objects:
if o.type == 'ARMATURE':
objects_to_transform.append(o)
# Create an animation subset for each rig
o.select_set(True)
asset = api.Session["AVALON_ASSET"]
c = api.create(
creator_plugin,
name="animation_" + element_collection.name,
asset=asset,
options={"useSelection": True},
data={"dependencies": representation})
scene.collection.children.unlink(c)
parent.children.link(c)
o.select_set(False)
break
elif family == 'model':
objects_to_transform = objects
for o in objects_to_transform:
self.set_transform(o, element.get('transform'))
if actions:
if o.type == 'ARMATURE':
action = actions.get(instance_name, None)
if action:
if o.animation_data is None:
o.animation_data_create()
o.animation_data.action = action
return layout_collection
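# For reference (inferred from the keys used above, values are
# hypothetical), each element of the layout JSON looks like:
# {
#     "reference": "<representation id>",
#     "family": "rig",
#     "instance_name": "<unique instance name>",
#     "transform": {"translation": {...}, "rotation": {...}, "scale": {...}}
# }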
def process_asset(self,
context: dict,
name: str,
namespace: Optional[str] = None,
options: Optional[Dict] = None):
"""
Arguments:
name: Use pre-defined name
namespace: Use pre-defined namespace
context: Full parenthood of representation to load
options: Additional settings dictionary
"""
libpath = self.fname
asset = context["asset"]["name"]
subset = context["subset"]["name"]
lib_container = plugin.asset_name(
asset, subset
)
unique_number = plugin.get_unique_number(
asset, subset
)
namespace = namespace or f"{asset}_{unique_number}"
container_name = plugin.asset_name(
asset, subset, unique_number
)
layout_container = bpy.data.collections.new(container_name)
blender.pipeline.containerise_existing(
layout_container,
name,
namespace,
context,
self.__class__.__name__,
)
container_metadata = layout_container.get(
blender.pipeline.AVALON_PROPERTY)
container_metadata["libpath"] = libpath
container_metadata["lib_container"] = lib_container
# Create a setdress subset to contain all the animation for all
# the rigs in the layout
creator_plugin = get_creator_by_name(self.setdress_creator_name)
if not creator_plugin:
raise ValueError("Creator plugin \"{}\" was not found.".format(
self.setdress_creator_name
))
parent = api.create(
creator_plugin,
name="animation",
asset=api.Session["AVALON_ASSET"],
options={"useSelection": True},
data={"dependencies": str(context["representation"]["_id"])})
layout_collection = self._process(
libpath, layout_container, container_name,
str(context["representation"]["_id"]), None, parent)
container_metadata["obj_container"] = layout_collection
# Save the list of objects in the metadata container
container_metadata["objects"] = layout_collection.all_objects
nodes = [layout_container]
self[:] = nodes
return nodes
def update(self, container: Dict, representation: Dict):
"""Update the loaded asset.
This will remove all objects of the current collection, load the new
ones and add them to the collection.
If the objects of the collection are used in another collection they
will not be removed, only unlinked. Normally this should not be the
case though.
"""
layout_container = bpy.data.collections.get(
container["objectName"]
)
if not layout_container:
return False
libpath = Path(api.get_representation_path(representation))
extension = libpath.suffix.lower()
self.log.info(
"Container: %s\nRepresentation: %s",
pformat(container, indent=2),
pformat(representation, indent=2),
)
assert layout_container, (
f"The asset is not loaded: {container['objectName']}"
)
assert libpath, (
"No existing library file found for {container['objectName']}"
)
assert libpath.is_file(), (
f"The file doesn't exist: {libpath}"
)
assert extension in plugin.VALID_EXTENSIONS, (
f"Unsupported file: {libpath}"
)
layout_container_metadata = layout_container.get(
blender.pipeline.AVALON_PROPERTY)
collection_libpath = layout_container_metadata["libpath"]
lib_container = layout_container_metadata["lib_container"]
obj_container = plugin.get_local_collection_with_name(
layout_container_metadata["obj_container"].name
)
objects = obj_container.all_objects
container_name = obj_container.name
normalized_collection_libpath = (
str(Path(bpy.path.abspath(collection_libpath)).resolve())
)
normalized_libpath = (
str(Path(bpy.path.abspath(str(libpath))).resolve())
)
self.log.debug(
"normalized_collection_libpath:\n %s\nnormalized_libpath:\n %s",
normalized_collection_libpath,
normalized_libpath,
)
if normalized_collection_libpath == normalized_libpath:
self.log.info("Library already loaded, not updating...")
return
actions = {}
for obj in objects:
if obj.type == 'ARMATURE':
if obj.animation_data and obj.animation_data.action:
obj_cont_name = obj.get(
blender.pipeline.AVALON_PROPERTY).get('container_name')
obj_cont = plugin.get_local_collection_with_name(
obj_cont_name)
element_metadata = obj_cont.get(
blender.pipeline.AVALON_PROPERTY)
instance_name = element_metadata.get('instance_name')
actions[instance_name] = obj.animation_data.action
self._remove(layout_container)
bpy.data.collections.remove(obj_container)
creator_plugin = get_creator_by_name(self.setdress_creator_name)
if not creator_plugin:
raise ValueError("Creator plugin \"{}\" was not found.".format(
self.setdress_creator_name
))
parent = api.create(
creator_plugin,
name="animation",
asset=api.Session["AVALON_ASSET"],
options={"useSelection": True},
data={"dependencies": str(representation["_id"])})
layout_collection = self._process(
libpath, layout_container, container_name,
str(representation["_id"]), actions, parent)
layout_container_metadata["obj_container"] = layout_collection
layout_container_metadata["objects"] = layout_collection.all_objects
layout_container_metadata["libpath"] = str(libpath)
layout_container_metadata["representation"] = str(
representation["_id"])
def remove(self, container: Dict) -> bool:
"""Remove an existing container from a Blender scene.
Arguments:
container (openpype:container-1.0): Container to remove,
from `host.ls()`.
Returns:
bool: Whether the container was deleted.
"""
layout_container = bpy.data.collections.get(
container["objectName"]
)
if not layout_container:
return False
layout_container_metadata = layout_container.get(
blender.pipeline.AVALON_PROPERTY)
obj_container = plugin.get_local_collection_with_name(
layout_container_metadata["obj_container"].name
)
self._remove(layout_container)
bpy.data.collections.remove(obj_container)
bpy.data.collections.remove(layout_container)
return True

@ -0,0 +1,306 @@
"""Load a model asset in Blender."""
import logging
from pathlib import Path
from pprint import pformat
from typing import Dict, List, Optional
from avalon import api, blender
import bpy
import openpype.hosts.blender.api.plugin as plugin
class BlendModelLoader(plugin.AssetLoader):
"""Load models from a .blend file.
Because they come from a .blend file we can simply link the collection that
contains the model. There is no further need to 'containerise' it.
"""
families = ["model"]
representations = ["blend"]
label = "Link Model"
icon = "code-fork"
color = "orange"
def _remove(self, objects, container):
for obj in list(objects):
for material_slot in list(obj.material_slots):
bpy.data.materials.remove(material_slot.material)
bpy.data.meshes.remove(obj.data)
bpy.data.collections.remove(container)
def _process(
self, libpath, lib_container, container_name,
parent_collection
):
relative = bpy.context.preferences.filepaths.use_relative_paths
with bpy.data.libraries.load(
libpath, link=True, relative=relative
) as (_, data_to):
data_to.collections = [lib_container]
parent = parent_collection
if parent is None:
parent = bpy.context.scene.collection
parent.children.link(bpy.data.collections[lib_container])
model_container = parent.children[lib_container].make_local()
model_container.name = container_name
for obj in model_container.objects:
local_obj = plugin.prepare_data(obj, container_name)
plugin.prepare_data(local_obj.data, container_name)
for material_slot in local_obj.material_slots:
plugin.prepare_data(material_slot.material, container_name)
if not obj.get(blender.pipeline.AVALON_PROPERTY):
local_obj[blender.pipeline.AVALON_PROPERTY] = dict()
avalon_info = local_obj[blender.pipeline.AVALON_PROPERTY]
avalon_info.update({"container_name": container_name})
model_container.pop(blender.pipeline.AVALON_PROPERTY)
bpy.ops.object.select_all(action='DESELECT')
return model_container
def process_asset(
self, context: dict, name: str, namespace: Optional[str] = None,
options: Optional[Dict] = None
) -> Optional[List]:
"""
Arguments:
name: Use pre-defined name
namespace: Use pre-defined namespace
context: Full parenthood of representation to load
options: Additional settings dictionary
"""
libpath = self.fname
asset = context["asset"]["name"]
subset = context["subset"]["name"]
lib_container = plugin.asset_name(
asset, subset
)
unique_number = plugin.get_unique_number(
asset, subset
)
namespace = namespace or f"{asset}_{unique_number}"
container_name = plugin.asset_name(
asset, subset, unique_number
)
container = bpy.data.collections.new(lib_container)
container.name = container_name
blender.pipeline.containerise_existing(
container,
name,
namespace,
context,
self.__class__.__name__,
)
container_metadata = container.get(
blender.pipeline.AVALON_PROPERTY)
container_metadata["libpath"] = libpath
container_metadata["lib_container"] = lib_container
obj_container = self._process(
libpath, lib_container, container_name, None)
container_metadata["obj_container"] = obj_container
# Save the list of objects in the metadata container
container_metadata["objects"] = obj_container.all_objects
nodes = list(container.objects)
nodes.append(container)
self[:] = nodes
return nodes
def update(self, container: Dict, representation: Dict):
"""Update the loaded asset.
This will remove all objects of the current collection, load the new
ones and add them to the collection.
If the objects of the collection are used in another collection they
will not be removed, only unlinked. Normally this should not be the
case though.
Warning:
No nested collections are supported at the moment!
"""
collection = bpy.data.collections.get(
container["objectName"]
)
libpath = Path(api.get_representation_path(representation))
extension = libpath.suffix.lower()
self.log.info(
"Container: %s\nRepresentation: %s",
pformat(container, indent=2),
pformat(representation, indent=2),
)
assert collection, (
f"The asset is not loaded: {container['objectName']}"
)
assert not (collection.children), (
"Nested collections are not supported."
)
assert libpath, (
"No existing library file found for {container['objectName']}"
)
assert libpath.is_file(), (
f"The file doesn't exist: {libpath}"
)
assert extension in plugin.VALID_EXTENSIONS, (
f"Unsupported file: {libpath}"
)
collection_metadata = collection.get(
blender.pipeline.AVALON_PROPERTY)
collection_libpath = collection_metadata["libpath"]
lib_container = collection_metadata["lib_container"]
obj_container = plugin.get_local_collection_with_name(
collection_metadata["obj_container"].name
)
objects = obj_container.all_objects
container_name = obj_container.name
normalized_collection_libpath = (
str(Path(bpy.path.abspath(collection_libpath)).resolve())
)
normalized_libpath = (
str(Path(bpy.path.abspath(str(libpath))).resolve())
)
self.log.debug(
"normalized_collection_libpath:\n %s\nnormalized_libpath:\n %s",
normalized_collection_libpath,
normalized_libpath,
)
if normalized_collection_libpath == normalized_libpath:
self.log.info("Library already loaded, not updating...")
return
parent = plugin.get_parent_collection(obj_container)
self._remove(objects, obj_container)
obj_container = self._process(
str(libpath), lib_container, container_name, parent)
# Save the list of objects in the metadata container
collection_metadata["obj_container"] = obj_container
collection_metadata["objects"] = obj_container.all_objects
collection_metadata["libpath"] = str(libpath)
collection_metadata["representation"] = str(representation["_id"])
def remove(self, container: Dict) -> bool:
"""Remove an existing container from a Blender scene.
Arguments:
container (openpype:container-1.0): Container to remove,
from `host.ls()`.
Returns:
bool: Whether the container was deleted.
Warning:
No nested collections are supported at the moment!
"""
collection = bpy.data.collections.get(
container["objectName"]
)
if not collection:
return False
assert not (collection.children), (
"Nested collections are not supported."
)
collection_metadata = collection.get(
blender.pipeline.AVALON_PROPERTY)
obj_container = plugin.get_local_collection_with_name(
collection_metadata["obj_container"].name
)
objects = obj_container.all_objects
self._remove(objects, obj_container)
bpy.data.collections.remove(collection)
return True
class CacheModelLoader(plugin.AssetLoader):
"""Load cache models.
Stores the imported asset in a collection named after the asset.
Note:
At least for now it only supports Alembic files.
"""
families = ["model"]
representations = ["abc"]
label = "Link Model"
icon = "code-fork"
color = "orange"
def process_asset(
self, context: dict, name: str, namespace: Optional[str] = None,
options: Optional[Dict] = None
) -> Optional[List]:
"""
Arguments:
name: Use pre-defined name
namespace: Use pre-defined namespace
context: Full parenthood of representation to load
options: Additional settings dictionary
"""
raise NotImplementedError(
"Loading of Alembic files is not yet implemented.")
# TODO (jasper): implement Alembic import.
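# A possible sketch for that TODO (an assumption, untested): Blender's
# built-in Alembic importer could be driven directly, e.g.:
# bpy.ops.wm.alembic_import(filepath=libpath, as_background_job=False)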
libpath = self.fname
asset = context["asset"]["name"]
subset = context["subset"]["name"]
# TODO (jasper): evaluate use of namespace which is 'alien' to Blender.
lib_container = container_name = (
plugin.asset_name(asset, subset, namespace)
)
relative = bpy.context.preferences.filepaths.use_relative_paths
with bpy.data.libraries.load(
libpath, link=True, relative=relative
) as (data_from, data_to):
data_to.collections = [lib_container]
scene = bpy.context.scene
instance_empty = bpy.data.objects.new(
container_name, None
)
scene.collection.objects.link(instance_empty)
instance_empty.instance_type = 'COLLECTION'
collection = bpy.data.collections[lib_container]
collection.name = container_name
instance_empty.instance_collection = collection
nodes = list(collection.objects)
nodes.append(collection)
nodes.append(instance_empty)
self[:] = nodes
return nodes

@ -0,0 +1,301 @@
"""Load a rig asset in Blender."""
import logging
from pathlib import Path
from pprint import pformat
from typing import Dict, List, Optional
from avalon import api, blender
import bpy
import openpype.hosts.blender.api.plugin as plugin
class BlendRigLoader(plugin.AssetLoader):
"""Load rigs from a .blend file.
Because they come from a .blend file we can simply link the collection that
contains the model. There is no further need to 'containerise' it.
"""
families = ["rig"]
representations = ["blend"]
label = "Link Rig"
icon = "code-fork"
color = "orange"
def _remove(self, objects, obj_container):
for obj in list(objects):
if obj.type == 'ARMATURE':
bpy.data.armatures.remove(obj.data)
elif obj.type == 'MESH':
bpy.data.meshes.remove(obj.data)
elif obj.type == 'CURVE':
bpy.data.curves.remove(obj.data)
for child in obj_container.children:
bpy.data.collections.remove(child)
bpy.data.collections.remove(obj_container)
def make_local_and_metadata(self, obj, collection_name):
local_obj = plugin.prepare_data(obj, collection_name)
plugin.prepare_data(local_obj.data, collection_name)
if not local_obj.get(blender.pipeline.AVALON_PROPERTY):
local_obj[blender.pipeline.AVALON_PROPERTY] = dict()
avalon_info = local_obj[blender.pipeline.AVALON_PROPERTY]
avalon_info.update({"container_name": collection_name + '_CON'})
return local_obj
def _process(
self, libpath, lib_container, collection_name,
action, parent_collection
):
relative = bpy.context.preferences.filepaths.use_relative_paths
with bpy.data.libraries.load(
libpath, link=True, relative=relative
) as (_, data_to):
data_to.collections = [lib_container]
parent = parent_collection
if parent is None:
parent = bpy.context.scene.collection
parent.children.link(bpy.data.collections[lib_container])
rig_container = parent.children[lib_container].make_local()
rig_container.name = collection_name
objects = []
armatures = [
obj for obj in rig_container.objects
if obj.type == 'ARMATURE'
]
for child in rig_container.children:
local_child = plugin.prepare_data(child, collection_name)
objects.extend(local_child.objects)
constraints = []
for armature in armatures:
for bone in armature.pose.bones:
for constraint in bone.constraints:
if hasattr(constraint, 'target'):
constraints.append(constraint)
# Link armatures after other objects.
# The armature is unparented for all the non-local meshes,
# when it is made local.
for obj in objects:
local_obj = self.make_local_and_metadata(obj, collection_name)
if obj != local_obj:
for constraint in constraints:
if constraint.target == obj:
constraint.target = local_obj
for armature in armatures:
local_obj = self.make_local_and_metadata(armature, collection_name)
if action is not None:
local_obj.animation_data.action = action
# Relink the drivers to the local object
if local_obj.data.animation_data:
for d in local_obj.data.animation_data.drivers:
for v in d.driver.variables:
for t in v.targets:
t.id = local_obj
rig_container.pop(blender.pipeline.AVALON_PROPERTY)
bpy.ops.object.select_all(action='DESELECT')
return rig_container
def process_asset(
self, context: dict, name: str, namespace: Optional[str] = None,
options: Optional[Dict] = None
) -> Optional[List]:
"""
Arguments:
name: Use pre-defined name
namespace: Use pre-defined namespace
context: Full parenthood of representation to load
options: Additional settings dictionary
"""
libpath = self.fname
asset = context["asset"]["name"]
subset = context["subset"]["name"]
lib_container = plugin.asset_name(
asset, subset
)
unique_number = plugin.get_unique_number(
asset, subset
)
namespace = namespace or f"{asset}_{unique_number}"
collection_name = plugin.asset_name(
asset, subset, unique_number
)
container = bpy.data.collections.new(collection_name)
blender.pipeline.containerise_existing(
container,
name,
namespace,
context,
self.__class__.__name__,
)
container_metadata = container.get(
blender.pipeline.AVALON_PROPERTY)
container_metadata["libpath"] = libpath
container_metadata["lib_container"] = lib_container
obj_container = self._process(
libpath, lib_container, collection_name, None, None)
container_metadata["obj_container"] = obj_container
# Save the list of objects in the metadata container
container_metadata["objects"] = obj_container.all_objects
nodes = list(container.objects)
nodes.append(container)
self[:] = nodes
return nodes
def update(self, container: Dict, representation: Dict):
"""Update the loaded asset.
This will remove all objects of the current collection, load the new
ones and add them to the collection.
If the objects of the collection are used in another collection they
will not be removed, only unlinked. Normally this should not be the
case though.
Warning:
No nested collections are supported at the moment!
"""
collection = bpy.data.collections.get(
container["objectName"]
)
libpath = Path(api.get_representation_path(representation))
extension = libpath.suffix.lower()
self.log.info(
"Container: %s\nRepresentation: %s",
pformat(container, indent=2),
pformat(representation, indent=2),
)
assert collection, (
f"The asset is not loaded: {container['objectName']}"
)
assert not (collection.children), (
"Nested collections are not supported."
)
assert libpath, (
"No existing library file found for {container['objectName']}"
)
assert libpath.is_file(), (
f"The file doesn't exist: {libpath}"
)
assert extension in plugin.VALID_EXTENSIONS, (
f"Unsupported file: {libpath}"
)
collection_metadata = collection.get(
blender.pipeline.AVALON_PROPERTY)
collection_libpath = collection_metadata["libpath"]
lib_container = collection_metadata["lib_container"]
obj_container = plugin.get_local_collection_with_name(
collection_metadata["obj_container"].name
)
objects = obj_container.all_objects
container_name = obj_container.name
normalized_collection_libpath = (
str(Path(bpy.path.abspath(collection_libpath)).resolve())
)
normalized_libpath = (
str(Path(bpy.path.abspath(str(libpath))).resolve())
)
self.log.debug(
"normalized_collection_libpath:\n %s\nnormalized_libpath:\n %s",
normalized_collection_libpath,
normalized_libpath,
)
if normalized_collection_libpath == normalized_libpath:
self.log.info("Library already loaded, not updating...")
return
# Get the armature of the rig
armatures = [obj for obj in objects if obj.type == 'ARMATURE']
assert len(armatures) == 1, "Rig must contain exactly one armature"
action = None
if armatures[0].animation_data and armatures[0].animation_data.action:
action = armatures[0].animation_data.action
parent = plugin.get_parent_collection(obj_container)
self._remove(objects, obj_container)
obj_container = self._process(
str(libpath), lib_container, container_name, action, parent)
# Save the list of objects in the metadata container
collection_metadata["obj_container"] = obj_container
collection_metadata["objects"] = obj_container.all_objects
collection_metadata["libpath"] = str(libpath)
collection_metadata["representation"] = str(representation["_id"])
bpy.ops.object.select_all(action='DESELECT')
def remove(self, container: Dict) -> bool:
"""Remove an existing container from a Blender scene.
Arguments:
container (openpype:container-1.0): Container to remove,
from `host.ls()`.
Returns:
bool: Whether the container was deleted.
Warning:
No nested collections are supported at the moment!
"""
collection = bpy.data.collections.get(
container["objectName"]
)
if not collection:
return False
assert not (collection.children), (
"Nested collections are not supported."
)
collection_metadata = collection.get(
blender.pipeline.AVALON_PROPERTY)
obj_container = plugin.get_local_collection_with_name(
collection_metadata["obj_container"].name
)
objects = obj_container.all_objects
self._remove(objects, obj_container)
bpy.data.collections.remove(collection)
return True

View file

@ -0,0 +1,19 @@
import bpy
import pyblish.api
class CollectBlenderCurrentFile(pyblish.api.ContextPlugin):
"""Inject the current working file into context"""
order = pyblish.api.CollectorOrder - 0.5
label = "Blender Current File"
hosts = ['blender']
def process(self, context):
"""Inject the current working file"""
current_file = bpy.data.filepath
assert current_file != '', "Current file is empty. " \
"Save the file before continuing."
context.data['currentFile'] = current_file

View file

@ -0,0 +1,54 @@
from typing import Generator
import bpy
import json
import pyblish.api
from avalon.blender.pipeline import AVALON_PROPERTY
class CollectInstances(pyblish.api.ContextPlugin):
"""Collect the data of a model."""
hosts = ["blender"]
label = "Collect Instances"
order = pyblish.api.CollectorOrder
@staticmethod
def get_collections() -> Generator:
"""Return all 'model' collections.
Check if the family is 'model' and if it doesn't have the
representation set. If the representation is set, it is a loaded model
and we don't want to publish it.
"""
for collection in bpy.data.collections:
avalon_prop = collection.get(AVALON_PROPERTY) or dict()
if avalon_prop.get('id') == 'pyblish.avalon.instance':
yield collection
def process(self, context):
"""Collect the models from the current Blender scene."""
collections = self.get_collections()
for collection in collections:
avalon_prop = collection[AVALON_PROPERTY]
asset = avalon_prop['asset']
family = avalon_prop['family']
subset = avalon_prop['subset']
task = avalon_prop['task']
name = f"{asset}_{subset}"
instance = context.create_instance(
name=name,
family=family,
families=[family],
subset=subset,
asset=asset,
task=task,
)
members = list(collection.objects)
members.append(collection)
instance[:] = members
self.log.debug(json.dumps(instance.data, indent=4))
for obj in instance:
self.log.debug(obj)

View file

@ -0,0 +1,96 @@
import os
import openpype.api
import openpype.hosts.blender.api.plugin
import bpy
class ExtractABC(openpype.api.Extractor):
"""Extract as ABC."""
label = "Extract ABC"
hosts = ["blender"]
families = ["model"]
optional = True
def process(self, instance):
# Define extract output file path
stagingdir = self.staging_dir(instance)
filename = f"{instance.name}.abc"
filepath = os.path.join(stagingdir, filename)
context = bpy.context
scene = context.scene
view_layer = context.view_layer
# Perform extraction
self.log.info("Performing extraction..")
collections = [
obj for obj in instance if type(obj) is bpy.types.Collection]
assert len(collections) == 1, "There should be one and only one " \
"collection collected for this asset"
old_active_layer_collection = view_layer.active_layer_collection
layers = view_layer.layer_collection.children
# Get the layer collection from the collection we need to export.
# This is needed because in Blender you can only set the active
# collection with the layer collection, and there is no way to get
# the layer collection from the collection
# (but there is the vice versa).
layer_collections = [
layer for layer in layers if layer.collection == collections[0]]
assert len(layer_collections) == 1
view_layer.active_layer_collection = layer_collections[0]
old_scale = scene.unit_settings.scale_length
selected = list()
for obj in instance:
try:
obj.select_set(True)
selected.append(obj)
except Exception:
continue
new_context = openpype.hosts.blender.api.plugin.create_blender_context(
active=selected[0], selected=selected)
# We set the scale of the scene for the export
scene.unit_settings.scale_length = 0.01
self.log.info(new_context)
# We export the abc
bpy.ops.wm.alembic_export(
new_context,
filepath=filepath,
start=1,
end=1
)
view_layer.active_layer_collection = old_active_layer_collection
scene.unit_settings.scale_length = old_scale
if "representations" not in instance.data:
instance.data["representations"] = []
representation = {
'name': 'abc',
'ext': 'abc',
'files': filename,
"stagingDir": stagingdir,
}
instance.data["representations"].append(representation)
self.log.info("Extracted instance '%s' to: %s",
instance.name, representation)

View file

@ -0,0 +1,61 @@
import os
import json
import openpype.api
import pyblish.api
import bpy
class ExtractSetDress(openpype.api.Extractor):
"""Extract setdress."""
label = "Extract SetDress"
hosts = ["blender"]
families = ["setdress"]
optional = True
order = pyblish.api.ExtractorOrder + 0.1
def process(self, instance):
stagingdir = self.staging_dir(instance)
json_data = []
for i in instance.context:
collection = i.data.get("name")
container = None
for obj in bpy.data.collections[collection].objects:
if obj.type == "ARMATURE":
container_name = obj.get("avalon").get("container_name")
container = bpy.data.collections[container_name]
if container:
json_dict = {
"subset": i.data.get("subset"),
"container": container.name,
}
json_dict["instance_name"] = container.get("avalon").get(
"instance_name"
)
json_data.append(json_dict)
if "representations" not in instance.data:
instance.data["representations"] = []
json_filename = f"{instance.name}.json"
json_path = os.path.join(stagingdir, json_filename)
with open(json_path, "w+") as file:
json.dump(json_data, fp=file, indent=2)
json_representation = {
"name": "json",
"ext": "json",
"files": json_filename,
"stagingDir": stagingdir,
}
instance.data["representations"].append(json_representation)
self.log.info(
"Extracted instance '{}' to: {}".format(instance.name,
json_representation)
)

View file

@ -0,0 +1,47 @@
import os
import avalon.blender.workio
import openpype.api
class ExtractBlend(openpype.api.Extractor):
"""Extract a blend file."""
label = "Extract Blend"
hosts = ["blender"]
families = ["model", "camera", "rig", "action", "layout", "animation"]
optional = True
def process(self, instance):
# Define extract output file path
stagingdir = self.staging_dir(instance)
filename = f"{instance.name}.blend"
filepath = os.path.join(stagingdir, filename)
# Perform extraction
self.log.info("Performing extraction..")
# Just save the file to a temporary location. At least for now it's no
# problem to have (possibly) extra stuff in the file.
avalon.blender.workio.save_file(filepath, copy=True)
#
# # Store reference for integration
# if "files" not in instance.data:
# instance.data["files"] = list()
#
# # instance.data["files"].append(filename)
if "representations" not in instance.data:
instance.data["representations"] = []
representation = {
'name': 'blend',
'ext': 'blend',
'files': filename,
"stagingDir": stagingdir,
}
instance.data["representations"].append(representation)
self.log.info("Extracted instance '%s' to: %s",
instance.name, representation)

View file

@ -0,0 +1,81 @@
import os
import openpype.api
import bpy
class ExtractFBX(openpype.api.Extractor):
"""Extract as FBX."""
label = "Extract FBX"
hosts = ["blender"]
families = ["model", "rig"]
optional = True
def process(self, instance):
# Define extract output file path
stagingdir = self.staging_dir(instance)
filename = f"{instance.name}.fbx"
filepath = os.path.join(stagingdir, filename)
context = bpy.context
scene = context.scene
view_layer = context.view_layer
# Perform extraction
self.log.info("Performing extraction..")
collections = [
obj for obj in instance if type(obj) is bpy.types.Collection]
assert len(collections) == 1, "There should be one and only one " \
"collection collected for this asset"
old_active_layer_collection = view_layer.active_layer_collection
layers = view_layer.layer_collection.children
# Get the layer collection from the collection we need to export.
# This is needed because in Blender you can only set the active
# collection with the layer collection, and there is no way to get
# the layer collection from the collection
# (but there is the vice versa).
layer_collections = [
layer for layer in layers if layer.collection == collections[0]]
assert len(layer_collections) == 1
view_layer.active_layer_collection = layer_collections[0]
old_scale = scene.unit_settings.scale_length
# We set the scale of the scene for the export
scene.unit_settings.scale_length = 0.01
# We export the fbx
bpy.ops.export_scene.fbx(
filepath=filepath,
use_active_collection=True,
mesh_smooth_type='FACE',
add_leaf_bones=False
)
view_layer.active_layer_collection = old_active_layer_collection
scene.unit_settings.scale_length = old_scale
if "representations" not in instance.data:
instance.data["representations"] = []
representation = {
'name': 'fbx',
'ext': 'fbx',
'files': filename,
"stagingDir": stagingdir,
}
instance.data["representations"].append(representation)
self.log.info("Extracted instance '%s' to: %s",
instance.name, representation)

View file

@ -0,0 +1,136 @@
import os
import openpype.api
import bpy
import bpy_extras
import bpy_extras.anim_utils
class ExtractAnimationFBX(openpype.api.Extractor):
"""Extract as animation."""
label = "Extract FBX"
hosts = ["blender"]
families = ["animation"]
optional = True
def process(self, instance):
# Define extract output file path
stagingdir = self.staging_dir(instance)
context = bpy.context
scene = context.scene
# Perform extraction
self.log.info("Performing extraction..")
collections = [
obj for obj in instance if type(obj) is bpy.types.Collection]
assert len(collections) == 1, "There should be one and only one " \
"collection collected for this asset"
old_scale = scene.unit_settings.scale_length
# We set the scale of the scene for the export
scene.unit_settings.scale_length = 0.01
armatures = [
obj for obj in collections[0].objects if obj.type == 'ARMATURE']
assert len(armatures) == 1, "There should be one and only one " \
"armature collected for this asset"
armature = armatures[0]
armature_name = armature.name
original_name = armature_name.split(':')[0]
armature.name = original_name
object_action_pairs = []
original_actions = []
starting_frames = []
ending_frames = []
# For each armature, we make a copy of the current action
curr_action = None
copy_action = None
if armature.animation_data and armature.animation_data.action:
curr_action = armature.animation_data.action
copy_action = curr_action.copy()
curr_frame_range = curr_action.frame_range
starting_frames.append(curr_frame_range[0])
ending_frames.append(curr_frame_range[1])
else:
self.log.info("Object have no animation.")
return
object_action_pairs.append((armature, copy_action))
original_actions.append(curr_action)
# We compute the overall frame range to bake
min_frame = min(starting_frames)
max_frame = max(ending_frames)
# We bake the copy of the current action for each object
bpy_extras.anim_utils.bake_action_objects(
object_action_pairs,
frames=range(int(min_frame), int(max_frame) + 1),  # end frame inclusive
do_object=False,
do_clean=False
)
for obj in bpy.data.objects:
obj.select_set(False)
armature.select_set(True)
fbx_filename = f"{instance.name}_{armature.name}.fbx"
filepath = os.path.join(stagingdir, fbx_filename)
override = bpy.context.copy()
override['selected_objects'] = [armature]
bpy.ops.export_scene.fbx(
override,
filepath=filepath,
use_selection=True,
bake_anim_use_nla_strips=False,
bake_anim_use_all_actions=False,
add_leaf_bones=False,
armature_nodetype='ROOT',
object_types={'ARMATURE'}
)
armature.name = armature_name
armature.select_set(False)
scene.unit_settings.scale_length = old_scale
# We delete the baked action and set the original one back
for i in range(0, len(object_action_pairs)):
pair = object_action_pairs[i]
action = original_actions[i]
if action:
pair[0].animation_data.action = action
if pair[1]:
pair[1].user_clear()
bpy.data.actions.remove(pair[1])
if "representations" not in instance.data:
instance.data["representations"] = []
fbx_representation = {
'name': 'fbx',
'ext': 'fbx',
'files': fbx_filename,
"stagingDir": stagingdir,
}
instance.data["representations"].append(fbx_representation)
self.log.info("Extracted instance '{}' to: {}".format(
instance.name, fbx_representation))

View file

@ -0,0 +1,25 @@
import pyblish.api
import avalon.blender.workio
class IncrementWorkfileVersion(pyblish.api.ContextPlugin):
"""Increment current workfile version."""
order = pyblish.api.IntegratorOrder + 0.9
label = "Increment Workfile Version"
optional = True
hosts = ["blender"]
families = ["animation", "model", "rig", "action"]
def process(self, context):
assert all(result["success"] for result in context.data["results"]), (
"Publishing was not successful, so the version is not increased.")
from openpype.lib import version_up
path = context.data["currentFile"]
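# version_up bumps the version token in the file name, e.g.
# ".../sh010_v001.blend" -> ".../sh010_v002.blend" (illustrative path)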
filepath = version_up(path)
avalon.blender.workio.save_file(filepath, copy=False)
self.log.info('Incrementing workfile version')

View file

@ -0,0 +1,49 @@
import json
from avalon import io
import pyblish.api
class IntegrateAnimation(pyblish.api.InstancePlugin):
"""Generate a JSON file for animation."""
label = "Integrate Animation"
order = pyblish.api.IntegratorOrder + 0.1
optional = True
hosts = ["blender"]
families = ["setdress"]
def process(self, instance):
self.log.info("Integrate Animation")
representation = instance.data.get('representations')[0]
json_path = representation.get('publishedFiles')[0]
with open(json_path, "r") as file:
data = json.load(file)
# Update the json file for the setdress to add the published
# representations of the animations
for json_dict in data:
i = None
for elem in instance.context:
if elem.data.get('subset') == json_dict['subset']:
i = elem
break
if not i:
continue
rep = None
pub_repr = i.data.get('published_representations')
for elem in pub_repr:
if pub_repr.get(elem).get('representation').get('name') == "fbx":
rep = pub_repr.get(elem)
break
if not rep:
continue
obj_id = rep.get('representation').get('_id')
if obj_id:
json_dict['_id'] = str(obj_id)
with open(json_path, "w") as file:
json.dump(data, fp=file, indent=2)

View file

@ -0,0 +1,52 @@
from typing import List
import bpy
import pyblish.api
import openpype.hosts.blender.api.action
class ValidateMeshHasUvs(pyblish.api.InstancePlugin):
"""Validate that the current mesh has UV's."""
order = pyblish.api.ValidatorOrder
hosts = ["blender"]
families = ["model"]
category = "geometry"
label = "Mesh Has UV's"
actions = [openpype.hosts.blender.api.action.SelectInvalidAction]
optional = True
@staticmethod
def has_uvs(obj: bpy.types.Object) -> bool:
"""Check if an object has uv's."""
if not obj.data.uv_layers:
return False
for uv_layer in obj.data.uv_layers:
for polygon in obj.data.polygons:
for loop_index in polygon.loop_indices:
if not uv_layer.data[loop_index].uv:
return False
return True
@classmethod
def get_invalid(cls, instance) -> List:
invalid = []
# TODO (jasper): only check objects in the collection that will be published?
for obj in instance:
try:
if obj.type == 'MESH':
# Make sure we are in object mode.
bpy.ops.object.mode_set(mode='OBJECT')
if not cls.has_uvs(obj):
invalid.append(obj)
except Exception:
continue
return invalid
def process(self, instance):
invalid = self.get_invalid(instance)
if invalid:
raise RuntimeError(f"Meshes found in instance without valid UV's: {invalid}")

View file

@ -0,0 +1,35 @@
from typing import List
import bpy
import pyblish.api
import openpype.hosts.blender.api.action
class ValidateMeshNoNegativeScale(pyblish.api.Validator):
"""Ensure that meshes don't have a negative scale."""
order = pyblish.api.ValidatorOrder
hosts = ["blender"]
families = ["model"]
label = "Mesh No Negative Scale"
actions = [openpype.hosts.blender.api.action.SelectInvalidAction]
@staticmethod
def get_invalid(instance) -> List:
invalid = []
# TODO (jasper): only check objects in the collection that will be published?
for obj in (o for o in bpy.data.objects if o.type == 'MESH'):
if any(v < 0 for v in obj.scale):
invalid.append(obj)
return invalid
def process(self, instance):
invalid = self.get_invalid(instance)
if invalid:
raise RuntimeError(
f"Meshes found in instance with negative scale: {invalid}"
)

View file

@ -0,0 +1,3 @@
from openpype.hosts.blender import api
api.install()

View file

View file

@ -0,0 +1 @@
kwargs = None

View file

@ -0,0 +1,106 @@
import os
import sys
import copy
import argparse
from avalon import io
from avalon.tools import publish
import pyblish.api
import pyblish.util
from openpype.api import Logger
import openpype
import openpype.hosts.celaction
from openpype.hosts.celaction import api as celaction
log = Logger().get_logger("Celaction_cli_publisher")
publish_host = "celaction"
HOST_DIR = os.path.dirname(os.path.abspath(openpype.hosts.celaction.__file__))
PLUGINS_DIR = os.path.join(HOST_DIR, "plugins")
PUBLISH_PATH = os.path.join(PLUGINS_DIR, "publish")
LOAD_PATH = os.path.join(PLUGINS_DIR, "load")
CREATE_PATH = os.path.join(PLUGINS_DIR, "create")
INVENTORY_PATH = os.path.join(PLUGINS_DIR, "inventory")
def cli():
parser = argparse.ArgumentParser(prog="celaction_publish")
parser.add_argument("--currentFile",
help="Pass file to Context as `currentFile`")
parser.add_argument("--chunk",
help=("Render chanks on farm"))
parser.add_argument("--frameStart",
help=("Start of frame range"))
parser.add_argument("--frameEnd",
help=("End of frame range"))
parser.add_argument("--resolutionWidth",
help=("Width of resolution"))
parser.add_argument("--resolutionHeight",
help=("Height of resolution"))
celaction.kwargs = parser.parse_args(sys.argv[1:]).__dict__
def _prepare_publish_environments():
"""Prepares environments based on request data."""
env = copy.deepcopy(os.environ)
project_name = os.getenv("AVALON_PROJECT")
asset_name = os.getenv("AVALON_ASSET")
io.install()
project_doc = io.find_one({
"type": "project"
})
av_asset = io.find_one({
"type": "asset",
"name": asset_name
})
parents = av_asset["data"]["parents"]
hierarchy = ""
if parents:
hierarchy = "/".join(parents)
env["AVALON_PROJECT"] = project_name
env["AVALON_ASSET"] = asset_name
env["AVALON_TASK"] = os.getenv("AVALON_TASK")
env["AVALON_WORKDIR"] = os.getenv("AVALON_WORKDIR")
env["AVALON_HIERARCHY"] = hierarchy
env["AVALON_PROJECTCODE"] = project_doc["data"].get("code", "")
env["AVALON_APP"] = f"hosts.{publish_host}"
env["AVALON_APP_NAME"] = "celaction_local"
env["PYBLISH_HOSTS"] = publish_host
os.environ.update(env)
def main():
# prepare all environments
_prepare_publish_environments()
# Registers pype's Global pyblish plugins
openpype.install()
if os.path.exists(PUBLISH_PATH):
log.info(f"Registering path: {PUBLISH_PATH}")
pyblish.api.register_plugin_path(PUBLISH_PATH)
pyblish.api.register_host(publish_host)
return publish.show()
if __name__ == "__main__":
cli()
result = main()
sys.exit(not bool(result))

View file

@ -0,0 +1,122 @@
import os
import shutil
import winreg
from openpype.lib import PreLaunchHook
from openpype.hosts.celaction import api as celaction
class CelactionPrelaunchHook(PreLaunchHook):
"""
Bootstrap CelAction with OpenPype
"""
workfile_ext = "scn"
app_groups = ["celaction"]
platforms = ["windows"]
def execute(self):
# Add workfile path to launch arguments
workfile_path = self.workfile_path()
if workfile_path:
self.launch_context.launch_args.append(workfile_path)
project_name = self.data["project_name"]
asset_name = self.data["asset_name"]
task_name = self.data["task_name"]
# get publish version of celaction
app = "celaction_publish"
# setting output parameters
path = r"Software\CelAction\CelAction2D\User Settings"
winreg.CreateKey(winreg.HKEY_CURRENT_USER, path)
hKey = winreg.OpenKey(
winreg.HKEY_CURRENT_USER,
"Software\\CelAction\\CelAction2D\\User Settings", 0,
winreg.KEY_ALL_ACCESS)
# TODO: this will need to be checked more thoroughly
pype_exe = os.getenv("OPENPYPE_EXECUTABLE")
winreg.SetValueEx(hKey, "SubmitAppTitle", 0, winreg.REG_SZ, pype_exe)
parameters = [
"launch",
f"--app {app}",
f"--project {project_name}",
f"--asset {asset_name}",
f"--task {task_name}",
"--currentFile \\\"\"*SCENE*\"\\\"",
"--chunk 10",
"--frameStart *START*",
"--frameEnd *END*",
"--resolutionWidth *X*",
"--resolutionHeight *Y*",
# "--programDir \"'*PROGPATH*'\""
]
winreg.SetValueEx(hKey, "SubmitParametersTitle", 0, winreg.REG_SZ,
" ".join(parameters))
# setting resolution parameters
path = r"Software\CelAction\CelAction2D\User Settings\Dialogs"
path += r"\SubmitOutput"
winreg.CreateKey(winreg.HKEY_CURRENT_USER, path)
hKey = winreg.OpenKey(winreg.HKEY_CURRENT_USER, path, 0,
winreg.KEY_ALL_ACCESS)
winreg.SetValueEx(hKey, "SaveScene", 0, winreg.REG_DWORD, 1)
winreg.SetValueEx(hKey, "CustomX", 0, winreg.REG_DWORD, 1920)
winreg.SetValueEx(hKey, "CustomY", 0, winreg.REG_DWORD, 1080)
# making sure message dialogs don't appear when overwriting
path = r"Software\CelAction\CelAction2D\User Settings\Messages"
path += r"\OverwriteScene"
winreg.CreateKey(winreg.HKEY_CURRENT_USER, path)
hKey = winreg.OpenKey(winreg.HKEY_CURRENT_USER, path, 0,
winreg.KEY_ALL_ACCESS)
winreg.SetValueEx(hKey, "Result", 0, winreg.REG_DWORD, 6)
winreg.SetValueEx(hKey, "Valid", 0, winreg.REG_DWORD, 1)
path = r"Software\CelAction\CelAction2D\User Settings\Messages"
path += r"\SceneSaved"
winreg.CreateKey(winreg.HKEY_CURRENT_USER, path)
hKey = winreg.OpenKey(winreg.HKEY_CURRENT_USER, path, 0,
winreg.KEY_ALL_ACCESS)
winreg.SetValueEx(hKey, "Result", 0, winreg.REG_DWORD, 1)
winreg.SetValueEx(hKey, "Valid", 0, winreg.REG_DWORD, 1)
def workfile_path(self):
workfile_path = self.data["last_workfile_path"]
# copy workfile from template if none exists on the path
if not os.path.exists(workfile_path):
# TODO add ability to set different template workfile path via
# settings
pype_celaction_dir = os.path.dirname(os.path.dirname(
os.path.abspath(celaction.__file__)
))
template_path = os.path.join(
pype_celaction_dir,
"resources",
"celaction_template_scene.scn"
)
if not os.path.exists(template_path):
self.log.warning(
"Couldn't find workfile template file in {}".format(
template_path
)
)
return
self.log.info(
f"Creating workfile from template: \"{template_path}\""
)
# Copy template workfile to new destination
shutil.copy2(
os.path.normpath(template_path),
os.path.normpath(workfile_path)
)
self.log.info(f"Workfile to open: \"{workfile_path}\"")
return workfile_path

View file

@ -0,0 +1,126 @@
import os
import collections
import pyblish.api
from avalon import io
from pprint import pformat
class AppendCelactionAudio(pyblish.api.ContextPlugin):
label = "Colect Audio for publishing"
order = pyblish.api.CollectorOrder + 0.1
def process(self, context):
self.log.info('Collecting Audio Data')
asset_doc = context.data["assetEntity"]
# get all available representations
subsets = self.get_subsets(
asset_doc,
representations=["audio", "wav"]
)
self.log.info(f"subsets is: {pformat(subsets)}")
if not subsets.get("audioMain"):
raise AttributeError("`audioMain` subset does not exist")
reprs = subsets.get("audioMain", {}).get("representations", [])
self.log.info(f"reprs is: {pformat(reprs)}")
repre = next(iter(reprs), None)
if not repre:
raise RuntimeError("Missing `audioMain` representation")
self.log.info(f"representation is: {repre}")
audio_file = repre.get('data', {}).get('path', "")
if os.path.exists(audio_file):
context.data["audioFile"] = audio_file
self.log.info(
'audio_file: {}, has been added to context'.format(audio_file))
else:
self.log.warning("Couldn't find any audio file on Ftrack.")
def get_subsets(self, asset_doc, representations):
"""
Query subsets with filter on name.
The method will return all found subsets and its defined version
and subsets. Version could be specified with number. Representation
can be filtered.
Arguments:
asset_doct (dict): Asset (shot) mongo document
representations (list): list for all representations
Returns:
dict: subsets with version and representaions in keys
"""
# Query all subsets for asset
subset_docs = io.find({
"type": "subset",
"parent": asset_doc["_id"]
})
# Collect all subset ids
subset_ids = [
subset_doc["_id"]
for subset_doc in subset_docs
]
# Check if we found anything
assert subset_ids, (
"No subsets found for asset `{}`. Check the filter."
).format(asset_doc["name"])
# Last version aggregation
pipeline = [
# Find all versions of those subsets
{"$match": {
"type": "version",
"parent": {"$in": subset_ids}
}},
# Sorting versions all together
{"$sort": {"name": 1}},
# Group them by "parent", but only take the last
{"$group": {
"_id": "$parent",
"_version_id": {"$last": "$_id"},
"name": {"$last": "$name"}
}}
]
last_versions_by_subset_id = dict()
for doc in io.aggregate(pipeline):
doc["parent"] = doc["_id"]
doc["_id"] = doc.pop("_version_id")
last_versions_by_subset_id[doc["parent"]] = doc
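# last_versions_by_subset_id now maps subset id -> its latest version
# doc, e.g. {<subset id>: {"_id": <version id>, "name": 3,
# "parent": <subset id>}} (illustrative values)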
version_docs_by_id = {}
for version_doc in last_versions_by_subset_id.values():
version_docs_by_id[version_doc["_id"]] = version_doc
repre_docs = io.find({
"type": "representation",
"parent": {"$in": list(version_docs_by_id.keys())},
"name": {"$in": representations}
})
repre_docs_by_version_id = collections.defaultdict(list)
for repre_doc in repre_docs:
version_id = repre_doc["parent"]
repre_docs_by_version_id[version_id].append(repre_doc)
output_dict = {}
for version_id, repre_docs in repre_docs_by_version_id.items():
version_doc = version_docs_by_id[version_id]
subset_id = version_doc["parent"]
subset_doc = last_versions_by_subset_id[subset_id]
# Store queried docs by subset name
output_dict[subset_doc["name"]] = {
"representations": repre_docs,
"version": version_doc
}
return output_dict

View file

@ -0,0 +1,23 @@
import pyblish.api
from openpype.hosts.celaction import api as celaction
class CollectCelactionCliKwargs(pyblish.api.Collector):
""" Collects all keyword arguments passed from the terminal """
label = "Collect Celaction Cli Kwargs"
order = pyblish.api.Collector.order - 0.1
def process(self, context):
kwargs = celaction.kwargs.copy()
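# kwargs is a plain dict of the CLI arguments at this point, e.g.
# {"currentFile": "...", "chunk": "10", "frameStart": "1001",
# "frameEnd": "1100", ...} (illustrative values, all still strings)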
self.log.info("Storing kwargs: %s" % kwargs)
context.set_data("kwargs", kwargs)
# get kwargs onto context data as keys with values
for k, v in kwargs.items():
self.log.info(f"Setting `{k}` to instance.data with value: `{v}`")
if k in ["frameStart", "frameEnd"]:
context.data[k] = kwargs[k] = int(v)
else:
context.data[k] = v

View file

@ -0,0 +1,96 @@
import os
from avalon import api
import pyblish.api
class CollectCelactionInstances(pyblish.api.ContextPlugin):
""" Adds the celaction render instances """
label = "Collect Celaction Instances"
order = pyblish.api.CollectorOrder + 0.1
def process(self, context):
task = api.Session["AVALON_TASK"]
current_file = context.data["currentFile"]
staging_dir = os.path.dirname(current_file)
scene_file = os.path.basename(current_file)
version = context.data["version"]
asset_entity = context.data["assetEntity"]
project_entity = context.data["projectEntity"]
shared_instance_data = {
"asset": asset_entity["name"],
"frameStart": asset_entity["data"]["frameStart"],
"frameEnd": asset_entity["data"]["frameEnd"],
"handleStart": asset_entity["data"]["handleStart"],
"handleEnd": asset_entity["data"]["handleEnd"],
"fps": asset_entity["data"]["fps"],
"resolutionWidth": asset_entity["data"].get(
"resolutionWidth",
project_entity["data"]["resolutionWidth"]),
"resolutionHeight": asset_entity["data"].get(
"resolutionHeight",
project_entity["data"]["resolutionHeight"]),
"pixelAspect": 1,
"step": 1,
"version": version
}
celaction_kwargs = context.data.get("kwargs", {})
if celaction_kwargs:
shared_instance_data.update(celaction_kwargs)
# workfile instance
family = "workfile"
subset = family + task.capitalize()
# Create instance
instance = context.create_instance(subset)
# creating instance data
instance.data.update({
"subset": subset,
"label": scene_file,
"family": family,
"families": [family, "ftrack"],
"representations": list()
})
# adding basic script data
instance.data.update(shared_instance_data)
# creating representation
representation = {
'name': 'scn',
'ext': 'scn',
'files': scene_file,
"stagingDir": staging_dir,
}
instance.data["representations"].append(representation)
self.log.info('Publishing Celaction workfile')
# render instance
family = "render.farm"
subset = f"render{task}Main"
instance = context.create_instance(name=subset)
# getting instance state
instance.data["publish"] = True
# add assetEntity data into instance
instance.data.update({
"label": "{} - farm".format(subset),
"family": family,
"families": [family],
"subset": subset
})
# adding basic script data
instance.data.update(shared_instance_data)
self.log.info('Publishing Celaction render instance')
self.log.debug(f"Instance data: `{instance.data}`")
for i in context:
self.log.debug(f"{i.data['families']}")

View file

@ -0,0 +1,56 @@
import os
import pyblish.api
import copy
class CollectRenderPath(pyblish.api.InstancePlugin):
"""Generate file and directory path where rendered images will be"""
label = "Collect Render Path"
order = pyblish.api.CollectorOrder + 0.495
families = ["render.farm"]
# Presets
anatomy_render_key = None
publish_render_metadata = None
def process(self, instance):
anatomy = instance.context.data["anatomy"]
anatomy_data = copy.deepcopy(instance.data["anatomyData"])
anatomy_data["family"] = "render"
padding = anatomy.templates.get("frame_padding", 4)
anatomy_data.update({
"frame": f"%0{padding}d",
"representation": "png"
})
anatomy_filled = anatomy.format(anatomy_data)
# get anatomy rendering keys
anatomy_render_key = self.anatomy_render_key or "render"
publish_render_metadata = self.publish_render_metadata or "render"
# get folder and path for rendering images from celaction
render_dir = anatomy_filled[anatomy_render_key]["folder"]
render_path = anatomy_filled[anatomy_render_key]["path"]
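# Illustrative: render_path resolves to a frame pattern such as
# ".../v001/renderTaskMain_v001.%04d.png" (hypothetical template)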
# create dir if it doesn't exist
try:
if not os.path.isdir(render_dir):
os.makedirs(render_dir, exist_ok=True)
except OSError:
# directory is not available
self.log.warning("Path is unreachable: `{}`".format(render_dir))
# add rendering path to instance data
instance.data["path"] = render_path
# get anatomy for published renders folder path
if anatomy_filled.get(publish_render_metadata):
instance.data["publishRenderMetadataFolder"] = anatomy_filled[
publish_render_metadata]["folder"]
self.log.info("Metadata render path: `{}`".format(
instance.data["publishRenderMetadataFolder"]
))
self.log.info(f"Render output path set to: `{render_path}`")

View file

@ -0,0 +1,20 @@
import shutil
import openpype
import pyblish.api
class VersionUpScene(pyblish.api.ContextPlugin):
order = pyblish.api.IntegratorOrder + 0.5
label = 'Version Up Scene'
families = ['workfile']
optional = True
active = True
def process(self, context):
current_file = context.data.get('currentFile')
v_up = openpype.lib.version_up(current_file)
self.log.debug('Current file is: {}'.format(current_file))
self.log.debug('Version up: {}'.format(v_up))
shutil.copy2(current_file, v_up)
self.log.info('Scene saved into new version: {}'.format(v_up))

View file

@ -0,0 +1,265 @@
import os
import json
import getpass
from avalon.vendor import requests
import re
import pyblish.api
class ExtractCelactionDeadline(pyblish.api.InstancePlugin):
"""Submit CelAction2D scene to Deadline
Renders are submitted to a Deadline Web Service as
supplied via settings key "DEADLINE_REST_URL".
"""
label = "Submit CelAction to Deadline"
order = pyblish.api.IntegratorOrder + 0.1
hosts = ["celaction"]
families = ["render.farm"]
deadline_department = ""
deadline_priority = 50
deadline_pool = ""
deadline_pool_secondary = ""
deadline_group = ""
deadline_chunk_size = 1
enviro_filter = [
"FTRACK_API_USER",
"FTRACK_API_KEY",
"FTRACK_SERVER"
]
def process(self, instance):
instance.data["toBeRenderedOn"] = "deadline"
context = instance.context
deadline_url = (
context.data["system_settings"]
["modules"]
["deadline"]
["DEADLINE_REST_URL"]
)
assert deadline_url, "Requires DEADLINE_REST_URL"
self.deadline_url = "{}/api/jobs".format(deadline_url)
self._comment = context.data.get("comment", "")
self._deadline_user = context.data.get(
"deadlineUser", getpass.getuser())
self._frame_start = int(instance.data["frameStart"])
self._frame_end = int(instance.data["frameEnd"])
# get output path
render_path = instance.data['path']
script_path = context.data["currentFile"]
response = self.payload_submit(instance,
script_path,
render_path
)
# Store output dir for unified publisher (filesequence)
instance.data["deadlineSubmissionJob"] = response.json()
instance.data["outputDir"] = os.path.dirname(
render_path).replace("\\", "/")
instance.data["publishJobState"] = "Suspended"
instance.context.data['ftrackStatus'] = "Render"
# adding 2d render specific family for version identification in Loader
instance.data["families"] = ["render2d"]
def payload_submit(self,
instance,
script_path,
render_path
):
resolution_width = instance.data["resolutionWidth"]
resolution_height = instance.data["resolutionHeight"]
render_dir = os.path.normpath(os.path.dirname(render_path))
render_path = os.path.normpath(render_path)
script_name = os.path.basename(script_path)
jobname = "%s - %s" % (script_name, instance.name)
output_filename_0 = self.preview_fname(render_path)
try:
# Ensure render folder exists
os.makedirs(render_dir)
except OSError:
pass
# define chunk and priority
chunk_size = instance.context.data.get("chunk")
if not chunk_size:
chunk_size = self.deadline_chunk_size
# search for %02d pattern in name, and padding number
search_results = re.search(r"(.%0)(\d)(d)[._]", render_path).groups()
split_pattern = "".join(search_results)
padding_number = int(search_results[1])
args = [
f"<QUOTE>{script_path}<QUOTE>",
"-a",
"-16",
"-s <STARTFRAME>",
"-e <ENDFRAME>",
f"-d <QUOTE>{render_dir}<QUOTE>",
f"-x {resolution_width}",
f"-y {resolution_height}",
f"-r <QUOTE>{render_path.replace(split_patern, '')}<QUOTE>",
f"-= AbsoluteFrameNumber=on -= PadDigits={padding_number}",
"-= ClearAttachment=on",
]
payload = {
"JobInfo": {
# Job name, as seen in Monitor
"Name": jobname,
# plugin definition
"Plugin": "CelAction",
# Top-level group name
"BatchName": script_name,
# Arbitrary username, for visualisation in Monitor
"UserName": self._deadline_user,
"Department": self.deadline_department,
"Priority": self.deadline_priority,
"Group": self.deadline_group,
"Pool": self.deadline_pool,
"SecondaryPool": self.deadline_pool_secondary,
"ChunkSize": chunk_size,
"Frames": f"{self._frame_start}-{self._frame_end}",
"Comment": self._comment,
# Optional, enable double-click to preview rendered
# frames from Deadline Monitor
"OutputFilename0": output_filename_0.replace("\\", "/"),
# # Asset dependency to wait for at least the scene file to sync.
# "AssetDependency0": script_path
"ScheduledType": "Once",
"JobDelay": "00:00:08:00"
},
"PluginInfo": {
# Input
"SceneFile": script_path,
# Output directory
"OutputFilePath": render_dir.replace("\\", "/"),
# Plugin attributes
"StartupDirectory": "",
"Arguments": " ".join(args),
# Resolve relative references
"ProjectPath": script_path,
"AWSAssetFile0": render_path,
},
# Mandatory for Deadline, may be empty
"AuxFiles": []
}
plugin = payload["JobInfo"]["Plugin"]
self.log.info("using render plugin : {}".format(plugin))
i = 0
for key, values in dict(os.environ).items():
if key.upper() in self.enviro_filter:
payload["JobInfo"].update(
{
"EnvironmentKeyValue%d"
% i: "{key}={value}".format(
key=key, value=values
)
}
)
i += 1
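# e.g. "EnvironmentKeyValue0": "FTRACK_SERVER=<url>" (illustrative)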
self.log.info("Submitting..")
self.log.info(json.dumps(payload, indent=4, sort_keys=True))
# adding expected files to instance.data
self.expected_files(instance, render_path)
self.log.debug("__ expectedFiles: `{}`".format(
instance.data["expectedFiles"]))
response = requests.post(self.deadline_url, json=payload)
if not response.ok:
raise Exception(response.text)
return response
def preflight_check(self, instance):
"""Ensure the startFrame, endFrame and byFrameStep are integers"""
for key in ("frameStart", "frameEnd"):
value = instance.data[key]
if int(value) == value:
continue
self.log.warning(
"%f=%d was rounded off to nearest integer"
% (value, int(value))
)
def preview_fname(self, path):
"""Return output file path with #### for padding.
Deadline requires the path to be formatted with # in place of numbers.
For example `/path/to/render.####.png`
Args:
path (str): path to rendered images
Returns:
str
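Example (illustrative):
"/path/to/render.%04d.png" -> "/path/to/render.####.png"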
"""
self.log.debug("_ path: `{}`".format(path))
if "%" in path:
search_results = re.search(r"[._](%0)(\d)(d)[._]", path).groups()
split_patern = "".join(search_results)
split_path = path.split(split_patern)
hashes = "#" * int(search_results[1])
return "".join([split_path[0], hashes, split_path[-1]])
if "#" in path:
self.log.debug("_ path: `{}`".format(path))
return path
else:
return path
def expected_files(self,
instance,
path):
""" Create expected files in instance data
"""
if not instance.data.get("expectedFiles"):
instance.data["expectedFiles"] = list()
dirname = os.path.dirname(path)
file = os.path.basename(path)
if "#" in file:
pparts = file.split("#")
padding = "%0{}d".format(len(pparts) - 1)
file = pparts[0] + padding + pparts[-1]
if "%" not in file:
instance.data["expectedFiles"].append(path)
return
for i in range(self._frame_start, (self._frame_end + 1)):
instance.data["expectedFiles"].append(
os.path.join(dirname, (file % i)).replace("\\", "/"))

View file

View file

@ -0,0 +1,38 @@
from .pipeline import (
install,
uninstall,
publish,
launch_workfiles_app
)
from .utils import (
setup
)
from .lib import (
get_additional_data,
update_frame_range
)
from .menu import launch_openpype_menu
__all__ = [
# pipeline
"install",
"uninstall",
"publish",
"launch_workfiles_app",
# utils
"setup",
"get_resolve_module",
# lib
"get_additional_data",
"update_frame_range",
# menu
"launch_openpype_menu",
]

View file

@ -0,0 +1,142 @@
import sys
from avalon.vendor.Qt import QtGui
import avalon.fusion
from avalon import io
self = sys.modules[__name__]
self._project = None
def update_frame_range(start, end, comp=None, set_render_range=True):
"""Set Fusion comp's start and end frame range
Args:
start (float, int): start frame
end (float, int): end frame
comp (object, Optional): comp object from fusion
set_render_range (bool, Optional): When True this will also set the
composition's render start and end frame.
Returns:
None
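Example (illustrative):
update_frame_range(1001, 1100)  # also sets the render range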
"""
if not comp:
comp = avalon.fusion.get_current_comp()
attrs = {
"COMPN_GlobalStart": start,
"COMPN_GlobalEnd": end
}
if set_render_range:
attrs.update({
"COMPN_RenderStart": start,
"COMPN_RenderEnd": end
})
with avalon.fusion.comp_lock_and_undo_chunk(comp):
comp.SetAttrs(attrs)
def get_additional_data(container):
"""Get Fusion related data for the container
Args:
container(dict): the container found by the ls() function
Returns:
dict
"""
tool = container["_tool"]
tile_color = tool.TileColor
if tile_color is None:
return {}
return {"color": QtGui.QColor.fromRgbF(tile_color["R"],
tile_color["G"],
tile_color["B"])}
def switch_item(container,
asset_name=None,
subset_name=None,
representation_name=None):
"""Switch container asset, subset or representation of a container by name.
It'll always switch to the latest version - of course a different
approach could be implemented.
Args:
container (dict): data of the item to switch with
asset_name (str): name of the asset
subset_name (str): name of the subset
representation_name (str): name of the representation
Returns:
dict
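Example (illustrative):
switch_item(container, representation_name="exr")
# switches to the latest "exr" representation of the same
# asset/subset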
"""
if all(not x for x in [asset_name, subset_name, representation_name]):
raise ValueError("Must have at least one change provided to switch.")
# Collect any of current asset, subset and representation if not provided
# so we can use the original name from those.
if any(not x for x in [asset_name, subset_name, representation_name]):
_id = io.ObjectId(container["representation"])
representation = io.find_one({"type": "representation", "_id": _id})
version, subset, asset, project = io.parenthood(representation)
if asset_name is None:
asset_name = asset["name"]
if subset_name is None:
subset_name = subset["name"]
if representation_name is None:
representation_name = representation["name"]
# Find the new one
asset = io.find_one({
"name": asset_name,
"type": "asset"
})
assert asset, ("Could not find asset in the database with the name "
"'%s'" % asset_name)
subset = io.find_one({
"name": subset_name,
"type": "subset",
"parent": asset["_id"]
})
assert subset, ("Could not find subset in the database with the name "
"'%s'" % subset_name)
version = io.find_one(
{
"type": "version",
"parent": subset["_id"]
},
sort=[('name', -1)]
)
assert version, "Could not find a version for {}.{}".format(
asset_name, subset_name
)
representation = io.find_one({
"name": representation_name,
"type": "representation",
"parent": version["_id"]}
)
assert representation, ("Could not find representation in the database "
"with the name '%s'" % representation_name)
avalon.api.switch(container, representation)
return representation

View file

@ -0,0 +1,170 @@
import os
import sys
from Qt import QtWidgets, QtCore
from .pipeline import (
publish,
launch_workfiles_app
)
from avalon.tools import (
creator,
loader,
sceneinventory,
libraryloader
)
from openpype.hosts.fusion.scripts import (
set_rendermode,
duplicate_with_inputs
)
def load_stylesheet():
path = os.path.join(os.path.dirname(__file__), "menu_style.qss")
if not os.path.exists(path):
print("Unable to load stylesheet, file not found in resources")
return ""
with open(path, "r") as file_stream:
stylesheet = file_stream.read()
return stylesheet
class Spacer(QtWidgets.QWidget):
def __init__(self, height, *args, **kwargs):
super(Spacer, self).__init__(*args, **kwargs)
self.setFixedHeight(height)
real_spacer = QtWidgets.QWidget(self)
real_spacer.setObjectName("Spacer")
real_spacer.setFixedHeight(height)
layout = QtWidgets.QVBoxLayout(self)
layout.setContentsMargins(0, 0, 0, 0)
layout.addWidget(real_spacer)
self.setLayout(layout)
class OpenPypeMenu(QtWidgets.QWidget):
def __init__(self, *args, **kwargs):
super(OpenPypeMenu, self).__init__(*args, **kwargs)
self.setObjectName("OpenPypeMenu")
self.setWindowFlags(
QtCore.Qt.Window
| QtCore.Qt.CustomizeWindowHint
| QtCore.Qt.WindowTitleHint
| QtCore.Qt.WindowCloseButtonHint
| QtCore.Qt.WindowStaysOnTopHint
)
self.render_mode_widget = None
self.setWindowTitle("OpenPype")
workfiles_btn = QtWidgets.QPushButton("Workfiles ...", self)
create_btn = QtWidgets.QPushButton("Create ...", self)
publish_btn = QtWidgets.QPushButton("Publish ...", self)
load_btn = QtWidgets.QPushButton("Load ...", self)
inventory_btn = QtWidgets.QPushButton("Inventory ...", self)
libload_btn = QtWidgets.QPushButton("Library ...", self)
rendermode_btn = QtWidgets.QPushButton("Set render mode ...", self)
duplicate_with_inputs_btn = QtWidgets.QPushButton(
"Duplicate with input connections", self
)
reset_resolution_btn = QtWidgets.QPushButton(
"Reset Resolution from project", self
)
layout = QtWidgets.QVBoxLayout(self)
layout.setContentsMargins(10, 20, 10, 20)
layout.addWidget(workfiles_btn)
layout.addWidget(create_btn)
layout.addWidget(publish_btn)
layout.addWidget(load_btn)
layout.addWidget(inventory_btn)
layout.addWidget(Spacer(15, self))
layout.addWidget(libload_btn)
layout.addWidget(Spacer(15, self))
layout.addWidget(rendermode_btn)
layout.addWidget(Spacer(15, self))
layout.addWidget(duplicate_with_inputs_btn)
layout.addWidget(reset_resolution_btn)
self.setLayout(layout)
workfiles_btn.clicked.connect(self.on_workfile_clicked)
create_btn.clicked.connect(self.on_create_clicked)
publish_btn.clicked.connect(self.on_publish_clicked)
load_btn.clicked.connect(self.on_load_clicked)
inventory_btn.clicked.connect(self.on_inventory_clicked)
libload_btn.clicked.connect(self.on_libload_clicked)
rendermode_btn.clicked.connect(self.on_rendernode_clicked)
duplicate_with_inputs_btn.clicked.connect(
self.on_duplicate_with_inputs_clicked)
reset_resolution_btn.clicked.connect(self.on_reset_resolution_clicked)
def on_workfile_clicked(self):
print("Clicked Workfile")
launch_workfiles_app()
def on_create_clicked(self):
print("Clicked Create")
creator.show()
def on_publish_clicked(self):
print("Clicked Publish")
publish(None)
def on_load_clicked(self):
print("Clicked Load")
loader.show(use_context=True)
def on_inventory_clicked(self):
print("Clicked Inventory")
sceneinventory.show()
def on_libload_clicked(self):
print("Clicked Library")
libraryloader.show()
def on_rendernode_clicked(self):
from avalon import style
print("Clicked Set Render Mode")
if self.render_mode_widget is None:
window = set_rendermode.SetRenderMode()
window.setStyleSheet(style.load_stylesheet())
window.show()
self.render_mode_widget = window
else:
self.render_mode_widget.show()
def on_duplicate_with_inputs_clicked(self):
print("Clicked Duplicate with input connections")
duplicate_with_inputs.duplicate_with_input_connections()
def on_reset_resolution_clicked(self):
print("Clicked Reset Resolution")
def launch_openpype_menu():
app = QtWidgets.QApplication(sys.argv)
app.setQuitOnLastWindowClosed(False)
pype_menu = OpenPypeMenu()
stylesheet = load_stylesheet()
pype_menu.setStyleSheet(stylesheet)
pype_menu.show()
sys.exit(app.exec_())

View file

@ -0,0 +1,29 @@
QWidget {
background-color: #282828;
border-radius: 3px;
}
QPushButton {
border: 1px solid #090909;
background-color: #201f1f;
color: #ffffff;
padding: 5px;
}
QPushButton:focus {
background-color: "#171717";
color: #d0d0d0;
}
QPushButton:hover {
background-color: "#171717";
color: #e64b3d;
}
#OpenPypeMenu {
border: 1px solid #fef9ef;
}
#Spacer {
background-color: #282828;
}

View file

@ -0,0 +1,111 @@
"""
Basic avalon integration
"""
import os
from avalon.tools import workfiles
from avalon import api as avalon
from pyblish import api as pyblish
from openpype.api import Logger
import openpype.hosts.fusion
log = Logger().get_logger(__name__)
HOST_DIR = os.path.dirname(os.path.abspath(openpype.hosts.fusion.__file__))
PLUGINS_DIR = os.path.join(HOST_DIR, "plugins")
PUBLISH_PATH = os.path.join(PLUGINS_DIR, "publish")
LOAD_PATH = os.path.join(PLUGINS_DIR, "load")
CREATE_PATH = os.path.join(PLUGINS_DIR, "create")
INVENTORY_PATH = os.path.join(PLUGINS_DIR, "inventory")
def install():
"""Install fusion-specific functionality of avalon-core.
This is where you install menus and register families, data
and loaders into fusion.
It is called automatically when installing via `api.install(avalon.fusion)`
See the Maya equivalent for inspiration on how to implement this.
"""
# Disable all families except for the ones we explicitly want to see
family_states = ["imagesequence",
"camera",
"pointcache"]
avalon.data["familiesStateDefault"] = False
avalon.data["familiesStateToggled"] = family_states
log.info("pype.hosts.fusion installed")
pyblish.register_host("fusion")
pyblish.register_plugin_path(PUBLISH_PATH)
log.info("Registering Fusion plug-ins..")
avalon.register_plugin_path(avalon.Loader, LOAD_PATH)
avalon.register_plugin_path(avalon.Creator, CREATE_PATH)
avalon.register_plugin_path(avalon.InventoryAction, INVENTORY_PATH)
pyblish.register_callback("instanceToggled", on_pyblish_instance_toggled)
def uninstall():
"""Uninstall all tha was installed
This is where you undo everything that was done in `install()`.
That means, removing menus, deregistering families and data
and everything. It should be as though `install()` was never run,
because odds are calling this function means the user intends to
re-install shortly afterwards, for example after modifying the menu
or registered families.
"""
pyblish.deregister_host("fusion")
pyblish.deregister_plugin_path(PUBLISH_PATH)
log.info("Deregistering Fusion plug-ins..")
avalon.deregister_plugin_path(avalon.Loader, LOAD_PATH)
avalon.deregister_plugin_path(avalon.Creator, CREATE_PATH)
avalon.deregister_plugin_path(avalon.InventoryAction, INVENTORY_PATH)
pyblish.deregister_callback("instanceToggled", on_pyblish_instance_toggled)
def on_pyblish_instance_toggled(instance, new_value, old_value):
"""Toggle saver tool passthrough states on instance toggles."""
from avalon.fusion import comp_lock_and_undo_chunk
comp = instance.context.data.get("currentComp")
if not comp:
return
savers = [tool for tool in instance if
getattr(tool, "ID", None) == "Saver"]
if not savers:
return
# Whether instances should be passthrough based on new value
passthrough = not new_value
with comp_lock_and_undo_chunk(comp,
undo_queue_name="Change instance "
"active state"):
for tool in savers:
attrs = tool.GetAttrs()
current = attrs["TOOLB_PassThrough"]
if current != passthrough:
tool.SetAttrs({"TOOLB_PassThrough": passthrough})
def launch_workfiles_app(*args):
workdir = os.environ["AVALON_WORKDIR"]
workfiles.show(workdir)
def publish(parent):
"""Shorthand to publish from within host"""
from avalon.tools import publish
return publish.show(parent)

View file

@ -0,0 +1,86 @@
#! python3
"""
Fusion tools for setting environment
"""
import os
import shutil
from openpype.api import Logger
import openpype.hosts.fusion
log = Logger().get_logger(__name__)
def _sync_utility_scripts(env=None):
""" Synchronizing basic utlility scripts for resolve.
To be able to run scripts from inside `Fusion/Workspace/Scripts` menu
all scripts has to be accessible from defined folder.
"""
if not env:
env = os.environ
# initiate inputs
scripts = {}
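# `scripts` will map each source dir to the entries found there, e.g.
# {"<pkg>/utility_scripts": ["OpenPypeMenu.py"]} (hypothetical name)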
us_env = env.get("FUSION_UTILITY_SCRIPTS_SOURCE_DIR")
us_dir = env.get("FUSION_UTILITY_SCRIPTS_DIR", "")
us_paths = [os.path.join(
os.path.dirname(os.path.abspath(openpype.hosts.fusion.__file__)),
"utility_scripts"
)]
# collect script dirs
if us_env:
log.info(f"Utility Scripts Env: `{us_env}`")
us_paths = us_env.split(
os.pathsep) + us_paths
# collect scripts from dirs
for path in us_paths:
scripts.update({path: os.listdir(path)})
log.info(f"Utility Scripts Dir: `{us_paths}`")
log.info(f"Utility Scripts: `{scripts}`")
# clear any existing files from the target folder first
for s in os.listdir(us_dir):
path = os.path.normpath(
os.path.join(us_dir, s))
log.info(f"Removing `{path}`...")
# remove file or directory if not in our folders
if not os.path.isdir(path):
os.remove(path)
else:
shutil.rmtree(path)
# copy scripts into Fusion's utility scripts dir
for d, sl in scripts.items():
# directory and scripts list
for s in sl:
# script in script list
src = os.path.normpath(os.path.join(d, s))
dst = os.path.normpath(os.path.join(us_dir, s))
log.info(f"Copying `{src}` to `{dst}`...")
# copy file or directory from our folders to fusion's folder
if not os.path.isdir(src):
shutil.copy2(src, dst)
else:
shutil.copytree(src, dst)
def setup(env=None):
""" Wrapper installer started from pype.hooks.fusion.FusionPrelaunch()
"""
if not env:
env = os.environ
# synchronize fusion utility scripts
_sync_utility_scripts(env)
log.info("Fusion Pype wrapper has been installed")

View file

@ -0,0 +1,50 @@
import os
import importlib
from openpype.lib import PreLaunchHook
from openpype.hosts.fusion.api import utils
class FusionPrelaunch(PreLaunchHook):
"""
This hook will check if current workfile path has Fusion
project inside.
"""
app_groups = ["fusion"]
def execute(self):
# making sure Python 3.6 is installed at the provided path
py36_dir = os.path.normpath(self.launch_context.env.get("PYTHON36", ""))
assert os.path.isdir(py36_dir), (
"Python 3.6 is not installed at the provided folder path. Either "
"make sure the `environments\resolve.json` is having correctly "
"set `PYTHON36` or make sure Python 3.6 is installed "
f"in given path. \nPYTHON36E: `{py36_dir}`"
)
self.log.info(f"Path to Fusion Python folder: `{py36_dir}`...")
self.launch_context.env["PYTHON36"] = py36_dir
# setting utility scripts dir for scripts syncing
us_dir = os.path.normpath(
self.launch_context.env.get("FUSION_UTILITY_SCRIPTS_DIR", "")
)
assert os.path.isdir(us_dir), (
"Fusion utility script dir does not exists. Either make sure "
"the `environments\fusion.json` is having correctly set "
"`FUSION_UTILITY_SCRIPTS_DIR` or reinstall DaVinci Resolve. \n"
f"FUSION_UTILITY_SCRIPTS_DIR: `{us_dir}`"
)
try:
__import__("avalon.fusion")
__import__("pyblish")
except ImportError:
self.log.warning(
"pyblish: Could not load Fusion integration.",
exc_info=True
)
else:
# Resolve Setup integration
importlib.reload(utils)
utils.setup(self.launch_context.env)

View file

@ -0,0 +1,45 @@
import os
import openpype.api
from avalon import fusion
class CreateOpenEXRSaver(openpype.api.Creator):
name = "openexrDefault"
label = "Create OpenEXR Saver"
hosts = ["fusion"]
family = "render"
def process(self):
file_format = "OpenEXRFormat"
comp = fusion.get_current_comp()
# todo: improve method of getting current environment
# todo: pref avalon.Session over os.environ
workdir = os.path.normpath(os.environ["AVALON_WORKDIR"])
filename = "{}..tiff".format(self.name)
filepath = os.path.join(workdir, "render", filename)
with fusion.comp_lock_and_undo_chunk(comp):
args = (-32768, -32768) # Magical position numbers
saver = comp.AddTool("Saver", *args)
saver.SetAttrs({"TOOLS_Name": self.name})
# Setting input attributes is different from basic attributes.
# Not to be confused with "MainInputAttributes".
saver["Clip"] = filepath
saver["OutputFormat"] = file_format
# Set OpenEXR-specific settings
if saver[file_format] is None:
raise RuntimeError("File format is not set to OpenEXRFormat, "
"this is a bug")
# Set file format attributes
saver[file_format]["Depth"] = 1 # int8 | int16 | float32 | other
saver[file_format]["SaveAlpha"] = 0

View file

@ -0,0 +1,25 @@
from avalon import api
class FusionSelectContainers(api.InventoryAction):
label = "Select Containers"
icon = "mouse-pointer"
color = "#d8d8d8"
def process(self, containers):
import avalon.fusion
tools = [i["_tool"] for i in containers]
comp = avalon.fusion.get_current_comp()
flow = comp.CurrentFrame.FlowView
with avalon.fusion.comp_lock_and_undo_chunk(comp, self.label):
# Clear selection
flow.Select()
# Select tool
for tool in tools:
flow.Select(tool)

View file

@ -0,0 +1,68 @@
from avalon import api, style
from avalon.vendor.Qt import QtGui, QtWidgets
import avalon.fusion
class FusionSetToolColor(api.InventoryAction):
"""Update the color of the selected tools"""
label = "Set Tool Color"
icon = "plus"
color = "#d8d8d8"
_fallback_color = QtGui.QColor.fromRgbF(1.0, 1.0, 1.0)
def process(self, containers):
"""Color all selected tools the selected colors"""
result = []
comp = avalon.fusion.get_current_comp()
# Get tool color
first = containers[0]
tool = first["_tool"]
color = tool.TileColor
if color is not None:
qcolor = QtGui.QColor().fromRgbF(color["R"], color["G"], color["B"])
else:
qcolor = self._fallback_color
# Launch pick color
picked_color = self.get_color_picker(qcolor)
if not picked_color:
return
with avalon.fusion.comp_lock_and_undo_chunk(comp):
for container in containers:
# Convert color to RGB 0-1 floats
rgb_f = picked_color.getRgbF()
rgb_f_table = {"R": rgb_f[0], "G": rgb_f[1], "B": rgb_f[2]}
# Update tool
tool = container["_tool"]
tool.TileColor = rgb_f_table
result.append(container)
return result
def get_color_picker(self, color):
"""Launch color picker and return chosen color
Args:
color(QtGui.QColor): Start color to display
Returns:
QtGui.QColor
"""
color_dialog = QtWidgets.QColorDialog(color)
color_dialog.setStyleSheet(style.load_stylesheet())
accepted = color_dialog.exec_()
if not accepted:
return
return color_dialog.selectedColor()

View file

@ -0,0 +1,76 @@
"""A module containing generic loader actions that will display in the Loader.
"""
from avalon import api
class FusionSetFrameRangeLoader(api.Loader):
"""Specific loader of Alembic for the avalon.animation family"""
families = ["animation",
"camera",
"imagesequence",
"yeticache",
"pointcache"]
representations = ["*"]
label = "Set frame range"
order = 11
icon = "clock-o"
color = "white"
def load(self, context, name, namespace, data):
from openpype.hosts.fusion.api import lib
version = context['version']
version_data = version.get("data", {})
start = version_data.get("frameStart", None)
end = version_data.get("frameEnd", None)
if start is None or end is None:
print("Skipping setting frame range because start or "
"end frame data is missing..")
return
lib.update_frame_range(start, end)
class FusionSetFrameRangeWithHandlesLoader(api.Loader):
"""Specific loader of Alembic for the avalon.animation family"""
families = ["animation",
"camera",
"imagesequence",
"yeticache",
"pointcache"]
representations = ["*"]
label = "Set frame range (with handles)"
order = 12
icon = "clock-o"
color = "white"
def load(self, context, name, namespace, data):
from openpype.hosts.fusion.api import lib
version = context['version']
version_data = version.get("data", {})
start = version_data.get("frameStart", None)
end = version_data.get("frameEnd", None)
if start is None or end is None:
print("Skipping setting frame range because start or "
"end frame data is missing..")
return
# Include handles
handles = version_data.get("handles", 0)
start -= handles
end += handles
lib.update_frame_range(start, end)

View file

@ -0,0 +1,263 @@
import os
import contextlib
from avalon import api
import avalon.io as io
from avalon import fusion
comp = fusion.get_current_comp()
@contextlib.contextmanager
def preserve_inputs(tool, inputs):
"""Preserve the tool's inputs after context"""
comp = tool.Comp()
values = {}
for name in inputs:
tool_input = getattr(tool, name)
value = tool_input[comp.TIME_UNDEFINED]
values[name] = value
try:
yield
finally:
for name, value in values.items():
tool_input = getattr(tool, name)
tool_input[comp.TIME_UNDEFINED] = value
@contextlib.contextmanager
def preserve_trim(loader, log=None):
"""Preserve the relative trim of the Loader tool.
This tries to preserve the loader's trim (trim in and trim out) after
the context by reapplying the "amount" it trims on the clip's length at
start and end.
"""
# Get original trim as amount of "trimming" from length
time = loader.Comp().TIME_UNDEFINED
length = loader.GetAttrs()["TOOLIT_Clip_Length"][1] - 1
trim_from_start = loader["ClipTimeStart"][time]
trim_from_end = length - loader["ClipTimeEnd"][time]
try:
yield
finally:
length = loader.GetAttrs()["TOOLIT_Clip_Length"][1] - 1
if trim_from_start > length:
trim_from_start = length
if log:
log.warning("Reducing trim in to %d "
"(because of less frames)" % trim_from_start)
remainder = length - trim_from_start
if trim_from_end > remainder:
trim_from_end = remainder
if log:
log.warning("Reducing trim in to %d "
"(because of less frames)" % trim_from_end)
loader["ClipTimeStart"][time] = trim_from_start
loader["ClipTimeEnd"][time] = length - trim_from_end
def loader_shift(loader, frame, relative=True):
"""Shift global in time by i preserving duration
This moves the loader by i frames preserving global duration. When relative
is False it will shift the global in to the start frame.
Args:
loader (tool): The fusion loader tool.
frame (int): The amount of frames to move.
relative (bool): When True the shift is relative, else the shift will
change the global in to frame.
Returns:
int: The resulting relative frame change (how much it moved)
"""
comp = loader.Comp()
time = comp.TIME_UNDEFINED
old_in = loader["GlobalIn"][time]
old_out = loader["GlobalOut"][time]
if relative:
shift = frame
else:
shift = frame - old_in
# Shifting global in will try to automatically compensate for the change
# in the "ClipTimeStart" and "HoldFirstFrame" inputs, so we preserve those
# input values to "just shift" the clip
with preserve_inputs(loader, inputs=["ClipTimeStart",
"ClipTimeEnd",
"HoldFirstFrame",
"HoldLastFrame"]):
# GlobalIn cannot be set past GlobalOut or vice versa
# so we must apply them in the order of the shift.
if shift > 0:
loader["GlobalOut"][time] = old_out + shift
loader["GlobalIn"][time] = old_in + shift
else:
loader["GlobalIn"][time] = old_in + shift
loader["GlobalOut"][time] = old_out + shift
return int(shift)
class FusionLoadSequence(api.Loader):
"""Load image sequence into Fusion"""
families = ["imagesequence", "review"]
representations = ["*"]
label = "Load sequence"
order = -10
icon = "code-fork"
color = "orange"
def load(self, context, name, namespace, data):
from avalon.fusion import (
imprint_container,
get_current_comp,
comp_lock_and_undo_chunk
)
# Fallback to asset name when namespace is None
if namespace is None:
namespace = context['asset']['name']
# Use the first file for now
path = self._get_first_image(os.path.dirname(self.fname))
# Create the Loader with the filename path set
comp = get_current_comp()
with comp_lock_and_undo_chunk(comp, "Create Loader"):
args = (-32768, -32768)
tool = comp.AddTool("Loader", *args)
tool["Clip"] = path
# Set global in point to start frame (if in version.data)
start = context["version"]["data"].get("frameStart", None)
if start is not None:
loader_shift(tool, start, relative=False)
imprint_container(tool,
name=name,
namespace=namespace,
context=context,
loader=self.__class__.__name__)
def switch(self, container, representation):
self.update(container, representation)
def update(self, container, representation):
"""Update the Loader's path
Fusion automatically tries to reset some variables when changing
the loader's path to a new file. These automatic changes are to its
inputs:
- ClipTimeStart: Fusion reset to 0 if duration changes
- We keep the trim in as close as possible to the previous value.
When there are fewer frames than the amount of trim we reduce
it accordingly.
- ClipTimeEnd: Fusion reset to 0 if duration changes
- We keep the trim out as close as possible to the previous value
within new amount of frames after trim in (ClipTimeStart) has
been set.
- GlobalIn: Fusion reset to comp's global in if duration changes
- We change it to the "frameStart"
- GlobalEnd: Fusion resets to globalIn + length if duration changes
- We do the same as Fusion - allow Fusion to take control.
- HoldFirstFrame: Fusion resets this to 0
- We preserve the value.
- HoldLastFrame: Fusion resets this to 0
- We preserve the value.
- Reverse: Fusion resets to disabled if "Loop" is not enabled.
- We preserve the value.
- Depth: Fusion resets to "Format"
- We preserve the value.
- KeyCode: Fusion resets to ""
- We preserve the value.
- TimeCodeOffset: Fusion resets to 0
- We preserve the value.
"""
from avalon.fusion import comp_lock_and_undo_chunk
tool = container["_tool"]
assert tool.ID == "Loader", "Must be Loader"
comp = tool.Comp()
root = api.get_representation_path(representation)
path = self._get_first_image(root)
# Get start frame from version data
version = io.find_one({"type": "version",
"_id": representation["parent"]})
start = version["data"].get("frameStart")
if start is None:
self.log.warning("Missing start frame for updated version"
"assuming starts at frame 0 for: "
"{} ({})".format(tool.Name, representation))
start = 0
with comp_lock_and_undo_chunk(comp, "Update Loader"):
# Update the loader's path whilst preserving some values
with preserve_trim(tool, log=self.log):
with preserve_inputs(tool,
inputs=("HoldFirstFrame",
"HoldLastFrame",
"Reverse",
"Depth",
"KeyCode",
"TimeCodeOffset")):
tool["Clip"] = path
# Set the global in to the start frame of the sequence
global_in_changed = loader_shift(tool, start, relative=False)
if global_in_changed:
# Log this change to the user
self.log.debug("Changed '%s' global in: %d" % (tool.Name,
start))
# Update the imprinted representation
tool.SetData("avalon.representation", str(representation["_id"]))
def remove(self, container):
from avalon.fusion import comp_lock_and_undo_chunk
tool = container["_tool"]
assert tool.ID == "Loader", "Must be Loader"
comp = tool.Comp()
with comp_lock_and_undo_chunk(comp, "Remove Loader"):
tool.Delete()
def _get_first_image(self, root):
"""Get first file in representation root"""
files = sorted(os.listdir(root))
return os.path.join(root, files[0])
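As a usage note for loader_shift above, the shift bookkeeping can be reasoned about without a live comp; a small sketch of just the arithmetic (compute_shift is a made-up helper mirroring the function's relative/absolute branch):

def compute_shift(old_in, frame, relative):
    # relative=True nudges by `frame`; relative=False pins global in
    # to `frame`, so the applied shift is the difference
    return frame if relative else frame - old_in

assert compute_shift(old_in=1001, frame=10, relative=True) == 10
assert compute_shift(old_in=1001, frame=1001, relative=False) == 0
assert compute_shift(old_in=990, frame=1001, relative=False) == 11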

View file

@ -0,0 +1,24 @@
import os
import pyblish.api
from avalon import fusion
class CollectCurrentCompFusion(pyblish.api.ContextPlugin):
"""Collect current comp"""
order = pyblish.api.CollectorOrder - 0.4
label = "Collect Current Comp"
hosts = ["fusion"]
def process(self, context):
"""Collect all image sequence tools"""
current_comp = fusion.get_current_comp()
assert current_comp, "Must have active Fusion composition"
context.data["currentComp"] = current_comp
# Store path to current file
filepath = current_comp.GetAttrs().get("COMPS_FileName", "")
context.data['currentFile'] = filepath

View file

@ -0,0 +1,22 @@
import pyblish.api
class CollectFusionVersion(pyblish.api.ContextPlugin):
"""Collect current comp"""
order = pyblish.api.CollectorOrder
label = "Collect Fusion Version"
hosts = ["fusion"]
def process(self, context):
"""Collect all image sequence tools"""
comp = context.data.get("currentComp")
if not comp:
raise RuntimeError("No comp previously collected, unable to "
"retrieve Fusion version.")
version = comp.GetApp().Version
context.data["fusionVersion"] = version
self.log.info("Fusion version: %s" % version)

View file

@ -0,0 +1,99 @@
import os
import pyblish.api
def get_comp_render_range(comp):
"""Return comp's start and end render range."""
comp_attrs = comp.GetAttrs()
start = comp_attrs["COMPN_RenderStart"]
end = comp_attrs["COMPN_RenderEnd"]
# Whenever render ranges are undefined fall back
# to the comp's global start and end
if start == -1000000000:
start = comp_attrs["COMPN_GlobalStart"]
if end == -1000000000:
end = comp_attrs["COMPN_GlobalEnd"]
return start, end
class CollectInstances(pyblish.api.ContextPlugin):
"""Collect Fusion saver instances
This additionally stores the Comp start and end render range in the
current context's data as "frameStart" and "frameEnd".
"""
order = pyblish.api.CollectorOrder
label = "Collect Instances"
hosts = ["fusion"]
def process(self, context):
"""Collect all image sequence tools"""
from avalon.fusion.lib import get_frame_path
comp = context.data["currentComp"]
# Get all savers in the comp
tools = comp.GetToolList(False).values()
savers = [tool for tool in tools if tool.ID == "Saver"]
start, end = get_comp_render_range(comp)
context.data["frameStart"] = int(start)
context.data["frameEnd"] = int(end)
for tool in savers:
path = tool["Clip"][comp.TIME_UNDEFINED]
tool_attrs = tool.GetAttrs()
active = not tool_attrs["TOOLB_PassThrough"]
if not path:
self.log.warning("Skipping saver because it "
"has no path set: {}".format(tool.Name))
continue
filename = os.path.basename(path)
head, padding, tail = get_frame_path(filename)
ext = os.path.splitext(path)[1]
assert tail == ext, ("Tail does not match %s" % ext)
subset = head.rstrip("_. ") # subset is head of the filename
# Include start and end render frame in label
label = "{subset} ({start}-{end})".format(subset=subset,
start=int(start),
end=int(end))
instance = context.create_instance(subset)
instance.data.update({
"asset": os.environ["AVALON_ASSET"], # todo: not a constant
"subset": subset,
"path": path,
"outputDir": os.path.dirname(path),
"ext": ext, # todo: should be redundant
"label": label,
"frameStart": context.data["frameStart"],
"frameEnd": context.data["frameEnd"],
"fps": context.data["fps"],
"families": ["render", "review", "ftrack"],
"family": "render",
"active": active,
"publish": active # backwards compatibility
})
instance.append(tool)
self.log.info("Found: \"%s\" " % path)
# Sort/grouped by family (preserving local index)
context[:] = sorted(context, key=self.sort_by_family)
return context
def sort_by_family(self, instance):
"""Sort by family"""
return instance.data.get("families", instance.data.get("family"))

View file

@ -0,0 +1,44 @@
import pyblish.api
class CollectFusionRenderMode(pyblish.api.InstancePlugin):
"""Collect current comp's render Mode
Options:
local
farm
Note that this value is set for each comp separately. When you save the
comp this information will be stored in that file. If for some reason the
available tool does not visualize which render mode is set for the
current comp, please run the following line in the console (Py2)
comp.GetData("pype.rendermode")
This will return the name of the current render mode as seen above under
Options.
"""
order = pyblish.api.CollectorOrder + 0.4
label = "Collect Render Mode"
hosts = ["fusion"]
families = ["render"]
def process(self, instance):
"""Collect all image sequence tools"""
options = ["local", "farm"]
comp = instance.context.data.get("currentComp")
if not comp:
raise RuntimeError("No comp previously collected, unable to "
"retrieve Fusion version.")
rendermode = comp.GetData("pype.rendermode") or "local"
assert rendermode in options, "Must be supported render mode"
self.log.info("Render mode: {0}".format(rendermode))
# Append family
family = "render.{0}".format(rendermode)
instance.data["families"].append(family)

View file

@ -0,0 +1,34 @@
import pyblish.api
class FusionIncrementCurrentFile(pyblish.api.ContextPlugin):
"""Increment the current file.
Saves the current file with an increased version number.
"""
label = "Increment current file"
order = pyblish.api.IntegratorOrder + 9.0
hosts = ["fusion"]
families = ["render.farm"]
optional = True
def process(self, context):
from openpype.lib import version_up
from openpype.action import get_errored_plugins_from_data
errored_plugins = get_errored_plugins_from_data(context)
if any(plugin.__name__ == "FusionSubmitDeadline"
for plugin in errored_plugins):
raise RuntimeError("Skipping incrementing current file because "
"submission to render farm failed.")
comp = context.data.get("currentComp")
assert comp, "Must have comp"
current_filepath = context.data["currentFile"]
new_filepath = version_up(current_filepath)
comp.Save(new_filepath)
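version_up is used as a black box here; behaviorally it bumps the version token in the file name, e.g. `shot010_v001.comp` becomes `shot010_v002.comp`. A simplified stand-in for illustration only (openpype.lib.version_up handles more cases than this):

import os
import re

def naive_version_up(filepath):
    # Bump a trailing vNNN token, preserving its zero padding
    head, ext = os.path.splitext(filepath)
    match = re.search(r"v(\d+)$", head)
    if not match:
        raise ValueError("No version token found: %s" % filepath)
    bumped = str(int(match.group(1)) + 1).zfill(len(match.group(1)))
    return "{}v{}{}".format(head[:match.start()], bumped, ext)

assert naive_version_up("shot010_v001.comp") == "shot010_v002.comp"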

View file

@ -0,0 +1,67 @@
import os
import pyblish.api
import avalon.fusion as fusion
from pprint import pformat
class Fusionlocal(pyblish.api.InstancePlugin):
"""Render the current Fusion composition locally.
Extract the result of savers by starting a comp render.
This will run the local render of Fusion.
"""
order = pyblish.api.ExtractorOrder - 0.1
label = "Render Local"
hosts = ["fusion"]
families = ["render.local"]
def process(self, instance):
context = instance.context
key = "__hasRun{}".format(self.__class__.__name__)
if context.data.get(key, False):
return
else:
context.data[key] = True
current_comp = context.data["currentComp"]
frame_start = current_comp.GetAttrs("COMPN_RenderStart")
frame_end = current_comp.GetAttrs("COMPN_RenderEnd")
path = instance.data["path"]
output_dir = instance.data["outputDir"]
ext = os.path.splitext(os.path.basename(path))[-1]
self.log.info("Starting render")
self.log.info("Start frame: {}".format(frame_start))
self.log.info("End frame: {}".format(frame_end))
with fusion.comp_lock_and_undo_chunk(current_comp):
result = current_comp.Render()
if "representations" not in instance.data:
instance.data["representations"] = []
collected_frames = os.listdir(output_dir)
repre = {
'name': ext[1:],
'ext': ext[1:],
'frameStart': "%0{}d".format(len(str(frame_end))) % frame_start,
'files': collected_frames,
"stagingDir": output_dir,
}
instance.data["representations"].append(repre)
# review representation
repre_preview = repre.copy()
repre_preview["name"] = repre_preview["ext"] = "mp4"
repre_preview["tags"] = ["review", "preview", "ftrackreview", "delete"]
instance.data["representations"].append(repre_preview)
self.log.debug(f"_ instance.data: {pformat(instance.data)}")
if not result:
raise RuntimeError("Comp render failed")

View file

@ -0,0 +1,21 @@
import pyblish.api
class FusionSaveComp(pyblish.api.ContextPlugin):
"""Save current comp"""
label = "Save current file"
order = pyblish.api.ExtractorOrder - 0.49
hosts = ["fusion"]
families = ["render"]
def process(self, context):
comp = context.data.get("currentComp")
assert comp, "Must have comp"
current = comp.GetAttrs().get("COMPS_FileName", "")
assert context.data['currentFile'] == current
self.log.info("Saving current file..")
comp.Save()

View file

@ -0,0 +1,155 @@
import os
import json
import getpass
from avalon import api
from avalon.vendor import requests
import pyblish.api
class FusionSubmitDeadline(pyblish.api.InstancePlugin):
"""Submit current Comp to Deadline
Renders are submitted to a Deadline Web Service as
supplied via settings key "DEADLINE_REST_URL".
"""
label = "Submit to Deadline"
order = pyblish.api.IntegratorOrder
hosts = ["fusion"]
families = ["render.farm"]
def process(self, instance):
context = instance.context
key = "__hasRun{}".format(self.__class__.__name__)
if context.data.get(key, False):
return
else:
context.data[key] = True
from avalon.fusion.lib import get_frame_path
deadline_url = (
context.data["system_settings"]
["modules"]
["deadline"]
["DEADLINE_REST_URL"]
)
assert deadline_url, "Requires DEADLINE_REST_URL"
# Collect all saver instances in context that are to be rendered
saver_instances = []
for instance in context[:]:
if self.families[0] not in instance.data.get("families", []):
# Allow only saver family instances
continue
if not instance.data.get("publish", True):
# Skip inactive instances
continue
self.log.debug(instance.data["name"])
saver_instances.append(instance)
if not saver_instances:
raise RuntimeError("No instances found for Deadline submittion")
fusion_version = int(context.data["fusionVersion"])
filepath = context.data["currentFile"]
filename = os.path.basename(filepath)
comment = context.data.get("comment", "")
deadline_user = context.data.get("deadlineUser", getpass.getuser())
# Documentation for keys available at:
# https://docs.thinkboxsoftware.com
# /products/deadline/8.0/1_User%20Manual/manual
# /manual-submission.html#job-info-file-options
payload = {
"JobInfo": {
# Top-level group name
"BatchName": filename,
# Asset dependency to wait for at least the scene file to sync.
"AssetDependency0": filepath,
# Job name, as seen in Monitor
"Name": filename,
# User, as seen in Monitor
"UserName": deadline_user,
# Use a default submission pool for Fusion
"Pool": "fusion",
"Plugin": "Fusion",
"Frames": "{start}-{end}".format(
start=int(context.data["frameStart"]),
end=int(context.data["frameEnd"])
),
"Comment": comment,
},
"PluginInfo": {
# Input
"FlowFile": filepath,
# Mandatory for Deadline
"Version": str(fusion_version),
# Render in high quality
"HighQuality": True,
# Whether saver output should be checked after rendering
# is complete
"CheckOutput": True,
# Proxy: higher numbers produce smaller images for faster test renders
# 1 = full resolution (no proxy)
"Proxy": 1,
},
# Mandatory for Deadline, may be empty
"AuxFiles": []
}
# Enable going to rendered frames from Deadline Monitor
for index, instance in enumerate(saver_instances):
head, padding, tail = get_frame_path(instance.data["path"])
path = "{}{}{}".format(head, "#" * padding, tail)
folder, filename = os.path.split(path)
payload["JobInfo"]["OutputDirectory%d" % index] = folder
payload["JobInfo"]["OutputFilename%d" % index] = filename
# Include critical variables with submission
keys = [
# TODO: This won't work if the slaves don't have access to
# these paths, such as if slaves are running Linux and the
# submitter is on Windows.
"PYTHONPATH",
"OFX_PLUGIN_PATH",
"FUSION9_MasterPrefs"
]
environment = dict({key: os.environ[key] for key in keys
if key in os.environ}, **api.Session)
payload["JobInfo"].update({
"EnvironmentKeyValue%d" % index: "{key}={value}".format(
key=key,
value=environment[key]
) for index, key in enumerate(environment)
})
self.log.info("Submitting..")
self.log.info(json.dumps(payload, indent=4, sort_keys=True))
# E.g. http://192.168.0.1:8082/api/jobs
url = "{}/api/jobs".format(DEADLINE_REST_URL)
response = requests.post(url, json=payload)
if not response.ok:
raise Exception(response.text)
# Store the response for dependent job submission plug-ins
for instance in saver_instances:
instance.data["deadlineSubmissionJob"] = response.json()

View file

@ -0,0 +1,40 @@
import pyblish.api
from openpype import action
class ValidateBackgroundDepth(pyblish.api.InstancePlugin):
"""Validate if all Background tool are set to float32 bit"""
order = pyblish.api.ValidatorOrder
label = "Validate Background Depth 32 bit"
actions = [action.RepairAction]
hosts = ["fusion"]
families = ["render"]
optional = True
@classmethod
def get_invalid(cls, instance):
context = instance.context
comp = context.data.get("currentComp")
assert comp, "Must have Comp object"
backgrounds = comp.GetToolList(False, "Background").values()
if not backgrounds:
return []
return [i for i in backgrounds if i.GetInput("Depth") != 4.0]
def process(self, instance):
invalid = self.get_invalid(instance)
if invalid:
raise RuntimeError("Found %i nodes which are not set to float32"
% len(invalid))
@classmethod
def repair(cls, instance):
comp = instance.context.data.get("currentComp")
invalid = cls.get_invalid(instance)
for i in invalid:
i.SetInput("Depth", 4.0, comp.TIME_UNDEFINED)

View file

@ -0,0 +1,29 @@
import os
import pyblish.api
class ValidateFusionCompSaved(pyblish.api.ContextPlugin):
"""Ensure current comp is saved"""
order = pyblish.api.ValidatorOrder
label = "Validate Comp Saved"
families = ["render"]
hosts = ["fusion"]
def process(self, context):
comp = context.data.get("currentComp")
assert comp, "Must have Comp object"
attrs = comp.GetAttrs()
filename = attrs["COMPS_FileName"]
if not filename:
raise RuntimeError("Comp is not saved.")
if not os.path.exists(filename):
raise RuntimeError("Comp file does not exist: %s" % filename)
if attrs["COMPB_Modified"]:
self.log.warning("Comp is modified. Save your comp to ensure your "
"changes propagate correctly.")

View file

@ -0,0 +1,41 @@
import pyblish.api
from openpype import action
class ValidateCreateFolderChecked(pyblish.api.InstancePlugin):
"""Valid if all savers have the input attribute CreateDir checked on
This attribute ensures that the folders to which the saver will write
will be created.
"""
order = pyblish.api.ValidatorOrder
actions = [action.RepairAction]
label = "Validate Create Folder Checked"
families = ["render"]
hosts = ["fusion"]
@classmethod
def get_invalid(cls, instance):
active = instance.data.get("active", instance.data.get("publish"))
if not active:
return []
tool = instance[0]
create_dir = tool.GetInput("CreateDir")
if create_dir == 0.0:
cls.log.error("%s has Create Folder turned off" % instance[0].Name)
return [tool]
def process(self, instance):
invalid = self.get_invalid(instance)
if invalid:
raise RuntimeError("Found Saver with Create Folder During "
"Render checked off")
@classmethod
def repair(cls, instance):
invalid = cls.get_invalid(instance)
for tool in invalid:
tool.SetInput("CreateDir", 1.0)

View file

@ -0,0 +1,36 @@
import os
import pyblish.api
class ValidateFilenameHasExtension(pyblish.api.InstancePlugin):
"""Ensure the Saver has an extension in the filename path
This disallows files written as `filename` instead of `filename.frame.ext`.
Fusion does not always set an extension for your filename when
changing the file format of the saver.
"""
order = pyblish.api.ValidatorOrder
label = "Validate Filename Has Extension"
families = ["render"]
hosts = ["fusion"]
def process(self, instance):
invalid = self.get_invalid(instance)
if invalid:
raise RuntimeError("Found Saver without an extension")
@classmethod
def get_invalid(cls, instance):
path = instance.data["path"]
fname, ext = os.path.splitext(path)
if not ext:
tool = instance[0]
cls.log.error("%s has no extension specified" % tool.Name)
return [tool]
return []
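Note that os.path.splitext only inspects the final suffix, so a frame number can pass as an extension; only a path with no suffix at all fails this validator. Concretely:

import os

assert os.path.splitext("render.0001.exr")[1] == ".exr"
assert os.path.splitext("render.0001")[1] == ".0001"  # passes validation
assert os.path.splitext("render")[1] == ""            # flagged as invalid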

View file

@ -0,0 +1,29 @@
import pyblish.api
class ValidateSaverHasInput(pyblish.api.InstancePlugin):
"""Validate saver has incoming connection
This ensures a Saver has at least an input connection.
"""
order = pyblish.api.ValidatorOrder
label = "Validate Saver Has Input"
families = ["render"]
hosts = ["fusion"]
@classmethod
def get_invalid(cls, instance):
saver = instance[0]
if not saver.Input.GetConnectedOutput():
return [saver]
return []
def process(self, instance):
invalid = self.get_invalid(instance)
if invalid:
raise RuntimeError("Saver has no incoming connection: "
"{} ({})".format(instance, invalid[0].Name))

View file

@ -0,0 +1,44 @@
import pyblish.api
class ValidateSaverPassthrough(pyblish.api.ContextPlugin):
"""Validate saver passthrough is similar to Pyblish publish state"""
order = pyblish.api.ValidatorOrder
label = "Validate Saver Passthrough"
families = ["render"]
hosts = ["fusion"]
def process(self, context):
# Workaround for ContextPlugin always running, even if no instance
# is present with the family
instances = pyblish.api.instances_by_plugin(instances=list(context),
plugin=self)
if not instances:
self.log.debug("Ignoring plugin.. (bugfix)")
invalid_instances = []
for instance in instances:
invalid = self.is_invalid(instance)
if invalid:
invalid_instances.append(instance)
if invalid_instances:
self.log.info("Reset pyblish to collect your current scene state, "
"that should fix error.")
raise RuntimeError("Invalid instances: "
"{0}".format(invalid_instances))
def is_invalid(self, instance):
saver = instance[0]
attr = saver.GetAttrs()
active = not attr["TOOLB_PassThrough"]
if active != instance.data["publish"]:
self.log.info("Saver has different passthrough state than "
"Pyblish: {} ({})".format(instance, saver.Name))
return [saver]
return []
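The saver's passthrough attribute is simply the inverse of the instance's publish state; syncing one saver from a console might look like this sketch (assumes `saver` is a Saver tool; SetAttrs is Fusion's standard attribute setter):

def sync_passthrough(saver, publish):
    # Active (publishing) savers must not be passed through
    saver.SetAttrs({"TOOLB_PassThrough": not publish})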

Some files were not shown because too many files have changed in this diff