Mirror of https://github.com/ynput/ayon-core.git, synced 2026-01-02 00:44:52 +01:00

Commit dfe17ff18a: Merge remote-tracking branch 'origin/develop' into feature/OP-3130_unreal-5-support

356 changed files with 8540 additions and 5050 deletions
.gitmodules (vendored, 3 lines changed)

@@ -1,3 +0,0 @@
-[submodule "repos/avalon-core"]
-	path = repos/avalon-core
-	url = https://github.com/pypeclub/avalon-core.git
@@ -627,8 +627,6 @@ class BootstrapRepos:

     Attributes:
         data_dir (Path): local OpenPype installation directory.
-        live_repo_dir (Path): path to repos directory if running live,
-            otherwise `None`.
         registry (OpenPypeSettingsRegistry): OpenPype registry object.
         zip_filter (list): List of files to exclude from zip
         openpype_filter (list): list of top level directories to

@@ -654,7 +652,7 @@ class BootstrapRepos:
         self.registry = OpenPypeSettingsRegistry()
         self.zip_filter = [".pyc", "__pycache__"]
         self.openpype_filter = [
-            "openpype", "repos", "schema", "LICENSE"
+            "openpype", "schema", "LICENSE"
         ]
         self._message = message

@@ -667,11 +665,6 @@ class BootstrapRepos:
            progress_callback = empty_progress
        self._progress_callback = progress_callback

-        if getattr(sys, "frozen", False):
-            self.live_repo_dir = Path(sys.executable).parent / "repos"
-        else:
-            self.live_repo_dir = Path(Path(__file__).parent / ".." / "repos")
-
    @staticmethod
    def get_version_path_from_list(
            version: str, version_list: list) -> Union[Path, None]:

@@ -736,11 +729,12 @@ class BootstrapRepos:
         # if repo dir is not set, we detect local "live" OpenPype repository
         # version and use it as a source. Otherwise repo_dir is user
         # entered location.
-        if not repo_dir:
-            version = OpenPypeVersion.get_installed_version_str()
-            repo_dir = self.live_repo_dir
-        else:
+        if repo_dir:
             version = self.get_version(repo_dir)
+        else:
+            installed_version = OpenPypeVersion.get_installed_version()
+            version = str(installed_version)
+            repo_dir = installed_version.path

         if not version:
             self._print("OpenPype not found.", LOG_ERROR)

@@ -756,7 +750,7 @@ class BootstrapRepos:
             Path(temp_dir) / f"openpype-v{version}.zip"
         self._print(f"creating zip: {temp_zip}")

-        self._create_openpype_zip(temp_zip, repo_dir.parent)
+        self._create_openpype_zip(temp_zip, repo_dir)
         if not os.path.exists(temp_zip):
             self._print("make archive failed.", LOG_ERROR)
             return None

@@ -1057,27 +1051,11 @@ class BootstrapRepos:
         if not archive.is_file() and not archive.exists():
             raise ValueError("Archive is not file.")

-        with ZipFile(archive, "r") as zip_file:
-            name_list = zip_file.namelist()
-
-        roots = []
-        paths = []
-        for item in name_list:
-            if not item.startswith("repos/"):
-                continue
-
-            root = item.split("/")[1]
-
-            if root not in roots:
-                roots.append(root)
-                paths.append(
-                    f"{archive}{os.path.sep}repos{os.path.sep}{root}")
-                sys.path.insert(0, paths[-1])
-
-        sys.path.insert(0, f"{archive}")
+        archive_path = str(archive)
+        sys.path.insert(0, archive_path)
         pythonpath = os.getenv("PYTHONPATH", "")
         python_paths = pythonpath.split(os.pathsep)
-        python_paths += paths
+        python_paths.insert(0, archive_path)

         os.environ["PYTHONPATH"] = os.pathsep.join(python_paths)

@@ -1094,24 +1072,8 @@ class BootstrapRepos:
             directory (Path): path to directory.

         """
+        sys.path.insert(0, directory.as_posix())
-        directory /= "repos"
-        if not directory.exists() and not directory.is_dir():
-            raise ValueError("directory is invalid")
-
-        roots = []
-        for item in directory.iterdir():
-            if item.is_dir():
-                root = item.as_posix()
-                if root not in roots:
-                    roots.append(root)
-                    sys.path.insert(0, root)
-
-        pythonpath = os.getenv("PYTHONPATH", "")
-        paths = pythonpath.split(os.pathsep)
-        paths += roots
-
-        os.environ["PYTHONPATH"] = os.pathsep.join(paths)

     @staticmethod
     def find_openpype_version(version, staging):

@@ -1437,6 +1399,7 @@ class BootstrapRepos:
         # create destination parent directories even if they don't exist.
         destination.mkdir(parents=True)

+        remove_source_file = False
         # version is directory
         if openpype_version.path.is_dir():
             # create zip inside temporary directory.

@@ -1470,6 +1433,8 @@ class BootstrapRepos:
             self._progress_callback(35)
             openpype_version.path = self._copy_zip(
                 openpype_version.path, destination)
+            # Mark zip to be deleted when done
+            remove_source_file = True

         # extract zip there
         self._print("extracting zip to destination ...")

@@ -1478,6 +1443,10 @@ class BootstrapRepos:
             zip_ref.extractall(destination)
             self._progress_callback(100)

+        # Remove zip file copied to local app data
+        if remove_source_file:
+            os.remove(openpype_version.path)
+
         return destination

     def _copy_zip(self, source: Path, destination: Path) -> Path:
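The PYTHONPATH hunks above replace per-repo path juggling with a single archive path. A minimal standalone sketch of why that is enough, relying on Python's built-in zipimport support; the archive path below is hypothetical:

    import os
    import sys

    # A .zip archive on sys.path behaves like a package root, so its
    # top-level packages become importable without per-folder entries.
    archive_path = "/path/to/openpype-v3.0.0.zip"  # hypothetical path
    sys.path.insert(0, archive_path)

    # Propagate the same path to child processes through PYTHONPATH.
    python_paths = os.getenv("PYTHONPATH", "").split(os.pathsep)
    python_paths.insert(0, archive_path)
    os.environ["PYTHONPATH"] = os.pathsep.join(python_paths)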
@@ -3,7 +3,6 @@ from .settings import (
     get_project_settings,
     get_current_project_settings,
     get_anatomy_settings,
     get_environments,
-
     SystemSettings,
     ProjectSettings

@@ -23,7 +22,6 @@ from .lib import (
     get_app_environments_for_context,
     source_hash,
     get_latest_version,
     get_global_environments,
     get_local_site_id,
     change_openpype_mongo_url,
-    create_project_folders,

@@ -69,10 +67,10 @@ __all__ = [
     "get_project_settings",
     "get_current_project_settings",
     "get_anatomy_settings",
     "get_environments",
+    "get_project_basic_paths",

     "SystemSettings",
     "ProjectSettings",

     "PypeLogger",
     "Logger",

@@ -102,8 +100,9 @@ __all__ = [

     # get contextual data
+    "version_up",
+    "get_hierarchy",
     "get_asset",
-    "get_hierarchy",
     "get_workdir_data",
     "get_version_from_path",
     "get_last_version_from_path",
     "get_app_environments_for_context",

@@ -111,7 +110,6 @@ __all__ = [

     "run_subprocess",
     "get_latest_version",
     "get_global_environments",

     "get_local_site_id",
     "change_openpype_mongo_url",
@@ -5,8 +5,7 @@ from openpype.lib import (
     prepare_app_environments,
     prepare_context_environments
 )

-import avalon.api
+from openpype.pipeline import AvalonMongoDB


 class GlobalHostDataHook(PreLaunchHook):

@@ -64,7 +63,7 @@ class GlobalHostDataHook(PreLaunchHook):
         self.data["anatomy"] = Anatomy(project_name)

         # Mongo connection
-        dbcon = avalon.api.AvalonMongoDB()
+        dbcon = AvalonMongoDB()
         dbcon.Session["AVALON_PROJECT"] = project_name
         dbcon.install()
@@ -12,9 +12,8 @@ from wsrpc_aiohttp import (

 from Qt import QtCore

+from openpype.pipeline import legacy_io
 from openpype.tools.utils import host_tools
-
-from avalon import api
 from openpype.tools.adobe_webserver.app import WebServerTool

 from .ws_stub import AfterEffectsServerStub

@@ -271,13 +270,13 @@ class AfterEffectsRoute(WebSocketRoute):
        log.info("Setting context change")
        log.info("project {} asset {} ".format(project, asset))
        if project:
-            api.Session["AVALON_PROJECT"] = project
+            legacy_io.Session["AVALON_PROJECT"] = project
            os.environ["AVALON_PROJECT"] = project
        if asset:
-            api.Session["AVALON_ASSET"] = asset
+            legacy_io.Session["AVALON_ASSET"] = asset
            os.environ["AVALON_ASSET"] = asset
        if task:
-            api.Session["AVALON_TASK"] = task
+            legacy_io.Session["AVALON_TASK"] = task
            os.environ["AVALON_TASK"] = task

    async def read(self):
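A note on the pattern in this hunk: legacy_io.Session is a dict-like context mapping, and every change is mirrored into os.environ because spawned processes read the AVALON_* variables from the environment rather than from the Session object. A minimal sketch with a hypothetical project name:

    import os

    from openpype.pipeline import legacy_io

    # Keep the in-process Session and the environment in sync so both
    # Python code and child processes observe the same context.
    legacy_io.Session["AVALON_PROJECT"] = "demo_project"  # hypothetical
    os.environ["AVALON_PROJECT"] = "demo_project"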
@@ -4,7 +4,6 @@ import sys
 from Qt import QtWidgets

 import pyblish.api
-from avalon import io

 from openpype import lib
 from openpype.api import Logger

@@ -14,7 +13,7 @@ from openpype.pipeline import (
     deregister_loader_plugin_path,
     deregister_creator_plugin_path,
     AVALON_CONTAINER_ID,
-    registered_host,
+    legacy_io,
 )
 import openpype.hosts.aftereffects
 from openpype.lib import register_event_callback

@@ -140,23 +139,11 @@ def check_inventory():
     if not lib.any_outdated():
         return

-    host = pyblish.api.registered_host()
-    outdated_containers = []
-    for container in host.ls():
-        representation = container['representation']
-        representation_doc = io.find_one(
-            {
-                "_id": io.ObjectId(representation),
-                "type": "representation"
-            },
-            projection={"parent": True}
-        )
-        if representation_doc and not lib.is_latest(representation_doc):
-            outdated_containers.append(container)
-
     # Warn about outdated containers.
-    print("Starting new QApplication..")
-    _app = QtWidgets.QApplication(sys.argv)
+    _app = QtWidgets.QApplication.instance()
+    if not _app:
+        print("Starting new QApplication..")
+        _app = QtWidgets.QApplication([])

     message_box = QtWidgets.QMessageBox()
     message_box.setIcon(QtWidgets.QMessageBox.Warning)

@@ -282,11 +269,10 @@ def update_context_data(data, changes):

 def get_context_title():
     """Returns title for Creator window"""
-    import avalon.api
-
-    project_name = avalon.api.Session["AVALON_PROJECT"]
-    asset_name = avalon.api.Session["AVALON_ASSET"]
-    task_name = avalon.api.Session["AVALON_TASK"]
+    project_name = legacy_io.Session["AVALON_PROJECT"]
+    asset_name = legacy_io.Session["AVALON_ASSET"]
+    task_name = legacy_io.Session["AVALON_TASK"]
     return "{}/{}/{}".format(project_name, asset_name, task_name)
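The QApplication change above is the standard Qt singleton guard: only one QApplication may exist per process, so the code reuses a running instance before creating a new one. A standalone sketch of the same guard:

    from Qt import QtWidgets

    # Creating a second QApplication raises RuntimeError, so reuse the
    # instance the host may have started already.
    app = QtWidgets.QApplication.instance()
    if not app:
        print("Starting new QApplication..")
        app = QtWidgets.QApplication([])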
@@ -1,12 +1,11 @@
-from avalon import api as avalon_api
-
 from openpype import resources
 from openpype.lib import BoolDef, UISeparatorDef
 from openpype.hosts.aftereffects import api
 from openpype.pipeline import (
     Creator,
     CreatedInstance,
-    CreatorError
+    CreatorError,
+    legacy_io,
 )

@@ -116,7 +115,7 @@ class RenderCreator(Creator):
         instance_data.pop("uuid")

         if not instance_data.get("task"):
-            instance_data["task"] = avalon_api.Session.get("AVALON_TASK")
+            instance_data["task"] = legacy_io.Session.get("AVALON_TASK")

         if not instance_data.get("creator_attributes"):
             is_old_farm = instance_data["family"] != "renderLocal"
@@ -1,9 +1,8 @@
-from avalon import io
-
 import openpype.hosts.aftereffects.api as api
 from openpype.pipeline import (
     AutoCreator,
-    CreatedInstance
+    CreatedInstance,
+    legacy_io,
 )

@@ -36,13 +35,16 @@ class AEWorkfileCreator(AutoCreator):
                 break

         variant = ''
-        project_name = io.Session["AVALON_PROJECT"]
-        asset_name = io.Session["AVALON_ASSET"]
-        task_name = io.Session["AVALON_TASK"]
-        host_name = io.Session["AVALON_APP"]
+        project_name = legacy_io.Session["AVALON_PROJECT"]
+        asset_name = legacy_io.Session["AVALON_ASSET"]
+        task_name = legacy_io.Session["AVALON_TASK"]
+        host_name = legacy_io.Session["AVALON_APP"]

         if existing_instance is None:
-            asset_doc = io.find_one({"type": "asset", "name": asset_name})
+            asset_doc = legacy_io.find_one({
+                "type": "asset",
+                "name": asset_name
+            })
             subset_name = self.get_subset_name(
                 variant, task_name, asset_doc, project_name, host_name
             )

@@ -67,7 +69,10 @@ class AEWorkfileCreator(AutoCreator):
             existing_instance["asset"] != asset_name
             or existing_instance["task"] != task_name
         ):
-            asset_doc = io.find_one({"type": "asset", "name": asset_name})
+            asset_doc = legacy_io.find_one({
+                "type": "asset",
+                "name": asset_name
+            })
             subset_name = self.get_subset_name(
                 variant, task_name, asset_doc, project_name, host_name
             )
@@ -1,7 +1,8 @@
 import os
-from avalon import api
+
 import pyblish.api
 from openpype.lib import get_subset_name_with_asset_doc
+from openpype.pipeline import legacy_io


 class CollectWorkfile(pyblish.api.ContextPlugin):

@@ -41,7 +42,7 @@ class CollectWorkfile(pyblish.api.ContextPlugin):
         instance.data["publish"] = instance.data["active"]  # for DL

     def _get_new_instance(self, context, scene_file):
-        task = api.Session["AVALON_TASK"]
+        task = legacy_io.Session["AVALON_TASK"]
         version = context.data["version"]
         asset_entity = context.data["assetEntity"]
         project_entity = context.data["projectEntity"]
@@ -1,7 +1,10 @@
-from avalon import api
 import pyblish.api

 import openpype.api
-from openpype.pipeline import PublishXmlValidationError
+from openpype.pipeline import (
+    PublishXmlValidationError,
+    legacy_io,
+)
 from openpype.hosts.aftereffects.api import get_stub

@@ -27,7 +30,7 @@ class ValidateInstanceAssetRepair(pyblish.api.Action):
         for instance in instances:
             data = stub.read(instance[0])

-            data["asset"] = api.Session["AVALON_ASSET"]
+            data["asset"] = legacy_io.Session["AVALON_ASSET"]
             stub.imprint(instance[0].instance_id, data)

@@ -51,7 +54,7 @@ class ValidateInstanceAsset(pyblish.api.InstancePlugin):

     def process(self, instance):
         instance_asset = instance.data["asset"]
-        current_asset = api.Session["AVALON_ASSET"]
+        current_asset = legacy_io.Session["AVALON_ASSET"]
         msg = (
             f"Instance asset {instance_asset} is not the same "
             f"as current context {current_asset}."
@@ -15,9 +15,9 @@ from Qt import QtWidgets, QtCore
 import bpy
 import bpy.utils.previews

-import avalon.api
-from openpype.tools.utils import host_tools
 from openpype import style
+from openpype.pipeline import legacy_io
+from openpype.tools.utils import host_tools

 from .workio import OpenFileCacher

@@ -279,7 +279,7 @@ class LaunchLoader(LaunchQtApp):

     def before_window_show(self):
         self._window.set_context(
-            {"asset": avalon.api.Session["AVALON_ASSET"]},
+            {"asset": legacy_io.Session["AVALON_ASSET"]},
             refresh=True
         )

@@ -327,8 +327,8 @@ class LaunchWorkFiles(LaunchQtApp):
     def execute(self, context):
         result = super().execute(context)
         self._window.set_context({
-            "asset": avalon.api.Session["AVALON_ASSET"],
-            "task": avalon.api.Session["AVALON_TASK"]
+            "asset": legacy_io.Session["AVALON_ASSET"],
+            "task": legacy_io.Session["AVALON_TASK"]
         })
         return result

@@ -358,8 +358,8 @@ class TOPBAR_MT_avalon(bpy.types.Menu):
         else:
             pyblish_menu_icon_id = 0

-        asset = avalon.api.Session['AVALON_ASSET']
-        task = avalon.api.Session['AVALON_TASK']
+        asset = legacy_io.Session['AVALON_ASSET']
+        task = legacy_io.Session['AVALON_TASK']
         context_label = f"{asset}, {task}"
         context_label_item = layout.row()
         context_label_item.operator(
@@ -1,6 +1,5 @@
 import os
 import sys
-import importlib
 import traceback
 from typing import Callable, Dict, Iterator, List, Optional

@@ -10,16 +9,15 @@ from . import lib
 from . import ops

 import pyblish.api
-import avalon.api
-from avalon import io, schema

 from openpype.pipeline import (
+    schema,
+    legacy_io,
     register_loader_plugin_path,
     register_creator_plugin_path,
     deregister_loader_plugin_path,
     deregister_creator_plugin_path,
     AVALON_CONTAINER_ID,
-    uninstall_host,
 )
 from openpype.api import Logger
 from openpype.lib import (

@@ -85,8 +83,8 @@ def uninstall():


 def set_start_end_frames():
-    asset_name = io.Session["AVALON_ASSET"]
-    asset_doc = io.find_one({
+    asset_name = legacy_io.Session["AVALON_ASSET"]
+    asset_doc = legacy_io.find_one({
         "type": "asset",
         "name": asset_name
     })

@@ -190,7 +188,7 @@ def _on_task_changed():
     # `directory` attribute, so it opens in that directory (does it?).
     # https://docs.blender.org/api/blender2.8/bpy.types.Operator.html#calling-a-file-selector
     # https://docs.blender.org/api/blender2.8/bpy.types.WindowManager.html#bpy.types.WindowManager.fileselect_add
-    workdir = avalon.api.Session["AVALON_WORKDIR"]
+    workdir = legacy_io.Session["AVALON_WORKDIR"]
     log.debug("New working directory: %s", workdir)

@@ -201,26 +199,6 @@ def _register_events():
     log.info("Installed event callback for 'taskChanged'...")


-def reload_pipeline(*args):
-    """Attempt to reload pipeline at run-time.
-
-    Warning:
-        This is primarily for development and debugging purposes and not well
-        tested.
-
-    """
-
-    uninstall_host()
-
-    for module in (
-        "avalon.io",
-        "avalon.pipeline",
-        "avalon.api",
-    ):
-        module = importlib.import_module(module)
-        importlib.reload(module)
-
-
 def _discover_gui() -> Optional[Callable]:
     """Return the most desirable of the currently registered GUIs"""
@@ -266,7 +266,7 @@ class AssetLoader(LoaderPlugin):
         # Only containerise if it's not already a collection from a .blend file.
         # representation = context["representation"]["name"]
         # if representation != "blend":
-        #     from avalon.blender.pipeline import containerise
+        #     from openpype.hosts.blender.api.pipeline import containerise
         #     return containerise(
         #         name=name,
         #         namespace=namespace,
@@ -2,7 +2,7 @@

 import bpy

-from avalon import api
+from openpype.pipeline import legacy_io
 import openpype.hosts.blender.api.plugin
 from openpype.hosts.blender.api import lib

@@ -22,7 +22,7 @@ class CreateAction(openpype.hosts.blender.api.plugin.Creator):
         name = openpype.hosts.blender.api.plugin.asset_name(asset, subset)
         collection = bpy.data.collections.new(name=name)
         bpy.context.scene.collection.children.link(collection)
-        self.data['task'] = api.Session.get('AVALON_TASK')
+        self.data['task'] = legacy_io.Session.get('AVALON_TASK')
         lib.imprint(collection, self.data)

         if (self.options or {}).get("useSelection"):
@@ -2,7 +2,7 @@

 import bpy

-from avalon import api
+from openpype.pipeline import legacy_io
 from openpype.hosts.blender.api import plugin, lib, ops
 from openpype.hosts.blender.api.pipeline import AVALON_INSTANCES

@@ -37,7 +37,7 @@ class CreateAnimation(plugin.Creator):
         # asset_group.empty_display_type = 'SINGLE_ARROW'
         asset_group = bpy.data.collections.new(name=name)
         instances.children.link(asset_group)
-        self.data['task'] = api.Session.get('AVALON_TASK')
+        self.data['task'] = legacy_io.Session.get('AVALON_TASK')
         lib.imprint(asset_group, self.data)

         if (self.options or {}).get("useSelection"):
@@ -2,7 +2,7 @@

 import bpy

-from avalon import api
+from openpype.pipeline import legacy_io
 from openpype.hosts.blender.api import plugin, lib, ops
 from openpype.hosts.blender.api.pipeline import AVALON_INSTANCES

@@ -40,7 +40,7 @@ class CreateCamera(plugin.Creator):
         asset_group = bpy.data.objects.new(name=name, object_data=None)
         asset_group.empty_display_type = 'SINGLE_ARROW'
         instances.objects.link(asset_group)
-        self.data['task'] = api.Session.get('AVALON_TASK')
+        self.data['task'] = legacy_io.Session.get('AVALON_TASK')
         print(f"self.data: {self.data}")
         lib.imprint(asset_group, self.data)
@@ -2,7 +2,7 @@

 import bpy

-from avalon import api
+from openpype.pipeline import legacy_io
 from openpype.hosts.blender.api import plugin, lib, ops
 from openpype.hosts.blender.api.pipeline import AVALON_INSTANCES

@@ -34,7 +34,7 @@ class CreateLayout(plugin.Creator):
         asset_group = bpy.data.objects.new(name=name, object_data=None)
         asset_group.empty_display_type = 'SINGLE_ARROW'
         instances.objects.link(asset_group)
-        self.data['task'] = api.Session.get('AVALON_TASK')
+        self.data['task'] = legacy_io.Session.get('AVALON_TASK')
         lib.imprint(asset_group, self.data)

         # Add selected objects to instance
@@ -2,7 +2,7 @@

 import bpy

-from avalon import api
+from openpype.pipeline import legacy_io
 from openpype.hosts.blender.api import plugin, lib, ops
 from openpype.hosts.blender.api.pipeline import AVALON_INSTANCES

@@ -34,7 +34,7 @@ class CreateModel(plugin.Creator):
         asset_group = bpy.data.objects.new(name=name, object_data=None)
         asset_group.empty_display_type = 'SINGLE_ARROW'
         instances.objects.link(asset_group)
-        self.data['task'] = api.Session.get('AVALON_TASK')
+        self.data['task'] = legacy_io.Session.get('AVALON_TASK')
         lib.imprint(asset_group, self.data)

         # Add selected objects to instance
@@ -2,7 +2,7 @@

 import bpy

-from avalon import api
+from openpype.pipeline import legacy_io
 import openpype.hosts.blender.api.plugin
 from openpype.hosts.blender.api import lib

@@ -22,7 +22,7 @@ class CreatePointcache(openpype.hosts.blender.api.plugin.Creator):
         name = openpype.hosts.blender.api.plugin.asset_name(asset, subset)
         collection = bpy.data.collections.new(name=name)
         bpy.context.scene.collection.children.link(collection)
-        self.data['task'] = api.Session.get('AVALON_TASK')
+        self.data['task'] = legacy_io.Session.get('AVALON_TASK')
         lib.imprint(collection, self.data)

         if (self.options or {}).get("useSelection"):
@@ -2,7 +2,7 @@

 import bpy

-from avalon import api
+from openpype.pipeline import legacy_io
 from openpype.hosts.blender.api import plugin, lib, ops
 from openpype.hosts.blender.api.pipeline import AVALON_INSTANCES

@@ -34,7 +34,7 @@ class CreateRig(plugin.Creator):
         asset_group = bpy.data.objects.new(name=name, object_data=None)
         asset_group.empty_display_type = 'SINGLE_ARROW'
         instances.objects.link(asset_group)
-        self.data['task'] = api.Session.get('AVALON_TASK')
+        self.data['task'] = legacy_io.Session.get('AVALON_TASK')
         lib.imprint(asset_group, self.data)

         # Add selected objects to instance
@@ -7,7 +7,7 @@ import bpy
 import bpy_extras
 import bpy_extras.anim_utils

-from avalon import io
+from openpype.pipeline import legacy_io
 from openpype.hosts.blender.api import plugin
 from openpype.hosts.blender.api.pipeline import AVALON_PROPERTY
 import openpype.api

@@ -139,7 +139,7 @@ class ExtractLayout(openpype.api.Extractor):

             self.log.debug("Parent: {}".format(parent))
             # Get blend reference
-            blend = io.find_one(
+            blend = legacy_io.find_one(
                 {
                     "type": "representation",
                     "parent": ObjectId(parent),

@@ -150,7 +150,7 @@ class ExtractLayout(openpype.api.Extractor):
             if blend:
                 blend_id = blend["_id"]
             # Get fbx reference
-            fbx = io.find_one(
+            fbx = legacy_io.find_one(
                 {
                     "type": "representation",
                     "parent": ObjectId(parent),

@@ -161,7 +161,7 @@ class ExtractLayout(openpype.api.Extractor):
             if fbx:
                 fbx_id = fbx["_id"]
             # Get abc reference
-            abc = io.find_one(
+            abc = legacy_io.find_one(
                 {
                     "type": "representation",
                     "parent": ObjectId(parent),
@@ -1,6 +1,5 @@
 import json

-from avalon import io
 import pyblish.api
@@ -1,10 +1,10 @@
 import os
 import collections
+from pprint import pformat

 import pyblish.api
-from avalon import io

-from pprint import pformat
+from openpype.pipeline import legacy_io


 class AppendCelactionAudio(pyblish.api.ContextPlugin):

@@ -60,7 +60,7 @@ class AppendCelactionAudio(pyblish.api.ContextPlugin):
         """

         # Query all subsets for asset
-        subset_docs = io.find({
+        subset_docs = legacy_io.find({
             "type": "subset",
             "parent": asset_doc["_id"]
         })

@@ -93,7 +93,7 @@ class AppendCelactionAudio(pyblish.api.ContextPlugin):
             }}
         ]
         last_versions_by_subset_id = dict()
-        for doc in io.aggregate(pipeline):
+        for doc in legacy_io.aggregate(pipeline):
             doc["parent"] = doc["_id"]
             doc["_id"] = doc.pop("_version_id")
             last_versions_by_subset_id[doc["parent"]] = doc

@@ -102,7 +102,7 @@ class AppendCelactionAudio(pyblish.api.ContextPlugin):
         for version_doc in last_versions_by_subset_id.values():
             version_docs_by_id[version_doc["_id"]] = version_doc

-        repre_docs = io.find({
+        repre_docs = legacy_io.find({
             "type": "representation",
             "parent": {"$in": list(version_docs_by_id.keys())},
             "name": {"$in": representations}
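The aggregate call in the hunk above builds a latest-version-per-subset lookup, then renames _version_id back to _id so each result looks like a plain version document. A hedged sketch of the surrounding pipeline; the $match, $sort and $group stages are assumptions inferred from the field names in the hunk, not the verbatim query, and subset_ids stands in for the queried subset ids:

    # Hypothetical reconstruction of the aggregation feeding the loop.
    pipeline = [
        {"$match": {"type": "version", "parent": {"$in": subset_ids}}},
        {"$sort": {"name": 1}},
        {"$group": {
            "_id": "$parent",
            "_version_id": {"$last": "$_id"},
        }},
    ]
    last_versions_by_subset_id = dict()
    for doc in legacy_io.aggregate(pipeline):
        doc["parent"] = doc["_id"]
        doc["_id"] = doc.pop("_version_id")
        last_versions_by_subset_id[doc["parent"]] = doc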
@@ -1,6 +1,6 @@
 import os
-from avalon import api
 import pyblish.api
+from openpype.pipeline import legacy_io


 class CollectCelactionInstances(pyblish.api.ContextPlugin):

@@ -10,7 +10,7 @@ class CollectCelactionInstances(pyblish.api.ContextPlugin):
     order = pyblish.api.CollectorOrder + 0.1

     def process(self, context):
-        task = api.Session["AVALON_TASK"]
+        task = legacy_io.Session["AVALON_TASK"]
         current_file = context.data["currentFile"]
         staging_dir = os.path.dirname(current_file)
         scene_file = os.path.basename(current_file)
@@ -873,6 +873,5 @@ class OpenClipSolver(flib.MediaInfoFile):
             if feed_clr_obj is not None:
                 feed_clr_obj = ET.Element(
                     "colourSpace", {"type": "string"})
                 feed_clr_obj.text = profile_name
                 feed_storage_obj.append(feed_clr_obj)

-            feed_clr_obj.text = profile_name
@@ -26,12 +26,10 @@ class CollectTimelineInstances(pyblish.api.ContextPlugin):
     add_tasks = []

     def process(self, context):
-        project = context.data["flameProject"]
         selected_segments = context.data["flameSelectedSegments"]
         self.log.debug("__ selected_segments: {}".format(selected_segments))

         self.otio_timeline = context.data["otioTimeline"]
-        self.clips_in_reels = opfapi.get_clips_in_reels(project)
         self.fps = context.data["fps"]

         # process all sellected

@@ -63,9 +61,6 @@ class CollectTimelineInstances(pyblish.api.ContextPlugin):
             # get file path
             file_path = clip_data["fpath"]

-            # get source clip
-            source_clip = self._get_reel_clip(file_path)
-
             first_frame = opfapi.get_frame_from_filename(file_path) or 0

             head, tail = self._get_head_tail(clip_data, first_frame)

@@ -103,7 +98,6 @@ class CollectTimelineInstances(pyblish.api.ContextPlugin):
                 "families": families,
                 "publish": marker_data["publish"],
                 "fps": self.fps,
-                "flameSourceClip": source_clip,
                 "sourceFirstFrame": int(first_frame),
                 "path": file_path,
                 "flameAddTasks": self.add_tasks,

@@ -258,14 +252,6 @@ class CollectTimelineInstances(pyblish.api.ContextPlugin):
         )
         return head, tail

-    def _get_reel_clip(self, path):
-        match_reel_clip = [
-            clip for clip in self.clips_in_reels
-            if clip["fpath"] == path
-        ]
-        if match_reel_clip:
-            return match_reel_clip.pop()
-
     def _get_resolution_to_data(self, data, context):
         assert data.get("otioClip"), "Missing `otioClip` data"
@@ -1,6 +1,7 @@
 import pyblish.api
-import avalon.api as avalon

 import openpype.lib as oplib
+from openpype.pipeline import legacy_io
 import openpype.hosts.flame.api as opfapi
 from openpype.hosts.flame.otio import flame_export

@@ -18,7 +19,7 @@ class CollecTimelineOTIO(pyblish.api.ContextPlugin):

         # main
         asset_doc = context.data["assetEntity"]
-        task_name = avalon.Session["AVALON_TASK"]
+        task_name = legacy_io.Session["AVALON_TASK"]
         project = opfapi.get_current_project()
         sequence = opfapi.get_current_sequence(opfapi.CTX.selection)
@@ -1,4 +1,5 @@
 import os
+import re
 from pprint import pformat
 from copy import deepcopy

@@ -6,6 +7,8 @@ import pyblish.api
 import openpype.api
 from openpype.hosts.flame import api as opfapi

+import flame
+

 class ExtractSubsetResources(openpype.api.Extractor):
     """

@@ -20,27 +23,31 @@ class ExtractSubsetResources(openpype.api.Extractor):
     # plugin defaults
     default_presets = {
         "thumbnail": {
+            "active": True,
             "ext": "jpg",
             "xml_preset_file": "Jpeg (8-bit).xml",
             "xml_preset_dir": "",
             "export_type": "File Sequence",
-            "ignore_comment_attrs": True,
+            "parsed_comment_attrs": False,
             "colorspace_out": "Output - sRGB",
             "representation_add_range": False,
-            "representation_tags": ["thumbnail"]
+            "representation_tags": ["thumbnail"],
+            "path_regex": ".*"
         },
         "ftrackpreview": {
+            "active": True,
             "ext": "mov",
             "xml_preset_file": "Apple iPad (1920x1080).xml",
            "xml_preset_dir": "",
            "export_type": "Movie",
-            "ignore_comment_attrs": True,
+            "parsed_comment_attrs": False,
            "colorspace_out": "Output - Rec.709",
            "representation_add_range": True,
            "representation_tags": [
                "review",
                "delete"
-            ]
+            ],
+            "path_regex": ".*"
        }
    }
    keep_original_representation = False

@@ -61,13 +68,10 @@ class ExtractSubsetResources(openpype.api.Extractor):

         # flame objects
         segment = instance.data["item"]
+        asset_name = instance.data["asset"]
         segment_name = segment.name.get_value()
+        clip_path = instance.data["path"]
+        sequence_clip = instance.context.data["flameSequence"]
-        clip_data = instance.data["flameSourceClip"]
-
-        reel_clip = None
-        if clip_data:
-            reel_clip = clip_data["PyClip"]

         # segment's parent track name
         s_track_name = segment.parent.name.get_value()

@@ -104,14 +108,44 @@ class ExtractSubsetResources(openpype.api.Extractor):
         for unique_name, preset_config in export_presets.items():
             modify_xml_data = {}

+            # get activating attributes
+            activated_preset = preset_config["active"]
+            filter_path_regex = preset_config.get("filter_path_regex")
+
+            self.log.info(
+                "Preset `{}` is active `{}` with filter `{}`".format(
+                    unique_name, activated_preset, filter_path_regex
+                )
+            )
+            self.log.debug(
+                "__ clip_path: `{}`".format(clip_path))
+
+            # skip if not activated presete
+            if not activated_preset:
+                continue
+
+            # exclude by regex filter if any
+            if (
+                filter_path_regex
+                and not re.search(filter_path_regex, clip_path)
+            ):
+                continue
+
             # get all presets attributes
             extension = preset_config["ext"]
             preset_file = preset_config["xml_preset_file"]
             preset_dir = preset_config["xml_preset_dir"]
             export_type = preset_config["export_type"]
             repre_tags = preset_config["representation_tags"]
-            ignore_comment_attrs = preset_config["ignore_comment_attrs"]
+            parsed_comment_attrs = preset_config["parsed_comment_attrs"]
             color_out = preset_config["colorspace_out"]

             self.log.info(
                 "Processing `{}` as `{}` to `{}` type...".format(
                     preset_file, export_type, extension
                 )
             )

             # get attribures related loading in integrate_batch_group
             load_to_batch_group = preset_config.get(
                 "load_to_batch_group")

@@ -131,161 +165,157 @@ class ExtractSubsetResources(openpype.api.Extractor):
             in_mark = (source_start_handles - source_first_frame) + 1
             out_mark = in_mark + source_duration_handles

-            # make test for type of preset and available reel_clip
-            if (
-                not reel_clip
-                and export_type != "Sequence Publish"
-            ):
-                self.log.warning((
-                    "Skipping preset {}. Not available "
-                    "reel clip for {}").format(
-                        preset_file, segment_name
-                ))
-                continue
-
-            # by default export source clips
-            exporting_clip = reel_clip
-
-            if export_type == "Sequence Publish":
-                # change export clip to sequence
-                exporting_clip = sequence_clip
-
-                # change in/out marks to timeline in/out
-                in_mark = clip_in
-                out_mark = clip_out
-
-                # add xml tags modifications
-                modify_xml_data.update({
-                    "exportHandles": True,
-                    "nbHandles": handles,
-                    "startFrame": frame_start
-                })
-
-            if not ignore_comment_attrs:
-                # add any xml overrides collected form segment.comment
-                modify_xml_data.update(instance.data["xml_overrides"])
-
-            # with maintained duplication loop all presets
-            with opfapi.maintained_object_duplication(
-                    exporting_clip) as duplclip:
-                kwargs = {}
-
-                if export_type == "Sequence Publish":
-                    # only keep visible layer where instance segment is child
-                    self.hide_others(duplclip, segment_name, s_track_name)
-
-                # validate xml preset file is filled
-                if preset_file == "":
-                    raise ValueError(
-                        ("Check Settings for {} preset: "
-                         "`XML preset file` is not filled").format(
-                            unique_name)
-                    )
-
-                # resolve xml preset dir if not filled
-                if preset_dir == "":
-                    preset_dir = opfapi.get_preset_path_by_xml_name(
-                        preset_file)
-
-                    if not preset_dir:
-                        raise ValueError(
-                            ("Check Settings for {} preset: "
-                             "`XML preset file` {} is not found").format(
-                                unique_name, preset_file)
-                        )
-
-                # create preset path
-                preset_orig_xml_path = str(os.path.join(
-                    preset_dir, preset_file
-                ))
-
-                preset_path = opfapi.modify_preset_file(
-                    preset_orig_xml_path, staging_dir, modify_xml_data)
-
-                # define kwargs based on preset type
-                if "thumbnail" in unique_name:
-                    kwargs["thumb_frame_number"] = in_mark + (
-                        source_duration_handles / 2)
-                else:
-                    kwargs.update({
-                        "in_mark": in_mark,
-                        "out_mark": out_mark
-                    })
-
-                # get and make export dir paths
-                export_dir_path = str(os.path.join(
-                    staging_dir, unique_name
-                ))
-                os.makedirs(export_dir_path)
-
-                # export
-                opfapi.export_clip(
-                    export_dir_path, duplclip, preset_path, **kwargs)
-
-                extension = preset_config["ext"]
-
-                # create representation data
-                representation_data = {
-                    "name": unique_name,
-                    "outputName": unique_name,
-                    "ext": extension,
-                    "stagingDir": export_dir_path,
-                    "tags": repre_tags,
-                    "data": {
-                        "colorspace": color_out
-                    },
-                    "load_to_batch_group": load_to_batch_group,
-                    "batch_group_loader_name": batch_group_loader_name
-                }
-
-                # collect all available content of export dir
-                files = os.listdir(export_dir_path)
-
-                # make sure no nested folders inside
-                n_stage_dir, n_files = self._unfolds_nested_folders(
-                    export_dir_path, files, extension)
-
-                # fix representation in case of nested folders
-                if n_stage_dir:
-                    representation_data["stagingDir"] = n_stage_dir
-                    files = n_files
-
-                # add files to represetation but add
-                # imagesequence as list
-                if (
-                    # first check if path in files is not mov extension
-                    [
-                        f for f in files
-                        if os.path.splitext(f)[-1] == ".mov"
-                    ]
-                    # then try if thumbnail is not in unique name
-                    or unique_name == "thumbnail"
-                ):
-                    representation_data["files"] = files.pop()
-                else:
-                    representation_data["files"] = files
-
-                # add frame range
-                if preset_config["representation_add_range"]:
-                    representation_data.update({
-                        "frameStart": frame_start_handle,
-                        "frameEnd": (
-                            frame_start_handle + source_duration_handles),
-                        "fps": instance.data["fps"]
-                    })
-
-                instance.data["representations"].append(representation_data)
-
-                # add review family if found in tags
-                if "review" in repre_tags:
-                    instance.data["families"].append("review")
-
-                self.log.info("Added representation: {}".format(
-                    representation_data))
+            exporting_clip = None
+            name_patern_xml = "<name>_{}.".format(
+                unique_name)
+            if export_type == "Sequence Publish":
+                # change export clip to sequence
+                exporting_clip = flame.duplicate(sequence_clip)
+
+                # only keep visible layer where instance segment is child
+                self.hide_others(
+                    exporting_clip, segment_name, s_track_name)
+
+                # change name patern
+                name_patern_xml = (
+                    "<segment name>_<shot name>_{}.").format(
+                        unique_name)
+            else:
+                exporting_clip = self.import_clip(clip_path)
+                exporting_clip.name.set_value("{}_{}".format(
+                    asset_name, segment_name))
+
+            # change in/out marks to timeline in/out
+            in_mark = clip_in
+            out_mark = clip_out
+
+            # add xml tags modifications
+            modify_xml_data.update({
+                "exportHandles": True,
+                "nbHandles": handles,
+                "startFrame": frame_start,
+                "namePattern": name_patern_xml
+            })
+
+            if parsed_comment_attrs:
+                # add any xml overrides collected form segment.comment
+                modify_xml_data.update(instance.data["xml_overrides"])
+
+            self.log.debug("__ modify_xml_data: {}".format(pformat(
+                modify_xml_data
+            )))
+
+            export_kwargs = {}
+            # validate xml preset file is filled
+            if preset_file == "":
+                raise ValueError(
+                    ("Check Settings for {} preset: "
+                     "`XML preset file` is not filled").format(
+                        unique_name)
+                )
+
+            # resolve xml preset dir if not filled
+            if preset_dir == "":
+                preset_dir = opfapi.get_preset_path_by_xml_name(
+                    preset_file)
+
+                if not preset_dir:
+                    raise ValueError(
+                        ("Check Settings for {} preset: "
+                         "`XML preset file` {} is not found").format(
+                            unique_name, preset_file)
+                    )
+
+            # create preset path
+            preset_orig_xml_path = str(os.path.join(
+                preset_dir, preset_file
+            ))
+
+            preset_path = opfapi.modify_preset_file(
+                preset_orig_xml_path, staging_dir, modify_xml_data)
+
+            # define kwargs based on preset type
+            if "thumbnail" in unique_name:
+                export_kwargs["thumb_frame_number"] = int(in_mark + (
+                    source_duration_handles / 2))
+            else:
+                export_kwargs.update({
+                    "in_mark": in_mark,
+                    "out_mark": out_mark
+                })
+
+            # get and make export dir paths
+            export_dir_path = str(os.path.join(
+                staging_dir, unique_name
+            ))
+            os.makedirs(export_dir_path)
+
+            # export
+            opfapi.export_clip(
+                export_dir_path, exporting_clip, preset_path, **export_kwargs)
+
+            # create representation data
+            representation_data = {
+                "name": unique_name,
+                "outputName": unique_name,
+                "ext": extension,
+                "stagingDir": export_dir_path,
+                "tags": repre_tags,
+                "data": {
+                    "colorspace": color_out
+                },
+                "load_to_batch_group": load_to_batch_group,
+                "batch_group_loader_name": batch_group_loader_name
+            }
+
+            # collect all available content of export dir
+            files = os.listdir(export_dir_path)
+
+            # make sure no nested folders inside
+            n_stage_dir, n_files = self._unfolds_nested_folders(
+                export_dir_path, files, extension)
+
+            # fix representation in case of nested folders
+            if n_stage_dir:
+                representation_data["stagingDir"] = n_stage_dir
+                files = n_files
+
+            # add files to represetation but add
+            # imagesequence as list
+            if (
+                # first check if path in files is not mov extension
+                [
+                    f for f in files
+                    if os.path.splitext(f)[-1] == ".mov"
+                ]
+                # then try if thumbnail is not in unique name
+                or unique_name == "thumbnail"
+            ):
+                representation_data["files"] = files.pop()
+            else:
+                representation_data["files"] = files
+
+            # add frame range
+            if preset_config["representation_add_range"]:
+                representation_data.update({
+                    "frameStart": frame_start_handle,
+                    "frameEnd": (
+                        frame_start_handle + source_duration_handles),
+                    "fps": instance.data["fps"]
+                })
+
+            instance.data["representations"].append(representation_data)
+
+            # add review family if found in tags
+            if "review" in repre_tags:
+                instance.data["families"].append("review")
+
+            self.log.info("Added representation: {}".format(
+                representation_data))
+
+            if export_type == "Sequence Publish":
+                # at the end remove the duplicated clip
+                flame.delete(exporting_clip)

         self.log.debug("All representations: {}".format(
             pformat(instance.data["representations"])))

@@ -373,3 +403,18 @@ class ExtractSubsetResources(openpype.api.Extractor):
             for segment in track.segments:
                 if segment.name.get_value() != segment_name:
                     segment.hidden = True
+
+    def import_clip(self, path):
+        """
+        Import clip from path
+        """
+        clips = flame.import_clips(path)
+        self.log.info("Clips [{}] imported from `{}`".format(clips, path))
+        if not clips:
+            self.log.warning("Path `{}` is not having any clips".format(path))
+            return None
+        elif len(clips) > 1:
+            self.log.warning(
+                "Path `{}` is containing more that one clip".format(path)
+            )
+        return clips[0]
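One detail worth noting in the preset changes: the defaults add a path_regex key while the processing loop reads filter_path_regex via .get(), so the filter only applies when settings actually provide that key. A standalone sketch of the filtering itself, with hypothetical values:

    import re

    # Skip a preset unless its optional regex matches the clip path.
    filter_path_regex = r".*shot_010.*"            # hypothetical value
    clip_path = "/mnt/footage/shot_020/plate.exr"  # hypothetical path

    if filter_path_regex and not re.search(filter_path_regex, clip_path):
        print("preset skipped for this clip")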
@@ -1,26 +0,0 @@
-import pyblish
-
-
-@pyblish.api.log
-class ValidateSourceClip(pyblish.api.InstancePlugin):
-    """Validate instance is not having empty `flameSourceClip`"""
-
-    order = pyblish.api.ValidatorOrder
-    label = "Validate Source Clip"
-    hosts = ["flame"]
-    families = ["clip"]
-    optional = True
-    active = False
-
-    def process(self, instance):
-        flame_source_clip = instance.data["flameSourceClip"]
-
-        self.log.debug("_ flame_source_clip: {}".format(flame_source_clip))
-
-        if flame_source_clip is None:
-            raise AttributeError((
-                "Timeline segment `{}` is not having "
-                "relative clip in reels. Please make sure "
-                "you push `Save Sources` button in Conform Tab").format(
-                    instance.data["asset"]
-            ))
@@ -6,8 +6,10 @@ import contextlib
 from bson.objectid import ObjectId
 from Qt import QtGui

-from avalon import io
-from openpype.pipeline import switch_container
+from openpype.pipeline import (
+    switch_container,
+    legacy_io,
+)
 from .pipeline import get_current_comp, comp_lock_and_undo_chunk

 self = sys.modules[__name__]

@@ -94,8 +96,10 @@ def switch_item(container,
     # so we can use the original name from those.
     if any(not x for x in [asset_name, subset_name, representation_name]):
         _id = ObjectId(container["representation"])
-        representation = io.find_one({"type": "representation", "_id": _id})
-        version, subset, asset, project = io.parenthood(representation)
+        representation = legacy_io.find_one({
+            "type": "representation", "_id": _id
+        })
+        version, subset, asset, project = legacy_io.parenthood(representation)

         if asset_name is None:
             asset_name = asset["name"]

@@ -107,14 +111,14 @@ def switch_item(container,
             representation_name = representation["name"]

     # Find the new one
-    asset = io.find_one({
+    asset = legacy_io.find_one({
         "name": asset_name,
         "type": "asset"
     })
     assert asset, ("Could not find asset in the database with the name "
                    "'%s'" % asset_name)

-    subset = io.find_one({
+    subset = legacy_io.find_one({
         "name": subset_name,
         "type": "subset",
         "parent": asset["_id"]

@@ -122,7 +126,7 @@ def switch_item(container,
     assert subset, ("Could not find subset in the database with the name "
                     "'%s'" % subset_name)

-    version = io.find_one(
+    version = legacy_io.find_one(
         {
             "type": "version",
             "parent": subset["_id"]

@@ -134,7 +138,7 @@ def switch_item(container,
         asset_name, subset_name
     )

-    representation = io.find_one({
+    representation = legacy_io.find_one({
         "name": representation_name,
         "type": "representation",
         "parent": version["_id"]}
@@ -45,7 +45,8 @@ def install():
     This is where you install menus and register families, data
     and loaders into fusion.

-    It is called automatically when installing via `api.install(avalon.fusion)`
+    It is called automatically when installing via
+    `openpype.pipeline.install_host(openpype.hosts.fusion.api)`

     See the Maya equivalent for inspiration on how to implement this.
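Per the updated docstring, host integration goes through install_host. A minimal sketch of the bootstrap it refers to:

    from openpype.pipeline import install_host
    import openpype.hosts.fusion.api as fusion_api

    # Registers the Fusion host integration (menus, families, loaders).
    install_host(fusion_api)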
@@ -6,7 +6,7 @@ from openpype.pipeline import load


 class FusionSetFrameRangeLoader(load.LoaderPlugin):
-    """Specific loader of Alembic for the avalon.animation family"""
+    """Set frame range excluding pre- and post-handles"""

     families = ["animation",
                 "camera",

@@ -40,7 +40,7 @@ class FusionSetFrameRangeLoader(load.LoaderPlugin):


 class FusionSetFrameRangeWithHandlesLoader(load.LoaderPlugin):
-    """Specific loader of Alembic for the avalon.animation family"""
+    """Set frame range including pre- and post-handles"""

     families = ["animation",
                 "camera",
@@ -1,10 +1,9 @@
 import os
 import contextlib

-from avalon import io
-
 from openpype.pipeline import (
     load,
+    legacy_io,
     get_representation_path,
 )
 from openpype.hosts.fusion.api import (

@@ -212,8 +211,10 @@ class FusionLoadSequence(load.LoaderPlugin):
         path = self._get_first_image(root)

         # Get start frame from version data
-        version = io.find_one({"type": "version",
-                               "_id": representation["parent"]})
+        version = legacy_io.find_one({
+            "type": "version",
+            "_id": representation["parent"]
+        })
         start = version["data"].get("frameStart")
         if start is None:
             self.log.warning("Missing start frame for updated version"
@@ -4,10 +4,10 @@ import getpass

 import requests

-from avalon import api
-
 import pyblish.api

+from openpype.pipeline import legacy_io
+

 class FusionSubmitDeadline(pyblish.api.InstancePlugin):
     """Submit current Comp to Deadline

@@ -133,7 +133,7 @@ class FusionSubmitDeadline(pyblish.api.InstancePlugin):
             "FUSION9_MasterPrefs"
         ]
         environment = dict({key: os.environ[key] for key in keys
-                            if key in os.environ}, **api.Session)
+                            if key in os.environ}, **legacy_io.Session)

         payload["JobInfo"].update({
             "EnvironmentKeyValue%d" % index: "{key}={value}".format(

@@ -146,7 +146,7 @@ class FusionSubmitDeadline(pyblish.api.InstancePlugin):
         self.log.info(json.dumps(payload, indent=4, sort_keys=True))

         # E.g. http://192.168.0.1:8082/api/jobs
-        url = "{}/api/jobs".format(DEADLINE_REST_URL)
+        url = "{}/api/jobs".format(deadline_url)
         response = requests.post(url, json=payload)
         if not response.ok:
             raise Exception(response.text)
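The environment merge above is plain dict layering: selected process variables are copied first, then the Session mapping is keyword-expanded over them, so Session keys win on collision. A standalone sketch with stand-in values:

    import os

    keys = ["FUSION9_MasterPrefs"]
    session = {"AVALON_PROJECT": "demo"}  # stand-in for legacy_io.Session

    # Later keyword-expanded keys override earlier ones.
    environment = dict(
        {key: os.environ[key] for key in keys if key in os.environ},
        **session
    )
    print(environment["AVALON_PROJECT"])  # -> "demo"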
@@ -4,10 +4,8 @@ import sys
 import logging

 # Pipeline imports
-import avalon.api
-from avalon import io
-
 from openpype.pipeline import (
+    legacy_io,
     install_host,
     registered_host,
 )

@@ -167,7 +165,7 @@ def update_frame_range(comp, representations):
     """

     version_ids = [r["parent"] for r in representations]
-    versions = io.find({"type": "version", "_id": {"$in": version_ids}})
+    versions = legacy_io.find({"type": "version", "_id": {"$in": version_ids}})
     versions = list(versions)

     versions = [v for v in versions

@@ -205,12 +203,11 @@ def switch(asset_name, filepath=None, new=True):

     # Assert asset name exists
     # It is better to do this here then to wait till switch_shot does it
-    asset = io.find_one({"type": "asset", "name": asset_name})
+    asset = legacy_io.find_one({"type": "asset", "name": asset_name})
     assert asset, "Could not find '%s' in the database" % asset_name

     # Get current project
-    self._project = io.find_one({"type": "project",
-                                 "name": avalon.api.Session["AVALON_PROJECT"]})
+    self._project = legacy_io.find_one({"type": "project"})

     # Go to comp
     if not filepath:

@@ -241,7 +238,7 @@ def switch(asset_name, filepath=None, new=True):
     current_comp.Print(message)

     # Build the session to switch to
-    switch_to_session = avalon.api.Session.copy()
+    switch_to_session = legacy_io.Session.copy()
     switch_to_session["AVALON_ASSET"] = asset['name']

     if new:
@@ -5,11 +5,13 @@ import logging

 from Qt import QtWidgets, QtCore

-from avalon import io
 import qtawesome as qta

 from openpype import style
-from openpype.pipeline import install_host
+from openpype.pipeline import (
+    install_host,
+    legacy_io,
+)
 from openpype.hosts.fusion import api
 from openpype.lib.avalon_context import get_workdir_from_session

@@ -164,7 +166,7 @@ class App(QtWidgets.QWidget):
         return items

     def collect_assets(self):
-        return list(io.find({"type": "asset"}, {"name": True}))
+        return list(legacy_io.find({"type": "asset"}, {"name": True}))

     def populate_comp_box(self, files):
         """Ensure we display the filename only but the path is stored as well
@@ -419,7 +419,6 @@ class ExtractImage(pyblish.api.InstancePlugin):
     ```python
     import os

-    from avalon import api, io
     import openpype.hosts.harmony.api as harmony

     signature = str(uuid4()).replace("-", "_")

@@ -611,7 +610,7 @@ class ImageSequenceLoader(load.LoaderPlugin):
     def update(self, container, representation):
         node = container.pop("node")

-        version = io.find_one({"_id": representation["parent"]})
+        version = legacy_io.find_one({"_id": representation["parent"]})
         files = []
         for f in version["data"]["files"]:
             files.append(
@@ -463,7 +463,7 @@ def imprint(node_id, data, remove=False):
         remove (bool): Removes the data from the scene.

     Example:
-        >>> from avalon.harmony import lib
+        >>> from openpype.hosts.harmony.api import lib
         >>> node = "Top/Display"
         >>> data = {"str": "someting", "int": 1, "float": 0.32, "bool": True}
         >>> lib.imprint(layer, data)
@@ -5,11 +5,10 @@ import logging
 from bson.objectid import ObjectId
 import pyblish.api

-from avalon import io
-
 from openpype import lib
 from openpype.lib import register_event_callback
 from openpype.pipeline import (
+    legacy_io,
     register_loader_plugin_path,
     register_creator_plugin_path,
     deregister_loader_plugin_path,

@@ -111,7 +110,7 @@ def check_inventory():
     outdated_containers = []
     for container in ls():
         representation = container['representation']
-        representation_doc = io.find_one(
+        representation_doc = legacy_io.find_one(
             {
                 "_id": ObjectId(representation),
                 "type": "representation"
@@ -3,13 +3,13 @@
 from pathlib import Path

 import attr
-from avalon import api

-from openpype.lib import get_formatted_current_time
-import openpype.lib.abstract_collect_render
-import openpype.hosts.harmony.api as harmony
-from openpype.lib.abstract_collect_render import RenderInstance
+import openpype.lib
+import openpype.lib.abstract_collect_render
+from openpype.lib.abstract_collect_render import RenderInstance
+from openpype.lib import get_formatted_current_time
+from openpype.pipeline import legacy_io
+import openpype.hosts.harmony.api as harmony


 @attr.s

@@ -143,7 +143,8 @@ class CollectFarmRender(openpype.lib.abstract_collect_render.
                 source=context.data["currentFile"],
                 label=node.split("/")[1],
                 subset=subset_name,
-                asset=api.Session["AVALON_ASSET"],
+                asset=legacy_io.Session["AVALON_ASSET"],
                 task=task_name,
                 attachTo=False,
                 setMembers=[node],
                 publish=info[4],
@@ -12,8 +12,7 @@ import hiero
 from Qt import QtWidgets
 from bson.objectid import ObjectId

-import avalon.api as avalon
-import avalon.io
+from openpype.pipeline import legacy_io
 from openpype.api import (Logger, Anatomy, get_anatomy_settings)
 from . import tags

@@ -38,8 +37,6 @@ self.pype_tag_name = "openpypeData"
 self.default_sequence_name = "openpypeSequence"
 self.default_bin_name = "openpypeBin"

-AVALON_CONFIG = os.getenv("AVALON_CONFIG", "pype")
-

 def flatten(_list):
     for item in _list:

@@ -49,6 +46,7 @@ def flatten(_list):
         else:
             yield item

+
 def get_current_project(remove_untitled=False):
     projects = flatten(hiero.core.projects())
     if not remove_untitled:

@@ -384,7 +382,7 @@ def get_publish_attribute(tag):

 def sync_avalon_data_to_workfile():
     # import session to get project dir
-    project_name = avalon.Session["AVALON_PROJECT"]
+    project_name = legacy_io.Session["AVALON_PROJECT"]

     anatomy = Anatomy(project_name)
     work_template = anatomy.templates["work"]["path"]

@@ -409,7 +407,7 @@ def sync_avalon_data_to_workfile():
     project.setProjectRoot(active_project_root)

     # get project data from avalon db
-    project_doc = avalon.io.find_one({"type": "project"})
+    project_doc = legacy_io.find_one({"type": "project"})
     project_data = project_doc["data"]

     log.debug("project_data: {}".format(project_data))

@@ -555,10 +553,10 @@ class PublishAction(QtWidgets.QAction):
     #
     # '''
     # import hiero.core
-    # from avalon.nuke import imprint
-    # from pype.hosts.nuke import (
-    #     lib as nklib
-    # )
+    # from openpype.hosts.nuke.api.lib import (
+    #     BuildWorkfile,
+    #     imprint
+    # )
     #
     # # check if the file exists if does then Raise "File exists!"
     # if os.path.exists(filepath):

@@ -585,8 +583,7 @@ class PublishAction(QtWidgets.QAction):
     #
     # nuke_script.addNode(root_node)
     #
-    # # here to call pype.hosts.nuke.lib.BuildWorkfile
-    # script_builder = nklib.BuildWorkfile(
+    # script_builder = BuildWorkfile(
     #     root_node=root_node,
     #     root_path=root_path,
     #     nodes=nuke_script.getNodes(),

@@ -995,7 +992,6 @@ def check_inventory_versions():
     it to red.
     """
     from . import parse_container
-    from avalon import io

     # presets
     clip_color_last = "green"

@@ -1007,19 +1003,19 @@ def check_inventory_versions():

         if container:
             # get representation from io
-            representation = io.find_one({
+            representation = legacy_io.find_one({
                 "type": "representation",
                 "_id": ObjectId(container["representation"])
             })

             # Get start frame from version data
-            version = io.find_one({
+            version = legacy_io.find_one({
                 "type": "version",
                 "_id": representation["parent"]
             })

             # get all versions in list
-            versions = io.find({
+            versions = legacy_io.find({
                 "type": "version",
                 "parent": version["parent"]
             }).distinct('name')
@@ -1,14 +1,16 @@
import os
import sys

import hiero.core
from openpype.api import Logger
from openpype.tools.utils import host_tools
from avalon.api import Session
from hiero.ui import findMenuAction

from openpype.api import Logger
from openpype.pipeline import legacy_io
from openpype.tools.utils import host_tools

from . import tags

log = Logger().get_logger(__name__)
log = Logger.get_logger(__name__)

self = sys.modules[__name__]
self._change_context_menu = None

@@ -24,8 +26,10 @@ def update_menu_task_label():
        log.warning("Can't find menuItem: {}".format(object_name))
        return

    label = "{}, {}".format(Session["AVALON_ASSET"],
                            Session["AVALON_TASK"])
    label = "{}, {}".format(
        legacy_io.Session["AVALON_ASSET"],
        legacy_io.Session["AVALON_TASK"]
    )

    menu = found_menu.menu()
    self._change_context_menu = label

@@ -51,7 +55,8 @@ def menu_install():
    menu_name = os.environ['AVALON_LABEL']

    context_label = "{0}, {1}".format(
        Session["AVALON_ASSET"], Session["AVALON_TASK"]
        legacy_io.Session["AVALON_ASSET"],
        legacy_io.Session["AVALON_TASK"]
    )

    self._change_context_menu = context_label

@@ -5,10 +5,10 @@ import os
import contextlib
from collections import OrderedDict

from avalon import schema
from pyblish import api as pyblish
from openpype.api import Logger
from openpype.pipeline import (
    schema,
    register_creator_plugin_path,
    register_loader_plugin_path,
    deregister_creator_plugin_path,

@@ -20,8 +20,6 @@ from . import lib, menu, events

log = Logger().get_logger(__name__)

AVALON_CONFIG = os.getenv("AVALON_CONFIG", "pype")

# plugin paths
API_DIR = os.path.dirname(os.path.abspath(__file__))
HOST_DIR = os.path.dirname(API_DIR)

@@ -247,15 +245,10 @@ def reload_config():
    import importlib

    for module in (
        "avalon",
        "avalon.lib",
        "avalon.pipeline",
        "pyblish",
        "pypeapp",
        "{}.api".format(AVALON_CONFIG),
        "{}.hosts.hiero.lib".format(AVALON_CONFIG),
        "{}.hosts.hiero.menu".format(AVALON_CONFIG),
        "{}.hosts.hiero.tags".format(AVALON_CONFIG)
        "openpype.api",
        "openpype.hosts.hiero.lib",
        "openpype.hosts.hiero.menu",
        "openpype.hosts.hiero.tags"
    ):
        log.info("Reloading module: {}...".format(module))
        try:

@@ -3,23 +3,13 @@ import os
import hiero

from openpype.api import Logger
from avalon import io
from openpype.pipeline import legacy_io

log = Logger().get_logger(__name__)
log = Logger.get_logger(__name__)


def tag_data():
    return {
        # "Retiming": {
        #     "editable": "1",
        #     "note": "Clip has retime or TimeWarp effects (or multiple effects stacked on the clip)",  # noqa
        #     "icon": "retiming.png",
        #     "metadata": {
        #         "family": "retiming",
        #         "marginIn": 1,
        #         "marginOut": 1
        #     }
        # },
        "[Lenses]": {
            "Set lense here": {
                "editable": "1",

@@ -48,6 +38,16 @@ def tag_data():
                "family": "comment",
                "subset": "main"
            }
        },
        "FrameMain": {
            "editable": "1",
            "note": "Publishing a frame subset.",
            "icon": "z_layer_main.png",
            "metadata": {
                "family": "frame",
                "subset": "main",
                "format": "png"
            }
        }
    }


@@ -141,7 +141,7 @@ def add_tags_to_workfile():
    nks_pres_tags = tag_data()

    # Get project task types.
    tasks = io.find_one({"type": "project"})["config"]["tasks"]
    tasks = legacy_io.find_one({"type": "project"})["config"]["tasks"]
    nks_pres_tags["[Tasks]"] = {}
    log.debug("__ tasks: {}".format(tasks))
    for task_type in tasks.keys():

@@ -159,7 +159,7 @@ def add_tags_to_workfile():
    # asset builds and shots.
    if int(os.getenv("TAG_ASSETBUILD_STARTUP", 0)) == 1:
        nks_pres_tags["[AssetBuilds]"] = {}
        for asset in io.find({"type": "asset"}):
        for asset in legacy_io.find({"type": "asset"}):
            if asset["data"]["entityType"] == "AssetBuild":
                nks_pres_tags["[AssetBuilds]"][asset["name"]] = {
                    "editable": "1",

@@ -1,5 +1,7 @@
from avalon import io
from openpype.pipeline import get_representation_path
from openpype.pipeline import (
    legacy_io,
    get_representation_path,
)
import openpype.hosts.hiero.api as phiero
# from openpype.hosts.hiero.api import plugin, lib
# reload(lib)

@@ -105,7 +107,7 @@ class LoadClip(phiero.SequenceLoader):
        namespace = container['namespace']
        track_item = phiero.get_track_items(
            track_item_name=namespace)
        version = io.find_one({
        version = legacy_io.find_one({
            "type": "version",
            "_id": representation["parent"]
        })

@@ -174,7 +176,7 @@ class LoadClip(phiero.SequenceLoader):
        # define version name
        version_name = version.get("name", None)
        # get all versions in list
        versions = io.find({
        versions = legacy_io.find({
            "type": "version",
            "parent": version["parent"]
        }).distinct('name')

@@ -0,0 +1,142 @@
from pprint import pformat
import re
import ast
import json

import pyblish.api


class CollectFrameTagInstances(pyblish.api.ContextPlugin):
    """Collect frames from tags.

    Tag is expected to have metadata:
        {
            "family": "frame"
            "subset": "main"
        }
    """

    order = pyblish.api.CollectorOrder
    label = "Collect Frames"
    hosts = ["hiero"]

    def process(self, context):
        self._context = context

        # collect all sequence tags
        subset_data = self._create_frame_subset_data_sequence(context)

        self.log.debug("__ subset_data: {}".format(
            pformat(subset_data)
        ))

        # create instances
        self._create_instances(subset_data)

    def _get_tag_data(self, tag):
        data = {}

        # get tag metadata attribute
        tag_data = tag.metadata()

        # convert tag metadata to normal keys names and values to correct types
        for k, v in dict(tag_data).items():
            key = k.replace("tag.", "")

            try:
                # capture exceptions which are related to strings only
                if re.match(r"^[\d]+$", v):
                    value = int(v)
                elif re.match(r"^True$", v):
                    value = True
                elif re.match(r"^False$", v):
                    value = False
                elif re.match(r"^None$", v):
                    value = None
                elif re.match(r"^[\w\d_]+$", v):
                    value = v
                else:
                    value = ast.literal_eval(v)
            except (ValueError, SyntaxError):
                value = v

            data[key] = value

        return data

    def _create_frame_subset_data_sequence(self, context):

        sequence_tags = []
        sequence = context.data["activeTimeline"]

        # get all publishable sequence frames
        publish_frames = range(int(sequence.duration() + 1))

        self.log.debug("__ publish_frames: {}".format(
            pformat(publish_frames)
        ))

        # get all sequence tags
        for tag in sequence.tags():
            tag_data = self._get_tag_data(tag)
            self.log.debug("__ tag_data: {}".format(
                pformat(tag_data)
            ))
            if not tag_data:
                continue

            if "family" not in tag_data:
                continue

            if tag_data["family"] != "frame":
                continue

            sequence_tags.append(tag_data)

        self.log.debug("__ sequence_tags: {}".format(
            pformat(sequence_tags)
        ))

        # first collect all available subset tag frames
        subset_data = {}
        for tag_data in sequence_tags:
            frame = int(tag_data["start"])

            if frame not in publish_frames:
                continue

            subset = tag_data["subset"]

            if subset in subset_data:
                # update existing subset key
                subset_data[subset]["frames"].append(frame)
            else:
                # create new subset key
                subset_data[subset] = {
                    "frames": [frame],
                    "format": tag_data["format"],
                    "asset": context.data["assetEntity"]["name"]
                }
        return subset_data

    def _create_instances(self, subset_data):
        # create instance per subset
        for subset_name, subset_data in subset_data.items():
            name = "frame" + subset_name.title()
            data = {
                "name": name,
                "label": "{} {}".format(name, subset_data["frames"]),
                "family": "image",
                "families": ["frame"],
                "asset": subset_data["asset"],
                "subset": name,
                "format": subset_data["format"],
                "frames": subset_data["frames"]
            }
            self._context.create_instance(**data)

            self.log.info(
                "Created instance: {}".format(
                    json.dumps(data, sort_keys=True, indent=4)
                )
            )

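For reference, the coercion in _get_tag_data maps Hiero's string-only tag metadata back to Python types. A condensed, standalone sketch of the same rules (the sample values are illustrative, and True/False/None are folded into one literal_eval branch here):

import ast
import re

def coerce_tag_value(v):
    # Plain digits become int; "True"/"False"/"None" become their literals;
    # bare words stay strings; anything else goes through ast.literal_eval
    # with a fall back to the raw string on failure.
    try:
        if re.match(r"^[\d]+$", v):
            return int(v)
        if re.match(r"^(True|False|None)$", v):
            return ast.literal_eval(v)
        if re.match(r"^[\w\d_]+$", v):
            return v
        return ast.literal_eval(v)
    except (ValueError, SyntaxError):
        return v

assert coerce_tag_value("1001") == 1001
assert coerce_tag_value("True") is True
assert coerce_tag_value("main") == "main"
assert coerce_tag_value("[1, 2]") == [1, 2]
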
openpype/hosts/hiero/plugins/publish/extract_frames.py (new file, 82 lines)

@@ -0,0 +1,82 @@
import os
import pyblish.api
import openpype


class ExtractFrames(openpype.api.Extractor):
    """Extracts frames"""

    order = pyblish.api.ExtractorOrder
    label = "Extract Frames"
    hosts = ["hiero"]
    families = ["frame"]
    movie_extensions = ["mov", "mp4"]

    def process(self, instance):
        oiio_tool_path = openpype.lib.get_oiio_tools_path()
        staging_dir = self.staging_dir(instance)
        output_template = os.path.join(staging_dir, instance.data["name"])
        sequence = instance.context.data["activeTimeline"]

        files = []
        for frame in instance.data["frames"]:
            track_item = sequence.trackItemAt(frame)
            media_source = track_item.source().mediaSource()
            input_path = media_source.fileinfos()[0].filename()
            input_frame = (
                track_item.mapTimelineToSource(frame) +
                track_item.source().mediaSource().startTime()
            )
            output_ext = instance.data["format"]
            output_path = output_template
            output_path += ".{:04d}.{}".format(int(frame), output_ext)

            args = [oiio_tool_path]

            ext = os.path.splitext(input_path)[1][1:]
            if ext in self.movie_extensions:
                args.extend(["--subimage", str(int(input_frame))])
            else:
                args.extend(["--frames", str(int(input_frame))])

            if ext == "exr":
                args.extend(["--powc", "0.45,0.45,0.45,1.0"])

            args.extend([input_path, "-o", output_path])
            output = openpype.api.run_subprocess(args)

            failed_output = "oiiotool produced no output."
            if failed_output in output:
                raise ValueError(
                    "oiiotool processing failed. Args: {}".format(args)
                )

            files.append(output_path)

            # Feedback to user because "oiiotool" can make the publishing
            # appear unresponsive.
            self.log.info(
                "Processed {} of {} frames".format(
                    instance.data["frames"].index(frame) + 1,
                    len(instance.data["frames"])
                )
            )

        if len(files) == 1:
            instance.data["representations"] = [
                {
                    "name": output_ext,
                    "ext": output_ext,
                    "files": os.path.basename(files[0]),
                    "stagingDir": staging_dir
                }
            ]
        else:
            instance.data["representations"] = [
                {
                    "name": output_ext,
                    "ext": output_ext,
                    "files": [os.path.basename(x) for x in files],
                    "stagingDir": staging_dir
                }
            ]

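For orientation, the argument list the extractor assembles flattens to roughly the following for a single exr frame (paths and frame numbers below are made up for illustration):

# Illustrative argv for an exr source frame:
args = [
    "/path/to/oiiotool",                  # openpype.lib.get_oiio_tools_path()
    "--frames", "86400",                  # source frame via mapTimelineToSource
    "--powc", "0.45,0.45,0.45,1.0",       # gamma lift applied to exr input only
    "/proj/plates/sh010.86400.exr",
    "-o", "/tmp/stage/frameMain.0012.exr",
]

Movie sources (mov/mp4) swap "--frames" for "--subimage"; all other inputs take the frame-number form shown here.
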
@@ -1,12 +1,15 @@
import os
import pyblish.api
import hiero.ui
from openpype.hosts.hiero import api as phiero
from avalon import api as avalon
from pprint import pformat
from openpype.hosts.hiero.api.otio import hiero_export
from Qt.QtGui import QPixmap
import tempfile
from pprint import pformat

import pyblish.api
from Qt.QtGui import QPixmap

import hiero.ui

from openpype.pipeline import legacy_io
from openpype.hosts.hiero import api as phiero
from openpype.hosts.hiero.api.otio import hiero_export


class PrecollectWorkfile(pyblish.api.ContextPlugin):

@@ -17,7 +20,7 @@ class PrecollectWorkfile(pyblish.api.ContextPlugin):

    def process(self, context):

        asset = avalon.Session["AVALON_ASSET"]
        asset = legacy_io.Session["AVALON_ASSET"]
        subset = "workfile"
        project = phiero.get_current_project()
        active_timeline = hiero.ui.activeSequence()

@@ -65,6 +68,7 @@ class PrecollectWorkfile(pyblish.api.ContextPlugin):
            "subset": "{}{}".format(asset, subset.capitalize()),
            "item": project,
            "family": "workfile",
            "families": [],
            "representations": [workfile_representation, thumb_representation]
        }

@@ -74,6 +78,7 @@ class PrecollectWorkfile(pyblish.api.ContextPlugin):
        # update context with main project attributes
        context_data = {
            "activeProject": project,
            "activeTimeline": active_timeline,
            "otioTimeline": otio_timeline,
            "currentFile": curent_file,
            "colorspace": self.get_colorspace(project),

@@ -1,5 +1,5 @@
from pyblish import api
from avalon import io
from openpype.pipeline import legacy_io


class CollectAssetBuilds(api.ContextPlugin):

@@ -18,7 +18,7 @@ class CollectAssetBuilds(api.ContextPlugin):

    def process(self, context):
        asset_builds = {}
        for asset in io.find({"type": "asset"}):
        for asset in legacy_io.find({"type": "asset"}):
            if asset["data"]["entityType"] == "AssetBuild":
                self.log.debug("Found \"{}\" in database.".format(asset))
                asset_builds[asset["name"]] = asset

@@ -1,38 +0,0 @@
import pyblish.api


class CollectClipResolution(pyblish.api.InstancePlugin):
    """Collect clip geometry resolution"""

    order = pyblish.api.CollectorOrder - 0.1
    label = "Collect Clip Resolution"
    hosts = ["hiero"]
    families = ["clip"]

    def process(self, instance):
        sequence = instance.context.data['activeSequence']
        item = instance.data["item"]
        source_resolution = instance.data.get("sourceResolution", None)

        resolution_width = int(sequence.format().width())
        resolution_height = int(sequence.format().height())
        pixel_aspect = sequence.format().pixelAspect()

        # source exception
        if source_resolution:
            resolution_width = int(item.source().mediaSource().width())
            resolution_height = int(item.source().mediaSource().height())
            pixel_aspect = item.source().mediaSource().pixelAspect()

        resolution_data = {
            "resolutionWidth": resolution_width,
            "resolutionHeight": resolution_height,
            "pixelAspect": pixel_aspect
        }
        # add to instacne data
        instance.data.update(resolution_data)

        self.log.info("Resolution of instance '{}' is: {}".format(
            instance,
            resolution_data
        ))

@@ -1,15 +0,0 @@
import pyblish.api


class CollectHostVersion(pyblish.api.ContextPlugin):
    """Inject the hosts version into context"""

    label = "Collect Host and HostVersion"
    order = pyblish.api.CollectorOrder - 0.5

    def process(self, context):
        import nuke
        import pyblish.api

        context.set_data("host", pyblish.api.current_host())
        context.set_data('hostVersion', value=nuke.NUKE_VERSION_STRING)

@@ -1,32 +0,0 @@
from pyblish import api


class CollectTagRetime(api.InstancePlugin):
    """Collect Retiming from Tags of selected track items."""

    order = api.CollectorOrder + 0.014
    label = "Collect Retiming Tag"
    hosts = ["hiero"]
    families = ['clip']

    def process(self, instance):
        # gets tags
        tags = instance.data["tags"]

        for t in tags:
            t_metadata = dict(t["metadata"])
            t_family = t_metadata.get("tag.family", "")

            # gets only task family tags and collect labels
            if "retiming" in t_family:
                margin_in = t_metadata.get("tag.marginIn", "")
                margin_out = t_metadata.get("tag.marginOut", "")

                instance.data["retimeMarginIn"] = int(margin_in)
                instance.data["retimeMarginOut"] = int(margin_out)
                instance.data["retime"] = True

                self.log.info("retimeMarginIn: `{}`".format(margin_in))
                self.log.info("retimeMarginOut: `{}`".format(margin_out))

                instance.data["families"] += ["retime"]

@@ -1,223 +0,0 @@
from compiler.ast import flatten
from pyblish import api
from openpype.hosts.hiero import api as phiero
import hiero
# from openpype.hosts.hiero.api import lib
# reload(lib)
# reload(phiero)


class PreCollectInstances(api.ContextPlugin):
    """Collect all Track items selection."""

    order = api.CollectorOrder - 0.509
    label = "Pre-collect Instances"
    hosts = ["hiero"]

    def process(self, context):
        track_items = phiero.get_track_items(
            selected=True, check_tagged=True, check_enabled=True)
        # only return enabled track items
        if not track_items:
            track_items = phiero.get_track_items(
                check_enabled=True, check_tagged=True)
        # get sequence and video tracks
        sequence = context.data["activeSequence"]
        tracks = sequence.videoTracks()

        # add collection to context
        tracks_effect_items = self.collect_sub_track_items(tracks)

        context.data["tracksEffectItems"] = tracks_effect_items

        self.log.info(
            "Processing enabled track items: {}".format(len(track_items)))

        for _ti in track_items:
            data = {}
            clip = _ti.source()

            # get clips subtracks and anotations
            annotations = self.clip_annotations(clip)
            subtracks = self.clip_subtrack(_ti)
            self.log.debug("Annotations: {}".format(annotations))
            self.log.debug(">> Subtracks: {}".format(subtracks))

            # get pype tag data
            tag_parsed_data = phiero.get_track_item_pype_data(_ti)
            # self.log.debug(pformat(tag_parsed_data))

            if not tag_parsed_data:
                continue

            if tag_parsed_data.get("id") != "pyblish.avalon.instance":
                continue
            # add tag data to instance data
            data.update({
                k: v for k, v in tag_parsed_data.items()
                if k not in ("id", "applieswhole", "label")
            })

            asset = tag_parsed_data["asset"]
            subset = tag_parsed_data["subset"]
            review_track = tag_parsed_data.get("reviewTrack")
            hiero_track = tag_parsed_data.get("heroTrack")
            audio = tag_parsed_data.get("audio")

            # remove audio attribute from data
            data.pop("audio")

            # insert family into families
            family = tag_parsed_data["family"]
            families = [str(f) for f in tag_parsed_data["families"]]
            families.insert(0, str(family))

            track = _ti.parent()
            media_source = _ti.source().mediaSource()
            source_path = media_source.firstpath()
            file_head = media_source.filenameHead()
            file_info = media_source.fileinfos().pop()
            source_first_frame = int(file_info.startFrame())

            # apply only for review and master track instance
            if review_track and hiero_track:
                families += ["review", "ftrack"]

            data.update({
                "name": "{} {} {}".format(asset, subset, families),
                "asset": asset,
                "item": _ti,
                "families": families,

                # tags
                "tags": _ti.tags(),

                # track item attributes
                "track": track.name(),
                "trackItem": track,
                "reviewTrack": review_track,

                # version data
                "versionData": {
                    "colorspace": _ti.sourceMediaColourTransform()
                },

                # source attribute
                "source": source_path,
                "sourceMedia": media_source,
                "sourcePath": source_path,
                "sourceFileHead": file_head,
                "sourceFirst": source_first_frame,

                # clip's effect
                "clipEffectItems": subtracks
            })

            instance = context.create_instance(**data)

            self.log.info("Creating instance.data: {}".format(instance.data))

            if audio:
                a_data = dict()

                # add tag data to instance data
                a_data.update({
                    k: v for k, v in tag_parsed_data.items()
                    if k not in ("id", "applieswhole", "label")
                })

                # create main attributes
                subset = "audioMain"
                family = "audio"
                families = ["clip", "ftrack"]
                families.insert(0, str(family))

                name = "{} {} {}".format(asset, subset, families)

                a_data.update({
                    "name": name,
                    "subset": subset,
                    "asset": asset,
                    "family": family,
                    "families": families,
                    "item": _ti,

                    # tags
                    "tags": _ti.tags(),
                })

                a_instance = context.create_instance(**a_data)
                self.log.info("Creating audio instance: {}".format(a_instance))

    @staticmethod
    def clip_annotations(clip):
        """
        Returns list of Clip's hiero.core.Annotation
        """
        annotations = []
        subTrackItems = flatten(clip.subTrackItems())
        annotations += [item for item in subTrackItems if isinstance(
            item, hiero.core.Annotation)]
        return annotations

    @staticmethod
    def clip_subtrack(clip):
        """
        Returns list of Clip's hiero.core.SubTrackItem
        """
        subtracks = []
        subTrackItems = flatten(clip.parent().subTrackItems())
        for item in subTrackItems:
            # avoid all anotation
            if isinstance(item, hiero.core.Annotation):
                continue
            # # avoid all not anaibled
            if not item.isEnabled():
                continue
            subtracks.append(item)
        return subtracks

    @staticmethod
    def collect_sub_track_items(tracks):
        """
        Returns dictionary with track index as key and list of subtracks
        """
        # collect all subtrack items
        sub_track_items = dict()
        for track in tracks:
            items = track.items()

            # skip if no clips on track > need track with effect only
            if items:
                continue

            # skip all disabled tracks
            if not track.isEnabled():
                continue

            track_index = track.trackIndex()
            _sub_track_items = flatten(track.subTrackItems())

            # continue only if any subtrack items are collected
            if len(_sub_track_items) < 1:
                continue

            enabled_sti = list()
            # loop all found subtrack items and check if they are enabled
            for _sti in _sub_track_items:
                # checking if not enabled
                if not _sti.isEnabled():
                    continue
                if isinstance(_sti, hiero.core.Annotation):
                    continue
                # collect the subtrack item
                enabled_sti.append(_sti)

            # continue only if any subtrack items are collected
            if len(enabled_sti) < 1:
                continue

            # add collection of subtrackitems to dict
            sub_track_items[track_index] = enabled_sti

        return sub_track_items

@@ -1,74 +0,0 @@
import os
import pyblish.api
from openpype.hosts.hiero import api as phiero
from avalon import api as avalon


class PreCollectWorkfile(pyblish.api.ContextPlugin):
    """Inject the current working file into context"""

    label = "Pre-collect Workfile"
    order = pyblish.api.CollectorOrder - 0.51

    def process(self, context):
        asset = avalon.Session["AVALON_ASSET"]
        subset = "workfile"

        project = phiero.get_current_project()
        active_sequence = phiero.get_current_sequence()
        video_tracks = active_sequence.videoTracks()
        audio_tracks = active_sequence.audioTracks()
        current_file = project.path()
        staging_dir = os.path.dirname(current_file)
        base_name = os.path.basename(current_file)

        # get workfile's colorspace properties
        _clrs = {}
        _clrs["useOCIOEnvironmentOverride"] = project.useOCIOEnvironmentOverride()  # noqa
        _clrs["lutSetting16Bit"] = project.lutSetting16Bit()
        _clrs["lutSetting8Bit"] = project.lutSetting8Bit()
        _clrs["lutSettingFloat"] = project.lutSettingFloat()
        _clrs["lutSettingLog"] = project.lutSettingLog()
        _clrs["lutSettingViewer"] = project.lutSettingViewer()
        _clrs["lutSettingWorkingSpace"] = project.lutSettingWorkingSpace()
        _clrs["lutUseOCIOForExport"] = project.lutUseOCIOForExport()
        _clrs["ocioConfigName"] = project.ocioConfigName()
        _clrs["ocioConfigPath"] = project.ocioConfigPath()

        # set main project attributes to context
        context.data["activeProject"] = project
        context.data["activeSequence"] = active_sequence
        context.data["videoTracks"] = video_tracks
        context.data["audioTracks"] = audio_tracks
        context.data["currentFile"] = current_file
        context.data["colorspace"] = _clrs

        self.log.info("currentFile: {}".format(current_file))

        # creating workfile representation
        representation = {
            'name': 'hrox',
            'ext': 'hrox',
            'files': base_name,
            "stagingDir": staging_dir,
        }

        instance_data = {
            "name": "{}_{}".format(asset, subset),
            "asset": asset,
            "subset": "{}{}".format(asset, subset.capitalize()),
            "item": project,
            "family": "workfile",

            # version data
            "versionData": {
                "colorspace": _clrs
            },

            # source attribute
            "sourcePath": current_file,
            "representations": [representation]
        }

        instance = context.create_instance(**instance_data)
        self.log.info("Creating instance: {}".format(instance))

@@ -4,8 +4,8 @@ from contextlib import contextmanager

import six

from avalon import api, io
from openpype.api import get_asset
from openpype.pipeline import legacy_io

import hou

@@ -75,9 +75,13 @@ def generate_ids(nodes, asset_id=None):

    if asset_id is None:
        # Get the asset ID from the database for the asset of current context
        asset_data = io.find_one({"type": "asset",
                                  "name": api.Session["AVALON_ASSET"]},
                                 projection={"_id": True})
        asset_data = legacy_io.find_one(
            {
                "type": "asset",
                "name": legacy_io.Session["AVALON_ASSET"]
            },
            projection={"_id": True}
        )
        assert asset_data, "No current asset found in Session"
        asset_id = asset_data['_id']

@@ -424,8 +428,8 @@ def maintained_selection():

def reset_framerange():
    """Set frame range to current asset"""

    asset_name = api.Session["AVALON_ASSET"]
    asset = io.find_one({"name": asset_name, "type": "asset"})
    asset_name = legacy_io.Session["AVALON_ASSET"]
    asset = legacy_io.find_one({"name": asset_name, "type": "asset"})

    frame_start = asset["data"].get("frameStart")
    frame_end = asset["data"].get("frameEnd")

@@ -1,11 +1,12 @@
"""Houdini-specific USD Library functions."""

import contextlib
import logging

from Qt import QtWidgets, QtCore, QtGui
from avalon import io

from openpype import style
from openpype.pipeline import legacy_io
from openpype.tools.utils.assets_widget import SingleSelectAssetsWidget

from pxr import Sdf

@@ -20,11 +21,12 @@ class SelectAssetDialog(QtWidgets.QWidget):
    Args:
        parm: Parameter where selected asset name is set.
    """

    def __init__(self, parm):
        self.setWindowTitle("Pick Asset")
        self.setWindowFlags(QtCore.Qt.FramelessWindowHint | QtCore.Qt.Popup)

        assets_widget = SingleSelectAssetsWidget(io, parent=self)
        assets_widget = SingleSelectAssetsWidget(legacy_io, parent=self)

        layout = QtWidgets.QHBoxLayout(self)
        layout.addWidget(assets_widget)

@@ -44,7 +46,7 @@ class SelectAssetDialog(QtWidgets.QWidget):
        select_id = None
        name = self._parm.eval()
        if name:
            db_asset = io.find_one(
            db_asset = legacy_io.find_one(
                {"name": name, "type": "asset"},
                {"_id": True}
            )

@@ -1,6 +1,7 @@
# -*- coding: utf-8 -*-
import hou
from avalon import io

from openpype.pipeline import legacy_io
from openpype.hosts.houdini.api import lib
from openpype.hosts.houdini.api import plugin

@@ -22,13 +23,16 @@ class CreateHDA(plugin.Creator):
        # type: (str) -> bool
        """Check if existing subset name versions already exists."""
        # Get all subsets of the current asset
        asset_id = io.find_one({"name": self.data["asset"], "type": "asset"},
                               projection={"_id": True})['_id']
        subset_docs = io.find(
        asset_id = legacy_io.find_one(
            {"name": self.data["asset"], "type": "asset"},
            projection={"_id": True}
        )['_id']
        subset_docs = legacy_io.find(
            {
                "type": "subset",
                "parent": asset_id
            }, {"name": 1}
            },
            {"name": 1}
        )
        existing_subset_names = set(subset_docs.distinct("name"))
        existing_subset_names_low = {

@@ -6,7 +6,7 @@ from openpype.pipeline import load


class SetFrameRangeLoader(load.LoaderPlugin):
    """Set Houdini frame range"""
    """Set frame range excluding pre- and post-handles"""

    families = [
        "animation",

@@ -44,7 +44,7 @@ class SetFrameRangeLoader(load.LoaderPlugin):


class SetFrameRangeWithHandlesLoader(load.LoaderPlugin):
    """Set Maya frame range including pre- and post-handles"""
    """Set frame range including pre- and post-handles"""

    families = [
        "animation",

@@ -7,7 +7,7 @@ from openpype.hosts.houdini.api import pipeline


class AbcLoader(load.LoaderPlugin):
    """Specific loader of Alembic for the avalon.animation family"""
    """Load Alembic"""

    families = ["model", "animation", "pointcache", "gpuCache"]
    label = "Load Alembic"

openpype/hosts/houdini/plugins/load/load_alembic_archive.py (new file, 75 lines)

@@ -0,0 +1,75 @@
import os
from openpype.pipeline import (
    load,
    get_representation_path,
)
from openpype.hosts.houdini.api import pipeline


class AbcArchiveLoader(load.LoaderPlugin):
    """Load Alembic as full geometry network hierarchy """

    families = ["model", "animation", "pointcache", "gpuCache"]
    label = "Load Alembic as Archive"
    representations = ["abc"]
    order = -5
    icon = "code-fork"
    color = "orange"

    def load(self, context, name=None, namespace=None, data=None):

        import hou

        # Format file name, Houdini only wants forward slashes
        file_path = os.path.normpath(self.fname)
        file_path = file_path.replace("\\", "/")

        # Get the root node
        obj = hou.node("/obj")

        # Define node name
        namespace = namespace if namespace else context["asset"]["name"]
        node_name = "{}_{}".format(namespace, name) if namespace else name

        # Create an Alembic archive node
        node = obj.createNode("alembicarchive", node_name=node_name)
        node.moveToGoodPosition()

        # TODO: add FPS of project / asset
        node.setParms({"fileName": file_path,
                       "channelRef": True})

        # Apply some magic
        node.parm("buildHierarchy").pressButton()
        node.moveToGoodPosition()

        nodes = [node]

        self[:] = nodes

        return pipeline.containerise(node_name,
                                     namespace,
                                     nodes,
                                     context,
                                     self.__class__.__name__,
                                     suffix="")

    def update(self, container, representation):

        node = container["node"]

        # Update the file path
        file_path = get_representation_path(representation)
        file_path = file_path.replace("\\", "/")

        # Update attributes
        node.setParms({"fileName": file_path,
                       "representation": str(representation["_id"])})

        # Rebuild
        node.parm("buildHierarchy").pressButton()

    def remove(self, container):

        node = container["node"]
        node.destroy()

openpype/hosts/houdini/plugins/load/load_bgeo.py (new file, 107 lines)

@@ -0,0 +1,107 @@
# -*- coding: utf-8 -*-
import os
import re

from openpype.pipeline import (
    load,
    get_representation_path,
)
from openpype.hosts.houdini.api import pipeline


class BgeoLoader(load.LoaderPlugin):
    """Load bgeo files to Houdini."""

    label = "Load bgeo"
    families = ["model", "pointcache", "bgeo"]
    representations = [
        "bgeo", "bgeosc", "bgeogz",
        "bgeo.sc", "bgeo.gz", "bgeo.lzma", "bgeo.bz2"]
    order = -10
    icon = "code-fork"
    color = "orange"

    def load(self, context, name=None, namespace=None, data=None):

        import hou

        # Get the root node
        obj = hou.node("/obj")

        # Define node name
        namespace = namespace if namespace else context["asset"]["name"]
        node_name = "{}_{}".format(namespace, name) if namespace else name

        # Create a new geo node
        container = obj.createNode("geo", node_name=node_name)
        is_sequence = bool(context["representation"]["context"].get("frame"))

        # Remove the file node, it only loads static meshes
        # Houdini 17 has removed the file node from the geo node
        file_node = container.node("file1")
        if file_node:
            file_node.destroy()

        # Explicitly create a file node
        file_node = container.createNode("file", node_name=node_name)
        file_node.setParms({"file": self.format_path(self.fname, is_sequence)})

        # Set display on last node
        file_node.setDisplayFlag(True)

        nodes = [container, file_node]
        self[:] = nodes

        return pipeline.containerise(
            node_name,
            namespace,
            nodes,
            context,
            self.__class__.__name__,
            suffix="",
        )

    @staticmethod
    def format_path(path, is_sequence):
        """Format file path correctly for single bgeo or bgeo sequence."""
        if not os.path.exists(path):
            raise RuntimeError("Path does not exist: %s" % path)

        # The path is either a single file or sequence in a folder.
        if not is_sequence:
            filename = path
            print("single")
        else:
            filename = re.sub(r"(.*)\.(\d+)\.(bgeo.*)", "\\1.$F4.\\3", path)

            filename = os.path.join(path, filename)

        filename = os.path.normpath(filename)
        filename = filename.replace("\\", "/")

        return filename

    def update(self, container, representation):

        node = container["node"]
        try:
            file_node = next(
                n for n in node.children() if n.type().name() == "file"
            )
        except StopIteration:
            self.log.error("Could not find node of type `alembic`")
            return

        # Update the file path
        file_path = get_representation_path(representation)
        file_path = self.format_path(file_path)

        file_node.setParms({"fileName": file_path})

        # Update attribute
        node.setParms({"representation": str(representation["_id"])})

    def remove(self, container):

        node = container["node"]
        node.destroy()

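For reference, the sequence branch of format_path rewrites an explicit frame number into Houdini's $F4 token; an illustrative run of just the substitution (the path is made up, and the os.path.exists check is skipped here):

import re

path = "/cache/bgeo/pointcacheMain.0101.bgeo.sc"
print(re.sub(r"(.*)\.(\d+)\.(bgeo.*)", "\\1.$F4.\\3", path))
# /cache/bgeo/pointcacheMain.$F4.bgeo.sc
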
@@ -78,7 +78,7 @@ def transfer_non_default_values(src, dest, ignore=None):


class CameraLoader(load.LoaderPlugin):
    """Specific loader of Alembic for the avalon.animation family"""
    """Load camera from an Alembic file"""

    families = ["camera"]
    label = "Load Camera (abc)"

@@ -42,9 +42,9 @@ def get_image_avalon_container():


class ImageLoader(load.LoaderPlugin):
    """Specific loader of Alembic for the avalon.animation family"""
    """Load images into COP2"""

    families = ["colorbleed.imagesequence"]
    families = ["imagesequence"]
    label = "Load Image (COP2)"
    representations = ["*"]
    order = -10

@@ -9,7 +9,7 @@ from openpype.hosts.houdini.api import pipeline


class VdbLoader(load.LoaderPlugin):
    """Specific loader of Alembic for the avalon.animation family"""
    """Load VDB"""

    families = ["vdbcache"]
    label = "Load VDB"

@@ -1,3 +1,7 @@
import os
import subprocess

from openpype.lib.vendor_bin_utils import find_executable
from openpype.pipeline import load

@@ -14,12 +18,7 @@ class ShowInUsdview(load.LoaderPlugin):

    def load(self, context, name=None, namespace=None, data=None):

        import os
        import subprocess

        import avalon.lib as lib

        usdview = lib.which("usdview")
        usdview = find_executable("usdview")

        filepath = os.path.normpath(self.fname)
        filepath = filepath.replace("\\", "/")

@@ -1,6 +1,6 @@
import pyblish.api

from avalon import io
from openpype.pipeline import legacy_io
import openpype.lib.usdlib as usdlib

@@ -50,7 +50,10 @@ class CollectUsdBootstrap(pyblish.api.InstancePlugin):

        self.log.debug("Add bootstrap for: %s" % bootstrap)

        asset = io.find_one({"name": instance.data["asset"], "type": "asset"})
        asset = legacy_io.find_one({
            "name": instance.data["asset"],
            "type": "asset"
        })
        assert asset, "Asset must exist: %s" % asset

        # Check which are not about to be created and don't exist yet

@@ -104,7 +107,8 @@ class CollectUsdBootstrap(pyblish.api.InstancePlugin):
        # Or, if they already exist in the database we can
        # skip them too.
        return bool(
            io.find_one(
                {"name": subset, "type": "subset", "parent": asset["_id"]}
            legacy_io.find_one(
                {"name": subset, "type": "subset", "parent": asset["_id"]},
                {"_id": True}
            )
        )

@@ -7,7 +7,10 @@ from collections import deque
import pyblish.api
import openpype.api

from openpype.pipeline import get_representation_path
from openpype.pipeline import (
    get_representation_path,
    legacy_io,
)
import openpype.hosts.houdini.api.usd as hou_usdlib
from openpype.hosts.houdini.api.lib import render_rop

@@ -266,8 +269,6 @@ class ExtractUSDLayered(openpype.api.Extractor):
            instance.data["files"].append(fname)

    def _compare_with_latest_publish(self, dependency, new_file):

        from avalon import api, io
        import filecmp

        _, ext = os.path.splitext(new_file)

@@ -275,10 +276,10 @@ class ExtractUSDLayered(openpype.api.Extractor):
        # Compare this dependency with the latest published version
        # to detect whether we should make this into a new publish
        # version. If not, skip it.
        asset = io.find_one(
        asset = legacy_io.find_one(
            {"name": dependency.data["asset"], "type": "asset"}
        )
        subset = io.find_one(
        subset = legacy_io.find_one(
            {
                "name": dependency.data["subset"],
                "type": "subset",

@@ -290,7 +291,7 @@ class ExtractUSDLayered(openpype.api.Extractor):
            self.log.debug("No existing subset..")
            return False

        version = io.find_one(
        version = legacy_io.find_one(
            {"type": "version", "parent": subset["_id"], },
            sort=[("name", -1)]
        )

@@ -298,7 +299,7 @@ class ExtractUSDLayered(openpype.api.Extractor):
            self.log.debug("No existing version..")
            return False

        representation = io.find_one(
        representation = legacy_io.find_one(
            {
                "name": ext.lstrip("."),
                "type": "representation",

@@ -1,9 +1,9 @@
import re

import pyblish.api
import openpype.api

from avalon import io
import openpype.api
from openpype.pipeline import legacy_io


class ValidateUSDShadeModelExists(pyblish.api.InstancePlugin):

@@ -23,16 +23,20 @@ class ValidateUSDShadeModelExists(pyblish.api.InstancePlugin):
        shade_subset = subset.split(".", 1)[0]
        model_subset = re.sub("^usdShade", "usdModel", shade_subset)

        asset_doc = io.find_one({"name": asset, "type": "asset"})
        asset_doc = legacy_io.find_one(
            {"name": asset, "type": "asset"},
            {"_id": True}
        )
        if not asset_doc:
            raise RuntimeError("Asset does not exist: %s" % asset)

        subset_doc = io.find_one(
        subset_doc = legacy_io.find_one(
            {
                "name": model_subset,
                "type": "subset",
                "parent": asset_doc["_id"],
            }
            },
            {"_id": True}
        )
        if not subset_doc:
            raise RuntimeError(

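The subset-name mapping in the validator is a plain prefix swap; a quick check of the expression (the subset name is illustrative):

import re

print(re.sub("^usdShade", "usdModel", "usdShadeChairMain"))
# usdModelChairMain
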
@@ -1,17 +1,21 @@
import os
import hou
import husdoutputprocessors.base as base
import os
import re
import logging

import colorbleed.usdlib as usdlib

from openpype.pipeline import (
    legacy_io,
    registered_root,
)


def _get_project_publish_template():
    """Return publish template from database for current project"""
    from avalon import io
    project = io.find_one({"type": "project"},
                          projection={"config.template.publish": True})
    project = legacy_io.find_one(
        {"type": "project"},
        projection={"config.template.publish": True}
    )
    return project["config"]["template"]["publish"]

@@ -133,12 +137,11 @@ class AvalonURIOutputProcessor(base.OutputProcessorBase):

        """

        from avalon import api, io
        from openpype.pipeline import registered_root

        PROJECT = api.Session["AVALON_PROJECT"]
        asset_doc = io.find_one({"name": asset,
                                 "type": "asset"})
        PROJECT = legacy_io.Session["AVALON_PROJECT"]
        asset_doc = legacy_io.find_one({
            "name": asset,
            "type": "asset"
        })
        if not asset_doc:
            raise RuntimeError("Invalid asset name: '%s'" % asset)

@@ -2,8 +2,8 @@
from __future__ import absolute_import

import pyblish.api
from avalon import io

from openpype.pipeline import legacy_io
from openpype.api import get_errored_instances_from_context

@@ -75,8 +75,10 @@ class GenerateUUIDsOnInvalidAction(pyblish.api.Action):
        from . import lib

        asset = instance.data['asset']
        asset_id = io.find_one({"name": asset, "type": "asset"},
                               projection={"_id": True})['_id']
        asset_id = legacy_io.find_one(
            {"name": asset, "type": "asset"},
            projection={"_id": True}
        )['_id']
        for node, _id in lib.generate_ids(nodes, asset_id=asset_id):
            lib.set_id(node, _id, overwrite=True)

@@ -1,7 +1,8 @@
# -*- coding: utf-8 -*-
"""OpenPype script commands to be used directly in Maya."""
from maya import cmds
from avalon import api, io

from openpype.pipeline import legacy_io


class ToolWindows:

@@ -73,13 +74,13 @@ def reset_frame_range():
        59.94: '59.94fps',
        44100: '44100fps',
        48000: '48000fps'
    }.get(float(api.Session.get("AVALON_FPS", 25)), "pal")
    }.get(float(legacy_io.Session.get("AVALON_FPS", 25)), "pal")

    cmds.currentUnit(time=fps)

    # Set frame start/end
    asset_name = api.Session["AVALON_ASSET"]
    asset = io.find_one({"name": asset_name, "type": "asset"})
    asset_name = legacy_io.Session["AVALON_ASSET"]
    asset = legacy_io.find_one({"name": asset_name, "type": "asset"})

    frame_start = asset["data"].get("frameStart")
    frame_end = asset["data"].get("frameEnd")

@@ -144,8 +145,8 @@ def reset_resolution():
    resolution_height = 1080

    # Get resolution from asset
    asset_name = api.Session["AVALON_ASSET"]
    asset_doc = io.find_one({"name": asset_name, "type": "asset"})
    asset_name = legacy_io.Session["AVALON_ASSET"]
    asset_doc = legacy_io.find_one({"name": asset_name, "type": "asset"})
    resolution = _resolution_from_document(asset_doc)
    # Try get resolution from project
    if resolution is None:

@@ -154,7 +155,7 @@ def reset_resolution():
            "Asset \"{}\" does not have set resolution."
            " Trying to get resolution from project"
        ).format(asset_name))
        project_doc = io.find_one({"type": "project"})
        project_doc = legacy_io.find_one({"type": "project"})
        resolution = _resolution_from_document(project_doc)

    if resolution is None:

@@ -17,11 +17,10 @@ import bson
from maya import cmds, mel
import maya.api.OpenMaya as om

from avalon import api, io

from openpype import lib
from openpype.api import get_anatomy_settings
from openpype.pipeline import (
    legacy_io,
    discover_loader_plugins,
    loaders_from_representation,
    get_representation_path,

@@ -1388,9 +1387,13 @@ def generate_ids(nodes, asset_id=None):

    if asset_id is None:
        # Get the asset ID from the database for the asset of current context
        asset_data = io.find_one({"type": "asset",
                                  "name": api.Session["AVALON_ASSET"]},
                                 projection={"_id": True})
        asset_data = legacy_io.find_one(
            {
                "type": "asset",
                "name": legacy_io.Session["AVALON_ASSET"]
            },
            projection={"_id": True}
        )
        assert asset_data, "No current asset found in Session"
        asset_id = asset_data['_id']

@@ -1545,9 +1548,11 @@ def list_looks(asset_id):

    # # get all subsets with look leading in
    # the name associated with the asset
    subset = io.find({"parent": bson.ObjectId(asset_id),
                      "type": "subset",
                      "name": {"$regex": "look*"}})
    subset = legacy_io.find({
        "parent": bson.ObjectId(asset_id),
        "type": "subset",
        "name": {"$regex": "look*"}
    })

    return list(subset)

@@ -1566,13 +1571,17 @@ def assign_look_by_version(nodes, version_id):
    """

    # Get representations of shader file and relationships
    look_representation = io.find_one({"type": "representation",
                                       "parent": version_id,
                                       "name": "ma"})
    look_representation = legacy_io.find_one({
        "type": "representation",
        "parent": version_id,
        "name": "ma"
    })

    json_representation = io.find_one({"type": "representation",
                                       "parent": version_id,
                                       "name": "json"})
    json_representation = legacy_io.find_one({
        "type": "representation",
        "parent": version_id,
        "name": "json"
    })

    # See if representation is already loaded, if so reuse it.
    host = registered_host()

@@ -1637,9 +1646,11 @@ def assign_look(nodes, subset="lookDefault"):
        except bson.errors.InvalidId:
            log.warning("Asset ID is not compatible with bson")
            continue
        subset_data = io.find_one({"type": "subset",
                                   "name": subset,
                                   "parent": asset_id})
        subset_data = legacy_io.find_one({
            "type": "subset",
            "name": subset,
            "parent": asset_id
        })

        if not subset_data:
            log.warning("No subset '{}' found for {}".format(subset, asset_id))

@@ -1647,13 +1658,18 @@ def assign_look(nodes, subset="lookDefault"):

        # get last version
        # with backwards compatibility
        version = io.find_one({"parent": subset_data['_id'],
                               "type": "version",
                               "data.families":
                                   {"$in": ["look"]}
                               },
                              sort=[("name", -1)],
                              projection={"_id": True, "name": True})
        version = legacy_io.find_one(
            {
                "parent": subset_data['_id'],
                "type": "version",
                "data.families": {"$in": ["look"]}
            },
            sort=[("name", -1)],
            projection={
                "_id": True,
                "name": True
            }
        )

        log.debug("Assigning look '{}' <v{:03d}>".format(subset,
                                                         version["name"]))

@@ -2136,7 +2152,7 @@ def reset_scene_resolution():
        None
    """

    project_doc = io.find_one({"type": "project"})
    project_doc = legacy_io.find_one({"type": "project"})
    project_data = project_doc["data"]
    asset_data = lib.get_asset()["data"]

@@ -2169,13 +2185,13 @@ def set_context_settings():
    """

    # Todo (Wijnand): apply renderer and resolution of project
    project_doc = io.find_one({"type": "project"})
    project_doc = legacy_io.find_one({"type": "project"})
    project_data = project_doc["data"]
    asset_data = lib.get_asset()["data"]

    # Set project fps
    fps = asset_data.get("fps", project_data.get("fps", 25))
    api.Session["AVALON_FPS"] = str(fps)
    legacy_io.Session["AVALON_FPS"] = str(fps)
    set_scene_fps(fps)

    reset_scene_resolution()

@@ -2937,7 +2953,7 @@ def update_content_on_context_change():
    This will update scene content to match new asset on context change
    """
    scene_sets = cmds.listSets(allSets=True)
    new_asset = api.Session["AVALON_ASSET"]
    new_asset = legacy_io.Session["AVALON_ASSET"]
    new_data = lib.get_asset()["data"]
    for s in scene_sets:
        try:

@ -77,8 +77,10 @@ IMAGE_PREFIXES = {
|
|||
"arnold": "defaultRenderGlobals.imageFilePrefix",
|
||||
"renderman": "rmanGlobals.imageFileFormat",
|
||||
"redshift": "defaultRenderGlobals.imageFilePrefix",
|
||||
"mayahardware2": "defaultRenderGlobals.imageFilePrefix"
|
||||
}
|
||||
|
||||
RENDERMAN_IMAGE_DIR = "maya/<scene>/<layer>"
|
||||
|
||||
@attr.s
|
||||
class LayerMetadata(object):
|
||||
|
|
@ -154,7 +156,8 @@ def get(layer, render_instance=None):
|
|||
"arnold": RenderProductsArnold,
|
||||
"vray": RenderProductsVray,
|
||||
"redshift": RenderProductsRedshift,
|
||||
"renderman": RenderProductsRenderman
|
||||
"renderman": RenderProductsRenderman,
|
||||
"mayahardware2": RenderProductsMayaHardware
|
||||
}.get(renderer_name.lower(), None)
|
||||
if renderer is None:
|
||||
raise UnsupportedRendererException(
|
||||
|
|
@ -1054,6 +1057,8 @@ class RenderProductsRenderman(ARenderProducts):
|
|||
:func:`ARenderProducts.get_render_products()`
|
||||
|
||||
"""
|
||||
from rfm2.api.displays import get_displays # noqa
|
||||
|
||||
cameras = [
|
||||
self.sanitize_camera_name(c)
|
||||
for c in self.get_renderable_cameras()
|
||||
|
|
@@ -1066,47 +1071,122 @@ class RenderProductsRenderman(ARenderProducts):
         ]
         products = []

-        default_ext = "exr"
-        displays = cmds.listConnections("rmanGlobals.displays")
-        for aov in displays:
-            enabled = self._get_attr(aov, "enabled")
+        # NOTE: This is guessing extensions from renderman display types.
+        #       Some of them are just framebuffers, d_texture format can be
+        #       set in display setting. We set those now to None, but it
+        #       should be handled more gracefully.
+        display_types = {
+            "d_deepexr": "exr",
+            "d_it": None,
+            "d_null": None,
+            "d_openexr": "exr",
+            "d_png": "png",
+            "d_pointcloud": "ptc",
+            "d_targa": "tga",
+            "d_texture": None,
+            "d_tiff": "tif"
+        }
+
+        displays = get_displays()["displays"]
+        for name, display in displays.items():
+            enabled = display["params"]["enable"]["value"]
             if not enabled:
                 continue

-            aov_name = str(aov)
+            aov_name = name
             if aov_name == "rmanDefaultDisplay":
                 aov_name = "beauty"

+            extensions = display_types.get(
+                display["driverNode"]["type"], "exr")
+
             for camera in cameras:
                 product = RenderProduct(productName=aov_name,
-                                        ext=default_ext,
+                                        ext=extensions,
                                         camera=camera)
                 products.append(product)

         return products

-    def get_files(self, product, camera):
+    def get_files(self, product):
         """Get expected files.

         In renderman we hack it with prepending path. This path would
         normally be translated from `rmanGlobals.imageOutputDir`. We skip
         this and hardcode prepend path we expect. There is no place for user
         to mess around with this settings anyway and it is enforced in
         render settings validator.
         """
-        files = super(RenderProductsRenderman, self).get_files(product, camera)
+        files = super(RenderProductsRenderman, self).get_files(product)

         layer_data = self.layer_data
         new_files = []

+        resolved_image_dir = re.sub("<scene>", layer_data.sceneName, RENDERMAN_IMAGE_DIR, flags=re.IGNORECASE)  # noqa: E501
+        resolved_image_dir = re.sub("<layer>", layer_data.layerName, resolved_image_dir, flags=re.IGNORECASE)  # noqa: E501
         for file in files:
-            new_file = "{}/{}/{}".format(
-                layer_data["sceneName"], layer_data["layerName"], file
-            )
+            new_file = "{}/{}".format(resolved_image_dir, file)
             new_files.append(new_file)

         return new_files


+class RenderProductsMayaHardware(ARenderProducts):
+    """Expected files for MayaHardware renderer."""
+
+    renderer = "mayahardware2"
+
+    extensions = [
+        {"label": "JPEG", "index": 8, "extension": "jpg"},
+        {"label": "PNG", "index": 32, "extension": "png"},
+        {"label": "EXR(exr)", "index": 40, "extension": "exr"}
+    ]
+
+    def _get_extension(self, value):
+        result = None
+        if isinstance(value, int):
+            extensions = {
+                extension["index"]: extension["extension"]
+                for extension in self.extensions
+            }
+            try:
+                result = extensions[value]
+            except KeyError:
+                raise NotImplementedError(
+                    "Could not find extension for {}".format(value)
+                )
+
+        if isinstance(value, six.string_types):
+            extensions = {
+                extension["label"]: extension["extension"]
+                for extension in self.extensions
+            }
+            try:
+                result = extensions[value]
+            except KeyError:
+                raise NotImplementedError(
+                    "Could not find extension for {}".format(value)
+                )
+
+        if not result:
+            raise NotImplementedError(
+                "Could not find extension for {}".format(value)
+            )
+
+        return result
+
+    def get_render_products(self):
+        """Get all AOVs.
+        See Also:
+            :func:`ARenderProducts.get_render_products()`
+        """
+        ext = self._get_extension(
+            self._get_attr("defaultRenderGlobals.imageFormat")
+        )
+
+        products = []
+        for cam in self.get_renderable_cameras():
+            product = RenderProduct(productName="beauty", ext=ext, camera=cam)
+            products.append(product)
+
+        return products
+
+
 class AOVError(Exception):
     """Custom exception for determining AOVs."""

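The new `get_files` resolves the `<scene>` and `<layer>` tokens in the hardcoded `RENDERMAN_IMAGE_DIR` with case-insensitive `re.sub` calls before prepending it to each expected file. A minimal sketch of that substitution in isolation; the scene and layer names are invented example values:

```python
import re

# Case-insensitive token substitution, mirroring the new get_files().
RENDERMAN_IMAGE_DIR = "maya/<scene>/<layer>"

scene_name = "sh010_lighting_v003"  # invented example
layer_name = "beautyLayer"          # invented example

resolved = re.sub("<scene>", scene_name, RENDERMAN_IMAGE_DIR, flags=re.IGNORECASE)
resolved = re.sub("<layer>", layer_name, resolved, flags=re.IGNORECASE)

for f in ["beauty.0001.exr", "beauty.0002.exr"]:
    print("{}/{}".format(resolved, f))
# maya/sh010_lighting_v003/beautyLayer/beauty.0001.exr ...
```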
@@ -6,10 +6,9 @@ from Qt import QtWidgets, QtGui
 import maya.utils
 import maya.cmds as cmds

-import avalon.api
-
 from openpype.api import BuildWorkfile
 from openpype.settings import get_project_settings
+from openpype.pipeline import legacy_io
 from openpype.tools.utils import host_tools
 from openpype.hosts.maya.api import lib
 from .lib import get_main_window, IS_HEADLESS

@@ -40,15 +39,15 @@ def install():
     parent_widget = get_main_window()
     cmds.menu(
         MENU_NAME,
-        label=avalon.api.Session["AVALON_LABEL"],
+        label=legacy_io.Session["AVALON_LABEL"],
         tearOff=True,
         parent="MayaWindow"
     )

     # Create context menu
     context_label = "{}, {}".format(
-        avalon.api.Session["AVALON_ASSET"],
-        avalon.api.Session["AVALON_TASK"]
+        legacy_io.Session["AVALON_ASSET"],
+        legacy_io.Session["AVALON_TASK"]
     )
     cmds.menuItem(
         "currentContext",

@@ -211,7 +210,7 @@ def update_menu_task_label():
         return

     label = "{}, {}".format(
-        avalon.api.Session["AVALON_ASSET"],
-        avalon.api.Session["AVALON_TASK"]
+        legacy_io.Session["AVALON_ASSET"],
+        legacy_io.Session["AVALON_TASK"]
     )
     cmds.menuItem(object_name, edit=True, label=label)

@@ -7,7 +7,6 @@ from maya import utils, cmds, OpenMaya
 import maya.api.OpenMaya as om

 import pyblish.api
-import avalon.api

 import openpype.hosts.maya
 from openpype.tools.utils import host_tools

@@ -18,6 +17,7 @@ from openpype.lib import (
 )
 from openpype.lib.path_tools import HostDirmap
 from openpype.pipeline import (
+    legacy_io,
     register_loader_plugin_path,
     register_inventory_action_path,
     register_creator_plugin_path,

@@ -93,7 +93,7 @@ def _set_project():
         None

     """
-    workdir = avalon.api.Session["AVALON_WORKDIR"]
+    workdir = legacy_io.Session["AVALON_WORKDIR"]

     try:
         os.makedirs(workdir)

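`_set_project` creates the work directory before switching Maya's workspace to it; `os.makedirs` sits in a `try` because the directory commonly exists already. The hunk cuts off before the `except` clause, so the guard below is a sketch of our reading, not the file's confirmed handling:

```python
import os

# Sketch of a workdir-creation guard. The except handling is an
# assumption: treat an already-existing directory as success and
# re-raise everything else (permissions, invalid path, ...).
workdir = os.path.join(os.getcwd(), "example_workdir")

try:
    os.makedirs(workdir)
except OSError:
    if not os.path.isdir(workdir):
        raise

print("workdir ready:", workdir)
```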
@@ -473,7 +473,7 @@ def on_task_changed():
     # Run
     menu.update_menu_task_label()

-    workdir = avalon.api.Session["AVALON_WORKDIR"]
+    workdir = legacy_io.Session["AVALON_WORKDIR"]
     if os.path.exists(workdir):
         log.info("Updating Maya workspace for task change to %s", workdir)

@@ -494,9 +494,9 @@ def on_task_changed():
         lib.update_content_on_context_change()

     msg = " project: {}\n asset: {}\n task:{}".format(
-        avalon.api.Session["AVALON_PROJECT"],
-        avalon.api.Session["AVALON_ASSET"],
-        avalon.api.Session["AVALON_TASK"]
+        legacy_io.Session["AVALON_PROJECT"],
+        legacy_io.Session["AVALON_ASSET"],
+        legacy_io.Session["AVALON_TASK"]
     )

     lib.show_message(

@@ -10,8 +10,9 @@ from bson.objectid import ObjectId

 from maya import cmds

-from avalon import io
 from openpype.pipeline import (
+    schema,
+    legacy_io,
     discover_loader_plugins,
     loaders_from_representation,
     load_container,

@@ -253,7 +254,6 @@ def get_contained_containers(container):

     """

-    import avalon.schema
     from .pipeline import parse_container

     # Get avalon containers in this package setdress container

@@ -263,7 +263,7 @@ def get_contained_containers(container):
         try:
             member_container = parse_container(node)
             containers.append(member_container)
-        except avalon.schema.ValidationError:
+        except schema.ValidationError:
             pass

     return containers

@@ -283,21 +283,23 @@ def update_package_version(container, version):
     """

     # Versioning (from `core.maya.pipeline`)
-    current_representation = io.find_one({
+    current_representation = legacy_io.find_one({
         "_id": ObjectId(container["representation"])
     })

     assert current_representation is not None, "This is a bug"

-    version_, subset, asset, project = io.parenthood(current_representation)
+    version_, subset, asset, project = legacy_io.parenthood(
+        current_representation
+    )

     if version == -1:
-        new_version = io.find_one({
+        new_version = legacy_io.find_one({
             "type": "version",
             "parent": subset["_id"]
         }, sort=[("name", -1)])
     else:
-        new_version = io.find_one({
+        new_version = legacy_io.find_one({
             "type": "version",
             "parent": subset["_id"],
             "name": version,

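`update_package_version` treats `version == -1` as "latest": it queries the subset's versions sorted by name descending and takes the first hit; any other value selects that exact version. A minimal sketch of the same selection rule over plain dicts, no database required (documents are invented):

```python
# Sketch of the version-selection rule behind the find_one calls above.
versions = [
    {"type": "version", "parent": "subset1", "name": 1},
    {"type": "version", "parent": "subset1", "name": 2},
    {"type": "version", "parent": "subset1", "name": 3},
]

def find_version(version):
    if version == -1:
        # equivalent of find_one(..., sort=[("name", -1)])
        return max(versions, key=lambda doc: doc["name"])
    for doc in versions:
        if doc["name"] == version:
            return doc
    return None

assert find_version(-1)["name"] == 3
assert find_version(2)["name"] == 2
```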
@@ -306,7 +308,7 @@ def update_package_version(container, version):
     assert new_version is not None, "This is a bug"

     # Get the new representation (new file)
-    new_representation = io.find_one({
+    new_representation = legacy_io.find_one({
         "type": "representation",
         "parent": new_version["_id"],
         "name": current_representation["name"]

@@ -328,7 +330,7 @@ def update_package(set_container, representation):
     """

     # Load the original package data
-    current_representation = io.find_one({
+    current_representation = legacy_io.find_one({
         "_id": ObjectId(set_container['representation']),
         "type": "representation"
     })

@@ -479,10 +481,10 @@ def update_scene(set_container, containers, current_data, new_data, new_file):
     # Check whether the conversion can be done by the Loader.
     # They *must* use the same asset, subset and Loader for
     # `update_container` to make sense.
-    old = io.find_one({
+    old = legacy_io.find_one({
         "_id": ObjectId(representation_current)
     })
-    new = io.find_one({
+    new = legacy_io.find_one({
         "_id": ObjectId(representation_new)
     })
     is_valid = compare_representations(old=old, new=new)

@@ -18,9 +18,10 @@ from openpype.api import (
     get_project_settings,
     get_asset)
 from openpype.modules import ModulesManager
-from openpype.pipeline import CreatorError
-
-from avalon.api import Session
+from openpype.pipeline import (
+    CreatorError,
+    legacy_io,
+)


 class CreateRender(plugin.Creator):

@@ -75,16 +76,20 @@ class CreateRender(plugin.Creator):
         'mentalray': 'defaultRenderGlobals.imageFilePrefix',
         'vray': 'vraySettings.fileNamePrefix',
         'arnold': 'defaultRenderGlobals.imageFilePrefix',
-        'renderman': 'defaultRenderGlobals.imageFilePrefix',
-        'redshift': 'defaultRenderGlobals.imageFilePrefix'
+        'renderman': 'rmanGlobals.imageFileFormat',
+        'redshift': 'defaultRenderGlobals.imageFilePrefix',
+        'mayahardware2': 'defaultRenderGlobals.imageFilePrefix',
     }

     _image_prefixes = {
         'mentalray': 'maya/<Scene>/<RenderLayer>/<RenderLayer>{aov_separator}<RenderPass>',  # noqa
         'vray': 'maya/<scene>/<Layer>/<Layer>',
         'arnold': 'maya/<Scene>/<RenderLayer>/<RenderLayer>{aov_separator}<RenderPass>',  # noqa
-        'renderman': 'maya/<Scene>/<layer>/<layer>{aov_separator}<aov>',
-        'redshift': 'maya/<Scene>/<RenderLayer>/<RenderLayer>'  # noqa
+        # this needs `imageOutputDir`
+        # (<ws>/renders/maya/<scene>) set separately
+        'renderman': '<layer>_<aov>.<f4>.<ext>',
+        'redshift': 'maya/<Scene>/<RenderLayer>/<RenderLayer>',  # noqa
+        'mayahardware2': 'maya/<Scene>/<RenderLayer>/<RenderLayer>',  # noqa
     }

     _aov_chars = {

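Note the asymmetry the new comment calls out: most renderers embed the whole `maya/<Scene>/...` path in the file prefix, while RenderMan's prefix is now relative (`<layer>_<aov>.<f4>.<ext>`) and relies on `rmanGlobals.imageOutputDir` being set to `maya/<scene>/<layer>` separately, as a later hunk does. A minimal sketch of how the two halves combine into one output path; the token expansion is a simplified stand-in for what Maya and RenderMan do internally, and the token values are invented:

```python
# Sketch: RenderMan output path = imageOutputDir + relative file prefix.
image_output_dir = "maya/<scene>/<layer>"
file_prefix = "<layer>_<aov>.<f4>.<ext>"

tokens = {  # invented example values
    "<scene>": "sh010_v001",
    "<layer>": "masterLayer",
    "<aov>": "beauty",
    "<f4>": "0001",
    "<ext>": "exr",
}

path = "{}/{}".format(image_output_dir, file_prefix)
for token, value in tokens.items():
    path = path.replace(token, value)

print(path)  # maya/sh010_v001/masterLayer/masterLayer_beauty.0001.exr
```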
@@ -103,7 +108,7 @@ class CreateRender(plugin.Creator):
             self.deadline_servers = {}
             return
         self._project_settings = get_project_settings(
-            Session["AVALON_PROJECT"])
+            legacy_io.Session["AVALON_PROJECT"])

         # project_settings/maya/create/CreateRender/aov_separator
         try:

@@ -439,6 +444,10 @@ class CreateRender(plugin.Creator):

         self._set_global_output_settings()

+        if renderer == "renderman":
+            cmds.setAttr("rmanGlobals.imageOutputDir",
+                         "maya/<scene>/<layer>", type="string")
+
     def _set_vray_settings(self, asset):
         # type: (dict) -> None
         """Sets important settings for Vray."""

@@ -1,7 +1,7 @@
 # -*- coding: utf-8 -*-
 """Creator for Unreal Skeletal Meshes."""
 from openpype.hosts.maya.api import plugin, lib
-from avalon.api import Session
+from openpype.pipeline import legacy_io
 from maya import cmds  # noqa


@@ -26,7 +26,7 @@ class CreateUnrealSkeletalMesh(plugin.Creator):
         dynamic_data = super(CreateUnrealSkeletalMesh, cls).get_dynamic_data(
             variant, task_name, asset_id, project_name, host_name
         )
-        dynamic_data["asset"] = Session.get("AVALON_ASSET")
+        dynamic_data["asset"] = legacy_io.Session.get("AVALON_ASSET")
         return dynamic_data

     def process(self):

@@ -1,8 +1,8 @@
 # -*- coding: utf-8 -*-
 """Creator for Unreal Static Meshes."""
 from openpype.hosts.maya.api import plugin, lib
-from avalon.api import Session
 from openpype.api import get_project_settings
+from openpype.pipeline import legacy_io
 from maya import cmds  # noqa


@@ -18,7 +18,7 @@ class CreateUnrealStaticMesh(plugin.Creator):
         """Constructor."""
         super(CreateUnrealStaticMesh, self).__init__(*args, **kwargs)
         self._project_settings = get_project_settings(
-            Session["AVALON_PROJECT"])
+            legacy_io.Session["AVALON_PROJECT"])

     @classmethod
     def get_dynamic_data(

@@ -27,7 +27,7 @@ class CreateUnrealStaticMesh(plugin.Creator):
         dynamic_data = super(CreateUnrealStaticMesh, cls).get_dynamic_data(
             variant, task_name, asset_id, project_name, host_name
         )
-        dynamic_data["asset"] = Session.get("AVALON_ASSET")
+        dynamic_data["asset"] = legacy_io.Session.get("AVALON_ASSET")
         return dynamic_data

     def process(self):

@@ -18,11 +18,12 @@ from openpype.api import (
 )

 from openpype.lib import requests_get
-from openpype.pipeline import CreatorError
+from openpype.pipeline import (
+    CreatorError,
+    legacy_io,
+)
 from openpype.modules import ModulesManager

-from avalon.api import Session
-

 class CreateVRayScene(plugin.Creator):
     """Create Vray Scene."""

@@ -47,7 +48,7 @@ class CreateVRayScene(plugin.Creator):
             self.deadline_servers = {}
             return
         self._project_settings = get_project_settings(
-            Session["AVALON_PROJECT"])
+            legacy_io.Session["AVALON_PROJECT"])

         try:
             default_servers = deadline_settings["deadline_urls"]

@@ -1,9 +1,10 @@
 import json
-from avalon import io
 from bson.objectid import ObjectId

 from openpype.pipeline import (
     InventoryAction,
     get_representation_context,
+    legacy_io,
 )
 from openpype.hosts.maya.api.lib import (
     maintained_selection,

@@ -39,7 +40,7 @@ class ImportModelRender(InventoryAction):
         else:
             nodes.append(n)

-        repr_doc = io.find_one({
+        repr_doc = legacy_io.find_one({
             "_id": ObjectId(container["representation"]),
         })
         version_id = repr_doc["parent"]

@@ -63,7 +64,7 @@ class ImportModelRender(InventoryAction):
         from maya import cmds

         # Get representations of shader file and relationships
-        look_repr = io.find_one({
+        look_repr = legacy_io.find_one({
             "type": "representation",
             "parent": version_id,
             "name": {"$regex": self.scene_type_regex},

@@ -72,7 +73,7 @@ class ImportModelRender(InventoryAction):
             print("No model render sets for this model version..")
             return

-        json_repr = io.find_one({
+        json_repr = legacy_io.find_one({
             "type": "representation",
             "parent": version_id,
             "name": self.look_data_type,

@@ -2,7 +2,7 @@ import openpype.hosts.maya.api.plugin


 class AbcLoader(openpype.hosts.maya.api.plugin.ReferenceLoader):
-    """Specific loader of Alembic for the avalon.animation family"""
+    """Loader to reference an Alembic file"""

     families = ["animation",
                 "camera",

@@ -10,7 +10,7 @@ from openpype.hosts.maya.api.lib import (


 class SetFrameRangeLoader(load.LoaderPlugin):
-    """Specific loader of Alembic for the avalon.animation family"""
+    """Set frame range excluding pre- and post-handles"""

     families = ["animation",
                 "camera",

@@ -44,7 +44,7 @@ class SetFrameRangeLoader(load.LoaderPlugin):


 class SetFrameRangeWithHandlesLoader(load.LoaderPlugin):
-    """Specific loader of Alembic for the avalon.animation family"""
+    """Set frame range including pre- and post-handles"""

     families = ["animation",
                 "camera",

@@ -16,7 +16,7 @@ from openpype.hosts.maya.api.pipeline import containerise


 class AssProxyLoader(openpype.hosts.maya.api.plugin.ReferenceLoader):
-    """Load the Proxy"""
+    """Load Arnold Proxy as reference"""

     families = ["ass"]
     representations = ["ass"]

@@ -1,8 +1,9 @@
 from maya import cmds, mel
-from avalon import io

 from openpype.pipeline import (
+    legacy_io,
     load,
-    get_representation_path
+    get_representation_path,
 )
 from openpype.hosts.maya.api.pipeline import containerise
 from openpype.hosts.maya.api.lib import unique_namespace

@@ -64,9 +65,9 @@ class AudioLoader(load.LoaderPlugin):
         )

         # Set frame range.
-        version = io.find_one({"_id": representation["parent"]})
-        subset = io.find_one({"_id": version["parent"]})
-        asset = io.find_one({"_id": subset["parent"]})
+        version = legacy_io.find_one({"_id": representation["parent"]})
+        subset = legacy_io.find_one({"_id": version["parent"]})
+        asset = legacy_io.find_one({"_id": subset["parent"]})
         audio_node.sourceStart.set(1 - asset["data"]["frameStart"])
         audio_node.sourceEnd.set(asset["data"]["frameEnd"])

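The audio loader needs the asset's frame range, which lives three documents up the hierarchy: representation → version → subset → asset, each reached through its `parent` id. A minimal sketch of that walk over an in-memory stand-in for the database (documents invented, `find_one` is a toy replacement for the real query):

```python
# Sketch of the representation -> version -> subset -> asset walk.
docs = {
    "asset1": {"_id": "asset1", "data": {"frameStart": 1001, "frameEnd": 1100}},
    "subset1": {"_id": "subset1", "parent": "asset1"},
    "version1": {"_id": "version1", "parent": "subset1"},
    "repr1": {"_id": "repr1", "parent": "version1"},
}

def find_one(query):
    # toy stand-in: look a document up by its _id
    return docs.get(query["_id"])

representation = docs["repr1"]
version = find_one({"_id": representation["parent"]})
subset = find_one({"_id": version["parent"]})
asset = find_one({"_id": subset["parent"]})

print(asset["data"]["frameStart"], asset["data"]["frameEnd"])  # 1001 1100
```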
@@ -8,7 +8,7 @@ from openpype.api import get_project_settings


 class GpuCacheLoader(load.LoaderPlugin):
-    """Load model Alembic as gpuCache"""
+    """Load Alembic as gpuCache"""

     families = ["model"]
     representations = ["abc"]

@@ -1,7 +1,7 @@
 from Qt import QtWidgets, QtCore

-from avalon import io
 from openpype.pipeline import (
+    legacy_io,
     load,
     get_representation_path
 )

@@ -216,9 +216,9 @@ class ImagePlaneLoader(load.LoaderPlugin):
         )

         # Set frame range.
-        version = io.find_one({"_id": representation["parent"]})
-        subset = io.find_one({"_id": version["parent"]})
-        asset = io.find_one({"_id": subset["parent"]})
+        version = legacy_io.find_one({"_id": representation["parent"]})
+        subset = legacy_io.find_one({"_id": version["parent"]})
+        asset = legacy_io.find_one({"_id": subset["parent"]})
         start_frame = asset["data"]["frameStart"]
         end_frame = asset["data"]["frameEnd"]
         image_plane_shape.frameOffset.set(1 - start_frame)

@@ -5,8 +5,10 @@ from collections import defaultdict

 from Qt import QtWidgets

-from avalon import io
-from openpype.pipeline import get_representation_path
+from openpype.pipeline import (
+    legacy_io,
+    get_representation_path,
+)
 import openpype.hosts.maya.api.plugin
 from openpype.hosts.maya.api import lib
 from openpype.widgets.message_window import ScrollMessageBox

@@ -71,7 +73,7 @@ class LookLoader(openpype.hosts.maya.api.plugin.ReferenceLoader):
         shader_nodes = cmds.ls(members, type='shadingEngine')
         nodes = set(self._get_nodes_with_shader(shader_nodes))

-        json_representation = io.find_one({
+        json_representation = legacy_io.find_one({
             "type": "representation",
             "parent": representation['parent'],
             "name": "json"

@@ -1,16 +1,18 @@
 import os
 from maya import cmds
-from avalon import api

 from openpype.api import get_project_settings
 from openpype.lib import get_creator_by_name
-from openpype.pipeline import legacy_create
+from openpype.pipeline import (
+    legacy_io,
+    legacy_create,
+)
 import openpype.hosts.maya.api.plugin
 from openpype.hosts.maya.api.lib import maintained_selection


 class ReferenceLoader(openpype.hosts.maya.api.plugin.ReferenceLoader):
-    """Load the model"""
+    """Reference file"""

     families = ["model",
                 "pointcache",

@@ -143,7 +145,7 @@ class ReferenceLoader(openpype.hosts.maya.api.plugin.ReferenceLoader):
         roots = cmds.ls(self[:], assemblies=True, long=True)
         assert roots, "No root nodes in rig, this is a bug."

-        asset = api.Session["AVALON_ASSET"]
+        asset = legacy_io.Session["AVALON_ASSET"]
         dependency = str(context["representation"]["_id"])

         self.log.info("Creating subset: {}".format(namespace))

@@ -74,6 +74,7 @@ def _fix_duplicate_vvg_callbacks():


 class LoadVDBtoVRay(load.LoaderPlugin):
+    """Load OpenVDB in a V-Ray Volume Grid"""

     families = ["vdbcache"]
     representations = ["vdb"]

@@ -11,9 +11,9 @@ from bson.objectid import ObjectId

 import maya.cmds as cmds

-from avalon import io
 from openpype.api import get_project_settings
 from openpype.pipeline import (
+    legacy_io,
     load,
     get_representation_path
 )

@@ -185,12 +185,11 @@ class VRayProxyLoader(load.LoaderPlugin):
         """
         self.log.debug(
             "Looking for abc in published representations of this version.")
-        abc_rep = io.find_one(
-            {
-                "type": "representation",
-                "parent": ObjectId(version_id),
-                "name": "abc"
-            })
+        abc_rep = legacy_io.find_one({
+            "type": "representation",
+            "parent": ObjectId(version_id),
+            "name": "abc"
+        })

         if abc_rep:
             self.log.debug("Found, we'll link alembic to vray proxy.")

@@ -7,9 +7,9 @@ from pprint import pprint

 from maya import cmds

-from avalon import io
 from openpype.api import get_project_settings
 from openpype.pipeline import (
+    legacy_io,
     load,
     get_representation_path
 )

@@ -111,11 +111,11 @@ class YetiCacheLoader(load.LoaderPlugin):

     def update(self, container, representation):

-        io.install()
+        legacy_io.install()
         namespace = container["namespace"]
         container_node = container["objectName"]

-        fur_settings = io.find_one(
+        fur_settings = legacy_io.find_one(
             {"parent": representation["parent"], "name": "fursettings"}
         )

@@ -1,23 +1,16 @@
 from maya import cmds
-import pymel.core as pm

 import pyblish.api
-import avalon.api


 class CollectAssData(pyblish.api.InstancePlugin):
-    """Collect Ass data
-
-    """
+    """Collect Ass data."""

     order = pyblish.api.CollectorOrder + 0.2
     label = 'Collect Ass'
     families = ["ass"]

     def process(self, instance):
-
-        context = instance.context

         objsets = instance.data['setMembers']

         for objset in objsets:

@@ -49,8 +49,8 @@ import maya.app.renderSetup.model.renderSetup as renderSetup

 import pyblish.api

-from avalon import api
 from openpype.lib import get_formatted_current_time
+from openpype.pipeline import legacy_io
 from openpype.hosts.maya.api.lib_renderproducts import get as get_layer_render_products  # noqa: E501
 from openpype.hosts.maya.api import lib

@@ -93,7 +93,7 @@ class CollectMayaRender(pyblish.api.ContextPlugin):
         render_globals = render_instance
         collected_render_layers = render_instance.data["setMembers"]
         filepath = context.data["currentFile"].replace("\\", "/")
-        asset = api.Session["AVALON_ASSET"]
+        asset = legacy_io.Session["AVALON_ASSET"]
         workspace = context.data["workspaceDir"]

         deadline_settings = (

@@ -194,13 +194,11 @@ class CollectMayaRender(pyblish.api.ContextPlugin):
             assert render_products, "no render products generated"
             exp_files = []
             multipart = False
-            render_cameras = []
             for product in render_products:
                 if product.multipart:
                     multipart = True
                 product_name = product.productName
                 if product.camera and layer_render_products.has_camera_token():
-                    render_cameras.append(product.camera)
                     product_name = "{}{}".format(
                         product.camera,
                         "_" + product_name if product_name else "")

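When a layer's file prefix carries a camera token, each product name gets the camera prepended, and an empty product name collapses to just the camera. A minimal sketch of that naming rule with invented values:

```python
# Sketch of the camera-prefixed product naming from the collector.
def camera_product_name(camera, product_name):
    # "cam_beauty" when a product name exists, plain "cam" otherwise
    return "{}{}".format(camera, "_" + product_name if product_name else "")

assert camera_product_name("renderCam", "beauty") == "renderCam_beauty"
assert camera_product_name("renderCam", "") == "renderCam"
```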
@@ -210,7 +208,8 @@ class CollectMayaRender(pyblish.api.ContextPlugin):
                         product)
                 })

-            assert render_cameras, "No render cameras found."
+            has_cameras = any(product.camera for product in render_products)
+            assert has_cameras, "No render cameras found."

             self.log.info("multipart: {}".format(
                 multipart))

@@ -327,8 +326,8 @@ class CollectMayaRender(pyblish.api.ContextPlugin):
                 "byFrameStep": int(
                     self.get_render_attribute("byFrameStep",
                                               layer=layer_name)),
-                "renderer": self.get_render_attribute("currentRenderer",
-                                                      layer=layer_name),
+                "renderer": self.get_render_attribute(
+                    "currentRenderer", layer=layer_name).lower(),
                 # instance subset
                 "family": "renderlayer",
                 "families": ["renderlayer"],

@@ -2,7 +2,8 @@ from maya import cmds, mel
 import pymel.core as pm

 import pyblish.api
-import avalon.api
+
+from openpype.pipeline import legacy_io


 class CollectReview(pyblish.api.InstancePlugin):

@@ -19,7 +20,7 @@ class CollectReview(pyblish.api.InstancePlugin):

         self.log.debug('instance: {}'.format(instance))

-        task = avalon.api.Session["AVALON_TASK"]
+        task = legacy_io.Session["AVALON_TASK"]

         # get cameras
         members = instance.data['setMembers']

|
|||
|
|
@ -6,7 +6,8 @@ import maya.app.renderSetup.model.renderSetup as renderSetup
|
|||
from maya import cmds
|
||||
|
||||
import pyblish.api
|
||||
from avalon import api
|
||||
|
||||
from openpype.pipeline import legacy_io
|
||||
from openpype.lib import get_formatted_current_time
|
||||
from openpype.hosts.maya.api import lib
|
||||
|
||||
|
|
@@ -117,7 +118,7 @@ class CollectVrayScene(pyblish.api.InstancePlugin):
             # instance subset
             "family": "vrayscene_layer",
             "families": ["vrayscene_layer"],
-            "asset": api.Session["AVALON_ASSET"],
+            "asset": legacy_io.Session["AVALON_ASSET"],
             "time": get_formatted_current_time(),
             "author": context.data["user"],
             # Add source to allow tracing back to the scene from

@@ -1,7 +1,8 @@
-import pyblish.api
-import avalon.api
 import os
+
+import pyblish.api

 from maya import cmds
+from openpype.pipeline import legacy_io


 class CollectWorkfile(pyblish.api.ContextPlugin):

@@ -19,7 +20,7 @@ class CollectWorkfile(pyblish.api.ContextPlugin):
         folder, file = os.path.split(current_file)
         filename, ext = os.path.splitext(file)

-        task = avalon.api.Session["AVALON_TASK"]
+        task = legacy_io.Session["AVALON_TASK"]

         data = {}

@@ -12,9 +12,9 @@ from collections import OrderedDict
 from maya import cmds  # noqa

 import pyblish.api
-from avalon import io

 import openpype.api
+from openpype.pipeline import legacy_io
 from openpype.hosts.maya.api import lib

 # Modes for transfer

@@ -40,7 +40,7 @@ def find_paths_by_hash(texture_hash):

     """
     key = "data.sourceHashes.{0}".format(texture_hash)
-    return io.distinct(key, {"type": "version"})
+    return legacy_io.distinct(key, {"type": "version"})


 def maketx(source, destination, *args):

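`find_paths_by_hash` dedupes textures by content hash: version documents carry a `data.sourceHashes` mapping of hash → published path, and the dot-notation key turns the hash into a nested field lookup whose distinct values are returned. A minimal sketch of what that query yields, with invented documents and a toy replacement for the real `distinct` call:

```python
# Sketch of the hash -> published-path lookup behind find_paths_by_hash.
# `versions` stands in for the version documents in the database.
versions = [
    {"type": "version", "data": {"sourceHashes": {"abc123": "pub/tex_v001.tx"}}},
    {"type": "version", "data": {"sourceHashes": {"abc123": "pub/tex_v001.tx"}}},
    {"type": "version", "data": {"sourceHashes": {"def456": "pub/tex_v002.tx"}}},
]

def distinct_source_paths(texture_hash):
    # collect unique values stored under data.sourceHashes.<texture_hash>
    found = []
    for doc in versions:
        path = doc["data"]["sourceHashes"].get(texture_hash)
        if path is not None and path not in found:
            found.append(path)
    return found

print(distinct_source_paths("abc123"))  # ['pub/tex_v001.tx']
```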
@@ -7,11 +7,10 @@ import appdirs

 from maya import cmds

-from avalon import api
-
 import pyblish.api
 from openpype.lib import requests_post
 from openpype.hosts.maya.api import lib
+from openpype.pipeline import legacy_io
 from openpype.api import get_system_settings


@@ -489,7 +488,6 @@ class MayaSubmitMuster(pyblish.api.InstancePlugin):
             "MAYA_RENDER_DESC_PATH",
             "MAYA_MODULE_PATH",
             "ARNOLD_PLUGIN_PATH",
-            "AVALON_SCHEMA",
             "FTRACK_API_KEY",
             "FTRACK_API_USER",
             "FTRACK_SERVER",

@@ -503,7 +501,7 @@ class MayaSubmitMuster(pyblish.api.InstancePlugin):
             "TOOL_ENV"
         ]
         environment = dict({key: os.environ[key] for key in keys
-                            if key in os.environ}, **api.Session)
+                            if key in os.environ}, **legacy_io.Session)
         # self.log.debug("enviro: {}".format(pprint(environment)))
        for path in os.environ:
            if path.lower().startswith('pype_'):

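The submitter builds the render-node environment by filtering `os.environ` down to a key whitelist and overlaying the session on top; with `dict(filtered, **session)` the session keys win on collision. A minimal sketch of that merge (the session values are invented):

```python
import os

# Sketch of the whitelist-plus-session environment merge. The point is
# that **session entries extend, and on collision override, the
# filtered os.environ entries.
keys = ["PATH", "MAYA_MODULE_PATH", "FTRACK_SERVER"]
session = {"AVALON_PROJECT": "demo", "AVALON_TASK": "lighting"}  # invented

environment = dict(
    {key: os.environ[key] for key in keys if key in os.environ},
    **session
)

print(sorted(environment))
```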
@@ -548,4 +546,3 @@ class MayaSubmitMuster(pyblish.api.InstancePlugin):
                 "%f=%d was rounded off to nearest integer"
                 % (value, int(value))
             )
-

@@ -1,16 +1,17 @@
 # -*- coding: utf-8 -*-
 """Validate model nodes names."""
+import os
+import re
 from maya import cmds
 import pyblish.api

 import openpype.api
-import avalon.api
+from openpype.pipeline import legacy_io
 import openpype.hosts.maya.api.action
 from openpype.hosts.maya.api.shader_definition_editor import (
     DEFINITION_FILENAME)
 from openpype.lib.mongo import OpenPypeMongoConnection
 import gridfs
-import re
-import os


 class ValidateModelName(pyblish.api.InstancePlugin):

@@ -68,7 +69,7 @@ class ValidateModelName(pyblish.api.InstancePlugin):
             invalid.append(top_group)
         else:
             if "asset" in r.groupindex:
-                if m.group("asset") != avalon.api.Session["AVALON_ASSET"]:
+                if m.group("asset") != legacy_io.Session["AVALON_ASSET"]:
                     cls.log.error("Invalid asset name in top level group.")
                     return top_group
             if "subset" in r.groupindex:

@@ -76,7 +77,7 @@ class ValidateModelName(pyblish.api.InstancePlugin):
                     cls.log.error("Invalid subset name in top level group.")
                     return top_group
             if "project" in r.groupindex:
-                if m.group("project") != avalon.api.Session["AVALON_PROJECT"]:
+                if m.group("project") != legacy_io.Session["AVALON_PROJECT"]:
                     cls.log.error("Invalid project name in top level group.")
                     return top_group

Some files were not shown because too many files have changed in this diff.