diff --git a/.all-contributorsrc b/.all-contributorsrc
index b30f3b2499..60812cdb3c 100644
--- a/.all-contributorsrc
+++ b/.all-contributorsrc
@@ -1,6 +1,6 @@
{
"projectName": "OpenPype",
- "projectOwner": "pypeclub",
+ "projectOwner": "ynput",
"repoType": "github",
"repoHost": "https://github.com",
"files": [
@@ -319,8 +319,18 @@
"code",
"doc"
]
+ },
+ {
+ "login": "movalex",
+ "name": "Alexey Bogomolov",
+ "avatar_url": "https://avatars.githubusercontent.com/u/11698866?v=4",
+ "profile": "http://abogomolov.com",
+ "contributions": [
+ "code"
+ ]
}
],
"contributorsPerLine": 7,
- "skipCi": true
+ "skipCi": true,
+ "commitType": "docs"
}
diff --git a/.github/ISSUE_TEMPLATE/bug_report.yml b/.github/ISSUE_TEMPLATE/bug_report.yml
index 244eb1a363..3406ca8b65 100644
--- a/.github/ISSUE_TEMPLATE/bug_report.yml
+++ b/.github/ISSUE_TEMPLATE/bug_report.yml
@@ -35,6 +35,12 @@ body:
label: Version
description: What version are you running? Look to OpenPype Tray
options:
+ - 3.15.10-nightly.1
+ - 3.15.9
+ - 3.15.9-nightly.2
+ - 3.15.9-nightly.1
+ - 3.15.8
+ - 3.15.8-nightly.3
- 3.15.8-nightly.2
- 3.15.8-nightly.1
- 3.15.7
@@ -129,12 +135,6 @@ body:
- 3.14.3-nightly.2
- 3.14.3-nightly.1
- 3.14.2
- - 3.14.2-nightly.5
- - 3.14.2-nightly.4
- - 3.14.2-nightly.3
- - 3.14.2-nightly.2
- - 3.14.2-nightly.1
- - 3.14.1
validations:
required: true
- type: dropdown
diff --git a/CHANGELOG.md b/CHANGELOG.md
index bba6b64bfe..ec6544e659 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,6 +1,639 @@
# Changelog
+## [3.15.9](https://github.com/ynput/OpenPype/tree/3.15.9)
+
+
+[Full Changelog](https://github.com/ynput/OpenPype/compare/3.15.8...3.15.9)
+
+### **🆕 New features**
+
+
+
+Blender: Implemented Loading of Alembic Camera #4990
+
+Implemented loading of Alembic cameras in Blender.
+
+
+___
+
+
+
+
+
+Unreal: Implemented Creator, Loader and Extractor for Levels #5008
+
+Creator, Loader and Extractor for Unreal Levels have been implemented.
+
+
+___
+
+
+
+### **🚀 Enhancements**
+
+
+
+Blender: Added setting for base unit scale #4987
+
+A setting for the base unit scale has been added for Blender. The unit scale is automatically applied when opening a file or creating a new one.
+
+
+___
+
+
+
+
+
+Unreal: Changed naming and path of Camera Levels #5010
+
+The levels created for the camera in Unreal now include `_camera` in the name, to make them easier to identify, and are placed in the camera folder.
+
+
+___
+
+
+
+
+
+Settings: Added option to nest settings templates #5022
+
+It is now possible to nest settings templates inside other templates.
+
+
+___
+
+
+
+
+
+Enhancement/publisher: Remove "hit play to continue" label on continue #5029
+
+Remove "hit play to continue" message on continue so that it doesn't show anymore when play was clicked.
+
+
+___
+
+
+
+
+
+Ftrack: Limit number of ftrack events to query at once #5033
+
+Limit the number of ftrack events received from mongo at once to 100.
+
+
+___
+
+
+
+
+
+General: Small code cleanups #5034
+
+Small code cleanup and updates.
+
+
+___
+
+
+
+
+
+Global: collect frames to fix with settings #5036
+
+Settings for `Collect Frames to Fix` now allow disabling the plugin per project. The `Rewriting latest version` attribute can also be hidden via settings.
+
+
+___
+
+
+
+
+
+General: Publish plugin apply settings can expect only project settings #5037
+
+Only project settings are passed to the optional `apply_settings` method if the method expects only one argument.
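+
+As an illustration, a minimal sketch of a publish plugin using the single-argument form (the plugin class and settings keys here are hypothetical, not taken from the PR):
+
+```python
+import pyblish.api
+
+
+class CollectExample(pyblish.api.InstancePlugin):
+    """Hypothetical plugin illustrating the one-argument 'apply_settings'."""
+
+    label = "Collect Example"
+    order = pyblish.api.CollectorOrder
+
+    @classmethod
+    def apply_settings(cls, project_settings):
+        # Because the method expects a single argument, only the project
+        # settings are passed in; the two-argument form
+        # '(project_settings, system_settings)' keeps working as before.
+        plugin_settings = project_settings.get("example_host", {})
+        cls.enabled = plugin_settings.get("enabled", True)
+
+    def process(self, instance):
+        pass
+```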
+
+
+___
+
+
+
+### **🐛 Bug fixes**
+
+
+
+Maya: Load Assembly fix invalid imports #4859
+
+Refactors imports so they are now correct.
+
+
+___
+
+
+
+
+
+Maya: Skipping rendersetup for members. #4973
+
+When publishing a `rendersetup`, the object set is, and should be, empty.
+
+
+___
+
+
+
+
+
+Maya: Validate Rig Output IDs #5016
+
+Absolute node names were not used, so the plugin did not fetch the nodes properly. A missing pymel command was also fixed.
+
+
+___
+
+
+
+
+
+Deadline: escape rootless path in publish job #4910
+
+If the publish path of a Deadline job contained spaces or other special characters, the command failed because the path wasn't properly escaped. This fixes the escaping.
+
+
+___
+
+
+
+
+
+General: Company name and URL changed #4974
+
+The company name and URL records in inno_setup were obsolete; they have been updated.
+___
+
+
+
+
+
+Unreal: Fix usage of 'get_full_path' function #5014
+
+This PR replaces all occurrences of the `get_full_path` function with alternatives for getting the paths of the objects.
+
+
+___
+
+
+
+
+
+Unreal: Fix sequence frames validator to use correct data #5021
+
+Fix sequence frames validator to use clipIn and clipOut data instead of frameStart and frameEnd.
+
+
+___
+
+
+
+
+
+Unreal: Fix render instances collection to use correct data #5023
+
+Fix render instances collection to use `frameStart` and `frameEnd` from the Project Manager, instead of the sequence's ones.
+
+
+___
+
+
+
+
+
+Resolve: loader is opening even if no timeline in project #5025
+
+The Loader now opens even when no timeline is available in the project.
+
+
+___
+
+
+
+
+
+nuke: callback for dirmapping is on demand #5030
+
+Nuke processing was slowed down by this callback. Since it is disabled by default, it is now registered only on demand.
+
+
+___
+
+
+
+
+
+Publisher: UI works with instances without label #5032
+
+The Publisher UI no longer crashes if an instance doesn't have the 'label' key filled in its instance data.
+
+
+___
+
+
+
+
+
+Publisher: Call explicitly prepared tab methods #5044
+
+It is no longer possible to switch to the Create tab while publishing from the OpenPype menu.
+
+
+___
+
+
+
+
+
+Ftrack: Role names are not case sensitive in ftrack event server status action #5058
+
+The event server status action is no longer case sensitive for user role names.
+
+
+___
+
+
+
+
+
+Publisher: Fix border widget #5063
+
+Fixed border lines in Publisher UI to be painted correctly with correct indentation and size.
+
+
+___
+
+
+
+
+
+Unreal: Fix Commandlet Project and Permissions #5066
+
+Fixed a problem when creating an Unreal project while the Commandlet Project is in a protected location.
+
+
+___
+
+
+
+
+
+Unreal: Added verification for Unreal app name format #5070
+
+The Unreal app name is used to determine the Unreal version folder, so it is necessary that it follows the format `x-x`, where `x` is any integer. This PR adds a verification that the app name follows that format.
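+
+A minimal sketch of such a format check (the function name and exact pattern are assumptions, not the PR's code):
+
+```python
+import re
+
+# "x-x" where x is any integer, e.g. "5-1" for Unreal 5.1.
+APP_NAME_PATTERN = re.compile(r"^\d+-\d+$")
+
+
+def is_valid_unreal_app_name(app_name):
+    """Return True if the app name follows the '<major>-<minor>' format."""
+    return bool(APP_NAME_PATTERN.match(app_name))
+
+
+assert is_valid_unreal_app_name("5-1")
+assert not is_valid_unreal_app_name("5.1")
+```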
+
+
+___
+
+
+
+### **📃 Documentation**
+
+
+
+Docs: Display wrong image in ExtractOIIOTranscode #5045
+
+A wrong image was displayed at `https://openpype.io/docs/project_settings/settings_project_global#extract-oiio-transcode`.
+
+
+___
+
+
+
+### **Merged pull requests**
+
+
+
+Drop-down menu to list all families in create placeholder #4928
+
+Currently, in the create placeholder window, the family has to be written manually. This replaces the text field with an enum field listing all families for the current software.
+
+
+___
+
+
+
+
+
+add sync to specific projects or listen only #4919
+
+Extend kitsu sync service with additional arguments to sync specific projects.
+
+
+___
+
+
+
+
+
+
+## [3.15.8](https://github.com/ynput/OpenPype/tree/3.15.8)
+
+
+[Full Changelog](https://github.com/ynput/OpenPype/compare/3.15.7...3.15.8)
+
+### **🆕 New features**
+
+
+
+Publisher: Show instances in report page #4915
+
+Show publish instances in the report page. Also added a basic log view with logs grouped by instance. The validation error detail now has two columns, one with error details and a second with logs. The crashed state shows fast access to report action buttons. Success shows only logs. The publish frame is shrunk automatically when publishing stops.
+
+
+___
+
+
+
+
+
+Fusion - Loader plugins updates #4920
+
+Updates to some Fusion loader plugins: the sequence loader can now load footage from the image and online families; the FBX loader can now import all formats Fusion's FBX node can read; and you can now import the content of another workfile into your current comp with the workfile loader.
+
+
+___
+
+
+
+
+
+Fusion: deadline farm rendering #4955
+
+Enabling Fusion for deadline farm rendering.
+
+
+___
+
+
+
+
+
+AfterEffects: set frame range and resolution #4983
+
+Frame information (frame start, duration, fps) and resolution (width and height) are automatically applied to the selected composition from the Asset Management System (Ftrack or DB) when a published instance is created. It is also possible to explicitly propagate both values from the DB to the selected composition via newly added menu buttons.
+
+
+___
+
+
+
+
+
+Publish: Enhance automated publish plugin settings #4986
+
+Added a plugin option to define the settings category where a plugin's settings are looked up, and added public helper functions to apply settings: `get_plugin_settings` and `apply_plugin_settings_automatically`.
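+
+A rough sketch of the idea (the attribute name and settings keys are assumptions based on the description above, not the exact implementation):
+
+```python
+import pyblish.api
+
+
+class ExtractExample(pyblish.api.InstancePlugin):
+    """Hypothetical plugin opting into automated settings application."""
+
+    label = "Extract Example"
+    order = pyblish.api.ExtractorOrder
+
+    # Settings category (e.g. a host key in project settings) under which
+    # this plugin's values live; the new helpers 'get_plugin_settings' and
+    # 'apply_plugin_settings_automatically' can then look the plugin up
+    # there instead of deriving the location from its module path.
+    settings_category = "example_host"
+```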
+
+
+___
+
+
+
+### **🚀 Enhancements**
+
+
+
+Load Rig References - Change Rig to Animation in Animation instance #4877
+
+We are using the template builder to build an animation scene. All the rig placeholders are imported correctly, but the automatically created animation instances retain the rig family in their names and subsets. In our example, we need animationMain instead of rigMain, because this name will be used in the following steps, like lighting. Here is the result we need. I checked, and it's not a template builder problem, because even if I load a rig as a reference, the result is the same. For me, since we are in the animation instance, it makes more sense to have animation instead of rig in the name. The naming is just fine if we use create from the OpenPype menu.
+
+
+___
+
+
+
+
+
+Enhancement: Resolve prelaunch code refactoring and update defaults #4916
+
+The main reason for this PR is the wrong default settings in `openpype/settings/defaults/system_settings/applications.json` for the Resolve host. The `bin` folder should not be part of the macOS and Linux `RESOLVE_PYTHON3_PATH` variable. The rest of this PR is some code cleanup for the Resolve prelaunch hook to simplify further development. A .gitignore entry for vscode workspace files was also added.
+
+
+___
+
+
+
+
+
+Unreal: Move Unreal plugin to separate repository #4980
+
+To support the Epic Marketplace, the AYON Unreal integration plugins have to be moved to a separate repository. This replaces the current files with a git submodule, so the change should be functionally without impact. The new repository lives here: https://github.com/ynput/ayon-unreal-plugin
+
+
+___
+
+
+
+
+
+General: Lib code cleanup #5003
+
+Small cleanup in lib files in openpype.
+
+
+___
+
+
+
+
+
+Allow to open with djv by extension instead of representation name #5004
+
+Filter open in djv action by extension instead of representation.
+
+
+___
+
+
+
+
+
+DJV open action `extensions` as `set` #5005
+
+Change `extensions` attribute to `set`.
+
+
+___
+
+
+
+
+
+Nuke: extract thumbnail with multiple reposition nodes #5011
+
+Added support for multiple reposition nodes.
+
+
+___
+
+
+
+
+
+Enhancement: Improve logging levels and messages for artist facing publish reports #5018
+
+Tweak the logging levels and messages to try and only show those logs that an artist should see and could understand. Move anything that's slightly more involved into a "debug" message instead.
+
+
+___
+
+
+
+### **🐛 Bug fixes**
+
+
+
+Bugfix/frame variable fix #4978
+
+Renamed variables to match OpenPype terminology to reduce confusion and add consistency.
+___
+
+
+
+
+
+Global: plugins cleanup plugin will leave beauty rendered files #4790
+
+Attempt to explicitly mark more files for cleanup in the intermediate `renders` folder in the work area for farm jobs.
+
+
+___
+
+
+
+
+
+Fix: Download last workfile doesn't work if not already downloaded #4942
+
+An optimization condition was interfering with the feature: if the published workfile was not already downloaded, it would never be downloaded.
+
+
+___
+
+
+
+
+
+Unreal: Fix transform when loading layout to match existing assets #4972
+
+Fixed transform when loading layout to match existing assets.
+
+
+___
+
+
+
+
+
+fix the bug of fbx loaders in Max #4977
+
+Fixes the FBX loaders, which were unable to parent to the CON instances when importing cameras (and models) published from other DCCs such as Maya.
+
+
+___
+
+
+
+
+
+AfterEffects: allow returning stub with not saved workfile #4984
+
+Allows using the Workfile app to save the first, empty workfile.
+
+
+___
+
+
+
+
+
+Blender: Fix Alembic loading #4985
+
+Fixed problem occurring when trying to load an Alembic model in Blender.
+
+
+___
+
+
+
+
+
+Unreal: Addon Py2 compatibility #4994
+
+Fixed Python 2 compatibility of unreal addon.
+
+
+___
+
+
+
+
+
+Nuke: fixed missing files key in representation #4999
+
+Fixed an issue with missing keys when the rendering target is set to existing frames. The instance has to be evaluated during validation for missing files.
+
+
+___
+
+
+
+
+
+Unreal: Fix the frame range when loading camera #5002
+
+The keyframes of the camera, when loaded, were not using the correct frame range.
+
+
+___
+
+
+
+
+
+Fusion: fixing frame range targeting #5013
+
+Frame range targeting on rendering instances now follows the configured options.
+
+
+___
+
+
+
+
+
+Deadline: fix selection from multiple webservices #5015
+
+Multiple different Deadline webservices can be configured. First they must be configured in System Settings, then they can be selected per project in `project_settings/deadline/deadline_servers`. Only a single webservice can be the target of a publish, though.
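+
+For illustration, the rough shape of such a configuration (names, keys and URLs are made up; the actual settings schema may differ):
+
+```python
+# System Settings: named Deadline webservice URLs (illustrative only).
+deadline_system_settings = {
+    "deadline_urls": {
+        "default": "http://deadline-main:8082",
+        "gpu-farm": "http://deadline-gpu:8082",
+    }
+}
+
+# Per project, one of the configured names is then selected in
+# 'project_settings/deadline/deadline_servers'; only one of them is
+# used as the publish target.
+deadline_project_settings = {
+    "deadline_servers": ["gpu-farm"]
+}
+```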
+
+
+___
+
+
+
+### **Merged pull requests**
+
+
+
+3dsmax: Refactored publish plugins to use proper implementation of pymxs #4988
+
+
+___
+
+
+
+
+
+
## [3.15.7](https://github.com/ynput/OpenPype/tree/3.15.7)
diff --git a/README.md b/README.md
index 514ffb62c0..8757e3db92 100644
--- a/README.md
+++ b/README.md
@@ -1,6 +1,6 @@
-[](#contributors-)
+[](#contributors-)
OpenPype
====
@@ -303,41 +303,44 @@ Thanks goes to these wonderful people ([emoji key](https://allcontributors.org/d
diff --git a/inno_setup.iss b/inno_setup.iss
index 3adde52a8b..418bedbd4d 100644
--- a/inno_setup.iss
+++ b/inno_setup.iss
@@ -14,10 +14,10 @@ AppId={{B9E9DF6A-5BDA-42DD-9F35-C09D564C4D93}
AppName={#MyAppName}
AppVersion={#AppVer}
AppVerName={#MyAppName} version {#AppVer}
-AppPublisher=Orbi Tools s.r.o
-AppPublisherURL=http://pype.club
-AppSupportURL=http://pype.club
-AppUpdatesURL=http://pype.club
+AppPublisher=Ynput s.r.o
+AppPublisherURL=https://ynput.io
+AppSupportURL=https://ynput.io
+AppUpdatesURL=https://ynput.io
DefaultDirName={autopf}\{#MyAppName}\{#AppVer}
UsePreviousAppDir=no
DisableProgramGroupPage=yes
diff --git a/openpype/hosts/blender/api/pipeline.py b/openpype/hosts/blender/api/pipeline.py
index c2aee1e653..9cc557c01a 100644
--- a/openpype/hosts/blender/api/pipeline.py
+++ b/openpype/hosts/blender/api/pipeline.py
@@ -26,6 +26,8 @@ from openpype.lib import (
emit_event
)
import openpype.hosts.blender
+from openpype.settings import get_project_settings
+
HOST_DIR = os.path.dirname(os.path.abspath(openpype.hosts.blender.__file__))
PLUGINS_DIR = os.path.join(HOST_DIR, "plugins")
@@ -83,6 +85,31 @@ def uninstall():
ops.unregister()
+def show_message(title, message):
+ from openpype.widgets.message_window import Window
+ from .ops import BlenderApplication
+
+ BlenderApplication.get_app()
+
+ Window(
+ parent=None,
+ title=title,
+ message=message,
+ level="warning")
+
+
+def message_window(title, message):
+ from .ops import (
+ MainThreadItem,
+ execute_in_main_thread,
+ _process_app_events
+ )
+
+ mti = MainThreadItem(show_message, title, message)
+ execute_in_main_thread(mti)
+ _process_app_events()
+
+
def set_start_end_frames():
project_name = legacy_io.active_project()
asset_name = legacy_io.Session["AVALON_ASSET"]
@@ -125,10 +152,36 @@ def set_start_end_frames():
def on_new():
set_start_end_frames()
+ project = os.environ.get("AVALON_PROJECT")
+ settings = get_project_settings(project)
+
+ unit_scale_settings = settings.get("blender").get("unit_scale_settings")
+ unit_scale_enabled = unit_scale_settings.get("enabled")
+ if unit_scale_enabled:
+ unit_scale = unit_scale_settings.get("base_file_unit_scale")
+ bpy.context.scene.unit_settings.scale_length = unit_scale
+
def on_open():
set_start_end_frames()
+ project = os.environ.get("AVALON_PROJECT")
+ settings = get_project_settings(project)
+
+ unit_scale_settings = settings.get("blender").get("unit_scale_settings")
+ unit_scale_enabled = unit_scale_settings.get("enabled")
+ apply_on_opening = unit_scale_settings.get("apply_on_opening")
+ if unit_scale_enabled and apply_on_opening:
+ unit_scale = unit_scale_settings.get("base_file_unit_scale")
+ prev_unit_scale = bpy.context.scene.unit_settings.scale_length
+
+ if unit_scale != prev_unit_scale:
+ bpy.context.scene.unit_settings.scale_length = unit_scale
+
+ message_window(
+ "Base file unit scale changed",
+ "Base file unit scale changed to match the project settings.")
+
@bpy.app.handlers.persistent
def _on_save_pre(*args):
diff --git a/openpype/hosts/blender/hooks/pre_add_run_python_script_arg.py b/openpype/hosts/blender/hooks/pre_add_run_python_script_arg.py
new file mode 100644
index 0000000000..559e9ae0ce
--- /dev/null
+++ b/openpype/hosts/blender/hooks/pre_add_run_python_script_arg.py
@@ -0,0 +1,55 @@
+from pathlib import Path
+
+from openpype.lib import PreLaunchHook
+
+
+class AddPythonScriptToLaunchArgs(PreLaunchHook):
+ """Add python script to be executed before Blender launch."""
+
+ # Append after file argument
+ order = 15
+ app_groups = [
+ "blender",
+ ]
+
+ def execute(self):
+ if not self.launch_context.data.get("python_scripts"):
+ return
+
+ # Add path to workfile to arguments
+ for python_script_path in self.launch_context.data["python_scripts"]:
+ self.log.info(
+ f"Adding python script {python_script_path} to launch"
+ )
+ # Test script path exists
+ python_script_path = Path(python_script_path)
+ if not python_script_path.exists():
+ self.log.warning(
+ f"Python script {python_script_path} doesn't exist. "
+ "Skipped..."
+ )
+ continue
+
+ if "--" in self.launch_context.launch_args:
+ # Insert before separator
+ separator_index = self.launch_context.launch_args.index("--")
+ self.launch_context.launch_args.insert(
+ separator_index,
+ "-P",
+ )
+ self.launch_context.launch_args.insert(
+ separator_index + 1,
+ python_script_path.as_posix(),
+ )
+ else:
+ self.launch_context.launch_args.extend(
+ ["-P", python_script_path.as_posix()]
+ )
+
+ # Ensure separator
+ if "--" not in self.launch_context.launch_args:
+ self.launch_context.launch_args.append("--")
+
+ self.launch_context.launch_args.extend(
+ [*self.launch_context.data.get("script_args", [])]
+ )
diff --git a/openpype/hosts/blender/plugins/load/load_camera_abc.py b/openpype/hosts/blender/plugins/load/load_camera_abc.py
new file mode 100644
index 0000000000..21b48f409f
--- /dev/null
+++ b/openpype/hosts/blender/plugins/load/load_camera_abc.py
@@ -0,0 +1,209 @@
+"""Load an asset in Blender from an Alembic file."""
+
+from pathlib import Path
+from pprint import pformat
+from typing import Dict, List, Optional
+
+import bpy
+
+from openpype.pipeline import (
+ get_representation_path,
+ AVALON_CONTAINER_ID,
+)
+from openpype.hosts.blender.api import plugin, lib
+from openpype.hosts.blender.api.pipeline import (
+ AVALON_CONTAINERS,
+ AVALON_PROPERTY,
+)
+
+
+class AbcCameraLoader(plugin.AssetLoader):
+ """Load a camera from Alembic file.
+
+ Stores the imported asset in an empty named after the asset.
+ """
+
+ families = ["camera"]
+ representations = ["abc"]
+
+ label = "Load Camera (ABC)"
+ icon = "code-fork"
+ color = "orange"
+
+ def _remove(self, asset_group):
+ objects = list(asset_group.children)
+
+ for obj in objects:
+ if obj.type == "CAMERA":
+ bpy.data.cameras.remove(obj.data)
+ elif obj.type == "EMPTY":
+ objects.extend(obj.children)
+ bpy.data.objects.remove(obj)
+
+ def _process(self, libpath, asset_group, group_name):
+ plugin.deselect_all()
+
+ bpy.ops.wm.alembic_import(filepath=libpath)
+
+ objects = lib.get_selection()
+
+ for obj in objects:
+ obj.parent = asset_group
+
+ for obj in objects:
+ name = obj.name
+ obj.name = f"{group_name}:{name}"
+ if obj.type != "EMPTY":
+ name_data = obj.data.name
+ obj.data.name = f"{group_name}:{name_data}"
+
+ if not obj.get(AVALON_PROPERTY):
+ obj[AVALON_PROPERTY] = dict()
+
+ avalon_info = obj[AVALON_PROPERTY]
+ avalon_info.update({"container_name": group_name})
+
+ plugin.deselect_all()
+
+ return objects
+
+ def process_asset(
+ self,
+ context: dict,
+ name: str,
+ namespace: Optional[str] = None,
+ options: Optional[Dict] = None,
+ ) -> Optional[List]:
+ """
+ Arguments:
+ name: Use pre-defined name
+ namespace: Use pre-defined namespace
+ context: Full parenthood of representation to load
+ options: Additional settings dictionary
+ """
+ libpath = self.fname
+ asset = context["asset"]["name"]
+ subset = context["subset"]["name"]
+
+ asset_name = plugin.asset_name(asset, subset)
+ unique_number = plugin.get_unique_number(asset, subset)
+ group_name = plugin.asset_name(asset, subset, unique_number)
+ namespace = namespace or f"{asset}_{unique_number}"
+
+ avalon_container = bpy.data.collections.get(AVALON_CONTAINERS)
+ if not avalon_container:
+ avalon_container = bpy.data.collections.new(name=AVALON_CONTAINERS)
+ bpy.context.scene.collection.children.link(avalon_container)
+
+ asset_group = bpy.data.objects.new(group_name, object_data=None)
+ avalon_container.objects.link(asset_group)
+
+        self._process(libpath, asset_group, group_name)
+
+ objects = []
+ nodes = list(asset_group.children)
+
+ for obj in nodes:
+ objects.append(obj)
+ nodes.extend(list(obj.children))
+
+ bpy.context.scene.collection.objects.link(asset_group)
+
+ asset_group[AVALON_PROPERTY] = {
+ "schema": "openpype:container-2.0",
+ "id": AVALON_CONTAINER_ID,
+ "name": name,
+ "namespace": namespace or "",
+ "loader": str(self.__class__.__name__),
+ "representation": str(context["representation"]["_id"]),
+ "libpath": libpath,
+ "asset_name": asset_name,
+ "parent": str(context["representation"]["parent"]),
+ "family": context["representation"]["context"]["family"],
+ "objectName": group_name,
+ }
+
+ self[:] = objects
+ return objects
+
+ def exec_update(self, container: Dict, representation: Dict):
+ """Update the loaded asset.
+
+ This will remove all objects of the current collection, load the new
+ ones and add them to the collection.
+ If the objects of the collection are used in another collection they
+ will not be removed, only unlinked. Normally this should not be the
+ case though.
+
+ Warning:
+ No nested collections are supported at the moment!
+ """
+ object_name = container["objectName"]
+ asset_group = bpy.data.objects.get(object_name)
+ libpath = Path(get_representation_path(representation))
+ extension = libpath.suffix.lower()
+
+ self.log.info(
+ "Container: %s\nRepresentation: %s",
+ pformat(container, indent=2),
+ pformat(representation, indent=2),
+ )
+
+ assert asset_group, (
+ f"The asset is not loaded: {container['objectName']}")
+ assert libpath, (
+ f"No existing library file found for {container['objectName']}")
+ assert libpath.is_file(), f"The file doesn't exist: {libpath}"
+ assert extension in plugin.VALID_EXTENSIONS, (
+ f"Unsupported file: {libpath}")
+
+ metadata = asset_group.get(AVALON_PROPERTY)
+ group_libpath = metadata["libpath"]
+
+ normalized_group_libpath = str(
+ Path(bpy.path.abspath(group_libpath)).resolve())
+ normalized_libpath = str(
+ Path(bpy.path.abspath(str(libpath))).resolve())
+ self.log.debug(
+ "normalized_group_libpath:\n %s\nnormalized_libpath:\n %s",
+ normalized_group_libpath,
+ normalized_libpath,
+ )
+ if normalized_group_libpath == normalized_libpath:
+ self.log.info("Library already loaded, not updating...")
+ return
+
+ mat = asset_group.matrix_basis.copy()
+
+ self._remove(asset_group)
+ self._process(str(libpath), asset_group, object_name)
+
+ asset_group.matrix_basis = mat
+
+ metadata["libpath"] = str(libpath)
+ metadata["representation"] = str(representation["_id"])
+
+ def exec_remove(self, container: Dict) -> bool:
+ """Remove an existing container from a Blender scene.
+
+ Arguments:
+ container (openpype:container-1.0): Container to remove,
+ from `host.ls()`.
+
+ Returns:
+ bool: Whether the container was deleted.
+
+ Warning:
+ No nested collections are supported at the moment!
+ """
+ object_name = container["objectName"]
+ asset_group = bpy.data.objects.get(object_name)
+
+ if not asset_group:
+ return False
+
+ self._remove(asset_group)
+
+ bpy.data.objects.remove(asset_group)
+
+ return True
diff --git a/openpype/hosts/fusion/api/lib.py b/openpype/hosts/fusion/api/lib.py
index c33209823e..cba8c38c2f 100644
--- a/openpype/hosts/fusion/api/lib.py
+++ b/openpype/hosts/fusion/api/lib.py
@@ -256,8 +256,11 @@ def switch_item(container,
@contextlib.contextmanager
-def maintained_selection():
- comp = get_current_comp()
+def maintained_selection(comp=None):
+ """Reset comp selection from before the context after the context"""
+ if comp is None:
+ comp = get_current_comp()
+
previous_selection = comp.GetToolList(True).values()
try:
yield
@@ -269,6 +272,33 @@ def maintained_selection():
flow.Select(tool, True)
+@contextlib.contextmanager
+def maintained_comp_range(comp=None,
+ global_start=True,
+ global_end=True,
+ render_start=True,
+ render_end=True):
+ """Reset comp frame ranges from before the context after the context"""
+ if comp is None:
+ comp = get_current_comp()
+
+ comp_attrs = comp.GetAttrs()
+ preserve_attrs = {}
+ if global_start:
+ preserve_attrs["COMPN_GlobalStart"] = comp_attrs["COMPN_GlobalStart"]
+ if global_end:
+ preserve_attrs["COMPN_GlobalEnd"] = comp_attrs["COMPN_GlobalEnd"]
+ if render_start:
+ preserve_attrs["COMPN_RenderStart"] = comp_attrs["COMPN_RenderStart"]
+ if render_end:
+ preserve_attrs["COMPN_RenderEnd"] = comp_attrs["COMPN_RenderEnd"]
+
+ try:
+ yield
+ finally:
+ comp.SetAttrs(preserve_attrs)
+
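+# Example usage (illustrative only; the attribute and call names follow the
+# ones already used in this module):
+#
+#     with maintained_comp_range(comp):
+#         comp.SetAttrs({"COMPN_RenderStart": 1001,
+#                        "COMPN_RenderEnd": 1050})
+#         comp.Render({"Wait": True})
+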
+
def get_frame_path(path):
"""Get filename for the Fusion Saver with padded number as '#'
diff --git a/openpype/hosts/fusion/plugins/create/create_saver.py b/openpype/hosts/fusion/plugins/create/create_saver.py
index f1e7791972..04898d0a45 100644
--- a/openpype/hosts/fusion/plugins/create/create_saver.py
+++ b/openpype/hosts/fusion/plugins/create/create_saver.py
@@ -233,7 +233,7 @@ class CreateSaver(NewCreator):
def _get_frame_range_enum(self):
frame_range_options = {
"asset_db": "Current asset context",
- "render_range": "From viewer render in/out",
+ "render_range": "From render in/out",
"comp_range": "From composition timeline"
}
diff --git a/openpype/hosts/fusion/plugins/publish/collect_inputs.py b/openpype/hosts/fusion/plugins/publish/collect_inputs.py
index 1bb3cd1220..a6628300db 100644
--- a/openpype/hosts/fusion/plugins/publish/collect_inputs.py
+++ b/openpype/hosts/fusion/plugins/publish/collect_inputs.py
@@ -113,4 +113,4 @@ class CollectUpstreamInputs(pyblish.api.InstancePlugin):
inputs = [c["representation"] for c in containers]
instance.data["inputRepresentations"] = inputs
- self.log.info("Collected inputs: %s" % inputs)
+ self.log.debug("Collected inputs: %s" % inputs)
diff --git a/openpype/hosts/fusion/plugins/publish/collect_render.py b/openpype/hosts/fusion/plugins/publish/collect_render.py
index d0b7f1c4ff..a20a142701 100644
--- a/openpype/hosts/fusion/plugins/publish/collect_render.py
+++ b/openpype/hosts/fusion/plugins/publish/collect_render.py
@@ -17,6 +17,8 @@ class FusionRenderInstance(RenderInstance):
tool = attr.ib(default=None)
workfileComp = attr.ib(default=None)
publish_attributes = attr.ib(default={})
+ frameStartHandle = attr.ib(default=None)
+ frameEndHandle = attr.ib(default=None)
class CollectFusionRender(
@@ -83,8 +85,8 @@ class CollectFusionRender(
frameEnd=inst.data["frameEnd"],
handleStart=inst.data["handleStart"],
handleEnd=inst.data["handleEnd"],
- ignoreFrameHandleCheck=(
- inst.data["frame_range_source"] == "render_range"),
+ frameStartHandle=inst.data["frameStartHandle"],
+ frameEndHandle=inst.data["frameEndHandle"],
frameStep=1,
fps=comp_frame_format_prefs.get("Rate"),
app_version=comp.GetApp().Version,
diff --git a/openpype/hosts/fusion/plugins/publish/extract_render_local.py b/openpype/hosts/fusion/plugins/publish/extract_render_local.py
index f801f30577..25c101cf00 100644
--- a/openpype/hosts/fusion/plugins/publish/extract_render_local.py
+++ b/openpype/hosts/fusion/plugins/publish/extract_render_local.py
@@ -1,11 +1,12 @@
import os
import logging
import contextlib
+import collections
import pyblish.api
from openpype.pipeline import publish
from openpype.hosts.fusion.api import comp_lock_and_undo_chunk
-from openpype.hosts.fusion.api.lib import get_frame_path
+from openpype.hosts.fusion.api.lib import get_frame_path, maintained_comp_range
log = logging.getLogger(__name__)
@@ -52,11 +53,14 @@ class FusionRenderLocal(
hosts = ["fusion"]
families = ["render.local"]
+ is_rendered_key = "_fusionrenderlocal_has_rendered"
+
def process(self, instance):
- context = instance.context
# Start render
- self.render_once(context)
+ result = self.render(instance)
+ if result is False:
+ raise RuntimeError(f"Comp render failed for {instance}")
self._add_representation(instance)
@@ -69,39 +73,48 @@ class FusionRenderLocal(
)
)
- def render_once(self, context):
- """Render context comp only once, even with more render instances"""
+ def render(self, instance):
+ """Render instance.
- # This plug-in assumes all render nodes get rendered at the same time
- # to speed up the rendering. The check below makes sure that we only
- # execute the rendering once and not for each instance.
- key = f"__hasRun{self.__class__.__name__}"
+        We try to render the minimal number of times by combining the instances
+ that have a matching frame range in one Fusion render. Then for the
+ batch of instances we store whether the render succeeded or failed.
- savers_to_render = [
- # Get the saver tool from the instance
- instance.data["tool"] for instance in context if
- # Only active instances
- instance.data.get("publish", True) and
- # Only render.local instances
- "render.local" in instance.data.get("families", [])
- ]
+ """
- if key not in context.data:
- # We initialize as false to indicate it wasn't successful yet
- # so we can keep track of whether Fusion succeeded
- context.data[key] = False
+ if self.is_rendered_key in instance.data:
+ # This instance was already processed in batch with another
+ # instance, so we just return the render result directly
+ self.log.debug(f"Instance {instance} was already rendered")
+ return instance.data[self.is_rendered_key]
- current_comp = context.data["currentComp"]
- frame_start = context.data["frameStartHandle"]
- frame_end = context.data["frameEndHandle"]
+ instances_by_frame_range = self.get_render_instances_by_frame_range(
+ instance.context
+ )
- self.log.info("Starting Fusion render")
- self.log.info(f"Start frame: {frame_start}")
- self.log.info(f"End frame: {frame_end}")
- saver_names = ", ".join(saver.Name for saver in savers_to_render)
- self.log.info(f"Rendering tools: {saver_names}")
+ # Render matching batch of instances that share the same frame range
+ frame_range = self.get_instance_render_frame_range(instance)
+ render_instances = instances_by_frame_range[frame_range]
- with comp_lock_and_undo_chunk(current_comp):
+ # We initialize render state false to indicate it wasn't successful
+ # yet to keep track of whether Fusion succeeded. This is for cases
+ # where an error below this might cause the comp render result not
+ # to be stored for the instances of this batch
+ for render_instance in render_instances:
+ render_instance.data[self.is_rendered_key] = False
+
+ savers_to_render = [inst.data["tool"] for inst in render_instances]
+ current_comp = instance.context.data["currentComp"]
+ frame_start, frame_end = frame_range
+
+ self.log.info(
+ f"Starting Fusion render frame range {frame_start}-{frame_end}"
+ )
+ saver_names = ", ".join(saver.Name for saver in savers_to_render)
+ self.log.info(f"Rendering tools: {saver_names}")
+
+ with comp_lock_and_undo_chunk(current_comp):
+ with maintained_comp_range(current_comp):
with enabled_savers(current_comp, savers_to_render):
result = current_comp.Render(
{
@@ -111,10 +124,11 @@ class FusionRenderLocal(
}
)
- context.data[key] = bool(result)
+ # Store the render state for all the rendered instances
+ for render_instance in render_instances:
+ render_instance.data[self.is_rendered_key] = bool(result)
- if context.data[key] is False:
- raise RuntimeError("Comp render failed")
+ return result
def _add_representation(self, instance):
"""Add representation to instance"""
@@ -151,3 +165,35 @@ class FusionRenderLocal(
instance.data["representations"].append(repre)
return instance
+
+ def get_render_instances_by_frame_range(self, context):
+ """Return enabled render.local instances grouped by their frame range.
+
+ Arguments:
+ context (pyblish.Context): The pyblish context
+
+ Returns:
+ dict: (start, end): instances mapping
+
+ """
+
+ instances_to_render = [
+ instance for instance in context if
+ # Only active instances
+ instance.data.get("publish", True) and
+ # Only render.local instances
+ "render.local" in instance.data.get("families", [])
+ ]
+
+ # Instances by frame ranges
+ instances_by_frame_range = collections.defaultdict(list)
+ for instance in instances_to_render:
+ start, end = self.get_instance_render_frame_range(instance)
+ instances_by_frame_range[(start, end)].append(instance)
+
+ return dict(instances_by_frame_range)
+
+ def get_instance_render_frame_range(self, instance):
+ start = instance.data["frameStartHandle"]
+ end = instance.data["frameEndHandle"]
+ return start, end
diff --git a/openpype/hosts/fusion/plugins/publish/save_scene.py b/openpype/hosts/fusion/plugins/publish/save_scene.py
index a249c453d8..0798e7c8b7 100644
--- a/openpype/hosts/fusion/plugins/publish/save_scene.py
+++ b/openpype/hosts/fusion/plugins/publish/save_scene.py
@@ -17,5 +17,5 @@ class FusionSaveComp(pyblish.api.ContextPlugin):
current = comp.GetAttrs().get("COMPS_FileName", "")
assert context.data['currentFile'] == current
- self.log.info("Saving current file..")
+ self.log.info("Saving current file: {}".format(current))
comp.Save()
diff --git a/openpype/hosts/fusion/plugins/publish/validate_instance_frame_range.py b/openpype/hosts/fusion/plugins/publish/validate_instance_frame_range.py
new file mode 100644
index 0000000000..06cd0ca186
--- /dev/null
+++ b/openpype/hosts/fusion/plugins/publish/validate_instance_frame_range.py
@@ -0,0 +1,41 @@
+import pyblish.api
+
+from openpype.pipeline import PublishValidationError
+
+
+class ValidateInstanceFrameRange(pyblish.api.InstancePlugin):
+ """Validate instance frame range is within comp's global render range."""
+
+ order = pyblish.api.ValidatorOrder
+ label = "Validate Filename Has Extension"
+ families = ["render"]
+ hosts = ["fusion"]
+
+ def process(self, instance):
+
+ context = instance.context
+ global_start = context.data["compFrameStart"]
+ global_end = context.data["compFrameEnd"]
+
+ render_start = instance.data["frameStartHandle"]
+ render_end = instance.data["frameEndHandle"]
+
+ if render_start < global_start or render_end > global_end:
+
+ message = (
+ f"Instance {instance} render frame range "
+ f"({render_start}-{render_end}) is outside of the comp's "
+ f"global render range ({global_start}-{global_end}) and thus "
+ f"can't be rendered. "
+ )
+ description = (
+ f"{message}\n\n"
+ f"Either update the comp's global range or the instance's "
+ f"frame range to ensure the comp's frame range includes the "
+ f"to render frame range for the instance."
+ )
+ raise PublishValidationError(
+ title="Frame range outside of comp range",
+ message=message,
+ description=description
+ )
diff --git a/openpype/hosts/hiero/plugins/load/load_clip.py b/openpype/hosts/hiero/plugins/load/load_clip.py
index 77844d2448..c9bebfa8b2 100644
--- a/openpype/hosts/hiero/plugins/load/load_clip.py
+++ b/openpype/hosts/hiero/plugins/load/load_clip.py
@@ -41,8 +41,8 @@ class LoadClip(phiero.SequenceLoader):
clip_name_template = "{asset}_{subset}_{representation}"
+ @classmethod
def apply_settings(cls, project_settings, system_settings):
-
plugin_type_settings = (
project_settings
.get("hiero", {})
diff --git a/openpype/hosts/houdini/api/colorspace.py b/openpype/hosts/houdini/api/colorspace.py
new file mode 100644
index 0000000000..7047644225
--- /dev/null
+++ b/openpype/hosts/houdini/api/colorspace.py
@@ -0,0 +1,56 @@
+import attr
+import hou
+from openpype.hosts.houdini.api.lib import get_color_management_preferences
+
+
+@attr.s
+class LayerMetadata(object):
+ """Data class for Render Layer metadata."""
+ frameStart = attr.ib()
+ frameEnd = attr.ib()
+
+
+@attr.s
+class RenderProduct(object):
+ """Getting Colorspace as
+ Specific Render Product Parameter for submitting
+ publish job.
+
+ """
+ colorspace = attr.ib() # colorspace
+ view = attr.ib()
+ productName = attr.ib(default=None)
+
+
+class ARenderProduct(object):
+
+ def __init__(self):
+ """Constructor."""
+ # Initialize
+ self.layer_data = self._get_layer_data()
+ self.layer_data.products = self.get_colorspace_data()
+
+ def _get_layer_data(self):
+ return LayerMetadata(
+ frameStart=int(hou.playbar.frameRange()[0]),
+ frameEnd=int(hou.playbar.frameRange()[1]),
+ )
+
+ def get_colorspace_data(self):
+ """To be implemented by renderer class.
+
+ This should return a list of RenderProducts.
+
+ Returns:
+ list: List of RenderProduct
+
+ """
+ data = get_color_management_preferences()
+ colorspace_data = [
+ RenderProduct(
+ colorspace=data["display"],
+ view=data["view"],
+ productName=""
+ )
+ ]
+ return colorspace_data
diff --git a/openpype/hosts/houdini/api/lib.py b/openpype/hosts/houdini/api/lib.py
index 2e58f3dd98..a33ba7aad2 100644
--- a/openpype/hosts/houdini/api/lib.py
+++ b/openpype/hosts/houdini/api/lib.py
@@ -1,6 +1,7 @@
# -*- coding: utf-8 -*-
import sys
import os
+import re
import uuid
import logging
from contextlib import contextmanager
@@ -581,3 +582,74 @@ def splitext(name, allowed_multidot_extensions):
return name[:-len(ext)], ext
return os.path.splitext(name)
+
+
+def get_top_referenced_parm(parm):
+
+ processed = set() # disallow infinite loop
+ while True:
+ if parm.path() in processed:
+ raise RuntimeError("Parameter references result in cycle.")
+
+ processed.add(parm.path())
+
+ ref = parm.getReferencedParm()
+ if ref.path() == parm.path():
+ # It returns itself when it doesn't reference
+ # another parameter
+ return ref
+ else:
+ parm = ref
+
+
+def evalParmNoFrame(node, parm, pad_character="#"):
+
+ parameter = node.parm(parm)
+ assert parameter, "Parameter does not exist: %s.%s" % (node, parm)
+
+ # If the parameter has a parameter reference, then get that
+ # parameter instead as otherwise `unexpandedString()` fails.
+ parameter = get_top_referenced_parm(parameter)
+
+ # Substitute out the frame numbering with padded characters
+ try:
+ raw = parameter.unexpandedString()
+ except hou.Error as exc:
+ print("Failed: %s" % parameter)
+ raise RuntimeError(exc)
+
+ def replace(match):
+ padding = 1
+ n = match.group(2)
+ if n and int(n):
+ padding = int(n)
+ return pad_character * padding
+
+ expression = re.sub(r"(\$F([0-9]*))", replace, raw)
+
+ with hou.ScriptEvalContext(parameter):
+ return hou.expandStringAtFrame(expression, 0)
+
+
+def get_color_management_preferences():
+ """Get default OCIO preferences"""
+ data = {
+ "config": hou.Color.ocio_configPath()
+
+ }
+
+ # Get default display and view from OCIO
+ display = hou.Color.ocio_defaultDisplay()
+    disp_regex = re.compile(r"^(?P<prefix>.+-)(?P<display>.+)$")
+ disp_match = disp_regex.match(display)
+
+ view = hou.Color.ocio_defaultView()
+    view_regex = re.compile(r"^(?P<prefix>.+- )(?P<view>.+)$")
+ view_match = view_regex.match(view)
+ data.update({
+ "display": disp_match.group("display"),
+ "view": view_match.group("view")
+
+ })
+
+ return data
diff --git a/openpype/hosts/houdini/plugins/create/create_arnold_rop.py b/openpype/hosts/houdini/plugins/create/create_arnold_rop.py
new file mode 100644
index 0000000000..bddf26dbd5
--- /dev/null
+++ b/openpype/hosts/houdini/plugins/create/create_arnold_rop.py
@@ -0,0 +1,71 @@
+from openpype.hosts.houdini.api import plugin
+from openpype.lib import EnumDef
+
+
+class CreateArnoldRop(plugin.HoudiniCreator):
+ """Arnold ROP"""
+
+ identifier = "io.openpype.creators.houdini.arnold_rop"
+ label = "Arnold ROP"
+ family = "arnold_rop"
+ icon = "magic"
+ defaults = ["master"]
+
+ # Default extension
+ ext = "exr"
+
+ def create(self, subset_name, instance_data, pre_create_data):
+ import hou
+
+ # Remove the active, we are checking the bypass flag of the nodes
+ instance_data.pop("active", None)
+ instance_data.update({"node_type": "arnold"})
+
+ # Add chunk size attribute
+ instance_data["chunkSize"] = 1
+ # Submit for job publishing
+ instance_data["farm"] = True
+
+ instance = super(CreateArnoldRop, self).create(
+ subset_name,
+ instance_data,
+ pre_create_data) # type: plugin.CreatedInstance
+
+ instance_node = hou.node(instance.get("instance_node"))
+
+ ext = pre_create_data.get("image_format")
+
+ filepath = "{renders_dir}{subset_name}/{subset_name}.$F4.{ext}".format(
+ renders_dir=hou.text.expandString("$HIP/pyblish/renders/"),
+ subset_name=subset_name,
+ ext=ext,
+ )
+ parms = {
+ # Render frame range
+ "trange": 1,
+
+ # Arnold ROP settings
+ "ar_picture": filepath,
+ "ar_exr_half_precision": 1 # half precision
+ }
+
+ instance_node.setParms(parms)
+
+ # Lock any parameters in this list
+ to_lock = ["family", "id"]
+ self.lock_parameters(instance_node, to_lock)
+
+ def get_pre_create_attr_defs(self):
+ attrs = super(CreateArnoldRop, self).get_pre_create_attr_defs()
+
+ image_format_enum = [
+ "bmp", "cin", "exr", "jpg", "pic", "pic.gz", "png",
+ "rad", "rat", "rta", "sgi", "tga", "tif",
+ ]
+
+ return attrs + [
+ EnumDef("image_format",
+ image_format_enum,
+ default=self.ext,
+ label="Image Format Options")
+ ]
diff --git a/openpype/hosts/houdini/plugins/create/create_karma_rop.py b/openpype/hosts/houdini/plugins/create/create_karma_rop.py
new file mode 100644
index 0000000000..edfb992e1a
--- /dev/null
+++ b/openpype/hosts/houdini/plugins/create/create_karma_rop.py
@@ -0,0 +1,114 @@
+# -*- coding: utf-8 -*-
+"""Creator plugin to create Karma ROP."""
+from openpype.hosts.houdini.api import plugin
+from openpype.pipeline import CreatedInstance
+from openpype.lib import BoolDef, EnumDef, NumberDef
+
+
+class CreateKarmaROP(plugin.HoudiniCreator):
+ """Karma ROP"""
+ identifier = "io.openpype.creators.houdini.karma_rop"
+ label = "Karma ROP"
+ family = "karma_rop"
+ icon = "magic"
+ defaults = ["master"]
+
+ def create(self, subset_name, instance_data, pre_create_data):
+ import hou # noqa
+
+ instance_data.pop("active", None)
+ instance_data.update({"node_type": "karma"})
+ # Add chunk size attribute
+ instance_data["chunkSize"] = 10
+ # Submit for job publishing
+ instance_data["farm"] = True
+
+ instance = super(CreateKarmaROP, self).create(
+ subset_name,
+ instance_data,
+ pre_create_data) # type: CreatedInstance
+
+ instance_node = hou.node(instance.get("instance_node"))
+
+ ext = pre_create_data.get("image_format")
+
+ filepath = "{renders_dir}{subset_name}/{subset_name}.$F4.{ext}".format(
+ renders_dir=hou.text.expandString("$HIP/pyblish/renders/"),
+ subset_name=subset_name,
+ ext=ext,
+ )
+ checkpoint = "{cp_dir}{subset_name}.$F4.checkpoint".format(
+ cp_dir=hou.text.expandString("$HIP/pyblish/"),
+ subset_name=subset_name
+ )
+
+ usd_directory = "{usd_dir}{subset_name}_$RENDERID".format(
+ usd_dir=hou.text.expandString("$HIP/pyblish/renders/usd_renders/"), # noqa
+ subset_name=subset_name
+ )
+
+ parms = {
+ # Render Frame Range
+ "trange": 1,
+ # Karma ROP Setting
+ "picture": filepath,
+ # Karma Checkpoint Setting
+ "productName": checkpoint,
+ # USD Output Directory
+ "savetodirectory": usd_directory,
+ }
+
+ res_x = pre_create_data.get("res_x")
+ res_y = pre_create_data.get("res_y")
+
+ if self.selected_nodes:
+ # If camera found in selection
+ # we will use as render camera
+ camera = None
+            for node in self.selected_nodes:
+                if node.type().name() == "cam":
+                    camera = node.path()
+                    has_camera = pre_create_data.get("cam_res")
+                    if has_camera:
+                        res_x = node.evalParm("resx")
+                        res_y = node.evalParm("resy")
+
+ if not camera:
+ self.log.warning("No render camera found in selection")
+
+ parms.update({
+ "camera": camera or "",
+ "resolutionx": res_x,
+ "resolutiony": res_y,
+ })
+
+ instance_node.setParms(parms)
+
+ # Lock some Avalon attributes
+ to_lock = ["family", "id"]
+ self.lock_parameters(instance_node, to_lock)
+
+ def get_pre_create_attr_defs(self):
+ attrs = super(CreateKarmaROP, self).get_pre_create_attr_defs()
+
+ image_format_enum = [
+ "bmp", "cin", "exr", "jpg", "pic", "pic.gz", "png",
+ "rad", "rat", "rta", "sgi", "tga", "tif",
+ ]
+
+ return attrs + [
+ EnumDef("image_format",
+ image_format_enum,
+ default="exr",
+ label="Image Format Options"),
+ NumberDef("res_x",
+ label="width",
+ default=1920,
+ decimals=0),
+ NumberDef("res_y",
+ label="height",
+ default=720,
+ decimals=0),
+ BoolDef("cam_res",
+ label="Camera Resolution",
+ default=False)
+ ]
diff --git a/openpype/hosts/houdini/plugins/create/create_mantra_rop.py b/openpype/hosts/houdini/plugins/create/create_mantra_rop.py
new file mode 100644
index 0000000000..5ca53e96de
--- /dev/null
+++ b/openpype/hosts/houdini/plugins/create/create_mantra_rop.py
@@ -0,0 +1,88 @@
+# -*- coding: utf-8 -*-
+"""Creator plugin to create Mantra ROP."""
+from openpype.hosts.houdini.api import plugin
+from openpype.pipeline import CreatedInstance
+from openpype.lib import EnumDef, BoolDef
+
+
+class CreateMantraROP(plugin.HoudiniCreator):
+ """Mantra ROP"""
+ identifier = "io.openpype.creators.houdini.mantra_rop"
+ label = "Mantra ROP"
+ family = "mantra_rop"
+ icon = "magic"
+ defaults = ["master"]
+
+ def create(self, subset_name, instance_data, pre_create_data):
+ import hou # noqa
+
+ instance_data.pop("active", None)
+ instance_data.update({"node_type": "ifd"})
+ # Add chunk size attribute
+ instance_data["chunkSize"] = 10
+ # Submit for job publishing
+ instance_data["farm"] = True
+
+ instance = super(CreateMantraROP, self).create(
+ subset_name,
+ instance_data,
+ pre_create_data) # type: CreatedInstance
+
+ instance_node = hou.node(instance.get("instance_node"))
+
+ ext = pre_create_data.get("image_format")
+
+ filepath = "{renders_dir}{subset_name}/{subset_name}.$F4.{ext}".format(
+ renders_dir=hou.text.expandString("$HIP/pyblish/renders/"),
+ subset_name=subset_name,
+ ext=ext,
+ )
+
+ parms = {
+ # Render Frame Range
+ "trange": 1,
+ # Mantra ROP Setting
+ "vm_picture": filepath,
+ }
+
+ if self.selected_nodes:
+ # If camera found in selection
+ # we will use as render camera
+ camera = None
+ for node in self.selected_nodes:
+ if node.type().name() == "cam":
+ camera = node.path()
+
+ if not camera:
+ self.log.warning("No render camera found in selection")
+
+ parms.update({"camera": camera or ""})
+
+ custom_res = pre_create_data.get("override_resolution")
+ if custom_res:
+ parms.update({"override_camerares": 1})
+ instance_node.setParms(parms)
+
+ # Lock some Avalon attributes
+ to_lock = ["family", "id"]
+ self.lock_parameters(instance_node, to_lock)
+
+ def get_pre_create_attr_defs(self):
+ attrs = super(CreateMantraROP, self).get_pre_create_attr_defs()
+
+ image_format_enum = [
+ "bmp", "cin", "exr", "jpg", "pic", "pic.gz", "png",
+ "rad", "rat", "rta", "sgi", "tga", "tif",
+ ]
+
+ return attrs + [
+ EnumDef("image_format",
+ image_format_enum,
+ default="exr",
+ label="Image Format Options"),
+ BoolDef("override_resolution",
+ label="Override Camera Resolution",
+ tooltip="Override the current camera "
+ "resolution, recommended for IPR.",
+ default=False)
+ ]
diff --git a/openpype/hosts/houdini/plugins/create/create_redshift_rop.py b/openpype/hosts/houdini/plugins/create/create_redshift_rop.py
index 2cbe9bfda1..e14ff15bf8 100644
--- a/openpype/hosts/houdini/plugins/create/create_redshift_rop.py
+++ b/openpype/hosts/houdini/plugins/create/create_redshift_rop.py
@@ -1,7 +1,10 @@
# -*- coding: utf-8 -*-
"""Creator plugin to create Redshift ROP."""
+import hou # noqa
+
from openpype.hosts.houdini.api import plugin
from openpype.pipeline import CreatedInstance
+from openpype.lib import EnumDef
class CreateRedshiftROP(plugin.HoudiniCreator):
@@ -11,20 +14,16 @@ class CreateRedshiftROP(plugin.HoudiniCreator):
family = "redshift_rop"
icon = "magic"
defaults = ["master"]
+ ext = "exr"
def create(self, subset_name, instance_data, pre_create_data):
- import hou # noqa
instance_data.pop("active", None)
instance_data.update({"node_type": "Redshift_ROP"})
# Add chunk size attribute
instance_data["chunkSize"] = 10
-
- # Clear the family prefix from the subset
- subset = subset_name
- subset_no_prefix = subset[len(self.family):]
- subset_no_prefix = subset_no_prefix[0].lower() + subset_no_prefix[1:]
- subset_name = subset_no_prefix
+ # Submit for job publishing
+ instance_data["farm"] = True
instance = super(CreateRedshiftROP, self).create(
subset_name,
@@ -34,11 +33,10 @@ class CreateRedshiftROP(plugin.HoudiniCreator):
instance_node = hou.node(instance.get("instance_node"))
basename = instance_node.name()
- instance_node.setName(basename + "_ROP", unique_name=True)
# Also create the linked Redshift IPR Rop
try:
- ipr_rop = self.parent.createNode(
+ ipr_rop = instance_node.parent().createNode(
"Redshift_IPR", node_name=basename + "_IPR"
)
except hou.OperationFailed:
@@ -50,19 +48,58 @@ class CreateRedshiftROP(plugin.HoudiniCreator):
ipr_rop.setPosition(instance_node.position() + hou.Vector2(0, -1))
# Set the linked rop to the Redshift ROP
- ipr_rop.parm("linked_rop").set(ipr_rop.relativePathTo(instance))
+ ipr_rop.parm("linked_rop").set(instance_node.path())
+
+ ext = pre_create_data.get("image_format")
+ filepath = "{renders_dir}{subset_name}/{subset_name}.{fmt}".format(
+ renders_dir=hou.text.expandString("$HIP/pyblish/renders/"),
+ subset_name=subset_name,
+ fmt="${aov}.$F4.{ext}".format(aov="AOV", ext=ext)
+ )
- prefix = '${HIP}/render/${HIPNAME}/`chs("subset")`.${AOV}.$F4.exr'
parms = {
# Render frame range
"trange": 1,
# Redshift ROP settings
- "RS_outputFileNamePrefix": prefix,
- "RS_outputMultilayerMode": 0, # no multi-layered exr
+ "RS_outputFileNamePrefix": filepath,
+ "RS_outputMultilayerMode": "1", # no multi-layered exr
"RS_outputBeautyAOVSuffix": "beauty",
}
+
+ if self.selected_nodes:
+ # set up the render camera from the selected node
+ camera = None
+ for node in self.selected_nodes:
+ if node.type().name() == "cam":
+ camera = node.path()
+ parms.update({
+ "RS_renderCamera": camera or ""})
instance_node.setParms(parms)
# Lock some Avalon attributes
to_lock = ["family", "id"]
self.lock_parameters(instance_node, to_lock)
+
+ def remove_instances(self, instances):
+ for instance in instances:
+ node = instance.data.get("instance_node")
+
+ ipr_node = hou.node(f"{node}_IPR")
+ if ipr_node:
+ ipr_node.destroy()
+
+ return super(CreateRedshiftROP, self).remove_instances(instances)
+
+ def get_pre_create_attr_defs(self):
+ attrs = super(CreateRedshiftROP, self).get_pre_create_attr_defs()
+ image_format_enum = [
+ "bmp", "cin", "exr", "jpg", "pic", "pic.gz", "png",
+ "rad", "rat", "rta", "sgi", "tga", "tif",
+ ]
+
+ return attrs + [
+ EnumDef("image_format",
+ image_format_enum,
+ default=self.ext,
+ label="Image Format Options")
+ ]
diff --git a/openpype/hosts/houdini/plugins/create/create_vray_rop.py b/openpype/hosts/houdini/plugins/create/create_vray_rop.py
new file mode 100644
index 0000000000..1de9be4ed6
--- /dev/null
+++ b/openpype/hosts/houdini/plugins/create/create_vray_rop.py
@@ -0,0 +1,156 @@
+# -*- coding: utf-8 -*-
+"""Creator plugin to create VRay ROP."""
+import hou
+
+from openpype.hosts.houdini.api import plugin
+from openpype.pipeline import CreatedInstance
+from openpype.lib import EnumDef, BoolDef
+
+
+class CreateVrayROP(plugin.HoudiniCreator):
+ """VRay ROP"""
+
+ identifier = "io.openpype.creators.houdini.vray_rop"
+ label = "VRay ROP"
+ family = "vray_rop"
+ icon = "magic"
+ defaults = ["master"]
+
+ ext = "exr"
+
+ def create(self, subset_name, instance_data, pre_create_data):
+
+ instance_data.pop("active", None)
+ instance_data.update({"node_type": "vray_renderer"})
+ # Add chunk size attribute
+ instance_data["chunkSize"] = 10
+ # Submit for job publishing
+ instance_data["farm"] = True
+
+ instance = super(CreateVrayROP, self).create(
+ subset_name,
+ instance_data,
+ pre_create_data) # type: CreatedInstance
+
+ instance_node = hou.node(instance.get("instance_node"))
+
+ # Add IPR for Vray
+ basename = instance_node.name()
+ try:
+ ipr_rop = instance_node.parent().createNode(
+ "vray", node_name=basename + "_IPR"
+ )
+ except hou.OperationFailed:
+ raise plugin.OpenPypeCreatorError(
+ "Cannot create Vray render node. "
+ "Make sure Vray installed and enabled!"
+ )
+
+ ipr_rop.setPosition(instance_node.position() + hou.Vector2(0, -1))
+ ipr_rop.parm("rop").set(instance_node.path())
+
+ parms = {
+ "trange": 1,
+ "SettingsEXR_bits_per_channel": "16" # half precision
+ }
+
+ if self.selected_nodes:
+ # set up the render camera from the selected node
+ camera = None
+ for node in self.selected_nodes:
+ if node.type().name() == "cam":
+ camera = node.path()
+ parms.update({
+ "render_camera": camera or ""
+ })
+
+ # Enable render element
+ ext = pre_create_data.get("image_format")
+ instance_data["RenderElement"] = pre_create_data.get("render_element_enabled") # noqa
+ if pre_create_data.get("render_element_enabled", True):
+ # Vray has its own tag for AOV file output
+ filepath = "{renders_dir}{subset_name}/{subset_name}.{fmt}".format(
+ renders_dir=hou.text.expandString("$HIP/pyblish/renders/"),
+ subset_name=subset_name,
+ fmt="${aov}.$F4.{ext}".format(aov="AOV",
+ ext=ext)
+ )
+ filepath = "{}{}".format(
+ hou.text.expandString("$HIP/pyblish/renders/"),
+ "{}/{}.${}.$F4.{}".format(subset_name,
+ subset_name,
+ "AOV",
+ ext)
+ )
+ re_rop = instance_node.parent().createNode(
+ "vray_render_channels",
+ node_name=basename + "_render_element"
+ )
+ # move the render element node next to the vray renderer node
+ re_rop.setPosition(instance_node.position() + hou.Vector2(0, 1))
+ re_path = re_rop.path()
+ parms.update({
+ "use_render_channels": 1,
+ "SettingsOutput_img_file_path": filepath,
+ "render_network_render_channels": re_path
+ })
+
+ else:
+ filepath = "{renders_dir}{subset_name}/{subset_name}.{fmt}".format(
+ renders_dir=hou.text.expandString("$HIP/pyblish/renders/"),
+ subset_name=subset_name,
+ fmt="$F4.{ext}".format(ext=ext)
+ )
+ parms.update({
+ "use_render_channels": 0,
+ "SettingsOutput_img_file_path": filepath
+ })
+
+ custom_res = pre_create_data.get("override_resolution")
+ if custom_res:
+ parms.update({"override_camerares": 1})
+
+ instance_node.setParms(parms)
+
+ # lock parameters from AVALON
+ to_lock = ["family", "id"]
+ self.lock_parameters(instance_node, to_lock)
+
+ def remove_instances(self, instances):
+ for instance in instances:
+ node = instance.data.get("instance_node")
+ # for the extra render node from the plugins
+ # such as vray and redshift
+ ipr_node = hou.node("{}{}".format(node, "_IPR"))
+ if ipr_node:
+ ipr_node.destroy()
+ re_node = hou.node("{}{}".format(node,
+ "_render_element"))
+ if re_node:
+ re_node.destroy()
+
+ return super(CreateVrayROP, self).remove_instances(instances)
+
+ def get_pre_create_attr_defs(self):
+ attrs = super(CreateVrayROP, self).get_pre_create_attr_defs()
+ image_format_enum = [
+ "bmp", "cin", "exr", "jpg", "pic", "pic.gz", "png",
+ "rad", "rat", "rta", "sgi", "tga", "tif",
+ ]
+
+ return attrs + [
+ EnumDef("image_format",
+ image_format_enum,
+ default=self.ext,
+ label="Image Format Options"),
+ BoolDef("override_resolution",
+ label="Override Camera Resolution",
+ tooltip="Override the current camera "
+ "resolution, recommended for IPR.",
+ default=False),
+ BoolDef("render_element_enabled",
+ label="Render Element",
+ tooltip="Create Render Element Node "
+ "if enabled",
+ default=False)
+ ]
diff --git a/openpype/hosts/houdini/plugins/publish/collect_arnold_rop.py b/openpype/hosts/houdini/plugins/publish/collect_arnold_rop.py
new file mode 100644
index 0000000000..614785487f
--- /dev/null
+++ b/openpype/hosts/houdini/plugins/publish/collect_arnold_rop.py
@@ -0,0 +1,135 @@
+import os
+import re
+
+import hou
+import pyblish.api
+
+from openpype.hosts.houdini.api import colorspace
+from openpype.hosts.houdini.api.lib import (
+ evalParmNoFrame, get_color_management_preferences)
+
+
+class CollectArnoldROPRenderProducts(pyblish.api.InstancePlugin):
+ """Collect Arnold ROP Render Products
+
+ Collects the instance.data["files"] for the render products.
+
+ Provides:
+ instance -> files
+
+ """
+
+ label = "Arnold ROP Render Products"
+ order = pyblish.api.CollectorOrder + 0.4
+ hosts = ["houdini"]
+ families = ["arnold_rop"]
+
+ def process(self, instance):
+
+ rop = hou.node(instance.data.get("instance_node"))
+
+ # Collect chunkSize
+ chunk_size_parm = rop.parm("chunkSize")
+ if chunk_size_parm:
+ chunk_size = int(chunk_size_parm.eval())
+ instance.data["chunkSize"] = chunk_size
+ self.log.debug("Chunk Size: %s" % chunk_size)
+
+ default_prefix = evalParmNoFrame(rop, "ar_picture")
+ render_products = []
+
+ # Default beauty AOV
+ beauty_product = self.get_render_product_name(prefix=default_prefix,
+ suffix=None)
+ render_products.append(beauty_product)
+
+ files_by_aov = {
+ "": self.generate_expected_files(instance, beauty_product)
+ }
+
+ num_aovs = rop.evalParm("ar_aovs")
+ for index in range(1, num_aovs + 1):
+ # Skip disabled AOVs
+ if not rop.evalParm("ar_enable_aovP{}".format(index)):
+ continue
+
+ if rop.evalParm("ar_aov_exr_enable_layer_name{}".format(index)):
+ label = rop.evalParm("ar_aov_exr_layer_name{}".format(index))
+ else:
+ label = evalParmNoFrame(rop, "ar_aov_label{}".format(index))
+
+ aov_product = self.get_render_product_name(default_prefix,
+ suffix=label)
+ render_products.append(aov_product)
+ files_by_aov[label] = self.generate_expected_files(instance,
+ aov_product)
+
+ for product in render_products:
+ self.log.debug("Found render product: {}".format(product))
+
+ instance.data["files"] = list(render_products)
+ instance.data["renderProducts"] = colorspace.ARenderProduct()
+
+ # For now by default do NOT try to publish the rendered output
+ instance.data["publishJobState"] = "Suspended"
+ instance.data["attachTo"] = [] # stub required data
+
+ if "expectedFiles" not in instance.data:
+ instance.data["expectedFiles"] = list()
+ instance.data["expectedFiles"].append(files_by_aov)
+
+ # update the colorspace data
+ colorspace_data = get_color_management_preferences()
+ instance.data["colorspaceConfig"] = colorspace_data["config"]
+ instance.data["colorspaceDisplay"] = colorspace_data["display"]
+ instance.data["colorspaceView"] = colorspace_data["view"]
+
+ def get_render_product_name(self, prefix, suffix):
+ """Return the output filename using the AOV prefix and suffix"""
+
+ # When AOV is explicitly defined in prefix we just swap it out
+ # directly with the AOV suffix to embed it.
+ # Note: ${AOV} seems to be evaluated in the parameter as %AOV%
+ if "%AOV%" in prefix:
+ # It seems that when some special separator characters are present
+ # before the %AOV% token that Redshift will secretly remove it if
+ # there is no suffix for the current product, for example:
+ # foo_%AOV% -> foo.exr
+ pattern = "%AOV%" if suffix else "[._-]?%AOV%"
+ product_name = re.sub(pattern,
+ suffix or "",
+ prefix,
+ flags=re.IGNORECASE)
+ else:
+ if suffix:
+ # Add ".{suffix}" before the extension
+ prefix_base, ext = os.path.splitext(prefix)
+ product_name = prefix_base + "." + suffix + ext
+ else:
+ product_name = prefix
+
+ return product_name
+
+ def generate_expected_files(self, instance, path):
+ """Create expected files in instance data"""
+
+ dir = os.path.dirname(path)
+ file = os.path.basename(path)
+
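+ # convert "#" frame padding (e.g. "####") to a printf-style
+ # token such as "%04d" so frame numbers can be substituted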
+ if "#" in file:
+ def replace(match):
+ return "%0{}d".format(len(match.group()))
+
+ file = re.sub("#+", replace, file)
+
+ if "%" not in file:
+ return path
+
+ expected_files = []
+ start = instance.data["frameStart"]
+ end = instance.data["frameEnd"]
+ for i in range(int(start), (int(end) + 1)):
+ expected_files.append(
+ os.path.join(dir, (file % i)).replace("\\", "/"))
+
+ return expected_files
diff --git a/openpype/hosts/houdini/plugins/publish/collect_frames.py b/openpype/hosts/houdini/plugins/publish/collect_frames.py
index 6c695f64e9..91a3d9d170 100644
--- a/openpype/hosts/houdini/plugins/publish/collect_frames.py
+++ b/openpype/hosts/houdini/plugins/publish/collect_frames.py
@@ -8,19 +8,16 @@ import pyblish.api
from openpype.hosts.houdini.api import lib
-
class CollectFrames(pyblish.api.InstancePlugin):
"""Collect all frames which would be saved from the ROP nodes"""
- order = pyblish.api.CollectorOrder
+ order = pyblish.api.CollectorOrder + 0.01
label = "Collect Frames"
families = ["vdbcache", "imagesequence", "ass", "redshiftproxy", "review"]
def process(self, instance):
ropnode = hou.node(instance.data["instance_node"])
- frame_data = lib.get_frame_data(ropnode)
- instance.data.update(frame_data)
start_frame = instance.data.get("frameStart", None)
end_frame = instance.data.get("frameEnd", None)
@@ -34,8 +31,10 @@ class CollectFrames(pyblish.api.InstancePlugin):
self.log.warning("Using current frame: {}".format(hou.frame()))
output = output_parm.eval()
- _, ext = lib.splitext(output,
- allowed_multidot_extensions=[".ass.gz"])
+ _, ext = lib.splitext(
+ output,
+ allowed_multidot_extensions=[".ass.gz"]
+ )
file_name = os.path.basename(output)
result = file_name
diff --git a/openpype/hosts/houdini/plugins/publish/collect_inputs.py b/openpype/hosts/houdini/plugins/publish/collect_inputs.py
index 6411376ea3..e92a42f2e8 100644
--- a/openpype/hosts/houdini/plugins/publish/collect_inputs.py
+++ b/openpype/hosts/houdini/plugins/publish/collect_inputs.py
@@ -117,4 +117,4 @@ class CollectUpstreamInputs(pyblish.api.InstancePlugin):
inputs = [c["representation"] for c in containers]
instance.data["inputRepresentations"] = inputs
- self.log.info("Collected inputs: %s" % inputs)
+ self.log.debug("Collected inputs: %s" % inputs)
diff --git a/openpype/hosts/houdini/plugins/publish/collect_instance_frame_data.py b/openpype/hosts/houdini/plugins/publish/collect_instance_frame_data.py
new file mode 100644
index 0000000000..584343cd64
--- /dev/null
+++ b/openpype/hosts/houdini/plugins/publish/collect_instance_frame_data.py
@@ -0,0 +1,56 @@
+import hou
+
+import pyblish.api
+
+
+class CollectInstanceNodeFrameRange(pyblish.api.InstancePlugin):
+ """Collect time range frame data for the instance node."""
+
+ order = pyblish.api.CollectorOrder + 0.001
+ label = "Instance Node Frame Range"
+ hosts = ["houdini"]
+
+ def process(self, instance):
+
+ node_path = instance.data.get("instance_node")
+ node = hou.node(node_path) if node_path else None
+ if not node_path or not node:
+ self.log.debug("No instance node found for instance: "
+ "{}".format(instance))
+ return
+
+ frame_data = self.get_frame_data(node)
+ if not frame_data:
+ return
+
+ self.log.info("Collected time data: {}".format(frame_data))
+ instance.data.update(frame_data)
+
+ def get_frame_data(self, node):
+ """Get the frame data: start frame, end frame and steps
+ Args:
+ node(hou.Node)
+
+ Returns:
+ dict
+
+ """
+
+ data = {}
+
+ if node.parm("trange") is None:
+ self.log.debug("Node has no 'trange' parameter: "
+ "{}".format(node.path()))
+ return data
+
+ if node.evalParm("trange") == 0:
+ # Ignore 'render current frame'
+ self.log.debug("Node '{}' has 'Render current frame' set. "
+ "Time range data ignored.".format(node.path()))
+ return data
+
+ data["frameStart"] = node.evalParm("f1")
+ data["frameEnd"] = node.evalParm("f2")
+ data["byFrameStep"] = node.evalParm("f3")
+
+ return data
diff --git a/openpype/hosts/houdini/plugins/publish/collect_instances.py b/openpype/hosts/houdini/plugins/publish/collect_instances.py
index bb85630552..3772c9e705 100644
--- a/openpype/hosts/houdini/plugins/publish/collect_instances.py
+++ b/openpype/hosts/houdini/plugins/publish/collect_instances.py
@@ -55,7 +55,9 @@ class CollectInstances(pyblish.api.ContextPlugin):
has_family = node.evalParm("family")
assert has_family, "'%s' is missing 'family'" % node.name()
- self.log.info("processing {}".format(node))
+ self.log.info(
+ "Processing legacy instance node {}".format(node.path())
+ )
data = lib.read(node)
# Check bypass state and reverse
@@ -68,16 +70,10 @@ class CollectInstances(pyblish.api.ContextPlugin):
if "active" in data:
data["publish"] = data["active"]
- data.update(self.get_frame_data(node))
-
# Create nice name if the instance has a frame range.
label = data.get("name", node.name())
label += " (%s)" % data["asset"] # include asset in name
- if "frameStart" in data and "frameEnd" in data:
- frames = "[{frameStart} - {frameEnd}]".format(**data)
- label = "{} {}".format(label, frames)
-
instance = context.create_instance(label)
# Include `families` using `family` data
@@ -116,6 +112,6 @@ class CollectInstances(pyblish.api.ContextPlugin):
data["frameStart"] = node.evalParm("f1")
data["frameEnd"] = node.evalParm("f2")
- data["steps"] = node.evalParm("f3")
+ data["byFrameStep"] = node.evalParm("f3")
return data
diff --git a/openpype/hosts/houdini/plugins/publish/collect_karma_rop.py b/openpype/hosts/houdini/plugins/publish/collect_karma_rop.py
new file mode 100644
index 0000000000..eabb1128d8
--- /dev/null
+++ b/openpype/hosts/houdini/plugins/publish/collect_karma_rop.py
@@ -0,0 +1,104 @@
+import re
+import os
+
+import hou
+import pyblish.api
+
+from openpype.hosts.houdini.api.lib import (
+ evalParmNoFrame,
+ get_color_management_preferences
+)
+from openpype.hosts.houdini.api import (
+ colorspace
+)
+
+
+class CollectKarmaROPRenderProducts(pyblish.api.InstancePlugin):
+ """Collect Karma Render Products
+
+ Collects the instance.data["files"] for the multipart render product.
+
+ Provides:
+ instance -> files
+
+ """
+
+ label = "Karma ROP Render Products"
+ order = pyblish.api.CollectorOrder + 0.4
+ hosts = ["houdini"]
+ families = ["karma_rop"]
+
+ def process(self, instance):
+
+ rop = hou.node(instance.data.get("instance_node"))
+
+ # Collect chunkSize
+ chunk_size_parm = rop.parm("chunkSize")
+ if chunk_size_parm:
+ chunk_size = int(chunk_size_parm.eval())
+ instance.data["chunkSize"] = chunk_size
+ self.log.debug("Chunk Size: %s" % chunk_size)
+
+ default_prefix = evalParmNoFrame(rop, "picture")
+ render_products = []
+
+ # Default beauty AOV
+ beauty_product = self.get_render_product_name(
+ prefix=default_prefix, suffix=None
+ )
+ render_products.append(beauty_product)
+
+ files_by_aov = {
+ "beauty": self.generate_expected_files(instance,
+ beauty_product)
+ }
+
+ filenames = list(render_products)
+ instance.data["files"] = filenames
+ instance.data["renderProducts"] = colorspace.ARenderProduct()
+
+ for product in render_products:
+ self.log.debug("Found render product: %s" % product)
+
+ if "expectedFiles" not in instance.data:
+ instance.data["expectedFiles"] = list()
+ instance.data["expectedFiles"].append(files_by_aov)
+
+ # update the colorspace data
+ colorspace_data = get_color_management_preferences()
+ instance.data["colorspaceConfig"] = colorspace_data["config"]
+ instance.data["colorspaceDisplay"] = colorspace_data["display"]
+ instance.data["colorspaceView"] = colorspace_data["view"]
+
+ def get_render_product_name(self, prefix, suffix):
+ product_name = prefix
+ if suffix:
+ # Add ".{suffix}" before the extension
+ prefix_base, ext = os.path.splitext(prefix)
+ product_name = "{}.{}{}".format(prefix_base, suffix, ext)
+
+ return product_name
+
+ def generate_expected_files(self, instance, path):
+ """Create expected files in instance data"""
+
+ dir = os.path.dirname(path)
+ file = os.path.basename(path)
+
+ if "#" in file:
+ def replace(match):
+ return "%0{}d".format(len(match.group()))
+
+ file = re.sub("#+", replace, file)
+
+ if "%" not in file:
+ return path
+
+ expected_files = []
+ start = instance.data["frameStart"]
+ end = instance.data["frameEnd"]
+ for i in range(int(start), (int(end) + 1)):
+ expected_files.append(
+ os.path.join(dir, (file % i)).replace("\\", "/"))
+
+ return expected_files
diff --git a/openpype/hosts/houdini/plugins/publish/collect_mantra_rop.py b/openpype/hosts/houdini/plugins/publish/collect_mantra_rop.py
new file mode 100644
index 0000000000..c4460f5350
--- /dev/null
+++ b/openpype/hosts/houdini/plugins/publish/collect_mantra_rop.py
@@ -0,0 +1,127 @@
+import re
+import os
+
+import hou
+import pyblish.api
+
+from openpype.hosts.houdini.api.lib import (
+ evalParmNoFrame,
+ get_color_management_preferences
+)
+from openpype.hosts.houdini.api import (
+ colorspace
+)
+
+
+class CollectMantraROPRenderProducts(pyblish.api.InstancePlugin):
+ """Collect Mantra Render Products
+
+ Collects the instance.data["files"] for the render products.
+
+ Provides:
+ instance -> files
+
+ """
+
+ label = "Mantra ROP Render Products"
+ order = pyblish.api.CollectorOrder + 0.4
+ hosts = ["houdini"]
+ families = ["mantra_rop"]
+
+ def process(self, instance):
+
+ rop = hou.node(instance.data.get("instance_node"))
+
+ # Collect chunkSize
+ chunk_size_parm = rop.parm("chunkSize")
+ if chunk_size_parm:
+ chunk_size = int(chunk_size_parm.eval())
+ instance.data["chunkSize"] = chunk_size
+ self.log.debug("Chunk Size: %s" % chunk_size)
+
+ default_prefix = evalParmNoFrame(rop, "vm_picture")
+ render_products = []
+
+ # Default beauty AOV
+ beauty_product = self.get_render_product_name(
+ prefix=default_prefix, suffix=None
+ )
+ render_products.append(beauty_product)
+
+ files_by_aov = {
+ "beauty": self.generate_expected_files(instance,
+ beauty_product)
+ }
+
+ aov_numbers = rop.evalParm("vm_numaux")
+ if aov_numbers > 0:
+ # get the filenames of the AOVs
+ for i in range(1, aov_numbers + 1):
+ var = rop.evalParm("vm_variable_plane%d" % i)
+ if var:
+ aov_name = "vm_filename_plane%d" % i
+ aov_boolean = "vm_usefile_plane%d" % i
+ aov_enabled = rop.evalParm(aov_boolean)
+ has_aov_path = rop.evalParm(aov_name)
+ if has_aov_path and aov_enabled == 1:
+ aov_prefix = evalParmNoFrame(rop, aov_name)
+ aov_product = self.get_render_product_name(
+ prefix=aov_prefix, suffix=None
+ )
+ render_products.append(aov_product)
+
+ files_by_aov[var] = self.generate_expected_files(instance, aov_product) # noqa
+
+ for product in render_products:
+ self.log.debug("Found render product: %s" % product)
+
+ filenames = list(render_products)
+ instance.data["files"] = filenames
+ instance.data["renderProducts"] = colorspace.ARenderProduct()
+
+ # For now by default do NOT try to publish the rendered output
+ instance.data["publishJobState"] = "Suspended"
+ instance.data["attachTo"] = [] # stub required data
+
+ if "expectedFiles" not in instance.data:
+ instance.data["expectedFiles"] = list()
+ instance.data["expectedFiles"].append(files_by_aov)
+
+ # update the colorspace data
+ colorspace_data = get_color_management_preferences()
+ instance.data["colorspaceConfig"] = colorspace_data["config"]
+ instance.data["colorspaceDisplay"] = colorspace_data["display"]
+ instance.data["colorspaceView"] = colorspace_data["view"]
+
+ def get_render_product_name(self, prefix, suffix):
+ product_name = prefix
+ if suffix:
+ # Add ".{suffix}" before the extension
+ prefix_base, ext = os.path.splitext(prefix)
+ product_name = prefix_base + "." + suffix + ext
+
+ return product_name
+
+ def generate_expected_files(self, instance, path):
+ """Create expected files in instance data"""
+
+ dir = os.path.dirname(path)
+ file = os.path.basename(path)
+
+ if "#" in file:
+ def replace(match):
+ return "%0{}d".format(len(match.group()))
+
+ file = re.sub("#+", replace, file)
+
+ if "%" not in file:
+ return path
+
+ expected_files = []
+ start = instance.data["frameStart"]
+ end = instance.data["frameEnd"]
+ for i in range(int(start), (int(end) + 1)):
+ expected_files.append(
+ os.path.join(dir, (file % i)).replace("\\", "/"))
+
+ return expected_files
diff --git a/openpype/hosts/houdini/plugins/publish/collect_redshift_rop.py b/openpype/hosts/houdini/plugins/publish/collect_redshift_rop.py
index f1d73d7523..dbb15ab88f 100644
--- a/openpype/hosts/houdini/plugins/publish/collect_redshift_rop.py
+++ b/openpype/hosts/houdini/plugins/publish/collect_redshift_rop.py
@@ -4,52 +4,13 @@ import os
import hou
import pyblish.api
-
-def get_top_referenced_parm(parm):
-
- processed = set() # disallow infinite loop
- while True:
- if parm.path() in processed:
- raise RuntimeError("Parameter references result in cycle.")
-
- processed.add(parm.path())
-
- ref = parm.getReferencedParm()
- if ref.path() == parm.path():
- # It returns itself when it doesn't reference
- # another parameter
- return ref
- else:
- parm = ref
-
-
-def evalParmNoFrame(node, parm, pad_character="#"):
-
- parameter = node.parm(parm)
- assert parameter, "Parameter does not exist: %s.%s" % (node, parm)
-
- # If the parameter has a parameter reference, then get that
- # parameter instead as otherwise `unexpandedString()` fails.
- parameter = get_top_referenced_parm(parameter)
-
- # Substitute out the frame numbering with padded characters
- try:
- raw = parameter.unexpandedString()
- except hou.Error as exc:
- print("Failed: %s" % parameter)
- raise RuntimeError(exc)
-
- def replace(match):
- padding = 1
- n = match.group(2)
- if n and int(n):
- padding = int(n)
- return pad_character * padding
-
- expression = re.sub(r"(\$F([0-9]*))", replace, raw)
-
- with hou.ScriptEvalContext(parameter):
- return hou.expandStringAtFrame(expression, 0)
+from openpype.hosts.houdini.api.lib import (
+ evalParmNoFrame,
+ get_color_management_preferences
+)
+from openpype.hosts.houdini.api import (
+ colorspace
+)
class CollectRedshiftROPRenderProducts(pyblish.api.InstancePlugin):
@@ -87,6 +48,9 @@ class CollectRedshiftROPRenderProducts(pyblish.api.InstancePlugin):
prefix=default_prefix, suffix=beauty_suffix
)
render_products.append(beauty_product)
+ files_by_aov = {
+ "_": self.generate_expected_files(instance,
+ beauty_product)}
num_aovs = rop.evalParm("RS_aov")
for index in range(num_aovs):
@@ -104,11 +68,29 @@ class CollectRedshiftROPRenderProducts(pyblish.api.InstancePlugin):
aov_product = self.get_render_product_name(aov_prefix, aov_suffix)
render_products.append(aov_product)
+ files_by_aov[aov_suffix] = self.generate_expected_files(instance,
+ aov_product) # noqa
+
for product in render_products:
self.log.debug("Found render product: %s" % product)
filenames = list(render_products)
instance.data["files"] = filenames
+ instance.data["renderProducts"] = colorspace.ARenderProduct()
+
+ # For now by default do NOT try to publish the rendered output
+ instance.data["publishJobState"] = "Suspended"
+ instance.data["attachTo"] = [] # stub required data
+
+ if "expectedFiles" not in instance.data:
+ instance.data["expectedFiles"] = list()
+ instance.data["expectedFiles"].append(files_by_aov)
+
+ # update the colorspace data
+ colorspace_data = get_color_management_preferences()
+ instance.data["colorspaceConfig"] = colorspace_data["config"]
+ instance.data["colorspaceDisplay"] = colorspace_data["display"]
+ instance.data["colorspaceView"] = colorspace_data["view"]
def get_render_product_name(self, prefix, suffix):
"""Return the output filename using the AOV prefix and suffix"""
@@ -133,3 +115,27 @@ class CollectRedshiftROPRenderProducts(pyblish.api.InstancePlugin):
product_name = prefix
return product_name
+
+ def generate_expected_files(self, instance, path):
+ """Create expected files in instance data"""
+
+ dir = os.path.dirname(path)
+ file = os.path.basename(path)
+
+ if "#" in file:
+ def replace(match):
+ return "%0{}d".format(len(match.group()))
+
+ file = re.sub("#+", replace, file)
+
+ if "%" not in file:
+ return path
+
+ expected_files = []
+ start = instance.data["frameStart"]
+ end = instance.data["frameEnd"]
+ for i in range(int(start), (int(end) + 1)):
+ expected_files.append(
+ os.path.join(dir, (file % i)).replace("\\", "/"))
+
+ return expected_files
diff --git a/openpype/hosts/houdini/plugins/publish/collect_rop_frame_range.py b/openpype/hosts/houdini/plugins/publish/collect_rop_frame_range.py
new file mode 100644
index 0000000000..2a6be6b9f1
--- /dev/null
+++ b/openpype/hosts/houdini/plugins/publish/collect_rop_frame_range.py
@@ -0,0 +1,41 @@
+# -*- coding: utf-8 -*-
+"""Collector plugin for frames data on ROP instances."""
+import hou # noqa
+import pyblish.api
+from openpype.hosts.houdini.api import lib
+
+
+class CollectRopFrameRange(pyblish.api.InstancePlugin):
+ """Collect all frames which would be saved from the ROP nodes"""
+
+ order = pyblish.api.CollectorOrder
+ label = "Collect RopNode Frame Range"
+
+ def process(self, instance):
+
+ node_path = instance.data.get("instance_node")
+ if node_path is None:
+ # Instance without instance node like a workfile instance
+ return
+
+ ropnode = hou.node(node_path)
+ frame_data = lib.get_frame_data(ropnode)
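+ # get_frame_data is expected to return frameStart, frameEnd and byFrameStep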
+
+ if "frameStart" in frame_data and "frameEnd" in frame_data:
+
+ # Log artist friendly message about the collected frame range
+ message = (
+ "Frame range {0[frameStart]} - {0[frameEnd]}"
+ ).format(frame_data)
+ if frame_data.get("step", 1.0) != 1.0:
+ message += " with step {0[step]}".format(frame_data)
+ self.log.info(message)
+
+ instance.data.update(frame_data)
+
+ # Add frame range to label if the instance has a frame range.
+ label = instance.data.get("label", instance.data["name"])
+ instance.data["label"] = (
+ "{0} [{1[frameStart]} - {1[frameEnd]}]".format(label,
+ frame_data)
+ )
diff --git a/openpype/hosts/houdini/plugins/publish/collect_vray_rop.py b/openpype/hosts/houdini/plugins/publish/collect_vray_rop.py
new file mode 100644
index 0000000000..d4fe37f993
--- /dev/null
+++ b/openpype/hosts/houdini/plugins/publish/collect_vray_rop.py
@@ -0,0 +1,129 @@
+import re
+import os
+
+import hou
+import pyblish.api
+
+from openpype.hosts.houdini.api.lib import (
+ evalParmNoFrame,
+ get_color_management_preferences
+)
+from openpype.hosts.houdini.api import (
+ colorspace
+)
+
+
+class CollectVrayROPRenderProducts(pyblish.api.InstancePlugin):
+ """Collect Vray Render Products
+
+ Collects the instance.data["files"] for the render products.
+
+ Provides:
+ instance -> files
+
+ """
+
+ label = "VRay ROP Render Products"
+ order = pyblish.api.CollectorOrder + 0.4
+ hosts = ["houdini"]
+ families = ["vray_rop"]
+
+ def process(self, instance):
+
+ rop = hou.node(instance.data.get("instance_node"))
+
+ # Collect chunkSize
+ chunk_size_parm = rop.parm("chunkSize")
+ if chunk_size_parm:
+ chunk_size = int(chunk_size_parm.eval())
+ instance.data["chunkSize"] = chunk_size
+ self.log.debug("Chunk Size: %s" % chunk_size)
+
+ default_prefix = evalParmNoFrame(rop, "SettingsOutput_img_file_path")
+ render_products = []
+ # TODO: add render elements if render elements are enabled
+
+ beauty_product = self.get_beauty_render_product(default_prefix)
+ render_products.append(beauty_product)
+ files_by_aov = {
+ "RGB Color": self.generate_expected_files(instance,
+ beauty_product)}
+
+ if instance.data.get("RenderElement", True):
+ render_element = self.get_render_element_name(rop, default_prefix)
+ if render_element:
+ for aov, renderpass in render_element.items():
+ render_products.append(renderpass)
+ files_by_aov[aov] = self.generate_expected_files(instance, renderpass) # noqa
+
+ for product in render_products:
+ self.log.debug("Found render product: %s" % product)
+ filenames = list(render_products)
+ instance.data["files"] = filenames
+ instance.data["renderProducts"] = colorspace.ARenderProduct()
+
+ # For now by default do NOT try to publish the rendered output
+ instance.data["publishJobState"] = "Suspended"
+ instance.data["attachTo"] = [] # stub required data
+
+ if "expectedFiles" not in instance.data:
+ instance.data["expectedFiles"] = list()
+ instance.data["expectedFiles"].append(files_by_aov)
+ self.log.debug("expectedFiles:{}".format(files_by_aov))
+
+ # update the colorspace data
+ colorspace_data = get_color_management_preferences()
+ instance.data["colorspaceConfig"] = colorspace_data["config"]
+ instance.data["colorspaceDisplay"] = colorspace_data["display"]
+ instance.data["colorspaceView"] = colorspace_data["view"]
+
+ def get_beauty_render_product(self, prefix, suffix=""):
+ """Return the beauty output filename if render element enabled
+ """
+ aov_parm = ".{}".format(suffix)
+ beauty_product = None
+ if aov_parm in prefix:
+ beauty_product = prefix.replace(aov_parm, "")
+ else:
+ beauty_product = prefix
+
+ return beauty_product
+
+ def get_render_element_name(self, node, prefix, suffix=""):
+ """Return the output filename using the AOV prefix and suffix
+ """
+ render_element_dict = {}
+ # need a rewrite
+ re_path = node.evalParm("render_network_render_channels")
+ if re_path:
+ node_children = hou.node(re_path).children()
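+ # every child of the render channels network, except the settings
+ # node itself, represents one render element (AOV)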
+ for element in node_children:
+ if element.shaderName() != "vray:SettingsRenderChannels":
+ aov = str(element)
+ render_product = prefix.replace(suffix, aov)
+ render_element_dict[aov] = render_product
+ return render_element_dict
+
+ def generate_expected_files(self, instance, path):
+ """Create expected files in instance data"""
+
+ dir = os.path.dirname(path)
+ file = os.path.basename(path)
+
+ if "#" in file:
+ def replace(match):
+ return "%0{}d".format(len(match.group()))
+
+ file = re.sub("#+", replace, file)
+
+ if "%" not in file:
+ return path
+
+ expected_files = []
+ start = instance.data["frameStart"]
+ end = instance.data["frameEnd"]
+ for i in range(int(start), (int(end) + 1)):
+ expected_files.append(
+ os.path.join(dir, (file % i)).replace("\\", "/"))
+
+ return expected_files
diff --git a/openpype/hosts/houdini/plugins/publish/collect_workfile.py b/openpype/hosts/houdini/plugins/publish/collect_workfile.py
index a6e94ec29e..aa533bcf1b 100644
--- a/openpype/hosts/houdini/plugins/publish/collect_workfile.py
+++ b/openpype/hosts/houdini/plugins/publish/collect_workfile.py
@@ -32,5 +32,4 @@ class CollectWorkfile(pyblish.api.InstancePlugin):
"stagingDir": folder,
}]
- self.log.info('Collected instance: {}'.format(file))
- self.log.info('staging Dir: {}'.format(folder))
+ self.log.debug('Collected workfile instance: {}'.format(file))
diff --git a/openpype/hosts/houdini/plugins/publish/increment_current_file.py b/openpype/hosts/houdini/plugins/publish/increment_current_file.py
index 16d9ef9aec..2493b28bc1 100644
--- a/openpype/hosts/houdini/plugins/publish/increment_current_file.py
+++ b/openpype/hosts/houdini/plugins/publish/increment_current_file.py
@@ -2,7 +2,10 @@ import pyblish.api
from openpype.lib import version_up
from openpype.pipeline import registered_host
+from openpype.action import get_errored_plugins_from_data
from openpype.hosts.houdini.api import HoudiniHost
+from openpype.pipeline.publish import KnownPublishError
+
class IncrementCurrentFile(pyblish.api.ContextPlugin):
"""Increment the current file.
@@ -14,17 +17,32 @@ class IncrementCurrentFile(pyblish.api.ContextPlugin):
label = "Increment current file"
order = pyblish.api.IntegratorOrder + 9.0
hosts = ["houdini"]
- families = ["workfile"]
+ families = ["workfile",
+ "redshift_rop",
+ "arnold_rop",
+ "mantra_rop",
+ "karma_rop",
+ "usdrender"]
optional = True
def process(self, context):
+ errored_plugins = get_errored_plugins_from_data(context)
+ if any(
+ plugin.__name__ == "HoudiniSubmitPublishDeadline"
+ for plugin in errored_plugins
+ ):
+ raise KnownPublishError(
+ "Skipping incrementing current file because "
+ "submission to deadline failed."
+ )
+
# Filename must not have changed since collecting
host = registered_host() # type: HoudiniHost
current_file = host.current_file()
assert (
context.data["currentFile"] == current_file
- ), "Collected filename from current scene name."
+ ), "Collected filename mismatches from current scene name."
new_filepath = version_up(current_file)
host.save_workfile(new_filepath)
diff --git a/openpype/hosts/houdini/plugins/publish/save_scene.py b/openpype/hosts/houdini/plugins/publish/save_scene.py
index d6e07ccab0..703d3e4895 100644
--- a/openpype/hosts/houdini/plugins/publish/save_scene.py
+++ b/openpype/hosts/houdini/plugins/publish/save_scene.py
@@ -20,7 +20,7 @@ class SaveCurrentScene(pyblish.api.ContextPlugin):
)
if host.has_unsaved_changes():
- self.log.info("Saving current file {}...".format(current_file))
+ self.log.info("Saving current file: {}".format(current_file))
host.save_workfile(current_file)
else:
self.log.debug("No unsaved changes, skipping file save..")
diff --git a/openpype/hosts/houdini/plugins/publish/validate_workfile_paths.py b/openpype/hosts/houdini/plugins/publish/validate_workfile_paths.py
index 7707cc2dba..543c8e1407 100644
--- a/openpype/hosts/houdini/plugins/publish/validate_workfile_paths.py
+++ b/openpype/hosts/houdini/plugins/publish/validate_workfile_paths.py
@@ -28,18 +28,37 @@ class ValidateWorkfilePaths(
if not self.is_active(instance.data):
return
invalid = self.get_invalid()
- self.log.info(
- "node types to check: {}".format(", ".join(self.node_types)))
- self.log.info(
- "prohibited vars: {}".format(", ".join(self.prohibited_vars))
+ self.log.debug(
+ "Checking node types: {}".format(", ".join(self.node_types)))
+ self.log.debug(
+ "Searching prohibited vars: {}".format(
+ ", ".join(self.prohibited_vars)
+ )
)
- if invalid:
- for param in invalid:
- self.log.error(
- "{}: {}".format(param.path(), param.unexpandedString()))
- raise PublishValidationError(
- "Invalid paths found", title=self.label)
+ if invalid:
+ all_contained_vars = set()
+ for param in invalid:
+ value = param.unexpandedString()
+ contained_vars = [
+ var for var in self.prohibited_vars
+ if var in value
+ ]
+ all_contained_vars.update(contained_vars)
+
+ self.log.error(
+ "Parm {} contains prohibited vars {}: {}".format(
+ param.path(),
+ ", ".join(contained_vars),
+ value)
+ )
+
+ message = (
+ "Prohibited vars {} found in parameter values".format(
+ ", ".join(all_container_vars)
+ )
+ )
+ raise PublishValidationError(message, title=self.label)
@classmethod
def get_invalid(cls):
@@ -63,7 +82,7 @@ class ValidateWorkfilePaths(
def repair(cls, instance):
invalid = cls.get_invalid()
for param in invalid:
- cls.log.info("processing: {}".format(param.path()))
+ cls.log.info("Processing: {}".format(param.path()))
cls.log.info("Replacing {} for {}".format(
param.unexpandedString(),
hou.text.expandString(param.unexpandedString())))
diff --git a/openpype/hosts/max/api/colorspace.py b/openpype/hosts/max/api/colorspace.py
new file mode 100644
index 0000000000..fafee4ee04
--- /dev/null
+++ b/openpype/hosts/max/api/colorspace.py
@@ -0,0 +1,50 @@
+import attr
+from pymxs import runtime as rt
+
+
+@attr.s
+class LayerMetadata(object):
+ """Data class for Render Layer metadata."""
+ frameStart = attr.ib()
+ frameEnd = attr.ib()
+
+
+@attr.s
+class RenderProduct(object):
+ """Getting Colorspace as
+ Specific Render Product Parameter for submitting
+ publish job.
+ """
+ colorspace = attr.ib() # colorspace
+ view = attr.ib()
+ productName = attr.ib(default=None)
+
+
+class ARenderProduct(object):
+
+ def __init__(self):
+ """Constructor."""
+ # Initialize
+ self.layer_data = self._get_layer_data()
+ self.layer_data.products = self.get_colorspace_data()
+
+ def _get_layer_data(self):
+ return LayerMetadata(
+ frameStart=int(rt.rendStart),
+ frameEnd=int(rt.rendEnd),
+ )
+
+ def get_colorspace_data(self):
+ """To be implemented by renderer class.
+ This should return a list of RenderProducts.
+ Returns:
+ list: List of RenderProduct
+ """
+ colorspace_data = [
+ RenderProduct(
+ colorspace="sRGB",
+ view="ACES 1.0",
+ productName=""
+ )
+ ]
+ return colorspace_data
diff --git a/openpype/hosts/max/api/lib.py b/openpype/hosts/max/api/lib.py
index 5718d8f112..1d53802ecf 100644
--- a/openpype/hosts/max/api/lib.py
+++ b/openpype/hosts/max/api/lib.py
@@ -121,7 +121,14 @@ def get_all_children(parent, node_type=None):
def get_current_renderer():
- """get current renderer"""
+ """
+ Notes:
+ Get current renderer for Max
+
+ Returns:
+ "{Current Renderer}:{Current Renderer}"
+ e.g. "Redshift_Renderer:Redshift_Renderer"
+ """
return rt.renderers.production
diff --git a/openpype/hosts/max/api/lib_renderproducts.py b/openpype/hosts/max/api/lib_renderproducts.py
index 1a44de8684..3074f8e170 100644
--- a/openpype/hosts/max/api/lib_renderproducts.py
+++ b/openpype/hosts/max/api/lib_renderproducts.py
@@ -3,94 +3,126 @@
# arnold
# https://help.autodesk.com/view/ARNOL/ENU/?guid=arnold_for_3ds_max_ax_maxscript_commands_ax_renderview_commands_html
import os
+
from pymxs import runtime as rt
-from openpype.hosts.max.api.lib import (
- get_current_renderer,
- get_default_render_folder
-)
-from openpype.pipeline.context_tools import get_current_project_asset
-from openpype.settings import get_project_settings
+
+from openpype.hosts.max.api.lib import get_current_renderer
from openpype.pipeline import legacy_io
+from openpype.settings import get_project_settings
class RenderProducts(object):
def __init__(self, project_settings=None):
- self._project_settings = project_settings
- if not self._project_settings:
- self._project_settings = get_project_settings(
- legacy_io.Session["AVALON_PROJECT"]
- )
+ self._project_settings = project_settings or get_project_settings(
+ legacy_io.Session["AVALON_PROJECT"])
+
+ def get_beauty(self, container):
+ render_dir = os.path.dirname(rt.rendOutputFilename)
+
+ output_file = os.path.join(render_dir, container)
- def render_product(self, container):
- folder = rt.maxFilePath
- file = rt.maxFileName
- folder = folder.replace("\\", "/")
setting = self._project_settings
- render_folder = get_default_render_folder(setting)
- filename, ext = os.path.splitext(file)
+ img_fmt = setting["max"]["RenderSettings"]["image_format"] # noqa
- output_file = os.path.join(folder,
- render_folder,
- filename,
+ start_frame = int(rt.rendStart)
+ end_frame = int(rt.rendEnd) + 1
+
+ return {
+ "beauty": self.get_expected_beauty(
+ output_file, start_frame, end_frame, img_fmt
+ )
+ }
+
+ def get_aovs(self, container):
+ render_dir = os.path.dirname(rt.rendOutputFilename)
+
+ output_file = os.path.join(render_dir,
container)
- context = get_current_project_asset()
- # TODO: change the frame range follows the current render setting
- startFrame = int(rt.rendStart)
- endFrame = int(rt.rendEnd) + 1
-
- img_fmt = self._project_settings["max"]["RenderSettings"]["image_format"] # noqa
- full_render_list = self.beauty_render_product(output_file,
- startFrame,
- endFrame,
- img_fmt)
+ setting = self._project_settings
+ img_fmt = setting["max"]["RenderSettings"]["image_format"] # noqa
+ start_frame = int(rt.rendStart)
+ end_frame = int(rt.rendEnd) + 1
renderer_class = get_current_renderer()
renderer = str(renderer_class).split(":")[0]
-
-
- if renderer == "VUE_File_Renderer":
- return full_render_list
+ render_dict = {}
if renderer in [
"ART_Renderer",
- "Redshift_Renderer",
"V_Ray_6_Hotfix_3",
"V_Ray_GPU_6_Hotfix_3",
"Default_Scanline_Renderer",
"Quicksilver_Hardware_Renderer",
]:
- render_elem_list = self.render_elements_product(output_file,
- startFrame,
- endFrame,
- img_fmt)
- if render_elem_list:
- full_render_list.extend(iter(render_elem_list))
- return full_render_list
+ render_name = self.get_render_elements_name()
+ if render_name:
+ for name in render_name:
+ render_dict.update({
+ name: self.get_expected_render_elements(
+ output_file, name, start_frame,
+ end_frame, img_fmt)
+ })
+ elif renderer == "Redshift_Renderer":
+ render_name = self.get_render_elements_name()
+ if render_name:
+ rs_aov_files = rt.Execute("renderers.current.separateAovFiles")
+ # this doesn't work, always returns False
+ # rs_AovFiles = rt.RedShift_Renderer().separateAovFiles
+ if img_fmt == "exr" and not rs_aov_files:
+ for name in render_name:
+ if name == "RsCryptomatte":
+ render_dict.update({
+ name: self.get_expected_render_elements(
+ output_file, name, start_frame,
+ end_frame, img_fmt)
+ })
+ else:
+ for name in render_name:
+ render_dict.update({
+ name: self.get_expected_render_elements(
+ output_file, name, start_frame,
+ end_frame, img_fmt)
+ })
- if renderer == "Arnold":
- aov_list = self.arnold_render_product(output_file,
- startFrame,
- endFrame,
- img_fmt)
- if aov_list:
- full_render_list.extend(iter(aov_list))
- return full_render_list
+ elif renderer == "Arnold":
+ render_name = self.get_arnold_product_name()
+ if render_name:
+ for name in render_name:
+ render_dict.update({
+ name: self.get_expected_arnold_product(
+ output_file, name, start_frame, end_frame, img_fmt)
+ })
+ elif renderer in [
+ "V_Ray_6_Hotfix_3",
+ "V_Ray_GPU_6_Hotfix_3"
+ ]:
+ if img_fmt != "exr":
+ render_name = self.get_render_elements_name()
+ if render_name:
+ for name in render_name:
+ render_dict.update({
+ name: self.get_expected_render_elements(
+ output_file, name, start_frame,
+ end_frame, img_fmt) # noqa
+ })
- def beauty_render_product(self, folder, startFrame, endFrame, fmt):
+ return render_dict
+
+ def get_expected_beauty(self, folder, start_frame, end_frame, fmt):
beauty_frame_range = []
- for f in range(startFrame, endFrame):
- beauty_output = f"{folder}.{f}.{fmt}"
+ for f in range(start_frame, end_frame):
+ frame = "%04d" % f
+ beauty_output = f"{folder}.{frame}.{fmt}"
beauty_output = beauty_output.replace("\\", "/")
beauty_frame_range.append(beauty_output)
return beauty_frame_range
- # TODO: Get the arnold render product
- def arnold_render_product(self, folder, startFrame, endFrame, fmt):
- """Get all the Arnold AOVs"""
- aovs = []
+ def get_arnold_product_name(self):
+ """Get all the Arnold AOVs name"""
+ aov_name = []
amw = rt.MaxToAOps.AOVsManagerWindow()
aov_mgr = rt.renderers.current.AOVManager
@@ -100,34 +132,51 @@ class RenderProducts(object):
return
for i in range(aov_group_num):
# get the specific AOV group
- for aov in aov_mgr.drivers[i].aov_list:
- for f in range(startFrame, endFrame):
- render_element = f"{folder}_{aov.name}.{f}.{fmt}"
- render_element = render_element.replace("\\", "/")
- aovs.append(render_element)
-
+ aov_name.extend(aov.name for aov in aov_mgr.drivers[i].aov_list)
# close the AOVs manager window
amw.close()
- return aovs
+ return aov_name
- def render_elements_product(self, folder, startFrame, endFrame, fmt):
- """Get all the render element output files. """
- render_dirname = []
+ def get_expected_arnold_product(self, folder, name,
+ start_frame, end_frame, fmt):
+ """Get all the expected Arnold AOVs"""
+ aov_list = []
+ for f in range(start_frame, end_frame):
+ frame = "%04d" % f
+ render_element = f"{folder}_{name}.{frame}.{fmt}"
+ render_element = render_element.replace("\\", "/")
+ aov_list.append(render_element)
- render_elem = rt.MaxOps.GetCurRenderElementMgr()
+ return aov_list
+
+ def get_render_elements_name(self):
+ """Get all the render element names for general """
+ render_name = []
+ render_elem = rt.maxOps.GetCurRenderElementMgr()
render_elem_num = render_elem.NumRenderElements()
+ if render_elem_num < 1:
+ return
# get render elements from the renders
for i in range(render_elem_num):
renderlayer_name = render_elem.GetRenderElement(i)
- target, renderpass = str(renderlayer_name).split(":")
if renderlayer_name.enabled:
- for f in range(startFrame, endFrame):
- render_element = f"{folder}_{renderpass}.{f}.{fmt}"
- render_element = render_element.replace("\\", "/")
- render_dirname.append(render_element)
+ target, renderpass = str(renderlayer_name).split(":")
+ render_name.append(renderpass)
- return render_dirname
+ return render_name
+
+ def get_expected_render_elements(self, folder, name,
+ start_frame, end_frame, fmt):
+ """Get all the expected render element output files. """
+ render_elements = []
+ for f in range(start_frame, end_frame):
+ frame = "%04d" % f
+ render_element = f"{folder}_{name}.{frame}.{fmt}"
+ render_element = render_element.replace("\\", "/")
+ render_elements.append(render_element)
+
+ return render_elements
def image_format(self):
return self._project_settings["max"]["RenderSettings"]["image_format"] # noqa
diff --git a/openpype/hosts/max/plugins/create/create_redshift_proxy.py b/openpype/hosts/max/plugins/create/create_redshift_proxy.py
new file mode 100644
index 0000000000..698ea82b69
--- /dev/null
+++ b/openpype/hosts/max/plugins/create/create_redshift_proxy.py
@@ -0,0 +1,18 @@
+# -*- coding: utf-8 -*-
+"""Creator plugin for creating camera."""
+from openpype.hosts.max.api import plugin
+from openpype.pipeline import CreatedInstance
+
+
+class CreateRedshiftProxy(plugin.MaxCreator):
+ identifier = "io.openpype.creators.max.redshiftproxy"
+ label = "Redshift Proxy"
+ family = "redshiftproxy"
+ icon = "gear"
+
+ def create(self, subset_name, instance_data, pre_create_data):
+
+ _ = super(CreateRedshiftProxy, self).create(
+ subset_name,
+ instance_data,
+ pre_create_data) # type: CreatedInstance
diff --git a/openpype/hosts/max/plugins/create/create_render.py b/openpype/hosts/max/plugins/create/create_render.py
index 5b35453579..41e49f4620 100644
--- a/openpype/hosts/max/plugins/create/create_render.py
+++ b/openpype/hosts/max/plugins/create/create_render.py
@@ -1,5 +1,6 @@
# -*- coding: utf-8 -*-
"""Creator plugin for creating camera."""
+import os
from openpype.hosts.max.api import plugin
from openpype.hosts.max.api.lib_rendersettings import RenderSettings
@@ -12,8 +13,13 @@ class CreateRender(plugin.MaxCreator):
icon = "gear"
def create(self, subset_name, instance_data, pre_create_data):
- """Plugin entry point."""
- instance = super().create(
+ from pymxs import runtime as rt
+ sel_obj = list(rt.selection)
+ file = rt.maxFileName
+ filename, _ = os.path.splitext(file)
+ instance_data["AssetName"] = filename
+
+ instance = super(CreateRender, self).create(
subset_name,
instance_data,
pre_create_data)
diff --git a/openpype/hosts/max/plugins/load/load_model.py b/openpype/hosts/max/plugins/load/load_model.py
index 662b9fcb87..0ec94ab074 100644
--- a/openpype/hosts/max/plugins/load/load_model.py
+++ b/openpype/hosts/max/plugins/load/load_model.py
@@ -1,9 +1,8 @@
-
import os
-
+from openpype.pipeline import load, get_representation_path
+from openpype.hosts.max.api.pipeline import containerise
from openpype.hosts.max.api import lib
from openpype.hosts.max.api.lib import maintained_selection
-from openpype.hosts.max.api.pipeline import containerise
from openpype.pipeline import get_representation_path, load
@@ -23,25 +22,21 @@ class ModelAbcLoader(load.LoaderPlugin):
file_path = os.path.normpath(self.fname)
abc_before = {
- c for c in rt.RootNode.Children
- if rt.ClassOf(c) == rt.AlembicContainer
+ c
+ for c in rt.rootNode.Children
+ if rt.classOf(c) == rt.AlembicContainer
}
- abc_import_cmd = (f"""
-AlembicImport.ImportToRoot = false
-AlembicImport.CustomAttributes = true
-AlembicImport.UVs = true
-AlembicImport.VertexColors = true
-
-importFile @"{file_path}" #noPrompt
- """)
-
- self.log.debug(f"Executing command: {abc_import_cmd}")
- rt.Execute(abc_import_cmd)
+ rt.AlembicImport.ImportToRoot = False
+ rt.AlembicImport.CustomAttributes = True
+ rt.AlembicImport.UVs = True
+ rt.AlembicImport.VertexColors = True
+ rt.importFile(file_path, rt.name("noPrompt"))
abc_after = {
- c for c in rt.RootNode.Children
- if rt.ClassOf(c) == rt.AlembicContainer
+ c
+ for c in rt.rootNode.Children
+ if rt.classOf(c) == rt.AlembicContainer
}
# This should yield new AlembicContainer node
@@ -53,10 +48,12 @@ importFile @"{file_path}" #noPrompt
abc_container = abc_containers.pop()
return containerise(
- name, [abc_container], context, loader=self.__class__.__name__)
+ name, [abc_container], context, loader=self.__class__.__name__
+ )
def update(self, container, representation):
from pymxs import runtime as rt
+
path = get_representation_path(representation)
node = rt.GetNodeByName(container["instance_node"])
rt.Select(node.Children)
@@ -75,9 +72,10 @@ importFile @"{file_path}" #noPrompt
with maintained_selection():
rt.Select(node)
- lib.imprint(container["instance_node"], {
- "representation": str(representation["_id"])
- })
+ lib.imprint(
+ container["instance_node"],
+ {"representation": str(representation["_id"])},
+ )
def switch(self, container, representation):
self.update(container, representation)
diff --git a/openpype/hosts/max/plugins/load/load_model_fbx.py b/openpype/hosts/max/plugins/load/load_model_fbx.py
index 2aef6f02c2..ee7d04d5eb 100644
--- a/openpype/hosts/max/plugins/load/load_model_fbx.py
+++ b/openpype/hosts/max/plugins/load/load_model_fbx.py
@@ -1,8 +1,8 @@
import os
-
+from openpype.pipeline import load, get_representation_path
+from openpype.hosts.max.api.pipeline import containerise
from openpype.hosts.max.api import lib
from openpype.hosts.max.api.lib import maintained_selection
-from openpype.hosts.max.api.pipeline import containerise
from openpype.pipeline import get_representation_path, load
@@ -22,10 +22,7 @@ class FbxModelLoader(load.LoaderPlugin):
rt.FBXImporterSetParam("Animation", False)
rt.FBXImporterSetParam("Cameras", False)
rt.FBXImporterSetParam("Preserveinstances", True)
- rt.ImportFile(
- filepath,
- rt.Name("noPrompt"),
- using=rt.FBXIMP)
+ rt.importFile(filepath, rt.name("noPrompt"), using=rt.FBXIMP)
container = rt.GetNodeByName(name)
if not container:
@@ -36,32 +33,29 @@ class FbxModelLoader(load.LoaderPlugin):
selection.Parent = container
return containerise(
- name, [container], context, loader=self.__class__.__name__)
+ name, [container], context, loader=self.__class__.__name__
+ )
def update(self, container, representation):
from pymxs import runtime as rt
-
path = get_representation_path(representation)
- node = rt.GetNodeByName(container["instance_node"])
- rt.Select(node.Children)
- fbx_reimport_cmd = (
- f"""
-FBXImporterSetParam "Animation" false
-FBXImporterSetParam "Cameras" false
-FBXImporterSetParam "AxisConversionMethod" true
-FbxExporterSetParam "UpAxis" "Y"
-FbxExporterSetParam "Preserveinstances" true
+ node = rt.getNodeByName(container["instance_node"])
+ rt.select(node.Children)
-importFile @"{path}" #noPrompt using:FBXIMP
- """)
- rt.Execute(fbx_reimport_cmd)
+ rt.FBXImporterSetParam("Animation", False)
+ rt.FBXImporterSetParam("Cameras", False)
+ rt.FBXImporterSetParam("AxisConversionMethod", True)
+ rt.FBXImporterSetParam("UpAxis", "Y")
+ rt.FBXImporterSetParam("Preserveinstances", True)
+ rt.importFile(path, rt.name("noPrompt"), using=rt.FBXIMP)
with maintained_selection():
rt.Select(node)
- lib.imprint(container["instance_node"], {
- "representation": str(representation["_id"])
- })
+ lib.imprint(
+ container["instance_node"],
+ {"representation": str(representation["_id"])},
+ )
def switch(self, container, representation):
self.update(container, representation)
diff --git a/openpype/hosts/max/plugins/load/load_pointcache.py b/openpype/hosts/max/plugins/load/load_pointcache.py
index 4f7773d967..8a51e86000 100644
--- a/openpype/hosts/max/plugins/load/load_pointcache.py
+++ b/openpype/hosts/max/plugins/load/load_pointcache.py
@@ -5,6 +5,7 @@ Because of limited api, alembics can be only loaded, but not easily updated.
"""
import os
+from openpype.pipeline import load, get_representation_path
from openpype.hosts.max.api import lib, maintained_selection
from openpype.hosts.max.api.pipeline import containerise
@@ -14,9 +15,7 @@ from openpype.pipeline import get_representation_path, load
class AbcLoader(load.LoaderPlugin):
"""Alembic loader."""
- families = ["camera",
- "animation",
- "pointcache"]
+ families = ["camera", "animation", "pointcache"]
label = "Load Alembic"
representations = ["abc"]
order = -10
@@ -29,17 +28,18 @@ class AbcLoader(load.LoaderPlugin):
file_path = os.path.normpath(self.fname)
abc_before = {
- c for c in rt.RootNode.Children
- if rt.ClassOf(c) == rt.AlembicContainer
+ c
+ for c in rt.rootNode.Children
+ if rt.classOf(c) == rt.AlembicContainer
}
+
rt.AlembicImport.ImportToRoot = False
- rt.AlembicImport.StartFrame = True
- rt.AlembicImport.EndFrame = True
- rt.ImportFile(file_path, rt.Name("noPrompt"))
+ rt.importFile(file_path, rt.name("noPrompt"))
abc_after = {
- c for c in rt.RootNode.Children
- if rt.ClassOf(c) == rt.AlembicContainer
+ c
+ for c in rt.rootNode.Children
+ if rt.classOf(c) == rt.AlembicContainer
}
# This should yield new AlembicContainer node
@@ -55,7 +55,8 @@ class AbcLoader(load.LoaderPlugin):
cam_shape.playbackType = 2
return containerise(
- name, [abc_container], context, loader=self.__class__.__name__)
+ name, [abc_container], context, loader=self.__class__.__name__
+ )
def update(self, container, representation):
from pymxs import runtime as rt
@@ -63,9 +64,14 @@ class AbcLoader(load.LoaderPlugin):
path = get_representation_path(representation)
node = rt.GetNodeByName(container["instance_node"])
- lib.imprint(container["instance_node"], {
- "representation": str(representation["_id"])
- })
+ alembic_objects = self.get_container_children(node, "AlembicObject")
+ for alembic_object in alembic_objects:
+ alembic_object.source = path
+
+ lib.imprint(
+ container["instance_node"],
+ {"representation": str(representation["_id"])},
+ )
with maintained_selection():
rt.Select(node.Children)
diff --git a/openpype/hosts/max/plugins/load/load_redshift_proxy.py b/openpype/hosts/max/plugins/load/load_redshift_proxy.py
new file mode 100644
index 0000000000..31692f6367
--- /dev/null
+++ b/openpype/hosts/max/plugins/load/load_redshift_proxy.py
@@ -0,0 +1,63 @@
+import os
+import clique
+
+from openpype.pipeline import (
+ load,
+ get_representation_path
+)
+from openpype.hosts.max.api.pipeline import containerise
+from openpype.hosts.max.api import lib
+
+
+class RedshiftProxyLoader(load.LoaderPlugin):
+ """Load rs files with Redshift Proxy"""
+
+ label = "Load Redshift Proxy"
+ families = ["redshiftproxy"]
+ representations = ["rs"]
+ order = -9
+ icon = "code-fork"
+ color = "white"
+
+ def load(self, context, name=None, namespace=None, data=None):
+ from pymxs import runtime as rt
+
+ filepath = self.filepath_from_context(context)
+ rs_proxy = rt.RedshiftProxy()
+ rs_proxy.file = filepath
+ files_in_folder = os.listdir(os.path.dirname(filepath))
+ collections, remainder = clique.assemble(files_in_folder)
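+ # if the folder contains a frame collection, load the proxy as a sequence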
+ if collections:
+ rs_proxy.is_sequence = True
+
+ container = rt.container()
+ container.name = name
+ rs_proxy.Parent = container
+
+ asset = rt.getNodeByName(name)
+
+ return containerise(
+ name, [asset], context, loader=self.__class__.__name__)
+
+ def update(self, container, representation):
+ from pymxs import runtime as rt
+
+ path = get_representation_path(representation)
+ node = rt.getNodeByName(container["instance_node"])
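+ # retarget every Redshift proxy under the container to the new file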
+ for children in node.Children:
+ children_node = rt.getNodeByName(children.name)
+ for proxy in children_node.Children:
+ proxy.file = path
+
+ lib.imprint(container["instance_node"], {
+ "representation": str(representation["_id"])
+ })
+
+ def switch(self, container, representation):
+ self.update(container, representation)
+
+ def remove(self, container):
+ from pymxs import runtime as rt
+
+ node = rt.getNodeByName(container["instance_node"])
+ rt.delete(node)
diff --git a/openpype/hosts/max/plugins/publish/collect_render.py b/openpype/hosts/max/plugins/publish/collect_render.py
index 00e00a8eb5..db5c84fad9 100644
--- a/openpype/hosts/max/plugins/publish/collect_render.py
+++ b/openpype/hosts/max/plugins/publish/collect_render.py
@@ -5,7 +5,8 @@ import pyblish.api
from pymxs import runtime as rt
from openpype.pipeline import get_current_asset_name
-from openpype.hosts.max.api.lib import get_max_version
+from openpype.hosts.max.api import colorspace
+from openpype.hosts.max.api.lib import get_max_version, get_current_renderer
from openpype.hosts.max.api.lib_renderproducts import RenderProducts
from openpype.client import get_last_version_by_subset_name
@@ -28,8 +29,16 @@ class CollectRender(pyblish.api.InstancePlugin):
context.data['currentFile'] = current_file
asset = get_current_asset_name()
- render_layer_files = RenderProducts().render_product(instance.name)
+ files_by_aov = RenderProducts().get_beauty(instance.name)
folder = folder.replace("\\", "/")
+ aovs = RenderProducts().get_aovs(instance.name)
+ files_by_aov.update(aovs)
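+ # expected files are grouped by AOV name: "beauty" plus any render elements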
+
+ if "expectedFiles" not in instance.data:
+ instance.data["expectedFiles"] = list()
+ instance.data["files"] = list()
+ instance.data["expectedFiles"].append(files_by_aov)
+ instance.data["files"].append(files_by_aov)
img_format = RenderProducts().image_format()
project_name = context.data["projectName"]
@@ -38,7 +47,6 @@ class CollectRender(pyblish.api.InstancePlugin):
version_doc = get_last_version_by_subset_name(project_name,
instance.name,
asset_id)
-
self.log.debug("version_doc: {0}".format(version_doc))
version_int = 1
if version_doc:
@@ -46,22 +54,42 @@ class CollectRender(pyblish.api.InstancePlugin):
self.log.debug(f"Setting {version_int} to context.")
context.data["version"] = version_int
- # setup the plugin as 3dsmax for the internal renderer
+ # OCIO config not support in
+ # most of the 3dsmax renderers
+ # so this is currently hard coded
+ # TODO: add options for redshift/vray ocio config
+ instance.data["colorspaceConfig"] = ""
+ instance.data["colorspaceDisplay"] = "sRGB"
+ instance.data["colorspaceView"] = "ACES 1.0 SDR-video"
+ instance.data["renderProducts"] = colorspace.ARenderProduct()
+ instance.data["publishJobState"] = "Suspended"
+ instance.data["attachTo"] = []
+ renderer_class = get_current_renderer()
+ renderer = str(renderer_class).split(":")[0]
+ # also need to get the render dir for conversion
data = {
- "subset": instance.name,
"asset": asset,
+ "subset": str(instance.name),
"publish": True,
"maxversion": str(get_max_version()),
"imageFormat": img_format,
"family": 'maxrender',
"families": ['maxrender'],
+ "renderer": renderer,
"source": filepath,
- "expectedFiles": render_layer_files,
"plugin": "3dsmax",
"frameStart": int(rt.rendStart),
"frameEnd": int(rt.rendEnd),
"version": version_int,
"farm": True
}
- self.log.info("data: {0}".format(data))
instance.data.update(data)
+
+ # TODO: this should be unified with maya and its "multipart" flag
+ # on instance.
+ if renderer == "Redshift_Renderer":
+ instance.data.update(
+ {"separateAovFiles": rt.Execute(
+ "renderers.current.separateAovFiles")})
+
+ self.log.info("data: {0}".format(data))
diff --git a/openpype/hosts/max/plugins/publish/extract_redshift_proxy.py b/openpype/hosts/max/plugins/publish/extract_redshift_proxy.py
new file mode 100644
index 0000000000..3b44099609
--- /dev/null
+++ b/openpype/hosts/max/plugins/publish/extract_redshift_proxy.py
@@ -0,0 +1,62 @@
+import os
+import pyblish.api
+from openpype.pipeline import publish
+from pymxs import runtime as rt
+from openpype.hosts.max.api import maintained_selection
+
+
+class ExtractRedshiftProxy(publish.Extractor):
+ """
+ Extract Redshift Proxy with rsProxy
+ """
+
+ order = pyblish.api.ExtractorOrder - 0.1
+ label = "Extract RedShift Proxy"
+ hosts = ["max"]
+ families = ["redshiftproxy"]
+
+ def process(self, instance):
+ container = instance.data["instance_node"]
+ start = int(instance.context.data.get("frameStart"))
+ end = int(instance.context.data.get("frameEnd"))
+
+ self.log.info("Extracting Redshift Proxy...")
+ stagingdir = self.staging_dir(instance)
+ rs_filename = "{name}.rs".format(**instance.data)
+ rs_filepath = os.path.join(stagingdir, rs_filename)
+ rs_filepath = rs_filepath.replace("\\", "/")
+
+ rs_filenames = self.get_rsfiles(instance, start, end)
+
+ with maintained_selection():
+ # select and export
+ con = rt.getNodeByName(container)
+ rt.select(con.Children)
+ # Redshift rsProxy command
+ # rsProxy fp selected compress connectivity startFrame endFrame
+ # camera warnExisting transformPivotToOrigin
+ rt.rsProxy(rs_filepath, 1, 0, 0, start, end, 0, 1, 1)
+
+ self.log.info("Performing Extraction ...")
+
+ if "representations" not in instance.data:
+ instance.data["representations"] = []
+
+ representation = {
+ 'name': 'rs',
+ 'ext': 'rs',
+ 'files': rs_filenames if len(rs_filenames) > 1 else rs_filenames[0], # noqa
+ "stagingDir": stagingdir,
+ }
+ instance.data["representations"].append(representation)
+ self.log.info("Extracted instance '%s' to: %s" % (instance.name,
+ stagingdir))
+
+ def get_rsfiles(self, instance, startFrame, endFrame):
+ rs_filenames = []
+ rs_name = instance.data["name"]
+ for frame in range(startFrame, endFrame + 1):
+ rs_filename = "%s.%04d.rs" % (rs_name, frame)
+ rs_filenames.append(rs_filename)
+
+ return rs_filenames
diff --git a/openpype/hosts/max/plugins/publish/save_scene.py b/openpype/hosts/max/plugins/publish/save_scene.py
new file mode 100644
index 0000000000..a40788ab41
--- /dev/null
+++ b/openpype/hosts/max/plugins/publish/save_scene.py
@@ -0,0 +1,21 @@
+import pyblish.api
+import os
+
+
+class SaveCurrentScene(pyblish.api.ContextPlugin):
+ """Save current scene
+
+ """
+
+ label = "Save current file"
+ order = pyblish.api.ExtractorOrder - 0.49
+ hosts = ["max"]
+ families = ["maxrender", "workfile"]
+
+ def process(self, context):
+ from pymxs import runtime as rt
+ folder = rt.maxFilePath
+ file = rt.maxFileName
+ current = os.path.join(folder, file)
+ assert context.data["currentFile"] == current
+ rt.saveMaxFile(current)
diff --git a/openpype/hosts/max/plugins/publish/validate_deadline_publish.py b/openpype/hosts/max/plugins/publish/validate_deadline_publish.py
new file mode 100644
index 0000000000..b2f0e863f4
--- /dev/null
+++ b/openpype/hosts/max/plugins/publish/validate_deadline_publish.py
@@ -0,0 +1,43 @@
+import os
+import pyblish.api
+from pymxs import runtime as rt
+from openpype.pipeline.publish import (
+ RepairAction,
+ ValidateContentsOrder,
+ PublishValidationError,
+ OptionalPyblishPluginMixin
+)
+from openpype.hosts.max.api.lib_rendersettings import RenderSettings
+
+
+class ValidateDeadlinePublish(pyblish.api.InstancePlugin,
+ OptionalPyblishPluginMixin):
+ """Validates Render File Directory is
+ not the same in every submission
+ """
+
+ order = ValidateContentsOrder
+ families = ["maxrender"]
+ hosts = ["max"]
+ label = "Render Output for Deadline"
+ optional = True
+ actions = [RepairAction]
+
+ def process(self, instance):
+ if not self.is_active(instance.data):
+ return
+ file = rt.maxFileName
+ filename, ext = os.path.splitext(file)
+ if filename not in rt.rendOutputFilename:
+ raise PublishValidationError(
+ "Render output folder "
+ "doesn't match the max scene name! "
+ "Use Repair action to "
+ "fix the folder file path.."
+ )
+
+ @classmethod
+ def repair(cls, instance):
+ container = instance.data.get("instance_node")
+ RenderSettings().render_output(container)
+ cls.log.debug("Reset the render output folder...")
diff --git a/openpype/hosts/max/plugins/publish/validate_renderer_redshift_proxy.py b/openpype/hosts/max/plugins/publish/validate_renderer_redshift_proxy.py
new file mode 100644
index 0000000000..bc82f82f3b
--- /dev/null
+++ b/openpype/hosts/max/plugins/publish/validate_renderer_redshift_proxy.py
@@ -0,0 +1,54 @@
+# -*- coding: utf-8 -*-
+import pyblish.api
+from openpype.pipeline import PublishValidationError
+from pymxs import runtime as rt
+from openpype.pipeline.publish import RepairAction
+from openpype.hosts.max.api.lib import get_current_renderer
+
+
+class ValidateRendererRedshiftProxy(pyblish.api.InstancePlugin):
+ """
+ Validates Redshift as the current renderer for creating
+ Redshift Proxy
+ """
+
+ order = pyblish.api.ValidatorOrder
+ families = ["redshiftproxy"]
+ hosts = ["max"]
+ label = "Redshift Renderer"
+ actions = [RepairAction]
+
+ def process(self, instance):
+ invalid = self.get_redshift_renderer(instance)
+ if invalid:
+ raise PublishValidationError("Please install Redshift for 3dsMax"
+ " before using the Redshift proxy instance") # noqa
+ invalid = self.get_current_renderer(instance)
+ if invalid:
+ raise PublishValidationError("The Redshift proxy extraction"
+ "discontinued since the current renderer is not Redshift") # noqa
+
+ def get_redshift_renderer(self, instance):
+ invalid = list()
+ max_renderers_list = str(rt.RendererClass.classes)
+ if "Redshift_Renderer" not in max_renderers_list:
+ invalid.append(max_renderers_list)
+
+ return invalid
+
+ def get_current_renderer(self, instance):
+ invalid = list()
+ renderer_class = get_current_renderer()
+ current_renderer = str(renderer_class).split(":")[0]
+ if current_renderer != "Redshift_Renderer":
+ invalid.append(current_renderer)
+
+ return invalid
+
+ @classmethod
+ def repair(cls, instance):
+ for Renderer in rt.RendererClass.classes:
+ renderer = Renderer()
+ if "Redshift_Renderer" in str(renderer):
+ rt.renderers.production = renderer
+ break
diff --git a/openpype/hosts/maya/api/setdress.py b/openpype/hosts/maya/api/setdress.py
index 159bfe9eb3..0bb1f186eb 100644
--- a/openpype/hosts/maya/api/setdress.py
+++ b/openpype/hosts/maya/api/setdress.py
@@ -28,7 +28,9 @@ from openpype.pipeline import (
)
from openpype.hosts.maya.api.lib import (
matrix_equals,
- unique_namespace
+ unique_namespace,
+ get_container_transforms,
+ DEFAULT_MATRIX
)
log = logging.getLogger("PackageLoader")
@@ -183,8 +185,6 @@ def _add(instance, representation_id, loaders, namespace, root="|"):
"""
- from openpype.hosts.maya.lib import get_container_transforms
-
# Process within the namespace
with namespaced(namespace, new=False) as namespace:
@@ -379,8 +379,6 @@ def update_scene(set_container, containers, current_data, new_data, new_file):
"""
- from openpype.hosts.maya.lib import DEFAULT_MATRIX, get_container_transforms
-
set_namespace = set_container['namespace']
project_name = legacy_io.active_project()
diff --git a/openpype/hosts/maya/plugins/create/create_render.py b/openpype/hosts/maya/plugins/create/create_render.py
index 387b7321b9..4681175808 100644
--- a/openpype/hosts/maya/plugins/create/create_render.py
+++ b/openpype/hosts/maya/plugins/create/create_render.py
@@ -181,16 +181,34 @@ class CreateRender(plugin.Creator):
primary_pool = pool_setting["primary_pool"]
sorted_pools = self._set_default_pool(list(pools), primary_pool)
- cmds.addAttr(self.instance, longName="primaryPool",
- attributeType="enum",
- enumName=":".join(sorted_pools))
+ cmds.addAttr(
+ self.instance,
+ longName="primaryPool",
+ attributeType="enum",
+ enumName=":".join(sorted_pools)
+ )
+ cmds.setAttr(
+ "{}.primaryPool".format(self.instance),
+ 0,
+ keyable=False,
+ channelBox=True
+ )
pools = ["-"] + pools
secondary_pool = pool_setting["secondary_pool"]
sorted_pools = self._set_default_pool(list(pools), secondary_pool)
- cmds.addAttr("{}.secondaryPool".format(self.instance),
- attributeType="enum",
- enumName=":".join(sorted_pools))
+ cmds.addAttr(
+ self.instance,
+ longName="secondaryPool",
+ attributeType="enum",
+ enumName=":".join(sorted_pools)
+ )
+ cmds.setAttr(
+ "{}.secondaryPool".format(self.instance),
+ 0,
+ keyable=False,
+ channelBox=True
+ )
def _create_render_settings(self):
"""Create instance settings."""
@@ -260,6 +278,12 @@ class CreateRender(plugin.Creator):
default_priority)
self.data["tile_priority"] = tile_priority
+ strict_error_checking = maya_submit_dl.get("strict_error_checking",
+ True)
+ self.data["strict_error_checking"] = strict_error_checking
+
+ # Pool attributes should be last since they will be recreated when
+ # the deadline server changes.
pool_setting = (self._project_settings["deadline"]
["publish"]
["CollectDeadlinePools"])
@@ -272,9 +296,6 @@ class CreateRender(plugin.Creator):
secondary_pool = pool_setting["secondary_pool"]
self.data["secondaryPool"] = self._set_default_pool(pool_names,
secondary_pool)
- strict_error_checking = maya_submit_dl.get("strict_error_checking",
- True)
- self.data["strict_error_checking"] = strict_error_checking
if muster_enabled:
self.log.info(">>> Loading Muster credentials ...")
diff --git a/openpype/hosts/maya/plugins/load/load_assembly.py b/openpype/hosts/maya/plugins/load/load_assembly.py
index 902f38695c..275f21be5d 100644
--- a/openpype/hosts/maya/plugins/load/load_assembly.py
+++ b/openpype/hosts/maya/plugins/load/load_assembly.py
@@ -1,8 +1,14 @@
+import maya.cmds as cmds
+
from openpype.pipeline import (
load,
remove_container
)
+from openpype.hosts.maya.api.pipeline import containerise
+from openpype.hosts.maya.api.lib import unique_namespace
+from openpype.hosts.maya.api import setdress
+
class AssemblyLoader(load.LoaderPlugin):
@@ -16,9 +22,6 @@ class AssemblyLoader(load.LoaderPlugin):
def load(self, context, name, namespace, data):
- from openpype.hosts.maya.api.pipeline import containerise
- from openpype.hosts.maya.api.lib import unique_namespace
-
asset = context['asset']['name']
namespace = namespace or unique_namespace(
asset + "_",
@@ -26,8 +29,6 @@ class AssemblyLoader(load.LoaderPlugin):
suffix="_",
)
- from openpype.hosts.maya.api import setdress
-
containers = setdress.load_package(
filepath=self.fname,
name=name,
@@ -50,15 +51,11 @@ class AssemblyLoader(load.LoaderPlugin):
def update(self, container, representation):
- from openpype import setdress
return setdress.update_package(container, representation)
def remove(self, container):
"""Remove all sub containers"""
- from openpype import setdress
- import maya.cmds as cmds
-
# Remove all members
member_containers = setdress.get_contained_containers(container)
for member_container in member_containers:
diff --git a/openpype/hosts/maya/plugins/load/load_reference.py b/openpype/hosts/maya/plugins/load/load_reference.py
index f4a4a44344..74ca27ff3c 100644
--- a/openpype/hosts/maya/plugins/load/load_reference.py
+++ b/openpype/hosts/maya/plugins/load/load_reference.py
@@ -33,7 +33,7 @@ def preserve_modelpanel_cameras(container, log=None):
panel_cameras = {}
for panel in cmds.getPanel(type="modelPanel"):
cam = cmds.ls(cmds.modelPanel(panel, query=True, camera=True),
- long=True)
+ long=True)[0]
# Often but not always maya returns the transform from the
# modelPanel as opposed to the camera shape, so we convert it
diff --git a/openpype/hosts/maya/plugins/publish/collect_arnold_scene_source.py b/openpype/hosts/maya/plugins/publish/collect_arnold_scene_source.py
index 0845f653b1..f160a3a0c5 100644
--- a/openpype/hosts/maya/plugins/publish/collect_arnold_scene_source.py
+++ b/openpype/hosts/maya/plugins/publish/collect_arnold_scene_source.py
@@ -35,13 +35,16 @@ class CollectArnoldSceneSource(pyblish.api.InstancePlugin):
# camera.
cameras = cmds.ls(type="camera", long=True)
renderable = [c for c in cameras if cmds.getAttr("%s.renderable" % c)]
- camera = renderable[0]
- for node in instance.data["contentMembers"]:
- camera_shapes = cmds.listRelatives(
- node, shapes=True, type="camera"
- )
- if camera_shapes:
- camera = node
- instance.data["camera"] = camera
+ if renderable:
+ camera = renderable[0]
+ for node in instance.data["contentMembers"]:
+ camera_shapes = cmds.listRelatives(
+ node, shapes=True, type="camera"
+ )
+ if camera_shapes:
+ camera = node
+ instance.data["camera"] = camera
+ else:
+ self.log.debug("No renderable cameras found.")
self.log.debug("data: {}".format(instance.data))
diff --git a/openpype/hosts/maya/plugins/publish/collect_inputs.py b/openpype/hosts/maya/plugins/publish/collect_inputs.py
index 9c3f0f5efa..895c92762b 100644
--- a/openpype/hosts/maya/plugins/publish/collect_inputs.py
+++ b/openpype/hosts/maya/plugins/publish/collect_inputs.py
@@ -166,7 +166,7 @@ class CollectUpstreamInputs(pyblish.api.InstancePlugin):
inputs = [c["representation"] for c in containers]
instance.data["inputRepresentations"] = inputs
- self.log.info("Collected inputs: %s" % inputs)
+ self.log.debug("Collected inputs: %s" % inputs)
def _collect_renderlayer_inputs(self, scene_containers, instance):
"""Collects inputs from nodes in renderlayer, incl. shaders + camera"""
diff --git a/openpype/hosts/maya/plugins/publish/collect_render.py b/openpype/hosts/maya/plugins/publish/collect_render.py
index 7c47f17acb..babd494758 100644
--- a/openpype/hosts/maya/plugins/publish/collect_render.py
+++ b/openpype/hosts/maya/plugins/publish/collect_render.py
@@ -336,7 +336,7 @@ class CollectMayaRender(pyblish.api.ContextPlugin):
context.data["system_settings"]["modules"]["deadline"]
)
if deadline_settings["enabled"]:
- data["deadlineUrl"] = render_instance.data.get("deadlineUrl")
+ data["deadlineUrl"] = render_instance.data["deadlineUrl"]
if self.sync_workfile_version:
data["version"] = context.data["version"]
diff --git a/openpype/hosts/maya/plugins/publish/save_scene.py b/openpype/hosts/maya/plugins/publish/save_scene.py
index 45e62e7b44..495c339731 100644
--- a/openpype/hosts/maya/plugins/publish/save_scene.py
+++ b/openpype/hosts/maya/plugins/publish/save_scene.py
@@ -31,5 +31,5 @@ class SaveCurrentScene(pyblish.api.ContextPlugin):
# remove lockfile before saving
if is_workfile_lock_enabled("maya", project_name, project_settings):
remove_workfile_lock(current)
- self.log.info("Saving current file..")
+ self.log.info("Saving current file: {}".format(current))
cmds.file(save=True, force=True)
diff --git a/openpype/hosts/maya/plugins/publish/validate_arnold_scene_source_cbid.py b/openpype/hosts/maya/plugins/publish/validate_arnold_scene_source_cbid.py
index e27723e104..8ce76c8d04 100644
--- a/openpype/hosts/maya/plugins/publish/validate_arnold_scene_source_cbid.py
+++ b/openpype/hosts/maya/plugins/publish/validate_arnold_scene_source_cbid.py
@@ -70,5 +70,5 @@ class ValidateArnoldSceneSourceCbid(pyblish.api.InstancePlugin):
@classmethod
def repair(cls, instance):
- for content_node, proxy_node in cls.get_invalid_couples(cls, instance):
- lib.set_id(proxy_node, lib.get_id(content_node), overwrite=False)
+ for content_node, proxy_node in cls.get_invalid_couples(instance):
+ lib.set_id(proxy_node, lib.get_id(content_node), overwrite=True)
diff --git a/openpype/hosts/maya/plugins/publish/validate_instance_has_members.py b/openpype/hosts/maya/plugins/publish/validate_instance_has_members.py
index 4870f27bff..63849cfd12 100644
--- a/openpype/hosts/maya/plugins/publish/validate_instance_has_members.py
+++ b/openpype/hosts/maya/plugins/publish/validate_instance_has_members.py
@@ -13,7 +13,6 @@ class ValidateInstanceHasMembers(pyblish.api.InstancePlugin):
@classmethod
def get_invalid(cls, instance):
-
invalid = list()
if not instance.data["setMembers"]:
objectset_name = instance.data['name']
@@ -22,6 +21,10 @@ class ValidateInstanceHasMembers(pyblish.api.InstancePlugin):
return invalid
def process(self, instance):
+ # Allow renderlayer and workfile to be empty
+ skip_families = ["workfile", "renderlayer", "rendersetup"]
+ if instance.data.get("family") in skip_families:
+ return
invalid = self.get_invalid(instance)
if invalid:
diff --git a/openpype/hosts/maya/plugins/publish/validate_rig_output_ids.py b/openpype/hosts/maya/plugins/publish/validate_rig_output_ids.py
index 499bfd4e37..cba70a21b7 100644
--- a/openpype/hosts/maya/plugins/publish/validate_rig_output_ids.py
+++ b/openpype/hosts/maya/plugins/publish/validate_rig_output_ids.py
@@ -55,7 +55,8 @@ class ValidateRigOutputIds(pyblish.api.InstancePlugin):
if shapes:
instance_nodes.extend(shapes)
- scene_nodes = cmds.ls(type="transform") + cmds.ls(type="mesh")
+ scene_nodes = cmds.ls(type="transform", long=True)
+ scene_nodes += cmds.ls(type="mesh", long=True)
scene_nodes = set(scene_nodes) - set(instance_nodes)
scene_nodes_by_basename = defaultdict(list)
@@ -76,7 +77,7 @@ class ValidateRigOutputIds(pyblish.api.InstancePlugin):
if len(ids) > 1:
cls.log.error(
"\"{}\" id mismatch to: {}".format(
- instance_node.longName(), matches
+ instance_node, matches
)
)
invalid[instance_node] = matches
diff --git a/openpype/hosts/nuke/api/pipeline.py b/openpype/hosts/nuke/api/pipeline.py
index d649ffae7f..75b0f80d21 100644
--- a/openpype/hosts/nuke/api/pipeline.py
+++ b/openpype/hosts/nuke/api/pipeline.py
@@ -151,6 +151,7 @@ class NukeHost(
def add_nuke_callbacks():
""" Adding all available nuke callbacks
"""
+ nuke_settings = get_current_project_settings()["nuke"]
workfile_settings = WorkfileSettings()
# Set context settings.
nuke.addOnCreate(
@@ -169,7 +170,10 @@ def add_nuke_callbacks():
# # set apply all workfile settings on script load and save
nuke.addOnScriptLoad(WorkfileSettings().set_context_settings)
- nuke.addFilenameFilter(dirmap_file_name_filter)
+ if nuke_settings["nuke-dirmap"]["enabled"]:
+ log.info("Added Nuke's dirmaping callback ...")
+ # Add dirmap for file paths.
+ nuke.addFilenameFilter(dirmap_file_name_filter)
log.info("Added Nuke callbacks ...")
diff --git a/openpype/hosts/nuke/plugins/publish/extract_thumbnail.py b/openpype/hosts/nuke/plugins/publish/extract_thumbnail.py
index f391ca1e7c..21eefda249 100644
--- a/openpype/hosts/nuke/plugins/publish/extract_thumbnail.py
+++ b/openpype/hosts/nuke/plugins/publish/extract_thumbnail.py
@@ -5,6 +5,8 @@ import pyblish.api
from openpype.pipeline import publish
from openpype.hosts.nuke import api as napi
+from openpype.hosts.nuke.api.lib import set_node_knobs_from_settings
+
if sys.version_info[0] >= 3:
unicode = str
@@ -28,7 +30,7 @@ class ExtractThumbnail(publish.Extractor):
bake_viewer_process = True
bake_viewer_input_process = True
nodes = {}
-
+ reposition_nodes = None
def process(self, instance):
if instance.data.get("farm"):
@@ -123,18 +125,32 @@ class ExtractThumbnail(publish.Extractor):
temporary_nodes.append(rnode)
previous_node = rnode
- reformat_node = nuke.createNode("Reformat")
- ref_node = self.nodes.get("Reformat", None)
- if ref_node:
- for k, v in ref_node:
- self.log.debug("k, v: {0}:{1}".format(k, v))
- if isinstance(v, unicode):
- v = str(v)
- reformat_node[k].setValue(v)
+ if self.reposition_nodes is None:
+ # [deprecated] create reformat node old way
+ reformat_node = nuke.createNode("Reformat")
+ ref_node = self.nodes.get("Reformat", None)
+ if ref_node:
+ for k, v in ref_node:
+ self.log.debug("k, v: {0}:{1}".format(k, v))
+ if isinstance(v, unicode):
+ v = str(v)
+ reformat_node[k].setValue(v)
- reformat_node.setInput(0, previous_node)
- previous_node = reformat_node
- temporary_nodes.append(reformat_node)
+ reformat_node.setInput(0, previous_node)
+ previous_node = reformat_node
+ temporary_nodes.append(reformat_node)
+ else:
+            # create reposition nodes as defined in settings
+ for repo_node in self.reposition_nodes:
+ node_class = repo_node["node_class"]
+ knobs = repo_node["knobs"]
+ node = nuke.createNode(node_class)
+ set_node_knobs_from_settings(node, knobs)
+
+ # connect in order
+ node.setInput(0, previous_node)
+ previous_node = node
+ temporary_nodes.append(node)
# only create colorspace baking if toggled on
if bake_viewer_process:
diff --git a/openpype/hosts/nuke/startup/__init__.py b/openpype/hosts/nuke/startup/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/openpype/hosts/nuke/startup/frame_setting_for_read_nodes.py b/openpype/hosts/nuke/startup/frame_setting_for_read_nodes.py
new file mode 100644
index 0000000000..f0cbabe20f
--- /dev/null
+++ b/openpype/hosts/nuke/startup/frame_setting_for_read_nodes.py
@@ -0,0 +1,47 @@
+""" OpenPype custom script for resetting read nodes start frame values """
+
+import nuke
+import nukescripts
+
+
+class FrameSettingsPanel(nukescripts.PythonPanel):
+ """ Frame Settings Panel """
+ def __init__(self):
+ nukescripts.PythonPanel.__init__(self, "Set Frame Start (Read Node)")
+
+ # create knobs
+ self.frame = nuke.Int_Knob(
+ 'frame', 'Frame Number')
+ self.selected = nuke.Boolean_Knob("selection")
+ # add knobs to panel
+ self.addKnob(self.selected)
+ self.addKnob(self.frame)
+
+ # set values
+ self.selected.setValue(False)
+ self.frame.setValue(nuke.root().firstFrame())
+
+ def process(self):
+ """ Process the panel values. """
+ # get values
+ frame = self.frame.value()
+ if self.selected.value():
+ # selected nodes processing
+ if not nuke.selectedNodes():
+ return
+ for rn_ in nuke.selectedNodes():
+ if rn_.Class() != "Read":
+ continue
+ rn_["frame_mode"].setValue("start_at")
+ rn_["frame"].setValue(str(frame))
+ else:
+ # all nodes processing
+ for rn_ in nuke.allNodes(filter="Read"):
+ rn_["frame_mode"].setValue("start_at")
+ rn_["frame"].setValue(str(frame))
+
+
+def main():
+ p_ = FrameSettingsPanel()
+ if p_.showModalDialog():
+        p_.process()
diff --git a/openpype/hosts/resolve/api/__init__.py b/openpype/hosts/resolve/api/__init__.py
index 00a598548e..2b4546f8d6 100644
--- a/openpype/hosts/resolve/api/__init__.py
+++ b/openpype/hosts/resolve/api/__init__.py
@@ -24,6 +24,8 @@ from .lib import (
get_project_manager,
get_current_project,
get_current_timeline,
+ get_any_timeline,
+ get_new_timeline,
create_bin,
get_media_pool_item,
create_media_pool_item,
@@ -95,6 +97,8 @@ __all__ = [
"get_project_manager",
"get_current_project",
"get_current_timeline",
+ "get_any_timeline",
+ "get_new_timeline",
"create_bin",
"get_media_pool_item",
"create_media_pool_item",
diff --git a/openpype/hosts/resolve/api/lib.py b/openpype/hosts/resolve/api/lib.py
index b3ad20df39..a44c527f13 100644
--- a/openpype/hosts/resolve/api/lib.py
+++ b/openpype/hosts/resolve/api/lib.py
@@ -15,6 +15,7 @@ log = Logger.get_logger(__name__)
self = sys.modules[__name__]
self.project_manager = None
self.media_storage = None
+self.current_project = None
# OpenPype sequential rename variables
self.rename_index = 0
@@ -85,22 +86,60 @@ def get_media_storage():
def get_current_project():
- # initialize project manager
- get_project_manager()
+ """Get current project object.
+ """
+ if not self.current_project:
+ self.current_project = get_project_manager().GetCurrentProject()
- return self.project_manager.GetCurrentProject()
+ return self.current_project
def get_current_timeline(new=False):
- # get current project
+ """Get current timeline object.
+
+ Args:
+        new (bool, optional): [DEPRECATED] if True, a new timeline is
+            created when none exists
+
+    Returns:
+        object | None: resolve.Timeline
+ """
project = get_current_project()
+ timeline = project.GetCurrentTimeline()
+ # return current timeline if any
+ if timeline:
+ return timeline
+
+ # TODO: [deprecated] and will be removed in future
if new:
- media_pool = project.GetMediaPool()
- new_timeline = media_pool.CreateEmptyTimeline(self.pype_timeline_name)
- project.SetCurrentTimeline(new_timeline)
+ return get_new_timeline()
- return project.GetCurrentTimeline()
+
+def get_any_timeline():
+ """Get any timeline object.
+
+ Returns:
+ object | None: resolve.Timeline
+ """
+ project = get_current_project()
+ timeline_count = project.GetTimelineCount()
+ if timeline_count > 0:
+ return project.GetTimelineByIndex(1)
+
+
+def get_new_timeline():
+ """Get new timeline object.
+
+ Returns:
+ object: resolve.Timeline
+ """
+ project = get_current_project()
+ media_pool = project.GetMediaPool()
+ new_timeline = media_pool.CreateEmptyTimeline(self.pype_timeline_name)
+ project.SetCurrentTimeline(new_timeline)
+ return new_timeline
def create_bin(name: str, root: object = None) -> object:
@@ -312,7 +351,13 @@ def get_current_timeline_items(
track_type = track_type or "video"
selecting_color = selecting_color or "Chocolate"
project = get_current_project()
- timeline = get_current_timeline()
+
+ # get timeline anyhow
+ timeline = (
+ get_current_timeline() or
+ get_any_timeline() or
+ get_new_timeline()
+ )
selected_clips = []
# get all tracks count filtered by track type
diff --git a/openpype/hosts/resolve/api/plugin.py b/openpype/hosts/resolve/api/plugin.py
index 609cff60f7..e5846c2fc2 100644
--- a/openpype/hosts/resolve/api/plugin.py
+++ b/openpype/hosts/resolve/api/plugin.py
@@ -327,7 +327,10 @@ class ClipLoader:
self.active_timeline = options["timeline"]
else:
# create new sequence
- self.active_timeline = lib.get_current_timeline(new=True)
+ self.active_timeline = (
+ lib.get_current_timeline() or
+ lib.get_new_timeline()
+ )
else:
self.active_timeline = lib.get_current_timeline()
diff --git a/openpype/hosts/resolve/api/workio.py b/openpype/hosts/resolve/api/workio.py
index 5ce73eea53..5966fa6a43 100644
--- a/openpype/hosts/resolve/api/workio.py
+++ b/openpype/hosts/resolve/api/workio.py
@@ -43,18 +43,22 @@ def open_file(filepath):
"""
Loading project
"""
+
+ from . import bmdvr
+
pm = get_project_manager()
+ page = bmdvr.GetCurrentPage()
+ if page is not None:
+        # Save current project only if Resolve has an active page, otherwise
+        # we consider Resolve to be in a pre-launch state (no open UI yet)
+ project = pm.GetCurrentProject()
+ print(f"Saving current project: {project}")
+ pm.SaveProject()
+
file = os.path.basename(filepath)
fname, _ = os.path.splitext(file)
dname, _ = fname.split("_v")
-
- # deal with current project
- project = pm.GetCurrentProject()
- log.info(f"Test `pm`: {pm}")
- pm.SaveProject()
-
try:
- log.info(f"Test `dname`: {dname}")
if not set_project_manager_to_folder_name(dname):
raise
# load project from input path
@@ -72,6 +76,7 @@ def open_file(filepath):
return False
return True
+
def current_file():
pm = get_project_manager()
current_dir = os.getenv("AVALON_WORKDIR")
diff --git a/openpype/hosts/resolve/hooks/pre_resolve_launch_last_workfile.py b/openpype/hosts/resolve/hooks/pre_resolve_launch_last_workfile.py
new file mode 100644
index 0000000000..0e27ddb8c3
--- /dev/null
+++ b/openpype/hosts/resolve/hooks/pre_resolve_launch_last_workfile.py
@@ -0,0 +1,45 @@
+import os
+
+from openpype.lib import PreLaunchHook
+import openpype.hosts.resolve
+
+
+class ResolveLaunchLastWorkfile(PreLaunchHook):
+ """Special hook to open last workfile for Resolve.
+
+    Checks 'start_last_workfile'; if set to False, the last workfile will
+    not be opened. This property is set explicitly in the Launcher.
+ """
+
+ # Execute after workfile template copy
+ order = 10
+ app_groups = ["resolve"]
+
+ def execute(self):
+ if not self.data.get("start_last_workfile"):
+ self.log.info("It is set to not start last workfile on start.")
+ return
+
+ last_workfile = self.data.get("last_workfile_path")
+ if not last_workfile:
+ self.log.warning("Last workfile was not collected.")
+ return
+
+ if not os.path.exists(last_workfile):
+ self.log.info("Current context does not have any workfile yet.")
+ return
+
+ # Add path to launch environment for the startup script to pick up
+ self.log.info(f"Setting OPENPYPE_RESOLVE_OPEN_ON_LAUNCH to launch "
+ f"last workfile: {last_workfile}")
+ key = "OPENPYPE_RESOLVE_OPEN_ON_LAUNCH"
+ self.launch_context.env[key] = last_workfile
+
+ # Set the openpype prelaunch startup script path for easy access
+ # in the LUA .scriptlib code
+ op_resolve_root = os.path.dirname(openpype.hosts.resolve.__file__)
+ script_path = os.path.join(op_resolve_root, "startup.py")
+ key = "OPENPYPE_RESOLVE_STARTUP_SCRIPT"
+ self.launch_context.env[key] = script_path
+ self.log.info("Setting OPENPYPE_RESOLVE_STARTUP_SCRIPT to: "
+ f"{script_path}")
diff --git a/openpype/hosts/resolve/hooks/pre_resolve_setup.py b/openpype/hosts/resolve/hooks/pre_resolve_setup.py
index 8574b3ad01..d066fc2da2 100644
--- a/openpype/hosts/resolve/hooks/pre_resolve_setup.py
+++ b/openpype/hosts/resolve/hooks/pre_resolve_setup.py
@@ -1,4 +1,5 @@
import os
+from pathlib import Path
import platform
from openpype.lib import PreLaunchHook
from openpype.hosts.resolve.utils import setup
@@ -6,33 +7,57 @@ from openpype.hosts.resolve.utils import setup
class ResolvePrelaunch(PreLaunchHook):
"""
- This hook will check if current workfile path has Resolve
- project inside. IF not, it initialize it and finally it pass
- path to the project by environment variable to Premiere launcher
- shell script.
+ This hook will set up the Resolve scripting environment as described in
+ Resolve's documentation found with the installed application at
+ {resolve}/Support/Developer/Scripting/README.txt
+
+ Prepares the following environment variables:
+ - `RESOLVE_SCRIPT_API`
+ - `RESOLVE_SCRIPT_LIB`
+
+ It adds $RESOLVE_SCRIPT_API/Modules to PYTHONPATH.
+
+ Additionally it sets up the Python home for Python 3 based on the
+ RESOLVE_PYTHON3_HOME in the environment (usually defined in OpenPype's
+ Application environment for Resolve by the admin). For this it sets
+ PYTHONHOME and PATH variables.
+
+ It also defines:
+ - `RESOLVE_UTILITY_SCRIPTS_DIR`: Destination directory for OpenPype
+ Fusion scripts to be copied to for Resolve to pick them up.
+ - `OPENPYPE_LOG_NO_COLORS` to True to ensure OP doesn't try to
+ use logging with terminal colors as it fails in Resolve.
+
"""
+
app_groups = ["resolve"]
def execute(self):
current_platform = platform.system().lower()
- PROGRAMDATA = self.launch_context.env.get("PROGRAMDATA", "")
- RESOLVE_SCRIPT_API_ = {
+ programdata = self.launch_context.env.get("PROGRAMDATA", "")
+ resolve_script_api_locations = {
"windows": (
- f"{PROGRAMDATA}/Blackmagic Design/"
+ f"{programdata}/Blackmagic Design/"
"DaVinci Resolve/Support/Developer/Scripting"
),
"darwin": (
"/Library/Application Support/Blackmagic Design"
"/DaVinci Resolve/Developer/Scripting"
),
- "linux": "/opt/resolve/Developer/Scripting"
+ "linux": "/opt/resolve/Developer/Scripting",
}
- RESOLVE_SCRIPT_API = os.path.normpath(
- RESOLVE_SCRIPT_API_[current_platform])
- self.launch_context.env["RESOLVE_SCRIPT_API"] = RESOLVE_SCRIPT_API
+ resolve_script_api = Path(
+ resolve_script_api_locations[current_platform]
+ )
+ self.log.info(
+ f"setting RESOLVE_SCRIPT_API variable to {resolve_script_api}"
+ )
+ self.launch_context.env[
+ "RESOLVE_SCRIPT_API"
+ ] = resolve_script_api.as_posix()
- RESOLVE_SCRIPT_LIB_ = {
+ resolve_script_lib_dirs = {
"windows": (
"C:/Program Files/Blackmagic Design"
"/DaVinci Resolve/fusionscript.dll"
@@ -41,61 +66,69 @@ class ResolvePrelaunch(PreLaunchHook):
"/Applications/DaVinci Resolve/DaVinci Resolve.app"
"/Contents/Libraries/Fusion/fusionscript.so"
),
- "linux": "/opt/resolve/libs/Fusion/fusionscript.so"
+ "linux": "/opt/resolve/libs/Fusion/fusionscript.so",
}
- RESOLVE_SCRIPT_LIB = os.path.normpath(
- RESOLVE_SCRIPT_LIB_[current_platform])
- self.launch_context.env["RESOLVE_SCRIPT_LIB"] = RESOLVE_SCRIPT_LIB
+ resolve_script_lib = Path(resolve_script_lib_dirs[current_platform])
+ self.launch_context.env[
+ "RESOLVE_SCRIPT_LIB"
+ ] = resolve_script_lib.as_posix()
+ self.log.info(
+ f"setting RESOLVE_SCRIPT_LIB variable to {resolve_script_lib}"
+ )
- # TODO: add OTIO installation from `openpype/requirements.py`
+ # TODO: add OTIO installation from `openpype/requirements.py`
# making sure python <3.9.* is installed at provided path
- python3_home = os.path.normpath(
- self.launch_context.env.get("RESOLVE_PYTHON3_HOME", ""))
+ python3_home = Path(
+ self.launch_context.env.get("RESOLVE_PYTHON3_HOME", "")
+ )
- assert os.path.isdir(python3_home), (
+ assert python3_home.is_dir(), (
"Python 3 is not installed at the provided folder path. Either "
"make sure the `environments\resolve.json` is having correctly "
"set `RESOLVE_PYTHON3_HOME` or make sure Python 3 is installed "
f"in given path. \nRESOLVE_PYTHON3_HOME: `{python3_home}`"
)
- self.launch_context.env["PYTHONHOME"] = python3_home
- self.log.info(f"Path to Resolve Python folder: `{python3_home}`...")
-
- # add to the python path to path
- env_path = self.launch_context.env["PATH"]
- self.launch_context.env["PATH"] = os.pathsep.join([
- python3_home,
- os.path.join(python3_home, "Scripts")
- ] + env_path.split(os.pathsep))
-
- self.log.debug(f"PATH: {self.launch_context.env['PATH']}")
+ python3_home_str = python3_home.as_posix()
+ self.launch_context.env["PYTHONHOME"] = python3_home_str
+ self.log.info(f"Path to Resolve Python folder: `{python3_home_str}`")
# add to the PYTHONPATH
env_pythonpath = self.launch_context.env["PYTHONPATH"]
- self.launch_context.env["PYTHONPATH"] = os.pathsep.join([
- os.path.join(python3_home, "Lib", "site-packages"),
- os.path.join(RESOLVE_SCRIPT_API, "Modules"),
- ] + env_pythonpath.split(os.pathsep))
+ modules_path = Path(resolve_script_api, "Modules").as_posix()
+ self.launch_context.env[
+ "PYTHONPATH"
+ ] = f"{modules_path}{os.pathsep}{env_pythonpath}"
self.log.debug(f"PYTHONPATH: {self.launch_context.env['PYTHONPATH']}")
- RESOLVE_UTILITY_SCRIPTS_DIR_ = {
+ # add the pythonhome folder to PATH because on Windows
+ # this is needed for Py3 to be correctly detected within Resolve
+ env_path = self.launch_context.env["PATH"]
+ self.log.info(f"Adding `{python3_home_str}` to the PATH variable")
+ self.launch_context.env[
+ "PATH"
+ ] = f"{python3_home_str}{os.pathsep}{env_path}"
+
+ self.log.debug(f"PATH: {self.launch_context.env['PATH']}")
+
+ resolve_utility_scripts_dirs = {
"windows": (
- f"{PROGRAMDATA}/Blackmagic Design"
+ f"{programdata}/Blackmagic Design"
"/DaVinci Resolve/Fusion/Scripts/Comp"
),
"darwin": (
"/Library/Application Support/Blackmagic Design"
"/DaVinci Resolve/Fusion/Scripts/Comp"
),
- "linux": "/opt/resolve/Fusion/Scripts/Comp"
+ "linux": "/opt/resolve/Fusion/Scripts/Comp",
}
- RESOLVE_UTILITY_SCRIPTS_DIR = os.path.normpath(
- RESOLVE_UTILITY_SCRIPTS_DIR_[current_platform]
+ resolve_utility_scripts_dir = Path(
+ resolve_utility_scripts_dirs[current_platform]
)
# setting utility scripts dir for scripts syncing
- self.launch_context.env["RESOLVE_UTILITY_SCRIPTS_DIR"] = (
- RESOLVE_UTILITY_SCRIPTS_DIR)
+ self.launch_context.env[
+ "RESOLVE_UTILITY_SCRIPTS_DIR"
+ ] = resolve_utility_scripts_dir.as_posix()
# remove terminal coloring tags
self.launch_context.env["OPENPYPE_LOG_NO_COLORS"] = "True"
diff --git a/openpype/hosts/resolve/plugins/load/load_clip.py b/openpype/hosts/resolve/plugins/load/load_clip.py
index d30a7ea272..05bfb003d6 100644
--- a/openpype/hosts/resolve/plugins/load/load_clip.py
+++ b/openpype/hosts/resolve/plugins/load/load_clip.py
@@ -19,6 +19,7 @@ from openpype.lib.transcoding import (
IMAGE_EXTENSIONS
)
+
class LoadClip(plugin.TimelineItemLoader):
"""Load a subset to timeline as clip
diff --git a/openpype/hosts/resolve/startup.py b/openpype/hosts/resolve/startup.py
new file mode 100644
index 0000000000..79a64e0fbf
--- /dev/null
+++ b/openpype/hosts/resolve/startup.py
@@ -0,0 +1,62 @@
+"""This script is used as a startup script in Resolve through a .scriptlib file
+
+It triggers directly after Resolve launches and should be kept optimized for
+fast performance, because the Resolve UI is already interactive while it runs.
+Nothing prevents the user from continuing to work manually before any of the
+logic here has finished, which is also why imports are delayed as much as
+possible.
+
+This code runs in a separate process to the main Resolve process.
+
+"""
+import os
+
+import openpype.hosts.resolve.api
+
+
+def ensure_installed_host():
+ """Install resolve host with openpype and return the registered host.
+
+ This function can be called multiple times without triggering an
+ additional install.
+ """
+ from openpype.pipeline import install_host, registered_host
+ host = registered_host()
+ if host:
+ return host
+
+ install_host(openpype.hosts.resolve.api)
+ return registered_host()
+
+
+def launch_menu():
+ print("Launching Resolve OpenPype menu..")
+ ensure_installed_host()
+ openpype.hosts.resolve.api.launch_pype_menu()
+
+
+def open_file(path):
+ # Avoid the need to "install" the host
+ host = ensure_installed_host()
+ host.open_file(path)
+
+
+def main():
+ # Open last workfile
+ workfile_path = os.environ.get("OPENPYPE_RESOLVE_OPEN_ON_LAUNCH")
+ if workfile_path:
+ open_file(workfile_path)
+ else:
+ print("No last workfile set to open. Skipping..")
+
+ # Launch OpenPype menu
+ from openpype.settings import get_project_settings
+ from openpype.pipeline.context_tools import get_current_project_name
+ project_name = get_current_project_name()
+ settings = get_project_settings(project_name)
+ if settings.get("resolve", {}).get("launch_openpype_menu_on_start", True):
+ launch_menu()
+
+
+if __name__ == "__main__":
+ main()
diff --git a/openpype/hosts/resolve/utility_scripts/__OpenPype__Menu__.py b/openpype/hosts/resolve/utility_scripts/OpenPype__Menu.py
similarity index 100%
rename from openpype/hosts/resolve/utility_scripts/__OpenPype__Menu__.py
rename to openpype/hosts/resolve/utility_scripts/OpenPype__Menu.py
diff --git a/openpype/hosts/resolve/utility_scripts/README.markdown b/openpype/hosts/resolve/utility_scripts/README.markdown
deleted file mode 100644
index 8b13789179..0000000000
--- a/openpype/hosts/resolve/utility_scripts/README.markdown
+++ /dev/null
@@ -1 +0,0 @@
-
diff --git a/openpype/hosts/resolve/utility_scripts/OTIO_export.py b/openpype/hosts/resolve/utility_scripts/develop/OTIO_export.py
similarity index 100%
rename from openpype/hosts/resolve/utility_scripts/OTIO_export.py
rename to openpype/hosts/resolve/utility_scripts/develop/OTIO_export.py
diff --git a/openpype/hosts/resolve/utility_scripts/OTIO_import.py b/openpype/hosts/resolve/utility_scripts/develop/OTIO_import.py
similarity index 100%
rename from openpype/hosts/resolve/utility_scripts/OTIO_import.py
rename to openpype/hosts/resolve/utility_scripts/develop/OTIO_import.py
diff --git a/openpype/hosts/resolve/utility_scripts/OpenPype_sync_util_scripts.py b/openpype/hosts/resolve/utility_scripts/develop/OpenPype_sync_util_scripts.py
similarity index 100%
rename from openpype/hosts/resolve/utility_scripts/OpenPype_sync_util_scripts.py
rename to openpype/hosts/resolve/utility_scripts/develop/OpenPype_sync_util_scripts.py
diff --git a/openpype/hosts/resolve/utility_scripts/openpype_startup.scriptlib b/openpype/hosts/resolve/utility_scripts/openpype_startup.scriptlib
new file mode 100644
index 0000000000..ec9b30a18d
--- /dev/null
+++ b/openpype/hosts/resolve/utility_scripts/openpype_startup.scriptlib
@@ -0,0 +1,21 @@
+-- Run OpenPype's Python launch script for resolve
+function file_exists(name)
+ local f = io.open(name, "r")
+ return f ~= nil and io.close(f)
+end
+
+
+openpype_startup_script = os.getenv("OPENPYPE_RESOLVE_STARTUP_SCRIPT")
+if openpype_startup_script ~= nil then
+ script = fusion:MapPath(openpype_startup_script)
+
+ if file_exists(script) then
+ -- We must use RunScript to ensure it runs in a separate
+ -- process to Resolve itself to avoid a deadlock for
+ -- certain imports of OpenPype libraries or Qt
+ print("Running launch script: " .. script)
+ fusion:RunScript(script)
+ else
+ print("Launch script not found at: " .. script)
+ end
+end
\ No newline at end of file
diff --git a/openpype/hosts/resolve/utility_scripts/tests/testing_timeline_op.py b/openpype/hosts/resolve/utility_scripts/tests/testing_timeline_op.py
new file mode 100644
index 0000000000..8270496f64
--- /dev/null
+++ b/openpype/hosts/resolve/utility_scripts/tests/testing_timeline_op.py
@@ -0,0 +1,13 @@
+#! python3
+from openpype.pipeline import install_host
+from openpype.hosts.resolve import api as bmdvr
+from openpype.hosts.resolve.api.lib import get_current_project
+
+if __name__ == "__main__":
+ install_host(bmdvr)
+ project = get_current_project()
+ timeline_count = project.GetTimelineCount()
+ print(f"Timeline count: {timeline_count}")
+ timeline = project.GetTimelineByIndex(timeline_count)
+ print(f"Timeline name: {timeline.GetName()}")
+ print(timeline.GetTrackCount("video"))
diff --git a/openpype/hosts/resolve/utils.py b/openpype/hosts/resolve/utils.py
index 5881f153ae..5e3003862f 100644
--- a/openpype/hosts/resolve/utils.py
+++ b/openpype/hosts/resolve/utils.py
@@ -1,6 +1,6 @@
import os
import shutil
-from openpype.lib import Logger
+from openpype.lib import Logger, is_running_from_build
RESOLVE_ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
@@ -8,30 +8,33 @@ RESOLVE_ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
def setup(env):
log = Logger.get_logger("ResolveSetup")
scripts = {}
- us_env = env.get("RESOLVE_UTILITY_SCRIPTS_SOURCE_DIR")
- us_dir = env["RESOLVE_UTILITY_SCRIPTS_DIR"]
+ util_scripts_env = env.get("RESOLVE_UTILITY_SCRIPTS_SOURCE_DIR")
+ util_scripts_dir = env["RESOLVE_UTILITY_SCRIPTS_DIR"]
- us_paths = [os.path.join(
+ util_scripts_paths = [os.path.join(
RESOLVE_ROOT_DIR,
"utility_scripts"
)]
# collect script dirs
- if us_env:
- log.info("Utility Scripts Env: `{}`".format(us_env))
- us_paths = us_env.split(
- os.pathsep) + us_paths
+ if util_scripts_env:
+ log.info("Utility Scripts Env: `{}`".format(util_scripts_env))
+ util_scripts_paths = util_scripts_env.split(
+ os.pathsep) + util_scripts_paths
# collect scripts from dirs
- for path in us_paths:
+ for path in util_scripts_paths:
scripts.update({path: os.listdir(path)})
- log.info("Utility Scripts Dir: `{}`".format(us_paths))
+ log.info("Utility Scripts Dir: `{}`".format(util_scripts_paths))
log.info("Utility Scripts: `{}`".format(scripts))
+ # Make sure scripts dir exists
+ os.makedirs(util_scripts_dir, exist_ok=True)
+
# make sure no script file is in folder
- for s in os.listdir(us_dir):
- path = os.path.join(us_dir, s)
+ for script in os.listdir(util_scripts_dir):
+ path = os.path.join(util_scripts_dir, script)
log.info("Removing `{}`...".format(path))
if os.path.isdir(path):
shutil.rmtree(path, onerror=None)
@@ -39,12 +42,25 @@ def setup(env):
os.remove(path)
# copy scripts into Resolve's utility scripts dir
- for d, sl in scripts.items():
- # directory and scripts list
- for s in sl:
- # script in script list
- src = os.path.join(d, s)
- dst = os.path.join(us_dir, s)
+        for directory, dir_scripts in scripts.items():
+            for script in dir_scripts:
+ if (
+ is_running_from_build() and
+ script in ["tests", "develop"]
+ ):
+                    # skip developer-only folders when running from a build
+ continue
+
+ src = os.path.join(directory, script)
+ dst = os.path.join(util_scripts_dir, script)
+
+ # TODO: Make this a less hacky workaround
+ if script == "openpype_startup.scriptlib":
+ # Handle special case for scriptlib that needs to be a folder
+ # up from the Comp folder in the Fusion scripts
+ dst = os.path.join(os.path.dirname(util_scripts_dir),
+ script)
+
log.info("Copying `{}` to `{}`...".format(src, dst))
if os.path.isdir(src):
shutil.copytree(
diff --git a/openpype/hosts/substancepainter/plugins/publish/save_workfile.py b/openpype/hosts/substancepainter/plugins/publish/save_workfile.py
index 4874b5e5c7..9662f31922 100644
--- a/openpype/hosts/substancepainter/plugins/publish/save_workfile.py
+++ b/openpype/hosts/substancepainter/plugins/publish/save_workfile.py
@@ -16,11 +16,12 @@ class SaveCurrentWorkfile(pyblish.api.ContextPlugin):
def process(self, context):
host = registered_host()
- if context.data["currentFile"] != host.get_current_workfile():
+ current = host.get_current_workfile()
+ if context.data["currentFile"] != current:
raise KnownPublishError("Workfile has changed during publishing!")
if host.has_unsaved_changes():
- self.log.info("Saving current file..")
+ self.log.info("Saving current file: {}".format(current))
host.save_workfile()
else:
self.log.debug("Skipping workfile save because there are no "
diff --git a/openpype/hosts/unreal/addon.py b/openpype/hosts/unreal/addon.py
index 1119b5c16c..ed23950b35 100644
--- a/openpype/hosts/unreal/addon.py
+++ b/openpype/hosts/unreal/addon.py
@@ -1,5 +1,7 @@
import os
+import re
from openpype.modules import IHostAddon, OpenPypeModule
+from openpype.widgets.message_window import Window
UNREAL_ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
@@ -19,6 +21,20 @@ class UnrealAddon(OpenPypeModule, IHostAddon):
from .lib import get_compatible_integration
+ pattern = re.compile(r'^\d+-\d+$')
+
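+        # the Unreal app variant in settings must look like "<major>-<minor>",
+        # e.g. "5-1"; it is converted to a version string below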
+ if not pattern.match(app.name):
+ msg = (
+ "Unreal application key in the settings must be in format"
+ "'5-0' or '5-1'"
+ )
+ Window(
+ parent=None,
+ title="Unreal application name format",
+ message=msg,
+ level="critical")
+ raise ValueError(msg)
+
ue_version = app.name.replace("-", ".")
unreal_plugin_path = os.path.join(
UNREAL_ROOT_DIR, "integration", "UE_{}".format(ue_version), "Ayon"
diff --git a/openpype/hosts/unreal/api/__init__.py b/openpype/hosts/unreal/api/__init__.py
index de0fce13d5..ac6a91eae9 100644
--- a/openpype/hosts/unreal/api/__init__.py
+++ b/openpype/hosts/unreal/api/__init__.py
@@ -22,6 +22,8 @@ from .pipeline import (
show_tools_popup,
instantiate,
UnrealHost,
+ set_sequence_hierarchy,
+ generate_sequence,
maintained_selection
)
@@ -41,5 +43,7 @@ __all__ = [
"show_tools_popup",
"instantiate",
"UnrealHost",
+ "set_sequence_hierarchy",
+ "generate_sequence",
"maintained_selection"
]
diff --git a/openpype/hosts/unreal/api/pipeline.py b/openpype/hosts/unreal/api/pipeline.py
index bb45fa8c01..72816c9b81 100644
--- a/openpype/hosts/unreal/api/pipeline.py
+++ b/openpype/hosts/unreal/api/pipeline.py
@@ -9,12 +9,14 @@ import time
import pyblish.api
+from openpype.client import get_asset_by_name, get_assets
from openpype.pipeline import (
register_loader_plugin_path,
register_creator_plugin_path,
deregister_loader_plugin_path,
deregister_creator_plugin_path,
AYON_CONTAINER_ID,
+ legacy_io,
)
from openpype.tools.utils import host_tools
import openpype.hosts.unreal
@@ -512,6 +514,141 @@ def get_subsequences(sequence: unreal.LevelSequence):
return []
+def set_sequence_hierarchy(
+ seq_i, seq_j, max_frame_i, min_frame_j, max_frame_j, map_paths
+):
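+    """Add `seq_j` as a sub-sequence of `seq_i` and create level visibility
+    sections so the given maps are visible only in `seq_j`'s frame range."""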
+ # Get existing sequencer tracks or create them if they don't exist
+ tracks = seq_i.get_master_tracks()
+ subscene_track = None
+ visibility_track = None
+ for t in tracks:
+ if t.get_class() == unreal.MovieSceneSubTrack.static_class():
+ subscene_track = t
+ if (t.get_class() ==
+ unreal.MovieSceneLevelVisibilityTrack.static_class()):
+ visibility_track = t
+ if not subscene_track:
+ subscene_track = seq_i.add_master_track(unreal.MovieSceneSubTrack)
+ if not visibility_track:
+ visibility_track = seq_i.add_master_track(
+ unreal.MovieSceneLevelVisibilityTrack)
+
+ # Create the sub-scene section
+ subscenes = subscene_track.get_sections()
+ subscene = None
+ for s in subscenes:
+ if s.get_editor_property('sub_sequence') == seq_j:
+ subscene = s
+ break
+ if not subscene:
+ subscene = subscene_track.add_section()
+ subscene.set_row_index(len(subscene_track.get_sections()))
+ subscene.set_editor_property('sub_sequence', seq_j)
+ subscene.set_range(
+ min_frame_j,
+ max_frame_j + 1)
+
+ # Create the visibility section
+ ar = unreal.AssetRegistryHelpers.get_asset_registry()
+ maps = []
+ for m in map_paths:
+        # Unreal requires the level to be loaded to get the map name
+ unreal.EditorLevelLibrary.save_all_dirty_levels()
+ unreal.EditorLevelLibrary.load_level(m)
+ maps.append(str(ar.get_asset_by_object_path(m).asset_name))
+
+ vis_section = visibility_track.add_section()
+ index = len(visibility_track.get_sections())
+
+ vis_section.set_range(
+ min_frame_j,
+ max_frame_j + 1)
+ vis_section.set_visibility(unreal.LevelVisibility.VISIBLE)
+ vis_section.set_row_index(index)
+ vis_section.set_level_names(maps)
+
+ if min_frame_j > 1:
+ hid_section = visibility_track.add_section()
+ hid_section.set_range(
+ 1,
+ min_frame_j)
+ hid_section.set_visibility(unreal.LevelVisibility.HIDDEN)
+ hid_section.set_row_index(index)
+ hid_section.set_level_names(maps)
+ if max_frame_j < max_frame_i:
+ hid_section = visibility_track.add_section()
+ hid_section.set_range(
+ max_frame_j + 1,
+ max_frame_i + 1)
+ hid_section.set_visibility(unreal.LevelVisibility.HIDDEN)
+ hid_section.set_row_index(index)
+ hid_section.set_level_names(maps)
+
+
+def generate_sequence(h, h_dir):
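+    """Create a LevelSequence asset named `h` in `h_dir` with its playback
+    range derived from the clipIn/clipOut of the asset's children."""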
+ tools = unreal.AssetToolsHelpers().get_asset_tools()
+
+ sequence = tools.create_asset(
+ asset_name=h,
+ package_path=h_dir,
+ asset_class=unreal.LevelSequence,
+ factory=unreal.LevelSequenceFactoryNew()
+ )
+
+ project_name = legacy_io.active_project()
+ asset_data = get_asset_by_name(
+ project_name,
+ h_dir.split('/')[-1],
+ fields=["_id", "data.fps"]
+ )
+
+ start_frames = []
+ end_frames = []
+
+ elements = list(get_assets(
+ project_name,
+ parent_ids=[asset_data["_id"]],
+ fields=["_id", "data.clipIn", "data.clipOut"]
+ ))
+ for e in elements:
+ start_frames.append(e.get('data').get('clipIn'))
+ end_frames.append(e.get('data').get('clipOut'))
+
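+        # note: 'elements' is deliberately extended while iterating so that
+        # the children of each asset are visited as well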
+ elements.extend(get_assets(
+ project_name,
+ parent_ids=[e["_id"]],
+ fields=["_id", "data.clipIn", "data.clipOut"]
+ ))
+
+ min_frame = min(start_frames)
+ max_frame = max(end_frames)
+
+ fps = asset_data.get('data').get("fps")
+
+ sequence.set_display_rate(
+ unreal.FrameRate(fps, 1.0))
+ sequence.set_playback_start(min_frame)
+ sequence.set_playback_end(max_frame)
+
+ sequence.set_work_range_start(min_frame / fps)
+ sequence.set_work_range_end(max_frame / fps)
+ sequence.set_view_range_start(min_frame / fps)
+ sequence.set_view_range_end(max_frame / fps)
+
+ tracks = sequence.get_master_tracks()
+ track = None
+ for t in tracks:
+ if (t.get_class() ==
+ unreal.MovieSceneCameraCutTrack.static_class()):
+ track = t
+ break
+ if not track:
+ track = sequence.add_master_track(
+ unreal.MovieSceneCameraCutTrack)
+
+ return sequence, (min_frame, max_frame)
+
+
@contextmanager
def maintained_selection():
"""Stub to be either implemented or replaced.
diff --git a/openpype/hosts/unreal/plugins/create/create_uasset.py b/openpype/hosts/unreal/plugins/create/create_uasset.py
index c78518e86b..f70ecc55b3 100644
--- a/openpype/hosts/unreal/plugins/create/create_uasset.py
+++ b/openpype/hosts/unreal/plugins/create/create_uasset.py
@@ -17,6 +17,8 @@ class CreateUAsset(UnrealAssetCreator):
family = "uasset"
icon = "cube"
+ extension = ".uasset"
+
def create(self, subset_name, instance_data, pre_create_data):
if pre_create_data.get("use_selection"):
ar = unreal.AssetRegistryHelpers.get_asset_registry()
@@ -37,10 +39,28 @@ class CreateUAsset(UnrealAssetCreator):
f"{Path(obj).name} is not on the disk. Likely it needs to"
"be saved first.")
- if Path(sys_path).suffix != ".uasset":
- raise CreatorError(f"{Path(sys_path).name} is not a UAsset.")
+ if Path(sys_path).suffix != self.extension:
+ raise CreatorError(
+ f"{Path(sys_path).name} is not a {self.label}.")
super(CreateUAsset, self).create(
subset_name,
instance_data,
pre_create_data)
+
+
+class CreateUMap(CreateUAsset):
+ """Create Level."""
+
+ identifier = "io.ayon.creators.unreal.umap"
+ label = "Level"
+ family = "uasset"
+ extension = ".umap"
+
+ def create(self, subset_name, instance_data, pre_create_data):
+ instance_data["families"] = ["umap"]
+
+ super(CreateUMap, self).create(
+ subset_name,
+ instance_data,
+ pre_create_data)
diff --git a/openpype/hosts/unreal/plugins/load/load_animation.py b/openpype/hosts/unreal/plugins/load/load_animation.py
index 778ddf693d..a5ecb677e8 100644
--- a/openpype/hosts/unreal/plugins/load/load_animation.py
+++ b/openpype/hosts/unreal/plugins/load/load_animation.py
@@ -156,7 +156,7 @@ class AnimationFBXLoader(plugin.Loader):
package_paths=[f"{root}/{hierarchy[0]}"],
recursive_paths=False)
levels = ar.get_assets(_filter)
- master_level = levels[0].get_full_name()
+ master_level = levels[0].get_asset().get_path_name()
hierarchy_dir = root
for h in hierarchy:
@@ -168,7 +168,7 @@ class AnimationFBXLoader(plugin.Loader):
package_paths=[f"{hierarchy_dir}/"],
recursive_paths=True)
levels = ar.get_assets(_filter)
- level = levels[0].get_full_name()
+ level = levels[0].get_asset().get_path_name()
unreal.EditorLevelLibrary.save_all_dirty_levels()
unreal.EditorLevelLibrary.load_level(level)
diff --git a/openpype/hosts/unreal/plugins/load/load_camera.py b/openpype/hosts/unreal/plugins/load/load_camera.py
index 1bd398349f..59ea14697d 100644
--- a/openpype/hosts/unreal/plugins/load/load_camera.py
+++ b/openpype/hosts/unreal/plugins/load/load_camera.py
@@ -3,16 +3,24 @@
from pathlib import Path
import unreal
-from unreal import EditorAssetLibrary
-from unreal import EditorLevelLibrary
-from unreal import EditorLevelUtils
-from openpype.client import get_assets, get_asset_by_name
+from unreal import (
+ EditorAssetLibrary,
+ EditorLevelLibrary,
+ EditorLevelUtils,
+ LevelSequenceEditorBlueprintLibrary as LevelSequenceLib,
+)
+from openpype.client import get_asset_by_name
from openpype.pipeline import (
AYON_CONTAINER_ID,
legacy_io,
)
from openpype.hosts.unreal.api import plugin
-from openpype.hosts.unreal.api import pipeline as unreal_pipeline
+from openpype.hosts.unreal.api.pipeline import (
+ generate_sequence,
+ set_sequence_hierarchy,
+ create_container,
+ imprint,
+)
class CameraLoader(plugin.Loader):
@@ -24,32 +32,6 @@ class CameraLoader(plugin.Loader):
icon = "cube"
color = "orange"
- def _set_sequence_hierarchy(
- self, seq_i, seq_j, min_frame_j, max_frame_j
- ):
- tracks = seq_i.get_master_tracks()
- track = None
- for t in tracks:
- if t.get_class() == unreal.MovieSceneSubTrack.static_class():
- track = t
- break
- if not track:
- track = seq_i.add_master_track(unreal.MovieSceneSubTrack)
-
- subscenes = track.get_sections()
- subscene = None
- for s in subscenes:
- if s.get_editor_property('sub_sequence') == seq_j:
- subscene = s
- break
- if not subscene:
- subscene = track.add_section()
- subscene.set_row_index(len(track.get_sections()))
- subscene.set_editor_property('sub_sequence', seq_j)
- subscene.set_range(
- min_frame_j,
- max_frame_j + 1)
-
def _import_camera(
self, world, sequence, bindings, import_fbx_settings, import_filename
):
@@ -110,10 +92,7 @@ class CameraLoader(plugin.Loader):
hierarchy_dir_list.append(hierarchy_dir)
asset = context.get('asset').get('name')
suffix = "_CON"
- if asset:
- asset_name = "{}_{}".format(asset, name)
- else:
- asset_name = "{}".format(name)
+ asset_name = f"{asset}_{name}" if asset else f"{name}"
tools = unreal.AssetToolsHelpers().get_asset_tools()
@@ -127,23 +106,15 @@ class CameraLoader(plugin.Loader):
# Get highest number to make a unique name
folders = [a for a in asset_content
if a[-1] == "/" and f"{name}_" in a]
- f_numbers = []
- for f in folders:
- # Get number from folder name. Splits the string by "_" and
- # removes the last element (which is a "/").
- f_numbers.append(int(f.split("_")[-1][:-1]))
+ # Get number from folder name. Splits the string by "_" and
+ # removes the last element (which is a "/").
+ f_numbers = [int(f.split("_")[-1][:-1]) for f in folders]
f_numbers.sort()
- if not f_numbers:
- unique_number = 1
- else:
- unique_number = f_numbers[-1] + 1
+ unique_number = f_numbers[-1] + 1 if f_numbers else 1
asset_dir, container_name = tools.create_unique_asset_name(
f"{hierarchy_dir}/{asset}/{name}_{unique_number:02d}", suffix="")
- asset_path = Path(asset_dir)
- asset_path_parent = str(asset_path.parent.as_posix())
-
container_name += suffix
EditorAssetLibrary.make_directory(asset_dir)
@@ -156,9 +127,9 @@ class CameraLoader(plugin.Loader):
if not EditorAssetLibrary.does_asset_exist(master_level):
EditorLevelLibrary.new_level(f"{h_dir}/{h_asset}_map")
- level = f"{asset_path_parent}/{asset}_map.{asset}_map"
+ level = f"{asset_dir}/{asset}_map_camera.{asset}_map_camera"
if not EditorAssetLibrary.does_asset_exist(level):
- EditorLevelLibrary.new_level(f"{asset_path_parent}/{asset}_map")
+ EditorLevelLibrary.new_level(f"{asset_dir}/{asset}_map_camera")
EditorLevelLibrary.load_level(master_level)
EditorLevelUtils.add_level_to_world(
@@ -169,27 +140,13 @@ class CameraLoader(plugin.Loader):
EditorLevelLibrary.save_all_dirty_levels()
EditorLevelLibrary.load_level(level)
- project_name = legacy_io.active_project()
- # TODO refactor
- # - Creating of hierarchy should be a function in unreal integration
- # - it's used in multiple loaders but must not be loader's logic
- # - hard to say what is purpose of the loop
- # - variables does not match their meaning
- # - why scene is stored to sequences?
- # - asset documents vs. elements
- # - cleanup variable names in whole function
- # - e.g. 'asset', 'asset_name', 'asset_data', 'asset_doc'
- # - really inefficient queries of asset documents
- # - existing asset in scene is considered as "with correct values"
- # - variable 'elements' is modified during it's loop
# Get all the sequences in the hierarchy. It will create them, if
# they don't exist.
- sequences = []
frame_ranges = []
- i = 0
- for h in hierarchy_dir_list:
+ sequences = []
+ for (h_dir, h) in zip(hierarchy_dir_list, hierarchy):
root_content = EditorAssetLibrary.list_assets(
- h, recursive=False, include_folder=False)
+ h_dir, recursive=False, include_folder=False)
existing_sequences = [
EditorAssetLibrary.find_asset_data(asset)
@@ -198,57 +155,17 @@ class CameraLoader(plugin.Loader):
asset).get_class().get_name() == 'LevelSequence'
]
- if not existing_sequences:
- scene = tools.create_asset(
- asset_name=hierarchy[i],
- package_path=h,
- asset_class=unreal.LevelSequence,
- factory=unreal.LevelSequenceFactoryNew()
- )
-
- asset_data = get_asset_by_name(
- project_name,
- h.split('/')[-1],
- fields=["_id", "data.fps"]
- )
-
- start_frames = []
- end_frames = []
-
- elements = list(get_assets(
- project_name,
- parent_ids=[asset_data["_id"]],
- fields=["_id", "data.clipIn", "data.clipOut"]
- ))
-
- for e in elements:
- start_frames.append(e.get('data').get('clipIn'))
- end_frames.append(e.get('data').get('clipOut'))
-
- elements.extend(get_assets(
- project_name,
- parent_ids=[e["_id"]],
- fields=["_id", "data.clipIn", "data.clipOut"]
- ))
-
- min_frame = min(start_frames)
- max_frame = max(end_frames)
-
- scene.set_display_rate(
- unreal.FrameRate(asset_data.get('data').get("fps"), 1.0))
- scene.set_playback_start(min_frame)
- scene.set_playback_end(max_frame)
-
- sequences.append(scene)
- frame_ranges.append((min_frame, max_frame))
- else:
- for e in existing_sequences:
- sequences.append(e.get_asset())
+ if existing_sequences:
+ for seq in existing_sequences:
+ sequences.append(seq.get_asset())
frame_ranges.append((
- e.get_asset().get_playback_start(),
- e.get_asset().get_playback_end()))
+ seq.get_asset().get_playback_start(),
+ seq.get_asset().get_playback_end()))
+ else:
+ sequence, frame_range = generate_sequence(h, h_dir)
- i += 1
+ sequences.append(sequence)
+ frame_ranges.append(frame_range)
EditorAssetLibrary.make_directory(asset_dir)
@@ -260,19 +177,24 @@ class CameraLoader(plugin.Loader):
)
# Add sequences data to hierarchy
- for i in range(0, len(sequences) - 1):
- self._set_sequence_hierarchy(
+ for i in range(len(sequences) - 1):
+ set_sequence_hierarchy(
sequences[i], sequences[i + 1],
- frame_ranges[i + 1][0], frame_ranges[i + 1][1])
+ frame_ranges[i][1],
+ frame_ranges[i + 1][0], frame_ranges[i + 1][1],
+ [level])
+ project_name = legacy_io.active_project()
data = get_asset_by_name(project_name, asset)["data"]
cam_seq.set_display_rate(
unreal.FrameRate(data.get("fps"), 1.0))
cam_seq.set_playback_start(data.get('clipIn'))
cam_seq.set_playback_end(data.get('clipOut') + 1)
- self._set_sequence_hierarchy(
+ set_sequence_hierarchy(
sequences[-1], cam_seq,
- data.get('clipIn'), data.get('clipOut'))
+ frame_ranges[-1][1],
+ data.get('clipIn'), data.get('clipOut'),
+ [level])
settings = unreal.MovieSceneUserImportFBXSettings()
settings.set_editor_property('reduce_keys', False)
@@ -307,7 +229,7 @@ class CameraLoader(plugin.Loader):
key.set_time(unreal.FrameNumber(value=new_time))
# Create Asset Container
- unreal_pipeline.create_container(
+ create_container(
container=container_name, path=asset_dir)
data = {
@@ -322,14 +244,14 @@ class CameraLoader(plugin.Loader):
"parent": context["representation"]["parent"],
"family": context["representation"]["context"]["family"]
}
- unreal_pipeline.imprint(
- "{}/{}".format(asset_dir, container_name), data)
+ imprint(f"{asset_dir}/{container_name}", data)
EditorLevelLibrary.save_all_dirty_levels()
EditorLevelLibrary.load_level(master_level)
+ # Save all assets in the hierarchy
asset_content = EditorAssetLibrary.list_assets(
- asset_dir, recursive=True, include_folder=True
+ hierarchy_dir_list[0], recursive=True, include_folder=False
)
for a in asset_content:
@@ -340,32 +262,30 @@ class CameraLoader(plugin.Loader):
def update(self, container, representation):
ar = unreal.AssetRegistryHelpers.get_asset_registry()
- root = "/Game/ayon"
+ curr_level_sequence = LevelSequenceLib.get_current_level_sequence()
+ curr_time = LevelSequenceLib.get_current_time()
+ is_cam_lock = LevelSequenceLib.is_camera_cut_locked_to_viewport()
+
+ editor_subsystem = unreal.UnrealEditorSubsystem()
+ vp_loc, vp_rot = editor_subsystem.get_level_viewport_camera_info()
asset_dir = container.get('namespace')
- context = representation.get("context")
-
- hierarchy = context.get('hierarchy').split("/")
- h_dir = f"{root}/{hierarchy[0]}"
- h_asset = hierarchy[0]
- master_level = f"{h_dir}/{h_asset}_map.{h_asset}_map"
-
EditorLevelLibrary.save_current_level()
- filter = unreal.ARFilter(
+ _filter = unreal.ARFilter(
class_names=["LevelSequence"],
package_paths=[asset_dir],
recursive_paths=False)
- sequences = ar.get_assets(filter)
- filter = unreal.ARFilter(
+ sequences = ar.get_assets(_filter)
+ _filter = unreal.ARFilter(
class_names=["World"],
- package_paths=[str(Path(asset_dir).parent.as_posix())],
+ package_paths=[asset_dir],
recursive_paths=True)
- maps = ar.get_assets(filter)
+ maps = ar.get_assets(_filter)
# There should be only one map in the list
- EditorLevelLibrary.load_level(maps[0].get_full_name())
+ EditorLevelLibrary.load_level(maps[0].get_asset().get_path_name())
level_sequence = sequences[0].get_asset()
@@ -401,12 +321,18 @@ class CameraLoader(plugin.Loader):
root = "/Game/Ayon"
namespace = container.get('namespace').replace(f"{root}/", "")
ms_asset = namespace.split('/')[0]
- filter = unreal.ARFilter(
+ _filter = unreal.ARFilter(
class_names=["LevelSequence"],
package_paths=[f"{root}/{ms_asset}"],
recursive_paths=False)
- sequences = ar.get_assets(filter)
+ sequences = ar.get_assets(_filter)
master_sequence = sequences[0].get_asset()
+ _filter = unreal.ARFilter(
+ class_names=["World"],
+ package_paths=[f"{root}/{ms_asset}"],
+ recursive_paths=False)
+ levels = ar.get_assets(_filter)
+ master_level = levels[0].get_asset().get_path_name()
sequences = [master_sequence]
@@ -418,26 +344,20 @@ class CameraLoader(plugin.Loader):
for t in tracks:
if t.get_class() == unreal.MovieSceneSubTrack.static_class():
subscene_track = t
- break
if subscene_track:
sections = subscene_track.get_sections()
for ss in sections:
if ss.get_sequence().get_name() == sequence_name:
parent = s
sub_scene = ss
- # subscene_track.remove_section(ss)
break
sequences.append(ss.get_sequence())
- # Update subscenes indexes.
- i = 0
- for ss in sections:
+ for i, ss in enumerate(sections):
ss.set_row_index(i)
- i += 1
-
if parent:
break
- assert parent, "Could not find the parent sequence"
+ assert parent, "Could not find the parent sequence"
EditorAssetLibrary.delete_asset(level_sequence.get_path_name())
@@ -466,33 +386,63 @@ class CameraLoader(plugin.Loader):
str(representation["data"]["path"])
)
+ # Set range of all sections
+ # Changing the range of the section is not enough. We need to change
+ # the frame of all the keys in the section.
+ project_name = legacy_io.active_project()
+ asset = container.get('asset')
+ data = get_asset_by_name(project_name, asset)["data"]
+
+ for possessable in new_sequence.get_possessables():
+ for tracks in possessable.get_tracks():
+ for section in tracks.get_sections():
+ section.set_range(
+ data.get('clipIn'),
+ data.get('clipOut') + 1)
+ for channel in section.get_all_channels():
+ for key in channel.get_keys():
+ old_time = key.get_time().get_editor_property(
+ 'frame_number')
+ old_time_value = old_time.get_editor_property(
+ 'value')
+ new_time = old_time_value + (
+ data.get('clipIn') - data.get('frameStart')
+ )
+ key.set_time(unreal.FrameNumber(value=new_time))
+
data = {
"representation": str(representation["_id"]),
"parent": str(representation["parent"])
}
- unreal_pipeline.imprint(
- "{}/{}".format(asset_dir, container.get('container_name')), data)
+ imprint(f"{asset_dir}/{container.get('container_name')}", data)
EditorLevelLibrary.save_current_level()
asset_content = EditorAssetLibrary.list_assets(
- asset_dir, recursive=True, include_folder=False)
+ f"{root}/{ms_asset}", recursive=True, include_folder=False)
for a in asset_content:
EditorAssetLibrary.save_asset(a)
EditorLevelLibrary.load_level(master_level)
+ if curr_level_sequence:
+ LevelSequenceLib.open_level_sequence(curr_level_sequence)
+ LevelSequenceLib.set_current_time(curr_time)
+ LevelSequenceLib.set_lock_camera_cut_to_viewport(is_cam_lock)
+
+ editor_subsystem.set_level_viewport_camera_info(vp_loc, vp_rot)
+
def remove(self, container):
- path = Path(container.get("namespace"))
- parent_path = str(path.parent.as_posix())
+ asset_dir = container.get('namespace')
+ path = Path(asset_dir)
ar = unreal.AssetRegistryHelpers.get_asset_registry()
- filter = unreal.ARFilter(
+ _filter = unreal.ARFilter(
class_names=["LevelSequence"],
- package_paths=[f"{str(path.as_posix())}"],
+ package_paths=[asset_dir],
recursive_paths=False)
- sequences = ar.get_assets(filter)
+ sequences = ar.get_assets(_filter)
if not sequences:
raise Exception("Could not find sequence.")
@@ -500,11 +450,11 @@ class CameraLoader(plugin.Loader):
world = ar.get_asset_by_object_path(
EditorLevelLibrary.get_editor_world().get_path_name())
- filter = unreal.ARFilter(
+ _filter = unreal.ARFilter(
class_names=["World"],
- package_paths=[f"{parent_path}"],
+ package_paths=[asset_dir],
recursive_paths=True)
- maps = ar.get_assets(filter)
+ maps = ar.get_assets(_filter)
# There should be only one map in the list
if not maps:
@@ -513,7 +463,7 @@ class CameraLoader(plugin.Loader):
map = maps[0]
EditorLevelLibrary.save_all_dirty_levels()
- EditorLevelLibrary.load_level(map.get_full_name())
+ EditorLevelLibrary.load_level(map.get_asset().get_path_name())
# Remove the camera from the level.
actors = EditorLevelLibrary.get_all_level_actors()
@@ -523,7 +473,7 @@ class CameraLoader(plugin.Loader):
EditorLevelLibrary.destroy_actor(a)
EditorLevelLibrary.save_all_dirty_levels()
- EditorLevelLibrary.load_level(world.get_full_name())
+ EditorLevelLibrary.load_level(world.get_asset().get_path_name())
# There should be only one sequence in the path.
sequence_name = sequences[0].asset_name
@@ -534,12 +484,18 @@ class CameraLoader(plugin.Loader):
root = "/Game/Ayon"
namespace = container.get('namespace').replace(f"{root}/", "")
ms_asset = namespace.split('/')[0]
- filter = unreal.ARFilter(
+ _filter = unreal.ARFilter(
class_names=["LevelSequence"],
package_paths=[f"{root}/{ms_asset}"],
recursive_paths=False)
- sequences = ar.get_assets(filter)
+ sequences = ar.get_assets(_filter)
master_sequence = sequences[0].get_asset()
+ _filter = unreal.ARFilter(
+ class_names=["World"],
+ package_paths=[f"{root}/{ms_asset}"],
+ recursive_paths=False)
+ levels = ar.get_assets(_filter)
+ master_level = levels[0].get_full_name()
sequences = [master_sequence]
@@ -547,10 +503,13 @@ class CameraLoader(plugin.Loader):
for s in sequences:
tracks = s.get_master_tracks()
subscene_track = None
+ visibility_track = None
for t in tracks:
if t.get_class() == unreal.MovieSceneSubTrack.static_class():
subscene_track = t
- break
+ if (t.get_class() ==
+ unreal.MovieSceneLevelVisibilityTrack.static_class()):
+ visibility_track = t
if subscene_track:
sections = subscene_track.get_sections()
for ss in sections:
@@ -560,23 +519,48 @@ class CameraLoader(plugin.Loader):
break
sequences.append(ss.get_sequence())
# Update subscenes indexes.
- i = 0
- for ss in sections:
+ for i, ss in enumerate(sections):
ss.set_row_index(i)
- i += 1
+ if visibility_track:
+ sections = visibility_track.get_sections()
+ for ss in sections:
+ if (unreal.Name(f"{container.get('asset')}_map_camera")
+ in ss.get_level_names()):
+ visibility_track.remove_section(ss)
+ # Update visibility sections indexes.
+ i = -1
+ prev_name = []
+ for ss in sections:
+ if prev_name != ss.get_level_names():
+ i += 1
+ ss.set_row_index(i)
+ prev_name = ss.get_level_names()
if parent:
break
assert parent, "Could not find the parent sequence"
- EditorAssetLibrary.delete_directory(str(path.as_posix()))
+ # Create a temporary level to delete the layout level.
+ EditorLevelLibrary.save_all_dirty_levels()
+ EditorAssetLibrary.make_directory(f"{root}/tmp")
+ tmp_level = f"{root}/tmp/temp_map"
+ if not EditorAssetLibrary.does_asset_exist(f"{tmp_level}.temp_map"):
+ EditorLevelLibrary.new_level(tmp_level)
+ else:
+ EditorLevelLibrary.load_level(tmp_level)
+
+ # Delete the layout directory.
+ EditorAssetLibrary.delete_directory(asset_dir)
+
+ EditorLevelLibrary.load_level(master_level)
+ EditorAssetLibrary.delete_directory(f"{root}/tmp")
# Check if there isn't any more assets in the parent folder, and
# delete it if not.
asset_content = EditorAssetLibrary.list_assets(
- parent_path, recursive=False, include_folder=True
+ path.parent.as_posix(), recursive=False, include_folder=True
)
if len(asset_content) == 0:
- EditorAssetLibrary.delete_directory(parent_path)
+ EditorAssetLibrary.delete_directory(path.parent.as_posix())
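The camera loader above now resolves levels through the Asset Registry and loads them by object path instead of `get_full_name()`. A minimal sketch of that lookup pattern (not part of the patch), assuming the Unreal Editor Python environment and illustrative `root`/`ms_asset` values:

```python
import unreal


def load_master_level(root, ms_asset):
    """Find the single World asset under the hierarchy root and load it.

    Sketch of the ARFilter pattern used above; 'root' and 'ms_asset' are
    assumed to be e.g. "/Game/Ayon" and the top hierarchy folder name.
    """
    ar = unreal.AssetRegistryHelpers.get_asset_registry()
    _filter = unreal.ARFilter(
        class_names=["World"],
        package_paths=[f"{root}/{ms_asset}"],
        recursive_paths=False)
    levels = ar.get_assets(_filter)
    if not levels:
        raise RuntimeError("Could not find the master level.")
    # 'get_path_name()' yields a loadable object path, unlike the
    # "Class /path" string returned by 'get_full_name()'.
    master_level = levels[0].get_asset().get_path_name()
    unreal.EditorLevelLibrary.load_level(master_level)
    return master_level
```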
diff --git a/openpype/hosts/unreal/plugins/load/load_layout.py b/openpype/hosts/unreal/plugins/load/load_layout.py
index e5f32c3412..86b2e1456c 100644
--- a/openpype/hosts/unreal/plugins/load/load_layout.py
+++ b/openpype/hosts/unreal/plugins/load/load_layout.py
@@ -5,15 +5,18 @@ import collections
from pathlib import Path
import unreal
-from unreal import EditorAssetLibrary
-from unreal import EditorLevelLibrary
-from unreal import EditorLevelUtils
-from unreal import AssetToolsHelpers
-from unreal import FBXImportType
-from unreal import MovieSceneLevelVisibilityTrack
-from unreal import MovieSceneSubTrack
+from unreal import (
+ EditorAssetLibrary,
+ EditorLevelLibrary,
+ EditorLevelUtils,
+ AssetToolsHelpers,
+ FBXImportType,
+ MovieSceneLevelVisibilityTrack,
+ MovieSceneSubTrack,
+ LevelSequenceEditorBlueprintLibrary as LevelSequenceLib,
+)
-from openpype.client import get_asset_by_name, get_assets, get_representations
+from openpype.client import get_asset_by_name, get_representations
from openpype.pipeline import (
discover_loader_plugins,
loaders_from_representation,
@@ -25,7 +28,13 @@ from openpype.pipeline import (
from openpype.pipeline.context_tools import get_current_project_asset
from openpype.settings import get_current_project_settings
from openpype.hosts.unreal.api import plugin
-from openpype.hosts.unreal.api import pipeline as unreal_pipeline
+from openpype.hosts.unreal.api.pipeline import (
+ generate_sequence,
+ set_sequence_hierarchy,
+ create_container,
+ imprint,
+ ls,
+)
class LayoutLoader(plugin.Loader):
@@ -91,77 +100,6 @@ class LayoutLoader(plugin.Loader):
return None
- @staticmethod
- def _set_sequence_hierarchy(
- seq_i, seq_j, max_frame_i, min_frame_j, max_frame_j, map_paths
- ):
- # Get existing sequencer tracks or create them if they don't exist
- tracks = seq_i.get_master_tracks()
- subscene_track = None
- visibility_track = None
- for t in tracks:
- if t.get_class() == unreal.MovieSceneSubTrack.static_class():
- subscene_track = t
- if (t.get_class() ==
- unreal.MovieSceneLevelVisibilityTrack.static_class()):
- visibility_track = t
- if not subscene_track:
- subscene_track = seq_i.add_master_track(unreal.MovieSceneSubTrack)
- if not visibility_track:
- visibility_track = seq_i.add_master_track(
- unreal.MovieSceneLevelVisibilityTrack)
-
- # Create the sub-scene section
- subscenes = subscene_track.get_sections()
- subscene = None
- for s in subscenes:
- if s.get_editor_property('sub_sequence') == seq_j:
- subscene = s
- break
- if not subscene:
- subscene = subscene_track.add_section()
- subscene.set_row_index(len(subscene_track.get_sections()))
- subscene.set_editor_property('sub_sequence', seq_j)
- subscene.set_range(
- min_frame_j,
- max_frame_j + 1)
-
- # Create the visibility section
- ar = unreal.AssetRegistryHelpers.get_asset_registry()
- maps = []
- for m in map_paths:
- # Unreal requires to load the level to get the map name
- EditorLevelLibrary.save_all_dirty_levels()
- EditorLevelLibrary.load_level(m)
- maps.append(str(ar.get_asset_by_object_path(m).asset_name))
-
- vis_section = visibility_track.add_section()
- index = len(visibility_track.get_sections())
-
- vis_section.set_range(
- min_frame_j,
- max_frame_j + 1)
- vis_section.set_visibility(unreal.LevelVisibility.VISIBLE)
- vis_section.set_row_index(index)
- vis_section.set_level_names(maps)
-
- if min_frame_j > 1:
- hid_section = visibility_track.add_section()
- hid_section.set_range(
- 1,
- min_frame_j)
- hid_section.set_visibility(unreal.LevelVisibility.HIDDEN)
- hid_section.set_row_index(index)
- hid_section.set_level_names(maps)
- if max_frame_j < max_frame_i:
- hid_section = visibility_track.add_section()
- hid_section.set_range(
- max_frame_j + 1,
- max_frame_i + 1)
- hid_section.set_visibility(unreal.LevelVisibility.HIDDEN)
- hid_section.set_row_index(index)
- hid_section.set_level_names(maps)
-
def _transform_from_basis(self, transform, basis):
"""Transform a transform from a basis to a new basis."""
# Get the basis matrix
@@ -352,63 +290,6 @@ class LayoutLoader(plugin.Loader):
sec_params = section.get_editor_property('params')
sec_params.set_editor_property('animation', animation)
- @staticmethod
- def _generate_sequence(h, h_dir):
- tools = unreal.AssetToolsHelpers().get_asset_tools()
-
- sequence = tools.create_asset(
- asset_name=h,
- package_path=h_dir,
- asset_class=unreal.LevelSequence,
- factory=unreal.LevelSequenceFactoryNew()
- )
-
- project_name = legacy_io.active_project()
- asset_data = get_asset_by_name(
- project_name,
- h_dir.split('/')[-1],
- fields=["_id", "data.fps"]
- )
-
- start_frames = []
- end_frames = []
-
- elements = list(get_assets(
- project_name,
- parent_ids=[asset_data["_id"]],
- fields=["_id", "data.clipIn", "data.clipOut"]
- ))
- for e in elements:
- start_frames.append(e.get('data').get('clipIn'))
- end_frames.append(e.get('data').get('clipOut'))
-
- elements.extend(get_assets(
- project_name,
- parent_ids=[e["_id"]],
- fields=["_id", "data.clipIn", "data.clipOut"]
- ))
-
- min_frame = min(start_frames)
- max_frame = max(end_frames)
-
- sequence.set_display_rate(
- unreal.FrameRate(asset_data.get('data').get("fps"), 1.0))
- sequence.set_playback_start(min_frame)
- sequence.set_playback_end(max_frame)
-
- tracks = sequence.get_master_tracks()
- track = None
- for t in tracks:
- if (t.get_class() ==
- unreal.MovieSceneCameraCutTrack.static_class()):
- track = t
- break
- if not track:
- track = sequence.add_master_track(
- unreal.MovieSceneCameraCutTrack)
-
- return sequence, (min_frame, max_frame)
-
def _get_repre_docs_by_version_id(self, data):
version_ids = {
element.get("version")
@@ -696,7 +577,7 @@ class LayoutLoader(plugin.Loader):
]
if not existing_sequences:
- sequence, frame_range = self._generate_sequence(h, h_dir)
+ sequence, frame_range = generate_sequence(h, h_dir)
sequences.append(sequence)
frame_ranges.append(frame_range)
@@ -716,7 +597,7 @@ class LayoutLoader(plugin.Loader):
# sequences and frame_ranges have the same length
for i in range(0, len(sequences) - 1):
- self._set_sequence_hierarchy(
+ set_sequence_hierarchy(
sequences[i], sequences[i + 1],
frame_ranges[i][1],
frame_ranges[i + 1][0], frame_ranges[i + 1][1],
@@ -729,7 +610,7 @@ class LayoutLoader(plugin.Loader):
shot.set_playback_start(0)
shot.set_playback_end(data.get('clipOut') - data.get('clipIn') + 1)
if sequences:
- self._set_sequence_hierarchy(
+ set_sequence_hierarchy(
sequences[-1], shot,
frame_ranges[-1][1],
data.get('clipIn'), data.get('clipOut'),
@@ -740,12 +621,12 @@ class LayoutLoader(plugin.Loader):
loaded_assets = self._process(self.fname, asset_dir, shot)
for s in sequences:
- EditorAssetLibrary.save_asset(s.get_full_name())
+ EditorAssetLibrary.save_asset(s.get_path_name())
EditorLevelLibrary.save_current_level()
# Create Asset Container
- unreal_pipeline.create_container(
+ create_container(
container=container_name, path=asset_dir)
data = {
@@ -761,11 +642,13 @@ class LayoutLoader(plugin.Loader):
"family": context["representation"]["context"]["family"],
"loaded_assets": loaded_assets
}
- unreal_pipeline.imprint(
+ imprint(
"{}/{}".format(asset_dir, container_name), data)
+ save_dir = hierarchy_dir_list[0] if create_sequences else asset_dir
+
asset_content = EditorAssetLibrary.list_assets(
- asset_dir, recursive=True, include_folder=False)
+ save_dir, recursive=True, include_folder=False)
for a in asset_content:
EditorAssetLibrary.save_asset(a)
@@ -781,16 +664,24 @@ class LayoutLoader(plugin.Loader):
ar = unreal.AssetRegistryHelpers.get_asset_registry()
+ curr_level_sequence = LevelSequenceLib.get_current_level_sequence()
+ curr_time = LevelSequenceLib.get_current_time()
+ is_cam_lock = LevelSequenceLib.is_camera_cut_locked_to_viewport()
+
+ editor_subsystem = unreal.UnrealEditorSubsystem()
+ vp_loc, vp_rot = editor_subsystem.get_level_viewport_camera_info()
+
root = "/Game/Ayon"
asset_dir = container.get('namespace')
context = representation.get("context")
+ hierarchy = context.get('hierarchy').split("/")
+
sequence = None
master_level = None
if create_sequences:
- hierarchy = context.get('hierarchy').split("/")
h_dir = f"{root}/{hierarchy[0]}"
h_asset = hierarchy[0]
master_level = f"{h_dir}/{h_asset}_map.{h_asset}_map"
@@ -819,7 +710,7 @@ class LayoutLoader(plugin.Loader):
recursive_paths=False)
levels = ar.get_assets(filter)
- layout_level = levels[0].get_full_name()
+ layout_level = levels[0].get_asset().get_path_name()
EditorLevelLibrary.save_all_dirty_levels()
EditorLevelLibrary.load_level(layout_level)
@@ -843,13 +734,15 @@ class LayoutLoader(plugin.Loader):
"parent": str(representation["parent"]),
"loaded_assets": loaded_assets
}
- unreal_pipeline.imprint(
+ imprint(
"{}/{}".format(asset_dir, container.get('container_name')), data)
EditorLevelLibrary.save_current_level()
+ save_dir = f"{root}/{hierarchy[0]}" if create_sequences else asset_dir
+
asset_content = EditorAssetLibrary.list_assets(
- asset_dir, recursive=True, include_folder=False)
+ save_dir, recursive=True, include_folder=False)
for a in asset_content:
EditorAssetLibrary.save_asset(a)
@@ -859,6 +752,13 @@ class LayoutLoader(plugin.Loader):
elif prev_level:
EditorLevelLibrary.load_level(prev_level)
+ if curr_level_sequence:
+ LevelSequenceLib.open_level_sequence(curr_level_sequence)
+ LevelSequenceLib.set_current_time(curr_time)
+ LevelSequenceLib.set_lock_camera_cut_to_viewport(is_cam_lock)
+
+ editor_subsystem.set_level_viewport_camera_info(vp_loc, vp_rot)
+
def remove(self, container):
"""
Delete the layout. First, check if the assets loaded with the layout
@@ -870,7 +770,7 @@ class LayoutLoader(plugin.Loader):
root = "/Game/Ayon"
path = Path(container.get("namespace"))
- containers = unreal_pipeline.ls()
+ containers = ls()
layout_containers = [
c for c in containers
if (c.get('asset_name') != container.get('asset_name') and
@@ -919,7 +819,7 @@ class LayoutLoader(plugin.Loader):
package_paths=[f"{root}/{ms_asset}"],
recursive_paths=False)
levels = ar.get_assets(_filter)
- master_level = levels[0].get_full_name()
+ master_level = levels[0].get_asset().get_path_name()
sequences = [master_sequence]
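Both Unreal loaders now snapshot the artist's Sequencer and viewport state before an update and restore it afterwards. A hedged sketch of that save/restore wrapper, assuming the Unreal Editor Python environment; `update_fn` is a hypothetical callable performing the actual container update:

```python
import unreal
from unreal import LevelSequenceEditorBlueprintLibrary as LevelSequenceLib


def with_restored_editor_state(update_fn):
    """Run 'update_fn', then put Sequencer and viewport back as they were."""
    curr_level_sequence = LevelSequenceLib.get_current_level_sequence()
    curr_time = LevelSequenceLib.get_current_time()
    is_cam_lock = LevelSequenceLib.is_camera_cut_locked_to_viewport()

    editor_subsystem = unreal.UnrealEditorSubsystem()
    vp_loc, vp_rot = editor_subsystem.get_level_viewport_camera_info()

    try:
        update_fn()
    finally:
        if curr_level_sequence:
            LevelSequenceLib.open_level_sequence(curr_level_sequence)
            LevelSequenceLib.set_current_time(curr_time)
            LevelSequenceLib.set_lock_camera_cut_to_viewport(is_cam_lock)
        editor_subsystem.set_level_viewport_camera_info(vp_loc, vp_rot)
```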
diff --git a/openpype/hosts/unreal/plugins/load/load_uasset.py b/openpype/hosts/unreal/plugins/load/load_uasset.py
index 7606bc14e4..30f63abe39 100644
--- a/openpype/hosts/unreal/plugins/load/load_uasset.py
+++ b/openpype/hosts/unreal/plugins/load/load_uasset.py
@@ -21,6 +21,8 @@ class UAssetLoader(plugin.Loader):
icon = "cube"
color = "orange"
+ extension = "uasset"
+
def load(self, context, name, namespace, options):
"""Load and containerise representation into Content Browser.
@@ -42,26 +44,29 @@ class UAssetLoader(plugin.Loader):
root = "/Game/Ayon/Assets"
asset = context.get('asset').get('name')
suffix = "_CON"
- if asset:
- asset_name = "{}_{}".format(asset, name)
- else:
- asset_name = "{}".format(name)
-
+ asset_name = f"{asset}_{name}" if asset else f"{name}"
tools = unreal.AssetToolsHelpers().get_asset_tools()
asset_dir, container_name = tools.create_unique_asset_name(
f"{root}/{asset}/{name}", suffix=""
)
- container_name += suffix
+ unique_number = 1
+ while unreal.EditorAssetLibrary.does_directory_exist(
+ f"{asset_dir}_{unique_number:02}"
+ ):
+ unique_number += 1
+
+ asset_dir = f"{asset_dir}_{unique_number:02}"
+ container_name = f"{container_name}_{unique_number:02}{suffix}"
unreal.EditorAssetLibrary.make_directory(asset_dir)
destination_path = asset_dir.replace(
- "/Game",
- Path(unreal.Paths.project_content_dir()).as_posix(),
- 1)
+ "/Game", Path(unreal.Paths.project_content_dir()).as_posix(), 1)
- shutil.copy(self.fname, f"{destination_path}/{name}.uasset")
+ shutil.copy(
+ self.fname,
+ f"{destination_path}/{name}_{unique_number:02}.{self.extension}")
# Create Asset Container
unreal_pipeline.create_container(
@@ -77,7 +82,7 @@ class UAssetLoader(plugin.Loader):
"loader": str(self.__class__.__name__),
"representation": context["representation"]["_id"],
"parent": context["representation"]["parent"],
- "family": context["representation"]["context"]["family"]
+ "family": context["representation"]["context"]["family"],
}
unreal_pipeline.imprint(f"{asset_dir}/{container_name}", data)
@@ -96,10 +101,10 @@ class UAssetLoader(plugin.Loader):
asset_dir = container["namespace"]
name = representation["context"]["subset"]
+ unique_number = container["container_name"].split("_")[-2]
+
destination_path = asset_dir.replace(
- "/Game",
- Path(unreal.Paths.project_content_dir()).as_posix(),
- 1)
+ "/Game", Path(unreal.Paths.project_content_dir()).as_posix(), 1)
asset_content = unreal.EditorAssetLibrary.list_assets(
asset_dir, recursive=False, include_folder=True
@@ -107,22 +112,24 @@ class UAssetLoader(plugin.Loader):
for asset in asset_content:
obj = ar.get_asset_by_object_path(asset).get_asset()
- if not obj.get_class().get_name() == 'AyonAssetContainer':
+ if obj.get_class().get_name() != "AyonAssetContainer":
unreal.EditorAssetLibrary.delete_asset(asset)
update_filepath = get_representation_path(representation)
- shutil.copy(update_filepath, f"{destination_path}/{name}.uasset")
+ shutil.copy(
+ update_filepath,
+ f"{destination_path}/{name}_{unique_number}.{self.extension}")
- container_path = "{}/{}".format(container["namespace"],
- container["objectName"])
+ container_path = f'{container["namespace"]}/{container["objectName"]}'
# update metadata
unreal_pipeline.imprint(
container_path,
{
"representation": str(representation["_id"]),
- "parent": str(representation["parent"])
- })
+ "parent": str(representation["parent"]),
+ }
+ )
asset_content = unreal.EditorAssetLibrary.list_assets(
asset_dir, recursive=True, include_folder=True
@@ -143,3 +150,13 @@ class UAssetLoader(plugin.Loader):
if len(asset_content) == 0:
unreal.EditorAssetLibrary.delete_directory(parent_path)
+
+
+class UMapLoader(UAssetLoader):
+ """Load Level."""
+
+ families = ["uasset"]
+ label = "Load Level"
+ representations = ["umap"]
+
+ extension = "umap"
diff --git a/openpype/hosts/unreal/plugins/publish/collect_instance_members.py b/openpype/hosts/unreal/plugins/publish/collect_instance_members.py
index 46ca51ab7e..de10e7b119 100644
--- a/openpype/hosts/unreal/plugins/publish/collect_instance_members.py
+++ b/openpype/hosts/unreal/plugins/publish/collect_instance_members.py
@@ -24,7 +24,7 @@ class CollectInstanceMembers(pyblish.api.InstancePlugin):
ar = unreal.AssetRegistryHelpers.get_asset_registry()
inst_path = instance.data.get('instance_path')
- inst_name = instance.data.get('objectName')
+ inst_name = inst_path.split('/')[-1]
pub_instance = ar.get_asset_by_object_path(
f"{inst_path}.{inst_name}").get_asset()
diff --git a/openpype/hosts/unreal/plugins/publish/collect_render_instances.py b/openpype/hosts/unreal/plugins/publish/collect_render_instances.py
index a352b2c3f3..dad0310dfc 100644
--- a/openpype/hosts/unreal/plugins/publish/collect_render_instances.py
+++ b/openpype/hosts/unreal/plugins/publish/collect_render_instances.py
@@ -103,8 +103,8 @@ class CollectRenderInstances(pyblish.api.InstancePlugin):
new_instance.data["representations"] = []
repr = {
- 'frameStart': s.get('frame_range')[0],
- 'frameEnd': s.get('frame_range')[1],
+ 'frameStart': instance.data["frameStart"],
+ 'frameEnd': instance.data["frameEnd"],
'name': 'png',
'ext': 'png',
'files': frames,
diff --git a/openpype/hosts/unreal/plugins/publish/extract_uasset.py b/openpype/hosts/unreal/plugins/publish/extract_uasset.py
index f719df2a82..48b62faa97 100644
--- a/openpype/hosts/unreal/plugins/publish/extract_uasset.py
+++ b/openpype/hosts/unreal/plugins/publish/extract_uasset.py
@@ -11,16 +11,17 @@ class ExtractUAsset(publish.Extractor):
label = "Extract UAsset"
hosts = ["unreal"]
- families = ["uasset"]
+ families = ["uasset", "umap"]
optional = True
def process(self, instance):
+ extension = (
+ "umap" if "umap" in instance.data.get("families") else "uasset")
ar = unreal.AssetRegistryHelpers.get_asset_registry()
self.log.info("Performing extraction..")
-
staging_dir = self.staging_dir(instance)
- filename = "{}.uasset".format(instance.name)
+ filename = f"{instance.name}.{extension}"
members = instance.data.get("members", [])
@@ -36,13 +37,15 @@ class ExtractUAsset(publish.Extractor):
shutil.copy(sys_path, staging_dir)
+ self.log.info(f"instance.data: {instance.data}")
+
if "representations" not in instance.data:
instance.data["representations"] = []
representation = {
- 'name': 'uasset',
- 'ext': 'uasset',
- 'files': filename,
+ "name": extension,
+ "ext": extension,
+ "files": filename,
"stagingDir": staging_dir,
}
instance.data["representations"].append(representation)
diff --git a/openpype/hosts/unreal/plugins/publish/validate_sequence_frames.py b/openpype/hosts/unreal/plugins/publish/validate_sequence_frames.py
index e6584e130f..76bb25fac3 100644
--- a/openpype/hosts/unreal/plugins/publish/validate_sequence_frames.py
+++ b/openpype/hosts/unreal/plugins/publish/validate_sequence_frames.py
@@ -31,8 +31,8 @@ class ValidateSequenceFrames(pyblish.api.InstancePlugin):
frames = list(collection.indexes)
current_range = (frames[0], frames[-1])
- required_range = (data["frameStart"],
- data["frameEnd"])
+ required_range = (data["clipIn"],
+ data["clipOut"])
if current_range != required_range:
raise ValueError(f"Invalid frame range: {current_range} - "
diff --git a/openpype/hosts/unreal/ue_workers.py b/openpype/hosts/unreal/ue_workers.py
index e7a690ac9c..2b7e1375e6 100644
--- a/openpype/hosts/unreal/ue_workers.py
+++ b/openpype/hosts/unreal/ue_workers.py
@@ -6,6 +6,8 @@ import subprocess
from distutils import dir_util
from pathlib import Path
from typing import List, Union
+import tempfile
+from distutils.dir_util import copy_tree
import openpype.hosts.unreal.lib as ue_lib
@@ -90,9 +92,20 @@ class UEProjectGenerationWorker(QtCore.QObject):
("Generating a new UE project ... 1 out of "
f"{stage_count}"))
+        # Need to copy the commandlet project to a temporary folder that
+        # users can write to without admin rights.
+ cmdlet_tmp = tempfile.TemporaryDirectory()
+ cmdlet_filename = cmdlet_project.name
+ cmdlet_dir = cmdlet_project.parent.as_posix()
+ cmdlet_tmp_name = Path(cmdlet_tmp.name)
+ cmdlet_tmp_file = cmdlet_tmp_name.joinpath(cmdlet_filename)
+ copy_tree(
+ cmdlet_dir,
+ cmdlet_tmp_name.as_posix())
+
commandlet_cmd = [
f"{ue_editor_exe.as_posix()}",
- f"{cmdlet_project.as_posix()}",
+ f"{cmdlet_tmp_file.as_posix()}",
"-run=AyonGenerateProject",
f"{project_file.resolve().as_posix()}",
]
@@ -111,6 +124,8 @@ class UEProjectGenerationWorker(QtCore.QObject):
gen_process.stdout.close()
return_code = gen_process.wait()
+ cmdlet_tmp.cleanup()
+
if return_code and return_code != 0:
msg = (
f"Failed to generate {self.project_name} "
diff --git a/openpype/lib/__init__.py b/openpype/lib/__init__.py
index 9eb7724a60..06de486f2e 100644
--- a/openpype/lib/__init__.py
+++ b/openpype/lib/__init__.py
@@ -1,6 +1,6 @@
# -*- coding: utf-8 -*-
# flake8: noqa E402
-"""Pype module API."""
+"""OpenPype lib functions."""
# add vendor to sys path based on Python version
import sys
import os
@@ -94,7 +94,8 @@ from .python_module_tools import (
modules_from_path,
recursive_bases_from_class,
classes_from_module,
- import_module_from_dirpath
+ import_module_from_dirpath,
+ is_func_signature_supported,
)
from .profiles_filtering import (
@@ -243,6 +244,7 @@ __all__ = [
"recursive_bases_from_class",
"classes_from_module",
"import_module_from_dirpath",
+ "is_func_signature_supported",
"get_transcode_temp_directory",
"should_convert_for_ffmpeg",
diff --git a/openpype/lib/events.py b/openpype/lib/events.py
index bed00fe659..dca58fcf93 100644
--- a/openpype/lib/events.py
+++ b/openpype/lib/events.py
@@ -6,10 +6,9 @@ import inspect
import logging
import weakref
from uuid import uuid4
-try:
- from weakref import WeakMethod
-except Exception:
- from openpype.lib.python_2_comp import WeakMethod
+
+from .python_2_comp import WeakMethod
+from .python_module_tools import is_func_signature_supported
class MissingEventSystem(Exception):
@@ -80,40 +79,8 @@ class EventCallback(object):
# Get expected arguments from function spec
# - positional arguments are always preferred
- expect_args = False
- expect_kwargs = False
- fake_event = "fake"
- if hasattr(inspect, "signature"):
- # Python 3 using 'Signature' object where we try to bind arg
- # or kwarg. Using signature is recommended approach based on
- # documentation.
- sig = inspect.signature(func)
- try:
- sig.bind(fake_event)
- expect_args = True
- except TypeError:
- pass
-
- try:
- sig.bind(event=fake_event)
- expect_kwargs = True
- except TypeError:
- pass
-
- else:
- # In Python 2 'signature' is not available so 'getcallargs' is used
- # - 'getcallargs' is marked as deprecated since Python 3.0
- try:
- inspect.getcallargs(func, fake_event)
- expect_args = True
- except TypeError:
- pass
-
- try:
- inspect.getcallargs(func, event=fake_event)
- expect_kwargs = True
- except TypeError:
- pass
+ expect_args = is_func_signature_supported(func, "fake")
+ expect_kwargs = is_func_signature_supported(func, event="fake")
self._func_ref = func_ref
self._func_name = func_name
diff --git a/openpype/lib/execute.py b/openpype/lib/execute.py
index ef456395e7..6f52efdfcc 100644
--- a/openpype/lib/execute.py
+++ b/openpype/lib/execute.py
@@ -190,7 +190,7 @@ def run_openpype_process(*args, **kwargs):
Example:
```
- run_openpype_process("run", "")
+ run_detached_process("run", "")
```
Args:
diff --git a/openpype/lib/project_backpack.py b/openpype/lib/project_backpack.py
index 07107ec011..674eaa3b91 100644
--- a/openpype/lib/project_backpack.py
+++ b/openpype/lib/project_backpack.py
@@ -113,26 +113,29 @@ def pack_project(
project_name
))
- roots = project_doc["config"]["roots"]
- # Determine root directory of project
- source_root = None
- source_root_name = None
- for root_name, root_value in roots.items():
- if source_root is not None:
- raise ValueError(
- "Packaging is supported only for single root projects"
- )
- source_root = root_value
- source_root_name = root_name
+ root_path = None
+ source_root = {}
+ project_source_path = None
+ if not only_documents:
+ roots = project_doc["config"]["roots"]
+ # Determine root directory of project
+ source_root_name = None
+ for root_name, root_value in roots.items():
+ if source_root is not None:
+ raise ValueError(
+ "Packaging is supported only for single root projects"
+ )
+ source_root = root_value
+ source_root_name = root_name
- root_path = source_root[platform.system().lower()]
- print("Using root \"{}\" with path \"{}\"".format(
- source_root_name, root_path
- ))
+ root_path = source_root[platform.system().lower()]
+ print("Using root \"{}\" with path \"{}\"".format(
+ source_root_name, root_path
+ ))
- project_source_path = os.path.join(root_path, project_name)
- if not os.path.exists(project_source_path):
- raise ValueError("Didn't find source of project files")
+ project_source_path = os.path.join(root_path, project_name)
+ if not os.path.exists(project_source_path):
+ raise ValueError("Didn't find source of project files")
# Determine zip filepath where data will be stored
if not destination_dir:
@@ -273,8 +276,7 @@ def unpack_project(
low_platform = platform.system().lower()
project_name = metadata["project_name"]
- source_root = metadata["root"]
- root_path = source_root[low_platform]
+ root_path = metadata["root"].get(low_platform)
# Drop existing collection
replace_project_documents(project_name, docs, database_name)
diff --git a/openpype/lib/python_2_comp.py b/openpype/lib/python_2_comp.py
index d7137dbe9c..091c51a6f6 100644
--- a/openpype/lib/python_2_comp.py
+++ b/openpype/lib/python_2_comp.py
@@ -1,41 +1,44 @@
import weakref
-class _weak_callable:
- def __init__(self, obj, func):
- self.im_self = obj
- self.im_func = func
+WeakMethod = getattr(weakref, "WeakMethod", None)
- def __call__(self, *args, **kws):
- if self.im_self is None:
- return self.im_func(*args, **kws)
- else:
- return self.im_func(self.im_self, *args, **kws)
+if WeakMethod is None:
+ class _WeakCallable:
+ def __init__(self, obj, func):
+ self.im_self = obj
+ self.im_func = func
+
+ def __call__(self, *args, **kws):
+ if self.im_self is None:
+ return self.im_func(*args, **kws)
+ else:
+ return self.im_func(self.im_self, *args, **kws)
-class WeakMethod:
- """ Wraps a function or, more importantly, a bound method in
- a way that allows a bound method's object to be GCed, while
- providing the same interface as a normal weak reference. """
+ class WeakMethod:
+ """ Wraps a function or, more importantly, a bound method in
+ a way that allows a bound method's object to be GCed, while
+ providing the same interface as a normal weak reference. """
- def __init__(self, fn):
- try:
- self._obj = weakref.ref(fn.im_self)
- self._meth = fn.im_func
- except AttributeError:
- # It's not a bound method
- self._obj = None
- self._meth = fn
+ def __init__(self, fn):
+ try:
+ self._obj = weakref.ref(fn.im_self)
+ self._meth = fn.im_func
+ except AttributeError:
+ # It's not a bound method
+ self._obj = None
+ self._meth = fn
- def __call__(self):
- if self._dead():
- return None
- return _weak_callable(self._getobj(), self._meth)
+ def __call__(self):
+ if self._dead():
+ return None
+ return _WeakCallable(self._getobj(), self._meth)
- def _dead(self):
- return self._obj is not None and self._obj() is None
+ def _dead(self):
+ return self._obj is not None and self._obj() is None
- def _getobj(self):
- if self._obj is None:
- return None
- return self._obj()
+ def _getobj(self):
+ if self._obj is None:
+ return None
+ return self._obj()
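With the compat module above, `WeakMethod` resolves to `weakref.WeakMethod` on Python 3 and only falls back to the hand-rolled class where it is missing. A short usage sketch; the `Handler` class is illustrative:

```python
from openpype.lib.python_2_comp import WeakMethod


class Handler:
    def on_event(self, event):
        print("handled", event)


handler = Handler()
ref = WeakMethod(handler.on_event)

method = ref()          # still alive: returns a callable
if method is not None:
    method("demo")

del method
del handler             # on CPython the object is collected right away
assert ref() is None    # the weak reference no longer resolves
```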
diff --git a/openpype/lib/python_module_tools.py b/openpype/lib/python_module_tools.py
index 9e8e94842c..a10263f991 100644
--- a/openpype/lib/python_module_tools.py
+++ b/openpype/lib/python_module_tools.py
@@ -230,3 +230,70 @@ def import_module_from_dirpath(dirpath, folder_name, dst_module_name=None):
dirpath, folder_name, dst_module_name
)
return module
+
+
+def is_func_signature_supported(func, *args, **kwargs):
+ """Check if a function signature supports passed args and kwargs.
+
+    This check does not actually call the function; it only looks at whether
+    the function can be called with the given arguments.
+
+ Notes:
+        This does NOT check if the function would work with the passed
+        arguments, only if they can be passed in. If the function has *args,
+        **kwargs in its parameters, this will always return 'True'.
+
+ Example:
+ >>> def my_function(my_number):
+ ... return my_number + 1
+ ...
+ >>> is_func_signature_supported(my_function, 1)
+ True
+ >>> is_func_signature_supported(my_function, 1, 2)
+ False
+ >>> is_func_signature_supported(my_function, my_number=1)
+ True
+ >>> is_func_signature_supported(my_function, number=1)
+ False
+ >>> is_func_signature_supported(my_function, "string")
+ True
+ >>> def my_other_function(*args, **kwargs):
+ ... my_function(*args, **kwargs)
+ ...
+ >>> is_func_signature_supported(
+ ... my_other_function,
+ ... "string",
+ ... 1,
+ ... other=None
+ ... )
+ True
+
+ Args:
+ func (function): A function where the signature should be tested.
+ *args (tuple[Any]): Positional arguments for function signature.
+ **kwargs (dict[str, Any]): Keyword arguments for function signature.
+
+ Returns:
+        bool: Whether the function signature accepts the passed arguments.
+ """
+
+ if hasattr(inspect, "signature"):
+ # Python 3 using 'Signature' object where we try to bind arg
+ # or kwarg. Using signature is recommended approach based on
+ # documentation.
+ sig = inspect.signature(func)
+ try:
+ sig.bind(*args, **kwargs)
+ return True
+ except TypeError:
+ pass
+
+ else:
+ # In Python 2 'signature' is not available so 'getcallargs' is used
+ # - 'getcallargs' is marked as deprecated since Python 3.0
+ try:
+ inspect.getcallargs(func, *args, **kwargs)
+ return True
+ except TypeError:
+ pass
+ return False
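`EventCallback` (see the events.py change above) uses this helper to decide whether a callback wants the event positionally, as a keyword, or not at all. A small usage sketch; the `notify` wrapper is hypothetical:

```python
from openpype.lib import is_func_signature_supported


def notify(callback, event):
    """Call 'callback' with 'event' positionally, as keyword, or without it."""
    if is_func_signature_supported(callback, event):
        callback(event)
    elif is_func_signature_supported(callback, event=event):
        callback(event=event)
    else:
        callback()


notify(lambda event: print("got", event), "fake")     # positional
notify(lambda **kwargs: print(kwargs), "fake")        # keyword only
notify(lambda: print("no event needed"), "fake")      # no arguments
```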
diff --git a/openpype/modules/deadline/abstract_submit_deadline.py b/openpype/modules/deadline/abstract_submit_deadline.py
index 558a637e4b..e3e94d50cd 100644
--- a/openpype/modules/deadline/abstract_submit_deadline.py
+++ b/openpype/modules/deadline/abstract_submit_deadline.py
@@ -582,7 +582,6 @@ class AbstractSubmitDeadline(pyblish.api.InstancePlugin):
metadata_folder = metadata_folder.replace(orig_scene,
new_scene)
instance.data["publishRenderMetadataFolder"] = metadata_folder
-
self.log.info("Scene name was switched {} -> {}".format(
orig_scene, new_scene
))
@@ -663,7 +662,7 @@ class AbstractSubmitDeadline(pyblish.api.InstancePlugin):
# test if there is instance of workfile waiting
# to be published.
- assert i.data["publish"] is True, (
+ assert i.data.get("publish", True) is True, (
"Workfile (scene) must be published along")
return i
diff --git a/openpype/modules/deadline/plugins/publish/collect_deadline_server_from_instance.py b/openpype/modules/deadline/plugins/publish/collect_deadline_server_from_instance.py
index 9981bead3e..2de6073e29 100644
--- a/openpype/modules/deadline/plugins/publish/collect_deadline_server_from_instance.py
+++ b/openpype/modules/deadline/plugins/publish/collect_deadline_server_from_instance.py
@@ -5,23 +5,26 @@ This is resolving index of server lists stored in `deadlineServers` instance
attribute or using default server if that attribute doesn't exists.
"""
+from maya import cmds
+
import pyblish.api
class CollectDeadlineServerFromInstance(pyblish.api.InstancePlugin):
"""Collect Deadline Webservice URL from instance."""
- order = pyblish.api.CollectorOrder + 0.415
+ # Run before collect_render.
+ order = pyblish.api.CollectorOrder + 0.005
label = "Deadline Webservice from the Instance"
families = ["rendering", "renderlayer"]
+ hosts = ["maya"]
def process(self, instance):
instance.data["deadlineUrl"] = self._collect_deadline_url(instance)
self.log.info(
"Using {} for submission.".format(instance.data["deadlineUrl"]))
- @staticmethod
- def _collect_deadline_url(render_instance):
+ def _collect_deadline_url(self, render_instance):
# type: (pyblish.api.Instance) -> str
"""Get Deadline Webservice URL from render instance.
@@ -49,8 +52,16 @@ class CollectDeadlineServerFromInstance(pyblish.api.InstancePlugin):
default_server = render_instance.context.data["defaultDeadline"]
instance_server = render_instance.data.get("deadlineServers")
if not instance_server:
+ self.log.debug("Using default server.")
return default_server
+        # Get instance server as string.
+ if isinstance(instance_server, int):
+ instance_server = cmds.getAttr(
+ "{}.deadlineServers".format(render_instance.data["objset"]),
+ asString=True
+ )
+
default_servers = deadline_settings["deadline_urls"]
project_servers = (
render_instance.context.data
@@ -58,15 +69,23 @@ class CollectDeadlineServerFromInstance(pyblish.api.InstancePlugin):
["deadline"]
["deadline_servers"]
)
- deadline_servers = {
+ if not project_servers:
+ self.log.debug("Not project servers found. Using default servers.")
+ return default_servers[instance_server]
+
+ project_enabled_servers = {
k: default_servers[k]
for k in project_servers
if k in default_servers
}
- # This is Maya specific and may not reflect real selection of deadline
- # url as dictionary keys in Python 2 are not ordered
- return deadline_servers[
- list(deadline_servers.keys())[
- int(render_instance.data.get("deadlineServers"))
- ]
- ]
+
+ msg = (
+ "\"{}\" server on instance is not enabled in project settings."
+ " Enabled project servers:\n{}".format(
+ instance_server, project_enabled_servers
+ )
+ )
+ assert instance_server in project_enabled_servers, msg
+
+ self.log.debug("Using project approved server.")
+ return project_enabled_servers[instance_server]
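Stripped of the Maya-specific parts (the enum index is resolved to a name with `cmds.getAttr(..., asString=True)` as above), the server selection boils down to filtering the system-wide URLs by the project's enabled servers. A pure-Python sketch with illustrative data:

```python
def resolve_deadline_url(instance_server, default_servers, project_servers):
    """Pick the webservice URL for the server name chosen on an instance."""
    if not project_servers:
        # No project-level list; fall back to the system-wide entry.
        return default_servers[instance_server]

    project_enabled_servers = {
        name: default_servers[name]
        for name in project_servers
        if name in default_servers
    }
    if instance_server not in project_enabled_servers:
        raise ValueError(
            '"{}" server on instance is not enabled in project settings. '
            "Enabled project servers:\n{}".format(
                instance_server, project_enabled_servers))
    return project_enabled_servers[instance_server]


urls = {"default": "http://deadline:8082", "gpu": "http://deadline-gpu:8082"}
print(resolve_deadline_url("gpu", urls, ["gpu"]))   # project-approved server
```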
diff --git a/openpype/modules/deadline/plugins/publish/collect_default_deadline_server.py b/openpype/modules/deadline/plugins/publish/collect_default_deadline_server.py
index e6ad6a9aa1..1a0d615dc3 100644
--- a/openpype/modules/deadline/plugins/publish/collect_default_deadline_server.py
+++ b/openpype/modules/deadline/plugins/publish/collect_default_deadline_server.py
@@ -4,9 +4,21 @@ import pyblish.api
class CollectDefaultDeadlineServer(pyblish.api.ContextPlugin):
- """Collect default Deadline Webservice URL."""
+ """Collect default Deadline Webservice URL.
- order = pyblish.api.CollectorOrder + 0.410
+    DL webservice addresses must be configured first in System Settings for
+    the project settings enum to work.
+
+    The default webservice can be overridden by
+    `project_settings/deadline/deadline_servers`. Currently only a single url
+    is expected.
+
+    This url can be overridden by some hosts directly on instances with
+    `CollectDeadlineServerFromInstance`.
+ """
+
+    # Run before CollectDeadlineServerFromInstance.
+ order = pyblish.api.CollectorOrder + 0.0025
label = "Default Deadline Webservice"
pass_mongo_url = False
@@ -23,3 +35,16 @@ class CollectDefaultDeadlineServer(pyblish.api.ContextPlugin):
context.data["defaultDeadline"] = deadline_module.deadline_urls["default"] # noqa: E501
context.data["deadlinePassMongoUrl"] = self.pass_mongo_url
+
+ deadline_servers = (context.data
+ ["project_settings"]
+ ["deadline"]
+ ["deadline_servers"])
+ if deadline_servers:
+ deadline_server_name = deadline_servers[0]
+ deadline_webservice = deadline_module.deadline_urls.get(
+ deadline_server_name)
+ if deadline_webservice:
+ context.data["defaultDeadline"] = deadline_webservice
+ self.log.debug("Overriding from project settings with {}".format( # noqa: E501
+ deadline_webservice))
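The collector now lets the first project-level `deadline_servers` entry override the system-wide default URL. A pure-Python sketch of that precedence (the dictionaries are illustrative):

```python
def resolve_default_deadline(deadline_urls, project_deadline_servers):
    """Return the default webservice, honoring a project-level override."""
    default = deadline_urls["default"]
    if project_deadline_servers:
        # Currently only a single project server entry is expected.
        override = deadline_urls.get(project_deadline_servers[0])
        if override:
            return override
    return default


urls = {"default": "http://deadline:8082", "fast": "http://deadline-fast:8082"}
print(resolve_default_deadline(urls, ["fast"]))  # project override wins
print(resolve_default_deadline(urls, []))        # falls back to the default
```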
diff --git a/openpype/modules/deadline/plugins/publish/submit_fusion_deadline.py b/openpype/modules/deadline/plugins/publish/submit_fusion_deadline.py
index 717391100d..a48596c6bf 100644
--- a/openpype/modules/deadline/plugins/publish/submit_fusion_deadline.py
+++ b/openpype/modules/deadline/plugins/publish/submit_fusion_deadline.py
@@ -73,7 +73,7 @@ class FusionSubmitDeadline(
def process(self, instance):
if not instance.data.get("farm"):
- self.log.info("Skipping local instance.")
+ self.log.debug("Skipping local instance.")
return
attribute_values = self.get_attr_values_from_data(
diff --git a/openpype/modules/deadline/plugins/publish/submit_houdini_render_deadline.py b/openpype/modules/deadline/plugins/publish/submit_houdini_render_deadline.py
index 73ab689c9a..254914a850 100644
--- a/openpype/modules/deadline/plugins/publish/submit_houdini_render_deadline.py
+++ b/openpype/modules/deadline/plugins/publish/submit_houdini_render_deadline.py
@@ -1,19 +1,27 @@
+import hou
+
import os
-import json
+import attr
import getpass
from datetime import datetime
-
-import requests
import pyblish.api
-# import hou ???
-
from openpype.pipeline import legacy_io
from openpype.tests.lib import is_in_tests
+from openpype_modules.deadline import abstract_submit_deadline
+from openpype_modules.deadline.abstract_submit_deadline import DeadlineJobInfo
from openpype.lib import is_running_from_build
-class HoudiniSubmitRenderDeadline(pyblish.api.InstancePlugin):
+@attr.s
+class DeadlinePluginInfo():
+ SceneFile = attr.ib(default=None)
+ OutputDriver = attr.ib(default=None)
+ Version = attr.ib(default=None)
+ IgnoreInputs = attr.ib(default=True)
+
+
+class HoudiniSubmitDeadline(abstract_submit_deadline.AbstractSubmitDeadline):
"""Submit Solaris USD Render ROPs to Deadline.
Renders are submitted to a Deadline Web Service as
@@ -30,83 +38,57 @@ class HoudiniSubmitRenderDeadline(pyblish.api.InstancePlugin):
order = pyblish.api.IntegratorOrder
hosts = ["houdini"]
families = ["usdrender",
- "redshift_rop"]
+ "redshift_rop",
+ "arnold_rop",
+ "mantra_rop",
+ "karma_rop",
+ "vray_rop"]
targets = ["local"]
+ use_published = True
- def process(self, instance):
+ def get_job_info(self):
+ job_info = DeadlineJobInfo(Plugin="Houdini")
+ instance = self._instance
context = instance.context
- code = context.data["code"]
+
filepath = context.data["currentFile"]
filename = os.path.basename(filepath)
- comment = context.data.get("comment", "")
- deadline_user = context.data.get("deadlineUser", getpass.getuser())
- jobname = "%s - %s" % (filename, instance.name)
- # Support code prefix label for batch name
- batch_name = filename
- if code:
- batch_name = "{0} - {1}".format(code, batch_name)
+ job_info.Name = "{} - {}".format(filename, instance.name)
+ job_info.BatchName = filename
+ job_info.Plugin = "Houdini"
+ job_info.UserName = context.data.get(
+ "deadlineUser", getpass.getuser())
if is_in_tests():
- batch_name += datetime.now().strftime("%d%m%Y%H%M%S")
+ job_info.BatchName += datetime.now().strftime("%d%m%Y%H%M%S")
- # Output driver to render
- driver = instance[0]
-
- # StartFrame to EndFrame by byFrameStep
+ # Deadline requires integers in frame range
frames = "{start}-{end}x{step}".format(
start=int(instance.data["frameStart"]),
end=int(instance.data["frameEnd"]),
step=int(instance.data["byFrameStep"]),
)
+ job_info.Frames = frames
- # Documentation for keys available at:
- # https://docs.thinkboxsoftware.com
- # /products/deadline/8.0/1_User%20Manual/manual
- # /manual-submission.html#job-info-file-options
- payload = {
- "JobInfo": {
- # Top-level group name
- "BatchName": batch_name,
+ job_info.Pool = instance.data.get("primaryPool")
+ job_info.SecondaryPool = instance.data.get("secondaryPool")
+ job_info.ChunkSize = instance.data.get("chunkSize", 10)
+ job_info.Comment = context.data.get("comment")
- # Job name, as seen in Monitor
- "Name": jobname,
-
- # Arbitrary username, for visualisation in Monitor
- "UserName": deadline_user,
-
- "Plugin": "Houdini",
- "Pool": instance.data.get("primaryPool"),
- "secondaryPool": instance.data.get("secondaryPool"),
- "Frames": frames,
-
- "ChunkSize": instance.data.get("chunkSize", 10),
-
- "Comment": comment
- },
- "PluginInfo": {
- # Input
- "SceneFile": filepath,
- "OutputDriver": driver.path(),
-
- # Mandatory for Deadline
- # Houdini version without patch number
- "Version": hou.applicationVersionString().rsplit(".", 1)[0],
-
- "IgnoreInputs": True
- },
-
- # Mandatory for Deadline, may be empty
- "AuxFiles": []
- }
-
- # Include critical environment variables with submission + api.Session
keys = [
- # Submit along the current Avalon tool setup that we launched
- # this application with so the Render Slave can build its own
- # similar environment using it, e.g. "maya2018;vray4.x;yeti3.1.9"
- "AVALON_TOOLS"
+ "FTRACK_API_KEY",
+ "FTRACK_API_USER",
+ "FTRACK_SERVER",
+ "OPENPYPE_SG_USER",
+ "AVALON_PROJECT",
+ "AVALON_ASSET",
+ "AVALON_TASK",
+ "AVALON_APP_NAME",
+ "OPENPYPE_DEV",
+ "OPENPYPE_LOG_NO_COLORS",
+ "OPENPYPE_VERSION"
]
# Add OpenPype version if we are running from build.
@@ -114,61 +96,50 @@ class HoudiniSubmitRenderDeadline(pyblish.api.InstancePlugin):
keys.append("OPENPYPE_VERSION")
# Add mongo url if it's enabled
- if context.data.get("deadlinePassMongoUrl"):
+ if self._instance.context.data.get("deadlinePassMongoUrl"):
keys.append("OPENPYPE_MONGO")
environment = dict({key: os.environ[key] for key in keys
if key in os.environ}, **legacy_io.Session)
+ for key in keys:
+ value = environment.get(key)
+ if value:
+ job_info.EnvironmentKeyValue[key] = value
- payload["JobInfo"].update({
- "EnvironmentKeyValue%d" % index: "{key}={value}".format(
- key=key,
- value=environment[key]
- ) for index, key in enumerate(environment)
- })
+        # Recognize the job as an OpenPype render job to turn events On/Off
+ job_info.EnvironmentKeyValue["OPENPYPE_RENDER_JOB"] = "1"
- # Include OutputFilename entries
- # The first entry also enables double-click to preview rendered
- # frames from Deadline Monitor
- output_data = {}
for i, filepath in enumerate(instance.data["files"]):
dirname = os.path.dirname(filepath)
fname = os.path.basename(filepath)
- output_data["OutputDirectory%d" % i] = dirname.replace("\\", "/")
- output_data["OutputFilename%d" % i] = fname
+ job_info.OutputDirectory += dirname.replace("\\", "/")
+ job_info.OutputFilename += fname
- # For now ensure destination folder exists otherwise HUSK
- # will fail to render the output image. This is supposedly fixed
- # in new production builds of Houdini
- # TODO Remove this workaround with Houdini 18.0.391+
- if not os.path.exists(dirname):
- self.log.info("Ensuring output directory exists: %s" %
- dirname)
- os.makedirs(dirname)
+ return job_info
- payload["JobInfo"].update(output_data)
+ def get_plugin_info(self):
- self.submit(instance, payload)
+ instance = self._instance
+ context = instance.context
- def submit(self, instance, payload):
+ # Output driver to render
+ driver = hou.node(instance.data["instance_node"])
+ hou_major_minor = hou.applicationVersionString().rsplit(".", 1)[0]
- AVALON_DEADLINE = legacy_io.Session.get("AVALON_DEADLINE",
- "http://localhost:8082")
- assert AVALON_DEADLINE, "Requires AVALON_DEADLINE"
+ plugin_info = DeadlinePluginInfo(
+ SceneFile=context.data["currentFile"],
+ OutputDriver=driver.path(),
+ Version=hou_major_minor,
+ IgnoreInputs=True
+ )
- plugin = payload["JobInfo"]["Plugin"]
- self.log.info("Using Render Plugin : {}".format(plugin))
+ return attr.asdict(plugin_info)
- self.log.info("Submitting..")
- self.log.debug(json.dumps(payload, indent=4, sort_keys=True))
-
- # E.g. http://192.168.0.1:8082/api/jobs
- url = "{}/api/jobs".format(AVALON_DEADLINE)
- response = requests.post(url, json=payload)
- if not response.ok:
- raise Exception(response.text)
+ def process(self, instance):
+ super(HoudiniSubmitDeadline, self).process(instance)
+ # TODO: Avoid the need for this logic here, needed for submit publish
# Store output dir for unified publisher (filesequence)
output_dir = os.path.dirname(instance.data["files"][0])
instance.data["outputDir"] = output_dir
- instance.data["deadlineSubmissionJob"] = response.json()
+ instance.data["toBeRenderedOn"] = "deadline"
diff --git a/openpype/modules/deadline/plugins/publish/submit_max_deadline.py b/openpype/modules/deadline/plugins/publish/submit_max_deadline.py
index c728b6b9c7..b6a30e36b7 100644
--- a/openpype/modules/deadline/plugins/publish/submit_max_deadline.py
+++ b/openpype/modules/deadline/plugins/publish/submit_max_deadline.py
@@ -78,7 +78,7 @@ class MaxSubmitDeadline(abstract_submit_deadline.AbstractSubmitDeadline,
job_info.BatchName = src_filename
job_info.Plugin = instance.data["plugin"]
job_info.UserName = context.data.get("deadlineUser", getpass.getuser())
-
+ job_info.EnableAutoTimeout = True
# Deadline requires integers in frame range
frames = "{start}-{end}".format(
start=int(instance.data["frameStart"]),
@@ -133,7 +133,8 @@ class MaxSubmitDeadline(abstract_submit_deadline.AbstractSubmitDeadline,
# Add list of expected files to job
# ---------------------------------
exp = instance.data.get("expectedFiles")
- for filepath in exp:
+
+ for filepath in self._iter_expected_files(exp):
job_info.OutputDirectory += os.path.dirname(filepath)
job_info.OutputFilename += os.path.basename(filepath)
@@ -162,10 +163,11 @@ class MaxSubmitDeadline(abstract_submit_deadline.AbstractSubmitDeadline,
instance = self._instance
filepath = self.scene_path
- expected_files = instance.data["expectedFiles"]
- if not expected_files:
+ files = instance.data["expectedFiles"]
+ if not files:
raise RuntimeError("No Render Elements found!")
- output_dir = os.path.dirname(expected_files[0])
+ first_file = next(self._iter_expected_files(files))
+ output_dir = os.path.dirname(first_file)
instance.data["outputDir"] = output_dir
instance.data["toBeRenderedOn"] = "deadline"
@@ -196,25 +198,22 @@ class MaxSubmitDeadline(abstract_submit_deadline.AbstractSubmitDeadline,
else:
plugin_data["DisableMultipass"] = 1
- expected_files = instance.data.get("expectedFiles")
- if not expected_files:
+ files = instance.data.get("expectedFiles")
+ if not files:
raise RuntimeError("No render elements found")
- old_output_dir = os.path.dirname(expected_files[0])
+ first_file = next(self._iter_expected_files(files))
+ old_output_dir = os.path.dirname(first_file)
output_beauty = RenderSettings().get_render_output(instance.name,
old_output_dir)
- filepath = self.from_published_scene()
-
- def _clean_name(path):
- return os.path.splitext(os.path.basename(path))[0]
-
- new_scene = _clean_name(filepath)
- orig_scene = _clean_name(instance.context.data["currentFile"])
-
- output_beauty = output_beauty.replace(orig_scene, new_scene)
- output_beauty = output_beauty.replace("\\", "/")
- plugin_data["RenderOutput"] = output_beauty
-
+ rgb_bname = os.path.basename(output_beauty)
+ dir = os.path.dirname(first_file)
+ beauty_name = f"{dir}/{rgb_bname}"
+ beauty_name = beauty_name.replace("\\", "/")
+ plugin_data["RenderOutput"] = beauty_name
+            # as 3dsmax ships in different language versions, force English
+ plugin_data["Language"] = "ENU"
renderer_class = get_current_renderer()
+
renderer = str(renderer_class).split(":")[0]
if renderer in [
"ART_Renderer",
@@ -226,14 +225,37 @@ class MaxSubmitDeadline(abstract_submit_deadline.AbstractSubmitDeadline,
]:
render_elem_list = RenderSettings().get_render_element()
for i, element in enumerate(render_elem_list):
- element = element.replace(orig_scene, new_scene)
- plugin_data["RenderElementOutputFilename%d" % i] = element # noqa
+ elem_bname = os.path.basename(element)
+ new_elem = f"{dir}/{elem_bname}"
+ new_elem = new_elem.replace("/", "\\")
+ plugin_data["RenderElementOutputFilename%d" % i] = new_elem # noqa
+
+ if renderer == "Redshift_Renderer":
+ plugin_data["redshift_SeparateAovFiles"] = instance.data.get(
+ "separateAovFiles")
self.log.debug("plugin data:{}".format(plugin_data))
plugin_info.update(plugin_data)
return job_info, plugin_info
+ def from_published_scene(self, replace_in_path=True):
+ instance = self._instance
+ if instance.data["renderer"] == "Redshift_Renderer":
+ self.log.debug("Using Redshift...published scene wont be used..")
+ replace_in_path = False
+ return replace_in_path
+
+ @staticmethod
+ def _iter_expected_files(exp):
+ if isinstance(exp[0], dict):
+ for _aov, files in exp[0].items():
+ for file in files:
+ yield file
+ else:
+ for file in exp:
+ yield file
+
@classmethod
def get_attribute_defs(cls):
defs = super(MaxSubmitDeadline, cls).get_attribute_defs()
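`expectedFiles` can be either a flat list of paths or a single dict keyed by AOV, which is why the submitter now iterates through the new helper. A standalone sketch with sample data:

```python
def iter_expected_files(exp):
    """Yield file paths whether 'expectedFiles' is flat or split per AOV."""
    if isinstance(exp[0], dict):
        for _aov, files in exp[0].items():
            for path in files:
                yield path
    else:
        for path in exp:
            yield path


flat = ["shot010.0001.exr", "shot010.0002.exr"]
per_aov = [{"beauty": ["beauty.0001.exr"], "diffuse": ["diffuse.0001.exr"]}]
print(list(iter_expected_files(flat)))
print(list(iter_expected_files(per_aov)))
```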
diff --git a/openpype/modules/deadline/plugins/publish/submit_nuke_deadline.py b/openpype/modules/deadline/plugins/publish/submit_nuke_deadline.py
index 5c598df94b..4900231783 100644
--- a/openpype/modules/deadline/plugins/publish/submit_nuke_deadline.py
+++ b/openpype/modules/deadline/plugins/publish/submit_nuke_deadline.py
@@ -86,7 +86,7 @@ class NukeSubmitDeadline(pyblish.api.InstancePlugin,
def process(self, instance):
if not instance.data.get("farm"):
- self.log.info("Skipping local instance.")
+ self.log.debug("Skipping local instance.")
return
instance.data["attributeValues"] = self.get_attr_values_from_data(
diff --git a/openpype/modules/deadline/plugins/publish/submit_publish_job.py b/openpype/modules/deadline/plugins/publish/submit_publish_job.py
index eeb813cb62..87b4ca64f4 100644
--- a/openpype/modules/deadline/plugins/publish/submit_publish_job.py
+++ b/openpype/modules/deadline/plugins/publish/submit_publish_job.py
@@ -118,11 +118,15 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
deadline_plugin = "OpenPype"
targets = ["local"]
- hosts = ["fusion", "max", "maya", "nuke",
+ hosts = ["fusion", "max", "maya", "nuke", "houdini",
"celaction", "aftereffects", "harmony"]
families = ["render.farm", "prerender.farm",
- "renderlayer", "imagesequence", "maxrender", "vrayscene"]
+ "renderlayer", "imagesequence",
+ "vrayscene", "maxrender",
+ "arnold_rop", "mantra_rop",
+ "karma_rop", "vray_rop",
+ "redshift_rop"]
aov_filter = {"maya": [r".*([Bb]eauty).*"],
"aftereffects": [r".*"], # for everything from AE
@@ -140,7 +144,8 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
"FTRACK_SERVER",
"AVALON_APP_NAME",
"OPENPYPE_USERNAME",
- "OPENPYPE_SG_USER",
+ "OPENPYPE_VERSION",
+ "OPENPYPE_SG_USER"
]
# Add OpenPype version if we are running from build.
@@ -275,7 +280,7 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
args = [
"--headless",
'publish',
- rootless_metadata_path,
+ '"{}"'.format(rootless_metadata_path),
"--targets", "deadline",
"--targets", "farm"
]
@@ -762,7 +767,7 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
"""
if not instance.data.get("farm"):
- self.log.info("Skipping local instance.")
+ self.log.debug("Skipping local instance.")
return
data = instance.data.copy()
@@ -1089,6 +1094,10 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
deadline_publish_job_id = \
self._submit_deadline_post_job(instance, render_job, instances)
+ # Inject deadline url to instances.
+ for inst in instances:
+ inst["deadlineUrl"] = self.deadline_url
+
# publish job file
publish_job = {
"asset": asset,
diff --git a/openpype/modules/deadline/plugins/publish/validate_deadline_pools.py b/openpype/modules/deadline/plugins/publish/validate_deadline_pools.py
index 7c8ab62d4d..e1c0595830 100644
--- a/openpype/modules/deadline/plugins/publish/validate_deadline_pools.py
+++ b/openpype/modules/deadline/plugins/publish/validate_deadline_pools.py
@@ -26,7 +26,7 @@ class ValidateDeadlinePools(OptionalPyblishPluginMixin,
def process(self, instance):
if not instance.data.get("farm"):
- self.log.info("Skipping local instance.")
+ self.log.debug("Skipping local instance.")
return
# get default deadline webservice url from deadline module
diff --git a/openpype/modules/ftrack/ftrack_server/lib.py b/openpype/modules/ftrack/ftrack_server/lib.py
index eb64063fab..2226c85ef9 100644
--- a/openpype/modules/ftrack/ftrack_server/lib.py
+++ b/openpype/modules/ftrack/ftrack_server/lib.py
@@ -196,7 +196,7 @@ class ProcessEventHub(SocketBaseEventHub):
{"pype_data.is_processed": False}
).sort(
[("pype_data.stored", pymongo.ASCENDING)]
- )
+ ).limit(100)
found = False
for event_data in not_processed_events:
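
The `.limit(100)` call above caps how many unprocessed ftrack events are pulled from MongoDB in one iteration, so a large backlog cannot stall a single processing cycle. A hedged sketch of the same cursor pattern with `pymongo`; the connection string and collection name are illustrative, not OpenPype's actual configuration:

```python
# Hedged sketch of the sort+limit cursor used above; connection string and
# collection name are illustrative only.
import pymongo

client = pymongo.MongoClient("mongodb://localhost:27017")
events = client["openpype"]["ftrack_events"]

# Oldest unprocessed events first, at most 100 per processing cycle.
not_processed_events = events.find(
    {"pype_data.is_processed": False}
).sort(
    [("pype_data.stored", pymongo.ASCENDING)]
).limit(100)

for event_data in not_processed_events:
    print(event_data["_id"])
```
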
diff --git a/openpype/modules/ftrack/lib/ftrack_action_handler.py b/openpype/modules/ftrack/lib/ftrack_action_handler.py
index 07b3a780a2..1be4353b26 100644
--- a/openpype/modules/ftrack/lib/ftrack_action_handler.py
+++ b/openpype/modules/ftrack/lib/ftrack_action_handler.py
@@ -234,6 +234,10 @@ class BaseAction(BaseHandler):
if not settings_roles:
return default
+ user_roles = {
+ role_name.lower()
+ for role_name in user_roles
+ }
for role_name in settings_roles:
if role_name.lower() in user_roles:
return True
@@ -264,8 +268,15 @@ class BaseAction(BaseHandler):
return user_entity
@classmethod
- def get_user_roles_from_event(cls, session, event):
- """Query user entity from event."""
+ def get_user_roles_from_event(cls, session, event, lower=True):
+ """Get user roles based on data in event.
+
+ Args:
+ session (ftrack_api.Session): Prepared ftrack session.
+ event (ftrack_api.event.Event): Event which is processed.
+            lower (Optional[bool]): Lowercase the role names. Default 'True'.
+ """
+
not_set = object()
user_roles = event["data"].get("user_roles", not_set)
@@ -273,7 +284,10 @@ class BaseAction(BaseHandler):
user_roles = []
user_entity = cls.get_user_entity_from_event(session, event)
for role in user_entity["user_security_roles"]:
- user_roles.append(role["security_role"]["name"].lower())
+ role_name = role["security_role"]["name"]
+ if lower:
+ role_name = role_name.lower()
+ user_roles.append(role_name)
event["data"]["user_roles"] = user_roles
return user_roles
@@ -322,7 +336,8 @@ class BaseAction(BaseHandler):
if not settings.get(self.settings_enabled_key, True):
return False
- user_role_list = self.get_user_roles_from_event(session, event)
+ user_role_list = self.get_user_roles_from_event(
+ session, event, lower=False)
if not self.roles_check(settings.get("role_list"), user_role_list):
return False
return True
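
With `lower=False` the action now receives role names exactly as configured in ftrack and lowercases both sides only at comparison time, so differently cased role names in settings and in ftrack still match. A small standalone sketch of that comparison (role names are made up for the example):

```python
# Case-insensitive role check mirroring BaseAction.roles_check above.
# Role names are made up for the example.
settings_roles = ["Administrator", "Project Manager"]
user_roles = ["administrator", "API"]

# Lowercase the user roles once, then compare lowered settings roles.
user_roles_lowered = {role_name.lower() for role_name in user_roles}

has_permission = any(
    role_name.lower() in user_roles_lowered
    for role_name in settings_roles
)
print(has_permission)  # True: "Administrator" matches "administrator"
```
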
diff --git a/openpype/modules/ftrack/scripts/sub_event_status.py b/openpype/modules/ftrack/scripts/sub_event_status.py
index dc5836e7f2..c6c2e9e1f6 100644
--- a/openpype/modules/ftrack/scripts/sub_event_status.py
+++ b/openpype/modules/ftrack/scripts/sub_event_status.py
@@ -296,9 +296,9 @@ def server_activity_validate_user(event):
if not user_ent:
return False
- role_list = ["Pypeclub", "Administrator"]
+ role_list = {"pypeclub", "administrator"}
for role in user_ent["user_security_roles"]:
- if role["security_role"]["name"] in role_list:
+ if role["security_role"]["name"].lower() in role_list:
return True
return False
diff --git a/openpype/modules/ftrack/tray/login_dialog.py b/openpype/modules/ftrack/tray/login_dialog.py
index f374a71178..a8abdaf191 100644
--- a/openpype/modules/ftrack/tray/login_dialog.py
+++ b/openpype/modules/ftrack/tray/login_dialog.py
@@ -1,5 +1,3 @@
-import os
-
import requests
from qtpy import QtCore, QtGui, QtWidgets
diff --git a/openpype/modules/kitsu/kitsu_module.py b/openpype/modules/kitsu/kitsu_module.py
index b91373af20..8d2d5ccd60 100644
--- a/openpype/modules/kitsu/kitsu_module.py
+++ b/openpype/modules/kitsu/kitsu_module.py
@@ -94,7 +94,7 @@ class KitsuModule(OpenPypeModule, IPluginPaths, ITrayAction):
return {
"publish": [os.path.join(current_dir, "plugins", "publish")],
- "actions": [os.path.join(current_dir, "actions")]
+ "actions": [os.path.join(current_dir, "actions")],
}
def cli(self, click_group):
@@ -128,15 +128,35 @@ def push_to_zou(login, password):
@click.option(
"-p", "--password", envvar="KITSU_PWD", help="Password for kitsu username"
)
-def sync_service(login, password):
+@click.option(
+ "-prj",
+ "--project",
+ "projects",
+ multiple=True,
+ default=[],
+ help="Sync specific kitsu projects",
+)
+@click.option(
+ "-lo",
+ "--listen-only",
+ "listen_only",
+ is_flag=True,
+ default=False,
+ help="Listen to events only without any syncing",
+)
+def sync_service(login, password, projects, listen_only):
"""Synchronize openpype database from Zou sever database.
Args:
login (str): Kitsu user login
password (str): Kitsu user password
+        projects (tuple): Specific Kitsu projects to sync.
+        listen_only (bool): Only listen to events without any syncing.
"""
from .utils.update_op_with_zou import sync_all_projects
from .utils.sync_service import start_listeners
- sync_all_projects(login, password)
+ if not listen_only:
+ sync_all_projects(login, password, filter_projects=projects)
+
start_listeners(login, password)
diff --git a/openpype/modules/kitsu/utils/update_op_with_zou.py b/openpype/modules/kitsu/utils/update_op_with_zou.py
index 4f4f0810bc..b495cd1bea 100644
--- a/openpype/modules/kitsu/utils/update_op_with_zou.py
+++ b/openpype/modules/kitsu/utils/update_op_with_zou.py
@@ -94,9 +94,7 @@ def update_op_assets(
if not item_doc: # Create asset
op_asset = create_op_asset(item)
insert_result = dbcon.insert_one(op_asset)
- item_doc = get_asset_by_id(
- project_name, insert_result.inserted_id
- )
+ item_doc = get_asset_by_id(project_name, insert_result.inserted_id)
# Update asset
item_data = deepcopy(item_doc["data"])
@@ -329,7 +327,7 @@ def write_project_to_op(project: dict, dbcon: AvalonMongoDB) -> UpdateOne:
"code": project_code,
"fps": float(project["fps"]),
"zou_id": project["id"],
- "active": project['project_status_name'] != "Closed",
+ "active": project["project_status_name"] != "Closed",
}
)
@@ -359,7 +357,10 @@ def write_project_to_op(project: dict, dbcon: AvalonMongoDB) -> UpdateOne:
def sync_all_projects(
- login: str, password: str, ignore_projects: list = None
+ login: str,
+ password: str,
+ ignore_projects: list = None,
+ filter_projects: tuple = None,
):
"""Update all OP projects in DB with Zou data.
@@ -367,6 +368,7 @@ def sync_all_projects(
login (str): Kitsu user login
password (str): Kitsu user password
ignore_projects (list): List of unsynced project names
+        filter_projects (tuple): Project names to restrict the sync to.
Raises:
gazu.exception.AuthFailedException: Wrong user login and/or password
"""
@@ -381,7 +383,24 @@ def sync_all_projects(
dbcon = AvalonMongoDB()
dbcon.install()
all_projects = gazu.project.all_projects()
- for project in all_projects:
+
+ project_to_sync = []
+
+ if filter_projects:
+ all_kitsu_projects = {p["name"]: p for p in all_projects}
+ for proj_name in filter_projects:
+ if proj_name in all_kitsu_projects:
+ project_to_sync.append(all_kitsu_projects[proj_name])
+ else:
+ log.info(
+ f"`{proj_name}` project does not exist in Kitsu."
+ f" Please make sure the project is spelled correctly."
+ )
+ else:
+        # all projects
+ project_to_sync = all_projects
+
+ for project in project_to_sync:
if ignore_projects and project["name"] in ignore_projects:
continue
sync_project_from_kitsu(dbcon, project)
@@ -408,14 +427,13 @@ def sync_project_from_kitsu(dbcon: AvalonMongoDB, project: dict):
# Get all statuses for projects from Kitsu
all_status = gazu.project.all_project_status()
for status in all_status:
- if project['project_status_id'] == status['id']:
- project['project_status_name'] = status['name']
+ if project["project_status_id"] == status["id"]:
+ project["project_status_name"] = status["name"]
break
# Do not sync closed kitsu project that is not found in openpype
- if (
- project['project_status_name'] == "Closed"
- and not get_project(project['name'])
+ if project["project_status_name"] == "Closed" and not get_project(
+ project["name"]
):
return
@@ -444,7 +462,7 @@ def sync_project_from_kitsu(dbcon: AvalonMongoDB, project: dict):
log.info("Project created: {}".format(project_name))
bulk_writes.append(write_project_to_op(project, dbcon))
- if project['project_status_name'] == "Closed":
+ if project["project_status_name"] == "Closed":
return
# Try to find project document
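
The new CLI options added earlier map to the `filter_projects` argument above and to skipping the full sync entirely. A hedged sketch of the resulting flow; credentials and project names are placeholders:

```python
# Hedged sketch of the sync-service flow wired by the new CLI options.
# Credentials and project names are placeholders.
from openpype.modules.kitsu.utils.update_op_with_zou import sync_all_projects
from openpype.modules.kitsu.utils.sync_service import start_listeners

login = "user@studio.example"
password = "kitsu-password"
projects = ("ProjectA", "ProjectB")  # -prj/--project, empty tuple means "all"
listen_only = False                  # -lo/--listen-only

if not listen_only:
    # Only the named projects are synced; unknown names are logged and skipped.
    sync_all_projects(login, password, filter_projects=projects)

# Event listening starts in both modes.
start_listeners(login, password)
```
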
diff --git a/openpype/modules/muster/muster.py b/openpype/modules/muster/muster.py
index 77b9214a5a..0cdb1230c8 100644
--- a/openpype/modules/muster/muster.py
+++ b/openpype/modules/muster/muster.py
@@ -1,7 +1,9 @@
import os
import json
+
import appdirs
import requests
+
from openpype.modules import OpenPypeModule, ITrayModule
@@ -110,16 +112,10 @@ class MusterModule(OpenPypeModule, ITrayModule):
self.save_credentials(token)
def save_credentials(self, token):
- """
- Save credentials to JSON file
- """
- data = {
- 'token': token
- }
+ """Save credentials to JSON file."""
- file = open(self.cred_path, 'w')
- file.write(json.dumps(data))
- file.close()
+ with open(self.cred_path, "w") as f:
+ json.dump({'token': token}, f)
def show_login(self):
"""
diff --git a/openpype/pipeline/publish/__init__.py b/openpype/pipeline/publish/__init__.py
index 72f3774e1a..0c57915c05 100644
--- a/openpype/pipeline/publish/__init__.py
+++ b/openpype/pipeline/publish/__init__.py
@@ -39,6 +39,7 @@ from .lib import (
apply_plugin_settings_automatically,
get_plugin_settings,
+ get_publish_instance_label,
)
from .abstract_expected_files import ExpectedFiles
@@ -85,6 +86,7 @@ __all__ = (
"apply_plugin_settings_automatically",
"get_plugin_settings",
+ "get_publish_instance_label",
"ExpectedFiles",
diff --git a/openpype/pipeline/publish/abstract_collect_render.py b/openpype/pipeline/publish/abstract_collect_render.py
index fd35ddb719..6877d556c3 100644
--- a/openpype/pipeline/publish/abstract_collect_render.py
+++ b/openpype/pipeline/publish/abstract_collect_render.py
@@ -167,16 +167,25 @@ class AbstractCollectRender(pyblish.api.ContextPlugin):
frame_start_render = int(render_instance.frameStart)
frame_end_render = int(render_instance.frameEnd)
+ # TODO: Refactor hacky frame range workaround below
if (render_instance.ignoreFrameHandleCheck or
int(context.data['frameStartHandle']) == frame_start_render
and int(context.data['frameEndHandle']) == frame_end_render): # noqa: W503, E501
-
+            # Only for Harmony, where the frame range cannot be set from the DB
handle_start = context.data['handleStart']
handle_end = context.data['handleEnd']
frame_start = context.data['frameStart']
frame_end = context.data['frameEnd']
frame_start_handle = context.data['frameStartHandle']
frame_end_handle = context.data['frameEndHandle']
+ elif (hasattr(render_instance, "frameStartHandle")
+ and hasattr(render_instance, "frameEndHandle")):
+ handle_start = int(render_instance.handleStart)
+ handle_end = int(render_instance.handleEnd)
+ frame_start = int(render_instance.frameStart)
+ frame_end = int(render_instance.frameEnd)
+ frame_start_handle = int(render_instance.frameStartHandle)
+ frame_end_handle = int(render_instance.frameEndHandle)
else:
handle_start = 0
handle_end = 0
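
The new `elif` branch above prefers the instance's own handle attributes when its range differs from the context, before falling back to zero handles. A simplified, standalone sketch of that selection order (`SimpleNamespace` stands in for the real render instance and the values are made up):

```python
# Simplified sketch of the frame-range resolution order used above.
from types import SimpleNamespace

context_data = {"frameStartHandle": 996, "frameEndHandle": 1105}
render_instance = SimpleNamespace(
    ignoreFrameHandleCheck=False,
    frameStart=1010, frameEnd=1020,
    handleStart=2, handleEnd=2,
    frameStartHandle=1008, frameEndHandle=1022,
)

if (render_instance.ignoreFrameHandleCheck
        or (context_data["frameStartHandle"] == render_instance.frameStart
            and context_data["frameEndHandle"] == render_instance.frameEnd)):
    # Instance range matches the context -> trust the context values.
    frame_start_handle = context_data["frameStartHandle"]
    frame_end_handle = context_data["frameEndHandle"]
elif hasattr(render_instance, "frameStartHandle") and hasattr(
        render_instance, "frameEndHandle"):
    # New branch: the instance carries its own range including handles.
    frame_start_handle = int(render_instance.frameStartHandle)
    frame_end_handle = int(render_instance.frameEndHandle)
else:
    # No usable handle information -> render range without handles.
    frame_start_handle = int(render_instance.frameStart)
    frame_end_handle = int(render_instance.frameEnd)

print(frame_start_handle, frame_end_handle)  # 1008 1022
```
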
diff --git a/openpype/pipeline/publish/lib.py b/openpype/pipeline/publish/lib.py
index 080f93e514..471be5ddb8 100644
--- a/openpype/pipeline/publish/lib.py
+++ b/openpype/pipeline/publish/lib.py
@@ -1,12 +1,10 @@
import os
import sys
-import types
import inspect
import copy
import tempfile
import xml.etree.ElementTree
-import six
import pyblish.util
import pyblish.plugin
import pyblish.api
@@ -14,7 +12,8 @@ import pyblish.api
from openpype.lib import (
Logger,
import_filepath,
- filter_profiles
+ filter_profiles,
+ is_func_signature_supported,
)
from openpype.settings import (
get_project_settings,
@@ -42,7 +41,9 @@ def get_template_name_profiles(
Args:
project_name (str): Name of project where to look for templates.
- project_settings(Dic[str, Any]): Prepared project settings.
+ project_settings (Dict[str, Any]): Prepared project settings.
+ logger (Optional[logging.Logger]): Logger object to be used instead
+ of default logger.
Returns:
List[Dict[str, Any]]: Publish template profiles.
@@ -103,7 +104,9 @@ def get_hero_template_name_profiles(
Args:
project_name (str): Name of project where to look for templates.
- project_settings(Dic[str, Any]): Prepared project settings.
+ project_settings (Dict[str, Any]): Prepared project settings.
+ logger (Optional[logging.Logger]): Logger object to be used instead
+ of default logger.
Returns:
List[Dict[str, Any]]: Publish template profiles.
@@ -172,9 +175,10 @@ def get_publish_template_name(
project_name (str): Name of project where to look for settings.
host_name (str): Name of host integration.
family (str): Family for which should be found template.
- task_name (str): Task name on which is intance working.
- task_type (str): Task type on which is intance working.
- project_setting (Dict[str, Any]): Prepared project settings.
+        task_name (str): Task name the instance is working on.
+        task_type (str): Task type the instance is working on.
+ project_settings (Dict[str, Any]): Prepared project settings.
+ hero (bool): Template is for hero version publishing.
logger (logging.Logger): Custom logger used for 'filter_profiles'
function.
@@ -264,19 +268,18 @@ def load_help_content_from_plugin(plugin):
def publish_plugins_discover(paths=None):
"""Find and return available pyblish plug-ins
- Overridden function from `pyblish` module to be able collect crashed files
- and reason of their crash.
+    Overridden function from the `pyblish` module, extended to collect
+    crashed files and the reason of their crash.
Arguments:
paths (list, optional): Paths to discover plug-ins from.
If no paths are provided, all paths are searched.
-
"""
# The only difference with `pyblish.api.discover`
result = DiscoverResult(pyblish.api.Plugin)
- plugins = dict()
+ plugins = {}
plugin_names = []
allow_duplicates = pyblish.plugin.ALLOW_DUPLICATES
@@ -302,7 +305,7 @@ def publish_plugins_discover(paths=None):
mod_name, mod_ext = os.path.splitext(fname)
- if not mod_ext == ".py":
+ if mod_ext != ".py":
continue
try:
@@ -320,6 +323,14 @@ def publish_plugins_discover(paths=None):
continue
for plugin in pyblish.plugin.plugins_from_module(module):
+ # Ignore base plugin classes
+ # NOTE 'pyblish.api.discover' does not ignore them!
+ if (
+ plugin is pyblish.api.Plugin
+ or plugin is pyblish.api.ContextPlugin
+ or plugin is pyblish.api.InstancePlugin
+ ):
+ continue
if not allow_duplicates and plugin.__name__ in plugin_names:
result.duplicated_plugins.append(plugin)
log.debug("Duplicate plug-in found: %s", plugin)
@@ -486,12 +497,26 @@ def filter_pyblish_plugins(plugins):
# iterate over plugins
for plugin in plugins[:]:
# Apply settings to plugins
- if hasattr(plugin, "apply_settings"):
+
+ apply_settings_func = getattr(plugin, "apply_settings", None)
+ if apply_settings_func is not None:
# Use classmethod 'apply_settings'
# - can be used to target settings from custom settings place
# - skip default behavior when successful
try:
- plugin.apply_settings(project_settings, system_settings)
+ # Support to pass only project settings
+                # Support passing only project settings
+                # - make sure both settings are passed whenever possible
+ both_supported = is_func_signature_supported(
+ apply_settings_func, project_settings, system_settings
+ )
+ project_supported = is_func_signature_supported(
+ apply_settings_func, project_settings
+ )
+ if not both_supported and project_supported:
+ plugin.apply_settings(project_settings)
+ else:
+ plugin.apply_settings(project_settings, system_settings)
except Exception:
log.warning(
@@ -525,10 +550,10 @@ def find_close_plugin(close_plugin_name, log):
def remote_publish(log, close_plugin_name=None, raise_error=False):
"""Loops through all plugins, logs to console. Used for tests.
- Args:
- log (openpype.lib.Logger)
- close_plugin_name (str): name of plugin with responsibility to
- close host app
+ Args:
+ log (Logger)
+ close_plugin_name (str): name of plugin with responsibility to
+ close host app
"""
# Error exit as soon as any error occurs.
error_format = "Failed {plugin.__name__}: {error} -- {error.traceback}"
@@ -837,3 +862,45 @@ def _validate_transient_template(project_name, template_name, anatomy):
raise ValueError(("There is not set \"folder\" template in \"{}\" anatomy" # noqa
" for project \"{}\"."
).format(template_name, project_name))
+
+
+def add_repre_files_for_cleanup(instance, repre):
+    """Explicitly mark representation files for deletion.
+
+    Should be used on intermediate files (e.g. review, thumbnails) that
+    should be deleted explicitly.
+ """
+ files = repre["files"]
+ staging_dir = repre.get("stagingDir")
+ if not staging_dir:
+ return
+
+ if isinstance(files, str):
+ files = [files]
+
+ for file_name in files:
+ expected_file = os.path.join(staging_dir, file_name)
+ instance.context.data["cleanupFullPaths"].append(expected_file)
+
+
+def get_publish_instance_label(instance):
+ """Try to get label from pyblish instance.
+
+    Values in instance data under the 'label' and 'name' keys are used first.
+    Then string conversion of the instance object ('instance._name') is used.
+
+ Todos:
+ Maybe 'subset' key could be used too.
+
+ Args:
+ instance (pyblish.api.Instance): Pyblish instance.
+
+ Returns:
+ str: Instance label.
+ """
+
+ return (
+ instance.data.get("label")
+ or instance.data.get("name")
+ or str(instance)
+ )
diff --git a/openpype/pipeline/publish/publish_plugins.py b/openpype/pipeline/publish/publish_plugins.py
index a38896ec8e..a67c8397b1 100644
--- a/openpype/pipeline/publish/publish_plugins.py
+++ b/openpype/pipeline/publish/publish_plugins.py
@@ -379,7 +379,9 @@ class ColormanagedPyblishPluginMixin(object):
# check if ext in lower case is in self.allowed_ext
if ext.lstrip(".").lower() not in self.allowed_ext:
- self.log.debug("Extension is not in allowed extensions.")
+ self.log.debug(
+ "Extension '{}' is not in allowed extensions.".format(ext)
+ )
return
if colorspace_settings is None:
@@ -393,8 +395,7 @@ class ColormanagedPyblishPluginMixin(object):
self.log.warning("No colorspace management was defined")
return
- self.log.info("Config data is : `{}`".format(
- config_data))
+ self.log.debug("Config data is: `{}`".format(config_data))
project_name = context.data["projectName"]
host_name = context.data["hostName"]
@@ -405,8 +406,7 @@ class ColormanagedPyblishPluginMixin(object):
if isinstance(filename, list):
filename = filename[0]
- self.log.debug("__ filename: `{}`".format(
- filename))
+ self.log.debug("__ filename: `{}`".format(filename))
# get matching colorspace from rules
colorspace = colorspace or get_imageio_colorspace_from_filepath(
@@ -415,8 +415,7 @@ class ColormanagedPyblishPluginMixin(object):
file_rules=file_rules,
project_settings=project_settings
)
- self.log.debug("__ colorspace: `{}`".format(
- colorspace))
+ self.log.debug("__ colorspace: `{}`".format(colorspace))
# infuse data to representation
if colorspace:
diff --git a/openpype/pipeline/workfile/workfile_template_builder.py b/openpype/pipeline/workfile/workfile_template_builder.py
index a3d7340367..896ed40f2d 100644
--- a/openpype/pipeline/workfile/workfile_template_builder.py
+++ b/openpype/pipeline/workfile/workfile_template_builder.py
@@ -43,6 +43,7 @@ from openpype.pipeline.load import (
get_contexts_for_repre_docs,
load_with_repre_context,
)
+
from openpype.pipeline.create import (
discover_legacy_creator_plugins,
CreateContext,
@@ -1246,6 +1247,16 @@ class PlaceholderLoadMixin(object):
loader_items = list(sorted(loader_items, key=lambda i: i["label"]))
options = options or {}
+
+ # Get families from all loaders excluding "*"
+ families = set()
+ for loader in loaders_by_name.values():
+ families.update(loader.families)
+ families.discard("*")
+
+ # Sort for readability
+ families = list(sorted(families))
+
return [
attribute_definitions.UISeparatorDef(),
attribute_definitions.UILabelDef("Main attributes"),
@@ -1272,11 +1283,11 @@ class PlaceholderLoadMixin(object):
" field \"inputLinks\""
)
),
- attribute_definitions.TextDef(
+ attribute_definitions.EnumDef(
"family",
label="Family",
default=options.get("family"),
- placeholder="model, look, ..."
+ items=families
),
attribute_definitions.TextDef(
"representation",
diff --git a/openpype/plugins/publish/cleanup.py b/openpype/plugins/publish/cleanup.py
index b90c88890d..57cc9c0ab5 100644
--- a/openpype/plugins/publish/cleanup.py
+++ b/openpype/plugins/publish/cleanup.py
@@ -81,7 +81,8 @@ class CleanUp(pyblish.api.InstancePlugin):
staging_dir = instance.data.get("stagingDir", None)
if not staging_dir:
- self.log.info("Staging dir not set.")
+ self.log.debug("Skipping cleanup. Staging dir not set "
+ "on instance: {}.".format(instance))
return
if not os.path.normpath(staging_dir).startswith(temp_root):
@@ -90,7 +91,7 @@ class CleanUp(pyblish.api.InstancePlugin):
return
if not os.path.exists(staging_dir):
- self.log.info("No staging directory found: %s" % staging_dir)
+ self.log.debug("No staging directory found at: %s" % staging_dir)
return
if instance.data.get("stagingDir_persistent"):
@@ -131,7 +132,9 @@ class CleanUp(pyblish.api.InstancePlugin):
try:
os.remove(src)
except PermissionError:
- self.log.warning("Insufficient permission to delete {}".format(src))
+ self.log.warning(
+ "Insufficient permission to delete {}".format(src)
+ )
continue
# add dir for cleanup
diff --git a/openpype/plugins/publish/collect_anatomy_context_data.py b/openpype/plugins/publish/collect_anatomy_context_data.py
index 55ce8e06f4..508b01447b 100644
--- a/openpype/plugins/publish/collect_anatomy_context_data.py
+++ b/openpype/plugins/publish/collect_anatomy_context_data.py
@@ -67,5 +67,6 @@ class CollectAnatomyContextData(pyblish.api.ContextPlugin):
# Store
context.data["anatomyData"] = anatomy_data
- self.log.info("Global anatomy Data collected")
- self.log.debug(json.dumps(anatomy_data, indent=4))
+ self.log.debug("Global Anatomy Context Data collected:\n{}".format(
+ json.dumps(anatomy_data, indent=4)
+ ))
diff --git a/openpype/plugins/publish/collect_anatomy_instance_data.py b/openpype/plugins/publish/collect_anatomy_instance_data.py
index 4fbb93324b..128ad90b4f 100644
--- a/openpype/plugins/publish/collect_anatomy_instance_data.py
+++ b/openpype/plugins/publish/collect_anatomy_instance_data.py
@@ -46,17 +46,17 @@ class CollectAnatomyInstanceData(pyblish.api.ContextPlugin):
follow_workfile_version = False
def process(self, context):
- self.log.info("Collecting anatomy data for all instances.")
+ self.log.debug("Collecting anatomy data for all instances.")
project_name = context.data["projectName"]
self.fill_missing_asset_docs(context, project_name)
self.fill_latest_versions(context, project_name)
self.fill_anatomy_data(context)
- self.log.info("Anatomy Data collection finished.")
+ self.log.debug("Anatomy Data collection finished.")
def fill_missing_asset_docs(self, context, project_name):
- self.log.debug("Qeurying asset documents for instances.")
+ self.log.debug("Querying asset documents for instances.")
context_asset_doc = context.data.get("assetEntity")
@@ -271,7 +271,7 @@ class CollectAnatomyInstanceData(pyblish.api.ContextPlugin):
instance_name = instance.data["name"]
instance_label = instance.data.get("label")
if instance_label:
- instance_name += "({})".format(instance_label)
+ instance_name += " ({})".format(instance_label)
self.log.debug("Anatomy data for instance {}: {}".format(
instance_name,
json.dumps(anatomy_data, indent=4)
diff --git a/openpype/plugins/publish/collect_anatomy_object.py b/openpype/plugins/publish/collect_anatomy_object.py
index 725cae2b14..f792cf3abd 100644
--- a/openpype/plugins/publish/collect_anatomy_object.py
+++ b/openpype/plugins/publish/collect_anatomy_object.py
@@ -30,6 +30,6 @@ class CollectAnatomyObject(pyblish.api.ContextPlugin):
context.data["anatomy"] = Anatomy(project_name)
- self.log.info(
+ self.log.debug(
"Anatomy object collected for project \"{}\".".format(project_name)
)
diff --git a/openpype/plugins/publish/collect_custom_staging_dir.py b/openpype/plugins/publish/collect_custom_staging_dir.py
index b749b251c0..669c4873e0 100644
--- a/openpype/plugins/publish/collect_custom_staging_dir.py
+++ b/openpype/plugins/publish/collect_custom_staging_dir.py
@@ -65,6 +65,6 @@ class CollectCustomStagingDir(pyblish.api.InstancePlugin):
else:
result_str = "Not adding"
- self.log.info("{} custom staging dir for instance with '{}'".format(
+ self.log.debug("{} custom staging dir for instance with '{}'".format(
result_str, family
))
diff --git a/openpype/plugins/publish/collect_frames_fix.py b/openpype/plugins/publish/collect_frames_fix.py
index bdd49585a5..86e727b053 100644
--- a/openpype/plugins/publish/collect_frames_fix.py
+++ b/openpype/plugins/publish/collect_frames_fix.py
@@ -26,55 +26,72 @@ class CollectFramesFixDef(
targets = ["local"]
hosts = ["nuke"]
families = ["render", "prerender"]
- enabled = True
+
+ rewrite_version_enable = False
def process(self, instance):
attribute_values = self.get_attr_values_from_data(instance.data)
frames_to_fix = attribute_values.get("frames_to_fix")
+
rewrite_version = attribute_values.get("rewrite_version")
- if frames_to_fix:
- instance.data["frames_to_fix"] = frames_to_fix
+ if not frames_to_fix:
+ return
- subset_name = instance.data["subset"]
- asset_name = instance.data["asset"]
+ instance.data["frames_to_fix"] = frames_to_fix
- project_entity = instance.data["projectEntity"]
- project_name = project_entity["name"]
+ subset_name = instance.data["subset"]
+ asset_name = instance.data["asset"]
- version = get_last_version_by_subset_name(project_name,
- subset_name,
- asset_name=asset_name)
- if not version:
- self.log.warning("No last version found, "
- "re-render not possible")
- return
+ project_entity = instance.data["projectEntity"]
+ project_name = project_entity["name"]
- representations = get_representations(project_name,
- version_ids=[version["_id"]])
- published_files = []
- for repre in representations:
- if repre["context"]["family"] not in self.families:
- continue
+ version = get_last_version_by_subset_name(
+ project_name,
+ subset_name,
+ asset_name=asset_name
+ )
+ if not version:
+ self.log.warning(
+ "No last version found, re-render not possible"
+ )
+ return
- for file_info in repre.get("files"):
- published_files.append(file_info["path"])
+ representations = get_representations(
+ project_name, version_ids=[version["_id"]]
+ )
+ published_files = []
+ for repre in representations:
+ if repre["context"]["family"] not in self.families:
+ continue
- instance.data["last_version_published_files"] = published_files
- self.log.debug("last_version_published_files::{}".format(
- instance.data["last_version_published_files"]))
+ for file_info in repre.get("files"):
+ published_files.append(file_info["path"])
- if rewrite_version:
- instance.data["version"] = version["name"]
- # limits triggering version validator
- instance.data.pop("latestVersion")
+ instance.data["last_version_published_files"] = published_files
+ self.log.debug("last_version_published_files::{}".format(
+ instance.data["last_version_published_files"]))
+
+ if self.rewrite_version_enable and rewrite_version:
+ instance.data["version"] = version["name"]
+ # limits triggering version validator
+ instance.data.pop("latestVersion")
@classmethod
def get_attribute_defs(cls):
- return [
+ attributes = [
TextDef("frames_to_fix", label="Frames to fix",
placeholder="5,10-15",
- regex="[0-9,-]+"),
- BoolDef("rewrite_version", label="Rewrite latest version",
- default=False),
+ regex="[0-9,-]+")
]
+
+ if cls.rewrite_version_enable:
+ attributes.append(
+ BoolDef(
+ "rewrite_version",
+ label="Rewrite latest version",
+ default=False
+ )
+ )
+
+ return attributes
diff --git a/openpype/plugins/publish/collect_from_create_context.py b/openpype/plugins/publish/collect_from_create_context.py
index 5fcf8feb56..4888476fff 100644
--- a/openpype/plugins/publish/collect_from_create_context.py
+++ b/openpype/plugins/publish/collect_from_create_context.py
@@ -92,5 +92,5 @@ class CollectFromCreateContext(pyblish.api.ContextPlugin):
instance.data["transientData"] = transient_data
- self.log.info("collected instance: {}".format(instance.data))
- self.log.info("parsing data: {}".format(in_data))
+ self.log.debug("collected instance: {}".format(instance.data))
+ self.log.debug("parsing data: {}".format(in_data))
diff --git a/openpype/plugins/publish/collect_rendered_files.py b/openpype/plugins/publish/collect_rendered_files.py
index 8f8d0a5eeb..6c8d1e9ca5 100644
--- a/openpype/plugins/publish/collect_rendered_files.py
+++ b/openpype/plugins/publish/collect_rendered_files.py
@@ -13,6 +13,7 @@ import json
import pyblish.api
from openpype.pipeline import legacy_io, KnownPublishError
+from openpype.pipeline.publish.lib import add_repre_files_for_cleanup
class CollectRenderedFiles(pyblish.api.ContextPlugin):
@@ -89,6 +90,7 @@ class CollectRenderedFiles(pyblish.api.ContextPlugin):
# now we can just add instances from json file and we are done
for instance_data in data.get("instances"):
+
self.log.info(" - processing instance for {}".format(
instance_data.get("subset")))
instance = self._context.create_instance(
@@ -107,6 +109,8 @@ class CollectRenderedFiles(pyblish.api.ContextPlugin):
self._fill_staging_dir(repre_data, anatomy)
representations.append(repre_data)
+ add_repre_files_for_cleanup(instance, repre_data)
+
instance.data["representations"] = representations
# add audio if in metadata data
@@ -157,6 +161,8 @@ class CollectRenderedFiles(pyblish.api.ContextPlugin):
os.environ.update(session_data)
session_is_set = True
self._process_path(data, anatomy)
+ context.data["cleanupFullPaths"].append(path)
+ context.data["cleanupEmptyDirs"].append(os.path.dirname(path))
except Exception as e:
self.log.error(e, exc_info=True)
raise Exception("Error") from e
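
`add_repre_files_for_cleanup` (added to `openpype/pipeline/publish/lib.py` above) is what this collector and the extractors below call to mark intermediate representation files for deletion by the `CleanUp` plugin. A standalone sketch of what it records, with the helper inlined and stand-in objects instead of a real pyblish instance:

```python
# Standalone sketch of add_repre_files_for_cleanup; the staging dir, file name
# and the SimpleNamespace stand-ins are illustrative only.
import os
from types import SimpleNamespace

context = SimpleNamespace(data={"cleanupFullPaths": []})
instance = SimpleNamespace(context=context)

repre_data = {
    "name": "thumbnail",
    "ext": "jpg",
    "files": "sh010_thumb.jpg",            # a single file name or a list
    "stagingDir": "/tmp/publish_staging",  # helper returns early without it
}


def add_repre_files_for_cleanup(instance, repre):
    """Same logic as the helper added in openpype/pipeline/publish/lib.py."""
    files = repre["files"]
    staging_dir = repre.get("stagingDir")
    if not staging_dir:
        return
    if isinstance(files, str):
        files = [files]
    for file_name in files:
        expected_file = os.path.join(staging_dir, file_name)
        instance.context.data["cleanupFullPaths"].append(expected_file)


add_repre_files_for_cleanup(instance, repre_data)
print(context.data["cleanupFullPaths"])
# ['/tmp/publish_staging/sh010_thumb.jpg'] - removed later by the CleanUp plugin
```
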
diff --git a/openpype/plugins/publish/collect_scene_version.py b/openpype/plugins/publish/collect_scene_version.py
index fdbcb3cb9d..cd3231a07d 100644
--- a/openpype/plugins/publish/collect_scene_version.py
+++ b/openpype/plugins/publish/collect_scene_version.py
@@ -48,10 +48,13 @@ class CollectSceneVersion(pyblish.api.ContextPlugin):
if '' in filename:
return
+ self.log.debug(
+ "Collecting scene version from filename: {}".format(filename)
+ )
+
version = get_version_from_path(filename)
assert version, "Cannot determine version"
rootVersion = int(version)
context.data['version'] = rootVersion
- self.log.info("{}".format(type(rootVersion)))
self.log.info('Scene Version: %s' % context.data.get('version'))
diff --git a/openpype/plugins/publish/extract_burnin.py b/openpype/plugins/publish/extract_burnin.py
index a12e8d18b4..6a8ae958d2 100644
--- a/openpype/plugins/publish/extract_burnin.py
+++ b/openpype/plugins/publish/extract_burnin.py
@@ -19,6 +19,7 @@ from openpype.lib import (
should_convert_for_ffmpeg
)
from openpype.lib.profiles_filtering import filter_profiles
+from openpype.pipeline.publish.lib import add_repre_files_for_cleanup
class ExtractBurnin(publish.Extractor):
@@ -353,6 +354,8 @@ class ExtractBurnin(publish.Extractor):
# Add new representation to instance
instance.data["representations"].append(new_repre)
+ add_repre_files_for_cleanup(instance, new_repre)
+
# Cleanup temp staging dir after procesisng of output definitions
if do_convert:
temp_dir = repre["stagingDir"]
@@ -517,8 +520,8 @@ class ExtractBurnin(publish.Extractor):
"""
if "burnin" not in (repre.get("tags") or []):
- self.log.info((
- "Representation \"{}\" don't have \"burnin\" tag. Skipped."
+ self.log.debug((
+ "Representation \"{}\" does not have \"burnin\" tag. Skipped."
).format(repre["name"]))
return False
diff --git a/openpype/plugins/publish/extract_color_transcode.py b/openpype/plugins/publish/extract_color_transcode.py
index 58e0350a2e..45b10620d1 100644
--- a/openpype/plugins/publish/extract_color_transcode.py
+++ b/openpype/plugins/publish/extract_color_transcode.py
@@ -336,13 +336,13 @@ class ExtractOIIOTranscode(publish.Extractor):
if repre.get("ext") not in self.supported_exts:
self.log.debug((
- "Representation '{}' of unsupported extension. Skipped."
- ).format(repre["name"]))
+ "Representation '{}' has unsupported extension: '{}'. Skipped."
+ ).format(repre["name"], repre.get("ext")))
return False
if not repre.get("files"):
self.log.debug((
- "Representation '{}' have empty files. Skipped."
+ "Representation '{}' has empty files. Skipped."
).format(repre["name"]))
return False
diff --git a/openpype/plugins/publish/extract_review.py b/openpype/plugins/publish/extract_review.py
index 1062683319..d04893fa7e 100644
--- a/openpype/plugins/publish/extract_review.py
+++ b/openpype/plugins/publish/extract_review.py
@@ -23,7 +23,11 @@ from openpype.lib.transcoding import (
convert_input_paths_for_ffmpeg,
get_transcode_temp_directory,
)
-from openpype.pipeline.publish import KnownPublishError
+from openpype.pipeline.publish import (
+ KnownPublishError,
+ get_publish_instance_label,
+)
+from openpype.pipeline.publish.lib import add_repre_files_for_cleanup
class ExtractReview(pyblish.api.InstancePlugin):
@@ -92,8 +96,8 @@ class ExtractReview(pyblish.api.InstancePlugin):
host_name = instance.context.data["hostName"]
family = self.main_family_from_instance(instance)
- self.log.info("Host: \"{}\"".format(host_name))
- self.log.info("Family: \"{}\"".format(family))
+ self.log.debug("Host: \"{}\"".format(host_name))
+ self.log.debug("Family: \"{}\"".format(family))
profile = filter_profiles(
self.profiles,
@@ -202,17 +206,8 @@ class ExtractReview(pyblish.api.InstancePlugin):
return filtered_defs
- @staticmethod
- def get_instance_label(instance):
- return (
- getattr(instance, "label", None)
- or instance.data.get("label")
- or instance.data.get("name")
- or str(instance)
- )
-
def main_process(self, instance):
- instance_label = self.get_instance_label(instance)
+ instance_label = get_publish_instance_label(instance)
self.log.debug("Processing instance \"{}\"".format(instance_label))
profile_outputs = self._get_outputs_for_instance(instance)
if not profile_outputs:
@@ -351,7 +346,7 @@ class ExtractReview(pyblish.api.InstancePlugin):
temp_data = self.prepare_temp_data(instance, repre, output_def)
files_to_clean = []
if temp_data["input_is_sequence"]:
- self.log.info("Filling gaps in sequence.")
+                self.log.debug("Checking sequence for gaps to fill.")
files_to_clean = self.fill_sequence_gaps(
files=temp_data["origin_repre"]["files"],
staging_dir=new_repre["stagingDir"],
@@ -425,6 +420,8 @@ class ExtractReview(pyblish.api.InstancePlugin):
)
instance.data["representations"].append(new_repre)
+ add_repre_files_for_cleanup(instance, new_repre)
+
def input_is_sequence(self, repre):
"""Deduce from representation data if input is sequence."""
# TODO GLOBAL ISSUE - Find better way how to find out if input
diff --git a/openpype/plugins/publish/extract_thumbnail.py b/openpype/plugins/publish/extract_thumbnail.py
index 54b933a76d..b98ab64f56 100644
--- a/openpype/plugins/publish/extract_thumbnail.py
+++ b/openpype/plugins/publish/extract_thumbnail.py
@@ -36,7 +36,7 @@ class ExtractThumbnail(pyblish.api.InstancePlugin):
).format(subset_name))
return
- self.log.info(
+ self.log.debug(
"Processing instance with subset name {}".format(subset_name)
)
@@ -89,13 +89,13 @@ class ExtractThumbnail(pyblish.api.InstancePlugin):
src_staging = os.path.normpath(repre["stagingDir"])
full_input_path = os.path.join(src_staging, input_file)
- self.log.info("input {}".format(full_input_path))
+ self.log.debug("input {}".format(full_input_path))
filename = os.path.splitext(input_file)[0]
jpeg_file = filename + "_thumb.jpg"
full_output_path = os.path.join(dst_staging, jpeg_file)
if oiio_supported:
- self.log.info("Trying to convert with OIIO")
+ self.log.debug("Trying to convert with OIIO")
# If the input can read by OIIO then use OIIO method for
# conversion otherwise use ffmpeg
thumbnail_created = self.create_thumbnail_oiio(
@@ -148,7 +148,7 @@ class ExtractThumbnail(pyblish.api.InstancePlugin):
def _already_has_thumbnail(self, repres):
for repre in repres:
- self.log.info("repre {}".format(repre))
+ self.log.debug("repre {}".format(repre))
if repre["name"] == "thumbnail":
return True
return False
@@ -173,20 +173,20 @@ class ExtractThumbnail(pyblish.api.InstancePlugin):
return filtered_repres
def create_thumbnail_oiio(self, src_path, dst_path):
- self.log.info("outputting {}".format(dst_path))
+ self.log.info("Extracting thumbnail {}".format(dst_path))
oiio_tool_path = get_oiio_tools_path()
oiio_cmd = [
oiio_tool_path,
"-a", src_path,
"-o", dst_path
]
- self.log.info("running: {}".format(" ".join(oiio_cmd)))
+ self.log.debug("running: {}".format(" ".join(oiio_cmd)))
try:
run_subprocess(oiio_cmd, logger=self.log)
return True
except Exception:
self.log.warning(
- "Failed to create thubmnail using oiiotool",
+ "Failed to create thumbnail using oiiotool",
exc_info=True
)
return False
diff --git a/openpype/plugins/publish/extract_thumbnail_from_source.py b/openpype/plugins/publish/extract_thumbnail_from_source.py
index a92f762cde..a9c95d6065 100644
--- a/openpype/plugins/publish/extract_thumbnail_from_source.py
+++ b/openpype/plugins/publish/extract_thumbnail_from_source.py
@@ -39,7 +39,7 @@ class ExtractThumbnailFromSource(pyblish.api.InstancePlugin):
self._create_context_thumbnail(instance.context)
subset_name = instance.data["subset"]
- self.log.info(
+ self.log.debug(
"Processing instance with subset name {}".format(subset_name)
)
thumbnail_source = instance.data.get("thumbnailSource")
@@ -104,7 +104,7 @@ class ExtractThumbnailFromSource(pyblish.api.InstancePlugin):
full_output_path = os.path.join(dst_staging, dst_filename)
if oiio_supported:
- self.log.info("Trying to convert with OIIO")
+ self.log.debug("Trying to convert with OIIO")
# If the input can read by OIIO then use OIIO method for
# conversion otherwise use ffmpeg
thumbnail_created = self.create_thumbnail_oiio(
diff --git a/openpype/plugins/publish/integrate.py b/openpype/plugins/publish/integrate.py
index 8e984a9e97..f392cf67f7 100644
--- a/openpype/plugins/publish/integrate.py
+++ b/openpype/plugins/publish/integrate.py
@@ -267,7 +267,7 @@ class IntegrateAsset(pyblish.api.InstancePlugin):
instance_stagingdir = instance.data.get("stagingDir")
if not instance_stagingdir:
- self.log.info((
+ self.log.debug((
"{0} is missing reference to staging directory."
" Will try to get it from representation."
).format(instance))
@@ -480,7 +480,7 @@ class IntegrateAsset(pyblish.api.InstancePlugin):
update_data
)
- self.log.info("Prepared subset: {}".format(subset_name))
+ self.log.debug("Prepared subset: {}".format(subset_name))
return subset_doc
def prepare_version(self, instance, op_session, subset_doc, project_name):
@@ -521,7 +521,9 @@ class IntegrateAsset(pyblish.api.InstancePlugin):
project_name, version_doc["type"], version_doc
)
- self.log.info("Prepared version: v{0:03d}".format(version_doc["name"]))
+ self.log.debug(
+ "Prepared version: v{0:03d}".format(version_doc["name"])
+ )
return version_doc
diff --git a/openpype/plugins/publish/integrate_legacy.py b/openpype/plugins/publish/integrate_legacy.py
index c67ce62bf6..c238cca633 100644
--- a/openpype/plugins/publish/integrate_legacy.py
+++ b/openpype/plugins/publish/integrate_legacy.py
@@ -147,7 +147,9 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
def process(self, instance):
if instance.data.get("processedWithNewIntegrator"):
- self.log.info("Instance was already processed with new integrator")
+ self.log.debug(
+ "Instance was already processed with new integrator"
+ )
return
for ef in self.exclude_families:
@@ -274,7 +276,7 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
stagingdir = instance.data.get("stagingDir")
if not stagingdir:
- self.log.info((
+ self.log.debug((
"{0} is missing reference to staging directory."
" Will try to get it from representation."
).format(instance))
diff --git a/openpype/plugins/publish/integrate_thumbnail.py b/openpype/plugins/publish/integrate_thumbnail.py
index 16cc47d432..2e87d8fc86 100644
--- a/openpype/plugins/publish/integrate_thumbnail.py
+++ b/openpype/plugins/publish/integrate_thumbnail.py
@@ -20,6 +20,7 @@ import pyblish.api
from openpype.client import get_versions
from openpype.client.operations import OperationsSession, new_thumbnail_doc
+from openpype.pipeline.publish import get_publish_instance_label
InstanceFilterResult = collections.namedtuple(
"InstanceFilterResult",
@@ -41,7 +42,7 @@ class IntegrateThumbnails(pyblish.api.ContextPlugin):
# Filter instances which can be used for integration
filtered_instance_items = self._prepare_instances(context)
if not filtered_instance_items:
- self.log.info(
+ self.log.debug(
"All instances were filtered. Thumbnail integration skipped."
)
return
@@ -133,7 +134,7 @@ class IntegrateThumbnails(pyblish.api.ContextPlugin):
filtered_instances = []
for instance in context:
- instance_label = self._get_instance_label(instance)
+ instance_label = get_publish_instance_label(instance)
# Skip instances without published representations
# - there is no place where to put the thumbnail
published_repres = instance.data.get("published_representations")
@@ -162,7 +163,7 @@ class IntegrateThumbnails(pyblish.api.ContextPlugin):
# Skip instance if thumbnail path is not available for it
if not thumbnail_path:
- self.log.info((
+ self.log.debug((
"Skipping thumbnail integration for instance \"{}\"."
" Instance and context"
" thumbnail paths are not available."
@@ -248,7 +249,7 @@ class IntegrateThumbnails(pyblish.api.ContextPlugin):
for instance_item in filtered_instance_items:
instance, thumbnail_path, version_id = instance_item
- instance_label = self._get_instance_label(instance)
+ instance_label = get_publish_instance_label(instance)
version_doc = version_docs_by_str_id.get(version_id)
if not version_doc:
self.log.warning((
@@ -339,10 +340,3 @@ class IntegrateThumbnails(pyblish.api.ContextPlugin):
))
op_session.commit()
-
- def _get_instance_label(self, instance):
- return (
- instance.data.get("label")
- or instance.data.get("name")
- or "N/A"
- )
diff --git a/openpype/settings/defaults/project_settings/blender.json b/openpype/settings/defaults/project_settings/blender.json
index 20eec0c09d..41aebfa537 100644
--- a/openpype/settings/defaults/project_settings/blender.json
+++ b/openpype/settings/defaults/project_settings/blender.json
@@ -1,4 +1,9 @@
{
+ "unit_scale_settings": {
+ "enabled": true,
+ "apply_on_opening": false,
+ "base_file_unit_scale": 0.01
+ },
"imageio": {
"ocio_config": {
"enabled": false,
diff --git a/openpype/settings/defaults/project_settings/global.json b/openpype/settings/defaults/project_settings/global.json
index 75f335f1de..002e547feb 100644
--- a/openpype/settings/defaults/project_settings/global.json
+++ b/openpype/settings/defaults/project_settings/global.json
@@ -46,6 +46,10 @@
"enabled": false,
"families": []
},
+ "CollectFramesFixDef": {
+ "enabled": true,
+ "rewrite_version_enable": true
+ },
"ValidateEditorialAssetName": {
"enabled": true,
"optional": false
@@ -252,7 +256,9 @@
}
},
{
- "families": ["review"],
+ "families": [
+ "review"
+ ],
"hosts": [
"maya",
"houdini"
diff --git a/openpype/settings/defaults/project_settings/nuke.json b/openpype/settings/defaults/project_settings/nuke.json
index 85dee73176..3f8be4c872 100644
--- a/openpype/settings/defaults/project_settings/nuke.json
+++ b/openpype/settings/defaults/project_settings/nuke.json
@@ -222,6 +222,13 @@
"title": "OpenPype Docs",
"command": "import webbrowser;webbrowser.open(url='https://openpype.io/docs/artist_hosts_nuke_tut')",
"tooltip": "Open the OpenPype Nuke user doc page"
+ },
+ {
+ "type": "action",
+ "sourcetype": "python",
+ "title": "Set Frame Start (Read Node)",
+ "command": "from openpype.hosts.nuke.startup.frame_setting_for_read_nodes import main;main();",
+ "tooltip": "Set frame start for read node(s)"
}
]
},
@@ -358,12 +365,12 @@
"optional": true,
"active": true
},
- "ValidateGizmo": {
+ "ValidateBackdrop": {
"enabled": true,
"optional": true,
"active": true
},
- "ValidateBackdrop": {
+ "ValidateGizmo": {
"enabled": true,
"optional": true,
"active": true
@@ -401,7 +408,39 @@
false
]
]
- }
+ },
+ "reposition_nodes": [
+ {
+ "node_class": "Reformat",
+ "knobs": [
+ {
+ "type": "text",
+ "name": "type",
+ "value": "to format"
+ },
+ {
+ "type": "text",
+ "name": "format",
+ "value": "HD_1080"
+ },
+ {
+ "type": "text",
+ "name": "filter",
+ "value": "Lanczos6"
+ },
+ {
+ "type": "bool",
+ "name": "black_outside",
+ "value": true
+ },
+ {
+ "type": "bool",
+ "name": "pbb",
+ "value": false
+ }
+ ]
+ }
+ ]
},
"ExtractReviewData": {
"enabled": false
diff --git a/openpype/settings/defaults/project_settings/resolve.json b/openpype/settings/defaults/project_settings/resolve.json
index 264f3bd902..56efa78e89 100644
--- a/openpype/settings/defaults/project_settings/resolve.json
+++ b/openpype/settings/defaults/project_settings/resolve.json
@@ -1,4 +1,5 @@
{
+ "launch_openpype_menu_on_start": false,
"imageio": {
"ocio_config": {
"enabled": false,
diff --git a/openpype/settings/defaults/project_settings/unreal.json b/openpype/settings/defaults/project_settings/unreal.json
index 737a17d289..92bdb468ba 100644
--- a/openpype/settings/defaults/project_settings/unreal.json
+++ b/openpype/settings/defaults/project_settings/unreal.json
@@ -15,6 +15,6 @@
"preroll_frames": 0,
"render_format": "png",
"project_setup": {
- "dev_mode": true
+ "dev_mode": false
}
}
diff --git a/openpype/settings/defaults/system_settings/applications.json b/openpype/settings/defaults/system_settings/applications.json
index b492bb9321..f2fc7d933a 100644
--- a/openpype/settings/defaults/system_settings/applications.json
+++ b/openpype/settings/defaults/system_settings/applications.json
@@ -1069,8 +1069,8 @@
"RESOLVE_UTILITY_SCRIPTS_SOURCE_DIR": [],
"RESOLVE_PYTHON3_HOME": {
"windows": "{LOCALAPPDATA}/Programs/Python/Python36",
- "darwin": "~/Library/Python/3.6/bin",
- "linux": "/opt/Python/3.6/bin"
+ "darwin": "/Library/Frameworks/Python.framework/Versions/3.6",
+ "linux": "/opt/Python/3.6"
}
},
"variants": {
diff --git a/openpype/settings/entities/lib.py b/openpype/settings/entities/lib.py
index 1c7dc9bed0..93abc27b0e 100644
--- a/openpype/settings/entities/lib.py
+++ b/openpype/settings/entities/lib.py
@@ -323,7 +323,10 @@ class SchemasHub:
filled_template = self._fill_template(
schema_data, template_def
)
- return filled_template
+ new_template_def = []
+ for item in filled_template:
+ new_template_def.extend(self.resolve_schema_data(item))
+ return new_template_def
def create_schema_object(self, schema_data, *args, **kwargs):
"""Create entity for passed schema data.
diff --git a/openpype/settings/entities/schemas/projects_schema/schema_project_blender.json b/openpype/settings/entities/schemas/projects_schema/schema_project_blender.json
index 725d9bfb08..5b40169872 100644
--- a/openpype/settings/entities/schemas/projects_schema/schema_project_blender.json
+++ b/openpype/settings/entities/schemas/projects_schema/schema_project_blender.json
@@ -5,6 +5,32 @@
"label": "Blender",
"is_file": true,
"children": [
+ {
+ "key": "unit_scale_settings",
+ "type": "dict",
+ "label": "Set Unit Scale",
+ "collapsible": true,
+ "is_group": true,
+ "checkbox_key": "enabled",
+ "children": [
+ {
+ "type": "boolean",
+ "key": "enabled",
+ "label": "Enabled"
+ },
+ {
+ "key": "apply_on_opening",
+ "type": "boolean",
+ "label": "Apply on Opening Existing Files"
+ },
+ {
+ "key": "base_file_unit_scale",
+ "type": "number",
+ "label": "Base File Unit Scale",
+ "decimal": 10
+ }
+ ]
+ },
{
"key": "imageio",
"type": "dict",
diff --git a/openpype/settings/entities/schemas/projects_schema/schema_project_resolve.json b/openpype/settings/entities/schemas/projects_schema/schema_project_resolve.json
index b326f22394..6f98bdd3bd 100644
--- a/openpype/settings/entities/schemas/projects_schema/schema_project_resolve.json
+++ b/openpype/settings/entities/schemas/projects_schema/schema_project_resolve.json
@@ -5,6 +5,11 @@
"label": "DaVinci Resolve",
"is_file": true,
"children": [
+ {
+ "type": "boolean",
+ "key": "launch_openpype_menu_on_start",
+ "label": "Launch OpenPype menu on start of Resolve"
+ },
{
"key": "imageio",
"type": "dict",
diff --git a/openpype/settings/entities/schemas/projects_schema/schemas/schema_global_publish.json b/openpype/settings/entities/schemas/projects_schema/schemas/schema_global_publish.json
index a7617918a3..3164cfb62d 100644
--- a/openpype/settings/entities/schemas/projects_schema/schemas/schema_global_publish.json
+++ b/openpype/settings/entities/schemas/projects_schema/schemas/schema_global_publish.json
@@ -81,6 +81,26 @@
}
]
},
+ {
+ "type": "dict",
+ "collapsible": true,
+ "checkbox_key": "enabled",
+ "key": "CollectFramesFixDef",
+ "label": "Collect Frames to Fix",
+ "is_group": true,
+ "children": [
+ {
+ "type": "boolean",
+ "key": "enabled",
+ "label": "Enabled"
+ },
+ {
+ "type": "boolean",
+ "key": "rewrite_version_enable",
+ "label": "Show 'Rewrite latest version' toggle"
+ }
+ ]
+ },
{
"type": "dict",
"collapsible": true,
diff --git a/openpype/settings/entities/schemas/projects_schema/schemas/schema_nuke_publish.json b/openpype/settings/entities/schemas/projects_schema/schemas/schema_nuke_publish.json
index ce9fa04c6a..3019c9b1b5 100644
--- a/openpype/settings/entities/schemas/projects_schema/schemas/schema_nuke_publish.json
+++ b/openpype/settings/entities/schemas/projects_schema/schemas/schema_nuke_publish.json
@@ -158,10 +158,43 @@
"label": "Nodes",
"collapsible": true,
"children": [
+ {
+ "type": "label",
+ "label": "Nodes attribute will be deprecated in future releases. Use reposition_nodes instead."
+ },
{
"type": "raw-json",
"key": "nodes",
- "label": "Nodes"
+        "label": "Nodes [deprecated]"
+ },
+ {
+ "type": "label",
+        "label": "Reposition knobs supported only. You can add multiple reformat nodes and set their knobs. Order of reformat nodes is important. First reformat node will be applied first and last reformat node will be applied last."
+ },
+ {
+ "key": "reposition_nodes",
+ "type": "list",
+ "label": "Reposition nodes",
+ "object_type": {
+ "type": "dict",
+ "children": [
+ {
+ "key": "node_class",
+ "label": "Node class",
+ "type": "text"
+ },
+ {
+ "type": "schema_template",
+ "name": "template_nuke_knob_inputs",
+ "template_data": [
+ {
+ "label": "Node knobs",
+ "key": "knobs"
+ }
+ ]
+ }
+ ]
+ }
}
]
}
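
The `reposition_nodes` setting above is a list of node classes with ordered knob name/value pairs that are applied when baking the review. A hedged sketch of consuming that structure through the Nuke Python API; the loop is illustrative and not the exact OpenPype baking code:

```python
# Hedged sketch: applying the default "reposition_nodes" settings above with
# the Nuke Python API. The loop is illustrative, not the exact OpenPype code.
import nuke

reposition_nodes = [
    {
        "node_class": "Reformat",
        "knobs": [
            {"type": "text", "name": "type", "value": "to format"},
            {"type": "text", "name": "format", "value": "HD_1080"},
            {"type": "text", "name": "filter", "value": "Lanczos6"},
            {"type": "bool", "name": "black_outside", "value": True},
            {"type": "bool", "name": "pbb", "value": False},
        ],
    },
]

previous_node = nuke.selectedNode()  # assumed starting point of the bake chain
for node_settings in reposition_nodes:
    node = nuke.createNode(node_settings["node_class"])
    for knob in node_settings["knobs"]:
        node[knob["name"]].setValue(knob["value"])
    # Nodes are chained in the order they are listed in the settings.
    node.setInput(0, previous_node)
    previous_node = node
```
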
diff --git a/openpype/style/data.json b/openpype/style/data.json
index bea2a3d407..7389387d97 100644
--- a/openpype/style/data.json
+++ b/openpype/style/data.json
@@ -26,8 +26,8 @@
"bg": "#2C313A",
"bg-inputs": "#21252B",
- "bg-buttons": "#434a56",
- "bg-button-hover": "rgb(81, 86, 97)",
+ "bg-buttons": "rgb(67, 74, 86)",
+ "bg-buttons-hover": "rgb(81, 86, 97)",
"bg-inputs-disabled": "#2C313A",
"bg-buttons-disabled": "#434a56",
@@ -66,7 +66,9 @@
"bg-success": "#458056",
"bg-success-hover": "#55a066",
"bg-error": "#AD2E2E",
- "bg-error-hover": "#C93636"
+ "bg-error-hover": "#C93636",
+ "bg-info": "rgb(63, 98, 121)",
+ "bg-info-hover": "rgb(81, 146, 181)"
},
"tab-widget": {
"bg": "#21252B",
@@ -94,6 +96,7 @@
"crash": "#FF6432",
"success": "#458056",
"warning": "#ffc671",
+ "progress": "rgb(194, 226, 236)",
"tab-bg": "#16191d",
"list-view-group": {
"bg": "#434a56",
diff --git a/openpype/style/style.css b/openpype/style/style.css
index 827b103f94..5ce55aa658 100644
--- a/openpype/style/style.css
+++ b/openpype/style/style.css
@@ -136,7 +136,7 @@ QPushButton {
}
QPushButton:hover {
- background: {color:bg-button-hover};
+ background: {color:bg-buttons-hover};
color: {color:font-hover};
}
@@ -166,7 +166,7 @@ QToolButton {
}
QToolButton:hover {
- background: {color:bg-button-hover};
+ background: {color:bg-buttons-hover};
color: {color:font-hover};
}
@@ -722,6 +722,13 @@ OverlayMessageWidget[type="error"]:hover {
background: {color:overlay-messages:bg-error-hover};
}
+OverlayMessageWidget[type="info"] {
+ background: {color:overlay-messages:bg-info};
+}
+OverlayMessageWidget[type="info"]:hover {
+ background: {color:overlay-messages:bg-info-hover};
+}
+
OverlayMessageWidget QWidget {
background: transparent;
}
@@ -749,10 +756,11 @@ OverlayMessageWidget QWidget {
}
#InfoText {
- padding-left: 30px;
- padding-top: 20px;
+ padding-left: 0px;
+ padding-top: 0px;
+ padding-right: 20px;
background: transparent;
- border: 1px solid {color:border};
+ border: none;
}
#TypeEditor, #ToolEditor, #NameEditor, #NumberEditor {
@@ -914,7 +922,7 @@ PixmapButton{
background: {color:bg-buttons};
}
PixmapButton:hover {
- background: {color:bg-button-hover};
+ background: {color:bg-buttons-hover};
}
PixmapButton:disabled {
background: {color:bg-buttons-disabled};
@@ -925,7 +933,7 @@ PixmapButton:disabled {
background: {color:bg-view};
}
#ThumbnailPixmapHoverButton:hover {
- background: {color:bg-button-hover};
+ background: {color:bg-buttons-hover};
}
#CreatorDetailedDescription {
@@ -946,7 +954,7 @@ PixmapButton:disabled {
}
#CreateDialogHelpButton:hover {
- background: {color:bg-button-hover};
+ background: {color:bg-buttons-hover};
}
#CreateDialogHelpButton QWidget {
background: transparent;
@@ -1005,7 +1013,7 @@ PixmapButton:disabled {
border-radius: 0.2em;
}
#CardViewWidget:hover {
- background: {color:bg-button-hover};
+ background: {color:bg-buttons-hover};
}
#CardViewWidget[state="selected"] {
background: {color:bg-view-selection};
@@ -1032,7 +1040,7 @@ PixmapButton:disabled {
}
#PublishInfoFrame[state="3"], #PublishInfoFrame[state="4"] {
- background: rgb(194, 226, 236);
+ background: {color:publisher:progress};
}
#PublishInfoFrame QLabel {
@@ -1040,6 +1048,11 @@ PixmapButton:disabled {
font-style: bold;
}
+#PublishReportHeader {
+ font-size: 14pt;
+ font-weight: bold;
+}
+
#PublishInfoMainLabel {
font-size: 12pt;
}
@@ -1060,7 +1073,7 @@ ValidationArtistMessage QLabel {
}
#ValidationActionButton:hover {
- background: {color:bg-button-hover};
+ background: {color:bg-buttons-hover};
color: {color:font-hover};
}
@@ -1090,6 +1103,35 @@ ValidationArtistMessage QLabel {
border-left: 1px solid {color:border};
}
+#PublishInstancesDetails {
+ border: 1px solid {color:border};
+ border-radius: 0.3em;
+}
+
+#InstancesLogsView {
+ border: 1px solid {color:border};
+ background: {color:bg-view};
+ border-radius: 0.3em;
+}
+
+#PublishLogMessage {
+ font-family: "Noto Sans Mono";
+}
+
+#PublishInstanceLogsLabel {
+ font-weight: bold;
+}
+
+#PublishCrashMainLabel{
+ font-weight: bold;
+ font-size: 16pt;
+}
+
+#PublishCrashReportLabel {
+ font-weight: bold;
+ font-size: 13pt;
+}
+
#AssetNameInputWidget {
background: {color:bg-inputs};
border: 1px solid {color:border};
diff --git a/openpype/tools/attribute_defs/files_widget.py b/openpype/tools/attribute_defs/files_widget.py
index 067866035f..076b33fb7c 100644
--- a/openpype/tools/attribute_defs/files_widget.py
+++ b/openpype/tools/attribute_defs/files_widget.py
@@ -198,29 +198,33 @@ class DropEmpty(QtWidgets.QWidget):
def paintEvent(self, event):
super(DropEmpty, self).paintEvent(event)
- painter = QtGui.QPainter(self)
+
pen = QtGui.QPen()
- pen.setWidth(1)
pen.setBrush(QtCore.Qt.darkGray)
pen.setStyle(QtCore.Qt.DashLine)
- painter.setPen(pen)
- content_margins = self.layout().contentsMargins()
+ pen.setWidth(1)
- left_m = content_margins.left()
- top_m = content_margins.top()
- rect = QtCore.QRect(
+ content_margins = self.layout().contentsMargins()
+ rect = self.rect()
+ left_m = content_margins.left() + pen.width()
+ top_m = content_margins.top() + pen.width()
+ new_rect = QtCore.QRect(
left_m,
top_m,
(
- self.rect().width()
+ rect.width()
- (left_m + content_margins.right() + pen.width())
),
(
- self.rect().height()
+ rect.height()
- (top_m + content_margins.bottom() + pen.width())
)
)
- painter.drawRect(rect)
+
+ painter = QtGui.QPainter(self)
+ painter.setRenderHint(QtGui.QPainter.Antialiasing)
+ painter.setPen(pen)
+ painter.drawRect(new_rect)
class FilesModel(QtGui.QStandardItemModel):
diff --git a/openpype/tools/publisher/constants.py b/openpype/tools/publisher/constants.py
index 660fccecf1..4630eb144b 100644
--- a/openpype/tools/publisher/constants.py
+++ b/openpype/tools/publisher/constants.py
@@ -35,9 +35,13 @@ ResetKeySequence = QtGui.QKeySequence(
__all__ = (
"CONTEXT_ID",
+ "CONTEXT_LABEL",
"VARIANT_TOOLTIP",
+ "INPUTS_LAYOUT_HSPACING",
+ "INPUTS_LAYOUT_VSPACING",
+
"INSTANCE_ID_ROLE",
"SORT_VALUE_ROLE",
"IS_GROUP_ROLE",
@@ -47,4 +51,6 @@ __all__ = (
"FAMILY_ROLE",
"GROUP_ROLE",
"CONVERTER_IDENTIFIER_ROLE",
+
+ "ResetKeySequence",
)
diff --git a/openpype/tools/publisher/control.py b/openpype/tools/publisher/control.py
index 4b083d4bc8..89c2343ef7 100644
--- a/openpype/tools/publisher/control.py
+++ b/openpype/tools/publisher/control.py
@@ -40,6 +40,7 @@ from openpype.pipeline.create.context import (
CreatorsOperationFailed,
ConvertorsOperationFailed,
)
+from openpype.pipeline.publish import get_publish_instance_label
# Define constant for plugin orders offset
PLUGIN_ORDER_OFFSET = 0.5
@@ -47,6 +48,7 @@ PLUGIN_ORDER_OFFSET = 0.5
class CardMessageTypes:
standard = None
+ info = "info"
error = "error"
@@ -220,7 +222,12 @@ class PublishReportMaker:
def _add_plugin_data_item(self, plugin):
if plugin in self._stored_plugins:
- raise ValueError("Plugin is already stored")
+ # The plugin would be processed more than once. Possible causes:
+ # - there is a bug in the controller
+ # - the plugin class is imported into multiple files
+ # - this can happen even with base classes from 'pyblish'
+ raise ValueError(
+ "Plugin '{}' is already stored".format(str(plugin)))
self._stored_plugins.append(plugin)
@@ -239,6 +246,7 @@ class PublishReportMaker:
label = plugin.label
return {
+ "id": plugin.id,
"name": plugin.__name__,
"label": label,
"order": plugin.order,
@@ -324,7 +332,7 @@ class PublishReportMaker:
"instances": instances_details,
"context": self._extract_context_data(self._current_context),
"crashed_file_paths": crashed_file_paths,
- "id": str(uuid.uuid4()),
+ "id": uuid.uuid4().hex,
"report_version": "1.0.0"
}
@@ -339,10 +347,12 @@ class PublishReportMaker:
def _extract_instance_data(self, instance, exists):
return {
"name": instance.data.get("name"),
- "label": instance.data.get("label"),
+ "label": get_publish_instance_label(instance),
"family": instance.data["family"],
"families": instance.data.get("families") or [],
- "exists": exists
+ "exists": exists,
+ "creator_identifier": instance.data.get("creator_identifier"),
+ "instance_id": instance.data.get("instance_id"),
}
def _extract_instance_log_items(self, result):
@@ -388,8 +398,11 @@ class PublishReportMaker:
exception = result.get("error")
if exception:
fname, line_no, func, exc = exception.traceback
+ # Action result does not have 'is_validation_error'
+ is_validation_error = result.get("is_validation_error", False)
output.append({
"type": "error",
+ "is_validation_error": is_validation_error,
"msg": str(exception),
"filename": str(fname),
"lineno": str(line_no),
@@ -426,13 +439,15 @@ class PublishPluginsProxy:
plugin_id = plugin.id
plugins_by_id[plugin_id] = plugin
- action_ids = set()
+ action_ids = []
action_ids_by_plugin_id[plugin_id] = action_ids
actions = getattr(plugin, "actions", None) or []
for action in actions:
action_id = action.id
- action_ids.add(action_id)
+ if action_id in actions_by_id:
+ continue
+ action_ids.append(action_id)
actions_by_id[action_id] = action
self._plugins_by_id = plugins_by_id
@@ -461,7 +476,7 @@ class PublishPluginsProxy:
return plugin.id
def get_plugin_action_items(self, plugin_id):
- """Get plugin action items for plugin by it's id.
+ """Get plugin action items for plugin by its id.
Args:
plugin_id (str): Publish plugin id.
@@ -568,7 +583,7 @@ class ValidationErrorItem:
context_validation,
title,
description,
- detail,
+ detail
):
self.instance_id = instance_id
self.instance_label = instance_label
@@ -677,6 +692,8 @@ class PublishValidationErrorsReport:
for title in titles:
grouped_error_items.append({
+ "id": uuid.uuid4().hex,
+ "plugin_id": plugin_id,
"plugin_action_items": list(plugin_action_items),
"error_items": error_items_by_title[title],
"title": title
@@ -2379,7 +2396,8 @@ class PublisherController(BasePublisherController):
yield MainThreadItem(self.stop_publish)
# Add plugin to publish report
- self._publish_report.add_plugin_iter(plugin, self._publish_context)
+ self._publish_report.add_plugin_iter(
+ plugin, self._publish_context)
# WARNING This is hack fix for optional plugins
if not self._is_publish_plugin_active(plugin):
@@ -2461,14 +2479,14 @@ class PublisherController(BasePublisherController):
plugin, self._publish_context, instance
)
- self._publish_report.add_result(result)
-
exception = result.get("error")
if exception:
+ has_validation_error = False
if (
isinstance(exception, PublishValidationError)
and not self.publish_has_validated
):
+ has_validation_error = True
self._add_validation_error(result)
else:
@@ -2482,6 +2500,10 @@ class PublisherController(BasePublisherController):
self.publish_error_msg = msg
self.publish_has_crashed = True
+ result["is_validation_error"] = has_validation_error
+
+ self._publish_report.add_result(result)
+
self._publish_next_process()
diff --git a/openpype/tools/publisher/publish_report_viewer/widgets.py b/openpype/tools/publisher/publish_report_viewer/widgets.py
index dc449b6b69..02c9b63a4e 100644
--- a/openpype/tools/publisher/publish_report_viewer/widgets.py
+++ b/openpype/tools/publisher/publish_report_viewer/widgets.py
@@ -163,7 +163,11 @@ class ZoomPlainText(QtWidgets.QPlainTextEdit):
super(ZoomPlainText, self).wheelEvent(event)
return
- degrees = float(event.delta()) / 8
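+ # Qt compatibility note (descriptive comment): newer Qt bindings no longer
+ # expose 'QWheelEvent.delta', so 'angleDelta' is preferred when the event
+ # provides it.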
+ if hasattr(event, "angleDelta"):
+ delta = event.angleDelta().y()
+ else:
+ delta = event.delta()
+ degrees = float(delta) / 8
steps = int(ceil(degrees / 5))
self._scheduled_scalings += steps
if (self._scheduled_scalings * steps < 0):
diff --git a/openpype/tools/publisher/widgets/__init__.py b/openpype/tools/publisher/widgets/__init__.py
index f18e6cc61e..87a5f3914a 100644
--- a/openpype/tools/publisher/widgets/__init__.py
+++ b/openpype/tools/publisher/widgets/__init__.py
@@ -18,7 +18,7 @@ from .help_widget import (
from .publish_frame import PublishFrame
from .tabs_widget import PublisherTabsWidget
from .overview_widget import OverviewWidget
-from .validations_widget import ValidationsWidget
+from .report_page import ReportPageWidget
__all__ = (
@@ -40,5 +40,5 @@ __all__ = (
"PublisherTabsWidget",
"OverviewWidget",
- "ValidationsWidget",
+ "ReportPageWidget",
)
diff --git a/openpype/tools/publisher/widgets/border_label_widget.py b/openpype/tools/publisher/widgets/border_label_widget.py
index 5617e159cd..e5693368b1 100644
--- a/openpype/tools/publisher/widgets/border_label_widget.py
+++ b/openpype/tools/publisher/widgets/border_label_widget.py
@@ -14,32 +14,44 @@ class _VLineWidget(QtWidgets.QWidget):
It is expected that parent widget will set width.
"""
- def __init__(self, color, left, parent):
+ def __init__(self, color, line_size, left, parent):
super(_VLineWidget, self).__init__(parent)
self._color = color
self._left = left
+ self._line_size = line_size
+
+ def set_line_size(self, line_size):
+ self._line_size = line_size
def paintEvent(self, event):
if not self.isVisible():
return
- if self._left:
- pos_x = 0
- else:
- pos_x = self.width()
+ pos_x = self._line_size * 0.5
+ if not self._left:
+ pos_x = self.width() - pos_x
+
painter = QtGui.QPainter(self)
painter.setRenderHints(
QtGui.QPainter.Antialiasing
| QtGui.QPainter.SmoothPixmapTransform
)
+
if self._color:
pen = QtGui.QPen(self._color)
else:
pen = painter.pen()
- pen.setWidth(1)
+ pen.setWidth(self._line_size)
painter.setPen(pen)
painter.setBrush(QtCore.Qt.transparent)
- painter.drawLine(pos_x, 0, pos_x, self.height())
+ painter.drawRect(
+ QtCore.QRectF(
+ pos_x,
+ -self._line_size,
+ pos_x + (self.width() * 2),
+ self.height() + (self._line_size * 2)
+ )
+ )
painter.end()
@@ -56,34 +68,46 @@ class _HBottomLineWidget(QtWidgets.QWidget):
It is expected that parent widget will set height and radius.
"""
- def __init__(self, color, parent):
+ def __init__(self, color, line_size, parent):
super(_HBottomLineWidget, self).__init__(parent)
self._color = color
self._radius = 0
+ self._line_size = line_size
def set_radius(self, radius):
self._radius = radius
+ def set_line_size(self, line_size):
+ self._line_size = line_size
+
def paintEvent(self, event):
if not self.isVisible():
return
- rect = QtCore.QRect(
- 0, -self._radius, self.width(), self.height() + self._radius
+ x_offset = self._line_size * 0.5
+ rect = QtCore.QRectF(
+ x_offset,
+ -self._radius,
+ self.width() - (2 * x_offset),
+ (self.height() + self._radius) - x_offset
)
painter = QtGui.QPainter(self)
painter.setRenderHints(
QtGui.QPainter.Antialiasing
| QtGui.QPainter.SmoothPixmapTransform
)
+
if self._color:
pen = QtGui.QPen(self._color)
else:
pen = painter.pen()
- pen.setWidth(1)
+ pen.setWidth(self._line_size)
painter.setPen(pen)
painter.setBrush(QtCore.Qt.transparent)
- painter.drawRoundedRect(rect, self._radius, self._radius)
+ if self._radius:
+ painter.drawRoundedRect(rect, self._radius, self._radius)
+ else:
+ painter.drawRect(rect)
painter.end()
@@ -102,30 +126,38 @@ class _HTopCornerLineWidget(QtWidgets.QWidget):
It is expected that parent widget will set height and radius.
"""
- def __init__(self, color, left_side, parent):
+
+ def __init__(self, color, line_size, left_side, parent):
super(_HTopCornerLineWidget, self).__init__(parent)
self._left_side = left_side
+ self._line_size = line_size
self._color = color
self._radius = 0
def set_radius(self, radius):
self._radius = radius
+ def set_line_size(self, line_size):
+ self._line_size = line_size
+
def paintEvent(self, event):
if not self.isVisible():
return
- pos_y = self.height() / 2
-
+ pos_y = self.height() * 0.5
+ x_offset = self._line_size * 0.5
if self._left_side:
- rect = QtCore.QRect(
- 0, pos_y, self.width() + self._radius, self.height()
+ rect = QtCore.QRectF(
+ x_offset,
+ pos_y,
+ self.width() + self._radius + x_offset,
+ self.height()
)
else:
- rect = QtCore.QRect(
- -self._radius,
+ rect = QtCore.QRectF(
+ (-self._radius),
pos_y,
- self.width() + self._radius,
+ (self.width() + self._radius) - x_offset,
self.height()
)
@@ -138,10 +170,13 @@ class _HTopCornerLineWidget(QtWidgets.QWidget):
pen = QtGui.QPen(self._color)
else:
pen = painter.pen()
- pen.setWidth(1)
+ pen.setWidth(self._line_size)
painter.setPen(pen)
painter.setBrush(QtCore.Qt.transparent)
- painter.drawRoundedRect(rect, self._radius, self._radius)
+ if self._radius:
+ painter.drawRoundedRect(rect, self._radius, self._radius)
+ else:
+ painter.drawRect(rect)
painter.end()
@@ -163,8 +198,10 @@ class BorderedLabelWidget(QtWidgets.QFrame):
if color_value:
color = color_value.get_qcolor()
- top_left_w = _HTopCornerLineWidget(color, True, self)
- top_right_w = _HTopCornerLineWidget(color, False, self)
+ line_size = 1
+
+ top_left_w = _HTopCornerLineWidget(color, line_size, True, self)
+ top_right_w = _HTopCornerLineWidget(color, line_size, False, self)
label_widget = QtWidgets.QLabel(label, self)
@@ -175,10 +212,10 @@ class BorderedLabelWidget(QtWidgets.QFrame):
top_layout.addWidget(label_widget, 0)
top_layout.addWidget(top_right_w, 1)
- left_w = _VLineWidget(color, True, self)
- right_w = _VLineWidget(color, False, self)
+ left_w = _VLineWidget(color, line_size, True, self)
+ right_w = _VLineWidget(color, line_size, False, self)
- bottom_w = _HBottomLineWidget(color, self)
+ bottom_w = _HBottomLineWidget(color, line_size, self)
center_layout = QtWidgets.QHBoxLayout()
center_layout.setContentsMargins(5, 5, 5, 5)
@@ -201,6 +238,7 @@ class BorderedLabelWidget(QtWidgets.QFrame):
self._widget = None
self._radius = 0
+ self._line_size = line_size
self._top_left_w = top_left_w
self._top_right_w = top_right_w
@@ -216,14 +254,38 @@ class BorderedLabelWidget(QtWidgets.QFrame):
value, value, value, value
)
+ def set_line_size(self, line_size):
+ if self._line_size == line_size:
+ return
+ self._line_size = line_size
+ for widget in (
+ self._top_left_w,
+ self._top_right_w,
+ self._left_w,
+ self._right_w,
+ self._bottom_w
+ ):
+ widget.set_line_size(line_size)
+ self._recalculate_sizes()
+
def showEvent(self, event):
super(BorderedLabelWidget, self).showEvent(event)
+ self._recalculate_sizes()
+ def _recalculate_sizes(self):
height = self._label_widget.height()
- radius = (height + (height % 2)) / 2
+ radius = int((height + (height % 2)) / 2)
self._radius = radius
- side_width = 1 + radius
+ radius_size = self._line_size + 1
+ if radius_size < radius:
+ radius_size = radius
+
+ if radius:
+ side_width = self._line_size + radius
+ else:
+ side_width = self._line_size + 1
+
# Don't use fixed width/height as that would also set
# the other size (when fixed width is set, fixed height is
# set as well).
@@ -231,8 +293,8 @@ class BorderedLabelWidget(QtWidgets.QFrame):
self._left_w.setMaximumWidth(side_width)
self._right_w.setMinimumWidth(side_width)
self._right_w.setMaximumWidth(side_width)
- self._bottom_w.setMinimumHeight(radius)
- self._bottom_w.setMaximumHeight(radius)
+ self._bottom_w.setMinimumHeight(radius_size)
+ self._bottom_w.setMaximumHeight(radius_size)
self._bottom_w.set_radius(radius)
self._top_right_w.set_radius(radius)
self._top_left_w.set_radius(radius)
diff --git a/openpype/tools/publisher/widgets/card_view_widgets.py b/openpype/tools/publisher/widgets/card_view_widgets.py
index 13715bc73c..eae8e0420a 100644
--- a/openpype/tools/publisher/widgets/card_view_widgets.py
+++ b/openpype/tools/publisher/widgets/card_view_widgets.py
@@ -93,7 +93,7 @@ class BaseGroupWidget(QtWidgets.QWidget):
return self._group
def get_widget_by_item_id(self, item_id):
- """Get instance widget by it's id."""
+ """Get instance widget by its id."""
return self._widgets_by_id.get(item_id)
@@ -702,8 +702,8 @@ class InstanceCardView(AbstractInstanceView):
for group_name in sorted_group_names:
group_icons = {
- idenfier: self._controller.get_creator_icon(idenfier)
- for idenfier in identifiers_by_group[group_name]
+ identifier: self._controller.get_creator_icon(identifier)
+ for identifier in identifiers_by_group[group_name]
}
if group_name in self._widgets_by_group:
group_widget = self._widgets_by_group[group_name]
diff --git a/openpype/tools/publisher/widgets/images/error.png b/openpype/tools/publisher/widgets/images/error.png
new file mode 100644
index 0000000000..7b09a57d7d
Binary files /dev/null and b/openpype/tools/publisher/widgets/images/error.png differ
diff --git a/openpype/tools/publisher/widgets/images/success.png b/openpype/tools/publisher/widgets/images/success.png
new file mode 100644
index 0000000000..291b442df4
Binary files /dev/null and b/openpype/tools/publisher/widgets/images/success.png differ
diff --git a/openpype/tools/publisher/widgets/images/warning.png b/openpype/tools/publisher/widgets/images/warning.png
index 76d1e34b6c..531f62b741 100644
Binary files a/openpype/tools/publisher/widgets/images/warning.png and b/openpype/tools/publisher/widgets/images/warning.png differ
diff --git a/openpype/tools/publisher/widgets/publish_frame.py b/openpype/tools/publisher/widgets/publish_frame.py
index e4e6740532..d423f97047 100644
--- a/openpype/tools/publisher/widgets/publish_frame.py
+++ b/openpype/tools/publisher/widgets/publish_frame.py
@@ -310,7 +310,7 @@ class PublishFrame(QtWidgets.QWidget):
self._set_success_property()
self._set_progress_visibility(True)
- self._main_label.setText("Hit publish (play button)! If you want")
+ self._main_label.setText("")
self._message_label_top.setText("")
self._reset_btn.setEnabled(True)
@@ -331,6 +331,7 @@ class PublishFrame(QtWidgets.QWidget):
self._set_success_property(3)
self._set_progress_visibility(True)
self._set_main_label("Publishing...")
+ self._message_label_top.setText("")
self._reset_btn.setEnabled(False)
self._stop_btn.setEnabled(True)
@@ -468,45 +469,14 @@ class PublishFrame(QtWidgets.QWidget):
widget.setProperty("state", state)
widget.style().polish(widget)
- def _copy_report(self):
- logs = self._controller.get_publish_report()
- logs_string = json.dumps(logs, indent=4)
-
- mime_data = QtCore.QMimeData()
- mime_data.setText(logs_string)
- QtWidgets.QApplication.instance().clipboard().setMimeData(
- mime_data
- )
-
- def _export_report(self):
- default_filename = "publish-report-{}".format(
- time.strftime("%y%m%d-%H-%M")
- )
- default_filepath = os.path.join(
- os.path.expanduser("~"),
- default_filename
- )
- new_filepath, ext = QtWidgets.QFileDialog.getSaveFileName(
- self, "Save report", default_filepath, ".json"
- )
- if not ext or not new_filepath:
- return
-
- logs = self._controller.get_publish_report()
- full_path = new_filepath + ext
- dir_path = os.path.dirname(full_path)
- if not os.path.exists(dir_path):
- os.makedirs(dir_path)
-
- with open(full_path, "w") as file_stream:
- json.dump(logs, file_stream)
-
def _on_report_triggered(self, identifier):
if identifier == "export_report":
- self._export_report()
+ self._controller.event_system.emit(
+ "export_report.request", {}, "publish_frame")
elif identifier == "copy_report":
- self._copy_report()
+ self._controller.event_system.emit(
+ "copy_report.request", {}, "publish_frame")
elif identifier == "go_to_report":
self.details_page_requested.emit()
diff --git a/openpype/tools/publisher/widgets/report_page.py b/openpype/tools/publisher/widgets/report_page.py
new file mode 100644
index 0000000000..50a619f0a8
--- /dev/null
+++ b/openpype/tools/publisher/widgets/report_page.py
@@ -0,0 +1,1876 @@
+# -*- coding: utf-8 -*-
+import collections
+import logging
+
+try:
+ import commonmark
+except Exception:
+ commonmark = None
+
+from qtpy import QtWidgets, QtCore, QtGui
+
+from openpype.style import get_objected_colors
+from openpype.tools.utils import (
+ BaseClickableFrame,
+ ClickableFrame,
+ ExpandingTextEdit,
+ FlowLayout,
+ ClassicExpandBtn,
+ paint_image_with_color,
+ SeparatorWidget,
+)
+from .widgets import IconValuePixmapLabel
+from .icons import (
+ get_pixmap,
+ get_image,
+)
+from ..constants import (
+ INSTANCE_ID_ROLE,
+ CONTEXT_ID,
+ CONTEXT_LABEL,
+)
+
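+# Bit flags for log visibility filtering. They are combined into a single
+# bitmask (see 'LogsWithIconsView.set_log_filters') so individual log types
+# and levels can be toggled independently.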
+LOG_DEBUG_VISIBLE = 1 << 0
+LOG_INFO_VISIBLE = 1 << 1
+LOG_WARNING_VISIBLE = 1 << 2
+LOG_ERROR_VISIBLE = 1 << 3
+LOG_CRITICAL_VISIBLE = 1 << 4
+ERROR_VISIBLE = 1 << 5
+INFO_VISIBLE = 1 << 6
+
+
+class VerticalScrollArea(QtWidgets.QScrollArea):
+ """Scroll area for validation error titles.
+
+ The biggest difference is that the scroll area has the scroll bar on the
+ left side and resizing the content will also resize the scroll area itself.
+
+ Resize is deferred by 100ms because sizes and visibility of scroll bars
+ are not yet propagated at the moment of the resize event.
+ """
+
+ def __init__(self, *args, **kwargs):
+ super(VerticalScrollArea, self).__init__(*args, **kwargs)
+
+ self.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)
+ self.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAsNeeded)
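+ # Right-to-left layout direction moves the vertical scroll bar to the
+ # left side of the scroll area.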
+ self.setLayoutDirection(QtCore.Qt.RightToLeft)
+
+ self.setAttribute(QtCore.Qt.WA_TranslucentBackground)
+ # Background of scrollbar will be transparent
+ scrollbar_bg = self.verticalScrollBar().parent()
+ if scrollbar_bg:
+ scrollbar_bg.setAttribute(QtCore.Qt.WA_TranslucentBackground)
+ self.setViewportMargins(0, 0, 0, 0)
+
+ self.verticalScrollBar().installEventFilter(self)
+
+ # Timer with 100ms offset after changing size
+ size_changed_timer = QtCore.QTimer()
+ size_changed_timer.setInterval(100)
+ size_changed_timer.setSingleShot(True)
+
+ size_changed_timer.timeout.connect(self._on_timer_timeout)
+ self._size_changed_timer = size_changed_timer
+
+ def setVerticalScrollBar(self, widget):
+ old_widget = self.verticalScrollBar()
+ if old_widget:
+ old_widget.removeEventFilter(self)
+
+ super(VerticalScrollArea, self).setVerticalScrollBar(widget)
+ if widget:
+ widget.installEventFilter(self)
+
+ def setWidget(self, widget):
+ old_widget = self.widget()
+ if old_widget:
+ old_widget.removeEventFilter(self)
+
+ super(VerticalScrollArea, self).setWidget(widget)
+ if widget:
+ widget.installEventFilter(self)
+
+ def _on_timer_timeout(self):
+ width = self.widget().width()
+ if self.verticalScrollBar().isVisible():
+ width += self.verticalScrollBar().width()
+ self.setMinimumWidth(width)
+
+ def eventFilter(self, obj, event):
+ if (
+ event.type() == QtCore.QEvent.Resize
+ and (obj is self.widget() or obj is self.verticalScrollBar())
+ ):
+ self._size_changed_timer.start()
+ return super(VerticalScrollArea, self).eventFilter(obj, event)
+
+
+# --- Publish actions widget ---
+class ActionButton(BaseClickableFrame):
+ """Plugin's action callback button.
+
+ Action may have label or icon or both.
+
+ Args:
+ plugin_action_item (PublishPluginActionItem): Action item that can be
+ triggered by its id.
+ """
+
+ action_clicked = QtCore.Signal(str, str)
+
+ def __init__(self, plugin_action_item, parent):
+ super(ActionButton, self).__init__(parent)
+
+ self.setObjectName("ValidationActionButton")
+
+ self.plugin_action_item = plugin_action_item
+
+ action_label = plugin_action_item.label
+ action_icon = plugin_action_item.icon
+ label_widget = QtWidgets.QLabel(action_label, self)
+ icon_label = None
+ if action_icon:
+ icon_label = IconValuePixmapLabel(action_icon, self)
+
+ layout = QtWidgets.QHBoxLayout(self)
+ layout.setContentsMargins(5, 0, 5, 0)
+ layout.addWidget(label_widget, 1)
+ if icon_label:
+ layout.addWidget(icon_label, 0)
+
+ self.setSizePolicy(
+ QtWidgets.QSizePolicy.Minimum,
+ self.sizePolicy().verticalPolicy()
+ )
+
+ def _mouse_release_callback(self):
+ self.action_clicked.emit(
+ self.plugin_action_item.plugin_id,
+ self.plugin_action_item.action_id
+ )
+
+
+class ValidateActionsWidget(QtWidgets.QFrame):
+ """Wrapper widget for plugin actions.
+
+ Change actions based on selected validation error.
+ """
+
+ def __init__(self, controller, parent):
+ super(ValidateActionsWidget, self).__init__(parent)
+
+ self.setAttribute(QtCore.Qt.WA_TranslucentBackground)
+
+ content_widget = QtWidgets.QWidget(self)
+ content_layout = FlowLayout(content_widget)
+ content_layout.setContentsMargins(0, 0, 0, 0)
+
+ layout = QtWidgets.QHBoxLayout(self)
+ layout.setContentsMargins(0, 0, 0, 0)
+ layout.addWidget(content_widget)
+
+ self._controller = controller
+ self._content_widget = content_widget
+ self._content_layout = content_layout
+
+ self._actions_mapping = {}
+
+ self._visible_mode = True
+
+ def _update_visibility(self):
+ self.setVisible(
+ self._visible_mode
+ and self._content_layout.count() > 0
+ )
+
+ def set_visible_mode(self, visible):
+ if self._visible_mode is visible:
+ return
+ self._visible_mode = visible
+ self._update_visibility()
+
+ def _clear(self):
+ """Remove actions from widget."""
+ while self._content_layout.count():
+ item = self._content_layout.takeAt(0)
+ widget = item.widget()
+ if widget:
+ widget.setVisible(False)
+ widget.deleteLater()
+ self._actions_mapping = {}
+
+ def set_error_info(self, error_info):
+ """Set selected plugin and show it's actions.
+
+ Clears current actions from widget and recreate them from the plugin.
+
+ Args:
+ Dict[str, Any]: Object holding error items, title and possible
+ actions to run.
+ """
+
+ self._clear()
+
+ if not error_info:
+ self.setVisible(False)
+ return
+
+ plugin_action_items = error_info["plugin_action_items"]
+ for plugin_action_item in plugin_action_items:
+ if not plugin_action_item.active:
+ continue
+
+ if plugin_action_item.on_filter not in ("failed", "all"):
+ continue
+
+ action_id = plugin_action_item.action_id
+ self._actions_mapping[action_id] = plugin_action_item
+
+ action_btn = ActionButton(plugin_action_item, self._content_widget)
+ action_btn.action_clicked.connect(self._on_action_click)
+ self._content_layout.addWidget(action_btn)
+
+ self._update_visibility()
+
+ def _on_action_click(self, plugin_id, action_id):
+ self._controller.run_action(plugin_id, action_id)
+
+
+# --- Validation error titles ---
+class ValidationErrorInstanceList(QtWidgets.QListView):
+ """List of publish instances that caused a validation error.
+
+ Instances are collected per plugin's validation error title.
+ """
+ def __init__(self, *args, **kwargs):
+ super(ValidationErrorInstanceList, self).__init__(*args, **kwargs)
+
+ self.setObjectName("ValidationErrorInstanceList")
+
+ self.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)
+ self.setSelectionMode(QtWidgets.QAbstractItemView.ExtendedSelection)
+
+ def minimumSizeHint(self):
+ return self.sizeHint()
+
+ def sizeHint(self):
+ result = super(ValidationErrorInstanceList, self).sizeHint()
+ row_count = self.model().rowCount()
+ height = 0
+ if row_count > 0:
+ height = self.sizeHintForRow(0) * row_count
+ result.setHeight(height)
+ return result
+
+
+class ValidationErrorTitleWidget(QtWidgets.QWidget):
+ """Title of validation error.
+
+ Widget is used as a radio button so it requires clickable functionality
+ and changes its style on selection/deselection.
+
+ Has a toggle button to show/hide the instances on which the validation
+ error happened, if there is a list (a validation error may happen on
+ context).
+ """
+
+ selected = QtCore.Signal(str)
+ instance_changed = QtCore.Signal(str)
+
+ def __init__(self, title_id, error_info, parent):
+ super(ValidationErrorTitleWidget, self).__init__(parent)
+
+ self._title_id = title_id
+ self._error_info = error_info
+ self._selected = False
+
+ title_frame = ClickableFrame(self)
+ title_frame.setObjectName("ValidationErrorTitleFrame")
+
+ toggle_instance_btn = QtWidgets.QToolButton(title_frame)
+ toggle_instance_btn.setObjectName("ArrowBtn")
+ toggle_instance_btn.setArrowType(QtCore.Qt.RightArrow)
+ toggle_instance_btn.setMaximumWidth(14)
+
+ label_widget = QtWidgets.QLabel(error_info["title"], title_frame)
+
+ title_frame_layout = QtWidgets.QHBoxLayout(title_frame)
+ title_frame_layout.addWidget(label_widget, 1)
+ title_frame_layout.addWidget(toggle_instance_btn, 0)
+
+ instances_model = QtGui.QStandardItemModel()
+
+ instance_ids = []
+
+ items = []
+ context_validation = False
+ for error_item in error_info["error_items"]:
+ context_validation = error_item.context_validation
+ if context_validation:
+ toggle_instance_btn.setArrowType(QtCore.Qt.NoArrow)
+ instance_ids.append(CONTEXT_ID)
+ # Add fake item to have minimum size hint of view widget
+ items.append(QtGui.QStandardItem(CONTEXT_LABEL))
+ continue
+
+ label = error_item.instance_label
+ item = QtGui.QStandardItem(label)
+ item.setFlags(
+ QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsSelectable
+ )
+ item.setData(label, QtCore.Qt.ToolTipRole)
+ item.setData(error_item.instance_id, INSTANCE_ID_ROLE)
+ items.append(item)
+ instance_ids.append(error_item.instance_id)
+
+ if items:
+ root_item = instances_model.invisibleRootItem()
+ root_item.appendRows(items)
+
+ instances_view = ValidationErrorInstanceList(self)
+ instances_view.setModel(instances_model)
+
+ self.setLayoutDirection(QtCore.Qt.LeftToRight)
+
+ view_widget = QtWidgets.QWidget(self)
+ view_layout = QtWidgets.QHBoxLayout(view_widget)
+ view_layout.setContentsMargins(0, 0, 0, 0)
+ view_layout.setSpacing(0)
+ view_layout.addSpacing(14)
+ view_layout.addWidget(instances_view, 0)
+
+ layout = QtWidgets.QVBoxLayout(self)
+ layout.setSpacing(0)
+ layout.setContentsMargins(0, 0, 0, 0)
+ layout.addWidget(title_frame, 0)
+ layout.addWidget(view_widget, 0)
+ view_widget.setVisible(False)
+
+ if not context_validation:
+ toggle_instance_btn.clicked.connect(self._on_toggle_btn_click)
+
+ title_frame.clicked.connect(self._mouse_release_callback)
+ instances_view.selectionModel().selectionChanged.connect(
+ self._on_selection_change
+ )
+
+ self._title_frame = title_frame
+
+ self._toggle_instance_btn = toggle_instance_btn
+
+ self._view_widget = view_widget
+
+ self._instances_model = instances_model
+ self._instances_view = instances_view
+
+ self._context_validation = context_validation
+
+ self._instance_ids = instance_ids
+ self._expanded = False
+
+ def sizeHint(self):
+ result = super(ValidationErrorTitleWidget, self).sizeHint()
+ expected_width = max(
+ self._view_widget.minimumSizeHint().width(),
+ self._view_widget.sizeHint().width()
+ )
+
+ if expected_width < 200:
+ expected_width = 200
+
+ if result.width() < expected_width:
+ result.setWidth(expected_width)
+
+ return result
+
+ def minimumSizeHint(self):
+ return self.sizeHint()
+
+ def _mouse_release_callback(self):
+ """Mark this widget as selected on click."""
+
+ self.set_selected(True)
+
+ @property
+ def is_selected(self):
+ """Is widget marked a selected.
+
+ Returns:
+ bool: Item is selected or not.
+ """
+
+ return self._selected
+
+ @property
+ def id(self):
+ return self._title_id
+
+ def _change_style_property(self, selected):
+ """Change style of widget based on selection."""
+
+ value = "1" if selected else ""
+ self._title_frame.setProperty("selected", value)
+ self._title_frame.style().polish(self._title_frame)
+
+ def set_selected(self, selected=None):
+ """Change selected state of widget."""
+
+ if selected is None:
+ selected = not self._selected
+
+ # Clear instance view selection on deselect
+ if not selected:
+ self._instances_view.clearSelection()
+
+ # Skip if has same value
+ if selected == self._selected:
+ return
+
+ self._selected = selected
+ self._change_style_property(selected)
+ if selected:
+ self.selected.emit(self._title_id)
+ self._set_expanded(True)
+
+ def _on_toggle_btn_click(self):
+ """Show/hide instances list."""
+
+ self._set_expanded()
+
+ def _set_expanded(self, expanded=None):
+ if expanded is None:
+ expanded = not self._expanded
+
+ elif expanded is self._expanded:
+ return
+
+ if expanded and self._context_validation:
+ return
+
+ self._expanded = expanded
+ self._view_widget.setVisible(expanded)
+ if expanded:
+ self._toggle_instance_btn.setArrowType(QtCore.Qt.DownArrow)
+ else:
+ self._toggle_instance_btn.setArrowType(QtCore.Qt.RightArrow)
+
+ def _on_selection_change(self):
+ self.instance_changed.emit(self._title_id)
+
+ def get_selected_instances(self):
+ if self._context_validation:
+ return [CONTEXT_ID]
+ sel_model = self._instances_view.selectionModel()
+ return [
+ index.data(INSTANCE_ID_ROLE)
+ for index in sel_model.selectedIndexes()
+ if index.isValid()
+ ]
+
+ def get_available_instances(self):
+ return list(self._instance_ids)
+
+
+class ValidationArtistMessage(QtWidgets.QWidget):
+ def __init__(self, message, parent):
+ super(ValidationArtistMessage, self).__init__(parent)
+
+ artist_msg_label = QtWidgets.QLabel(message, self)
+ artist_msg_label.setAlignment(QtCore.Qt.AlignCenter)
+
+ main_layout = QtWidgets.QHBoxLayout(self)
+ main_layout.setContentsMargins(0, 0, 0, 0)
+ main_layout.addWidget(
+ artist_msg_label, 1, QtCore.Qt.AlignCenter
+ )
+
+
+class ValidationErrorsView(QtWidgets.QWidget):
+ selection_changed = QtCore.Signal()
+
+ def __init__(self, parent):
+ super(ValidationErrorsView, self).__init__(parent)
+
+ errors_scroll = VerticalScrollArea(self)
+ errors_scroll.setWidgetResizable(True)
+
+ errors_widget = QtWidgets.QWidget(errors_scroll)
+ errors_widget.setAttribute(QtCore.Qt.WA_TranslucentBackground)
+
+ errors_scroll.setWidget(errors_widget)
+
+ errors_layout = QtWidgets.QVBoxLayout(errors_widget)
+ errors_layout.setContentsMargins(0, 0, 0, 0)
+
+ layout = QtWidgets.QVBoxLayout(self)
+ layout.addWidget(errors_scroll, 1)
+
+ self._errors_widget = errors_widget
+ self._errors_layout = errors_layout
+ self._title_widgets = {}
+ self._previous_select = None
+
+ def _clear(self):
+ """Delete all dynamic widgets and hide all wrappers."""
+
+ self._title_widgets = {}
+ self._previous_select = None
+ while self._errors_layout.count():
+ item = self._errors_layout.takeAt(0)
+ widget = item.widget()
+ if widget:
+ widget.deleteLater()
+
+ def set_errors(self, grouped_error_items):
+ """Set errors into context and created titles.
+
+ Args:
+ validation_error_report (PublishValidationErrorsReport): Report
+ with information about validation errors and publish plugin
+ actions.
+ """
+
+ self._clear()
+
+ first_id = None
+ for title_item in grouped_error_items:
+ title_id = title_item["id"]
+ if first_id is None:
+ first_id = title_id
+ widget = ValidationErrorTitleWidget(title_id, title_item, self)
+ widget.selected.connect(self._on_select)
+ widget.instance_changed.connect(self._on_instance_change)
+ self._errors_layout.addWidget(widget)
+ self._title_widgets[title_id] = widget
+
+ self._errors_layout.addStretch(1)
+
+ if first_id:
+ self._title_widgets[first_id].set_selected(True)
+ else:
+ self.selection_changed.emit()
+
+ self.updateGeometry()
+
+ def _on_select(self, title_id):
+ if self._previous_select:
+ if self._previous_select.id == title_id:
+ return
+ self._previous_select.set_selected(False)
+
+ self._previous_select = self._title_widgets[title_id]
+ self.selection_changed.emit()
+
+ def _on_instance_change(self, title_id):
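+ # Changing the instance selection under a non-selected title also
+ # selects that title, otherwise only the selection change is emitted.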
+ if self._previous_select and self._previous_select.id != title_id:
+ self._title_widgets[title_id].set_selected(True)
+ else:
+ self.selection_changed.emit()
+
+ def get_selected_items(self):
+ if not self._previous_select:
+ return None, []
+
+ title_id = self._previous_select.id
+ instance_ids = self._previous_select.get_selected_instances()
+ if not instance_ids:
+ instance_ids = self._previous_select.get_available_instances()
+ return title_id, instance_ids
+
+
+# ----- Publish instance report -----
+class _InstanceItem:
+ """Publish instance item for report UI.
+
+ Contains only data related to an instance in publishing. Implements
+ sorting methods and prepares derived information, e.g. whether the
+ instance logged errors or warnings.
+ """
+
+ _attrs = (
+ "creator_identifier",
+ "family",
+ "label",
+ "name",
+ )
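+ # Attributes are compared in this order by the rich comparison methods
+ # below; 'None' values are sorted after non-None values in '__lt__'.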
+
+ def __init__(
+ self,
+ instance_id,
+ creator_identifier,
+ family,
+ name,
+ label,
+ exists,
+ logs,
+ errored,
+ warned
+ ):
+ self.id = instance_id
+ self.creator_identifier = creator_identifier
+ self.family = family
+ self.name = name
+ self.label = label
+ self.exists = exists
+ self.logs = logs
+ self.errored = errored
+ self.warned = warned
+
+ def __eq__(self, other):
+ for attr in self._attrs:
+ if getattr(self, attr) != getattr(other, attr):
+ return False
+ return True
+
+ def __ne__(self, other):
+ return not self.__eq__(other)
+
+ def __gt__(self, other):
+ for attr in self._attrs:
+ self_value = getattr(self, attr)
+ other_value = getattr(other, attr)
+ if self_value == other_value:
+ continue
+ values = [self_value, other_value]
+ values.sort()
+ return values[0] == other_value
+ return None
+
+ def __lt__(self, other):
+ for attr in self._attrs:
+ self_value = getattr(self, attr)
+ other_value = getattr(other, attr)
+ if self_value == other_value:
+ continue
+ if self_value is None:
+ return False
+ if other_value is None:
+ return True
+ values = [self_value, other_value]
+ values.sort()
+ return values[0] == self_value
+ return None
+
+ def __ge__(self, other):
+ if self == other:
+ return True
+ return self.__gt__(other)
+
+ def __le__(self, other):
+ if self == other:
+ return True
+ return self.__lt__(other)
+
+ @classmethod
+ def from_report(cls, instance_id, instance_data, logs):
+ errored, warned = cls.extract_basic_log_info(logs)
+
+ return cls(
+ instance_id,
+ instance_data["creator_identifier"],
+ instance_data["family"],
+ instance_data["name"],
+ instance_data["label"],
+ instance_data["exists"],
+ logs,
+ errored,
+ warned,
+ )
+
+ @classmethod
+ def create_context_item(cls, context_label, logs):
+ errored, warned = cls.extract_basic_log_info(logs)
+ return cls(
+ CONTEXT_ID,
+ None,
+ "",
+ CONTEXT_LABEL,
+ context_label,
+ True,
+ logs,
+ errored,
+ warned
+ )
+
+ @staticmethod
+ def extract_basic_log_info(logs):
+ warned = False
+ errored = False
+ for log in logs:
+ if log["type"] == "error":
+ errored = True
+ elif log["type"] == "record":
+ level_no = log["levelno"]
+ if level_no and level_no >= logging.WARNING:
+ warned = True
+
+ if warned and errored:
+ break
+ return errored, warned
+
+
+class FamilyGroupLabel(QtWidgets.QWidget):
+ def __init__(self, family, parent):
+ super(FamilyGroupLabel, self).__init__(parent)
+
+ self.setLayoutDirection(QtCore.Qt.LeftToRight)
+
+ label_widget = QtWidgets.QLabel(family, self)
+
+ line_widget = QtWidgets.QWidget(self)
+ line_widget.setObjectName("Separator")
+ line_widget.setMinimumHeight(2)
+ line_widget.setMaximumHeight(2)
+
+ main_layout = QtWidgets.QHBoxLayout(self)
+ main_layout.setAlignment(QtCore.Qt.AlignVCenter)
+ main_layout.setSpacing(10)
+ main_layout.setContentsMargins(0, 0, 0, 0)
+ main_layout.addWidget(label_widget, 0)
+ main_layout.addWidget(line_widget, 1)
+
+
+class PublishInstanceCardWidget(BaseClickableFrame):
+ selection_requested = QtCore.Signal(str)
+
+ _warning_pix = None
+ _error_pix = None
+ _success_pix = None
+ _in_progress_pix = None
+
+ def __init__(self, instance, icon, publish_finished, parent):
+ super(PublishInstanceCardWidget, self).__init__(parent)
+
+ self.setObjectName("CardViewWidget")
+
+ icon_widget = IconValuePixmapLabel(icon, self)
+ icon_widget.setObjectName("FamilyIconLabel")
+
+ label_widget = QtWidgets.QLabel(instance.label, self)
+
+ if instance.errored:
+ state_pix = self.get_error_pix()
+ elif instance.warned:
+ state_pix = self.get_warning_pix()
+ elif publish_finished:
+ state_pix = self.get_success_pix()
+ else:
+ state_pix = self.get_in_progress_pix()
+
+ state_label = IconValuePixmapLabel(state_pix, self)
+
+ layout = QtWidgets.QHBoxLayout(self)
+ layout.setContentsMargins(10, 7, 10, 7)
+ layout.addWidget(icon_widget, 0)
+ layout.addWidget(label_widget, 1)
+ layout.addWidget(state_label, 0)
+
+ # Change direction -> parent is a scroll area with the scroll bar on
+ # the left side
+ self.setLayoutDirection(QtCore.Qt.LeftToRight)
+
+ self._id = instance.id
+
+ self._selected = False
+
+ self._update_style_state()
+
+ @classmethod
+ def _prepare_pixes(cls):
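+ # State pixmaps are cached on the class so they are created only once
+ # and shared by all card widgets.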
+ publisher_colors = get_objected_colors("publisher")
+ cls._warning_pix = paint_image_with_color(
+ get_image("warning"),
+ publisher_colors["warning"].get_qcolor()
+ )
+ cls._error_pix = paint_image_with_color(
+ get_image("error"),
+ publisher_colors["error"].get_qcolor()
+ )
+ cls._success_pix = paint_image_with_color(
+ get_image("success"),
+ publisher_colors["success"].get_qcolor()
+ )
+ cls._in_progress_pix = paint_image_with_color(
+ get_image("success"),
+ publisher_colors["progress"].get_qcolor()
+ )
+
+ @classmethod
+ def get_warning_pix(cls):
+ if cls._warning_pix is None:
+ cls._prepare_pixes()
+ return cls._warning_pix
+
+ @classmethod
+ def get_error_pix(cls):
+ if cls._error_pix is None:
+ cls._prepare_pixes()
+ return cls._error_pix
+
+ @classmethod
+ def get_success_pix(cls):
+ if cls._success_pix is None:
+ cls._prepare_pixes()
+ return cls._success_pix
+
+ @classmethod
+ def get_in_progress_pix(cls):
+ if cls._in_progress_pix is None:
+ cls._prepare_pixes()
+ return cls._in_progress_pix
+
+ @property
+ def id(self):
+ """Id of card.
+
+ Returns:
+ str: Id of item.
+ """
+
+ return self._id
+
+ @property
+ def is_selected(self):
+ """Is card selected.
+
+ Returns:
+ bool: Item widget is marked as selected.
+ """
+
+ return self._selected
+
+ def set_selected(self, selected):
+ """Set card as selected.
+
+ Args:
+ selected (bool): Item should be marked as selected.
+ """
+
+ if selected == self._selected:
+ return
+ self._selected = selected
+ self._update_style_state()
+
+ def _update_style_state(self):
+ state = ""
+ if self._selected:
+ state = "selected"
+
+ self.setProperty("state", state)
+ self.style().polish(self)
+
+ def _mouse_release_callback(self):
+ """Trigger selected signal."""
+
+ self.selection_requested.emit(self.id)
+
+
+class PublishInstancesViewWidget(QtWidgets.QWidget):
+ # Sane minimum width of instance cards - size calculated using font metrics
+ _min_width_measure_string = 24 * "O"
+ selection_changed = QtCore.Signal()
+
+ def __init__(self, controller, parent):
+ super(PublishInstancesViewWidget, self).__init__(parent)
+
+ scroll_area = VerticalScrollArea(self)
+ scroll_area.setWidgetResizable(True)
+ scroll_area.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)
+ scroll_area.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAsNeeded)
+ scrollbar_bg = scroll_area.verticalScrollBar().parent()
+ if scrollbar_bg:
+ scrollbar_bg.setAttribute(QtCore.Qt.WA_TranslucentBackground)
+ scroll_area.setViewportMargins(0, 0, 0, 0)
+
+ instance_view = QtWidgets.QWidget(scroll_area)
+
+ scroll_area.setWidget(instance_view)
+
+ instance_layout = QtWidgets.QVBoxLayout(instance_view)
+ instance_layout.setContentsMargins(0, 0, 0, 0)
+ instance_layout.addStretch(1)
+
+ layout = QtWidgets.QVBoxLayout(self)
+ layout.setContentsMargins(0, 0, 0, 0)
+ layout.addWidget(scroll_area, 1)
+
+ self._controller = controller
+ self._scroll_area = scroll_area
+ self._instance_view = instance_view
+ self._instance_layout = instance_layout
+
+ self._context_widget = None
+
+ self._widgets_by_instance_id = {}
+ self._group_widgets = []
+ self._ordered_widgets = []
+
+ self._explicitly_selected_instance_ids = []
+
+ self.setSizePolicy(
+ QtWidgets.QSizePolicy.Minimum,
+ self.sizePolicy().verticalPolicy()
+ )
+
+ def sizeHint(self):
+ """Modify sizeHint based on visibility of scroll bars."""
+ # Calculate width hint by content widget and vertical scroll bar
+ scroll_bar = self._scroll_area.verticalScrollBar()
+ view_size = self._instance_view.sizeHint().width()
+ fm = self._instance_view.fontMetrics()
+ width = (
+ max(view_size, fm.width(self._min_width_measure_string))
+ + scroll_bar.sizeHint().width()
+ )
+
+ result = super(PublishInstancesViewWidget, self).sizeHint()
+ result.setWidth(width)
+ return result
+
+ def _get_selected_widgets(self):
+ return [
+ widget
+ for widget in self._ordered_widgets
+ if widget.is_selected
+ ]
+
+ def get_selected_instance_ids(self):
+ return [
+ widget.id
+ for widget in self._get_selected_widgets()
+ ]
+
+ def clear(self):
+ """Remove actions from widget."""
+ while self._instance_layout.count():
+ item = self._instance_layout.takeAt(0)
+ widget = item.widget()
+ if widget:
+ widget.setVisible(False)
+ widget.deleteLater()
+ self._ordered_widgets = []
+ self._group_widgets = []
+ self._widgets_by_instance_id = {}
+
+ def update_instances(self, instance_items):
+ self.clear()
+ identifiers = {
+ instance_item.creator_identifier
+ for instance_item in instance_items
+ }
+ identifier_icons = {
+ identifier: self._controller.get_creator_icon(identifier)
+ for identifier in identifiers
+ }
+
+ widgets = []
+ group_widgets = []
+
+ publish_finished = (
+ self._controller.publish_has_crashed
+ or self._controller.publish_has_validation_errors
+ or self._controller.publish_has_finished
+ )
+ instances_by_family = collections.defaultdict(list)
+ for instance_item in instance_items:
+ if not instance_item.exists:
+ continue
+ instances_by_family[instance_item.family].append(instance_item)
+
+ sorted_by_family = sorted(
+ instances_by_family.items(), key=lambda i: i[0]
+ )
+ for family, instance_items in sorted_by_family:
+ # Only instance without family is context
+ if family:
+ group_widget = FamilyGroupLabel(family, self._instance_view)
+ self._instance_layout.addWidget(group_widget, 0)
+ group_widgets.append(group_widget)
+
+ sorted_items = sorted(instance_items, key=lambda i: i.label)
+ for instance_item in sorted_items:
+ icon = identifier_icons[instance_item.creator_identifier]
+
+ widget = PublishInstanceCardWidget(
+ instance_item, icon, publish_finished, self._instance_view
+ )
+ widget.selection_requested.connect(self._on_selection_request)
+ self._instance_layout.addWidget(widget, 0)
+
+ widgets.append(widget)
+ self._widgets_by_instance_id[widget.id] = widget
+ self._instance_layout.addStretch(1)
+ self._ordered_widgets = widgets
+ self._group_widgets = group_widgets
+
+ def _on_selection_request(self, instance_id):
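+ # Single selection behavior: clicking a selected card deselects it,
+ # otherwise the clicked card is selected and the previous selection
+ # is cleared.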
+ instance_widget = self._widgets_by_instance_id[instance_id]
+ selected_widgets = self._get_selected_widgets()
+ if instance_widget in selected_widgets:
+ instance_widget.set_selected(False)
+ else:
+ instance_widget.set_selected(True)
+ for widget in selected_widgets:
+ widget.set_selected(False)
+ self.selection_changed.emit()
+
+
+class LogIconFrame(QtWidgets.QFrame):
+ """Draw log item icon next to message.
+
+ Todos:
+ Paint event could be slow, maybe we could cache the image into pixmaps
+ so each item does not have to redraw it again.
+ """
+
+ info_color = QtGui.QColor("#ffffff")
+ error_color = QtGui.QColor("#ff4a4a")
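+ # Dot colors for log records keyed by Python logging level
+ # (10=DEBUG, 20=INFO, 30=WARNING, 40=ERROR, 50=CRITICAL).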
+ level_to_color = dict((
+ (10, QtGui.QColor("#ff66e8")),
+ (20, QtGui.QColor("#66abff")),
+ (30, QtGui.QColor("#ffba66")),
+ (40, QtGui.QColor("#ff4d58")),
+ (50, QtGui.QColor("#ff4f75")),
+ ))
+ _error_pix = None
+ _validation_error_pix = None
+
+ def __init__(self, parent, log_type, log_level, is_validation_error):
+ super(LogIconFrame, self).__init__(parent)
+
+ self.setAttribute(QtCore.Qt.WA_TranslucentBackground)
+
+ self._is_record = log_type == "record"
+ self._is_error = log_type == "error"
+ self._is_validation_error = bool(is_validation_error)
+ self._log_color = self.level_to_color.get(log_level)
+
+ @classmethod
+ def get_validation_error_icon(cls):
+ if cls._validation_error_pix is None:
+ cls._validation_error_pix = get_pixmap("warning")
+ return cls._validation_error_pix
+
+ @classmethod
+ def get_error_icon(cls):
+ if cls._error_pix is None:
+ cls._error_pix = get_pixmap("error")
+ return cls._error_pix
+
+ def minimumSizeHint(self):
+ fm = self.fontMetrics()
+ size = fm.height()
+ return QtCore.QSize(size, size)
+
+ def paintEvent(self, event):
+ painter = QtGui.QPainter(self)
+ painter.setRenderHints(
+ QtGui.QPainter.Antialiasing
+ | QtGui.QPainter.SmoothPixmapTransform
+ )
+ painter.setPen(QtCore.Qt.NoPen)
+ rect = self.rect()
+ new_size = min(rect.width(), rect.height())
+ new_rect = QtCore.QRect(1, 1, new_size - 2, new_size - 2)
+ if self._is_error:
+ if self._is_validation_error:
+ error_icon = self.get_validation_error_icon()
+ else:
+ error_icon = self.get_error_icon()
+ scaled_error_icon = error_icon.scaled(
+ new_rect.size(),
+ QtCore.Qt.KeepAspectRatio,
+ QtCore.Qt.SmoothTransformation
+ )
+ painter.drawPixmap(new_rect, scaled_error_icon)
+
+ else:
+ if self._is_record:
+ color = self._log_color
+ else:
+ color = QtGui.QColor(255, 255, 255)
+ painter.setBrush(color)
+ painter.drawEllipse(new_rect)
+ painter.end()
+
+
+class LogItemWidget(QtWidgets.QWidget):
+ log_level_to_flag = {
+ 10: LOG_DEBUG_VISIBLE,
+ 20: LOG_INFO_VISIBLE,
+ 30: LOG_WARNING_VISIBLE,
+ 40: LOG_ERROR_VISIBLE,
+ 50: LOG_CRITICAL_VISIBLE,
+ }
+
+ def __init__(self, log, parent):
+ super(LogItemWidget, self).__init__(parent)
+
+ type_flag, level_n = self._get_log_info(log)
+ icon_label = LogIconFrame(
+ self, log["type"], level_n, log.get("is_validation_error"))
+ message_label = QtWidgets.QLabel(log["msg"].rstrip(), self)
+ message_label.setObjectName("PublishLogMessage")
+ message_label.setTextInteractionFlags(
+ QtCore.Qt.TextBrowserInteraction)
+ message_label.setCursor(QtGui.QCursor(QtCore.Qt.IBeamCursor))
+ message_label.setWordWrap(True)
+
+ main_layout = QtWidgets.QHBoxLayout(self)
+ main_layout.setContentsMargins(0, 0, 0, 0)
+ main_layout.setSpacing(8)
+ main_layout.addWidget(icon_label, 0)
+ main_layout.addWidget(message_label, 1)
+
+ self._type_flag = type_flag
+ self._plugin_id = log["plugin_id"]
+ self._log_type_filtered = False
+ self._plugin_filtered = False
+
+ @property
+ def type_flag(self):
+ return self._type_flag
+
+ @property
+ def plugin_id(self):
+ return self._plugin_id
+
+ def _get_log_info(self, log):
+ log_type = log["type"]
+ if log_type == "error":
+ return ERROR_VISIBLE, None
+
+ if log_type != "record":
+ return INFO_VISIBLE, None
+
+ level_n = log["levelno"]
+ if level_n < 10:
+ level_n = 10
+ elif level_n % 10 != 0:
+ level_n -= (level_n % 10) + 10
+
+ flag = self.log_level_to_flag.get(level_n, LOG_CRITICAL_VISIBLE)
+ return flag, level_n
+
+ def _update_visibility(self):
+ self.setVisible(
+ not self._log_type_filtered
+ and not self._plugin_filtered
+ )
+
+ def set_log_type_filtered(self, filtered):
+ if filtered is self._log_type_filtered:
+ return
+ self._log_type_filtered = filtered
+ self._update_visibility()
+
+ def set_plugin_filtered(self, filtered):
+ if filtered is self._plugin_filtered:
+ return
+ self._plugin_filtered = filtered
+ self._update_visibility()
+
+
+class LogsWithIconsView(QtWidgets.QWidget):
+ """Show logs in a grid with 2 columns.
+
+ First column is for the icon, the second is for the message.
+
+ Todos:
+ Add filtering by type (exception, debug, info, etc.).
+ """
+
+ def __init__(self, logs, parent):
+ super(LogsWithIconsView, self).__init__(parent)
+ self.setAttribute(QtCore.Qt.WA_TranslucentBackground)
+
+ logs_layout = QtWidgets.QVBoxLayout(self)
+ logs_layout.setContentsMargins(0, 0, 0, 0)
+ logs_layout.setSpacing(4)
+
+ widgets_by_flag = collections.defaultdict(list)
+ widgets_by_plugins_id = collections.defaultdict(list)
+
+ for log in logs:
+ widget = LogItemWidget(log, self)
+ widgets_by_flag[widget.type_flag].append(widget)
+ widgets_by_plugins_id[widget.plugin_id].append(widget)
+ logs_layout.addWidget(widget, 0)
+
+ self._widgets_by_flag = widgets_by_flag
+ self._widgets_by_plugins_id = widgets_by_plugins_id
+
+ self._visibility_by_flags = {
+ LOG_DEBUG_VISIBLE: True,
+ LOG_INFO_VISIBLE: True,
+ LOG_WARNING_VISIBLE: True,
+ LOG_ERROR_VISIBLE: True,
+ LOG_CRITICAL_VISIBLE: True,
+ ERROR_VISIBLE: True,
+ INFO_VISIBLE: True,
+ }
+ self._flags_filter = sum(self._visibility_by_flags.keys())
+ self._plugin_ids_filter = None
+
+ def _update_flags_filtering(self):
+ for flag in (
+ LOG_DEBUG_VISIBLE,
+ LOG_INFO_VISIBLE,
+ LOG_WARNING_VISIBLE,
+ LOG_ERROR_VISIBLE,
+ LOG_CRITICAL_VISIBLE,
+ ERROR_VISIBLE,
+ INFO_VISIBLE,
+ ):
+ visible = (self._flags_filter & flag) != 0
+ if visible is not self._visibility_by_flags[flag]:
+ self._visibility_by_flags[flag] = visible
+ for widget in self._widgets_by_flag[flag]:
+ widget.set_log_type_filtered(not visible)
+
+ def _update_plugin_filtering(self):
+ if self._plugin_ids_filter is None:
+ for widgets in self._widgets_by_plugins_id.values():
+ for widget in widgets:
+ widget.set_plugin_filtered(False)
+
+ else:
+ for plugin_id, widgets in self._widgets_by_plugins_id.items():
+ filtered = plugin_id not in self._plugin_ids_filter
+ for widget in widgets:
+ widget.set_plugin_filtered(filtered)
+
+ def set_log_filters(self, visibility_filter, plugin_ids):
+ if self._flags_filter != visibility_filter:
+ self._flags_filter = visibility_filter
+ self._update_flags_filtering()
+
+ if self._plugin_ids_filter != plugin_ids:
+ if plugin_ids is not None:
+ plugin_ids = set(plugin_ids)
+ self._plugin_ids_filter = plugin_ids
+ self._update_plugin_filtering()
+
+
+class InstanceLogsWidget(QtWidgets.QWidget):
+ """Widget showing logs of one publish instance.
+
+ Args:
+ instance (_InstanceItem): Item of instance used as data source.
+ parent (QtWidgets.QWidget): Parent widget.
+ """
+
+ def __init__(self, instance, parent):
+ super(InstanceLogsWidget, self).__init__(parent)
+
+ self.setAttribute(QtCore.Qt.WA_TranslucentBackground)
+
+ label_widget = QtWidgets.QLabel(instance.label, self)
+ label_widget.setObjectName("PublishInstanceLogsLabel")
+ logs_grid = LogsWithIconsView(instance.logs, self)
+
+ layout = QtWidgets.QVBoxLayout(self)
+ layout.setContentsMargins(0, 0, 0, 0)
+ layout.addWidget(label_widget, 0)
+ layout.addWidget(logs_grid, 0)
+
+ self._logs_grid = logs_grid
+
+ def set_log_filters(self, visibility_filter, plugin_ids):
+ """Change logs filter.
+
+ Args:
+ visibility_filter (int): Bitmask of visibility flags for each log
+ type and level.
+ plugin_ids (Iterable[str]): Plugin ids to which the logs are filtered.
+ """
+
+ self._logs_grid.set_log_filters(visibility_filter, plugin_ids)
+
+
+class InstancesLogsView(QtWidgets.QFrame):
+ """Publish instances logs view widget."""
+
+ def __init__(self, parent):
+ super(InstancesLogsView, self).__init__(parent)
+ self.setObjectName("InstancesLogsView")
+
+ scroll_area = QtWidgets.QScrollArea(self)
+ scroll_area.setWidgetResizable(True)
+ scroll_area.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)
+ scroll_area.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAsNeeded)
+ scroll_area.setAttribute(QtCore.Qt.WA_TranslucentBackground)
+ scrollbar_bg = scroll_area.verticalScrollBar().parent()
+ if scrollbar_bg:
+ scrollbar_bg.setAttribute(QtCore.Qt.WA_TranslucentBackground)
+
+ content_wrap_widget = QtWidgets.QWidget(scroll_area)
+ content_wrap_widget.setAttribute(QtCore.Qt.WA_TranslucentBackground)
+
+ content_widget = QtWidgets.QWidget(content_wrap_widget)
+ content_widget.setAttribute(QtCore.Qt.WA_TranslucentBackground)
+ content_layout = QtWidgets.QVBoxLayout(content_widget)
+ content_layout.setSpacing(15)
+
+ scroll_area.setWidget(content_wrap_widget)
+
+ content_wrap_layout = QtWidgets.QVBoxLayout(content_wrap_widget)
+ content_wrap_layout.setContentsMargins(0, 0, 0, 0)
+ content_wrap_layout.addWidget(content_widget, 0)
+ content_wrap_layout.addStretch(1)
+
+ layout = QtWidgets.QVBoxLayout(self)
+ layout.setContentsMargins(0, 0, 0, 0)
+ layout.addWidget(scroll_area, 1)
+
+ self._visible_filters = (
+ LOG_INFO_VISIBLE
+ | LOG_WARNING_VISIBLE
+ | LOG_ERROR_VISIBLE
+ | LOG_CRITICAL_VISIBLE
+ | ERROR_VISIBLE
+ | INFO_VISIBLE
+ )
+
+ self._content_widget = content_widget
+ self._content_layout = content_layout
+
+ self._instances_order = []
+ self._instances_by_id = {}
+ self._views_by_instance_id = {}
+ self._is_showed = False
+ self._clear_needed = False
+ self._update_needed = False
+ self._instance_ids_filter = []
+ self._plugin_ids_filter = None
+
+ def showEvent(self, event):
+ super(InstancesLogsView, self).showEvent(event)
+ self._is_showed = True
+ self._update_instances()
+
+ def hideEvent(self, event):
+ super(InstancesLogsView, self).hideEvent(event)
+ self._is_showed = False
+
+ def closeEvent(self, event):
+ super(InstancesLogsView, self).closeEvent(event)
+ self._is_showed = False
+
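+ # The view content is rebuilt lazily: updates requested while the widget
+ # is hidden only set the '_clear_needed'/'_update_needed' flags and are
+ # applied in 'showEvent' via '_update_instances'.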
+ def _update_instances(self):
+ if not self._is_showed:
+ return
+
+ if self._clear_needed:
+ self._clear_widgets()
+ self._clear_needed = False
+
+ if not self._update_needed:
+ return
+ self._update_needed = False
+
+ instance_ids = self._instance_ids_filter
+ to_hide = set()
+ if not instance_ids:
+ instance_ids = self._instances_by_id
+ else:
+ to_hide = set(self._instances_by_id) - set(instance_ids)
+
+ for instance_id in instance_ids:
+ widget = self._views_by_instance_id.get(instance_id)
+ if widget is None:
+ instance = self._instances_by_id[instance_id]
+ widget = InstanceLogsWidget(instance, self._content_widget)
+ self._views_by_instance_id[instance_id] = widget
+ self._content_layout.addWidget(widget, 0)
+
+ widget.setVisible(True)
+ widget.set_log_filters(
+ self._visible_filters, self._plugin_ids_filter
+ )
+
+ for instance_id in to_hide:
+ widget = self._views_by_instance_id.get(instance_id)
+ if widget is not None:
+ widget.setVisible(False)
+
+ def _clear_widgets(self):
+ """Remove all widgets from layout and from cache."""
+
+ while self._content_layout.count():
+ item = self._content_layout.takeAt(0)
+ widget = item.widget()
+ if widget:
+ widget.setVisible(False)
+ widget.deleteLater()
+ self._views_by_instance_id = {}
+
+ def update_instances(self, instances):
+ """Update publish instance from report.
+
+ Args:
+ instances (list[_InstanceItem]): Instance data from report.
+ """
+
+ self._instances_order = [
+ instance.id for instance in instances
+ ]
+ self._instances_by_id = {
+ instance.id: instance
+ for instance in instances
+ }
+ self._instance_ids_filter = []
+ self._plugin_ids_filter = None
+ self._clear_needed = True
+ self._update_needed = True
+ self._update_instances()
+
+ def set_instances_filter(self, instance_ids=None):
+ """Set instance filter.
+
+ Args:
+ instance_ids (Optional[list[str]]): List of instances to keep
+ visible. Pass empty list to hide all items.
+ """
+
+ self._instance_ids_filter = instance_ids
+ self._update_needed = True
+ self._update_instances()
+
+ def set_plugins_filter(self, plugin_ids=None):
+ if self._plugin_ids_filter == plugin_ids:
+ return
+ self._plugin_ids_filter = plugin_ids
+ self._update_needed = True
+ self._update_instances()
+
+
+class CrashWidget(QtWidgets.QWidget):
+ """Widget shown when publishing crashes.
+
+ Contains only minimal information for the artist, with easy access to
+ report actions.
+ """
+
+ def __init__(self, controller, parent):
+ super(CrashWidget, self).__init__(parent)
+
+ main_label = QtWidgets.QLabel("This is not your fault", self)
+ main_label.setAlignment(QtCore.Qt.AlignCenter)
+ main_label.setObjectName("PublishCrashMainLabel")
+
+ report_label = QtWidgets.QLabel(
+ (
+ "Please report the error to your pipeline support"
+ " using one of the options below."
+ ),
+ self
+ )
+ report_label.setAlignment(QtCore.Qt.AlignCenter)
+ report_label.setWordWrap(True)
+ report_label.setObjectName("PublishCrashReportLabel")
+
+ btns_widget = QtWidgets.QWidget(self)
+ copy_clipboard_btn = QtWidgets.QPushButton(
+ "Copy to clipboard", btns_widget)
+ save_to_disk_btn = QtWidgets.QPushButton(
+ "Save to disk", btns_widget)
+
+ btns_layout = QtWidgets.QHBoxLayout(btns_widget)
+ btns_layout.addStretch(1)
+ btns_layout.addWidget(copy_clipboard_btn, 0)
+ btns_layout.addSpacing(20)
+ btns_layout.addWidget(save_to_disk_btn, 0)
+ btns_layout.addStretch(1)
+
+ layout = QtWidgets.QVBoxLayout(self)
+ layout.addStretch(1)
+ layout.addWidget(main_label, 0)
+ layout.addSpacing(20)
+ layout.addWidget(report_label, 0)
+ layout.addSpacing(20)
+ layout.addWidget(btns_widget, 0)
+ layout.addStretch(2)
+
+ copy_clipboard_btn.clicked.connect(self._on_copy_to_clipboard)
+ save_to_disk_btn.clicked.connect(self._on_save_to_disk_click)
+
+ self._controller = controller
+
+ def _on_copy_to_clipboard(self):
+ self._controller.event_system.emit(
+ "copy_report.request", {}, "report_page")
+
+ def _on_save_to_disk_click(self):
+ self._controller.event_system.emit(
+ "export_report.request", {}, "report_page")
+
+
+class ErrorDetailsWidget(QtWidgets.QWidget):
+ def __init__(self, parent):
+ super(ErrorDetailsWidget, self).__init__(parent)
+
+ inputs_widget = QtWidgets.QWidget(self)
+ # Error 'Description' input
+ error_description_input = ExpandingTextEdit(inputs_widget)
+ error_description_input.setObjectName("InfoText")
+ error_description_input.setTextInteractionFlags(
+ QtCore.Qt.TextBrowserInteraction
+ )
+
+ # Error 'Details' widget -> Collapsible
+ error_details_widget = QtWidgets.QWidget(inputs_widget)
+
+ error_details_top = ClickableFrame(error_details_widget)
+
+ error_details_expand_btn = ClassicExpandBtn(error_details_top)
+ error_details_expand_label = QtWidgets.QLabel(
+ "Details", error_details_top)
+
+ line_widget = SeparatorWidget(1, parent=error_details_top)
+
+ error_details_top_l = QtWidgets.QHBoxLayout(error_details_top)
+ error_details_top_l.setContentsMargins(0, 0, 10, 0)
+ error_details_top_l.addWidget(error_details_expand_btn, 0)
+ error_details_top_l.addWidget(error_details_expand_label, 0)
+ error_details_top_l.addWidget(line_widget, 1)
+
+ error_details_input = ExpandingTextEdit(error_details_widget)
+ error_details_input.setObjectName("InfoText")
+ error_details_input.setTextInteractionFlags(
+ QtCore.Qt.TextBrowserInteraction
+ )
+ error_details_input.setVisible(not error_details_expand_btn.collapsed)
+
+ error_details_layout = QtWidgets.QVBoxLayout(error_details_widget)
+ error_details_layout.setContentsMargins(0, 0, 0, 0)
+ error_details_layout.addWidget(error_details_top, 0)
+ error_details_layout.addWidget(error_details_input, 0)
+ error_details_layout.addStretch(1)
+
+ # Description and Details layout
+ inputs_layout = QtWidgets.QVBoxLayout(inputs_widget)
+ inputs_layout.setContentsMargins(0, 0, 0, 0)
+ inputs_layout.setSpacing(10)
+ inputs_layout.addWidget(error_description_input, 0)
+ inputs_layout.addWidget(error_details_widget, 1)
+
+ main_layout = QtWidgets.QHBoxLayout(self)
+ main_layout.setContentsMargins(0, 0, 0, 0)
+ main_layout.addWidget(inputs_widget, 1)
+
+ error_details_top.clicked.connect(self._on_detail_toggle)
+
+ self._error_details_widget = error_details_widget
+ self._error_description_input = error_description_input
+ self._error_details_expand_btn = error_details_expand_btn
+ self._error_details_input = error_details_input
+
+ def _on_detail_toggle(self):
+ self._error_details_expand_btn.set_collapsed()
+ self._error_details_input.setVisible(
+ not self._error_details_expand_btn.collapsed)
+
+ def set_error_item(self, error_item):
+ detail = ""
+ description = ""
+ if error_item:
+ description = error_item.description or description
+ detail = error_item.detail or detail
+
+ if commonmark:
+ self._error_description_input.setHtml(
+ commonmark.commonmark(description)
+ )
+ self._error_details_input.setHtml(
+ commonmark.commonmark(detail)
+ )
+
+ elif hasattr(self._error_details_input, "setMarkdown"):
+ self._error_description_input.setMarkdown(description)
+ self._error_details_input.setMarkdown(detail)
+
+ else:
+ self._error_description_input.setText(description)
+ self._error_details_input.setText(detail)
+
+ self._error_details_widget.setVisible(bool(detail))
+
+
+class ReportsWidget(QtWidgets.QWidget):
+ """
+    # Crash layout
+    ┌──────┬─────────┬─────────┐
+    │Views │ Logs    │ Details │
+    │      │         │         │
+    │      │         │         │
+    └──────┴─────────┴─────────┘
+    # Success layout
+    ┌──────┬───────────────────┐
+    │View  │ Logs              │
+    │      │                   │
+    │      │                   │
+    └──────┴───────────────────┘
+    # Validation errors layout
+    ┌──────┬─────────┬─────────┐
+    │Views │ Actions │         │
+    │      ├─────────┤ Details │
+    │      │ Logs    │         │
+    │      │         │         │
+    └──────┴─────────┴─────────┘
+ """
+
+ def __init__(self, controller, parent):
+ super(ReportsWidget, self).__init__(parent)
+
+ # Instances view
+ views_widget = QtWidgets.QWidget(self)
+
+ instances_view = PublishInstancesViewWidget(controller, views_widget)
+
+ validation_error_view = ValidationErrorsView(views_widget)
+
+ views_layout = QtWidgets.QStackedLayout(views_widget)
+ views_layout.setContentsMargins(0, 0, 0, 0)
+ views_layout.addWidget(instances_view)
+ views_layout.addWidget(validation_error_view)
+
+ views_layout.setCurrentWidget(instances_view)
+
+ # Error description with actions and optional detail
+ details_widget = QtWidgets.QFrame(self)
+ details_widget.setObjectName("PublishInstancesDetails")
+
+ # Actions widget
+ actions_widget = ValidateActionsWidget(controller, details_widget)
+
+ pages_widget = QtWidgets.QWidget(details_widget)
+
+ # Logs view
+ logs_view = InstancesLogsView(pages_widget)
+
+ # Validation details
+        # Description and details inputs are in a single scroll area
+        # - one scroll for both inputs; they are forced not to use their own
+ detail_inputs_spacer = QtWidgets.QWidget(pages_widget)
+ detail_inputs_spacer.setMinimumWidth(30)
+ detail_inputs_spacer.setMaximumWidth(30)
+
+ detail_input_scroll = QtWidgets.QScrollArea(pages_widget)
+
+ detail_inputs_widget = ErrorDetailsWidget(detail_input_scroll)
+ detail_inputs_widget.setAttribute(QtCore.Qt.WA_TranslucentBackground)
+
+ detail_input_scroll.setWidget(detail_inputs_widget)
+ detail_input_scroll.setWidgetResizable(True)
+ detail_input_scroll.setViewportMargins(0, 0, 0, 0)
+
+ # Crash information
+ crash_widget = CrashWidget(controller, details_widget)
+
+ # Layout pages
+ pages_layout = QtWidgets.QHBoxLayout(pages_widget)
+ pages_layout.setContentsMargins(0, 0, 0, 0)
+ pages_layout.addWidget(logs_view, 1)
+ pages_layout.addWidget(detail_inputs_spacer, 0)
+ pages_layout.addWidget(detail_input_scroll, 1)
+ pages_layout.addWidget(crash_widget, 1)
+
+ details_layout = QtWidgets.QVBoxLayout(details_widget)
+ margins = details_layout.contentsMargins()
+ margins.setTop(margins.top() * 2)
+ margins.setBottom(margins.bottom() * 2)
+ details_layout.setContentsMargins(margins)
+ details_layout.setSpacing(margins.top())
+ details_layout.addWidget(actions_widget, 0)
+ details_layout.addWidget(pages_widget, 1)
+
+ content_layout = QtWidgets.QHBoxLayout(self)
+ content_layout.setContentsMargins(0, 0, 0, 0)
+ content_layout.addWidget(views_widget, 0)
+ content_layout.addWidget(details_widget, 1)
+
+ instances_view.selection_changed.connect(self._on_instance_selection)
+ validation_error_view.selection_changed.connect(
+ self._on_error_selection)
+
+ self._views_layout = views_layout
+ self._instances_view = instances_view
+ self._validation_error_view = validation_error_view
+
+ self._actions_widget = actions_widget
+ self._detail_inputs_widget = detail_inputs_widget
+ self._logs_view = logs_view
+ self._detail_inputs_spacer = detail_inputs_spacer
+ self._detail_input_scroll = detail_input_scroll
+ self._crash_widget = crash_widget
+
+ self._controller = controller
+
+ self._validation_errors_by_id = {}
+
+ def _get_instance_items(self):
+ report = self._controller.get_publish_report()
+ context_label = report["context"]["label"] or CONTEXT_LABEL
+ instances_by_id = report["instances"]
+ plugins_info = report["plugins_data"]
+ logs_by_instance_id = collections.defaultdict(list)
+ for plugin_info in plugins_info:
+ plugin_id = plugin_info["id"]
+ for instance_info in plugin_info["instances_data"]:
+ instance_id = instance_info["id"] or CONTEXT_ID
+ for log in instance_info["logs"]:
+ log["plugin_id"] = plugin_id
+ logs_by_instance_id[instance_id].extend(instance_info["logs"])
+
+ context_item = _InstanceItem.create_context_item(
+ context_label, logs_by_instance_id[CONTEXT_ID])
+ instance_items = [
+ _InstanceItem.from_report(
+ instance_id, instance, logs_by_instance_id[instance_id]
+ )
+ for instance_id, instance in instances_by_id.items()
+ if instance["exists"]
+ ]
+ instance_items.sort()
+ instance_items.insert(0, context_item)
+ return instance_items
+
+ def update_data(self):
+ view = self._instances_view
+ validation_error_mode = False
+ if (
+ not self._controller.publish_has_crashed
+ and self._controller.publish_has_validation_errors
+ ):
+ view = self._validation_error_view
+ validation_error_mode = True
+
+ self._actions_widget.set_visible_mode(validation_error_mode)
+ self._detail_inputs_spacer.setVisible(validation_error_mode)
+ self._detail_input_scroll.setVisible(validation_error_mode)
+ self._views_layout.setCurrentWidget(view)
+
+ self._crash_widget.setVisible(self._controller.publish_has_crashed)
+ self._logs_view.setVisible(not self._controller.publish_has_crashed)
+
+ # Instance view & logs update
+ instance_items = self._get_instance_items()
+ self._instances_view.update_instances(instance_items)
+ self._logs_view.update_instances(instance_items)
+
+ # Validation errors
+ validation_errors = self._controller.get_validation_errors()
+ grouped_error_items = validation_errors.group_items_by_title()
+
+ validation_errors_by_id = {
+ title_item["id"]: title_item
+ for title_item in grouped_error_items
+ }
+
+ self._validation_errors_by_id = validation_errors_by_id
+ self._validation_error_view.set_errors(grouped_error_items)
+
+ def _on_instance_selection(self):
+ instance_ids = self._instances_view.get_selected_instance_ids()
+ self._logs_view.set_instances_filter(instance_ids)
+
+ def _on_error_selection(self):
+ title_id, instance_ids = (
+ self._validation_error_view.get_selected_items())
+ error_info = self._validation_errors_by_id.get(title_id)
+ if error_info is None:
+ self._actions_widget.set_error_info(None)
+ self._detail_inputs_widget.set_error_item(None)
+ return
+
+ self._logs_view.set_instances_filter(instance_ids)
+ self._logs_view.set_plugins_filter([error_info["plugin_id"]])
+
+ match_error_item = None
+ for error_item in error_info["error_items"]:
+ instance_id = error_item.instance_id or CONTEXT_ID
+ if instance_id in instance_ids:
+ match_error_item = error_item
+ break
+
+ self._actions_widget.set_error_info(error_info)
+ self._detail_inputs_widget.set_error_item(match_error_item)
+
+
+class ReportPageWidget(QtWidgets.QFrame):
+ """Widgets showing report for artis.
+
+ There are 5 possible states:
+ 1. Publishing did not start yet. > Only label.
+ 2. Publishing is paused. β
+ 3. Publishing successfully finished. β> Instances with logs.
+ 4. Publishing crashed. β
+ 5. Crashed because of validation error. > Errors with logs.
+
+ This widget is shown if validation errors happened during validation part.
+
+ Shows validation error titles with instances on which they happened
+ and validation error detail with possible actions (repair).
+ """
+
+ def __init__(self, controller, parent):
+ super(ReportPageWidget, self).__init__(parent)
+
+ header_label = QtWidgets.QLabel(self)
+ header_label.setAlignment(QtCore.Qt.AlignCenter)
+ header_label.setObjectName("PublishReportHeader")
+
+ publish_instances_widget = ReportsWidget(controller, self)
+
+ layout = QtWidgets.QVBoxLayout(self)
+ layout.setContentsMargins(0, 0, 0, 0)
+ layout.addWidget(header_label, 0)
+ layout.addWidget(publish_instances_widget, 0)
+
+ controller.event_system.add_callback(
+ "publish.process.started", self._on_publish_start
+ )
+ controller.event_system.add_callback(
+ "publish.reset.finished", self._on_publish_reset
+ )
+ controller.event_system.add_callback(
+ "publish.process.stopped", self._on_publish_stop
+ )
+
+ self._header_label = header_label
+ self._publish_instances_widget = publish_instances_widget
+
+ self._controller = controller
+
+ def _update_label(self):
+ if not self._controller.publish_has_started:
+            # This probably never happens when this widget is visible
+ header_label = "Nothing to report until you run publish"
+ elif self._controller.publish_has_crashed:
+ header_label = "Publish error report"
+ elif self._controller.publish_has_validation_errors:
+ header_label = "Publish validation report"
+ elif self._controller.publish_has_finished:
+ header_label = "Publish success report"
+ else:
+ header_label = "Publish report"
+ self._header_label.setText(header_label)
+
+ def _update_state(self):
+ self._update_label()
+ publish_started = self._controller.publish_has_started
+ self._publish_instances_widget.setVisible(publish_started)
+ if publish_started:
+ self._publish_instances_widget.update_data()
+
+ self.updateGeometry()
+
+ def _on_publish_start(self):
+ self._update_state()
+
+ def _on_publish_reset(self):
+ self._update_state()
+
+ def _on_publish_stop(self):
+ self._update_state()
diff --git a/openpype/tools/publisher/widgets/thumbnail_widget.py b/openpype/tools/publisher/widgets/thumbnail_widget.py
index e234f4cdc1..b17ca0adc8 100644
--- a/openpype/tools/publisher/widgets/thumbnail_widget.py
+++ b/openpype/tools/publisher/widgets/thumbnail_widget.py
@@ -75,6 +75,7 @@ class ThumbnailPainterWidget(QtWidgets.QWidget):
painter = QtGui.QPainter()
painter.begin(self)
+ painter.setRenderHint(QtGui.QPainter.Antialiasing)
painter.drawPixmap(0, 0, self._cached_pix)
painter.end()
@@ -183,6 +184,18 @@ class ThumbnailPainterWidget(QtWidgets.QWidget):
backgrounded_images.append(new_pix)
return backgrounded_images
+ def _paint_dash_line(self, painter, rect):
+ pen = QtGui.QPen()
+ pen.setWidth(1)
+ pen.setBrush(QtCore.Qt.darkGray)
+ pen.setStyle(QtCore.Qt.DashLine)
+
+ new_rect = rect.adjusted(1, 1, -1, -1)
+ painter.setPen(pen)
+ painter.setBrush(QtCore.Qt.transparent)
+ # painter.drawRect(rect)
+ painter.drawRect(new_rect)
+
def _cache_pix(self):
rect = self.rect()
rect_width = rect.width()
@@ -264,13 +277,7 @@ class ThumbnailPainterWidget(QtWidgets.QWidget):
# Draw drop enabled dashes
if used_default_pix:
- pen = QtGui.QPen()
- pen.setWidth(1)
- pen.setBrush(QtCore.Qt.darkGray)
- pen.setStyle(QtCore.Qt.DashLine)
- final_painter.setPen(pen)
- final_painter.setBrush(QtCore.Qt.transparent)
- final_painter.drawRect(rect)
+ self._paint_dash_line(final_painter, rect)
final_painter.end()
diff --git a/openpype/tools/publisher/widgets/validations_widget.py b/openpype/tools/publisher/widgets/validations_widget.py
deleted file mode 100644
index 0abe85c0b8..0000000000
--- a/openpype/tools/publisher/widgets/validations_widget.py
+++ /dev/null
@@ -1,715 +0,0 @@
-# -*- coding: utf-8 -*-
-try:
- import commonmark
-except Exception:
- commonmark = None
-
-from qtpy import QtWidgets, QtCore, QtGui
-
-from openpype.tools.utils import BaseClickableFrame, ClickableFrame
-from .widgets import (
- IconValuePixmapLabel
-)
-from ..constants import (
- INSTANCE_ID_ROLE
-)
-
-
-class ValidationErrorInstanceList(QtWidgets.QListView):
- """List of publish instances that caused a validation error.
-
- Instances are collected per plugin's validation error title.
- """
- def __init__(self, *args, **kwargs):
- super(ValidationErrorInstanceList, self).__init__(*args, **kwargs)
-
- self.setObjectName("ValidationErrorInstanceList")
-
- self.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)
- self.setSelectionMode(QtWidgets.QAbstractItemView.ExtendedSelection)
-
- def minimumSizeHint(self):
- return self.sizeHint()
-
- def sizeHint(self):
- result = super(ValidationErrorInstanceList, self).sizeHint()
- row_count = self.model().rowCount()
- height = 0
- if row_count > 0:
- height = self.sizeHintForRow(0) * row_count
- result.setHeight(height)
- return result
-
-
-class ValidationErrorTitleWidget(QtWidgets.QWidget):
- """Title of validation error.
-
- Widget is used as radio button so requires clickable functionality and
- changing style on selection/deselection.
-
- Has toggle button to show/hide instances on which validation error happened
-    if there is a list (Validation error may happen on context).
- """
-
- selected = QtCore.Signal(int)
- instance_changed = QtCore.Signal(int)
-
- def __init__(self, index, error_info, parent):
- super(ValidationErrorTitleWidget, self).__init__(parent)
-
- self._index = index
- self._error_info = error_info
- self._selected = False
-
- title_frame = ClickableFrame(self)
- title_frame.setObjectName("ValidationErrorTitleFrame")
-
- toggle_instance_btn = QtWidgets.QToolButton(title_frame)
- toggle_instance_btn.setObjectName("ArrowBtn")
- toggle_instance_btn.setArrowType(QtCore.Qt.RightArrow)
- toggle_instance_btn.setMaximumWidth(14)
-
- label_widget = QtWidgets.QLabel(error_info["title"], title_frame)
-
- title_frame_layout = QtWidgets.QHBoxLayout(title_frame)
- title_frame_layout.addWidget(label_widget, 1)
- title_frame_layout.addWidget(toggle_instance_btn, 0)
-
- instances_model = QtGui.QStandardItemModel()
-
- help_text_by_instance_id = {}
-
- items = []
- context_validation = False
- for error_item in error_info["error_items"]:
- context_validation = error_item.context_validation
- if context_validation:
- toggle_instance_btn.setArrowType(QtCore.Qt.NoArrow)
- description = self._prepare_description(error_item)
- help_text_by_instance_id[None] = description
- # Add fake item to have minimum size hint of view widget
- items.append(QtGui.QStandardItem("Context"))
- continue
-
- label = error_item.instance_label
- item = QtGui.QStandardItem(label)
- item.setFlags(
- QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsSelectable
- )
- item.setData(label, QtCore.Qt.ToolTipRole)
- item.setData(error_item.instance_id, INSTANCE_ID_ROLE)
- items.append(item)
- description = self._prepare_description(error_item)
- help_text_by_instance_id[error_item.instance_id] = description
-
- if items:
- root_item = instances_model.invisibleRootItem()
- root_item.appendRows(items)
-
- instances_view = ValidationErrorInstanceList(self)
- instances_view.setModel(instances_model)
-
- self.setLayoutDirection(QtCore.Qt.LeftToRight)
-
- view_widget = QtWidgets.QWidget(self)
- view_layout = QtWidgets.QHBoxLayout(view_widget)
- view_layout.setContentsMargins(0, 0, 0, 0)
- view_layout.setSpacing(0)
- view_layout.addSpacing(14)
- view_layout.addWidget(instances_view, 0)
-
- layout = QtWidgets.QVBoxLayout(self)
- layout.setSpacing(0)
- layout.setContentsMargins(0, 0, 0, 0)
- layout.addWidget(title_frame, 0)
- layout.addWidget(view_widget, 0)
- view_widget.setVisible(False)
-
- if not context_validation:
- toggle_instance_btn.clicked.connect(self._on_toggle_btn_click)
-
- title_frame.clicked.connect(self._mouse_release_callback)
- instances_view.selectionModel().selectionChanged.connect(
- self._on_seleciton_change
- )
-
- self._title_frame = title_frame
-
- self._toggle_instance_btn = toggle_instance_btn
-
- self._view_widget = view_widget
-
- self._instances_model = instances_model
- self._instances_view = instances_view
-
- self._context_validation = context_validation
- self._help_text_by_instance_id = help_text_by_instance_id
-
- self._expanded = False
-
- def sizeHint(self):
- result = super(ValidationErrorTitleWidget, self).sizeHint()
- expected_width = max(
- self._view_widget.minimumSizeHint().width(),
- self._view_widget.sizeHint().width()
- )
-
- if expected_width < 200:
- expected_width = 200
-
- if result.width() < expected_width:
- result.setWidth(expected_width)
-
- return result
-
- def minimumSizeHint(self):
- return self.sizeHint()
-
- def _prepare_description(self, error_item):
- """Prepare description text for detail intput.
-
- Args:
- error_item (ValidationErrorItem): Item which hold information about
- validation error.
-
- Returns:
- str: Prepared detailed description.
- """
-
- dsc = error_item.description
- detail = error_item.detail
- if detail:
- dsc += "
{}".format(detail)
-
- description = dsc
- if commonmark:
- description = commonmark.commonmark(dsc)
- return description
-
- def _mouse_release_callback(self):
- """Mark this widget as selected on click."""
-
- self.set_selected(True)
-
- def current_description_text(self):
- if self._context_validation:
- return self._help_text_by_instance_id[None]
- index = self._instances_view.currentIndex()
- # TODO make sure instance is selected
- if not index.isValid():
- index = self._instances_model.index(0, 0)
-
- indence_id = index.data(INSTANCE_ID_ROLE)
- return self._help_text_by_instance_id[indence_id]
-
- @property
- def is_selected(self):
- """Is widget marked a selected.
-
- Returns:
- bool: Item is selected or not.
- """
-
- return self._selected
-
- @property
- def index(self):
- """Widget's index set by parent.
-
- Returns:
- int: Index of widget.
- """
-
- return self._index
-
- def set_index(self, index):
- """Set index of widget (called by parent).
-
- Args:
-            index (int): New index of widget.
- """
-
- self._index = index
-
- def _change_style_property(self, selected):
- """Change style of widget based on selection."""
-
- value = "1" if selected else ""
- self._title_frame.setProperty("selected", value)
- self._title_frame.style().polish(self._title_frame)
-
- def set_selected(self, selected=None):
- """Change selected state of widget."""
-
- if selected is None:
- selected = not self._selected
-
- # Clear instance view selection on deselect
- if not selected:
- self._instances_view.clearSelection()
-
- # Skip if has same value
- if selected == self._selected:
- return
-
- self._selected = selected
- self._change_style_property(selected)
- if selected:
- self.selected.emit(self._index)
- self._set_expanded(True)
-
- def _on_toggle_btn_click(self):
- """Show/hide instances list."""
-
- self._set_expanded()
-
- def _set_expanded(self, expanded=None):
- if expanded is None:
- expanded = not self._expanded
-
- elif expanded is self._expanded:
- return
-
- if expanded and self._context_validation:
- return
-
- self._expanded = expanded
- self._view_widget.setVisible(expanded)
- if expanded:
- self._toggle_instance_btn.setArrowType(QtCore.Qt.DownArrow)
- else:
- self._toggle_instance_btn.setArrowType(QtCore.Qt.RightArrow)
-
- def _on_seleciton_change(self):
- sel_model = self._instances_view.selectionModel()
- if sel_model.selectedIndexes():
- self.instance_changed.emit(self._index)
-
-
-class ActionButton(BaseClickableFrame):
- """Plugin's action callback button.
-
- Action may have label or icon or both.
-
- Args:
- plugin_action_item (PublishPluginActionItem): Action item that can be
-            triggered by its id.
- """
-
- action_clicked = QtCore.Signal(str, str)
-
- def __init__(self, plugin_action_item, parent):
- super(ActionButton, self).__init__(parent)
-
- self.setObjectName("ValidationActionButton")
-
- self.plugin_action_item = plugin_action_item
-
- action_label = plugin_action_item.label
- action_icon = plugin_action_item.icon
- label_widget = QtWidgets.QLabel(action_label, self)
- icon_label = None
- if action_icon:
- icon_label = IconValuePixmapLabel(action_icon, self)
-
- layout = QtWidgets.QHBoxLayout(self)
- layout.setContentsMargins(5, 0, 5, 0)
- layout.addWidget(label_widget, 1)
- if icon_label:
- layout.addWidget(icon_label, 0)
-
- self.setSizePolicy(
- QtWidgets.QSizePolicy.Minimum,
- self.sizePolicy().verticalPolicy()
- )
-
- def _mouse_release_callback(self):
- self.action_clicked.emit(
- self.plugin_action_item.plugin_id,
- self.plugin_action_item.action_id
- )
-
-
-class ValidateActionsWidget(QtWidgets.QFrame):
- """Wrapper widget for plugin actions.
-
- Change actions based on selected validation error.
- """
-
- def __init__(self, controller, parent):
- super(ValidateActionsWidget, self).__init__(parent)
-
- self.setAttribute(QtCore.Qt.WA_TranslucentBackground)
-
- content_widget = QtWidgets.QWidget(self)
- content_layout = QtWidgets.QVBoxLayout(content_widget)
-
- layout = QtWidgets.QHBoxLayout(self)
- layout.setContentsMargins(0, 0, 0, 0)
- layout.addWidget(content_widget)
-
- self._controller = controller
- self._content_widget = content_widget
- self._content_layout = content_layout
- self._actions_mapping = {}
-
- def clear(self):
- """Remove actions from widget."""
- while self._content_layout.count():
- item = self._content_layout.takeAt(0)
- widget = item.widget()
- if widget:
- widget.setVisible(False)
- widget.deleteLater()
- self._actions_mapping = {}
-
- def set_error_item(self, error_item):
- """Set selected plugin and show it's actions.
-
- Clears current actions from widget and recreate them from the plugin.
-
- Args:
-            error_item (Dict[str, Any]): Object holding error items, title
-                and possible actions to run.
- """
-
- self.clear()
-
- if not error_item:
- self.setVisible(False)
- return
-
- plugin_action_items = error_item["plugin_action_items"]
- for plugin_action_item in plugin_action_items:
- if not plugin_action_item.active:
- continue
-
- if plugin_action_item.on_filter not in ("failed", "all"):
- continue
-
- action_id = plugin_action_item.action_id
- self._actions_mapping[action_id] = plugin_action_item
-
- action_btn = ActionButton(plugin_action_item, self._content_widget)
- action_btn.action_clicked.connect(self._on_action_click)
- self._content_layout.addWidget(action_btn)
-
- if self._content_layout.count() > 0:
- self.setVisible(True)
- self._content_layout.addStretch(1)
- else:
- self.setVisible(False)
-
- def _on_action_click(self, plugin_id, action_id):
- self._controller.run_action(plugin_id, action_id)
-
-
-class VerticallScrollArea(QtWidgets.QScrollArea):
- """Scroll area for validation error titles.
-
- The biggest difference is that the scroll area has scroll bar on left side
- and resize of content will also resize scrollarea itself.
-
-    Resize is deferred by 100ms because sizes and visibility of scroll bars
-    are not yet propagated at the moment of resize.
- """
-
- def __init__(self, *args, **kwargs):
- super(VerticallScrollArea, self).__init__(*args, **kwargs)
-
- self.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)
- self.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAsNeeded)
- self.setLayoutDirection(QtCore.Qt.RightToLeft)
-
- self.setAttribute(QtCore.Qt.WA_TranslucentBackground)
- # Background of scrollbar will be transparent
- scrollbar_bg = self.verticalScrollBar().parent()
- if scrollbar_bg:
- scrollbar_bg.setAttribute(QtCore.Qt.WA_TranslucentBackground)
- self.setViewportMargins(0, 0, 0, 0)
-
- self.verticalScrollBar().installEventFilter(self)
-
- # Timer with 100ms offset after changing size
- size_changed_timer = QtCore.QTimer()
- size_changed_timer.setInterval(100)
- size_changed_timer.setSingleShot(True)
-
- size_changed_timer.timeout.connect(self._on_timer_timeout)
- self._size_changed_timer = size_changed_timer
-
- def setVerticalScrollBar(self, widget):
- old_widget = self.verticalScrollBar()
- if old_widget:
- old_widget.removeEventFilter(self)
-
- super(VerticallScrollArea, self).setVerticalScrollBar(widget)
- if widget:
- widget.installEventFilter(self)
-
- def setWidget(self, widget):
- old_widget = self.widget()
- if old_widget:
- old_widget.removeEventFilter(self)
-
- super(VerticallScrollArea, self).setWidget(widget)
- if widget:
- widget.installEventFilter(self)
-
- def _on_timer_timeout(self):
- width = self.widget().width()
- if self.verticalScrollBar().isVisible():
- width += self.verticalScrollBar().width()
- self.setMinimumWidth(width)
-
- def eventFilter(self, obj, event):
- if (
- event.type() == QtCore.QEvent.Resize
- and (obj is self.widget() or obj is self.verticalScrollBar())
- ):
- self._size_changed_timer.start()
- return super(VerticallScrollArea, self).eventFilter(obj, event)
-
-
-class ValidationArtistMessage(QtWidgets.QWidget):
- def __init__(self, message, parent):
- super(ValidationArtistMessage, self).__init__(parent)
-
- artist_msg_label = QtWidgets.QLabel(message, self)
- artist_msg_label.setAlignment(QtCore.Qt.AlignCenter)
-
- main_layout = QtWidgets.QHBoxLayout(self)
- main_layout.setContentsMargins(0, 0, 0, 0)
- main_layout.addWidget(
- artist_msg_label, 1, QtCore.Qt.AlignCenter
- )
-
-
-class ValidationsWidget(QtWidgets.QFrame):
- """Widgets showing validation error.
-
- This widget is shown if validation error/s happened during validation part.
-
-    Shows validation error titles with instances on which they happened and
- validation error detail with possible actions (repair).
-
-    ┌──────┬────────────────┬───────┐
-    │titles│                │actions│
-    │      │                │       │
-    │      │  Error detail  │       │
-    │      │                │       │
-    └──────┴────────────────┴───────┘
- """
-
- def __init__(self, controller, parent):
- super(ValidationsWidget, self).__init__(parent)
-
- # Before publishing
- before_publish_widget = ValidationArtistMessage(
- "Nothing to report until you run publish", self
- )
- # After success publishing
- publish_started_widget = ValidationArtistMessage(
- "So far so good", self
- )
- # After success publishing
- publish_stop_ok_widget = ValidationArtistMessage(
- "Publishing finished successfully", self
- )
- # After failed publishing (not with validation error)
- publish_stop_fail_widget = ValidationArtistMessage(
- "This is not your fault...", self
- )
-
- # Validation errors
- validations_widget = QtWidgets.QWidget(self)
-
- content_widget = QtWidgets.QWidget(validations_widget)
-
- errors_scroll = VerticallScrollArea(content_widget)
- errors_scroll.setWidgetResizable(True)
-
- errors_widget = QtWidgets.QWidget(errors_scroll)
- errors_widget.setAttribute(QtCore.Qt.WA_TranslucentBackground)
- errors_layout = QtWidgets.QVBoxLayout(errors_widget)
- errors_layout.setContentsMargins(0, 0, 0, 0)
-
- errors_scroll.setWidget(errors_widget)
-
- error_details_frame = QtWidgets.QFrame(content_widget)
- error_details_input = QtWidgets.QTextEdit(error_details_frame)
- error_details_input.setObjectName("InfoText")
- error_details_input.setTextInteractionFlags(
- QtCore.Qt.TextBrowserInteraction
- )
-
- actions_widget = ValidateActionsWidget(controller, content_widget)
- actions_widget.setMinimumWidth(140)
-
- error_details_layout = QtWidgets.QHBoxLayout(error_details_frame)
- error_details_layout.addWidget(error_details_input, 1)
- error_details_layout.addWidget(actions_widget, 0)
-
- content_layout = QtWidgets.QHBoxLayout(content_widget)
- content_layout.setSpacing(0)
- content_layout.setContentsMargins(0, 0, 0, 0)
-
- content_layout.addWidget(errors_scroll, 0)
- content_layout.addWidget(error_details_frame, 1)
-
- top_label = QtWidgets.QLabel(
- "Publish validation report", content_widget
- )
- top_label.setObjectName("PublishInfoMainLabel")
- top_label.setAlignment(QtCore.Qt.AlignCenter)
-
- validation_layout = QtWidgets.QVBoxLayout(validations_widget)
- validation_layout.setContentsMargins(0, 0, 0, 0)
- validation_layout.addWidget(top_label, 0)
- validation_layout.addWidget(content_widget, 1)
-
- main_layout = QtWidgets.QStackedLayout(self)
- main_layout.addWidget(before_publish_widget)
- main_layout.addWidget(publish_started_widget)
- main_layout.addWidget(publish_stop_ok_widget)
- main_layout.addWidget(publish_stop_fail_widget)
- main_layout.addWidget(validations_widget)
-
- main_layout.setCurrentWidget(before_publish_widget)
-
- controller.event_system.add_callback(
- "publish.process.started", self._on_publish_start
- )
- controller.event_system.add_callback(
- "publish.reset.finished", self._on_publish_reset
- )
- controller.event_system.add_callback(
- "publish.process.stopped", self._on_publish_stop
- )
-
- self._main_layout = main_layout
-
- self._before_publish_widget = before_publish_widget
- self._publish_started_widget = publish_started_widget
- self._publish_stop_ok_widget = publish_stop_ok_widget
- self._publish_stop_fail_widget = publish_stop_fail_widget
- self._validations_widget = validations_widget
-
- self._top_label = top_label
- self._errors_widget = errors_widget
- self._errors_layout = errors_layout
- self._error_details_frame = error_details_frame
- self._error_details_input = error_details_input
- self._actions_widget = actions_widget
-
- self._title_widgets = {}
- self._error_info = {}
- self._previous_select = None
-
- self._controller = controller
-
- def clear(self):
- """Delete all dynamic widgets and hide all wrappers."""
- self._title_widgets = {}
- self._error_info = {}
- self._previous_select = None
- while self._errors_layout.count():
- item = self._errors_layout.takeAt(0)
- widget = item.widget()
- if widget:
- widget.deleteLater()
-
- self._top_label.setVisible(False)
- self._error_details_frame.setVisible(False)
- self._errors_widget.setVisible(False)
- self._actions_widget.setVisible(False)
-
- def _set_errors(self, validation_error_report):
- """Set errors into context and created titles.
-
- Args:
- validation_error_report (PublishValidationErrorsReport): Report
- with information about validation errors and publish plugin
- actions.
- """
-
- self.clear()
- if not validation_error_report:
- return
-
- self._top_label.setVisible(True)
- self._error_details_frame.setVisible(True)
- self._errors_widget.setVisible(True)
-
- grouped_error_items = validation_error_report.group_items_by_title()
- for idx, error_info in enumerate(grouped_error_items):
- widget = ValidationErrorTitleWidget(idx, error_info, self)
- widget.selected.connect(self._on_select)
- widget.instance_changed.connect(self._on_instance_change)
- self._errors_layout.addWidget(widget)
- self._title_widgets[idx] = widget
- self._error_info[idx] = error_info
-
- self._errors_layout.addStretch(1)
-
- if self._title_widgets:
- self._title_widgets[0].set_selected(True)
-
- self.updateGeometry()
-
- def _set_current_widget(self, widget):
- self._main_layout.setCurrentWidget(widget)
-
- def _on_publish_start(self):
- self._set_current_widget(self._publish_started_widget)
-
- def _on_publish_reset(self):
- self._set_current_widget(self._before_publish_widget)
-
- def _on_publish_stop(self):
- if self._controller.publish_has_crashed:
- self._set_current_widget(self._publish_stop_fail_widget)
- return
-
- if self._controller.publish_has_validation_errors:
- validation_errors = self._controller.get_validation_errors()
- self._set_current_widget(self._validations_widget)
- self._set_errors(validation_errors)
- return
-
- if self._controller.publish_has_finished:
- self._set_current_widget(self._publish_stop_ok_widget)
- return
-
- self._set_current_widget(self._publish_started_widget)
-
- def _on_select(self, index):
- if self._previous_select:
- if self._previous_select.index == index:
- return
- self._previous_select.set_selected(False)
-
- self._previous_select = self._title_widgets[index]
-
- error_item = self._error_info[index]
-
- self._actions_widget.set_error_item(error_item)
-
- self._update_description()
-
- def _on_instance_change(self, index):
- if self._previous_select and self._previous_select.index != index:
- self._title_widgets[index].set_selected(True)
- else:
- self._update_description()
-
- def _update_description(self):
- description = self._previous_select.current_description_text()
- if commonmark:
- html = commonmark.commonmark(description)
- self._error_details_input.setHtml(html)
- elif hasattr(self._error_details_input, "setMarkdown"):
- self._error_details_input.setMarkdown(description)
- else:
- self._error_details_input.setText(description)
diff --git a/openpype/tools/publisher/widgets/widgets.py b/openpype/tools/publisher/widgets/widgets.py
index cd1f1f5a96..0b13f26d57 100644
--- a/openpype/tools/publisher/widgets/widgets.py
+++ b/openpype/tools/publisher/widgets/widgets.py
@@ -40,6 +40,41 @@ from ..constants import (
INPUTS_LAYOUT_VSPACING,
)
+FA_PREFIXES = ["", "fa.", "fa5.", "fa5b.", "fa5s.", "ei.", "mdi."]
+
+
+def parse_icon_def(
+ icon_def, default_width=None, default_height=None, color=None
+):
+ if not icon_def:
+ return None
+
+ if isinstance(icon_def, QtGui.QPixmap):
+ return icon_def
+
+ color = color or "white"
+ default_width = default_width or 512
+ default_height = default_height or 512
+
+ if isinstance(icon_def, QtGui.QIcon):
+ return icon_def.pixmap(default_width, default_height)
+
+ try:
+ if os.path.exists(icon_def):
+ return QtGui.QPixmap(icon_def)
+ except Exception:
+ # TODO logging
+ pass
+
+ for prefix in FA_PREFIXES:
+ try:
+ icon_name = "{}{}".format(prefix, icon_def)
+ icon = qtawesome.icon(icon_name, color=color)
+ return icon.pixmap(default_width, default_height)
+ except Exception:
+ # TODO logging
+ continue
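+
+# Minimal usage sketch (illustrative only; the icon name and file path below
+# are assumptions):
+#   pixmap = parse_icon_def("folder", 32, 32, color="gray")
+#   if pixmap is None:
+#       pixmap = parse_icon_def("/path/to/icon.png")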
+
class PublishPixmapLabel(PixmapLabel):
def _get_pix_size(self):
@@ -54,7 +89,6 @@ class IconValuePixmapLabel(PublishPixmapLabel):
     Handle icon parsing from creators/instances. Uses the QtAwesome module
     or a path to an image.
"""
- fa_prefixes = ["", "fa."]
default_size = 200
def __init__(self, icon_def, parent):
@@ -77,31 +111,9 @@ class IconValuePixmapLabel(PublishPixmapLabel):
return pix
def _parse_icon_def(self, icon_def):
- if not icon_def:
- return self._default_pixmap()
-
- if isinstance(icon_def, QtGui.QPixmap):
- return icon_def
-
- if isinstance(icon_def, QtGui.QIcon):
- return icon_def.pixmap(self.default_size, self.default_size)
-
- try:
- if os.path.exists(icon_def):
- return QtGui.QPixmap(icon_def)
- except Exception:
- # TODO logging
- pass
-
- for prefix in self.fa_prefixes:
- try:
- icon_name = "{}{}".format(prefix, icon_def)
- icon = qtawesome.icon(icon_name, color="white")
- return icon.pixmap(self.default_size, self.default_size)
- except Exception:
- # TODO logging
- continue
-
+ icon = parse_icon_def(icon_def, self.default_size, self.default_size)
+ if icon:
+ return icon
return self._default_pixmap()
@@ -692,6 +704,7 @@ class TasksCombobox(QtWidgets.QComboBox):
style.drawControl(
QtWidgets.QStyle.CE_ComboBoxLabel, opt, painter, self
)
+ painter.end()
def is_valid(self):
"""Are all selected items valid."""
diff --git a/openpype/tools/publisher/window.py b/openpype/tools/publisher/window.py
index b3471163ae..006098cb37 100644
--- a/openpype/tools/publisher/window.py
+++ b/openpype/tools/publisher/window.py
@@ -1,3 +1,6 @@
+import os
+import json
+import time
import collections
import copy
from qtpy import QtWidgets, QtCore, QtGui
@@ -15,10 +18,11 @@ from openpype.tools.utils import (
from .constants import ResetKeySequence
from .publish_report_viewer import PublishReportViewerWidget
+from .control import CardMessageTypes
from .control_qt import QtPublisherController
from .widgets import (
OverviewWidget,
- ValidationsWidget,
+ ReportPageWidget,
PublishFrame,
PublisherTabsWidget,
@@ -62,8 +66,7 @@ class PublisherWindow(QtWidgets.QDialog):
on_top_flag = QtCore.Qt.Dialog
self.setWindowFlags(
- self.windowFlags()
- | QtCore.Qt.WindowTitleHint
+ QtCore.Qt.WindowTitleHint
| QtCore.Qt.WindowMaximizeButtonHint
| QtCore.Qt.WindowMinimizeButtonHint
| QtCore.Qt.WindowCloseButtonHint
@@ -182,7 +185,7 @@ class PublisherWindow(QtWidgets.QDialog):
controller, content_stacked_widget
)
- report_widget = ValidationsWidget(controller, parent)
+ report_widget = ReportPageWidget(controller, parent)
# Details - Publish details
publish_details_widget = PublishReportViewerWidget(
@@ -313,6 +316,13 @@ class PublisherWindow(QtWidgets.QDialog):
controller.event_system.add_callback(
"convertors.find.failed", self._on_convertor_error
)
+ controller.event_system.add_callback(
+ "export_report.request", self._export_report
+ )
+ controller.event_system.add_callback(
+ "copy_report.request", self._copy_report
+ )
+
# Store extra header widget for TrayPublisher
# - can be used to add additional widgets to header between context
@@ -665,7 +675,15 @@ class PublisherWindow(QtWidgets.QDialog):
self._tabs_widget.set_current_tab(identifier)
def set_current_tab(self, tab):
- self._set_current_tab(tab)
+ if tab == "create":
+ self._go_to_create_tab()
+ elif tab == "publish":
+ self._go_to_publish_tab()
+ elif tab == "report":
+ self._go_to_report_tab()
+ elif tab == "details":
+ self._go_to_details_tab()
+
if not self._window_is_visible:
self.set_tab_on_reset(tab)
@@ -675,6 +693,12 @@ class PublisherWindow(QtWidgets.QDialog):
def _go_to_create_tab(self):
if self._create_tab.isEnabled():
self._set_current_tab("create")
+ return
+
+ self._overlay_object.add_message(
+ "Can't switch to Create tab because publishing is paused.",
+ message_type="info"
+ )
def _go_to_publish_tab(self):
self._set_current_tab("publish")
@@ -825,6 +849,9 @@ class PublisherWindow(QtWidgets.QDialog):
self._validate_btn.setEnabled(validate_enabled)
self._publish_btn.setEnabled(publish_enabled)
+ if not publish_enabled:
+ self._publish_frame.set_shrunk_state(True)
+
self._update_publish_details_widget()
def _validate_create_instances(self):
@@ -941,6 +968,46 @@ class PublisherWindow(QtWidgets.QDialog):
under_mouse = widget_x < global_pos.x()
self._create_overlay_button.set_under_mouse(under_mouse)
+ def _copy_report(self):
+ logs = self._controller.get_publish_report()
+ logs_string = json.dumps(logs, indent=4)
+
+ mime_data = QtCore.QMimeData()
+ mime_data.setText(logs_string)
+ QtWidgets.QApplication.instance().clipboard().setMimeData(
+ mime_data
+ )
+ self._controller.emit_card_message(
+ "Report added to clipboard",
+ CardMessageTypes.info)
+
+ def _export_report(self):
+ default_filename = "publish-report-{}".format(
+ time.strftime("%y%m%d-%H-%M")
+ )
+ default_filepath = os.path.join(
+ os.path.expanduser("~"),
+ default_filename
+ )
+ new_filepath, ext = QtWidgets.QFileDialog.getSaveFileName(
+ self, "Save report", default_filepath, ".json"
+ )
+ if not ext or not new_filepath:
+ return
+
+ logs = self._controller.get_publish_report()
+ full_path = new_filepath + ext
+ dir_path = os.path.dirname(full_path)
+ if not os.path.exists(dir_path):
+ os.makedirs(dir_path)
+
+ with open(full_path, "w") as file_stream:
+ json.dump(logs, file_stream)
+
+ self._controller.emit_card_message(
+ "Report saved",
+ CardMessageTypes.info)
+
class ErrorsMessageBox(ErrorMessageBox):
def __init__(self, error_title, failed_info, message_start, parent):
diff --git a/openpype/tools/tray/pype_tray.py b/openpype/tools/tray/pype_tray.py
index 2f3b5251f9..fdc0a8094d 100644
--- a/openpype/tools/tray/pype_tray.py
+++ b/openpype/tools/tray/pype_tray.py
@@ -633,10 +633,10 @@ class TrayManager:
# Create a copy of sys.argv
additional_args = list(sys.argv)
- # Check last argument from `get_openpype_execute_args`
- # - when running from code it is the same as first from sys.argv
- if args[-1] == additional_args[0]:
- additional_args.pop(0)
+ # Remove first argument from 'sys.argv'
+ # - when running from code the first argument is 'start.py'
+ # - when running from build the first argument is executable
+ additional_args.pop(0)
cleanup_additional_args = False
if use_expected_version:
@@ -663,7 +663,6 @@ class TrayManager:
additional_args = _additional_args
args.extend(additional_args)
-
run_detached_process(args, env=envs)
self.exit()
diff --git a/openpype/tools/utils/__init__.py b/openpype/tools/utils/__init__.py
index 4149763f80..10bd527692 100644
--- a/openpype/tools/utils/__init__.py
+++ b/openpype/tools/utils/__init__.py
@@ -1,13 +1,16 @@
+from .layouts import FlowLayout
from .widgets import (
FocusSpinBox,
FocusDoubleSpinBox,
ComboBox,
CustomTextComboBox,
PlaceholderLineEdit,
+ ExpandingTextEdit,
BaseClickableFrame,
ClickableFrame,
ClickableLabel,
ExpandBtn,
+ ClassicExpandBtn,
PixmapLabel,
IconButton,
PixmapButton,
@@ -37,15 +40,19 @@ from .overlay_messages import (
__all__ = (
+ "FlowLayout",
+
"FocusSpinBox",
"FocusDoubleSpinBox",
"ComboBox",
"CustomTextComboBox",
"PlaceholderLineEdit",
+ "ExpandingTextEdit",
"BaseClickableFrame",
"ClickableFrame",
"ClickableLabel",
"ExpandBtn",
+ "ClassicExpandBtn",
"PixmapLabel",
"IconButton",
"PixmapButton",
diff --git a/openpype/tools/utils/layouts.py b/openpype/tools/utils/layouts.py
new file mode 100644
index 0000000000..65ea087c27
--- /dev/null
+++ b/openpype/tools/utils/layouts.py
@@ -0,0 +1,150 @@
+from qtpy import QtWidgets, QtCore
+
+
+class FlowLayout(QtWidgets.QLayout):
+ """Layout that organize widgets by minimum size into a flow layout.
+
+ Layout is putting widget from left to right and top to bottom. When widget
+ can't fit a row it is added to next line. Minimum size matches widget with
+ biggest 'sizeHint' width and height using calculated geometry.
+
+ Content margins are part of calculations. It is possible to define
+ horizontal and vertical spacing.
+
+ Layout does not support stretch and spacing items.
+
+ Todos:
+ Unified width concept -> use width of largest item so all of them are
+ same. This could allow to have minimum columns option too.
+ """
+
+ def __init__(self, parent=None):
+ super(FlowLayout, self).__init__(parent)
+
+ # spaces between each item
+ self._horizontal_spacing = 5
+ self._vertical_spacing = 5
+
+ self._items = []
+
+ def __del__(self):
+ while self.count():
+ self.takeAt(0, False)
+
+ def isEmpty(self):
+ for item in self._items:
+ if not item.isEmpty():
+ return False
+ return True
+
+ def setSpacing(self, spacing):
+ self._horizontal_spacing = spacing
+ self._vertical_spacing = spacing
+ self.invalidate()
+
+ def setHorizontalSpacing(self, spacing):
+ self._horizontal_spacing = spacing
+ self.invalidate()
+
+ def setVerticalSpacing(self, spacing):
+ self._vertical_spacing = spacing
+ self.invalidate()
+
+ def addItem(self, item):
+ self._items.append(item)
+ self.invalidate()
+
+ def count(self):
+ return len(self._items)
+
+ def itemAt(self, index):
+ if 0 <= index < len(self._items):
+ return self._items[index]
+ return None
+
+ def takeAt(self, index, invalidate=True):
+ if 0 <= index < len(self._items):
+ item = self._items.pop(index)
+ if invalidate:
+ self.invalidate()
+ return item
+ return None
+
+ def expandingDirections(self):
+ return QtCore.Qt.Orientations(QtCore.Qt.Vertical)
+
+ def hasHeightForWidth(self):
+ return True
+
+ def heightForWidth(self, width):
+ return self._setup_geometry(QtCore.QRect(0, 0, width, 0), True)
+
+ def setGeometry(self, rect):
+ super(FlowLayout, self).setGeometry(rect)
+ self._setup_geometry(rect)
+
+ def sizeHint(self):
+ return self.minimumSize()
+
+ def minimumSize(self):
+ size = QtCore.QSize(0, 0)
+ for item in self._items:
+ widget = item.widget()
+ if widget is not None:
+ parent = widget.parent()
+ if not widget.isVisibleTo(parent):
+ continue
+ size = size.expandedTo(item.minimumSize())
+
+ if size.width() < 1 or size.height() < 1:
+ return size
+ l_margin, t_margin, r_margin, b_margin = self.getContentsMargins()
+ size += QtCore.QSize(l_margin + r_margin, t_margin + b_margin)
+ return size
+
+ def _setup_geometry(self, rect, only_calculate=False):
+ h_spacing = self._horizontal_spacing
+ v_spacing = self._vertical_spacing
+ l_margin, t_margin, r_margin, b_margin = self.getContentsMargins()
+
+ left_x = rect.x() + l_margin
+ top_y = rect.y() + t_margin
+ pos_x = left_x
+ pos_y = top_y
+ row_height = 0
+ for item in self._items:
+ item_hint = item.sizeHint()
+ item_width = item_hint.width()
+ item_height = item_hint.height()
+ if item_width < 1 or item_height < 1:
+ continue
+
+ end_x = pos_x + item_width
+
+ wrap = (
+ row_height > 0
+ and (
+ end_x > rect.right()
+ or (end_x + r_margin) > rect.right()
+ )
+ )
+ if not wrap:
+ next_pos_x = end_x + h_spacing
+ else:
+ pos_x = left_x
+ pos_y += row_height + v_spacing
+ next_pos_x = pos_x + item_width + h_spacing
+ row_height = 0
+
+ if not only_calculate:
+ item.setGeometry(
+ QtCore.QRect(pos_x, pos_y, item_width, item_height)
+ )
+
+ pos_x = next_pos_x
+ row_height = max(row_height, item_height)
+
+ height = (pos_y - top_y) + row_height
+ if height > 0:
+ height += b_margin
+ return height
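+
+
+# Minimal usage sketch (illustrative only; 'parent' is any existing QWidget
+# and the button labels are assumptions):
+#   layout = FlowLayout(parent)
+#   layout.setHorizontalSpacing(8)
+#   layout.setVerticalSpacing(4)
+#   for label in ("model", "rig", "look"):
+#       layout.addWidget(QtWidgets.QPushButton(label, parent))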
diff --git a/openpype/tools/utils/lib.py b/openpype/tools/utils/lib.py
index 950c782727..58ece7c68f 100644
--- a/openpype/tools/utils/lib.py
+++ b/openpype/tools/utils/lib.py
@@ -872,7 +872,6 @@ class WrappedCallbackItem:
self.log.warning("- item is already processed")
return
- self.log.debug("Running callback: {}".format(str(self._callback)))
try:
result = self._callback(*self._args, **self._kwargs)
self._result = result
diff --git a/openpype/tools/utils/overlay_messages.py b/openpype/tools/utils/overlay_messages.py
index 180d7eae97..4da266bcf7 100644
--- a/openpype/tools/utils/overlay_messages.py
+++ b/openpype/tools/utils/overlay_messages.py
@@ -127,8 +127,7 @@ class OverlayMessageWidget(QtWidgets.QFrame):
if timeout:
self._timeout_timer.setInterval(timeout)
- if message_type:
- set_style_property(self, "type", message_type)
+ set_style_property(self, "type", message_type)
self._timeout_timer.start()
diff --git a/openpype/tools/utils/widgets.py b/openpype/tools/utils/widgets.py
index bae89aeb09..5a8104611b 100644
--- a/openpype/tools/utils/widgets.py
+++ b/openpype/tools/utils/widgets.py
@@ -101,6 +101,46 @@ class PlaceholderLineEdit(QtWidgets.QLineEdit):
self.setPalette(filter_palette)
+class ExpandingTextEdit(QtWidgets.QTextEdit):
+ """QTextEdit which does not have sroll area but expands height."""
+
+ def __init__(self, parent=None):
+ super(ExpandingTextEdit, self).__init__(parent)
+
+ size_policy = self.sizePolicy()
+ size_policy.setHeightForWidth(True)
+ size_policy.setVerticalPolicy(QtWidgets.QSizePolicy.Preferred)
+ self.setSizePolicy(size_policy)
+
+ self.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)
+ self.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)
+
+ doc = self.document()
+ doc.contentsChanged.connect(self._on_doc_change)
+
+ def _on_doc_change(self):
+ self.updateGeometry()
+
+ def hasHeightForWidth(self):
+ return True
+
+ def heightForWidth(self, width):
+ margins = self.contentsMargins()
+
+ document_width = 0
+ if width >= margins.left() + margins.right():
+ document_width = width - margins.left() - margins.right()
+
+ document = self.document().clone()
+ document.setTextWidth(document_width)
+
+ return margins.top() + document.size().height() + margins.bottom()
+
+ def sizeHint(self):
+ width = super(ExpandingTextEdit, self).sizeHint().width()
+ return QtCore.QSize(width, self.heightForWidth(width))
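+
+    # Minimal usage sketch (illustrative only; 'scroll' is an assumed
+    # QScrollArea that provides the scrolling instead of the text edit):
+    #   text_edit = ExpandingTextEdit(scroll)
+    #   text_edit.setHtml("<b>Description</b><br/>More details...")
+    #   scroll.setWidget(text_edit)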
+
+
class BaseClickableFrame(QtWidgets.QFrame):
"""Widget that catch left mouse click and can trigger a callback.
@@ -161,19 +201,34 @@ class ClickableLabel(QtWidgets.QLabel):
class ExpandBtnLabel(QtWidgets.QLabel):
"""Label showing expand icon meant for ExpandBtn."""
+ state_changed = QtCore.Signal()
+
+
def __init__(self, parent):
super(ExpandBtnLabel, self).__init__(parent)
- self._source_collapsed_pix = QtGui.QPixmap(
- get_style_image_path("branch_closed")
- )
- self._source_expanded_pix = QtGui.QPixmap(
- get_style_image_path("branch_open")
- )
+ self._source_collapsed_pix = self._create_collapsed_pixmap()
+ self._source_expanded_pix = self._create_expanded_pixmap()
self._current_image = self._source_collapsed_pix
self._collapsed = True
- def set_collapsed(self, collapsed):
+ def _create_collapsed_pixmap(self):
+ return QtGui.QPixmap(
+ get_style_image_path("branch_closed")
+ )
+
+ def _create_expanded_pixmap(self):
+ return QtGui.QPixmap(
+ get_style_image_path("branch_open")
+ )
+
+ @property
+ def collapsed(self):
+ return self._collapsed
+
+ def set_collapsed(self, collapsed=None):
+ if collapsed is None:
+ collapsed = not self._collapsed
if self._collapsed == collapsed:
return
self._collapsed = collapsed
@@ -182,6 +237,7 @@ class ExpandBtnLabel(QtWidgets.QLabel):
else:
self._current_image = self._source_expanded_pix
self._set_resized_pix()
+ self.state_changed.emit()
def resizeEvent(self, event):
self._set_resized_pix()
@@ -203,21 +259,55 @@ class ExpandBtnLabel(QtWidgets.QLabel):
class ExpandBtn(ClickableFrame):
+ state_changed = QtCore.Signal()
+
def __init__(self, parent=None):
super(ExpandBtn, self).__init__(parent)
- pixmap_label = ExpandBtnLabel(self)
+ pixmap_label = self._create_pix_widget(self)
layout = QtWidgets.QHBoxLayout(self)
layout.setContentsMargins(0, 0, 0, 0)
layout.addWidget(pixmap_label)
+ pixmap_label.state_changed.connect(self.state_changed)
+
self._pixmap_label = pixmap_label
- def set_collapsed(self, collapsed):
+ def _create_pix_widget(self, parent=None):
+ if parent is None:
+ parent = self
+ return ExpandBtnLabel(parent)
+
+ @property
+ def collapsed(self):
+ return self._pixmap_label.collapsed
+
+ def set_collapsed(self, collapsed=None):
self._pixmap_label.set_collapsed(collapsed)
+class ClassicExpandBtnLabel(ExpandBtnLabel):
+ def _create_collapsed_pixmap(self):
+ return QtGui.QPixmap(
+ get_style_image_path("right_arrow")
+ )
+
+ def _create_expanded_pixmap(self):
+ return QtGui.QPixmap(
+ get_style_image_path("down_arrow")
+ )
+
+
+class ClassicExpandBtn(ExpandBtn):
+ """Same as 'ExpandBtn' but with arrow images."""
+
+ def _create_pix_widget(self, parent=None):
+ if parent is None:
+ parent = self
+ return ClassicExpandBtnLabel(parent)
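+
+    # Minimal usage sketch (illustrative only; 'parent' is an assumed widget):
+    #   btn = ClassicExpandBtn(parent)
+    #   btn.set_collapsed()      # calling without an argument toggles state
+    #   expanded = not btn.collapsed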
+
+
class ImageButton(QtWidgets.QPushButton):
"""PushButton with icon and size of font.
diff --git a/openpype/tools/workfiles/window.py b/openpype/tools/workfiles/window.py
index 31ecf50d3b..53f8894665 100644
--- a/openpype/tools/workfiles/window.py
+++ b/openpype/tools/workfiles/window.py
@@ -1,6 +1,7 @@
import os
import datetime
import copy
+import platform
from qtpy import QtCore, QtWidgets, QtGui
from openpype.client import (
@@ -94,6 +95,19 @@ class SidePanelWidget(QtWidgets.QWidget):
self._on_note_change()
self.save_clicked.emit()
+ def get_user_name(self, file):
+ """Get user name from file path"""
+ # Only run on Unix because pwd module is not available on Windows.
+ # NOTE: we tried adding "win32security" module but it was not working
+ # on all hosts so we decided to just support Linux until migration
+ # to Ayon
+ if platform.system().lower() == "windows":
+ return None
+ import pwd
+
+ filestat = os.stat(file)
+ return pwd.getpwuid(filestat.st_uid).pw_name
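+
+    # Minimal usage sketch (illustrative only; the workfile path is an
+    # assumption):
+    #   owner = side_panel.get_user_name("/path/to/workfile.ma")
+    #   # 'owner' is None on Windows, otherwise the owning user's login name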
+
def set_context(self, asset_id, task_name, filepath, workfile_doc):
# Check if asset, task and file are selected
# NOTE workfile document is not requirement
@@ -134,8 +148,14 @@ class SidePanelWidget(QtWidgets.QWidget):
"Created:",
creation_time.strftime(datetime_format),
"Modified:",
- modification_time.strftime(datetime_format)
+ modification_time.strftime(datetime_format),
)
+ username = self.get_user_name(filepath)
+ if username:
+ lines += (
+ "User:",
+ username,
+ )
self._details_input.appendHtml("
".join(lines))
def get_workfile_data(self):
diff --git a/openpype/version.py b/openpype/version.py
index 8874eb510d..b55ca42244 100644
--- a/openpype/version.py
+++ b/openpype/version.py
@@ -1,3 +1,3 @@
# -*- coding: utf-8 -*-
"""Package declaring Pype version."""
-__version__ = "3.15.8-nightly.2"
+__version__ = "3.15.10-nightly.1"
diff --git a/pyproject.toml b/pyproject.toml
index 190ecb9329..633899d3a0 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,6 @@
[tool.poetry]
name = "OpenPype"
-version = "3.15.7" # OpenPype
+version = "3.15.9" # OpenPype
description = "Open VFX and Animation pipeline with support."
authors = ["OpenPype Team "]
license = "MIT License"
diff --git a/tests/README.md b/tests/README.md
index d36b6534f8..20847b2449 100644
--- a/tests/README.md
+++ b/tests/README.md
@@ -15,16 +15,16 @@ Structure:
- openpype/modules/MODULE_NAME - structure follow directory structure in code base
- fixture - sample data `(MongoDB dumps, test files etc.)`
- `tests.py` - single or more pytest files for MODULE_NAME
-- unit - quick unit test
- - MODULE_NAME
+- unit - quick unit test
+ - MODULE_NAME
- fixture
- `tests.py`
-
+
How to run:
----------
- use Openpype command 'runtests' from command line (`.venv` in ${OPENPYPE_ROOT} must be activated to use configured Python!)
-- `python ${OPENPYPE_ROOT}/start.py runtests`
-
+
By default, this command will run all tests in ${OPENPYPE_ROOT}/tests.
Specific location could be provided to this command as an argument, either as absolute path, or relative path to ${OPENPYPE_ROOT}.
@@ -41,17 +41,15 @@ In some cases your tests might be so localized, that you don't care about all en
In that case you might add this dummy configuration BEFORE any imports in your test file
```
import os
-os.environ["AVALON_MONGO"] = "mongodb://localhost:27017"
+os.environ["OPENPYPE_DEBUG"] = "1"
os.environ["OPENPYPE_MONGO"] = "mongodb://localhost:27017"
-os.environ["AVALON_DB"] = "avalon"
os.environ["OPENPYPE_DATABASE_NAME"] = "openpype"
-os.environ["AVALON_TIMEOUT"] = '3000'
-os.environ["OPENPYPE_DEBUG"] = "3"
-os.environ["AVALON_CONFIG"] = "pype"
+os.environ["AVALON_DB"] = "avalon"
+os.environ["AVALON_TIMEOUT"] = "3000"
os.environ["AVALON_ASSET"] = "Asset"
os.environ["AVALON_PROJECT"] = "test_project"
```
(AVALON_ASSET and AVALON_PROJECT values should exist in your environment)
This might be enough to run your test file separately. Do not commit this skeleton though.
-Use only when you know what you are doing!
\ No newline at end of file
+Use only when you know what you are doing!
diff --git a/website/docs/artist_hosts_3dsmax.md b/website/docs/artist_hosts_3dsmax.md
index 12c1f40181..fffab8ca5d 100644
--- a/website/docs/artist_hosts_3dsmax.md
+++ b/website/docs/artist_hosts_3dsmax.md
@@ -30,7 +30,7 @@ By clicking the icon ```OpenPype Menu``` rolls out.
Choose ```OpenPype Menu > Launcher``` to open the ```Launcher``` window.
-When opened you can **choose** the **project** to work in from the list. Then choose the particular **asset** you want to work on then choose **task**
+When opened you can **choose** the **project** to work in from the list. Then choose the particular **asset** you want to work on, then choose the **task**
and finally **run 3dsmax by its icon** in the tools.

@@ -65,13 +65,13 @@ If not any workfile present simply hit ```Save As``` and keep ```Subversion``` e

-OpenPype correctly names it and add version to the workfile. This basically happens whenever user trigger ```Save As``` action. Resulting into incremental version numbers like
+OpenPype correctly names it and adds a version to the workfile. This basically happens whenever the user triggers the ```Save As``` action, resulting in incremental version numbers like
```workfileName_v001```
```workfileName_v002```
- etc.
+ etc.
Basically meaning user is free of guessing what is the correct naming and other necessities to keep everything in order and managed.
@@ -105,13 +105,13 @@ Before proceeding further please check [Glossary](artist_concepts.md) and [What
### Intro
-Current OpenPype integration (ver 3.15.0) supports only ```PointCache``` and ```Camera``` families now.
+The current OpenPype integration (ver 3.15.0) supports only the ```PointCache```, ```Camera```, ```Geometry``` and ```Redshift Proxy``` families.
**Pointcache** family being basically any geometry outputted as Alembic cache (.abc) format
**Camera** family being 3dsmax Camera object with/without animation outputted as native .max, FBX, Alembic format
-
+**Redshift Proxy** family being a Redshift Proxy object with/without animation outputted as the .rs format (Redshift Proxy's own file format)
---
:::note Work in progress
@@ -119,7 +119,3 @@ This part of documentation is still work in progress.
:::
## ...to be added
-
-
-
-
diff --git a/website/docs/artist_hosts_houdini.md b/website/docs/artist_hosts_houdini.md
index 8874a0b5cf..0471765365 100644
--- a/website/docs/artist_hosts_houdini.md
+++ b/website/docs/artist_hosts_houdini.md
@@ -14,7 +14,7 @@ sidebar_label: Houdini
- [Library Loader](artist_tools_library-loader)
## Publishing Alembic Cameras
-You can publish baked camera in Alembic format.
+You can publish baked camera in Alembic format.
Select your camera and go **OpenPype -> Create** and select **Camera (abc)**.
This will create Alembic ROP in **out** with path and frame range already set. This node will have a name you've
@@ -30,7 +30,7 @@ You can use any COP node and publish the image sequence generated from it. For e

To publish the output of the `radialblur1` go to **OpenPype -> Create** and
-select **Composite (Image Sequence)**. If you name the variant *Noise* this will create the `/out/imagesequenceNoise` Composite ROP with the frame range set.
+select **Composite (Image Sequence)**. If you name the variant *Noise* this will create the `/out/imagesequenceNoise` Composite ROP with the frame range set.
When you hit **Publish** it will render image sequence from selected node.
@@ -56,14 +56,14 @@ Now select the `output0` node and go **OpenPype -> Create** and select **Point C
Alembic ROP `/out/pointcacheStrange`
## Publishing Reviews (OpenGL)
-To generate a review output from Houdini you need to create a **review** instance.
+To generate a review output from Houdini you need to create a **review** instance.
Go to **OpenPype -> Create** and select **Review**.

-On create, with the **Use Selection** checkbox enabled it will set up the first
-camera found in your selection as the camera for the OpenGL ROP node and other
-non-cameras are set in **Force Objects**. It will then render those even if
+On create, with the **Use Selection** checkbox enabled, it will set up the first
+camera found in your selection as the camera for the OpenGL ROP node, and the other
+non-camera objects are set in **Force Objects**. It will then render those even if
their display flag is disabled in your scene.
## Redshift
@@ -71,6 +71,18 @@ their display flag is disabled in your scene.
This part of documentation is still work in progress.
:::
+## Publishing Render to Deadline
+Five renderers (Arnold, Redshift, Mantra, Karma, VRay) are supported for render publishing.
+They are named with the suffix `_ROP`.
+To submit a render to Deadline, you need to create a **Render** instance.
+Go to **OpenPype -> Create** and select **Publish**. Before clicking the **Create** button,
+you need to select your preferred image rendering format. You can also enable **Use Selection** to
+select your render camera.
+
+
+All the render outputs are stored in the `pyblish/render` directory within your project path.\
+For Karma specifically, it also outputs the USD render by default.
+
## USD (experimental support)
### Publishing USD
You can publish your Solaris Stage as USD file.
diff --git a/website/docs/assets/houdini_render_publish_creator.png b/website/docs/assets/houdini_render_publish_creator.png
new file mode 100644
index 0000000000..5dd73d296a
Binary files /dev/null and b/website/docs/assets/houdini_render_publish_creator.png differ
diff --git a/website/docs/dev_blender.md b/website/docs/dev_blender.md
new file mode 100644
index 0000000000..bed0e4a09d
--- /dev/null
+++ b/website/docs/dev_blender.md
@@ -0,0 +1,61 @@
+---
+id: dev_blender
+title: Blender integration
+sidebar_label: Blender integration
+toc_max_heading_level: 4
+---
+
+## Run python script at launch
+In case you need to execute a python script when Blender is started (using Blender's [`-P`](https://docs.blender.org/manual/en/latest/advanced/command_line/arguments.html#python-options) option), for example to programmatically conform a blender file, you can create an OpenPype hook as follows:
+
+```python
+from openpype.hosts.blender.hooks import pre_add_run_python_script_arg
+from openpype.lib import PreLaunchHook
+
+
+class MyHook(PreLaunchHook):
+ """Add python script to be executed before Blender launch."""
+
+ order = pre_add_run_python_script_arg.AddPythonScriptToLaunchArgs.order - 1
+ app_groups = [
+ "blender",
+ ]
+
+ def execute(self):
+ self.launch_context.data.setdefault("python_scripts", []).append(
+ "/path/to/my_script.py"
+ )
+```
+
+You can write a bare python script, just like one you would run in the [Text Editor](https://docs.blender.org/manual/en/latest/editors/text_editor.html).
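+
+For instance, a minimal script of this kind might look like the following (purely illustrative; the scene rename and save step are assumptions for the example, not something OpenPype requires):
+
+```python
+import bpy
+
+# Example conforming tweak: rename the current scene and save the file.
+# Assumes the workfile has already been saved once, so it has a path.
+bpy.context.scene.name = "conformed"
+bpy.ops.wm.save_mainfile()
+```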
+
+### Python script with arguments
+#### Adding arguments
+In case you need to pass arguments to your script, you can append them to `self.launch_context.data["script_args"]`:
+
+```python
+# list.append() takes a single item, so use extend() to add both arguments.
+self.launch_context.data.setdefault("script_args", []).extend([
+    "--my-arg",
+    "value",
+])
+```
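+
+For context, the hooks above make OpenPype launch Blender with something like `-P /path/to/my_script.py -- --my-arg value` (the exact command line is an assumption here); Blender ignores everything after `--`, which is what the parsing snippet below relies on. A hedged sketch of what the script would see:
+
+```python
+import sys
+
+# Hypothetical resulting argv:
+#   ["blender", ..., "-P", "/path/to/my_script.py", "--", "--my-arg", "value"]
+# Blender leaves everything after "--" untouched, so the script can read it back.
+script_args = sys.argv[sys.argv.index("--") + 1:]
+print(script_args)  # ["--my-arg", "value"]
+```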
+
+#### Parsing arguments
+You can parse arguments in your script using [argparse](https://docs.python.org/3/library/argparse.html) as follows:
+
+```python
+import argparse
+import sys
+
+parser = argparse.ArgumentParser(
+    description="Parsing arguments for my_script.py"
+)
+parser.add_argument(
+    "--my-arg",
+    nargs="?",
+    help="My argument",
+)
+# Only parse the arguments that follow the "--" separator.
+args, unknown = parser.parse_known_args(
+    sys.argv[sys.argv.index("--") + 1:]
+)
+print(args.my_arg)
+```
diff --git a/website/docs/module_deadline.md b/website/docs/module_deadline.md
index 94b6a381c2..bca2a83936 100644
--- a/website/docs/module_deadline.md
+++ b/website/docs/module_deadline.md
@@ -22,6 +22,9 @@ For [AWS Thinkbox Deadline](https://www.awsthinkbox.com/deadline) support you ne
5. Install our custom plugin and scripts to your deadline repository. It should be as simple as copying content of `openpype/modules/deadline/repository/custom` to `path/to/your/deadline/repository/custom`.
+Multiple Deadline webservices can be configured. First set them up in step 4, then they can be assigned per project in `project_settings/deadline/deadline_servers`.
+Only a single webservice can be the target of a publish, though.
+
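+As a purely hypothetical sketch (key names other than `deadline_servers` are assumptions, not the actual settings schema), a per-project override selecting one of the configured webservices could conceptually look like this:
+
+```python
+# Conceptual sketch only - not the real OpenPype settings layout.
+project_settings = {
+    "deadline": {
+        # Name(s) of webservices defined in the system settings (step 4);
+        # only one of them can be the publish target.
+        "deadline_servers": ["farm_main"],
+    }
+}
+```
+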
## Configuration
diff --git a/website/docs/module_kitsu.md b/website/docs/module_kitsu.md
index d79c78fecf..9695542723 100644
--- a/website/docs/module_kitsu.md
+++ b/website/docs/module_kitsu.md
@@ -18,9 +18,20 @@ This setting is available for all the users of the OpenPype instance.
## Synchronize
Updating OP with Kitsu data is executed running the `sync-service`, which requires to provide your Kitsu credentials with `-l, --login` and `-p, --password` or by setting the environment variables `KITSU_LOGIN` and `KITSU_PWD`. This process will request data from Kitsu and create/delete/update OP assets.
Once this sync is done, the thread will automatically start a loop to listen to Kitsu events.
+- `-prj, --project` This flag accepts multiple project names to sync only specific projects; by default all projects are synced.
+- `-lo, --listen-only` This flag listens to Kitsu events only, without any sync.
+
+Note: Use only one of `-prj` or `-lo`, because the listen-only flag overrides the syncing flag.
```bash
+# sync all projects then run listen
openpype_console module kitsu sync-service -l me@domain.ext -p my_password
+
+# sync specific projects then run listen
+openpype_console module kitsu sync-service -l me@domain.ext -p my_password -prj project_name01 -prj project_name02
+
+# start listen only for all projects
+openpype_console module kitsu sync-service -l me@domain.ext -p my_password -lo
```
### Events listening
diff --git a/website/docs/project_settings/assets/global_extract_review_letter_box_settings.png b/website/docs/project_settings/assets/global_extract_review_letter_box_settings.png
index 80e00702e6..76dd9b372a 100644
Binary files a/website/docs/project_settings/assets/global_extract_review_letter_box_settings.png and b/website/docs/project_settings/assets/global_extract_review_letter_box_settings.png differ
diff --git a/website/docs/project_settings/settings_project_global.md b/website/docs/project_settings/settings_project_global.md
index c17f707830..5ddf247d98 100644
--- a/website/docs/project_settings/settings_project_global.md
+++ b/website/docs/project_settings/settings_project_global.md
@@ -63,7 +63,7 @@ Example here describes use case for creation of new color coded review of png im

Another use case is to transcode in Maya only `beauty` render layers and use collected `Display` and `View` colorspaces from DCC.
-n
+
## Profile filters
@@ -170,12 +170,10 @@ A profile may generate multiple outputs from a single input. Each output must de
- **`Letter Box`**
- **Enabled** - Enable letter boxes
- - **Ratio** - Ratio of letter boxes
- - **Type** - **Letterbox** (horizontal bars) or **Pillarbox** (vertical bars)
+ - **Ratio** - Ratio of the letter boxes. Whether a letterbox or pillarbox is used is derived from the output image dimensions: if the letterbox ratio is greater than the image ratio, a _letterbox_ is applied, otherwise a _pillarbox_ is rendered (see the short sketch below the example images).
- **Fill color** - Fill color of boxes (RGBA: 0-255)
- **Line Thickness** - Line thickness on the edge of box (set to `0` to turn off)
- - **Fill color** - Line color on the edge of box (RGBA: 0-255)
- - **Example**
+ - **Line color** - Line color on the edge of box (RGBA: 0-255)


diff --git a/website/sidebars.js b/website/sidebars.js
index 4874782197..267cc7f6d7 100644
--- a/website/sidebars.js
+++ b/website/sidebars.js
@@ -180,6 +180,7 @@ module.exports = {
]
},
"dev_deadline",
+ "dev_blender",
"dev_colorspace"
]
};