Merge remote-tracking branch 'origin/develop' into feature/houdini_export_task

This commit is contained in:
Ondřej Samohel 2023-12-06 11:03:06 +01:00
commit 0cb5c3814d
1064 changed files with 82077 additions and 11378 deletions

View file

@ -35,6 +35,51 @@ body:
label: Version
description: What version are you running? Look to OpenPype Tray
options:
- 3.17.7-nightly.6
- 3.17.7-nightly.5
- 3.17.7-nightly.4
- 3.17.7-nightly.3
- 3.17.7-nightly.2
- 3.17.7-nightly.1
- 3.17.6
- 3.17.6-nightly.3
- 3.17.6-nightly.2
- 3.17.6-nightly.1
- 3.17.5
- 3.17.5-nightly.3
- 3.17.5-nightly.2
- 3.17.5-nightly.1
- 3.17.4
- 3.17.4-nightly.2
- 3.17.4-nightly.1
- 3.17.3
- 3.17.3-nightly.2
- 3.17.3-nightly.1
- 3.17.2
- 3.17.2-nightly.4
- 3.17.2-nightly.3
- 3.17.2-nightly.2
- 3.17.2-nightly.1
- 3.17.1
- 3.17.1-nightly.3
- 3.17.1-nightly.2
- 3.17.1-nightly.1
- 3.17.0
- 3.16.7
- 3.16.7-nightly.2
- 3.16.7-nightly.1
- 3.16.6
- 3.16.6-nightly.1
- 3.16.5
- 3.16.5-nightly.5
- 3.16.5-nightly.4
- 3.16.5-nightly.3
- 3.16.5-nightly.2
- 3.16.5-nightly.1
- 3.16.4
- 3.16.4-nightly.3
- 3.16.4-nightly.2
- 3.16.4-nightly.1
- 3.16.3
- 3.16.3-nightly.5
- 3.16.3-nightly.4
@ -90,51 +135,6 @@ body:
- 3.15.3-nightly.2
- 3.15.3-nightly.1
- 3.15.2
- 3.15.2-nightly.6
- 3.15.2-nightly.5
- 3.15.2-nightly.4
- 3.15.2-nightly.3
- 3.15.2-nightly.2
- 3.15.2-nightly.1
- 3.15.1
- 3.15.1-nightly.6
- 3.15.1-nightly.5
- 3.15.1-nightly.4
- 3.15.1-nightly.3
- 3.15.1-nightly.2
- 3.15.1-nightly.1
- 3.15.0
- 3.15.0-nightly.1
- 3.14.11-nightly.4
- 3.14.11-nightly.3
- 3.14.11-nightly.2
- 3.14.11-nightly.1
- 3.14.10
- 3.14.10-nightly.9
- 3.14.10-nightly.8
- 3.14.10-nightly.7
- 3.14.10-nightly.6
- 3.14.10-nightly.5
- 3.14.10-nightly.4
- 3.14.10-nightly.3
- 3.14.10-nightly.2
- 3.14.10-nightly.1
- 3.14.9
- 3.14.9-nightly.5
- 3.14.9-nightly.4
- 3.14.9-nightly.3
- 3.14.9-nightly.2
- 3.14.9-nightly.1
- 3.14.8
- 3.14.8-nightly.4
- 3.14.8-nightly.3
- 3.14.8-nightly.2
- 3.14.8-nightly.1
- 3.14.7
- 3.14.7-nightly.8
- 3.14.7-nightly.7
- 3.14.7-nightly.6
- 3.14.7-nightly.5
validations:
required: true
- type: dropdown

File diff suppressed because it is too large Load diff

View file

@ -109,6 +109,8 @@ RUN source $HOME/.bashrc \
RUN cp /usr/lib64/libffi* ./build/exe.linux-x86_64-3.9/lib \
&& cp /usr/lib64/openssl11/libssl* ./build/exe.linux-x86_64-3.9/lib \
&& cp /usr/lib64/openssl11/libcrypto* ./build/exe.linux-x86_64-3.9/lib \
&& ln -sr ./build/exe.linux-x86_64-3.9/lib/libssl.so ./build/exe.linux-x86_64-3.9/lib/libssl.1.1.so \
&& ln -sr ./build/exe.linux-x86_64-3.9/lib/libcrypto.so ./build/exe.linux-x86_64-3.9/lib/libcrypto.1.1.so \
&& cp /root/.pyenv/versions/${OPENPYPE_PYTHON_VERSION}/lib/libpython* ./build/exe.linux-x86_64-3.9/lib \
&& cp /usr/lib64/libxcb* ./build/exe.linux-x86_64-3.9/vendor/python/PySide2/Qt/lib

View file

@ -62,7 +62,7 @@ development tools like [CMake](https://cmake.org/) and [Visual Studio](https://v
#### Clone repository:
```sh
git clone --recurse-submodules git@github.com:Pypeclub/OpenPype.git
git clone --recurse-submodules git@github.com:ynput/OpenPype.git
```
#### To build OpenPype:
@ -144,6 +144,10 @@ sudo ./tools/docker_build.sh centos7
If all is successful, you'll find built OpenPype in `./build/` folder.
Docker build can be also started from Windows machine, just use `./tools/docker_build.ps1` instead of shell script.
This could be used even for building linux build (with argument `centos7` or `debian`)
#### Manual build
You will need [Python >= 3.9](https://www.python.org/downloads/) and [git](https://git-scm.com/downloads). You'll also need [curl](https://curl.se) on systems that don't have it preinstalled.
@ -275,7 +279,7 @@ arguments and it will create zip file that OpenPype can use.
Building documentation
----------------------
Top build API documentation, run `.\tools\make_docs(.ps1|.sh)`. It will create html documentation
To build API documentation, run `.\tools\make_docs(.ps1|.sh)`. It will create html documentation
from current sources in `.\docs\build`.
**Note that it needs existing virtual environment.**

View file

@ -34,7 +34,11 @@ def _get_qt_app():
if attr is not None:
QtWidgets.QApplication.setAttribute(attr)
if hasattr(QtWidgets.QApplication, "setHighDpiScaleFactorRoundingPolicy"):
policy = os.getenv("QT_SCALE_FACTOR_ROUNDING_POLICY")
if (
hasattr(QtWidgets.QApplication, "setHighDpiScaleFactorRoundingPolicy")
and not policy
):
QtWidgets.QApplication.setHighDpiScaleFactorRoundingPolicy(
QtCore.Qt.HighDpiScaleFactorRoundingPolicy.PassThrough
)

View file

@ -589,7 +589,7 @@ class BootstrapRepos:
self.registry = OpenPypeSettingsRegistry()
self.zip_filter = [".pyc", "__pycache__"]
self.openpype_filter = [
"openpype", "schema", "LICENSE"
"openpype", "LICENSE"
]
# dummy progress reporter

View file

@ -36,7 +36,7 @@ WizardStyle=modern
Name: "english"; MessagesFile: "compiler:Default.isl"
[Tasks]
Name: "desktopicon"; Description: "{cm:CreateDesktopIcon}"; GroupDescription: "{cm:AdditionalIcons}"; Flags: unchecked
Name: "desktopicon"; Description: "{cm:CreateDesktopIcon}"; GroupDescription: "{cm:AdditionalIcons}"
[InstallDelete]
; clean everything in previous installation folder
@ -53,4 +53,3 @@ Name: "{autodesktop}\{#MyAppName} {#AppVer}"; Filename: "{app}\openpype_gui.exe"
[Run]
Filename: "{app}\openpype_gui.exe"; Description: "{cm:LaunchProgram,OpenPype}"; Flags: nowait postinstall skipifsilent

View file

@ -282,6 +282,9 @@ def run(script):
"--app_variant",
help="Provide specific app variant for test, empty for latest",
default=None)
@click.option("--app_group",
help="Provide specific app group for test, empty for default",
default=None)
@click.option("-t",
"--timeout",
help="Provide specific timeout value for test case",
@ -290,11 +293,15 @@ def run(script):
"--setup_only",
help="Only create dbs, do not run tests",
default=None)
@click.option("--mongo_url",
help="MongoDB for testing.",
default=None)
def runtests(folder, mark, pyargs, test_data_folder, persist, app_variant,
timeout, setup_only):
timeout, setup_only, mongo_url, app_group):
"""Run all automatic tests after proper initialization via start.py"""
PypeCommands().run_tests(folder, mark, pyargs, test_data_folder,
persist, app_variant, timeout, setup_only)
persist, app_variant, timeout, setup_only,
mongo_url, app_group)
@main.command(help="DEPRECATED - run sync server")

View file

@ -1,6 +1,7 @@
from .mongo import (
OpenPypeMongoConnection,
)
from .server.utils import get_ayon_server_api_connection
from .entities import (
get_projects,
@ -43,6 +44,8 @@ from .entities import (
get_thumbnail_id_from_source,
get_workfile_info,
get_asset_name_identifier,
)
from .entity_links import (
@ -59,6 +62,8 @@ from .operations import (
__all__ = (
"OpenPypeMongoConnection",
"get_ayon_server_api_connection",
"get_projects",
"get_project",
"get_whole_project",
@ -105,4 +110,6 @@ __all__ = (
"get_linked_representation_id",
"create_project",
"get_asset_name_identifier",
)

View file

@ -4,3 +4,22 @@ if not AYON_SERVER_ENABLED:
from .mongo.entities import *
else:
from .server.entities import *
def get_asset_name_identifier(asset_doc):
    """Get asset name identifier by asset document.

    This function is added because of the AYON implementation, where the
    name identifier is not just a name but a full path.

    Asset document must have "name" key, and "data.parents" when in AYON
    mode.

    Args:
        asset_doc (dict[str, Any]): Asset document.

    Returns:
        str: Plain asset name in OpenPype mode, or a slash-joined folder
            path (e.g. "/shots/sh010") in AYON mode.
    """

    if not AYON_SERVER_ENABLED:
        return asset_doc["name"]
    # AYON identifier is the full folder path: all parent names + own name
    parents = list(asset_doc["data"]["parents"])
    parents.append(asset_doc["name"])
    return "/" + "/".join(parents)

View file

@ -138,16 +138,22 @@ def _template_replacements_to_v3(template):
)
def _convert_template_item(template):
# Others won't have 'directory'
if "directory" not in template:
return
folder = _template_replacements_to_v3(template.pop("directory"))
template["folder"] = folder
template["file"] = _template_replacements_to_v3(template["file"])
template["path"] = "/".join(
(folder, template["file"])
)
def _convert_template_item(template_item):
    """Convert a single anatomy template item in-place.

    Applies '_template_replacements_to_v3' to every value, renames the
    v4 'directory' key to the v3 'folder' key, and fills a 'path' value
    (folder + file joined with "/") when it is missing.

    Args:
        template_item (dict[str, Any]): Template item, modified in-place.
    """

    for key, value in tuple(template_item.items()):
        template_item[key] = _template_replacements_to_v3(value)

    # Change 'directory' to 'folder'
    if "directory" in template_item:
        template_item["folder"] = template_item.pop("directory")

    # 'path' is derived only when both parts are available and it is
    # not already defined on the item
    if (
        "path" not in template_item
        and "file" in template_item
        and "folder" in template_item
    ):
        template_item["path"] = "/".join(
            (template_item["folder"], template_item["file"])
        )
def _fill_template_category(templates, cat_templates, cat_key):
@ -212,10 +218,27 @@ def convert_v4_project_to_v3(project):
_convert_template_item(template)
new_others_templates[name] = template
staging_templates = templates.pop("staging", None)
# Key 'staging_directories' is legacy key that changed
# to 'staging_dir'
_legacy_staging_templates = templates.pop("staging_directories", None)
if staging_templates is None:
staging_templates = _legacy_staging_templates
if staging_templates is None:
staging_templates = {}
# Prefix all staging template names with 'staging_' prefix
# and add them to 'others'
for name, template in staging_templates.items():
_convert_template_item(template)
new_name = "staging_{}".format(name)
new_others_templates[new_name] = template
for key in (
"work",
"publish",
"hero"
"hero",
):
cat_templates = templates.pop(key)
_fill_template_category(templates, cat_templates, key)
@ -235,6 +258,8 @@ def convert_v4_project_to_v3(project):
new_task_types = {}
for task_type in task_types:
name = task_type.pop("name")
# Change 'shortName' to 'short_name'
task_type["short_name"] = task_type.pop("shortName", None)
new_task_types[name] = task_type
config["tasks"] = new_task_types
@ -663,10 +688,13 @@ def convert_v4_representation_to_v3(representation):
if isinstance(context, six.string_types):
context = json.loads(context)
if "folder" in context:
_c_folder = context.pop("folder")
if "asset" not in context and "folder" in context:
_c_folder = context["folder"]
context["asset"] = _c_folder["name"]
elif "asset" in context and "folder" not in context:
context["folder"] = {"name": context["asset"]}
if "product" in context:
_c_product = context.pop("product")
context["family"] = _c_product["type"]
@ -959,9 +987,11 @@ def convert_create_representation_to_v4(representation, con):
converted_representation["files"] = new_files
context = representation["context"]
context["folder"] = {
"name": context.pop("asset", None)
}
if "folder" not in context:
context["folder"] = {
"name": context.get("asset")
}
context["product"] = {
"type": context.pop("family", None),
"name": context.pop("subset", None),
@ -1074,7 +1104,7 @@ def convert_update_folder_to_v4(project_name, asset_id, update_data, con):
parent_id = None
tasks = None
new_data = {}
attribs = {}
attribs = full_update_data.pop("attrib", {})
if "type" in update_data:
new_update_data["active"] = update_data["type"] == "asset"
@ -1113,6 +1143,9 @@ def convert_update_folder_to_v4(project_name, asset_id, update_data, con):
print("Folder has new data: {}".format(new_data))
new_update_data["data"] = new_data
if attribs:
new_update_data["attrib"] = attribs
if has_task_changes:
raise ValueError("Task changes of folder are not implemented")
@ -1126,7 +1159,7 @@ def convert_update_subset_to_v4(project_name, subset_id, update_data, con):
full_update_data = _from_flat_dict(update_data)
data = full_update_data.get("data")
new_data = {}
attribs = {}
attribs = full_update_data.pop("attrib", {})
if data:
if "family" in data:
family = data.pop("family")
@ -1148,9 +1181,6 @@ def convert_update_subset_to_v4(project_name, subset_id, update_data, con):
elif value is not REMOVED_VALUE:
new_data[key] = value
if attribs:
new_update_data["attribs"] = attribs
if "name" in update_data:
new_update_data["name"] = update_data["name"]
@ -1165,6 +1195,9 @@ def convert_update_subset_to_v4(project_name, subset_id, update_data, con):
new_update_data["folderId"] = update_data["parent"]
flat_data = _to_flat_dict(new_update_data)
if attribs:
flat_data["attrib"] = attribs
if new_data:
print("Subset has new data: {}".format(new_data))
flat_data["data"] = new_data
@ -1179,7 +1212,7 @@ def convert_update_version_to_v4(project_name, version_id, update_data, con):
full_update_data = _from_flat_dict(update_data)
data = full_update_data.get("data")
new_data = {}
attribs = {}
attribs = full_update_data.pop("attrib", {})
if data:
if "author" in data:
new_update_data["author"] = data.pop("author")
@ -1196,9 +1229,6 @@ def convert_update_version_to_v4(project_name, version_id, update_data, con):
elif value is not REMOVED_VALUE:
new_data[key] = value
if attribs:
new_update_data["attribs"] = attribs
if "name" in update_data:
new_update_data["version"] = update_data["name"]
@ -1213,6 +1243,9 @@ def convert_update_version_to_v4(project_name, version_id, update_data, con):
new_update_data["productId"] = update_data["parent"]
flat_data = _to_flat_dict(new_update_data)
if attribs:
flat_data["attrib"] = attribs
if new_data:
print("Version has new data: {}".format(new_data))
flat_data["data"] = new_data
@ -1252,7 +1285,7 @@ def convert_update_representation_to_v4(
data = full_update_data.get("data")
new_data = {}
attribs = {}
attribs = full_update_data.pop("attrib", {})
if data:
for key, value in data.items():
if key in folder_attributes:
@ -1282,7 +1315,7 @@ def convert_update_representation_to_v4(
if "context" in update_data:
context = update_data["context"]
if "asset" in context:
if "folder" not in context and "asset" in context:
context["folder"] = {"name": context.pop("asset")}
if "family" in context or "subset" in context:
@ -1309,6 +1342,9 @@ def convert_update_representation_to_v4(
new_update_data["files"] = new_files
flat_data = _to_flat_dict(new_update_data)
if attribs:
flat_data["attrib"] = attribs
if new_data:
print("Representation has new data: {}".format(new_data))
flat_data["data"] = new_data

View file

@ -1,9 +1,8 @@
import collections
from ayon_api import get_server_api_connection
from openpype.client.mongo.operations import CURRENT_THUMBNAIL_SCHEMA
from .utils import get_ayon_server_api_connection
from .openpype_comp import get_folders_with_tasks
from .conversion_utils import (
project_fields_v3_to_v4,
@ -37,7 +36,7 @@ def get_projects(active=True, inactive=False, library=None, fields=None):
elif inactive:
active = False
con = get_server_api_connection()
con = get_ayon_server_api_connection()
fields = project_fields_v3_to_v4(fields, con)
for project in con.get_projects(active, library, fields=fields):
yield convert_v4_project_to_v3(project)
@ -45,7 +44,7 @@ def get_projects(active=True, inactive=False, library=None, fields=None):
def get_project(project_name, active=True, inactive=False, fields=None):
# Skip if both are disabled
con = get_server_api_connection()
con = get_ayon_server_api_connection()
fields = project_fields_v3_to_v4(fields, con)
return convert_v4_project_to_v3(
con.get_project(project_name, fields=fields)
@ -66,7 +65,7 @@ def _get_subsets(
fields=None
):
# Convert fields and add minimum required fields
con = get_server_api_connection()
con = get_ayon_server_api_connection()
fields = subset_fields_v3_to_v4(fields, con)
if fields is not None:
for key in (
@ -75,18 +74,18 @@ def _get_subsets(
):
fields.add(key)
active = None
active = True
if archived:
active = False
active = None
for subset in con.get_products(
project_name,
subset_ids,
subset_names,
folder_ids,
names_by_folder_ids,
active,
fields
product_ids=subset_ids,
product_names=subset_names,
folder_ids=folder_ids,
names_by_folder_ids=names_by_folder_ids,
active=active,
fields=fields,
):
yield convert_v4_subset_to_v3(subset)
@ -102,7 +101,7 @@ def _get_versions(
active=None,
fields=None
):
con = get_server_api_connection()
con = get_ayon_server_api_connection()
fields = version_fields_v3_to_v4(fields, con)
@ -114,23 +113,23 @@ def _get_versions(
queried_versions = con.get_versions(
project_name,
version_ids,
subset_ids,
versions,
hero,
standard,
latest,
version_ids=version_ids,
product_ids=subset_ids,
versions=versions,
hero=hero,
standard=standard,
latest=latest,
active=active,
fields=fields
)
versions = []
version_entities = []
hero_versions = []
for version in queried_versions:
if version["version"] < 0:
hero_versions.append(version)
else:
versions.append(convert_v4_version_to_v3(version))
version_entities.append(convert_v4_version_to_v3(version))
if hero_versions:
subset_ids = set()
@ -160,9 +159,9 @@ def _get_versions(
break
conv_hero = convert_v4_version_to_v3(hero_version)
conv_hero["version_id"] = version_id
versions.append(conv_hero)
version_entities.append(conv_hero)
return versions
return version_entities
def get_asset_by_id(project_name, asset_id, fields=None):
@ -183,6 +182,19 @@ def get_asset_by_name(project_name, asset_name, fields=None):
return None
def _folders_query(project_name, con, fields, **kwargs):
    """Query folder entities, with tasks only when they are requested.

    Uses 'get_folders_with_tasks' when 'tasks' is in requested fields
    (or when all fields are requested with 'None'), otherwise the
    connection's plain 'get_folders' query is used.

    Args:
        project_name (str): Project name.
        con: AYON server connection.
        fields (Optional[Iterable[str]]): Requested folder fields. All
            fields are queried when 'None'.
        **kwargs: Filtering arguments passed through to the query.

    Yields:
        dict[str, Any]: Queried folder entities.
    """

    if fields is None or "tasks" in fields:
        folders = get_folders_with_tasks(
            con, project_name, fields=fields, **kwargs
        )
    else:
        folders = con.get_folders(project_name, fields=fields, **kwargs)

    for folder in folders:
        yield folder
def get_assets(
project_name,
asset_ids=None,
@ -196,26 +208,45 @@ def get_assets(
active = True
if archived:
active = False
active = None
con = get_server_api_connection()
con = get_ayon_server_api_connection()
fields = folder_fields_v3_to_v4(fields, con)
kwargs = dict(
folder_ids=asset_ids,
folder_names=asset_names,
parent_ids=parent_ids,
active=active,
fields=fields
)
if not asset_names:
for folder in _folders_query(project_name, con, fields, **kwargs):
yield convert_v4_folder_to_v3(folder, project_name)
return
if fields is None or "tasks" in fields:
folders = get_folders_with_tasks(con, project_name, **kwargs)
new_asset_names = set()
folder_paths = set()
for name in asset_names:
if "/" in name:
folder_paths.add(name)
else:
new_asset_names.add(name)
else:
folders = con.get_folders(project_name, **kwargs)
yielded_ids = set()
if folder_paths:
for folder in _folders_query(
project_name, con, fields, folder_paths=folder_paths, **kwargs
):
yielded_ids.add(folder["id"])
yield convert_v4_folder_to_v3(folder, project_name)
for folder in folders:
yield convert_v4_folder_to_v3(folder, project_name)
if not new_asset_names:
return
for folder in _folders_query(
project_name, con, fields, folder_names=new_asset_names, **kwargs
):
if folder["id"] not in yielded_ids:
yielded_ids.add(folder["id"])
yield convert_v4_folder_to_v3(folder, project_name)
def get_archived_assets(
@ -236,7 +267,7 @@ def get_archived_assets(
def get_asset_ids_with_subsets(project_name, asset_ids=None):
con = get_server_api_connection()
con = get_ayon_server_api_connection()
return con.get_folder_ids_with_products(project_name, asset_ids)
@ -282,7 +313,7 @@ def get_subsets(
def get_subset_families(project_name, subset_ids=None):
con = get_server_api_connection()
con = get_ayon_server_api_connection()
return con.get_product_type_names(project_name, subset_ids)
@ -422,7 +453,7 @@ def get_last_version_by_subset_name(
if not subset:
return None
return get_last_version_by_subset_id(
project_name, subset["id"], fields=fields
project_name, subset["_id"], fields=fields
)
@ -430,7 +461,7 @@ def get_output_link_versions(project_name, version_id, fields=None):
if not version_id:
return []
con = get_server_api_connection()
con = get_ayon_server_api_connection()
version_links = con.get_version_links(
project_name, version_id, link_direction="out")
@ -446,7 +477,7 @@ def get_output_link_versions(project_name, version_id, fields=None):
def version_is_latest(project_name, version_id):
con = get_server_api_connection()
con = get_ayon_server_api_connection()
return con.version_is_latest(project_name, version_id)
@ -501,18 +532,18 @@ def get_representations(
else:
active = None
con = get_server_api_connection()
con = get_ayon_server_api_connection()
fields = representation_fields_v3_to_v4(fields, con)
if fields and active is not None:
fields.add("active")
representations = con.get_representations(
project_name,
representation_ids,
representation_names,
version_ids,
names_by_version_ids,
active,
representation_ids=representation_ids,
representation_names=representation_names,
version_ids=version_ids,
names_by_version_ids=names_by_version_ids,
active=active,
fields=fields
)
for representation in representations:
@ -535,7 +566,7 @@ def get_representations_parents(project_name, representations):
repre["_id"]
for repre in representations
}
con = get_server_api_connection()
con = get_ayon_server_api_connection()
parents_by_repre_id = con.get_representations_parents(project_name,
repre_ids)
folder_ids = set()
@ -677,7 +708,7 @@ def get_workfile_info(
if not asset_id or not task_name or not filename:
return None
con = get_server_api_connection()
con = get_ayon_server_api_connection()
task = con.get_task_by_name(
project_name, asset_id, task_name, fields=["id", "name", "folderId"]
)

View file

@ -1,6 +1,4 @@
import ayon_api
from ayon_api import get_folder_links, get_versions_links
from .utils import get_ayon_server_api_connection
from .entities import get_assets, get_representation_by_id
@ -28,7 +26,8 @@ def get_linked_asset_ids(project_name, asset_doc=None, asset_id=None):
if not asset_id:
asset_id = asset_doc["_id"]
links = get_folder_links(project_name, asset_id, link_direction="in")
con = get_ayon_server_api_connection()
links = con.get_folder_links(project_name, asset_id, link_direction="in")
return [
link["entityId"]
for link in links
@ -115,6 +114,7 @@ def get_linked_representation_id(
if link_type:
link_types = [link_type]
con = get_ayon_server_api_connection()
# Store already found version ids to avoid recursion, and also to store
# output -> Don't forget to remove 'version_id' at the end!!!
linked_version_ids = {version_id}
@ -124,7 +124,7 @@ def get_linked_representation_id(
if not versions_to_check:
break
links = get_versions_links(
links = con.get_versions_links(
project_name,
versions_to_check,
link_types=link_types,
@ -145,8 +145,8 @@ def get_linked_representation_id(
linked_version_ids.remove(version_id)
if not linked_version_ids:
return []
representations = ayon_api.get_representations(
con = get_ayon_server_api_connection()
representations = con.get_representations(
project_name,
version_ids=linked_version_ids,
fields=["id"])

View file

@ -1,4 +1,7 @@
import collections
import json
import six
from ayon_api.graphql import GraphQlQuery, FIELD_VALUE, fields_to_dict
from .constants import DEFAULT_FOLDER_FIELDS
@ -84,12 +87,12 @@ def get_folders_with_tasks(
for folder. All possible folder fields are returned if 'None'
is passed.
Returns:
List[Dict[str, Any]]: Queried folder entities.
Yields:
Dict[str, Any]: Queried folder entities.
"""
if not project_name:
return []
return
filters = {
"projectName": project_name
@ -97,25 +100,25 @@ def get_folders_with_tasks(
if folder_ids is not None:
folder_ids = set(folder_ids)
if not folder_ids:
return []
return
filters["folderIds"] = list(folder_ids)
if folder_paths is not None:
folder_paths = set(folder_paths)
if not folder_paths:
return []
return
filters["folderPaths"] = list(folder_paths)
if folder_names is not None:
folder_names = set(folder_names)
if not folder_names:
return []
return
filters["folderNames"] = list(folder_names)
if parent_ids is not None:
parent_ids = set(parent_ids)
if not parent_ids:
return []
return
if None in parent_ids:
# Replace 'None' with '"root"' which is used during GraphQl
# query for parent ids filter for folders without folder
@ -147,10 +150,10 @@ def get_folders_with_tasks(
parsed_data = query.query(con)
folders = parsed_data["project"]["folders"]
if active is None:
return folders
return [
folder
for folder in folders
if folder["active"] is active
]
for folder in folders:
if active is not None and folder["active"] is not active:
continue
folder_data = folder.get("data")
if isinstance(folder_data, six.string_types):
folder["data"] = json.loads(folder_data)
yield folder

View file

@ -5,7 +5,6 @@ import uuid
import datetime
from bson.objectid import ObjectId
from ayon_api import get_server_api_connection
from openpype.client.operations_base import (
REMOVED_VALUE,
@ -41,7 +40,7 @@ from .conversion_utils import (
convert_update_representation_to_v4,
convert_update_workfile_info_to_v4,
)
from .utils import create_entity_id
from .utils import create_entity_id, get_ayon_server_api_connection
def _create_or_convert_to_id(entity_id=None):
@ -422,7 +421,7 @@ def failed_json_default(value):
class ServerCreateOperation(CreateOperation):
"""Opeartion to create an entity.
"""Operation to create an entity.
Args:
project_name (str): On which project operation will happen.
@ -634,7 +633,7 @@ class ServerUpdateOperation(UpdateOperation):
class ServerDeleteOperation(DeleteOperation):
"""Opeartion to delete an entity.
"""Operation to delete an entity.
Args:
project_name (str): On which project operation will happen.
@ -647,7 +646,7 @@ class ServerDeleteOperation(DeleteOperation):
self._session = session
if entity_type == "asset":
entity_type == "folder"
entity_type = "folder"
elif entity_type == "hero_version":
entity_type = "version"
@ -680,7 +679,7 @@ class OperationsSession(BaseOperationsSession):
def __init__(self, con=None, *args, **kwargs):
super(OperationsSession, self).__init__(*args, **kwargs)
if con is None:
con = get_server_api_connection()
con = get_ayon_server_api_connection()
self._con = con
self._project_cache = {}
self._nested_operations = collections.defaultdict(list)
@ -858,7 +857,7 @@ def create_project(
"""
if con is None:
con = get_server_api_connection()
con = get_ayon_server_api_connection()
return con.create_project(
project_name,
@ -870,12 +869,12 @@ def create_project(
def delete_project(project_name, con=None):
if con is None:
con = get_server_api_connection()
con = get_ayon_server_api_connection()
return con.delete_project(project_name)
def create_thumbnail(project_name, src_filepath, thumbnail_id=None, con=None):
if con is None:
con = get_server_api_connection()
con = get_ayon_server_api_connection()
return con.create_thumbnail(project_name, src_filepath, thumbnail_id)

View file

@ -1,8 +1,33 @@
import os
import uuid
import ayon_api
from openpype.client.operations_base import REMOVED_VALUE
class _GlobalCache:
    """Module-level cache of one-time initialization state."""

    # Set to True on first call of 'get_ayon_server_api_connection' so
    # site id and client version are applied only once per process
    initialized = False
def get_ayon_server_api_connection():
    """Get the global AYON server api connection.

    On the first call the connection is created (or an already created
    one is updated) with the local site id and the AYON client version
    from the 'AYON_VERSION' environment variable. Subsequent calls
    return the cached global connection directly.

    Returns:
        Connection object provided by 'ayon_api'.
    """

    if _GlobalCache.initialized:
        con = ayon_api.get_server_api_connection()
    else:
        # Imported here, presumably to avoid an import-time cycle with
        # 'openpype.lib' — TODO confirm
        from openpype.lib.local_settings import get_local_site_id

        # Mark as initialized up-front so this branch runs only once
        _GlobalCache.initialized = True
        site_id = get_local_site_id()
        version = os.getenv("AYON_VERSION")
        if ayon_api.is_connection_created():
            con = ayon_api.get_server_api_connection()
            con.set_site_id(site_id)
            con.set_client_version(version)
        else:
            con = ayon_api.create_connection(site_id, version)
    return con
def create_entity_id():
    """Create a unique identifier usable as an entity id.

    Returns:
        str: 32 character lowercase hexadecimal string (UUID1 without
            dashes).
    """

    new_id = uuid.uuid1()
    return new_id.hex

View file

@ -27,6 +27,7 @@ class AddLastWorkfileToLaunchArgs(PreLaunchHook):
"tvpaint",
"substancepainter",
"aftereffects",
"wrap"
}
launch_types = {LaunchTypes.local}

View file

@ -19,7 +19,8 @@ class CopyTemplateWorkfile(PreLaunchHook):
# Before `AddLastWorkfileToLaunchArgs`
order = 0
app_groups = {"blender", "photoshop", "tvpaint", "aftereffects"}
app_groups = {"blender", "photoshop", "tvpaint", "aftereffects",
"wrap"}
launch_types = {LaunchTypes.local}
def execute(self):

View file

@ -2,7 +2,7 @@ import subprocess
from openpype.lib.applications import PreLaunchHook, LaunchTypes
class LaunchFoundryAppsWindows(PreLaunchHook):
class LaunchNewConsoleApps(PreLaunchHook):
"""Foundry applications have specific way how to launch them.
Nuke is executed "like" python process so it is required to pass
@ -13,13 +13,15 @@ class LaunchFoundryAppsWindows(PreLaunchHook):
# Should be as last hook because must change launch arguments to string
order = 1000
app_groups = {"nuke", "nukeassist", "nukex", "hiero", "nukestudio"}
app_groups = {
"nuke", "nukeassist", "nukex", "hiero", "nukestudio", "mayapy"
}
platforms = {"windows"}
launch_types = {LaunchTypes.local}
def execute(self):
# Change `creationflags` to CREATE_NEW_CONSOLE
# - on Windows nuke will create new window using its console
# - on Windows some apps will create new window using its console
# Set `stdout` and `stderr` to None so new created console does not
# have redirected output to DEVNULL in build
self.launch_context.kwargs.update({

View file

@ -13,7 +13,7 @@ class OCIOEnvHook(PreLaunchHook):
"fusion",
"blender",
"aftereffects",
"max",
"3dsmax",
"houdini",
"maya",
"nuke",
@ -45,6 +45,9 @@ class OCIOEnvHook(PreLaunchHook):
if config_data:
ocio_path = config_data["path"]
if self.host_name in ["nuke", "hiero"]:
ocio_path = ocio_path.replace("\\", "/")
self.log.info(
f"Setting OCIO environment to config path: {ocio_path}")

View file

@ -170,7 +170,7 @@ class HostBase(object):
if project_name:
items.append(project_name)
if asset_name:
items.append(asset_name)
items.append(asset_name.lstrip("/"))
if task_name:
items.append(task_name)
if items:

View file

@ -1,6 +1,6 @@
# AfterEffects Integration
Requirements: This extension requires use of Javascript engine, which is
Requirements: This extension requires use of Javascript engine, which is
available since CC 16.0.
Please check your File>Project Settings>Expressions>Expressions Engine
@ -13,26 +13,28 @@ The After Effects integration requires two components to work; `extension` and `
To install the extension download [Extension Manager Command Line tool (ExManCmd)](https://github.com/Adobe-CEP/Getting-Started-guides/tree/master/Package%20Distribute%20Install#option-2---exmancmd).
```
ExManCmd /install {path to avalon-core}\avalon\photoshop\extension.zxp
ExManCmd /install {path to addon}/api/extension.zxp
```
OR
download [Anastasiys Extension Manager](https://install.anastasiy.com/)
`{path to addon}` will most likely be in your AppData folder on Windows, or in your user data folder on Linux and macOS.
### Server
The easiest way to get the server and After Effects launch is with:
```
python -c ^"import avalon.photoshop;avalon.aftereffects.launch(""c:\Program Files\Adobe\Adobe After Effects 2020\Support Files\AfterFX.exe"")^"
python -c ^"import openpype.hosts.aftereffects;openpype.hosts.aftereffects.launch(""c:\Program Files\Adobe\Adobe After Effects 2020\Support Files\AfterFX.exe"")^"
```
`avalon.aftereffects.launch` launches the application and server, and also closes the server when After Effects exits.
## Usage
The After Effects extension can be found under `Window > Extensions > OpenPype`. Once launched you should be presented with a panel like this:
The After Effects extension can be found under `Window > Extensions > AYON`. Once launched you should be presented with a panel like this:
![Avalon Panel](panel.PNG "Avalon Panel")
![Ayon Panel](panel.png "Ayon Panel")
## Developing
@ -43,8 +45,8 @@ When developing the extension you can load it [unsigned](https://github.com/Adob
When signing the extension you can use this [guide](https://github.com/Adobe-CEP/Getting-Started-guides/tree/master/Package%20Distribute%20Install#package-distribute-install-guide).
```
ZXPSignCmd -selfSignedCert NA NA Avalon Avalon-After-Effects avalon extension.p12
ZXPSignCmd -sign {path to avalon-core}\avalon\aftereffects\extension {path to avalon-core}\avalon\aftereffects\extension.zxp extension.p12 avalon
ZXPSignCmd -selfSignedCert NA NA Ayon Avalon-After-Effects Ayon extension.p12
ZXPSignCmd -sign {path to addon}/api/extension {path to addon}/api/extension.zxp extension.p12 Ayon
```
### Plugin Examples
@ -52,14 +54,14 @@ ZXPSignCmd -sign {path to avalon-core}\avalon\aftereffects\extension {path to av
These plugins were made with the [polly config](https://github.com/mindbender-studio/config). To fully integrate and load, you will have to use this config and add `image` to the [integration plugin](https://github.com/mindbender-studio/config/blob/master/polly/plugins/publish/integrate_asset.py).
Expected deployed extension location on default Windows:
`c:\Program Files (x86)\Common Files\Adobe\CEP\extensions\com.openpype.AE.panel`
`c:\Program Files (x86)\Common Files\Adobe\CEP\extensions\io.ynput.AE.panel`
For easier debugging of Javascript:
https://community.adobe.com/t5/download-install/adobe-extension-debuger-problem/td-p/10911704?page=1
Add (optional) --enable-blink-features=ShadowDOMV0,CustomElementsV0 when starting Chrome
then localhost:8092
Or use Visual Studio Code https://medium.com/adobetech/extendscript-debugger-for-visual-studio-code-public-release-a2ff6161fa01
Or use Visual Studio Code https://medium.com/adobetech/extendscript-debugger-for-visual-studio-code-public-release-a2ff6161fa01
## Resources
- https://javascript-tools-guide.readthedocs.io/introduction/index.html
- https://github.com/Adobe-CEP/Getting-Started-guides

View file

@ -1,32 +1,31 @@
<?xml version="1.0" encoding="UTF-8"?>
<?xml version="1.0" encoding="UTF-8"?>
<ExtensionList>
<Extension Id="com.openpype.AE.panel">
<Extension Id="io.ynput.AE.panel">
<HostList>
<!-- Comment Host tags according to the apps you want your panel to support -->
<!-- Photoshop -->
<Host Name="PHXS" Port="8088"/>
<!-- Illustrator -->
<Host Name="ILST" Port="8089"/>
<!-- InDesign -->
<Host Name="IDSN" Port="8090" />
<!-- Premiere -->
<Host Name="PPRO" Port="8091" />
<!-- AfterEffects -->
<Host Name="AEFT" Port="8092" />
<!-- PRELUDE -->
<Host Name="PRLD" Port="8093" />
<!-- FLASH Pro -->
<Host Name="FLPR" Port="8094" />
</HostList>
</Extension>
</ExtensionList>

View file

@ -1,8 +1,8 @@
<?xml version="1.0" encoding="UTF-8"?>
<ExtensionManifest Version="8.0" ExtensionBundleId="com.openpype.AE.panel" ExtensionBundleVersion="1.0.26"
ExtensionBundleName="com.openpype.AE.panel" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
<ExtensionManifest Version="8.0" ExtensionBundleId="io.ynput.AE.panel" ExtensionBundleVersion="1.1.0"
ExtensionBundleName="io.ynput.AE.panel" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
<ExtensionList>
<Extension Id="com.openpype.AE.panel" Version="1.0" />
<Extension Id="io.ynput.AE.panel" Version="1.0" />
</ExtensionList>
<ExecutionEnvironment>
<HostList>
@ -10,22 +10,22 @@
<!-- Photoshop -->
<!--<Host Name="PHXS" Version="[14.0,19.0]" /> -->
<!-- <Host Name="PHSP" Version="[14.0,19.0]" /> -->
<!-- Illustrator -->
<!-- <Host Name="ILST" Version="[18.0,22.0]" /> -->
<!-- InDesign -->
<!-- <Host Name="IDSN" Version="[10.0,13.0]" /> -->
<!-- <Host Name="IDSN" Version="[10.0,13.0]" /> -->
<!-- Premiere -->
<!-- <Host Name="PPRO" Version="[8.0,12.0]" /> -->
<!-- AfterEffects -->
<Host Name="AEFT" Version="[13.0,99.0]" />
<!-- PRELUDE -->
<!-- PRELUDE -->
<!-- <Host Name="PRLD" Version="[3.0,7.0]" /> -->
<!-- FLASH Pro -->
<!-- <Host Name="FLPR" Version="[14.0,18.0]" /> -->
@ -38,7 +38,7 @@
</RequiredRuntimeList>
</ExecutionEnvironment>
<DispatchInfoList>
<Extension Id="com.openpype.AE.panel">
<Extension Id="io.ynput.AE.panel">
<DispatchInfo >
<Resources>
<MainPath>./index.html</MainPath>
@ -49,7 +49,7 @@
</Lifecycle>
<UI>
<Type>Panel</Type>
<Menu>OpenPype</Menu>
<Menu>AYON</Menu>
<Geometry>
<Size>
<Height>200</Height>
@ -63,17 +63,17 @@
<Height>550</Height>
<Width>400</Width>
</MaxSize>-->
</Geometry>
<Icons>
<Icon Type="Normal">./icons/iconNormal.png</Icon>
<Icon Type="Normal">./icons/ayon_logo.png</Icon>
<Icon Type="RollOver">./icons/iconRollover.png</Icon>
<Icon Type="Disabled">./icons/iconDisabled.png</Icon>
<Icon Type="DarkNormal">./icons/iconDarkNormal.png</Icon>
<Icon Type="DarkRollOver">./icons/iconDarkRollover.png</Icon>
</Icons>
</Icons>
</UI>
</DispatchInfo>
</Extension>
</DispatchInfoList>
</ExtensionManifest>
</ExtensionManifest>

Binary file not shown.

After

Width:  |  Height:  |  Size: 3.5 KiB

View file

@ -215,6 +215,8 @@ function _getItem(item, comps, folders, footages){
* Refactor
*/
var item_type = '';
var path = '';
var containing_comps = [];
if (item instanceof FolderItem){
item_type = 'folder';
if (!folders){
@ -222,10 +224,18 @@ function _getItem(item, comps, folders, footages){
}
}
if (item instanceof FootageItem){
item_type = 'footage';
if (!footages){
return "{}";
}
item_type = 'footage';
if (item.file){
path = item.file.fsName;
}
if (item.usedIn){
for (j = 0; j < item.usedIn.length; ++j){
containing_comps.push(item.usedIn[j].id);
}
}
}
if (item instanceof CompItem){
item_type = 'comp';
@ -236,7 +246,9 @@ function _getItem(item, comps, folders, footages){
var item = {"name": item.name,
"id": item.id,
"type": item_type};
"type": item_type,
"path": path,
"containing_comps": containing_comps};
return JSON.stringify(item);
}

Binary file not shown.

After

Width:  |  Height:  |  Size: 16 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 13 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 13 KiB

View file

@ -74,11 +74,6 @@ class AfterEffectsHost(HostBase, IWorkfileHost, ILoadHost, IPublishHost):
register_loader_plugin_path(LOAD_PATH)
register_creator_plugin_path(CREATE_PATH)
log.info(PUBLISH_PATH)
pyblish.api.register_callback(
"instanceToggled", on_pyblish_instance_toggled
)
register_event_callback("application.launched", application_launch)
@ -186,11 +181,6 @@ def application_launch():
check_inventory()
def on_pyblish_instance_toggled(instance, old_value, new_value):
"""Toggle layer visibility on instance toggles."""
instance[0].Visible = new_value
def ls():
"""Yields containers from active AfterEffects document.

View file

@ -37,6 +37,9 @@ class AEItem(object):
height = attr.ib(default=None)
is_placeholder = attr.ib(default=False)
uuid = attr.ib(default=False)
path = attr.ib(default=False) # path to FootageItem to validate
# list of composition Footage is in
containing_comps = attr.ib(factory=list)
class AfterEffectsServerStub():
@ -704,7 +707,10 @@ class AfterEffectsServerStub():
d.get("instance_id"),
d.get("width"),
d.get("height"),
d.get("is_placeholder"))
d.get("is_placeholder"),
d.get("uuid"),
d.get("path"),
d.get("containing_comps"),)
ret.append(item)
return ret

View file

@ -164,7 +164,7 @@ class RenderCreator(Creator):
api.get_stub().rename_item(comp_id,
new_comp_name)
def apply_settings(self, project_settings, system_settings):
def apply_settings(self, project_settings):
plugin_settings = (
project_settings["aftereffects"]["create"]["RenderCreator"]
)

View file

@ -1,3 +1,4 @@
from openpype import AYON_SERVER_ENABLED
import openpype.hosts.aftereffects.api as api
from openpype.client import get_asset_by_name
from openpype.pipeline import (
@ -43,6 +44,14 @@ class AEWorkfileCreator(AutoCreator):
task_name = context.get_current_task_name()
host_name = context.host_name
existing_asset_name = None
if existing_instance is not None:
if AYON_SERVER_ENABLED:
existing_asset_name = existing_instance.get("folderPath")
if existing_asset_name is None:
existing_asset_name = existing_instance["asset"]
if existing_instance is None:
asset_doc = get_asset_by_name(project_name, asset_name)
subset_name = self.get_subset_name(
@ -50,10 +59,13 @@ class AEWorkfileCreator(AutoCreator):
project_name, host_name
)
data = {
"asset": asset_name,
"task": task_name,
"variant": self.default_variant
}
if AYON_SERVER_ENABLED:
data["folderPath"] = asset_name
else:
data["asset"] = asset_name
data.update(self.get_dynamic_data(
self.default_variant, task_name, asset_doc,
project_name, host_name, None
@ -68,7 +80,7 @@ class AEWorkfileCreator(AutoCreator):
new_instance.data_to_store())
elif (
existing_instance["asset"] != asset_name
existing_asset_name != asset_name
or existing_instance["task"] != task_name
):
asset_doc = get_asset_by_name(project_name, asset_name)
@ -76,6 +88,10 @@ class AEWorkfileCreator(AutoCreator):
self.default_variant, task_name, asset_doc,
project_name, host_name
)
existing_instance["asset"] = asset_name
if AYON_SERVER_ENABLED:
existing_instance["folderPath"] = asset_name
else:
existing_instance["asset"] = asset_name
existing_instance["task"] = task_name
existing_instance["subset"] = subset_name

View file

@ -31,13 +31,8 @@ class FileLoader(api.AfterEffectsLoader):
path = self.filepath_from_context(context)
repr_cont = context["representation"]["context"]
if "#" not in path:
frame = repr_cont.get("frame")
if frame:
padding = len(frame)
path = path.replace(frame, "#" * padding)
import_options['sequence'] = True
if len(context["representation"]["files"]) > 1:
import_options['sequence'] = True
if not path:
repr_id = context["representation"]["_id"]

View file

@ -138,7 +138,6 @@ class CollectAERender(publish.AbstractCollectRender):
fam = "render.farm"
if fam not in instance.families:
instance.families.append(fam)
instance.toBeRenderedOn = "deadline"
instance.renderer = "aerender"
instance.farm = True # to skip integrate
if "review" in instance.families:

View file

@ -1,6 +1,8 @@
import os
import pyblish.api
from openpype.client import get_asset_name_identifier
from openpype.pipeline.create import get_subset_name
@ -48,9 +50,11 @@ class CollectWorkfile(pyblish.api.ContextPlugin):
asset_entity = context.data["assetEntity"]
project_entity = context.data["projectEntity"]
asset_name = get_asset_name_identifier(asset_entity)
instance_data = {
"active": True,
"asset": asset_entity["name"],
"asset": asset_name,
"task": task,
"frameStart": context.data['frameStart'],
"frameEnd": context.data['frameEnd'],

View file

@ -0,0 +1,14 @@
<?xml version="1.0" encoding="UTF-8"?>
<root>
<error id="main">
<title>Footage item missing</title>
<description>
## Footage item missing
FootageItem `{name}` refers to a missing file `{path}`. The render will not produce any frames and AE will stop reacting to any integration.
### How to repair?
Remove `{name}` or provide missing file.
</description>
</error>
</root>

View file

@ -0,0 +1,49 @@
# -*- coding: utf-8 -*-
"""Validate presence of footage items in composition
Requires:
"""
import os
import pyblish.api
from openpype.pipeline import (
PublishXmlValidationError
)
from openpype.hosts.aftereffects.api import get_stub
class ValidateFootageItems(pyblish.api.InstancePlugin):
    """Check that every FootageItem used by the composition exists on disk.

    After Effects fails silently and renders nothing when a footage file
    is missing; the UI then waits for a user reaction without showing a
    dialog, leaving the integration unresponsive. This validator checks
    file existence up front.

    It cannot guard against a missing frame inside an image sequence
    (the AE API does not expose the expected frame count); a missing
    frame is rendered as a placeholder instead.
    """

    order = pyblish.api.ValidatorOrder
    label = "Validate Footage Items"
    families = ["render.farm", "render.local", "render"]
    hosts = ["aftereffects"]
    optional = True

    def process(self, instance):
        """Plugin entry point."""
        target_comp_id = instance.data["comp_id"]
        footage_items = get_stub().get_items(
            comps=False, folders=False, footages=True)
        for item in footage_items:
            self.log.info(item)
            # Only footage actually used by the published composition
            # is relevant.
            if target_comp_id not in item.containing_comps:
                continue
            file_path = item.path
            # Solid/placeholder footage has no path; skip it, and skip
            # files that are present.
            if not file_path or os.path.exists(file_path):
                continue
            raise PublishXmlValidationError(
                self,
                f"File {file_path} not found.",
                formatting_data={"name": item.name, "path": file_path},
            )

View file

@ -10,6 +10,7 @@ from .pipeline import (
ls,
publish,
containerise,
BlenderHost,
)
from .plugin import (
@ -38,6 +39,8 @@ from .lib import (
from .capture import capture
from .render_lib import prepare_rendering
__all__ = [
"install",
@ -45,6 +48,7 @@ __all__ = [
"ls",
"publish",
"containerise",
"BlenderHost",
"Creator",
"Loader",
@ -66,4 +70,5 @@ __all__ = [
"get_selection",
"capture",
# "unique_name",
"prepare_rendering",
]

View file

@ -148,13 +148,14 @@ def applied_view(window, camera, isolate=None, options=None):
area.ui_type = "VIEW_3D"
meshes = [obj for obj in window.scene.objects if obj.type == "MESH"]
types = {"MESH", "GPENCIL"}
objects = [obj for obj in window.scene.objects if obj.type in types]
if camera == "AUTO":
space.region_3d.view_perspective = "ORTHO"
isolate_objects(window, isolate or meshes)
isolate_objects(window, isolate or objects)
else:
isolate_objects(window, isolate or meshes)
isolate_objects(window, isolate or objects)
space.camera = window.scene.objects.get(camera)
space.region_3d.view_perspective = "CAMERA"

View file

@ -0,0 +1,51 @@
import attr
import bpy
@attr.s
class LayerMetadata(object):
    """Data class for Render Layer metadata."""
    # First and last frame of the scene's render range (ints).
    frameStart = attr.ib()
    frameEnd = attr.ib()
@attr.s
class RenderProduct(object):
    """
    Getting Colorspace as Specific Render Product Parameter for submitting
    publish job.
    """
    colorspace = attr.ib()  # colorspace
    view = attr.ib()  # OCIO view transform
    # Optional product (AOV/pass) name; empty string for the beauty pass.
    productName = attr.ib(default=None)
class ARenderProduct(object):
    """Collect render layer metadata and render products for the scene."""

    def __init__(self):
        """Gather layer metadata and attach the render product list."""
        self.layer_data = self._get_layer_data()
        self.layer_data.products = self.get_render_products()

    def _get_layer_data(self):
        """Build LayerMetadata from the current scene's frame range."""
        current_scene = bpy.context.scene
        return LayerMetadata(
            frameStart=int(current_scene.frame_start),
            frameEnd=int(current_scene.frame_end),
        )

    def get_render_products(self):
        """To be implemented by renderer class.

        This should return a list of RenderProducts.

        Returns:
            list: List of RenderProduct
        """
        beauty = RenderProduct(
            colorspace="sRGB",
            view="ACES 1.0",
            productName="",
        )
        return [beauty]

View file

@ -188,7 +188,7 @@ def imprint(node: bpy.types.bpy_struct_meta_idprop, data: Dict):
# Support values evaluated at imprint
value = value()
if not isinstance(value, (int, float, bool, str, list)):
if not isinstance(value, (int, float, bool, str, list, dict)):
raise TypeError(f"Unsupported type: {type(value)}")
imprint_data[key] = value
@ -266,9 +266,59 @@ def read(node: bpy.types.bpy_struct_meta_idprop):
return data
def get_selection() -> List[bpy.types.Object]:
"""Return the selected objects from the current scene."""
return [obj for obj in bpy.context.scene.objects if obj.select_get()]
def get_selected_collections():
    """
    Returns a list of the currently selected collections in the outliner.

    Raises:
        RuntimeError: If the outliner cannot be found in the main Blender
            window.

    Returns:
        list: A list of `bpy.types.Collection` objects that are currently
            selected in the outliner.
    """
    # Fall back to the first window when no window is in the context
    # (e.g. when called from a timer/background handler).
    window = bpy.context.window or bpy.context.window_manager.windows[0]

    try:
        area = next(
            area for area in window.screen.areas
            if area.type == 'OUTLINER')
        region = next(
            region for region in area.regions
            if region.type == 'WINDOW')
    except StopIteration as e:
        raise RuntimeError("Could not find outliner. An outliner space "
                           "must be in the main Blender window.") from e

    # `selected_ids` is only populated for the outliner context, so the
    # context must be temporarily overridden to that area/region.
    with bpy.context.temp_override(
        window=window,
        area=area,
        region=region,
        screen=window.screen
    ):
        ids = bpy.context.selected_ids
        # `selected_ids` mixes objects and collections; keep collections.
        return [id for id in ids if isinstance(id, bpy.types.Collection)]
def get_selection(include_collections: bool = False) -> List[bpy.types.Object]:
    """Return the objects selected in the current Blender scene.

    Args:
        include_collections (bool, optional): Whether to include selected
            collections in the result. Defaults to False.

    Returns:
        List[bpy.types.Object]: A list of selected objects.
    """
    selected = [
        obj for obj in bpy.context.scene.objects if obj.select_get()
    ]
    if include_collections:
        selected += get_selected_collections()
    return selected
@contextlib.contextmanager

View file

@ -16,6 +16,7 @@ import bpy
import bpy.utils.previews
from openpype import style
from openpype import AYON_SERVER_ENABLED
from openpype.pipeline import get_current_asset_name, get_current_task_name
from openpype.tools.utils import host_tools
@ -30,6 +31,14 @@ PREVIEW_COLLECTIONS: Dict = dict()
TIMER_INTERVAL: float = 0.01 if platform.system() == "Windows" else 0.1
def execute_function_in_main_thread(f):
    """Decorator to move a function call into main thread items.

    The decorated callable is not executed directly; it is wrapped in a
    MainThreadItem and queued for execution on Blender's main thread.
    NOTE(review): the wrapper returns None, so any return value of ``f``
    is discarded — callers must not rely on a result.
    """
    def wrapper(*args, **kwargs):
        mti = MainThreadItem(f, *args, **kwargs)
        execute_in_main_thread(mti)
    return wrapper
class BlenderApplication(QtWidgets.QApplication):
_instance = None
blender_windows = {}
@ -237,8 +246,24 @@ class LaunchQtApp(bpy.types.Operator):
self.before_window_show()
def pull_to_front(window):
"""Pull window forward to screen.
If Window is minimized this will un-minimize, then it can be raised
and activated to the front.
"""
window.setWindowState(
(window.windowState() & ~QtCore.Qt.WindowMinimized) |
QtCore.Qt.WindowActive
)
window.raise_()
window.activateWindow()
if isinstance(self._window, ModuleType):
self._window.show()
pull_to_front(self._window)
# Pull window to the front
window = None
if hasattr(self._window, "window"):
window = self._window.window
@ -253,6 +278,7 @@ class LaunchQtApp(bpy.types.Operator):
on_top_flags = origin_flags | QtCore.Qt.WindowStaysOnTopHint
self._window.setWindowFlags(on_top_flags)
self._window.show()
pull_to_front(self._window)
# if on_top_flags != origin_flags:
# self._window.setWindowFlags(origin_flags)
@ -274,6 +300,10 @@ class LaunchCreator(LaunchQtApp):
def before_window_show(self):
self._window.refresh()
def execute(self, context):
host_tools.show_publisher(tab="create")
return {"FINISHED"}
class LaunchLoader(LaunchQtApp):
"""Launch Avalon Loader."""
@ -283,6 +313,8 @@ class LaunchLoader(LaunchQtApp):
_tool_name = "loader"
def before_window_show(self):
if AYON_SERVER_ENABLED:
return
self._window.set_context(
{"asset": get_current_asset_name()},
refresh=True
@ -296,7 +328,7 @@ class LaunchPublisher(LaunchQtApp):
bl_label = "Publish..."
def execute(self, context):
host_tools.show_publish()
host_tools.show_publisher(tab="publish")
return {"FINISHED"}
@ -308,6 +340,8 @@ class LaunchManager(LaunchQtApp):
_tool_name = "sceneinventory"
def before_window_show(self):
if AYON_SERVER_ENABLED:
return
self._window.refresh()
@ -319,6 +353,8 @@ class LaunchLibrary(LaunchQtApp):
_tool_name = "libraryloader"
def before_window_show(self):
if AYON_SERVER_ENABLED:
return
self._window.refresh()
@ -331,13 +367,16 @@ class LaunchWorkFiles(LaunchQtApp):
def execute(self, context):
result = super().execute(context)
self._window.set_context({
"asset": get_current_asset_name(),
"task": get_current_task_name()
})
if not AYON_SERVER_ENABLED:
self._window.set_context({
"asset": get_current_asset_name(),
"task": get_current_task_name()
})
return result
def before_window_show(self):
if AYON_SERVER_ENABLED:
return
self._window.root = str(Path(
os.environ.get("AVALON_WORKDIR", ""),
os.environ.get("AVALON_SCENEDIR", ""),
@ -406,7 +445,6 @@ class TOPBAR_MT_avalon(bpy.types.Menu):
layout.operator(SetResolution.bl_idname, text="Set Resolution")
layout.separator()
layout.operator(LaunchWorkFiles.bl_idname, text="Work Files...")
# TODO (jasper): maybe add 'Reload Pipeline'
def draw_avalon_menu(self, context):

View file

@ -10,6 +10,12 @@ from . import ops
import pyblish.api
from openpype.host import (
HostBase,
IWorkfileHost,
IPublishHost,
ILoadHost
)
from openpype.client import get_asset_by_name
from openpype.pipeline import (
schema,
@ -29,6 +35,14 @@ from openpype.lib import (
)
import openpype.hosts.blender
from openpype.settings import get_project_settings
from .workio import (
open_file,
save_file,
current_file,
has_unsaved_changes,
file_extensions,
work_root,
)
HOST_DIR = os.path.dirname(os.path.abspath(openpype.hosts.blender.__file__))
@ -47,6 +61,101 @@ IS_HEADLESS = bpy.app.background
log = Logger.get_logger(__name__)
class BlenderHost(HostBase, IWorkfileHost, IPublishHost, ILoadHost):
    """Blender host integration.

    Thin adapter that maps the host interfaces onto the module-level
    Blender functions (``install``, ``ls``, ``workio`` helpers).
    """

    name = "blender"

    def install(self):
        """Override install method from HostBase.
        Install Blender host functionality."""
        install()

    def get_containers(self) -> Iterator:
        """List containers from active Blender scene."""
        return ls()

    def get_workfile_extensions(self) -> List[str]:
        """Override get_workfile_extensions method from IWorkfileHost.
        Get workfile possible extensions.

        Returns:
            List[str]: Workfile extensions.
        """
        return file_extensions()

    def save_workfile(self, dst_path: str = None):
        """Override save_workfile method from IWorkfileHost.
        Save currently opened workfile.

        Args:
            dst_path (str): Where the current scene should be saved. Or use
                current path if `None` is passed.
        """
        # Fall back to the currently opened .blend file when no
        # destination is given.
        save_file(dst_path if dst_path else bpy.data.filepath)

    def open_workfile(self, filepath: str):
        """Override open_workfile method from IWorkfileHost.
        Open workfile at specified filepath in the host.

        Args:
            filepath (str): Path to workfile.
        """
        open_file(filepath)

    def get_current_workfile(self) -> str:
        """Override get_current_workfile method from IWorkfileHost.
        Retrieve currently opened workfile path.

        Returns:
            str: Path to currently opened workfile.
        """
        return current_file()

    def workfile_has_unsaved_changes(self) -> bool:
        """Override wokfile_has_unsaved_changes method from IWorkfileHost.
        Returns True if opened workfile has no unsaved changes.

        Returns:
            bool: True if scene is saved and False if it has unsaved
                modifications.
        """
        return has_unsaved_changes()

    def work_root(self, session) -> str:
        """Override work_root method from IWorkfileHost.
        Modify workdir per host.

        Args:
            session (dict): Session context data.

        Returns:
            str: Path to new workdir.
        """
        return work_root(session)

    def get_context_data(self) -> dict:
        """Override abstract method from IPublishHost.
        Get global data related to creation-publishing from workfile.

        Returns:
            dict: Context data stored using 'update_context_data'.
        """
        # Context data is stored as a custom property on the scene.
        property = bpy.context.scene.get(AVALON_PROPERTY)
        if property:
            return property.to_dict()
        return {}

    def update_context_data(self, data: dict, changes: dict):
        """Override abstract method from IPublishHost.
        Store global context data to workfile.

        Args:
            data (dict): New data as are.
            changes (dict): Only data that has been changed. Each value has
                tuple with '(<old>, <new>)' value.
        """
        bpy.context.scene[AVALON_PROPERTY] = data
def pype_excepthook_handler(*args):
traceback.print_exception(*args)
@ -460,36 +569,6 @@ def ls() -> Iterator:
yield parse_container(container)
def update_hierarchy(containers):
"""Hierarchical container support
This is the function to support Scene Inventory to draw hierarchical
view for containers.
We need both parent and children to visualize the graph.
"""
all_containers = set(ls()) # lookup set
for container in containers:
# Find parent
# FIXME (jasperge): re-evaluate this. How would it be possible
# to 'nest' assets? Collections can have several parents, for
# now assume it has only 1 parent
parent = [
coll for coll in bpy.data.collections if container in coll.children
]
for node in parent:
if node in all_containers:
container["parent"] = node
break
log.debug("Container: %s", container)
yield container
def publish():
"""Shorthand to publish from within host."""

View file

@ -1,28 +1,34 @@
"""Shared functionality for pipeline plugins for Blender."""
import itertools
from pathlib import Path
from typing import Dict, List, Optional
import bpy
from openpype import AYON_SERVER_ENABLED
from openpype.pipeline import (
LegacyCreator,
Creator,
CreatedInstance,
LoaderPlugin,
)
from .pipeline import AVALON_CONTAINERS
from openpype.lib import BoolDef
from .pipeline import (
AVALON_CONTAINERS,
AVALON_INSTANCES,
AVALON_PROPERTY,
)
from .ops import (
MainThreadItem,
execute_in_main_thread
)
from .lib import (
imprint,
get_selection
)
from .lib import imprint
VALID_EXTENSIONS = [".blend", ".json", ".abc", ".fbx"]
def asset_name(
def prepare_scene_name(
asset: str, subset: str, namespace: Optional[str] = None
) -> str:
"""Return a consistent name for an asset."""
@ -40,9 +46,16 @@ def get_unique_number(
avalon_container = bpy.data.collections.get(AVALON_CONTAINERS)
if not avalon_container:
return "01"
asset_groups = avalon_container.all_objects
container_names = [c.name for c in asset_groups if c.type == 'EMPTY']
# Check the names of both object and collection containers
obj_asset_groups = avalon_container.objects
obj_group_names = {
c.name for c in obj_asset_groups
if c.type == 'EMPTY' and c.get(AVALON_PROPERTY)}
coll_asset_groups = avalon_container.children
coll_group_names = {
c.name for c in coll_asset_groups
if c.get(AVALON_PROPERTY)}
container_names = obj_group_names.union(coll_group_names)
count = 1
name = f"{asset}_{count:0>2}_{subset}"
while name in container_names:
@ -134,20 +147,224 @@ def deselect_all():
bpy.context.view_layer.objects.active = active
class Creator(LegacyCreator):
"""Base class for Creator plug-ins."""
class BaseCreator(Creator):
"""Base class for Blender Creator plug-ins."""
defaults = ['Main']
def process(self):
collection = bpy.data.collections.new(name=self.data["subset"])
bpy.context.scene.collection.children.link(collection)
imprint(collection, self.data)
create_as_asset_group = False
if (self.options or {}).get("useSelection"):
for obj in get_selection():
collection.objects.link(obj)
@staticmethod
def cache_subsets(shared_data):
"""Cache instances for Creators shared data.
return collection
Create `blender_cached_subsets` key when needed in shared data and
fill it with all collected instances from the scene under its
respective creator identifiers.
If legacy instances are detected in the scene, create
`blender_cached_legacy_subsets` key and fill it with
all legacy subsets from this family as a value. # key or value?
Args:
shared_data(Dict[str, Any]): Shared data.
Return:
Dict[str, Any]: Shared data with cached subsets.
"""
if not shared_data.get('blender_cached_subsets'):
cache = {}
cache_legacy = {}
avalon_instances = bpy.data.collections.get(AVALON_INSTANCES)
avalon_instance_objs = (
avalon_instances.objects if avalon_instances else []
)
for obj_or_col in itertools.chain(
avalon_instance_objs,
bpy.data.collections
):
avalon_prop = obj_or_col.get(AVALON_PROPERTY, {})
if not avalon_prop:
continue
if avalon_prop.get('id') != 'pyblish.avalon.instance':
continue
creator_id = avalon_prop.get('creator_identifier')
if creator_id:
# Creator instance
cache.setdefault(creator_id, []).append(obj_or_col)
else:
family = avalon_prop.get('family')
if family:
# Legacy creator instance
cache_legacy.setdefault(family, []).append(obj_or_col)
shared_data["blender_cached_subsets"] = cache
shared_data["blender_cached_legacy_subsets"] = cache_legacy
return shared_data
def create(
self, subset_name: str, instance_data: dict, pre_create_data: dict
):
"""Override abstract method from Creator.
Create new instance and store it.
Args:
subset_name(str): Subset name of created instance.
instance_data(dict): Instance base data.
pre_create_data(dict): Data based on pre creation attributes.
Those may affect how creator works.
"""
# Get Instance Container or create it if it does not exist
instances = bpy.data.collections.get(AVALON_INSTANCES)
if not instances:
instances = bpy.data.collections.new(name=AVALON_INSTANCES)
bpy.context.scene.collection.children.link(instances)
# Create asset group
if AYON_SERVER_ENABLED:
asset_name = instance_data["folderPath"]
else:
asset_name = instance_data["asset"]
name = prepare_scene_name(asset_name, subset_name)
if self.create_as_asset_group:
# Create instance as empty
instance_node = bpy.data.objects.new(name=name, object_data=None)
instance_node.empty_display_type = 'SINGLE_ARROW'
instances.objects.link(instance_node)
else:
# Create instance collection
instance_node = bpy.data.collections.new(name=name)
instances.children.link(instance_node)
self.set_instance_data(subset_name, instance_data)
instance = CreatedInstance(
self.family, subset_name, instance_data, self
)
instance.transient_data["instance_node"] = instance_node
self._add_instance_to_context(instance)
imprint(instance_node, instance_data)
return instance_node
def collect_instances(self):
"""Override abstract method from BaseCreator.
Collect existing instances related to this creator plugin."""
# Cache subsets in shared data
self.cache_subsets(self.collection_shared_data)
# Get cached subsets
cached_subsets = self.collection_shared_data.get(
"blender_cached_subsets"
)
if not cached_subsets:
return
# Process only instances that were created by this creator
for instance_node in cached_subsets.get(self.identifier, []):
property = instance_node.get(AVALON_PROPERTY)
# Create instance object from existing data
instance = CreatedInstance.from_existing(
instance_data=property.to_dict(),
creator=self
)
instance.transient_data["instance_node"] = instance_node
# Add instance to create context
self._add_instance_to_context(instance)
def update_instances(self, update_list):
"""Override abstract method from BaseCreator.
Store changes of existing instances so they can be recollected.
Args:
update_list(List[UpdateData]): Changed instances
and their changes, as a list of tuples.
"""
if AYON_SERVER_ENABLED:
asset_name_key = "folderPath"
else:
asset_name_key = "asset"
for created_instance, changes in update_list:
data = created_instance.data_to_store()
node = created_instance.transient_data["instance_node"]
if not node:
# We can't update if we don't know the node
self.log.error(
f"Unable to update instance {created_instance} "
f"without instance node."
)
return
# Rename the instance node in the scene if subset or asset changed
if (
"subset" in changes.changed_keys
or asset_name_key in changes.changed_keys
):
asset_name = data[asset_name_key]
name = prepare_scene_name(
asset=asset_name, subset=data["subset"]
)
node.name = name
imprint(node, data)
def remove_instances(self, instances: List[CreatedInstance]):
for instance in instances:
node = instance.transient_data["instance_node"]
if isinstance(node, bpy.types.Collection):
for children in node.children_recursive:
if isinstance(children, bpy.types.Collection):
bpy.data.collections.remove(children)
else:
bpy.data.objects.remove(children)
bpy.data.collections.remove(node)
elif isinstance(node, bpy.types.Object):
bpy.data.objects.remove(node)
self._remove_instance_from_context(instance)
def set_instance_data(
self,
subset_name: str,
instance_data: dict
):
"""Fill instance data with required items.
Args:
subset_name(str): Subset name of created instance.
instance_data(dict): Instance base data.
instance_node(bpy.types.ID): Instance node in blender scene.
"""
if not instance_data:
instance_data = {}
instance_data.update(
{
"id": "pyblish.avalon.instance",
"creator_identifier": self.identifier,
"subset": subset_name,
}
)
def get_pre_create_attr_defs(self):
return [
BoolDef("use_selection",
label="Use selection",
default=True)
]
class Loader(LoaderPlugin):
@ -241,7 +458,7 @@ class AssetLoader(LoaderPlugin):
namespace: Use pre-defined namespace
options: Additional settings dictionary
"""
# TODO (jasper): make it possible to add the asset several times by
# TODO: make it possible to add the asset several times by
# just re-using the collection
filepath = self.filepath_from_context(context)
assert Path(filepath).exists(), f"{filepath} doesn't exist."
@ -252,7 +469,7 @@ class AssetLoader(LoaderPlugin):
asset, subset
)
namespace = namespace or f"{asset}_{unique_number}"
name = name or asset_name(
name = name or prepare_scene_name(
asset, subset, unique_number
)
@ -281,7 +498,9 @@ class AssetLoader(LoaderPlugin):
# asset = context["asset"]["name"]
# subset = context["subset"]["name"]
# instance_name = asset_name(asset, subset, unique_number) + '_CON'
# instance_name = prepare_scene_name(
# asset, subset, unique_number
# ) + '_CON'
# return self._get_instance_collection(instance_name, nodes)

View file

@ -0,0 +1,255 @@
from pathlib import Path
import bpy
from openpype.settings import get_project_settings
from openpype.pipeline import get_current_project_name
def get_default_render_folder(settings):
    """Return the default render image folder from project settings."""
    render_settings = settings["blender"]["RenderSettings"]
    return render_settings["default_render_image_folder"]
def get_aov_separator(settings):
    """Return the AOV separator character from Blender project settings.

    Args:
        settings (dict): Project settings.

    Returns:
        str: Separator character: "-", "_" or ".".

    Raises:
        ValueError: If the configured separator name is not one of
            "dash", "underscore" or "dot".
    """
    aov_sep = (settings["blender"]
                       ["RenderSettings"]
                       ["aov_separator"])
    # Mapping lookup instead of an if/elif chain; same outcomes.
    separators = {"dash": "-", "underscore": "_", "dot": "."}
    try:
        return separators[aov_sep]
    except KeyError:
        raise ValueError(f"Invalid aov separator: {aov_sep}") from None
def get_image_format(settings):
    """Return the render image format (extension) from blender settings."""
    render_settings = settings["blender"]["RenderSettings"]
    return render_settings["image_format"]
def get_multilayer(settings):
    """Return whether multilayer EXR output is enabled in blender settings."""
    render_settings = settings["blender"]["RenderSettings"]
    return render_settings["multilayer_exr"]
def get_render_product(output_path, name, aov_sep):
    """Generate the path template of the beauty render product.

    Blender interprets the ``#`` characters as frame-number padding when
    it renders.

    Args:
        output_path (Path): Base directory where renders are written.
        name (str): Instance name, used as the file name stem.
        aov_sep (str): Separator between the name and the AOV suffix.

    Returns:
        str: Forward-slash path template ending in ``<sep>beauty.####``.
    """
    # Strip leading slashes so `name` cannot escape `output_path`.
    filepath = output_path / name.lstrip("/")
    render_product = f"{filepath}{aov_sep}beauty.####"
    # Blender expects forward slashes, even on Windows.
    return render_product.replace("\\", "/")
def set_render_format(ext, multilayer):
    """Set Blender's output image format from the configured extension.

    Unknown extensions leave the current file format untouched.
    """
    # Let Blender append the proper extension to rendered files.
    bpy.context.scene.render.use_file_extension = True
    image_settings = bpy.context.scene.render.image_settings

    format_by_ext = {
        "exr": "OPEN_EXR_MULTILAYER" if multilayer else "OPEN_EXR",
        "bmp": "BMP",
        "rgb": "IRIS",
        "png": "PNG",
        "jpeg": "JPEG",
        "jp2": "JPEG2000",
        "tga": "TARGA",
        "tif": "TIFF",
    }
    file_format = format_by_ext.get(ext)
    if file_format is not None:
        image_settings.file_format = file_format
def set_render_passes(settings):
    """Enable render passes on the active view layer from project settings.

    Args:
        settings (dict): Project settings (as from get_project_settings).

    Returns:
        tuple: The ``aov_list`` and ``custom_passes`` values read from the
            settings, so the caller can imprint them on the instance.
    """
    aov_list = (settings["blender"]
                ["RenderSettings"]
                ["aov_list"])
    custom_passes = (settings["blender"]
                     ["RenderSettings"]
                     ["custom_passes"])

    vl = bpy.context.view_layer

    # Generic view-layer passes, toggled by their presence in aov_list.
    vl.use_pass_combined = "combined" in aov_list
    vl.use_pass_z = "z" in aov_list
    vl.use_pass_mist = "mist" in aov_list
    vl.use_pass_normal = "normal" in aov_list
    vl.use_pass_diffuse_direct = "diffuse_light" in aov_list
    vl.use_pass_diffuse_color = "diffuse_color" in aov_list
    vl.use_pass_glossy_direct = "specular_light" in aov_list
    vl.use_pass_glossy_color = "specular_color" in aov_list
    vl.eevee.use_pass_volume_direct = "volume_light" in aov_list
    vl.use_pass_emit = "emission" in aov_list
    vl.use_pass_environment = "environment" in aov_list
    vl.use_pass_shadow = "shadow" in aov_list
    vl.use_pass_ambient_occlusion = "ao" in aov_list

    # Cycles-only passes.
    cycles = vl.cycles
    cycles.denoising_store_passes = "denoising" in aov_list
    cycles.use_pass_volume_direct = "volume_direct" in aov_list
    cycles.use_pass_volume_indirect = "volume_indirect" in aov_list

    # Create each configured custom pass as a view-layer AOV if missing,
    # otherwise reuse the existing one.
    aovs_names = [aov.name for aov in vl.aovs]
    for cp in custom_passes:
        cp_name = cp[0]
        if cp_name not in aovs_names:
            aov = vl.aovs.add()
            aov.name = cp_name
        else:
            aov = vl.aovs[cp_name]
        # cp appears to be (name, options); "type" falls back to "VALUE"
        # — TODO confirm the options schema against the settings model.
        aov.type = cp[1].get("type", "VALUE")

    return aov_list, custom_passes
def set_node_tree(output_path, name, aov_sep, ext, multilayer):
    """Rebuild the compositor file-output node for the enabled passes.

    Ensures a Render Layers node exists, removes any previous file output
    nodes and creates a fresh one with a slot per enabled render pass.

    Args:
        output_path (Path): Base directory for the AOV files.
        name (str): Instance name used as the file name stem.
        aov_sep (str): Separator between the name and the pass name.
        ext (str): Image format extension from the settings.
        multilayer (bool): Whether a multilayer EXR is rendered.

    Returns:
        list: ``(pass_name, file_path)`` tuples for each AOV file product;
            empty for multilayer EXR, where Blender writes a single file.
    """
    # Set the scene to use the compositor node tree to render
    bpy.context.scene.use_nodes = True
    tree = bpy.context.scene.node_tree

    # Get the Render Layers node
    rl_node = None
    for node in tree.nodes:
        if node.bl_idname == "CompositorNodeRLayers":
            rl_node = node
            break

    # If there's not a Render Layers node, we create it
    if not rl_node:
        rl_node = tree.nodes.new("CompositorNodeRLayers")

    # Get the enabled output sockets, that are the active passes for the
    # render.
    # We also exclude some layers.
    exclude_sockets = ["Image", "Alpha", "Noisy Image"]
    passes = [
        socket
        for socket in rl_node.outputs
        if socket.enabled and socket.name not in exclude_sockets
    ]

    # Remove all output nodes
    # NOTE(review): nodes are removed while iterating the collection;
    # this appears to work in Blender, but iterating a copy would be
    # safer — confirm.
    for node in tree.nodes:
        if node.bl_idname == "CompositorNodeOutputFile":
            tree.nodes.remove(node)

    # Create a new output node
    output = tree.nodes.new("CompositorNodeOutputFile")

    image_settings = bpy.context.scene.render.image_settings
    output.format.file_format = image_settings.file_format

    # In case of a multilayer exr, we don't need to use the output node,
    # because the blender render already outputs a multilayer exr.
    if ext == "exr" and multilayer:
        output.layer_slots.clear()
        return []

    output.file_slots.clear()
    output.base_path = str(output_path)

    aov_file_products = []

    # For each active render pass, we add a new socket to the output node
    # and link it
    for render_pass in passes:
        filepath = f"{name}{aov_sep}{render_pass.name}.####"
        output.file_slots.new(filepath)

        filename = str(output_path / filepath.lstrip("/"))
        aov_file_products.append((render_pass.name, filename))

        # The newly added slot is always the last input of the node.
        node_input = output.inputs[-1]

        tree.links.new(render_pass, node_input)

    return aov_file_products
def imprint_render_settings(node, data):
    """Persist render settings as custom properties on *node*.

    Entries whose value is ``None`` are skipped. Existing values stored
    under the "render_data" key are kept; only the given keys are written.
    """
    render_data_key = "render_data"
    if not node.get(render_data_key):
        node[render_data_key] = {}
    render_data = node[render_data_key]
    for key, value in data.items():
        if value is None:
            continue
        render_data[key] = value
def prepare_rendering(asset_group):
    """Configure the scene for rendering and imprint settings on the group.

    Reads the project render settings, sets the output format, enables the
    configured passes, rebuilds the compositor output node and stores the
    resulting render configuration on *asset_group* as custom properties.

    Args:
        asset_group (bpy.types.ID): Instance node to imprint the render
            settings on.

    Raises:
        AssertionError: If the current workfile has not been saved (the
            render output location is derived from the workfile path).
    """
    name = asset_group.name

    # `bpy.data.filepath` is an empty string for an unsaved file. The check
    # must be on the raw string: `Path("")` normalizes to `Path(".")`,
    # which is truthy, so asserting on the Path object would never fire.
    assert bpy.data.filepath, "Workfile not saved. Please save the file first."
    filepath = Path(bpy.data.filepath)
    dirpath = filepath.parent
    file_name = Path(filepath.name).stem

    project = get_current_project_name()
    settings = get_project_settings(project)

    render_folder = get_default_render_folder(settings)
    aov_sep = get_aov_separator(settings)
    ext = get_image_format(settings)
    multilayer = get_multilayer(settings)

    set_render_format(ext, multilayer)
    aov_list, custom_passes = set_render_passes(settings)

    output_path = Path.joinpath(dirpath, render_folder, file_name)

    render_product = get_render_product(output_path, name, aov_sep)
    aov_file_product = set_node_tree(
        output_path, name, aov_sep, ext, multilayer)

    # Point Blender's render output at the beauty product.
    bpy.context.scene.render.filepath = render_product

    render_settings = {
        "render_folder": render_folder,
        "aov_separator": aov_sep,
        "image_format": ext,
        "multilayer_exr": multilayer,
        "aov_list": aov_list,
        "custom_passes": custom_passes,
        "render_product": render_product,
        "aov_file_product": aov_file_product,
        "review": True,
    }

    imprint_render_settings(asset_group, render_settings)

View file

@ -1,9 +1,9 @@
from openpype.pipeline import install_host
from openpype.hosts.blender import api
from openpype.hosts.blender.api import BlenderHost
def register():
install_host(api)
install_host(BlenderHost())
def unregister():

View file

@ -31,7 +31,7 @@ class InstallPySideToBlender(PreLaunchHook):
def inner_execute(self):
# Get blender's python directory
version_regex = re.compile(r"^[2-3]\.[0-9]+$")
version_regex = re.compile(r"^[2-4]\.[0-9]+$")
platform = system().lower()
executable = self.launch_context.executable.executable_path

View file

@ -0,0 +1,78 @@
# -*- coding: utf-8 -*-
"""Converter for legacy Houdini subsets."""
from openpype.pipeline.create.creator_plugins import SubsetConvertorPlugin
from openpype.hosts.blender.api.lib import imprint
class BlenderLegacyConvertor(SubsetConvertorPlugin):
    """Find and convert any legacy subsets in the scene.

    Legacy subsets don't retain any information about their original
    creator, so the only possible mapping is from their family to a
    creator identifier. The limitation is that multiple creators may
    produce subsets of the same family, which cannot be disambiguated;
    the mapping below nevertheless covers all creators that came with
    OpenPype.
    """
    identifier = "io.openpype.creators.blender.legacy"
    family_to_id = {
        "action": "io.openpype.creators.blender.action",
        "camera": "io.openpype.creators.blender.camera",
        "animation": "io.openpype.creators.blender.animation",
        "blendScene": "io.openpype.creators.blender.blendscene",
        "layout": "io.openpype.creators.blender.layout",
        "model": "io.openpype.creators.blender.model",
        "pointcache": "io.openpype.creators.blender.pointcache",
        "render": "io.openpype.creators.blender.render",
        "review": "io.openpype.creators.blender.review",
        "rig": "io.openpype.creators.blender.rig",
    }

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # family -> list of legacy instance nodes, filled by find_instances.
        self.legacy_subsets = {}

    def find_instances(self):
        """Find legacy subsets in the scene.

        Legacy subsets are the ones without a `creator_identifier`
        parameter; they are read from the cached entries produced by
        :py:meth:`~BaseCreator.cache_subsets`.
        """
        self.legacy_subsets = self.collection_shared_data.get(
            "blender_cached_legacy_subsets")
        if not self.legacy_subsets:
            return

        count = len(self.legacy_subsets)
        suffix = "s" if count > 1 else ""
        self.add_convertor_item(
            "Found {} incompatible subset{}".format(count, suffix)
        )

    def convert(self):
        """Convert all found legacy subsets to the current system.

        It is enough to imprint `creator_identifier` on each node.
        """
        if not self.legacy_subsets:
            return

        for family, instance_nodes in self.legacy_subsets.items():
            creator_identifier = self.family_to_id.get(family)
            if creator_identifier is None:
                continue
            for instance_node in instance_nodes:
                self.log.info(
                    "Converting {} to {}".format(instance_node.name,
                                                 creator_identifier)
                )
                imprint(instance_node, data={
                    "creator_identifier": creator_identifier
                })

View file

@ -2,30 +2,29 @@
import bpy
from openpype.pipeline import get_current_task_name
import openpype.hosts.blender.api.plugin
from openpype.hosts.blender.api import lib
from openpype.hosts.blender.api import lib, plugin
class CreateAction(openpype.hosts.blender.api.plugin.Creator):
"""Action output for character rigs"""
class CreateAction(plugin.BaseCreator):
"""Action output for character rigs."""
name = "actionMain"
identifier = "io.openpype.creators.blender.action"
label = "Action"
family = "action"
icon = "male"
def process(self):
def create(
self, subset_name: str, instance_data: dict, pre_create_data: dict
):
# Run parent create method
collection = super().create(
subset_name, instance_data, pre_create_data
)
asset = self.data["asset"]
subset = self.data["subset"]
name = openpype.hosts.blender.api.plugin.asset_name(asset, subset)
collection = bpy.data.collections.new(name=name)
bpy.context.scene.collection.children.link(collection)
self.data['task'] = get_current_task_name()
lib.imprint(collection, self.data)
# Get instance name
name = plugin.prepare_scene_name(instance_data["asset"], subset_name)
if (self.options or {}).get("useSelection"):
if pre_create_data.get("use_selection"):
for obj in lib.get_selection():
if (obj.animation_data is not None
and obj.animation_data.action is not None):

View file

@ -1,51 +1,32 @@
"""Create an animation asset."""
import bpy
from openpype.pipeline import get_current_task_name
from openpype.hosts.blender.api import plugin, lib, ops
from openpype.hosts.blender.api.pipeline import AVALON_INSTANCES
from openpype.hosts.blender.api import plugin, lib
class CreateAnimation(plugin.Creator):
"""Animation output for character rigs"""
class CreateAnimation(plugin.BaseCreator):
"""Animation output for character rigs."""
name = "animationMain"
identifier = "io.openpype.creators.blender.animation"
label = "Animation"
family = "animation"
icon = "male"
def process(self):
""" Run the creator on Blender main thread"""
mti = ops.MainThreadItem(self._process)
ops.execute_in_main_thread(mti)
def create(
self, subset_name: str, instance_data: dict, pre_create_data: dict
):
# Run parent create method
collection = super().create(
subset_name, instance_data, pre_create_data
)
def _process(self):
# Get Instance Container or create it if it does not exist
instances = bpy.data.collections.get(AVALON_INSTANCES)
if not instances:
instances = bpy.data.collections.new(name=AVALON_INSTANCES)
bpy.context.scene.collection.children.link(instances)
# Create instance object
# name = self.name
# if not name:
asset = self.data["asset"]
subset = self.data["subset"]
name = plugin.asset_name(asset, subset)
# asset_group = bpy.data.objects.new(name=name, object_data=None)
# asset_group.empty_display_type = 'SINGLE_ARROW'
asset_group = bpy.data.collections.new(name=name)
instances.children.link(asset_group)
self.data['task'] = get_current_task_name()
lib.imprint(asset_group, self.data)
if (self.options or {}).get("useSelection"):
if pre_create_data.get("use_selection"):
selected = lib.get_selection()
for obj in selected:
asset_group.objects.link(obj)
elif (self.options or {}).get("asset_group"):
obj = (self.options or {}).get("asset_group")
asset_group.objects.link(obj)
collection.objects.link(obj)
elif pre_create_data.get("asset_group"):
# Use for Load Blend automated creation of animation instances
# upon loading rig files
obj = pre_create_data.get("asset_group")
collection.objects.link(obj)
return asset_group
return collection

View file

@ -2,50 +2,33 @@
import bpy
from openpype.pipeline import get_current_task_name
from openpype.hosts.blender.api import plugin, lib, ops
from openpype.hosts.blender.api.pipeline import AVALON_INSTANCES
from openpype.hosts.blender.api import plugin, lib
class CreateBlendScene(plugin.Creator):
"""Generic group of assets"""
class CreateBlendScene(plugin.BaseCreator):
"""Generic group of assets."""
name = "blendScene"
identifier = "io.openpype.creators.blender.blendscene"
label = "Blender Scene"
family = "blendScene"
icon = "cubes"
def process(self):
""" Run the creator on Blender main thread"""
mti = ops.MainThreadItem(self._process)
ops.execute_in_main_thread(mti)
maintain_selection = False
def _process(self):
# Get Instance Container or create it if it does not exist
instances = bpy.data.collections.get(AVALON_INSTANCES)
if not instances:
instances = bpy.data.collections.new(name=AVALON_INSTANCES)
bpy.context.scene.collection.children.link(instances)
def create(
self, subset_name: str, instance_data: dict, pre_create_data: dict
):
# Create instance object
asset = self.data["asset"]
subset = self.data["subset"]
name = plugin.asset_name(asset, subset)
asset_group = bpy.data.objects.new(name=name, object_data=None)
asset_group.empty_display_type = 'SINGLE_ARROW'
instances.objects.link(asset_group)
self.data['task'] = get_current_task_name()
lib.imprint(asset_group, self.data)
instance_node = super().create(subset_name,
instance_data,
pre_create_data)
# Add selected objects to instance
if (self.options or {}).get("useSelection"):
bpy.context.view_layer.objects.active = asset_group
selected = lib.get_selection()
for obj in selected:
if obj.parent in selected:
obj.select_set(False)
continue
selected.append(asset_group)
bpy.ops.object.parent_set(keep_transform=True)
if pre_create_data.get("use_selection"):
selection = lib.get_selection(include_collections=True)
for data in selection:
if isinstance(data, bpy.types.Collection):
instance_node.children.link(data)
elif isinstance(data, bpy.types.Object):
instance_node.objects.link(data)
return asset_group
return instance_node

View file

@ -2,62 +2,41 @@
import bpy
from openpype.pipeline import get_current_task_name
from openpype.hosts.blender.api import plugin, lib, ops
from openpype.hosts.blender.api import plugin, lib
from openpype.hosts.blender.api.pipeline import AVALON_INSTANCES
class CreateCamera(plugin.Creator):
"""Polygonal static geometry"""
class CreateCamera(plugin.BaseCreator):
"""Polygonal static geometry."""
name = "cameraMain"
identifier = "io.openpype.creators.blender.camera"
label = "Camera"
family = "camera"
icon = "video-camera"
def process(self):
""" Run the creator on Blender main thread"""
mti = ops.MainThreadItem(self._process)
ops.execute_in_main_thread(mti)
create_as_asset_group = True
def _process(self):
# Get Instance Container or create it if it does not exist
instances = bpy.data.collections.get(AVALON_INSTANCES)
if not instances:
instances = bpy.data.collections.new(name=AVALON_INSTANCES)
bpy.context.scene.collection.children.link(instances)
def create(
self, subset_name: str, instance_data: dict, pre_create_data: dict
):
# Create instance object
asset = self.data["asset"]
subset = self.data["subset"]
name = plugin.asset_name(asset, subset)
asset_group = super().create(subset_name,
instance_data,
pre_create_data)
asset_group = bpy.data.objects.new(name=name, object_data=None)
asset_group.empty_display_type = 'SINGLE_ARROW'
instances.objects.link(asset_group)
self.data['task'] = get_current_task_name()
print(f"self.data: {self.data}")
lib.imprint(asset_group, self.data)
if (self.options or {}).get("useSelection"):
bpy.context.view_layer.objects.active = asset_group
selected = lib.get_selection()
for obj in selected:
if obj.parent in selected:
obj.select_set(False)
continue
selected.append(asset_group)
bpy.ops.object.parent_set(keep_transform=True)
bpy.context.view_layer.objects.active = asset_group
if pre_create_data.get("use_selection"):
for obj in lib.get_selection():
obj.parent = asset_group
else:
plugin.deselect_all()
camera = bpy.data.cameras.new(subset)
camera_obj = bpy.data.objects.new(subset, camera)
camera = bpy.data.cameras.new(subset_name)
camera_obj = bpy.data.objects.new(subset_name, camera)
instances = bpy.data.collections.get(AVALON_INSTANCES)
instances.objects.link(camera_obj)
camera_obj.select_set(True)
asset_group.select_set(True)
bpy.context.view_layer.objects.active = asset_group
bpy.ops.object.parent_set(keep_transform=True)
camera_obj.parent = asset_group
return asset_group

View file

@ -2,50 +2,31 @@
import bpy
from openpype.pipeline import get_current_task_name
from openpype.hosts.blender.api import plugin, lib, ops
from openpype.hosts.blender.api.pipeline import AVALON_INSTANCES
from openpype.hosts.blender.api import plugin, lib
class CreateLayout(plugin.Creator):
"""Layout output for character rigs"""
class CreateLayout(plugin.BaseCreator):
"""Layout output for character rigs."""
name = "layoutMain"
identifier = "io.openpype.creators.blender.layout"
label = "Layout"
family = "layout"
icon = "cubes"
def process(self):
""" Run the creator on Blender main thread"""
mti = ops.MainThreadItem(self._process)
ops.execute_in_main_thread(mti)
create_as_asset_group = True
def _process(self):
# Get Instance Container or create it if it does not exist
instances = bpy.data.collections.get(AVALON_INSTANCES)
if not instances:
instances = bpy.data.collections.new(name=AVALON_INSTANCES)
bpy.context.scene.collection.children.link(instances)
def create(
self, subset_name: str, instance_data: dict, pre_create_data: dict
):
# Create instance object
asset = self.data["asset"]
subset = self.data["subset"]
name = plugin.asset_name(asset, subset)
asset_group = bpy.data.objects.new(name=name, object_data=None)
asset_group.empty_display_type = 'SINGLE_ARROW'
instances.objects.link(asset_group)
self.data['task'] = get_current_task_name()
lib.imprint(asset_group, self.data)
asset_group = super().create(subset_name,
instance_data,
pre_create_data)
# Add selected objects to instance
if (self.options or {}).get("useSelection"):
if pre_create_data.get("use_selection"):
bpy.context.view_layer.objects.active = asset_group
selected = lib.get_selection()
for obj in selected:
if obj.parent in selected:
obj.select_set(False)
continue
selected.append(asset_group)
bpy.ops.object.parent_set(keep_transform=True)
for obj in lib.get_selection():
obj.parent = asset_group
return asset_group

View file

@ -2,50 +2,30 @@
import bpy
from openpype.pipeline import get_current_task_name
from openpype.hosts.blender.api import plugin, lib, ops
from openpype.hosts.blender.api.pipeline import AVALON_INSTANCES
from openpype.hosts.blender.api import plugin, lib
class CreateModel(plugin.Creator):
"""Polygonal static geometry"""
class CreateModel(plugin.BaseCreator):
"""Polygonal static geometry."""
name = "modelMain"
identifier = "io.openpype.creators.blender.model"
label = "Model"
family = "model"
icon = "cube"
def process(self):
""" Run the creator on Blender main thread"""
mti = ops.MainThreadItem(self._process)
ops.execute_in_main_thread(mti)
create_as_asset_group = True
def _process(self):
# Get Instance Container or create it if it does not exist
instances = bpy.data.collections.get(AVALON_INSTANCES)
if not instances:
instances = bpy.data.collections.new(name=AVALON_INSTANCES)
bpy.context.scene.collection.children.link(instances)
# Create instance object
asset = self.data["asset"]
subset = self.data["subset"]
name = plugin.asset_name(asset, subset)
asset_group = bpy.data.objects.new(name=name, object_data=None)
asset_group.empty_display_type = 'SINGLE_ARROW'
instances.objects.link(asset_group)
self.data['task'] = get_current_task_name()
lib.imprint(asset_group, self.data)
def create(
self, subset_name: str, instance_data: dict, pre_create_data: dict
):
asset_group = super().create(subset_name,
instance_data,
pre_create_data)
# Add selected objects to instance
if (self.options or {}).get("useSelection"):
if pre_create_data.get("use_selection"):
bpy.context.view_layer.objects.active = asset_group
selected = lib.get_selection()
for obj in selected:
if obj.parent in selected:
obj.select_set(False)
continue
selected.append(asset_group)
bpy.ops.object.parent_set(keep_transform=True)
for obj in lib.get_selection():
obj.parent = asset_group
return asset_group

View file

@ -1,31 +1,25 @@
"""Create a pointcache asset."""
import bpy
from openpype.pipeline import get_current_task_name
import openpype.hosts.blender.api.plugin
from openpype.hosts.blender.api import lib
from openpype.hosts.blender.api import plugin, lib
class CreatePointcache(openpype.hosts.blender.api.plugin.Creator):
"""Polygonal static geometry"""
class CreatePointcache(plugin.BaseCreator):
"""Polygonal static geometry."""
name = "pointcacheMain"
identifier = "io.openpype.creators.blender.pointcache"
label = "Point Cache"
family = "pointcache"
icon = "gears"
def process(self):
def create(
self, subset_name: str, instance_data: dict, pre_create_data: dict
):
# Run parent create method
collection = super().create(
subset_name, instance_data, pre_create_data
)
asset = self.data["asset"]
subset = self.data["subset"]
name = openpype.hosts.blender.api.plugin.asset_name(asset, subset)
collection = bpy.data.collections.new(name=name)
bpy.context.scene.collection.children.link(collection)
self.data['task'] = get_current_task_name()
lib.imprint(collection, self.data)
if (self.options or {}).get("useSelection"):
if pre_create_data.get("use_selection"):
objects = lib.get_selection()
for obj in objects:
collection.objects.link(obj)

View file

@ -0,0 +1,42 @@
"""Create render."""
import bpy
from openpype.hosts.blender.api import plugin
from openpype.hosts.blender.api.render_lib import prepare_rendering
class CreateRenderlayer(plugin.BaseCreator):
    """Creator for render instances."""

    identifier = "io.openpype.creators.blender.render"
    label = "Render"
    family = "render"
    icon = "eye"

    def create(
        self, subset_name: str, instance_data: dict, pre_create_data: dict
    ):
        """Create the render instance and prepare the scene for rendering.

        Returns:
            The collection created by the parent creator.
        """
        # Run parent create method first; if it fails there is nothing to
        # clean up yet. Keeping it outside the try avoids a NameError on
        # `collection` in the cleanup handler below.
        collection = super().create(
            subset_name, instance_data, pre_create_data
        )
        try:
            prepare_rendering(collection)
        except Exception:
            # Remove the instance if render preparation failed, so a
            # half-configured instance is not left in the scene.
            bpy.data.collections.remove(collection)
            raise

        # TODO: this is undesirable, but it's the only way to be sure that
        # the file is saved before the render starts.
        # Blender, by design, doesn't set the file as dirty if modifications
        # happen by script. So, when creating the instance and setting the
        # render settings, the file is not marked as dirty. This means that
        # there is the risk of sending to deadline a file without the right
        # settings. Even the validator to check that the file is saved will
        # detect the file as saved, even if it isn't. The only solution for
        # now it is to force the file to be saved.
        bpy.ops.wm.save_as_mainfile(filepath=bpy.data.filepath)

        return collection

View file

@ -1,47 +1,27 @@
"""Create review."""
import bpy
from openpype.pipeline import get_current_task_name
from openpype.hosts.blender.api import plugin, lib, ops
from openpype.hosts.blender.api.pipeline import AVALON_INSTANCES
from openpype.hosts.blender.api import plugin, lib
class CreateReview(plugin.Creator):
"""Single baked camera"""
class CreateReview(plugin.BaseCreator):
"""Single baked camera."""
name = "reviewDefault"
identifier = "io.openpype.creators.blender.review"
label = "Review"
family = "review"
icon = "video-camera"
def process(self):
""" Run the creator on Blender main thread"""
mti = ops.MainThreadItem(self._process)
ops.execute_in_main_thread(mti)
def create(
self, subset_name: str, instance_data: dict, pre_create_data: dict
):
# Run parent create method
collection = super().create(
subset_name, instance_data, pre_create_data
)
def _process(self):
# Get Instance Container or create it if it does not exist
instances = bpy.data.collections.get(AVALON_INSTANCES)
if not instances:
instances = bpy.data.collections.new(name=AVALON_INSTANCES)
bpy.context.scene.collection.children.link(instances)
# Create instance object
asset = self.data["asset"]
subset = self.data["subset"]
name = plugin.asset_name(asset, subset)
asset_group = bpy.data.collections.new(name=name)
instances.children.link(asset_group)
self.data['task'] = get_current_task_name()
lib.imprint(asset_group, self.data)
if (self.options or {}).get("useSelection"):
if pre_create_data.get("use_selection"):
selected = lib.get_selection()
for obj in selected:
asset_group.objects.link(obj)
elif (self.options or {}).get("asset_group"):
obj = (self.options or {}).get("asset_group")
asset_group.objects.link(obj)
collection.objects.link(obj)
return asset_group
return collection

View file

@ -2,50 +2,30 @@
import bpy
from openpype.pipeline import get_current_task_name
from openpype.hosts.blender.api import plugin, lib, ops
from openpype.hosts.blender.api.pipeline import AVALON_INSTANCES
from openpype.hosts.blender.api import plugin, lib
class CreateRig(plugin.Creator):
"""Artist-friendly rig with controls to direct motion"""
class CreateRig(plugin.BaseCreator):
"""Artist-friendly rig with controls to direct motion."""
name = "rigMain"
identifier = "io.openpype.creators.blender.rig"
label = "Rig"
family = "rig"
icon = "wheelchair"
def process(self):
""" Run the creator on Blender main thread"""
mti = ops.MainThreadItem(self._process)
ops.execute_in_main_thread(mti)
create_as_asset_group = True
def _process(self):
# Get Instance Container or create it if it does not exist
instances = bpy.data.collections.get(AVALON_INSTANCES)
if not instances:
instances = bpy.data.collections.new(name=AVALON_INSTANCES)
bpy.context.scene.collection.children.link(instances)
# Create instance object
asset = self.data["asset"]
subset = self.data["subset"]
name = plugin.asset_name(asset, subset)
asset_group = bpy.data.objects.new(name=name, object_data=None)
asset_group.empty_display_type = 'SINGLE_ARROW'
instances.objects.link(asset_group)
self.data['task'] = get_current_task_name()
lib.imprint(asset_group, self.data)
def create(
self, subset_name: str, instance_data: dict, pre_create_data: dict
):
asset_group = super().create(subset_name,
instance_data,
pre_create_data)
# Add selected objects to instance
if (self.options or {}).get("useSelection"):
if pre_create_data.get("use_selection"):
bpy.context.view_layer.objects.active = asset_group
selected = lib.get_selection()
for obj in selected:
if obj.parent in selected:
obj.select_set(False)
continue
selected.append(asset_group)
bpy.ops.object.parent_set(keep_transform=True)
for obj in lib.get_selection():
obj.parent = asset_group
return asset_group

View file

@ -0,0 +1,121 @@
import bpy
from openpype import AYON_SERVER_ENABLED
from openpype.pipeline import CreatedInstance, AutoCreator
from openpype.client import get_asset_by_name
from openpype.hosts.blender.api.plugin import BaseCreator
from openpype.hosts.blender.api.pipeline import (
AVALON_PROPERTY,
AVALON_CONTAINERS
)
class CreateWorkfile(BaseCreator, AutoCreator):
    """Workfile auto-creator.

    The workfile instance stores its data on the `AVALON_CONTAINERS`
    collection as custom attributes, because unlike other instances it
    doesn't have an instance node of its own.
    """
    identifier = "io.openpype.creators.blender.workfile"
    label = "Workfile"
    family = "workfile"
    icon = "fa5.file"

    def create(self):
        """Create the workfile instance, or update its context if it moved."""
        existing_instance = next(
            (
                instance for instance in self.create_context.instances
                if instance.creator_identifier == self.identifier
            ),
            None,
        )
        project_name = self.project_name
        asset_name = self.create_context.get_current_asset_name()
        task_name = self.create_context.get_current_task_name()
        host_name = self.create_context.host_name

        # The asset name key differs between AYON ("folderPath") and
        # OpenPype ("asset").
        existing_asset_name = None
        if existing_instance is not None:
            if AYON_SERVER_ENABLED:
                existing_asset_name = existing_instance.get("folderPath")
            if existing_asset_name is None:
                existing_asset_name = existing_instance["asset"]

        if not existing_instance:
            asset_doc = get_asset_by_name(project_name, asset_name)
            subset_name = self.get_subset_name(
                task_name, task_name, asset_doc, project_name, host_name
            )
            data = {
                "task": task_name,
                "variant": task_name,
            }
            if AYON_SERVER_ENABLED:
                data["folderPath"] = asset_name
            else:
                data["asset"] = asset_name
            data.update(
                self.get_dynamic_data(
                    task_name,
                    task_name,
                    asset_doc,
                    project_name,
                    host_name,
                    existing_instance,
                )
            )
            self.log.info("Auto-creating workfile instance...")
            current_instance = CreatedInstance(
                self.family, subset_name, data, self
            )
            # NOTE(review): when the AVALON_CONTAINERS collection does not
            # exist yet, this falls back to a plain dict that is never
            # written back to the scene — confirm this is intended.
            instance_node = bpy.data.collections.get(AVALON_CONTAINERS, {})
            current_instance.transient_data["instance_node"] = instance_node
            self._add_instance_to_context(current_instance)
        elif (
            existing_asset_name != asset_name
            or existing_instance["task"] != task_name
        ):
            # Update instance context if it's different
            asset_doc = get_asset_by_name(project_name, asset_name)
            subset_name = self.get_subset_name(
                task_name, task_name, asset_doc, project_name, host_name
            )
            if AYON_SERVER_ENABLED:
                existing_instance["folderPath"] = asset_name
            else:
                existing_instance["asset"] = asset_name
            existing_instance["task"] = task_name
            existing_instance["subset"] = subset_name

    def collect_instances(self):
        """Recreate the workfile instance from data stored in the scene."""
        instance_node = bpy.data.collections.get(AVALON_CONTAINERS)
        if not instance_node:
            return
        # Renamed from `property` to avoid shadowing the builtin.
        avalon_prop = instance_node.get(AVALON_PROPERTY)
        if not avalon_prop:
            return
        # Create instance object from existing data
        instance = CreatedInstance.from_existing(
            instance_data=avalon_prop.to_dict(),
            creator=self
        )
        instance.transient_data["instance_node"] = instance_node

        # Add instance to create context
        self._add_instance_to_context(instance)

    def remove_instances(self, instances):
        """Delete the stored data and drop the instances from the context."""
        for instance in instances:
            node = instance.transient_data["instance_node"]
            del node[AVALON_PROPERTY]
            self._remove_instance_from_context(instance)

View file

@ -7,7 +7,7 @@ def append_workfile(context, fname, do_import):
asset = context['asset']['name']
subset = context['subset']['name']
group_name = plugin.asset_name(asset, subset)
group_name = plugin.prepare_scene_name(asset, subset)
# We need to preserve the original names of the scenes, otherwise,
# if there are duplicate names in the current workfile, the imported

View file

@ -26,8 +26,7 @@ class CacheModelLoader(plugin.AssetLoader):
Note:
At least for now it only supports Alembic files.
"""
families = ["model", "pointcache"]
families = ["model", "pointcache", "animation"]
representations = ["abc"]
label = "Load Alembic"
@ -53,32 +52,43 @@ class CacheModelLoader(plugin.AssetLoader):
def _process(self, libpath, asset_group, group_name):
plugin.deselect_all()
collection = bpy.context.view_layer.active_layer_collection.collection
relative = bpy.context.preferences.filepaths.use_relative_paths
bpy.ops.wm.alembic_import(
filepath=libpath,
relative_path=relative
)
parent = bpy.context.scene.collection
imported = lib.get_selection()
# Children must be linked before parents,
# otherwise the hierarchy will break
# Use first EMPTY without parent as container
container = next(
(obj for obj in imported
if obj.type == "EMPTY" and not obj.parent),
None
)
objects = []
if container:
nodes = list(container.children)
for obj in imported:
obj.parent = asset_group
for obj in nodes:
obj.parent = asset_group
for obj in imported:
objects.append(obj)
imported.extend(list(obj.children))
bpy.data.objects.remove(container)
objects.reverse()
objects.extend(nodes)
for obj in nodes:
objects.extend(obj.children_recursive)
else:
for obj in imported:
obj.parent = asset_group
objects = imported
for obj in objects:
# Unlink the object from all collections
collections = obj.users_collection
for collection in collections:
collection.objects.unlink(obj)
name = obj.name
obj.name = f"{group_name}:{name}"
if obj.type != 'EMPTY':
@ -90,7 +100,7 @@ class CacheModelLoader(plugin.AssetLoader):
material_slot.material.name = f"{group_name}:{name_mat}"
if not obj.get(AVALON_PROPERTY):
obj[AVALON_PROPERTY] = dict()
obj[AVALON_PROPERTY] = {}
avalon_info = obj[AVALON_PROPERTY]
avalon_info.update({"container_name": group_name})
@ -99,6 +109,18 @@ class CacheModelLoader(plugin.AssetLoader):
return objects
def _link_objects(self, objects, collection, containers, asset_group):
# Link the imported objects to any collection where the asset group is
# linked to, except the AVALON_CONTAINERS collection
group_collections = [
collection
for collection in asset_group.users_collection
if collection != containers]
for obj in objects:
for collection in group_collections:
collection.objects.link(obj)
def process_asset(
self, context: dict, name: str, namespace: Optional[str] = None,
options: Optional[Dict] = None
@ -115,23 +137,27 @@ class CacheModelLoader(plugin.AssetLoader):
asset = context["asset"]["name"]
subset = context["subset"]["name"]
asset_name = plugin.asset_name(asset, subset)
asset_name = plugin.prepare_scene_name(asset, subset)
unique_number = plugin.get_unique_number(asset, subset)
group_name = plugin.asset_name(asset, subset, unique_number)
group_name = plugin.prepare_scene_name(asset, subset, unique_number)
namespace = namespace or f"{asset}_{unique_number}"
avalon_containers = bpy.data.collections.get(AVALON_CONTAINERS)
if not avalon_containers:
avalon_containers = bpy.data.collections.new(
name=AVALON_CONTAINERS)
bpy.context.scene.collection.children.link(avalon_containers)
containers = bpy.data.collections.get(AVALON_CONTAINERS)
if not containers:
containers = bpy.data.collections.new(name=AVALON_CONTAINERS)
bpy.context.scene.collection.children.link(containers)
asset_group = bpy.data.objects.new(group_name, object_data=None)
avalon_containers.objects.link(asset_group)
asset_group.empty_display_type = 'SINGLE_ARROW'
containers.objects.link(asset_group)
objects = self._process(libpath, asset_group, group_name)
bpy.context.scene.collection.objects.link(asset_group)
# Link the asset group to the active collection
collection = bpy.context.view_layer.active_layer_collection.collection
collection.objects.link(asset_group)
self._link_objects(objects, asset_group, containers, asset_group)
asset_group[AVALON_PROPERTY] = {
"schema": "openpype:container-2.0",
@ -207,7 +233,11 @@ class CacheModelLoader(plugin.AssetLoader):
mat = asset_group.matrix_basis.copy()
self._remove(asset_group)
self._process(str(libpath), asset_group, object_name)
objects = self._process(str(libpath), asset_group, object_name)
containers = bpy.data.collections.get(AVALON_CONTAINERS)
self._link_objects(objects, asset_group, containers, asset_group)
asset_group.matrix_basis = mat
metadata["libpath"] = str(libpath)

View file

@ -7,7 +7,7 @@ from typing import Dict, List, Optional
import bpy
from openpype.pipeline import get_representation_path
import openpype.hosts.blender.api.plugin
from openpype.hosts.blender.api import plugin
from openpype.hosts.blender.api.pipeline import (
containerise_existing,
AVALON_PROPERTY,
@ -16,7 +16,7 @@ from openpype.hosts.blender.api.pipeline import (
logger = logging.getLogger("openpype").getChild("blender").getChild("load_action")
class BlendActionLoader(openpype.hosts.blender.api.plugin.AssetLoader):
class BlendActionLoader(plugin.AssetLoader):
"""Load action from a .blend file.
Warning:
@ -46,8 +46,8 @@ class BlendActionLoader(openpype.hosts.blender.api.plugin.AssetLoader):
libpath = self.filepath_from_context(context)
asset = context["asset"]["name"]
subset = context["subset"]["name"]
lib_container = openpype.hosts.blender.api.plugin.asset_name(asset, subset)
container_name = openpype.hosts.blender.api.plugin.asset_name(
lib_container = plugin.prepare_scene_name(asset, subset)
container_name = plugin.prepare_scene_name(
asset, subset, namespace
)
@ -152,7 +152,7 @@ class BlendActionLoader(openpype.hosts.blender.api.plugin.AssetLoader):
assert libpath.is_file(), (
f"The file doesn't exist: {libpath}"
)
assert extension in openpype.hosts.blender.api.plugin.VALID_EXTENSIONS, (
assert extension in plugin.VALID_EXTENSIONS, (
f"Unsupported file: {libpath}"
)

View file

@ -42,9 +42,9 @@ class AudioLoader(plugin.AssetLoader):
asset = context["asset"]["name"]
subset = context["subset"]["name"]
asset_name = plugin.asset_name(asset, subset)
asset_name = plugin.prepare_scene_name(asset, subset)
unique_number = plugin.get_unique_number(asset, subset)
group_name = plugin.asset_name(asset, subset, unique_number)
group_name = plugin.prepare_scene_name(asset, subset, unique_number)
namespace = namespace or f"{asset}_{unique_number}"
avalon_container = bpy.data.collections.get(AVALON_CONTAINERS)

View file

@ -4,11 +4,11 @@ from pathlib import Path
import bpy
from openpype.pipeline import (
legacy_create,
get_representation_path,
AVALON_CONTAINER_ID,
registered_host
)
from openpype.pipeline.create import get_legacy_creator_by_name
from openpype.pipeline.create import CreateContext
from openpype.hosts.blender.api import plugin
from openpype.hosts.blender.api.lib import imprint
from openpype.hosts.blender.api.pipeline import (
@ -20,7 +20,7 @@ from openpype.hosts.blender.api.pipeline import (
class BlendLoader(plugin.AssetLoader):
"""Load assets from a .blend file."""
families = ["model", "rig", "layout", "camera", "blendScene"]
families = ["model", "rig", "layout", "camera"]
representations = ["blend"]
label = "Append Blend"
@ -32,7 +32,7 @@ class BlendLoader(plugin.AssetLoader):
empties = [obj for obj in objects if obj.type == 'EMPTY']
for empty in empties:
if empty.get(AVALON_PROPERTY):
if empty.get(AVALON_PROPERTY) and empty.parent is None:
return empty
return None
@ -57,19 +57,21 @@ class BlendLoader(plugin.AssetLoader):
obj.get(AVALON_PROPERTY).get('family') == 'rig'
)
]
if not rigs:
return
# Create animation instances for each rig
creator_identifier = "io.openpype.creators.blender.animation"
host = registered_host()
create_context = CreateContext(host)
for rig in rigs:
creator_plugin = get_legacy_creator_by_name("CreateAnimation")
legacy_create(
creator_plugin,
name=rig.name.split(':')[-1] + "_animation",
asset=asset,
options={
"useSelection": False,
create_context.create(
creator_identifier=creator_identifier,
variant=rig.name.split(':')[-1],
pre_create_data={
"use_selection": False,
"asset_group": rig
},
data={
"dependencies": representation
}
)
@ -100,6 +102,7 @@ class BlendLoader(plugin.AssetLoader):
# Link all the container children to the collection
for obj in container.children_recursive:
print(obj)
bpy.context.scene.collection.objects.link(obj)
# Remove the library from the blend file
@ -119,7 +122,7 @@ class BlendLoader(plugin.AssetLoader):
context: Full parenthood of representation to load
options: Additional settings dictionary
"""
libpath = self.fname
libpath = self.filepath_from_context(context)
asset = context["asset"]["name"]
subset = context["subset"]["name"]
@ -130,9 +133,9 @@ class BlendLoader(plugin.AssetLoader):
representation = str(context["representation"]["_id"])
asset_name = plugin.asset_name(asset, subset)
asset_name = plugin.prepare_scene_name(asset, subset)
unique_number = plugin.get_unique_number(asset, subset)
group_name = plugin.asset_name(asset, subset, unique_number)
group_name = plugin.prepare_scene_name(asset, subset, unique_number)
namespace = namespace or f"{asset}_{unique_number}"
avalon_container = bpy.data.collections.get(AVALON_CONTAINERS)
@ -244,7 +247,7 @@ class BlendLoader(plugin.AssetLoader):
for parent in parent_containers:
parent.get(AVALON_PROPERTY)["members"] = list(filter(
lambda i: i not in members,
parent.get(AVALON_PROPERTY)["members"]))
parent.get(AVALON_PROPERTY).get("members", [])))
for attr in attrs:
for data in getattr(bpy.data, attr):

View file

@ -0,0 +1,221 @@
from typing import Dict, List, Optional
from pathlib import Path
import bpy
from openpype.pipeline import (
get_representation_path,
AVALON_CONTAINER_ID,
)
from openpype.hosts.blender.api import plugin
from openpype.hosts.blender.api.lib import imprint
from openpype.hosts.blender.api.pipeline import (
AVALON_CONTAINERS,
AVALON_PROPERTY,
)
class BlendSceneLoader(plugin.AssetLoader):
"""Load assets from a .blend file."""
families = ["blendScene"]
representations = ["blend"]
label = "Append Blend"
icon = "code-fork"
color = "orange"
@staticmethod
def _get_asset_container(collections):
for coll in collections:
parents = [c for c in collections if c.user_of_id(coll)]
if coll.get(AVALON_PROPERTY) and not parents:
return coll
return None
def _process_data(self, libpath, group_name, family):
# Append all the data from the .blend file
with bpy.data.libraries.load(
libpath, link=False, relative=False
) as (data_from, data_to):
for attr in dir(data_to):
setattr(data_to, attr, getattr(data_from, attr))
members = []
# Rename the object to add the asset name
for attr in dir(data_to):
for data in getattr(data_to, attr):
data.name = f"{group_name}:{data.name}"
members.append(data)
container = self._get_asset_container(
data_to.collections)
assert container, "No asset group found"
container.name = group_name
# Link the group to the scene
bpy.context.scene.collection.children.link(container)
# Remove the library from the blend file
library = bpy.data.libraries.get(bpy.path.basename(libpath))
bpy.data.libraries.remove(library)
return container, members
def process_asset(
self, context: dict, name: str, namespace: Optional[str] = None,
options: Optional[Dict] = None
) -> Optional[List]:
"""
Arguments:
name: Use pre-defined name
namespace: Use pre-defined namespace
context: Full parenthood of representation to load
options: Additional settings dictionary
"""
libpath = self.filepath_from_context(context)
asset = context["asset"]["name"]
subset = context["subset"]["name"]
try:
family = context["representation"]["context"]["family"]
except ValueError:
family = "model"
asset_name = plugin.prepare_scene_name(asset, subset)
unique_number = plugin.get_unique_number(asset, subset)
group_name = plugin.prepare_scene_name(asset, subset, unique_number)
namespace = namespace or f"{asset}_{unique_number}"
avalon_container = bpy.data.collections.get(AVALON_CONTAINERS)
if not avalon_container:
avalon_container = bpy.data.collections.new(name=AVALON_CONTAINERS)
bpy.context.scene.collection.children.link(avalon_container)
container, members = self._process_data(libpath, group_name, family)
avalon_container.children.link(container)
data = {
"schema": "openpype:container-2.0",
"id": AVALON_CONTAINER_ID,
"name": name,
"namespace": namespace or '',
"loader": str(self.__class__.__name__),
"representation": str(context["representation"]["_id"]),
"libpath": libpath,
"asset_name": asset_name,
"parent": str(context["representation"]["parent"]),
"family": context["representation"]["context"]["family"],
"objectName": group_name,
"members": members,
}
container[AVALON_PROPERTY] = data
objects = [
obj for obj in bpy.data.objects
if obj.name.startswith(f"{group_name}:")
]
self[:] = objects
return objects
def exec_update(self, container: Dict, representation: Dict):
"""
Update the loaded asset.
"""
group_name = container["objectName"]
asset_group = bpy.data.collections.get(group_name)
libpath = Path(get_representation_path(representation)).as_posix()
assert asset_group, (
f"The asset is not loaded: {container['objectName']}"
)
# Get the parents of the members of the asset group, so we can
# re-link them after the update.
# Also gets the transform for each object to reapply after the update.
collection_parents = {}
member_transforms = {}
members = asset_group.get(AVALON_PROPERTY).get("members", [])
loaded_collections = {c for c in bpy.data.collections if c in members}
loaded_collections.add(bpy.data.collections.get(AVALON_CONTAINERS))
for member in members:
if isinstance(member, bpy.types.Object):
member_parents = set(member.users_collection)
member_transforms[member.name] = member.matrix_basis.copy()
elif isinstance(member, bpy.types.Collection):
member_parents = {
c for c in bpy.data.collections if c.user_of_id(member)}
else:
continue
member_parents = member_parents.difference(loaded_collections)
if member_parents:
collection_parents[member.name] = list(member_parents)
old_data = dict(asset_group.get(AVALON_PROPERTY))
self.exec_remove(container)
family = container["family"]
asset_group, members = self._process_data(libpath, group_name, family)
for member in members:
if member.name in collection_parents:
for parent in collection_parents[member.name]:
if isinstance(member, bpy.types.Object):
parent.objects.link(member)
elif isinstance(member, bpy.types.Collection):
parent.children.link(member)
if member.name in member_transforms and isinstance(
member, bpy.types.Object
):
member.matrix_basis = member_transforms[member.name]
avalon_container = bpy.data.collections.get(AVALON_CONTAINERS)
avalon_container.children.link(asset_group)
# Restore the old data, but reset members, as they don't exist anymore
# This avoids a crash, because the memory addresses of those members
# are not valid anymore
old_data["members"] = []
asset_group[AVALON_PROPERTY] = old_data
new_data = {
"libpath": libpath,
"representation": str(representation["_id"]),
"parent": str(representation["parent"]),
"members": members,
}
imprint(asset_group, new_data)
def exec_remove(self, container: Dict) -> bool:
"""
Remove an existing container from a Blender scene.
"""
group_name = container["objectName"]
asset_group = bpy.data.collections.get(group_name)
members = set(asset_group.get(AVALON_PROPERTY).get("members", []))
if members:
for attr_name in dir(bpy.data):
attr = getattr(bpy.data, attr_name)
if not isinstance(attr, bpy.types.bpy_prop_collection):
continue
# ensure to make a list copy because we
# we remove members as we iterate
for data in list(attr):
if data not in members or data == asset_group:
continue
attr.remove(data)
bpy.data.collections.remove(asset_group)

View file

@ -87,9 +87,9 @@ class AbcCameraLoader(plugin.AssetLoader):
asset = context["asset"]["name"]
subset = context["subset"]["name"]
asset_name = plugin.asset_name(asset, subset)
asset_name = plugin.prepare_scene_name(asset, subset)
unique_number = plugin.get_unique_number(asset, subset)
group_name = plugin.asset_name(asset, subset, unique_number)
group_name = plugin.prepare_scene_name(asset, subset, unique_number)
namespace = namespace or f"{asset}_{unique_number}"
avalon_container = bpy.data.collections.get(AVALON_CONTAINERS)
@ -100,7 +100,7 @@ class AbcCameraLoader(plugin.AssetLoader):
asset_group = bpy.data.objects.new(group_name, object_data=None)
avalon_container.objects.link(asset_group)
objects = self._process(libpath, asset_group, group_name)
self._process(libpath, asset_group, group_name)
objects = []
nodes = list(asset_group.children)

View file

@ -90,9 +90,9 @@ class FbxCameraLoader(plugin.AssetLoader):
asset = context["asset"]["name"]
subset = context["subset"]["name"]
asset_name = plugin.asset_name(asset, subset)
asset_name = plugin.prepare_scene_name(asset, subset)
unique_number = plugin.get_unique_number(asset, subset)
group_name = plugin.asset_name(asset, subset, unique_number)
group_name = plugin.prepare_scene_name(asset, subset, unique_number)
namespace = namespace or f"{asset}_{unique_number}"
avalon_container = bpy.data.collections.get(AVALON_CONTAINERS)
@ -103,7 +103,7 @@ class FbxCameraLoader(plugin.AssetLoader):
asset_group = bpy.data.objects.new(group_name, object_data=None)
avalon_container.objects.link(asset_group)
objects = self._process(libpath, asset_group, group_name)
self._process(libpath, asset_group, group_name)
objects = []
nodes = list(asset_group.children)

View file

@ -134,9 +134,9 @@ class FbxModelLoader(plugin.AssetLoader):
asset = context["asset"]["name"]
subset = context["subset"]["name"]
asset_name = plugin.asset_name(asset, subset)
asset_name = plugin.prepare_scene_name(asset, subset)
unique_number = plugin.get_unique_number(asset, subset)
group_name = plugin.asset_name(asset, subset, unique_number)
group_name = plugin.prepare_scene_name(asset, subset, unique_number)
namespace = namespace or f"{asset}_{unique_number}"
avalon_container = bpy.data.collections.get(AVALON_CONTAINERS)

View file

@ -123,6 +123,7 @@ class JsonLayoutLoader(plugin.AssetLoader):
# raise ValueError("Creator plugin \"CreateCamera\" was "
# "not found.")
# TODO: Refactor legacy create usage to new style creators
# legacy_create(
# creator_plugin,
# name="camera",
@ -148,9 +149,9 @@ class JsonLayoutLoader(plugin.AssetLoader):
asset = context["asset"]["name"]
subset = context["subset"]["name"]
asset_name = plugin.asset_name(asset, subset)
asset_name = plugin.prepare_scene_name(asset, subset)
unique_number = plugin.get_unique_number(asset, subset)
group_name = plugin.asset_name(asset, subset, unique_number)
group_name = plugin.prepare_scene_name(asset, subset, unique_number)
namespace = namespace or f"{asset}_{unique_number}"
avalon_container = bpy.data.collections.get(AVALON_CONTAINERS)

View file

@ -96,14 +96,14 @@ class BlendLookLoader(plugin.AssetLoader):
asset = context["asset"]["name"]
subset = context["subset"]["name"]
lib_container = plugin.asset_name(
lib_container = plugin.prepare_scene_name(
asset, subset
)
unique_number = plugin.get_unique_number(
asset, subset
)
namespace = namespace or f"{asset}_{unique_number}"
container_name = plugin.asset_name(
container_name = plugin.prepare_scene_name(
asset, subset, unique_number
)

View file

@ -1,72 +1,15 @@
import os
import bpy
import pyblish.api
from openpype.pipeline import get_current_task_name, get_current_asset_name
from openpype.hosts.blender.api import workio
class SaveWorkfiledAction(pyblish.api.Action):
"""Save Workfile."""
label = "Save Workfile"
on = "failed"
icon = "save"
def process(self, context, plugin):
bpy.ops.wm.avalon_workfiles()
class CollectBlenderCurrentFile(pyblish.api.ContextPlugin):
"""Inject the current working file into context"""
order = pyblish.api.CollectorOrder - 0.5
label = "Blender Current File"
hosts = ["blender"]
actions = [SaveWorkfiledAction]
def process(self, context):
"""Inject the current working file"""
current_file = workio.current_file()
context.data["currentFile"] = current_file
assert current_file, (
"Current file is empty. Save the file before continuing."
)
folder, file = os.path.split(current_file)
filename, ext = os.path.splitext(file)
task = get_current_task_name()
data = {}
# create instance
instance = context.create_instance(name=filename)
subset = "workfile" + task.capitalize()
data.update({
"subset": subset,
"asset": get_current_asset_name(),
"label": subset,
"publish": True,
"family": "workfile",
"families": ["workfile"],
"setMembers": [current_file],
"frameStart": bpy.context.scene.frame_start,
"frameEnd": bpy.context.scene.frame_end,
})
data["representations"] = [{
"name": ext.lstrip("."),
"ext": ext.lstrip("."),
"files": file,
"stagingDir": folder,
}]
instance.data.update(data)
self.log.info("Collected instance: {}".format(file))
self.log.info("Scene path: {}".format(current_file))
self.log.info("staging Dir: {}".format(folder))
self.log.info("subset: {}".format(subset))

View file

@ -0,0 +1,43 @@
import bpy
import pyblish.api
from openpype.pipeline.publish import KnownPublishError
from openpype.hosts.blender.api.pipeline import AVALON_PROPERTY
class CollectBlenderInstanceData(pyblish.api.InstancePlugin):
"""Validator to verify that the instance is not empty"""
order = pyblish.api.CollectorOrder
hosts = ["blender"]
families = ["model", "pointcache", "animation", "rig", "camera", "layout",
"blendScene"]
label = "Collect Instance"
def process(self, instance):
instance_node = instance.data["transientData"]["instance_node"]
# Collect members of the instance
members = [instance_node]
if isinstance(instance_node, bpy.types.Collection):
members.extend(instance_node.objects)
members.extend(instance_node.children)
# Special case for animation instances, include armatures
if instance.data["family"] == "animation":
for obj in instance_node.objects:
if obj.type == 'EMPTY' and obj.get(AVALON_PROPERTY):
members.extend(
child for child in obj.children
if child.type == 'ARMATURE'
)
elif isinstance(instance_node, bpy.types.Object):
members.extend(instance_node.children_recursive)
else:
raise KnownPublishError(
f"Unsupported instance node type '{type(instance_node)}' "
f"for instance '{instance}'"
)
instance[:] = members

View file

@ -1,104 +0,0 @@
import json
from typing import Generator
import bpy
import pyblish.api
from openpype.hosts.blender.api.pipeline import (
AVALON_INSTANCES,
AVALON_PROPERTY,
)
class CollectInstances(pyblish.api.ContextPlugin):
"""Collect the data of a model."""
hosts = ["blender"]
label = "Collect Instances"
order = pyblish.api.CollectorOrder
@staticmethod
def get_asset_groups() -> Generator:
"""Return all 'model' collections.
Check if the family is 'model' and if it doesn't have the
representation set. If the representation is set, it is a loaded model
and we don't want to publish it.
"""
instances = bpy.data.collections.get(AVALON_INSTANCES)
for obj in instances.objects:
avalon_prop = obj.get(AVALON_PROPERTY) or dict()
if avalon_prop.get('id') == 'pyblish.avalon.instance':
yield obj
@staticmethod
def get_collections() -> Generator:
"""Return all 'model' collections.
Check if the family is 'model' and if it doesn't have the
representation set. If the representation is set, it is a loaded model
and we don't want to publish it.
"""
for collection in bpy.data.collections:
avalon_prop = collection.get(AVALON_PROPERTY) or dict()
if avalon_prop.get('id') == 'pyblish.avalon.instance':
yield collection
def process(self, context):
"""Collect the models from the current Blender scene."""
asset_groups = self.get_asset_groups()
collections = self.get_collections()
for group in asset_groups:
avalon_prop = group[AVALON_PROPERTY]
asset = avalon_prop['asset']
family = avalon_prop['family']
subset = avalon_prop['subset']
task = avalon_prop['task']
name = f"{asset}_{subset}"
instance = context.create_instance(
name=name,
family=family,
families=[family],
subset=subset,
asset=asset,
task=task,
)
objects = list(group.children)
members = set()
for obj in objects:
objects.extend(list(obj.children))
members.add(obj)
members.add(group)
instance[:] = list(members)
self.log.debug(json.dumps(instance.data, indent=4))
for obj in instance:
self.log.debug(obj)
for collection in collections:
avalon_prop = collection[AVALON_PROPERTY]
asset = avalon_prop['asset']
family = avalon_prop['family']
subset = avalon_prop['subset']
task = avalon_prop['task']
name = f"{asset}_{subset}"
instance = context.create_instance(
name=name,
family=family,
families=[family],
subset=subset,
asset=asset,
task=task,
)
members = list(collection.objects)
if family == "animation":
for obj in collection.objects:
if obj.type == 'EMPTY' and obj.get(AVALON_PROPERTY):
for child in obj.children:
if child.type == 'ARMATURE':
members.append(child)
members.append(collection)
instance[:] = members
self.log.debug(json.dumps(instance.data, indent=4))
for obj in instance:
self.log.debug(obj)

View file

@ -0,0 +1,120 @@
# -*- coding: utf-8 -*-
"""Collect render data."""
import os
import re
import bpy
from openpype.hosts.blender.api import colorspace
import pyblish.api
class CollectBlenderRender(pyblish.api.InstancePlugin):
"""Gather all publishable render instances."""
order = pyblish.api.CollectorOrder + 0.01
hosts = ["blender"]
families = ["render"]
label = "Collect Render"
sync_workfile_version = False
@staticmethod
def generate_expected_beauty(
render_product, frame_start, frame_end, frame_step, ext
):
"""
Generate the expected files for the render product for the beauty
render. This returns a list of files that should be rendered. It
replaces the sequence of `#` with the frame number.
"""
path = os.path.dirname(render_product)
file = os.path.basename(render_product)
expected_files = []
for frame in range(frame_start, frame_end + 1, frame_step):
frame_str = str(frame).rjust(4, "0")
filename = re.sub("#+", frame_str, file)
expected_file = f"{os.path.join(path, filename)}.{ext}"
expected_files.append(expected_file.replace("\\", "/"))
return {
"beauty": expected_files
}
@staticmethod
def generate_expected_aovs(
aov_file_product, frame_start, frame_end, frame_step, ext
):
"""
Generate the expected files for the render product for the beauty
render. This returns a list of files that should be rendered. It
replaces the sequence of `#` with the frame number.
"""
expected_files = {}
for aov_name, aov_file in aov_file_product:
path = os.path.dirname(aov_file)
file = os.path.basename(aov_file)
aov_files = []
for frame in range(frame_start, frame_end + 1, frame_step):
frame_str = str(frame).rjust(4, "0")
filename = re.sub("#+", frame_str, file)
expected_file = f"{os.path.join(path, filename)}.{ext}"
aov_files.append(expected_file.replace("\\", "/"))
expected_files[aov_name] = aov_files
return expected_files
def process(self, instance):
context = instance.context
instance_node = instance.data["transientData"]["instance_node"]
render_data = instance_node.get("render_data")
assert render_data, "No render data found."
render_product = render_data.get("render_product")
aov_file_product = render_data.get("aov_file_product")
ext = render_data.get("image_format")
multilayer = render_data.get("multilayer_exr")
frame_start = context.data["frameStart"]
frame_end = context.data["frameEnd"]
frame_handle_start = context.data["frameStartHandle"]
frame_handle_end = context.data["frameEndHandle"]
expected_beauty = self.generate_expected_beauty(
render_product, int(frame_start), int(frame_end),
int(bpy.context.scene.frame_step), ext)
expected_aovs = self.generate_expected_aovs(
aov_file_product, int(frame_start), int(frame_end),
int(bpy.context.scene.frame_step), ext)
expected_files = expected_beauty | expected_aovs
instance.data.update({
"families": ["render", "render.farm"],
"frameStart": frame_start,
"frameEnd": frame_end,
"frameStartHandle": frame_handle_start,
"frameEndHandle": frame_handle_end,
"fps": context.data["fps"],
"byFrameStep": bpy.context.scene.frame_step,
"review": render_data.get("review", False),
"multipartExr": ext == "exr" and multilayer,
"farm": True,
"expectedFiles": [expected_files],
# OCIO not currently implemented in Blender, but the following
# settings are required by the schema, so it is hardcoded.
# TODO: Implement OCIO in Blender
"colorspaceConfig": "",
"colorspaceDisplay": "sRGB",
"colorspaceView": "ACES 1.0 SDR-video",
"renderProducts": colorspace.ARenderProduct(),
})

View file

@ -16,10 +16,12 @@ class CollectReview(pyblish.api.InstancePlugin):
self.log.debug(f"instance: {instance}")
datablock = instance.data["transientData"]["instance_node"]
# get cameras
cameras = [
obj
for obj in instance
for obj in datablock.all_objects
if isinstance(obj, bpy.types.Object) and obj.type == "CAMERA"
]
@ -31,23 +33,20 @@ class CollectReview(pyblish.api.InstancePlugin):
focal_length = cameras[0].data.lens
# get isolate objects list from meshes instance members .
# get isolate objects list from meshes instance members.
types = {"MESH", "GPENCIL"}
isolate_objects = [
obj
for obj in instance
if isinstance(obj, bpy.types.Object) and obj.type == "MESH"
if isinstance(obj, bpy.types.Object) and obj.type in types
]
if not instance.data.get("remove"):
task = instance.context.data["task"]
# Store focal length in `burninDataMembers`
burninData = instance.data.setdefault("burninDataMembers", {})
burninData["focalLength"] = focal_length
instance.data.update({
"subset": f"{task}Review",
"review_camera": camera,
"frameStart": instance.context.data["frameStart"],
"frameEnd": instance.context.data["frameEnd"],

View file

@ -0,0 +1,37 @@
from pathlib import Path
from pyblish.api import InstancePlugin, CollectorOrder
class CollectWorkfile(InstancePlugin):
"""Inject workfile data into its instance."""
order = CollectorOrder
label = "Collect Workfile"
hosts = ["blender"]
families = ["workfile"]
def process(self, instance):
"""Process collector."""
context = instance.context
filepath = Path(context.data["currentFile"])
ext = filepath.suffix
instance.data.update(
{
"setMembers": [filepath.as_posix()],
"frameStart": context.data.get("frameStart", 1),
"frameEnd": context.data.get("frameEnd", 1),
"handleStart": context.data.get("handleStart", 1),
"handledEnd": context.data.get("handleEnd", 1),
"representations": [
{
"name": ext.lstrip("."),
"ext": ext.lstrip("."),
"files": filepath.name,
"stagingDir": filepath.parent,
}
],
}
)

View file

@ -4,42 +4,42 @@ import bpy
from openpype.pipeline import publish
from openpype.hosts.blender.api import plugin
from openpype.hosts.blender.api.pipeline import AVALON_PROPERTY
class ExtractABC(publish.Extractor):
class ExtractABC(publish.Extractor, publish.OptionalPyblishPluginMixin):
"""Extract as ABC."""
label = "Extract ABC"
hosts = ["blender"]
families = ["model", "pointcache"]
optional = True
families = ["pointcache"]
def process(self, instance):
if not self.is_active(instance.data):
return
# Define extract output file path
stagingdir = self.staging_dir(instance)
filename = f"{instance.name}.abc"
asset_name = instance.data["assetEntity"]["name"]
subset = instance.data["subset"]
instance_name = f"{asset_name}_{subset}"
filename = f"{instance_name}.abc"
filepath = os.path.join(stagingdir, filename)
context = bpy.context
# Perform extraction
self.log.info("Performing extraction..")
self.log.debug("Performing extraction..")
plugin.deselect_all()
selected = []
active = None
asset_group = instance.data["transientData"]["instance_node"]
selected = []
for obj in instance:
obj.select_set(True)
selected.append(obj)
# Set as active the asset group
if obj.get(AVALON_PROPERTY):
active = obj
if isinstance(obj, bpy.types.Object):
obj.select_set(True)
selected.append(obj)
context = plugin.create_blender_context(
active=active, selected=selected)
active=asset_group, selected=selected)
with bpy.context.temp_override(**context):
# We export the abc
@ -62,5 +62,14 @@ class ExtractABC(publish.Extractor):
}
instance.data["representations"].append(representation)
self.log.info("Extracted instance '%s' to: %s",
instance.name, representation)
self.log.debug("Extracted instance '%s' to: %s",
instance.name, representation)
class ExtractModelABC(ExtractABC):
"""Extract model as ABC."""
label = "Extract Model ABC"
hosts = ["blender"]
families = ["model"]
optional = True

View file

@ -6,7 +6,10 @@ from openpype.pipeline import publish
from openpype.hosts.blender.api import plugin
class ExtractAnimationABC(publish.Extractor):
class ExtractAnimationABC(
publish.Extractor,
publish.OptionalPyblishPluginMixin,
):
"""Extract as ABC."""
label = "Extract Animation ABC"
@ -15,20 +18,25 @@ class ExtractAnimationABC(publish.Extractor):
optional = True
def process(self, instance):
if not self.is_active(instance.data):
return
# Define extract output file path
stagingdir = self.staging_dir(instance)
filename = f"{instance.name}.abc"
asset_name = instance.data["assetEntity"]["name"]
subset = instance.data["subset"]
instance_name = f"{asset_name}_{subset}"
filename = f"{instance_name}.abc"
filepath = os.path.join(stagingdir, filename)
context = bpy.context
# Perform extraction
self.log.info("Performing extraction..")
self.log.debug("Performing extraction..")
plugin.deselect_all()
selected = []
asset_group = None
asset_group = instance.data["transientData"]["instance_node"]
objects = []
for obj in instance:
@ -68,5 +76,5 @@ class ExtractAnimationABC(publish.Extractor):
}
instance.data["representations"].append(representation)
self.log.info("Extracted instance '%s' to: %s",
instance.name, representation)
self.log.debug("Extracted instance '%s' to: %s",
instance.name, representation)

View file

@ -5,7 +5,7 @@ import bpy
from openpype.pipeline import publish
class ExtractBlend(publish.Extractor):
class ExtractBlend(publish.Extractor, publish.OptionalPyblishPluginMixin):
"""Extract a blend file."""
label = "Extract Blend"
@ -14,30 +14,44 @@ class ExtractBlend(publish.Extractor):
optional = True
def process(self, instance):
if not self.is_active(instance.data):
return
# Define extract output file path
stagingdir = self.staging_dir(instance)
filename = f"{instance.name}.blend"
asset_name = instance.data["assetEntity"]["name"]
subset = instance.data["subset"]
instance_name = f"{asset_name}_{subset}"
filename = f"{instance_name}.blend"
filepath = os.path.join(stagingdir, filename)
# Perform extraction
self.log.info("Performing extraction..")
self.log.debug("Performing extraction..")
data_blocks = set()
for obj in instance:
data_blocks.add(obj)
for data in instance:
data_blocks.add(data)
# Pack used images in the blend files.
if obj.type == 'MESH':
for material_slot in obj.material_slots:
mat = material_slot.material
if mat and mat.use_nodes:
tree = mat.node_tree
if tree.type == 'SHADER':
for node in tree.nodes:
if node.bl_idname == 'ShaderNodeTexImage':
if node.image:
node.image.pack()
if not (
isinstance(data, bpy.types.Object) and data.type == 'MESH'
):
continue
for material_slot in data.material_slots:
mat = material_slot.material
if not (mat and mat.use_nodes):
continue
tree = mat.node_tree
if tree.type != 'SHADER':
continue
for node in tree.nodes:
if node.bl_idname != 'ShaderNodeTexImage':
continue
# Check if image is not packed already
# and pack it if not.
if node.image and node.image.packed_file is None:
node.image.pack()
bpy.data.libraries.write(filepath, data_blocks)
@ -52,5 +66,5 @@ class ExtractBlend(publish.Extractor):
}
instance.data["representations"].append(representation)
self.log.info("Extracted instance '%s' to: %s",
instance.name, representation)
self.log.debug("Extracted instance '%s' to: %s",
instance.name, representation)

View file

@ -5,7 +5,10 @@ import bpy
from openpype.pipeline import publish
class ExtractBlendAnimation(publish.Extractor):
class ExtractBlendAnimation(
publish.Extractor,
publish.OptionalPyblishPluginMixin,
):
"""Extract a blend file."""
label = "Extract Blend"
@ -14,14 +17,20 @@ class ExtractBlendAnimation(publish.Extractor):
optional = True
def process(self, instance):
if not self.is_active(instance.data):
return
# Define extract output file path
stagingdir = self.staging_dir(instance)
filename = f"{instance.name}.blend"
asset_name = instance.data["assetEntity"]["name"]
subset = instance.data["subset"]
instance_name = f"{asset_name}_{subset}"
filename = f"{instance_name}.blend"
filepath = os.path.join(stagingdir, filename)
# Perform extraction
self.log.info("Performing extraction..")
self.log.debug("Performing extraction..")
data_blocks = set()
@ -50,5 +59,5 @@ class ExtractBlendAnimation(publish.Extractor):
}
instance.data["representations"].append(representation)
self.log.info("Extracted instance '%s' to: %s",
instance.name, representation)
self.log.debug("Extracted instance '%s' to: %s",
instance.name, representation)

View file

@ -7,7 +7,7 @@ from openpype.hosts.blender.api import plugin
from openpype.hosts.blender.api.pipeline import AVALON_PROPERTY
class ExtractCameraABC(publish.Extractor):
class ExtractCameraABC(publish.Extractor, publish.OptionalPyblishPluginMixin):
"""Extract camera as ABC."""
label = "Extract Camera (ABC)"
@ -16,27 +16,23 @@ class ExtractCameraABC(publish.Extractor):
optional = True
def process(self, instance):
if not self.is_active(instance.data):
return
# Define extract output file path
stagingdir = self.staging_dir(instance)
filename = f"{instance.name}.abc"
asset_name = instance.data["assetEntity"]["name"]
subset = instance.data["subset"]
instance_name = f"{asset_name}_{subset}"
filename = f"{instance_name}.abc"
filepath = os.path.join(stagingdir, filename)
context = bpy.context
# Perform extraction
self.log.info("Performing extraction..")
self.log.debug("Performing extraction..")
plugin.deselect_all()
selected = []
active = None
asset_group = None
for obj in instance:
if obj.get(AVALON_PROPERTY):
asset_group = obj
break
assert asset_group, "No asset group found"
asset_group = instance.data["transientData"]["instance_node"]
# Need to cast to list because children is a tuple
selected = list(asset_group.children)
@ -69,5 +65,5 @@ class ExtractCameraABC(publish.Extractor):
}
instance.data["representations"].append(representation)
self.log.info("Extracted instance '%s' to: %s",
instance.name, representation)
self.log.debug("Extracted instance '%s' to: %s",
instance.name, representation)

View file

@ -6,7 +6,7 @@ from openpype.pipeline import publish
from openpype.hosts.blender.api import plugin
class ExtractCamera(publish.Extractor):
class ExtractCamera(publish.Extractor, publish.OptionalPyblishPluginMixin):
"""Extract as the camera as FBX."""
label = "Extract Camera (FBX)"
@ -15,13 +15,19 @@ class ExtractCamera(publish.Extractor):
optional = True
def process(self, instance):
if not self.is_active(instance.data):
return
# Define extract output file path
stagingdir = self.staging_dir(instance)
filename = f"{instance.name}.fbx"
asset_name = instance.data["assetEntity"]["name"]
subset = instance.data["subset"]
instance_name = f"{asset_name}_{subset}"
filename = f"{instance_name}.fbx"
filepath = os.path.join(stagingdir, filename)
# Perform extraction
self.log.info("Performing extraction..")
self.log.debug("Performing extraction..")
plugin.deselect_all()
@ -73,5 +79,5 @@ class ExtractCamera(publish.Extractor):
}
instance.data["representations"].append(representation)
self.log.info("Extracted instance '%s' to: %s",
instance.name, representation)
self.log.debug("Extracted instance '%s' to: %s",
instance.name, representation)

View file

@ -7,7 +7,7 @@ from openpype.hosts.blender.api import plugin
from openpype.hosts.blender.api.pipeline import AVALON_PROPERTY
class ExtractFBX(publish.Extractor):
class ExtractFBX(publish.Extractor, publish.OptionalPyblishPluginMixin):
"""Extract as FBX."""
label = "Extract FBX"
@ -16,24 +16,28 @@ class ExtractFBX(publish.Extractor):
optional = True
def process(self, instance):
if not self.is_active(instance.data):
return
# Define extract output file path
stagingdir = self.staging_dir(instance)
filename = f"{instance.name}.fbx"
asset_name = instance.data["assetEntity"]["name"]
subset = instance.data["subset"]
instance_name = f"{asset_name}_{subset}"
filename = f"{instance_name}.fbx"
filepath = os.path.join(stagingdir, filename)
# Perform extraction
self.log.info("Performing extraction..")
self.log.debug("Performing extraction..")
plugin.deselect_all()
selected = []
asset_group = None
asset_group = instance.data["transientData"]["instance_node"]
selected = []
for obj in instance:
obj.select_set(True)
selected.append(obj)
if obj.get(AVALON_PROPERTY):
asset_group = obj
context = plugin.create_blender_context(
active=asset_group, selected=selected)
@ -84,5 +88,5 @@ class ExtractFBX(publish.Extractor):
}
instance.data["representations"].append(representation)
self.log.info("Extracted instance '%s' to: %s",
instance.name, representation)
self.log.debug("Extracted instance '%s' to: %s",
instance.name, representation)

View file

@ -10,7 +10,41 @@ from openpype.hosts.blender.api import plugin
from openpype.hosts.blender.api.pipeline import AVALON_PROPERTY
class ExtractAnimationFBX(publish.Extractor):
def get_all_parents(obj):
"""Get all recursive parents of object"""
result = []
while True:
obj = obj.parent
if not obj:
break
result.append(obj)
return result
def get_highest_root(objects):
# Get the highest object that is also in the collection
included_objects = {obj.name_full for obj in objects}
num_parents_to_obj = {}
for obj in objects:
if isinstance(obj, bpy.types.Object):
parents = get_all_parents(obj)
# included parents
parents = [parent for parent in parents if
parent.name_full in included_objects]
if not parents:
# A node without parents must be a highest root
return obj
num_parents_to_obj.setdefault(len(parents), obj)
minimum_parent = min(num_parents_to_obj)
return num_parents_to_obj[minimum_parent]
class ExtractAnimationFBX(
publish.Extractor,
publish.OptionalPyblishPluginMixin,
):
"""Extract as animation."""
label = "Extract FBX"
@ -19,23 +53,43 @@ class ExtractAnimationFBX(publish.Extractor):
optional = True
def process(self, instance):
if not self.is_active(instance.data):
return
# Define extract output file path
stagingdir = self.staging_dir(instance)
# Perform extraction
self.log.info("Performing extraction..")
self.log.debug("Performing extraction..")
# The first collection object in the instance is taken, as there
# should be only one that contains the asset group.
collection = [
obj for obj in instance if type(obj) is bpy.types.Collection][0]
asset_group = instance.data["transientData"]["instance_node"]
# Again, the first object in the collection is taken , as there
# should be only the asset group in the collection.
asset_group = collection.objects[0]
# Get objects in this collection (but not in children collections)
# and for those objects include the children hierarchy
# TODO: Would it make more sense for the Collect Instance collector
# to also always retrieve all the children?
objects = set(asset_group.objects)
armature = [
obj for obj in asset_group.children if obj.type == 'ARMATURE'][0]
# From the direct children of the collection find the 'root' node
# that we want to export - it is the 'highest' node in a hierarchy
root = get_highest_root(objects)
for obj in list(objects):
objects.update(obj.children_recursive)
# Find all armatures among the objects, assume to find only one
armatures = [obj for obj in objects if obj.type == "ARMATURE"]
if not armatures:
raise RuntimeError(
f"Unable to find ARMATURE in collection: "
f"{asset_group.name}"
)
elif len(armatures) > 1:
self.log.warning(
"Found more than one ARMATURE, using "
f"only first of: {armatures}"
)
armature = armatures[0]
object_action_pairs = []
original_actions = []
@ -44,9 +98,6 @@ class ExtractAnimationFBX(publish.Extractor):
ending_frames = []
# For each armature, we make a copy of the current action
curr_action = None
copy_action = None
if armature.animation_data and armature.animation_data.action:
curr_action = armature.animation_data.action
copy_action = curr_action.copy()
@ -56,12 +107,20 @@ class ExtractAnimationFBX(publish.Extractor):
starting_frames.append(curr_frame_range[0])
ending_frames.append(curr_frame_range[1])
else:
self.log.info("Object have no animation.")
self.log.info(
f"Armature '{armature.name}' has no animation, "
f"skipping FBX animation extraction for {instance}."
)
return
asset_group_name = asset_group.name
asset_group.name = asset_group.get(AVALON_PROPERTY).get("asset_name")
asset_name = asset_group.get(AVALON_PROPERTY).get("asset_name")
if asset_name:
# Rename for the export; this data is only present when loaded
# from a JSON Layout (layout family)
asset_group.name = asset_name
# Remove : from the armature name for the export
armature_name = armature.name
original_name = armature_name.split(':')[1]
armature.name = original_name
@ -84,13 +143,16 @@ class ExtractAnimationFBX(publish.Extractor):
for obj in bpy.data.objects:
obj.select_set(False)
asset_group.select_set(True)
root.select_set(True)
armature.select_set(True)
fbx_filename = f"{instance.name}_{armature.name}.fbx"
asset_name = instance.data["assetEntity"]["name"]
subset = instance.data["subset"]
instance_name = f"{asset_name}_{subset}"
fbx_filename = f"{instance_name}_{armature.name}.fbx"
filepath = os.path.join(stagingdir, fbx_filename)
override = plugin.create_blender_context(
active=asset_group, selected=[asset_group, armature])
active=root, selected=[root, armature])
bpy.ops.export_scene.fbx(
override,
filepath=filepath,
@ -104,7 +166,7 @@ class ExtractAnimationFBX(publish.Extractor):
)
armature.name = armature_name
asset_group.name = asset_group_name
asset_group.select_set(False)
root.select_set(True)
armature.select_set(False)
# We delete the baked action and set the original one back
@ -119,7 +181,7 @@ class ExtractAnimationFBX(publish.Extractor):
pair[1].user_clear()
bpy.data.actions.remove(pair[1])
json_filename = f"{instance.name}.json"
json_filename = f"{instance_name}.json"
json_path = os.path.join(stagingdir, json_filename)
json_dict = {
@ -158,5 +220,5 @@ class ExtractAnimationFBX(publish.Extractor):
instance.data["representations"].append(fbx_representation)
instance.data["representations"].append(json_representation)
self.log.info("Extracted instance '{}' to: {}".format(
instance.name, fbx_representation))
self.log.debug("Extracted instance '{}' to: {}".format(
instance.name, fbx_representation))

View file

@ -11,10 +11,10 @@ from openpype.hosts.blender.api import plugin
from openpype.hosts.blender.api.pipeline import AVALON_PROPERTY
class ExtractLayout(publish.Extractor):
class ExtractLayout(publish.Extractor, publish.OptionalPyblishPluginMixin):
"""Extract a layout."""
label = "Extract Layout"
label = "Extract Layout (JSON)"
hosts = ["blender"]
families = ["layout"]
optional = True
@ -45,7 +45,7 @@ class ExtractLayout(publish.Extractor):
starting_frames.append(curr_frame_range[0])
ending_frames.append(curr_frame_range[1])
else:
self.log.info("Object have no animation.")
self.log.info("Object has no animation.")
continue
asset_group_name = asset.name
@ -113,11 +113,14 @@ class ExtractLayout(publish.Extractor):
return None, n
def process(self, instance):
if not self.is_active(instance.data):
return
# Define extract output file path
stagingdir = self.staging_dir(instance)
# Perform extraction
self.log.info("Performing extraction..")
self.log.debug("Performing extraction..")
if "representations" not in instance.data:
instance.data["representations"] = []
@ -125,13 +128,22 @@ class ExtractLayout(publish.Extractor):
json_data = []
fbx_files = []
asset_group = bpy.data.objects[str(instance)]
asset_group = instance.data["transientData"]["instance_node"]
fbx_count = 0
project_name = instance.context.data["projectEntity"]["name"]
for asset in asset_group.children:
metadata = asset.get(AVALON_PROPERTY)
if not metadata:
# Avoid raising error directly if there's just invalid data
# inside the instance; better to log it to the artist
# TODO: This should actually be validated in a validator
self.log.warning(
f"Found content in layout that is not a loaded "
f"asset, skipping: {asset.name_full}"
)
continue
version_id = metadata["parent"]
family = metadata["family"]
@ -212,7 +224,11 @@ class ExtractLayout(publish.Extractor):
json_data.append(json_element)
json_filename = "{}.json".format(instance.name)
asset_name = instance.data["assetEntity"]["name"]
subset = instance.data["subset"]
instance_name = f"{asset_name}_{subset}"
json_filename = f"{instance_name}.json"
json_path = os.path.join(stagingdir, json_filename)
with open(json_path, "w+") as file:
@ -245,5 +261,5 @@ class ExtractLayout(publish.Extractor):
}
instance.data["representations"].append(fbx_representation)
self.log.info("Extracted instance '%s' to: %s",
instance.name, json_representation)
self.log.debug("Extracted instance '%s' to: %s",
instance.name, json_representation)

View file

@ -9,7 +9,7 @@ from openpype.hosts.blender.api import capture
from openpype.hosts.blender.api.lib import maintained_time
class ExtractPlayblast(publish.Extractor):
class ExtractPlayblast(publish.Extractor, publish.OptionalPyblishPluginMixin):
"""
Extract viewport playblast.
@ -24,9 +24,8 @@ class ExtractPlayblast(publish.Extractor):
order = pyblish.api.ExtractorOrder + 0.01
def process(self, instance):
self.log.info("Extracting capture..")
self.log.info(instance.data)
if not self.is_active(instance.data):
return
# get scene fps
fps = instance.data.get("fps")
@ -34,14 +33,14 @@ class ExtractPlayblast(publish.Extractor):
fps = bpy.context.scene.render.fps
instance.data["fps"] = fps
self.log.info(f"fps: {fps}")
self.log.debug(f"fps: {fps}")
# If start and end frames cannot be determined,
# get them from Blender timeline.
start = instance.data.get("frameStart", bpy.context.scene.frame_start)
end = instance.data.get("frameEnd", bpy.context.scene.frame_end)
self.log.info(f"start: {start}, end: {end}")
self.log.debug(f"start: {start}, end: {end}")
assert end > start, "Invalid time range !"
# get cameras
@ -52,10 +51,13 @@ class ExtractPlayblast(publish.Extractor):
# get output path
stagingdir = self.staging_dir(instance)
filename = instance.name
asset_name = instance.data["assetEntity"]["name"]
subset = instance.data["subset"]
filename = f"{asset_name}_{subset}"
path = os.path.join(stagingdir, filename)
self.log.info(f"Outputting images to {path}")
self.log.debug(f"Outputting images to {path}")
project_settings = instance.context.data["project_settings"]["blender"]
presets = project_settings["publish"]["ExtractPlayblast"]["presets"]
@ -100,7 +102,7 @@ class ExtractPlayblast(publish.Extractor):
frame_collection = collections[0]
self.log.info(f"We found collection of interest {frame_collection}")
self.log.debug(f"Found collection of interest {frame_collection}")
instance.data.setdefault("representations", [])

View file

@ -24,13 +24,20 @@ class ExtractThumbnail(publish.Extractor):
presets = {}
def process(self, instance):
self.log.info("Extracting capture..")
self.log.debug("Extracting capture..")
if instance.data.get("thumbnailSource"):
self.log.debug("Thumbnail source found, skipping...")
return
stagingdir = self.staging_dir(instance)
filename = instance.name
asset_name = instance.data["assetEntity"]["name"]
subset = instance.data["subset"]
filename = f"{asset_name}_{subset}"
path = os.path.join(stagingdir, filename)
self.log.info(f"Outputting images to {path}")
self.log.debug(f"Outputting images to {path}")
camera = instance.data.get("review_camera", "AUTO")
start = instance.data.get("frameStart", bpy.context.scene.frame_start)
@ -61,7 +68,7 @@ class ExtractThumbnail(publish.Extractor):
thumbnail = os.path.basename(self._fix_output_path(path))
self.log.info(f"thumbnail: {thumbnail}")
self.log.debug(f"thumbnail: {thumbnail}")
instance.data.setdefault("representations", [])

View file

@ -1,17 +1,24 @@
import pyblish.api
from openpype.pipeline.publish import OptionalPyblishPluginMixin
from openpype.hosts.blender.api.workio import save_file
class IncrementWorkfileVersion(pyblish.api.ContextPlugin):
class IncrementWorkfileVersion(
pyblish.api.ContextPlugin,
OptionalPyblishPluginMixin
):
"""Increment current workfile version."""
order = pyblish.api.IntegratorOrder + 0.9
label = "Increment Workfile Version"
optional = True
hosts = ["blender"]
families = ["animation", "model", "rig", "action", "layout", "blendScene"]
families = ["animation", "model", "rig", "action", "layout", "blendScene",
"pointcache", "render.farm"]
def process(self, context):
if not self.is_active(context.data):
return
assert all(result["success"] for result in context.data["results"]), (
"Publishing not successful so version is not increased.")
@ -22,4 +29,4 @@ class IncrementWorkfileVersion(pyblish.api.ContextPlugin):
save_file(filepath, copy=False)
self.log.info('Incrementing script version')
self.log.debug('Incrementing blender workfile version')

View file

@ -1,9 +1,13 @@
import json
import pyblish.api
from openpype.pipeline.publish import OptionalPyblishPluginMixin
class IntegrateAnimation(pyblish.api.InstancePlugin):
class IntegrateAnimation(
pyblish.api.InstancePlugin,
OptionalPyblishPluginMixin,
):
"""Generate a JSON file for animation."""
label = "Integrate Animation"
@ -13,7 +17,7 @@ class IntegrateAnimation(pyblish.api.InstancePlugin):
families = ["setdress"]
def process(self, instance):
self.log.info("Integrate Animation")
self.log.debug("Integrate Animation")
representation = instance.data.get('representations')[0]
json_path = representation.get('publishedFiles')[0]

View file

@ -5,10 +5,15 @@ import bpy
import pyblish.api
import openpype.hosts.blender.api.action
from openpype.pipeline.publish import ValidateContentsOrder
from openpype.pipeline.publish import (
ValidateContentsOrder,
PublishValidationError,
OptionalPyblishPluginMixin
)
class ValidateCameraZeroKeyframe(pyblish.api.InstancePlugin):
class ValidateCameraZeroKeyframe(pyblish.api.InstancePlugin,
OptionalPyblishPluginMixin):
"""Camera must have a keyframe at frame 0.
Unreal shifts the first keyframe to frame 0. Forcing the camera to have
@ -40,8 +45,12 @@ class ValidateCameraZeroKeyframe(pyblish.api.InstancePlugin):
return invalid
def process(self, instance):
if not self.is_active(instance.data):
return
invalid = self.get_invalid(instance)
if invalid:
raise RuntimeError(
f"Camera must have a keyframe at frame 0: {invalid}"
names = ", ".join(obj.name for obj in invalid)
raise PublishValidationError(
f"Camera must have a keyframe at frame 0: {names}"
)

View file

@ -0,0 +1,47 @@
import os
import bpy
import pyblish.api
from openpype.pipeline.publish import (
RepairAction,
ValidateContentsOrder,
PublishValidationError,
OptionalPyblishPluginMixin
)
from openpype.hosts.blender.api.render_lib import prepare_rendering
class ValidateDeadlinePublish(pyblish.api.InstancePlugin,
OptionalPyblishPluginMixin):
"""Validates Render File Directory is
not the same in every submission
"""
order = ValidateContentsOrder
families = ["render"]
hosts = ["blender"]
label = "Validate Render Output for Deadline"
optional = True
actions = [RepairAction]
def process(self, instance):
if not self.is_active(instance.data):
return
filepath = bpy.data.filepath
file = os.path.basename(filepath)
filename, ext = os.path.splitext(file)
if filename not in bpy.context.scene.render.filepath:
raise PublishValidationError(
"Render output folder "
"doesn't match the blender scene name! "
"Use Repair action to "
"fix the folder file path."
)
@classmethod
def repair(cls, instance):
container = instance.data["transientData"]["instance_node"]
prepare_rendering(container)
bpy.ops.wm.save_as_mainfile(filepath=bpy.data.filepath)
cls.log.debug("Reset the render output folder...")

View file

@ -0,0 +1,61 @@
import bpy
import pyblish.api
from openpype.pipeline.publish import (
OptionalPyblishPluginMixin,
PublishValidationError
)
class SaveWorkfileAction(pyblish.api.Action):
"""Save Workfile."""
label = "Save Workfile"
on = "failed"
icon = "save"
def process(self, context, plugin):
bpy.ops.wm.avalon_workfiles()
class ValidateFileSaved(pyblish.api.ContextPlugin,
OptionalPyblishPluginMixin):
"""Validate that the workfile has been saved."""
order = pyblish.api.ValidatorOrder - 0.01
hosts = ["blender"]
label = "Validate File Saved"
optional = False
exclude_families = []
actions = [SaveWorkfileAction]
def process(self, context):
if not self.is_active(context.data):
return
if not context.data["currentFile"]:
# File has not been saved at all and has no filename
raise PublishValidationError(
"Current file is empty. Save the file before continuing."
)
# Do not validate workfile has unsaved changes if only instances
# present of families that should be excluded
families = {
instance.data["family"] for instance in context
# Consider only enabled instances
if instance.data.get("publish", True)
and instance.data.get("active", True)
}
def is_excluded(family):
return any(family in exclude_family
for exclude_family in self.exclude_families)
if all(is_excluded(family) for family in families):
self.log.debug("Only excluded families found, skipping workfile "
"unsaved changes validation..")
return
if bpy.data.is_dirty:
raise PublishValidationError("Workfile has unsaved changes.")

View file

@ -0,0 +1,19 @@
import pyblish.api
from openpype.pipeline.publish import PublishValidationError
class ValidateInstanceEmpty(pyblish.api.InstancePlugin):
"""Validator to verify that the instance is not empty"""
order = pyblish.api.ValidatorOrder - 0.01
hosts = ["blender"]
families = ["model", "pointcache", "rig", "camera" "layout", "blendScene"]
label = "Validate Instance is not Empty"
optional = False
def process(self, instance):
# Members are collected by `collect_instance` so we only need to check
# whether any member is included. The instance node will be included
# as a member as well, hence we will check for at least 2 members
if len(instance) < 2:
raise PublishValidationError(f"Instance {instance.name} is empty.")

View file

@ -4,17 +4,24 @@ import bpy
import pyblish.api
from openpype.pipeline.publish import ValidateContentsOrder
from openpype.pipeline.publish import (
ValidateContentsOrder,
OptionalPyblishPluginMixin,
PublishValidationError
)
import openpype.hosts.blender.api.action
class ValidateMeshHasUvs(pyblish.api.InstancePlugin):
class ValidateMeshHasUvs(
pyblish.api.InstancePlugin,
OptionalPyblishPluginMixin,
):
"""Validate that the current mesh has UV's."""
order = ValidateContentsOrder
hosts = ["blender"]
families = ["model"]
label = "Mesh Has UV's"
label = "Mesh Has UVs"
actions = [openpype.hosts.blender.api.action.SelectInvalidAction]
optional = True
@ -49,8 +56,11 @@ class ValidateMeshHasUvs(pyblish.api.InstancePlugin):
return invalid
def process(self, instance):
if not self.is_active(instance.data):
return
invalid = self.get_invalid(instance)
if invalid:
raise RuntimeError(
raise PublishValidationError(
f"Meshes found in instance without valid UV's: {invalid}"
)

View file

@ -4,11 +4,16 @@ import bpy
import pyblish.api
from openpype.pipeline.publish import ValidateContentsOrder
from openpype.pipeline.publish import (
ValidateContentsOrder,
OptionalPyblishPluginMixin,
PublishValidationError
)
import openpype.hosts.blender.api.action
class ValidateMeshNoNegativeScale(pyblish.api.Validator):
class ValidateMeshNoNegativeScale(pyblish.api.Validator,
OptionalPyblishPluginMixin):
"""Ensure that meshes don't have a negative scale."""
order = ValidateContentsOrder
@ -27,8 +32,12 @@ class ValidateMeshNoNegativeScale(pyblish.api.Validator):
return invalid
def process(self, instance):
if not self.is_active(instance.data):
return
invalid = self.get_invalid(instance)
if invalid:
raise RuntimeError(
f"Meshes found in instance with negative scale: {invalid}"
names = ", ".join(obj.name for obj in invalid)
raise PublishValidationError(
f"Meshes found in instance with negative scale: {names}"
)

View file

@ -5,10 +5,15 @@ import bpy
import pyblish.api
import openpype.hosts.blender.api.action
from openpype.pipeline.publish import ValidateContentsOrder
from openpype.pipeline.publish import (
ValidateContentsOrder,
OptionalPyblishPluginMixin,
PublishValidationError
)
class ValidateNoColonsInName(pyblish.api.InstancePlugin):
class ValidateNoColonsInName(pyblish.api.InstancePlugin,
OptionalPyblishPluginMixin):
"""There cannot be colons in names
Object or bone names cannot include colons. Other software do not
@ -36,8 +41,12 @@ class ValidateNoColonsInName(pyblish.api.InstancePlugin):
return invalid
def process(self, instance):
if not self.is_active(instance.data):
return
invalid = self.get_invalid(instance)
if invalid:
raise RuntimeError(
f"Objects found with colon in name: {invalid}"
names = ", ".join(obj.name for obj in invalid)
raise PublishValidationError(
f"Objects found with colon in name: {names}"
)

View file

@ -3,10 +3,17 @@ from typing import List
import bpy
import pyblish.api
from openpype.pipeline.publish import (
OptionalPyblishPluginMixin,
PublishValidationError
)
import openpype.hosts.blender.api.action
class ValidateObjectIsInObjectMode(pyblish.api.InstancePlugin):
class ValidateObjectIsInObjectMode(
pyblish.api.InstancePlugin,
OptionalPyblishPluginMixin,
):
"""Validate that the objects in the instance are in Object Mode."""
order = pyblish.api.ValidatorOrder - 0.01
@ -25,8 +32,12 @@ class ValidateObjectIsInObjectMode(pyblish.api.InstancePlugin):
return invalid
def process(self, instance):
if not self.is_active(instance.data):
return
invalid = self.get_invalid(instance)
if invalid:
raise RuntimeError(
f"Object found in instance is not in Object Mode: {invalid}"
names = ", ".join(obj.name for obj in invalid)
raise PublishValidationError(
f"Object found in instance is not in Object Mode: {names}"
)

View file

@ -0,0 +1,26 @@
import bpy
import pyblish.api
from openpype.pipeline.publish import (
OptionalPyblishPluginMixin,
PublishValidationError
)
class ValidateRenderCameraIsSet(pyblish.api.InstancePlugin,
OptionalPyblishPluginMixin):
"""Validate that there is a camera set as active for rendering."""
order = pyblish.api.ValidatorOrder
hosts = ["blender"]
families = ["render"]
label = "Validate Render Camera Is Set"
optional = False
def process(self, instance):
if not self.is_active(instance.data):
return
if not bpy.context.scene.camera:
raise PublishValidationError("No camera is active for rendering.")

Some files were not shown because too many files have changed in this diff Show more