Merge remote-tracking branch 'origin/develop' into feature/OP-3933_RR-support

This commit is contained in:
Ondrej Samohel 2023-04-28 15:12:37 +02:00
commit 2e2aaaebaf
No known key found for this signature in database
GPG key ID: 02376E18990A97C6
109 changed files with 4601 additions and 865 deletions

View file

@ -35,6 +35,10 @@ body:
label: Version
description: What version are you running? Look to OpenPype Tray
options:
- 3.15.5
- 3.15.5-nightly.2
- 3.15.5-nightly.1
- 3.15.4
- 3.15.4-nightly.3
- 3.15.4-nightly.2
- 3.15.4-nightly.1
@ -131,10 +135,6 @@ body:
- 3.13.1-nightly.2
- 3.13.1-nightly.1
- 3.13.0
- 3.13.0-nightly.1
- 3.12.3-nightly.3
- 3.12.3-nightly.2
- 3.12.3-nightly.1
validations:
required: true
- type: dropdown
@ -166,8 +166,8 @@ body:
label: Are there any labels you wish to add?
description: Please search labels and identify those related to your bug.
options:
- label: I have added the relevant labels to the bug report.
required: true
- label: I have added the relevant labels to the bug report.
required: true
- type: textarea
id: logs
attributes:

View file

@ -45,3 +45,6 @@ jobs:
token: ${{ secrets.YNPUT_BOT_TOKEN }}
user_email: ${{ secrets.CI_EMAIL }}
user_name: ${{ secrets.CI_USER }}
cu_api_key: ${{ secrets.CLICKUP_API_KEY }}
cu_team_id: ${{ secrets.CLICKUP_TEAM_ID }}
cu_field_id: ${{ secrets.CLICKUP_RELEASE_FIELD_ID }}

View file

@ -25,5 +25,5 @@ jobs:
- name: Invoke pre-release workflow
uses: benc-uk/workflow-dispatch@v1
with:
workflow: Nightly Prerelease
workflow: prerelease.yml
token: ${{ secrets.YNPUT_BOT_TOKEN }}

View file

@ -65,3 +65,9 @@ jobs:
source_ref: 'main'
target_branch: 'develop'
commit_message_template: '[Automated] Merged {source_ref} into {target_branch}'
- name: Invoke Update bug report workflow
uses: benc-uk/workflow-dispatch@v1
with:
workflow: update_bug_report.yml
token: ${{ secrets.YNPUT_BOT_TOKEN }}

View file

@ -18,6 +18,8 @@ jobs:
uses: ynput/gha-populate-form-version@main
with:
github_token: ${{ secrets.YNPUT_BOT_TOKEN }}
github_user: ${{ secrets.CI_USER }}
github_email: ${{ secrets.CI_EMAIL }}
registry: github
dropdown: _version
limit_to: 100

File diff suppressed because it is too large Load diff

View file

@ -52,7 +52,7 @@ RUN yum -y install https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.n
# we need to build our own patchelf
WORKDIR /temp-patchelf
RUN git clone https://github.com/NixOS/patchelf.git . \
RUN git clone -b 0.17.0 --single-branch https://github.com/NixOS/patchelf.git . \
&& source scl_source enable devtoolset-7 \
&& ./bootstrap.sh \
&& ./configure \

View file

@ -415,11 +415,12 @@ def repack_version(directory):
@main.command()
@click.option("--project", help="Project name")
@click.option(
"--dirpath", help="Directory where package is stored", default=None
)
def pack_project(project, dirpath):
"--dirpath", help="Directory where package is stored", default=None)
@click.option(
"--dbonly", help="Store only Database data", default=False, is_flag=True)
def pack_project(project, dirpath, dbonly):
"""Create a package of project with all files and database dump."""
PypeCommands().pack_project(project, dirpath)
PypeCommands().pack_project(project, dirpath, dbonly)
@main.command()
@ -427,9 +428,11 @@ def pack_project(project, dirpath):
@click.option(
"--root", help="Replace root which was stored in project", default=None
)
def unpack_project(zipfile, root):
@click.option(
"--dbonly", help="Store only Database data", default=False, is_flag=True)
def unpack_project(zipfile, root, dbonly):
"""Create a package of project with all files and database dump."""
PypeCommands().unpack_project(zipfile, root)
PypeCommands().unpack_project(zipfile, root, dbonly)
@main.command()

View file

@ -69,6 +69,19 @@ def convert_ids(in_ids):
def get_projects(active=True, inactive=False, fields=None):
"""Yield all project entity documents.
Args:
active (Optional[bool]): Include active projects. Defaults to True.
inactive (Optional[bool]): Include inactive projects.
Defaults to False.
fields (Optional[Iterable[str]]): Fields that should be returned. All
fields are returned if 'None' is passed.
Yields:
dict: Project entity data which can be reduced to specified 'fields'.
None is returned if project with specified filters was not found.
"""
mongodb = get_project_database()
for project_name in mongodb.collection_names():
if project_name in ("system.indexes",):
@ -81,6 +94,20 @@ def get_projects(active=True, inactive=False, fields=None):
def get_project(project_name, active=True, inactive=True, fields=None):
"""Return project entity document by project name.
Args:
project_name (str): Name of project.
active (Optional[bool]): Allow active project. Defaults to True.
inactive (Optional[bool]): Allow inactive project. Defaults to True.
fields (Optional[Iterable[str]]): Fields that should be returned. All
fields are returned if 'None' is passed.
Returns:
Union[Dict, None]: Project entity data which can be reduced to
specified 'fields'. None is returned if project with specified
filters was not found.
"""
# Skip if both are disabled
if not active and not inactive:
return None
@ -124,17 +151,18 @@ def get_whole_project(project_name):
def get_asset_by_id(project_name, asset_id, fields=None):
"""Receive asset data by it's id.
"""Receive asset data by its id.
Args:
project_name (str): Name of project where to look for queried entities.
asset_id (Union[str, ObjectId]): Asset's id.
fields (Iterable[str]): Fields that should be returned. All fields are
returned if 'None' is passed.
fields (Optional[Iterable[str]]): Fields that should be returned. All
fields are returned if 'None' is passed.
Returns:
dict: Asset entity data.
None: Asset was not found by id.
Union[Dict, None]: Asset entity data which can be reduced to
specified 'fields'. None is returned if asset with specified
filters was not found.
"""
asset_id = convert_id(asset_id)
@ -147,17 +175,18 @@ def get_asset_by_id(project_name, asset_id, fields=None):
def get_asset_by_name(project_name, asset_name, fields=None):
"""Receive asset data by it's name.
"""Receive asset data by its name.
Args:
project_name (str): Name of project where to look for queried entities.
asset_name (str): Asset's name.
fields (Iterable[str]): Fields that should be returned. All fields are
returned if 'None' is passed.
fields (Optional[Iterable[str]]): Fields that should be returned. All
fields are returned if 'None' is passed.
Returns:
dict: Asset entity data.
None: Asset was not found by name.
Union[Dict, None]: Asset entity data which can be reduced to
specified 'fields'. None is returned if asset with specified
filters was not found.
"""
if not asset_name:
@ -195,8 +224,8 @@ def _get_assets(
parent_ids (Iterable[Union[str, ObjectId]]): Parent asset ids.
standard (bool): Query standard assets (type 'asset').
archived (bool): Query archived assets (type 'archived_asset').
fields (Iterable[str]): Fields that should be returned. All fields are
returned if 'None' is passed.
fields (Optional[Iterable[str]]): Fields that should be returned. All
fields are returned if 'None' is passed.
Returns:
Cursor: Query cursor as iterable which returns asset documents matching
@ -261,8 +290,8 @@ def get_assets(
asset_names (Iterable[str]): Name assets that should be found.
parent_ids (Iterable[Union[str, ObjectId]]): Parent asset ids.
archived (bool): Add also archived assets.
fields (Iterable[str]): Fields that should be returned. All fields are
returned if 'None' is passed.
fields (Optional[Iterable[str]]): Fields that should be returned. All
fields are returned if 'None' is passed.
Returns:
Cursor: Query cursor as iterable which returns asset documents matching
@ -300,8 +329,8 @@ def get_archived_assets(
be found.
asset_names (Iterable[str]): Name assets that should be found.
parent_ids (Iterable[Union[str, ObjectId]]): Parent asset ids.
fields (Iterable[str]): Fields that should be returned. All fields are
returned if 'None' is passed.
fields (Optional[Iterable[str]]): Fields that should be returned. All
fields are returned if 'None' is passed.
Returns:
Cursor: Query cursor as iterable which returns asset documents matching
@ -356,17 +385,18 @@ def get_asset_ids_with_subsets(project_name, asset_ids=None):
def get_subset_by_id(project_name, subset_id, fields=None):
"""Single subset entity data by it's id.
"""Single subset entity data by its id.
Args:
project_name (str): Name of project where to look for queried entities.
subset_id (Union[str, ObjectId]): Id of subset which should be found.
fields (Iterable[str]): Fields that should be returned. All fields are
returned if 'None' is passed.
fields (Optional[Iterable[str]]): Fields that should be returned. All
fields are returned if 'None' is passed.
Returns:
None: If subset with specified filters was not found.
Dict: Subset document which can be reduced to specified 'fields'.
Union[Dict, None]: Subset entity data which can be reduced to
specified 'fields'. None is returned if subset with specified
filters was not found.
"""
subset_id = convert_id(subset_id)
@ -379,20 +409,19 @@ def get_subset_by_id(project_name, subset_id, fields=None):
def get_subset_by_name(project_name, subset_name, asset_id, fields=None):
"""Single subset entity data by it's name and it's version id.
"""Single subset entity data by its name and its version id.
Args:
project_name (str): Name of project where to look for queried entities.
subset_name (str): Name of subset.
asset_id (Union[str, ObjectId]): Id of parent asset.
fields (Iterable[str]): Fields that should be returned. All fields are
returned if 'None' is passed.
fields (Optional[Iterable[str]]): Fields that should be returned. All
fields are returned if 'None' is passed.
Returns:
Union[None, Dict[str, Any]]: None if subset with specified filters was
not found or dict subset document which can be reduced to
specified 'fields'.
Union[Dict, None]: Subset entity data which can be reduced to
specified 'fields'. None is returned if subset with specified
filters was not found.
"""
if not subset_name:
return None
@ -434,8 +463,8 @@ def get_subsets(
names_by_asset_ids (dict[ObjectId, List[str]]): Complex filtering
using asset ids and list of subset names under the asset.
archived (bool): Look for archived subsets too.
fields (Iterable[str]): Fields that should be returned. All fields are
returned if 'None' is passed.
fields (Optional[Iterable[str]]): Fields that should be returned. All
fields are returned if 'None' is passed.
Returns:
Cursor: Iterable cursor yielding all matching subsets.
@ -520,17 +549,18 @@ def get_subset_families(project_name, subset_ids=None):
def get_version_by_id(project_name, version_id, fields=None):
"""Single version entity data by it's id.
"""Single version entity data by its id.
Args:
project_name (str): Name of project where to look for queried entities.
version_id (Union[str, ObjectId]): Id of version which should be found.
fields (Iterable[str]): Fields that should be returned. All fields are
returned if 'None' is passed.
fields (Optional[Iterable[str]]): Fields that should be returned. All
fields are returned if 'None' is passed.
Returns:
None: If version with specified filters was not found.
Dict: Version document which can be reduced to specified 'fields'.
Union[Dict, None]: Version entity data which can be reduced to
specified 'fields'. None is returned if version with specified
filters was not found.
"""
version_id = convert_id(version_id)
@ -546,18 +576,19 @@ def get_version_by_id(project_name, version_id, fields=None):
def get_version_by_name(project_name, version, subset_id, fields=None):
"""Single version entity data by it's name and subset id.
"""Single version entity data by its name and subset id.
Args:
project_name (str): Name of project where to look for queried entities.
version (int): name of version entity (it's version).
version (int): name of version entity (its version).
subset_id (Union[str, ObjectId]): Id of version which should be found.
fields (Iterable[str]): Fields that should be returned. All fields are
returned if 'None' is passed.
fields (Optional[Iterable[str]]): Fields that should be returned. All
fields are returned if 'None' is passed.
Returns:
None: If version with specified filters was not found.
Dict: Version document which can be reduced to specified 'fields'.
Union[Dict, None]: Version entity data which can be reduced to
specified 'fields'. None is returned if version with specified
filters was not found.
"""
subset_id = convert_id(subset_id)
@ -574,7 +605,7 @@ def get_version_by_name(project_name, version, subset_id, fields=None):
def version_is_latest(project_name, version_id):
"""Is version the latest from it's subset.
"""Is version the latest from its subset.
Note:
Hero versions are considered as latest.
@ -680,8 +711,8 @@ def get_versions(
versions (Iterable[int]): Version names (as integers).
Filter ignored if 'None' is passed.
hero (bool): Look also for hero versions.
fields (Iterable[str]): Fields that should be returned. All fields are
returned if 'None' is passed.
fields (Optional[Iterable[str]]): Fields that should be returned. All
fields are returned if 'None' is passed.
Returns:
Cursor: Iterable cursor yielding all matching versions.
@ -705,12 +736,13 @@ def get_hero_version_by_subset_id(project_name, subset_id, fields=None):
project_name (str): Name of project where to look for queried entities.
subset_id (Union[str, ObjectId]): Subset id under which
is hero version.
fields (Iterable[str]): Fields that should be returned. All fields are
returned if 'None' is passed.
fields (Optional[Iterable[str]]): Fields that should be returned. All
fields are returned if 'None' is passed.
Returns:
None: If hero version for passed subset id does not exists.
Dict: Hero version entity data.
Union[Dict, None]: Hero version entity data which can be reduced to
specified 'fields'. None is returned if hero version with specified
filters was not found.
"""
subset_id = convert_id(subset_id)
@ -730,17 +762,18 @@ def get_hero_version_by_subset_id(project_name, subset_id, fields=None):
def get_hero_version_by_id(project_name, version_id, fields=None):
"""Hero version by it's id.
"""Hero version by its id.
Args:
project_name (str): Name of project where to look for queried entities.
version_id (Union[str, ObjectId]): Hero version id.
fields (Iterable[str]): Fields that should be returned. All fields are
returned if 'None' is passed.
fields (Optional[Iterable[str]]): Fields that should be returned. All
fields are returned if 'None' is passed.
Returns:
None: If hero version with passed id was not found.
Dict: Hero version entity data.
Union[Dict, None]: Hero version entity data which can be reduced to
specified 'fields'. None is returned if hero version with specified
filters was not found.
"""
version_id = convert_id(version_id)
@ -773,8 +806,8 @@ def get_hero_versions(
should look for hero versions. Filter ignored if 'None' is passed.
version_ids (Iterable[Union[str, ObjectId]]): Hero version ids. Filter
ignored if 'None' is passed.
fields (Iterable[str]): Fields that should be returned. All fields are
returned if 'None' is passed.
fields (Optional[Iterable[str]]): Fields that should be returned. All
fields are returned if 'None' is passed.
Returns:
Cursor|list: Iterable yielding hero versions matching passed filters.
@ -801,8 +834,8 @@ def get_output_link_versions(project_name, version_id, fields=None):
project_name (str): Name of project where to look for queried entities.
version_id (Union[str, ObjectId]): Version id which can be used
as input link for other versions.
fields (Iterable[str]): Fields that should be returned. All fields are
returned if 'None' is passed.
fields (Optional[Iterable[str]]): Fields that should be returned. All
fields are returned if 'None' is passed.
Returns:
Iterable: Iterable cursor yielding versions that are used as input
@ -828,8 +861,8 @@ def get_last_versions(project_name, subset_ids, fields=None):
Args:
project_name (str): Name of project where to look for queried entities.
subset_ids (Iterable[Union[str, ObjectId]]): List of subset ids.
fields (Iterable[str]): Fields that should be returned. All fields are
returned if 'None' is passed.
fields (Optional[Iterable[str]]): Fields that should be returned. All
fields are returned if 'None' is passed.
Returns:
dict[ObjectId, int]: Key is subset id and value is last version name.
@ -913,12 +946,13 @@ def get_last_version_by_subset_id(project_name, subset_id, fields=None):
Args:
project_name (str): Name of project where to look for queried entities.
subset_id (Union[str, ObjectId]): Id of version which should be found.
fields (Iterable[str]): Fields that should be returned. All fields are
returned if 'None' is passed.
fields (Optional[Iterable[str]]): Fields that should be returned. All
fields are returned if 'None' is passed.
Returns:
None: If version with specified filters was not found.
Dict: Version document which can be reduced to specified 'fields'.
Union[Dict, None]: Version entity data which can be reduced to
specified 'fields'. None is returned if version with specified
filters was not found.
"""
subset_id = convert_id(subset_id)
@ -945,12 +979,13 @@ def get_last_version_by_subset_name(
asset_id (Union[str, ObjectId]): Asset id which is parent of passed
subset name.
asset_name (str): Asset name which is parent of passed subset name.
fields (Iterable[str]): Fields that should be returned. All fields are
returned if 'None' is passed.
fields (Optional[Iterable[str]]): Fields that should be returned. All
fields are returned if 'None' is passed.
Returns:
None: If version with specified filters was not found.
Dict: Version document which can be reduced to specified 'fields'.
Union[Dict, None]: Version entity data which can be reduced to
specified 'fields'. None is returned if version with specified
filters was not found.
"""
if not asset_id and not asset_name:
@ -972,18 +1007,18 @@ def get_last_version_by_subset_name(
def get_representation_by_id(project_name, representation_id, fields=None):
"""Representation entity data by it's id.
"""Representation entity data by its id.
Args:
project_name (str): Name of project where to look for queried entities.
representation_id (Union[str, ObjectId]): Representation id.
fields (Iterable[str]): Fields that should be returned. All fields are
returned if 'None' is passed.
fields (Optional[Iterable[str]]): Fields that should be returned. All
fields are returned if 'None' is passed.
Returns:
None: If representation with specified filters was not found.
Dict: Representation entity data which can be reduced
to specified 'fields'.
Union[Dict, None]: Representation entity data which can be reduced to
specified 'fields'. None is returned if representation with
specified filters was not found.
"""
if not representation_id:
@ -1004,19 +1039,19 @@ def get_representation_by_id(project_name, representation_id, fields=None):
def get_representation_by_name(
project_name, representation_name, version_id, fields=None
):
"""Representation entity data by it's name and it's version id.
"""Representation entity data by its name and its version id.
Args:
project_name (str): Name of project where to look for queried entities.
representation_name (str): Representation name.
version_id (Union[str, ObjectId]): Id of parent version entity.
fields (Iterable[str]): Fields that should be returned. All fields are
returned if 'None' is passed.
fields (Optional[Iterable[str]]): Fields that should be returned. All
fields are returned if 'None' is passed.
Returns:
None: If representation with specified filters was not found.
Dict: Representation entity data which can be reduced
to specified 'fields'.
Union[dict[str, Any], None]: Representation entity data which can be
reduced to specified 'fields'. None is returned if representation
with specified filters was not found.
"""
version_id = convert_id(version_id)
@ -1202,8 +1237,8 @@ def get_representations(
names_by_version_ids (dict[ObjectId, list[str]]): Complex filtering
using version ids and list of names under the version.
archived (bool): Output will also contain archived representations.
fields (Iterable[str]): Fields that should be returned. All fields are
returned if 'None' is passed.
fields (Optional[Iterable[str]]): Fields that should be returned. All
fields are returned if 'None' is passed.
Returns:
Cursor: Iterable cursor yielding all matching representations.
@ -1216,7 +1251,7 @@ def get_representations(
version_ids=version_ids,
context_filters=context_filters,
names_by_version_ids=names_by_version_ids,
standard=True,
standard=standard,
archived=archived,
fields=fields
)
@ -1247,8 +1282,8 @@ def get_archived_representations(
representation context fields.
names_by_version_ids (dict[ObjectId, List[str]]): Complex filtering
using version ids and list of names under the version.
fields (Iterable[str]): Fields that should be returned. All fields are
returned if 'None' is passed.
fields (Optional[Iterable[str]]): Fields that should be returned. All
fields are returned if 'None' is passed.
Returns:
Cursor: Iterable cursor yielding all matching representations.
@ -1377,8 +1412,8 @@ def get_thumbnail_id_from_source(project_name, src_type, src_id):
src_id (Union[str, ObjectId]): Id of source entity.
Returns:
ObjectId: Thumbnail id assigned to entity.
None: If Source entity does not have any thumbnail id assigned.
Union[ObjectId, None]: Thumbnail id assigned to entity. If Source
entity does not have any thumbnail id assigned.
"""
if not src_type or not src_id:
@ -1397,14 +1432,14 @@ def get_thumbnails(project_name, thumbnail_ids, fields=None):
"""Receive thumbnails entity data.
Thumbnail entity can be used to receive binary content of thumbnail based
on it's content and ThumbnailResolvers.
on its content and ThumbnailResolvers.
Args:
project_name (str): Name of project where to look for queried entities.
thumbnail_ids (Iterable[Union[str, ObjectId]]): Ids of thumbnail
entities.
fields (Iterable[str]): Fields that should be returned. All fields are
returned if 'None' is passed.
fields (Optional[Iterable[str]]): Fields that should be returned. All
fields are returned if 'None' is passed.
Returns:
cursor: Cursor of queried documents.
@ -1429,12 +1464,13 @@ def get_thumbnail(project_name, thumbnail_id, fields=None):
Args:
project_name (str): Name of project where to look for queried entities.
thumbnail_id (Union[str, ObjectId]): Id of thumbnail entity.
fields (Iterable[str]): Fields that should be returned. All fields are
returned if 'None' is passed.
fields (Optional[Iterable[str]]): Fields that should be returned. All
fields are returned if 'None' is passed.
Returns:
None: If thumbnail with specified id was not found.
Dict: Thumbnail entity data which can be reduced to specified 'fields'.
Union[Dict, None]: Thumbnail entity data which can be reduced to
specified 'fields'.None is returned if thumbnail with specified
filters was not found.
"""
if not thumbnail_id:
@ -1458,8 +1494,13 @@ def get_workfile_info(
project_name (str): Name of project where to look for queried entities.
asset_id (Union[str, ObjectId]): Id of asset entity.
task_name (str): Task name on asset.
fields (Iterable[str]): Fields that should be returned. All fields are
returned if 'None' is passed.
fields (Optional[Iterable[str]]): Fields that should be returned. All
fields are returned if 'None' is passed.
Returns:
Union[Dict, None]: Workfile entity data which can be reduced to
specified 'fields'.None is returned if workfile with specified
filters was not found.
"""
if not asset_id or not task_name or not filename:

View file

@ -5,6 +5,12 @@ import logging
import pymongo
import certifi
from bson.json_util import (
loads,
dumps,
CANONICAL_JSON_OPTIONS
)
if sys.version_info[0] == 2:
from urlparse import urlparse, parse_qs
else:
@ -15,6 +21,49 @@ class MongoEnvNotSet(Exception):
pass
def documents_to_json(docs):
"""Convert documents to json string.
Args:
Union[list[dict[str, Any]], dict[str, Any]]: Document/s to convert to
json string.
Returns:
str: Json string with mongo documents.
"""
return dumps(docs, json_options=CANONICAL_JSON_OPTIONS)
def load_json_file(filepath):
"""Load mongo documents from a json file.
Args:
filepath (str): Path to a json file.
Returns:
Union[dict[str, Any], list[dict[str, Any]]]: Loaded content from a
json file.
"""
if not os.path.exists(filepath):
raise ValueError("Path {} was not found".format(filepath))
with open(filepath, "r") as stream:
content = stream.read()
return loads("".join(content))
def get_project_database_name():
"""Name of database name where projects are available.
Returns:
str: Name of database name where projects are.
"""
return os.environ.get("AVALON_DB") or "avalon"
def _decompose_url(url):
"""Decompose mongo url to basic components.
@ -210,12 +259,102 @@ class OpenPypeMongoConnection:
return mongo_client
def get_project_database():
db_name = os.environ.get("AVALON_DB") or "avalon"
return OpenPypeMongoConnection.get_mongo_client()[db_name]
# ------ Helper Mongo functions ------
# Functions can be helpful with custom tools to backup/restore mongo state.
# Not meant as API functionality that should be used in production codebase!
def get_collection_documents(database_name, collection_name, as_json=False):
"""Query all documents from a collection.
Args:
database_name (str): Name of database where to look for collection.
collection_name (str): Name of collection where to look for collection.
as_json (Optional[bool]): Output should be a json string.
Default: 'False'
Returns:
Union[list[dict[str, Any]], str]: Queried documents.
"""
client = OpenPypeMongoConnection.get_mongo_client()
output = list(client[database_name][collection_name].find({}))
if as_json:
output = documents_to_json(output)
return output
def get_project_connection(project_name):
def store_collection(filepath, database_name, collection_name):
"""Store collection documents to a json file.
Args:
filepath (str): Path to a json file where documents will be stored.
database_name (str): Name of database where to look for collection.
collection_name (str): Name of collection to store.
"""
# Make sure directory for output file exists
dirpath = os.path.dirname(filepath)
if not os.path.isdir(dirpath):
os.makedirs(dirpath)
content = get_collection_documents(database_name, collection_name, True)
with open(filepath, "w") as stream:
stream.write(content)
def replace_collection_documents(docs, database_name, collection_name):
"""Replace all documents in a collection with passed documents.
Warnings:
All existing documents in collection will be removed if there are any.
Args:
docs (list[dict[str, Any]]): New documents.
database_name (str): Name of database where to look for collection.
collection_name (str): Name of collection where new documents are
uploaded.
"""
client = OpenPypeMongoConnection.get_mongo_client()
database = client[database_name]
if collection_name in database.list_collection_names():
database.drop_collection(collection_name)
col = database[collection_name]
col.insert_many(docs)
def restore_collection(filepath, database_name, collection_name):
"""Restore/replace collection from a json filepath.
Warnings:
All existing documents in collection will be removed if there are any.
Args:
filepath (str): Path to a json with documents.
database_name (str): Name of database where to look for collection.
collection_name (str): Name of collection where new documents are
uploaded.
"""
docs = load_json_file(filepath)
replace_collection_documents(docs, database_name, collection_name)
def get_project_database(database_name=None):
"""Database object where project collections are.
Args:
database_name (Optional[str]): Custom name of database.
Returns:
pymongo.database.Database: Collection related to passed project.
"""
if not database_name:
database_name = get_project_database_name()
return OpenPypeMongoConnection.get_mongo_client()[database_name]
def get_project_connection(project_name, database_name=None):
"""Direct access to mongo collection.
We're trying to avoid using direct access to mongo. This should be used
@ -223,13 +362,83 @@ def get_project_connection(project_name):
api calls for that.
Args:
project_name(str): Project name for which collection should be
project_name (str): Project name for which collection should be
returned.
database_name (Optional[str]): Custom name of database.
Returns:
pymongo.Collection: Collection realated to passed project.
pymongo.collection.Collection: Collection related to passed project.
"""
if not project_name:
raise ValueError("Invalid project name {}".format(str(project_name)))
return get_project_database()[project_name]
return get_project_database(database_name)[project_name]
def get_project_documents(project_name, database_name=None):
"""Query all documents from project collection.
Args:
project_name (str): Name of project.
database_name (Optional[str]): Name of mongo database where to look for
project.
Returns:
list[dict[str, Any]]: Documents in project collection.
"""
if not database_name:
database_name = get_project_database_name()
return get_collection_documents(database_name, project_name)
def store_project_documents(project_name, filepath, database_name=None):
"""Store project documents to a file as json string.
Args:
project_name (str): Name of project to store.
filepath (str): Path to a json file where output will be stored.
database_name (Optional[str]): Name of mongo database where to look for
project.
"""
if not database_name:
database_name = get_project_database_name()
store_collection(filepath, database_name, project_name)
def replace_project_documents(project_name, docs, database_name=None):
"""Replace documents in mongo with passed documents.
Warnings:
Existing project collection is removed if exists in mongo.
Args:
project_name (str): Name of project.
docs (list[dict[str, Any]]): Documents to restore.
database_name (Optional[str]): Name of mongo database where project
collection will be created.
"""
if not database_name:
database_name = get_project_database_name()
replace_collection_documents(docs, database_name, project_name)
def restore_project_documents(project_name, filepath, database_name=None):
"""Replace documents in mongo with passed documents.
Warnings:
Existing project collection is removed if exists in mongo.
Args:
project_name (str): Name of project.
filepath (str): File to json file with project documents.
database_name (Optional[str]): Name of mongo database where project
collection will be created.
"""
if not database_name:
database_name = get_project_database_name()
restore_collection(filepath, database_name, project_name)

View file

@ -42,13 +42,5 @@ class AddLastWorkfileToLaunchArgs(PreLaunchHook):
self.log.info("Current context does not have any workfile yet.")
return
# Determine whether to open workfile post initialization.
if self.host_name == "maya":
key = "open_workfile_post_initialization"
if self.data["project_settings"]["maya"][key]:
self.log.debug("Opening workfile post initialization.")
self.data["env"]["OPENPYPE_" + key.upper()] = "1"
return
# Add path to workfile to arguments
self.launch_context.launch_args.append(last_workfile)

View file

@ -53,10 +53,10 @@ class CollectWorkfile(pyblish.api.ContextPlugin):
"active": True,
"asset": asset_entity["name"],
"task": task,
"frameStart": asset_entity["data"]["frameStart"],
"frameEnd": asset_entity["data"]["frameEnd"],
"handleStart": asset_entity["data"]["handleStart"],
"handleEnd": asset_entity["data"]["handleEnd"],
"frameStart": context.data['frameStart'],
"frameEnd": context.data['frameEnd'],
"handleStart": context.data['handleStart'],
"handleEnd": context.data['handleEnd'],
"fps": asset_entity["data"]["fps"],
"resolutionWidth": asset_entity["data"].get(
"resolutionWidth",

View file

@ -1,7 +1,5 @@
import os
import qtawesome
from openpype.hosts.fusion.api import (
get_current_comp,
comp_lock_and_undo_chunk,
@ -28,6 +26,7 @@ class CreateSaver(Creator):
family = "render"
default_variants = ["Main", "Mask"]
description = "Fusion Saver to generate image sequence"
icon = "fa5.eye"
instance_attributes = ["reviewable"]
@ -89,9 +88,6 @@ class CreateSaver(Creator):
self._add_instance_to_context(created_instance)
def get_icon(self):
return qtawesome.icon("fa.eye", color="white")
def update_instances(self, update_list):
for created_inst, _changes in update_list:
new_data = created_inst.data_to_store()

View file

@ -1,5 +1,3 @@
import qtawesome
from openpype.hosts.fusion.api import (
get_current_comp
)
@ -15,6 +13,7 @@ class FusionWorkfileCreator(AutoCreator):
identifier = "workfile"
family = "workfile"
label = "Workfile"
icon = "fa5.file"
default_variant = "Main"
@ -104,6 +103,3 @@ class FusionWorkfileCreator(AutoCreator):
existing_instance["asset"] = asset_name
existing_instance["task"] = task_name
existing_instance["subset"] = subset_name
def get_icon(self):
return qtawesome.icon("fa.file-o", color="white")

View file

@ -104,3 +104,6 @@ class AbcLoader(load.LoaderPlugin):
node = container["node"]
node.destroy()
def switch(self, container, representation):
self.update(container, representation)

View file

@ -73,3 +73,6 @@ class AbcArchiveLoader(load.LoaderPlugin):
node = container["node"]
node.destroy()
def switch(self, container, representation):
self.update(container, representation)

View file

@ -106,3 +106,6 @@ class BgeoLoader(load.LoaderPlugin):
node = container["node"]
node.destroy()
def switch(self, container, representation):
self.update(container, representation)

View file

@ -192,3 +192,6 @@ class CameraLoader(load.LoaderPlugin):
new_node.moveToGoodPosition()
return new_node
def switch(self, container, representation):
self.update(container, representation)

View file

@ -125,3 +125,6 @@ class ImageLoader(load.LoaderPlugin):
prefix, padding, suffix = first_fname.rsplit(".", 2)
fname = ".".join([prefix, "$F{}".format(len(padding)), suffix])
return os.path.join(root, fname).replace("\\", "/")
def switch(self, container, representation):
self.update(container, representation)

View file

@ -79,3 +79,6 @@ class USDSublayerLoader(load.LoaderPlugin):
node = container["node"]
node.destroy()
def switch(self, container, representation):
self.update(container, representation)

View file

@ -79,3 +79,6 @@ class USDReferenceLoader(load.LoaderPlugin):
node = container["node"]
node.destroy()
def switch(self, container, representation):
self.update(container, representation)

View file

@ -102,3 +102,6 @@ class VdbLoader(load.LoaderPlugin):
node = container["node"]
node.destroy()
def switch(self, container, representation):
self.update(container, representation)

View file

@ -4,15 +4,14 @@ import hou
import pyblish.api
class CollectHoudiniCurrentFile(pyblish.api.InstancePlugin):
class CollectHoudiniCurrentFile(pyblish.api.ContextPlugin):
"""Inject the current working file into context"""
order = pyblish.api.CollectorOrder - 0.01
order = pyblish.api.CollectorOrder - 0.1
label = "Houdini Current File"
hosts = ["houdini"]
families = ["workfile"]
def process(self, instance):
def process(self, context):
"""Inject the current working file"""
current_file = hou.hipFile.path()
@ -34,26 +33,5 @@ class CollectHoudiniCurrentFile(pyblish.api.InstancePlugin):
"saved correctly."
)
instance.context.data["currentFile"] = current_file
folder, file = os.path.split(current_file)
filename, ext = os.path.splitext(file)
instance.data.update({
"setMembers": [current_file],
"frameStart": instance.context.data['frameStart'],
"frameEnd": instance.context.data['frameEnd'],
"handleStart": instance.context.data['handleStart'],
"handleEnd": instance.context.data['handleEnd']
})
instance.data['representations'] = [{
'name': ext.lstrip("."),
'ext': ext.lstrip("."),
'files': file,
"stagingDir": folder,
}]
self.log.info('Collected instance: {}'.format(file))
self.log.info('Scene path: {}'.format(current_file))
self.log.info('staging Dir: {}'.format(folder))
context.data["currentFile"] = current_file
self.log.info('Current workfile path: {}'.format(current_file))

View file

@ -17,6 +17,7 @@ class CollectHoudiniReviewData(pyblish.api.InstancePlugin):
# which isn't the actual frame range that this instance renders.
instance.data["handleStart"] = 0
instance.data["handleEnd"] = 0
instance.data["fps"] = instance.context.data["fps"]
# Get the camera from the rop node to collect the focal length
ropnode_path = instance.data["instance_node"]

View file

@ -0,0 +1,36 @@
import os
import pyblish.api
class CollectWorkfile(pyblish.api.InstancePlugin):
"""Inject workfile representation into instance"""
order = pyblish.api.CollectorOrder - 0.01
label = "Houdini Workfile Data"
hosts = ["houdini"]
families = ["workfile"]
def process(self, instance):
current_file = instance.context.data["currentFile"]
folder, file = os.path.split(current_file)
filename, ext = os.path.splitext(file)
instance.data.update({
"setMembers": [current_file],
"frameStart": instance.context.data['frameStart'],
"frameEnd": instance.context.data['frameEnd'],
"handleStart": instance.context.data['handleStart'],
"handleEnd": instance.context.data['handleEnd']
})
instance.data['representations'] = [{
'name': ext.lstrip("."),
'ext': ext.lstrip("."),
'files': file,
"stagingDir": folder,
}]
self.log.info('Collected instance: {}'.format(file))
self.log.info('staging Dir: {}'.format(folder))

View file

@ -128,14 +128,14 @@ class AvalonURIOutputProcessor(base.OutputProcessorBase):
if not asset_doc:
raise RuntimeError("Invalid asset name: '%s'" % asset)
formatted_anatomy = anatomy.format({
template_obj = anatomy.templates_obj["publish"]["path"]
path = template_obj.format_strict({
"project": PROJECT,
"asset": asset_doc["name"],
"subset": subset,
"representation": ext,
"version": 0 # stub version zero
})
path = formatted_anatomy["publish"]["path"]
# Remove the version folder
subset_folder = os.path.dirname(os.path.dirname(path))

View file

@ -0,0 +1,24 @@
# -*- coding: utf-8 -*-
"""Pre-launch to force 3ds max startup script."""
from openpype.lib import PreLaunchHook
import os
class ForceStartupScript(PreLaunchHook):
"""Inject OpenPype environment to 3ds max.
Note that this works in combination whit 3dsmax startup script that
is translating it back to PYTHONPATH for cases when 3dsmax drops PYTHONPATH
environment.
Hook `GlobalHostDataHook` must be executed before this hook.
"""
app_groups = ["3dsmax"]
order = 11
def execute(self):
startup_args = [
"-U",
"MAXScript",
f"{os.getenv('OPENPYPE_ROOT')}\\openpype\\hosts\\max\\startup\\startup.ms"] # noqa
self.launch_context.launch_args.append(startup_args)

View file

@ -32,12 +32,17 @@ from openpype.pipeline import (
load_container,
registered_host,
)
from openpype.pipeline.create import (
legacy_create,
get_legacy_creator_by_name,
)
from openpype.pipeline.context_tools import (
get_current_asset_name,
get_current_project_asset,
get_current_project_name,
get_current_task_name
)
from openpype.lib.profiles_filtering import filter_profiles
self = sys.modules[__name__]
@ -117,6 +122,18 @@ FLOAT_FPS = {23.98, 23.976, 29.97, 47.952, 59.94}
RENDERLIKE_INSTANCE_FAMILIES = ["rendering", "vrayscene"]
DISPLAY_LIGHTS_VALUES = [
"project_settings", "default", "all", "selected", "flat", "none"
]
DISPLAY_LIGHTS_LABELS = [
"Use Project Settings",
"Default Lighting",
"All Lights",
"Selected Lights",
"Flat Lighting",
"No Lights"
]
def get_main_window():
"""Acquire Maya's main window"""
@ -2140,17 +2157,23 @@ def set_scene_resolution(width, height, pixelAspect):
cmds.setAttr("%s.pixelAspect" % control_node, pixelAspect)
def get_frame_range():
"""Get the current assets frame range and handles."""
def get_frame_range(include_animation_range=False):
"""Get the current assets frame range and handles.
Args:
include_animation_range (bool, optional): Whether to include
`animationStart` and `animationEnd` keys to define the outer
range of the timeline. It is excluded by default.
Returns:
dict: Asset's expected frame range values.
"""
# Set frame start/end
project_name = get_current_project_name()
task_name = get_current_task_name()
asset_name = get_current_asset_name()
asset = get_asset_by_name(project_name, asset_name)
settings = get_project_settings(project_name)
include_handles_settings = settings["maya"]["include_handles"]
current_task = asset.get("data").get("tasks").get(task_name)
frame_start = asset["data"].get("frameStart")
frame_end = asset["data"].get("frameEnd")
@ -2162,32 +2185,39 @@ def get_frame_range():
handle_start = asset["data"].get("handleStart") or 0
handle_end = asset["data"].get("handleEnd") or 0
animation_start = frame_start
animation_end = frame_end
include_handles = include_handles_settings["include_handles_default"]
for item in include_handles_settings["per_task_type"]:
if current_task["type"] in item["task_type"]:
include_handles = item["include_handles"]
break
if include_handles:
animation_start -= int(handle_start)
animation_end += int(handle_end)
cmds.playbackOptions(
minTime=frame_start,
maxTime=frame_end,
animationStartTime=animation_start,
animationEndTime=animation_end
)
cmds.currentTime(frame_start)
return {
frame_range = {
"frameStart": frame_start,
"frameEnd": frame_end,
"handleStart": handle_start,
"handleEnd": handle_end
}
if include_animation_range:
# The animation range values are only included to define whether
# the Maya time slider should include the handles or not.
# Some usages of this function use the full dictionary to define
# instance attributes for which we want to exclude the animation
# keys. That is why these are excluded by default.
task_name = get_current_task_name()
settings = get_project_settings(project_name)
include_handles_settings = settings["maya"]["include_handles"]
current_task = asset.get("data").get("tasks").get(task_name)
animation_start = frame_start
animation_end = frame_end
include_handles = include_handles_settings["include_handles_default"]
for item in include_handles_settings["per_task_type"]:
if current_task["type"] in item["task_type"]:
include_handles = item["include_handles"]
break
if include_handles:
animation_start -= int(handle_start)
animation_end += int(handle_end)
frame_range["animationStart"] = animation_start
frame_range["animationEnd"] = animation_end
return frame_range
def reset_frame_range(playback=True, render=True, fps=True):
@ -2206,18 +2236,23 @@ def reset_frame_range(playback=True, render=True, fps=True):
)
set_scene_fps(fps)
frame_range = get_frame_range()
frame_range = get_frame_range(include_animation_range=True)
if not frame_range:
# No frame range data found for asset
return
frame_start = frame_range["frameStart"] - int(frame_range["handleStart"])
frame_end = frame_range["frameEnd"] + int(frame_range["handleEnd"])
frame_start = frame_range["frameStart"]
frame_end = frame_range["frameEnd"]
animation_start = frame_range["animationStart"]
animation_end = frame_range["animationEnd"]
if playback:
cmds.playbackOptions(minTime=frame_start)
cmds.playbackOptions(maxTime=frame_end)
cmds.playbackOptions(animationStartTime=frame_start)
cmds.playbackOptions(animationEndTime=frame_end)
cmds.playbackOptions(minTime=frame_start)
cmds.playbackOptions(maxTime=frame_end)
cmds.playbackOptions(
minTime=frame_start,
maxTime=frame_end,
animationStartTime=animation_start,
animationEndTime=animation_end
)
cmds.currentTime(frame_start)
if render:
@ -3855,3 +3890,98 @@ def get_all_children(nodes):
iterator.next() # noqa: B305
return list(traversed)
def get_capture_preset(task_name, task_type, subset, project_settings, log):
"""Get capture preset for playblasting.
Logic for transitioning from old style capture preset to new capture preset
profiles.
Args:
task_name (str): Task name.
take_type (str): Task type.
subset (str): Subset name.
project_settings (dict): Project settings.
log (object): Logging object.
"""
capture_preset = None
filtering_criteria = {
"hosts": "maya",
"families": "review",
"task_names": task_name,
"task_types": task_type,
"subset": subset
}
plugin_settings = project_settings["maya"]["publish"]["ExtractPlayblast"]
if plugin_settings["profiles"]:
profile = filter_profiles(
plugin_settings["profiles"],
filtering_criteria,
logger=log
)
capture_preset = profile.get("capture_preset")
else:
log.warning("No profiles present for Extract Playblast")
# Backward compatibility for deprecated Extract Playblast settings
# without profiles.
if capture_preset is None:
log.debug(
"Falling back to deprecated Extract Playblast capture preset "
"because no new style playblast profiles are defined."
)
capture_preset = plugin_settings["capture_preset"]
return capture_preset or {}
def create_rig_animation_instance(nodes, context, namespace, log=None):
"""Create an animation publish instance for loaded rigs.
See the RecreateRigAnimationInstance inventory action on how to use this
for loaded rig containers.
Arguments:
nodes (list): Member nodes of the rig instance.
context (dict): Representation context of the rig container
namespace (str): Namespace of the rig container
log (logging.Logger, optional): Logger to log to if provided
Returns:
None
"""
output = next((node for node in nodes if
node.endswith("out_SET")), None)
controls = next((node for node in nodes if
node.endswith("controls_SET")), None)
assert output, "No out_SET in rig, this is a bug."
assert controls, "No controls_SET in rig, this is a bug."
# Find the roots amongst the loaded nodes
roots = (
cmds.ls(nodes, assemblies=True, long=True) or
get_highest_in_hierarchy(nodes)
)
assert roots, "No root nodes in rig, this is a bug."
asset = legacy_io.Session["AVALON_ASSET"]
dependency = str(context["representation"]["_id"])
if log:
log.info("Creating subset: {}".format(namespace))
# Create the animation instance
creator_plugin = get_legacy_creator_by_name("CreateAnimation")
with maintained_selection():
cmds.select([output, controls] + roots, noExpand=True)
legacy_create(
creator_plugin,
name=namespace,
asset=asset,
options={"useSelection": True},
data={"dependencies": dependency}
)

View file

@ -234,26 +234,10 @@ class MayaPlaceholderLoadPlugin(PlaceholderPlugin, PlaceholderLoadMixin):
return self.get_load_plugin_options(options)
def cleanup_placeholder(self, placeholder, failed):
"""Hide placeholder, parent them to root
add them to placeholder set and register placeholder's parent
to keep placeholder info available for future use
"""Hide placeholder, add them to placeholder set
"""
node = placeholder._scene_identifier
node_parent = placeholder.data["parent"]
if node_parent:
cmds.setAttr(node + ".parent", node_parent, type="string")
if cmds.getAttr(node + ".index") < 0:
cmds.setAttr(node + ".index", placeholder.data["index"])
holding_sets = cmds.listSets(object=node)
if holding_sets:
for set in holding_sets:
cmds.sets(node, remove=set)
if cmds.listRelatives(node, p=True):
node = cmds.parent(node, world=True)[0]
cmds.sets(node, addElement=PLACEHOLDER_SET)
cmds.hide(node)
cmds.setAttr(node + ".hiddenInOutliner", True)
@ -286,8 +270,6 @@ class MayaPlaceholderLoadPlugin(PlaceholderPlugin, PlaceholderLoadMixin):
elif not cmds.sets(root, q=True):
return
if placeholder.data["parent"]:
cmds.parent(nodes_to_parent, placeholder.data["parent"])
# Move loaded nodes to correct index in outliner hierarchy
placeholder_form = cmds.xform(
placeholder.scene_identifier,

View file

@ -0,0 +1,29 @@
from openpype.lib import PreLaunchHook
class MayaPreAutoLoadPlugins(PreLaunchHook):
"""Define -noAutoloadPlugins command flag."""
# Before AddLastWorkfileToLaunchArgs
order = 9
app_groups = ["maya"]
def execute(self):
# Ignore if there's no last workfile to start.
if not self.data.get("start_last_workfile"):
return
maya_settings = self.data["project_settings"]["maya"]
enabled = maya_settings["explicit_plugins_loading"]["enabled"]
if enabled:
# Force disable the `AddLastWorkfileToLaunchArgs`.
self.data.pop("start_last_workfile")
# Force post initialization so our dedicated plug-in load can run
# prior to Maya opening a scene file.
key = "OPENPYPE_OPEN_WORKFILE_POST_INITIALIZATION"
self.launch_context.env[key] = "1"
self.log.debug("Explicit plugins loading.")
self.launch_context.launch_args.append("-noAutoloadPlugins")

View file

@ -0,0 +1,25 @@
from openpype.lib import PreLaunchHook
class MayaPreOpenWorkfilePostInitialization(PreLaunchHook):
"""Define whether open last workfile should run post initialize."""
# Before AddLastWorkfileToLaunchArgs.
order = 9
app_groups = ["maya"]
def execute(self):
# Ignore if there's no last workfile to start.
if not self.data.get("start_last_workfile"):
return
maya_settings = self.data["project_settings"]["maya"]
enabled = maya_settings["open_workfile_post_initialization"]
if enabled:
# Force disable the `AddLastWorkfileToLaunchArgs`.
self.data.pop("start_last_workfile")
self.log.debug("Opening workfile post initialization.")
key = "OPENPYPE_OPEN_WORKFILE_POST_INITIALIZATION"
self.launch_context.env[key] = "1"

View file

@ -7,6 +7,12 @@ from openpype.hosts.maya.api import (
class CreateAnimation(plugin.Creator):
"""Animation output for character rigs"""
# We hide the animation creator from the UI since the creation of it
# is automated upon loading a rig. There's an inventory action to recreate
# it for loaded rigs if by chance someone deleted the animation instance.
# Note: This setting is actually applied from project settings
enabled = False
name = "animationDefault"
label = "Animation"
family = "animation"

View file

@ -1,8 +1,14 @@
import os
from collections import OrderedDict
import json
from openpype.hosts.maya.api import (
lib,
plugin
)
from openpype.settings import get_project_settings
from openpype.pipeline import get_current_project_name, get_current_task_name
from openpype.client import get_asset_by_name
class CreateReview(plugin.Creator):
@ -32,6 +38,23 @@ class CreateReview(plugin.Creator):
super(CreateReview, self).__init__(*args, **kwargs)
data = OrderedDict(**self.data)
project_name = get_current_project_name()
asset_doc = get_asset_by_name(project_name, data["asset"])
task_name = get_current_task_name()
preset = lib.get_capture_preset(
task_name,
asset_doc["data"]["tasks"][task_name]["type"],
data["subset"],
get_project_settings(project_name),
self.log
)
if os.environ.get("OPENPYPE_DEBUG") == "1":
self.log.debug(
"Using preset: {}".format(
json.dumps(preset, indent=4, sort_keys=True)
)
)
# Option for using Maya or asset frame range in settings.
frame_range = lib.get_frame_range()
if self.useMayaTimeline:
@ -40,12 +63,14 @@ class CreateReview(plugin.Creator):
data[key] = value
data["fps"] = lib.collect_animation_data(fps=True)["fps"]
data["review_width"] = self.Width
data["review_height"] = self.Height
data["isolate"] = self.isolate
data["keepImages"] = self.keepImages
data["imagePlane"] = self.imagePlane
data["transparency"] = self.transparency
data["panZoom"] = self.panZoom
data["review_width"] = preset["Resolution"]["width"]
data["review_height"] = preset["Resolution"]["height"]
data["isolate"] = preset["Generic"]["isolate_view"]
data["imagePlane"] = preset["Viewport Options"]["imagePlane"]
data["panZoom"] = preset["Generic"]["pan_zoom"]
data["displayLights"] = lib.DISPLAY_LIGHTS_LABELS
self.data = data

View file

@ -0,0 +1,35 @@
from openpype.pipeline import (
InventoryAction,
get_representation_context
)
from openpype.hosts.maya.api.lib import (
create_rig_animation_instance,
get_container_members,
)
class RecreateRigAnimationInstance(InventoryAction):
"""Recreate animation publish instance for loaded rigs"""
label = "Recreate rig animation instance"
icon = "wrench"
color = "#888888"
@staticmethod
def is_compatible(container):
return (
container.get("loader") == "ReferenceLoader"
and container.get("name", "").startswith("rig")
)
def process(self, containers):
for container in containers:
# todo: delete an existing entry if it exist or skip creation
namespace = container["namespace"]
representation_id = container["representation"]
context = get_representation_context(representation_id)
nodes = get_container_members(container)
create_rig_animation_instance(nodes, context, namespace)

View file

@ -4,16 +4,12 @@ import contextlib
from maya import cmds
from openpype.settings import get_project_settings
from openpype.pipeline import legacy_io
from openpype.pipeline.create import (
legacy_create,
get_legacy_creator_by_name,
)
import openpype.hosts.maya.api.plugin
from openpype.hosts.maya.api.lib import (
maintained_selection,
get_container_members,
parent_nodes
parent_nodes,
create_rig_animation_instance
)
@ -114,9 +110,6 @@ class ReferenceLoader(openpype.hosts.maya.api.plugin.ReferenceLoader):
icon = "code-fork"
color = "orange"
# Name of creator class that will be used to create animation instance
animation_creator_name = "CreateAnimation"
def process_reference(self, context, name, namespace, options):
import maya.cmds as cmds
@ -220,37 +213,10 @@ class ReferenceLoader(openpype.hosts.maya.api.plugin.ReferenceLoader):
self._lock_camera_transforms(members)
def _post_process_rig(self, name, namespace, context, options):
output = next((node for node in self if
node.endswith("out_SET")), None)
controls = next((node for node in self if
node.endswith("controls_SET")), None)
assert output, "No out_SET in rig, this is a bug."
assert controls, "No controls_SET in rig, this is a bug."
# Find the roots amongst the loaded nodes
roots = cmds.ls(self[:], assemblies=True, long=True)
assert roots, "No root nodes in rig, this is a bug."
asset = legacy_io.Session["AVALON_ASSET"]
dependency = str(context["representation"]["_id"])
self.log.info("Creating subset: {}".format(namespace))
# Create the animation instance
creator_plugin = get_legacy_creator_by_name(
self.animation_creator_name
nodes = self[:]
create_rig_animation_instance(
nodes, context, namespace, log=self.log
)
with maintained_selection():
cmds.select([output, controls] + roots, noExpand=True)
legacy_create(
creator_plugin,
name=namespace,
asset=asset,
options={"useSelection": True},
data={"dependencies": dependency}
)
def _lock_camera_transforms(self, nodes):
cameras = cmds.ls(nodes, type="camera")

View file

@ -4,7 +4,7 @@ import pyblish.api
from openpype.client import get_subset_by_name
from openpype.pipeline import legacy_io, KnownPublishError
from openpype.hosts.maya.api.lib import get_attribute_input
from openpype.hosts.maya.api import lib
class CollectReview(pyblish.api.InstancePlugin):
@ -29,26 +29,37 @@ class CollectReview(pyblish.api.InstancePlugin):
# get cameras
members = instance.data['setMembers']
cameras = cmds.ls(members, long=True,
dag=True, cameras=True)
self.log.debug('members: {}'.format(members))
# validate required settings
if len(cameras) == 0:
raise KnownPublishError("No camera found in review "
"instance: {}".format(instance))
elif len(cameras) > 2:
raise KnownPublishError(
"Only a single camera is allowed for a review instance but "
"more than one camera found in review instance: {}. "
"Cameras found: {}".format(instance, ", ".join(cameras)))
camera = cameras[0]
self.log.debug('camera: {}'.format(camera))
cameras = cmds.ls(members, long=True, dag=True, cameras=True)
camera = cameras[0] if cameras else None
context = instance.context
objectset = context.data['objectsets']
# Convert enum attribute index to string for Display Lights.
index = instance.data.get("displayLights", 0)
display_lights = lib.DISPLAY_LIGHTS_VALUES[index]
if display_lights == "project_settings":
settings = instance.context.data["project_settings"]
settings = settings["maya"]["publish"]["ExtractPlayblast"]
settings = settings["capture_preset"]["Viewport Options"]
display_lights = settings["displayLights"]
# Collect camera focal length.
burninDataMembers = instance.data.get("burninDataMembers", {})
if camera is not None:
attr = camera + ".focalLength"
if lib.get_attribute_input(attr):
start = instance.data["frameStart"]
end = instance.data["frameEnd"] + 1
time_range = range(int(start), int(end))
focal_length = [cmds.getAttr(attr, time=t) for t in time_range]
else:
focal_length = cmds.getAttr(attr)
burninDataMembers["focalLength"] = focal_length
# Account for nested instances like model.
reviewable_subsets = list(set(members) & set(objectset))
if reviewable_subsets:
if len(reviewable_subsets) > 1:
@ -75,11 +86,14 @@ class CollectReview(pyblish.api.InstancePlugin):
else:
data['families'] = ['review']
data["cameras"] = cameras
data['review_camera'] = camera
data['frameStartFtrack'] = instance.data["frameStartHandle"]
data['frameEndFtrack'] = instance.data["frameEndHandle"]
data['frameStartHandle'] = instance.data["frameStartHandle"]
data['frameEndHandle'] = instance.data["frameEndHandle"]
data['handleStart'] = instance.data["handleStart"]
data['handleEnd'] = instance.data["handleEnd"]
data["frameStart"] = instance.data["frameStart"]
data["frameEnd"] = instance.data["frameEnd"]
data['step'] = instance.data['step']
@ -89,6 +103,8 @@ class CollectReview(pyblish.api.InstancePlugin):
data["isolate"] = instance.data["isolate"]
data["panZoom"] = instance.data.get("panZoom", False)
data["panel"] = instance.data["panel"]
data["displayLights"] = display_lights
data["burninDataMembers"] = burninDataMembers
# The review instance must be active
cmds.setAttr(str(instance) + '.active', 1)
@ -109,11 +125,14 @@ class CollectReview(pyblish.api.InstancePlugin):
self.log.debug("Existing subsets found, keep legacy name.")
instance.data['subset'] = legacy_subset_name
instance.data["cameras"] = cameras
instance.data['review_camera'] = camera
instance.data['frameStartFtrack'] = \
instance.data["frameStartHandle"]
instance.data['frameEndFtrack'] = \
instance.data["frameEndHandle"]
instance.data["displayLights"] = display_lights
instance.data["burninDataMembers"] = burninDataMembers
# make ftrack publishable
instance.data.setdefault("families", []).append('ftrack')
@ -155,20 +174,3 @@ class CollectReview(pyblish.api.InstancePlugin):
audio_data.append(get_audio_node_data(node))
instance.data["audio"] = audio_data
# Collect focal length.
attr = camera + ".focalLength"
if get_attribute_input(attr):
start = instance.data["frameStart"]
end = instance.data["frameEnd"] + 1
focal_length = [
cmds.getAttr(attr, time=t) for t in range(int(start), int(end))
]
else:
focal_length = cmds.getAttr(attr)
key = "focalLength"
try:
instance.data["burninDataMembers"][key] = focal_length
except KeyError:
instance.data["burninDataMembers"] = {key: focal_length}

View file

@ -280,7 +280,7 @@ class MakeTX(TextureProcessor):
# Do nothing if the source file is already a .tx file.
return TextureResult(
path=source,
file_hash=None, # todo: unknown texture hash?
file_hash=source_hash(source),
colorspace=colorspace,
transfer_mode=COPY
)

View file

@ -34,13 +34,15 @@ class ExtractPlayblast(publish.Extractor):
families = ["review"]
optional = True
capture_preset = {}
profiles = None
def _capture(self, preset):
self.log.info(
"Using preset:\n{}".format(
json.dumps(preset, sort_keys=True, indent=4)
if os.environ.get("OPENPYPE_DEBUG") == "1":
self.log.debug(
"Using preset: {}".format(
json.dumps(preset, indent=4, sort_keys=True)
)
)
)
path = capture.capture(log=self.log, **preset)
self.log.debug("playblast path {}".format(path))
@ -65,12 +67,25 @@ class ExtractPlayblast(publish.Extractor):
# get cameras
camera = instance.data["review_camera"]
preset = lib.load_capture_preset(data=self.capture_preset)
# Grab capture presets from the project settings
capture_presets = self.capture_preset
task_data = instance.data["anatomyData"].get("task", {})
capture_preset = lib.get_capture_preset(
task_data.get("name"),
task_data.get("type"),
instance.data["subset"],
instance.context.data["project_settings"],
self.log
)
preset = lib.load_capture_preset(data=capture_preset)
# "isolate_view" will already have been applied at creation, so we'll
# ignore it here.
preset.pop("isolate_view")
# Set resolution variables from capture presets
width_preset = capture_presets["Resolution"]["width"]
height_preset = capture_presets["Resolution"]["height"]
width_preset = capture_preset["Resolution"]["width"]
height_preset = capture_preset["Resolution"]["height"]
# Set resolution variables from asset values
asset_data = instance.data["assetEntity"]["data"]
asset_width = asset_data.get("resolutionWidth")
@ -115,14 +130,19 @@ class ExtractPlayblast(publish.Extractor):
cmds.currentTime(refreshFrameInt - 1, edit=True)
cmds.currentTime(refreshFrameInt, edit=True)
# Use displayLights setting from instance
key = "displayLights"
preset["viewport_options"][key] = instance.data[key]
# Override transparency if requested.
transparency = instance.data.get("transparency", 0)
if transparency != 0:
preset["viewport2_options"]["transparencyAlgorithm"] = transparency
# Isolate view is requested by having objects in the set besides a
# camera.
if preset.pop("isolate_view", False) and instance.data.get("isolate"):
# camera. If there is only 1 member it'll be the camera because we
# validate to have 1 camera only.
if instance.data["isolate"] and len(instance.data["setMembers"]) > 1:
preset["isolate"] = instance.data["setMembers"]
# Show/Hide image planes on request.
@ -157,7 +177,7 @@ class ExtractPlayblast(publish.Extractor):
)
override_viewport_options = (
capture_presets["Viewport Options"]["override_viewport_options"]
capture_preset["Viewport Options"]["override_viewport_options"]
)
# Force viewer to False in call to capture because we have our own
@ -233,8 +253,8 @@ class ExtractPlayblast(publish.Extractor):
collected_files = collected_files[0]
representation = {
"name": self.capture_preset["Codec"]["compression"],
"ext": self.capture_preset["Codec"]["compression"],
"name": capture_preset["Codec"]["compression"],
"ext": capture_preset["Codec"]["compression"],
"files": collected_files,
"stagingDir": stagingdir,
"frameStart": start,

View file

@ -1,6 +1,7 @@
import os
import glob
import tempfile
import json
import capture
@ -27,22 +28,25 @@ class ExtractThumbnail(publish.Extractor):
camera = instance.data["review_camera"]
maya_setting = instance.context.data["project_settings"]["maya"]
plugin_setting = maya_setting["publish"]["ExtractPlayblast"]
capture_preset = plugin_setting["capture_preset"]
task_data = instance.data["anatomyData"].get("task", {})
capture_preset = lib.get_capture_preset(
task_data.get("name"),
task_data.get("type"),
instance.data["subset"],
instance.context.data["project_settings"],
self.log
)
preset = lib.load_capture_preset(data=capture_preset)
# "isolate_view" will already have been applied at creation, so we'll
# ignore it here.
preset.pop("isolate_view")
override_viewport_options = (
capture_preset["Viewport Options"]["override_viewport_options"]
)
try:
preset = lib.load_capture_preset(data=capture_preset)
except KeyError as ke:
self.log.error("Error loading capture presets: {}".format(str(ke)))
preset = {}
self.log.info("Using viewport preset: {}".format(preset))
# preset["off_screen"] = False
preset["camera"] = camera
preset["start_frame"] = instance.data["frameStart"]
preset["end_frame"] = instance.data["frameStart"]
@ -58,10 +62,9 @@ class ExtractThumbnail(publish.Extractor):
"overscan": 1.0,
"depthOfField": cmds.getAttr("{0}.depthOfField".format(camera)),
}
capture_presets = capture_preset
# Set resolution variables from capture presets
width_preset = capture_presets["Resolution"]["width"]
height_preset = capture_presets["Resolution"]["height"]
width_preset = capture_preset["Resolution"]["width"]
height_preset = capture_preset["Resolution"]["height"]
# Set resolution variables from asset values
asset_data = instance.data["assetEntity"]["data"]
asset_width = asset_data.get("resolutionWidth")
@ -104,14 +107,19 @@ class ExtractThumbnail(publish.Extractor):
cmds.currentTime(refreshFrameInt - 1, edit=True)
cmds.currentTime(refreshFrameInt, edit=True)
# Use displayLights setting from instance
key = "displayLights"
preset["viewport_options"][key] = instance.data[key]
# Override transparency if requested.
transparency = instance.data.get("transparency", 0)
if transparency != 0:
preset["viewport2_options"]["transparencyAlgorithm"] = transparency
# Isolate view is requested by having objects in the set besides a
# camera.
if preset.pop("isolate_view", False) and instance.data.get("isolate"):
# camera. If there is only 1 member it'll be the camera because we
# validate to have 1 camera only.
if instance.data["isolate"] and len(instance.data["setMembers"]) > 1:
preset["isolate"] = instance.data["setMembers"]
# Show or Hide Image Plane
@ -139,6 +147,13 @@ class ExtractThumbnail(publish.Extractor):
preset.update(panel_preset)
cmds.setFocus(panel)
if os.environ.get("OPENPYPE_DEBUG") == "1":
self.log.debug(
"Using preset: {}".format(
json.dumps(preset, indent=4, sort_keys=True)
)
)
path = capture.capture(**preset)
playblast = self._fix_playblast_output_path(path)

View file

@ -65,9 +65,10 @@ class ExtractXgen(publish.Extractor):
)
cmds.delete(set(children) - set(shapes))
duplicate_transform = cmds.parent(
duplicate_transform, world=True
)[0]
if cmds.listRelatives(duplicate_transform, parent=True):
duplicate_transform = cmds.parent(
duplicate_transform, world=True
)[0]
duplicate_nodes.append(duplicate_transform)

View file

@ -0,0 +1,30 @@
import pyblish.api
from openpype.pipeline.publish import (
ValidateContentsOrder, PublishValidationError
)
class ValidateReview(pyblish.api.InstancePlugin):
    """Validate that a review instance has exactly one camera.

    Raises:
        PublishValidationError: When no camera, or more than one camera,
            was collected on the instance.
    """

    order = ValidateContentsOrder
    label = "Validate Review"
    families = ["review"]

    def process(self, instance):
        cameras = instance.data["cameras"]

        # validate required settings
        if len(cameras) == 0:
            raise PublishValidationError(
                "No camera found in review instance: {}".format(instance)
            )
        # BUG FIX: the condition was ``> 2`` which silently accepted two
        # cameras even though the message states only a single camera is
        # allowed; ``> 1`` matches the documented contract.
        elif len(cameras) > 1:
            raise PublishValidationError(
                "Only a single camera is allowed for a review instance but "
                "more than one camera found in review instance: {}. "
                "Cameras found: {}".format(instance, ", ".join(cameras))
            )

        self.log.debug('camera: {}'.format(instance.data["review_camera"]))

View file

@ -19,7 +19,7 @@ class ValidateSingleAssembly(pyblish.api.InstancePlugin):
order = ValidateContentsOrder
hosts = ['maya']
families = ['rig', 'animation']
families = ['rig']
label = 'Single Assembly'
def process(self, instance):

View file

@ -57,3 +57,16 @@ class ValidateXgen(pyblish.api.InstancePlugin):
json.dumps(inactive_modifiers, indent=4, sort_keys=True)
)
)
# We need a namespace else there will be a naming conflict when
# extracting because of stripping namespaces and parenting to world.
node_names = [instance.data["xgmPalette"]]
for _, connections in instance.data["xgenConnections"].items():
node_names.append(connections["transform"].split(".")[0])
non_namespaced_nodes = [n for n in node_names if ":" not in n]
if non_namespaced_nodes:
raise PublishValidationError(
"Could not find namespace on {}. Namespace is required for"
" xgen publishing.".format(non_namespaced_nodes)
)

View file

@ -1,5 +1,4 @@
import os
from functools import partial
from openpype.settings import get_project_settings
from openpype.pipeline import install_host
@ -13,24 +12,41 @@ install_host(host)
print("Starting OpenPype usersetup...")
project_settings = get_project_settings(os.environ['AVALON_PROJECT'])
# Loading plugins explicitly.
explicit_plugins_loading = project_settings["maya"]["explicit_plugins_loading"]
if explicit_plugins_loading["enabled"]:
def _explicit_load_plugins():
for plugin in explicit_plugins_loading["plugins_to_load"]:
if plugin["enabled"]:
print("Loading plug-in: " + plugin["name"])
try:
cmds.loadPlugin(plugin["name"], quiet=True)
except RuntimeError as e:
print(e)
# We need to load plugins deferred as loading them directly does not work
# correctly due to Maya's initialization.
cmds.evalDeferred(
_explicit_load_plugins,
lowestPriority=True
)
# Open Workfile Post Initialization.
key = "OPENPYPE_OPEN_WORKFILE_POST_INITIALIZATION"
if bool(int(os.environ.get(key, "0"))):
def _log_and_open():
path = os.environ["AVALON_LAST_WORKFILE"]
print("Opening \"{}\"".format(path))
cmds.file(path, open=True, force=True)
cmds.evalDeferred(
partial(
cmds.file,
os.environ["AVALON_LAST_WORKFILE"],
open=True,
force=True
),
_log_and_open,
lowestPriority=True
)
# Build a shelf.
settings = get_project_settings(os.environ['AVALON_PROJECT'])
shelf_preset = settings['maya'].get('project_shelf')
shelf_preset = project_settings['maya'].get('project_shelf')
if shelf_preset:
project = os.environ["AVALON_PROJECT"]

View file

@ -0,0 +1,97 @@
# -*- coding: utf-8 -*-
"""Tools for loading looks to vray proxies."""
import os
from collections import defaultdict
import logging
import six
import alembic.Abc
log = logging.getLogger(__name__)
def get_alembic_paths_by_property(filename, attr, verbose=False):
    # type: (str, str, bool) -> dict
    """Return attribute value per objects in the Alembic file.

    Reads an Alembic archive hierarchy (breadth-first over children) and
    retrieves the value from the `attr` properties on the objects.

    Args:
        filename (str): Full path to Alembic archive to read.
        attr (str): Id attribute.
        verbose (bool): Whether to verbosely log missing attributes.

    Returns:
        dict: Mapping of node full path with its id. Objects without the
            attribute (or without properties at all) are omitted.
    """
    # Normalize alembic path
    filename = os.path.normpath(filename)
    filename = filename.replace("\\", "/")
    filename = str(filename)  # path must be string
    try:
        archive = alembic.Abc.IArchive(filename)
    except RuntimeError:
        # invalid alembic file - probably vrmesh
        log.warning("{} is not an alembic file".format(filename))
        return {}
    root = archive.getTop()
    iterator = list(root.children)
    obj_ids = {}
    for obj in iterator:
        name = obj.getFullName()
        # include children for coming iterations
        iterator.extend(obj.children)
        props = obj.getProperties()
        if props.getNumProperties() == 0:
            # Skip those without properties, e.g. '/materials' in a gpuCache
            continue
        # The custom attribute is under the properties' first container under
        # the ".arbGeomParams"
        prop = props.getProperty(0)  # get base property
        _property = None
        try:
            geo_params = prop.getProperty('.arbGeomParams')
            _property = geo_params.getProperty(attr)
        except KeyError:
            if verbose:
                log.debug("Missing attr on: {0}".format(name))
            continue
        if not _property.isConstant():
            log.warning("Id not constant on: {0}".format(name))
        # Get first value sample
        value = _property.getValue()[0]
        obj_ids[name] = value
    return obj_ids
def get_alembic_ids_cache(path):
    # type: (str) -> dict
    """Map each ``cbId`` value to the node paths carrying it.

    Nodes without IDs are ignored.

    Args:
        path (str): Path to the Alembic archive.

    Returns:
        dict: Mapping of id to nodes in the Alembic.
    """
    grouped = {}
    node_ids = get_alembic_paths_by_property(path, attr="cbId")
    for node_path, node_id in six.iteritems(node_ids):
        grouped.setdefault(node_id, []).append(node_path)
    return grouped

View file

@ -250,7 +250,7 @@ class MayaLookAssignerWindow(QtWidgets.QWidget):
if vp in nodes:
vrayproxy_assign_look(vp, subset_name)
nodes = list(set(item["nodes"]).difference(vray_proxies))
nodes = list(set(nodes).difference(vray_proxies))
else:
self.echo(
"Could not assign to VRayProxy because vrayformaya plugin "
@ -260,17 +260,18 @@ class MayaLookAssignerWindow(QtWidgets.QWidget):
# Assign Arnold Standin look.
if cmds.pluginInfo("mtoa", query=True, loaded=True):
arnold_standins = set(cmds.ls(type="aiStandIn", long=True))
for standin in arnold_standins:
if standin in nodes:
arnold_standin.assign_look(standin, subset_name)
nodes = list(set(nodes).difference(arnold_standins))
else:
self.echo(
"Could not assign to aiStandIn because mtoa plugin is not "
"loaded."
)
nodes = list(set(item["nodes"]).difference(arnold_standins))
# Assign look
if nodes:
assign_look_by_version(nodes, version_id=version["_id"])

View file

@ -9,6 +9,7 @@ from openpype.pipeline import legacy_io
from openpype.client import get_last_version_by_subset_name
from openpype.hosts.maya import api
from . import lib
from .alembic import get_alembic_ids_cache
log = logging.getLogger(__name__)
@ -68,6 +69,11 @@ def get_nodes_by_id(standin):
(dict): Dictionary with node full name/path and id.
"""
path = cmds.getAttr(standin + ".dso")
if path.endswith(".abc"):
# Support alembic files directly
return get_alembic_ids_cache(path)
json_path = None
for f in os.listdir(os.path.dirname(path)):
if f.endswith(".json"):

View file

@ -1,108 +1,20 @@
# -*- coding: utf-8 -*-
"""Tools for loading looks to vray proxies."""
import os
from collections import defaultdict
import logging
import six
import alembic.Abc
from maya import cmds
from openpype.client import get_last_version_by_subset_name
from openpype.pipeline import legacy_io
import openpype.hosts.maya.lib as maya_lib
from . import lib
from .alembic import get_alembic_ids_cache
log = logging.getLogger(__name__)
def get_alembic_paths_by_property(filename, attr, verbose=False):
# type: (str, str, bool) -> dict
"""Return attribute value per objects in the Alembic file.
Reads an Alembic archive hierarchy and retrieves the
value from the `attr` properties on the objects.
Args:
filename (str): Full path to Alembic archive to read.
attr (str): Id attribute.
verbose (bool): Whether to verbosely log missing attributes.
Returns:
dict: Mapping of node full path with its id
"""
# Normalize alembic path
filename = os.path.normpath(filename)
filename = filename.replace("\\", "/")
filename = str(filename) # path must be string
try:
archive = alembic.Abc.IArchive(filename)
except RuntimeError:
# invalid alembic file - probably vrmesh
log.warning("{} is not an alembic file".format(filename))
return {}
root = archive.getTop()
iterator = list(root.children)
obj_ids = {}
for obj in iterator:
name = obj.getFullName()
# include children for coming iterations
iterator.extend(obj.children)
props = obj.getProperties()
if props.getNumProperties() == 0:
# Skip those without properties, e.g. '/materials' in a gpuCache
continue
# THe custom attribute is under the properties' first container under
# the ".arbGeomParams"
prop = props.getProperty(0) # get base property
_property = None
try:
geo_params = prop.getProperty('.arbGeomParams')
_property = geo_params.getProperty(attr)
except KeyError:
if verbose:
log.debug("Missing attr on: {0}".format(name))
continue
if not _property.isConstant():
log.warning("Id not constant on: {0}".format(name))
# Get first value sample
value = _property.getValue()[0]
obj_ids[name] = value
return obj_ids
def get_alembic_ids_cache(path):
# type: (str) -> dict
"""Build a id to node mapping in Alembic file.
Nodes without IDs are ignored.
Returns:
dict: Mapping of id to nodes in the Alembic.
"""
node_ids = get_alembic_paths_by_property(path, attr="cbId")
id_nodes = defaultdict(list)
for node, _id in six.iteritems(node_ids):
id_nodes[_id].append(node)
return dict(six.iteritems(id_nodes))
def assign_vrayproxy_shaders(vrayproxy, assignments):
# type: (str, dict) -> None
"""Assign shaders to content of Vray Proxy.

View file

@ -495,17 +495,17 @@ def get_avalon_knob_data(node, prefix="avalon:", create=True):
data (dict)
"""
data = {}
if AVALON_TAB not in node.knobs():
return data
# check if lists
if not isinstance(prefix, list):
prefix = list([prefix])
data = dict()
prefix = [prefix]
# loop prefix
for p in prefix:
# check if the node is avalon tracked
if AVALON_TAB not in node.knobs():
continue
try:
# check if data available on the node
test = node[AVALON_DATA_GROUP].value()
@ -516,8 +516,7 @@ def get_avalon_knob_data(node, prefix="avalon:", create=True):
if create:
node = set_avalon_knob_data(node)
return get_avalon_knob_data(node)
else:
return {}
return {}
# get data from filtered knobs
data.update({k.replace(p, ''): node[k].value()

View file

@ -2,7 +2,8 @@ from openpype.pipeline.create.creator_plugins import SubsetConvertorPlugin
from openpype.hosts.nuke.api.lib import (
INSTANCE_DATA_KNOB,
get_node_data,
get_avalon_knob_data
get_avalon_knob_data,
AVALON_TAB,
)
from openpype.hosts.nuke.api.plugin import convert_to_valid_instaces
@ -17,13 +18,15 @@ class LegacyConverted(SubsetConvertorPlugin):
legacy_found = False
# search for first available legacy item
for node in nuke.allNodes(recurseGroups=True):
if node.Class() in ["Viewer", "Dot"]:
continue
if get_node_data(node, INSTANCE_DATA_KNOB):
continue
if AVALON_TAB not in node.knobs():
continue
# get data from avalon knob
avalon_knob_data = get_avalon_knob_data(
node, ["avalon:", "ak:"], create=False)

View file

@ -190,7 +190,7 @@ class CollectNukeWrites(pyblish.api.InstancePlugin,
# make sure rendered sequence on farm will
# be used for extract review
if not instance.data["review"]:
if not instance.data.get("review"):
instance.data["useSequenceForReview"] = False
self.log.debug("instance.data: {}".format(pformat(instance.data)))

View file

@ -27,11 +27,12 @@ class ExtractWorkfileUrl(pyblish.api.ContextPlugin):
rep_name = instance.data.get("representations")[0].get("name")
template_data["representation"] = rep_name
template_data["ext"] = rep_name
anatomy_filled = anatomy.format(template_data)
template_filled = anatomy_filled["publish"]["path"]
template_obj = anatomy.templates_obj["publish"]["path"]
template_filled = template_obj.format_strict(template_data)
filepath = os.path.normpath(template_filled)
self.log.info("Using published scene for render {}".format(
filepath))
break
if not filepath:
self.log.info("Texture batch doesn't contain workfile.")

View file

@ -0,0 +1,43 @@
# -*- coding: utf-8 -*-
import pyblish.api
class CollectReviewInfo(pyblish.api.InstancePlugin):
    """Collect data required for review instances.

    ExtractReview plugin requires frame start/end, fps on instance data which
    are missing on instances from TrayPublishes.

    Warning:
        This is temporary solution to "make it work". Contains removed changes
        from https://github.com/ynput/OpenPype/pull/4383 reduced only for
        review instances.
    """

    label = "Collect Review Info"
    order = pyblish.api.CollectorOrder + 0.491
    families = ["review"]
    hosts = ["traypublisher"]

    def process(self, instance):
        asset_entity = instance.data.get("assetEntity")
        # Skip when frame info was already collected or when there is no
        # asset entity to take the values from.
        if instance.data.get("frameStart") is not None or not asset_entity:
            # BUG FIX: previous message claimed data was missing even when
            # the instance already had 'frameStart' collected.
            self.log.debug(
                "Instance already has frame data or asset entity is missing"
            )
            return

        asset_data = asset_entity["data"]
        # Store collected data for logging
        collected_data = {}
        for key in (
            "fps",
            "frameStart",
            "frameEnd",
            "handleStart",
            "handleEnd",
        ):
            # Keep values already set on the instance and skip keys the
            # asset does not define.
            if key in instance.data or key not in asset_data:
                continue
            value = asset_data[key]
            collected_data[key] = value
            instance.data[key] = value
        self.log.debug("Collected data: {}".format(str(collected_data)))

View file

@ -144,7 +144,7 @@ class ExtractSequence(pyblish.api.Extractor):
# Fill tags and new families from project settings
tags = []
if family_lowered == "review":
if "review" in instance.data["families"]:
tags.append("review")
# Sequence of one frame

View file

@ -2,8 +2,10 @@ import os
import unreal
from openpype.settings import get_project_settings
from openpype.pipeline import Anatomy
from openpype.hosts.unreal.api import pipeline
from openpype.widgets.message_window import Window
queue = None
@ -32,11 +34,20 @@ def start_rendering():
"""
Start the rendering process.
"""
print("Starting rendering...")
unreal.log("Starting rendering...")
# Get selected sequences
assets = unreal.EditorUtilityLibrary.get_selected_assets()
if not assets:
Window(
parent=None,
title="No assets selected",
message="No assets selected. Select a render instance.",
level="warning")
raise RuntimeError(
"No assets selected. You need to select a render instance.")
# instances = pipeline.ls_inst()
instances = [
a for a in assets
@ -66,6 +77,13 @@ def start_rendering():
ar = unreal.AssetRegistryHelpers.get_asset_registry()
data = get_project_settings(project)
config = None
config_path = str(data.get("unreal").get("render_config_path"))
if config_path and unreal.EditorAssetLibrary.does_asset_exist(config_path):
unreal.log("Found saved render configuration")
config = ar.get_asset_by_object_path(config_path).get_asset()
for i in inst_data:
sequence = ar.get_asset_by_object_path(i["sequence"]).get_asset()
@ -81,55 +99,80 @@ def start_rendering():
# Get all the sequences to render. If there are subsequences,
# add them and their frame ranges to the render list. We also
# use the names for the output paths.
for s in sequences:
subscenes = pipeline.get_subsequences(s.get('sequence'))
for seq in sequences:
subscenes = pipeline.get_subsequences(seq.get('sequence'))
if subscenes:
for ss in subscenes:
for sub_seq in subscenes:
sequences.append({
"sequence": ss.get_sequence(),
"output": (f"{s.get('output')}/"
f"{ss.get_sequence().get_name()}"),
"sequence": sub_seq.get_sequence(),
"output": (f"{seq.get('output')}/"
f"{sub_seq.get_sequence().get_name()}"),
"frame_range": (
ss.get_start_frame(), ss.get_end_frame())
sub_seq.get_start_frame(), sub_seq.get_end_frame())
})
else:
# Avoid rendering camera sequences
if "_camera" not in s.get('sequence').get_name():
render_list.append(s)
if "_camera" not in seq.get('sequence').get_name():
render_list.append(seq)
# Create the rendering jobs and add them to the queue.
for r in render_list:
for render_setting in render_list:
job = queue.allocate_new_job(unreal.MoviePipelineExecutorJob)
job.sequence = unreal.SoftObjectPath(i["master_sequence"])
job.map = unreal.SoftObjectPath(i["master_level"])
job.author = "OpenPype"
# If we have a saved configuration, copy it to the job.
if config:
job.get_configuration().copy_from(config)
# User data could be used to pass data to the job, that can be
# read in the job's OnJobFinished callback. We could,
# for instance, pass the AvalonPublishInstance's path to the job.
# job.user_data = ""
output_dir = render_setting.get('output')
shot_name = render_setting.get('sequence').get_name()
settings = job.get_configuration().find_or_add_setting_by_class(
unreal.MoviePipelineOutputSetting)
settings.output_resolution = unreal.IntPoint(1920, 1080)
settings.custom_start_frame = r.get("frame_range")[0]
settings.custom_end_frame = r.get("frame_range")[1]
settings.custom_start_frame = render_setting.get("frame_range")[0]
settings.custom_end_frame = render_setting.get("frame_range")[1]
settings.use_custom_playback_range = True
settings.file_name_format = "{sequence_name}.{frame_number}"
settings.output_directory.path = f"{render_dir}/{r.get('output')}"
renderPass = job.get_configuration().find_or_add_setting_by_class(
unreal.MoviePipelineDeferredPassBase)
renderPass.disable_multisample_effects = True
settings.file_name_format = f"{shot_name}" + ".{frame_number}"
settings.output_directory.path = f"{render_dir}/{output_dir}"
job.get_configuration().find_or_add_setting_by_class(
unreal.MoviePipelineImageSequenceOutput_PNG)
unreal.MoviePipelineDeferredPassBase)
render_format = data.get("unreal").get("render_format", "png")
if render_format == "png":
job.get_configuration().find_or_add_setting_by_class(
unreal.MoviePipelineImageSequenceOutput_PNG)
elif render_format == "exr":
job.get_configuration().find_or_add_setting_by_class(
unreal.MoviePipelineImageSequenceOutput_EXR)
elif render_format == "jpg":
job.get_configuration().find_or_add_setting_by_class(
unreal.MoviePipelineImageSequenceOutput_JPG)
elif render_format == "bmp":
job.get_configuration().find_or_add_setting_by_class(
unreal.MoviePipelineImageSequenceOutput_BMP)
# If there are jobs in the queue, start the rendering process.
if queue.get_jobs():
global executor
executor = unreal.MoviePipelinePIEExecutor()
preroll_frames = data.get("unreal").get("preroll_frames", 0)
settings = unreal.MoviePipelinePIEExecutorSettings()
settings.set_editor_property(
"initial_delay_frame_count", preroll_frames)
executor.on_executor_finished_delegate.add_callable_unique(
_queue_finish_callback)
executor.on_individual_job_finished_delegate.add_callable_unique(

View file

@ -61,10 +61,10 @@ class UnrealPrelaunchHook(PreLaunchHook):
project_name=project_doc["name"]
)
# Fill templates
filled_anatomy = anatomy.format(workdir_data)
template_obj = anatomy.templates_obj[workfile_template_key]["file"]
# Return filename
return filled_anatomy[workfile_template_key]["file"]
return template_obj.format_strict(workdir_data)
def exec_plugin_install(self, engine_path: Path, env: dict = None):
# set up the QThread and worker with necessary signals

View file

@ -1,14 +1,22 @@
# -*- coding: utf-8 -*-
from pathlib import Path
import unreal
from openpype.pipeline import CreatorError
from openpype.hosts.unreal.api.pipeline import (
get_subsequences
UNREAL_VERSION,
create_folder,
get_subsequences,
)
from openpype.hosts.unreal.api.plugin import (
UnrealAssetCreator
)
from openpype.lib import UILabelDef
from openpype.lib import (
UILabelDef,
UISeparatorDef,
BoolDef,
NumberDef
)
class CreateRender(UnrealAssetCreator):
@ -19,7 +27,92 @@ class CreateRender(UnrealAssetCreator):
family = "render"
icon = "eye"
def create(self, subset_name, instance_data, pre_create_data):
def create_instance(
    self, instance_data, subset_name, pre_create_data,
    selected_asset_path, master_seq, master_lvl, seq_data
):
    """Fill instance data from the resolved sequence and delegate creation.

    Args:
        instance_data (dict): Data to be stored on the instance.
        subset_name (str): Name of the subset to create.
        pre_create_data (dict): Pre-create attribute values.
        selected_asset_path (str): Path of the selected level sequence.
        master_seq (str): Path of the master sequence.
        master_lvl (str): Path of the master level.
        seq_data (dict): Sequence info with 'output' and 'frame_range'.
    """
    frame_range = seq_data.get('frame_range')
    instance_data.update({
        "members": [selected_asset_path],
        "sequence": selected_asset_path,
        "master_sequence": master_seq,
        "master_level": master_lvl,
        "output": seq_data.get('output'),
        "frameStart": frame_range[0],
        "frameEnd": frame_range[1],
    })
    super(CreateRender, self).create(
        subset_name,
        instance_data,
        pre_create_data)
def create_with_new_sequence(
self, subset_name, instance_data, pre_create_data
):
# If the option to create a new level sequence is selected,
# create a new level sequence and a master level.
root = f"/Game/OpenPype/Sequences"
# Create a new folder for the sequence in root
sequence_dir_name = create_folder(root, subset_name)
sequence_dir = f"{root}/{sequence_dir_name}"
unreal.log_warning(f"sequence_dir: {sequence_dir}")
# Create the level sequence
asset_tools = unreal.AssetToolsHelpers.get_asset_tools()
seq = asset_tools.create_asset(
asset_name=subset_name,
package_path=sequence_dir,
asset_class=unreal.LevelSequence,
factory=unreal.LevelSequenceFactoryNew())
seq.set_playback_start(pre_create_data.get("start_frame"))
seq.set_playback_end(pre_create_data.get("end_frame"))
pre_create_data["members"] = [seq.get_path_name()]
unreal.EditorAssetLibrary.save_asset(seq.get_path_name())
# Create the master level
if UNREAL_VERSION.major >= 5:
curr_level = unreal.LevelEditorSubsystem().get_current_level()
else:
world = unreal.EditorLevelLibrary.get_editor_world()
levels = unreal.EditorLevelUtils.get_levels(world)
curr_level = levels[0] if len(levels) else None
if not curr_level:
raise RuntimeError("No level loaded.")
curr_level_path = curr_level.get_outer().get_path_name()
# If the level path does not start with "/Game/", the current
# level is a temporary, unsaved level.
if curr_level_path.startswith("/Game/"):
if UNREAL_VERSION.major >= 5:
unreal.LevelEditorSubsystem().save_current_level()
else:
unreal.EditorLevelLibrary.save_current_level()
ml_path = f"{sequence_dir}/{subset_name}_MasterLevel"
if UNREAL_VERSION.major >= 5:
unreal.LevelEditorSubsystem().new_level(ml_path)
else:
unreal.EditorLevelLibrary.new_level(ml_path)
seq_data = {
"sequence": seq,
"output": f"{seq.get_name()}",
"frame_range": (
seq.get_playback_start(),
seq.get_playback_end())}
self.create_instance(
instance_data, subset_name, pre_create_data,
seq.get_path_name(), seq.get_path_name(), ml_path, seq_data)
def create_from_existing_sequence(
self, subset_name, instance_data, pre_create_data
):
ar = unreal.AssetRegistryHelpers.get_asset_registry()
sel_objects = unreal.EditorUtilityLibrary.get_selected_assets()
@ -27,8 +120,8 @@ class CreateRender(UnrealAssetCreator):
a.get_path_name() for a in sel_objects
if a.get_class().get_name() == "LevelSequence"]
if not selection:
raise CreatorError("Please select at least one Level Sequence.")
if len(selection) == 0:
raise RuntimeError("Please select at least one Level Sequence.")
seq_data = None
@ -42,28 +135,38 @@ class CreateRender(UnrealAssetCreator):
f"Skipping {selected_asset.get_name()}. It isn't a Level "
"Sequence.")
# The asset name is the third element of the path which
# contains the map.
# To take the asset name, we remove from the path the prefix
# "/Game/OpenPype/" and then we split the path by "/".
sel_path = selected_asset_path
asset_name = sel_path.replace("/Game/OpenPype/", "").split("/")[0]
if pre_create_data.get("use_hierarchy"):
# The asset name is the the third element of the path which
# contains the map.
# To take the asset name, we remove from the path the prefix
# "/Game/OpenPype/" and then we split the path by "/".
sel_path = selected_asset_path
asset_name = sel_path.replace(
"/Game/OpenPype/", "").split("/")[0]
search_path = f"/Game/OpenPype/{asset_name}"
else:
search_path = Path(selected_asset_path).parent.as_posix()
# Get the master sequence and the master level.
# There should be only one sequence and one level in the directory.
ar_filter = unreal.ARFilter(
class_names=["LevelSequence"],
package_paths=[f"/Game/OpenPype/{asset_name}"],
recursive_paths=False)
sequences = ar.get_assets(ar_filter)
master_seq = sequences[0].get_asset().get_path_name()
master_seq_obj = sequences[0].get_asset()
ar_filter = unreal.ARFilter(
class_names=["World"],
package_paths=[f"/Game/OpenPype/{asset_name}"],
recursive_paths=False)
levels = ar.get_assets(ar_filter)
master_lvl = levels[0].get_asset().get_path_name()
try:
ar_filter = unreal.ARFilter(
class_names=["LevelSequence"],
package_paths=[search_path],
recursive_paths=False)
sequences = ar.get_assets(ar_filter)
master_seq = sequences[0].get_asset().get_path_name()
master_seq_obj = sequences[0].get_asset()
ar_filter = unreal.ARFilter(
class_names=["World"],
package_paths=[search_path],
recursive_paths=False)
levels = ar.get_assets(ar_filter)
master_lvl = levels[0].get_asset().get_path_name()
except IndexError:
raise RuntimeError(
f"Could not find the hierarchy for the selected sequence.")
# If the selected asset is the master sequence, we get its data
# and then we create the instance for the master sequence.
@ -79,7 +182,8 @@ class CreateRender(UnrealAssetCreator):
master_seq_obj.get_playback_start(),
master_seq_obj.get_playback_end())}
if selected_asset_path == master_seq:
if (selected_asset_path == master_seq or
pre_create_data.get("use_hierarchy")):
seq_data = master_seq_data
else:
seq_data_list = [master_seq_data]
@ -119,20 +223,54 @@ class CreateRender(UnrealAssetCreator):
"sub-sequence of the master sequence.")
continue
instance_data["members"] = [selected_asset_path]
instance_data["sequence"] = selected_asset_path
instance_data["master_sequence"] = master_seq
instance_data["master_level"] = master_lvl
instance_data["output"] = seq_data.get('output')
instance_data["frameStart"] = seq_data.get('frame_range')[0]
instance_data["frameEnd"] = seq_data.get('frame_range')[1]
self.create_instance(
instance_data, subset_name, pre_create_data,
selected_asset_path, master_seq, master_lvl, seq_data)
super(CreateRender, self).create(
subset_name,
instance_data,
pre_create_data)
def create(self, subset_name, instance_data, pre_create_data):
    """Dispatch creation to the new-sequence or existing-sequence path."""
    handler = (
        self.create_with_new_sequence
        if pre_create_data.get("create_seq")
        else self.create_from_existing_sequence
    )
    handler(subset_name, instance_data, pre_create_data)
def get_pre_create_attr_defs(self):
    """Build the pre-create attribute definitions for the creator UI.

    Returns:
        list: UI definitions shown to the user before instance creation.
    """
    return [
        UILabelDef(
            "Select a Level Sequence to render or create a new one."
        ),
        BoolDef(
            "create_seq",
            label="Create a new Level Sequence",
            default=False
        ),
        UILabelDef(
            "WARNING: If you create a new Level Sequence, the current\n"
            "level will be saved and a new Master Level will be created."
        ),
        NumberDef(
            "start_frame",
            label="Start Frame",
            default=0,
            minimum=-999999,
            maximum=999999
        ),
        NumberDef(
            "end_frame",
            # BUG FIX: this control was labeled "Start Frame" (copy-paste
            # from the definition above) even though it sets the end frame.
            label="End Frame",
            default=150,
            minimum=-999999,
            maximum=999999
        ),
        UISeparatorDef(),
        UILabelDef(
            "The following settings are valid only if you are not\n"
            "creating a new sequence."
        ),
        BoolDef(
            "use_hierarchy",
            label="Use Hierarchy",
            default=False
        ),
    ]

View file

@ -0,0 +1,42 @@
import clique
import pyblish.api
class ValidateSequenceFrames(pyblish.api.InstancePlugin):
    """Ensure the sequence of frames is complete.

    The files found in the folder are checked against the frameStart and
    frameEnd of the instance. If the first or last file is not
    corresponding with the first or last frame it is flagged as invalid.

    Raises:
        ValueError: When the detected frame range does not match the
            asset's frameStart/frameEnd.
        AssertionError: When the files do not assemble into a single
            collection or when frames are missing inside the range.
    """

    order = pyblish.api.ValidatorOrder
    label = "Validate Sequence Frames"
    families = ["render"]
    hosts = ["unreal"]
    optional = True

    def process(self, instance):
        representations = instance.data.get("representations")
        # The expected range and the clique pattern are the same for every
        # representation, so resolve them once outside the loop.
        data = instance.data.get("assetEntity", {}).get("data", {})
        patterns = [clique.PATTERNS["frames"]]

        # FIX: loop variable renamed from ``repr`` which shadowed the
        # ``repr`` builtin.
        for repre in representations:
            collections, remainder = clique.assemble(
                repre["files"], minimum_items=1, patterns=patterns)

            assert not remainder, "Must not have remainder"
            assert len(collections) == 1, "Must detect single collection"
            collection = collections[0]
            frames = list(collection.indexes)

            current_range = (frames[0], frames[-1])
            required_range = (data["frameStart"],
                              data["frameEnd"])

            if current_range != required_range:
                raise ValueError(f"Invalid frame range: {current_range} - "
                                 f"expected: {required_range}")

            missing = collection.holes().indexes
            assert not missing, "Missing frames: %s" % (missing,)

View file

@ -1,16 +1,19 @@
"""These lib functions are primarily for development purposes.
"""These lib functions are for development purposes.
WARNING: This is not meant for production data.
WARNING:
This is not meant for production data. Please don't write code which is
dependent on functionality here.
Goal is to be able create package of current state of project with related
documents from mongo and files from disk to zip file and then be able recreate
the project based on the zip.
Goal is to be able to create package of current state of project with related
documents from mongo and files from disk to zip file and then be able
to recreate the project based on the zip.
This gives ability to create project where a changes and tests can be done.
Keep in mind that to be able create a package of project has few requirements.
Possible requirement should be listed in 'pack_project' function.
Keep in mind that to be able to create a package of project has few
requirements. Possible requirement should be listed in 'pack_project' function.
"""
import os
import json
import platform
@ -19,16 +22,12 @@ import shutil
import datetime
import zipfile
from bson.json_util import (
loads,
dumps,
CANONICAL_JSON_OPTIONS
from openpype.client.mongo import (
load_json_file,
get_project_connection,
replace_project_documents,
store_project_documents,
)
from openpype.client import (
get_project,
get_whole_project,
)
from openpype.pipeline import AvalonMongoDB
DOCUMENTS_FILE_NAME = "database"
METADATA_FILE_NAME = "metadata"
@ -43,7 +42,52 @@ def add_timestamp(filepath):
return new_base + ext
def pack_project(project_name, destination_dir=None):
def get_project_document(project_name, database_name=None):
    """Fetch the project document from mongo.

    Function 'get_project' from client api cannot be used as it does not
    allow to change which 'database_name' is used.

    Args:
        project_name (str): Name of project.
        database_name (Optional[str]): Name of mongo database where to look
            for project.

    Returns:
        Union[dict[str, Any], None]: Project document or None.
    """
    collection = get_project_connection(project_name, database_name)
    project_filter = {"type": "project"}
    return collection.find_one(project_filter)
def _pack_files_to_zip(zip_stream, source_path, root_path):
    """Write every file below 'source_path' into an open zip stream.

    Archive entries are stored under the shared project-files directory,
    keeping the path relative to 'root_path'.

    Args:
        zip_stream (zipfile.ZipFile): Stream to a zipfile.
        source_path (str): Path to a directory where files are.
        root_path (str): Path to a directory which is used for calculation
            of relative path.
    """
    for dirpath, _dirnames, basenames in os.walk(source_path):
        for basename in basenames:
            full_path = os.path.join(dirpath, basename)
            # TODO add one more folder
            relative_path = os.path.relpath(full_path, root_path)
            zip_stream.write(
                full_path,
                os.path.join(PROJECT_FILES_DIR, relative_path)
            )
def pack_project(
project_name,
destination_dir=None,
only_documents=False,
database_name=None
):
"""Make a package of a project with mongo documents and files.
This function has few restrictions:
@ -52,13 +96,18 @@ def pack_project(project_name, destination_dir=None):
"{root[...]}/{project[name]}"
Args:
project_name(str): Project that should be packaged.
destination_dir(str): Optional path where zip will be stored. Project's
root is used if not passed.
project_name (str): Project that should be packaged.
destination_dir (Optional[str]): Optional path where zip will be
stored. Project's root is used if not passed.
only_documents (Optional[bool]): Pack only Mongo documents and skip
files.
database_name (Optional[str]): Custom database name from which is
project queried.
"""
print("Creating package of project \"{}\"".format(project_name))
# Validate existence of project
project_doc = get_project(project_name)
project_doc = get_project_document(project_name, database_name)
if not project_doc:
raise ValueError("Project \"{}\" was not found in database".format(
project_name
@ -119,12 +168,7 @@ def pack_project(project_name, destination_dir=None):
temp_docs_json = s.name
# Query all project documents and store them to temp json
docs = list(get_whole_project(project_name))
data = dumps(
docs, json_options=CANONICAL_JSON_OPTIONS
)
with open(temp_docs_json, "w") as stream:
stream.write(data)
store_project_documents(project_name, temp_docs_json, database_name)
print("Packing files into zip")
# Write all to zip file
@ -133,16 +177,10 @@ def pack_project(project_name, destination_dir=None):
zip_stream.write(temp_metadata_json, METADATA_FILE_NAME + ".json")
# Add database documents
zip_stream.write(temp_docs_json, DOCUMENTS_FILE_NAME + ".json")
# Add project files to zip
for root, _, filenames in os.walk(project_source_path):
for filename in filenames:
filepath = os.path.join(root, filename)
# TODO add one more folder
archive_name = os.path.join(
PROJECT_FILES_DIR,
os.path.relpath(filepath, root_path)
)
zip_stream.write(filepath, archive_name)
if not only_documents:
_pack_files_to_zip(zip_stream, project_source_path, root_path)
print("Cleaning up")
# Cleanup
@ -152,80 +190,30 @@ def pack_project(project_name, destination_dir=None):
print("*** Packing finished ***")
def unpack_project(path_to_zip, new_root=None):
"""Unpack project zip file to recreate project.
def _unpack_project_files(unzip_dir, root_path, project_name):
"""Move project files from unarchived temp folder to new root.
Unpack is skipped if source files are not available in the zip. That can
happen if nothing was published yet or only documents were stored to
package.
Args:
path_to_zip(str): Path to zip which was created using 'pack_project'
function.
new_root(str): Optional way how to set different root path for unpacked
project.
unzip_dir (str): Location where zip was unzipped.
root_path (str): Path to new root.
project_name (str): Name of project.
"""
print("Unpacking project from zip {}".format(path_to_zip))
if not os.path.exists(path_to_zip):
print("Zip file does not exists: {}".format(path_to_zip))
src_project_files_dir = os.path.join(
unzip_dir, PROJECT_FILES_DIR, project_name
)
# Skip if files are not in the zip
if not os.path.exists(src_project_files_dir):
return
tmp_dir = tempfile.mkdtemp(prefix="unpack_")
print("Zip is extracted to temp: {}".format(tmp_dir))
with zipfile.ZipFile(path_to_zip, "r") as zip_stream:
zip_stream.extractall(tmp_dir)
metadata_json_path = os.path.join(tmp_dir, METADATA_FILE_NAME + ".json")
with open(metadata_json_path, "r") as stream:
metadata = json.load(stream)
docs_json_path = os.path.join(tmp_dir, DOCUMENTS_FILE_NAME + ".json")
with open(docs_json_path, "r") as stream:
content = stream.readlines()
docs = loads("".join(content))
low_platform = platform.system().lower()
project_name = metadata["project_name"]
source_root = metadata["root"]
root_path = source_root[low_platform]
# Drop existing collection
dbcon = AvalonMongoDB()
database = dbcon.database
if project_name in database.list_collection_names():
database.drop_collection(project_name)
print("Removed existing project collection")
print("Creating project documents ({})".format(len(docs)))
# Create new collection with loaded docs
collection = database[project_name]
collection.insert_many(docs)
# Skip change of root if is the same as the one stored in metadata
if (
new_root
and (os.path.normpath(new_root) == os.path.normpath(root_path))
):
new_root = None
if new_root:
print("Using different root path {}".format(new_root))
root_path = new_root
project_doc = get_project(project_name)
roots = project_doc["config"]["roots"]
key = tuple(roots.keys())[0]
update_key = "config.roots.{}.{}".format(key, low_platform)
collection.update_one(
{"_id": project_doc["_id"]},
{"$set": {
update_key: new_root
}}
)
# Make sure root path exists
if not os.path.exists(root_path):
os.makedirs(root_path)
src_project_files_dir = os.path.join(
tmp_dir, PROJECT_FILES_DIR, project_name
)
dst_project_files_dir = os.path.normpath(
os.path.join(root_path, project_name)
)
@ -241,8 +229,83 @@ def unpack_project(path_to_zip, new_root=None):
))
shutil.move(src_project_files_dir, dst_project_files_dir)
def unpack_project(
path_to_zip, new_root=None, database_only=None, database_name=None
):
"""Unpack project zip file to recreate project.
Args:
path_to_zip (str): Path to zip which was created using 'pack_project'
function.
new_root (str): Optional way how to set different root path for
unpacked project.
database_only (Optional[bool]): Unpack only database from zip.
database_name (str): Name of database where project will be recreated.
"""
if database_only is None:
database_only = False
print("Unpacking project from zip {}".format(path_to_zip))
if not os.path.exists(path_to_zip):
print("Zip file does not exists: {}".format(path_to_zip))
return
tmp_dir = tempfile.mkdtemp(prefix="unpack_")
print("Zip is extracted to temp: {}".format(tmp_dir))
with zipfile.ZipFile(path_to_zip, "r") as zip_stream:
if database_only:
for filename in (
"{}.json".format(METADATA_FILE_NAME),
"{}.json".format(DOCUMENTS_FILE_NAME),
):
zip_stream.extract(filename, tmp_dir)
else:
zip_stream.extractall(tmp_dir)
metadata_json_path = os.path.join(tmp_dir, METADATA_FILE_NAME + ".json")
with open(metadata_json_path, "r") as stream:
metadata = json.load(stream)
docs_json_path = os.path.join(tmp_dir, DOCUMENTS_FILE_NAME + ".json")
docs = load_json_file(docs_json_path)
low_platform = platform.system().lower()
project_name = metadata["project_name"]
source_root = metadata["root"]
root_path = source_root[low_platform]
# Drop existing collection
replace_project_documents(project_name, docs, database_name)
print("Creating project documents ({})".format(len(docs)))
# Skip change of root if is the same as the one stored in metadata
if (
new_root
and (os.path.normpath(new_root) == os.path.normpath(root_path))
):
new_root = None
if new_root:
print("Using different root path {}".format(new_root))
root_path = new_root
project_doc = get_project_document(project_name)
roots = project_doc["config"]["roots"]
key = tuple(roots.keys())[0]
update_key = "config.roots.{}.{}".format(key, low_platform)
collection = get_project_connection(project_name, database_name)
collection.update_one(
{"_id": project_doc["_id"]},
{"$set": {
update_key: new_root
}}
)
_unpack_project_files(tmp_dir, root_path, project_name)
# CLeanup
print("Cleaning up")
shutil.rmtree(tmp_dir)
dbcon.uninstall()
print("*** Unpack finished ***")

View file

@ -327,7 +327,8 @@ def get_usd_master_path(asset, subset, representation):
else:
asset_doc = get_asset_by_name(project_name, asset, fields=["name"])
formatted_result = anatomy.format(
template_obj = anatomy.templates_obj["publish"]["path"]
path = template_obj.format_strict(
{
"project": {
"name": project_name,
@ -340,7 +341,6 @@ def get_usd_master_path(asset, subset, representation):
}
)
path = formatted_result["publish"]["path"]
# Remove the version folder
subset_folder = os.path.dirname(os.path.dirname(path))
master_folder = os.path.join(subset_folder, "master")

View file

@ -325,6 +325,11 @@ class MayaSubmitDeadline(abstract_submit_deadline.AbstractSubmitDeadline):
job_info = copy.deepcopy(payload_job_info)
plugin_info = copy.deepcopy(payload_plugin_info)
# Force plugin reload for vray cause the region does not get flushed
# between tile renders.
if plugin_info["Renderer"] == "vray":
job_info.ForceReloadPlugin = True
# if we have sequence of files, we need to create tile job for
# every frame
job_info.TileJob = True
@ -434,6 +439,7 @@ class MayaSubmitDeadline(abstract_submit_deadline.AbstractSubmitDeadline):
assembly_payloads = []
output_dir = self.job_info.OutputDirectory[0]
config_files = []
for file in assembly_files:
frame = re.search(R_FRAME_NUMBER, file).group("frame")
@ -459,6 +465,7 @@ class MayaSubmitDeadline(abstract_submit_deadline.AbstractSubmitDeadline):
datetime.now().strftime("%Y_%m_%d_%H_%M_%S")
)
)
config_files.append(config_file)
try:
if not os.path.isdir(output_dir):
os.makedirs(output_dir)
@ -467,8 +474,6 @@ class MayaSubmitDeadline(abstract_submit_deadline.AbstractSubmitDeadline):
self.log.warning("Path is unreachable: "
"`{}`".format(output_dir))
assembly_plugin_info["ConfigFile"] = config_file
with open(config_file, "w") as cf:
print("TileCount={}".format(tiles_count), file=cf)
print("ImageFileName={}".format(file), file=cf)
@ -477,6 +482,10 @@ class MayaSubmitDeadline(abstract_submit_deadline.AbstractSubmitDeadline):
print("ImageHeight={}".format(
instance.data.get("resolutionHeight")), file=cf)
reversed_y = False
if plugin_info["Renderer"] == "arnold":
reversed_y = True
with open(config_file, "a") as cf:
# Need to reverse the order of the y tiles, because image
# coordinates are calculated from bottom left corner.
@ -487,7 +496,7 @@ class MayaSubmitDeadline(abstract_submit_deadline.AbstractSubmitDeadline):
instance.data.get("resolutionWidth"),
instance.data.get("resolutionHeight"),
payload_plugin_info["OutputFilePrefix"],
reversed_y=True
reversed_y=reversed_y
)[1]
for k, v in sorted(tiles.items()):
print("{}={}".format(k, v), file=cf)
@ -516,6 +525,11 @@ class MayaSubmitDeadline(abstract_submit_deadline.AbstractSubmitDeadline):
instance.data["assemblySubmissionJobs"] = assembly_job_ids
# Remove config files to avoid confusion about where data is coming
# from in Deadline.
for config_file in config_files:
os.remove(config_file)
def _get_maya_payload(self, data):
job_info = copy.deepcopy(self.job_info)
@ -876,8 +890,6 @@ def _format_tiles(
out["PluginInfo"]["RegionRight{}".format(tile)] = right
# Tile config
cfg["Tile{}".format(tile)] = new_filename
cfg["Tile{}Tile".format(tile)] = new_filename
cfg["Tile{}FileName".format(tile)] = new_filename
cfg["Tile{}X".format(tile)] = left
cfg["Tile{}Y".format(tile)] = top

View file

@ -772,10 +772,11 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
template_data["family"] = "render"
template_data["version"] = version
anatomy_filled = anatomy.format(template_data)
if "folder" in anatomy.templates["render"]:
publish_folder = anatomy_filled["render"]["folder"]
render_templates = anatomy.templates_obj["render"]
if "folder" in render_templates:
publish_folder = render_templates["folder"].format_strict(
template_data
)
else:
# solve deprecated situation when `folder` key is not underneath
# `publish` anatomy
@ -785,8 +786,7 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
" key underneath `publish` (in global of for project `{}`)."
).format(project_name))
file_path = anatomy_filled["render"]["path"]
# Directory
file_path = render_templates["path"].format_strict(template_data)
publish_folder = os.path.dirname(file_path)
return publish_folder

View file

@ -463,9 +463,7 @@ def get_workdir_from_session(session=None, template_key=None):
session = legacy_io.Session
project_name = session["AVALON_PROJECT"]
host_name = session["AVALON_APP"]
anatomy = Anatomy(project_name)
template_data = get_template_data_from_session(session)
anatomy_filled = anatomy.format(template_data)
if not template_key:
task_type = template_data["task"]["type"]
@ -474,7 +472,10 @@ def get_workdir_from_session(session=None, template_key=None):
host_name,
project_name=project_name
)
path = anatomy_filled[template_key]["folder"]
anatomy = Anatomy(project_name)
template_obj = anatomy.templates_obj[template_key]["folder"]
path = template_obj.format_strict(template_data)
if path:
path = os.path.normpath(path)
return path

View file

@ -1,5 +1,6 @@
"""Functions useful for delivery of published representations."""
import os
import copy
import shutil
import glob
import clique
@ -146,12 +147,11 @@ def deliver_single_file(
report_items["Source file was not found"].append(msg)
return report_items, 0
anatomy_filled = anatomy.format(anatomy_data)
if format_dict:
template_result = anatomy_filled["delivery"][template_name]
delivery_path = template_result.rootless.format(**format_dict)
else:
delivery_path = anatomy_filled["delivery"][template_name]
anatomy_data = copy.deepcopy(anatomy_data)
anatomy_data["root"] = format_dict["root"]
template_obj = anatomy.templates_obj["delivery"][template_name]
delivery_path = template_obj.format_strict(anatomy_data)
# Backwards compatibility when extension contained `.`
delivery_path = delivery_path.replace("..", ".")
@ -269,14 +269,12 @@ def deliver_sequence(
frame_indicator = "@####@"
anatomy_data = copy.deepcopy(anatomy_data)
anatomy_data["frame"] = frame_indicator
anatomy_filled = anatomy.format(anatomy_data)
if format_dict:
template_result = anatomy_filled["delivery"][template_name]
delivery_path = template_result.rootless.format(**format_dict)
else:
delivery_path = anatomy_filled["delivery"][template_name]
anatomy_data["root"] = format_dict["root"]
template_obj = anatomy.templates_obj["delivery"][template_name]
delivery_path = template_obj.format_strict(anatomy_data)
delivery_path = os.path.normpath(delivery_path.replace("\\", "/"))
delivery_folder = os.path.dirname(delivery_path)

View file

@ -45,7 +45,7 @@ class PublishValidationError(Exception):
def __init__(self, message, title=None, description=None, detail=None):
self.message = message
self.title = title or "< Missing title >"
self.title = title
self.description = description or message
self.detail = detail
super(PublishValidationError, self).__init__(message)

View file

@ -132,9 +132,9 @@ def get_workdir_with_workdir_data(
project_settings
)
anatomy_filled = anatomy.format(workdir_data)
template_obj = anatomy.templates_obj[template_key]["folder"]
# Output is TemplateResult object which contain useful data
output = anatomy_filled[template_key]["folder"]
output = template_obj.format_strict(workdir_data)
if output:
return output.normalized()
return output

View file

@ -158,7 +158,7 @@ class AbstractTemplateBuilder(object):
def linked_asset_docs(self):
if self._linked_asset_docs is None:
self._linked_asset_docs = get_linked_assets(
self.current_asset_doc
self.project_name, self.current_asset_doc
)
return self._linked_asset_docs
@ -1151,13 +1151,10 @@ class PlaceholderItem(object):
return self._log
def __repr__(self):
name = None
if hasattr("name", self):
name = self.name
if hasattr("_scene_identifier ", self):
name = self._scene_identifier
return "< {} {} >".format(self.__class__.__name__, name)
return "< {} {} >".format(
self.__class__.__name__,
self._scene_identifier
)
@property
def order(self):
@ -1419,16 +1416,7 @@ class PlaceholderLoadMixin(object):
"family": [placeholder.data["family"]]
}
elif builder_type != "linked_asset":
context_filters = {
"asset": [re.compile(placeholder.data["asset"])],
"subset": [re.compile(placeholder.data["subset"])],
"hierarchy": [re.compile(placeholder.data["hierarchy"])],
"representation": [placeholder.data["representation"]],
"family": [placeholder.data["family"]]
}
else:
elif builder_type == "linked_asset":
asset_regex = re.compile(placeholder.data["asset"])
linked_asset_names = []
for asset_doc in linked_asset_docs:
@ -1444,6 +1432,15 @@ class PlaceholderLoadMixin(object):
"family": [placeholder.data["family"]],
}
else:
context_filters = {
"asset": [re.compile(placeholder.data["asset"])],
"subset": [re.compile(placeholder.data["subset"])],
"hierarchy": [re.compile(placeholder.data["hierarchy"])],
"representation": [placeholder.data["representation"]],
"family": [placeholder.data["family"]]
}
return list(get_representations(
project_name,
context_filters=context_filters

View file

@ -83,10 +83,11 @@ class CollectResourcesPath(pyblish.api.InstancePlugin):
"hierarchy": instance.data["hierarchy"]
})
anatomy_filled = anatomy.format(template_data)
if "folder" in anatomy.templates["publish"]:
publish_folder = anatomy_filled["publish"]["folder"]
publish_templates = anatomy.templates_obj["publish"]
if "folder" in publish_templates:
publish_folder = publish_templates["folder"].format_strict(
template_data
)
else:
# solve deprecated situation when `folder` key is not underneath
# `publish` anatomy
@ -95,8 +96,7 @@ class CollectResourcesPath(pyblish.api.InstancePlugin):
" key underneath `publish` (in global of for project `{}`)."
).format(anatomy.project_name))
file_path = anatomy_filled["publish"]["path"]
# Directory
file_path = publish_templates["path"].format_strict(template_data)
publish_folder = os.path.dirname(file_path)
publish_folder = os.path.normpath(publish_folder)

View file

@ -665,8 +665,7 @@ class IntegrateAsset(pyblish.api.InstancePlugin):
# - template_data (Dict[str, Any]): source data used to fill template
# - to add required data to 'repre_context' not used for
# formatting
# - anatomy_filled (Dict[str, Any]): filled anatomy of last file
# - to fill 'publishDir' on instance.data -> not ideal
path_template_obj = anatomy.templates_obj[template_name]["path"]
# Treat template with 'orignalBasename' in special way
if "{originalBasename}" in template:
@ -700,8 +699,7 @@ class IntegrateAsset(pyblish.api.InstancePlugin):
template_data["originalBasename"], _ = os.path.splitext(
src_file_name)
anatomy_filled = anatomy.format(template_data)
dst = anatomy_filled[template_name]["path"]
dst = path_template_obj.format_strict(template_data)
src = os.path.join(stagingdir, src_file_name)
transfers.append((src, dst))
if repre_context is None:
@ -761,8 +759,9 @@ class IntegrateAsset(pyblish.api.InstancePlugin):
template_data["udim"] = index
else:
template_data["frame"] = index
anatomy_filled = anatomy.format(template_data)
template_filled = anatomy_filled[template_name]["path"]
template_filled = path_template_obj.format_strict(
template_data
)
dst_filepaths.append(template_filled)
if repre_context is None:
self.log.debug(
@ -798,8 +797,7 @@ class IntegrateAsset(pyblish.api.InstancePlugin):
if is_udim:
template_data["udim"] = repre["udim"][0]
# Construct destination filepath from template
anatomy_filled = anatomy.format(template_data)
template_filled = anatomy_filled[template_name]["path"]
template_filled = path_template_obj.format_strict(template_data)
repre_context = template_filled.used_values
dst = os.path.normpath(template_filled)
@ -810,11 +808,9 @@ class IntegrateAsset(pyblish.api.InstancePlugin):
# todo: Are we sure the assumption each representation
# ends up in the same folder is valid?
if not instance.data.get("publishDir"):
instance.data["publishDir"] = (
anatomy_filled
[template_name]
["folder"]
)
template_obj = anatomy.templates_obj[template_name]["folder"]
template_filled = template_obj.format_strict(template_data)
instance.data["publishDir"] = template_filled
for key in self.db_representation_context_keys:
# Also add these values to the context even if not used by the

View file

@ -291,6 +291,7 @@ class IntegrateHeroVersion(pyblish.api.InstancePlugin):
))
try:
src_to_dst_file_paths = []
path_template_obj = anatomy.templates_obj[template_key]["path"]
for repre_info in published_repres.values():
# Skip if new repre does not have published repre files
@ -303,9 +304,7 @@ class IntegrateHeroVersion(pyblish.api.InstancePlugin):
anatomy_data.pop("version", None)
# Get filled path to repre context
anatomy_filled = anatomy.format(anatomy_data)
template_filled = anatomy_filled[template_key]["path"]
template_filled = path_template_obj.format_strict(anatomy_data)
repre_data = {
"path": str(template_filled),
"template": hero_template
@ -343,8 +342,9 @@ class IntegrateHeroVersion(pyblish.api.InstancePlugin):
# Get head and tail for collection
frame_splitter = "_-_FRAME_SPLIT_-_"
anatomy_data["frame"] = frame_splitter
_anatomy_filled = anatomy.format(anatomy_data)
_template_filled = _anatomy_filled[template_key]["path"]
_template_filled = path_template_obj.format_strict(
anatomy_data
)
head, tail = _template_filled.split(frame_splitter)
padding = int(
anatomy.templates[template_key]["frame_padding"]
@ -520,24 +520,24 @@ class IntegrateHeroVersion(pyblish.api.InstancePlugin):
})
if "folder" in anatomy.templates[template_key]:
anatomy_filled = anatomy.format(template_data)
publish_folder = anatomy_filled[template_key]["folder"]
template_obj = anatomy.templates_obj[template_key]["folder"]
publish_folder = template_obj.format_strict(template_data)
else:
# This is for cases of Deprecated anatomy without `folder`
# TODO remove when all clients have solved this issue
template_data.update({
"frame": "FRAME_TEMP",
"representation": "TEMP"
})
anatomy_filled = anatomy.format(template_data)
# solve deprecated situation when `folder` key is not underneath
# `publish` anatomy
self.log.warning((
"Deprecation warning: Anatomy does not have set `folder`"
" key underneath `publish` (in global of for project `{}`)."
).format(anatomy.project_name))
# solve deprecated situation when `folder` key is not underneath
# `publish` anatomy
template_data.update({
"frame": "FRAME_TEMP",
"representation": "TEMP"
})
template_obj = anatomy.templates_obj[template_key]["path"]
file_path = template_obj.format_strict(template_data)
file_path = anatomy_filled[template_key]["path"]
# Directory
publish_folder = os.path.dirname(file_path)

View file

@ -480,8 +480,8 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
else:
template_data["udim"] = src_padding_exp % i
anatomy_filled = anatomy.format(template_data)
template_filled = anatomy_filled[template_name]["path"]
template_obj = anatomy.templates_obj[template_name]["path"]
template_filled = template_obj.format_strict(template_data)
if repre_context is None:
repre_context = template_filled.used_values
test_dest_files.append(
@ -587,8 +587,8 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
if repre.get("udim"):
template_data["udim"] = repre["udim"][0]
src = os.path.join(stagingdir, fname)
anatomy_filled = anatomy.format(template_data)
template_filled = anatomy_filled[template_name]["path"]
template_obj = anatomy.templates_obj[template_name]["path"]
template_filled = template_obj.format_strict(template_data)
repre_context = template_filled.used_values
dst = os.path.normpath(template_filled)
@ -600,9 +600,8 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
if not instance.data.get("publishDir"):
instance.data["publishDir"] = (
anatomy_filled
[template_name]
["folder"]
anatomy.templates_obj[template_name]["folder"]
.format_strict(template_data)
)
if repre.get("udim"):
repre_context["udim"] = repre.get("udim") # store list

View file

@ -271,9 +271,9 @@ class IntegrateThumbnails(pyblish.api.ContextPlugin):
"thumbnail_type": "thumbnail"
})
anatomy_filled = anatomy.format(template_data)
thumbnail_template = anatomy.templates["publish"]["thumbnail"]
template_filled = anatomy_filled["publish"]["thumbnail"]
template_obj = anatomy.templates_obj["publish"]["thumbnail"]
template_filled = template_obj.format_strict(template_data)
thumbnail_template = template_filled.template
dst_full_path = os.path.normpath(str(template_filled))
self.log.debug("Copying file .. {} -> {}".format(

View file

@ -49,7 +49,12 @@ class ValidateSequenceFrames(pyblish.api.InstancePlugin):
collection = collections[0]
frames = list(collection.indexes)
if instance.data.get("slate"):
# Slate is not part of the frame range
frames = frames[1:]
current_range = (frames[0], frames[-1])
required_range = (instance.data["frameStart"],
instance.data["frameEnd"])

View file

@ -353,12 +353,12 @@ class PypeCommands:
version_packer = VersionRepacker(directory)
version_packer.process()
def pack_project(self, project_name, dirpath):
def pack_project(self, project_name, dirpath, database_only):
from openpype.lib.project_backpack import pack_project
pack_project(project_name, dirpath)
pack_project(project_name, dirpath, database_only)
def unpack_project(self, zip_filepath, new_root):
def unpack_project(self, zip_filepath, new_root, database_only):
from openpype.lib.project_backpack import unpack_project
unpack_project(zip_filepath, new_root)
unpack_project(zip_filepath, new_root, database_only)

View file

@ -1,5 +1,414 @@
{
"open_workfile_post_initialization": false,
"explicit_plugins_loading": {
"enabled": false,
"plugins_to_load": [
{
"enabled": false,
"name": "AbcBullet"
},
{
"enabled": true,
"name": "AbcExport"
},
{
"enabled": true,
"name": "AbcImport"
},
{
"enabled": false,
"name": "animImportExport"
},
{
"enabled": false,
"name": "ArubaTessellator"
},
{
"enabled": false,
"name": "ATFPlugin"
},
{
"enabled": false,
"name": "atomImportExport"
},
{
"enabled": false,
"name": "AutodeskPacketFile"
},
{
"enabled": false,
"name": "autoLoader"
},
{
"enabled": false,
"name": "bifmeshio"
},
{
"enabled": false,
"name": "bifrostGraph"
},
{
"enabled": false,
"name": "bifrostshellnode"
},
{
"enabled": false,
"name": "bifrostvisplugin"
},
{
"enabled": false,
"name": "blast2Cmd"
},
{
"enabled": false,
"name": "bluePencil"
},
{
"enabled": false,
"name": "Boss"
},
{
"enabled": false,
"name": "bullet"
},
{
"enabled": true,
"name": "cacheEvaluator"
},
{
"enabled": false,
"name": "cgfxShader"
},
{
"enabled": false,
"name": "cleanPerFaceAssignment"
},
{
"enabled": false,
"name": "clearcoat"
},
{
"enabled": false,
"name": "convertToComponentTags"
},
{
"enabled": false,
"name": "curveWarp"
},
{
"enabled": false,
"name": "ddsFloatReader"
},
{
"enabled": true,
"name": "deformerEvaluator"
},
{
"enabled": false,
"name": "dgProfiler"
},
{
"enabled": false,
"name": "drawUfe"
},
{
"enabled": false,
"name": "dx11Shader"
},
{
"enabled": false,
"name": "fbxmaya"
},
{
"enabled": false,
"name": "fltTranslator"
},
{
"enabled": false,
"name": "freeze"
},
{
"enabled": false,
"name": "Fur"
},
{
"enabled": false,
"name": "gameFbxExporter"
},
{
"enabled": false,
"name": "gameInputDevice"
},
{
"enabled": false,
"name": "GamePipeline"
},
{
"enabled": false,
"name": "gameVertexCount"
},
{
"enabled": false,
"name": "geometryReport"
},
{
"enabled": false,
"name": "geometryTools"
},
{
"enabled": false,
"name": "glslShader"
},
{
"enabled": true,
"name": "GPUBuiltInDeformer"
},
{
"enabled": false,
"name": "gpuCache"
},
{
"enabled": false,
"name": "hairPhysicalShader"
},
{
"enabled": false,
"name": "ik2Bsolver"
},
{
"enabled": false,
"name": "ikSpringSolver"
},
{
"enabled": false,
"name": "invertShape"
},
{
"enabled": false,
"name": "lges"
},
{
"enabled": false,
"name": "lookdevKit"
},
{
"enabled": false,
"name": "MASH"
},
{
"enabled": false,
"name": "matrixNodes"
},
{
"enabled": false,
"name": "mayaCharacterization"
},
{
"enabled": false,
"name": "mayaHIK"
},
{
"enabled": false,
"name": "MayaMuscle"
},
{
"enabled": false,
"name": "mayaUsdPlugin"
},
{
"enabled": false,
"name": "mayaVnnPlugin"
},
{
"enabled": false,
"name": "melProfiler"
},
{
"enabled": false,
"name": "meshReorder"
},
{
"enabled": true,
"name": "modelingToolkit"
},
{
"enabled": false,
"name": "mtoa"
},
{
"enabled": false,
"name": "mtoh"
},
{
"enabled": false,
"name": "nearestPointOnMesh"
},
{
"enabled": true,
"name": "objExport"
},
{
"enabled": false,
"name": "OneClick"
},
{
"enabled": false,
"name": "OpenEXRLoader"
},
{
"enabled": false,
"name": "pgYetiMaya"
},
{
"enabled": false,
"name": "pgyetiVrayMaya"
},
{
"enabled": false,
"name": "polyBoolean"
},
{
"enabled": false,
"name": "poseInterpolator"
},
{
"enabled": false,
"name": "quatNodes"
},
{
"enabled": false,
"name": "randomizerDevice"
},
{
"enabled": false,
"name": "redshift4maya"
},
{
"enabled": true,
"name": "renderSetup"
},
{
"enabled": false,
"name": "retargeterNodes"
},
{
"enabled": false,
"name": "RokokoMotionLibrary"
},
{
"enabled": false,
"name": "rotateHelper"
},
{
"enabled": false,
"name": "sceneAssembly"
},
{
"enabled": false,
"name": "shaderFXPlugin"
},
{
"enabled": false,
"name": "shotCamera"
},
{
"enabled": false,
"name": "snapTransform"
},
{
"enabled": false,
"name": "stage"
},
{
"enabled": true,
"name": "stereoCamera"
},
{
"enabled": false,
"name": "stlTranslator"
},
{
"enabled": false,
"name": "studioImport"
},
{
"enabled": false,
"name": "Substance"
},
{
"enabled": false,
"name": "substancelink"
},
{
"enabled": false,
"name": "substancemaya"
},
{
"enabled": false,
"name": "substanceworkflow"
},
{
"enabled": false,
"name": "svgFileTranslator"
},
{
"enabled": false,
"name": "sweep"
},
{
"enabled": false,
"name": "testify"
},
{
"enabled": false,
"name": "tiffFloatReader"
},
{
"enabled": false,
"name": "timeSliderBookmark"
},
{
"enabled": false,
"name": "Turtle"
},
{
"enabled": false,
"name": "Type"
},
{
"enabled": false,
"name": "udpDevice"
},
{
"enabled": false,
"name": "ufeSupport"
},
{
"enabled": false,
"name": "Unfold3D"
},
{
"enabled": false,
"name": "VectorRender"
},
{
"enabled": false,
"name": "vrayformaya"
},
{
"enabled": false,
"name": "vrayvolumegrid"
},
{
"enabled": false,
"name": "xgenToolkit"
},
{
"enabled": false,
"name": "xgenVray"
}
]
},
"imageio": {
"ocio_config": {
"enabled": false,
@ -145,7 +554,7 @@
"publish_mip_map": true
},
"CreateAnimation": {
"enabled": true,
"enabled": false,
"write_color_sets": false,
"write_face_sets": false,
"include_parent_hierarchy": false,
@ -911,7 +1320,8 @@
"displayFilmOrigin": false,
"overscan": 1.0
}
}
},
"profiles": []
},
"ExtractMayaSceneRaw": {
"enabled": true,
@ -1049,7 +1459,7 @@
]
},
"reference_loader": {
"namespace": "{asset_name}_{subset}_##",
"namespace": "{asset_name}_{subset}_##_",
"group_name": "_GRP"
}
},

View file

@ -11,6 +11,9 @@
},
"level_sequences_for_layouts": false,
"delete_unmatched_assets": false,
"render_config_path": "",
"preroll_frames": 0,
"render_format": "png",
"project_setup": {
"dev_mode": true
}

View file

@ -119,9 +119,7 @@
"label": "3ds max",
"icon": "{}/app_icons/3dsmax.png",
"host_name": "max",
"environment": {
"ADSK_3DSMAX_STARTUPSCRIPTS_ADDON_DIR": "{OPENPYPE_ROOT}\\openpype\\hosts\\max\\startup"
},
"environment": {},
"variants": {
"2023": {
"use_python_2": false,
@ -133,9 +131,7 @@
"linux": []
},
"arguments": {
"windows": [
"-U MAXScript {OPENPYPE_ROOT}\\openpype\\hosts\\max\\startup\\startup.ms"
],
"windows": [],
"darwin": [],
"linux": []
},

View file

@ -11,8 +11,10 @@ class ColorEntity(InputEntity):
def _item_initialization(self):
self.valid_value_types = (list, )
self.value_on_not_set = [0, 0, 0, 255]
self.use_alpha = self.schema_data.get("use_alpha", True)
self.value_on_not_set = self.convert_to_valid_type(
self.schema_data.get("default", [0, 0, 0, 255])
)
def set_override_state(self, *args, **kwargs):
super(ColorEntity, self).set_override_state(*args, **kwargs)

View file

@ -442,7 +442,9 @@ class TextEntity(InputEntity):
def _item_initialization(self):
self.valid_value_types = (STRING_TYPE, )
self.value_on_not_set = ""
self.value_on_not_set = self.convert_to_valid_type(
self.schema_data.get("default", "")
)
# GUI attributes
self.multiline = self.schema_data.get("multiline", False)

View file

@ -10,6 +10,41 @@
"key": "open_workfile_post_initialization",
"label": "Open Workfile Post Initialization"
},
{
"type": "dict",
"key": "explicit_plugins_loading",
"label": "Explicit Plugins Loading",
"collapsible": true,
"is_group": true,
"checkbox_key": "enabled",
"children": [
{
"type": "boolean",
"key": "enabled",
"label": "Enabled"
},
{
"type": "list",
"key": "plugins_to_load",
"label": "Plugins To Load",
"object_type": {
"type": "dict",
"children": [
{
"type": "boolean",
"key": "enabled",
"label": "Enabled"
},
{
"type": "text",
"key": "name",
"label": "Name"
}
]
}
}
]
},
{
"key": "imageio",
"type": "dict",

View file

@ -32,6 +32,28 @@
"key": "delete_unmatched_assets",
"label": "Delete assets that are not matched"
},
{
"type": "text",
"key": "render_config_path",
"label": "Render Config Path"
},
{
"type": "number",
"key": "preroll_frames",
"label": "Pre-roll frames"
},
{
"key": "render_format",
"label": "Render format",
"type": "enum",
"multiselection": false,
"enum_items": [
{"png": "PNG"},
{"exr": "EXR"},
{"jpg": "JPG"},
{"bmp": "BMP"}
]
},
{
"type": "dict",
"collapsible": true,

View file

@ -7,6 +7,8 @@
{
"type": "dict",
"key": "capture_preset",
"label": "DEPRECATED! Please use \"Profiles\" below.",
"collapsed": false,
"children": [
{
"type": "dict",
@ -176,7 +178,7 @@
{ "all": "All Lights"},
{ "selected": "Selected Lights"},
{ "flat": "Flat Lighting"},
{ "nolights": "No Lights"}
{ "none": "No Lights"}
]
},
{
@ -626,6 +628,747 @@
]
}
]
},
{
"type": "list",
"key": "profiles",
"label": "Profiles",
"object_type": {
"type": "dict",
"children": [
{
"key": "task_types",
"label": "Task types",
"type": "task-types-enum"
},
{
"key": "task_names",
"label": "Task names",
"type": "list",
"object_type": "text"
},
{
"key": "subsets",
"label": "Subset names",
"type": "list",
"object_type": "text"
},
{
"type": "splitter"
},
{
"type": "dict",
"key": "capture_preset",
"children": [
{
"type": "dict",
"key": "Codec",
"children": [
{
"type": "label",
"label": "<b>Codec</b>"
},
{
"type": "text",
"key": "compression",
"label": "Encoding",
"default": "png"
},
{
"type": "text",
"key": "format",
"label": "Format",
"default": "image"
},
{
"type": "number",
"key": "quality",
"label": "Quality",
"decimal": 0,
"minimum": 0,
"maximum": 100,
"default": 95
},
{
"type": "splitter"
}
]
},
{
"type": "dict",
"key": "Display Options",
"children": [
{
"type": "label",
"label": "<b>Display Options</b>"
},
{
"type": "boolean",
"key": "override_display",
"label": "Override display options",
"default": true
},
{
"type": "color",
"key": "background",
"label": "Background Color: ",
"default": [125, 125, 125, 255]
},
{
"type": "boolean",
"key": "displayGradient",
"label": "Display background gradient",
"default": true
},
{
"type": "color",
"key": "backgroundBottom",
"label": "Background Bottom: ",
"default": [125, 125, 125, 255]
},
{
"type": "color",
"key": "backgroundTop",
"label": "Background Top: ",
"default": [125, 125, 125, 255]
}
]
},
{
"type": "splitter"
},
{
"type": "dict",
"key": "Generic",
"children": [
{
"type": "label",
"label": "<b>Generic</b>"
},
{
"type": "boolean",
"key": "isolate_view",
"label": " Isolate view",
"default": true
},
{
"type": "boolean",
"key": "off_screen",
"label": " Off Screen",
"default": true
},
{
"type": "boolean",
"key": "pan_zoom",
"label": " 2D Pan/Zoom",
"default": false
}
]
},
{
"type": "splitter"
},
{
"type": "dict",
"key": "Renderer",
"children": [
{
"type": "label",
"label": "<b>Renderer</b>"
},
{
"type": "enum",
"key": "rendererName",
"label": "Renderer name",
"enum_items": [
{ "vp2Renderer": "Viewport 2.0" }
],
"default": "vp2Renderer"
}
]
},
{
"type": "dict",
"key": "Resolution",
"children": [
{
"type": "splitter"
},
{
"type": "label",
"label": "<b>Resolution</b>"
},
{
"type": "number",
"key": "width",
"label": " Width",
"decimal": 0,
"minimum": 0,
"maximum": 99999,
"default": 0
},
{
"type": "number",
"key": "height",
"label": "Height",
"decimal": 0,
"minimum": 0,
"maximum": 99999,
"default": 0
}
]
},
{
"type": "splitter"
},
{
"type": "dict",
"collapsible": true,
"key": "Viewport Options",
"label": "Viewport Options",
"children": [
{
"type": "boolean",
"key": "override_viewport_options",
"label": "Override Viewport Options",
"default": true
},
{
"type": "enum",
"key": "displayLights",
"label": "Display Lights",
"enum_items": [
{ "default": "Default Lighting"},
{ "all": "All Lights"},
{ "selected": "Selected Lights"},
{ "flat": "Flat Lighting"},
{ "nolights": "No Lights"}
],
"default": "default"
},
{
"type": "boolean",
"key": "displayTextures",
"label": "Display Textures",
"default": true
},
{
"type": "number",
"key": "textureMaxResolution",
"label": "Texture Clamp Resolution",
"decimal": 0,
"default": 1024
},
{
"type": "splitter"
},
{
"type": "label",
"label": "<b>Display</b>"
},
{
"type":"boolean",
"key": "renderDepthOfField",
"label": "Depth of Field",
"default": true
},
{
"type": "splitter"
},
{
"type": "boolean",
"key": "shadows",
"label": "Display Shadows",
"default": true
},
{
"type": "boolean",
"key": "twoSidedLighting",
"label": "Two Sided Lighting",
"default": true
},
{
"type": "splitter"
},
{
"type": "boolean",
"key": "lineAAEnable",
"label": "Enable Anti-Aliasing",
"default": true
},
{
"type": "number",
"key": "multiSample",
"label": "Anti Aliasing Samples",
"decimal": 0,
"minimum": 0,
"maximum": 32,
"default": 8
},
{
"type": "splitter"
},
{
"type": "boolean",
"key": "useDefaultMaterial",
"label": "Use Default Material",
"default": false
},
{
"type": "boolean",
"key": "wireframeOnShaded",
"label": "Wireframe On Shaded",
"default": false
},
{
"type": "boolean",
"key": "xray",
"label": "X-Ray",
"default": false
},
{
"type": "boolean",
"key": "jointXray",
"label": "X-Ray Joints",
"default": false
},
{
"type": "boolean",
"key": "backfaceCulling",
"label": "Backface Culling",
"default": false
},
{
"type": "boolean",
"key": "ssaoEnable",
"label": "Screen Space Ambient Occlusion",
"default": false
},
{
"type": "number",
"key": "ssaoAmount",
"label": "SSAO Amount",
"default": 1
},
{
"type": "number",
"key": "ssaoRadius",
"label": "SSAO Radius",
"default": 16
},
{
"type": "number",
"key": "ssaoFilterRadius",
"label": "SSAO Filter Radius",
"decimal": 0,
"minimum": 1,
"maximum": 32,
"default": 16
},
{
"type": "number",
"key": "ssaoSamples",
"label": "SSAO Samples",
"decimal": 0,
"minimum": 8,
"maximum": 32,
"default": 16
},
{
"type": "splitter"
},
{
"type": "boolean",
"key": "fogging",
"label": "Enable Hardware Fog",
"default": false
},
{
"type": "enum",
"key": "hwFogFalloff",
"label": "Hardware Falloff",
"enum_items": [
{ "0": "Linear"},
{ "1": "Exponential"},
{ "2": "Exponential Squared"}
],
"default": "0"
},
{
"type": "number",
"key": "hwFogDensity",
"label": "Fog Density",
"decimal": 2,
"minimum": 0,
"maximum": 1,
"default": 0
},
{
"type": "number",
"key": "hwFogStart",
"label": "Fog Start",
"default": 0
},
{
"type": "number",
"key": "hwFogEnd",
"label": "Fog End",
"default": 100
},
{
"type": "number",
"key": "hwFogAlpha",
"label": "Fog Alpha",
"default": 0
},
{
"type": "number",
"key": "hwFogColorR",
"label": "Fog Color R",
"decimal": 2,
"minimum": 0,
"maximum": 1,
"default": 1
},
{
"type": "number",
"key": "hwFogColorG",
"label": "Fog Color G",
"decimal": 2,
"minimum": 0,
"maximum": 1,
"default": 1
},
{
"type": "number",
"key": "hwFogColorB",
"label": "Fog Color B",
"decimal": 2,
"minimum": 0,
"maximum": 1,
"default": 1
},
{
"type": "splitter"
},
{
"type": "boolean",
"key": "motionBlurEnable",
"label": "Enable Motion Blur",
"default": false
},
{
"type": "number",
"key": "motionBlurSampleCount",
"label": "Motion Blur Sample Count",
"decimal": 0,
"minimum": 8,
"maximum": 32,
"default": 8
},
{
"type": "number",
"key": "motionBlurShutterOpenFraction",
"label": "Shutter Open Fraction",
"decimal": 3,
"minimum": 0.01,
"maximum": 32,
"default": 0.2
},
{
"type": "splitter"
},
{
"type": "label",
"label": "<b>Show</b>"
},
{
"type": "boolean",
"key": "cameras",
"label": "Cameras",
"default": false
},
{
"type": "boolean",
"key": "clipGhosts",
"label": "Clip Ghosts",
"default": false
},
{
"type": "boolean",
"key": "deformers",
"label": "Deformers",
"default": false
},
{
"type": "boolean",
"key": "dimensions",
"label": "Dimensions",
"default": false
},
{
"type": "boolean",
"key": "dynamicConstraints",
"label": "Dynamic Constraints",
"default": false
},
{
"type": "boolean",
"key": "dynamics",
"label": "Dynamics",
"default": false
},
{
"type": "boolean",
"key": "fluids",
"label": "Fluids",
"default": false
},
{
"type": "boolean",
"key": "follicles",
"label": "Follicles",
"default": false
},
{
"type": "boolean",
"key": "greasePencils",
"label": "Grease Pencil",
"default": false
},
{
"type": "boolean",
"key": "grid",
"label": "Grid",
"default": false
},
{
"type": "boolean",
"key": "hairSystems",
"label": "Hair Systems",
"default": true
},
{
"type": "boolean",
"key": "handles",
"label": "Handles",
"default": false
},
{
"type": "boolean",
"key": "headsUpDisplay",
"label": "HUD",
"default": false
},
{
"type": "boolean",
"key": "ikHandles",
"label": "IK Handles",
"default": false
},
{
"type": "boolean",
"key": "imagePlane",
"label": "Image Planes",
"default": true
},
{
"type": "boolean",
"key": "joints",
"label": "Joints",
"default": false
},
{
"type": "boolean",
"key": "lights",
"label": "Lights",
"default": false
},
{
"type": "boolean",
"key": "locators",
"label": "Locators",
"default": false
},
{
"type": "boolean",
"key": "manipulators",
"label": "Manipulators",
"default": false
},
{
"type": "boolean",
"key": "motionTrails",
"label": "Motion Trails",
"default": false
},
{
"type": "boolean",
"key": "nCloths",
"label": "nCloths",
"default": false
},
{
"type": "boolean",
"key": "nParticles",
"label": "nParticles",
"default": false
},
{
"type": "boolean",
"key": "nRigids",
"label": "nRigids",
"default": false
},
{
"type": "boolean",
"key": "controlVertices",
"label": "NURBS CVs",
"default": false
},
{
"type": "boolean",
"key": "nurbsCurves",
"label": "NURBS Curves",
"default": false
},
{
"type": "boolean",
"key": "hulls",
"label": "NURBS Hulls",
"default": false
},
{
"type": "boolean",
"key": "nurbsSurfaces",
"label": "NURBS Surfaces",
"default": false
},
{
"type": "boolean",
"key": "particleInstancers",
"label": "Particle Instancers",
"default": false
},
{
"type": "boolean",
"key": "pivots",
"label": "Pivots",
"default": false
},
{
"type": "boolean",
"key": "planes",
"label": "Planes",
"default": false
},
{
"type": "boolean",
"key": "pluginShapes",
"label": "Plugin Shapes",
"default": false
},
{
"type": "boolean",
"key": "polymeshes",
"label": "Polygons",
"default": true
},
{
"type": "boolean",
"key": "strokes",
"label": "Strokes",
"default": false
},
{
"type": "boolean",
"key": "subdivSurfaces",
"label": "Subdiv Surfaces",
"default": false
},
{
"type": "boolean",
"key": "textures",
"label": "Texture Placements",
"default": false
},
{
"type": "dict-modifiable",
"key": "pluginObjects",
"label": "Plugin Objects",
"object_type": "boolean"
}
]
},
{
"type": "dict",
"collapsible": true,
"key": "Camera Options",
"label": "Camera Options",
"children": [
{
"type": "boolean",
"key": "displayGateMask",
"label": "Display Gate Mask",
"default": false
},
{
"type": "boolean",
"key": "displayResolution",
"label": "Display Resolution",
"default": false
},
{
"type": "boolean",
"key": "displayFilmGate",
"label": "Display Film Gate",
"default": false
},
{
"type": "boolean",
"key": "displayFieldChart",
"label": "Display Field Chart",
"default": false
},
{
"type": "boolean",
"key": "displaySafeAction",
"label": "Display Safe Action",
"default": false
},
{
"type": "boolean",
"key": "displaySafeTitle",
"label": "Display Safe Title",
"default": false
},
{
"type": "boolean",
"key": "displayFilmPivot",
"label": "Display Film Pivot",
"default": false
},
{
"type": "boolean",
"key": "displayFilmOrigin",
"label": "Display Film Origin",
"default": false
},
{
"type": "number",
"key": "overscan",
"label": "Overscan",
"decimal": 1,
"minimum": 0,
"maximum": 10,
"default": 1
}
]
}
]
}
]
}
}
]
}

View file

@ -48,7 +48,7 @@
"bg-view-selection-hover": "rgba(92, 173, 214, .8)",
"border": "#373D48",
"border-hover": "rgba(168, 175, 189, .3)",
"border-hover": "rgb(92, 99, 111)",
"border-focus": "rgb(92, 173, 214)",
"restart-btn-bg": "#458056",

View file

@ -35,6 +35,11 @@ QWidget:disabled {
color: {color:font-disabled};
}
/* Some DCCs have set borders to solid color */
QScrollArea {
border: none;
}
QLabel {
background: transparent;
}
@ -42,7 +47,7 @@ QLabel {
/* Inputs */
QAbstractSpinBox, QLineEdit, QPlainTextEdit, QTextEdit {
border: 1px solid {color:border};
border-radius: 0.3em;
border-radius: 0.2em;
background: {color:bg-inputs};
padding: 0.1em;
}
@ -226,7 +231,7 @@ QMenu::separator {
/* Combobox */
QComboBox {
border: 1px solid {color:border};
border-radius: 3px;
border-radius: 0.2em;
padding: 1px 3px 1px 3px;
background: {color:bg-inputs};
}
@ -474,7 +479,6 @@ QAbstractItemView:disabled{
}
QAbstractItemView::item:hover {
/* color: {color:bg-view-hover}; */
background: {color:bg-view-hover};
}
@ -743,7 +747,7 @@ OverlayMessageWidget QWidget {
#TypeEditor, #ToolEditor, #NameEditor, #NumberEditor {
background: transparent;
border-radius: 0.3em;
border-radius: 0.2em;
}
#TypeEditor:focus, #ToolEditor:focus, #NameEditor:focus, #NumberEditor:focus {
@ -860,7 +864,13 @@ OverlayMessageWidget QWidget {
background: {color:bg-view-hover};
}
/* New Create/Publish UI */
/* Publisher UI (Create/Publish) */
#PublishWindow QAbstractSpinBox, QLineEdit, QPlainTextEdit, QTextEdit {
padding: 1px;
}
#PublishWindow QComboBox {
padding: 1px 1px 1px 0.2em;
}
PublisherTabsWidget {
background: {color:publisher:tab-bg};
}
@ -944,6 +954,7 @@ PixmapButton:disabled {
border-top-left-radius: 0px;
padding-top: 0.5em;
padding-bottom: 0.5em;
width: 0.5em;
}
#VariantInput[state="new"], #VariantInput[state="new"]:focus, #VariantInput[state="new"]:hover {
border-color: {color:publisher:success};
@ -1072,7 +1083,7 @@ ValidationArtistMessage QLabel {
#AssetNameInputWidget {
background: {color:bg-inputs};
border: 1px solid {color:border};
border-radius: 0.3em;
border-radius: 0.2em;
}
#AssetNameInputWidget QWidget {
@ -1465,6 +1476,12 @@ CreateNextPageOverlay {
}
/* Attribute Definition widgets */
AttributeDefinitionsWidget QAbstractSpinBox, QLineEdit, QPlainTextEdit, QTextEdit {
padding: 1px;
}
AttributeDefinitionsWidget QComboBox {
padding: 1px 1px 1px 0.2em;
}
InViewButton, InViewButton:disabled {
background: transparent;
}

View file

@ -1,4 +1,3 @@
import uuid
import copy
from qtpy import QtWidgets, QtCore
@ -126,7 +125,7 @@ class AttributeDefinitionsWidget(QtWidgets.QWidget):
row = 0
for attr_def in attr_defs:
if not isinstance(attr_def, UIDef):
if attr_def.is_value_def:
if attr_def.key in self._current_keys:
raise KeyError(
"Duplicated key \"{}\"".format(attr_def.key))
@ -144,11 +143,16 @@ class AttributeDefinitionsWidget(QtWidgets.QWidget):
col_num = 2 - expand_cols
if attr_def.label:
if attr_def.is_value_def and attr_def.label:
label_widget = QtWidgets.QLabel(attr_def.label, self)
tooltip = attr_def.tooltip
if tooltip:
label_widget.setToolTip(tooltip)
if attr_def.is_label_horizontal:
label_widget.setAlignment(
QtCore.Qt.AlignRight
| QtCore.Qt.AlignVCenter
)
layout.addWidget(
label_widget, row, 0, 1, expand_cols
)

View file

@ -123,7 +123,7 @@ class BaseRepresentationModel(object):
self.remote_provider = remote_provider
class SubsetsModel(TreeModel, BaseRepresentationModel):
class SubsetsModel(BaseRepresentationModel, TreeModel):
doc_fetched = QtCore.Signal()
refreshed = QtCore.Signal(bool)

View file

@ -2,7 +2,7 @@ from qtpy import QtCore, QtGui
# ID of context item in instance view
CONTEXT_ID = "context"
CONTEXT_LABEL = "Options"
CONTEXT_LABEL = "Context"
# Not showed anywhere - used as identifier
CONTEXT_GROUP = "__ContextGroup__"
@ -15,6 +15,9 @@ VARIANT_TOOLTIP = (
"\nnumerical characters (0-9) dot (\".\") or underscore (\"_\")."
)
INPUTS_LAYOUT_HSPACING = 4
INPUTS_LAYOUT_VSPACING = 2
# Roles for instance views
INSTANCE_ID_ROLE = QtCore.Qt.UserRole + 1
SORT_VALUE_ROLE = QtCore.Qt.UserRole + 2

View file

@ -163,7 +163,7 @@ class AssetDocsCache:
return copy.deepcopy(self._full_asset_docs_by_name[asset_name])
class PublishReport:
class PublishReportMaker:
"""Report for single publishing process.
Report keeps current state of publishing and currently processed plugin.
@ -784,6 +784,13 @@ class PublishValidationErrors:
# Make sure the cached report is cleared
plugin_id = self._plugins_proxy.get_plugin_id(plugin)
if not error.title:
if hasattr(plugin, "label") and plugin.label:
plugin_label = plugin.label
else:
plugin_label = plugin.__name__
error.title = plugin_label
self._error_items.append(
ValidationErrorItem.from_result(plugin_id, error, instance)
)
@ -1674,7 +1681,7 @@ class PublisherController(BasePublisherController):
# pyblish.api.Context
self._publish_context = None
# Pyblish report
self._publish_report = PublishReport(self)
self._publish_report = PublishReportMaker(self)
# Store exceptions of validation error
self._publish_validation_errors = PublishValidationErrors()

View file

@ -211,6 +211,10 @@ class AssetsDialog(QtWidgets.QDialog):
layout.addWidget(asset_view, 1)
layout.addLayout(btns_layout, 0)
controller.event_system.add_callback(
"controller.reset.finished", self._on_controller_reset
)
asset_view.double_clicked.connect(self._on_ok_clicked)
filter_input.textChanged.connect(self._on_filter_change)
ok_btn.clicked.connect(self._on_ok_clicked)
@ -245,6 +249,10 @@ class AssetsDialog(QtWidgets.QDialog):
new_pos.setY(new_pos.y() - int(self.height() / 2))
self.move(new_pos)
def _on_controller_reset(self):
# Change reset enabled so model is reset on show event
self._soft_reset_enabled = True
def showEvent(self, event):
"""Refresh asset model on show."""
super(AssetsDialog, self).showEvent(event)

View file

@ -9,7 +9,7 @@ Only one item can be selected at a time.
```
<i> : Icon. Can have Warning icon when context is not right
Options
Context
<Group 1>
<i> <Instance 1> [x]
<i> <Instance 2> [x]
@ -202,7 +202,7 @@ class ConvertorItemsGroupWidget(BaseGroupWidget):
class InstanceGroupWidget(BaseGroupWidget):
"""Widget wrapping instances under group."""
active_changed = QtCore.Signal()
active_changed = QtCore.Signal(str, str, bool)
def __init__(self, group_icons, *args, **kwargs):
super(InstanceGroupWidget, self).__init__(*args, **kwargs)
@ -253,13 +253,16 @@ class InstanceGroupWidget(BaseGroupWidget):
instance, group_icon, self
)
widget.selected.connect(self._on_widget_selection)
widget.active_changed.connect(self.active_changed)
widget.active_changed.connect(self._on_active_changed)
self._widgets_by_id[instance.id] = widget
self._content_layout.insertWidget(widget_idx, widget)
widget_idx += 1
self._update_ordered_item_ids()
def _on_active_changed(self, instance_id, value):
self.active_changed.emit(self.group_name, instance_id, value)
class CardWidget(BaseClickableFrame):
"""Clickable card used as bigger button."""
@ -332,7 +335,7 @@ class ContextCardWidget(CardWidget):
icon_layout.addWidget(icon_widget)
layout = QtWidgets.QHBoxLayout(self)
layout.setContentsMargins(0, 5, 10, 5)
layout.setContentsMargins(0, 2, 10, 2)
layout.addLayout(icon_layout, 0)
layout.addWidget(label_widget, 1)
@ -363,7 +366,7 @@ class ConvertorItemCardWidget(CardWidget):
icon_layout.addWidget(icon_widget)
layout = QtWidgets.QHBoxLayout(self)
layout.setContentsMargins(0, 5, 10, 5)
layout.setContentsMargins(0, 2, 10, 2)
layout.addLayout(icon_layout, 0)
layout.addWidget(label_widget, 1)
@ -377,7 +380,7 @@ class ConvertorItemCardWidget(CardWidget):
class InstanceCardWidget(CardWidget):
"""Card widget representing instance."""
active_changed = QtCore.Signal()
active_changed = QtCore.Signal(str, bool)
def __init__(self, instance, group_icon, parent):
super(InstanceCardWidget, self).__init__(parent)
@ -424,7 +427,7 @@ class InstanceCardWidget(CardWidget):
top_layout.addWidget(expand_btn, 0)
layout = QtWidgets.QHBoxLayout(self)
layout.setContentsMargins(0, 5, 10, 5)
layout.setContentsMargins(0, 2, 10, 2)
layout.addLayout(top_layout)
layout.addWidget(detail_widget)
@ -445,6 +448,10 @@ class InstanceCardWidget(CardWidget):
def set_active_toggle_enabled(self, enabled):
self._active_checkbox.setEnabled(enabled)
@property
def is_active(self):
return self._active_checkbox.isChecked()
def set_active(self, new_value):
"""Set instance as active."""
checkbox_value = self._active_checkbox.isChecked()
@ -515,7 +522,7 @@ class InstanceCardWidget(CardWidget):
return
self.instance["active"] = new_value
self.active_changed.emit()
self.active_changed.emit(self._id, new_value)
def _on_expend_clicked(self):
self._set_expanded()
@ -584,6 +591,45 @@ class InstanceCardView(AbstractInstanceView):
result.setWidth(width)
return result
def _toggle_instances(self, value):
if not self._active_toggle_enabled:
return
widgets = self._get_selected_widgets()
changed = False
for widget in widgets:
if not isinstance(widget, InstanceCardWidget):
continue
is_active = widget.is_active
if value == -1:
widget.set_active(not is_active)
changed = True
continue
_value = bool(value)
if is_active is not _value:
widget.set_active(_value)
changed = True
if changed:
self.active_changed.emit()
def keyPressEvent(self, event):
if event.key() == QtCore.Qt.Key_Space:
self._toggle_instances(-1)
return True
elif event.key() == QtCore.Qt.Key_Backspace:
self._toggle_instances(0)
return True
elif event.key() == QtCore.Qt.Key_Return:
self._toggle_instances(1)
return True
return super(InstanceCardView, self).keyPressEvent(event)
def _get_selected_widgets(self):
output = []
if (
@ -742,7 +788,15 @@ class InstanceCardView(AbstractInstanceView):
for widget in self._widgets_by_group.values():
widget.update_instance_values()
def _on_active_changed(self):
def _on_active_changed(self, group_name, instance_id, value):
group_widget = self._widgets_by_group[group_name]
instance_widget = group_widget.get_widget_by_item_id(instance_id)
if instance_widget.is_selected:
for widget in self._get_selected_widgets():
if isinstance(widget, InstanceCardWidget):
widget.set_active(value)
else:
self._select_item_clear(instance_id, group_name, instance_widget)
self.active_changed.emit()
def _on_widget_selection(self, instance_id, group_name, selection_type):

View file

@ -22,6 +22,8 @@ from ..constants import (
CREATOR_IDENTIFIER_ROLE,
CREATOR_THUMBNAIL_ENABLED_ROLE,
CREATOR_SORT_ROLE,
INPUTS_LAYOUT_HSPACING,
INPUTS_LAYOUT_VSPACING,
)
SEPARATORS = ("---separator---", "---")
@ -198,6 +200,8 @@ class CreateWidget(QtWidgets.QWidget):
variant_subset_layout = QtWidgets.QFormLayout(variant_subset_widget)
variant_subset_layout.setContentsMargins(0, 0, 0, 0)
variant_subset_layout.setHorizontalSpacing(INPUTS_LAYOUT_HSPACING)
variant_subset_layout.setVerticalSpacing(INPUTS_LAYOUT_VSPACING)
variant_subset_layout.addRow("Variant", variant_widget)
variant_subset_layout.addRow("Subset", subset_name_input)
@ -282,6 +286,9 @@ class CreateWidget(QtWidgets.QWidget):
thumbnail_widget.thumbnail_created.connect(self._on_thumbnail_create)
thumbnail_widget.thumbnail_cleared.connect(self._on_thumbnail_clear)
controller.event_system.add_callback(
"main.window.closed", self._on_main_window_close
)
controller.event_system.add_callback(
"plugins.refresh.finished", self._on_plugins_refresh
)
@ -316,6 +323,10 @@ class CreateWidget(QtWidgets.QWidget):
self._first_show = True
self._last_thumbnail_path = None
self._last_current_context_asset = None
self._last_current_context_task = None
self._use_current_context = True
@property
def current_asset_name(self):
return self._controller.current_asset_name
@ -356,12 +367,39 @@ class CreateWidget(QtWidgets.QWidget):
if check_prereq:
self._invalidate_prereq()
def _on_main_window_close(self):
"""Publisher window was closed."""
# Use current context on next refresh
self._use_current_context = True
def refresh(self):
current_asset_name = self._controller.current_asset_name
current_task_name = self._controller.current_task_name
# Get context before refresh to keep selection of asset and
# task widgets
asset_name = self._get_asset_name()
task_name = self._get_task_name()
# Replace by current context if last loaded context was
# 'current context' before reset
if (
self._use_current_context
or (
self._last_current_context_asset
and asset_name == self._last_current_context_asset
and task_name == self._last_current_context_task
)
):
asset_name = current_asset_name
task_name = current_task_name
# Store values for future refresh
self._last_current_context_asset = current_asset_name
self._last_current_context_task = current_task_name
self._use_current_context = False
self._prereq_available = False
# Disable context widget so refresh of asset will use context asset
@ -398,7 +436,10 @@ class CreateWidget(QtWidgets.QWidget):
prereq_available = False
creator_btn_tooltips.append("Creator is not selected")
if self._context_change_is_enabled() and self._asset_name is None:
if (
self._context_change_is_enabled()
and self._get_asset_name() is None
):
# QUESTION how to handle invalid asset?
prereq_available = False
creator_btn_tooltips.append("Context is not selected")

View file

@ -11,7 +11,7 @@ selection can be enabled disabled using checkbox or keyboard key presses:
- Backspace - disable selection
```
|- Options
|- Context
|- <Group 1> [x]
| |- <Instance 1> [x]
| |- <Instance 2> [x]
@ -486,6 +486,9 @@ class InstanceListView(AbstractInstanceView):
group_widget.set_expanded(expanded)
def _on_toggle_request(self, toggle):
if not self._active_toggle_enabled:
return
selected_instance_ids = self._instance_view.get_selected_instance_ids()
if toggle == -1:
active = None
@ -1039,7 +1042,8 @@ class InstanceListView(AbstractInstanceView):
proxy_index = proxy_model.mapFromSource(select_indexes[0])
selection_model.setCurrentIndex(
proxy_index,
selection_model.ClearAndSelect | selection_model.Rows
QtCore.QItemSelectionModel.ClearAndSelect
| QtCore.QItemSelectionModel.Rows
)
return

View file

@ -2,6 +2,8 @@ from qtpy import QtWidgets, QtCore
from openpype.tools.attribute_defs import create_widget_for_attr_def
from ..constants import INPUTS_LAYOUT_HSPACING, INPUTS_LAYOUT_VSPACING
class PreCreateWidget(QtWidgets.QWidget):
def __init__(self, parent):
@ -81,6 +83,8 @@ class AttributesWidget(QtWidgets.QWidget):
layout = QtWidgets.QGridLayout(self)
layout.setContentsMargins(0, 0, 0, 0)
layout.setHorizontalSpacing(INPUTS_LAYOUT_HSPACING)
layout.setVerticalSpacing(INPUTS_LAYOUT_VSPACING)
self._layout = layout
@ -117,8 +121,16 @@ class AttributesWidget(QtWidgets.QWidget):
col_num = 2 - expand_cols
if attr_def.label:
if attr_def.is_value_def and attr_def.label:
label_widget = QtWidgets.QLabel(attr_def.label, self)
tooltip = attr_def.tooltip
if tooltip:
label_widget.setToolTip(tooltip)
if attr_def.is_label_horizontal:
label_widget.setAlignment(
QtCore.Qt.AlignRight
| QtCore.Qt.AlignVCenter
)
self._layout.addWidget(
label_widget, row, 0, 1, expand_cols
)

View file

@ -9,7 +9,7 @@ import collections
from qtpy import QtWidgets, QtCore, QtGui
import qtawesome
from openpype.lib.attribute_definitions import UnknownDef, UIDef
from openpype.lib.attribute_definitions import UnknownDef
from openpype.tools.attribute_defs import create_widget_for_attr_def
from openpype.tools import resources
from openpype.tools.flickcharm import FlickCharm
@ -36,6 +36,8 @@ from .icons import (
from ..constants import (
VARIANT_TOOLTIP,
ResetKeySequence,
INPUTS_LAYOUT_HSPACING,
INPUTS_LAYOUT_VSPACING,
)
@ -1098,6 +1100,8 @@ class GlobalAttrsWidget(QtWidgets.QWidget):
btns_layout.addWidget(cancel_btn)
main_layout = QtWidgets.QFormLayout(self)
main_layout.setHorizontalSpacing(INPUTS_LAYOUT_HSPACING)
main_layout.setVerticalSpacing(INPUTS_LAYOUT_VSPACING)
main_layout.addRow("Variant", variant_input)
main_layout.addRow("Asset", asset_value_widget)
main_layout.addRow("Task", task_value_widget)
@ -1346,6 +1350,8 @@ class CreatorAttrsWidget(QtWidgets.QWidget):
content_layout.setColumnStretch(0, 0)
content_layout.setColumnStretch(1, 1)
content_layout.setAlignment(QtCore.Qt.AlignTop)
content_layout.setHorizontalSpacing(INPUTS_LAYOUT_HSPACING)
content_layout.setVerticalSpacing(INPUTS_LAYOUT_VSPACING)
row = 0
for attr_def, attr_instances, values in result:
@ -1371,9 +1377,19 @@ class CreatorAttrsWidget(QtWidgets.QWidget):
col_num = 2 - expand_cols
label = attr_def.label or attr_def.key
label = None
if attr_def.is_value_def:
label = attr_def.label or attr_def.key
if label:
label_widget = QtWidgets.QLabel(label, self)
tooltip = attr_def.tooltip
if tooltip:
label_widget.setToolTip(tooltip)
if attr_def.is_label_horizontal:
label_widget.setAlignment(
QtCore.Qt.AlignRight
| QtCore.Qt.AlignVCenter
)
content_layout.addWidget(
label_widget, row, 0, 1, expand_cols
)
@ -1474,6 +1490,8 @@ class PublishPluginAttrsWidget(QtWidgets.QWidget):
attr_def_layout = QtWidgets.QGridLayout(attr_def_widget)
attr_def_layout.setColumnStretch(0, 0)
attr_def_layout.setColumnStretch(1, 1)
attr_def_layout.setHorizontalSpacing(INPUTS_LAYOUT_HSPACING)
attr_def_layout.setVerticalSpacing(INPUTS_LAYOUT_VSPACING)
content_layout = QtWidgets.QVBoxLayout(content_widget)
content_layout.addWidget(attr_def_widget, 0)
@ -1501,12 +1519,19 @@ class PublishPluginAttrsWidget(QtWidgets.QWidget):
expand_cols = 1
col_num = 2 - expand_cols
label = attr_def.label or attr_def.key
label = None
if attr_def.is_value_def:
label = attr_def.label or attr_def.key
if label:
label_widget = QtWidgets.QLabel(label, content_widget)
tooltip = attr_def.tooltip
if tooltip:
label_widget.setToolTip(tooltip)
if attr_def.is_label_horizontal:
label_widget.setAlignment(
QtCore.Qt.AlignRight
| QtCore.Qt.AlignVCenter
)
attr_def_layout.addWidget(
label_widget, row, 0, 1, expand_cols
)
@ -1517,7 +1542,7 @@ class PublishPluginAttrsWidget(QtWidgets.QWidget):
)
row += 1
if isinstance(attr_def, UIDef):
if not attr_def.is_value_def:
continue
widget.value_changed.connect(self._input_value_changed)

View file

@ -46,6 +46,8 @@ class PublisherWindow(QtWidgets.QDialog):
def __init__(self, parent=None, controller=None, reset_on_show=None):
super(PublisherWindow, self).__init__(parent)
self.setObjectName("PublishWindow")
self.setWindowTitle("OpenPype publisher")
icon = QtGui.QIcon(resources.get_openpype_icon_filepath())
@ -284,6 +286,9 @@ class PublisherWindow(QtWidgets.QDialog):
controller.event_system.add_callback(
"publish.has_validated.changed", self._on_publish_validated_change
)
controller.event_system.add_callback(
"publish.finished.changed", self._on_publish_finished_change
)
controller.event_system.add_callback(
"publish.process.stopped", self._on_publish_stop
)
@ -400,8 +405,12 @@ class PublisherWindow(QtWidgets.QDialog):
# TODO capture changes and ask user if wants to save changes on close
if not self._controller.host_context_has_changed:
self._save_changes(False)
self._comment_input.setText("") # clear comment
self._reset_on_show = True
self._controller.clear_thumbnail_temp_dir_path()
# Trigger custom event that should be captured only in UI
# - backend (controller) must not be dependent on this event topic!!!
self._controller.event_system.emit("main.window.closed", {}, "window")
super(PublisherWindow, self).closeEvent(event)
def leaveEvent(self, event):
@ -433,15 +442,24 @@ class PublisherWindow(QtWidgets.QDialog):
event.accept()
return
if event.matches(QtGui.QKeySequence.Save):
save_match = event.matches(QtGui.QKeySequence.Save)
if save_match == QtGui.QKeySequence.ExactMatch:
if not self._controller.publish_has_started:
self._save_changes(True)
event.accept()
return
if ResetKeySequence.matches(
QtGui.QKeySequence(event.key() | event.modifiers())
):
# PySide6 Support
if hasattr(event, "keyCombination"):
reset_match_result = ResetKeySequence.matches(
QtGui.QKeySequence(event.keyCombination())
)
else:
reset_match_result = ResetKeySequence.matches(
QtGui.QKeySequence(event.modifiers() | event.key())
)
if reset_match_result == QtGui.QKeySequence.ExactMatch:
if not self.controller.publish_is_running:
self.reset()
event.accept()
@ -777,6 +795,11 @@ class PublisherWindow(QtWidgets.QDialog):
if event["value"]:
self._validate_btn.setEnabled(False)
def _on_publish_finished_change(self, event):
if event["value"]:
# Successful publish, remove comment from UI
self._comment_input.setText("")
def _on_publish_stop(self):
self._set_publish_overlay_visibility(False)
self._reset_btn.setEnabled(True)

View file

@ -1050,8 +1050,8 @@ class ProjectPushItemProcess:
repre_format_data["ext"] = ext[1:]
break
tmp_result = anatomy.format(formatting_data)
folder_path = tmp_result[template_name]["folder"]
template_obj = anatomy.templates_obj[template_name]["folder"]
folder_path = template_obj.format_strict(formatting_data)
repre_context = folder_path.used_values
folder_path_rootless = folder_path.rootless
repre_filepaths = []

View file

@ -199,90 +199,103 @@ class InventoryModel(TreeModel):
"""Refresh the model"""
host = registered_host()
if not items: # for debugging or testing, injecting items from outside
# for debugging or testing, injecting items from outside
if items is None:
if isinstance(host, ILoadHost):
items = host.get_containers()
else:
elif hasattr(host, "ls"):
items = host.ls()
else:
items = []
self.clear()
if self._hierarchy_view and selected:
if not hasattr(host.pipeline, "update_hierarchy"):
# If host doesn't support hierarchical containers, then
# cherry-pick only.
self.add_items((item for item in items
if item["objectName"] in selected))
return
# Update hierarchy info for all containers
items_by_name = {item["objectName"]: item
for item in host.pipeline.update_hierarchy(items)}
selected_items = set()
def walk_children(names):
"""Select containers and extend to chlid containers"""
for name in [n for n in names if n not in selected_items]:
selected_items.add(name)
item = items_by_name[name]
yield item
for child in walk_children(item["children"]):
yield child
items = list(walk_children(selected)) # Cherry-picked and extended
# Cut unselected upstream containers
for item in items:
if not item.get("parent") in selected_items:
# Parent not in selection, this is root item.
item["parent"] = None
parents = [self._root_item]
# The length of `items` array is the maximum depth that a
# hierarchy could be.
# Take this as an easiest way to prevent looping forever.
maximum_loop = len(items)
count = 0
while items:
if count > maximum_loop:
self.log.warning("Maximum loop count reached, possible "
"missing parent node.")
break
_parents = list()
for parent in parents:
_unparented = list()
def _children():
"""Child item provider"""
for item in items:
if item.get("parent") == parent.get("objectName"):
# (NOTE)
# Since `self._root_node` has no "objectName"
# entry, it will be paired with root item if
# the value of key "parent" is None, or not
# having the key.
yield item
else:
# Not current parent's child, try next
_unparented.append(item)
self.add_items(_children(), parent)
items[:] = _unparented
# Parents of next level
for group_node in parent.children():
_parents += group_node.children()
parents[:] = _parents
count += 1
else:
if not selected or not self._hierarchy_view:
self.add_items(items)
return
if (
not hasattr(host, "pipeline")
or not hasattr(host.pipeline, "update_hierarchy")
):
# If host doesn't support hierarchical containers, then
# cherry-pick only.
self.add_items((
item
for item in items
if item["objectName"] in selected
))
return
# TODO find out what this part does. Function 'update_hierarchy' is
# available only in 'blender' at this moment.
# Update hierarchy info for all containers
items_by_name = {
item["objectName"]: item
for item in host.pipeline.update_hierarchy(items)
}
selected_items = set()
def walk_children(names):
"""Select containers and extend to chlid containers"""
for name in [n for n in names if n not in selected_items]:
selected_items.add(name)
item = items_by_name[name]
yield item
for child in walk_children(item["children"]):
yield child
items = list(walk_children(selected)) # Cherry-picked and extended
# Cut unselected upstream containers
for item in items:
if not item.get("parent") in selected_items:
# Parent not in selection, this is root item.
item["parent"] = None
parents = [self._root_item]
# The length of `items` array is the maximum depth that a
# hierarchy could be.
# Take this as an easiest way to prevent looping forever.
maximum_loop = len(items)
count = 0
while items:
if count > maximum_loop:
self.log.warning("Maximum loop count reached, possible "
"missing parent node.")
break
_parents = list()
for parent in parents:
_unparented = list()
def _children():
"""Child item provider"""
for item in items:
if item.get("parent") == parent.get("objectName"):
# (NOTE)
# Since `self._root_node` has no "objectName"
# entry, it will be paired with root item if
# the value of key "parent" is None, or not
# having the key.
yield item
else:
# Not current parent's child, try next
_unparented.append(item)
self.add_items(_children(), parent)
items[:] = _unparented
# Parents of next level
for group_node in parent.children():
_parents += group_node.children()
parents[:] = _parents
count += 1
def add_items(self, items, parent=None):
"""Add the items to the model.

View file

@ -107,8 +107,8 @@ class SceneInventoryWindow(QtWidgets.QDialog):
view.hierarchy_view_changed.connect(
self._on_hierarchy_view_change
)
view.data_changed.connect(self.refresh)
refresh_button.clicked.connect(self.refresh)
view.data_changed.connect(self._on_refresh_request)
refresh_button.clicked.connect(self._on_refresh_request)
update_all_button.clicked.connect(self._on_update_all)
self._update_all_button = update_all_button
@ -139,6 +139,11 @@ class SceneInventoryWindow(QtWidgets.QDialog):
"""
def _on_refresh_request(self):
"""Signal callback to trigger 'refresh' without any arguments."""
self.refresh()
def refresh(self, items=None):
with preserve_expanded_rows(
tree_view=self._view,

View file

@ -47,8 +47,8 @@ class TextureCopy:
"hierarchy": hierarchy
}
anatomy = Anatomy(project_name)
anatomy_filled = anatomy.format(template_data)
return anatomy_filled['texture']['path']
template_obj = anatomy.templates_obj["texture"]["path"]
return template_obj.format_strict(template_data)
def _get_version(self, path):
versions = [0]

View file

@ -1,6 +1,7 @@
from .widgets import (
FocusSpinBox,
FocusDoubleSpinBox,
ComboBox,
CustomTextComboBox,
PlaceholderLineEdit,
BaseClickableFrame,
@ -38,6 +39,7 @@ from .overlay_messages import (
__all__ = (
"FocusSpinBox",
"FocusDoubleSpinBox",
"ComboBox",
"CustomTextComboBox",
"PlaceholderLineEdit",
"BaseClickableFrame",

Some files were not shown because too many files have changed in this diff Show more