diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md
new file mode 100644
index 0000000000..6ed6ae428c
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/bug_report.md
@@ -0,0 +1,31 @@
+---
+name: Bug report
+about: Create a report to help us improve
+title: ''
+labels: bug
+assignees: ''
+
+---
+
+**Describe the bug**
+A clear and concise description of what the bug is.
+
+**To Reproduce**
+Steps to reproduce the behavior:
+1. Go to '...'
+2. Click on '....'
+3. Scroll down to '....'
+4. See error
+
+**Expected behavior**
+A clear and concise description of what you expected to happen.
+
+**Screenshots**
+If applicable, add screenshots to help explain your problem.
+
+**Desktop (please complete the following information):**
+ - OS: [e.g. windows]
+ - Host: [e.g. Maya, Nuke, Houdini]
+
+**Additional context**
+Add any other context about the problem here.
diff --git a/.github/ISSUE_TEMPLATE/feature_request.md b/.github/ISSUE_TEMPLATE/feature_request.md
new file mode 100644
index 0000000000..11fc491ef1
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/feature_request.md
@@ -0,0 +1,20 @@
+---
+name: Feature request
+about: Suggest an idea for this project
+title: ''
+labels: enhancement
+assignees: ''
+
+---
+
+**Is your feature request related to a problem? Please describe.**
+A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]
+
+**Describe the solution you'd like**
+A clear and concise description of what you want to happen.
+
+**Describe alternatives you've considered**
+A clear and concise description of any alternative solutions or features you've considered.
+
+**Additional context**
+Add any other context or screenshots about the feature request here.
diff --git a/pype/ftrack/actions/action_client_review_sort.py b/pype/ftrack/actions/action_client_review_sort.py
index bd8d3653d9..b98cf34756 100644
--- a/pype/ftrack/actions/action_client_review_sort.py
+++ b/pype/ftrack/actions/action_client_review_sort.py
@@ -1,9 +1,44 @@
-import sys
-import argparse
-import logging
-
-import ftrack_api
 from pype.ftrack import BaseAction
+try:
+    from functools import cmp_to_key
+except Exception:
+    cmp_to_key = None
+
+
+def existence_comparison(item_a, item_b):
+    if not item_a and not item_b:
+        return 0
+    if not item_a:
+        return 1
+    if not item_b:
+        return -1
+    return None
+
+
+def task_name_sorter(item_a, item_b):
+    asset_version_a = item_a["asset_version"]
+    asset_version_b = item_b["asset_version"]
+    asset_version_comp = existence_comparison(asset_version_a, asset_version_b)
+    if asset_version_comp is not None:
+        return asset_version_comp
+
+    task_a = asset_version_a["task"]
+    task_b = asset_version_b["task"]
+    task_comp = existence_comparison(task_a, task_b)
+    if task_comp is not None:
+        return task_comp
+
+    if task_a["name"] > task_b["name"]:
+        return 1
+    if task_a["name"] < task_b["name"]:
+        return -1
+    return 0
+
+
+if cmp_to_key:
+    task_name_sorter = cmp_to_key(task_name_sorter)
+task_name_kwarg_key = "key" if cmp_to_key else "cmp"
+task_name_sort_kwargs = {task_name_kwarg_key: task_name_sorter}


 class ClientReviewSort(BaseAction):
@@ -24,7 +59,6 @@ class ClientReviewSort(BaseAction):
         return True

     def launch(self, session, entities, event):
-
         entity = entities[0]

         # Get all objects from Review Session and all 'sort order' possibilities
@@ -36,11 +70,8 @@ class ClientReviewSort(BaseAction):

         # Sort criteria
         obj_list = sorted(obj_list, key=lambda k: k['version'])
-        obj_list = sorted(
-            obj_list, key=lambda k: k['asset_version']['task']['name']
-        )
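+        # Compatibility note: with `functools.cmp_to_key` available the
+        # comparator above is wrapped and passed as `key=`; on Python 2,
+        # where the fallback leaves `cmp_to_key` as None, the raw
+        # comparator is passed as `cmp=` through `task_name_sort_kwargs`.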
+        obj_list.sort(**task_name_sort_kwargs)
         obj_list = sorted(obj_list, key=lambda k: k['name'])
-
         # Set 'sort order' to sorted list, so they are sorted in Ftrack also
         for i in range(len(obj_list)):
             obj_list[i]['sort_order'] = sort_order_list[i]
@@ -57,42 +88,3 @@ def register(session, plugins_presets={}):
     '''Register action. Called when used as an event plugin.'''

     ClientReviewSort(session, plugins_presets).register()
-
-
-def main(arguments=None):
-    '''Set up logging and register action.'''
-    if arguments is None:
-        arguments = []
-
-    parser = argparse.ArgumentParser()
-    # Allow setting of logging level from arguments.
-    loggingLevels = {}
-    for level in (
-        logging.NOTSET, logging.DEBUG, logging.INFO, logging.WARNING,
-        logging.ERROR, logging.CRITICAL
-    ):
-        loggingLevels[logging.getLevelName(level).lower()] = level
-
-    parser.add_argument(
-        '-v', '--verbosity',
-        help='Set the logging output verbosity.',
-        choices=loggingLevels.keys(),
-        default='info'
-    )
-    namespace = parser.parse_args(arguments)
-
-    # Set up basic logging
-    logging.basicConfig(level=loggingLevels[namespace.verbosity])
-
-    session = ftrack_api.Session()
-    register(session)
-
-    # Wait for events
-    logging.info(
-        'Registered actions and listening for events. Use Ctrl-C to abort.'
-    )
-    session.event_hub.wait()
-
-
-if __name__ == '__main__':
-    raise SystemExit(main(sys.argv[1:]))
diff --git a/pype/ftrack/lib/avalon_sync.py b/pype/ftrack/lib/avalon_sync.py
index 6f928914bf..474c70bd26 100644
--- a/pype/ftrack/lib/avalon_sync.py
+++ b/pype/ftrack/lib/avalon_sync.py
@@ -806,12 +806,12 @@ class SyncEntitiesFactory:
     def set_hierarchical_attribute(self, hier_attrs, sync_ids):
         # collect all hierarchical attribute keys
         # and prepare default values to project
-        attribute_names = []
-        attribute_ids = []
+        attributes_by_key = {}
+        attribute_key_by_id = {}
         for attr in hier_attrs:
             key = attr["key"]
-            attribute_ids.append(attr["id"])
-            attribute_names.append(key)
+            attribute_key_by_id[attr["id"]] = key
+            attributes_by_key[key] = attr

             store_key = "hier_attrs"
             if key.startswith("avalon_"):
@@ -824,11 +824,11 @@ class SyncEntitiesFactory:
         # Prepare dict with all hier keys and None values
         prepare_dict = {}
         prepare_dict_avalon = {}
-        for attr in attribute_names:
-            if attr.startswith("avalon_"):
-                prepare_dict_avalon[attr] = None
+        for key in attributes_by_key.keys():
+            if key.startswith("avalon_"):
+                prepare_dict_avalon[key] = None
             else:
-                prepare_dict[attr] = None
+                prepare_dict[key] = None

         for id, entity_dict in self.entities_dict.items():
             # Skip project because has stored defaults at the moment
@@ -842,32 +842,32 @@ class SyncEntitiesFactory:
         entity_ids_joined = ", ".join([
             "\"{}\"".format(id) for id in sync_ids
         ])
-        attributes_joined = ", ".join([
-            "\"{}\"".format(name) for name in attribute_ids
-        ])
-        call_expr = [{
-            "action": "query",
-            "expression": (
-                "select value, entity_id from CustomAttributeValue "
-                "where entity_id in ({}) and configuration_id in ({})"
-            ).format(entity_ids_joined, attributes_joined)
-        }]
-        if hasattr(self.session, "call"):
-            [values] = self.session.call(call_expr)
-        else:
-            [values] = self.session._call(call_expr)

         avalon_hier = []
-        for value in values["data"]:
-            if value["value"] is None:
-                continue
-            entity_id = value["entity_id"]
-            key = value["configuration"]["key"]
-            store_key = "hier_attrs"
-            if key.startswith("avalon_"):
-                store_key = "avalon_attrs"
-                avalon_hier.append(key)
-            self.entities_dict[entity_id][store_key][key] = value["value"]
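+        # Values are now queried once per attribute configuration and the
+        # attribute key is resolved locally through `attribute_key_by_id`,
+        # so only `value` and `entity_id` need to come back from the server.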
+        for configuration_id in attribute_key_by_id.keys():
+            call_expr = [{
+                "action": "query",
+                "expression": (
+                    "select value, entity_id from CustomAttributeValue "
+                    "where entity_id in ({}) and configuration_id is \"{}\""
+                ).format(entity_ids_joined, configuration_id)
+            }]
+            if hasattr(self.session, "call"):
+                [values] = self.session.call(call_expr)
+            else:
+                [values] = self.session._call(call_expr)
+
+            for value in values["data"]:
+                if value["value"] is None:
+                    continue
+                entity_id = value["entity_id"]
+                key = attribute_key_by_id[value["configuration_id"]]
+                if key.startswith("avalon_"):
+                    store_key = "avalon_attrs"
+                    avalon_hier.append(key)
+                else:
+                    store_key = "hier_attrs"
+                self.entities_dict[entity_id][store_key][key] = value["value"]

         # Get dictionary with not None hierarchical values to pull to childs
         top_id = self.ft_project_id
@@ -888,13 +888,14 @@ class SyncEntitiesFactory:
             hier_values, parent_id = hier_down_queue.get()
             for child_id in self.entities_dict[parent_id]["children"]:
                 _hier_values = hier_values.copy()
-                for name in attribute_names:
-                    store_key = "hier_attrs"
-                    if name.startswith("avalon_"):
+                for key in attributes_by_key.keys():
+                    if key.startswith("avalon_"):
                         store_key = "avalon_attrs"
-                    value = self.entities_dict[child_id][store_key][name]
+                    else:
+                        store_key = "hier_attrs"
+                    value = self.entities_dict[child_id][store_key][key]
                     if value is not None:
-                        _hier_values[name] = value
+                        _hier_values[key] = value

                 self.entities_dict[child_id]["hier_attrs"].update(_hier_values)
                 hier_down_queue.put((_hier_values, child_id))
diff --git a/pype/lib.py b/pype/lib.py
index 824d2e0f52..d3ccbc8589 100644
--- a/pype/lib.py
+++ b/pype/lib.py
@@ -2,6 +2,9 @@ import os
 import sys
 import types
 import re
+import uuid
+import json
+import collections
 import logging
 import itertools
 import contextlib
@@ -9,11 +12,10 @@ import subprocess
 import inspect
 from abc import ABCMeta, abstractmethod

+from avalon import io, pipeline
 import six
-
-from avalon import io
 import avalon.api
-import avalon
+from pypeapp import config

 log = logging.getLogger(__name__)
@@ -497,7 +499,6 @@ def filter_pyblish_plugins(plugins):
     `discover()` method.
     :type plugins: Dict
     """
-    from pypeapp import config
     from pyblish import api

     host = api.current_host()
@@ -556,7 +557,6 @@ def get_subsets(asset_name,
     Returns:
         dict: subsets with version and representaions in keys
     """
-    from avalon import io

     # query asset from db
     asset_io = io.find_one({"type": "asset", "name": asset_name})
@@ -632,7 +632,6 @@ class CustomNone:

     def __init__(self):
         """Create uuid as identifier for custom None."""
-        import uuid
         self.identifier = str(uuid.uuid4())

     def __bool__(self):
@@ -708,3 +707,623 @@ class PypeHook:

     @abstractmethod
     def execute(self, *args, **kwargs):
         pass
+
+
+def get_linked_assets(asset_entity):
+    """Return linked assets for `asset_entity`."""
+    # TODO implement
+    return []
+
+
+def map_subsets_by_family(subsets):
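+    """Group subset documents by their main family.
+
+    The main family is `data.family` when set, otherwise the first item
+    of `data.families`; subsets with neither are skipped.
+
+    Schematic example (minimal subset documents):
+        >>> grouped = map_subsets_by_family([
+        ...     {"data": {"family": "model"}},
+        ...     {"data": {"families": ["render", "review"]}},
+        ... ])
+        >>> sorted(grouped.keys())
+        ['model', 'render']
+    """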
+    subsets_by_family = collections.defaultdict(list)
+    for subset in subsets:
+        family = subset["data"].get("family")
+        if not family:
+            families = subset["data"].get("families")
+            if not families:
+                continue
+            family = families[0]
+
+        subsets_by_family[family].append(subset)
+    return subsets_by_family
+
+
+class BuildWorkfile:
+    """Wrapper for the build workfile process.
+
+    Loads representations for the current context by build presets. Build
+    presets are host related, since each host has its own loaders.
+    """
+
+    def process(self):
+        """Main method of this wrapper.
+
+        Building of the workfile is triggered here; post processing of the
+        loaded containers can be implemented on the returned list
+        if necessary.
+        """
+        containers = self.build_workfile()
+
+        return containers
+
+    def build_workfile(self):
+        """Prepare and load containers into the workfile.
+
+        Loads latest versions of the current and linked assets into the
+        workfile by logic stored in Workfile profiles from presets.
+        Profiles are set per host, filtered by current task name and used
+        by families.
+
+        Each family can specify representation names and loaders for
+        representations, and the first successfully loaded representation
+        is returned as a container.
+
+        At the end you'll get a list of loaded containers per asset.
+
+        loaded_containers [{
+            "asset_entity": <asset entity>,
+            "containers": [<container>, <container>, ...]
+        }, {
+            "asset_entity": <asset entity>,
+            "containers": [<container>, ...]
+        }, {
+            ...
+        }]
+        """
+        # Get current asset name and entity
+        current_asset_name = io.Session["AVALON_ASSET"]
+        current_asset_entity = io.find_one({
+            "type": "asset",
+            "name": current_asset_name
+        })
+
+        # Skip if asset was not found
+        if not current_asset_entity:
+            log.warning("Asset entity with name `{}` was not found".format(
+                current_asset_name
+            ))
+            return
+
+        # Prepare available loaders
+        loaders_by_name = {}
+        for loader in avalon.api.discover(avalon.api.Loader):
+            loader_name = loader.__name__
+            if loader_name in loaders_by_name:
+                raise KeyError(
+                    "Duplicated loader name {0}!".format(loader_name)
+                )
+            loaders_by_name[loader_name] = loader
+
+        # Skip if there are no loaders at all
+        if not loaders_by_name:
+            log.warning("There are no registered loaders.")
+            return
+
+        # Get current task name
+        current_task_name = io.Session["AVALON_TASK"]
+
+        # Load workfile presets for task
+        build_presets = self.get_build_presets(current_task_name)
+
+        # Skip if there are no presets for the task
+        if not build_presets:
+            log.warning(
+                "Current task `{}` does not have any loading preset.".format(
+                    current_task_name
+                )
+            )
+            return
+
+        # Get presets for loading current asset
+        current_context_profiles = build_presets.get("current_context")
+        # Get presets for loading linked assets
+        link_context_profiles = build_presets.get("linked_assets")
+        # Skip if both are missing
+        if not current_context_profiles and not link_context_profiles:
+            log.warning("Current task `{}` has empty loading preset.".format(
+                current_task_name
+            ))
+            return
+
+        elif not current_context_profiles:
+            log.warning((
+                "Current task `{}` doesn't have any loading"
+                " preset for its context."
+            ).format(current_task_name))
+
+        elif not link_context_profiles:
+            log.warning((
+                "Current task `{}` doesn't have any"
+                " loading preset for its linked assets."
+            ).format(current_task_name))
+
+        # Prepare assets to process by workfile presets
+        assets = []
+        current_asset_id = None
+        if current_context_profiles:
+            # Add current asset entity if preset has current context set
+            assets.append(current_asset_entity)
+            current_asset_id = current_asset_entity["_id"]
+
+        if link_context_profiles:
+            # Find and append linked assets if preset has set linked mapping
+            link_assets = get_linked_assets(current_asset_entity)
+            if link_assets:
+                assets.extend(link_assets)
+
+        # Skip if there are no assets. This can happen if only linked mapping
+        # is set and there are no links for this asset.
+        if not assets:
+            log.warning(
+                "Asset does not have linked assets. Nothing to process."
+            )
+            return
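+        # NOTE: the current asset is popped from `prepared_entities` and
+        # loaded with the current-context profiles first; whatever remains
+        # is treated as linked assets below.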
+        # Prepare entities from database for assets
+        prepared_entities = self._collect_last_version_repres(assets)
+
+        # Load containers by prepared entities and presets
+        loaded_containers = []
+        # - Current asset containers
+        if current_asset_id and current_asset_id in prepared_entities:
+            current_context_data = prepared_entities.pop(current_asset_id)
+            loaded_data = self.load_containers_by_asset_data(
+                current_context_data, current_context_profiles, loaders_by_name
+            )
+            if loaded_data:
+                loaded_containers.append(loaded_data)
+
+        # - Linked assets containers
+        for linked_asset_data in prepared_entities.values():
+            loaded_data = self.load_containers_by_asset_data(
+                linked_asset_data, link_context_profiles, loaders_by_name
+            )
+            if loaded_data:
+                loaded_containers.append(loaded_data)
+
+        # Return list of loaded containers
+        return loaded_containers
+
+    def get_build_presets(self, task_name):
+        """Return preset to build workfile for the entered task name.
+
+        Presets are loaded for the current project set in
+        io.Session["AVALON_PROJECT"], filtered by the registered host
+        and the entered task name.
+
+        :param task_name: Task name used for filtering build presets.
+        :type task_name: str
+        :return: Preset for the entered task.
+        :rtype: dict | None
+        """
+        host_name = avalon.api.registered_host().__name__.rsplit(".", 1)[-1]
+        presets = config.get_presets(io.Session["AVALON_PROJECT"])
+        # Get presets for host
+        build_presets = (
+            presets["plugins"]
+            .get(host_name, {})
+            .get("workfile_build")
+        )
+        if not build_presets:
+            return
+
+        task_name_low = task_name.lower()
+        per_task_preset = None
+        for preset in build_presets:
+            preset_tasks = preset.get("tasks") or []
+            preset_tasks_low = [task.lower() for task in preset_tasks]
+            if task_name_low in preset_tasks_low:
+                per_task_preset = preset
+                break
+
+        return per_task_preset
+
+    def _filter_build_profiles(self, build_profiles, loaders_by_name):
+        """Filter build profiles by loaders and prepare process data.
+
+        A valid profile must have "loaders", "families" and "repre_names"
+        keys with valid values.
+        - "loaders" expects a list of strings representing possible loaders.
+        - "families" expects a list of strings for filtering
+          by main subset family.
+        - "repre_names" expects a list of strings for filtering by
+          representation name.
+
+        Lowered "families" and "repre_names" are prepared for each profile
+        that has all required keys.
+
+        :param build_profiles: Profiles for building workfile.
+        :type build_profiles: dict
+        :param loaders_by_name: Available loaders per name.
+        :type loaders_by_name: dict
+        :return: Filtered and prepared profiles.
+        :rtype: list
+        """
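+        # A minimal profile that passes this filter could look like the
+        # following (illustrative values only, not a shipped preset):
+        #     {
+        #         "loaders": ["ReferenceLoader"],
+        #         "families": ["model"],
+        #         "repre_names": ["ma", "abc"]
+        #     }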
+        valid_profiles = []
+        for profile in build_profiles:
+            # Check loaders
+            profile_loaders = profile.get("loaders")
+            if not profile_loaders:
+                log.warning((
+                    "Build profile has missing loaders configuration: {0}"
+                ).format(json.dumps(profile, indent=4)))
+                continue
+
+            # Check if any loader is available
+            loaders_match = False
+            for loader_name in profile_loaders:
+                if loader_name in loaders_by_name:
+                    loaders_match = True
+                    break
+
+            if not loaders_match:
+                log.warning((
+                    "None of the loaders from Build profile are available: {0}"
+                ).format(json.dumps(profile, indent=4)))
+                continue
+
+            # Check families
+            profile_families = profile.get("families")
+            if not profile_families:
+                log.warning((
+                    "Build profile is missing families configuration: {0}"
+                ).format(json.dumps(profile, indent=4)))
+                continue
+
+            # Check representation names
+            profile_repre_names = profile.get("repre_names")
+            if not profile_repre_names:
+                log.warning((
+                    "Build profile is missing"
+                    " representation names filtering: {0}"
+                ).format(json.dumps(profile, indent=4)))
+                continue
+
+            # Prepare lowered families and representation names
+            profile["families_lowered"] = [
+                fam.lower() for fam in profile_families
+            ]
+            profile["repre_names_lowered"] = [
+                name.lower() for name in profile_repre_names
+            ]
+
+            valid_profiles.append(profile)
+
+        return valid_profiles
+
+    def _prepare_profile_for_subsets(self, subsets, profiles):
+        """Select a profile for each subset by its data.
+
+        Profiles are filtered for each subset individually.
+        A profile is matched by the subset's family, optionally by a name
+        regex and by representation names set in the profile.
+        It is possible that no profile matches a subset; in that case the
+        subset is skipped, and it is possible that none of the subsets has
+        a matching profile.
+
+        :param subsets: Subset documents.
+        :type subsets: list
+        :param profiles: Build profiles.
+        :type profiles: dict
+        :return: Profile by subset's id.
+        :rtype: dict
+        """
+        # Prepare subsets
+        subsets_by_family = map_subsets_by_family(subsets)
+
+        profiles_per_subset_id = {}
+        for family, subsets in subsets_by_family.items():
+            family_low = family.lower()
+            for profile in profiles:
+                # Skip profile if it does not contain the family
+                if family_low not in profile["families_lowered"]:
+                    continue
+
+                # Precompile name filters as regexes
+                profile_regexes = profile.get("subset_name_filters")
+                if profile_regexes:
+                    _profile_regexes = []
+                    for regex in profile_regexes:
+                        _profile_regexes.append(re.compile(regex))
+                    profile_regexes = _profile_regexes
+
+                for subset in subsets:
+                    # Verify regex filtering (optional)
+                    if profile_regexes:
+                        valid = False
+                        for pattern in profile_regexes:
+                            if re.match(pattern, subset["name"]):
+                                valid = True
+                                break
+
+                        if not valid:
+                            continue
+
+                    profiles_per_subset_id[subset["_id"]] = profile
+
+                # break profiles loop on finding the first matching profile
+                break
+        return profiles_per_subset_id
+
+    def load_containers_by_asset_data(
+        self, asset_entity_data, build_profiles, loaders_by_name
+    ):
+        """Load containers for the entered asset entity by Build profiles.
+
+        :param asset_entity_data: Prepared data with subsets, last version
+            and representations for a specific asset.
+        :type asset_entity_data: dict
+        :param build_profiles: Build profiles.
+        :type build_profiles: dict
+        :param loaders_by_name: Available loaders per name.
+        :type loaders_by_name: dict
+        :return: Output contains asset document and loaded containers.
+        :rtype: dict
+        """
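+        # `asset_entity_data` is expected in the shape produced by
+        # `_collect_last_version_repres`:
+        #     {"asset_entity": <asset>, "subsets": {<subset id>: {...}}}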
+        # Make sure none of the inputs is empty
+        if not asset_entity_data or not build_profiles or not loaders_by_name:
+            return
+
+        asset_entity = asset_entity_data["asset_entity"]
+
+        valid_profiles = self._filter_build_profiles(
+            build_profiles, loaders_by_name
+        )
+        if not valid_profiles:
+            log.warning(
+                "There are no valid Workfile profiles. Skipping process."
+            )
+            return
+
+        log.debug("Valid Workfile profiles: {}".format(valid_profiles))
+
+        subsets_by_id = {}
+        version_by_subset_id = {}
+        repres_by_version_id = {}
+        for subset_id, in_data in asset_entity_data["subsets"].items():
+            subset_entity = in_data["subset_entity"]
+            subsets_by_id[subset_entity["_id"]] = subset_entity
+
+            version_data = in_data["version"]
+            version_entity = version_data["version_entity"]
+            version_by_subset_id[subset_id] = version_entity
+            repres_by_version_id[version_entity["_id"]] = (
+                version_data["repres"]
+            )
+
+        if not subsets_by_id:
+            log.warning("There are no subsets for asset {0}".format(
+                asset_entity["name"]
+            ))
+            return
+
+        profiles_per_subset_id = self._prepare_profile_for_subsets(
+            subsets_by_id.values(), valid_profiles
+        )
+        if not profiles_per_subset_id:
+            log.warning("There are no valid subsets.")
+            return
+
+        valid_repres_by_subset_id = collections.defaultdict(list)
+        for subset_id, profile in profiles_per_subset_id.items():
+            profile_repre_names = profile["repre_names_lowered"]
+
+            version_entity = version_by_subset_id[subset_id]
+            version_id = version_entity["_id"]
+            repres = repres_by_version_id[version_id]
+            for repre in repres:
+                repre_name_low = repre["name"].lower()
+                if repre_name_low in profile_repre_names:
+                    valid_repres_by_subset_id[subset_id].append(repre)
+
+        # DEBUG message
+        msg = "Valid representations for Asset: `{}`".format(
+            asset_entity["name"]
+        )
+        for subset_id, repres in valid_repres_by_subset_id.items():
+            subset = subsets_by_id[subset_id]
+            msg += "\n# Subset Name/ID: `{}`/{}".format(
+                subset["name"], subset_id
+            )
+            for repre in repres:
+                msg += "\n## Repre name: `{}`".format(repre["name"])
+
+        log.debug(msg)
+
+        containers = self._load_containers(
+            valid_repres_by_subset_id, subsets_by_id,
+            profiles_per_subset_id, loaders_by_name
+        )
+
+        return {
+            "asset_entity": asset_entity,
+            "containers": containers
+        }
+
+    def _load_containers(
+        self, repres_by_subset_id, subsets_by_id,
+        profiles_per_subset_id, loaders_by_name
+    ):
+        """The real load from the collected data happens here.
+
+        Representations are loaded per subset; each subset loads at most
+        one representation. Loading is tried in a specific order:
+        representation names are tried in the order defined in the profile,
+        and when a subset has a representation matching the name, each
+        loader is tried until one succeeds. If none of them succeeds, the
+        next representation name is tried.
+        The loop for a subset ends when a representation is loaded or
+        all matching representations were already tried.
+
+        :param repres_by_subset_id: Available representations mapped
+            by their parent (subset) id.
+        :type repres_by_subset_id: dict
+        :param subsets_by_id: Subset documents mapped by their id.
+        :type subsets_by_id: dict
+        :param profiles_per_subset_id: Build profiles mapped by subset id.
+        :type profiles_per_subset_id: dict
+        :param loaders_by_name: Available loaders per name.
+        :type loaders_by_name: dict
+        :return: Objects of loaded containers.
+        :rtype: list
+        """
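+        # The outer loop walks representation names in profile order and
+        # the inner loop walks loaders; the first successful
+        # `avalon.api.load` call ends both loops for the subset.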
+        loaded_containers = []
+        for subset_id, repres in repres_by_subset_id.items():
+            subset_name = subsets_by_id[subset_id]["name"]
+
+            profile = profiles_per_subset_id[subset_id]
+            loaders_last_idx = len(profile["loaders"]) - 1
+            repre_names_last_idx = len(profile["repre_names_lowered"]) - 1
+
+            repre_by_low_name = {
+                repre["name"].lower(): repre for repre in repres
+            }
+
+            is_loaded = False
+            for repre_name_idx, profile_repre_name in enumerate(
+                profile["repre_names_lowered"]
+            ):
+                # Break iteration if representation was already loaded
+                if is_loaded:
+                    break
+
+                repre = repre_by_low_name.get(profile_repre_name)
+                if not repre:
+                    continue
+
+                for loader_idx, loader_name in enumerate(profile["loaders"]):
+                    if is_loaded:
+                        break
+
+                    loader = loaders_by_name.get(loader_name)
+                    if not loader:
+                        continue
+                    try:
+                        container = avalon.api.load(
+                            loader,
+                            repre["_id"],
+                            name=subset_name
+                        )
+                        loaded_containers.append(container)
+                        is_loaded = True
+
+                    except Exception as exc:
+                        if isinstance(exc, pipeline.IncompatibleLoaderError):
+                            log.info((
+                                "Loader `{}` is not compatible with"
+                                " representation `{}`"
+                            ).format(loader_name, repre["name"]))
+
+                        else:
+                            log.error(
+                                "Unexpected error happened during loading",
+                                exc_info=True
+                            )
+
+                        msg = "Loading failed."
+                        if loader_idx < loaders_last_idx:
+                            msg += " Trying next loader."
+                        elif repre_name_idx < repre_names_last_idx:
+                            msg += " Trying next representation."
+                        else:
+                            msg += (
+                                " Loading of subset `{}` was not successful."
+                            ).format(subset_name)
+                        log.info(msg)
+
+        return loaded_containers
+
+    def _collect_last_version_repres(self, asset_entities):
+        """Collect subsets, versions and representations for asset_entities.
+
+        :param asset_entities: Asset entities for which to find the data.
+        :type asset_entities: list
+        :return: Collected entities.
+        :rtype: dict
+
+        Example output:
+        ```
+        {
+            {Asset ID}: {
+                "asset_entity": <asset entity>,
+                "subsets": {
+                    {Subset ID}: {
+                        "subset_entity": <subset entity>,
+                        "version": {
+                            "version_entity": <version entity>,
+                            "repres": [
+                                <representation>, <representation>, ...
+                            ]
+                        }
+                    },
+                    ...
+                }
+            },
+            ...
+        }
+        output[asset_id]["subsets"][subset_id]["version"]["repres"]
+        ```
+        """
+        if not asset_entities:
+            return {}
+
+        asset_entity_by_ids = {
+            asset["_id"]: asset for asset in asset_entities
+        }
+
+        subsets = list(io.find({
+            "type": "subset",
+            "parent": {"$in": list(asset_entity_by_ids.keys())}
+        }))
+        subset_entity_by_ids = {
+            subset["_id"]: subset for subset in subsets
+        }
+
+        # Versions are fetched sorted by name in descending order, so the
+        # first version seen per subset is its latest one
+        sorted_versions = list(io.find({
+            "type": "version",
+            "parent": {"$in": list(subset_entity_by_ids.keys())}
+        }).sort("name", -1))
+
+        subset_id_with_latest_version = []
+        last_versions_by_id = {}
+        for version in sorted_versions:
+            subset_id = version["parent"]
+            if subset_id in subset_id_with_latest_version:
+                continue
+            subset_id_with_latest_version.append(subset_id)
+            last_versions_by_id[version["_id"]] = version
+
+        repres = io.find({
+            "type": "representation",
+            "parent": {"$in": list(last_versions_by_id.keys())}
+        })
+
+        output = {}
+        for repre in repres:
+            version_id = repre["parent"]
+            version = last_versions_by_id[version_id]
+
+            subset_id = version["parent"]
+            subset = subset_entity_by_ids[subset_id]
+
+            asset_id = subset["parent"]
+            asset = asset_entity_by_ids[asset_id]
+
+            if asset_id not in output:
+                output[asset_id] = {
+                    "asset_entity": asset,
+                    "subsets": {}
+                }
+
+            if subset_id not in output[asset_id]["subsets"]:
+                output[asset_id]["subsets"][subset_id] = {
+                    "subset_entity": subset,
+                    "version": {
+                        "version_entity": version,
+                        "repres": []
+                    }
+                }
+
+            output[asset_id]["subsets"][subset_id]["version"]["repres"].append(
+                repre
+            )
+
+        return output
diff --git a/pype/maya/menu.py b/pype/maya/menu.py
index 806944c117..70df50b9e6 100644
--- a/pype/maya/menu.py
+++ b/pype/maya/menu.py
@@ -2,8 +2,9 @@ import sys
 import os
 import logging

-from avalon.vendor.Qt import QtWidgets, QtCore, QtGui
-
+from avalon.vendor.Qt import QtWidgets, QtGui
+from avalon.maya import pipeline
+from ..lib import BuildWorkfile
 import maya.cmds as cmds

 self = sys.modules[__name__]
@@ -21,8 +22,15 @@ def _get_menu():
     return menu

-
 def deferred():
+    def add_build_workfiles_item():
+        # Add build first workfile
+        cmds.menuItem(divider=True, parent=pipeline._menu)
+        cmds.menuItem(
+            "Build First Workfile",
+            parent=pipeline._menu,
+            command=lambda *args: BuildWorkfile().process()
+        )
     log.info("Attempting to install scripts menu..")

@@ -30,8 +38,11 @@ def deferred():
         import scriptsmenu.launchformaya as launchformaya
         import scriptsmenu.scriptsmenu as scriptsmenu
     except ImportError:
-        log.warning("Skipping studio.menu install, because "
-                    "'scriptsmenu' module seems unavailable.")
+        log.warning(
+            "Skipping studio.menu install, because "
+            "'scriptsmenu' module seems unavailable."
+        )
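+        # The workfile menu item does not depend on `scriptsmenu`, so it
+        # is still added before bailing out.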
+        add_build_workfiles_item()
         return

     # load configuration of custom menu
@@ -39,15 +50,16 @@ def deferred():
     config_path = os.path.join(os.path.dirname(__file__), "menu.json")
     config = scriptsmenu.load_configuration(config_path)

     # run the launcher for Maya menu
-    studio_menu = launchformaya.main(title=self._menu.title(),
-                                     objectName=self._menu)
+    studio_menu = launchformaya.main(
+        title=self._menu.title(),
+        objectName=self._menu
+    )

     # apply configuration
     studio_menu.build_from_configuration(studio_menu, config)


 def uninstall():
-
     menu = _get_menu()
     if menu:
         log.info("Attempting to uninstall..")
@@ -60,9 +72,8 @@ def uninstall():

 def install():
-
     if cmds.about(batch=True):
-        print("Skipping pype.menu initialization in batch mode..")
+        log.info("Skipping pype.menu initialization in batch mode..")
         return

     uninstall()
diff --git a/pype/plugins/global/publish/extract_burnin.py b/pype/plugins/global/publish/extract_burnin.py
index ea777e4e34..71917946b8 100644
--- a/pype/plugins/global/publish/extract_burnin.py
+++ b/pype/plugins/global/publish/extract_burnin.py
@@ -132,13 +132,14 @@ class ExtractBurnin(pype.api.Extractor):
             slate_duration = duration_cp

         # exception for slate workflow
-        if ("slate" in instance.data["families"]):
+        if "slate" in instance.data["families"]:
             if "slate-frame" in repre.get("tags", []):
                 slate_frame_start = frame_start_cp - 1
                 slate_frame_end = frame_end_cp
                 slate_duration = duration_cp + 1

-        self.log.debug("__1 slate_frame_start: {}".format(slate_frame_start))
+        self.log.debug("__1 slate_frame_start: {}".format(
+            slate_frame_start))

         _prep_data.update({
             "slate_frame_start": slate_frame_start,
@@ -192,7 +193,6 @@ class ExtractBurnin(pype.api.Extractor):
         self.log.debug("Output: {}".format(output))

         repre_update = {
-            "anatomy_template": "render",
             "files": movieFileBurnin,
             "name": repre["name"],
             "tags": [x for x in repre["tags"] if x != "delete"]
diff --git a/pype/plugins/global/publish/submit_publish_job.py b/pype/plugins/global/publish/submit_publish_job.py
index a6f2d5d79b..843760f9ec 100644
--- a/pype/plugins/global/publish/submit_publish_job.py
+++ b/pype/plugins/global/publish/submit_publish_job.py
@@ -155,6 +155,7 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
         "PYPE_METADATA_FILE",
         "PYPE_STUDIO_PROJECTS_PATH",
         "PYPE_STUDIO_PROJECTS_MOUNT",
+        "AVALON_PROJECT"
     ]

     # pool used to do the publishing job
diff --git a/pype/plugins/maya/load/load_ass.py b/pype/plugins/maya/load/load_ass.py
index 83dd80bd4e..929ff2450a 100644
--- a/pype/plugins/maya/load/load_ass.py
+++ b/pype/plugins/maya/load/load_ass.py
@@ -16,7 +16,7 @@ class AssProxyLoader(pype.maya.plugin.ReferenceLoader):
     icon = "code-fork"
     color = "orange"

-    def process_reference(self, context, name, namespace, data):
+    def process_reference(self, context, name, namespace, options):
         import maya.cmds as cmds
         from avalon import maya
@@ -164,7 +164,7 @@ class AssStandinLoader(api.Loader):
     icon = "code-fork"
     color = "orange"

-    def load(self, context, name, namespace, data):
+    def load(self, context, name, namespace, options):
         import maya.cmds as cmds
         import avalon.maya.lib as lib
diff --git a/pype/plugins/nukestudio/publish/collect_reviews.py b/pype/plugins/nukestudio/publish/collect_reviews.py
index 7cf8d77de4..b91d390e2e 100644
--- a/pype/plugins/nukestudio/publish/collect_reviews.py
+++ b/pype/plugins/nukestudio/publish/collect_reviews.py
@@ -151,13 +151,16 @@ class CollectReviews(api.InstancePlugin):
             "handleStart", "handleEnd", "sourceIn", "sourceOut",
             "frameStart", "frameEnd", "sourceInH", "sourceOutH",
             "clipIn", "clipOut", "clipInH", "clipOutH", "asset",
-            "track", "version"
"version" + "track" ] version_data = dict() # pass data to version version_data.update({k: instance.data[k] for k in transfer_data}) + if 'version' in instance.data: + version_data["version"] = instance.data[version] + # add to data of representation version_data.update({ "colorspace": item.sourceMediaColourTransform(), diff --git a/pype/version.py b/pype/version.py index 2614ce9d96..892994aa6c 100644 --- a/pype/version.py +++ b/pype/version.py @@ -1 +1 @@ -__version__ = "2.7.0" +__version__ = "2.8.0"