diff --git a/pype/api.py b/pype/api.py index 37ddef8972..b88be4cc88 100644 --- a/pype/api.py +++ b/pype/api.py @@ -40,7 +40,6 @@ from .lib import ( version_up, get_asset, get_hierarchy, - get_subsets, get_version_from_path, get_last_version_from_path, source_hash, @@ -87,7 +86,6 @@ __all__ = [ "version_up", "get_hierarchy", "get_asset", - "get_subsets", "get_version_from_path", "get_last_version_from_path", "source_hash", diff --git a/pype/hosts/fusion/lib.py b/pype/hosts/fusion/lib.py index f2846c966a..77866fde9d 100644 --- a/pype/hosts/fusion/lib.py +++ b/pype/hosts/fusion/lib.py @@ -2,7 +2,7 @@ import sys from avalon.vendor.Qt import QtGui import avalon.fusion - +from avalon import io self = sys.modules[__name__] self._project = None @@ -59,3 +59,84 @@ def get_additional_data(container): return {"color": QtGui.QColor.fromRgbF(tile_color["R"], tile_color["G"], tile_color["B"])} + + +def switch_item(container, + asset_name=None, + subset_name=None, + representation_name=None): + """Switch container asset, subset or representation of a container by name. + + It'll always switch to the latest version - of course a different + approach could be implemented. + + Args: + container (dict): data of the item to switch with + asset_name (str): name of the asset + subset_name (str): name of the subset + representation_name (str): name of the representation + + Returns: + dict + + """ + + if all(not x for x in [asset_name, subset_name, representation_name]): + raise ValueError("Must have at least one change provided to switch.") + + # Collect any of current asset, subset and representation if not provided + # so we can use the original name from those. 
+ if any(not x for x in [asset_name, subset_name, representation_name]): + _id = io.ObjectId(container["representation"]) + representation = io.find_one({"type": "representation", "_id": _id}) + version, subset, asset, project = io.parenthood(representation) + + if asset_name is None: + asset_name = asset["name"] + + if subset_name is None: + subset_name = subset["name"] + + if representation_name is None: + representation_name = representation["name"] + + # Find the new one + asset = io.find_one({ + "name": asset_name, + "type": "asset" + }) + assert asset, ("Could not find asset in the database with the name " + "'%s'" % asset_name) + + subset = io.find_one({ + "name": subset_name, + "type": "subset", + "parent": asset["_id"] + }) + assert subset, ("Could not find subset in the database with the name " + "'%s'" % subset_name) + + version = io.find_one( + { + "type": "version", + "parent": subset["_id"] + }, + sort=[('name', -1)] + ) + + assert version, "Could not find a version for {}.{}".format( + asset_name, subset_name + ) + + representation = io.find_one({ + "name": representation_name, + "type": "representation", + "parent": version["_id"]} + ) + + assert representation, ("Could not find representation in the database " + "with the name '%s'" % representation_name) + + avalon.api.switch(container, representation) + + return representation diff --git a/pype/hosts/fusion/scripts/fusion_switch_shot.py b/pype/hosts/fusion/scripts/fusion_switch_shot.py index a3f2116db8..ed657cb612 100644 --- a/pype/hosts/fusion/scripts/fusion_switch_shot.py +++ b/pype/hosts/fusion/scripts/fusion_switch_shot.py @@ -234,7 +234,7 @@ def switch(asset_name, filepath=None, new=True): representations = [] for container in containers: try: - representation = pype.switch_item( + representation = fusion_lib.switch_item( container, asset_name=asset_name) representations.append(representation) diff --git a/pype/lib/__init__.py b/pype/lib/__init__.py index 05aee8cf7b..bcc0d352e6 100644 --- 
a/pype/lib/__init__.py +++ b/pype/lib/__init__.py @@ -6,6 +6,16 @@ from .deprecated import ( set_io_database ) +from .avalon_context import ( + is_latest, + any_outdated, + get_asset, + get_hierarchy, + get_linked_assets, + get_latest_version, + BuildWorkfile +) + from .hooks import PypeHook, execute_hook from .applications import ( @@ -26,23 +36,22 @@ from .path_tools import ( from .lib_old import ( _subprocess, - get_hierarchy, - is_latest, - any_outdated, - switch_item, - get_asset, - get_subsets, - get_linked_assets, - BuildWorkfile, - ffprobe_streams, - source_hash, - get_latest_version + source_hash ) +from .ffmpeg_utils import ffprobe_streams __all__ = [ "get_avalon_database", "set_io_database", + "is_latest", + "any_outdated", + "get_asset", + "get_hierarchy", + "get_linked_assets", + "get_latest_version", + "BuildWorkfile", + "PypeHook", "execute_hook", @@ -56,5 +65,7 @@ __all__ = [ "get_version_from_path", "get_last_version_from_path", "get_paths_from_environ", - "get_ffmpeg_tool_path" + "get_ffmpeg_tool_path", + + "ffprobe_streams" ] diff --git a/pype/lib/avalon_context.py b/pype/lib/avalon_context.py new file mode 100644 index 0000000000..56abc4aee6 --- /dev/null +++ b/pype/lib/avalon_context.py @@ -0,0 +1,852 @@ +import os +import json +import re +import logging +import collections + +from avalon import io, pipeline +from ..api import config +import avalon.api + +log = logging.getLogger("AvalonContext") + + +def is_latest(representation): + """Return whether the representation is from latest version + + Args: + representation (dict): The representation document from the database. + + Returns: + bool: Whether the representation is of latest version. 
+ + """ + + version = io.find_one({"_id": representation['parent']}) + if version["type"] == "master_version": + return True + + # Get highest version under the parent + highest_version = io.find_one({ + "type": "version", + "parent": version["parent"] + }, sort=[("name", -1)], projection={"name": True}) + + if version['name'] == highest_version['name']: + return True + else: + return False + + +def any_outdated(): + """Return whether the current scene has any outdated content""" + + checked = set() + host = avalon.api.registered_host() + for container in host.ls(): + representation = container['representation'] + if representation in checked: + continue + + representation_doc = io.find_one( + { + "_id": io.ObjectId(representation), + "type": "representation" + }, + projection={"parent": True} + ) + if representation_doc and not is_latest(representation_doc): + return True + elif not representation_doc: + log.debug("Container '{objectName}' has an invalid " + "representation, it is missing in the " + "database".format(**container)) + + checked.add(representation) + return False + + +def get_asset(asset_name=None): + """ Returning asset document from database """ + if not asset_name: + asset_name = avalon.api.Session["AVALON_ASSET"] + + asset_document = io.find_one({ + "name": asset_name, + "type": "asset" + }) + + if not asset_document: + raise TypeError("Entity \"{}\" was not found in DB".format(asset_name)) + + return asset_document + + +def get_hierarchy(asset_name=None): + """ + Obtain asset hierarchy path string from mongo db + + Returns: + string: asset hierarchy path + + """ + if not asset_name: + asset_name = io.Session.get("AVALON_ASSET", os.environ["AVALON_ASSET"]) + + asset_entity = io.find_one({ + "type": 'asset', + "name": asset_name + }) + + not_set = "PARENTS_NOT_SET" + entity_parents = asset_entity.get("data", {}).get("parents", not_set) + + # If entity already have parents then just return joined + if entity_parents != not_set: + return 
"/".join(entity_parents) + + # Else query parents through visualParents and store result to entity + hierarchy_items = [] + entity = asset_entity + while True: + parent_id = entity.get("data", {}).get("visualParent") + if not parent_id: + break + entity = io.find_one({"_id": parent_id}) + hierarchy_items.append(entity["name"]) + + # Add parents to entity data for next query + entity_data = asset_entity.get("data", {}) + entity_data["parents"] = hierarchy_items + io.update_many( + {"_id": asset_entity["_id"]}, + {"$set": {"data": entity_data}} + ) + + return "/".join(hierarchy_items) + + +def get_linked_assets(asset_entity): + """Return linked assets for `asset_entity`.""" + inputs = asset_entity["data"].get("inputs", []) + inputs = [io.find_one({"_id": x}) for x in inputs] + return inputs + + +def get_latest_version(asset_name, subset_name, dbcon=None, project_name=None): + """Retrieve latest version from `asset_name`, and `subset_name`. + + Do not use if you want to query more than 5 latest versions as this method + query 3 times to mongo for each call. For those cases is better to use + more efficient way, e.g. with help of aggregations. + + Args: + asset_name (str): Name of asset. + subset_name (str): Name of subset. + dbcon (avalon.mongodb.AvalonMongoDB, optional): Avalon Mongo connection + with Session. + project_name (str, optional): Find latest version in specific project. + + Returns: + None: If asset, subset or version were not found. + dict: Last version document for entered . 
+ """ + + if not dbcon: + log.debug("Using `avalon.io` for query.") + dbcon = io + # Make sure is installed + io.install() + + if project_name and project_name != dbcon.Session.get("AVALON_PROJECT"): + # `avalon.io` has only `_database` attribute + # but `AvalonMongoDB` has `database` + database = getattr(dbcon, "database", dbcon._database) + collection = database[project_name] + else: + project_name = dbcon.Session.get("AVALON_PROJECT") + collection = dbcon + + log.debug(( + "Getting latest version for Project: \"{}\" Asset: \"{}\"" + " and Subset: \"{}\"" + ).format(project_name, asset_name, subset_name)) + + # Query asset document id by asset name + asset_doc = collection.find_one( + {"type": "asset", "name": asset_name}, + {"_id": True} + ) + if not asset_doc: + log.info( + "Asset \"{}\" was not found in Database.".format(asset_name) + ) + return None + + subset_doc = collection.find_one( + {"type": "subset", "name": subset_name, "parent": asset_doc["_id"]}, + {"_id": True} + ) + if not subset_doc: + log.info( + "Subset \"{}\" was not found in Database.".format(subset_name) + ) + return None + + version_doc = collection.find_one( + {"type": "version", "parent": subset_doc["_id"]}, + sort=[("name", -1)], + ) + if not version_doc: + log.info( + "Subset \"{}\" does not have any version yet.".format(subset_name) + ) + return None + return version_doc + + +class BuildWorkfile: + """Wrapper for build workfile process. + + Load representations for current context by build presets. Build presets + are host related, since each host has it's loaders. 
+ """ + + log = logging.getLogger("BuildWorkfile") + + @staticmethod + def map_subsets_by_family(subsets): + subsets_by_family = collections.defaultdict(list) + for subset in subsets: + family = subset["data"].get("family") + if not family: + families = subset["data"].get("families") + if not families: + continue + family = families[0] + + subsets_by_family[family].append(subset) + return subsets_by_family + + def process(self): + """Main method of this wrapper. + + Building of workfile is triggered and is possible to implement + post processing of loaded containers if necessary. + """ + containers = self.build_workfile() + + return containers + + def build_workfile(self): + """Prepares and load containers into workfile. + + Loads latest versions of current and linked assets to workfile by logic + stored in Workfile profiles from presets. Profiles are set by host, + filtered by current task name and used by families. + + Each family can specify representation names and loaders for + representations and first available and successful loaded + representation is returned as container. + + At the end you'll get list of loaded containers per each asset. + + loaded_containers [{ + "asset_entity": , + "containers": [, , ...] + }, { + "asset_entity": , + "containers": [, ...] + }, { + ... 
+            }]
+        """
+        # Get current asset name and entity
+        current_asset_name = io.Session["AVALON_ASSET"]
+        current_asset_entity = io.find_one({
+            "type": "asset",
+            "name": current_asset_name
+        })
+
+        # Skip if asset was not found
+        if not current_asset_entity:
+            print("Asset entity with name `{}` was not found".format(
+                current_asset_name
+            ))
+            return
+
+        # Prepare available loaders
+        loaders_by_name = {}
+        for loader in avalon.api.discover(avalon.api.Loader):
+            loader_name = loader.__name__
+            if loader_name in loaders_by_name:
+                raise KeyError(
+                    "Duplicated loader name {0}!".format(loader_name)
+                )
+            loaders_by_name[loader_name] = loader
+
+        # Skip if there are no registered loaders
+        if not loaders_by_name:
+            self.log.warning("There are no registered loaders.")
+            return
+
+        # Get current task name
+        current_task_name = io.Session["AVALON_TASK"]
+
+        # Load workfile presets for task
+        self.build_presets = self.get_build_presets(current_task_name)
+
+        # Skip if there are no presets for the task
+        if not self.build_presets:
+            self.log.warning(
+                "Current task `{}` does not have any loading preset.".format(
+                    current_task_name
+                )
+            )
+            return
+
+        # Get presets for loading current asset
+        current_context_profiles = self.build_presets.get("current_context")
+        # Get presets for loading linked assets
+        link_context_profiles = self.build_presets.get("linked_assets")
+        # Skip if both are missing
+        if not current_context_profiles and not link_context_profiles:
+            self.log.warning(
+                "Current task `{}` has empty loading preset.".format(
+                    current_task_name
+                )
+            )
+            return
+
+        elif not current_context_profiles:
+            self.log.warning((
+                "Current task `{}` doesn't have any loading"
+                " preset for it's context."
+            ).format(current_task_name))
+
+        elif not link_context_profiles:
+            self.log.warning((
+                "Current task `{}` doesn't have any"
+                " loading preset for it's linked assets."
+ ).format(current_task_name)) + + # Prepare assets to process by workfile presets + assets = [] + current_asset_id = None + if current_context_profiles: + # Add current asset entity if preset has current context set + assets.append(current_asset_entity) + current_asset_id = current_asset_entity["_id"] + + if link_context_profiles: + # Find and append linked assets if preset has set linked mapping + link_assets = get_linked_assets(current_asset_entity) + if link_assets: + assets.extend(link_assets) + + # Skip if there are no assets. This can happen if only linked mapping + # is set and there are no links for his asset. + if not assets: + self.log.warning( + "Asset does not have linked assets. Nothing to process." + ) + return + + # Prepare entities from database for assets + prepared_entities = self._collect_last_version_repres(assets) + + # Load containers by prepared entities and presets + loaded_containers = [] + # - Current asset containers + if current_asset_id and current_asset_id in prepared_entities: + current_context_data = prepared_entities.pop(current_asset_id) + loaded_data = self.load_containers_by_asset_data( + current_context_data, current_context_profiles, loaders_by_name + ) + if loaded_data: + loaded_containers.append(loaded_data) + + # - Linked assets container + for linked_asset_data in prepared_entities.values(): + loaded_data = self.load_containers_by_asset_data( + linked_asset_data, link_context_profiles, loaders_by_name + ) + if loaded_data: + loaded_containers.append(loaded_data) + + # Return list of loaded containers + return loaded_containers + + def get_build_presets(self, task_name): + """ Returns presets to build workfile for task name. + + Presets are loaded for current project set in + io.Session["AVALON_PROJECT"], filtered by registered host + and entered task name. + + :param task_name: Task name used for filtering build presets. 
+        :type task_name: str
+        :return: preset per entered task
+        :rtype: dict | None
+        """
+        host_name = avalon.api.registered_host().__name__.rsplit(".", 1)[-1]
+        presets = config.get_presets(io.Session["AVALON_PROJECT"])
+        # Get presets for host
+        build_presets = (
+            presets["plugins"]
+            .get(host_name, {})
+            .get("workfile_build")
+        )
+        if not build_presets:
+            return
+
+        task_name_low = task_name.lower()
+        per_task_preset = None
+        for preset in build_presets:
+            preset_tasks = preset.get("tasks") or []
+            preset_tasks_low = [task.lower() for task in preset_tasks]
+            if task_name_low in preset_tasks_low:
+                per_task_preset = preset
+                break
+
+        return per_task_preset
+
+    def _filter_build_profiles(self, build_profiles, loaders_by_name):
+        """ Filter build profiles by loaders and prepare process data.
+
+        Valid profile must have "loaders", "families" and "repre_names" keys
+        with valid values.
+        - "loaders" expects list of strings representing possible loaders.
+        - "families" expects list of strings for filtering
+          by main subset family.
+        - "repre_names" expects list of strings for filtering by
+          representation name.
+
+        Lowered "families" and "repre_names" are prepared for each profile with
+        all required keys.
+
+        :param build_profiles: Profiles for building workfile.
+        :type build_profiles: dict
+        :param loaders_by_name: Available loaders per name.
+        :type loaders_by_name: dict
+        :return: Filtered and prepared profiles.
+ :rtype: list + """ + valid_profiles = [] + for profile in build_profiles: + # Check loaders + profile_loaders = profile.get("loaders") + if not profile_loaders: + self.log.warning(( + "Build profile has missing loaders configuration: {0}" + ).format(json.dumps(profile, indent=4))) + continue + + # Check if any loader is available + loaders_match = False + for loader_name in profile_loaders: + if loader_name in loaders_by_name: + loaders_match = True + break + + if not loaders_match: + self.log.warning(( + "All loaders from Build profile are not available: {0}" + ).format(json.dumps(profile, indent=4))) + continue + + # Check families + profile_families = profile.get("families") + if not profile_families: + self.log.warning(( + "Build profile is missing families configuration: {0}" + ).format(json.dumps(profile, indent=4))) + continue + + # Check representation names + profile_repre_names = profile.get("repre_names") + if not profile_repre_names: + self.log.warning(( + "Build profile is missing" + " representation names filtering: {0}" + ).format(json.dumps(profile, indent=4))) + continue + + # Prepare lowered families and representation names + profile["families_lowered"] = [ + fam.lower() for fam in profile_families + ] + profile["repre_names_lowered"] = [ + name.lower() for name in profile_repre_names + ] + + valid_profiles.append(profile) + + return valid_profiles + + def _prepare_profile_for_subsets(self, subsets, profiles): + """Select profile for each subset byt it's data. + + Profiles are filtered for each subset individually. + Profile is filtered by subset's family, optionally by name regex and + representation names set in profile. + It is possible to not find matching profile for subset, in that case + subset is skipped and it is possible that none of subsets have + matching profile. + + :param subsets: Subset documents. + :type subsets: list + :param profiles: Build profiles. + :type profiles: dict + :return: Profile by subset's id. 
+ :rtype: dict + """ + # Prepare subsets + subsets_by_family = self.map_subsets_by_family(subsets) + + profiles_per_subset_id = {} + for family, subsets in subsets_by_family.items(): + family_low = family.lower() + for profile in profiles: + # Skip profile if does not contain family + if family_low not in profile["families_lowered"]: + continue + + # Precompile name filters as regexes + profile_regexes = profile.get("subset_name_filters") + if profile_regexes: + _profile_regexes = [] + for regex in profile_regexes: + _profile_regexes.append(re.compile(regex)) + profile_regexes = _profile_regexes + + # TODO prepare regex compilation + for subset in subsets: + # Verify regex filtering (optional) + if profile_regexes: + valid = False + for pattern in profile_regexes: + if re.match(pattern, subset["name"]): + valid = True + break + + if not valid: + continue + + profiles_per_subset_id[subset["_id"]] = profile + + # break profiles loop on finding the first matching profile + break + return profiles_per_subset_id + + def load_containers_by_asset_data( + self, asset_entity_data, build_profiles, loaders_by_name + ): + """Load containers for entered asset entity by Build profiles. + + :param asset_entity_data: Prepared data with subsets, last version + and representations for specific asset. + :type asset_entity_data: dict + :param build_profiles: Build profiles. + :type build_profiles: dict + :param loaders_by_name: Available loaders per name. + :type loaders_by_name: dict + :return: Output contains asset document and loaded containers. + :rtype: dict + """ + + # Make sure all data are not empty + if not asset_entity_data or not build_profiles or not loaders_by_name: + return + + asset_entity = asset_entity_data["asset_entity"] + + valid_profiles = self._filter_build_profiles( + build_profiles, loaders_by_name + ) + if not valid_profiles: + self.log.warning( + "There are not valid Workfile profiles. Skipping process." 
+ ) + return + + self.log.debug("Valid Workfile profiles: {}".format(valid_profiles)) + + subsets_by_id = {} + version_by_subset_id = {} + repres_by_version_id = {} + for subset_id, in_data in asset_entity_data["subsets"].items(): + subset_entity = in_data["subset_entity"] + subsets_by_id[subset_entity["_id"]] = subset_entity + + version_data = in_data["version"] + version_entity = version_data["version_entity"] + version_by_subset_id[subset_id] = version_entity + repres_by_version_id[version_entity["_id"]] = ( + version_data["repres"] + ) + + if not subsets_by_id: + self.log.warning("There are not subsets for asset {0}".format( + asset_entity["name"] + )) + return + + profiles_per_subset_id = self._prepare_profile_for_subsets( + subsets_by_id.values(), valid_profiles + ) + if not profiles_per_subset_id: + self.log.warning("There are not valid subsets.") + return + + valid_repres_by_subset_id = collections.defaultdict(list) + for subset_id, profile in profiles_per_subset_id.items(): + profile_repre_names = profile["repre_names_lowered"] + + version_entity = version_by_subset_id[subset_id] + version_id = version_entity["_id"] + repres = repres_by_version_id[version_id] + for repre in repres: + repre_name_low = repre["name"].lower() + if repre_name_low in profile_repre_names: + valid_repres_by_subset_id[subset_id].append(repre) + + # DEBUG message + msg = "Valid representations for Asset: `{}`".format( + asset_entity["name"] + ) + for subset_id, repres in valid_repres_by_subset_id.items(): + subset = subsets_by_id[subset_id] + msg += "\n# Subset Name/ID: `{}`/{}".format( + subset["name"], subset_id + ) + for repre in repres: + msg += "\n## Repre name: `{}`".format(repre["name"]) + + self.log.debug(msg) + + containers = self._load_containers( + valid_repres_by_subset_id, subsets_by_id, + profiles_per_subset_id, loaders_by_name + ) + + return { + "asset_entity": asset_entity, + "containers": containers + } + + def _load_containers( + self, repres_by_subset_id, 
subsets_by_id, + profiles_per_subset_id, loaders_by_name + ): + """Real load by collected data happens here. + + Loading of representations per subset happens here. Each subset can + loads one representation. Loading is tried in specific order. + Representations are tried to load by names defined in configuration. + If subset has representation matching representation name each loader + is tried to load it until any is successful. If none of them was + successful then next reprensentation name is tried. + Subset process loop ends when any representation is loaded or + all matching representations were already tried. + + :param repres_by_subset_id: Available representations mapped + by their parent (subset) id. + :type repres_by_subset_id: dict + :param subsets_by_id: Subset documents mapped by their id. + :type subsets_by_id: dict + :param profiles_per_subset_id: Build profiles mapped by subset id. + :type profiles_per_subset_id: dict + :param loaders_by_name: Available loaders per name. + :type loaders_by_name: dict + :return: Objects of loaded containers. + :rtype: list + """ + loaded_containers = [] + + # Get subset id order from build presets. + build_presets = self.build_presets.get("current_context", []) + build_presets += self.build_presets.get("linked_assets", []) + subset_ids_ordered = [] + for preset in build_presets: + for preset_family in preset["families"]: + for id, subset in subsets_by_id.items(): + if preset_family not in subset["data"].get("families", []): + continue + + subset_ids_ordered.append(id) + + # Order representations from subsets. 
+        print("repres_by_subset_id", repres_by_subset_id)
+        representations_ordered = []
+        representations = []
+        for id in subset_ids_ordered:
+            for subset_id, repres in repres_by_subset_id.items():
+                if repres in representations:
+                    continue
+
+                if id == subset_id:
+                    representations_ordered.append((subset_id, repres))
+                    representations.append(repres)
+
+        print("representations", representations)
+
+        # Load ordered representations.
+        for subset_id, repres in representations_ordered:
+            subset_name = subsets_by_id[subset_id]["name"]
+
+            profile = profiles_per_subset_id[subset_id]
+            loaders_last_idx = len(profile["loaders"]) - 1
+            repre_names_last_idx = len(profile["repre_names_lowered"]) - 1
+
+            repre_by_low_name = {
+                repre["name"].lower(): repre for repre in repres
+            }
+
+            is_loaded = False
+            for repre_name_idx, profile_repre_name in enumerate(
+                profile["repre_names_lowered"]
+            ):
+                # Break iteration if representation was already loaded
+                if is_loaded:
+                    break
+
+                repre = repre_by_low_name.get(profile_repre_name)
+                if not repre:
+                    continue
+
+                for loader_idx, loader_name in enumerate(profile["loaders"]):
+                    if is_loaded:
+                        break
+
+                    loader = loaders_by_name.get(loader_name)
+                    if not loader:
+                        continue
+                    try:
+                        container = avalon.api.load(
+                            loader,
+                            repre["_id"],
+                            name=subset_name
+                        )
+                        loaded_containers.append(container)
+                        is_loaded = True
+
+                    except Exception as exc:
+                        # NOTE(review): `exc == <ExceptionClass>` never
+                        # matches an instance; isinstance is required here.
+                        if isinstance(exc, pipeline.IncompatibleLoaderError):
+                            self.log.info((
+                                "Loader `{}` is not compatible with"
+                                " representation `{}`"
+                            ).format(loader_name, repre["name"]))
+
+                        else:
+                            self.log.error(
+                                "Unexpected error happened during loading",
+                                exc_info=True
+                            )
+
+                        msg = "Loading failed."
+                        if loader_idx < loaders_last_idx:
+                            msg += " Trying next loader."
+                        elif repre_name_idx < repre_names_last_idx:
+                            msg += " Trying next representation."
+                        else:
+                            msg += (
+                                " Loading of subset `{}` was not successful."
+                            ).format(subset_name)
+ self.log.info(msg) + + return loaded_containers + + def _collect_last_version_repres(self, asset_entities): + """Collect subsets, versions and representations for asset_entities. + + :param asset_entities: Asset entities for which want to find data + :type asset_entities: list + :return: collected entities + :rtype: dict + + Example output: + ``` + { + {Asset ID}: { + "asset_entity": , + "subsets": { + {Subset ID}: { + "subset_entity": , + "version": { + "version_entity": , + "repres": [ + , , ... + ] + } + }, + ... + } + }, + ... + } + output[asset_id]["subsets"][subset_id]["version"]["repres"] + ``` + """ + + if not asset_entities: + return {} + + asset_entity_by_ids = {asset["_id"]: asset for asset in asset_entities} + + subsets = list(io.find({ + "type": "subset", + "parent": {"$in": asset_entity_by_ids.keys()} + })) + subset_entity_by_ids = {subset["_id"]: subset for subset in subsets} + + sorted_versions = list(io.find({ + "type": "version", + "parent": {"$in": subset_entity_by_ids.keys()} + }).sort("name", -1)) + + subset_id_with_latest_version = [] + last_versions_by_id = {} + for version in sorted_versions: + subset_id = version["parent"] + if subset_id in subset_id_with_latest_version: + continue + subset_id_with_latest_version.append(subset_id) + last_versions_by_id[version["_id"]] = version + + repres = io.find({ + "type": "representation", + "parent": {"$in": last_versions_by_id.keys()} + }) + + output = {} + for repre in repres: + version_id = repre["parent"] + version = last_versions_by_id[version_id] + + subset_id = version["parent"] + subset = subset_entity_by_ids[subset_id] + + asset_id = subset["parent"] + asset = asset_entity_by_ids[asset_id] + + if asset_id not in output: + output[asset_id] = { + "asset_entity": asset, + "subsets": {} + } + + if subset_id not in output[asset_id]["subsets"]: + output[asset_id]["subsets"][subset_id] = { + "subset_entity": subset, + "version": { + "version_entity": version, + "repres": [] + } + } + + 
output[asset_id]["subsets"][subset_id]["version"]["repres"].append( + repre + ) + + return output diff --git a/pype/lib/ffmpeg_utils.py b/pype/lib/ffmpeg_utils.py new file mode 100644 index 0000000000..1c656d55d3 --- /dev/null +++ b/pype/lib/ffmpeg_utils.py @@ -0,0 +1,40 @@ +import logging +import json +import subprocess + +from . import get_ffmpeg_tool_path + +log = logging.getLogger("FFmpeg utils") + + +def ffprobe_streams(path_to_file, logger=None): + """Load streams from entered filepath via ffprobe.""" + if not logger: + logger = log + logger.info( + "Getting information about input \"{}\".".format(path_to_file) + ) + args = [ + "\"{}\"".format(get_ffmpeg_tool_path("ffprobe")), + "-v quiet", + "-print_format json", + "-show_format", + "-show_streams", + "\"{}\"".format(path_to_file) + ] + command = " ".join(args) + logger.debug("FFprobe command: \"{}\"".format(command)) + popen = subprocess.Popen( + command, + shell=True, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE + ) + + popen_stdout, popen_stderr = popen.communicate() + if popen_stdout: + logger.debug("ffprobe stdout: {}".format(popen_stdout)) + + if popen_stderr: + logger.debug("ffprobe stderr: {}".format(popen_stderr)) + return json.loads(popen_stdout)["streams"] diff --git a/pype/lib/lib_old.py b/pype/lib/lib_old.py index eafd34264c..be4211d067 100644 --- a/pype/lib/lib_old.py +++ b/pype/lib/lib_old.py @@ -1,15 +1,9 @@ import os -import re -import json -import collections import logging -import itertools import contextlib import subprocess -from avalon import io, pipeline import avalon.api -from ..api import config, Anatomy, Logger log = logging.getLogger(__name__) @@ -79,923 +73,6 @@ def _subprocess(*args, **kwargs): return full_output -# Avalon databse functions - - -def get_hierarchy(asset_name=None): - """Obtain asset hierarchy path string from mongo db. 
- - Returns: - string: asset hierarchy path - - """ - if not asset_name: - asset_name = io.Session.get("AVALON_ASSET", os.environ["AVALON_ASSET"]) - - asset_entity = io.find_one({ - "type": 'asset', - "name": asset_name - }) - - not_set = "PARENTS_NOT_SET" - entity_parents = asset_entity.get("data", {}).get("parents", not_set) - - # If entity already have parents then just return joined - if entity_parents != not_set: - return "/".join(entity_parents) - - # Else query parents through visualParents and store result to entity - hierarchy_items = [] - entity = asset_entity - while True: - parent_id = entity.get("data", {}).get("visualParent") - if not parent_id: - break - entity = io.find_one({"_id": parent_id}) - hierarchy_items.append(entity["name"]) - - # Add parents to entity data for next query - entity_data = asset_entity.get("data", {}) - entity_data["parents"] = hierarchy_items - io.update_many( - {"_id": asset_entity["_id"]}, - {"$set": {"data": entity_data}} - ) - - return "/".join(hierarchy_items) - - -def is_latest(representation): - """Return whether the representation is from latest version. - - Args: - representation (dict): The representation document from the database. - - Returns: - bool: Whether the representation is of latest version. 
- - """ - version = io.find_one({"_id": representation['parent']}) - if version["type"] == "master_version": - return True - - # Get highest version under the parent - highest_version = io.find_one({ - "type": "version", - "parent": version["parent"] - }, sort=[("name", -1)], projection={"name": True}) - - if version['name'] == highest_version['name']: - return True - else: - return False - - -def any_outdated(): - """Return whether the current scene has any outdated content.""" - checked = set() - host = avalon.api.registered_host() - for container in host.ls(): - representation = container['representation'] - if representation in checked: - continue - - representation_doc = io.find_one( - { - "_id": io.ObjectId(representation), - "type": "representation" - }, - projection={"parent": True} - ) - if representation_doc and not is_latest(representation_doc): - return True - - log.debug("Container '{objectName}' has an invalid " - "representation, it is missing in the " - "database".format(**container)) - - checked.add(representation) - return False - - -def switch_item(container, - asset_name=None, - subset_name=None, - representation_name=None): - """Switch container asset, subset or representation of a container by name. - - It'll always switch to the latest version - of course a different - approach could be implemented. - - Args: - container (dict): data of the item to switch with - asset_name (str): name of the asset - subset_name (str): name of the subset - representation_name (str): name of the representation - - Returns: - dict - - """ - - if all(not x for x in [asset_name, subset_name, representation_name]): - raise ValueError("Must have at least one change provided to switch.") - - # Collect any of current asset, subset and representation if not provided - # so we can use the original name from those. 
- if any(not x for x in [asset_name, subset_name, representation_name]): - _id = io.ObjectId(container["representation"]) - representation = io.find_one({"type": "representation", "_id": _id}) - version, subset, asset, project = io.parenthood(representation) - - if asset_name is None: - asset_name = asset["name"] - - if subset_name is None: - subset_name = subset["name"] - - if representation_name is None: - representation_name = representation["name"] - - # Find the new one - asset = io.find_one({ - "name": asset_name, - "type": "asset" - }) - assert asset, ("Could not find asset in the database with the name " - "'%s'" % asset_name) - - subset = io.find_one({ - "name": subset_name, - "type": "subset", - "parent": asset["_id"] - }) - assert subset, ("Could not find subset in the database with the name " - "'%s'" % subset_name) - - version = io.find_one( - { - "type": "version", - "parent": subset["_id"] - }, - sort=[('name', -1)] - ) - - assert version, "Could not find a version for {}.{}".format( - asset_name, subset_name - ) - - representation = io.find_one({ - "name": representation_name, - "type": "representation", - "parent": version["_id"]} - ) - - assert representation, ("Could not find representation in the database " - "with the name '%s'" % representation_name) - - avalon.api.switch(container, representation) - - return representation - - -def get_asset(asset_name=None): - """ Returning asset document from database """ - if not asset_name: - asset_name = avalon.api.Session["AVALON_ASSET"] - - asset_document = io.find_one({ - "name": asset_name, - "type": "asset" - }) - - if not asset_document: - raise TypeError("Entity \"{}\" was not found in DB".format(asset_name)) - - return asset_document - - -def get_subsets(asset_name, - regex_filter=None, - version=None, - representations=["exr", "dpx"]): - """ - Query subsets with filter on name. - - The method will return all found subsets and its defined version - and subsets. 
Version could be specified with number. Representation - can be filtered. - - Arguments: - asset_name (str): asset (shot) name - regex_filter (raw): raw string with filter pattern - version (str or int): `last` or number of version - representations (list): list for all representations - - Returns: - dict: subsets with version and representaions in keys - """ - - # query asset from db - asset_io = io.find_one({"type": "asset", "name": asset_name}) - - # check if anything returned - assert asset_io, ( - "Asset not existing. Check correct name: `{}`").format(asset_name) - - # create subsets query filter - filter_query = {"type": "subset", "parent": asset_io["_id"]} - - # add reggex filter string into query filter - if regex_filter: - filter_query.update({"name": {"$regex": r"{}".format(regex_filter)}}) - else: - filter_query.update({"name": {"$regex": r'.*'}}) - - # query all assets - subsets = [s for s in io.find(filter_query)] - - assert subsets, ("No subsets found. Check correct filter. 
" - "Try this for start `r'.*'`: " - "asset: `{}`").format(asset_name) - - output_dict = {} - # Process subsets - for subset in subsets: - if not version: - version_sel = io.find_one( - { - "type": "version", - "parent": subset["_id"] - }, - sort=[("name", -1)] - ) - else: - assert isinstance(version, int), "version needs to be `int` type" - version_sel = io.find_one({ - "type": "version", - "parent": subset["_id"], - "name": int(version) - }) - - find_dict = {"type": "representation", - "parent": version_sel["_id"]} - - filter_repr = {"name": {"$in": representations}} - - find_dict.update(filter_repr) - repres_out = [i for i in io.find(find_dict)] - - if len(repres_out) > 0: - output_dict[subset["name"]] = {"version": version_sel, - "representations": repres_out} - - return output_dict - - -def get_linked_assets(asset_entity): - """Return linked assets for `asset_entity`.""" - inputs = asset_entity["data"].get("inputs", []) - inputs = [io.find_one({"_id": x}) for x in inputs] - return inputs - - -class BuildWorkfile: - """Wrapper for build workfile process. - - Load representations for current context by build presets. Build presets - are host related, since each host has it's loaders. - """ - - @staticmethod - def map_subsets_by_family(subsets): - subsets_by_family = collections.defaultdict(list) - for subset in subsets: - family = subset["data"].get("family") - if not family: - families = subset["data"].get("families") - if not families: - continue - family = families[0] - - subsets_by_family[family].append(subset) - return subsets_by_family - - def process(self): - """Main method of this wrapper. - - Building of workfile is triggered and is possible to implement - post processing of loaded containers if necessary. - """ - containers = self.build_workfile() - - return containers - - def build_workfile(self): - """Prepares and load containers into workfile. 
- - Loads latest versions of current and linked assets to workfile by logic - stored in Workfile profiles from presets. Profiles are set by host, - filtered by current task name and used by families. - - Each family can specify representation names and loaders for - representations and first available and successful loaded - representation is returned as container. - - At the end you'll get list of loaded containers per each asset. - - loaded_containers [{ - "asset_entity": , - "containers": [, , ...] - }, { - "asset_entity": , - "containers": [, ...] - }, { - ... - }] - """ - # Get current asset name and entity - current_asset_name = io.Session["AVALON_ASSET"] - current_asset_entity = io.find_one({ - "type": "asset", - "name": current_asset_name - }) - - # Skip if asset was not found - if not current_asset_entity: - print("Asset entity with name `{}` was not found".format( - current_asset_name - )) - return - - # Prepare available loaders - loaders_by_name = {} - for loader in avalon.api.discover(avalon.api.Loader): - loader_name = loader.__name__ - if loader_name in loaders_by_name: - raise KeyError( - "Duplicated loader name {0}!".format(loader_name) - ) - loaders_by_name[loader_name] = loader - - # Skip if there are any loaders - if not loaders_by_name: - log.warning("There are no registered loaders.") - return - - # Get current task name - current_task_name = io.Session["AVALON_TASK"] - - # Load workfile presets for task - build_presets = self.get_build_presets(current_task_name) - - # Skip if there are any presets for task - if not build_presets: - log.warning( - "Current task `{}` does not have any loading preset.".format( - current_task_name - ) - ) - return - - # Get presets for loading current asset - current_context_profiles = build_presets.get("current_context") - # Get presets for loading linked assets - link_context_profiles = build_presets.get("linked_assets") - # Skip if both are missing - if not current_context_profiles and not 
link_context_profiles: - log.warning("Current task `{}` has empty loading preset.".format( - current_task_name - )) - return - - elif not current_context_profiles: - log.warning(( - "Current task `{}` doesn't have any loading" - " preset for it's context." - ).format(current_task_name)) - - elif not link_context_profiles: - log.warning(( - "Current task `{}` doesn't have any" - "loading preset for it's linked assets." - ).format(current_task_name)) - - # Prepare assets to process by workfile presets - assets = [] - current_asset_id = None - if current_context_profiles: - # Add current asset entity if preset has current context set - assets.append(current_asset_entity) - current_asset_id = current_asset_entity["_id"] - - if link_context_profiles: - # Find and append linked assets if preset has set linked mapping - link_assets = get_linked_assets(current_asset_entity) - if link_assets: - assets.extend(link_assets) - - # Skip if there are no assets. This can happen if only linked mapping - # is set and there are no links for his asset. - if not assets: - log.warning( - "Asset does not have linked assets. Nothing to process." 
- ) - return - - # Prepare entities from database for assets - prepared_entities = self._collect_last_version_repres(assets) - - # Load containers by prepared entities and presets - loaded_containers = [] - # - Current asset containers - if current_asset_id and current_asset_id in prepared_entities: - current_context_data = prepared_entities.pop(current_asset_id) - loaded_data = self.load_containers_by_asset_data( - current_context_data, current_context_profiles, loaders_by_name - ) - if loaded_data: - loaded_containers.append(loaded_data) - - # - Linked assets container - for linked_asset_data in prepared_entities.values(): - loaded_data = self.load_containers_by_asset_data( - linked_asset_data, link_context_profiles, loaders_by_name - ) - if loaded_data: - loaded_containers.append(loaded_data) - - # Return list of loaded containers - return loaded_containers - - def get_build_presets(self, task_name): - """ Returns presets to build workfile for task name. - - Presets are loaded for current project set in - io.Session["AVALON_PROJECT"], filtered by registered host - and entered task name. - - :param task_name: Task name used for filtering build presets. - :type task_name: str - :return: preset per eneter task - :rtype: dict | None - """ - host_name = avalon.api.registered_host().__name__.rsplit(".", 1)[-1] - presets = config.get_presets(io.Session["AVALON_PROJECT"]) - # Get presets for host - build_presets = ( - presets["plugins"] - .get(host_name, {}) - .get("workfile_build") - ) - if not build_presets: - return - - task_name_low = task_name.lower() - per_task_preset = None - for preset in build_presets: - preset_tasks = preset.get("tasks") or [] - preset_tasks_low = [task.lower() for task in preset_tasks] - if task_name_low in preset_tasks_low: - per_task_preset = preset - break - - return per_task_preset - - def _filter_build_profiles(self, build_profiles, loaders_by_name): - """ Filter build profiles by loaders and prepare process data. 
- - Valid profile must have "loaders", "families" and "repre_names" keys - with valid values. - - "loaders" expects list of strings representing possible loaders. - - "families" expects list of strings for filtering - by main subset family. - - "repre_names" expects list of strings for filtering by - representation name. - - Lowered "families" and "repre_names" are prepared for each profile with - all required keys. - - :param build_profiles: Profiles for building workfile. - :type build_profiles: dict - :param loaders_by_name: Available loaders per name. - :type loaders_by_name: dict - :return: Filtered and prepared profiles. - :rtype: list - """ - valid_profiles = [] - for profile in build_profiles: - # Check loaders - profile_loaders = profile.get("loaders") - if not profile_loaders: - log.warning(( - "Build profile has missing loaders configuration: {0}" - ).format(json.dumps(profile, indent=4))) - continue - - # Check if any loader is available - loaders_match = False - for loader_name in profile_loaders: - if loader_name in loaders_by_name: - loaders_match = True - break - - if not loaders_match: - log.warning(( - "All loaders from Build profile are not available: {0}" - ).format(json.dumps(profile, indent=4))) - continue - - # Check families - profile_families = profile.get("families") - if not profile_families: - log.warning(( - "Build profile is missing families configuration: {0}" - ).format(json.dumps(profile, indent=4))) - continue - - # Check representation names - profile_repre_names = profile.get("repre_names") - if not profile_repre_names: - log.warning(( - "Build profile is missing" - " representation names filtering: {0}" - ).format(json.dumps(profile, indent=4))) - continue - - # Prepare lowered families and representation names - profile["families_lowered"] = [ - fam.lower() for fam in profile_families - ] - profile["repre_names_lowered"] = [ - name.lower() for name in profile_repre_names - ] - - valid_profiles.append(profile) - - return 
valid_profiles - - def _prepare_profile_for_subsets(self, subsets, profiles): - """Select profile for each subset byt it's data. - - Profiles are filtered for each subset individually. - Profile is filtered by subset's family, optionally by name regex and - representation names set in profile. - It is possible to not find matching profile for subset, in that case - subset is skipped and it is possible that none of subsets have - matching profile. - - :param subsets: Subset documents. - :type subsets: list - :param profiles: Build profiles. - :type profiles: dict - :return: Profile by subset's id. - :rtype: dict - """ - # Prepare subsets - subsets_by_family = self.map_subsets_by_family(subsets) - - profiles_per_subset_id = {} - for family, subsets in subsets_by_family.items(): - family_low = family.lower() - for profile in profiles: - # Skip profile if does not contain family - if family_low not in profile["families_lowered"]: - continue - - # Precompile name filters as regexes - profile_regexes = profile.get("subset_name_filters") - if profile_regexes: - _profile_regexes = [] - for regex in profile_regexes: - _profile_regexes.append(re.compile(regex)) - profile_regexes = _profile_regexes - - # TODO prepare regex compilation - for subset in subsets: - # Verify regex filtering (optional) - if profile_regexes: - valid = False - for pattern in profile_regexes: - if re.match(pattern, subset["name"]): - valid = True - break - - if not valid: - continue - - profiles_per_subset_id[subset["_id"]] = profile - - # break profiles loop on finding the first matching profile - break - return profiles_per_subset_id - - def load_containers_by_asset_data( - self, asset_entity_data, build_profiles, loaders_by_name - ): - """Load containers for entered asset entity by Build profiles. - - :param asset_entity_data: Prepared data with subsets, last version - and representations for specific asset. - :type asset_entity_data: dict - :param build_profiles: Build profiles. 
- :type build_profiles: dict - :param loaders_by_name: Available loaders per name. - :type loaders_by_name: dict - :return: Output contains asset document and loaded containers. - :rtype: dict - """ - - # Make sure all data are not empty - if not asset_entity_data or not build_profiles or not loaders_by_name: - return - - asset_entity = asset_entity_data["asset_entity"] - - valid_profiles = self._filter_build_profiles( - build_profiles, loaders_by_name - ) - if not valid_profiles: - log.warning( - "There are not valid Workfile profiles. Skipping process." - ) - return - - log.debug("Valid Workfile profiles: {}".format(valid_profiles)) - - subsets_by_id = {} - version_by_subset_id = {} - repres_by_version_id = {} - for subset_id, in_data in asset_entity_data["subsets"].items(): - subset_entity = in_data["subset_entity"] - subsets_by_id[subset_entity["_id"]] = subset_entity - - version_data = in_data["version"] - version_entity = version_data["version_entity"] - version_by_subset_id[subset_id] = version_entity - repres_by_version_id[version_entity["_id"]] = ( - version_data["repres"] - ) - - if not subsets_by_id: - log.warning("There are not subsets for asset {0}".format( - asset_entity["name"] - )) - return - - profiles_per_subset_id = self._prepare_profile_for_subsets( - subsets_by_id.values(), valid_profiles - ) - if not profiles_per_subset_id: - log.warning("There are not valid subsets.") - return - - valid_repres_by_subset_id = collections.defaultdict(list) - for subset_id, profile in profiles_per_subset_id.items(): - profile_repre_names = profile["repre_names_lowered"] - - version_entity = version_by_subset_id[subset_id] - version_id = version_entity["_id"] - repres = repres_by_version_id[version_id] - for repre in repres: - repre_name_low = repre["name"].lower() - if repre_name_low in profile_repre_names: - valid_repres_by_subset_id[subset_id].append(repre) - - # DEBUG message - msg = "Valid representations for Asset: `{}`".format( - asset_entity["name"] - ) - 
for subset_id, repres in valid_repres_by_subset_id.items(): - subset = subsets_by_id[subset_id] - msg += "\n# Subset Name/ID: `{}`/{}".format( - subset["name"], subset_id - ) - for repre in repres: - msg += "\n## Repre name: `{}`".format(repre["name"]) - - log.debug(msg) - - containers = self._load_containers( - valid_repres_by_subset_id, subsets_by_id, - profiles_per_subset_id, loaders_by_name - ) - - return { - "asset_entity": asset_entity, - "containers": containers - } - - def _load_containers( - self, repres_by_subset_id, subsets_by_id, - profiles_per_subset_id, loaders_by_name - ): - """Real load by collected data happens here. - - Loading of representations per subset happens here. Each subset can - loads one representation. Loading is tried in specific order. - Representations are tried to load by names defined in configuration. - If subset has representation matching representation name each loader - is tried to load it until any is successful. If none of them was - successful then next reprensentation name is tried. - Subset process loop ends when any representation is loaded or - all matching representations were already tried. - - :param repres_by_subset_id: Available representations mapped - by their parent (subset) id. - :type repres_by_subset_id: dict - :param subsets_by_id: Subset documents mapped by their id. - :type subsets_by_id: dict - :param profiles_per_subset_id: Build profiles mapped by subset id. - :type profiles_per_subset_id: dict - :param loaders_by_name: Available loaders per name. - :type loaders_by_name: dict - :return: Objects of loaded containers. 
- :rtype: list - """ - loaded_containers = [] - for subset_id, repres in repres_by_subset_id.items(): - subset_name = subsets_by_id[subset_id]["name"] - - profile = profiles_per_subset_id[subset_id] - loaders_last_idx = len(profile["loaders"]) - 1 - repre_names_last_idx = len(profile["repre_names_lowered"]) - 1 - - repre_by_low_name = { - repre["name"].lower(): repre for repre in repres - } - - is_loaded = False - for repre_name_idx, profile_repre_name in enumerate( - profile["repre_names_lowered"] - ): - # Break iteration if representation was already loaded - if is_loaded: - break - - repre = repre_by_low_name.get(profile_repre_name) - if not repre: - continue - - for loader_idx, loader_name in enumerate(profile["loaders"]): - if is_loaded: - break - - loader = loaders_by_name.get(loader_name) - if not loader: - continue - try: - container = avalon.api.load( - loader, - repre["_id"], - name=subset_name - ) - loaded_containers.append(container) - is_loaded = True - - except Exception as exc: - if exc == pipeline.IncompatibleLoaderError: - log.info(( - "Loader `{}` is not compatible with" - " representation `{}`" - ).format(loader_name, repre["name"])) - - else: - log.error( - "Unexpected error happened during loading", - exc_info=True - ) - - msg = "Loading failed." - if loader_idx < loaders_last_idx: - msg += " Trying next loader." - elif repre_name_idx < repre_names_last_idx: - msg += ( - " Loading of subset `{}` was not successful." - ).format(subset_name) - else: - msg += " Trying next representation." - log.info(msg) - - return loaded_containers - - def _collect_last_version_repres(self, asset_entities): - """Collect subsets, versions and representations for asset_entities. 
- - :param asset_entities: Asset entities for which want to find data - :type asset_entities: list - :return: collected entities - :rtype: dict - - Example output: - ``` - { - {Asset ID}: { - "asset_entity": , - "subsets": { - {Subset ID}: { - "subset_entity": , - "version": { - "version_entity": , - "repres": [ - , , ... - ] - } - }, - ... - } - }, - ... - } - output[asset_id]["subsets"][subset_id]["version"]["repres"] - ``` - """ - - if not asset_entities: - return {} - - asset_entity_by_ids = {asset["_id"]: asset for asset in asset_entities} - - subsets = list(io.find({ - "type": "subset", - "parent": {"$in": asset_entity_by_ids.keys()} - })) - subset_entity_by_ids = {subset["_id"]: subset for subset in subsets} - - sorted_versions = list(io.find({ - "type": "version", - "parent": {"$in": subset_entity_by_ids.keys()} - }).sort("name", -1)) - - subset_id_with_latest_version = [] - last_versions_by_id = {} - for version in sorted_versions: - subset_id = version["parent"] - if subset_id in subset_id_with_latest_version: - continue - subset_id_with_latest_version.append(subset_id) - last_versions_by_id[version["_id"]] = version - - repres = io.find({ - "type": "representation", - "parent": {"$in": last_versions_by_id.keys()} - }) - - output = {} - for repre in repres: - version_id = repre["parent"] - version = last_versions_by_id[version_id] - - subset_id = version["parent"] - subset = subset_entity_by_ids[subset_id] - - asset_id = subset["parent"] - asset = asset_entity_by_ids[asset_id] - - if asset_id not in output: - output[asset_id] = { - "asset_entity": asset, - "subsets": {} - } - - if subset_id not in output[asset_id]["subsets"]: - output[asset_id]["subsets"][subset_id] = { - "subset_entity": subset, - "version": { - "version_entity": version, - "repres": [] - } - } - - output[asset_id]["subsets"][subset_id]["version"]["repres"].append( - repre - ) - - return output - - -def ffprobe_streams(path_to_file): - """Load streams from entered filepath via 
ffprobe.""" - log.info( - "Getting information about input \"{}\".".format(path_to_file) - ) - args = [ - get_ffmpeg_tool_path("ffprobe"), - "-v quiet", - "-print_format json", - "-show_format", - "-show_streams", - "\"{}\"".format(path_to_file) - ] - command = " ".join(args) - log.debug("FFprobe command: \"{}\"".format(command)) - popen = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE) - - popen_output = popen.communicate()[0] - log.debug("FFprobe output: {}".format(popen_output)) - return json.loads(popen_output)["streams"] - - def source_hash(filepath, *args): """Generate simple identifier for a source file. This is used to identify whether a source file has previously been @@ -1015,75 +92,3 @@ def source_hash(filepath, *args): time = str(os.path.getmtime(filepath)) size = str(os.path.getsize(filepath)) return "|".join([file_name, time, size] + list(args)).replace(".", ",") - - -def get_latest_version(asset_name, subset_name, dbcon=None, project_name=None): - """Retrieve latest version from `asset_name`, and `subset_name`. - - Do not use if you want to query more than 5 latest versions as this method - query 3 times to mongo for each call. For those cases is better to use - more efficient way, e.g. with help of aggregations. - - Args: - asset_name (str): Name of asset. - subset_name (str): Name of subset. - dbcon (avalon.mongodb.AvalonMongoDB, optional): Avalon Mongo connection - with Session. - project_name (str, optional): Find latest version in specific project. - - Returns: - None: If asset, subset or version were not found. - dict: Last version document for entered . 
- """ - - if not dbcon: - log.debug("Using `avalon.io` for query.") - dbcon = io - # Make sure is installed - io.install() - - if project_name and project_name != dbcon.Session.get("AVALON_PROJECT"): - # `avalon.io` has only `_database` attribute - # but `AvalonMongoDB` has `database` - database = getattr(dbcon, "database", dbcon._database) - collection = database[project_name] - else: - project_name = dbcon.Session.get("AVALON_PROJECT") - collection = dbcon - - log.debug(( - "Getting latest version for Project: \"{}\" Asset: \"{}\"" - " and Subset: \"{}\"" - ).format(project_name, asset_name, subset_name)) - - # Query asset document id by asset name - asset_doc = collection.find_one( - {"type": "asset", "name": asset_name}, - {"_id": True} - ) - if not asset_doc: - log.info( - "Asset \"{}\" was not found in Database.".format(asset_name) - ) - return None - - subset_doc = collection.find_one( - {"type": "subset", "name": subset_name, "parent": asset_doc["_id"]}, - {"_id": True} - ) - if not subset_doc: - log.info( - "Subset \"{}\" was not found in Database.".format(subset_name) - ) - return None - - version_doc = collection.find_one( - {"type": "version", "parent": subset_doc["_id"]}, - sort=[("name", -1)], - ) - if not version_doc: - log.info( - "Subset \"{}\" does not have any version yet.".format(subset_name) - ) - return None - return version_doc diff --git a/pype/plugins/celaction/publish/collect_audio.py b/pype/plugins/celaction/publish/collect_audio.py index c29e212d80..6e5d698c59 100644 --- a/pype/plugins/celaction/publish/collect_audio.py +++ b/pype/plugins/celaction/publish/collect_audio.py @@ -1,7 +1,8 @@ -import pyblish.api import os -import pype.api as pype +import pyblish.api +from avalon import io + from pprint import pformat @@ -15,7 +16,7 @@ class AppendCelactionAudio(pyblish.api.ContextPlugin): asset_entity = context.data["assetEntity"] # get all available representations - subsets = pype.get_subsets(asset_entity["name"], + subsets = 
self.get_subsets(asset_entity["name"], representations=["audio", "wav"] ) self.log.info(f"subsets is: {pformat(subsets)}") @@ -39,3 +40,83 @@ class AppendCelactionAudio(pyblish.api.ContextPlugin): 'audio_file: {}, has been added to context'.format(audio_file)) else: self.log.warning("Couldn't find any audio file on Ftrack.") + + def get_subsets( + self, + asset_name, + representations, + regex_filter=None, + version=None + ): + """ + Query subsets with filter on name. + + The method will return all found subsets and its defined version + and subsets. Version could be specified with number. Representation + can be filtered. + + Arguments: + asset_name (str): asset (shot) name + regex_filter (raw): raw string with filter pattern + version (str or int): `last` or number of version + representations (list): list for all representations + + Returns: + dict: subsets with version and representaions in keys + """ + + # query asset from db + asset_io = io.find_one({"type": "asset", "name": asset_name}) + + # check if anything returned + assert asset_io, ( + "Asset not existing. Check correct name: `{}`").format(asset_name) + + # create subsets query filter + filter_query = {"type": "subset", "parent": asset_io["_id"]} + + # add reggex filter string into query filter + if regex_filter: + filter_query["name"] = {"$regex": r"{}".format(regex_filter)} + + # query all assets + subsets = list(io.find(filter_query)) + + assert subsets, ("No subsets found. Check correct filter. 
" + "Try this for start `r'.*'`: " + "asset: `{}`").format(asset_name) + + output_dict = {} + # Process subsets + for subset in subsets: + if not version: + version_sel = io.find_one( + { + "type": "version", + "parent": subset["_id"] + }, + sort=[("name", -1)] + ) + else: + assert isinstance(version, int), ( + "version needs to be `int` type" + ) + version_sel = io.find_one({ + "type": "version", + "parent": subset["_id"], + "name": int(version) + }) + + find_dict = {"type": "representation", + "parent": version_sel["_id"]} + + filter_repr = {"name": {"$in": representations}} + + find_dict.update(filter_repr) + repres_out = [i for i in io.find(find_dict)] + + if len(repres_out) > 0: + output_dict[subset["name"]] = {"version": version_sel, + "representations": repres_out} + + return output_dict diff --git a/pype/scripts/fusion_switch_shot.py b/pype/scripts/fusion_switch_shot.py index f936b7d8e0..5791220acd 100644 --- a/pype/scripts/fusion_switch_shot.py +++ b/pype/scripts/fusion_switch_shot.py @@ -191,7 +191,7 @@ def switch(asset_name, filepath=None, new=True): representations = [] for container in containers: try: - representation = pype.switch_item(container, + representation = fusion_lib.switch_item(container, asset_name=asset_name) representations.append(representation) except Exception as e: diff --git a/pype/tests/test_lib_restructuralization.py b/pype/tests/test_lib_restructuralization.py index d8ef4f2f1e..5980f934c9 100644 --- a/pype/tests/test_lib_restructuralization.py +++ b/pype/tests/test_lib_restructuralization.py @@ -22,5 +22,15 @@ def test_backward_compatibility(printer): from pype.lib import get_version_from_path from pype.lib import version_up + from pype.lib import is_latest + from pype.lib import any_outdated + from pype.lib import get_asset + from pype.lib import get_hierarchy + from pype.lib import get_linked_assets + from pype.lib import get_latest_version + from pype.lib import ffprobe_streams + + from pype.hosts.fusion.lib import switch_item 
+ except ImportError as e: raise