From da8f010c9062b12547e9c0191e3d0453a156cef5 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Tue, 10 Nov 2020 10:10:26 +0100 Subject: [PATCH 01/12] updated lib_old with newer changes --- pype/lib/lib_old.py | 70 +++++++++++++++++++++++++++++++++++---------- 1 file changed, 55 insertions(+), 15 deletions(-) diff --git a/pype/lib/lib_old.py b/pype/lib/lib_old.py index b384c3a06a..114996cd90 100644 --- a/pype/lib/lib_old.py +++ b/pype/lib/lib_old.py @@ -9,7 +9,7 @@ import subprocess from avalon import io, pipeline import avalon.api -from ..api import config, Anatomy, Logger +from ..api import config log = logging.getLogger(__name__) @@ -695,10 +695,10 @@ class BuildWorkfile: current_task_name = io.Session["AVALON_TASK"] # Load workfile presets for task - build_presets = self.get_build_presets(current_task_name) + self.build_presets = self.get_build_presets(current_task_name) # Skip if there are any presets for task - if not build_presets: + if not self.build_presets: log.warning( "Current task `{}` does not have any loading preset.".format( current_task_name @@ -707,9 +707,9 @@ class BuildWorkfile: return # Get presets for loading current asset - current_context_profiles = build_presets.get("current_context") + current_context_profiles = self.build_presets.get("current_context") # Get presets for loading linked assets - link_context_profiles = build_presets.get("linked_assets") + link_context_profiles = self.build_presets.get("linked_assets") # Skip if both are missing if not current_context_profiles and not link_context_profiles: log.warning("Current task `{}` has empty loading preset.".format( @@ -901,7 +901,7 @@ class BuildWorkfile: :rtype: dict """ # Prepare subsets - subsets_by_family = self.map_subsets_by_family(subsets) + subsets_by_family = map_subsets_by_family(subsets) profiles_per_subset_id = {} for family, subsets in subsets_by_family.items(): @@ -1062,7 +1062,36 @@ class BuildWorkfile: :rtype: list """ loaded_containers = [] - for subset_id, repres in repres_by_subset_id.items(): + + # Get subset id order from build presets. + build_presets = self.build_presets.get("current_context", []) + build_presets += self.build_presets.get("linked_assets", []) + subset_ids_ordered = [] + for preset in build_presets: + for preset_family in preset["families"]: + for id, subset in subsets_by_id.items(): + if preset_family not in subset["data"].get("families", []): + continue + + subset_ids_ordered.append(id) + + # Order representations from subsets. + print("repres_by_subset_id", repres_by_subset_id) + representations_ordered = [] + representations = [] + for id in subset_ids_ordered: + for subset_id, repres in repres_by_subset_id.items(): + if repres in representations: + continue + + if id == subset_id: + representations_ordered.append((subset_id, repres)) + representations.append(repres) + + print("representations", representations) + + # Load ordered reprensentations. 
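+        # In short: subsets are visited in preset order; for each subset the
+        # representation names from its profile are tried one by one, and for
+        # each name every configured loader is attempted until one succeeds.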
+ for subset_id, repres in representations_ordered: subset_name = subsets_by_id[subset_id]["name"] profile = profiles_per_subset_id[subset_id] @@ -1222,13 +1251,15 @@ class BuildWorkfile: return output -def ffprobe_streams(path_to_file): +def ffprobe_streams(path_to_file, logger=None): """Load streams from entered filepath via ffprobe.""" - log.info( + if not logger: + logger = log + logger.info( "Getting information about input \"{}\".".format(path_to_file) ) args = [ - get_ffmpeg_tool_path("ffprobe"), + "\"{}\"".format(get_ffmpeg_tool_path("ffprobe")), "-v quiet", "-print_format json", "-show_format", @@ -1236,12 +1267,21 @@ def ffprobe_streams(path_to_file): "\"{}\"".format(path_to_file) ] command = " ".join(args) - log.debug("FFprobe command: \"{}\"".format(command)) - popen = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE) + logger.debug("FFprobe command: \"{}\"".format(command)) + popen = subprocess.Popen( + command, + shell=True, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE + ) - popen_output = popen.communicate()[0] - log.debug("FFprobe output: {}".format(popen_output)) - return json.loads(popen_output)["streams"] + popen_stdout, popen_stderr = popen.communicate() + if popen_stdout: + logger.debug("ffprobe stdout: {}".format(popen_stdout)) + + if popen_stderr: + logger.debug("ffprobe stderr: {}".format(popen_stderr)) + return json.loads(popen_stdout)["streams"] def source_hash(filepath, *args): From 324636a490e273ede976f4e78753913e039d202c Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Tue, 10 Nov 2020 10:17:59 +0100 Subject: [PATCH 02/12] fix method `map_subsets_by_family` call --- pype/lib/lib_old.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pype/lib/lib_old.py b/pype/lib/lib_old.py index 114996cd90..33ddf9ed49 100644 --- a/pype/lib/lib_old.py +++ b/pype/lib/lib_old.py @@ -901,7 +901,7 @@ class BuildWorkfile: :rtype: dict """ # Prepare subsets - subsets_by_family = map_subsets_by_family(subsets) + subsets_by_family = self.map_subsets_by_family(subsets) profiles_per_subset_id = {} for family, subsets in subsets_by_family.items(): From 4be779b9f69210f976c17b943699b8eed7a1ded7 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Tue, 10 Nov 2020 10:25:06 +0100 Subject: [PATCH 03/12] BuildWorkfile class has it's own logger --- pype/lib/lib_old.py | 44 ++++++++++++++++++++++++-------------------- 1 file changed, 24 insertions(+), 20 deletions(-) diff --git a/pype/lib/lib_old.py b/pype/lib/lib_old.py index 33ddf9ed49..1f608e368e 100644 --- a/pype/lib/lib_old.py +++ b/pype/lib/lib_old.py @@ -615,6 +615,8 @@ class BuildWorkfile: are host related, since each host has it's loaders. 
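+
+    Example (an illustrative sketch; assumes a registered host and an
+    active avalon session):
+        >>> builder = BuildWorkfile()
+        >>> containers = builder.process()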
""" + log = logging.getLogger("BuildWorkfile") + @staticmethod def map_subsets_by_family(subsets): subsets_by_family = collections.defaultdict(list) @@ -688,7 +690,7 @@ class BuildWorkfile: # Skip if there are any loaders if not loaders_by_name: - log.warning("There are no registered loaders.") + self.log.warning("There are no registered loaders.") return # Get current task name @@ -699,7 +701,7 @@ class BuildWorkfile: # Skip if there are any presets for task if not self.build_presets: - log.warning( + self.log.warning( "Current task `{}` does not have any loading preset.".format( current_task_name ) @@ -712,19 +714,21 @@ class BuildWorkfile: link_context_profiles = self.build_presets.get("linked_assets") # Skip if both are missing if not current_context_profiles and not link_context_profiles: - log.warning("Current task `{}` has empty loading preset.".format( - current_task_name - )) + self.log.warning( + "Current task `{}` has empty loading preset.".format( + current_task_name + ) + ) return elif not current_context_profiles: - log.warning(( + self.log.warning(( "Current task `{}` doesn't have any loading" " preset for it's context." ).format(current_task_name)) elif not link_context_profiles: - log.warning(( + self.log.warning(( "Current task `{}` doesn't have any" "loading preset for it's linked assets." ).format(current_task_name)) @@ -746,7 +750,7 @@ class BuildWorkfile: # Skip if there are no assets. This can happen if only linked mapping # is set and there are no links for his asset. if not assets: - log.warning( + self.log.warning( "Asset does not have linked assets. Nothing to process." ) return @@ -836,7 +840,7 @@ class BuildWorkfile: # Check loaders profile_loaders = profile.get("loaders") if not profile_loaders: - log.warning(( + self.log.warning(( "Build profile has missing loaders configuration: {0}" ).format(json.dumps(profile, indent=4))) continue @@ -849,7 +853,7 @@ class BuildWorkfile: break if not loaders_match: - log.warning(( + self.log.warning(( "All loaders from Build profile are not available: {0}" ).format(json.dumps(profile, indent=4))) continue @@ -857,7 +861,7 @@ class BuildWorkfile: # Check families profile_families = profile.get("families") if not profile_families: - log.warning(( + self.log.warning(( "Build profile is missing families configuration: {0}" ).format(json.dumps(profile, indent=4))) continue @@ -865,7 +869,7 @@ class BuildWorkfile: # Check representation names profile_repre_names = profile.get("repre_names") if not profile_repre_names: - log.warning(( + self.log.warning(( "Build profile is missing" " representation names filtering: {0}" ).format(json.dumps(profile, indent=4))) @@ -964,12 +968,12 @@ class BuildWorkfile: build_profiles, loaders_by_name ) if not valid_profiles: - log.warning( + self.log.warning( "There are not valid Workfile profiles. Skipping process." 
) return - log.debug("Valid Workfile profiles: {}".format(valid_profiles)) + self.log.debug("Valid Workfile profiles: {}".format(valid_profiles)) subsets_by_id = {} version_by_subset_id = {} @@ -986,7 +990,7 @@ class BuildWorkfile: ) if not subsets_by_id: - log.warning("There are not subsets for asset {0}".format( + self.log.warning("There are not subsets for asset {0}".format( asset_entity["name"] )) return @@ -995,7 +999,7 @@ class BuildWorkfile: subsets_by_id.values(), valid_profiles ) if not profiles_per_subset_id: - log.warning("There are not valid subsets.") + self.log.warning("There are not valid subsets.") return valid_repres_by_subset_id = collections.defaultdict(list) @@ -1022,7 +1026,7 @@ class BuildWorkfile: for repre in repres: msg += "\n## Repre name: `{}`".format(repre["name"]) - log.debug(msg) + self.log.debug(msg) containers = self._load_containers( valid_repres_by_subset_id, subsets_by_id, @@ -1132,13 +1136,13 @@ class BuildWorkfile: except Exception as exc: if exc == pipeline.IncompatibleLoaderError: - log.info(( + self.log.info(( "Loader `{}` is not compatible with" " representation `{}`" ).format(loader_name, repre["name"])) else: - log.error( + self.log.error( "Unexpected error happened during loading", exc_info=True ) @@ -1152,7 +1156,7 @@ class BuildWorkfile: ).format(subset_name) else: msg += " Trying next representation." - log.info(msg) + self.log.info(msg) return loaded_containers From 3edf202815d769e854021fa0efc795c01b6407e1 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Tue, 10 Nov 2020 11:40:53 +0100 Subject: [PATCH 04/12] moved all avalon context functions to one file --- pype/lib/__init__.py | 28 ++- pype/lib/avalon_context.py | 360 +++++++++++++++++++++++++++++++++++++ pype/lib/lib_old.py | 353 ------------------------------------ 3 files changed, 380 insertions(+), 361 deletions(-) create mode 100644 pype/lib/avalon_context.py diff --git a/pype/lib/__init__.py b/pype/lib/__init__.py index a303bf038d..002689d21c 100644 --- a/pype/lib/__init__.py +++ b/pype/lib/__init__.py @@ -6,6 +6,17 @@ from .deprecated import ( set_io_database ) +from .avalon_context import ( + is_latest, + any_outdated, + switch_item, + get_asset, + get_hierarchy, + get_subsets, + get_linked_assets, + get_latest_version +) + from .hooks import PypeHook, execute_hook from .applications import ( @@ -20,32 +31,33 @@ from .lib_old import ( _subprocess, get_paths_from_environ, get_ffmpeg_tool_path, - get_hierarchy, add_tool_to_environment, modified_environ, pairwise, grouper, - is_latest, - any_outdated, _rreplace, version_up, - switch_item, _get_host_name, - get_asset, get_version_from_path, get_last_version_from_path, - get_subsets, - get_linked_assets, BuildWorkfile, ffprobe_streams, source_hash, - get_latest_version ) __all__ = [ "get_avalon_database", "set_io_database", + "is_latest", + "any_outdated", + "switch_item", + "get_asset", + "get_hierarchy", + "get_subsets", + "get_linked_assets", + "get_latest_version", + "PypeHook", "execute_hook", diff --git a/pype/lib/avalon_context.py b/pype/lib/avalon_context.py new file mode 100644 index 0000000000..e9cb4c4f5b --- /dev/null +++ b/pype/lib/avalon_context.py @@ -0,0 +1,360 @@ +import os +import logging + +from avalon import io +import avalon.api + +log = logging.getLogger("AvalonContext") + + +def is_latest(representation): + """Return whether the representation is from latest version + + Args: + representation (dict): The representation document from the database. + + Returns: + bool: Whether the representation is of latest version. 
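+
+    Example (an illustrative sketch; assumes a representation document
+    queried from the current project):
+        >>> repre_doc = io.find_one({"type": "representation"})
+        >>> is_latest(repre_doc)
+        True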
+ + """ + + version = io.find_one({"_id": representation['parent']}) + if version["type"] == "master_version": + return True + + # Get highest version under the parent + highest_version = io.find_one({ + "type": "version", + "parent": version["parent"] + }, sort=[("name", -1)], projection={"name": True}) + + if version['name'] == highest_version['name']: + return True + else: + return False + + +def any_outdated(): + """Return whether the current scene has any outdated content""" + + checked = set() + host = avalon.api.registered_host() + for container in host.ls(): + representation = container['representation'] + if representation in checked: + continue + + representation_doc = io.find_one( + { + "_id": io.ObjectId(representation), + "type": "representation" + }, + projection={"parent": True} + ) + if representation_doc and not is_latest(representation_doc): + return True + elif not representation_doc: + log.debug("Container '{objectName}' has an invalid " + "representation, it is missing in the " + "database".format(**container)) + + checked.add(representation) + return False + + +def switch_item(container, + asset_name=None, + subset_name=None, + representation_name=None): + """Switch container asset, subset or representation of a container by name. + + It'll always switch to the latest version - of course a different + approach could be implemented. + + Args: + container (dict): data of the item to switch with + asset_name (str): name of the asset + subset_name (str): name of the subset + representation_name (str): name of the representation + + Returns: + dict + + """ + + if all(not x for x in [asset_name, subset_name, representation_name]): + raise ValueError("Must have at least one change provided to switch.") + + # Collect any of current asset, subset and representation if not provided + # so we can use the original name from those. 
+ if any(not x for x in [asset_name, subset_name, representation_name]): + _id = io.ObjectId(container["representation"]) + representation = io.find_one({"type": "representation", "_id": _id}) + version, subset, asset, project = io.parenthood(representation) + + if asset_name is None: + asset_name = asset["name"] + + if subset_name is None: + subset_name = subset["name"] + + if representation_name is None: + representation_name = representation["name"] + + # Find the new one + asset = io.find_one({ + "name": asset_name, + "type": "asset" + }) + assert asset, ("Could not find asset in the database with the name " + "'%s'" % asset_name) + + subset = io.find_one({ + "name": subset_name, + "type": "subset", + "parent": asset["_id"] + }) + assert subset, ("Could not find subset in the database with the name " + "'%s'" % subset_name) + + version = io.find_one( + { + "type": "version", + "parent": subset["_id"] + }, + sort=[('name', -1)] + ) + + assert version, "Could not find a version for {}.{}".format( + asset_name, subset_name + ) + + representation = io.find_one({ + "name": representation_name, + "type": "representation", + "parent": version["_id"]} + ) + + assert representation, ("Could not find representation in the database " + "with the name '%s'" % representation_name) + + avalon.api.switch(container, representation) + + return representation + + +def get_asset(asset_name=None): + """ Returning asset document from database """ + if not asset_name: + asset_name = avalon.api.Session["AVALON_ASSET"] + + asset_document = io.find_one({ + "name": asset_name, + "type": "asset" + }) + + if not asset_document: + raise TypeError("Entity \"{}\" was not found in DB".format(asset_name)) + + return asset_document + + +def get_hierarchy(asset_name=None): + """ + Obtain asset hierarchy path string from mongo db + + Returns: + string: asset hierarchy path + + """ + if not asset_name: + asset_name = io.Session.get("AVALON_ASSET", os.environ["AVALON_ASSET"]) + + asset_entity = io.find_one({ + "type": 'asset', + "name": asset_name + }) + + not_set = "PARENTS_NOT_SET" + entity_parents = asset_entity.get("data", {}).get("parents", not_set) + + # If entity already have parents then just return joined + if entity_parents != not_set: + return "/".join(entity_parents) + + # Else query parents through visualParents and store result to entity + hierarchy_items = [] + entity = asset_entity + while True: + parent_id = entity.get("data", {}).get("visualParent") + if not parent_id: + break + entity = io.find_one({"_id": parent_id}) + hierarchy_items.append(entity["name"]) + + # Add parents to entity data for next query + entity_data = asset_entity.get("data", {}) + entity_data["parents"] = hierarchy_items + io.update_many( + {"_id": asset_entity["_id"]}, + {"$set": {"data": entity_data}} + ) + + return "/".join(hierarchy_items) + + +def get_subsets(asset_name, + regex_filter=None, + version=None, + representations=["exr", "dpx"]): + """ + Query subsets with filter on name. + + The method will return all found subsets and its defined version + and subsets. Version could be specified with number. Representation + can be filtered. 
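+
+    Example (an illustrative sketch; the asset and subset names are
+    placeholders):
+        >>> subsets = get_subsets("sh010", representations=["exr"])
+        >>> subsets["renderMain"]["version"]["name"]
+        4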
+ + Arguments: + asset_name (str): asset (shot) name + regex_filter (raw): raw string with filter pattern + version (str or int): `last` or number of version + representations (list): list for all representations + + Returns: + dict: subsets with version and representaions in keys + """ + + # query asset from db + asset_io = io.find_one({"type": "asset", "name": asset_name}) + + # check if anything returned + assert asset_io, ( + "Asset not existing. Check correct name: `{}`").format(asset_name) + + # create subsets query filter + filter_query = {"type": "subset", "parent": asset_io["_id"]} + + # add reggex filter string into query filter + if regex_filter: + filter_query.update({"name": {"$regex": r"{}".format(regex_filter)}}) + else: + filter_query.update({"name": {"$regex": r'.*'}}) + + # query all assets + subsets = [s for s in io.find(filter_query)] + + assert subsets, ("No subsets found. Check correct filter. " + "Try this for start `r'.*'`: " + "asset: `{}`").format(asset_name) + + output_dict = {} + # Process subsets + for subset in subsets: + if not version: + version_sel = io.find_one( + { + "type": "version", + "parent": subset["_id"] + }, + sort=[("name", -1)] + ) + else: + assert isinstance(version, int), "version needs to be `int` type" + version_sel = io.find_one({ + "type": "version", + "parent": subset["_id"], + "name": int(version) + }) + + find_dict = {"type": "representation", + "parent": version_sel["_id"]} + + filter_repr = {"name": {"$in": representations}} + + find_dict.update(filter_repr) + repres_out = [i for i in io.find(find_dict)] + + if len(repres_out) > 0: + output_dict[subset["name"]] = {"version": version_sel, + "representations": repres_out} + + return output_dict + + +def get_linked_assets(asset_entity): + """Return linked assets for `asset_entity`.""" + inputs = asset_entity["data"].get("inputs", []) + inputs = [io.find_one({"_id": x}) for x in inputs] + return inputs + + +def get_latest_version(asset_name, subset_name, dbcon=None, project_name=None): + """Retrieve latest version from `asset_name`, and `subset_name`. + + Do not use if you want to query more than 5 latest versions as this method + query 3 times to mongo for each call. For those cases is better to use + more efficient way, e.g. with help of aggregations. + + Args: + asset_name (str): Name of asset. + subset_name (str): Name of subset. + dbcon (avalon.mongodb.AvalonMongoDB, optional): Avalon Mongo connection + with Session. + project_name (str, optional): Find latest version in specific project. + + Returns: + None: If asset, subset or version were not found. + dict: Last version document for entered . 
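+
+    Example (an illustrative sketch; the asset and subset names are
+    placeholders):
+        >>> version_doc = get_latest_version("sh010", "modelMain")
+        >>> version_doc["name"]
+        3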
+ """ + + if not dbcon: + log.debug("Using `avalon.io` for query.") + dbcon = io + # Make sure is installed + io.install() + + if project_name and project_name != dbcon.Session.get("AVALON_PROJECT"): + # `avalon.io` has only `_database` attribute + # but `AvalonMongoDB` has `database` + database = getattr(dbcon, "database", dbcon._database) + collection = database[project_name] + else: + project_name = dbcon.Session.get("AVALON_PROJECT") + collection = dbcon + + log.debug(( + "Getting latest version for Project: \"{}\" Asset: \"{}\"" + " and Subset: \"{}\"" + ).format(project_name, asset_name, subset_name)) + + # Query asset document id by asset name + asset_doc = collection.find_one( + {"type": "asset", "name": asset_name}, + {"_id": True} + ) + if not asset_doc: + log.info( + "Asset \"{}\" was not found in Database.".format(asset_name) + ) + return None + + subset_doc = collection.find_one( + {"type": "subset", "name": subset_name, "parent": asset_doc["_id"]}, + {"_id": True} + ) + if not subset_doc: + log.info( + "Subset \"{}\" was not found in Database.".format(subset_name) + ) + return None + + version_doc = collection.find_one( + {"type": "version", "parent": subset_doc["_id"]}, + sort=[("name", -1)], + ) + if not version_doc: + log.info( + "Subset \"{}\" does not have any version yet.".format(subset_name) + ) + return None + return version_doc diff --git a/pype/lib/lib_old.py b/pype/lib/lib_old.py index 1f608e368e..c559324a5e 100644 --- a/pype/lib/lib_old.py +++ b/pype/lib/lib_old.py @@ -135,50 +135,6 @@ def _subprocess(*args, **kwargs): return full_output -def get_hierarchy(asset_name=None): - """ - Obtain asset hierarchy path string from mongo db - - Returns: - string: asset hierarchy path - - """ - if not asset_name: - asset_name = io.Session.get("AVALON_ASSET", os.environ["AVALON_ASSET"]) - - asset_entity = io.find_one({ - "type": 'asset', - "name": asset_name - }) - - not_set = "PARENTS_NOT_SET" - entity_parents = asset_entity.get("data", {}).get("parents", not_set) - - # If entity already have parents then just return joined - if entity_parents != not_set: - return "/".join(entity_parents) - - # Else query parents through visualParents and store result to entity - hierarchy_items = [] - entity = asset_entity - while True: - parent_id = entity.get("data", {}).get("visualParent") - if not parent_id: - break - entity = io.find_one({"_id": parent_id}) - hierarchy_items.append(entity["name"]) - - # Add parents to entity data for next query - entity_data = asset_entity.get("data", {}) - entity_data["parents"] = hierarchy_items - io.update_many( - {"_id": asset_entity["_id"]}, - {"$set": {"data": entity_data}} - ) - - return "/".join(hierarchy_items) - - def add_tool_to_environment(tools): """ It is adding dynamic environment to os environment. @@ -247,61 +203,6 @@ def grouper(iterable, n, fillvalue=None): return itertools.izip_longest(fillvalue=fillvalue, *args) -def is_latest(representation): - """Return whether the representation is from latest version - - Args: - representation (dict): The representation document from the database. - - Returns: - bool: Whether the representation is of latest version. 
- - """ - - version = io.find_one({"_id": representation['parent']}) - if version["type"] == "master_version": - return True - - # Get highest version under the parent - highest_version = io.find_one({ - "type": "version", - "parent": version["parent"] - }, sort=[("name", -1)], projection={"name": True}) - - if version['name'] == highest_version['name']: - return True - else: - return False - - -def any_outdated(): - """Return whether the current scene has any outdated content""" - - checked = set() - host = avalon.api.registered_host() - for container in host.ls(): - representation = container['representation'] - if representation in checked: - continue - - representation_doc = io.find_one( - { - "_id": io.ObjectId(representation), - "type": "representation" - }, - projection={"parent": True} - ) - if representation_doc and not is_latest(representation_doc): - return True - elif not representation_doc: - log.debug("Container '{objectName}' has an invalid " - "representation, it is missing in the " - "database".format(**container)) - - checked.add(representation) - return False - - def _rreplace(s, a, b, n=1): """Replace a with b in string s from right side n times""" return b.join(s.rsplit(a, n)) @@ -360,87 +261,6 @@ def version_up(filepath): return new_filename -def switch_item(container, - asset_name=None, - subset_name=None, - representation_name=None): - """Switch container asset, subset or representation of a container by name. - - It'll always switch to the latest version - of course a different - approach could be implemented. - - Args: - container (dict): data of the item to switch with - asset_name (str): name of the asset - subset_name (str): name of the subset - representation_name (str): name of the representation - - Returns: - dict - - """ - - if all(not x for x in [asset_name, subset_name, representation_name]): - raise ValueError("Must have at least one change provided to switch.") - - # Collect any of current asset, subset and representation if not provided - # so we can use the original name from those. 
- if any(not x for x in [asset_name, subset_name, representation_name]): - _id = io.ObjectId(container["representation"]) - representation = io.find_one({"type": "representation", "_id": _id}) - version, subset, asset, project = io.parenthood(representation) - - if asset_name is None: - asset_name = asset["name"] - - if subset_name is None: - subset_name = subset["name"] - - if representation_name is None: - representation_name = representation["name"] - - # Find the new one - asset = io.find_one({ - "name": asset_name, - "type": "asset" - }) - assert asset, ("Could not find asset in the database with the name " - "'%s'" % asset_name) - - subset = io.find_one({ - "name": subset_name, - "type": "subset", - "parent": asset["_id"] - }) - assert subset, ("Could not find subset in the database with the name " - "'%s'" % subset_name) - - version = io.find_one( - { - "type": "version", - "parent": subset["_id"] - }, - sort=[('name', -1)] - ) - - assert version, "Could not find a version for {}.{}".format( - asset_name, subset_name - ) - - representation = io.find_one({ - "name": representation_name, - "type": "representation", - "parent": version["_id"]} - ) - - assert representation, ("Could not find representation in the database " - "with the name '%s'" % representation_name) - - avalon.api.switch(container, representation) - - return representation - - def _get_host_name(): _host = avalon.api.registered_host() @@ -448,22 +268,6 @@ def _get_host_name(): return _host.__name__.rsplit(".", 1)[-1] -def get_asset(asset_name=None): - """ Returning asset document from database """ - if not asset_name: - asset_name = avalon.api.Session["AVALON_ASSET"] - - asset_document = io.find_one({ - "name": asset_name, - "type": "asset" - }) - - if not asset_document: - raise TypeError("Entity \"{}\" was not found in DB".format(asset_name)) - - return asset_document - - def get_version_from_path(file): """ Finds version number in file path string @@ -523,91 +327,6 @@ def get_last_version_from_path(path_dir, filter): return None -def get_subsets(asset_name, - regex_filter=None, - version=None, - representations=["exr", "dpx"]): - """ - Query subsets with filter on name. - - The method will return all found subsets and its defined version - and subsets. Version could be specified with number. Representation - can be filtered. - - Arguments: - asset_name (str): asset (shot) name - regex_filter (raw): raw string with filter pattern - version (str or int): `last` or number of version - representations (list): list for all representations - - Returns: - dict: subsets with version and representaions in keys - """ - - # query asset from db - asset_io = io.find_one({"type": "asset", "name": asset_name}) - - # check if anything returned - assert asset_io, ( - "Asset not existing. Check correct name: `{}`").format(asset_name) - - # create subsets query filter - filter_query = {"type": "subset", "parent": asset_io["_id"]} - - # add reggex filter string into query filter - if regex_filter: - filter_query.update({"name": {"$regex": r"{}".format(regex_filter)}}) - else: - filter_query.update({"name": {"$regex": r'.*'}}) - - # query all assets - subsets = [s for s in io.find(filter_query)] - - assert subsets, ("No subsets found. Check correct filter. 
" - "Try this for start `r'.*'`: " - "asset: `{}`").format(asset_name) - - output_dict = {} - # Process subsets - for subset in subsets: - if not version: - version_sel = io.find_one( - { - "type": "version", - "parent": subset["_id"] - }, - sort=[("name", -1)] - ) - else: - assert isinstance(version, int), "version needs to be `int` type" - version_sel = io.find_one({ - "type": "version", - "parent": subset["_id"], - "name": int(version) - }) - - find_dict = {"type": "representation", - "parent": version_sel["_id"]} - - filter_repr = {"name": {"$in": representations}} - - find_dict.update(filter_repr) - repres_out = [i for i in io.find(find_dict)] - - if len(repres_out) > 0: - output_dict[subset["name"]] = {"version": version_sel, - "representations": repres_out} - - return output_dict - - -def get_linked_assets(asset_entity): - """Return linked assets for `asset_entity`.""" - inputs = asset_entity["data"].get("inputs", []) - inputs = [io.find_one({"_id": x}) for x in inputs] - return inputs - - class BuildWorkfile: """Wrapper for build workfile process. @@ -1307,75 +1026,3 @@ def source_hash(filepath, *args): time = str(os.path.getmtime(filepath)) size = str(os.path.getsize(filepath)) return "|".join([file_name, time, size] + list(args)).replace(".", ",") - - -def get_latest_version(asset_name, subset_name, dbcon=None, project_name=None): - """Retrieve latest version from `asset_name`, and `subset_name`. - - Do not use if you want to query more than 5 latest versions as this method - query 3 times to mongo for each call. For those cases is better to use - more efficient way, e.g. with help of aggregations. - - Args: - asset_name (str): Name of asset. - subset_name (str): Name of subset. - dbcon (avalon.mongodb.AvalonMongoDB, optional): Avalon Mongo connection - with Session. - project_name (str, optional): Find latest version in specific project. - - Returns: - None: If asset, subset or version were not found. - dict: Last version document for entered . 
- """ - - if not dbcon: - log.debug("Using `avalon.io` for query.") - dbcon = io - # Make sure is installed - io.install() - - if project_name and project_name != dbcon.Session.get("AVALON_PROJECT"): - # `avalon.io` has only `_database` attribute - # but `AvalonMongoDB` has `database` - database = getattr(dbcon, "database", dbcon._database) - collection = database[project_name] - else: - project_name = dbcon.Session.get("AVALON_PROJECT") - collection = dbcon - - log.debug(( - "Getting latest version for Project: \"{}\" Asset: \"{}\"" - " and Subset: \"{}\"" - ).format(project_name, asset_name, subset_name)) - - # Query asset document id by asset name - asset_doc = collection.find_one( - {"type": "asset", "name": asset_name}, - {"_id": True} - ) - if not asset_doc: - log.info( - "Asset \"{}\" was not found in Database.".format(asset_name) - ) - return None - - subset_doc = collection.find_one( - {"type": "subset", "name": subset_name, "parent": asset_doc["_id"]}, - {"_id": True} - ) - if not subset_doc: - log.info( - "Subset \"{}\" was not found in Database.".format(subset_name) - ) - return None - - version_doc = collection.find_one( - {"type": "version", "parent": subset_doc["_id"]}, - sort=[("name", -1)], - ) - if not version_doc: - log.info( - "Subset \"{}\" does not have any version yet.".format(subset_name) - ) - return None - return version_doc From c116e8042b6ca54891e2db5912073ccb008d13a6 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Tue, 10 Nov 2020 11:42:47 +0100 Subject: [PATCH 05/12] moved `get_subsets` to collect audio in celaction collector which is only place where is used --- pype/lib/__init__.py | 2 - pype/lib/avalon_context.py | 78 ----------------- .../celaction/publish/collect_audio.py | 84 ++++++++++++++++++- 3 files changed, 82 insertions(+), 82 deletions(-) diff --git a/pype/lib/__init__.py b/pype/lib/__init__.py index 002689d21c..59411db9de 100644 --- a/pype/lib/__init__.py +++ b/pype/lib/__init__.py @@ -12,7 +12,6 @@ from .avalon_context import ( switch_item, get_asset, get_hierarchy, - get_subsets, get_linked_assets, get_latest_version ) @@ -54,7 +53,6 @@ __all__ = [ "switch_item", "get_asset", "get_hierarchy", - "get_subsets", "get_linked_assets", "get_latest_version", diff --git a/pype/lib/avalon_context.py b/pype/lib/avalon_context.py index e9cb4c4f5b..099b48967b 100644 --- a/pype/lib/avalon_context.py +++ b/pype/lib/avalon_context.py @@ -203,84 +203,6 @@ def get_hierarchy(asset_name=None): return "/".join(hierarchy_items) -def get_subsets(asset_name, - regex_filter=None, - version=None, - representations=["exr", "dpx"]): - """ - Query subsets with filter on name. - - The method will return all found subsets and its defined version - and subsets. Version could be specified with number. Representation - can be filtered. - - Arguments: - asset_name (str): asset (shot) name - regex_filter (raw): raw string with filter pattern - version (str or int): `last` or number of version - representations (list): list for all representations - - Returns: - dict: subsets with version and representaions in keys - """ - - # query asset from db - asset_io = io.find_one({"type": "asset", "name": asset_name}) - - # check if anything returned - assert asset_io, ( - "Asset not existing. 
Check correct name: `{}`").format(asset_name) - - # create subsets query filter - filter_query = {"type": "subset", "parent": asset_io["_id"]} - - # add reggex filter string into query filter - if regex_filter: - filter_query.update({"name": {"$regex": r"{}".format(regex_filter)}}) - else: - filter_query.update({"name": {"$regex": r'.*'}}) - - # query all assets - subsets = [s for s in io.find(filter_query)] - - assert subsets, ("No subsets found. Check correct filter. " - "Try this for start `r'.*'`: " - "asset: `{}`").format(asset_name) - - output_dict = {} - # Process subsets - for subset in subsets: - if not version: - version_sel = io.find_one( - { - "type": "version", - "parent": subset["_id"] - }, - sort=[("name", -1)] - ) - else: - assert isinstance(version, int), "version needs to be `int` type" - version_sel = io.find_one({ - "type": "version", - "parent": subset["_id"], - "name": int(version) - }) - - find_dict = {"type": "representation", - "parent": version_sel["_id"]} - - filter_repr = {"name": {"$in": representations}} - - find_dict.update(filter_repr) - repres_out = [i for i in io.find(find_dict)] - - if len(repres_out) > 0: - output_dict[subset["name"]] = {"version": version_sel, - "representations": repres_out} - - return output_dict - - def get_linked_assets(asset_entity): """Return linked assets for `asset_entity`.""" inputs = asset_entity["data"].get("inputs", []) diff --git a/pype/plugins/celaction/publish/collect_audio.py b/pype/plugins/celaction/publish/collect_audio.py index c29e212d80..c92e4fd868 100644 --- a/pype/plugins/celaction/publish/collect_audio.py +++ b/pype/plugins/celaction/publish/collect_audio.py @@ -1,6 +1,8 @@ -import pyblish.api import os +import pyblish.api +from avalon import io + import pype.api as pype from pprint import pformat @@ -15,7 +17,7 @@ class AppendCelactionAudio(pyblish.api.ContextPlugin): asset_entity = context.data["assetEntity"] # get all available representations - subsets = pype.get_subsets(asset_entity["name"], + subsets = self.get_subsets(asset_entity["name"], representations=["audio", "wav"] ) self.log.info(f"subsets is: {pformat(subsets)}") @@ -39,3 +41,81 @@ class AppendCelactionAudio(pyblish.api.ContextPlugin): 'audio_file: {}, has been added to context'.format(audio_file)) else: self.log.warning("Couldn't find any audio file on Ftrack.") + + def get_subsets( + self, + asset_name, + regex_filter=None, + version=None, + representations=["exr", "dpx"] + ): + """ + Query subsets with filter on name. + + The method will return all found subsets and its defined version + and subsets. Version could be specified with number. Representation + can be filtered. + + Arguments: + asset_name (str): asset (shot) name + regex_filter (raw): raw string with filter pattern + version (str or int): `last` or number of version + representations (list): list for all representations + + Returns: + dict: subsets with version and representaions in keys + """ + + # query asset from db + asset_io = io.find_one({"type": "asset", "name": asset_name}) + + # check if anything returned + assert asset_io, ( + "Asset not existing. Check correct name: `{}`").format(asset_name) + + # create subsets query filter + filter_query = {"type": "subset", "parent": asset_io["_id"]} + + # add reggex filter string into query filter + if regex_filter: + filter_query["name"] = {"$regex": r"{}".format(regex_filter)} + + # query all assets + subsets = list(io.find(filter_query)) + + assert subsets, ("No subsets found. Check correct filter. 
" + "Try this for start `r'.*'`: " + "asset: `{}`").format(asset_name) + + output_dict = {} + # Process subsets + for subset in subsets: + if not version: + version_sel = io.find_one( + { + "type": "version", + "parent": subset["_id"] + }, + sort=[("name", -1)] + ) + else: + assert isinstance(version, int), "version needs to be `int` type" + version_sel = io.find_one({ + "type": "version", + "parent": subset["_id"], + "name": int(version) + }) + + find_dict = {"type": "representation", + "parent": version_sel["_id"]} + + filter_repr = {"name": {"$in": representations}} + + find_dict.update(filter_repr) + repres_out = [i for i in io.find(find_dict)] + + if len(repres_out) > 0: + output_dict[subset["name"]] = {"version": version_sel, + "representations": repres_out} + + return output_dict From 36cd349c6a5b2c38cbd58792d9758b1bcab00241 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Tue, 10 Nov 2020 11:51:06 +0100 Subject: [PATCH 06/12] switch_item moved to fusion.lib as it's only place where is used --- pype/hosts/fusion/lib.py | 83 ++++++++++++++++++- .../fusion/scripts/fusion_switch_shot.py | 2 +- pype/lib/__init__.py | 2 - pype/lib/avalon_context.py | 81 ------------------ pype/scripts/fusion_switch_shot.py | 2 +- 5 files changed, 84 insertions(+), 86 deletions(-) diff --git a/pype/hosts/fusion/lib.py b/pype/hosts/fusion/lib.py index f2846c966a..77866fde9d 100644 --- a/pype/hosts/fusion/lib.py +++ b/pype/hosts/fusion/lib.py @@ -2,7 +2,7 @@ import sys from avalon.vendor.Qt import QtGui import avalon.fusion - +from avalon import io self = sys.modules[__name__] self._project = None @@ -59,3 +59,84 @@ def get_additional_data(container): return {"color": QtGui.QColor.fromRgbF(tile_color["R"], tile_color["G"], tile_color["B"])} + + +def switch_item(container, + asset_name=None, + subset_name=None, + representation_name=None): + """Switch container asset, subset or representation of a container by name. + + It'll always switch to the latest version - of course a different + approach could be implemented. + + Args: + container (dict): data of the item to switch with + asset_name (str): name of the asset + subset_name (str): name of the subset + representation_name (str): name of the representation + + Returns: + dict + + """ + + if all(not x for x in [asset_name, subset_name, representation_name]): + raise ValueError("Must have at least one change provided to switch.") + + # Collect any of current asset, subset and representation if not provided + # so we can use the original name from those. 
+ if any(not x for x in [asset_name, subset_name, representation_name]): + _id = io.ObjectId(container["representation"]) + representation = io.find_one({"type": "representation", "_id": _id}) + version, subset, asset, project = io.parenthood(representation) + + if asset_name is None: + asset_name = asset["name"] + + if subset_name is None: + subset_name = subset["name"] + + if representation_name is None: + representation_name = representation["name"] + + # Find the new one + asset = io.find_one({ + "name": asset_name, + "type": "asset" + }) + assert asset, ("Could not find asset in the database with the name " + "'%s'" % asset_name) + + subset = io.find_one({ + "name": subset_name, + "type": "subset", + "parent": asset["_id"] + }) + assert subset, ("Could not find subset in the database with the name " + "'%s'" % subset_name) + + version = io.find_one( + { + "type": "version", + "parent": subset["_id"] + }, + sort=[('name', -1)] + ) + + assert version, "Could not find a version for {}.{}".format( + asset_name, subset_name + ) + + representation = io.find_one({ + "name": representation_name, + "type": "representation", + "parent": version["_id"]} + ) + + assert representation, ("Could not find representation in the database " + "with the name '%s'" % representation_name) + + avalon.api.switch(container, representation) + + return representation diff --git a/pype/hosts/fusion/scripts/fusion_switch_shot.py b/pype/hosts/fusion/scripts/fusion_switch_shot.py index a3f2116db8..ed657cb612 100644 --- a/pype/hosts/fusion/scripts/fusion_switch_shot.py +++ b/pype/hosts/fusion/scripts/fusion_switch_shot.py @@ -234,7 +234,7 @@ def switch(asset_name, filepath=None, new=True): representations = [] for container in containers: try: - representation = pype.switch_item( + representation = fusion_lib.switch_item( container, asset_name=asset_name) representations.append(representation) diff --git a/pype/lib/__init__.py b/pype/lib/__init__.py index 59411db9de..a93d371b2b 100644 --- a/pype/lib/__init__.py +++ b/pype/lib/__init__.py @@ -9,7 +9,6 @@ from .deprecated import ( from .avalon_context import ( is_latest, any_outdated, - switch_item, get_asset, get_hierarchy, get_linked_assets, @@ -50,7 +49,6 @@ __all__ = [ "is_latest", "any_outdated", - "switch_item", "get_asset", "get_hierarchy", "get_linked_assets", diff --git a/pype/lib/avalon_context.py b/pype/lib/avalon_context.py index 099b48967b..813d244d72 100644 --- a/pype/lib/avalon_context.py +++ b/pype/lib/avalon_context.py @@ -62,87 +62,6 @@ def any_outdated(): return False -def switch_item(container, - asset_name=None, - subset_name=None, - representation_name=None): - """Switch container asset, subset or representation of a container by name. - - It'll always switch to the latest version - of course a different - approach could be implemented. - - Args: - container (dict): data of the item to switch with - asset_name (str): name of the asset - subset_name (str): name of the subset - representation_name (str): name of the representation - - Returns: - dict - - """ - - if all(not x for x in [asset_name, subset_name, representation_name]): - raise ValueError("Must have at least one change provided to switch.") - - # Collect any of current asset, subset and representation if not provided - # so we can use the original name from those. 
- if any(not x for x in [asset_name, subset_name, representation_name]): - _id = io.ObjectId(container["representation"]) - representation = io.find_one({"type": "representation", "_id": _id}) - version, subset, asset, project = io.parenthood(representation) - - if asset_name is None: - asset_name = asset["name"] - - if subset_name is None: - subset_name = subset["name"] - - if representation_name is None: - representation_name = representation["name"] - - # Find the new one - asset = io.find_one({ - "name": asset_name, - "type": "asset" - }) - assert asset, ("Could not find asset in the database with the name " - "'%s'" % asset_name) - - subset = io.find_one({ - "name": subset_name, - "type": "subset", - "parent": asset["_id"] - }) - assert subset, ("Could not find subset in the database with the name " - "'%s'" % subset_name) - - version = io.find_one( - { - "type": "version", - "parent": subset["_id"] - }, - sort=[('name', -1)] - ) - - assert version, "Could not find a version for {}.{}".format( - asset_name, subset_name - ) - - representation = io.find_one({ - "name": representation_name, - "type": "representation", - "parent": version["_id"]} - ) - - assert representation, ("Could not find representation in the database " - "with the name '%s'" % representation_name) - - avalon.api.switch(container, representation) - - return representation - - def get_asset(asset_name=None): """ Returning asset document from database """ if not asset_name: diff --git a/pype/scripts/fusion_switch_shot.py b/pype/scripts/fusion_switch_shot.py index f936b7d8e0..5791220acd 100644 --- a/pype/scripts/fusion_switch_shot.py +++ b/pype/scripts/fusion_switch_shot.py @@ -191,7 +191,7 @@ def switch(asset_name, filepath=None, new=True): representations = [] for container in containers: try: - representation = pype.switch_item(container, + representation = fusion_lib.switch_item(container, asset_name=asset_name) representations.append(representation) except Exception as e: From 833d6ffdff03a308451dabc6452931bcf05bdee3 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Tue, 10 Nov 2020 11:54:30 +0100 Subject: [PATCH 07/12] moved BuildWorkfile to avalon context --- pype/lib/__init__.py | 4 +- pype/lib/avalon_context.py | 653 ++++++++++++++++++++++++++++++++++++- pype/lib/lib_old.py | 647 ------------------------------------ 3 files changed, 655 insertions(+), 649 deletions(-) diff --git a/pype/lib/__init__.py b/pype/lib/__init__.py index a93d371b2b..f807fe894a 100644 --- a/pype/lib/__init__.py +++ b/pype/lib/__init__.py @@ -12,7 +12,8 @@ from .avalon_context import ( get_asset, get_hierarchy, get_linked_assets, - get_latest_version + get_latest_version, + BuildWorkfile ) from .hooks import PypeHook, execute_hook @@ -53,6 +54,7 @@ __all__ = [ "get_hierarchy", "get_linked_assets", "get_latest_version", + "BuildWorkfile", "PypeHook", "execute_hook", diff --git a/pype/lib/avalon_context.py b/pype/lib/avalon_context.py index 813d244d72..56abc4aee6 100644 --- a/pype/lib/avalon_context.py +++ b/pype/lib/avalon_context.py @@ -1,7 +1,11 @@ import os +import json +import re import logging +import collections -from avalon import io +from avalon import io, pipeline +from ..api import config import avalon.api log = logging.getLogger("AvalonContext") @@ -199,3 +203,650 @@ def get_latest_version(asset_name, subset_name, dbcon=None, project_name=None): ) return None return version_doc + + +class BuildWorkfile: + """Wrapper for build workfile process. + + Load representations for current context by build presets. 
Build presets
+    are host related, since each host has its loaders.
+    """
+
+    log = logging.getLogger("BuildWorkfile")
+
+    @staticmethod
+    def map_subsets_by_family(subsets):
+        subsets_by_family = collections.defaultdict(list)
+        for subset in subsets:
+            family = subset["data"].get("family")
+            if not family:
+                families = subset["data"].get("families")
+                if not families:
+                    continue
+                family = families[0]
+
+            subsets_by_family[family].append(subset)
+        return subsets_by_family
+
+    def process(self):
+        """Main method of this wrapper.
+
+        Building of the workfile is triggered and it is possible to implement
+        post processing of loaded containers if necessary.
+        """
+        containers = self.build_workfile()
+
+        return containers
+
+    def build_workfile(self):
+        """Prepares and loads containers into the workfile.
+
+        Loads latest versions of current and linked assets to workfile by
+        logic stored in Workfile profiles from presets. Profiles are set by
+        host, filtered by current task name and used by families.
+
+        Each family can specify representation names and loaders for
+        representations and the first available and successfully loaded
+        representation is returned as container.
+
+        At the end you'll get a list of loaded containers per each asset.
+
+        loaded_containers [{
+            "asset_entity": <AssetEntity1>,
+            "containers": [<Container1>, <Container2>, ...]
+        }, {
+            "asset_entity": <AssetEntity2>,
+            "containers": [<Container3>, ...]
+        }, {
+            ...
+        }]
+        """
+        # Get current asset name and entity
+        current_asset_name = io.Session["AVALON_ASSET"]
+        current_asset_entity = io.find_one({
+            "type": "asset",
+            "name": current_asset_name
+        })
+
+        # Skip if asset was not found
+        if not current_asset_entity:
+            print("Asset entity with name `{}` was not found".format(
+                current_asset_name
+            ))
+            return
+
+        # Prepare available loaders
+        loaders_by_name = {}
+        for loader in avalon.api.discover(avalon.api.Loader):
+            loader_name = loader.__name__
+            if loader_name in loaders_by_name:
+                raise KeyError(
+                    "Duplicated loader name {0}!".format(loader_name)
+                )
+            loaders_by_name[loader_name] = loader
+
+        # Skip if there are no loaders
+        if not loaders_by_name:
+            self.log.warning("There are no registered loaders.")
+            return
+
+        # Get current task name
+        current_task_name = io.Session["AVALON_TASK"]
+
+        # Load workfile presets for task
+        self.build_presets = self.get_build_presets(current_task_name)
+
+        # Skip if there are no presets for the task
+        if not self.build_presets:
+            self.log.warning(
+                "Current task `{}` does not have any loading preset.".format(
+                    current_task_name
+                )
+            )
+            return
+
+        # Get presets for loading current asset
+        current_context_profiles = self.build_presets.get("current_context")
+        # Get presets for loading linked assets
+        link_context_profiles = self.build_presets.get("linked_assets")
+        # Skip if both are missing
+        if not current_context_profiles and not link_context_profiles:
+            self.log.warning(
+                "Current task `{}` has empty loading preset.".format(
+                    current_task_name
+                )
+            )
+            return
+
+        elif not current_context_profiles:
+            self.log.warning((
+                "Current task `{}` doesn't have any loading"
+                " preset for its context."
+            ).format(current_task_name))
+
+        elif not link_context_profiles:
+            self.log.warning((
+                "Current task `{}` doesn't have any"
+                " loading preset for its linked assets."
+ ).format(current_task_name)) + + # Prepare assets to process by workfile presets + assets = [] + current_asset_id = None + if current_context_profiles: + # Add current asset entity if preset has current context set + assets.append(current_asset_entity) + current_asset_id = current_asset_entity["_id"] + + if link_context_profiles: + # Find and append linked assets if preset has set linked mapping + link_assets = get_linked_assets(current_asset_entity) + if link_assets: + assets.extend(link_assets) + + # Skip if there are no assets. This can happen if only linked mapping + # is set and there are no links for his asset. + if not assets: + self.log.warning( + "Asset does not have linked assets. Nothing to process." + ) + return + + # Prepare entities from database for assets + prepared_entities = self._collect_last_version_repres(assets) + + # Load containers by prepared entities and presets + loaded_containers = [] + # - Current asset containers + if current_asset_id and current_asset_id in prepared_entities: + current_context_data = prepared_entities.pop(current_asset_id) + loaded_data = self.load_containers_by_asset_data( + current_context_data, current_context_profiles, loaders_by_name + ) + if loaded_data: + loaded_containers.append(loaded_data) + + # - Linked assets container + for linked_asset_data in prepared_entities.values(): + loaded_data = self.load_containers_by_asset_data( + linked_asset_data, link_context_profiles, loaders_by_name + ) + if loaded_data: + loaded_containers.append(loaded_data) + + # Return list of loaded containers + return loaded_containers + + def get_build_presets(self, task_name): + """ Returns presets to build workfile for task name. + + Presets are loaded for current project set in + io.Session["AVALON_PROJECT"], filtered by registered host + and entered task name. + + :param task_name: Task name used for filtering build presets. + :type task_name: str + :return: preset per eneter task + :rtype: dict | None + """ + host_name = avalon.api.registered_host().__name__.rsplit(".", 1)[-1] + presets = config.get_presets(io.Session["AVALON_PROJECT"]) + # Get presets for host + build_presets = ( + presets["plugins"] + .get(host_name, {}) + .get("workfile_build") + ) + if not build_presets: + return + + task_name_low = task_name.lower() + per_task_preset = None + for preset in build_presets: + preset_tasks = preset.get("tasks") or [] + preset_tasks_low = [task.lower() for task in preset_tasks] + if task_name_low in preset_tasks_low: + per_task_preset = preset + break + + return per_task_preset + + def _filter_build_profiles(self, build_profiles, loaders_by_name): + """ Filter build profiles by loaders and prepare process data. + + Valid profile must have "loaders", "families" and "repre_names" keys + with valid values. + - "loaders" expects list of strings representing possible loaders. + - "families" expects list of strings for filtering + by main subset family. + - "repre_names" expects list of strings for filtering by + representation name. + + Lowered "families" and "repre_names" are prepared for each profile with + all required keys. + + :param build_profiles: Profiles for building workfile. + :type build_profiles: dict + :param loaders_by_name: Available loaders per name. + :type loaders_by_name: dict + :return: Filtered and prepared profiles. 
+ :rtype: list + """ + valid_profiles = [] + for profile in build_profiles: + # Check loaders + profile_loaders = profile.get("loaders") + if not profile_loaders: + self.log.warning(( + "Build profile has missing loaders configuration: {0}" + ).format(json.dumps(profile, indent=4))) + continue + + # Check if any loader is available + loaders_match = False + for loader_name in profile_loaders: + if loader_name in loaders_by_name: + loaders_match = True + break + + if not loaders_match: + self.log.warning(( + "All loaders from Build profile are not available: {0}" + ).format(json.dumps(profile, indent=4))) + continue + + # Check families + profile_families = profile.get("families") + if not profile_families: + self.log.warning(( + "Build profile is missing families configuration: {0}" + ).format(json.dumps(profile, indent=4))) + continue + + # Check representation names + profile_repre_names = profile.get("repre_names") + if not profile_repre_names: + self.log.warning(( + "Build profile is missing" + " representation names filtering: {0}" + ).format(json.dumps(profile, indent=4))) + continue + + # Prepare lowered families and representation names + profile["families_lowered"] = [ + fam.lower() for fam in profile_families + ] + profile["repre_names_lowered"] = [ + name.lower() for name in profile_repre_names + ] + + valid_profiles.append(profile) + + return valid_profiles + + def _prepare_profile_for_subsets(self, subsets, profiles): + """Select profile for each subset byt it's data. + + Profiles are filtered for each subset individually. + Profile is filtered by subset's family, optionally by name regex and + representation names set in profile. + It is possible to not find matching profile for subset, in that case + subset is skipped and it is possible that none of subsets have + matching profile. + + :param subsets: Subset documents. + :type subsets: list + :param profiles: Build profiles. + :type profiles: dict + :return: Profile by subset's id. + :rtype: dict + """ + # Prepare subsets + subsets_by_family = self.map_subsets_by_family(subsets) + + profiles_per_subset_id = {} + for family, subsets in subsets_by_family.items(): + family_low = family.lower() + for profile in profiles: + # Skip profile if does not contain family + if family_low not in profile["families_lowered"]: + continue + + # Precompile name filters as regexes + profile_regexes = profile.get("subset_name_filters") + if profile_regexes: + _profile_regexes = [] + for regex in profile_regexes: + _profile_regexes.append(re.compile(regex)) + profile_regexes = _profile_regexes + + # TODO prepare regex compilation + for subset in subsets: + # Verify regex filtering (optional) + if profile_regexes: + valid = False + for pattern in profile_regexes: + if re.match(pattern, subset["name"]): + valid = True + break + + if not valid: + continue + + profiles_per_subset_id[subset["_id"]] = profile + + # break profiles loop on finding the first matching profile + break + return profiles_per_subset_id + + def load_containers_by_asset_data( + self, asset_entity_data, build_profiles, loaders_by_name + ): + """Load containers for entered asset entity by Build profiles. + + :param asset_entity_data: Prepared data with subsets, last version + and representations for specific asset. + :type asset_entity_data: dict + :param build_profiles: Build profiles. + :type build_profiles: dict + :param loaders_by_name: Available loaders per name. + :type loaders_by_name: dict + :return: Output contains asset document and loaded containers. 
+        :rtype: dict
+        """
+
+        # Make sure all data are not empty
+        if not asset_entity_data or not build_profiles or not loaders_by_name:
+            return
+
+        asset_entity = asset_entity_data["asset_entity"]
+
+        valid_profiles = self._filter_build_profiles(
+            build_profiles, loaders_by_name
+        )
+        if not valid_profiles:
+            self.log.warning(
+                "There are no valid Workfile profiles. Skipping process."
+            )
+            return
+
+        self.log.debug("Valid Workfile profiles: {}".format(valid_profiles))
+
+        subsets_by_id = {}
+        version_by_subset_id = {}
+        repres_by_version_id = {}
+        for subset_id, in_data in asset_entity_data["subsets"].items():
+            subset_entity = in_data["subset_entity"]
+            subsets_by_id[subset_entity["_id"]] = subset_entity
+
+            version_data = in_data["version"]
+            version_entity = version_data["version_entity"]
+            version_by_subset_id[subset_id] = version_entity
+            repres_by_version_id[version_entity["_id"]] = (
+                version_data["repres"]
+            )
+
+        if not subsets_by_id:
+            self.log.warning("There are no subsets for asset {0}".format(
+                asset_entity["name"]
+            ))
+            return
+
+        profiles_per_subset_id = self._prepare_profile_for_subsets(
+            subsets_by_id.values(), valid_profiles
+        )
+        if not profiles_per_subset_id:
+            self.log.warning("There are no valid subsets.")
+            return
+
+        valid_repres_by_subset_id = collections.defaultdict(list)
+        for subset_id, profile in profiles_per_subset_id.items():
+            profile_repre_names = profile["repre_names_lowered"]
+
+            version_entity = version_by_subset_id[subset_id]
+            version_id = version_entity["_id"]
+            repres = repres_by_version_id[version_id]
+            for repre in repres:
+                repre_name_low = repre["name"].lower()
+                if repre_name_low in profile_repre_names:
+                    valid_repres_by_subset_id[subset_id].append(repre)
+
+        # DEBUG message
+        msg = "Valid representations for Asset: `{}`".format(
+            asset_entity["name"]
+        )
+        for subset_id, repres in valid_repres_by_subset_id.items():
+            subset = subsets_by_id[subset_id]
+            msg += "\n# Subset Name/ID: `{}`/{}".format(
+                subset["name"], subset_id
+            )
+            for repre in repres:
+                msg += "\n## Repre name: `{}`".format(repre["name"])
+
+        self.log.debug(msg)
+
+        containers = self._load_containers(
+            valid_repres_by_subset_id, subsets_by_id,
+            profiles_per_subset_id, loaders_by_name
+        )
+
+        return {
+            "asset_entity": asset_entity,
+            "containers": containers
+        }
+
+    def _load_containers(
+        self, repres_by_subset_id, subsets_by_id,
+        profiles_per_subset_id, loaders_by_name
+    ):
+        """Real load by collected data happens here.
+
+        Loading of representations per subset happens here. Each subset can
+        load one representation. Loading is tried in a specific order.
+        Representations are tried to load by names defined in configuration.
+        If a subset has a representation matching a representation name, each
+        loader is tried until one is successful. If none of them succeeds,
+        the next representation name is tried.
+        Subset process loop ends when any representation is loaded or
+        all matching representations were already tried.
+
+        :param repres_by_subset_id: Available representations mapped
+            by their parent (subset) id.
+        :type repres_by_subset_id: dict
+        :param subsets_by_id: Subset documents mapped by their id.
+        :type subsets_by_id: dict
+        :param profiles_per_subset_id: Build profiles mapped by subset id.
+        :type profiles_per_subset_id: dict
+        :param loaders_by_name: Available loaders per name.
+        :type loaders_by_name: dict
+        :return: Objects of loaded containers.
+        :rtype: list
+        """
+        loaded_containers = []
+
+        # Get subset id order from build presets.
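+        # (For example, a preset whose families are ["model", "rig"] puts
+        # model subsets before rig subsets; these family names are only
+        # placeholders.)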
+ build_presets = self.build_presets.get("current_context", []) + build_presets += self.build_presets.get("linked_assets", []) + subset_ids_ordered = [] + for preset in build_presets: + for preset_family in preset["families"]: + for id, subset in subsets_by_id.items(): + if preset_family not in subset["data"].get("families", []): + continue + + subset_ids_ordered.append(id) + + # Order representations from subsets. + print("repres_by_subset_id", repres_by_subset_id) + representations_ordered = [] + representations = [] + for id in subset_ids_ordered: + for subset_id, repres in repres_by_subset_id.items(): + if repres in representations: + continue + + if id == subset_id: + representations_ordered.append((subset_id, repres)) + representations.append(repres) + + print("representations", representations) + + # Load ordered reprensentations. + for subset_id, repres in representations_ordered: + subset_name = subsets_by_id[subset_id]["name"] + + profile = profiles_per_subset_id[subset_id] + loaders_last_idx = len(profile["loaders"]) - 1 + repre_names_last_idx = len(profile["repre_names_lowered"]) - 1 + + repre_by_low_name = { + repre["name"].lower(): repre for repre in repres + } + + is_loaded = False + for repre_name_idx, profile_repre_name in enumerate( + profile["repre_names_lowered"] + ): + # Break iteration if representation was already loaded + if is_loaded: + break + + repre = repre_by_low_name.get(profile_repre_name) + if not repre: + continue + + for loader_idx, loader_name in enumerate(profile["loaders"]): + if is_loaded: + break + + loader = loaders_by_name.get(loader_name) + if not loader: + continue + try: + container = avalon.api.load( + loader, + repre["_id"], + name=subset_name + ) + loaded_containers.append(container) + is_loaded = True + + except Exception as exc: + if exc == pipeline.IncompatibleLoaderError: + self.log.info(( + "Loader `{}` is not compatible with" + " representation `{}`" + ).format(loader_name, repre["name"])) + + else: + self.log.error( + "Unexpected error happened during loading", + exc_info=True + ) + + msg = "Loading failed." + if loader_idx < loaders_last_idx: + msg += " Trying next loader." + elif repre_name_idx < repre_names_last_idx: + msg += ( + " Loading of subset `{}` was not successful." + ).format(subset_name) + else: + msg += " Trying next representation." + self.log.info(msg) + + return loaded_containers + + def _collect_last_version_repres(self, asset_entities): + """Collect subsets, versions and representations for asset_entities. + + :param asset_entities: Asset entities for which want to find data + :type asset_entities: list + :return: collected entities + :rtype: dict + + Example output: + ``` + { + {Asset ID}: { + "asset_entity": , + "subsets": { + {Subset ID}: { + "subset_entity": , + "version": { + "version_entity": , + "repres": [ + , , ... + ] + } + }, + ... + } + }, + ... 
+ }
+ output[asset_id]["subsets"][subset_id]["version"]["repres"]
+ ```
+ """
+
+ if not asset_entities:
+ return {}
+
+ asset_entity_by_ids = {asset["_id"]: asset for asset in asset_entities}
+
+ subsets = list(io.find({
+ "type": "subset",
+ "parent": {"$in": asset_entity_by_ids.keys()}
+ }))
+ subset_entity_by_ids = {subset["_id"]: subset for subset in subsets}
+
+ sorted_versions = list(io.find({
+ "type": "version",
+ "parent": {"$in": subset_entity_by_ids.keys()}
+ }).sort("name", -1))
+
+ subset_id_with_latest_version = []
+ last_versions_by_id = {}
+ for version in sorted_versions:
+ subset_id = version["parent"]
+ if subset_id in subset_id_with_latest_version:
+ continue
+ subset_id_with_latest_version.append(subset_id)
+ last_versions_by_id[version["_id"]] = version
+
+ repres = io.find({
+ "type": "representation",
+ "parent": {"$in": last_versions_by_id.keys()}
+ })
+
+ output = {}
+ for repre in repres:
+ version_id = repre["parent"]
+ version = last_versions_by_id[version_id]
+
+ subset_id = version["parent"]
+ subset = subset_entity_by_ids[subset_id]
+
+ asset_id = subset["parent"]
+ asset = asset_entity_by_ids[asset_id]
+
+ if asset_id not in output:
+ output[asset_id] = {
+ "asset_entity": asset,
+ "subsets": {}
+ }
+
+ if subset_id not in output[asset_id]["subsets"]:
+ output[asset_id]["subsets"][subset_id] = {
+ "subset_entity": subset,
+ "version": {
+ "version_entity": version,
+ "repres": []
+ }
+ }
+
+ output[asset_id]["subsets"][subset_id]["version"]["repres"].append(
+ repre
+ )
+
+ return output
diff --git a/pype/lib/lib_old.py b/pype/lib/lib_old.py
index c559324a5e..58c9abd71f 100644
--- a/pype/lib/lib_old.py
+++ b/pype/lib/lib_old.py
@@ -327,653 +327,6 @@ def get_last_version_from_path(path_dir, filter):
 return None
-class BuildWorkfile:
- """Wrapper for build workfile process.
-
- Load representations for current context by build presets. Build presets
- are host related, since each host has its loaders.
- """
-
- log = logging.getLogger("BuildWorkfile")
-
- @staticmethod
- def map_subsets_by_family(subsets):
- subsets_by_family = collections.defaultdict(list)
- for subset in subsets:
- family = subset["data"].get("family")
- if not family:
- families = subset["data"].get("families")
- if not families:
- continue
- family = families[0]
-
- subsets_by_family[family].append(subset)
- return subsets_by_family
-
- def process(self):
- """Main method of this wrapper.
-
- Building of the workfile is triggered here and it is possible to
- implement post-processing of loaded containers if necessary.
- """
- containers = self.build_workfile()
-
- return containers
-
- def build_workfile(self):
- """Prepare and load containers into the workfile.
-
- Loads latest versions of the current and linked assets into the
- workfile by logic stored in Workfile profiles from presets. Profiles
- are set by host, filtered by current task name and used by families.
-
- Each family can specify representation names and loaders; the first
- representation that is available and loads successfully is returned
- as a container.
-
- At the end you'll get a list of loaded containers per asset.
-
- loaded_containers [{
- "asset_entity": <AssetEntity1>,
- "containers": [<Container1>, <Container2>, ...]
- }, {
- "asset_entity": <AssetEntity2>,
- "containers": [<Container3>, ...]
- }, {
- ...
- }]
- """
- # Get current asset name and entity
- current_asset_name = io.Session["AVALON_ASSET"]
- current_asset_entity = io.find_one({
- "type": "asset",
- "name": current_asset_name
- })
-
- # Skip if asset was not found
- if not current_asset_entity:
- print("Asset entity with name `{}` was not found".format(
- current_asset_name
- ))
- return
-
- # Prepare available loaders
- loaders_by_name = {}
- for loader in avalon.api.discover(avalon.api.Loader):
- loader_name = loader.__name__
- if loader_name in loaders_by_name:
- raise KeyError(
- "Duplicated loader name {0}!".format(loader_name)
- )
- loaders_by_name[loader_name] = loader
-
- # Skip if there are no loaders
- if not loaders_by_name:
- self.log.warning("There are no registered loaders.")
- return
-
- # Get current task name
- current_task_name = io.Session["AVALON_TASK"]
-
- # Load workfile presets for task
- self.build_presets = self.get_build_presets(current_task_name)
-
- # Skip if there are no presets for task
- if not self.build_presets:
- self.log.warning(
- "Current task `{}` does not have any loading preset.".format(
- current_task_name
- )
- )
- return
-
- # Get presets for loading current asset
- current_context_profiles = self.build_presets.get("current_context")
- # Get presets for loading linked assets
- link_context_profiles = self.build_presets.get("linked_assets")
- # Skip if both are missing
- if not current_context_profiles and not link_context_profiles:
- self.log.warning(
- "Current task `{}` has empty loading preset.".format(
- current_task_name
- )
- )
- return
-
- elif not current_context_profiles:
- self.log.warning((
- "Current task `{}` doesn't have any loading"
- " preset for its context."
- ).format(current_task_name))
-
- elif not link_context_profiles:
- self.log.warning((
- "Current task `{}` doesn't have any"
- " loading preset for its linked assets."
- ).format(current_task_name))
-
- # Prepare assets to process by workfile presets
- assets = []
- current_asset_id = None
- if current_context_profiles:
- # Add current asset entity if preset has current context set
- assets.append(current_asset_entity)
- current_asset_id = current_asset_entity["_id"]
-
- if link_context_profiles:
- # Find and append linked assets if preset has set linked mapping
- link_assets = get_linked_assets(current_asset_entity)
- if link_assets:
- assets.extend(link_assets)
-
- # Skip if there are no assets. This can happen if only linked mapping
- # is set and there are no links for this asset.
- if not assets:
- self.log.warning(
- "Asset does not have linked assets. Nothing to process."
- )
- return
-
- # Prepare entities from database for assets
- prepared_entities = self._collect_last_version_repres(assets)
-
- # Load containers by prepared entities and presets
- loaded_containers = []
- # - Current asset containers
- if current_asset_id and current_asset_id in prepared_entities:
- current_context_data = prepared_entities.pop(current_asset_id)
- loaded_data = self.load_containers_by_asset_data(
- current_context_data, current_context_profiles, loaders_by_name
- )
- if loaded_data:
- loaded_containers.append(loaded_data)
-
- # - Linked assets container
- for linked_asset_data in prepared_entities.values():
- loaded_data = self.load_containers_by_asset_data(
- linked_asset_data, link_context_profiles, loaders_by_name
- )
- if loaded_data:
- loaded_containers.append(loaded_data)
-
- # Return list of loaded containers
- return loaded_containers
-
- def get_build_presets(self, task_name):
- """ Returns presets to build workfile for task name.
-
- Presets are loaded for current project set in
- io.Session["AVALON_PROJECT"], filtered by registered host
- and entered task name.
-
- :param task_name: Task name used for filtering build presets.
- :type task_name: str
- :return: preset for the entered task
- :rtype: dict | None
- """
- host_name = avalon.api.registered_host().__name__.rsplit(".", 1)[-1]
- presets = config.get_presets(io.Session["AVALON_PROJECT"])
- # Get presets for host
- build_presets = (
- presets["plugins"]
- .get(host_name, {})
- .get("workfile_build")
- )
- if not build_presets:
- return
-
- task_name_low = task_name.lower()
- per_task_preset = None
- for preset in build_presets:
- preset_tasks = preset.get("tasks") or []
- preset_tasks_low = [task.lower() for task in preset_tasks]
- if task_name_low in preset_tasks_low:
- per_task_preset = preset
- break
-
- return per_task_preset
-
- def _filter_build_profiles(self, build_profiles, loaders_by_name):
- """ Filter build profiles by loaders and prepare process data.
-
- Valid profile must have "loaders", "families" and "repre_names" keys
- with valid values.
- - "loaders" expects list of strings representing possible loaders.
- - "families" expects list of strings for filtering
- by main subset family.
- - "repre_names" expects list of strings for filtering by
- representation name.
-
- Lowered "families" and "repre_names" are prepared for each profile with
- all required keys.
-
- :param build_profiles: Profiles for building workfile.
- :type build_profiles: list
- :param loaders_by_name: Available loaders per name.
- :type loaders_by_name: dict
- :return: Filtered and prepared profiles.
- :rtype: list
- """
- valid_profiles = []
- for profile in build_profiles:
- # Check loaders
- profile_loaders = profile.get("loaders")
- if not profile_loaders:
- self.log.warning((
- "Build profile has missing loaders configuration: {0}"
- ).format(json.dumps(profile, indent=4)))
- continue
-
- # Check if any loader is available
- loaders_match = False
- for loader_name in profile_loaders:
- if loader_name in loaders_by_name:
- loaders_match = True
- break
-
- if not loaders_match:
- self.log.warning((
- "None of the loaders from Build profile are available: {0}"
- ).format(json.dumps(profile, indent=4)))
- continue
-
- # Check families
- profile_families = profile.get("families")
- if not profile_families:
- self.log.warning((
- "Build profile is missing families configuration: {0}"
- ).format(json.dumps(profile, indent=4)))
- continue
-
- # Check representation names
- profile_repre_names = profile.get("repre_names")
- if not profile_repre_names:
- self.log.warning((
- "Build profile is missing"
- " representation names filtering: {0}"
- ).format(json.dumps(profile, indent=4)))
- continue
-
- # Prepare lowered families and representation names
- profile["families_lowered"] = [
- fam.lower() for fam in profile_families
- ]
- profile["repre_names_lowered"] = [
- name.lower() for name in profile_repre_names
- ]
-
- valid_profiles.append(profile)
-
- return valid_profiles
-
- def _prepare_profile_for_subsets(self, subsets, profiles):
- """Select a profile for each subset by its data.
-
- Profiles are filtered for each subset individually.
- A profile is matched by the subset's family, optionally by a name
- regex, and by the representation names set in the profile.
- A subset may not match any profile; in that case the subset is
- skipped, and it is possible that none of the subsets have a
- matching profile.
-
- :param subsets: Subset documents.
- :type subsets: list
- :param profiles: Build profiles.
- :type profiles: list
- :return: Profile by subset's id.
- :rtype: dict
- """
- # Prepare subsets
- subsets_by_family = self.map_subsets_by_family(subsets)
-
- profiles_per_subset_id = {}
- for family, subsets in subsets_by_family.items():
- family_low = family.lower()
- for profile in profiles:
- # Skip profile if does not contain family
- if family_low not in profile["families_lowered"]:
- continue
-
- # Precompile optional subset name filters as regexes
- profile_regexes = profile.get("subset_name_filters")
- if profile_regexes:
- _profile_regexes = []
- for regex in profile_regexes:
- _profile_regexes.append(re.compile(regex))
- profile_regexes = _profile_regexes
-
- for subset in subsets:
- # Verify regex filtering (optional)
- if profile_regexes:
- valid = False
- for pattern in profile_regexes:
- if re.match(pattern, subset["name"]):
- valid = True
- break
-
- if not valid:
- continue
-
- profiles_per_subset_id[subset["_id"]] = profile
-
- # break profiles loop on finding the first matching profile
- break
- return profiles_per_subset_id
-
- def load_containers_by_asset_data(
- self, asset_entity_data, build_profiles, loaders_by_name
- ):
- """Load containers for entered asset entity by Build profiles.
-
- :param asset_entity_data: Prepared data with subsets, last version
- and representations for specific asset.
- :type asset_entity_data: dict
- :param build_profiles: Build profiles.
- :type build_profiles: list
- :param loaders_by_name: Available loaders per name.
- :type loaders_by_name: dict
- :return: Output contains asset document and loaded containers.
- :rtype: dict
- """
-
- # Skip early if any of the inputs is empty
- if not asset_entity_data or not build_profiles or not loaders_by_name:
- return
-
- asset_entity = asset_entity_data["asset_entity"]
-
- valid_profiles = self._filter_build_profiles(
- build_profiles, loaders_by_name
- )
- if not valid_profiles:
- self.log.warning(
- "There are no valid Workfile profiles. Skipping process."
- )
- return
-
- self.log.debug("Valid Workfile profiles: {}".format(valid_profiles))
-
- subsets_by_id = {}
- version_by_subset_id = {}
- repres_by_version_id = {}
- for subset_id, in_data in asset_entity_data["subsets"].items():
- subset_entity = in_data["subset_entity"]
- subsets_by_id[subset_entity["_id"]] = subset_entity
-
- version_data = in_data["version"]
- version_entity = version_data["version_entity"]
- version_by_subset_id[subset_id] = version_entity
- repres_by_version_id[version_entity["_id"]] = (
- version_data["repres"]
- )
-
- if not subsets_by_id:
- self.log.warning("There are no subsets for asset {0}".format(
- asset_entity["name"]
- ))
- return
-
- profiles_per_subset_id = self._prepare_profile_for_subsets(
- subsets_by_id.values(), valid_profiles
- )
- if not profiles_per_subset_id:
- self.log.warning("There are no valid subsets.")
- return
-
- valid_repres_by_subset_id = collections.defaultdict(list)
- for subset_id, profile in profiles_per_subset_id.items():
- profile_repre_names = profile["repre_names_lowered"]
-
- version_entity = version_by_subset_id[subset_id]
- version_id = version_entity["_id"]
- repres = repres_by_version_id[version_id]
- for repre in repres:
- repre_name_low = repre["name"].lower()
- if repre_name_low in profile_repre_names:
- valid_repres_by_subset_id[subset_id].append(repre)
-
- # DEBUG message
- msg = "Valid representations for Asset: `{}`".format(
- asset_entity["name"]
- )
- for subset_id, repres in valid_repres_by_subset_id.items():
- subset = subsets_by_id[subset_id]
- msg += "\n# Subset Name/ID: `{}`/{}".format(
- subset["name"], subset_id
- )
- for repre in repres:
- msg += "\n## Repre name: `{}`".format(repre["name"])
-
- self.log.debug(msg)
-
- containers = self._load_containers(
- valid_repres_by_subset_id, subsets_by_id,
- profiles_per_subset_id, loaders_by_name
- )
-
- return {
- "asset_entity": asset_entity,
- "containers": containers
- }
-
- def _load_containers(
- self, repres_by_subset_id, subsets_by_id,
- profiles_per_subset_id, loaders_by_name
- ):
- """Real load by collected data happens here.
-
- Loading of representations per subset happens here. Each subset loads
- at most one representation. Loading is tried in a specific order:
- representation names defined in the profile are tried one by one.
- If a subset has a representation matching the current name, each
- loader is tried until one succeeds. If none succeeds, the next
- representation name is tried.
- The loop for a subset ends when a representation is loaded or all
- matching representations were already tried.
-
- :param repres_by_subset_id: Available representations mapped
- by their parent (subset) id.
- :type repres_by_subset_id: dict
- :param subsets_by_id: Subset documents mapped by their id.
- :type subsets_by_id: dict
- :param profiles_per_subset_id: Build profiles mapped by subset id.
- :type profiles_per_subset_id: dict
- :param loaders_by_name: Available loaders per name.
- :type loaders_by_name: dict
- :return: Objects of loaded containers.
- :rtype: list
- """
- loaded_containers = []
-
- # Get subset id order from build presets.
- build_presets = self.build_presets.get("current_context", []) - build_presets += self.build_presets.get("linked_assets", []) - subset_ids_ordered = [] - for preset in build_presets: - for preset_family in preset["families"]: - for id, subset in subsets_by_id.items(): - if preset_family not in subset["data"].get("families", []): - continue - - subset_ids_ordered.append(id) - - # Order representations from subsets. - print("repres_by_subset_id", repres_by_subset_id) - representations_ordered = [] - representations = [] - for id in subset_ids_ordered: - for subset_id, repres in repres_by_subset_id.items(): - if repres in representations: - continue - - if id == subset_id: - representations_ordered.append((subset_id, repres)) - representations.append(repres) - - print("representations", representations) - - # Load ordered reprensentations. - for subset_id, repres in representations_ordered: - subset_name = subsets_by_id[subset_id]["name"] - - profile = profiles_per_subset_id[subset_id] - loaders_last_idx = len(profile["loaders"]) - 1 - repre_names_last_idx = len(profile["repre_names_lowered"]) - 1 - - repre_by_low_name = { - repre["name"].lower(): repre for repre in repres - } - - is_loaded = False - for repre_name_idx, profile_repre_name in enumerate( - profile["repre_names_lowered"] - ): - # Break iteration if representation was already loaded - if is_loaded: - break - - repre = repre_by_low_name.get(profile_repre_name) - if not repre: - continue - - for loader_idx, loader_name in enumerate(profile["loaders"]): - if is_loaded: - break - - loader = loaders_by_name.get(loader_name) - if not loader: - continue - try: - container = avalon.api.load( - loader, - repre["_id"], - name=subset_name - ) - loaded_containers.append(container) - is_loaded = True - - except Exception as exc: - if exc == pipeline.IncompatibleLoaderError: - self.log.info(( - "Loader `{}` is not compatible with" - " representation `{}`" - ).format(loader_name, repre["name"])) - - else: - self.log.error( - "Unexpected error happened during loading", - exc_info=True - ) - - msg = "Loading failed." - if loader_idx < loaders_last_idx: - msg += " Trying next loader." - elif repre_name_idx < repre_names_last_idx: - msg += ( - " Loading of subset `{}` was not successful." - ).format(subset_name) - else: - msg += " Trying next representation." - self.log.info(msg) - - return loaded_containers - - def _collect_last_version_repres(self, asset_entities): - """Collect subsets, versions and representations for asset_entities. - - :param asset_entities: Asset entities for which want to find data - :type asset_entities: list - :return: collected entities - :rtype: dict - - Example output: - ``` - { - {Asset ID}: { - "asset_entity": , - "subsets": { - {Subset ID}: { - "subset_entity": , - "version": { - "version_entity": , - "repres": [ - , , ... - ] - } - }, - ... - } - }, - ... 
- } - output[asset_id]["subsets"][subset_id]["version"]["repres"] - ``` - """ - - if not asset_entities: - return {} - - asset_entity_by_ids = {asset["_id"]: asset for asset in asset_entities} - - subsets = list(io.find({ - "type": "subset", - "parent": {"$in": asset_entity_by_ids.keys()} - })) - subset_entity_by_ids = {subset["_id"]: subset for subset in subsets} - - sorted_versions = list(io.find({ - "type": "version", - "parent": {"$in": subset_entity_by_ids.keys()} - }).sort("name", -1)) - - subset_id_with_latest_version = [] - last_versions_by_id = {} - for version in sorted_versions: - subset_id = version["parent"] - if subset_id in subset_id_with_latest_version: - continue - subset_id_with_latest_version.append(subset_id) - last_versions_by_id[version["_id"]] = version - - repres = io.find({ - "type": "representation", - "parent": {"$in": last_versions_by_id.keys()} - }) - - output = {} - for repre in repres: - version_id = repre["parent"] - version = last_versions_by_id[version_id] - - subset_id = version["parent"] - subset = subset_entity_by_ids[subset_id] - - asset_id = subset["parent"] - asset = asset_entity_by_ids[asset_id] - - if asset_id not in output: - output[asset_id] = { - "asset_entity": asset, - "subsets": {} - } - - if subset_id not in output[asset_id]["subsets"]: - output[asset_id]["subsets"][subset_id] = { - "subset_entity": subset, - "version": { - "version_entity": version, - "repres": [] - } - } - - output[asset_id]["subsets"][subset_id]["version"]["repres"].append( - repre - ) - - return output - - def ffprobe_streams(path_to_file, logger=None): """Load streams from entered filepath via ffprobe.""" if not logger: From ab621279276512463d8790b203571743fa2c1b9e Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Tue, 10 Nov 2020 11:57:08 +0100 Subject: [PATCH 08/12] ffprobe_streams moved to ffmpeg_utils --- pype/lib/__init__.py | 5 ++++- pype/lib/ffmpeg_utils.py | 40 ++++++++++++++++++++++++++++++++++++++++ pype/lib/lib_old.py | 33 --------------------------------- 3 files changed, 44 insertions(+), 34 deletions(-) create mode 100644 pype/lib/ffmpeg_utils.py diff --git a/pype/lib/__init__.py b/pype/lib/__init__.py index f807fe894a..25589fa84f 100644 --- a/pype/lib/__init__.py +++ b/pype/lib/__init__.py @@ -43,6 +43,7 @@ from .lib_old import ( ffprobe_streams, source_hash, ) +from .ffmpeg_utils import ffprobe_streams __all__ = [ "get_avalon_database", @@ -63,5 +64,7 @@ __all__ = [ "launch_application", "ApplicationAction", - "filter_pyblish_plugins" + "filter_pyblish_plugins", + + "ffprobe_streams" ] diff --git a/pype/lib/ffmpeg_utils.py b/pype/lib/ffmpeg_utils.py new file mode 100644 index 0000000000..1c656d55d3 --- /dev/null +++ b/pype/lib/ffmpeg_utils.py @@ -0,0 +1,40 @@ +import logging +import json +import subprocess + +from . 
import get_ffmpeg_tool_path + +log = logging.getLogger("FFmpeg utils") + + +def ffprobe_streams(path_to_file, logger=None): + """Load streams from entered filepath via ffprobe.""" + if not logger: + logger = log + logger.info( + "Getting information about input \"{}\".".format(path_to_file) + ) + args = [ + "\"{}\"".format(get_ffmpeg_tool_path("ffprobe")), + "-v quiet", + "-print_format json", + "-show_format", + "-show_streams", + "\"{}\"".format(path_to_file) + ] + command = " ".join(args) + logger.debug("FFprobe command: \"{}\"".format(command)) + popen = subprocess.Popen( + command, + shell=True, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE + ) + + popen_stdout, popen_stderr = popen.communicate() + if popen_stdout: + logger.debug("ffprobe stdout: {}".format(popen_stdout)) + + if popen_stderr: + logger.debug("ffprobe stderr: {}".format(popen_stderr)) + return json.loads(popen_stdout)["streams"] diff --git a/pype/lib/lib_old.py b/pype/lib/lib_old.py index 58c9abd71f..37cd6d8f93 100644 --- a/pype/lib/lib_old.py +++ b/pype/lib/lib_old.py @@ -327,39 +327,6 @@ def get_last_version_from_path(path_dir, filter): return None -def ffprobe_streams(path_to_file, logger=None): - """Load streams from entered filepath via ffprobe.""" - if not logger: - logger = log - logger.info( - "Getting information about input \"{}\".".format(path_to_file) - ) - args = [ - "\"{}\"".format(get_ffmpeg_tool_path("ffprobe")), - "-v quiet", - "-print_format json", - "-show_format", - "-show_streams", - "\"{}\"".format(path_to_file) - ] - command = " ".join(args) - logger.debug("FFprobe command: \"{}\"".format(command)) - popen = subprocess.Popen( - command, - shell=True, - stdout=subprocess.PIPE, - stderr=subprocess.PIPE - ) - - popen_stdout, popen_stderr = popen.communicate() - if popen_stdout: - logger.debug("ffprobe stdout: {}".format(popen_stdout)) - - if popen_stderr: - logger.debug("ffprobe stderr: {}".format(popen_stderr)) - return json.loads(popen_stdout)["streams"] - - def source_hash(filepath, *args): """Generate simple identifier for a source file. 
This is used to identify whether a source file has previously been From e153fbef3686e8778d1303a91fd3a24c23a667ae Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Tue, 10 Nov 2020 11:58:34 +0100 Subject: [PATCH 09/12] cleaned up imports --- pype/lib/lib_old.py | 4 ---- 1 file changed, 4 deletions(-) diff --git a/pype/lib/lib_old.py b/pype/lib/lib_old.py index 37cd6d8f93..89b7f42d38 100644 --- a/pype/lib/lib_old.py +++ b/pype/lib/lib_old.py @@ -1,15 +1,11 @@ import os import re -import json -import collections import logging import itertools import contextlib import subprocess -from avalon import io, pipeline import avalon.api -from ..api import config log = logging.getLogger(__name__) From dafa84e2a49ba5f0c3d45e3d9f8d9c5694a28118 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Tue, 10 Nov 2020 12:16:17 +0100 Subject: [PATCH 10/12] removed `get_subsets` from pype.api --- pype/api.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/pype/api.py b/pype/api.py index 2c7dfa73f0..29e91dc8e0 100644 --- a/pype/api.py +++ b/pype/api.py @@ -40,7 +40,6 @@ from .lib import ( version_up, get_asset, get_hierarchy, - get_subsets, get_version_from_path, get_last_version_from_path, modified_environ, @@ -89,7 +88,6 @@ __all__ = [ "version_up", "get_hierarchy", "get_asset", - "get_subsets", "get_version_from_path", "get_last_version_from_path", "modified_environ", From 79cf8783634972ed8817baf9b40f364fd86fcbd0 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Tue, 10 Nov 2020 13:20:56 +0100 Subject: [PATCH 11/12] removed imports from old lib --- pype/lib/__init__.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/pype/lib/__init__.py b/pype/lib/__init__.py index 25589fa84f..b5a819653b 100644 --- a/pype/lib/__init__.py +++ b/pype/lib/__init__.py @@ -39,8 +39,6 @@ from .lib_old import ( _get_host_name, get_version_from_path, get_last_version_from_path, - BuildWorkfile, - ffprobe_streams, source_hash, ) from .ffmpeg_utils import ffprobe_streams From f591d76367db917448de46e2676e38c09b81b421 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Tue, 10 Nov 2020 13:21:17 +0100 Subject: [PATCH 12/12] hound fixes --- pype/plugins/celaction/publish/collect_audio.py | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/pype/plugins/celaction/publish/collect_audio.py b/pype/plugins/celaction/publish/collect_audio.py index c92e4fd868..341db4250e 100644 --- a/pype/plugins/celaction/publish/collect_audio.py +++ b/pype/plugins/celaction/publish/collect_audio.py @@ -45,9 +45,9 @@ class AppendCelactionAudio(pyblish.api.ContextPlugin): def get_subsets( self, asset_name, + representations, regex_filter=None, - version=None, - representations=["exr", "dpx"] + version=None ): """ Query subsets with filter on name. @@ -99,7 +99,9 @@ class AppendCelactionAudio(pyblish.api.ContextPlugin): sort=[("name", -1)] ) else: - assert isinstance(version, int), "version needs to be `int` type" + assert isinstance(version, int), ( + "version needs to be `int` type" + ) version_sel = io.find_one({ "type": "version", "parent": subset["_id"],
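---

Usage note (illustrative, not part of the patch series): after patch 08 relocates `ffprobe_streams` into `pype/lib/ffmpeg_utils.py` and re-exports it from `pype.lib`, callers can pass their own logger to capture the ffprobe stdout/stderr debug messages introduced in patch 01. A minimal sketch under the assumption that `pype` is importable and ffprobe is resolvable via `get_ffmpeg_tool_path`; the media path is hypothetical:

```
import logging

from pype.lib import ffprobe_streams

logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger("review_publish")

# ffprobe_streams returns the parsed "streams" list from ffprobe's JSON
# output; each entry is a dict with keys such as "codec_type", "width"
# and "height" for video streams.
streams = ffprobe_streams("/path/to/review.mov", logger=logger)
for stream in streams:
    if stream.get("codec_type") == "video":
        print(stream.get("width"), stream.get("height"))
```

Passing an explicit `logger` routes the command and its stderr into the caller's logging setup instead of the module-level "FFmpeg utils" logger, which is useful inside publish plugins that already own a logger.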