Mirror of https://github.com/ynput/ayon-core.git
Merge branch 'develop' into feature/PYPE-654-nks-cut-reference-videos

This commit is contained in: commit 788409714a

221 changed files with 10209 additions and 6236 deletions
.flake8 | 2
@@ -1,5 +1,7 @@
 [flake8]
+# ignore = D203
+ignore = BLK100
 max-line-length = 79
 exclude =
     .git,
     __pycache__,
.hound.yml | 4 (new file)
@@ -0,0 +1,4 @@
flake8:
  enabled: true
  config_file: .flake8
LICENSE | 2
@@ -1,6 +1,6 @@
 MIT License

-Copyright (c) 2018 orbi tools s.r.o
+Copyright (c) 2020 Orbi Tools s.r.o.

 Permission is hereby granted, free of charge, to any person obtaining a copy
README.md | 30
@@ -1,31 +1,11 @@
 Pype
 ====

-The base studio _config_ for [Avalon](https://getavalon.github.io/)
+Welcome to PYPE _config_ for [Avalon](https://getavalon.github.io/)

-Currently this config is dependent on our customised avalon instalation so it won't work with vanilla avalon core. We're working on open sourcing all of the necessary code though. You can still get inspiration or take our individual validators and scripts which should work just fine in other pipelines.
+To get all the key information about the project, go to [PYPE.club](http://pype.club)


+Currently this config is dependent on our customised avalon instalation so it won't work with vanilla avalon core. To install it you'll need to download [pype-setup](github.com/pypeclub/pype-setup), which is able to deploy everything for you if you follow the documentation.

+_This configuration acts as a starting point for all pype club clients wth avalon deployment._

-Code convention
----------------
-
-Below are some of the standard practices applied to this repositories.
-
-- **Etiquette: PEP8**
-
-  All code is written in PEP8. It is recommended you use a linter as you work, flake8 and pylinter are both good options.
-
-- **Etiquette: Napoleon docstrings**
-
-  Any docstrings are made in Google Napoleon format. See [Napoleon](https://sphinxcontrib-napoleon.readthedocs.io/en/latest/example_google.html) for details.
-
-- **Etiquette: Semantic Versioning**
-
-  This project follows [semantic versioning](http://semver.org).
-
-- **Etiquette: Underscore means private**
-
-  Anything prefixed with an underscore means that it is internal to wherever it is used. For example, a variable name is only ever used in the parent function or class. A module is not for use by the end-user. In contrast, anything without an underscore is public, but not necessarily part of the API. Members of the API resides in `api.py`.
-
-- **API: Idempotence**
-
-  A public function must be able to be called twice and produce the exact same result. This means no changing of state without restoring previous state when finishing. For example, if a function requires changing the current selection in Autodesk Maya, it must restore the previous selection prior to completing.
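The Napoleon docstring convention mentioned in the removed README section is easy to picture; a minimal sketch of a Google-style docstring, with a hypothetical function and parameters that are not part of this repository:

    def collect_subsets(asset_name, subset_types=None):
        """Collect subset documents for a single asset.

        Args:
            asset_name (str): Name of the asset to query.
            subset_types (list, optional): Restrict the query to these
                subset types. All types are queried when not set.

        Returns:
            list: Matching subset documents, possibly empty.
        """
        return []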
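The idempotence rule from that same section amounts to "save state, mutate, restore". A minimal sketch in Maya terms, assuming `maya.cmds` is available; the exporter function itself is hypothetical, not code from this repository:

    from maya import cmds


    def export_selected(nodes, path):
        """Export `nodes` without leaving the scene selection changed."""
        # Save whatever the artist had selected before the call.
        previous_selection = cmds.ls(selection=True)
        try:
            cmds.select(nodes, replace=True)
            cmds.file(path, exportSelected=True, force=True)
        finally:
            # Restore the previous state so a second call behaves the same.
            if previous_selection:
                cmds.select(previous_selection, replace=True)
            else:
                cmds.select(clear=True)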
@@ -9,8 +9,9 @@ from pypeapp import config
 import logging
 log = logging.getLogger(__name__)

-__version__ = "2.3.0"
+__version__ = "2.6.0"

+PROJECT_PLUGINS_PATH = os.environ.get("PYPE_PROJECT_PLUGINS")
 PACKAGE_DIR = os.path.dirname(__file__)
 PLUGINS_DIR = os.path.join(PACKAGE_DIR, "plugins")
@@ -72,6 +73,18 @@ def install():
     pyblish.register_discovery_filter(filter_pyblish_plugins)
     avalon.register_plugin_path(avalon.Loader, LOAD_PATH)

+    # Register project specific plugins
+    project_name = os.environ.get("AVALON_PROJECT")
+    if PROJECT_PLUGINS_PATH and project_name:
+        for path in PROJECT_PLUGINS_PATH.split(os.pathsep):
+            if not path:
+                continue
+            plugin_path = os.path.join(path, project_name, "plugins")
+            if os.path.exists(plugin_path):
+                pyblish.register_plugin_path(plugin_path)
+                avalon.register_plugin_path(avalon.Loader, plugin_path)
+                avalon.register_plugin_path(avalon.Creator, plugin_path)
+
     # apply monkey patched discover to original one
     avalon.discover = patched_discover
@@ -19,7 +19,7 @@ class CreateProjectFolders(BaseAction):
     #: Action description.
     description = 'Creates folder structure'
     #: roles that are allowed to register this action
-    role_list = ['Pypeclub', 'Administrator']
+    role_list = ['Pypeclub', 'Administrator', 'Project Manager']
     icon = '{}/ftrack/action_icons/CreateProjectFolders.svg'.format(
         os.environ.get('PYPE_STATICS_SERVER', '')
     )
@@ -99,6 +99,7 @@ class DeleteAssetSubset(BaseAction):

         # Filter event even more (skip task entities)
         # - task entities are not relevant for avalon
+        entity_mapping = {}
         for entity in entities:
             ftrack_id = entity["id"]
             if ftrack_id not in ftrack_ids:
@@ -107,6 +108,8 @@ class DeleteAssetSubset(BaseAction):
             if entity.entity_type.lower() == "task":
                 ftrack_ids.remove(ftrack_id)

+            entity_mapping[ftrack_id] = entity
+
         if not ftrack_ids:
             # It is bug if this happens!
             return {
@@ -122,11 +125,41 @@ class DeleteAssetSubset(BaseAction):
         project_name = project["full_name"]
         self.dbcon.Session["AVALON_PROJECT"] = project_name

-        selected_av_entities = self.dbcon.find({
+        selected_av_entities = list(self.dbcon.find({
             "type": "asset",
             "data.ftrackId": {"$in": ftrack_ids}
-        })
-        selected_av_entities = [ent for ent in selected_av_entities]
+        }))
+        found_without_ftrack_id = {}
+        if len(selected_av_entities) != len(ftrack_ids):
+            found_ftrack_ids = [
+                ent["data"]["ftrackId"] for ent in selected_av_entities
+            ]
+            for ftrack_id, entity in entity_mapping.items():
+                if ftrack_id in found_ftrack_ids:
+                    continue
+
+                av_ents_by_name = list(self.dbcon.find({
+                    "type": "asset",
+                    "name": entity["name"]
+                }))
+                if not av_ents_by_name:
+                    continue
+
+                ent_path_items = [ent["name"] for ent in entity["link"]]
+                parents = ent_path_items[1:len(ent_path_items)-1:]
+                # TODO we should say to user that
+                # few of them are missing in avalon
+                for av_ent in av_ents_by_name:
+                    if av_ent["data"]["parents"] != parents:
+                        continue
+
+                    # TODO we should say to user that found entity
+                    # with same name does not match same ftrack id?
+                    if "ftrackId" not in av_ent["data"]:
+                        selected_av_entities.append(av_ent)
+                        found_without_ftrack_id[str(av_ent["_id"])] = ftrack_id
+                        break
+
         if not selected_av_entities:
             return {
                 "success": False,
@@ -155,7 +188,8 @@ class DeleteAssetSubset(BaseAction):
             "created_at": datetime.now(),
             "project_name": project_name,
             "subset_ids_by_name": {},
-            "subset_ids_by_parent": {}
+            "subset_ids_by_parent": {},
+            "without_ftrack_id": found_without_ftrack_id
         }

         id_item = {
@@ -413,14 +447,21 @@ class DeleteAssetSubset(BaseAction):
         asset_ids_to_archive = []
         ftrack_ids_to_delete = []
         if len(assets_to_delete) > 0:
+            map_av_ftrack_id = spec_data["without_ftrack_id"]
             # Prepare data when deleting whole avalon asset
             avalon_assets = self.dbcon.find({"type": "asset"})
             avalon_assets_by_parent = collections.defaultdict(list)
             for asset in avalon_assets:
+                asset_id = asset["_id"]
                 parent_id = asset["data"]["visualParent"]
                 avalon_assets_by_parent[parent_id].append(asset)
-                if asset["_id"] in assets_to_delete:
-                    ftrack_id = asset["data"]["ftrackId"]
+                if asset_id in assets_to_delete:
+                    ftrack_id = map_av_ftrack_id.get(str(asset_id))
+                    if not ftrack_id:
+                        ftrack_id = asset["data"].get("ftrackId")
+
+                    if not ftrack_id:
+                        continue
                     ftrack_ids_to_delete.append(ftrack_id)

         children_queue = Queue()
pype/ftrack/actions/action_delete_old_versions.py | 534 (new file)
@@ -0,0 +1,534 @@
import os
import collections
import uuid

import clique
from pymongo import UpdateOne

from pype.ftrack import BaseAction
from pype.ftrack.lib.io_nonsingleton import DbConnector

import avalon.pipeline


class DeleteOldVersions(BaseAction):

    identifier = "delete.old.versions"
    label = "Pype Admin"
    variant = "- Delete old versions"
    description = (
        "Delete files from older publishes so project can be"
        " archived with only lates versions."
    )
    role_list = ["Pypeclub", "Project Manager", "Administrator"]
    icon = '{}/ftrack/action_icons/PypeAdmin.svg'.format(
        os.environ.get('PYPE_STATICS_SERVER', '')
    )

    dbcon = DbConnector()

    inteface_title = "Choose your preferences"
    splitter_item = {"type": "label", "value": "---"}
    sequence_splitter = "__sequence_splitter__"

    def discover(self, session, entities, event):
        ''' Validation '''
        selection = event["data"].get("selection") or []
        for entity in selection:
            entity_type = (entity.get("entityType") or "").lower()
            if entity_type == "assetversion":
                return True
        return False

    def interface(self, session, entities, event):
        items = []
        root = os.environ.get("AVALON_PROJECTS")
        if not root:
            msg = "Root path to projects is not set."
            items.append({
                "type": "label",
                "value": "<i><b>ERROR:</b> {}</i>".format(msg)
            })
            self.show_interface(
                items=items, title=self.inteface_title, event=event
            )
            return {
                "success": False,
                "message": msg
            }

        if not os.path.exists(root):
            msg = "Root path does not exists \"{}\".".format(str(root))
            items.append({
                "type": "label",
                "value": "<i><b>ERROR:</b> {}</i>".format(msg)
            })
            self.show_interface(
                items=items, title=self.inteface_title, event=event
            )
            return {
                "success": False,
                "message": msg
            }

        values = event["data"].get("values")
        if values:
            versions_count = int(values["last_versions_count"])
            if versions_count >= 1:
                return
            items.append({
                "type": "label",
                "value": (
                    "# You have to keep at least 1 version!"
                )
            })

        items.append({
            "type": "label",
            "value": (
                "<i><b>WARNING:</b> This will remove published files of older"
                " versions from disk so we don't recommend use"
                " this action on \"live\" project.</i>"
            )
        })

        items.append(self.splitter_item)

        # How many versions to keep
        items.append({
            "type": "label",
            "value": "## Choose how many versions you want to keep:"
        })
        items.append({
            "type": "label",
            "value": (
                "<i><b>NOTE:</b> We do recommend to keep 2 versions.</i>"
            )
        })
        items.append({
            "type": "number",
            "name": "last_versions_count",
            "label": "Versions",
            "value": 2
        })

        items.append(self.splitter_item)

        items.append({
            "type": "label",
            "value": (
                "## Remove publish folder even if there"
                " are other than published files:"
            )
        })
        items.append({
            "type": "label",
            "value": (
                "<i><b>WARNING:</b> This may remove more than you want.</i>"
            )
        })
        items.append({
            "type": "boolean",
            "name": "force_delete_publish_folder",
            "label": "Are You sure?",
            "value": False
        })

        return {
            "items": items,
            "title": self.inteface_title
        }

    def launch(self, session, entities, event):
        values = event["data"].get("values")
        if not values:
            return

        versions_count = int(values["last_versions_count"])
        force_to_remove = values["force_delete_publish_folder"]

        _val1 = "OFF"
        if force_to_remove:
            _val1 = "ON"

        _val3 = "s"
        if versions_count == 1:
            _val3 = ""

        self.log.debug((
            "Process started. Force to delete publish folder is set to [{0}]"
            " and will keep {1} latest version{2}."
        ).format(_val1, versions_count, _val3))

        self.dbcon.install()

        project = None
        avalon_asset_names = []
        asset_versions_by_parent_id = collections.defaultdict(list)
        subset_names_by_asset_name = collections.defaultdict(list)

        ftrack_assets_by_name = {}
        for entity in entities:
            ftrack_asset = entity["asset"]

            parent_ent = ftrack_asset["parent"]
            parent_ftrack_id = parent_ent["id"]
            parent_name = parent_ent["name"]

            if parent_name not in avalon_asset_names:
                avalon_asset_names.append(parent_name)

            # Group asset versions by parent entity
            asset_versions_by_parent_id[parent_ftrack_id].append(entity)

            # Get project
            if project is None:
                project = parent_ent["project"]

            # Collect subset names per asset
            subset_name = ftrack_asset["name"]
            subset_names_by_asset_name[parent_name].append(subset_name)

            if subset_name not in ftrack_assets_by_name:
                ftrack_assets_by_name[subset_name] = ftrack_asset

        # Set Mongo collection
        project_name = project["full_name"]
        self.dbcon.Session["AVALON_PROJECT"] = project_name
        self.log.debug("Project is set to {}".format(project_name))

        # Get Assets from avalon database
        assets = list(self.dbcon.find({
            "type": "asset",
            "name": {"$in": avalon_asset_names}
        }))
        asset_id_to_name_map = {
            asset["_id"]: asset["name"] for asset in assets
        }
        asset_ids = list(asset_id_to_name_map.keys())

        self.log.debug("Collected assets ({})".format(len(asset_ids)))

        # Get Subsets
        subsets = list(self.dbcon.find({
            "type": "subset",
            "parent": {"$in": asset_ids}
        }))
        subsets_by_id = {}
        subset_ids = []
        for subset in subsets:
            asset_id = subset["parent"]
            asset_name = asset_id_to_name_map[asset_id]
            available_subsets = subset_names_by_asset_name[asset_name]

            if subset["name"] not in available_subsets:
                continue

            subset_ids.append(subset["_id"])
            subsets_by_id[subset["_id"]] = subset

        self.log.debug("Collected subsets ({})".format(len(subset_ids)))

        # Get Versions
        versions = list(self.dbcon.find({
            "type": "version",
            "parent": {"$in": subset_ids}
        }))

        versions_by_parent = collections.defaultdict(list)
        for ent in versions:
            versions_by_parent[ent["parent"]].append(ent)

        def sort_func(ent):
            return int(ent["name"])

        all_last_versions = []
        for parent_id, _versions in versions_by_parent.items():
            for idx, version in enumerate(
                sorted(_versions, key=sort_func, reverse=True)
            ):
                if idx >= versions_count:
                    break
                all_last_versions.append(version)

        self.log.debug("Collected versions ({})".format(len(versions)))

        # Filter latest versions
        for version in all_last_versions:
            versions.remove(version)

        # Update versions_by_parent without filtered versions
        versions_by_parent = collections.defaultdict(list)
        for ent in versions:
            versions_by_parent[ent["parent"]].append(ent)

        # Filter already deleted versions
        versions_to_pop = []
        for version in versions:
            version_tags = version["data"].get("tags")
            if version_tags and "deleted" in version_tags:
                versions_to_pop.append(version)

        for version in versions_to_pop:
            subset = subsets_by_id[version["parent"]]
            asset_id = subset["parent"]
            asset_name = asset_id_to_name_map[asset_id]
            msg = "Asset: \"{}\" | Subset: \"{}\" | Version: \"{}\"".format(
                asset_name, subset["name"], version["name"]
            )
            self.log.warning((
                "Skipping version. Already tagged as `deleted`. < {} >"
            ).format(msg))
            versions.remove(version)

        version_ids = [ent["_id"] for ent in versions]

        self.log.debug(
            "Filtered versions to delete ({})".format(len(version_ids))
        )

        if not version_ids:
            msg = "Skipping processing. Nothing to delete."
            self.log.debug(msg)
            return {
                "success": True,
                "message": msg
            }

        repres = list(self.dbcon.find({
            "type": "representation",
            "parent": {"$in": version_ids}
        }))

        self.log.debug(
            "Collected representations to remove ({})".format(len(repres))
        )

        dir_paths = {}
        file_paths_by_dir = collections.defaultdict(list)
        for repre in repres:
            file_path, seq_path = self.path_from_represenation(repre)
            if file_path is None:
                self.log.warning((
                    "Could not format path for represenation \"{}\""
                ).format(str(repre)))
                continue

            dir_path = os.path.dirname(file_path)
            dir_id = None
            for _dir_id, _dir_path in dir_paths.items():
                if _dir_path == dir_path:
                    dir_id = _dir_id
                    break

            if dir_id is None:
                dir_id = uuid.uuid4()
                dir_paths[dir_id] = dir_path

            file_paths_by_dir[dir_id].append([file_path, seq_path])

        dir_ids_to_pop = []
        for dir_id, dir_path in dir_paths.items():
            if os.path.exists(dir_path):
                continue

            dir_ids_to_pop.append(dir_id)

        # Pop dirs from both dictionaries
        for dir_id in dir_ids_to_pop:
            dir_paths.pop(dir_id)
            paths = file_paths_by_dir.pop(dir_id)
            # TODO report of missing directories?
            paths_msg = ", ".join([
                "'{}'".format(path[0].replace("\\", "/")) for path in paths
            ])
            self.log.warning((
                "Folder does not exist. Deleting it's files skipped: {}"
            ).format(paths_msg))

        if force_to_remove:
            self.delete_whole_dir_paths(dir_paths.values())
        else:
            self.delete_only_repre_files(dir_paths, file_paths_by_dir)

        mongo_changes_bulk = []
        for version in versions:
            orig_version_tags = version["data"].get("tags") or []
            version_tags = [tag for tag in orig_version_tags]
            if "deleted" not in version_tags:
                version_tags.append("deleted")

            if version_tags == orig_version_tags:
                continue

            update_query = {"_id": version["_id"]}
            update_data = {"$set": {"data.tags": version_tags}}
            mongo_changes_bulk.append(UpdateOne(update_query, update_data))

        if mongo_changes_bulk:
            self.dbcon.bulk_write(mongo_changes_bulk)

        self.dbcon.uninstall()

        # Set attribute `is_published` to `False` on ftrack AssetVersions
        for subset_id, _versions in versions_by_parent.items():
            subset_name = None
            for subset in subsets:
                if subset["_id"] == subset_id:
                    subset_name = subset["name"]
                    break

            if subset_name is None:
                self.log.warning(
                    "Subset with ID `{}` was not found.".format(str(subset_id))
                )
                continue

            ftrack_asset = ftrack_assets_by_name.get(subset_name)
            if not ftrack_asset:
                self.log.warning((
                    "Could not find Ftrack asset with name `{}`"
                ).format(subset_name))
                continue

            version_numbers = [int(ver["name"]) for ver in _versions]
            for version in ftrack_asset["versions"]:
                if int(version["version"]) in version_numbers:
                    version["is_published"] = False

        try:
            session.commit()

        except Exception:
            msg = (
                "Could not set `is_published` attribute to `False`"
                " for selected AssetVersions."
            )
            self.log.warning(msg, exc_info=True)

            return {
                "success": False,
                "message": msg
            }

        return True

    def delete_whole_dir_paths(self, dir_paths):
        for dir_path in dir_paths:
            # Delete all files and fodlers in dir path
            for root, dirs, files in os.walk(dir_path, topdown=False):
                for name in files:
                    os.remove(os.path.join(root, name))

                for name in dirs:
                    os.rmdir(os.path.join(root, name))

            # Delete even the folder and it's parents folders if they are empty
            while True:
                if not os.path.exists(dir_path):
                    dir_path = os.path.dirname(dir_path)
                    continue

                if len(os.listdir(dir_path)) != 0:
                    break

                os.rmdir(os.path.join(dir_path))

    def delete_only_repre_files(self, dir_paths, file_paths):
        for dir_id, dir_path in dir_paths.items():
            dir_files = os.listdir(dir_path)
            collections, remainders = clique.assemble(dir_files)
            for file_path, seq_path in file_paths[dir_id]:
                file_path_base = os.path.split(file_path)[1]
                # Just remove file if `frame` key was not in context or
                # filled path is in remainders (single file sequence)
                if not seq_path or file_path_base in remainders:
                    if not os.path.exists(file_path):
                        self.log.warning(
                            "File was not found: {}".format(file_path)
                        )
                        continue
                    os.remove(file_path)
                    self.log.debug("Removed file: {}".format(file_path))
                    remainders.remove(file_path_base)
                    continue

                seq_path_base = os.path.split(seq_path)[1]
                head, tail = seq_path_base.split(self.sequence_splitter)

                final_col = None
                for collection in collections:
                    if head != collection.head or tail != collection.tail:
                        continue
                    final_col = collection
                    break

                if final_col is not None:
                    # Fill full path to head
                    final_col.head = os.path.join(dir_path, final_col.head)
                    for _file_path in final_col:
                        if os.path.exists(_file_path):
                            os.remove(_file_path)
                    _seq_path = final_col.format("{head}{padding}{tail}")
                    self.log.debug("Removed files: {}".format(_seq_path))
                    collections.remove(final_col)

                elif os.path.exists(file_path):
                    os.remove(file_path)
                    self.log.debug("Removed file: {}".format(file_path))

                else:
                    self.log.warning(
                        "File was not found: {}".format(file_path)
                    )

        # Delete as much as possible parent folders
        for dir_path in dir_paths.values():
            while True:
                if not os.path.exists(dir_path):
                    dir_path = os.path.dirname(dir_path)
                    continue

                if len(os.listdir(dir_path)) != 0:
                    break

                self.log.debug("Removed folder: {}".format(dir_path))
                os.rmdir(dir_path)

    def path_from_represenation(self, representation):
        try:
            template = representation["data"]["template"]

        except KeyError:
            return (None, None)

        root = os.environ["AVALON_PROJECTS"]
        if not root:
            return (None, None)

        sequence_path = None
        try:
            context = representation["context"]
            context["root"] = root
            path = avalon.pipeline.format_template_with_optional_keys(
                context, template
            )
            if "frame" in context:
                context["frame"] = self.sequence_splitter
                sequence_path = os.path.normpath(
                    avalon.pipeline.format_template_with_optional_keys(
                        context, template
                    )
                )

        except KeyError:
            # Template references unavailable data
            return (None, None)

        return (os.path.normpath(path), sequence_path)


def register(session, plugins_presets={}):
    '''Register plugin. Called when used as an plugin.'''

    DeleteOldVersions(session, plugins_presets).register()
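`delete_only_repre_files` above relies on `clique.assemble` to match a representation's formatted sequence path against the files actually present in the publish folder. A small standalone sketch of that grouping, with made-up file names:

    import clique

    files = [
        "render.1001.exr", "render.1002.exr", "render.1003.exr",
        "preview.mp4"
    ]
    collections, remainders = clique.assemble(files)

    for collection in collections:
        # `head` and `tail` are what the action compares against the
        # template path split by its `sequence_splitter` marker.
        print(collection.head, collection.tail)  # "render." ".exr"
        print(collection.format("{head}{padding}{tail}"))  # render.%04d.exr

    # Files that are not part of any sequence stay in the remainder
    # and are removed one by one.
    print(remainders)  # ['preview.mp4']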
@@ -312,42 +312,32 @@ class Delivery(BaseAction):
         anatomy_data = copy.deepcopy(repre["context"])
         anatomy_data["root"] = location_path

-        anatomy_filled = anatomy.format(anatomy_data)
-        test_path = (
-            anatomy_filled
-            .get("delivery", {})
-            .get(anatomy_name)
-        )
+        anatomy_filled = anatomy.format_all(anatomy_data)
+        test_path = anatomy_filled["delivery"][anatomy_name]

-        if not test_path:
+        if not test_path.solved:
             msg = (
                 "Missing keys in Representation's context"
                 " for anatomy template \"{}\"."
             ).format(anatomy_name)

-            all_anatomies = anatomy.format_all(anatomy_data)
-            result = None
-            for anatomies in all_anatomies.values():
-                for key, temp in anatomies.get("delivery", {}).items():
-                    if key != anatomy_name:
-                        continue
-
-                    result = temp
-                    break
-
-            # TODO log error! - missing keys in anatomy
-            if result:
-                missing_keys = [
-                    key[1] for key in string.Formatter().parse(result)
-                    if key[1] is not None
-                ]
-            else:
-                missing_keys = ["unknown"]
-
-            keys = ", ".join(missing_keys)
-            sub_msg = (
-                "Representation: {}<br>- Missing keys: \"{}\"<br>"
-            ).format(str(repre["_id"]), keys)
+            if test_path.missing_keys:
+                keys = ", ".join(test_path.missing_keys)
+                sub_msg = (
+                    "Representation: {}<br>- Missing keys: \"{}\"<br>"
+                ).format(str(repre["_id"]), keys)
+
+            if test_path.invalid_types:
+                items = []
+                for key, value in test_path.invalid_types.items():
+                    items.append("\"{}\" {}".format(key, str(value)))
+
+                keys = ", ".join(items)
+                sub_msg = (
+                    "Representation: {}<br>"
+                    "- Invalid value DataType: \"{}\"<br>"
+                ).format(str(repre["_id"]), keys)
             self.report_items[msg].append(sub_msg)
             self.log.warning(
                 "{} Representation: \"{}\" Filled: <{}>".format(
pype/ftrack/actions/action_store_thumbnails_to_avalon.py | 350 (new file)
@@ -0,0 +1,350 @@
import os
import requests
import errno
import json

from bson.objectid import ObjectId
from pype.ftrack import BaseAction
from pype.ftrack.lib import (
    get_project_from_entity,
    get_avalon_entities_for_assetversion
)
from pypeapp import Anatomy
from pype.ftrack.lib.io_nonsingleton import DbConnector


class StoreThumbnailsToAvalon(BaseAction):
    # Action identifier
    identifier = "store.thubmnail.to.avalon"
    # Action label
    label = "Pype Admin"
    # Action variant
    variant = "- Store Thumbnails to avalon"
    # Action description
    description = 'Test action'
    # roles that are allowed to register this action
    role_list = ["Pypeclub", "Administrator", "Project Manager"]

    icon = '{}/ftrack/action_icons/PypeAdmin.svg'.format(
        os.environ.get('PYPE_STATICS_SERVER', '')
    )

    thumbnail_key = "AVALON_THUMBNAIL_ROOT"
    db_con = DbConnector()

    def discover(self, session, entities, event):
        for entity in entities:
            if entity.entity_type.lower() == "assetversion":
                return True
        return False

    def launch(self, session, entities, event):
        # DEBUG LINE
        # root_path = r"C:\Users\jakub.trllo\Desktop\Tests\ftrack_thumbnails"

        user = session.query(
            "User where username is '{0}'".format(session.api_user)
        ).one()
        action_job = session.create("Job", {
            "user": user,
            "status": "running",
            "data": json.dumps({
                "description": "Storing thumbnails to avalon."
            })
        })
        session.commit()

        thumbnail_roots = os.environ.get(self.thumbnail_key)
        if not thumbnail_roots:
            msg = "`{}` environment is not set".format(self.thumbnail_key)

            action_job["status"] = "failed"
            session.commit()

            self.log.warning(msg)

            return {
                "success": False,
                "message": msg
            }

        existing_thumbnail_root = None
        for path in thumbnail_roots.split(os.pathsep):
            if os.path.exists(path):
                existing_thumbnail_root = path
                break

        if existing_thumbnail_root is None:
            msg = (
                "Can't access paths, set in `{}` ({})"
            ).format(self.thumbnail_key, thumbnail_roots)

            action_job["status"] = "failed"
            session.commit()

            self.log.warning(msg)

            return {
                "success": False,
                "message": msg
            }

        project = get_project_from_entity(entities[0])
        project_name = project["full_name"]
        anatomy = Anatomy(project_name)

        if "publish" not in anatomy.templates:
            msg = "Anatomy does not have set publish key!"

            action_job["status"] = "failed"
            session.commit()

            self.log.warning(msg)

            return {
                "success": False,
                "message": msg
            }

        if "thumbnail" not in anatomy.templates["publish"]:
            msg = (
                "There is not set \"thumbnail\""
                " template in Antomy for project \"{}\""
            ).format(project_name)

            action_job["status"] = "failed"
            session.commit()

            self.log.warning(msg)

            return {
                "success": False,
                "message": msg
            }

        example_template_data = {
            "_id": "ID",
            "thumbnail_root": "THUBMNAIL_ROOT",
            "thumbnail_type": "THUMBNAIL_TYPE",
            "ext": ".EXT",
            "project": {
                "name": "PROJECT_NAME",
                "code": "PROJECT_CODE"
            },
            "asset": "ASSET_NAME",
            "subset": "SUBSET_NAME",
            "version": "VERSION_NAME",
            "hierarchy": "HIERARCHY"
        }
        tmp_filled = anatomy.format_all(example_template_data)
        thumbnail_result = tmp_filled["publish"]["thumbnail"]
        if not thumbnail_result.solved:
            missing_keys = thumbnail_result.missing_keys
            invalid_types = thumbnail_result.invalid_types
            submsg = ""
            if missing_keys:
                submsg += "Missing keys: {}".format(", ".join(
                    ["\"{}\"".format(key) for key in missing_keys]
                ))

            if invalid_types:
                items = []
                for key, value in invalid_types.items():
                    items.append("{}{}".format(str(key), str(value)))
                submsg += "Invalid types: {}".format(", ".join(items))

            msg = (
                "Thumbnail Anatomy template expects more keys than action"
                " can offer. {}"
            ).format(submsg)

            action_job["status"] = "failed"
            session.commit()

            self.log.warning(msg)

            return {
                "success": False,
                "message": msg
            }

        thumbnail_template = anatomy.templates["publish"]["thumbnail"]

        self.db_con.install()

        for entity in entities:
            # Skip if entity is not AssetVersion (never should happend, but..)
            if entity.entity_type.lower() != "assetversion":
                continue

            # Skip if AssetVersion don't have thumbnail
            thumbnail_ent = entity["thumbnail"]
            if thumbnail_ent is None:
                self.log.debug((
                    "Skipping. AssetVersion don't "
                    "have set thumbnail. {}"
                ).format(entity["id"]))
                continue

            avalon_ents_result = get_avalon_entities_for_assetversion(
                entity, self.db_con
            )
            version_full_path = (
                "Asset: \"{project_name}/{asset_path}\""
                " | Subset: \"{subset_name}\""
                " | Version: \"{version_name}\""
            ).format(**avalon_ents_result)

            version = avalon_ents_result["version"]
            if not version:
                self.log.warning((
                    "AssetVersion does not have version in avalon. {}"
                ).format(version_full_path))
                continue

            thumbnail_id = version["data"].get("thumbnail_id")
            if thumbnail_id:
                self.log.info((
                    "AssetVersion skipped, already has thubmanil set. {}"
                ).format(version_full_path))
                continue

            # Get thumbnail extension
            file_ext = thumbnail_ent["file_type"]
            if not file_ext.startswith("."):
                file_ext = ".{}".format(file_ext)

            avalon_project = avalon_ents_result["project"]
            avalon_asset = avalon_ents_result["asset"]
            hierarchy = ""
            parents = avalon_asset["data"].get("parents") or []
            if parents:
                hierarchy = "/".join(parents)

            # Prepare anatomy template fill data
            # 1. Create new id for thumbnail entity
            thumbnail_id = ObjectId()

            template_data = {
                "_id": str(thumbnail_id),
                "thumbnail_root": existing_thumbnail_root,
                "thumbnail_type": "thumbnail",
                "ext": file_ext,
                "project": {
                    "name": avalon_project["name"],
                    "code": avalon_project["data"].get("code")
                },
                "asset": avalon_ents_result["asset_name"],
                "subset": avalon_ents_result["subset_name"],
                "version": avalon_ents_result["version_name"],
                "hierarchy": hierarchy
            }

            anatomy_filled = anatomy.format(template_data)
            thumbnail_path = anatomy_filled["publish"]["thumbnail"]
            thumbnail_path = thumbnail_path.replace("..", ".")
            thumbnail_path = os.path.normpath(thumbnail_path)

            downloaded = False
            for loc in (thumbnail_ent.get("component_locations") or []):
                res_id = loc.get("resource_identifier")
                if not res_id:
                    continue

                thubmnail_url = self.get_thumbnail_url(res_id)
                if self.download_file(thubmnail_url, thumbnail_path):
                    downloaded = True
                    break

            if not downloaded:
                self.log.warning(
                    "Could not download thumbnail for {}".format(
                        version_full_path
                    )
                )
                continue

            # Clean template data from keys that are dynamic
            template_data.pop("_id")
            template_data.pop("thumbnail_root")

            thumbnail_entity = {
                "_id": thumbnail_id,
                "type": "thumbnail",
                "schema": "pype:thumbnail-1.0",
                "data": {
                    "template": thumbnail_template,
                    "template_data": template_data
                }
            }

            # Create thumbnail entity
            self.db_con.insert_one(thumbnail_entity)
            self.log.debug(
                "Creating entity in database {}".format(str(thumbnail_entity))
            )

            # Set thumbnail id for version
            self.db_con.update_one(
                {"_id": version["_id"]},
                {"$set": {"data.thumbnail_id": thumbnail_id}}
            )

            self.db_con.update_one(
                {"_id": avalon_asset["_id"]},
                {"$set": {"data.thumbnail_id": thumbnail_id}}
            )

        action_job["status"] = "done"
        session.commit()

        return True

    def get_thumbnail_url(self, resource_identifier, size=None):
        # TODO use ftrack_api method rather (find way how to use it)
        url_string = (
            u'{url}/component/thumbnail?id={id}&username={username}'
            u'&apiKey={apiKey}'
        )
        url = url_string.format(
            url=self.session.server_url,
            id=resource_identifier,
            username=self.session.api_user,
            apiKey=self.session.api_key
        )
        if size:
            url += u'&size={0}'.format(size)

        return url

    def download_file(self, source_url, dst_file_path):
        dir_path = os.path.dirname(dst_file_path)
        try:
            os.makedirs(dir_path)
        except OSError as exc:
            if exc.errno != errno.EEXIST:
                self.log.warning(
                    "Could not create folder: \"{}\"".format(dir_path)
                )
                return False

        self.log.debug(
            "Downloading file \"{}\" -> \"{}\"".format(
                source_url, dst_file_path
            )
        )
        file_open = open(dst_file_path, "wb")
        try:
            file_open.write(requests.get(source_url).content)
        except Exception:
            self.log.warning(
                "Download of image `{}` failed.".format(source_url)
            )
            return False
        finally:
            file_open.close()
        return True


def register(session, plugins_presets={}):
    StoreThumbnailsToAvalon(session, plugins_presets).register()
pype/ftrack/events/event_first_version_status.py | 188 (new file)
@@ -0,0 +1,188 @@
from pype.ftrack import BaseEvent


class FirstVersionStatus(BaseEvent):

    # WARNING Priority MUST be higher
    # than handler in `event_version_to_task_statuses.py`
    priority = 200

    keys_enum = ["task", "task_type"]
    # This should be set with presets
    task_status_map = []

    # EXAMPLE of `task_status_map`
    __example_status_map__ = [{
        # `key` specify where to look for name (is enumerator of `keys_enum`)
        # By default is set to "task"
        "key": "task",
        # speicification of name
        "name": "compositing",
        # Status to set to the asset version
        "status": "Blocking"
    }]

    def register(self, *args, **kwargs):
        result = super(FirstVersionStatus, self).register(*args, **kwargs)

        valid_task_status_map = []
        for item in self.task_status_map:
            key = (item.get("key") or "task").lower()
            name = (item.get("name") or "").lower()
            status = (item.get("status") or "").lower()
            if not (key and name and status):
                self.log.warning((
                    "Invalid item in Task -> Status mapping. {}"
                ).format(str(item)))
                continue

            if key not in self.keys_enum:
                expected_msg = ""
                last_key_idx = len(self.keys_enum) - 1
                for idx, key in enumerate(self.keys_enum):
                    if idx == 0:
                        joining_part = "`{}`"
                    elif idx == last_key_idx:
                        joining_part = "or `{}`"
                    else:
                        joining_part = ", `{}`"
                    expected_msg += joining_part.format(key)

                self.log.warning((
                    "Invalid key `{}`. Expected: {}."
                ).format(key, expected_msg))
                continue

            valid_task_status_map.append({
                "key": key,
                "name": name,
                "status": status
            })

        self.task_status_map = valid_task_status_map
        if not self.task_status_map:
            self.log.warning((
                "Event handler `{}` don't have set presets."
            ).format(self.__class__.__name__))

        return result

    def launch(self, session, event):
        """Set task's status for first created Asset Version."""

        if not self.task_status_map:
            return

        entities_info = self.filter_event_ents(event)
        if not entities_info:
            return

        entity_ids = []
        for entity_info in entities_info:
            entity_ids.append(entity_info["entityId"])

        joined_entity_ids = ",".join(
            ["\"{}\"".format(entity_id) for entity_id in entity_ids]
        )
        asset_versions = session.query(
            "AssetVersion where id in ({})".format(joined_entity_ids)
        ).all()

        asset_version_statuses = None

        project_schema = None
        for asset_version in asset_versions:
            task_entity = asset_version["task"]
            found_item = None
            for item in self.task_status_map:
                if (
                    item["key"] == "task" and
                    task_entity["name"].lower() != item["name"]
                ):
                    continue

                elif (
                    item["key"] == "task_type" and
                    task_entity["type"]["name"].lower() != item["name"]
                ):
                    continue

                found_item = item
                break

            if not found_item:
                continue

            if project_schema is None:
                project_schema = task_entity["project"]["project_schema"]

            # Get all available statuses for Task
            if asset_version_statuses is None:
                statuses = project_schema.get_statuses("AssetVersion")

                # map lowered status name with it's object
                asset_version_statuses = {
                    status["name"].lower(): status for status in statuses
                }

            ent_path = "/".join(
                [ent["name"] for ent in task_entity["link"]] +
                [
                    str(asset_version["asset"]["name"]),
                    str(asset_version["version"])
                ]
            )

            new_status = asset_version_statuses.get(found_item["status"])
            if not new_status:
                self.log.warning(
                    "AssetVersion doesn't have status `{}`."
                ).format(found_item["status"])
                continue

            try:
                asset_version["status"] = new_status
                session.commit()
                self.log.debug("[ {} ] Status updated to [ {} ]".format(
                    ent_path, new_status['name']
                ))

            except Exception:
                session.rollback()
                self.log.warning(
                    "[ {} ] Status couldn't be set.".format(ent_path),
                    exc_info=True
                )

    def filter_event_ents(self, event):
        filtered_ents = []
        for entity in event["data"].get("entities", []):
            # Care only about add actions
            if entity["action"] != "add":
                continue

            # Filter AssetVersions
            if entity["entityType"] != "assetversion":
                continue

            entity_changes = entity.get("changes") or {}

            # Check if version of Asset Version is `1`
            version_num = entity_changes.get("version", {}).get("new")
            if version_num != 1:
                continue

            # Skip in Asset Version don't have task
            task_id = entity_changes.get("taskid", {}).get("new")
            if not task_id:
                continue

            filtered_ents.append(entity)

        return filtered_ents


def register(session, plugins_presets):
    '''Register plugin. Called when used as an plugin.'''

    FirstVersionStatus(session, plugins_presets).register()
@@ -3,6 +3,7 @@ import collections
 import copy
 import queue
 import time
+import datetime
 import atexit
 import traceback

@@ -25,13 +26,9 @@ class SyncToAvalonEvent(BaseEvent):

     dbcon = DbConnector()

-    ignore_entTypes = [
-        "socialfeed", "socialnotification", "note",
-        "assetversion", "job", "user", "reviewsessionobject", "timer",
-        "timelog", "auth_userrole", "appointment"
-    ]
+    interest_entTypes = ["show", "task"]
     ignore_ent_types = ["Milestone"]
-    ignore_keys = ["statusid"]
+    ignore_keys = ["statusid", "thumbid"]

     project_query = (
         "select full_name, name, custom_attributes"
@@ -51,9 +48,39 @@ class SyncToAvalonEvent(BaseEvent):

     def __init__(self, session, plugins_presets={}):
         '''Expects a ftrack_api.Session instance'''
+        # Debug settings
+        # - time expiration in seconds
+        self.debug_print_time_expiration = 5 * 60
+        # - store current time
+        self.debug_print_time = datetime.datetime.now()
+        # - store synchronize entity types to be able to use
+        # only entityTypes in interest instead of filtering by ignored
+        self.debug_sync_types = collections.defaultdict(list)
+
         # Set processing session to not use global
         self.set_process_session(session)
         super().__init__(session, plugins_presets)

+    def debug_logs(self):
+        """This is debug method for printing small debugs messages. """
+        now_datetime = datetime.datetime.now()
+        delta = now_datetime - self.debug_print_time
+        if delta.total_seconds() < self.debug_print_time_expiration:
+            return
+
+        self.debug_print_time = now_datetime
+        known_types_items = []
+        for entityType, entity_type in self.debug_sync_types.items():
+            ent_types_msg = ", ".join(entity_type)
+            known_types_items.append(
+                "<{}> ({})".format(entityType, ent_types_msg)
+            )
+
+        known_entityTypes = ", ".join(known_types_items)
+        self.log.debug(
+            "DEBUG MESSAGE: Known types {}".format(known_entityTypes)
+        )
+
     @property
     def cur_project(self):
         if self._cur_project is None:
@@ -106,9 +133,10 @@ class SyncToAvalonEvent(BaseEvent):
         if self._avalon_ents_by_id is None:
             self._avalon_ents_by_id = {}
             proj, ents = self.avalon_entities
-            self._avalon_ents_by_id[proj["_id"]] = proj
-            for ent in ents:
-                self._avalon_ents_by_id[ent["_id"]] = ent
+            if proj:
+                self._avalon_ents_by_id[proj["_id"]] = proj
+                for ent in ents:
+                    self._avalon_ents_by_id[ent["_id"]] = ent
         return self._avalon_ents_by_id

     @property
@@ -128,13 +156,14 @@ class SyncToAvalonEvent(BaseEvent):
         if self._avalon_ents_by_ftrack_id is None:
             self._avalon_ents_by_ftrack_id = {}
             proj, ents = self.avalon_entities
-            ftrack_id = proj["data"]["ftrackId"]
-            self._avalon_ents_by_ftrack_id[ftrack_id] = proj
-            for ent in ents:
-                ftrack_id = ent["data"].get("ftrackId")
-                if ftrack_id is None:
-                    continue
-                self._avalon_ents_by_ftrack_id[ftrack_id] = ent
+            if proj:
+                ftrack_id = proj["data"]["ftrackId"]
+                self._avalon_ents_by_ftrack_id[ftrack_id] = proj
+                for ent in ents:
+                    ftrack_id = ent["data"].get("ftrackId")
+                    if ftrack_id is None:
+                        continue
+                    self._avalon_ents_by_ftrack_id[ftrack_id] = ent
         return self._avalon_ents_by_ftrack_id

     @property
@@ -477,15 +506,26 @@ class SyncToAvalonEvent(BaseEvent):
         found_actions = set()
         for ent_info in entities_info:
             entityType = ent_info["entityType"]
-            if entityType in self.ignore_entTypes:
+            if entityType not in self.interest_entTypes:
                 continue

+            entity_type = ent_info.get("entity_type")
+            if not entity_type or entity_type in self.ignore_ent_types:
+                continue
+
+            if entity_type not in self.debug_sync_types[entityType]:
+                self.debug_sync_types[entityType].append(entity_type)
+
             action = ent_info["action"]
             ftrack_id = ent_info["entityId"]
+            if isinstance(ftrack_id, list):
+                self.log.warning((
+                    "BUG REPORT: Entity info has `entityId` as `list` \"{}\""
+                ).format(ent_info))
+                if len(ftrack_id) == 0:
+                    continue
+                ftrack_id = ftrack_id[0]
+
             if action == "move":
                 ent_keys = ent_info["keys"]
                 # Seprate update info from move action
@@ -565,8 +605,7 @@ class SyncToAvalonEvent(BaseEvent):
         if auto_sync is not True:
             return True

-        debug_msg = ""
-        debug_msg += "Updated: {}".format(len(updated))
+        debug_msg = "Updated: {}".format(len(updated))
         debug_action_map = {
             "add": "Created",
             "remove": "Removed",
@@ -626,6 +665,8 @@ class SyncToAvalonEvent(BaseEvent):
         self.ftrack_added = entities_by_action["add"]
         self.ftrack_updated = updated

+        self.debug_logs()
+
         self.log.debug("Synchronization begins")
         try:
             time_1 = time.time()
@@ -1437,7 +1478,7 @@ class SyncToAvalonEvent(BaseEvent):
             .get("name", {})
             .get("new")
         )
-        avalon_ent_by_name = self.avalon_ents_by_name.get(name)
+        avalon_ent_by_name = self.avalon_ents_by_name.get(name) or {}
         avalon_ent_by_name_ftrack_id = (
             avalon_ent_by_name
             .get("data", {})
@@ -1537,6 +1578,14 @@ class SyncToAvalonEvent(BaseEvent):
                 entity_type_conf_ids[entity_type] = configuration_id
                 break

+        if not configuration_id:
+            self.log.warning(
+                "BUG REPORT: Missing configuration for `{} < {} >`".format(
+                    entity_type, ent_info["entityType"]
+                )
+            )
+            continue
+
         _entity_key = collections.OrderedDict({
             "configuration_id": configuration_id,
             "entity_id": ftrack_id
@@ -1555,7 +1604,7 @@ class SyncToAvalonEvent(BaseEvent):
         try:
             # Commit changes of mongo_id to empty string
             self.process_session.commit()
-            self.log.debug("Commititng unsetting")
+            self.log.debug("Committing unsetting")
         except Exception:
             self.process_session.rollback()
             # TODO logging
@@ -1635,7 +1684,7 @@ class SyncToAvalonEvent(BaseEvent):
                 new_name, "task", schema_patterns=self.regex_schemas
             )
             if not passed_regex:
-                self.regex_failed.append(ent_infos["entityId"])
+                self.regex_failed.append(ent_info["entityId"])
                 continue

             if new_name not in self.task_changes_by_avalon_id[mongo_id]:
@@ -1820,6 +1869,13 @@ class SyncToAvalonEvent(BaseEvent):
             obj_type_id = ent_info["objectTypeId"]
             ent_cust_attrs = cust_attrs_by_obj_id.get(obj_type_id)

+            if ent_cust_attrs is None:
+                self.log.warning((
+                    "BUG REPORT: Entity has ent type without"
+                    " custom attributes <{}> \"{}\""
+                ).format(entType, ent_info))
+                continue
+
             for key, values in ent_info["changes"].items():
                 if key in hier_attrs_keys:
                     self.hier_cust_attrs_changes[key].append(ftrack_id)
@@ -207,7 +207,9 @@ class UserAssigmentEvent(BaseEvent):
         # formatting work dir is easiest part as we can use whole path
         work_dir = anatomy.format(data)['avalon']['work']
         # we also need publish but not whole
-        publish = anatomy.format_all(data)['partial']['avalon']['publish']
+        filled_all = anatomy.format_all(data)
+        publish = filled_all['avalon']['publish']

         # now find path to {asset}
         m = re.search("(^.+?{})".format(data['asset']),
                       publish)
@@ -4,9 +4,13 @@ import signal
 import datetime
 import subprocess
+import socket
+import json
 import platform
 import argparse
+import getpass
 import atexit
 import time
+import uuid

 import ftrack_api
 from pype.ftrack.lib import credentials
@@ -63,10 +67,19 @@ def validate_credentials(url, user, api):
         )
         session.close()
     except Exception as e:
-        print(
-            'ERROR: Can\'t log into Ftrack with used credentials:'
-            ' Ftrack server: "{}" // Username: {} // API key: {}'
-        ).format(url, user, api)
+        print("Can't log into Ftrack with used credentials:")
+        ftrack_cred = {
+            "Ftrack server": str(url),
+            "Username": str(user),
+            "API key": str(api)
+        }
+        item_lens = [len(key) + 1 for key in ftrack_cred.keys()]
+        justify_len = max(*item_lens)
+        for key, value in ftrack_cred.items():
+            print("{} {}".format(
+                (key + ":").ljust(justify_len, " "),
+                value
+            ))
         return False

     print('DEBUG: Credentials Username: "{}", API key: "{}" are valid.'.format(
@@ -175,6 +188,7 @@ def main_loop(ftrack_url):
     otherwise thread will be killed.
     """

+    os.environ["FTRACK_EVENT_SUB_ID"] = str(uuid.uuid1())
     # Get mongo hostname and port for testing mongo connection
     mongo_list = ftrack_events_mongo_settings()
     mongo_hostname = mongo_list[0]
@@ -202,6 +216,13 @@ def main_loop(ftrack_url):
     processor_last_failed = datetime.datetime.now()
     processor_failed_count = 0

+    statuser_name = "StorerThread"
+    statuser_port = 10021
+    statuser_path = "{}/sub_event_status.py".format(file_path)
+    statuser_thread = None
+    statuser_last_failed = datetime.datetime.now()
+    statuser_failed_count = 0
+
     ftrack_accessible = False
     mongo_accessible = False

@@ -210,7 +231,7 @@ def main_loop(ftrack_url):

     # stop threads on exit
     # TODO check if works and args have thread objects!
-    def on_exit(processor_thread, storer_thread):
+    def on_exit(processor_thread, storer_thread, statuser_thread):
         if processor_thread is not None:
             processor_thread.stop()
             processor_thread.join()
@@ -221,9 +242,27 @@ def main_loop(ftrack_url):
             storer_thread.join()
             storer_thread = None

+        if statuser_thread is not None:
+            statuser_thread.stop()
+            statuser_thread.join()
+            statuser_thread = None
+
     atexit.register(
-        on_exit, processor_thread=processor_thread, storer_thread=storer_thread
+        on_exit,
+        processor_thread=processor_thread,
+        storer_thread=storer_thread,
+        statuser_thread=statuser_thread
     )

+    system_name, pc_name = platform.uname()[:2]
+    host_name = socket.gethostname()
+    main_info = {
+        "created_at": datetime.datetime.now().strftime("%Y.%m.%d %H:%M:%S"),
+        "Username": getpass.getuser(),
+        "Host Name": host_name,
+        "Host IP": socket.gethostbyname(host_name)
+    }
+    main_info_str = json.dumps(main_info)
     # Main loop
     while True:
         # Check if accessible Ftrack and Mongo url
@@ -261,6 +300,52 @@ def main_loop(ftrack_url):
             printed_ftrack_error = False
             printed_mongo_error = False

+        # ====== STATUSER =======
+        if statuser_thread is None:
+            if statuser_failed_count < max_fail_count:
+                statuser_thread = socket_thread.StatusSocketThread(
+                    statuser_name, statuser_port, statuser_path,
+                    [main_info_str]
+                )
+                statuser_thread.start()
+
+            elif statuser_failed_count == max_fail_count:
+                print((
+                    "Statuser failed {}times in row"
+                    " I'll try to run again {}s later"
+                ).format(str(max_fail_count), str(wait_time_after_max_fail)))
+                statuser_failed_count += 1
+
+            elif ((
+                datetime.datetime.now() - statuser_last_failed
+            ).seconds > wait_time_after_max_fail):
+                statuser_failed_count = 0
+
+        # If thread failed test Ftrack and Mongo connection
+        elif not statuser_thread.isAlive():
+            statuser_thread.join()
+            statuser_thread = None
+            ftrack_accessible = False
+            mongo_accessible = False
+
+            _processor_last_failed = datetime.datetime.now()
+            delta_time = (
+                _processor_last_failed - statuser_last_failed
+            ).seconds
+
+            if delta_time < min_fail_seconds:
+                statuser_failed_count += 1
+            else:
+                statuser_failed_count = 0
+            statuser_last_failed = _processor_last_failed
+
+        elif statuser_thread.stop_subprocess:
+            print("Main process was stopped by action")
+            on_exit(processor_thread, storer_thread, statuser_thread)
+            os.kill(os.getpid(), signal.SIGTERM)
+            return 1
+
         # ====== STORER =======
         # Run backup thread which does not requeire mongo to work
         if storer_thread is None:
            if storer_failed_count < max_fail_count:
@@ -268,6 +353,7 @@ def main_loop(ftrack_url):
                     storer_name, storer_port, storer_path
                 )
                 storer_thread.start()
+
             elif storer_failed_count == max_fail_count:
                 print((
                     "Storer failed {}times I'll try to run again {}s later"
@@ -295,6 +381,7 @@ def main_loop(ftrack_url):
                 storer_failed_count = 0
                 storer_last_failed = _storer_last_failed

+        # ====== PROCESSOR =======
         if processor_thread is None:
             if processor_failed_count < max_fail_count:
                 processor_thread = socket_thread.SocketThread(
@@ -336,6 +423,10 @@ def main_loop(ftrack_url):
                 processor_failed_count = 0
                 processor_last_failed = _processor_last_failed

+        if statuser_thread is not None:
+            statuser_thread.set_process("storer", storer_thread)
+            statuser_thread.set_process("processor", processor_thread)
+
         time.sleep(1)

@@ -446,9 +537,9 @@ def main(argv):
     event_paths = kwargs.ftrackeventpaths

     if not kwargs.noloadcred:
-        cred = credentials._get_credentials(True)
+        cred = credentials.get_credentials(ftrack_url)
         username = cred.get('username')
-        api_key = cred.get('apiKey')
+        api_key = cred.get('api_key')

     if kwargs.ftrackuser:
         username = kwargs.ftrackuser
@@ -482,7 +573,7 @@ def main(argv):
         return 1

     if kwargs.storecred:
-        credentials._save_credentials(username, api_key, True)
+        credentials.save_credentials(username, api_key, ftrack_url)

     # Set Ftrack environments
     os.environ["FTRACK_SERVER"] = ftrack_url
@@ -100,9 +100,9 @@ class FtrackServer:
                log.warning(msg, exc_info=e)

        if len(register_functions_dict) < 1:
            raise Exception((
                "There are no events with register function."
                " Registered paths: \"{}\""
            log.warning((
                "There are no events with `register` function"
                " in registered paths: \"{}\""
            ).format("| ".join(paths)))

        # Load presets for setting plugins

@@ -122,7 +122,7 @@ class FtrackServer:
            else:
                register(self.session, plugins_presets=plugins_presets)

            if function_counter%7 == 0:
            if function_counter % 7 == 0:
                time.sleep(0.1)
            function_counter += 1
        except Exception as exc:
@@ -28,6 +28,10 @@ from pypeapp import Logger
from pype.ftrack.lib.custom_db_connector import DbConnector


TOPIC_STATUS_SERVER = "pype.event.server.status"
TOPIC_STATUS_SERVER_RESULT = "pype.event.server.status.result"


def ftrack_events_mongo_settings():
    host = None
    port = None
@@ -123,20 +127,59 @@ def check_ftrack_url(url, log_errors=True):
    return url


class StorerEventHub(ftrack_api.event.hub.EventHub):
class SocketBaseEventHub(ftrack_api.event.hub.EventHub):

    hearbeat_msg = b"hearbeat"
    heartbeat_callbacks = []

    def __init__(self, *args, **kwargs):
        self.sock = kwargs.pop("sock")
        super(StorerEventHub, self).__init__(*args, **kwargs)
        super(SocketBaseEventHub, self).__init__(*args, **kwargs)

    def _handle_packet(self, code, packet_identifier, path, data):
        """Override `_handle_packet` which extends heartbeat"""
        code_name = self._code_name_mapping[code]
        if code_name == "heartbeat":
            # Reply with heartbeat.
            self.sock.sendall(b"storer")
            return self._send_packet(self._code_name_mapping['heartbeat'])
            for callback in self.heartbeat_callbacks:
                callback()

        elif code_name == "connect":
            self.sock.sendall(self.hearbeat_msg)
            return self._send_packet(self._code_name_mapping["heartbeat"])

        return super(SocketBaseEventHub, self)._handle_packet(
            code, packet_identifier, path, data
        )


class StatusEventHub(SocketBaseEventHub):
    def _handle_packet(self, code, packet_identifier, path, data):
        """Override `_handle_packet` which extends heartbeat"""
        code_name = self._code_name_mapping[code]
        if code_name == "connect":
            event = ftrack_api.event.base.Event(
                topic="pype.status.started",
                data={},
                source={
                    "id": self.id,
                    "user": {"username": self._api_user}
                }
            )
            self._event_queue.put(event)

        return super(StatusEventHub, self)._handle_packet(
            code, packet_identifier, path, data
        )


class StorerEventHub(SocketBaseEventHub):

    hearbeat_msg = b"storer"

    def _handle_packet(self, code, packet_identifier, path, data):
        """Override `_handle_packet` which extends heartbeat"""
        code_name = self._code_name_mapping[code]
        if code_name == "connect":
            event = ftrack_api.event.base.Event(
                topic="pype.storer.started",
                data={},

@@ -152,7 +195,9 @@ class StorerEventHub(ftrack_api.event.hub.EventHub):
            )


class ProcessEventHub(ftrack_api.event.hub.EventHub):
class ProcessEventHub(SocketBaseEventHub):

    hearbeat_msg = b"processor"
    url, database, table_name = get_ftrack_event_mongo_info()

    is_table_created = False

@@ -164,7 +209,6 @@ class ProcessEventHub(ftrack_api.event.hub.EventHub):
            database_name=self.database,
            table_name=self.table_name
        )
        self.sock = kwargs.pop("sock")
        super(ProcessEventHub, self).__init__(*args, **kwargs)

    def prepare_dbcon(self):
@@ -260,42 +304,10 @@ class ProcessEventHub(ftrack_api.event.hub.EventHub):
        code_name = self._code_name_mapping[code]
        if code_name == "event":
            return
        if code_name == "heartbeat":
            self.sock.sendall(b"processor")
            return self._send_packet(self._code_name_mapping["heartbeat"])

        return super()._handle_packet(code, packet_identifier, path, data)


class UserEventHub(ftrack_api.event.hub.EventHub):
    def __init__(self, *args, **kwargs):
        self.sock = kwargs.pop("sock")
        super(UserEventHub, self).__init__(*args, **kwargs)

    def _handle_packet(self, code, packet_identifier, path, data):
        """Override `_handle_packet` which extend heartbeat"""
        code_name = self._code_name_mapping[code]
        if code_name == "heartbeat":
            # Reply with heartbeat.
            self.sock.sendall(b"hearbeat")
            return self._send_packet(self._code_name_mapping['heartbeat'])

        elif code_name == "connect":
            event = ftrack_api.event.base.Event(
                topic="pype.storer.started",
                data={},
                source={
                    "id": self.id,
                    "user": {"username": self._api_user}
                }
            )
            self._event_queue.put(event)

        return super(UserEventHub, self)._handle_packet(
            code, packet_identifier, path, data
        )


class SocketSession(ftrack_api.session.Session):
    '''An isolated session for interaction with an ftrack server.'''
    def __init__(
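The pattern above is the core of this refactor: every subprocess-facing hub now inherits from SocketBaseEventHub, which answers ftrack heartbeats both to the ftrack server and, via its identifying `hearbeat_msg`, over a plain TCP socket back to the parent process. A minimal sketch of how a subprocess is expected to wire this up, based only on the calls visible in this diff (the port number below is made up; in practice the parent SocketThread passes it on the command line):

    import socket

    from pype.ftrack.ftrack_server.lib import SocketSession, StorerEventHub

    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.connect(("localhost", 8001))  # hypothetical port chosen by the parent

    session = SocketSession(
        auto_connect_event_hub=True, sock=sock, Eventhub=StorerEventHub
    )
    # From now on every ftrack heartbeat also writes `hearbeat_msg` (b"storer")
    # to the socket, which is how the parent SocketThread detects liveness.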
@@ -1,7 +1,9 @@
import os
import sys
import time
import socket
import threading
import traceback
import subprocess
from pypeapp import Logger


@@ -11,13 +13,15 @@ class SocketThread(threading.Thread):

    MAX_TIMEOUT = 35

    def __init__(self, name, port, filepath):
    def __init__(self, name, port, filepath, additional_args=[]):
        super(SocketThread, self).__init__()
        self.log = Logger().get_logger("SocketThread", "Event Thread")
        self.log = Logger().get_logger(self.__class__.__name__)
        self.setName(name)
        self.name = name
        self.port = port
        self.filepath = filepath
        self.additional_args = additional_args

        self.sock = None
        self.subproc = None
        self.connection = None
@@ -52,8 +56,13 @@ class SocketThread(threading.Thread):
        )

        self.subproc = subprocess.Popen(
            ["python", self.filepath, "-port", str(self.port)],
            stdout=subprocess.PIPE
            [
                sys.executable,
                self.filepath,
                *self.additional_args,
                str(self.port)
            ],
            stdin=subprocess.PIPE
        )

        # Listen for incoming connections
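Two details of the new Popen call are worth noting: `sys.executable` replaces the bare "python" string so the subprocess runs under the same interpreter as the server, and the pipe moves from stdout to stdin so the parent can push commands into the subprocess (the statuser reads "reset:<name>" lines from it). A hedged standalone equivalent, with an invented script name and port:

    import sys
    import subprocess

    proc = subprocess.Popen(
        [sys.executable, "sub_event_storer.py", "8001"],
        stdin=subprocess.PIPE
    )
    # Notify the subprocess that the "storer" thread was replaced.
    proc.stdin.write(b"reset:storer\r\n")
    proc.stdin.flush()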
@@ -115,11 +124,6 @@ class SocketThread(threading.Thread):
        if self.subproc.poll() is None:
            self.subproc.terminate()

        lines = self.subproc.stdout.readlines()
        if lines:
            print("*** Socked Thread stdout ***")
            for line in lines:
                os.write(1, line)
        self.finished = True

    def get_data_from_con(self, connection):
@@ -132,3 +136,52 @@ class SocketThread(threading.Thread):
        if data == b"MongoError":
            self.mongo_error = True
        connection.sendall(data)


class StatusSocketThread(SocketThread):
    process_name_mapping = {
        b"RestartS": "storer",
        b"RestartP": "processor",
        b"RestartM": "main"
    }

    def __init__(self, *args, **kwargs):
        self.process_threads = {}
        self.stop_subprocess = False
        super(StatusSocketThread, self).__init__(*args, **kwargs)

    def set_process(self, process_name, thread):
        try:
            if not self.subproc:
                self.process_threads[process_name] = None
                return

            if (
                process_name in self.process_threads and
                self.process_threads[process_name] == thread
            ):
                return

            self.process_threads[process_name] = thread
            self.subproc.stdin.write(
                str.encode("reset:{}\r\n".format(process_name))
            )
            self.subproc.stdin.flush()

        except Exception:
            print("Could not set thread in StatusSocketThread")
            traceback.print_exception(*sys.exc_info())

    def _handle_data(self, connection, data):
        if not data:
            return

        process_name = self.process_name_mapping.get(data)
        if process_name:
            if process_name == "main":
                self.stop_subprocess = True
            else:
                subp = self.process_threads.get(process_name)
                if subp:
                    subp.stop()
        connection.sendall(data)
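The `process_name_mapping` above defines a tiny one-token restart protocol: the status subprocess writes one of the three byte strings to its socket, and the thread either stops the matching SocketThread (which the main loop then recreates) or flags the whole server to stop. A hedged sketch standing in for the status subprocess, with a made-up port:

    import socket

    # `port` is whatever the parent StatusSocketThread passed on the
    # command line; 8002 here is purely illustrative.
    port = 8002
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.connect(("localhost", port))

    sock.sendall(b"RestartP")  # ask the parent to restart the event processor
    reply = sock.recv(16)      # the thread echoes the command back as an ack
    assert reply == b"RestartP"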
@@ -1,13 +1,59 @@
import os
import sys
import signal
import socket
import datetime

from ftrack_server import FtrackServer
from pype.ftrack.ftrack_server.lib import SocketSession, ProcessEventHub
from pype.ftrack.ftrack_server.lib import (
    SocketSession, ProcessEventHub, TOPIC_STATUS_SERVER
)
import ftrack_api
from pypeapp import Logger

log = Logger().get_logger("Event processor")

subprocess_started = datetime.datetime.now()


class SessionFactory:
    session = None


def send_status(event):
    subprocess_id = event["data"].get("subprocess_id")
    if not subprocess_id:
        return

    if subprocess_id != os.environ["FTRACK_EVENT_SUB_ID"]:
        return

    session = SessionFactory.session
    if not session:
        return

    new_event_data = {
        "subprocess_id": subprocess_id,
        "source": "processor",
        "status_info": {
            "created_at": subprocess_started.strftime("%Y.%m.%d %H:%M:%S")
        }
    }

    new_event = ftrack_api.event.base.Event(
        topic="pype.event.server.status.result",
        data=new_event_data
    )

    session.event_hub.publish(new_event)


def register(session):
    '''Registers the event, subscribing the discover and launch topics.'''
    session.event_hub.subscribe(
        "topic={}".format(TOPIC_STATUS_SERVER), send_status
    )


def main(args):
    port = int(args[-1])

@@ -24,6 +70,9 @@ def main(args):
    session = SocketSession(
        auto_connect_event_hub=True, sock=sock, Eventhub=ProcessEventHub
    )
    register(session)
    SessionFactory.session = session

    server = FtrackServer("event")
    log.debug("Launched Ftrack Event processor")
    server.run_server(session)
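The status round trip implemented by `send_status` works over regular ftrack events. A sketch of the requesting side, with topic names taken from this diff (`session` is assumed to be an already-connected SocketSession):

    import os
    import ftrack_api

    event = ftrack_api.event.base.Event(
        topic="pype.event.server.status",  # TOPIC_STATUS_SERVER
        data={"subprocess_id": os.environ["FTRACK_EVENT_SUB_ID"]}
    )
    session.event_hub.publish(event, on_error="ignore")
    # Every subprocess sharing this FTRACK_EVENT_SUB_ID replies on
    # "pype.event.server.status.result" with its "source" name and a
    # "status_info" payload, which the statuser renders into the action UI.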
436 pype/ftrack/ftrack_server/sub_event_status.py (new file)
@@ -0,0 +1,436 @@
import os
import sys
import json
import threading
import signal
import socket
import datetime

import ftrack_api
from ftrack_server import FtrackServer
from pype.ftrack.ftrack_server.lib import (
    SocketSession, StatusEventHub,
    TOPIC_STATUS_SERVER, TOPIC_STATUS_SERVER_RESULT
)
from pypeapp import Logger, config

log = Logger().get_logger("Event storer")
action_identifier = (
    "event.server.status" + os.environ["FTRACK_EVENT_SUB_ID"]
)
host_ip = socket.gethostbyname(socket.gethostname())
action_data = {
    "label": "Pype Admin",
    "variant": "- Event server Status ({})".format(host_ip),
    "description": "Get Information about event server",
    "actionIdentifier": action_identifier,
    "icon": "{}/ftrack/action_icons/PypeAdmin.svg".format(
        os.environ.get(
            "PYPE_STATICS_SERVER",
            "http://localhost:{}".format(
                config.get_presets().get("services", {}).get(
                    "rest_api", {}
                ).get("default_port", 8021)
            )
        )
    )
}


class ObjectFactory:
    session = None
    status_factory = None
    checker_thread = None
    last_trigger = None


class Status:
    default_item = {
        "type": "label",
        "value": "Process info is not available at this moment."
    }

    def __init__(self, name, label, parent):
        self.name = name
        self.label = label or name
        self.parent = parent

        self.info = None
        self.last_update = None

    def update(self, info):
        self.last_update = datetime.datetime.now()
        self.info = info

    def get_delta_string(self, delta):
        days, hours, minutes = (
            delta.days, delta.seconds // 3600, delta.seconds // 60 % 60
        )
        delta_items = [
            "{}d".format(days),
            "{}h".format(hours),
            "{}m".format(minutes)
        ]
        if not days:
            delta_items.pop(0)
            if not hours:
                delta_items.pop(0)
                delta_items.append("{}s".format(delta.seconds % 60))
                if not minutes:
                    delta_items.pop(0)

        return " ".join(delta_items)

    def get_items(self):
        items = []
        last_update = "N/A"
        if self.last_update:
            delta = datetime.datetime.now() - self.last_update
            last_update = "{} ago".format(
                self.get_delta_string(delta)
            )

        last_update = "Updated: {}".format(last_update)
        items.append({
            "type": "label",
            "value": "#{}".format(self.label)
        })
        items.append({
            "type": "label",
            "value": "##{}".format(last_update)
        })

        if not self.info:
            if self.info is None:
                trigger_info_get()
            items.append(self.default_item)
            return items

        info = {}
        for key, value in self.info.items():
            if key not in ["created_at:", "created_at"]:
                info[key] = value
                continue

            datetime_value = datetime.datetime.strptime(
                value, "%Y.%m.%d %H:%M:%S"
            )
            delta = datetime.datetime.now() - datetime_value

            running_for = self.get_delta_string(delta)
            info["Started at"] = "{} [running: {}]".format(value, running_for)

        for key, value in info.items():
            items.append({
                "type": "label",
                "value": "<b>{}:</b> {}".format(key, value)
            })

        return items


class StatusFactory:

    note_item = {
        "type": "label",
        "value": (
            "<i>HINT: To refresh data uncheck"
            " all checkboxes and hit `Submit` button.</i>"
        )
    }
    splitter_item = {
        "type": "label",
        "value": "---"
    }

    def __init__(self, statuses={}):
        self.statuses = []
        for status in statuses.items():
            self.create_status(*status)

    def __getitem__(self, key):
        return self.get(key)

    def get(self, key, default=None):
        for status in self.statuses:
            if status.name == key:
                return status
        return default

    def is_filled(self):
        for status in self.statuses:
            if status.info is None:
                return False
        return True

    def create_status(self, name, label):
        new_status = Status(name, label, self)
        self.statuses.append(new_status)

    def process_event_result(self, event):
        subprocess_id = event["data"].get("subprocess_id")
        if subprocess_id != os.environ["FTRACK_EVENT_SUB_ID"]:
            return

        source = event["data"]["source"]
        data = event["data"]["status_info"]

        self.update_status_info(source, data)

    def update_status_info(self, process_name, info):
        for status in self.statuses:
            if status.name == process_name:
                status.update(info)
                break

    def bool_items(self):
        items = []
        items.append({
            "type": "label",
            "value": "#Restart process"
        })
        items.append({
            "type": "label",
            "value": (
                "<i><b>WARNING:</b> Main process may shut down when checked"
                " if it does not run as a service!</i>"
            )
        })

        name_labels = {}
        for status in self.statuses:
            name_labels[status.name] = status.label

        for name, label in name_labels.items():
            items.append({
                "type": "boolean",
                "value": False,
                "label": label,
                "name": name
            })
        return items

    def items(self):
        items = []
        items.append(self.note_item)
        items.extend(self.bool_items())

        for status in self.statuses:
            items.append(self.splitter_item)
            items.extend(status.get_items())

        return items


def server_activity_validate_user(event):
    """Validate user permissions to show server info."""
    session = ObjectFactory.session

    username = event["source"].get("user", {}).get("username")
    if not username:
        return False

    user_ent = session.query(
        "User where username = \"{}\"".format(username)
    ).first()
    if not user_ent:
        return False

    role_list = ["Pypeclub", "Administrator"]
    for role in user_ent["user_security_roles"]:
        if role["security_role"]["name"] in role_list:
            return True
    return False


def server_activity_discover(event):
    """Discover action in actions menu conditions."""
    session = ObjectFactory.session
    if session is None:
        return

    if not server_activity_validate_user(event):
        return

    return {"items": [action_data]}


def server_activity(event):
    session = ObjectFactory.session
    if session is None:
        msg = "Session is not set. Can't trigger Reset action."
        log.warning(msg)
        return {
            "success": False,
            "message": msg
        }

    if not server_activity_validate_user(event):
        return {
            "success": False,
            "message": "You don't have permissions to see Event server status!"
        }

    values = event["data"].get("values") or {}
    is_checked = False
    for value in values.values():
        if value:
            is_checked = True
            break

    if not is_checked:
        return {
            "items": ObjectFactory.status_factory.items(),
            "title": "Server current status"
        }

    session = ObjectFactory.session
    if values["main"]:
        session.event_hub.sock.sendall(b"RestartM")
        return

    if values["storer"]:
        session.event_hub.sock.sendall(b"RestartS")

    if values["processor"]:
        session.event_hub.sock.sendall(b"RestartP")


def trigger_info_get():
    if ObjectFactory.last_trigger:
        delta = datetime.datetime.now() - ObjectFactory.last_trigger
        if delta.seconds < 5:
            return

    session = ObjectFactory.session
    session.event_hub.publish(
        ftrack_api.event.base.Event(
            topic=TOPIC_STATUS_SERVER,
            data={"subprocess_id": os.environ["FTRACK_EVENT_SUB_ID"]}
        ),
        on_error="ignore"
    )


def on_start(event):
    session = ObjectFactory.session
    source_id = event.get("source", {}).get("id")
    if not source_id or source_id != session.event_hub.id:
        return

    if session is None:
        log.warning("Session is not set. Can't trigger Sync to avalon action.")
        return True
    trigger_info_get()


def register(session):
    '''Registers the event, subscribing the discover and launch topics.'''
    session.event_hub.subscribe(
        "topic=ftrack.action.discover",
        server_activity_discover
    )
    session.event_hub.subscribe("topic=pype.status.started", on_start)

    status_launch_subscription = (
        "topic=ftrack.action.launch and data.actionIdentifier={}"
    ).format(action_identifier)

    session.event_hub.subscribe(
        status_launch_subscription,
        server_activity
    )

    session.event_hub.subscribe(
        "topic={}".format(TOPIC_STATUS_SERVER_RESULT),
        ObjectFactory.status_factory.process_event_result
    )


def heartbeat():
    if ObjectFactory.status_factory.is_filled():
        return

    trigger_info_get()


def main(args):
    port = int(args[-1])
    server_info = json.loads(args[-2])

    # Create a TCP/IP socket
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)

    # Connect the socket to the port where the server is listening
    server_address = ("localhost", port)
    log.debug("Statuser connected to {} port {}".format(*server_address))
    sock.connect(server_address)
    sock.sendall(b"CreatedStatus")
    # Store socket connection object
    ObjectFactory.sock = sock

    ObjectFactory.status_factory["main"].update(server_info)
    _returncode = 0
    try:
        session = SocketSession(
            auto_connect_event_hub=True, sock=sock, Eventhub=StatusEventHub
        )
        ObjectFactory.session = session
        session.event_hub.heartbeat_callbacks.append(heartbeat)
        register(session)
        server = FtrackServer("event")
        log.debug("Launched Ftrack Event statuser")

        server.run_server(session, load_files=False)

    except Exception:
        _returncode = 1
        log.error("ServerInfo subprocess crashed", exc_info=True)

    finally:
        log.debug("Ending. Closing socket.")
        sock.close()
    return _returncode


class OutputChecker(threading.Thread):
    read_input = True

    def run(self):
        while self.read_input:
            for line in sys.stdin:
                line = line.rstrip().lower()
                if not line.startswith("reset:"):
                    continue
                process_name = line.replace("reset:", "")

                ObjectFactory.status_factory.update_status_info(
                    process_name, None
                )

    def stop(self):
        self.read_input = False


if __name__ == "__main__":
    # Register interrupt signal
    def signal_handler(sig, frame):
        print("You pressed Ctrl+C. Process ended.")
        ObjectFactory.checker_thread.stop()
        sys.exit(0)

    signal.signal(signal.SIGINT, signal_handler)
    signal.signal(signal.SIGTERM, signal_handler)

    statuse_names = {
        "main": "Main process",
        "storer": "Event Storer",
        "processor": "Event Processor"
    }
    ObjectFactory.status_factory = StatusFactory(statuse_names)

    checker_thread = OutputChecker()
    ObjectFactory.checker_thread = checker_thread
    checker_thread.start()

    sys.exit(main(sys.argv))
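A worked example of `Status.get_delta_string` as defined above: units are dropped from the left while they are zero, and seconds only appear when both days and hours are zero.

    import datetime

    delta = datetime.timedelta(hours=3, minutes=12, seconds=40)
    # days == 0, so "d" is dropped and hours are kept -> "3h 12m"
    # datetime.timedelta(minutes=12, seconds=40)      -> "12m 40s"
    # datetime.timedelta(seconds=40)                  -> "40s"
    # datetime.timedelta(days=2, hours=3, minutes=12) -> "2d 3h 12m"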
@@ -8,14 +8,15 @@ import pymongo
import ftrack_api
from ftrack_server import FtrackServer
from pype.ftrack.ftrack_server.lib import (
    SocketSession, StorerEventHub,
    get_ftrack_event_mongo_info,
    SocketSession,
    StorerEventHub,
    TOPIC_STATUS_SERVER, TOPIC_STATUS_SERVER_RESULT
)
from pype.ftrack.lib.custom_db_connector import DbConnector
from pypeapp import Logger

log = Logger().get_logger("Event storer")
subprocess_started = datetime.datetime.now()


class SessionFactory:

@@ -138,11 +139,42 @@ def trigger_sync(event):
    )


def send_status(event):
    session = SessionFactory.session
    if not session:
        return

    subprocess_id = event["data"].get("subprocess_id")
    if not subprocess_id:
        return

    if subprocess_id != os.environ["FTRACK_EVENT_SUB_ID"]:
        return

    new_event_data = {
        "subprocess_id": os.environ["FTRACK_EVENT_SUB_ID"],
        "source": "storer",
        "status_info": {
            "created_at": subprocess_started.strftime("%Y.%m.%d %H:%M:%S")
        }
    }

    new_event = ftrack_api.event.base.Event(
        topic=TOPIC_STATUS_SERVER_RESULT,
        data=new_event_data
    )

    session.event_hub.publish(new_event)


def register(session):
    '''Registers the event, subscribing the discover and launch topics.'''
    install_db()
    session.event_hub.subscribe("topic=*", launch)
    session.event_hub.subscribe("topic=pype.storer.started", trigger_sync)
    session.event_hub.subscribe(
        "topic={}".format(TOPIC_STATUS_SERVER), send_status
    )


def main(args):
@@ -2,12 +2,14 @@ import sys
import signal
import socket

import traceback

from ftrack_server import FtrackServer
from pype.ftrack.ftrack_server.lib import SocketSession, UserEventHub
from pype.ftrack.ftrack_server.lib import SocketSession, SocketBaseEventHub

from pypeapp import Logger

log = Logger().get_logger(__name__)
log = Logger().get_logger("FtrackUserServer")


def main(args):

@@ -18,17 +20,21 @@ def main(args):

    # Connect the socket to the port where the server is listening
    server_address = ("localhost", port)
    log.debug("Storer connected to {} port {}".format(*server_address))
    log.debug(
        "User Ftrack Server connected to {} port {}".format(*server_address)
    )
    sock.connect(server_address)
    sock.sendall(b"CreatedUser")

    try:
        session = SocketSession(
            auto_connect_event_hub=True, sock=sock, Eventhub=UserEventHub
            auto_connect_event_hub=True, sock=sock, Eventhub=SocketBaseEventHub
        )
        server = FtrackServer("action")
        log.debug("Launched Ftrack Event storer")
        log.debug("Launched User Ftrack Server")
        server.run_server(session=session)
    except Exception:
        traceback.print_exception(*sys.exc_info())

    finally:
        log.debug("Closing socket")

@@ -42,7 +48,6 @@ if __name__ == "__main__":
        log.info(
            "Process was forced to stop. Process ended."
        )
        log.info("Process ended.")
        sys.exit(0)

    signal.signal(signal.SIGINT, signal_handler)
@@ -1,6 +1,11 @@
from . import avalon_sync
from .credentials import *
from . import credentials
from .ftrack_app_handler import *
from .ftrack_event_handler import *
from .ftrack_action_handler import *
from .ftrack_base_handler import *

from .lib import (
    get_project_from_entity,
    get_avalon_entities_for_assetversion
)
@@ -236,6 +236,7 @@ class SyncEntitiesFactory:
        " from TypedContext where project_id is \"{}\""
    )
    ignore_custom_attr_key = "avalon_ignore_sync"
    ignore_entity_types = ["milestone"]

    report_splitter = {"type": "label", "value": "---"}


@@ -366,7 +367,10 @@ class SyncEntitiesFactory:
            parent_id = entity["parent_id"]
            entity_type = entity.entity_type
            entity_type_low = entity_type.lower()
            if entity_type_low == "task":
            if entity_type_low in self.ignore_entity_types:
                continue

            elif entity_type_low == "task":
                entities_dict[parent_id]["tasks"].append(entity["name"])
                continue


@@ -1722,7 +1726,11 @@ class SyncEntitiesFactory:
        self.avalon_project_id = new_id

        self._avalon_ents_by_id[str(new_id)] = project_item
        if self._avalon_ents_by_ftrack_id is None:
            self._avalon_ents_by_ftrack_id = {}
        self._avalon_ents_by_ftrack_id[self.ft_project_id] = str(new_id)
        if self._avalon_ents_by_name is None:
            self._avalon_ents_by_name = {}
        self._avalon_ents_by_name[project_item["name"]] = str(new_id)

        self.create_list.append(project_item)

@@ -1991,7 +1999,7 @@ class SyncEntitiesFactory:
                vis_par = ent["data"]["visualParent"]
                if (
                    vis_par is not None and
                    str(vis_par) in self.deleted_entities
                    str(vis_par) in _deleted_entities
                ):
                    continue
                _ready.append(mongo_id)

@@ -2059,9 +2067,10 @@ class SyncEntitiesFactory:
                # different hierarchy - can't recreate entity
                continue

            _vis_parent = str(deleted_entity["data"]["visualParent"])
            _vis_parent = deleted_entity["data"]["visualParent"]
            if _vis_parent is None:
                _vis_parent = self.avalon_project_id
            _vis_parent = str(_vis_parent)
            ftrack_parent_id = self.avalon_ftrack_mapper[_vis_parent]
            self.create_ftrack_ent_from_avalon_ent(
                deleted_entity, ftrack_parent_id
@@ -2,85 +2,140 @@ import os
import json
import ftrack_api
import appdirs
import getpass
try:
    from urllib.parse import urlparse
except ImportError:
    from urlparse import urlparse


config_path = os.path.normpath(appdirs.user_data_dir('pype-app', 'pype'))
action_file_name = 'ftrack_cred.json'
event_file_name = 'ftrack_event_cred.json'
action_fpath = os.path.join(config_path, action_file_name)
event_fpath = os.path.join(config_path, event_file_name)
folders = set([os.path.dirname(action_fpath), os.path.dirname(event_fpath)])
CONFIG_PATH = os.path.normpath(appdirs.user_data_dir("pype-app", "pype"))
CREDENTIALS_FILE_NAME = "ftrack_cred.json"
CREDENTIALS_PATH = os.path.join(CONFIG_PATH, CREDENTIALS_FILE_NAME)
CREDENTIALS_FOLDER = os.path.dirname(CREDENTIALS_PATH)

for folder in folders:
    if not os.path.isdir(folder):
        os.makedirs(folder)
if not os.path.isdir(CREDENTIALS_FOLDER):
    os.makedirs(CREDENTIALS_FOLDER)

USER_GETTER = None


def _get_credentials(event=False):
    if event:
        fpath = event_fpath
    else:
        fpath = action_fpath
def get_ftrack_hostname(ftrack_server=None):
    if not ftrack_server:
        ftrack_server = os.environ["FTRACK_SERVER"]

    if "//" not in ftrack_server:
        ftrack_server = "//" + ftrack_server

    return urlparse(ftrack_server).hostname


def get_user():
    if USER_GETTER:
        return USER_GETTER()
    return getpass.getuser()


def get_credentials(ftrack_server=None, user=None):
    credentials = {}
    try:
        file = open(fpath, 'r')
        credentials = json.load(file)
    except Exception:
        file = open(fpath, 'w')
    if not os.path.exists(CREDENTIALS_PATH):
        with open(CREDENTIALS_PATH, "w") as file:
            file.write(json.dumps(credentials))
        file.close()
        return credentials

    file.close()
    with open(CREDENTIALS_PATH, "r") as file:
        content = file.read()

    hostname = get_ftrack_hostname(ftrack_server)
    if not user:
        user = get_user()

    content_json = json.loads(content or "{}")
    credentials = content_json.get(hostname, {}).get(user) or {}

    return credentials


def _save_credentials(username, apiKey, event=False, auto_connect=None):
    data = {
        'username': username,
        'apiKey': apiKey
def save_credentials(ft_user, ft_api_key, ftrack_server=None, user=None):
    hostname = get_ftrack_hostname(ftrack_server)
    if not user:
        user = get_user()

    with open(CREDENTIALS_PATH, "r") as file:
        content = file.read()

    content_json = json.loads(content or "{}")
    if hostname not in content_json:
        content_json[hostname] = {}

    content_json[hostname][user] = {
        "username": ft_user,
        "api_key": ft_api_key
    }

    if event:
        fpath = event_fpath
        if auto_connect is None:
            cred = _get_credentials(True)
            auto_connect = cred.get('auto_connect', False)
        data['auto_connect'] = auto_connect
    else:
        fpath = action_fpath
    # Deprecated keys
    if "username" in content_json:
        content_json.pop("username")
    if "apiKey" in content_json:
        content_json.pop("apiKey")

    file = open(fpath, 'w')
    file.write(json.dumps(data))
    file.close()
    with open(CREDENTIALS_PATH, "w") as file:
        file.write(json.dumps(content_json, indent=4))


def _clear_credentials(event=False):
    if event:
        fpath = event_fpath
    else:
        fpath = action_fpath
    open(fpath, 'w').close()
    _set_env(None, None)
def clear_credentials(ft_user=None, ftrack_server=None, user=None):
    if not ft_user:
        ft_user = os.environ.get("FTRACK_API_USER")

    if not ft_user:
        return

    hostname = get_ftrack_hostname(ftrack_server)
    if not user:
        user = get_user()

    with open(CREDENTIALS_PATH, "r") as file:
        content = file.read()

    content_json = json.loads(content or "{}")
    if hostname not in content_json:
        content_json[hostname] = {}

    content_json[hostname].pop(user, None)

    with open(CREDENTIALS_PATH, "w") as file:
        file.write(json.dumps(content_json))


def _set_env(username, apiKey):
    if not username:
        username = ''
    if not apiKey:
        apiKey = ''
    os.environ['FTRACK_API_USER'] = username
    os.environ['FTRACK_API_KEY'] = apiKey
def set_env(ft_user=None, ft_api_key=None):
    os.environ["FTRACK_API_USER"] = ft_user or ""
    os.environ["FTRACK_API_KEY"] = ft_api_key or ""


def _check_credentials(username=None, apiKey=None):
def get_env_credentials():
    return (
        os.environ.get("FTRACK_API_USER"),
        os.environ.get("FTRACK_API_KEY")
    )

    if username and apiKey:
        _set_env(username, apiKey)

def check_credentials(ft_user, ft_api_key, ftrack_server=None):
    if not ftrack_server:
        ftrack_server = os.environ["FTRACK_SERVER"]

    if not ft_user or not ft_api_key:
        return False

    try:
        session = ftrack_api.Session()
        session = ftrack_api.Session(
            server_url=ftrack_server,
            api_key=ft_api_key,
            api_user=ft_user
        )
        session.close()
    except Exception as e:

    except Exception:
        return False

    return True
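The rewrite above replaces the two per-purpose credential files with a single ftrack_cred.json keyed by ftrack hostname, then by local (or UserModule-provided) user name. An example of the on-disk structure written by save_credentials, with a made-up hostname and names:

    example_content = {
        "studio.ftrackapp.com": {
            "jdoe": {
                "username": "john.doe",
                "api_key": "xxxx-xxxx-xxxx"
            }
        }
    }

This is also why get_credentials, save_credentials and clear_credentials all take the optional ftrack_server/user pair: the same file can hold entries for several servers and several machine users at once.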
@@ -193,6 +193,8 @@ class AppAction(BaseHandler):
        if parents:
            hierarchy = os.path.join(*parents)

        os.environ["AVALON_HIERARCHY"] = hierarchy

        application = avalonlib.get_application(os.environ["AVALON_APP_NAME"])

        data = {
@@ -49,7 +49,7 @@ class BaseHandler(object):
            ).format(
                str(type(session)),
                str(ftrack_api.session.Session),
                str(session_processor.ProcessSession)
                str(SocketSession)
            ))

        self._session = session
135 pype/ftrack/lib/lib.py (new file)
@@ -0,0 +1,135 @@
from bson.objectid import ObjectId

from .avalon_sync import CustAttrIdKey
import avalon.io


def get_project_from_entity(entity):
    # TODO add more entities
    ent_type_lowered = entity.entity_type.lower()
    if ent_type_lowered == "project":
        return entity

    elif ent_type_lowered == "assetversion":
        return entity["asset"]["parent"]["project"]

    elif "project" in entity:
        return entity["project"]

    return None


def get_avalon_entities_for_assetversion(asset_version, db_con=None):
    output = {
        "success": True,
        "message": None,
        "project": None,
        "project_name": None,
        "asset": None,
        "asset_name": None,
        "asset_path": None,
        "subset": None,
        "subset_name": None,
        "version": None,
        "version_name": None,
        "representations": None
    }

    if db_con is None:
        db_con = avalon.io
    db_con.install()

    ft_asset = asset_version["asset"]
    subset_name = ft_asset["name"]
    version = asset_version["version"]
    parent = ft_asset["parent"]
    ent_path = "/".join(
        [ent["name"] for ent in parent["link"]]
    )
    project = get_project_from_entity(asset_version)
    project_name = project["full_name"]

    output["project_name"] = project_name
    output["asset_name"] = parent["name"]
    output["asset_path"] = ent_path
    output["subset_name"] = subset_name
    output["version_name"] = version

    db_con.Session["AVALON_PROJECT"] = project_name

    avalon_project = db_con.find_one({"type": "project"})
    output["project"] = avalon_project

    if not avalon_project:
        output["success"] = False
        output["message"] = "Project not synchronized to avalon `{}`".format(
            project_name
        )
        return output

    asset_ent = None
    asset_mongo_id = parent["custom_attributes"].get(CustAttrIdKey)
    if asset_mongo_id:
        try:
            asset_mongo_id = ObjectId(asset_mongo_id)
            asset_ent = db_con.find_one({
                "type": "asset",
                "_id": asset_mongo_id
            })
        except Exception:
            pass

    if not asset_ent:
        asset_ent = db_con.find_one({
            "type": "asset",
            "data.ftrackId": parent["id"]
        })

    output["asset"] = asset_ent

    if not asset_ent:
        output["success"] = False
        output["message"] = "Entity not synchronized to avalon `{}`".format(
            ent_path
        )
        return output

    asset_mongo_id = asset_ent["_id"]

    subset_ent = db_con.find_one({
        "type": "subset",
        "parent": asset_mongo_id,
        "name": subset_name
    })

    output["subset"] = subset_ent

    if not subset_ent:
        output["success"] = False
        output["message"] = (
            "Subset `{}` does not exist under Asset `{}`"
        ).format(subset_name, ent_path)
        return output

    version_ent = db_con.find_one({
        "type": "version",
        "name": version,
        "parent": subset_ent["_id"]
    })

    output["version"] = version_ent

    if not version_ent:
        output["success"] = False
        output["message"] = (
            "Version `{}` does not exist under Subset `{}` | Asset `{}`"
        ).format(version, subset_name, ent_path)
        return output

    repre_ents = list(db_con.find({
        "type": "representation",
        "parent": version_ent["_id"]
    }))

    output["representations"] = repre_ents
    return output
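A hedged usage sketch for the helper above, assuming a connected ftrack session; the AssetVersion id is made up and the call walks from ftrack down to the avalon representations, stopping with `success=False` at the first missing link:

    asset_version = session.query(
        "AssetVersion where id is \"a1b2c3\""
    ).one()

    result = get_avalon_entities_for_assetversion(asset_version)
    if not result["success"]:
        print(result["message"])
    else:
        print(result["asset_path"], result["subset_name"], result["version_name"])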
@@ -34,29 +34,28 @@ class FtrackModule:

    def validate(self):
        validation = False
        cred = credentials._get_credentials()
        try:
            if 'username' in cred and 'apiKey' in cred:
                validation = credentials._check_credentials(
                    cred['username'],
                    cred['apiKey']
                )
                if validation is False:
                    self.show_login_widget()
            else:
                self.show_login_widget()

        except Exception as e:
            log.error("We are unable to connect to Ftrack: {0}".format(e))

        validation = credentials._check_credentials()
        if validation is True:
        cred = credentials.get_credentials()
        ft_user = cred.get("username")
        ft_api_key = cred.get("api_key")
        validation = credentials.check_credentials(ft_user, ft_api_key)
        if validation:
            credentials.set_env(ft_user, ft_api_key)
            log.info("Connected to Ftrack successfully")
            self.loginChange()
        else:
            log.warning("Please sign in to Ftrack")
            self.bool_logged = False
            self.set_menu_visibility()

            return validation

        if not validation and ft_user and ft_api_key:
            log.warning(
                "Current Ftrack credentials are not valid. {}: {} - {}".format(
                    str(os.environ.get("FTRACK_SERVER")), ft_user, ft_api_key
                )
            )

        log.info("Please sign in to Ftrack")
        self.bool_logged = False
        self.show_login_widget()
        self.set_menu_visibility()

        return validation

@@ -67,7 +66,7 @@ class FtrackModule:
        self.start_action_server()

    def logout(self):
        credentials._clear_credentials()
        credentials.clear_credentials()
        self.stop_action_server()

        log.info("Logged out of Ftrack")

@@ -171,7 +170,7 @@ class FtrackModule:

        # If thread failed test Ftrack and Mongo connection
        elif not self.thread_socket_server.isAlive():
            self.thread_socket_server_thread.join()
            self.thread_socket_server.join()
            self.thread_socket_server = None
            ftrack_accessible = False

@@ -307,11 +306,23 @@ class FtrackModule:
        except Exception as e:
            log.error("During Killing Timer event server: {0}".format(e))

    def changed_user(self):
        self.stop_action_server()
        credentials.set_env()
        self.validate()

    def process_modules(self, modules):
        if 'TimersManager' in modules:
            self.timer_manager = modules['TimersManager']
            self.timer_manager.add_module(self)

        if "UserModule" in modules:
            credentials.USER_GETTER = modules["UserModule"].get_user
            modules["UserModule"].register_callback_on_user_change(
                self.changed_user
            )

    def start_timer_manager(self, data):
        if self.thread_timer is not None:
            self.thread_timer.ftrack_start_timer(data)

@@ -336,7 +347,7 @@ class FtrackEventsThread(QtCore.QThread):

    def __init__(self, parent):
        super(FtrackEventsThread, self).__init__()
        cred = credentials._get_credentials()
        cred = credentials.get_credentials()
        self.username = cred['username']
        self.user = None
        self.last_task = None
@@ -204,11 +204,11 @@ class Login_Dialog_ui(QtWidgets.QWidget):
            self.setError("{0} {1}".format(msg, " and ".join(missing)))
            return

        verification = credentials._check_credentials(username, apiKey)
        verification = credentials.check_credentials(username, apiKey)

        if verification:
            credentials._save_credentials(username, apiKey, self.is_event)
            credentials._set_env(username, apiKey)
            credentials.save_credentials(username, apiKey, self.is_event)
            credentials.set_env(username, apiKey)
            if self.parent is not None:
                self.parent.loginChange()
            self._close_widget()

@@ -304,11 +304,11 @@ class Login_Dialog_ui(QtWidgets.QWidget):
            self._login_server_thread.start(url)
            return

        verification = credentials._check_credentials(username, apiKey)
        verification = credentials.check_credentials(username, apiKey)

        if verification is True:
            credentials._save_credentials(username, apiKey, self.is_event)
            credentials._set_env(username, apiKey)
            credentials.save_credentials(username, apiKey, self.is_event)
            credentials.set_env(username, apiKey)
            if self.parent is not None:
                self.parent.loginChange()
            self._close_widget()
123 pype/lib.py
@@ -13,6 +13,62 @@ import avalon
log = logging.getLogger(__name__)


def get_paths_from_environ(env_key, return_first=False):
    """Return existing paths from a specific environment variable.

    :param env_key: Environment variable key in which to look for paths.
    :type env_key: str
    :param return_first: Return first path on `True`, list of all on `False`.
    :type return_first: boolean

    Difference when none of the paths exists:
    - when `return_first` is set to `False` the function returns an empty list.
    - when `return_first` is set to `True` the function returns `None`.
    """

    existing_paths = []
    paths = os.environ.get(env_key) or ""
    path_items = paths.split(os.pathsep)
    for path in path_items:
        # Skip empty string
        if not path:
            continue
        # Normalize path
        path = os.path.normpath(path)
        # Check if path exists
        if os.path.exists(path):
            # Return path if `return_first` is set to True
            if return_first:
                return path
            # Store path
            existing_paths.append(path)

    # Return None if none of the paths exists
    if return_first:
        return None
    # Return all existing paths from the environment variable
    return existing_paths


def get_ffmpeg_tool_path(tool="ffmpeg"):
    """Find path to an ffmpeg tool in FFMPEG_PATH paths.

    Function looks for the tool in paths set in the FFMPEG_PATH environment
    variable. If the tool exists, its full path is returned.

    Returns the tool name itself when the tool path was not found. (FFmpeg
    path may be set in the PATH environment variable.)
    """

    dir_paths = get_paths_from_environ("FFMPEG_PATH")
    for dir_path in dir_paths:
        for file_name in os.listdir(dir_path):
            base, ext = os.path.splitext(file_name)
            if base.lower() == tool.lower():
                return os.path.join(dir_path, tool)
    return tool


# Special naming case for subprocess since it's a built-in module.
def _subprocess(*args, **kwargs):
    """Convenience method for getting output errors for subprocess."""
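A short usage sketch for the two helpers above; the directories are made up, and on a real machine only the ones that actually exist are returned:

    import os

    os.environ["FFMPEG_PATH"] = os.pathsep.join([
        "/opt/ffmpeg/bin", "/usr/local/ffmpeg/bin"
    ])

    all_dirs = get_paths_from_environ("FFMPEG_PATH")
    first_dir = get_paths_from_environ("FFMPEG_PATH", return_first=True)
    # Full path to ffprobe if found in FFMPEG_PATH, otherwise "ffprobe"
    # as a fallback that relies on PATH.
    ffprobe_path = get_ffmpeg_tool_path("ffprobe")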
@@ -361,23 +417,7 @@ def _get_host_name():


def get_asset(asset_name=None):
    entity_data_keys_from_project_when_miss = [
        "frameStart", "frameEnd", "handleStart", "handleEnd", "fps",
        "resolutionWidth", "resolutionHeight"
    ]

    entity_keys_from_project_when_miss = []

    alternatives = {
        "handleStart": "handles",
        "handleEnd": "handles"
    }

    defaults = {
        "handleStart": 0,
        "handleEnd": 0
    }

    """ Returning asset document from database """
    if not asset_name:
        asset_name = avalon.api.Session["AVALON_ASSET"]


@@ -385,57 +425,10 @@ def get_asset(asset_name=None):
        "name": asset_name,
        "type": "asset"
    })

    if not asset_document:
        raise TypeError("Entity \"{}\" was not found in DB".format(asset_name))

    project_document = io.find_one({"type": "project"})

    for key in entity_data_keys_from_project_when_miss:
        if asset_document["data"].get(key):
            continue

        value = project_document["data"].get(key)
        if value is not None or key not in alternatives:
            asset_document["data"][key] = value
            continue

        alt_key = alternatives[key]
        value = asset_document["data"].get(alt_key)
        if value is not None:
            asset_document["data"][key] = value
            continue

        value = project_document["data"].get(alt_key)
        if value:
            asset_document["data"][key] = value
            continue

        if key in defaults:
            asset_document["data"][key] = defaults[key]

    for key in entity_keys_from_project_when_miss:
        if asset_document.get(key):
            continue

        value = project_document.get(key)
        if value is not None or key not in alternatives:
            asset_document[key] = value
            continue

        alt_key = alternatives[key]
        value = asset_document.get(alt_key)
        if value:
            asset_document[key] = value
            continue

        value = project_document.get(alt_key)
        if value:
            asset_document[key] = value
            continue

        if key in defaults:
            asset_document[key] = defaults[key]

    return asset_document
@@ -8,7 +8,6 @@ from avalon import api as avalon, pipeline, maya
from avalon.maya.pipeline import IS_HEADLESS
from avalon.tools import workfiles
from pyblish import api as pyblish
from pypeapp import config

from ..lib import (
    any_outdated

@@ -156,12 +155,19 @@ def on_open(_):
    from avalon.vendor.Qt import QtWidgets
    from ..widgets import popup

    cmds.evalDeferred(
        "from pype.maya import lib;lib.remove_render_layer_observer()")
    cmds.evalDeferred(
        "from pype.maya import lib;lib.add_render_layer_observer()")
    cmds.evalDeferred(
        "from pype.maya import lib;lib.add_render_layer_change_observer()")
    # # Update current task for the current scene
    # update_task_from_path(cmds.file(query=True, sceneName=True))

    # Validate FPS after update_task_from_path to
    # ensure it is using correct FPS for the asset
    lib.validate_fps()
    lib.fix_incompatible_containers()

    if any_outdated():
        log.warning("Scene has outdated content.")

@@ -193,6 +199,12 @@ def on_new(_):
    """Set project resolution and fps when creating a new file"""
    avalon.logger.info("Running callback on new..")
    with maya.suspended_refresh():
        cmds.evalDeferred(
            "from pype.maya import lib;lib.remove_render_layer_observer()")
        cmds.evalDeferred(
            "from pype.maya import lib;lib.add_render_layer_observer()")
        cmds.evalDeferred(
            "from pype.maya import lib;lib.add_render_layer_change_observer()")
        lib.set_context_settings()


@@ -217,3 +229,10 @@ def on_task_changed(*args):

    # Run
    maya.pipeline._on_task_changed()
    with maya.suspended_refresh():
        lib.set_context_settings()
        lib.update_content_on_context_change()

    lib.show_message("Context was changed",
                     ("Context was changed to {}".format(
                         avalon.Session["AVALON_ASSET"])))
237 pype/maya/lib.py
@@ -2176,18 +2176,29 @@ def load_capture_preset(path=None, data=None):
                 4: 'nolights'}
    for key in preset[id]:
        if key == 'high_quality':
            temp_options2['multiSampleEnable'] = True
            temp_options2['multiSampleCount'] = 8
            temp_options2['textureMaxResolution'] = 1024
            temp_options2['enableTextureMaxRes'] = True
            if preset[id][key] == True:
                temp_options2['multiSampleEnable'] = True
                temp_options2['multiSampleCount'] = 4
                temp_options2['textureMaxResolution'] = 1024
                temp_options2['enableTextureMaxRes'] = True
                temp_options2['textureMaxResMode'] = 1
            else:
                temp_options2['multiSampleEnable'] = False
                temp_options2['multiSampleCount'] = 4
                temp_options2['textureMaxResolution'] = 512
                temp_options2['enableTextureMaxRes'] = True
                temp_options2['textureMaxResMode'] = 0

        if key == 'ssaoEnable':
            if preset[id][key] == True:
                temp_options2['ssaoEnable'] = True
            else:
                temp_options2['ssaoEnable'] = False

        if key == 'alphaCut':
            temp_options2['transparencyAlgorithm'] = 5
            temp_options2['transparencyQuality'] = 1

        if key == 'ssaoEnable':
            temp_options2['ssaoEnable'] = True

        if key == 'headsUpDisplay':
            temp_options['headsUpDisplay'] = True
@@ -2318,6 +2329,25 @@ def get_attr_in_layer(attr, layer):
    return cmds.getAttr(attr)


def fix_incompatible_containers():
    """Replace loaders of obsolete container types with ReferenceLoader."""

    host = avalon.api.registered_host()
    for container in host.ls():
        loader = container['loader']

        print(container['loader'])

        if loader in ["MayaAsciiLoader",
                      "AbcLoader",
                      "ModelLoader",
                      "CameraLoader",
                      "RigLoader",
                      "FBXLoader"]:
            cmds.setAttr(container["objectName"] + ".loader",
                         "ReferenceLoader", type="string")


def _null(*args):
    pass
@@ -2369,15 +2399,19 @@ class shelf():
            if not item.get('command'):
                item['command'] = self._null
            if item['type'] == 'button':
                self.addButon(item['name'], command=item['command'])
                self.addButon(item['name'],
                              command=item['command'],
                              icon=item['icon'])
            if item['type'] == 'menuItem':
                self.addMenuItem(item['parent'],
                                 item['name'],
                                 command=item['command'])
                                 command=item['command'],
                                 icon=item['icon'])
            if item['type'] == 'subMenu':
                self.addMenuItem(item['parent'],
                                 item['name'],
                                 command=item['command'])
                                 command=item['command'],
                                 icon=item['icon'])

    def addButon(self, label, icon="commandButton.png",
                 command=_null, doubleCommand=_null):

@@ -2387,7 +2421,8 @@ class shelf():
        '''
        cmds.setParent(self.name)
        if icon:
            icon = self.iconPath + icon
            icon = os.path.join(self.iconPath, icon)
            print(icon)
        cmds.shelfButton(width=37, height=37, image=icon, label=label,
                         command=command, dcc=doubleCommand,
                         imageOverlayLabel=label, olb=self.labelBackground,

@@ -2399,7 +2434,8 @@ class shelf():
        double click command and image.
        '''
        if icon:
            icon = self.iconPath + icon
            icon = os.path.join(self.iconPath, icon)
            print(icon)
        return cmds.menuItem(p=parent, label=label, c=command, i="")

    def addSubMenu(self, parent, label, icon=None):

@@ -2408,7 +2444,8 @@ class shelf():
        the specified parent popup menu.
        '''
        if icon:
            icon = self.iconPath + icon
            icon = os.path.join(self.iconPath, icon)
            print(icon)
        return cmds.menuItem(p=parent, label=label, i=icon, subMenu=1)

    def _cleanOldShelf(self):
@@ -2422,3 +2459,177 @@ class shelf():
                cmds.deleteUI(each)
        else:
            cmds.shelfLayout(self.name, p="ShelfLayout")


def _get_render_instance():
    objectset = cmds.ls("*.id", long=True, type="objectSet",
                        recursive=True, objectsOnly=True)

    for objset in objectset:

        if not cmds.attributeQuery("id", node=objset, exists=True):
            continue

        id_attr = "{}.id".format(objset)
        if cmds.getAttr(id_attr) != "pyblish.avalon.instance":
            continue

        has_family = cmds.attributeQuery("family",
                                         node=objset,
                                         exists=True)
        if not has_family:
            continue

        if cmds.getAttr("{}.family".format(objset)) == 'rendering':
            return objset

    return None


renderItemObserverList = []


class RenderSetupListObserver:

    def listItemAdded(self, item):
        print("--- adding ...")
        self._add_render_layer(item)

    def listItemRemoved(self, item):
        print("--- removing ...")
        self._remove_render_layer(item.name())

    def _add_render_layer(self, item):
        render_set = _get_render_instance()
        layer_name = item.name()

        if not render_set:
            return

        members = cmds.sets(render_set, query=True) or []
        if not "LAYER_{}".format(layer_name) in members:
            print(" - creating set for {}".format(layer_name))
            set = cmds.sets(n="LAYER_{}".format(layer_name), empty=True)
            cmds.sets(set, forceElement=render_set)
            rio = RenderSetupItemObserver(item)
            print("- adding observer for {}".format(item.name()))
            item.addItemObserver(rio.itemChanged)
            renderItemObserverList.append(rio)

    def _remove_render_layer(self, layer_name):
        render_set = _get_render_instance()

        if not render_set:
            return

        members = cmds.sets(render_set, query=True)
        if "LAYER_{}".format(layer_name) in members:
            print(" - removing set for {}".format(layer_name))
            cmds.delete("LAYER_{}".format(layer_name))


class RenderSetupItemObserver():

    def __init__(self, item):
        self.item = item
        self.original_name = item.name()

    def itemChanged(self, *args, **kwargs):
        if self.item.name() == self.original_name:
            return

        render_set = _get_render_instance()

        if not render_set:
            return

        members = cmds.sets(render_set, query=True)
        if "LAYER_{}".format(self.original_name) in members:
            print(" <> renaming {} to {}".format(self.original_name,
                                                 self.item.name()))
            cmds.rename("LAYER_{}".format(self.original_name),
                        "LAYER_{}".format(self.item.name()))
        self.original_name = self.item.name()


renderListObserver = RenderSetupListObserver()


def add_render_layer_change_observer():
    import maya.app.renderSetup.model.renderSetup as renderSetup

    rs = renderSetup.instance()
    render_set = _get_render_instance()
    if not render_set:
        return

    members = cmds.sets(render_set, query=True)
    layers = rs.getRenderLayers()
    for layer in layers:
        if "LAYER_{}".format(layer.name()) in members:
            rio = RenderSetupItemObserver(layer)
            print("- adding observer for {}".format(layer.name()))
            layer.addItemObserver(rio.itemChanged)
            renderItemObserverList.append(rio)


def add_render_layer_observer():
    import maya.app.renderSetup.model.renderSetup as renderSetup

    print("> adding renderSetup observer ...")
    rs = renderSetup.instance()
    rs.addListObserver(renderListObserver)
    pass


def remove_render_layer_observer():
    import maya.app.renderSetup.model.renderSetup as renderSetup

    print("< removing renderSetup observer ...")
    rs = renderSetup.instance()
    try:
        rs.removeListObserver(renderListObserver)
    except ValueError:
        # no observer set yet
        pass


def update_content_on_context_change():
    """
    Update scene content to match the new asset on context change.
    """
    scene_sets = cmds.listSets(allSets=True)
    new_asset = api.Session["AVALON_ASSET"]
    new_data = lib.get_asset()["data"]
    for s in scene_sets:
        try:
            if cmds.getAttr("{}.id".format(s)) == "pyblish.avalon.instance":
                attr = cmds.listAttr(s)
                print(s)
                if "asset" in attr:
                    print(" - setting asset to: [ {} ]".format(new_asset))
                    cmds.setAttr("{}.asset".format(s),
                                 new_asset, type="string")
                if "frameStart" in attr:
                    cmds.setAttr("{}.frameStart".format(s),
                                 new_data["frameStart"])
                if "frameEnd" in attr:
                    cmds.setAttr("{}.frameEnd".format(s),
                                 new_data["frameEnd"],)
        except ValueError:
|
||||
pass
|
||||
|
||||
|
||||
def show_message(title, msg):
|
||||
from avalon.vendor.Qt import QtWidgets
|
||||
from ..widgets import message_window
|
||||
|
||||
# Find maya main window
|
||||
top_level_widgets = {w.objectName(): w for w in
|
||||
QtWidgets.QApplication.topLevelWidgets()}
|
||||
|
||||
parent = top_level_widgets.get("MayaWindow", None)
|
||||
if parent is None:
|
||||
pass
|
||||
else:
|
||||
message_window.message(title=title, message=msg, parent=parent)
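
A usage sketch for the observer helpers added above (a hedged example; the
module path and the scene-open hook are assumptions, not part of this diff):

    # Hypothetical wiring from a host install step or userSetup.py.
    # remove_render_layer_observer() is safe to call first: it swallows the
    # ValueError raised when no list observer has been registered yet.
    from pype.maya import lib

    def on_scene_open(*args):
        lib.remove_render_layer_observer()       # drop a stale list observer
        lib.add_render_layer_observer()          # watch layer add/remove
        lib.add_render_layer_change_observer()   # watch existing layers rename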

@@ -15,12 +15,13 @@ log = logging.getLogger(__name__)
 def _get_menu():
     """Return the menu instance if it currently exists in Maya"""

     app = QtWidgets.QApplication.instance()
-    widgets = dict((w.objectName(), w) for w in app.allWidgets())
+    widgets = dict((
+        w.objectName(), w) for w in QtWidgets.QApplication.allWidgets())
     menu = widgets.get(self._menu)
     return menu



 def deferred():

     log.info("Attempting to install scripts menu..")

@@ -1,4 +1,5 @@
 from avalon import api
+from avalon.vendor import qargparse


 def get_reference_node_parents(ref):

@@ -33,11 +34,29 @@ class ReferenceLoader(api.Loader):
     `update` logic.

     """
-    def load(self,
-             context,
-             name=None,
-             namespace=None,
-             data=None):
+    options = [
+        qargparse.Integer(
+            "count",
+            label="Count",
+            default=1,
+            min=1,
+            help="How many times to load?"
+        ),
+        qargparse.Double3(
+            "offset",
+            label="Position Offset",
+            help="Offset loaded models for easier selection."
+        )
+    ]
+
+    def load(
+        self,
+        context,
+        name=None,
+        namespace=None,
+        options=None
+    ):

         import os
         from avalon.maya import lib

@@ -46,29 +65,46 @@ class ReferenceLoader(api.Loader):
         assert os.path.exists(self.fname), "%s does not exist." % self.fname

         asset = context['asset']
+        loaded_containers = []

-        namespace = namespace or lib.unique_namespace(
-            asset["name"] + "_",
-            prefix="_" if asset["name"][0].isdigit() else "",
-            suffix="_",
-        )
+        count = options.get("count") or 1
+        for c in range(0, count):
+            namespace = namespace or lib.unique_namespace(
+                asset["name"] + "_",
+                prefix="_" if asset["name"][0].isdigit() else "",
+                suffix="_",
+            )

-        self.process_reference(context=context,
-                               name=name,
-                               namespace=namespace,
-                               data=data)
+            # Offset loaded subset
+            if "offset" in options:
+                offset = [i * c for i in options["offset"]]
+                options["translate"] = offset

-        # Only containerize if any nodes were loaded by the Loader
-        nodes = self[:]
-        if not nodes:
-            return
+            self.log.info(options)

-        return containerise(
-            name=name,
-            namespace=namespace,
-            nodes=nodes,
-            context=context,
-            loader=self.__class__.__name__)
+            self.process_reference(
+                context=context,
+                name=name,
+                namespace=namespace,
+                options=options
+            )
+
+            # Only containerize if any nodes were loaded by the Loader
+            nodes = self[:]
+            if not nodes:
+                return
+
+            loaded_containers.append(containerise(
+                name=name,
+                namespace=namespace,
+                nodes=nodes,
+                context=context,
+                loader=self.__class__.__name__
+            ))
+
+            c += 1
+            namespace = None
+        return loaded_containers

     def process_reference(self, context, name, namespace, data):
         """To be implemented by subclass"""

@@ -33,41 +33,6 @@ if os.getenv("PYBLISH_GUI", None):
     pyblish.register_gui(os.getenv("PYBLISH_GUI", None))


-class NukeHandler(logging.Handler):
-    '''
-    Nuke Handler - emits logs into nuke's script editor.
-    warning will emit nuke.warning()
-    critical and fatal would popup msg dialog to alert of the error.
-    '''
-
-    def __init__(self):
-        logging.Handler.__init__(self)
-        self.set_name("Pype_Nuke_Handler")
-
-    def emit(self, record):
-        # Formated message:
-        msg = self.format(record)
-
-        if record.levelname.lower() in [
-            # "warning",
-            "critical",
-            "fatal",
-            "error"
-        ]:
-            msg = self.format(record)
-            nuke.message(msg)
-
-
-'''Adding Nuke Logging Handler'''
-log.info([handler.get_name() for handler in logging.root.handlers[:]])
-nuke_handler = NukeHandler()
-if nuke_handler.get_name() \
-        not in [handler.get_name()
-                for handler in logging.root.handlers[:]]:
-    logging.getLogger().addHandler(nuke_handler)
-    logging.getLogger().setLevel(logging.INFO)
-log.info([handler.get_name() for handler in logging.root.handlers[:]])
-
 def reload_config():
     """Attempt to reload pipeline at run-time.

@@ -113,7 +78,7 @@ def install():
     family_states = [
         "write",
         "review",
-        "nukenodes"
+        "nukenodes"
+        "gizmo"
     ]

@@ -128,11 +93,11 @@ def install():

     # Set context settings.
     nuke.addOnCreate(workfile_settings.set_context_settings, nodeClass="Root")
+    nuke.addOnCreate(workfile_settings.set_favorites, nodeClass="Root")

     menu.install()
-


 def launch_workfiles_app():
     '''Function letting start workfiles after start of host
     '''

254  pype/nuke/lib.py

@@ -15,13 +15,11 @@ import nuke
-from .presets import (
-    get_colorspace_preset,
-    get_node_dataflow_preset,
-    get_node_colorspace_preset
-)
+from .presets import (
+    get_node_colorspace_preset,
+    get_anatomy
+)
+# TODO: remove get_anatomy and import directly Anatomy() here

 from .utils import set_context_favorites

 from pypeapp import Logger
 log = Logger().get_logger(__name__, "nuke")

@@ -50,8 +48,6 @@ def checkInventoryVersions():
     and check if the node is having actual version. If not then it will color
     it to red.
     """
-    # TODO: make it for all nodes not just Read (Loader
-
     # get all Loader nodes by avalon attribute metadata
     for each in nuke.allNodes():
         if each.Class() == 'Read':

@@ -93,7 +89,6 @@ def checkInventoryVersions():
 def writes_version_sync():
     ''' Callback synchronizing version of publishable write nodes
     '''
-    # TODO: make it work with new write node group
     try:
         rootVersion = pype.get_version_from_path(nuke.root().name())
         padding = len(rootVersion)

@@ -130,7 +125,8 @@ def writes_version_sync():
                 os.makedirs(os.path.dirname(node_new_file), 0o766)
     except Exception as e:
         log.warning(
-            "Write node: `{}` has no version in path: {}".format(each.name(), e))
+            "Write node: `{}` has no version in path: {}".format(
+                each.name(), e))


 def version_up_script():

@@ -183,9 +179,12 @@ def format_anatomy(data):
     try:
         padding = int(anatomy.templates['render']['padding'])
     except KeyError as e:
-        log.error("`padding` key is not in `render` "
-                  "Anatomy template. Please, add it there and restart "
-                  "the pipeline (padding: \"4\"): `{}`".format(e))
+        msg = ("`padding` key is not in `render` "
+               "Anatomy template. Please, add it there and restart "
+               "the pipeline (padding: \"4\"): `{}`").format(e)
+
+        log.error(msg)
+        nuke.message(msg)

     version = data.get("version", None)
     if not version:
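
The padding value read above drives zero-padded frame and version numbers. A
worked sketch with illustrative values:

    padding = 4             # from anatomy.templates['render']['padding']
    frame = 23
    print("%0{}d".format(padding) % frame)   # -> "0023"
    print(str(frame).zfill(padding))         # -> "0023" as well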

@@ -196,7 +195,7 @@ def format_anatomy(data):
         "root": api.Session["AVALON_PROJECTS"],
         "subset": data["avalon"]["subset"],
         "asset": data["avalon"]["asset"],
-        "task": api.Session["AVALON_TASK"].lower(),
+        "task": api.Session["AVALON_TASK"],
         "family": data["avalon"]["family"],
         "project": {"name": project_document["name"],
                     "code": project_document["data"].get("code", '')},

@@ -265,7 +264,9 @@ def create_write_node(name, data, input=None, prenodes=None):
         anatomy_filled = format_anatomy(data)

     except Exception as e:
-        log.error("problem with resolving anatomy tepmlate: {}".format(e))
+        msg = "problem with resolving anatomy tepmlate: {}".format(e)
+        log.error(msg)
+        nuke.message(msg)

     # build file path to workfiles
     fpath = str(anatomy_filled["work"]["folder"]).replace("\\", "/")

@@ -372,7 +373,7 @@ def create_write_node(name, data, input=None, prenodes=None):
         now_node.setInput(0, prev_node)

     # imprinting group node
-    GN = avalon.nuke.imprint(GN, data["avalon"])
+    avalon.nuke.imprint(GN, data["avalon"])

     divider = nuke.Text_Knob('')
     GN.addKnob(divider)

@@ -430,7 +431,7 @@ def add_deadline_tab(node):
     node.addKnob(nuke.Tab_Knob("Deadline"))

     knob = nuke.Int_Knob("deadlineChunkSize", "Chunk Size")
-    knob.setValue(1)
+    knob.setValue(0)
     node.addKnob(knob)

     knob = nuke.Int_Knob("deadlinePriority", "Priority")

@@ -517,11 +518,6 @@ class WorkfileSettings(object):
         self.data = kwargs

     def get_nodes(self, nodes=None, nodes_filter=None):
-        # filter out only dictionaries for node creation
-        #
-        # print("\n\n")
-        # pprint(self._nodes)
-        #

         if not isinstance(nodes, list) and not isinstance(nodes_filter, list):
             return [n for n in nuke.allNodes()]

@@ -543,8 +539,11 @@ class WorkfileSettings(object):
             viewer_dict (dict): adjustments from presets

         '''
-        assert isinstance(viewer_dict, dict), log.error(
-            "set_viewers_colorspace(): argument should be dictionary")
+        if not isinstance(viewer_dict, dict):
+            msg = "set_viewers_colorspace(): argument should be dictionary"
+            log.error(msg)
+            nuke.message(msg)
+            return

         filter_knobs = [
             "viewerProcess",

@@ -592,8 +591,10 @@ class WorkfileSettings(object):
             root_dict (dict): adjustmensts from presets

         '''
-        assert isinstance(root_dict, dict), log.error(
-            "set_root_colorspace(): argument should be dictionary")
+        if not isinstance(root_dict, dict):
+            msg = "set_root_colorspace(): argument should be dictionary"
+            log.error(msg)
+            nuke.message(msg)

         log.debug(">> root_dict: {}".format(root_dict))

@@ -618,7 +619,8 @@ class WorkfileSettings(object):
         # third set ocio custom path
         if root_dict.get("customOCIOConfigPath"):
             self._root_node["customOCIOConfigPath"].setValue(
-                str(root_dict["customOCIOConfigPath"]).format(**os.environ)
+                str(root_dict["customOCIOConfigPath"]).format(
+                    **os.environ).replace("\\", "/")
             )
             log.debug("nuke.root()['{}'] changed to: {}".format(
                 "customOCIOConfigPath", root_dict["customOCIOConfigPath"]))

@@ -638,12 +640,105 @@ class WorkfileSettings(object):
             write_dict (dict): nuke write node as dictionary

         '''
         # TODO: complete this function so any write node in
         # scene will have fixed colorspace following presets for the project
-        assert isinstance(write_dict, dict), log.error(
-            "set_root_colorspace(): argument should be dictionary")
+        if not isinstance(write_dict, dict):
+            msg = "set_root_colorspace(): argument should be dictionary"
+            log.error(msg)
+            return

         log.debug("__ set_writes_colorspace(): {}".format(write_dict))
+        from avalon.nuke import get_avalon_knob_data
+
+        for node in nuke.allNodes():
+
+            if node.Class() in ["Viewer", "Dot"]:
+                continue
+
+            # get data from avalon knob
+            avalon_knob_data = get_avalon_knob_data(node, ["avalon:", "ak:"])
+
+            if not avalon_knob_data:
+                continue
+
+            if avalon_knob_data["id"] != "pyblish.avalon.instance":
+                continue
+
+            # establish families
+            families = [avalon_knob_data["family"]]
+            if avalon_knob_data.get("families"):
+                families.append(avalon_knob_data.get("families"))
+
+            # except disabled nodes but exclude backdrops in test
+            for fmly, knob in write_dict.items():
+                write = None
+                if (fmly in families):
+                    # Add all nodes in group instances.
+                    if node.Class() == "Group":
+                        node.begin()
+                        for x in nuke.allNodes():
+                            if x.Class() == "Write":
+                                write = x
+                        node.end()
+                    elif node.Class() == "Write":
+                        write = node
+                    else:
+                        log.warning("Wrong write node Class")
+
+                    write["colorspace"].setValue(str(knob["colorspace"]))
+                    log.info(
+                        "Setting `{0}` to `{1}`".format(
+                            write.name(),
+                            knob["colorspace"]))
+
+    def set_reads_colorspace(self, reads):
+        """ Setting colorspace to Read nodes
+
+        Looping trought all read nodes and tries to set colorspace based on regex rules in presets
+        """
+        changes = dict()
+        for n in nuke.allNodes():
+            file = nuke.filename(n)
+            if not n.Class() == "Read":
+                continue
+
+            # load nuke presets for Read's colorspace
+            read_clrs_presets = get_colorspace_preset().get(
+                "nuke", {}).get("read", {})
+
+            # check if any colorspace presets for read is mathing
+            preset_clrsp = next((read_clrs_presets[k]
+                                 for k in read_clrs_presets
+                                 if bool(re.search(k, file))),
+                                None)
+            log.debug(preset_clrsp)
+            if preset_clrsp is not None:
+                current = n["colorspace"].value()
+                future = str(preset_clrsp)
+                if current != future:
+                    changes.update({
+                        n.name(): {
+                            "from": current,
+                            "to": future
+                        }
+                    })
+        log.debug(changes)
+        if changes:
+            msg = "Read nodes are not set to correct colospace:\n\n"
+            for nname, knobs in changes.items():
+                msg += str(" - node: '{0}' is now '{1}' "
+                           "but should be '{2}'\n").format(
+                    nname, knobs["from"], knobs["to"]
+                )
+
+            msg += "\nWould you like to change it?"
+
+            if nuke.ask(msg):
+                for nname, knobs in changes.items():
+                    n = nuke.toNode(nname)
+                    n["colorspace"].setValue(knobs["to"])
+                    log.info(
+                        "Setting `{0}` to `{1}`".format(
+                            nname,
+                            knobs["to"]))

     def set_colorspace(self):
         ''' Setting colorpace following presets
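
set_reads_colorspace() above matches Read-node file paths against regex keys
from the colorspace presets. A minimal sketch of that matching (the preset
content is invented for illustration):

    import re

    # Hypothetical preset: regex pattern -> colorspace name
    read_clrs_presets = {
        r"_plate.*\.exr": "linear",
        r"\.(jpg|png)$": "sRGB",
    }

    file = "/proj/sh010_plate_v001.exr"
    preset_clrsp = next((read_clrs_presets[k] for k in read_clrs_presets
                         if re.search(k, file)), None)
    print(preset_clrsp)   # -> "linear"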

@@ -653,25 +748,33 @@ class WorkfileSettings(object):
         try:
             self.set_root_colorspace(nuke_colorspace["root"])
         except AttributeError:
-            log.error(
-                "set_colorspace(): missing `root` settings in template")
+            msg = "set_colorspace(): missing `root` settings in template"

         try:
             self.set_viewers_colorspace(nuke_colorspace["viewer"])
         except AttributeError:
-            log.error(
-                "set_colorspace(): missing `viewer` settings in template")
+            msg = "set_colorspace(): missing `viewer` settings in template"
+            nuke.message(msg)
+            log.error(msg)

         try:
             self.set_writes_colorspace(nuke_colorspace["write"])
         except AttributeError:
-            log.error(
-                "set_colorspace(): missing `write` settings in template")
+            msg = "set_colorspace(): missing `write` settings in template"
+            nuke.message(msg)
+            log.error(msg)
+
+        reads = nuke_colorspace.get("read")
+        if reads:
+            self.set_reads_colorspace(reads)

         try:
             for key in nuke_colorspace:
                 log.debug("Preset's colorspace key: {}".format(key))
         except TypeError:
-            log.error("Nuke is not in templates! \n\n\n"
-                      "contact your supervisor!")
+            msg = "Nuke is not in templates! Contact your supervisor!"
+            nuke.message(msg)
+            log.error(msg)

     def reset_frame_range_handles(self):
         """Set frame range to current asset"""

@@ -683,6 +786,8 @@ class WorkfileSettings(object):
             return
         data = self._asset_entity["data"]

+        log.debug("__ asset data: `{}`".format(data))
+
         missing_cols = []
         check_cols = ["fps", "frameStart", "frameEnd",
                       "handleStart", "handleEnd"]

@@ -758,13 +863,13 @@ class WorkfileSettings(object):
         }

         if any(x for x in data.values() if x is None):
-            log.error(
-                "Missing set shot attributes in DB."
-                "\nContact your supervisor!."
-                "\n\nWidth: `{width}`"
-                "\nHeight: `{height}`"
-                "\nPixel Asspect: `{pixel_aspect}`".format(**data)
-            )
+            msg = ("Missing set shot attributes in DB."
+                   "\nContact your supervisor!."
+                   "\n\nWidth: `{width}`"
+                   "\nHeight: `{height}`"
+                   "\nPixel Asspect: `{pixel_aspect}`").format(**data)
+            log.error(msg)
+            nuke.message(msg)

         bbox = self._asset_entity.get('data', {}).get('crop')

@@ -781,10 +886,10 @@ class WorkfileSettings(object):
             )
         except Exception as e:
             bbox = None
-            log.error(
-                "{}: {} \nFormat:Crop need to be set with dots, example: "
-                "0.0.1920.1080, /nSetting to default".format(__name__, e)
-            )
+            msg = ("{}:{} \nFormat:Crop need to be set with dots, example: "
+                   "0.0.1920.1080, /nSetting to default").format(__name__, e)
+            log.error(msg)
+            nuke.message(msg)

         existing_format = None
         for format in nuke.formats():

@@ -839,6 +944,26 @@ class WorkfileSettings(object):
         # add colorspace menu item
         self.set_colorspace()

+    def set_favorites(self):
+        projects_root = os.getenv("AVALON_PROJECTS")
+        work_dir = os.getenv("AVALON_WORKDIR")
+        asset = os.getenv("AVALON_ASSET")
+        project = os.getenv("AVALON_PROJECT")
+        hierarchy = os.getenv("AVALON_HIERARCHY")
+        favorite_items = OrderedDict()
+
+        # project
+        favorite_items.update({"Project dir": os.path.join(
+            projects_root, project).replace("\\", "/")})
+        # shot
+        favorite_items.update({"Shot dir": os.path.join(
+            projects_root, project,
+            hierarchy, asset).replace("\\", "/")})
+        # workdir
+        favorite_items.update({"Work dir": work_dir})
+
+        set_context_favorites(favorite_items)
+

 def get_hierarchical_attr(entity, attr, default=None):
     attr_parts = attr.split('.')

@@ -962,7 +1087,7 @@ class BuildWorkfile(WorkfileSettings):
         "project": {"name": self._project["name"],
                     "code": self._project["data"].get("code", '')},
         "asset": self._asset or os.environ["AVALON_ASSET"],
-        "task": kwargs.get("task") or api.Session["AVALON_TASK"].lower(),
+        "task": kwargs.get("task") or api.Session["AVALON_TASK"],
         "hierarchy": kwargs.get("hierarchy") or pype.get_hierarchy(),
         "version": kwargs.get("version", {}).get("name", 1),
         "user": getpass.getuser(),

@@ -1000,7 +1125,8 @@ class BuildWorkfile(WorkfileSettings):
     def process(self,
                 regex_filter=None,
                 version=None,
-                representations=["exr", "dpx", "lutJson", "mov", "preview"]):
+                representations=["exr", "dpx", "lutJson", "mov",
+                                 "preview", "png"]):
         """
         A short description.


@@ -1041,9 +1167,10 @@ class BuildWorkfile(WorkfileSettings):
         wn["render"].setValue(True)
         vn.setInput(0, wn)

-        bdn = self.create_backdrop(label="Render write \n\n\n\nOUTPUT",
-                                   color='0xcc1102ff', layer=-1,
-                                   nodes=[wn])
+        # adding backdrop under write
+        self.create_backdrop(label="Render write \n\n\n\nOUTPUT",
+                             color='0xcc1102ff', layer=-1,
+                             nodes=[wn])

         # move position
         self.position_up(4)

@@ -1057,10 +1184,12 @@ class BuildWorkfile(WorkfileSettings):
             version=version,
             representations=representations)

+        log.info("__ subsets: `{}`".format(subsets))
+        for name, subset in subsets.items():
+            log.debug("___________________")
+            log.debug(name)
+            log.debug(subset["version"])
+
+        nodes_backdrop = list()

         for name, subset in subsets.items():
             if "lut" in name:
                 continue

@@ -1090,9 +1219,10 @@ class BuildWorkfile(WorkfileSettings):
         # move position
         self.position_right()

-        bdn = self.create_backdrop(label="Loaded Reads",
-                                   color='0x2d7702ff', layer=-1,
-                                   nodes=nodes_backdrop)
+        # adding backdrop under all read nodes
+        self.create_backdrop(label="Loaded Reads",
+                             color='0x2d7702ff', layer=-1,
+                             nodes=nodes_backdrop)

     def read_loader(self, representation):
         """

@@ -1240,8 +1370,8 @@ class ExporterReview:
         else:
             self.fname = os.path.basename(self.path_in)
             self.fhead = os.path.splitext(self.fname)[0] + "."
-        self.first_frame = self.instance.data.get("frameStart", None)
-        self.last_frame = self.instance.data.get("frameEnd", None)
+        self.first_frame = self.instance.data.get("frameStartHandle", None)
+        self.last_frame = self.instance.data.get("frameEndHandle", None)

         if "#" in self.fhead:
             self.fhead = self.fhead.replace("#", "")[:-1]

@@ -1256,7 +1386,7 @@ class ExporterReview:
             'ext': self.ext,
             'files': self.file,
             "stagingDir": self.staging_dir,
-            "anatomy_template": "publish",
+            "anatomy_template": "render",
             "tags": [self.name.replace("_", "-")] + add_tags
         }

@@ -1,6 +1,6 @@
 from pype import api as pype
 from pypeapp import Anatomy, config

+import nuke

 log = pype.Logger().get_logger(__name__, "nuke")

@@ -28,7 +28,7 @@ def get_node_dataflow_preset(**kwarg):
     families = kwarg.get("families", [])
     preset = kwarg.get("preset", None)  # omit < 2.0.0v

-    assert any([host, cls]), log.error(
+    assert any([host, cls]), nuke.message(
        "`{}`: Missing mandatory kwargs `host`, `cls`".format(__file__))

     nuke_dataflow = get_dataflow_preset().get(str(host), None)

@@ -56,8 +56,10 @@ def get_node_colorspace_preset(**kwarg):
     families = kwarg.get("families", [])
     preset = kwarg.get("preset", None)  # omit < 2.0.0v

-    assert any([host, cls]), log.error(
-        "`{}`: Missing mandatory kwargs `host`, `cls`".format(__file__))
+    if not any([host, cls]):
+        msg = "`{}`: Missing mandatory kwargs `host`, `cls`".format(__file__)
+        log.error(msg)
+        nuke.message(msg)

     nuke_colorspace = get_colorspace_preset().get(str(host), None)
     nuke_colorspace_node = nuke_colorspace.get(str(cls), None)

@@ -3,6 +3,23 @@ import nuke
 from avalon.nuke import lib as anlib


+def set_context_favorites(favorites={}):
+    """ Addig favorite folders to nuke's browser
+
+    Argumets:
+        favorites (dict): couples of {name:path}
+    """
+    dir = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))
+    icon_path = os.path.join(dir, 'res', 'icons', 'folder-favorite3.png')
+
+    for name, path in favorites.items():
+        nuke.addFavoriteDir(
+            name,
+            path,
+            nuke.IMAGE | nuke.SCRIPT | nuke.GEO,
+            icon=icon_path)
+
+
 def get_node_outputs(node):
     '''
     Return a dictionary of the nodes and pipes that are connected to node
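
A quick usage sketch for the new set_context_favorites() helper above (the
paths are illustrative):

    from collections import OrderedDict

    favorites = OrderedDict()
    favorites["Project dir"] = "P:/projects/myproject"
    favorites["Shot dir"] = "P:/projects/myproject/shots/sh010"

    # Adds one favorite folder per entry to Nuke's file browser.
    set_context_favorites(favorites)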

@@ -51,7 +51,7 @@ if os.getenv("PYBLISH_GUI", None):
     pyblish.register_gui(os.getenv("PYBLISH_GUI", None))


-def install(config):
+def install():
     """
     Installing Nukestudio integration for avalon

@@ -1,4 +1,5 @@
 import os
+import re
 import sys
 import hiero
 import pyblish.api

@@ -7,7 +8,6 @@ from avalon.vendor.Qt import (QtWidgets, QtGui)
 import pype.api as pype
 from pypeapp import Logger

-
 log = Logger().get_logger(__name__, "nukestudio")

 cached_process = None

@@ -361,3 +361,449 @@ def CreateNukeWorkfile(nodes=None,
         nodes=nuke_script.getNodes(),
         **kwargs
     )
+
+
+class ClipLoader:
+
+    active_bin = None
+
+    def __init__(self, plugin_cls, context, sequence=None, track=None, **kwargs):
+        """ Initialize object
+
+        Arguments:
+            plugin_cls (api.Loader): plugin object
+            context (dict): loader plugin context
+            sequnce (hiero.core.Sequence): sequence object
+            track (hiero.core.Track): track object
+            kwargs (dict)[optional]: possible keys:
+                projectBinPath: "path/to/binItem"
+                hieroWorkfileName: "name_of_hiero_project_file_no_extension"
+
+        """
+        self.cls = plugin_cls
+        self.context = context
+        self.kwargs = kwargs
+        self.active_project = self._get_active_project()
+        self.project_bin = self.active_project.clipsBin()
+
+        self.data = dict()
+
+        assert self._set_data(), str("Cannot Load selected data, look into "
+                                     "database or call your supervisor")
+
+        # inject asset data to representation dict
+        self._get_asset_data()
+        log.debug("__init__ self.data: `{}`".format(self.data))
+
+        # add active components to class
+        self.active_sequence = self._get_active_sequence(sequence)
+        self.active_track = self._get_active_track(track)
+
+    def _set_data(self):
+        """ Gets context and convert it to self.data
+        data structure:
+            {
+                "name": "assetName_subsetName_representationName"
+                "path": "path/to/file/created/by/get_repr..",
+                "binPath": "projectBinPath",
+            }
+        """
+        # create name
+        repr = self.context["representation"]
+        repr_cntx = repr["context"]
+        asset = str(repr_cntx["asset"])
+        subset = str(repr_cntx["subset"])
+        representation = str(repr_cntx["representation"])
+        self.data["clip_name"] = "_".join([asset, subset, representation])
+        self.data["track_name"] = "_".join([subset, representation])
+
+        # gets file path
+        file = self.cls.fname
+        if not file:
+            repr_id = repr["_id"]
+            log.warning(
+                "Representation id `{}` is failing to load".format(repr_id))
+            return None
+        self.data["path"] = file.replace("\\", "/")
+
+        # convert to hashed path
+        if repr_cntx.get("frame"):
+            self._fix_path_hashes()
+
+        # solve project bin structure path
+        hierarchy = str("/".join((
+            "Loader",
+            repr_cntx["hierarchy"].replace("\\", "/"),
+            asset
+        )))
+
+        self.data["binPath"] = self.kwargs.get(
+            "projectBinPath",
+            hierarchy
+        )
+
+        return True
+
+    def _fix_path_hashes(self):
+        """ Convert file path where it is needed padding with hashes
+        """
+        file = self.data["path"]
+        if "#" not in file:
+            frame = self.context["representation"]["context"].get("frame")
+            padding = len(frame)
+            file = file.replace(frame, "#"*padding)
+        self.data["path"] = file
+
+    def _get_active_project(self):
+        """ Get hiero active project object
+        """
+        fname = self.kwargs.get("hieroWorkfileName", "")
+
+        return next((p for p in hiero.core.projects()
+                     if fname in p.name()),
+                    hiero.core.projects()[-1])
+
+    def _get_asset_data(self):
+        """ Get all available asset data
+
+        joint `data` key with asset.data dict into the representaion
+
+        """
+        asset_name = self.context["representation"]["context"]["asset"]
+        self.data["assetData"] = pype.get_asset(asset_name)["data"]
+
+    def _make_project_bin(self, hierarchy):
+        """ Creare bins by given hierarchy path
+
+        It will also make sure no duplicit bins will be created
+
+        Arguments:
+            hierarchy (str): path devided by slashes "bin0/bin1/bin2"
+
+        Returns:
+            bin (hiero.core.BinItem): with the bin to be used for mediaItem
+        """
+        if self.active_bin:
+            return self.active_bin
+
+        assert hierarchy != "", "Please add hierarchy!"
+        log.debug("__ hierarchy1: `{}`".format(hierarchy))
+        if '/' in hierarchy:
+            hierarchy = hierarchy.split('/')
+        else:
+            hierarchy = [hierarchy]
+
+        parent_bin = None
+        for i, name in enumerate(hierarchy):
+            # if first index and list is more then one long
+            if i == 0:
+                bin = next((bin for bin in self.project_bin.bins()
+                            if name in bin.name()), None)
+                if not bin:
+                    bin = hiero.core.Bin(name)
+                    self.project_bin.addItem(bin)
+                log.debug("__ bin.name: `{}`".format(bin.name()))
+                parent_bin = bin
+
+            # if second to prelast
+            elif (i >= 1) and (i <= (len(hierarchy) - 1)):
+                bin = next((bin for bin in parent_bin.bins()
+                            if name in bin.name()), None)
+                if not bin:
+                    bin = hiero.core.Bin(name)
+                    parent_bin.addItem(bin)
+
+                parent_bin = bin
+
+        return parent_bin
+
+    def _make_track_item(self):
+        """ Create track item with """
+        pass
+
+    def _set_clip_color(self, last_version=True):
+        """ Sets color of clip on clip/track item
+
+        Arguments:
+            last_version (bool): True = green | False = red
+        """
+        pass
+
+    def _set_container_tag(self, item, metadata):
+        """ Sets container tag to given clip/track item
+
+        Arguments:
+            item (hiero.core.BinItem or hiero.core.TrackItem)
+            metadata (dict): data to be added to tag
+        """
+        pass
+
+    def _get_active_sequence(self, sequence):
+        if not sequence:
+            return hiero.ui.activeSequence()
+        else:
+            return sequence
+
+    def _get_active_track(self, track):
+        if not track:
+            track_name = self.data["track_name"]
+        else:
+            track_name = track.name()
+
+        track_pass = next(
+            (t for t in self.active_sequence.videoTracks()
+             if t.name() in track_name), None
+        )
+
+        if not track_pass:
+            track_pass = hiero.core.VideoTrack(track_name)
+            self.active_sequence.addTrack(track_pass)
+
+        return track_pass
+
+    def load(self):
+        log.debug("__ active_project: `{}`".format(self.active_project))
+        log.debug("__ active_sequence: `{}`".format(self.active_sequence))
+
+        # create project bin for the media to be imported into
+        self.active_bin = self._make_project_bin(self.data["binPath"])
+        log.debug("__ active_bin: `{}`".format(self.active_bin))
+
+        log.debug("__ version.data: `{}`".format(
+            self.context["version"]["data"]))
+
+        # create mediaItem in active project bin
+        # create clip media
+        media = hiero.core.MediaSource(self.data["path"])
+        media_duration = int(media.duration())
+
+        handle_start = int(self.data["assetData"]["handleStart"])
+        handle_end = int(self.data["assetData"]["handleEnd"])
+
+        clip_in = int(self.data["assetData"]["clipIn"])
+        clip_out = int(self.data["assetData"]["clipOut"])
+
+        log.debug("__ media_duration: `{}`".format(media_duration))
+        log.debug("__ handle_start: `{}`".format(handle_start))
+        log.debug("__ handle_end: `{}`".format(handle_end))
+        log.debug("__ clip_in: `{}`".format(clip_in))
+        log.debug("__ clip_out: `{}`".format(clip_out))
+
+        # check if slate is included
+        # either in version data families or by calculating frame diff
+        slate_on = next(
+            (f for f in self.context["version"]["data"]["families"]
+             if "slate" in f),
+            None) or bool(((
+                clip_out - clip_in + 1) + handle_start + handle_end
+            ) - media_duration)
+
+        log.debug("__ slate_on: `{}`".format(slate_on))
+
+        # calculate slate differences
+        if slate_on:
+            media_duration -= 1
+            handle_start += 1
+
+        fps = self.data["assetData"]["fps"]
+
+        # create Clip from Media
+        _clip = hiero.core.Clip(media)
+        _clip.setName(self.data["clip_name"])
+
+        # add Clip to bin if not there yet
+        if self.data["clip_name"] not in [
+                b.name()
+                for b in self.active_bin.items()]:
+            binItem = hiero.core.BinItem(_clip)
+            self.active_bin.addItem(binItem)
+
+        _source = next((item for item in self.active_bin.items()
+                        if self.data["clip_name"] in item.name()), None)
+
+        if not _source:
+            log.warning("Problem with created Source clip: `{}`".format(
+                self.data["clip_name"]))
+
+        version = next((s for s in _source.items()), None)
+        clip = version.item()
+
+        # add to track as clip item
+        track_item = hiero.core.TrackItem(
+            self.data["clip_name"], hiero.core.TrackItem.kVideo)
+
+        track_item.setSource(clip)
+
+        track_item.setSourceIn(handle_start)
+        track_item.setTimelineIn(clip_in)
+
+        track_item.setSourceOut(media_duration - handle_end)
+        track_item.setTimelineOut(clip_out)
+        track_item.setPlaybackSpeed(1)
+        self.active_track.addTrackItem(track_item)
+
+        log.info("Loading clips: `{}`".format(self.data["clip_name"]))
+
+
+def create_nk_workfile_clips(nk_workfiles, seq=None):
+    '''
+    nk_workfile is list of dictionaries like:
+    [{
+        'path': 'P:/Jakub_testy_pipeline/test_v01.nk',
+        'name': 'test',
+        'handleStart': 15,  # added asymetrically to handles
+        'handleEnd': 10,  # added asymetrically to handles
+        "clipIn": 16,
+        "frameStart": 991,
+        "frameEnd": 1023,
+        'task': 'Comp-tracking',
+        'work_dir': 'VFX_PR',
+        'shot': '00010'
+    }]
+    '''
+
+    proj = hiero.core.projects()[-1]
+    root = proj.clipsBin()
+
+    if not seq:
+        seq = hiero.core.Sequence('NewSequences')
+        root.addItem(hiero.core.BinItem(seq))
+    # todo will ned to define this better
+    # track = seq[1]  # lazy example to get a destination# track
+    clips_lst = []
+    for nk in nk_workfiles:
+        task_path = '/'.join([nk['work_dir'], nk['shot'], nk['task']])
+        bin = create_bin_in_project(task_path, proj)
+
+        if nk['task'] not in seq.videoTracks():
+            track = hiero.core.VideoTrack(nk['task'])
+            seq.addTrack(track)
+        else:
+            track = seq.tracks(nk['task'])
+
+        # create clip media
+        media = hiero.core.MediaSource(nk['path'])
+        media_in = int(media.startTime() or 0)
+        media_duration = int(media.duration() or 0)
+
+        handle_start = nk.get("handleStart")
+        handle_end = nk.get("handleEnd")
+
+        if media_in:
+            source_in = media_in + handle_start
+        else:
+            source_in = nk["frameStart"] + handle_start
+
+        if media_duration:
+            source_out = (media_in + media_duration - 1) - handle_end
+        else:
+            source_out = nk["frameEnd"] - handle_end
+
+        source = hiero.core.Clip(media)
+
+        name = os.path.basename(os.path.splitext(nk['path'])[0])
+        split_name = split_by_client_version(name)[0] or name
+
+        # add to bin as clip item
+        items_in_bin = [b.name() for b in bin.items()]
+        if split_name not in items_in_bin:
+            binItem = hiero.core.BinItem(source)
+            bin.addItem(binItem)
+
+        new_source = [
+            item for item in bin.items() if split_name in item.name()
+        ][0].items()[0].item()
+
+        # add to track as clip item
+        trackItem = hiero.core.TrackItem(
+            split_name, hiero.core.TrackItem.kVideo)
+        trackItem.setSource(new_source)
+        trackItem.setSourceIn(source_in)
+        trackItem.setSourceOut(source_out)
+        trackItem.setTimelineIn(nk["clipIn"])
+        trackItem.setTimelineOut(nk["clipIn"] + (source_out - source_in))
+        track.addTrackItem(trackItem)
+        clips_lst.append(trackItem)
+
+    return clips_lst
+
+
+def create_bin_in_project(bin_name='', project=''):
+    '''
+    create bin in project and
+    if the bin_name is "bin1/bin2/bin3" it will create whole depth
+    '''
+
+    if not project:
+        # get the first loaded project
+        project = hiero.core.projects()[-1]
+    if not bin_name:
+        return None
+    if '/' in bin_name:
+        bin_name = bin_name.split('/')
+    else:
+        bin_name = [bin_name]
+
+    clipsBin = project.clipsBin()
+
+    done_bin_lst = []
+    for i, b in enumerate(bin_name):
+        if i == 0 and len(bin_name) > 1:
+            if b in [bin.name() for bin in clipsBin.bins()]:
+                bin = [bin for bin in clipsBin.bins() if b in bin.name()][0]
+                done_bin_lst.append(bin)
+            else:
+                create_bin = hiero.core.Bin(b)
+                clipsBin.addItem(create_bin)
+                done_bin_lst.append(create_bin)
+
+        elif i >= 1 and i < len(bin_name) - 1:
+            if b in [bin.name() for bin in done_bin_lst[i - 1].bins()]:
+                bin = [
+                    bin for bin in done_bin_lst[i - 1].bins()
+                    if b in bin.name()
+                ][0]
+                done_bin_lst.append(bin)
+            else:
+                create_bin = hiero.core.Bin(b)
+                done_bin_lst[i - 1].addItem(create_bin)
+                done_bin_lst.append(create_bin)
+
+        elif i == len(bin_name) - 1:
+            if b in [bin.name() for bin in done_bin_lst[i - 1].bins()]:
+                bin = [
+                    bin for bin in done_bin_lst[i - 1].bins()
+                    if b in bin.name()
+                ][0]
+                done_bin_lst.append(bin)
+            else:
+                create_bin = hiero.core.Bin(b)
+                done_bin_lst[i - 1].addItem(create_bin)
+                done_bin_lst.append(create_bin)
+    # print [bin.name() for bin in clipsBin.bins()]
+    return done_bin_lst[-1]
+
+
+def split_by_client_version(string):
+    regex = r"[/_.]v\d+"
+    try:
+        matches = re.findall(regex, string, re.IGNORECASE)
+        return string.split(matches[0])
+    except Exception as e:
+        print(e)
+        return None
+
+
+# nk_workfiles = [{
+#     'path': 'C:/Users/hubert/_PYPE_testing/projects/D001_projectx/episodes/ep120/ep120sq01/120sh020/publish/plates/platesMain/v023/prjx_120sh020_platesMain_v023.nk',
+#     'name': '120sh020_platesMain',
+#     'handles': 10,
+#     'handleStart': 10,
+#     'handleEnd': 10,
+#     "clipIn": 16,
+#     "frameStart": 991,
+#     "frameEnd": 1023,
+#     'task': 'platesMain',
+#     'work_dir': 'shots',
+#     'shot': '120sh020'
+# }]
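
The new ClipLoader above is driven by a loader plugin instance and its avalon
context. A hedged usage sketch (plugin and context are placeholders, not part
of this diff):

    # `plugin` is an api.Loader subclass instance with `fname` resolved;
    # `context` is the loader context holding the representation document.
    clip_loader = ClipLoader(plugin, context)   # resolves project, bin, track
    clip_loader.load()                          # creates bin item + track item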

@@ -5,13 +5,6 @@ from pypeapp import Logger
 from avalon.api import Session
 from hiero.ui import findMenuAction

-# this way we secure compatibility between nuke 10 and 11
-try:
-    from PySide.QtGui import *
-except Exception:
-    from PySide2.QtGui import *
-    from PySide2.QtWidgets import *
-
 from .tags import add_tags_from_presets

 from .lib import (

@@ -50,14 +43,8 @@ def install():
     """

     # here is the best place to add menu
-    from avalon.tools import (
-        creator,
-        publish,
-        cbloader,
-        cbsceneinventory,
-        contextmanager,
-        libraryloader
-    )
+    from avalon.tools import publish, cbloader
+    from avalon.vendor.Qt import QtGui

     menu_name = os.environ['AVALON_LABEL']

@@ -67,94 +54,57 @@ def install():

     self._change_context_menu = context_label

-    # Grab Hiero's MenuBar
-    M = hiero.ui.menuBar()
-
     try:
         check_made_menu = findMenuAction(menu_name)
     except Exception:
-        pass
+        check_made_menu = None

     if not check_made_menu:
-        menu = M.addMenu(menu_name)
+        # Grab Hiero's MenuBar
+        menu = hiero.ui.menuBar().addMenu(menu_name)
     else:
         menu = check_made_menu.menu()

-    actions = [
-        {
-            'parent': context_label,
-            'action': QAction('Set Context', None),
-            'function': contextmanager.show,
-            'icon': QIcon('icons:Position.png')
-        },
-        "separator",
-        {
-            'action': QAction("Work Files...", None),
-            'function': set_workfiles,
-            'icon': QIcon('icons:Position.png')
-        },
-        {
-            'action': QAction('Create Default Tags..', None),
-            'function': add_tags_from_presets,
-            'icon': QIcon('icons:Position.png')
-        },
-        "separator",
-        # {
-        #     'action': QAction('Create...', None),
-        #     'function': creator.show,
-        #     'icon': QIcon('icons:ColorAdd.png')
-        # },
-        # {
-        #     'action': QAction('Load...', None),
-        #     'function': cbloader.show,
-        #     'icon': QIcon('icons:CopyRectangle.png')
-        # },
-        {
-            'action': QAction('Publish...', None),
-            'function': publish.show,
-            'icon': QIcon('icons:Output.png')
-        },
-        # {
-        #     'action': QAction('Manage...', None),
-        #     'function': cbsceneinventory.show,
-        #     'icon': QIcon('icons:ModifyMetaData.png')
-        # },
-        {
-            'action': QAction('Library...', None),
-            'function': libraryloader.show,
-            'icon': QIcon('icons:ColorAdd.png')
-        },
-        "separator",
-        {
-            'action': QAction('Reload pipeline...', None),
-            'function': reload_config,
-            'icon': QIcon('icons:ColorAdd.png')
-        }]
+    context_label_action = menu.addAction(context_label)
+    context_label_action.setEnabled(False)

-    # Create menu items
-    for a in actions:
-        add_to_menu = menu
-        if isinstance(a, dict):
-            # create action
-            for k in a.keys():
-                if 'parent' in k:
-                    submenus = [sm for sm in a[k].split('/')]
-                    submenu = None
-                    for sm in submenus:
-                        if submenu:
-                            submenu.addMenu(sm)
-                        else:
-                            submenu = menu.addMenu(sm)
-                    add_to_menu = submenu
-                if 'action' in k:
-                    action = a[k]
-                elif 'function' in k:
-                    action.triggered.connect(a[k])
-                elif 'icon' in k:
-                    action.setIcon(a[k])
-
-            # add action to menu
-            add_to_menu.addAction(action)
-            hiero.ui.registerAction(action)
-        elif isinstance(a, str):
-            add_to_menu.addSeparator()
+    menu.addSeparator()
+
+    workfiles_action = menu.addAction("Work Files...")
+    workfiles_action.setIcon(QtGui.QIcon("icons:Position.png"))
+    workfiles_action.triggered.connect(set_workfiles)
+
+    default_tags_action = menu.addAction("Create Default Tags...")
+    default_tags_action.setIcon(QtGui.QIcon("icons:Position.png"))
+    default_tags_action.triggered.connect(add_tags_from_presets)
+
+    menu.addSeparator()
+
+    publish_action = menu.addAction("Publish...")
+    publish_action.setIcon(QtGui.QIcon("icons:Output.png"))
+    publish_action.triggered.connect(
+        lambda *args: publish.show(hiero.ui.mainWindow())
+    )
+
+    loader_action = menu.addAction("Load...")
+    loader_action.setIcon(QtGui.QIcon("icons:CopyRectangle.png"))
+    loader_action.triggered.connect(cbloader.show)
+    menu.addSeparator()
+
+    reload_action = menu.addAction("Reload pipeline...")
+    reload_action.setIcon(QtGui.QIcon("icons:ColorAdd.png"))
+    reload_action.triggered.connect(reload_config)
+
+    # Is this required?
+    # hiero.ui.registerAction(context_label_action)
+    # hiero.ui.registerAction(workfiles_action)
+    # hiero.ui.registerAction(default_tags_action)
+    # hiero.ui.registerAction(publish_action)
+    # hiero.ui.registerAction(loader_action)
+    # hiero.ui.registerAction(reload_action)
+
+    self.context_label_action = context_label_action
+    self.workfile_actions = workfiles_action
+    self.default_tags_action = default_tags_action
+    self.publish_action = publish_action
+    self.reload_action = reload_action

@@ -1,188 +0,0 @@
-import hiero.core
-import hiero.ui
-
-import re
-import os
-
-
-def create_nk_script_clips(script_lst, seq=None):
-    '''
-    nk_scripts is list of dictionaries like:
-    [{
-        'path': 'P:/Jakub_testy_pipeline/test_v01.nk',
-        'name': 'test',
-        'handles': 10,
-        'handleStart': 15,  # added asymetrically to handles
-        'handleEnd': 10,  # added asymetrically to handles
-        "clipIn": 16,
-        "frameStart": 991,
-        "frameEnd": 1023,
-        'task': 'Comp-tracking',
-        'work_dir': 'VFX_PR',
-        'shot': '00010'
-    }]
-    '''
-
-    proj = hiero.core.projects()[-1]
-    root = proj.clipsBin()
-
-    if not seq:
-        seq = hiero.core.Sequence('NewSequences')
-        root.addItem(hiero.core.BinItem(seq))
-    # todo will ned to define this better
-    # track = seq[1]  # lazy example to get a destination# track
-    clips_lst = []
-    for nk in script_lst:
-        task_path = '/'.join([nk['work_dir'], nk['shot'], nk['task']])
-        bin = create_bin_in_project(task_path, proj)
-
-        if nk['task'] not in seq.videoTracks():
-            track = hiero.core.VideoTrack(nk['task'])
-            seq.addTrack(track)
-        else:
-            track = seq.tracks(nk['task'])
-
-        # create slip media
-        print("__ path: `{}`".format(nk['path']))
-
-        media = hiero.core.MediaSource(nk['path'])
-        media_in = int(media.startTime() or 0)
-        media_duration = int(media.duration() or 0)
-
-        handle_start = nk.get("handleStart") or nk['handles']
-        handle_end = nk.get("handleEnd") or nk['handles']
-
-        if media_in:
-            source_in = media_in + handle_start
-        else:
-            source_in = nk["frameStart"] + handle_start
-
-        if media_duration:
-            source_out = (media_in + media_duration - 1) - handle_end
-        else:
-            source_out = nk["frameEnd"] - handle_end
-
-        print("__ media: `{}`".format(media))
-        print("__ media_in: `{}`".format(media_in))
-        print("__ media_duration : `{}`".format(media_duration))
-        print("__ source_in: `{}`".format(source_in))
-        print("__ source_out : `{}`".format(source_out))
-
-        source = hiero.core.Clip(media)
-        print("__ source : `{}`".format(source))
-        print("__ source.sourceIn(): `{}`".format(source.sourceIn()))
-
-        name = os.path.basename(os.path.splitext(nk['path'])[0])
-        split_name = split_by_client_version(name)[0] or name
-
-        print("__ split_name: `{}`".format(split_name))
-
-        # add to bin as clip item
-        items_in_bin = [b.name() for b in bin.items()]
-        if split_name not in items_in_bin:
-            binItem = hiero.core.BinItem(source)
-            bin.addItem(binItem)
-
-        print("__ bin.items(): `{}`".format(bin.items()))
-
-        new_source = [
-            item for item in bin.items() if split_name in item.name()
-        ][0].items()[0].item()
-
-        print("__ new_source: `{}`".format(new_source))
-        print("__ new_source: `{}`".format(new_source))
-
-        # add to track as clip item
-        trackItem = hiero.core.TrackItem(split_name, hiero.core.TrackItem.kVideo)
-        trackItem.setSource(new_source)
-        trackItem.setSourceIn(source_in)
-        trackItem.setSourceOut(source_out)
-        trackItem.setSourceIn(source_in)
-        trackItem.setTimelineIn(nk["clipIn"])
-        trackItem.setTimelineOut(nk["clipIn"] + (source_out - source_in))
-        track.addTrackItem(trackItem)
-        track.addTrackItem(trackItem)
-        clips_lst.append(trackItem)
-
-    return clips_lst
-
-
-def create_bin_in_project(bin_name='', project=''):
-    '''
-    create bin in project and
-    if the bin_name is "bin1/bin2/bin3" it will create whole depth
-    '''
-
-    if not project:
-        # get the first loaded project
-        project = hiero.core.projects()[-1]
-    if not bin_name:
-        return None
-    if '/' in bin_name:
-        bin_name = bin_name.split('/')
-    else:
-        bin_name = [bin_name]
-
-    clipsBin = project.clipsBin()
-
-    done_bin_lst = []
-    for i, b in enumerate(bin_name):
-        if i == 0 and len(bin_name) > 1:
-            if b in [bin.name() for bin in clipsBin.bins()]:
-                bin = [bin for bin in clipsBin.bins() if b in bin.name()][0]
-                done_bin_lst.append(bin)
-            else:
-                create_bin = hiero.core.Bin(b)
-                clipsBin.addItem(create_bin)
-                done_bin_lst.append(create_bin)
-
-        elif i >= 1 and i < len(bin_name) - 1:
-            if b in [bin.name() for bin in done_bin_lst[i - 1].bins()]:
-                bin = [
-                    bin for bin in done_bin_lst[i - 1].bins()
-                    if b in bin.name()
-                ][0]
-                done_bin_lst.append(bin)
-            else:
-                create_bin = hiero.core.Bin(b)
-                done_bin_lst[i - 1].addItem(create_bin)
-                done_bin_lst.append(create_bin)
-
-        elif i == len(bin_name) - 1:
-            if b in [bin.name() for bin in done_bin_lst[i - 1].bins()]:
-                bin = [
-                    bin for bin in done_bin_lst[i - 1].bins()
-                    if b in bin.name()
-                ][0]
-                done_bin_lst.append(bin)
-            else:
-                create_bin = hiero.core.Bin(b)
-                done_bin_lst[i - 1].addItem(create_bin)
-                done_bin_lst.append(create_bin)
-    # print [bin.name() for bin in clipsBin.bins()]
-    return done_bin_lst[-1]
-
-
-def split_by_client_version(string):
-    regex = r"[/_.]v\d+"
-    try:
-        matches = re.findall(regex, string, re.IGNORECASE)
-        return string.split(matches[0])
-    except Exception as e:
-        print(e)
-        return None
-
-
-script_lst = [{
-    'path': 'C:/Users/hubert/_PYPE_testing/projects/D001_projectx/episodes/ep120/ep120sq01/120sh020/publish/plates/platesMain/v023/prjx_120sh020_platesMain_v023.nk',
-    'name': '120sh020_platesMain',
-    'handles': 10,
-    'handleStart': 10,
-    'handleEnd': 10,
-    "clipIn": 16,
-    "frameStart": 991,
-    "frameEnd": 1023,
-    'task': 'platesMain',
-    'work_dir': 'shots',
-    'shot': '120sh020'
-}]

@@ -73,5 +73,5 @@ def current_file():
     return normalised


-def work_root():
-    return os.path.normpath(api.Session["AVALON_WORKDIR"]).replace("\\", "/")
+def work_root(session):
+    return os.path.normpath(session["AVALON_WORKDIR"]).replace("\\", "/")
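
work_root() now takes the session mapping explicitly instead of reading the
global avalon Session. A usage sketch:

    from avalon import api

    # Any mapping carrying AVALON_WORKDIR works; api.Session is the usual one.
    root = work_root(api.Session)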

@@ -7,8 +7,9 @@ class IntegrateFtrackComments(pyblish.api.InstancePlugin):
     """Create comments in Ftrack."""

     order = pyblish.api.IntegratorOrder
-    label = "Integrate Comments to Ftrack."
+    label = "Integrate Comments to Ftrack"
     families = ["shot"]
+    enabled = False

     def process(self, instance):
         session = instance.context.data["ftrackSession"]

@@ -23,25 +23,79 @@ class CollectFtrackApi(pyblish.api.ContextPlugin):

         # Collect session
         session = ftrack_api.Session()
+        self.log.debug("Ftrack user: \"{0}\"".format(session.api_user))
         context.data["ftrackSession"] = session

         # Collect task

-        project = os.environ.get('AVALON_PROJECT', '')
-        asset = os.environ.get('AVALON_ASSET', '')
-        task = os.environ.get('AVALON_TASK', None)
-        self.log.debug(task)
+        project_name = os.environ.get('AVALON_PROJECT', '')
+        asset_name = os.environ.get('AVALON_ASSET', '')
+        task_name = os.environ.get('AVALON_TASK', None)
+
+        # Find project entity
+        project_query = 'Project where full_name is "{0}"'.format(project_name)
+        self.log.debug("Project query: < {0} >".format(project_query))
+        project_entity = list(session.query(project_query).all())
+        if len(project_entity) == 0:
+            raise AssertionError(
+                "Project \"{0}\" not found in Ftrack.".format(project_name)
+            )
+        # QUESTION Is possible to happen?
+        elif len(project_entity) > 1:
+            raise AssertionError((
+                "Found more than one project with name \"{0}\" in Ftrack."
+            ).format(project_name))
+
+        project_entity = project_entity[0]
+        self.log.debug("Project found: {0}".format(project_entity))
+
+        # Find asset entity
+        entity_query = (
+            'TypedContext where project_id is "{0}"'
+            ' and name is "{1}"'
+        ).format(project_entity["id"], asset_name)
+        self.log.debug("Asset entity query: < {0} >".format(entity_query))
+        asset_entities = []
+        for entity in session.query(entity_query).all():
+            # Skip tasks
+            if entity.entity_type.lower() != "task":
+                asset_entities.append(entity)
+
+        if len(asset_entities) == 0:
+            raise AssertionError((
+                "Entity with name \"{0}\" not found"
+                " in Ftrack project \"{1}\"."
+            ).format(asset_name, project_name))
+
+        elif len(asset_entities) > 1:
+            raise AssertionError((
+                "Found more than one entity with name \"{0}\""
+                " in Ftrack project \"{1}\"."
+            ).format(asset_name, project_name))
+
+        asset_entity = asset_entities[0]
+        self.log.debug("Asset found: {0}".format(asset_entity))
+
+        # Find task entity if task is set
+        if task_name:
+            task_query = (
+                'Task where name is "{0}" and parent_id is "{1}"'
+            ).format(task_name, asset_entity["id"])
+            self.log.debug("Task entity query: < {0} >".format(task_query))
+            task_entity = session.query(task_query).first()
+            if not task_entity:
+                self.log.warning(
+                    "Task entity with name \"{0}\" was not found.".format(
+                        task_name
+                    )
+                )
+            else:
+                self.log.debug("Task entity found: {0}".format(task_entity))

-        if task:
-            result = session.query('Task where\
-                project.full_name is "{0}" and\
-                name is "{1}" and\
-                parent.name is "{2}"'.format(project, task, asset)).one()
-            context.data["ftrackTask"] = result
         else:
-            result = session.query('TypedContext where\
-                project.full_name is "{0}" and\
-                name is "{1}"'.format(project, asset)).one()
-            context.data["ftrackEntity"] = result
+            task_entity = None
+            self.log.warning("Task name is not set.")

-        self.log.info(result)
+        context.data["ftrackProject"] = asset_entity
+        context.data["ftrackEntity"] = asset_entity
+        context.data["ftrackTask"] = task_entity
|
||||
|
|
|
|||
|
|
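A hedged sketch of the count-checked query pattern the collector adopts here, in place of `.one()` which raises on zero or multiple matches (the project name below is illustrative; `ftrack_api` reads FTRACK_SERVER, FTRACK_API_USER and FTRACK_API_KEY from the environment):

import ftrack_api

session = ftrack_api.Session()
entities = list(session.query(
    'Project where full_name is "projectx"').all())
if not entities:
    raise AssertionError('Project "projectx" not found in Ftrack.')
if len(entities) > 1:
    raise AssertionError('Found more than one project named "projectx".')
project_entity = entities[0]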
@ -77,6 +77,7 @@ class IntegrateFtrackApi(pyblish.api.InstancePlugin):
        info_msg = "Created new {entity_type} with data: {data}"
        info_msg += ", metadata: {metadata}."

        used_asset_versions = []
        # Iterate over components and publish
        for data in instance.data.get("ftrackComponentsList", []):
@ -148,6 +149,9 @@ class IntegrateFtrackApi(pyblish.api.InstancePlugin):
            assetversion_cust_attrs = _assetversion_data.pop(
                "custom_attributes", {}
            )
            asset_version_comment = _assetversion_data.pop(
                "comment", None
            )
            assetversion_data.update(_assetversion_data)

            assetversion_entity = session.query(
@ -185,6 +189,20 @@ class IntegrateFtrackApi(pyblish.api.InstancePlugin):
                existing_assetversion_metadata.update(assetversion_metadata)
                assetversion_entity["metadata"] = existing_assetversion_metadata

            # Add comment
            if asset_version_comment:
                assetversion_entity["comment"] = asset_version_comment
                try:
                    session.commit()
                except Exception:
                    session.rollback()
                    self.log.warning((
                        "Comment was not possible to set for AssetVersion"
                        " \"{0}\". Can't set its value to: \"{1}\""
                    ).format(
                        assetversion_entity["id"], str(asset_version_comment)
                    ))

            # Adding Custom Attributes
            for attr, val in assetversion_cust_attrs.items():
                if attr in assetversion_entity["custom_attributes"]:
@ -369,3 +387,14 @@ class IntegrateFtrackApi(pyblish.api.InstancePlugin):
                tp, value, tb = sys.exc_info()
                session.rollback()
                six.reraise(tp, value, tb)

            if assetversion_entity not in used_asset_versions:
                used_asset_versions.append(assetversion_entity)

        asset_versions_key = "ftrackIntegratedAssetVersions"
        if asset_versions_key not in instance.data:
            instance.data[asset_versions_key] = []

        for asset_version in used_asset_versions:
            if asset_version not in instance.data[asset_versions_key]:
                instance.data[asset_versions_key].append(asset_version)
@ -73,9 +73,9 @@ class IntegrateFtrackInstance(pyblish.api.InstancePlugin):
        '''
        start_frame = 0
        end_frame = 1
        if 'endFrameReview' in comp and 'startFrameReview' in comp:
        if 'frameEndFtrack' in comp and 'frameStartFtrack' in comp:
            end_frame += (
                comp['endFrameReview'] - comp['startFrameReview']
                comp['frameEndFtrack'] - comp['frameStartFtrack']
            )
        else:
            end_frame += (
@ -127,7 +127,7 @@ class IntegrateFtrackInstance(pyblish.api.InstancePlugin):

        # Add custom attributes for AssetVersion
        assetversion_cust_attrs = {}
        intent_val = instance.context.data.get("intent")
        intent_val = instance.context.data.get("intent", {}).get("value")
        if intent_val:
            assetversion_cust_attrs["intent"] = intent_val
143
pype/plugins/ftrack/publish/integrate_ftrack_note.py
Normal file
@ -0,0 +1,143 @@
import sys
import json

import pyblish.api
import six


class IntegrateFtrackNote(pyblish.api.InstancePlugin):
    """Create comments in Ftrack."""

    # Must be after integrate asset new
    order = pyblish.api.IntegratorOrder + 0.4999
    label = "Integrate Ftrack note"
    families = ["ftrack"]
    optional = True

    # Can be set in presets:
    # - Allows only `intent` and `comment` keys
    note_with_intent_template = "{intent}: {comment}"
    # - note label must exist in Ftrack
    note_labels = []

    def get_intent_label(self, session, intent_value):
        if not intent_value:
            return

        intent_configurations = session.query(
            "CustomAttributeConfiguration where key is intent"
        ).all()
        if not intent_configurations:
            return

        intent_configuration = intent_configurations[0]
        if len(intent_configurations) > 1:
            self.log.warning((
                "Found more than one `intent` custom attribute."
                " Using first found."
            ))

        config = intent_configuration.get("config")
        if not config:
            return

        configuration = json.loads(config)
        items = configuration.get("data")
        if not items:
            return

        if sys.version_info[0] < 3:
            string_type = basestring
        else:
            string_type = str

        if isinstance(items, string_type):
            items = json.loads(items)

        intent_label = None
        for item in items:
            if item["value"] == intent_value:
                intent_label = item["menu"]
                break

        return intent_label

    def process(self, instance):
        comment = (instance.context.data.get("comment") or "").strip()
        if not comment:
            self.log.info("Comment is not set.")
            return

        self.log.debug("Comment is set to `{}`".format(comment))

        session = instance.context.data["ftrackSession"]

        intent_val = instance.context.data.get("intent", {}).get("value")
        intent_label = instance.context.data.get("intent", {}).get("label")
        final_label = None
        if intent_val:
            final_label = self.get_intent_label(session, intent_val)
            if final_label is None:
                final_label = intent_label

        # if intent label is set then format comment
        # - it is possible that intent_label is equal to "" (empty string)
        if final_label:
            msg = "Intent label is set to `{}`.".format(final_label)
            comment = self.note_with_intent_template.format(**{
                "intent": final_label,
                "comment": comment
            })

        elif intent_val:
            msg = (
                "Intent is set to `{}` and was not added"
                " to comment because label is set to `{}`."
            ).format(intent_val, final_label)

        else:
            msg = "Intent is not set."

        self.log.debug(msg)

        asset_versions_key = "ftrackIntegratedAssetVersions"
        asset_versions = instance.data.get(asset_versions_key)
        if not asset_versions:
            self.log.info("There are no integrated AssetVersions.")
            return

        user = session.query(
            "User where username is \"{}\"".format(session.api_user)
        ).first()
        if not user:
            self.log.warning(
                "Was not able to query current User {}".format(
                    session.api_user
                )
            )

        labels = []
        if self.note_labels:
            all_labels = session.query("NoteLabel").all()
            labels_by_low_name = {
                lab["name"].lower(): lab for lab in all_labels
            }
            for _label in self.note_labels:
                label = labels_by_low_name.get(_label.lower())
                if not label:
                    self.log.warning(
                        "Note Label `{}` was not found.".format(_label)
                    )
                    continue

                labels.append(label)

        for asset_version in asset_versions:
            asset_version.create_note(comment, author=user, labels=labels)

            try:
                session.commit()
                self.log.debug("Note added to AssetVersion \"{}\"".format(
                    str(asset_version)
                ))
            except Exception:
                tp, value, tb = sys.exc_info()
                session.rollback()
                six.reraise(tp, value, tb)
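A minimal sketch of the preset-driven note formatting above; the template substitutes only the `intent` and `comment` keys (sample values are illustrative):

template = "{intent}: {comment}"
note_text = template.format(intent="For Review", comment="fixed flicker on sh020")
# -> "For Review: fixed flicker on sh020"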
@ -11,13 +11,13 @@ class IntegrateCleanComponentData(pyblish.api.InstancePlugin):
    label = 'Clean component data'
    families = ["ftrack"]
    optional = True
    active = True
    active = False

    def process(self, instance):

        for comp in instance.data['representations']:
            self.log.debug('component {}'.format(comp))

            if "%" in comp['published_path'] or "#" in comp['published_path']:
                continue
@ -1,60 +0,0 @@
import os
import subprocess

import pyblish.api

CREATE_NO_WINDOW = 0x08000000


def deadline_command(cmd):
    # Find Deadline
    path = os.environ.get("DEADLINE_PATH", None)
    assert path is not None, "Variable 'DEADLINE_PATH' must be set"

    executable = os.path.join(path, "deadlinecommand")
    if os.name == "nt":
        executable += ".exe"
    assert os.path.exists(
        executable), "Deadline executable not found at %s" % executable
    assert cmd, "Must have a command"

    query = (executable, cmd)

    process = subprocess.Popen(query, stdout=subprocess.PIPE,
                               stderr=subprocess.PIPE,
                               universal_newlines=True,
                               creationflags=CREATE_NO_WINDOW)
    out, err = process.communicate()

    return out


class CollectDeadlineUser(pyblish.api.ContextPlugin):
    """Retrieve the local active Deadline user"""

    order = pyblish.api.CollectorOrder + 0.499
    label = "Deadline User"

    hosts = ['maya', 'fusion', 'nuke']
    families = [
        "renderlayer",
        "saver.deadline",
        "imagesequence"
    ]

    def process(self, context):
        """Inject the current working file"""
        user = None
        try:
            user = deadline_command("GetCurrentUserName").strip()
        except:
            self.log.warning("Deadline command seems not to be working")

        if not user:
            self.log.warning("No Deadline user found. "
                             "Do you have Deadline installed?")
            return

        self.log.info("Found Deadline user: {}".format(user))
        context.data['deadlineUser'] = user
@ -1,127 +0,0 @@
import os
import json
import re

import pyblish.api
import clique


class CollectJSON(pyblish.api.ContextPlugin):
    """ Collecting the json files in current directory. """

    label = "JSON"
    order = pyblish.api.CollectorOrder
    hosts = ['maya']

    def version_get(self, string, prefix):
        """ Extract version information from filenames. Code from Foundry's
        nukescripts.version_get()
        """

        regex = r"[/_.]{}\d+".format(prefix)
        matches = re.findall(regex, string, re.IGNORECASE)

        if not len(matches):
            msg = "No '_{}#' found in '{}'".format(prefix, string)
            raise ValueError(msg)
        return matches[-1:][0][1], re.search(r"\d+", matches[-1:][0]).group()

    def process(self, context):
        current_file = context.data.get("currentFile", '')
        # Skip if current file is not a directory
        if not os.path.isdir(current_file):
            return

        # Traverse directory and collect collections from json files.
        instances = []
        for root, dirs, files in os.walk(current_file):
            for f in files:
                if f.endswith(".json"):
                    with open(os.path.join(root, f)) as json_data:
                        for data in json.load(json_data):
                            instances.append(data)

        # Validate instance based on supported families.
        valid_families = ["img", "cache", "scene", "mov"]
        valid_data = []
        for data in instances:
            families = data.get("families", []) + [data["family"]]
            family_type = list(set(families) & set(valid_families))
            if family_type:
                valid_data.append(data)

        # Create existing output instance.
        scanned_dirs = []
        files = []
        collections = []
        for data in valid_data:
            if "collection" not in data.keys():
                continue
            if data["collection"] is None:
                continue

            instance_collection = clique.parse(data["collection"])

            try:
                version = self.version_get(
                    os.path.basename(instance_collection.format()), "v"
                )[1]
            except KeyError:
                # Ignore any output that is not versioned
                continue

            # Getting collections of all previous versions and current version
            for count in range(1, int(version) + 1):

                # Generate collection
                version_string = "v" + str(count).zfill(len(version))
                head = instance_collection.head.replace(
                    "v" + version, version_string
                )
                collection = clique.Collection(
                    head=head.replace("\\", "/"),
                    padding=instance_collection.padding,
                    tail=instance_collection.tail
                )
                collection.version = count

                # Scan collection directory
                scan_dir = os.path.dirname(collection.head)
                if scan_dir not in scanned_dirs and os.path.exists(scan_dir):
                    for f in os.listdir(scan_dir):
                        file_path = os.path.join(scan_dir, f)
                        files.append(file_path.replace("\\", "/"))
                    scanned_dirs.append(scan_dir)

                # Match files to collection and add
                for f in files:
                    if collection.match(f):
                        collection.add(f)

                # Skip if no files were found in the collection
                if not list(collection):
                    continue

                # Skip existing collections
                if collection in collections:
                    continue

                instance = context.create_instance(name=data["name"])
                version = self.version_get(
                    os.path.basename(collection.format()), "v"
                )[1]

                basename = os.path.basename(collection.format())
                instance.data["label"] = "{0} - {1}".format(
                    data["name"], basename
                )

                families = data["families"] + [data["family"]]
                family = list(set(valid_families) & set(families))[0]
                instance.data["family"] = family
                instance.data["families"] = ["output"]
                instance.data["collection"] = collection
                instance.data["version"] = int(version)
                instance.data["publish"] = False

                collections.append(collection)
@ -1,88 +0,0 @@
import os
import re
import copy
from avalon import io
from pprint import pprint

import pyblish.api
from avalon import api


texture_extensions = ['.tif', '.tiff', '.jpg', '.jpeg', '.tx', '.png', '.tga',
                      '.psd', '.dpx', '.hdr', '.hdri', '.exr', '.sxr', '.psb']


class CollectTextures(pyblish.api.ContextPlugin):
    """
    Gather all texture files in working directory, traversing whole structure.
    """

    order = pyblish.api.CollectorOrder
    targets = ["texture"]
    label = "Textures"
    hosts = ["shell"]

    def process(self, context):

        if os.environ.get("PYPE_PUBLISH_PATHS"):
            paths = os.environ["PYPE_PUBLISH_PATHS"].split(os.pathsep)
        else:
            cwd = context.get("workspaceDir", os.getcwd())
            paths = [cwd]

        textures = []
        for path in paths:
            for dir, subdir, files in os.walk(path):
                textures.extend(
                    os.path.join(dir, x) for x in files
                    if os.path.splitext(x)[1].lower() in texture_extensions)

        self.log.info("Got {} texture files.".format(len(textures)))
        if len(textures) < 1:
            raise RuntimeError("no textures found.")

        asset_name = os.environ.get("AVALON_ASSET")
        family = 'texture'
        subset = 'Main'

        project = io.find_one({'type': 'project'})
        asset = io.find_one({
            'type': 'asset',
            'name': asset_name
        })

        context.data['project'] = project
        context.data['asset'] = asset

        for tex in textures:
            self.log.info("Processing: {}".format(tex))
            name, ext = os.path.splitext(tex)
            simple_name = os.path.splitext(os.path.basename(tex))[0]
            instance = context.create_instance(simple_name)

            instance.data.update({
                "subset": subset,
                "asset": asset_name,
                "label": simple_name,
                "name": simple_name,
                "family": family,
                "families": [family, 'ftrack'],
            })
            instance.data['destination_list'] = list()
            instance.data['representations'] = list()
            instance.data['source'] = 'pype command'

            texture_data = {}
            texture_data['anatomy_template'] = 'texture'
            texture_data["ext"] = ext
            texture_data["label"] = simple_name
            texture_data["name"] = "texture"
            texture_data["stagingDir"] = os.path.dirname(tex)
            texture_data["files"] = os.path.basename(tex)
            texture_data["thumbnail"] = False
            texture_data["preview"] = False

            instance.data["representations"].append(texture_data)
            self.log.info("collected instance: {}".format(instance.data))

        self.log.info("All collected.")
@ -1,51 +0,0 @@
import os
import json
import datetime
import time

import pyblish.api
import clique


class ExtractJSON(pyblish.api.ContextPlugin):
    """ Extract all instances to a serialized json file. """

    order = pyblish.api.IntegratorOrder
    label = "JSON"
    hosts = ['maya']

    def process(self, context):

        workspace = os.path.join(
            os.path.dirname(context.data["currentFile"]), "workspace",
            "instances")

        if not os.path.exists(workspace):
            os.makedirs(workspace)

        output_data = []
        for instance in context:
            self.log.debug(instance['data'])

            data = {}
            for key, value in instance.data.iteritems():
                if isinstance(value, clique.Collection):
                    value = value.format()

                try:
                    json.dumps(value)
                    data[key] = value
                except KeyError:
                    msg = "\"{0}\"".format(value)
                    msg += " in instance.data[\"{0}\"]".format(key)
                    msg += " could not be serialized."
                    self.log.debug(msg)

            output_data.append(data)

        timestamp = datetime.datetime.fromtimestamp(
            time.time()).strftime("%Y%m%d-%H%M%S")
        filename = timestamp + "_instances.json"

        with open(os.path.join(workspace, filename), "w") as outfile:
            outfile.write(json.dumps(output_data, indent=4, sort_keys=True))
@ -1,86 +0,0 @@
import os
import pyblish.api
import subprocess
import clique


class ExtractQuicktimeEXR(pyblish.api.InstancePlugin):
    """Resolve any dependency issues

    This plug-in resolves any paths which, if not updated might break
    the published file.

    The order of families is important, when working with lookdev you want to
    first publish the texture, update the texture paths in the nodes and then
    publish the shading network. Same goes for file dependent assets.
    """

    label = "Extract Quicktime"
    order = pyblish.api.ExtractorOrder
    families = ["imagesequence", "render", "write", "source"]
    hosts = ["shell"]

    def process(self, instance):
        # fps = instance.data.get("fps")
        # start = instance.data.get("startFrame")
        # stagingdir = os.path.normpath(instance.data.get("stagingDir"))
        #
        # collected_frames = os.listdir(stagingdir)
        # collections, remainder = clique.assemble(collected_frames)
        #
        # full_input_path = os.path.join(
        #     stagingdir, collections[0].format('{head}{padding}{tail}')
        # )
        # self.log.info("input {}".format(full_input_path))
        #
        # filename = collections[0].format('{head}')
        # if not filename.endswith('.'):
        #     filename += "."
        # movFile = filename + "mov"
        # full_output_path = os.path.join(stagingdir, movFile)
        #
        # self.log.info("output {}".format(full_output_path))
        #
        # config_data = instance.context.data['output_repre_config']
        #
        # proj_name = os.environ.get('AVALON_PROJECT', '__default__')
        # profile = config_data.get(proj_name, config_data['__default__'])
        #
        # input_args = []
        # # overrides output file
        # input_args.append("-y")
        # # preset's input data
        # input_args.extend(profile.get('input', []))
        # # necessary input data
        # input_args.append("-start_number {}".format(start))
        # input_args.append("-i {}".format(full_input_path))
        # input_args.append("-framerate {}".format(fps))
        #
        # output_args = []
        # # preset's output data
        # output_args.extend(profile.get('output', []))
        # # output filename
        # output_args.append(full_output_path)
        # mov_args = [
        #     "ffmpeg",
        #     " ".join(input_args),
        #     " ".join(output_args)
        # ]
        # subprocess_mov = " ".join(mov_args)
        # sub_proc = subprocess.Popen(subprocess_mov)
        # sub_proc.wait()
        #
        # if not os.path.isfile(full_output_path):
        #     raise("Quicktime wasn't created successfully")
        #
        # if "representations" not in instance.data:
        #     instance.data["representations"] = []
        #
        # representation = {
        #     'name': 'mov',
        #     'ext': 'mov',
        #     'files': movFile,
        #     "stagingDir": stagingdir,
        #     "preview": True
        # }
        # instance.data["representations"].append(representation)
@ -1,153 +0,0 @@
import os
import subprocess

import pyblish.api
import filelink


class ExtractTranscode(pyblish.api.InstancePlugin):
    """Extracts review movie from image sequence.

    Offset to get images to transcode from.
    """

    order = pyblish.api.ExtractorOrder + 0.1
    label = "Transcode"
    optional = True
    families = ["review"]

    def find_previous_index(self, index, indexes):
        """Finds the closest previous value in a list from a value."""

        data = []
        for i in indexes:
            if i >= index:
                continue
            data.append(index - i)

        return indexes[data.index(min(data))]

    def process(self, instance):

        if "collection" in instance.data.keys():
            self.process_image(instance)

        if "output_path" in instance.data.keys():
            self.process_movie(instance)

    def process_image(self, instance):

        collection = instance.data.get("collection", [])

        if not list(collection):
            msg = "Skipping \"{0}\" because no frames were found."
            self.log.warning(msg.format(instance.data["name"]))
            return

        # Temporarily fill the missing frames.
        missing = collection.holes()
        if not collection.is_contiguous():
            pattern = collection.format("{head}{padding}{tail}")
            for index in missing.indexes:
                dst = pattern % index
                src_index = self.find_previous_index(
                    index, list(collection.indexes)
                )
                src = pattern % src_index

                filelink.create(src, dst)

        # Generate args.
        # Has to be yuv420p for compatibility with older players and smooth
        # playback. This does come with a sacrifice of more visible banding
        # issues.
        # -crf 18 is visually lossless.
        args = [
            "ffmpeg", "-y",
            "-start_number", str(min(collection.indexes)),
            "-framerate", str(instance.context.data["framerate"]),
            "-i", collection.format("{head}{padding}{tail}"),
            "-pix_fmt", "yuv420p",
            "-crf", "18",
            "-timecode", "00:00:00:01",
            "-vframes",
            str(max(collection.indexes) - min(collection.indexes) + 1),
            "-vf",
            "scale=trunc(iw/2)*2:trunc(ih/2)*2",
        ]

        if instance.data.get("baked_colorspace_movie"):
            args = [
                "ffmpeg", "-y",
                "-i", instance.data["baked_colorspace_movie"],
                "-pix_fmt", "yuv420p",
                "-crf", "18",
                "-timecode", "00:00:00:01",
            ]

        args.append(collection.format("{head}.mov"))

        self.log.debug("Executing args: {0}".format(args))

        # Can't use subprocess.check_output, cause Houdini doesn't like that.
        p = subprocess.Popen(
            args,
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT,
            stdin=subprocess.PIPE,
            cwd=os.path.dirname(args[-1])
        )

        output = p.communicate()[0]

        # Remove temporary frame fillers
        for f in missing:
            os.remove(f)

        if p.returncode != 0:
            raise ValueError(output)

        self.log.debug(output)

    def process_movie(self, instance):
        # Generate args.
        # Has to be yuv420p for compatibility with older players and smooth
        # playback. This does come with a sacrifice of more visible banding
        # issues.
        args = [
            "ffmpeg", "-y",
            "-i", instance.data["output_path"],
            "-pix_fmt", "yuv420p",
            "-crf", "18",
            "-timecode", "00:00:00:01",
        ]

        if instance.data.get("baked_colorspace_movie"):
            args = [
                "ffmpeg", "-y",
                "-i", instance.data["baked_colorspace_movie"],
                "-pix_fmt", "yuv420p",
                "-crf", "18",
                "-timecode", "00:00:00:01",
            ]

        split = os.path.splitext(instance.data["output_path"])
        args.append(split[0] + "_review.mov")

        self.log.debug("Executing args: {0}".format(args))

        # Can't use subprocess.check_output, cause Houdini doesn't like that.
        p = subprocess.Popen(
            args,
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT,
            stdin=subprocess.PIPE,
            cwd=os.path.dirname(args[-1])
        )

        output = p.communicate()[0]

        if p.returncode != 0:
            raise ValueError(output)

        self.log.debug(output)
@ -18,7 +18,7 @@ def open(filepath):
class Openfile(api.Loader):
    """Open Image Sequence with system default"""

    families = ["write"]
    families = ["render2d"]
    representations = ["*"]

    label = "Open"
@ -1,10 +1,20 @@
"""
"""Collect Anatomy and global anatomy data.

Requires:
    None
    session -> AVALON_TASK
    projectEntity, assetEntity -> collect_avalon_entities *(pyblish.api.CollectorOrder)
    username -> collect_pype_user *(pyblish.api.CollectorOrder + 0.001)
    datetimeData -> collect_datetime_data *(pyblish.api.CollectorOrder)

Provides:
    context -> anatomy (pypeapp.Anatomy)
    context -> anatomyData
"""

import os
import json

from avalon import api, lib
from pypeapp import Anatomy
import pyblish.api
@ -12,9 +22,52 @@ import pyblish.api
class CollectAnatomy(pyblish.api.ContextPlugin):
    """Collect Anatomy into Context"""

    order = pyblish.api.CollectorOrder
    order = pyblish.api.CollectorOrder + 0.002
    label = "Collect Anatomy"

    def process(self, context):
        context.data['anatomy'] = Anatomy()
        self.log.info("Anatomy templates collected...")
        root_path = api.registered_root()
        task_name = api.Session["AVALON_TASK"]

        project_entity = context.data["projectEntity"]
        asset_entity = context.data["assetEntity"]

        project_name = project_entity["name"]

        context.data["anatomy"] = Anatomy(project_name)
        self.log.info(
            "Anatomy object collected for project \"{}\".".format(project_name)
        )

        hierarchy_items = asset_entity["data"]["parents"]
        hierarchy = ""
        if hierarchy_items:
            hierarchy = os.path.join(*hierarchy_items)

        context_data = {
            "root": root_path,
            "project": {
                "name": project_name,
                "code": project_entity["data"].get("code")
            },
            "asset": asset_entity["name"],
            "hierarchy": hierarchy.replace("\\", "/"),
            "task": task_name,

            "username": context.data["user"]
        }

        avalon_app_name = os.environ.get("AVALON_APP_NAME")
        if avalon_app_name:
            application_def = lib.get_application(avalon_app_name)
            app_dir = application_def.get("application_dir")
            if app_dir:
                context_data["app"] = app_dir

        datetime_data = context.data.get("datetimeData") or {}
        context_data.update(datetime_data)

        context.data["anatomyData"] = context_data

        self.log.info("Global anatomy Data collected")
        self.log.debug(json.dumps(context_data, indent=4))
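A minimal sketch of the "hierarchy" value built above, with hypothetical parents; backslashes are normalized so templates stay platform-agnostic:

import os

hierarchy_items = ["episodes", "ep120", "ep120sq01"]
hierarchy = ""
if hierarchy_items:
    hierarchy = os.path.join(*hierarchy_items)
print(hierarchy.replace("\\", "/"))  # episodes/ep120/ep120sq01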
53
pype/plugins/global/publish/collect_avalon_entities.py
Normal file
@ -0,0 +1,53 @@
"""Collect Anatomy and global anatomy data.
|
||||
|
||||
Requires:
|
||||
session -> AVALON_PROJECT, AVALON_ASSET
|
||||
|
||||
Provides:
|
||||
context -> projectEntity - project entity from database
|
||||
context -> assetEntity - asset entity from database
|
||||
"""
|
||||
|
||||
from avalon import io, api
|
||||
import pyblish.api
|
||||
|
||||
|
||||
class CollectAvalonEntities(pyblish.api.ContextPlugin):
|
||||
"""Collect Anatomy into Context"""
|
||||
|
||||
order = pyblish.api.CollectorOrder
|
||||
label = "Collect Avalon Entities"
|
||||
|
||||
def process(self, context):
|
||||
io.install()
|
||||
project_name = api.Session["AVALON_PROJECT"]
|
||||
asset_name = api.Session["AVALON_ASSET"]
|
||||
|
||||
project_entity = io.find_one({
|
||||
"type": "project",
|
||||
"name": project_name
|
||||
})
|
||||
assert project_entity, (
|
||||
"Project '{0}' was not found."
|
||||
).format(project_name)
|
||||
self.log.debug("Collected Project \"{}\"".format(project_entity))
|
||||
|
||||
asset_entity = io.find_one({
|
||||
"type": "asset",
|
||||
"name": asset_name,
|
||||
"parent": project_entity["_id"]
|
||||
})
|
||||
assert asset_entity, (
|
||||
"No asset found by the name '{0}' in project '{1}'"
|
||||
).format(asset_name, project_name)
|
||||
|
||||
self.log.debug("Collected Asset \"{}\"".format(asset_entity))
|
||||
|
||||
context.data["projectEntity"] = project_entity
|
||||
context.data["assetEntity"] = asset_entity
|
||||
|
||||
data = asset_entity['data']
|
||||
handles = int(data.get("handles") or 0)
|
||||
context.data["handles"] = handles
|
||||
context.data["handleStart"] = int(data.get("handleStart", handles))
|
||||
context.data["handleEnd"] = int(data.get("handleEnd", handles))
|
||||
|
|
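A hedged sketch of the handle fallback above: explicit handleStart/handleEnd win, otherwise the legacy "handles" value is used (sample data only):

data = {"handles": 8, "handleStart": 10}
handles = int(data.get("handles") or 0)
print(int(data.get("handleStart", handles)))  # 10
print(int(data.get("handleEnd", handles)))    # 8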
@ -15,4 +15,5 @@ class CollectComment(pyblish.api.ContextPlugin):
    order = pyblish.api.CollectorOrder

    def process(self, context):
        context.data["comment"] = ""
        comment = (context.data.get("comment") or "").strip()
        context.data["comment"] = comment
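A quick sketch of the normalization above: a missing comment and a whitespace-only comment both end up as an empty string (sample values only):

for raw in (None, "   ", "  final tweak  "):
    print(repr((raw or "").strip()))
# '' / '' / 'final tweak'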
18
pype/plugins/global/publish/collect_datetime_data.py
Normal file
@ -0,0 +1,18 @@
"""These data *must* be collected only once during publishing process.
|
||||
|
||||
Provides:
|
||||
context -> datetimeData
|
||||
"""
|
||||
|
||||
import pyblish.api
|
||||
from pypeapp import config
|
||||
|
||||
|
||||
class CollectDateTimeData(pyblish.api.ContextPlugin):
|
||||
order = pyblish.api.CollectorOrder
|
||||
label = "Collect DateTime data"
|
||||
|
||||
def process(self, context):
|
||||
key = "datetimeData"
|
||||
if key not in context.data:
|
||||
context.data[key] = config.get_datetime_data()
|
||||
|
|
@ -1,446 +0,0 @@
"""
Requires:
    environment -> PYPE_PUBLISH_PATHS
    context -> workspaceDir

Provides:
    context -> user (str)
    instance -> new instance
"""

import os
import re
import copy
import json

import pyblish.api
from avalon import api


def collect(root,
            regex=None,
            exclude_regex=None,
            frame_start=None,
            frame_end=None):
    """Collect sequence collections in root"""

    from avalon.vendor import clique

    files = list()
    for filename in os.listdir(root):

        # Must have extension
        ext = os.path.splitext(filename)[1]
        if not ext:
            continue

        # Only files
        if not os.path.isfile(os.path.join(root, filename)):
            continue

        # Include and exclude regex
        if regex and not re.search(regex, filename):
            continue
        if exclude_regex and re.search(exclude_regex, filename):
            continue

        files.append(filename)

    # Match collections
    # Support filenames like: projectX_shot01_0010.tiff with this regex
    pattern = r"(?P<index>(?P<padding>0*)\d+)\.\D+\d?$"
    collections, remainder = clique.assemble(files,
                                             patterns=[pattern],
                                             minimum_items=1)

    # Exclude any frames outside start and end frame.
    for collection in collections:
        for index in list(collection.indexes):
            if frame_start is not None and index < frame_start:
                collection.indexes.discard(index)
                continue
            if frame_end is not None and index > frame_end:
                collection.indexes.discard(index)
                continue

    # Keep only collections that have at least a single frame
    collections = [c for c in collections if c.indexes]

    return collections, remainder


class CollectRenderedFrames(pyblish.api.ContextPlugin):
    """Gather file sequences from working directory

    When "FILESEQUENCE" environment variable is set these paths (folders or
    .json files) are parsed for image sequences. Otherwise the current
    working directory is searched for file sequences.

    The json configuration may have the optional keys:
        asset (str): The asset to publish to. If not provided fall back to
            api.Session["AVALON_ASSET"]
        subset (str): The subset to publish to. If not provided the sequence's
            head (up to frame number) will be used.
        frame_start (int): The start frame for the sequence
        frame_end (int): The end frame for the sequence
        root (str): The path to collect from (can be relative to the .json)
        regex (str): A regex for the sequence filename
        exclude_regex (str): A regex for filename to exclude from collection
        metadata (dict): Custom metadata for instance.data["metadata"]

    """

    order = pyblish.api.CollectorOrder - 0.0001
    targets = ["filesequence"]
    label = "RenderedFrames"

    def process(self, context):
        pixel_aspect = 1
        resolution_width = 1920
        resolution_height = 1080
        lut_path = None
        slate_frame = None
        families_data = None
        subset = None
        version = None
        frame_start = 0
        frame_end = 0
        if os.environ.get("PYPE_PUBLISH_PATHS"):
            paths = os.environ["PYPE_PUBLISH_PATHS"].split(os.pathsep)
            self.log.info("Collecting paths: {}".format(paths))
        else:
            cwd = context.get("workspaceDir", os.getcwd())
            paths = [cwd]

        for path in paths:

            self.log.info("Loading: {}".format(path))

            if path.endswith(".json"):
                # Search using .json configuration
                with open(path, "r") as f:
                    try:
                        data = json.load(f)
                    except Exception as exc:
                        self.log.error(
                            "Error loading json: "
                            "{} - Exception: {}".format(path, exc)
                        )
                        raise

                cwd = os.path.dirname(path)
                root_override = data.get("root")
                frame_start = int(data.get("frameStart"))
                frame_end = int(data.get("frameEnd"))
                subset = data.get("subset")

                if root_override:
                    if os.path.isabs(root_override):
                        root = root_override
                    else:
                        root = os.path.join(cwd, root_override)
                else:
                    root = cwd

                if data.get("ftrack"):
                    f = data.get("ftrack")
                    os.environ["FTRACK_API_USER"] = f["FTRACK_API_USER"]
                    os.environ["FTRACK_API_KEY"] = f["FTRACK_API_KEY"]
                    os.environ["FTRACK_SERVER"] = f["FTRACK_SERVER"]

                metadata = data.get("metadata")
                if metadata:
                    session = metadata.get("session")
                    if session:
                        self.log.info("setting session using metadata")
                        api.Session.update(session)
                        os.environ.update(session)
                    instance = metadata.get("instance")
                    if instance:
                        instance_family = instance.get("family")
                        pixel_aspect = instance.get("pixelAspect", 1)
                        resolution_width = instance.get("resolutionWidth", 1920)
                        resolution_height = instance.get("resolutionHeight", 1080)
                        lut_path = instance.get("lutPath", None)
                        baked_mov_path = instance.get("bakeRenderPath")
                        families_data = instance.get("families")
                        slate_frame = instance.get("slateFrame")
                        version = instance.get("version")

            else:
                # Search in directory
                data = dict()
                root = path

            self.log.info("Collecting: {}".format(root))

            regex = data.get("regex")
            if baked_mov_path:
                regex = "^{}.*$".format(subset)

            if regex:
                self.log.info("Using regex: {}".format(regex))

            if "slate" in families_data:
                frame_start -= 1

            collections, remainder = collect(
                root=root,
                regex=regex,
                exclude_regex=data.get("exclude_regex"),
                frame_start=frame_start,
                frame_end=frame_end,
            )

            self.log.info("Found collections: {}".format(collections))
            self.log.info("Found remainder: {}".format(remainder))

            fps = data.get("fps", 25)

            if data.get("user"):
                context.data["user"] = data["user"]

            if data.get("version"):
                version = data.get("version")

            # Get family from the data
            families = data.get("families", ["render"])
            if "render" not in families:
                families.append("render")
            if "ftrack" not in families:
                families.append("ftrack")
            if "write" in instance_family:
                families.append("write")
            if families_data and "slate" in families_data:
                families.append("slate")

            if data.get("attachTo"):
                # we need to attach found collections to existing
                # subset version as review representation.

                for attach in data.get("attachTo"):
                    self.log.info(
                        "Attaching render {}:v{}".format(
                            attach["subset"], attach["version"]))
                    instance = context.create_instance(
                        attach["subset"])
                    instance.data.update(
                        {
                            "name": attach["subset"],
                            "version": attach["version"],
                            "family": 'review',
                            "families": ['review', 'ftrack'],
                            "asset": data.get(
                                "asset", api.Session["AVALON_ASSET"]),
                            "stagingDir": root,
                            "frameStart": frame_start,
                            "frameEnd": frame_end,
                            "fps": fps,
                            "source": data.get("source", ""),
                            "pixelAspect": pixel_aspect,
                            "resolutionWidth": resolution_width,
                            "resolutionHeight": resolution_height
                        })

                    if "representations" not in instance.data:
                        instance.data["representations"] = []

                    for collection in collections:
                        self.log.info(
                            " - adding representation: {}".format(
                                str(collection))
                        )
                        ext = collection.tail.lstrip(".")

                        representation = {
                            "name": ext,
                            "ext": "{}".format(ext),
                            "files": list(collection),
                            "stagingDir": root,
                            "anatomy_template": "render",
                            "fps": fps,
                            "tags": ["review"],
                        }
                        instance.data["representations"].append(
                            representation)

            elif subset:
                # if we have subset - add all collections and known
                # remainder as representations

                # take out review family if mov path
                # this will make imagesequence none review

                if baked_mov_path:
                    self.log.info(
                        "Baked mov is available {}".format(
                            baked_mov_path))
                    families.append("review")

                if session['AVALON_APP'] == "maya":
                    families.append("review")

                self.log.info(
                    "Adding representations to subset {}".format(
                        subset))

                instance = context.create_instance(subset)
                data = copy.deepcopy(data)

                instance.data.update(
                    {
                        "name": subset,
                        "family": families[0],
                        "families": list(families),
                        "subset": subset,
                        "asset": data.get(
                            "asset", api.Session["AVALON_ASSET"]),
                        "stagingDir": root,
                        "frameStart": frame_start,
                        "frameEnd": frame_end,
                        "fps": fps,
                        "source": data.get("source", ""),
                        "pixelAspect": pixel_aspect,
                        "resolutionWidth": resolution_width,
                        "resolutionHeight": resolution_height,
                        "slateFrame": slate_frame,
                        "version": version
                    }
                )

                if "representations" not in instance.data:
                    instance.data["representations"] = []

                for collection in collections:
                    self.log.info(" - {}".format(str(collection)))

                    ext = collection.tail.lstrip(".")

                    if "slate" in instance.data["families"]:
                        frame_start += 1

                    representation = {
                        "name": ext,
                        "ext": "{}".format(ext),
                        "files": list(collection),
                        "frameStart": frame_start,
                        "frameEnd": frame_end,
                        "stagingDir": root,
                        "anatomy_template": "render",
                        "fps": fps,
                        "tags": ["review"] if not baked_mov_path else [],
                    }
                    instance.data["representations"].append(
                        representation)

                # filter out only relevant mov in case baked available
                self.log.debug("__ remainder {}".format(remainder))
                if baked_mov_path:
                    remainder = [r for r in remainder
                                 if r in baked_mov_path]
                    self.log.debug("__ remainder {}".format(remainder))

                # process remainders
                for rem in remainder:
                    # add only known types to representation
                    if rem.split(".")[-1] in ['mov', 'jpg', 'mp4']:
                        self.log.info(" . {}".format(rem))

                        if "slate" in instance.data["families"]:
                            frame_start += 1

                        tags = ["review"]

                        if baked_mov_path:
                            tags.append("delete")

                        representation = {
                            "name": rem.split(".")[-1],
                            "ext": "{}".format(rem.split(".")[-1]),
                            "files": rem,
                            "stagingDir": root,
                            "frameStart": frame_start,
                            "anatomy_template": "render",
                            "fps": fps,
                            "tags": tags
                        }
                        instance.data["representations"].append(
                            representation)

            else:
                # we have no subset so we take every collection and create one
                # from it
                for collection in collections:
                    instance = context.create_instance(str(collection))
                    self.log.info("Creating subset from: %s" % str(collection))

                    # Ensure each instance gets a unique reference to the data
                    data = copy.deepcopy(data)

                    # If no subset provided, get it from collection's head
                    subset = data.get("subset", collection.head.rstrip("_. "))

                    # If no start or end frame provided, get it from collection
                    indices = list(collection.indexes)
                    start = data.get("frameStart", indices[0])
                    end = data.get("frameEnd", indices[-1])

                    ext = list(collection)[0].split(".")[-1]

                    if "review" not in families:
                        families.append("review")

                    instance.data.update(
                        {
                            "name": str(collection),
                            "family": families[0],  # backwards compatibility
                            "families": list(families),
                            "subset": subset,
                            "asset": data.get(
                                "asset", api.Session["AVALON_ASSET"]),
                            "stagingDir": root,
                            "frameStart": start,
                            "frameEnd": end,
                            "fps": fps,
                            "source": data.get("source", ""),
                            "pixelAspect": pixel_aspect,
                            "resolutionWidth": resolution_width,
                            "resolutionHeight": resolution_height,
                            "version": version
                        }
                    )
                    if lut_path:
                        instance.data.update({"lutPath": lut_path})

                    instance.append(collection)
                    instance.context.data["fps"] = fps

                    if "representations" not in instance.data:
                        instance.data["representations"] = []

                    representation = {
                        "name": ext,
                        "ext": "{}".format(ext),
                        "files": list(collection),
                        "stagingDir": root,
                        "anatomy_template": "render",
                        "fps": fps,
                        "tags": ["review"],
                    }
                    instance.data["representations"].append(representation)

                    # temporary ... allow only beauty on ftrack
                    if session['AVALON_APP'] == "maya":
                        AOV_filter = ['beauty']
                        for aov in AOV_filter:
                            if aov not in instance.data['subset']:
                                instance.data['families'].remove('review')
                                instance.data['families'].remove('ftrack')
                                representation["tags"].remove('review')

                    self.log.debug(
                        "__ representations {}".format(
                            instance.data["representations"]))
                    self.log.debug(
                        "__ instance.data {}".format(instance.data))
129
pype/plugins/global/publish/collect_instance_anatomy_data.py
Normal file
@ -0,0 +1,129 @@
"""
|
||||
Requires:
|
||||
context -> anatomyData
|
||||
context -> projectEntity
|
||||
context -> assetEntity
|
||||
instance -> asset
|
||||
instance -> subset
|
||||
instance -> family
|
||||
|
||||
Optional:
|
||||
instance -> version
|
||||
instance -> resolutionWidth
|
||||
instance -> resolutionHeight
|
||||
instance -> fps
|
||||
|
||||
Provides:
|
||||
instance -> projectEntity
|
||||
instance -> assetEntity
|
||||
instance -> anatomyData
|
||||
instance -> version
|
||||
instance -> latestVersion
|
||||
"""
|
||||
|
||||
import copy
|
||||
import json
|
||||
|
||||
from avalon import io
|
||||
import pyblish.api
|
||||
|
||||
|
||||
class CollectInstanceAnatomyData(pyblish.api.InstancePlugin):
|
||||
"""Fill templates with data needed for publish"""
|
||||
|
||||
order = pyblish.api.CollectorOrder + 0.49
|
||||
label = "Collect instance anatomy data"
|
||||
|
||||
def process(self, instance):
|
||||
# get all the stuff from the database
|
||||
anatomy_data = copy.deepcopy(instance.context.data["anatomyData"])
|
||||
project_entity = instance.context.data["projectEntity"]
|
||||
context_asset_entity = instance.context.data["assetEntity"]
|
||||
|
||||
asset_name = instance.data["asset"]
|
||||
# Check if asset name is the same as what is in context
|
||||
# - they may be different, e.g. in NukeStudio
|
||||
if context_asset_entity["name"] == asset_name:
|
||||
asset_entity = context_asset_entity
|
||||
|
||||
else:
|
||||
asset_entity = io.find_one({
|
||||
"type": "asset",
|
||||
"name": asset_name,
|
||||
"parent": project_entity["_id"]
|
||||
})
|
||||
|
||||
subset_name = instance.data["subset"]
|
||||
version_number = instance.data.get("version")
|
||||
latest_version = None
|
||||
|
||||
if asset_entity:
|
||||
subset_entity = io.find_one({
|
||||
"type": "subset",
|
||||
"name": subset_name,
|
||||
"parent": asset_entity["_id"]
|
||||
})
|
||||
|
||||
if subset_entity is None:
|
||||
self.log.debug("Subset entity does not exist yet.")
|
||||
else:
|
||||
version_entity = io.find_one(
|
||||
{
|
||||
"type": "version",
|
||||
"parent": subset_entity["_id"]
|
||||
},
|
||||
sort=[("name", -1)]
|
||||
)
|
||||
if version_entity:
|
||||
latest_version = version_entity["name"]
|
||||
|
||||
# If version is not specified for instance or context
|
||||
if version_number is None:
|
||||
# TODO we should be able to change default version by studio
|
||||
# preferences (like start with version number `0`)
|
||||
version_number = 1
|
||||
# use latest version (+1) if already any exist
|
||||
if latest_version is not None:
|
||||
version_number += int(latest_version)
|
||||
|
||||
anatomy_updates = {
|
||||
"asset": asset_name,
|
||||
"family": instance.data["family"],
|
||||
"subset": subset_name,
|
||||
"version": version_number
|
||||
}
|
||||
|
||||
task_name = instance.data.get("task")
|
||||
if task_name:
|
||||
anatomy_updates["task"] = task_name
|
||||
|
||||
# Version should not be collected since may be instance
|
||||
anatomy_data.update(anatomy_updates)
|
||||
|
||||
resolution_width = instance.data.get("resolutionWidth")
|
||||
if resolution_width:
|
||||
anatomy_data["resolution_width"] = resolution_width
|
||||
|
||||
resolution_height = instance.data.get("resolutionHeight")
|
||||
if resolution_height:
|
||||
anatomy_data["resolution_height"] = resolution_height
|
||||
|
||||
pixel_aspect = instance.data.get("pixelAspect")
|
||||
if pixel_aspect:
|
||||
anatomy_data["pixel_aspect"] = float("{:0.2f}".format(
|
||||
float(pixel_aspect)))
|
||||
|
||||
fps = instance.data.get("fps")
|
||||
if fps:
|
||||
anatomy_data["fps"] = float("{:0.2f}".format(
|
||||
float(fps)))
|
||||
|
||||
instance.data["projectEntity"] = project_entity
|
||||
instance.data["assetEntity"] = asset_entity
|
||||
instance.data["anatomyData"] = anatomy_data
|
||||
instance.data["latestVersion"] = latest_version
|
||||
# TODO should be version number set here?
|
||||
instance.data["version"] = version_number
|
||||
|
||||
self.log.info("Instance anatomy Data collected")
|
||||
self.log.debug(json.dumps(anatomy_data, indent=4))
|
||||
|
|
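A minimal sketch of the default-version logic above: start at 1, or continue one past the latest existing version (the queried value below is hypothetical):

latest_version = 3  # as if queried from the database
version_number = 1
if latest_version is not None:
    version_number += int(latest_version)
print(version_number)  # 4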
@ -1,24 +0,0 @@
"""
Requires:
    None

Provides:
    context -> projectData
"""

import pyblish.api
import pype.api as pype


class CollectProjectData(pyblish.api.ContextPlugin):
    """Collecting project data from avalon db"""

    label = "Collect Project Data"
    order = pyblish.api.CollectorOrder - 0.1
    hosts = ["nukestudio"]

    def process(self, context):
        # get project data from avalon db
        context.data["projectData"] = pype.get_project()["data"]

        return
94
pype/plugins/global/publish/collect_rendered_files.py
Normal file
@ -0,0 +1,94 @@
import os
import json

import pyblish.api
from avalon import api

from pypeapp import PypeLauncher


class CollectRenderedFiles(pyblish.api.ContextPlugin):
    """
    This collector will try to find json files in provided
    `PYPE_PUBLISH_DATA`. Those files _MUST_ share the same context.

    """
    order = pyblish.api.CollectorOrder - 0.0001
    targets = ["filesequence"]
    label = "Collect rendered frames"

    _context = None

    def _load_json(self, path):
        assert os.path.isfile(path), ("path to json file doesn't exist")
        data = None
        with open(path, "r") as json_file:
            try:
                data = json.load(json_file)
            except Exception as exc:
                self.log.error(
                    "Error loading json: "
                    "{} - Exception: {}".format(path, exc)
                )
        return data

    def _process_path(self, data):
        # validate basic necessary data
        data_err = "invalid json file - missing data"
        required = ["asset", "user", "comment",
                    "job", "instances", "session", "version"]
        assert all(elem in data.keys() for elem in required), data_err

        # set context by first json file
        ctx = self._context.data

        ctx["asset"] = ctx.get("asset") or data.get("asset")
        ctx["intent"] = ctx.get("intent") or data.get("intent")
        ctx["comment"] = ctx.get("comment") or data.get("comment")
        ctx["user"] = ctx.get("user") or data.get("user")
        ctx["version"] = ctx.get("version") or data.get("version")

        # basic sanity check to see if we are working in same context
        # if some other json file has different context, bail out.
        ctx_err = "inconsistent contexts in json files - %s"
        assert ctx.get("asset") == data.get("asset"), ctx_err % "asset"
        assert ctx.get("intent") == data.get("intent"), ctx_err % "intent"
        assert ctx.get("comment") == data.get("comment"), ctx_err % "comment"
        assert ctx.get("user") == data.get("user"), ctx_err % "user"
        assert ctx.get("version") == data.get("version"), ctx_err % "version"

        # ftrack credentials are passed as environment variables by Deadline
        # to publish job, but Muster doesn't pass them.
        if data.get("ftrack") and not os.environ.get("FTRACK_API_USER"):
            ftrack = data.get("ftrack")
            os.environ["FTRACK_API_USER"] = ftrack["FTRACK_API_USER"]
            os.environ["FTRACK_API_KEY"] = ftrack["FTRACK_API_KEY"]
            os.environ["FTRACK_SERVER"] = ftrack["FTRACK_SERVER"]

        # now we can just add instances from json file and we are done
        for instance in data.get("instances"):
            self.log.info(" - processing instance for {}".format(
                instance.get("subset")))
            i = self._context.create_instance(instance.get("subset"))
            self.log.info("remapping paths ...")
            i.data["representations"] = [PypeLauncher().path_remapper(
                data=r) for r in instance.get("representations")]
            i.data.update(instance)

    def process(self, context):
        self._context = context

        assert os.environ.get("PYPE_PUBLISH_DATA"), (
            "Missing `PYPE_PUBLISH_DATA`")
        paths = os.environ["PYPE_PUBLISH_DATA"].split(os.pathsep)

        session_set = False
        for path in paths:
            data = self._load_json(path)
            if not session_set:
                self.log.info("Setting session using data from file")
                api.Session.update(data.get("session"))
                os.environ.update(data.get("session"))
                session_set = True
            assert data, "failed to load json file"
            self._process_path(data)
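A hedged sketch of the context consistency assertions above, with sample dicts: every metadata json loaded from `PYPE_PUBLISH_DATA` must agree on the shared keys:

ctx = {"asset": "120sh020", "user": "hubert", "version": 23}
data = {"asset": "120sh020", "user": "hubert", "version": 23}
ctx_err = "inconsistent contexts in json files - %s"
for key in ("asset", "user", "version"):
    assert ctx.get(key) == data.get(key), ctx_err % key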
60
pype/plugins/global/publish/collect_resources_path.py
Normal file
@ -0,0 +1,60 @@
"""
|
||||
Requires:
|
||||
context -> anatomy
|
||||
context -> anatomyData
|
||||
|
||||
Provides:
|
||||
instance -> publishDir
|
||||
instance -> resourcesDir
|
||||
"""
|
||||
|
||||
import os
|
||||
import copy
|
||||
|
||||
import pyblish.api
|
||||
from avalon import api
|
||||
|
||||
|
||||
class CollectResourcesPath(pyblish.api.InstancePlugin):
|
||||
"""Generate directory path where the files and resources will be stored"""
|
||||
|
||||
label = "Collect Resources Path"
|
||||
order = pyblish.api.CollectorOrder + 0.495
|
||||
|
||||
def process(self, instance):
|
||||
anatomy = instance.context.data["anatomy"]
|
||||
|
||||
template_data = copy.deepcopy(instance.data["anatomyData"])
|
||||
|
||||
# This is for cases of Deprecated anatomy without `folder`
|
||||
# TODO remove when all clients have solved this issue
|
||||
template_data.update({
|
||||
"frame": "FRAME_TEMP",
|
||||
"representation": "TEMP"
|
||||
})
|
||||
|
||||
anatomy_filled = anatomy.format(template_data)
|
||||
|
||||
if "folder" in anatomy.templates["publish"]:
|
||||
publish_folder = anatomy_filled["publish"]["folder"]
|
||||
else:
|
||||
# solve deprecated situation when `folder` key is not underneath
|
||||
# `publish` anatomy
|
||||
project_name = api.Session["AVALON_PROJECT"]
|
||||
self.log.warning((
|
||||
"Deprecation warning: Anatomy does not have set `folder`"
|
||||
" key underneath `publish` (in global of for project `{}`)."
|
||||
).format(project_name))
|
||||
|
||||
file_path = anatomy_filled["publish"]["path"]
|
||||
# Directory
|
||||
publish_folder = os.path.dirname(file_path)
|
||||
|
||||
publish_folder = os.path.normpath(publish_folder)
|
||||
resources_folder = os.path.join(publish_folder, "resources")
|
||||
|
||||
instance.data["publishDir"] = publish_folder
|
||||
instance.data["resourcesDir"] = resources_folder
|
||||
|
||||
self.log.debug("publishDir: \"{}\"".format(publish_folder))
|
||||
self.log.debug("resourcesDir: \"{}\"".format(resources_folder))
|
||||
|
|
@@ -21,7 +21,7 @@ class CollectSceneVersion(pyblish.api.ContextPlugin):
        if '<shell>' in filename:
            return

        rootVersion = pype.get_version_from_path(filename)
        rootVersion = int(pype.get_version_from_path(filename))
        context.data['version'] = rootVersion

        self.log.info("{}".format(type(rootVersion)))
        self.log.info('Scene Version: %s' % context.data.get('version'))
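The added `int()` cast above guards against the helper returning a string. As a rough sketch of what a `get_version_from_path` style helper typically does (the real implementation in `pype` may use a different pattern and error handling):

import re


def get_version_from_path(path):
    # Return the last v### style token found in the path, as a string;
    # a simplified stand-in, not pype's actual implementation.
    matches = re.findall(r"v(\d+)", path, re.IGNORECASE)
    if not matches:
        raise ValueError("No version found in path: {}".format(path))
    return matches[-1]


# The caller still casts, exactly as the plugin above does:
assert int(get_version_from_path("/shots/sh010/scene_v012.ma")) == 12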
@@ -1,113 +0,0 @@
"""
Requires:
    session  -> AVALON_PROJECT
    context  -> anatomy (pypeapp.Anatomy)
    instance -> subset
    instance -> asset
    instance -> family

Provides:
    instance -> template
    instance -> assumedTemplateData
    instance -> assumedDestination
"""

import os

from avalon import io, api
import pyblish.api


class CollectTemplates(pyblish.api.InstancePlugin):
    """Fill templates with data needed for publish"""

    order = pyblish.api.CollectorOrder + 0.1
    label = "Collect and fill Templates"
    hosts = ["maya", "nuke", "standalonepublisher"]

    def process(self, instance):
        # get all the stuff from the database
        subset_name = instance.data["subset"]
        asset_name = instance.data["asset"]
        project_name = api.Session["AVALON_PROJECT"]

        project = io.find_one(
            {
                "type": "project",
                "name": project_name
            },
            projection={"config": True, "data": True}
        )

        template = project["config"]["template"]["publish"]
        anatomy = instance.context.data['anatomy']

        asset = io.find_one({
            "type": "asset",
            "name": asset_name,
            "parent": project["_id"]
        })

        assert asset, ("No asset found by the name '{}' "
                       "in project '{}'".format(asset_name, project_name))
        silo = asset.get('silo')

        subset = io.find_one({
            "type": "subset",
            "name": subset_name,
            "parent": asset["_id"]
        })

        # assume there is no version yet, we start at `1`
        version = None
        version_number = 1
        if subset is not None:
            version = io.find_one(
                {
                    "type": "version",
                    "parent": subset["_id"]
                },
                sort=[("name", -1)]
            )

        # if there is a subset there ought to be a version
        if version is not None:
            version_number += int(version["name"])

        hierarchy = asset['data']['parents']
        if hierarchy:
            # hierarchy = os.path.sep.join(hierarchy)
            hierarchy = os.path.join(*hierarchy)

        template_data = {"root": api.Session["AVALON_PROJECTS"],
                         "project": {"name": project_name,
                                     "code": project['data']['code']},
                         "silo": silo,
                         "family": instance.data['family'],
                         "asset": asset_name,
                         "subset": subset_name,
                         "version": version_number,
                         "hierarchy": hierarchy.replace("\\", "/"),
                         "representation": "TEMP"}

        resolution_width = instance.data.get("resolutionWidth")
        resolution_height = instance.data.get("resolutionHeight")
        fps = instance.data.get("fps")

        if resolution_width:
            template_data["resolution_width"] = resolution_width
        if resolution_height:
            template_data["resolution_height"] = resolution_height
        if fps:
            template_data["fps"] = fps

        instance.data["template"] = template
        instance.data["assumedTemplateData"] = template_data

        # We take the parent folder of representation 'filepath'
        instance.data["assumedDestination"] = os.path.dirname(
            (anatomy.format(template_data))["publish"]["path"]
        )
        self.log.info("Assumed Destination has been created...")
        self.log.debug("__ assumedTemplateData: `{}`".format(
            instance.data["assumedTemplateData"]))
        self.log.debug("__ template: `{}`".format(instance.data["template"]))
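The `anatomy.format(template_data)` call in the removed plugin boils down to a keyed template fill. A stand-in with plain `str.format` shows the idea; the template string below is illustrative only, the real Anatomy templates come from the project configuration:

publish_template = (
    "{root}/{project[name]}/{hierarchy}/{asset}/publish/"
    "{family}/{subset}/v{version:0>3}/"
    "{subset}_v{version:0>3}.{representation}"
)

template_data = {
    "root": "/projects",
    "project": {"name": "demo", "code": "dm"},
    "hierarchy": "seq010/sh010",
    "asset": "sh010",
    "family": "render",
    "subset": "renderMain",
    "version": 1,
    "representation": "exr",
}

# -> /projects/demo/seq010/sh010/publish/render/renderMain/v001/...
print(publish_template.format(**template_data))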
@@ -4,7 +4,6 @@ import copy

import pype.api
import pyblish
from pypeapp import config


class ExtractBurnin(pype.api.Extractor):

@@ -16,7 +15,7 @@ class ExtractBurnin(pype.api.Extractor):
        `tags` including `burnin`
    """

    label = "Quicktime with burnins"
    label = "Extract burnins"
    order = pyblish.api.ExtractorOrder + 0.03
    families = ["review", "burnin"]
    hosts = ["nuke", "maya", "shell"]
@@ -26,46 +25,38 @@ class ExtractBurnin(pype.api.Extractor):
        if "representations" not in instance.data:
            raise RuntimeError("Burnin needs already created mov to work on.")

        version = instance.context.data.get(
            'version', instance.data.get('version'))
        context_data = instance.context.data

        version = instance.data.get(
            'version', instance.context.data.get('version'))
        frame_start = int(instance.data.get("frameStart") or 0)
        frame_end = int(instance.data.get("frameEnd") or 1)
        duration = frame_end - frame_start + 1
        handle_start = instance.data.get("handleStart",
                                         context_data.get("handleStart"))
        handle_end = instance.data.get("handleEnd",
                                       context_data.get("handleEnd"))

        prep_data = {
            "username": instance.context.data['user'],
            "asset": os.environ['AVALON_ASSET'],
            "task": os.environ['AVALON_TASK'],
            "frame_start": frame_start,
            "frame_end": frame_end,
            "duration": duration,
            "version": int(version),
            "comment": instance.context.data.get("comment", ""),
            "intent": instance.context.data.get("intent", "")
        }
        frame_start_handle = frame_start - handle_start
        frame_end_handle = frame_end + handle_end
        duration = frame_end_handle - frame_start_handle + 1

        # Add datetime data to preparation data
        prep_data.update(config.get_datetime_data())
        prep_data = copy.deepcopy(instance.data["anatomyData"])

        slate_frame_start = frame_start
        slate_frame_end = frame_end
        slate_duration = duration

        # exception for slate workflow
        if "slate" in instance.data["families"]:
            slate_frame_start = frame_start - 1
            slate_frame_end = frame_end
            slate_duration = slate_frame_end - slate_frame_start + 1
        if "slate.farm" in instance.data["families"]:
            frame_start_handle += 1
            duration -= 1

        prep_data.update({
            "slate_frame_start": slate_frame_start,
            "slate_frame_end": slate_frame_end,
            "slate_duration": slate_duration
            "frame_start": frame_start_handle,
            "frame_end": frame_end_handle,
            "duration": duration,
            "version": int(version),
            "comment": instance.context.data.get("comment", "")
        })

        # Update data with template data
        template_data = instance.data.get("assumedTemplateData") or {}
        prep_data.update(template_data)
        intent = instance.context.data.get("intent", {}).get("label")
        if intent:
            prep_data["intent"] = intent

        # get anatomy project
        anatomy = instance.context.data['anatomy']
@@ -77,27 +68,77 @@ class ExtractBurnin(pype.api.Extractor):
            if "burnin" not in repre.get("tags", []):
                continue

            is_sequence = "sequence" in repre.get("tags", [])

            # no-handles switch from profile tags
            no_handles = "no-handles" in repre.get("tags", [])

            stagingdir = repre["stagingDir"]
            filename = "{0}".format(repre["files"])

            if is_sequence:
                filename = repre["sequence_file"]

            name = "_burnin"
            ext = os.path.splitext(filename)[1]
            movieFileBurnin = filename.replace(ext, "") + name + ext

            if is_sequence:
                fn_splt = filename.split(".")
                movieFileBurnin = ".".join(
                    ((fn_splt[0] + name), fn_splt[-2], fn_splt[-1]))

            self.log.debug("__ movieFileBurnin: `{}`".format(movieFileBurnin))

            full_movie_path = os.path.join(
                os.path.normpath(stagingdir), repre["files"]
            )
                os.path.normpath(stagingdir), filename)
            full_burnin_path = os.path.join(
                os.path.normpath(stagingdir), movieFileBurnin
            )
                os.path.normpath(stagingdir), movieFileBurnin)

            self.log.debug("__ full_movie_path: {}".format(full_movie_path))
            self.log.debug("__ full_burnin_path: {}".format(full_burnin_path))

            # create copy of prep_data for anatomy formatting
            _prep_data = copy.deepcopy(prep_data)
            _prep_data["representation"] = repre["name"]
            _prep_data["anatomy"] = (
                anatomy.format_all(_prep_data).get("solved") or {}
            )
            filled_anatomy = anatomy.format_all(_prep_data)
            _prep_data["anatomy"] = filled_anatomy.get_solved()

            # copy frame range variables
            frame_start_cp = frame_start_handle
            frame_end_cp = frame_end_handle
            duration_cp = duration

            if no_handles:
                frame_start_cp = frame_start
                frame_end_cp = frame_end
                duration_cp = frame_end_cp - frame_start_cp + 1
            _prep_data.update({
                "frame_start": frame_start_cp,
                "frame_end": frame_end_cp,
                "duration": duration_cp,
            })

            # dealing with slates
            slate_frame_start = frame_start_cp
            slate_frame_end = frame_end_cp
            slate_duration = duration_cp

            # exception for slate workflow
            if ("slate" in instance.data["families"]):
                if "slate-frame" in repre.get("tags", []):
                    slate_frame_start = frame_start_cp - 1
                    slate_frame_end = frame_end_cp
                    slate_duration = duration_cp + 1

            self.log.debug(
                "__1 slate_frame_start: {}".format(slate_frame_start))

            _prep_data.update({
                "slate_frame_start": slate_frame_start,
                "slate_frame_end": slate_frame_end,
                "slate_duration": slate_duration
            })

            burnin_data = {
                "input": full_movie_path.replace("\\", "/"),
                "codec": repre.get("codec", []),
@@ -144,15 +185,35 @@ class ExtractBurnin(pype.api.Extractor):
            self.log.debug("Output: {}".format(output))

            repre_update = {
                "anatomy_template": "render",
                "files": movieFileBurnin,
                "name": repre["name"],
                "tags": [x for x in repre["tags"] if x != "delete"]
            }

            if is_sequence:
                burnin_seq_files = list()
                for frame_index in range(_prep_data["duration"] + 1):
                    if frame_index == 0:
                        continue
                    burnin_seq_files.append(movieFileBurnin % frame_index)
                repre_update.update({
                    "files": burnin_seq_files
                })

            instance.data["representations"][i].update(repre_update)

            # removing the source mov file
            os.remove(full_movie_path)
            self.log.debug("Removed: `{}`".format(full_movie_path))
            if is_sequence:
                for frame_index in range(_prep_data["duration"] + 1):
                    if frame_index == 0:
                        continue
                    rm_file = full_movie_path % frame_index
                    os.remove(rm_file)
                    self.log.debug("Removed: `{}`".format(rm_file))
            else:
                os.remove(full_movie_path)
                self.log.debug("Removed: `{}`".format(full_movie_path))

        # Remove any representations tagged for deletion.
        for repre in instance.data["representations"]:
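For reference, the burnin step ultimately runs ffmpeg with a drawtext filter over the source movie. A hedged sketch of such a command; the plugin actually delegates to pype's burnin tooling with values from `burnin_data`, so the paths and overlay text here are assumed:

import subprocess

full_movie_path = "/tmp/staging/review.mov"         # assumed input
full_burnin_path = "/tmp/staging/review_burnin.mov"
text = "sh010 v001 frame %{frame_num}"              # assumed overlay

cmd = [
    "ffmpeg", "-y",
    "-i", full_movie_path,
    "-vf", (
        "drawtext=text='{}':x=10:y=h-th-10:"
        "fontcolor=white:fontsize=24:box=1:boxcolor=black@0.5"
    ).format(text),
    "-c:a", "copy",
    full_burnin_path,
]
subprocess.check_call(cmd)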
@@ -28,29 +28,33 @@ class ExtractHierarchyToAvalon(pyblish.api.ContextPlugin):
        entity_type = entity_data["entity_type"]

        data = {}

        data["inputs"] = entity_data.get("inputs", [])
        data["entityType"] = entity_type

        # Custom attributes.
        for k, val in entity_data.get("custom_attributes", {}).items():
            data[k] = val

        # Tasks.
        tasks = entity_data.get("tasks", [])
        if tasks is not None or len(tasks) > 0:
            data["tasks"] = tasks
        parents = []
        visualParent = None
        # do not store project's id as visualParent (silo asset)
        if self.project is not None:
            if self.project["_id"] != parent["_id"]:
                visualParent = parent["_id"]
                parents.extend(parent.get("data", {}).get("parents", []))
                parents.append(parent["name"])
        data["visualParent"] = visualParent
        data["parents"] = parents
        if entity_type.lower() != "project":
            data["inputs"] = entity_data.get("inputs", [])

            # Tasks.
            tasks = entity_data.get("tasks", [])
            if tasks is not None or len(tasks) > 0:
                data["tasks"] = tasks
            parents = []
            visualParent = None
            # do not store project's id as visualParent (silo asset)
            if self.project is not None:
                if self.project["_id"] != parent["_id"]:
                    visualParent = parent["_id"]
                    parents.extend(
                        parent.get("data", {}).get("parents", [])
                    )
                    parents.append(parent["name"])
            data["visualParent"] = visualParent
            data["parents"] = parents

        update_data = True
        # Process project
        if entity_type.lower() == "project":
            entity = io.find_one({"type": "project"})
@@ -58,8 +62,9 @@ class ExtractHierarchyToAvalon(pyblish.api.ContextPlugin):
            assert (entity is not None), "Did not find project in DB"

            # get data from already existing project
            for key, value in entity.get("data", {}).items():
                data[key] = value
            cur_entity_data = entity.get("data") or {}
            cur_entity_data.update(data)
            data = cur_entity_data

            self.project = entity
        # Raise error if project or parent are not set
@@ -70,16 +75,63 @@ class ExtractHierarchyToAvalon(pyblish.api.ContextPlugin):
        # Else process asset
        else:
            entity = io.find_one({"type": "asset", "name": name})
            # Create entity if it doesn't exist
            if entity is None:
                entity = self.create_avalon_asset(name, data)
            if entity:
                # Do not override data, only update
                cur_entity_data = entity.get("data") or {}
                cur_entity_data.update(data)
                data = cur_entity_data
            else:
                # Skip updating data
                update_data = False

            # Update entity data with input data
            io.update_many({"_id": entity["_id"]}, {"$set": {"data": data}})
                archived_entities = io.find({
                    "type": "archived_asset",
                    "name": name
                })
                unarchive_entity = None
                for archived_entity in archived_entities:
                    archived_parents = (
                        archived_entity
                        .get("data", {})
                        .get("parents")
                    )
                    if data["parents"] == archived_parents:
                        unarchive_entity = archived_entity
                        break

                if unarchive_entity is None:
                    # Create entity if it doesn't exist
                    entity = self.create_avalon_asset(name, data)
                else:
                    # Unarchive if entity was archived
                    entity = self.unarchive_entity(unarchive_entity, data)

        if update_data:
            # Update entity data with input data
            io.update_many(
                {"_id": entity["_id"]},
                {"$set": {"data": data}}
            )

        if "childs" in entity_data:
            self.import_to_avalon(entity_data["childs"], entity)

    def unarchive_entity(self, entity, data):
        # Unarchived asset should not use the same data
        new_entity = {
            "_id": entity["_id"],
            "schema": "avalon-core:asset-3.0",
            "name": entity["name"],
            "parent": self.project["_id"],
            "type": "asset",
            "data": data
        }
        io.replace_one(
            {"_id": entity["_id"]},
            new_entity
        )
        return new_entity

    def create_avalon_asset(self, name, data):
        item = {
            "schema": "avalon-core:asset-3.0",
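The unarchive path above hinges on comparing the stored `data.parents` list with the parents computed for the incoming entity. In isolation the matching rule is simply the following (document shapes mirror avalon's asset schema as used above):

def find_unarchive_candidate(archived_entities, parents):
    # First archived asset whose stored parents match wins.
    for entity in archived_entities:
        if entity.get("data", {}).get("parents") == parents:
            return entity
    return None


archived = [
    {"name": "sh010", "data": {"parents": ["seq020"]}},
    {"name": "sh010", "data": {"parents": ["seq010"]}},
]
match = find_unarchive_candidate(archived, ["seq010"])
assert match["data"]["parents"] == ["seq010"]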
@@ -1,33 +1,20 @@
import os

import pyblish.api
import clique
import pype.api
import pype.lib


class ExtractJpegEXR(pyblish.api.InstancePlugin):
    """Resolve any dependency issues

    This plug-in resolves any paths which, if not updated might break
    the published file.

    The order of families is important, when working with lookdev you want to
    first publish the texture, update the texture paths in the nodes and then
    publish the shading network. Same goes for file dependent assets.
    """
    """Create jpg thumbnail from sequence using ffmpeg"""

    label = "Extract Jpeg EXR"
    hosts = ["shell"]
    order = pyblish.api.ExtractorOrder
    families = ["imagesequence", "render", "write", "source"]
    families = ["imagesequence", "render", "render2d", "source"]
    enabled = False

    def process(self, instance):
        start = instance.data.get("frameStart")
        stagingdir = os.path.normpath(instance.data.get("stagingDir"))

        collected_frames = os.listdir(stagingdir)
        collections, remainder = clique.assemble(collected_frames)

        self.log.info("subset {}".format(instance.data['subset']))
        if 'crypto' in instance.data['subset']:
@@ -40,10 +27,16 @@ class ExtractJpegEXR(pyblish.api.InstancePlugin):
        representations_new = representations[:]

        for repre in representations:
            tags = repre.get("tags", [])
            self.log.debug(repre)
            if 'review' not in repre['tags']:
                return
            valid = 'review' in tags or "thumb-nuke" in tags
            if not valid:
                continue

            if not isinstance(repre['files'], list):
                continue

            stagingdir = os.path.normpath(repre.get("stagingDir"))
            input_file = repre['files'][0]

            # input_file = (
@@ -55,8 +48,8 @@ class ExtractJpegEXR(pyblish.api.InstancePlugin):
            filename = os.path.splitext(input_file)[0]
            if not filename.endswith('.'):
                filename += "."
            jpegFile = filename + "jpg"
            full_output_path = os.path.join(stagingdir, jpegFile)
            jpeg_file = filename + "jpg"
            full_output_path = os.path.join(stagingdir, jpeg_file)

            self.log.info("output {}".format(full_output_path))

@@ -65,9 +58,10 @@ class ExtractJpegEXR(pyblish.api.InstancePlugin):
            proj_name = os.environ.get('AVALON_PROJECT', '__default__')
            profile = config_data.get(proj_name, config_data['__default__'])

            ffmpeg_path = pype.lib.get_ffmpeg_tool_path("ffmpeg")

            jpeg_items = []
            jpeg_items.append(
                os.path.join(os.environ.get("FFMPEG_PATH"), "ffmpeg"))
            jpeg_items.append(ffmpeg_path)
            # overwrite file if it already exists
            jpeg_items.append("-y")
            # use same input args as with mov
@@ -87,9 +81,9 @@ class ExtractJpegEXR(pyblish.api.InstancePlugin):
                instance.data["representations"] = []

            representation = {
                'name': 'jpg',
                'name': 'thumbnail',
                'ext': 'jpg',
                'files': jpegFile,
                'files': jpeg_file,
                "stagingDir": stagingdir,
                "thumbnail": True,
                "tags": ['thumbnail']
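The thumbnail extraction amounts to grabbing a single frame with ffmpeg. A minimal sketch with assumed paths; the plugin resolves the binary via `pype.lib.get_ffmpeg_tool_path` and builds its argument list from `jpeg_items` as shown above:

import subprocess

input_file = "/tmp/staging/render.0001.exr"   # assumed first frame
jpeg_file = "/tmp/staging/render.0001.jpg"

cmd = [
    "ffmpeg",
    "-y",               # overwrite output if it already exists
    "-i", input_file,
    "-frames:v", "1",   # a single output frame is enough
    jpeg_file,
]
subprocess.check_call(cmd)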
@@ -2,6 +2,7 @@ import os
import pyblish.api
import clique
import pype.api
import pype.lib


class ExtractReview(pyblish.api.InstancePlugin):
@@ -11,7 +12,8 @@ class ExtractReview(pyblish.api.InstancePlugin):
    otherwise the representation is ignored.

    All new representations are created and encoded by ffmpeg following
    presets found in `pype-config/presets/plugins/global/publish.json:ExtractReview:outputs`. To change the file extension
    presets found in `pype-config/presets/plugins/global/
    publish.json:ExtractReview:outputs`. To change the file extension
    filter values use the preset's attribute `ext_filter`
    """

@@ -22,316 +24,413 @@ class ExtractReview(pyblish.api.InstancePlugin):

    outputs = {}
    ext_filter = []
    to_width = 1920
    to_height = 1080

    def process(self, instance):
        to_width = 1920
        to_height = 1080

        output_profiles = self.outputs or {}

        inst_data = instance.data
        fps = inst_data.get("fps")
        start_frame = inst_data.get("frameStart")
        resolution_width = inst_data.get("resolutionWidth", to_width)
        resolution_height = inst_data.get("resolutionHeight", to_height)
        context_data = instance.context.data
        fps = float(inst_data.get("fps"))
        frame_start = inst_data.get("frameStart")
        frame_end = inst_data.get("frameEnd")
        handle_start = inst_data.get("handleStart",
                                     context_data.get("handleStart"))
        handle_end = inst_data.get("handleEnd",
                                   context_data.get("handleEnd"))
        pixel_aspect = inst_data.get("pixelAspect", 1)
        resolution_width = inst_data.get("resolutionWidth", self.to_width)
        resolution_height = inst_data.get("resolutionHeight", self.to_height)
        self.log.debug("Families In: `{}`".format(inst_data["families"]))
        self.log.debug("__ frame_start: {}".format(frame_start))
        self.log.debug("__ frame_end: {}".format(frame_end))
        self.log.debug("__ handle_start: {}".format(handle_start))
        self.log.debug("__ handle_end: {}".format(handle_end))

        # get representations and loop them
        representations = inst_data["representations"]

        ffmpeg_path = pype.lib.get_ffmpeg_tool_path("ffmpeg")

        # filter out mov and img sequences
        representations_new = representations[:]
        for repre in representations:
            if repre['ext'] in self.ext_filter:
                tags = repre.get("tags", [])

                if "thumbnail" in tags:
                    continue

                self.log.info("Try repre: {}".format(repre))

                if "review" in tags:
                    staging_dir = repre["stagingDir"]
                    for name, profile in output_profiles.items():
                        self.log.debug("Profile name: {}".format(name))

                        ext = profile.get("ext", None)
                        if not ext:
                            ext = "mov"
                            self.log.warning(
                                str("`ext` attribute not in output "
                                    "profile. Setting to default ext: `mov`"))

                        self.log.debug(
                            "instance.families: {}".format(
                                instance.data['families']))
                        self.log.debug(
                            "profile.families: {}".format(profile['families']))

                        if any(item in instance.data['families']
                               for item in profile['families']):
                            if isinstance(repre["files"], list):
                                collections, remainder = clique.assemble(
                                    repre["files"])

                                full_input_path = os.path.join(
                                    staging_dir, collections[0].format(
                                        '{head}{padding}{tail}')
                                )

                                filename = collections[0].format('{head}')
                                if filename.endswith('.'):
                                    filename = filename[:-1]
                            else:
                                full_input_path = os.path.join(
                                    staging_dir, repre["files"])
                                filename = repre["files"].split(".")[0]

                            repr_file = filename + "_{0}.{1}".format(name, ext)

                            full_output_path = os.path.join(
                                staging_dir, repr_file)

                            self.log.info("input {}".format(full_input_path))
                            self.log.info("output {}".format(full_output_path))

                            repre_new = repre.copy()

                            new_tags = [x for x in tags if x != "delete"]
                            p_tags = profile.get('tags', [])
                            self.log.info("p_tags: `{}`".format(p_tags))
                            # add families
                            [instance.data["families"].append(t)
                             for t in p_tags
                             if t not in instance.data["families"]]

                            # add to tags
                            [new_tags.append(t) for t in p_tags
                             if t not in new_tags]

                            self.log.info("new_tags: `{}`".format(new_tags))

                            input_args = []

                            # overrides output file
                            input_args.append("-y")

                            # preset's input data
                            input_args.extend(profile.get('input', []))

                            # necessary input data
                            # adds start arg only if image sequence
                            if isinstance(repre["files"], list):
                                input_args.append(
                                    "-start_number {0} -framerate {1}".format(
                                        start_frame, fps))

                            input_args.append("-i {}".format(full_input_path))

                            for audio in instance.data.get("audio", []):
                                offset_frames = (
                                    instance.data.get("startFrameReview") -
                                    audio["offset"]
                                )
                                offset_seconds = offset_frames / fps

                                if offset_seconds > 0:
                                    input_args.append("-ss")
                                else:
                                    input_args.append("-itsoffset")

                                input_args.append(str(abs(offset_seconds)))

                                input_args.extend(
                                    ["-i", audio["filename"]]
                                )

                                # Need to merge audio if there are more
                                # than 1 input.
                                if len(instance.data["audio"]) > 1:
                                    input_args.extend(
                                        [
                                            "-filter_complex",
                                            "amerge",
                                            "-ac",
                                            "2"
                                        ]
                                    )

                            output_args = []
                            codec_args = profile.get('codec', [])
                            output_args.extend(codec_args)
                            # preset's output data
                            output_args.extend(profile.get('output', []))

                            # defining image ratios
                            resolution_ratio = float(resolution_width / (
                                resolution_height * pixel_aspect))
                            delivery_ratio = float(to_width) / float(to_height)
                            self.log.debug(resolution_ratio)
                            self.log.debug(delivery_ratio)

                            # get scale factor
                            scale_factor = to_height / (
                                resolution_height * pixel_aspect)
                            self.log.debug(scale_factor)

                            # letter_box
                            lb = profile.get('letter_box', 0)
                            if lb != 0:
                                ffmpet_width = to_width
                                ffmpet_height = to_height
                                if "reformat" not in p_tags:
                                    lb /= pixel_aspect
                                    if resolution_ratio != delivery_ratio:
                                        ffmpet_width = resolution_width
                                        ffmpet_height = int(
                                            resolution_height * pixel_aspect)
                                else:
                                    if resolution_ratio != delivery_ratio:
                                        lb /= scale_factor
                                    else:
                                        lb /= pixel_aspect

                                output_args.append(str(
                                    "-filter:v scale={0}x{1}:flags=lanczos,"
                                    "setsar=1,drawbox=0:0:iw:"
                                    "round((ih-(iw*(1/{2})))/2):t=fill:"
                                    "c=black,drawbox=0:ih-round((ih-(iw*("
                                    "1/{2})))/2):iw:round((ih-(iw*(1/{2})))"
                                    "/2):t=fill:c=black").format(
                                        ffmpet_width, ffmpet_height, lb))

                            # In case audio is longer than video.
                            output_args.append("-shortest")

                            # output filename
                            output_args.append(full_output_path)

                            self.log.debug(
                                "__ pixel_aspect: `{}`".format(pixel_aspect))
                            self.log.debug(
                                "__ resolution_width: `{}`".format(
                                    resolution_width))
                            self.log.debug(
                                "__ resolution_height: `{}`".format(
                                    resolution_height))

                            # scaling non-square pixels and 1920 width
                            if "reformat" in p_tags:
                                if resolution_ratio < delivery_ratio:
                                    self.log.debug("lower than delivery")
                                    width_scale = int(to_width * scale_factor)
                                    width_half_pad = int((
                                        to_width - width_scale)/2)
                                    height_scale = to_height
                                    height_half_pad = 0
                                else:
                                    self.log.debug("higher than delivery")
                                    width_scale = to_width
                                    width_half_pad = 0
                                    scale_factor = float(to_width) / float(
                                        resolution_width)
                                    self.log.debug(scale_factor)
                                    height_scale = int(
                                        resolution_height * scale_factor)
                                    height_half_pad = int(
                                        (to_height - height_scale)/2)

                                self.log.debug(
                                    "__ width_scale: `{}`".format(width_scale))
                                self.log.debug(
                                    "__ width_half_pad: `{}`".format(
                                        width_half_pad))
                                self.log.debug(
                                    "__ height_scale: `{}`".format(
                                        height_scale))
                                self.log.debug(
                                    "__ height_half_pad: `{}`".format(
                                        height_half_pad))

                                scaling_arg = str(
                                    "scale={0}x{1}:flags=lanczos,"
                                    "pad={2}:{3}:{4}:{5}:black,setsar=1"
                                ).format(width_scale, height_scale,
                                         to_width, to_height,
                                         width_half_pad,
                                         height_half_pad
                                         )

                                vf_back = self.add_video_filter_args(
                                    output_args, scaling_arg)
                                # add it to output_args
                                output_args.insert(0, vf_back)

                            # baking lut file application
                            lut_path = instance.data.get("lutPath")
                            if lut_path and ("bake-lut" in p_tags):
                                # removing gamma info as it is all baked in lut
                                gamma = next((g for g in input_args
                                              if "-gamma" in g), None)
                                if gamma:
                                    input_args.remove(gamma)

                                # create lut argument
                                lut_arg = "lut3d=file='{}'".format(
                                    lut_path.replace(
                                        "\\", "/").replace(":/", "\\:/")
                                )
                                lut_arg += ",colormatrix=bt601:bt709"

                                vf_back = self.add_video_filter_args(
                                    output_args, lut_arg)
                                # add it to output_args
                                output_args.insert(0, vf_back)
                                self.log.info("Added Lut to ffmpeg command")
                                self.log.debug(
                                    "_ output_args: `{}`".format(output_args))

                            mov_args = [
                                os.path.join(
                                    os.environ.get(
                                        "FFMPEG_PATH",
                                        ""), "ffmpeg"),
                                " ".join(input_args),
                                " ".join(output_args)
                            ]
                            subprcs_cmd = " ".join(mov_args)

                            # run subprocess
                            self.log.debug("Executing: {}".format(subprcs_cmd))
                            output = pype.api.subprocess(subprcs_cmd)
                            self.log.debug("Output: {}".format(output))

                            # create representation data
                            repre_new.update({
                                'name': name,
                                'ext': ext,
                                'files': repr_file,
                                "tags": new_tags,
                                "outputName": name,
                                "codec": codec_args,
                                "_profile": profile,
                                "resolutionHeight": resolution_height,
                                "resolutionWidth": resolution_width,
                            })
                            if repre_new.get('preview'):
                                repre_new.pop("preview")
                            if repre_new.get('thumbnail'):
                                repre_new.pop("thumbnail")

                            # adding representation
                            self.log.debug("Adding: {}".format(repre_new))
                            representations_new.append(repre_new)
                        else:
                            continue
            else:

            if repre['ext'] not in self.ext_filter:
                continue

            tags = repre.get("tags", [])

            if "thumbnail" in tags:
                continue

            self.log.info("Try repre: {}".format(repre))

            if "review" not in tags:
                continue

            staging_dir = repre["stagingDir"]

            # iterating preset output profiles
            for name, profile in output_profiles.items():
                repre_new = repre.copy()
                ext = profile.get("ext", None)
                p_tags = profile.get('tags', [])
                self.log.info("p_tags: `{}`".format(p_tags))

                # adding control for presets to be sequence
                # or single file
                is_sequence = ("sequence" in p_tags) and (ext in (
                    "png", "jpg", "jpeg"))

                # no-handles switch from profile tags
                no_handles = "no-handles" in p_tags

                self.log.debug("Profile name: {}".format(name))

                if not ext:
                    ext = "mov"
                    self.log.warning(
                        str("`ext` attribute not in output "
                            "profile. Setting to default ext: `mov`"))

                self.log.debug(
                    "instance.families: {}".format(
                        instance.data['families']))
                self.log.debug(
                    "profile.families: {}".format(profile['families']))

                profile_family_check = False
                for _family in profile['families']:
                    if _family in instance.data['families']:
                        profile_family_check = True
                        break

                if not profile_family_check:
                    continue

                if isinstance(repre["files"], list):
                    collections, remainder = clique.assemble(
                        repre["files"])

                    full_input_path = os.path.join(
                        staging_dir, collections[0].format(
                            '{head}{padding}{tail}')
                    )

                    filename = collections[0].format('{head}')
                    if filename.endswith('.'):
                        filename = filename[:-1]
                else:
                    full_input_path = os.path.join(
                        staging_dir, repre["files"])
                    filename = repre["files"].split(".")[0]

                repr_file = filename + "_{0}.{1}".format(name, ext)
                full_output_path = os.path.join(
                    staging_dir, repr_file)

                if is_sequence:
                    filename_base = filename + "_{0}".format(name)
                    repr_file = filename_base + ".%08d.{0}".format(
                        ext)
                    repre_new["sequence_file"] = repr_file
                    full_output_path = os.path.join(
                        staging_dir, filename_base, repr_file)

                self.log.info("input {}".format(full_input_path))
                self.log.info("output {}".format(full_output_path))

                new_tags = [x for x in tags if x != "delete"]

                # add families
                [instance.data["families"].append(t)
                 for t in p_tags
                 if t not in instance.data["families"]]

                # add to tags
                [new_tags.append(t) for t in p_tags
                 if t not in new_tags]

                self.log.info("new_tags: `{}`".format(new_tags))

                input_args = []
                output_args = []

                # overrides output file
                input_args.append("-y")

                # preset's input data
                input_args.extend(profile.get('input', []))

                # necessary input data
                # adds start arg only if image sequence

                frame_start_handle = frame_start - handle_start
                frame_end_handle = frame_end + handle_end
                if isinstance(repre["files"], list):
                    if frame_start_handle != repre.get(
                            "detectedStart", frame_start_handle):
                        frame_start_handle = repre.get("detectedStart")

                    # exclude handles if no handles are defined
                    if no_handles:
                        frame_start_handle = frame_start
                        frame_end_handle = frame_end

                    input_args.append(
                        "-start_number {0} -framerate {1}".format(
                            frame_start_handle, fps))
                else:
                    if no_handles:
                        start_sec = float(handle_start) / fps
                        input_args.append("-ss {:0.2f}".format(start_sec))
                        frame_start_handle = frame_start
                        frame_end_handle = frame_end

                input_args.append("-i {}".format(full_input_path))

                for audio in instance.data.get("audio", []):
                    offset_frames = (
                        instance.data.get("frameStartFtrack") -
                        audio["offset"]
                    )
                    offset_seconds = offset_frames / fps

                    if offset_seconds > 0:
                        input_args.append("-ss")
                    else:
                        input_args.append("-itsoffset")

                    input_args.append(str(abs(offset_seconds)))

                    input_args.extend(
                        ["-i", audio["filename"]]
                    )

                    # Need to merge audio if there are more
                    # than 1 input.
                    if len(instance.data["audio"]) > 1:
                        input_args.extend(
                            [
                                "-filter_complex",
                                "amerge",
                                "-ac",
                                "2"
                            ]
                        )

                codec_args = profile.get('codec', [])
                output_args.extend(codec_args)
                # preset's output data
                output_args.extend(profile.get('output', []))

                # defining image ratios
                resolution_ratio = (
                    float(resolution_width) * pixel_aspect
                ) / resolution_height
                delivery_ratio = float(self.to_width) / float(self.to_height)
                self.log.debug(
                    "__ resolution_ratio: `{}`".format(resolution_ratio))
                self.log.debug(
                    "__ delivery_ratio: `{}`".format(delivery_ratio))

                # get scale factor
                scale_factor = float(self.to_height) / (
                    resolution_height * pixel_aspect)

                # shorten floats to two decimals for the test conditions
                resolution_ratio_test = float(
                    "{:0.2f}".format(resolution_ratio))
                delivery_ratio_test = float(
                    "{:0.2f}".format(delivery_ratio))

                if resolution_ratio_test < delivery_ratio_test:
                    scale_factor = float(self.to_width) / (
                        resolution_width * pixel_aspect)

                self.log.debug("__ scale_factor: `{}`".format(scale_factor))

                # letter_box
                lb = profile.get('letter_box', 0)
                if lb != 0:
                    ffmpeg_width = self.to_width
                    ffmpeg_height = self.to_height
                    if "reformat" not in p_tags:
                        lb /= pixel_aspect
                        if resolution_ratio_test != delivery_ratio_test:
                            ffmpeg_width = resolution_width
                            ffmpeg_height = int(
                                resolution_height * pixel_aspect)
                    else:
                        if resolution_ratio_test != delivery_ratio_test:
                            lb /= scale_factor
                        else:
                            lb /= pixel_aspect

                    output_args.append(str(
                        "-filter:v scale={0}x{1}:flags=lanczos,"
                        "setsar=1,drawbox=0:0:iw:"
                        "round((ih-(iw*(1/{2})))/2):t=fill:"
                        "c=black,drawbox=0:ih-round((ih-(iw*("
                        "1/{2})))/2):iw:round((ih-(iw*(1/{2})))"
                        "/2):t=fill:c=black").format(
                            ffmpeg_width, ffmpeg_height, lb))

                # In case audio is longer than video.
                output_args.append("-shortest")

                if no_handles:
                    duration_sec = float(
                        frame_end_handle - frame_start_handle + 1) / fps

                    output_args.append("-t {:0.2f}".format(duration_sec))

                # output filename
                output_args.append(full_output_path)

                self.log.debug(
                    "__ pixel_aspect: `{}`".format(pixel_aspect))
                self.log.debug(
                    "__ resolution_width: `{}`".format(
                        resolution_width))
                self.log.debug(
                    "__ resolution_height: `{}`".format(
                        resolution_height))

                # scaling non-square pixels and 1920 width
                if "reformat" in p_tags:
                    if resolution_ratio_test < delivery_ratio_test:
                        self.log.debug("lower than delivery")
                        width_scale = int(self.to_width * scale_factor)
                        width_half_pad = int((
                            self.to_width - width_scale)/2)
                        height_scale = self.to_height
                        height_half_pad = 0
                    else:
                        self.log.debug("higher than delivery")
                        width_scale = self.to_width
                        width_half_pad = 0
                        scale_factor = float(self.to_width) / (float(
                            resolution_width) * pixel_aspect)
                        self.log.debug(
                            "__ scale_factor: `{}`".format(
                                scale_factor))
                        height_scale = int(
                            resolution_height * scale_factor)
                        height_half_pad = int(
                            (self.to_height - height_scale)/2)

                    self.log.debug(
                        "__ width_scale: `{}`".format(width_scale))
                    self.log.debug(
                        "__ width_half_pad: `{}`".format(
                            width_half_pad))
                    self.log.debug(
                        "__ height_scale: `{}`".format(
                            height_scale))
                    self.log.debug(
                        "__ height_half_pad: `{}`".format(
                            height_half_pad))

                    scaling_arg = str(
                        "scale={0}x{1}:flags=lanczos,"
                        "pad={2}:{3}:{4}:{5}:black,setsar=1"
                    ).format(width_scale, height_scale,
                             self.to_width, self.to_height,
                             width_half_pad,
                             height_half_pad
                             )

                    vf_back = self.add_video_filter_args(
                        output_args, scaling_arg)
                    # add it to output_args
                    output_args.insert(0, vf_back)

                # baking lut file application
                lut_path = instance.data.get("lutPath")
                if lut_path and ("bake-lut" in p_tags):
                    # removing gamma info as it is all baked in lut
                    gamma = next((g for g in input_args
                                  if "-gamma" in g), None)
                    if gamma:
                        input_args.remove(gamma)

                    # create lut argument
                    lut_arg = "lut3d=file='{}'".format(
                        lut_path.replace(
                            "\\", "/").replace(":/", "\\:/")
                    )
                    lut_arg += ",colormatrix=bt601:bt709"

                    vf_back = self.add_video_filter_args(
                        output_args, lut_arg)
                    # add it to output_args
                    output_args.insert(0, vf_back)
                    self.log.info("Added Lut to ffmpeg command")
                    self.log.debug(
                        "_ output_args: `{}`".format(output_args))

                if is_sequence:
                    stg_dir = os.path.dirname(full_output_path)

                    if not os.path.exists(stg_dir):
                        self.log.debug(
                            "creating dir: {}".format(stg_dir))
                        os.mkdir(stg_dir)

                mov_args = [
                    ffmpeg_path,
                    " ".join(input_args),
                    " ".join(output_args)
                ]
                subprcs_cmd = " ".join(mov_args)

                # run subprocess
                self.log.debug("Executing: {}".format(subprcs_cmd))
                output = pype.api.subprocess(subprcs_cmd)
                self.log.debug("Output: {}".format(output))

                # create representation data
                repre_new.update({
                    'name': name,
                    'ext': ext,
                    'files': repr_file,
                    "tags": new_tags,
                    "outputName": name,
                    "codec": codec_args,
                    "_profile": profile,
                    "resolutionHeight": resolution_height,
                    "resolutionWidth": resolution_width,
                    "frameStartFtrack": frame_start_handle,
                    "frameEndFtrack": frame_end_handle
                })
                if is_sequence:
                    repre_new.update({
                        "stagingDir": stg_dir,
                        "files": os.listdir(stg_dir)
                    })
                if no_handles:
                    repre_new.update({
                        "outputName": name + "_noHandles",
                        "frameStartFtrack": frame_start,
                        "frameEndFtrack": frame_end
                    })
                if repre_new.get('preview'):
                    repre_new.pop("preview")
                if repre_new.get('thumbnail'):
                    repre_new.pop("thumbnail")

                # adding representation
                self.log.debug("Adding: {}".format(repre_new))
                representations_new.append(repre_new)

        for repre in representations_new:
            if "delete" in repre.get("tags", []):
                representations_new.remove(repre)

        instance.data.update({
            "reviewToWidth": self.to_width,
            "reviewToHeight": self.to_height
        })

        self.log.debug(
            "new representations: {}".format(representations_new))
        instance.data["representations"] = representations_new
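The reformat branch above is easier to follow as a pure function: apply the pixel aspect to get the display ratio, fit whichever axis limits first, and centre the rest with black padding. A simplified model of that math under the same 1920x1080 delivery assumption (the plugin additionally juggles `scale_factor` across branches):

def fit_and_pad(src_w, src_h, pixel_aspect, to_w=1920, to_h=1080):
    # Returns (scaled_w, scaled_h, pad_x, pad_y) for ffmpeg's
    # "scale=WxH,pad=to_w:to_h:pad_x:pad_y" filter chain.
    display_ratio = (float(src_w) * pixel_aspect) / src_h
    delivery_ratio = float(to_w) / to_h

    if display_ratio < delivery_ratio:
        # narrower than delivery: fit height, pad left/right
        scaled_h = to_h
        scaled_w = int(to_h * display_ratio)
    else:
        # wider than delivery: fit width, pad top/bottom
        scaled_w = to_w
        scaled_h = int(to_w / display_ratio)

    return scaled_w, scaled_h, (to_w - scaled_w) // 2, (to_h - scaled_h) // 2


# 2048x858 scope frame into HD: fit width, pad top and bottom.
print(fit_and_pad(2048, 858, 1.0))  # -> (1920, 804, 0, 138)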
@@ -1,5 +1,6 @@
import os
import pype.api
import pype.lib
import pyblish

@@ -21,26 +22,38 @@ class ExtractReviewSlate(pype.api.Extractor):

        suffix = "_slate"
        slate_path = inst_data.get("slateFrame")
        ffmpeg_path = os.path.join(os.environ.get("FFMPEG_PATH", ""), "ffmpeg")
        ffmpeg_path = pype.lib.get_ffmpeg_tool_path("ffmpeg")

        # values are set in ExtractReview
        to_width = inst_data["reviewToWidth"]
        to_height = inst_data["reviewToHeight"]

        to_width = 1920
        to_height = 1080
        resolution_width = inst_data.get("resolutionWidth", to_width)
        resolution_height = inst_data.get("resolutionHeight", to_height)
        pixel_aspect = inst_data.get("pixelAspect", 1)
        fps = inst_data.get("fps")

        # defining image ratios
        resolution_ratio = float(resolution_width / (
            resolution_height * pixel_aspect))
        resolution_ratio = (
            float(resolution_width) * pixel_aspect) / resolution_height
        delivery_ratio = float(to_width) / float(to_height)
        self.log.debug(resolution_ratio)
        self.log.debug(delivery_ratio)
        self.log.debug("__ resolution_ratio: `{}`".format(resolution_ratio))
        self.log.debug("__ delivery_ratio: `{}`".format(delivery_ratio))

        # get scale factor
        scale_factor = to_height / (
        scale_factor = float(to_height) / (
            resolution_height * pixel_aspect)
        self.log.debug(scale_factor)

        # shorten floats to two decimals for the test conditions
        resolution_ratio_test = float(
            "{:0.2f}".format(resolution_ratio))
        delivery_ratio_test = float(
            "{:0.2f}".format(delivery_ratio))

        if resolution_ratio_test < delivery_ratio_test:
            scale_factor = float(to_width) / (
                resolution_width * pixel_aspect)

        self.log.debug("__ scale_factor: `{}`".format(scale_factor))

        for i, repre in enumerate(inst_data["representations"]):
            _remove_at_end = []
@@ -94,7 +107,7 @@ class ExtractReviewSlate(pype.api.Extractor):

            # scaling non-square pixels and 1920 width
            if "reformat" in p_tags:
                if resolution_ratio < delivery_ratio:
                if resolution_ratio_test < delivery_ratio_test:
                    self.log.debug("lower than delivery")
                    width_scale = int(to_width * scale_factor)
                    width_half_pad = int((
@@ -105,7 +118,8 @@ class ExtractReviewSlate(pype.api.Extractor):
                    self.log.debug("higher than delivery")
                    width_scale = to_width
                    width_half_pad = 0
                    scale_factor = float(to_width) / float(resolution_width)
                    scale_factor = float(to_width) / (float(
                        resolution_width) * pixel_aspect)
                    self.log.debug(scale_factor)
                    height_scale = int(
                        resolution_height * scale_factor)
@ -1,417 +0,0 @@
|
|||
import os
|
||||
import logging
|
||||
import shutil
|
||||
|
||||
import errno
|
||||
import pyblish.api
|
||||
from avalon import api, io
|
||||
from avalon.vendor import filelink
|
||||
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class IntegrateAsset(pyblish.api.InstancePlugin):
|
||||
"""Resolve any dependency issies
|
||||
|
||||
This plug-in resolves any paths which, if not updated might break
|
||||
the published file.
|
||||
|
||||
The order of families is important, when working with lookdev you want to
|
||||
first publish the texture, update the texture paths in the nodes and then
|
||||
publish the shading network. Same goes for file dependent assets.
|
||||
"""
|
||||
|
||||
label = "Integrate Asset"
|
||||
order = pyblish.api.IntegratorOrder
|
||||
families = ["assembly"]
|
||||
exclude_families = ["clip"]
|
||||
|
||||
def process(self, instance):
|
||||
if [ef for ef in self.exclude_families
|
||||
if instance.data["family"] in ef]:
|
||||
return
|
||||
|
||||
self.register(instance)
|
||||
|
||||
self.log.info("Integrating Asset in to the database ...")
|
||||
if instance.data.get('transfer', True):
|
||||
self.integrate(instance)
|
||||
|
||||
def register(self, instance):
|
||||
# Required environment variables
|
||||
PROJECT = api.Session["AVALON_PROJECT"]
|
||||
ASSET = instance.data.get("asset") or api.Session["AVALON_ASSET"]
|
||||
LOCATION = api.Session["AVALON_LOCATION"]
|
||||
|
||||
context = instance.context
|
||||
# Atomicity
|
||||
#
|
||||
# Guarantee atomic publishes - each asset contains
|
||||
# an identical set of members.
|
||||
# __
|
||||
# / o
|
||||
# / \
|
||||
# | o |
|
||||
# \ /
|
||||
# o __/
|
||||
#
|
||||
assert all(result["success"] for result in context.data["results"]), (
|
||||
"Atomicity not held, aborting.")
|
||||
|
||||
# Assemble
|
||||
#
|
||||
# |
|
||||
# v
|
||||
# ---> <----
|
||||
# ^
|
||||
# |
|
||||
#
|
||||
stagingdir = instance.data.get("stagingDir")
|
||||
assert stagingdir, ("Incomplete instance \"%s\": "
|
||||
"Missing reference to staging area." % instance)
|
||||
|
||||
# extra check if stagingDir actually exists and is available
|
||||
|
||||
self.log.debug("Establishing staging directory @ %s" % stagingdir)
|
||||
|
||||
# Ensure at least one file is set up for transfer in staging dir.
|
||||
files = instance.data.get("files", [])
|
||||
assert files, "Instance has no files to transfer"
|
||||
assert isinstance(files, (list, tuple)), (
|
||||
"Instance 'files' must be a list, got: {0}".format(files)
|
||||
)
|
||||
|
||||
project = io.find_one({"type": "project"})
|
||||
|
||||
asset = io.find_one({
|
||||
"type": "asset",
|
||||
"name": ASSET,
|
||||
"parent": project["_id"]
|
||||
})
|
||||
|
||||
assert all([project, asset]), ("Could not find current project or "
|
||||
"asset '%s'" % ASSET)
|
||||
|
||||
subset = self.get_subset(asset, instance)
|
||||
|
||||
# get next version
|
||||
latest_version = io.find_one(
|
||||
{
|
||||
"type": "version",
|
||||
"parent": subset["_id"]
|
||||
},
|
||||
{"name": True},
|
||||
sort=[("name", -1)]
|
||||
)
|
||||
|
||||
next_version = 1
|
||||
if latest_version is not None:
|
||||
next_version += latest_version["name"]
|
||||
|
||||
self.log.info("Verifying version from assumed destination")
|
||||
|
||||
assumed_data = instance.data["assumedTemplateData"]
|
||||
assumed_version = assumed_data["version"]
|
||||
if assumed_version != next_version:
|
||||
raise AttributeError("Assumed version 'v{0:03d}' does not match"
|
||||
"next version in database "
|
||||
"('v{1:03d}')".format(assumed_version,
|
||||
next_version))
|
||||
|
||||
self.log.debug("Next version: v{0:03d}".format(next_version))
|
||||
|
||||
version_data = self.create_version_data(context, instance)
|
||||
version = self.create_version(subset=subset,
|
||||
version_number=next_version,
|
||||
locations=[LOCATION],
|
||||
data=version_data)
|
||||
|
||||
self.log.debug("Creating version ...")
|
||||
version_id = io.insert_one(version).inserted_id
|
||||
|
||||
# Write to disk
|
||||
# _
|
||||
# | |
|
||||
# _| |_
|
||||
# ____\ /
|
||||
# |\ \ / \
|
||||
# \ \ v \
|
||||
# \ \________.
|
||||
# \|________|
|
||||
#
|
||||
root = api.registered_root()
|
||||
hierarchy = ""
|
||||
parents = io.find_one({
|
||||
"type": 'asset',
|
||||
"name": ASSET
|
||||
})['data']['parents']
|
||||
if parents and len(parents) > 0:
|
||||
# hierarchy = os.path.sep.join(hierarchy)
|
||||
hierarchy = os.path.join(*parents)
|
||||
|
||||
template_data = {"root": root,
|
||||
"project": {"name": PROJECT,
|
||||
"code": project['data']['code']},
|
||||
"silo": asset['silo'],
|
||||
"asset": ASSET,
|
||||
"family": instance.data['family'],
|
||||
"subset": subset["name"],
|
||||
"version": int(version["name"]),
|
||||
"hierarchy": hierarchy}
|
||||
|
||||
# template_publish = project["config"]["template"]["publish"]
|
||||
anatomy = instance.context.data['anatomy']
|
||||
|
||||
# Find the representations to transfer amongst the files
|
||||
# Each should be a single representation (as such, a single extension)
|
||||
representations = []
|
||||
destination_list = []
|
||||
if 'transfers' not in instance.data:
|
||||
instance.data['transfers'] = []
|
||||
|
||||
for files in instance.data["files"]:
|
||||
|
||||
# Collection
|
||||
# _______
|
||||
# |______|\
|
||||
# | |\|
|
||||
# | ||
|
||||
# | ||
|
||||
# | ||
|
||||
# |_______|
|
||||
#
|
||||
|
||||
if isinstance(files, list):
|
||||
collection = files
|
||||
# Assert that each member has identical suffix
|
||||
_, ext = os.path.splitext(collection[0])
|
||||
assert all(ext == os.path.splitext(name)[1]
|
||||
for name in collection), (
|
||||
"Files had varying suffixes, this is a bug"
|
||||
)
|
||||
|
||||
assert not any(os.path.isabs(name) for name in collection)
|
||||
|
||||
template_data["representation"] = ext[1:]
|
||||
|
||||
for fname in collection:
|
||||
|
||||
src = os.path.join(stagingdir, fname)
|
||||
anatomy_filled = anatomy.format(template_data)
|
||||
dst = anatomy_filled["publish"]["path"]
|
||||
|
||||
instance.data["transfers"].append([src, dst])
|
||||
template = anatomy.templates["publish"]["path"]
|
||||
|
||||
else:
|
||||
# Single file
|
||||
# _______
|
||||
# | |\
|
||||
# | |
|
||||
# | |
|
||||
# | |
|
||||
# |_______|
|
||||
#
|
||||
fname = files
|
||||
assert not os.path.isabs(fname), (
|
||||
"Given file name is a full path"
|
||||
)
|
||||
_, ext = os.path.splitext(fname)
|
||||
|
||||
template_data["representation"] = ext[1:]
|
||||
|
||||
src = os.path.join(stagingdir, fname)
|
||||
anatomy_filled = anatomy.format(template_data)
|
||||
dst = anatomy_filled["publish"]["path"]
|
||||
|
||||
instance.data["transfers"].append([src, dst])
|
||||
template = anatomy.templates["publish"]["path"]
|
||||
|
||||
representation = {
|
||||
"schema": "pype:representation-2.0",
|
||||
"type": "representation",
|
||||
"parent": version_id,
|
||||
"name": ext[1:],
|
||||
"data": {'path': dst, 'template': template},
|
||||
"dependencies": instance.data.get("dependencies", "").split(),
|
||||
|
||||
# Imprint shortcut to context
|
||||
# for performance reasons.
|
||||
"context": {
|
||||
"root": root,
|
||||
"project": {"name": PROJECT,
|
||||
"code": project['data']['code']},
|
||||
'task': api.Session["AVALON_TASK"],
|
||||
"silo": asset['silo'],
|
||||
"asset": ASSET,
|
||||
"family": instance.data['family'],
|
||||
"subset": subset["name"],
|
||||
"version": version["name"],
|
||||
"hierarchy": hierarchy,
|
||||
"representation": ext[1:]
|
||||
}
|
||||
}
|
||||
|
||||
destination_list.append(dst)
|
||||
instance.data['destination_list'] = destination_list
|
||||
representations.append(representation)
|
||||
|
||||
self.log.info("Registering {} items".format(len(representations)))
|
||||
|
||||
io.insert_many(representations)
|
||||
|
||||
def integrate(self, instance):
|
||||
"""Move the files
|
||||
|
||||
Through `instance.data["transfers"]`
|
||||
|
||||
Args:
|
||||
instance: the instance to integrate
|
||||
"""
|
||||
|
||||
transfers = instance.data.get("transfers", list())
|
||||
|
||||
for src, dest in transfers:
|
||||
self.log.info("Copying file .. {} -> {}".format(src, dest))
|
||||
self.copy_file(src, dest)
|
||||
|
||||
# Produce hardlinked copies
|
||||
# Note: hardlink can only be produced between two files on the same
|
||||
# server/disk and editing one of the two will edit both files at once.
|
||||
# As such it is recommended to only make hardlinks between static files
|
||||
# to ensure publishes remain safe and non-edited.
|
||||
hardlinks = instance.data.get("hardlinks", list())
|
||||
for src, dest in hardlinks:
|
||||
self.log.info("Hardlinking file .. {} -> {}".format(src, dest))
|
||||
self.hardlink_file(src, dest)
|
||||
|
||||
def copy_file(self, src, dst):
|
||||
""" Copy given source to destination
|
||||
|
||||
Arguments:
|
||||
src (str): the source file which needs to be copied
|
||||
dst (str): the destination of the sourc file
|
||||
Returns:
|
||||
None
|
||||
"""
|
||||
|
||||
dirname = os.path.dirname(dst)
|
||||
try:
|
||||
os.makedirs(dirname)
|
||||
except OSError as e:
|
||||
if e.errno == errno.EEXIST:
|
||||
pass
|
||||
else:
|
||||
self.log.critical("An unexpected error occurred.")
|
||||
raise
|
||||
|
||||
shutil.copy(src, dst)
|
||||
|
||||
def hardlink_file(self, src, dst):
|
||||
|
||||
dirname = os.path.dirname(dst)
|
||||
try:
|
||||
os.makedirs(dirname)
|
||||
except OSError as e:
|
||||
if e.errno == errno.EEXIST:
|
||||
pass
|
||||
else:
|
||||
self.log.critical("An unexpected error occurred.")
|
||||
raise
|
||||
|
||||
filelink.create(src, dst, filelink.HARDLINK)
|
||||
|
||||
def get_subset(self, asset, instance):
|
||||
|
||||
subset = io.find_one({
|
||||
"type": "subset",
|
||||
"parent": asset["_id"],
|
||||
"name": instance.data["subset"]
|
||||
})
|
||||
|
||||
if subset is None:
|
||||
subset_name = instance.data["subset"]
|
||||
self.log.info("Subset '%s' not found, creating.." % subset_name)
|
||||
|
||||
_id = io.insert_one({
|
||||
"schema": "avalon-core:subset-2.0",
|
||||
"type": "subset",
|
||||
"name": subset_name,
|
||||
"data": {},
|
||||
"parent": asset["_id"]
|
||||
}).inserted_id
|
||||
|
||||
subset = io.find_one({"_id": _id})
|
||||
|
||||
return subset
|
||||
|
||||
    def create_version(self, subset, version_number, locations, data=None):
        """Build the version document to be registered in the database

        Args:
            subset (dict): the registered subset of the asset
            version_number (int): the version number
            locations (list): the currently registered locations

        Returns:
            dict: collection of data to create a version
        """
        # Imprint currently registered location
        version_locations = [location for location in locations if
                             location is not None]

        return {"schema": "avalon-core:version-2.0",
                "type": "version",
                "parent": subset["_id"],
                "name": version_number,
                "locations": version_locations,
                "data": data}

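The returned dictionary is only a document; callers insert it themselves, as
the register() methods in this file do. A usage sketch with hypothetical
values:

    version = self.create_version(
        subset, version_number=3, locations=["studio"], data=version_data
    )
    version_id = io.insert_one(version).inserted_id
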
    def create_version_data(self, context, instance):
        """Create the data collection for the version

        Args:
            context: the current context
            instance: the current instance being published

        Returns:
            dict: the required information with instance.data as key
        """

        families = []
        current_families = instance.data.get("families", list())
        instance_family = instance.data.get("family", None)

        if instance_family is not None:
            families.append(instance_family)
        families += current_families

        self.log.debug("Registered root: {}".format(api.registered_root()))
        # create relative source path for DB
        try:
            source = instance.data['source']
        except KeyError:
            source = context.data["currentFile"]

        relative_path = os.path.relpath(source, api.registered_root())
        source = os.path.join("{root}", relative_path).replace("\\", "/")

        self.log.debug("Source: {}".format(source))
        version_data = {"families": families,
                        "time": context.data["time"],
                        "author": context.data["user"],
                        "source": source,
                        "comment": context.data.get("comment"),
                        "machine": context.data.get("machine"),
                        "fps": context.data.get("fps")}

        # Include optional data if present in the instance data
        optionals = [
            "frameStart", "frameEnd", "step", "handles", "sourceHashes"
        ]
        for key in optionals:
            if key in instance.data:
                version_data[key] = instance.data[key]

        return version_data
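The source path stored in the database is made root-relative so it survives
differing mount points. With a registered root of `/mnt/projects` and a work
file under it (hypothetical paths):

    import os

    root = "/mnt/projects"
    source = "/mnt/projects/MyProject/shots/sh010/scene.ma"
    relative_path = os.path.relpath(source, root)
    source = os.path.join("{root}", relative_path).replace("\\", "/")
    # -> "{root}/MyProject/shots/sh010/scene.ma"
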
@@ -1,147 +0,0 @@
import pyblish.api
import os

from avalon import io, api


class IntegrateAssumedDestination(pyblish.api.InstancePlugin):
    """Generate the assumed destination path where the file will be stored"""

    label = "Integrate Assumed Destination"
    order = pyblish.api.IntegratorOrder - 0.05
    families = ["clip", "projectfile", "plate"]

    def process(self, instance):

        anatomy = instance.context.data['anatomy']

        self.create_destination_template(instance, anatomy)

        template_data = instance.data["assumedTemplateData"]
        # self.log.info(anatomy.templates)
        anatomy_filled = anatomy.format(template_data)

        # self.log.info(anatomy_filled)
        mock_template = anatomy_filled["publish"]["path"]

        # For now assume resources end up in a "resources" folder in the
        # published folder
        mock_destination = os.path.join(os.path.dirname(mock_template),
                                        "resources")

        # Clean the path
        mock_destination = os.path.abspath(
            os.path.normpath(mock_destination)).replace("\\", "/")

        # Define resource destination and transfers
        resources = instance.data.get("resources", list())
        transfers = instance.data.get("transfers", list())
        for resource in resources:

            # Add destination to the resource
            source_filename = os.path.basename(
                resource["source"]).replace("\\", "/")
            destination = os.path.join(mock_destination, source_filename)

            # Force forward slashes to fix issue with software unable
            # to work correctly with backslashes in specific scenarios
            # (e.g. escape characters in PLN-151 V-Ray UDIM)
            destination = destination.replace("\\", "/")

            resource['destination'] = destination

            # Collect transfers for the individual files of the resource
            # e.g. all individual files of a cache or UDIM textures.
            files = resource['files']
            for fsrc in files:
                fname = os.path.basename(fsrc)
                fdest = os.path.join(
                    mock_destination, fname).replace("\\", "/")
                transfers.append([fsrc, fdest])

        instance.data["resources"] = resources
        instance.data["transfers"] = transfers

    def create_destination_template(self, instance, anatomy):
        """Create a filepath based on the current data available

        Example template:
            {root}/{project}/{asset}/publish/{subset}/v{version:0>3}/
            {subset}.{representation}
        Args:
            instance: the instance to publish

        Returns:
            file path (str)
        """

        # get all the stuff from the database
        subset_name = instance.data["subset"]
        self.log.info(subset_name)
        asset_name = instance.data["asset"]
        project_name = api.Session["AVALON_PROJECT"]
        a_template = anatomy.templates

        project = io.find_one(
            {"type": "project", "name": project_name},
            projection={"config": True, "data": True}
        )

        template = a_template['publish']['path']
        # anatomy = instance.context.data['anatomy']

        asset = io.find_one({
            "type": "asset",
            "name": asset_name,
            "parent": project["_id"]
        })

        assert asset, ("No asset found by the name '{}' "
                       "in project '{}'".format(asset_name, project_name))

        subset = io.find_one({
            "type": "subset",
            "name": subset_name,
            "parent": asset["_id"]
        })

        # assume there is no version yet, we start at `1`
        version = None
        version_number = 1
        if subset is not None:
            version = io.find_one(
                {
                    "type": "version",
                    "parent": subset["_id"]
                },
                sort=[("name", -1)]
            )

        # if there is a subset there ought to be version
        if version is not None:
            version_number += version["name"]

        if instance.data.get('version'):
            version_number = int(instance.data.get('version'))

        padding = int(a_template['render']['padding'])

        hierarchy = asset['data']['parents']
        if hierarchy:
            # hierarchy = os.path.sep.join(hierarchy)
            hierarchy = "/".join(hierarchy)

        template_data = {"root": api.Session["AVALON_PROJECTS"],
                         "project": {"name": project_name,
                                     "code": project['data']['code']},
                         "family": instance.data['family'],
                         "asset": asset_name,
                         "subset": subset_name,
                         "frame": ('#' * padding),
                         "version": version_number,
                         "hierarchy": hierarchy,
                         "representation": "TEMP"}

        instance.data["assumedTemplateData"] = template_data
        self.log.info(template_data)
        instance.data["template"] = template
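Filled with plain `str.format` semantics, the example template from the
docstring above yields a concrete path. A sketch with flattened, hypothetical
values (real anatomy templates nest keys such as `{project[name]}`):

    template = ("{root}/{project}/{asset}/publish/{subset}/"
                "v{version:0>3}/{subset}.{representation}")
    path = template.format(
        root="/mnt/projects", project="MyProject", asset="sh010",
        subset="modelMain", version=3, representation="abc"
    )
    # -> "/mnt/projects/MyProject/sh010/publish/modelMain/v003/modelMain.abc"
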
@@ -2,8 +2,11 @@ import os
from os.path import getsize
import logging
import sys
import copy
import clique
import errno

from pymongo import DeleteOne, InsertOne
import pyblish.api
from avalon import api, io
from avalon.vendor import filelink
@@ -76,8 +79,15 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
        "source",
        "matchmove",
        "image"
        "source",
        "assembly",
        "textures"
    ]
    exclude_families = ["clip"]
    db_representation_context_keys = [
        "project", "asset", "task", "subset", "version", "representation",
        "family", "hierarchy", "task", "username"
    ]

    def process(self, instance):

@@ -94,144 +104,148 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):

    def register(self, instance):
        # Required environment variables
        PROJECT = api.Session["AVALON_PROJECT"]
        ASSET = instance.data.get("asset") or api.Session["AVALON_ASSET"]
        TASK = instance.data.get("task") or api.Session["AVALON_TASK"]
        LOCATION = api.Session["AVALON_LOCATION"]
        anatomy_data = instance.data["anatomyData"]

        io.install()

        context = instance.context
        # Atomicity
        #
        # Guarantee atomic publishes - each asset contains
        # an identical set of members.
        #     __
        #    /     o
        #   /       \
        #  |    o    |
        #   \       /
        #    o   __/
        #
        # for result in context.data["results"]:
        #     if not result["success"]:
        #         self.log.debug(result)
        #         exc_type, exc_value, exc_traceback = result["error_info"]
        #         extracted_traceback = traceback.extract_tb(exc_traceback)[-1]
        #         self.log.debug(
        #             "Error at line {}: \"{}\"".format(
        #                 extracted_traceback[1], result["error"]
        #             )
        #         )
        # assert all(result["success"] for result in context.data["results"]),(
        #     "Atomicity not held, aborting.")

        # Assemble
        #
        #       |
        #       v
        #  --->   <----
        #       ^
        #       |
        #
        project_entity = instance.data["projectEntity"]

        context_asset_name = context.data["assetEntity"]["name"]

        asset_name = instance.data["asset"]
        asset_entity = instance.data.get("assetEntity")
        if not asset_entity or asset_entity["name"] != context_asset_name:
            asset_entity = io.find_one({
                "type": "asset",
                "name": asset_name,
                "parent": project_entity["_id"]
            })
            assert asset_entity, (
                "No asset found by the name \"{0}\" in project \"{1}\""
            ).format(asset_name, project_entity["name"])

            instance.data["assetEntity"] = asset_entity

            # update anatomy data with asset specific keys
            # - name should already be set
            hierarchy = ""
            parents = asset_entity["data"]["parents"]
            if parents:
                hierarchy = "/".join(parents)
            anatomy_data["hierarchy"] = hierarchy

        task_name = instance.data.get("task")
        if task_name:
            anatomy_data["task"] = task_name

        stagingdir = instance.data.get("stagingDir")
        if not stagingdir:
            self.log.info('''{} is missing reference to staging
                            directory Will try to get it from
                            representation'''.format(instance))
            self.log.info((
                "{0} is missing reference to staging directory."
                " Will try to get it from representation."
            ).format(instance))

        # extra check if stagingDir actually exists and is available

        self.log.debug("Establishing staging directory @ %s" % stagingdir)
        else:
            self.log.debug(
                "Establishing staging directory @ {0}".format(stagingdir)
            )

        # Ensure at least one file is set up for transfer in staging dir.
        repres = instance.data.get("representations", None)
        repres = instance.data.get("representations")
        assert repres, "Instance has no files to transfer"
        assert isinstance(repres, (list, tuple)), (
            "Instance 'files' must be a list, got: {0}".format(repres)
            "Instance 'files' must be a list, got: {0} {1}".format(
                str(type(repres)), str(repres)
            )
        )

        # FIXME: io is not initialized at this point for shell host
        io.install()
        project = io.find_one({"type": "project"})
        subset = self.get_subset(asset_entity, instance)

        asset = io.find_one({
            "type": "asset",
            "name": ASSET,
            "parent": project["_id"]
        })

        assert all([project, asset]), ("Could not find current project or "
                                       "asset '%s'" % ASSET)

        subset = self.get_subset(asset, instance)

        # get next version
        latest_version = io.find_one(
            {
                "type": "version",
                "parent": subset["_id"]
            },
            {"name": True},
            sort=[("name", -1)]
        )

        next_version = 1
        if latest_version is not None:
            next_version += latest_version["name"]

        if instance.data.get('version'):
            next_version = int(instance.data.get('version'))

        self.log.debug("Next version: v{0:03d}".format(next_version))
        version_number = instance.data["version"]
        self.log.debug("Next version: v{}".format(version_number))

        version_data = self.create_version_data(context, instance)

        version_data_instance = instance.data.get('versionData')

        if version_data_instance:
            version_data.update(version_data_instance)

        version = self.create_version(subset=subset,
                                      version_number=next_version,
                                      locations=[LOCATION],
                                      data=version_data)
        # TODO rename method from `create_version` to
        # `prepare_version` or similar...
        version = self.create_version(
            subset=subset,
            version_number=version_number,
            data=version_data
        )

        self.log.debug("Creating version ...")

        new_repre_names_low = [_repre["name"].lower() for _repre in repres]

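The next-version arithmetic above reduces to: start at 1 and add the highest
existing version name, unless the instance pins a version explicitly. A
compact sketch with a hypothetical document:

    latest_version = {"name": 12}  # highest existing version, or None

    next_version = 1
    if latest_version is not None:
        next_version += latest_version["name"]  # -> 13
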
        existing_version = io.find_one({
            'type': 'version',
            'parent': subset["_id"],
            'name': next_version
            'name': version_number
        })

        if existing_version is None:
            version_id = io.insert_one(version).inserted_id
        else:
            # Check if instance has `append` mode set, which causes that
            # only replicated representations are set to archive
            append_repres = instance.data.get("append", False)

            # Update version data
            # TODO query by _id and
            io.update_many({
                'type': 'version',
                'parent': subset["_id"],
                'name': next_version
            }, {'$set': version}
            )
                'name': version_number
            }, {
                '$set': version
            })
            version_id = existing_version['_id']

            # Find representations of existing version and archive them
            current_repres = list(io.find({
                "type": "representation",
                "parent": version_id
            }))
            bulk_writes = []
            for repre in current_repres:
                if append_repres:
                    # archive only duplicated representations
                    if repre["name"].lower() not in new_repre_names_low:
                        continue
                # Representation must change type,
                # `_id` must be stored to other key and replaced with new
                # - that is because new representations should have same ID
                repre_id = repre["_id"]
                bulk_writes.append(DeleteOne({"_id": repre_id}))

                repre["orig_id"] = repre_id
                repre["_id"] = io.ObjectId()
                repre["type"] = "archived_representation"
                bulk_writes.append(InsertOne(repre))

            # bulk updates
            if bulk_writes:
                io._database[io.Session["AVALON_PROJECT"]].bulk_write(
                    bulk_writes
                )

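The archive step is a single MongoDB bulk operation: delete the live
representation and re-insert it under a new id with type
`archived_representation`, so the incoming representation can reuse the old
id. A stand-alone sketch with pymongo, assuming a hypothetical connection and
an existing document:

    from bson.objectid import ObjectId
    from pymongo import MongoClient, DeleteOne, InsertOne

    collection = MongoClient()["avalon"]["MyProject"]
    repre = collection.find_one({"type": "representation"})

    ops = [DeleteOne({"_id": repre["_id"]})]
    repre["orig_id"] = repre["_id"]
    repre["_id"] = ObjectId()
    repre["type"] = "archived_representation"
    ops.append(InsertOne(repre))
    collection.bulk_write(ops)
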
        existing_repres = list(io.find({
            "parent": version_id,
            "type": "archived_representation"
        }))

        instance.data['version'] = version['name']

        # Write to disk
        #          _
        #         | |
        #        _| |_
        #    ____\   /
        #   |\    \ / \
        #   \ \    v   \
        #    \ \________.
        #     \|________|
        #
        root = api.registered_root()
        hierarchy = ""
        parents = io.find_one({
            "type": 'asset',
            "name": ASSET
        })['data']['parents']
        if parents and len(parents) > 0:
            # hierarchy = os.path.sep.join(hierarchy)
            hierarchy = os.path.join(*parents)
        intent = context.data.get("intent")
        if intent is not None:
            anatomy_data["intent"] = intent

        anatomy = instance.context.data['anatomy']

@@ -244,27 +258,10 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
        instance.data['transfers'] = []

        for idx, repre in enumerate(instance.data["representations"]):

            # Collection
            #   _______
            #  |______|\
            # |      |\|
            # |      ||
            # |      ||
            # |      ||
            # |_______|
            #
            # create template data for Anatomy
            template_data = {"root": root,
                             "project": {"name": PROJECT,
                                         "code": project['data']['code']},
                             "silo": asset.get('silo'),
                             "task": TASK,
                             "asset": ASSET,
                             "family": instance.data['family'],
                             "subset": subset["name"],
                             "version": int(version["name"]),
                             "hierarchy": hierarchy}
            template_data = copy.deepcopy(anatomy_data)
            if intent is not None:
                template_data["intent"] = intent

            resolution_width = repre.get("resolutionWidth")
            resolution_height = repre.get("resolutionHeight")
@@ -282,11 +279,14 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
            stagingdir = repre['stagingDir']
            if repre.get('anatomy_template'):
                template_name = repre['anatomy_template']
            if repre.get("outputName"):
                template_data["output"] = repre['outputName']

            template = os.path.normpath(
                anatomy.templates[template_name]["path"])

            sequence_repre = isinstance(files, list)

            repre_context = None
            if sequence_repre:
                src_collections, remainder = clique.assemble(files)
                self.log.debug(
@@ -309,10 +309,11 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
                    template_data["representation"] = repre['ext']
                    template_data["frame"] = src_padding_exp % i
                    anatomy_filled = anatomy.format(template_data)

                    template_filled = anatomy_filled[template_name]["path"]
                    if repre_context is None:
                        repre_context = template_filled.used_values
                    test_dest_files.append(
                        os.path.normpath(
                            anatomy_filled[template_name]["path"])
                        os.path.normpath(template_filled)
                    )

                self.log.debug(
@@ -326,23 +327,23 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
                index_frame_start = None

                if repre.get("frameStart"):
                    frame_start_padding = len(str(
                        repre.get("frameEnd")))
                    frame_start_padding = (
                        anatomy.templates["render"]["padding"]
                    )
                    index_frame_start = int(repre.get("frameStart"))

                # exception for slate workflow
                if "slate" in instance.data["families"]:
                if index_frame_start and "slate" in instance.data["families"]:
                    index_frame_start -= 1

                dst_padding_exp = src_padding_exp
                dst_start_frame = None
                for i in src_collection.indexes:
                    # TODO 1.) do not count padding in each index iteration
                    # 2.) do not count dst_padding from src_padding before
                    #     index_frame_start check
                    src_padding = src_padding_exp % i

                    # for adding first frame into db
                    if not dst_start_frame:
                        dst_start_frame = src_padding

                    src_file_name = "{0}{1}{2}".format(
                        src_head, src_padding, src_tail)

@@ -364,6 +365,10 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
                    self.log.debug("source: {}".format(src))
                    instance.data["transfers"].append([src, dst])

                    # for adding first frame into db
                    if not dst_start_frame:
                        dst_start_frame = dst_padding

                dst = "{0}{1}{2}".format(
                    dst_head,
                    dst_start_frame,
@@ -387,20 +392,38 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):

                template_data["representation"] = repre['ext']

                if repre.get("outputName"):
                    template_data["output"] = repre['outputName']

                src = os.path.join(stagingdir, fname)
                anatomy_filled = anatomy.format(template_data)
                dst = os.path.normpath(
                    anatomy_filled[template_name]["path"]).replace("..", ".")
                template_filled = anatomy_filled[template_name]["path"]
                repre_context = template_filled.used_values
                dst = os.path.normpath(template_filled).replace("..", ".")

                instance.data["transfers"].append([src, dst])

            repre['published_path'] = self.unc_convert(dst)
            self.log.debug("__ dst: {}".format(dst))

            for key in self.db_representation_context_keys:
                value = template_data.get(key)
                if not value:
                    continue
                repre_context[key] = template_data[key]

            # Use previous representation's id if there are any
            repre_id = None
            repre_name_low = repre["name"].lower()
            for _repre in existing_repres:
                # NOTE should we check lowered names?
                if repre_name_low == _repre["name"]:
                    repre_id = _repre["orig_id"]
                    break

            # Create new id if existing representations do not match
            if repre_id is None:
                repre_id = io.ObjectId()

            representation = {
                "_id": repre_id,
                "schema": "pype:representation-2.0",
                "type": "representation",
                "parent": version_id,
@@ -410,26 +433,16 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):

                # Imprint shortcut to context
                # for performance reasons.
                "context": {
                    "root": root,
                    "project": {"name": PROJECT,
                                "code": project['data']['code']},
                    'task': TASK,
                    "silo": asset.get('silo'),
                    "asset": ASSET,
                    "family": instance.data['family'],
                    "subset": subset["name"],
                    "version": version["name"],
                    "hierarchy": hierarchy,
                    "representation": repre['ext']
                }
                "context": repre_context
            }

            if repre.get("outputName"):
                representation["context"]["output"] = repre['outputName']

            if sequence_repre and repre.get("frameStart"):
                representation['context']['frame'] = src_padding_exp % int(repre.get("frameStart"))
                representation['context']['frame'] = (
                    dst_padding_exp % int(repre.get("frameStart"))
                )

            self.log.debug("__ representation: {}".format(representation))
            destination_list.append(dst)
@@ -438,11 +451,19 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
            representations.append(representation)
            self.log.debug("__ representations: {}".format(representations))

        # Remove old representations if there are any (before insertion of new)
        if existing_repres:
            repre_ids_to_remove = []
            for repre in existing_repres:
                repre_ids_to_remove.append(repre["_id"])
            io.delete_many({"_id": {"$in": repre_ids_to_remove}})

        self.log.debug("__ representations: {}".format(representations))
        for rep in instance.data["representations"]:
            self.log.debug("__ represNAME: {}".format(rep['name']))
            self.log.debug("__ represPATH: {}".format(rep['published_path']))
        io.insert_many(representations)
        instance.data["published_representations"] = representations
        # self.log.debug("Representation: {}".format(representations))
        self.log.info("Registered {} items".format(len(representations)))

@@ -502,7 +523,8 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
        """
        src = self.unc_convert(src)
        dst = self.unc_convert(dst)

        src = os.path.normpath(src)
        dst = os.path.normpath(dst)
        self.log.debug("Copying file .. {} -> {}".format(src, dst))
        dirname = os.path.dirname(dst)
        try:
@@ -538,14 +560,14 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
        filelink.create(src, dst, filelink.HARDLINK)

    def get_subset(self, asset, instance):
        subset_name = instance.data["subset"]
        subset = io.find_one({
            "type": "subset",
            "parent": asset["_id"],
            "name": instance.data["subset"]
            "name": subset_name
        })

        if subset is None:
            subset_name = instance.data["subset"]
            self.log.info("Subset '%s' not found, creating.." % subset_name)
            self.log.debug("families. %s" % instance.data.get('families'))
            self.log.debug(
@@ -574,26 +596,21 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):

        return subset

    def create_version(self, subset, version_number, locations, data=None):
    def create_version(self, subset, version_number, data=None):
        """Build the version document to be registered in the database

        Args:
            subset (dict): the registered subset of the asset
            version_number (int): the version number
            locations (list): the currently registered locations

        Returns:
            dict: collection of data to create a version
        """
        # Imprint currently registered location
        version_locations = [location for location in locations if
                             location is not None]

        return {"schema": "pype:version-3.0",
                "type": "version",
                "parent": subset["_id"],
                "name": version_number,
                "locations": version_locations,
                "data": data}

    def create_version_data(self, context, instance):
@@ -636,6 +653,10 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
                        "fps": context.data.get(
                            "fps", instance.data.get("fps"))}

        intent = context.data.get("intent")
        if intent is not None:
            version_data["intent"] = intent

        # Include optional data if present in the instance data
        optionals = [
            "frameStart", "frameEnd", "step", "handles",
@@ -1,423 +0,0 @@
import os
import logging
import shutil
import clique

import errno
import pyblish.api
from avalon import api, io


log = logging.getLogger(__name__)


class IntegrateFrames(pyblish.api.InstancePlugin):
    """Resolve any dependency issues

    This plug-in resolves any paths which, if not updated might break
    the published file.

    The order of families is important, when working with lookdev you want to
    first publish the texture, update the texture paths in the nodes and then
    publish the shading network. Same goes for file dependent assets.
    """

    label = "Integrate Frames"
    order = pyblish.api.IntegratorOrder
    families = ["imagesequence"]

    family_targets = [".frames", ".local", ".review", "imagesequence", "render", "source"]
    exclude_families = ["clip"]

    def process(self, instance):
        if [ef for ef in self.exclude_families
                if instance.data["family"] in ef]:
            return

        families = [f for f in instance.data["families"]
                    for search in self.family_targets
                    if search in f]

        if not families:
            return

        self.register(instance)

        # self.log.info("Integrating Asset in to the database ...")
        # self.log.info("instance.data: {}".format(instance.data))
        if instance.data.get('transfer', True):
            self.integrate(instance)

    def register(self, instance):

        # Required environment variables
        PROJECT = api.Session["AVALON_PROJECT"]
        ASSET = instance.data.get("asset") or api.Session["AVALON_ASSET"]
        LOCATION = api.Session["AVALON_LOCATION"]

        context = instance.context
        # Atomicity
        #
        # Guarantee atomic publishes - each asset contains
        # an identical set of members.
        #     __
        #    /     o
        #   /       \
        #  |    o    |
        #   \       /
        #    o   __/
        #
        assert all(result["success"] for result in context.data["results"]), (
            "Atomicity not held, aborting.")

        # Assemble
        #
        #       |
        #       v
        #  --->   <----
        #       ^
        #       |
        #
        stagingdir = instance.data.get("stagingDir")
        assert stagingdir, ("Incomplete instance \"%s\": "
                            "Missing reference to staging area." % instance)

        # extra check if stagingDir actually exists and is available

        self.log.debug("Establishing staging directory @ %s" % stagingdir)

project = io.find_one({"type": "project"})
|
||||
|
||||
asset = io.find_one({
|
||||
"type": "asset",
|
||||
"name": ASSET,
|
||||
"parent": project["_id"]
|
||||
})
|
||||
|
||||
assert all([project, asset]), ("Could not find current project or "
|
||||
"asset '%s'" % ASSET)
|
||||
|
||||
subset = self.get_subset(asset, instance)
|
||||
|
||||
# get next version
|
||||
latest_version = io.find_one(
|
||||
{
|
||||
"type": "version",
|
||||
"parent": subset["_id"]
|
||||
},
|
||||
{"name": True},
|
||||
sort=[("name", -1)]
|
||||
)
|
||||
|
||||
next_version = 1
|
||||
if latest_version is not None:
|
||||
next_version += latest_version["name"]
|
||||
|
||||
self.log.info("Verifying version from assumed destination")
|
||||
|
||||
assumed_data = instance.data["assumedTemplateData"]
|
||||
assumed_version = assumed_data["version"]
|
||||
if assumed_version != next_version:
|
||||
raise AttributeError("Assumed version 'v{0:03d}' does not match"
|
||||
"next version in database "
|
||||
"('v{1:03d}')".format(assumed_version,
|
||||
next_version))
|
||||
|
||||
if instance.data.get('version'):
|
||||
next_version = int(instance.data.get('version'))
|
||||
|
||||
self.log.debug("Next version: v{0:03d}".format(next_version))
|
||||
|
||||
version_data = self.create_version_data(context, instance)
|
||||
version = self.create_version(subset=subset,
|
||||
version_number=next_version,
|
||||
locations=[LOCATION],
|
||||
data=version_data)
|
||||
|
||||
self.log.debug("Creating version ...")
|
||||
version_id = io.insert_one(version).inserted_id
|
||||
|
||||
# Write to disk
|
||||
# _
|
||||
# | |
|
||||
# _| |_
|
||||
# ____\ /
|
||||
# |\ \ / \
|
||||
# \ \ v \
|
||||
# \ \________.
|
||||
# \|________|
|
||||
#
|
||||
        root = api.registered_root()
        hierarchy = ""
        parents = io.find_one({"type": 'asset', "name": ASSET})[
            'data']['parents']
        if parents and len(parents) > 0:
            # hierarchy = os.path.sep.join(hierarchy)
            hierarchy = os.path.join(*parents)

        template_data = {"root": root,
                         "project": {"name": PROJECT,
                                     "code": project['data']['code']},
                         "silo": asset.get('silo'),
                         "task": api.Session["AVALON_TASK"],
                         "asset": ASSET,
                         "family": instance.data['family'],
                         "subset": subset["name"],
                         "version": int(version["name"]),
                         "hierarchy": hierarchy}

        # template_publish = project["config"]["template"]["publish"]
        anatomy = instance.context.data['anatomy']

        # Find the representations to transfer amongst the files
        # Each should be a single representation (as such, a single extension)
        representations = []
        destination_list = []

        if 'transfers' not in instance.data:
            instance.data['transfers'] = []

for files in instance.data["files"]:
|
||||
# Collection
|
||||
# _______
|
||||
# |______|\
|
||||
# | |\|
|
||||
# | ||
|
||||
# | ||
|
||||
# | ||
|
||||
# |_______|
|
||||
#
|
||||
            if isinstance(files, list):

                src_collections, remainder = clique.assemble(files)
                src_collection = src_collections[0]
                # Assert that each member has identical suffix
                src_head = src_collection.format("{head}")
                src_tail = ext = src_collection.format("{tail}")

                test_dest_files = list()
                for i in [1, 2]:
                    template_data["representation"] = src_tail[1:]
                    template_data["frame"] = src_collection.format(
                        "{padding}") % i
                    anatomy_filled = anatomy.format(template_data)
                    test_dest_files.append(anatomy_filled["render"]["path"])

                dst_collections, remainder = clique.assemble(test_dest_files)
                dst_collection = dst_collections[0]
                dst_head = dst_collection.format("{head}")
                dst_tail = dst_collection.format("{tail}")

                for i in src_collection.indexes:
                    src_padding = src_collection.format("{padding}") % i
                    src_file_name = "{0}{1}{2}".format(
                        src_head, src_padding, src_tail)
                    dst_padding = dst_collection.format("{padding}") % i
                    dst = "{0}{1}{2}".format(dst_head, dst_padding, dst_tail)

                    src = os.path.join(stagingdir, src_file_name)
                    instance.data["transfers"].append([src, dst])

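`clique` does the sequence bookkeeping here: it groups numbered files into a
collection exposing a common head, tail, padding expression, and the set of
frame indexes. A small stand-alone sketch:

    import clique

    files = ["render.1001.exr", "render.1002.exr", "render.1003.exr"]
    collections, remainder = clique.assemble(files)
    col = collections[0]

    col.format("{head}")     # -> "render."
    col.format("{tail}")     # -> ".exr"
    col.format("{padding}")  # -> "%04d"
    sorted(col.indexes)      # -> [1001, 1002, 1003]
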
            else:
                # Single file
                #  _______
                # |      |\
                # |       |
                # |       |
                # |       |
                # |_______|
                #

                template_data.pop("frame", None)

                fname = files

                self.log.info("fname: {}".format(fname))

                assert not os.path.isabs(fname), (
                    "Given file name is a full path"
                )
                _, ext = os.path.splitext(fname)

                template_data["representation"] = ext[1:]

                src = os.path.join(stagingdir, fname)

                anatomy_filled = anatomy.format(template_data)
                dst = anatomy_filled["render"]["path"]

                instance.data["transfers"].append([src, dst])

            if ext[1:] not in ["jpeg", "jpg", "mov", "mp4", "wav"]:
                template_data["frame"] = "#" * int(
                    anatomy_filled["render"]["padding"])

            anatomy_filled = anatomy.format(template_data)
            path_to_save = anatomy_filled["render"]["path"]
            template = anatomy.templates["render"]["path"]

            self.log.debug("path_to_save: {}".format(path_to_save))

            representation = {
                "schema": "pype:representation-2.0",
                "type": "representation",
                "parent": version_id,
                "name": ext[1:],
                "data": {'path': path_to_save, 'template': template},
                "dependencies": instance.data.get("dependencies", "").split(),

                # Imprint shortcut to context
                # for performance reasons.
                "context": {
                    "root": root,
                    "project": {
                        "name": PROJECT,
                        "code": project['data']['code']
                    },
                    "task": api.Session["AVALON_TASK"],
                    "silo": asset['silo'],
                    "asset": ASSET,
                    "family": instance.data['family'],
                    "subset": subset["name"],
                    "version": int(version["name"]),
                    "hierarchy": hierarchy,
                    "representation": ext[1:]
                }
            }

            destination_list.append(dst)
            instance.data['destination_list'] = destination_list
            representations.append(representation)

        self.log.info("Registering {} items".format(len(representations)))
        io.insert_many(representations)

    def integrate(self, instance):
        """Move the files

        Through `instance.data["transfers"]`

        Args:
            instance: the instance to integrate
        """

        transfers = instance.data["transfers"]

        for src, dest in transfers:
            src = os.path.normpath(src)
            dest = os.path.normpath(dest)
            if src in dest:
                continue

            self.log.info("Copying file .. {} -> {}".format(src, dest))
            self.copy_file(src, dest)

    def copy_file(self, src, dst):
        """ Copy given source to destination

        Arguments:
            src (str): the source file which needs to be copied
            dst (str): the destination of the source file
        Returns:
            None
        """

        dirname = os.path.dirname(dst)
        try:
            os.makedirs(dirname)
        except OSError as e:
            if e.errno == errno.EEXIST:
                pass
            else:
                self.log.critical("An unexpected error occurred.")
                raise

        shutil.copy(src, dst)

    def get_subset(self, asset, instance):

        subset = io.find_one({
            "type": "subset",
            "parent": asset["_id"],
            "name": instance.data["subset"]
        })

        if subset is None:
            subset_name = instance.data["subset"]
            self.log.info("Subset '%s' not found, creating.." % subset_name)

            _id = io.insert_one({
                "schema": "pype:subset-2.0",
                "type": "subset",
                "name": subset_name,
                "data": {},
                "parent": asset["_id"]
            }).inserted_id

            subset = io.find_one({"_id": _id})

        return subset

    def create_version(self, subset, version_number, locations, data=None):
        """Build the version document to be registered in the database

        Args:
            subset (dict): the registered subset of the asset
            version_number (int): the version number
            locations (list): the currently registered locations

        Returns:
            dict: collection of data to create a version
        """
        # Imprint currently registered location
        version_locations = [location for location in locations if
                             location is not None]

        return {"schema": "pype:version-2.0",
                "type": "version",
                "parent": subset["_id"],
                "name": version_number,
                "locations": version_locations,
                "data": data}

    def create_version_data(self, context, instance):
        """Create the data collection for the version

        Args:
            context: the current context
            instance: the current instance being published

        Returns:
            dict: the required information with instance.data as key
        """

        families = []
        current_families = instance.data.get("families", list())
        instance_family = instance.data.get("family", None)

        if instance_family is not None:
            families.append(instance_family)
        families += current_families

        try:
            source = instance.data['source']
        except KeyError:
            source = context.data["currentFile"]

        relative_path = os.path.relpath(source, api.registered_root())
        source = os.path.join("{root}", relative_path).replace("\\", "/")

        version_data = {"families": families,
                        "time": context.data["time"],
                        "author": context.data["user"],
                        "source": source,
                        "comment": context.data.get("comment")}

        # Include optional data if present in the instance data
        optionals = ["frameStart", "frameEnd", "step",
                     "handles", "colorspace", "fps", "outputDir"]

        for key in optionals:
            if key in instance.data:
                version_data[key] = instance.data.get(key, None)

        return version_data
49 pype/plugins/global/publish/integrate_resources_path.py Normal file
@@ -0,0 +1,49 @@
import os
import pyblish.api


class IntegrateResourcesPath(pyblish.api.InstancePlugin):
    """Generate directory path where the files and resources will be stored"""

    label = "Integrate Resources Path"
    order = pyblish.api.IntegratorOrder - 0.05
    families = ["clip", "projectfile", "plate"]

    def process(self, instance):
        resources = instance.data.get("resources") or []
        transfers = instance.data.get("transfers") or []

        if not resources and not transfers:
            self.log.debug(
                "Instance does not have `resources` and `transfers`"
            )
            return

        resources_folder = instance.data["resourcesDir"]

        # Define resource destination and transfers
        for resource in resources:
            # Add destination to the resource
            source_filename = os.path.basename(
                resource["source"]).replace("\\", "/")
            destination = os.path.join(resources_folder, source_filename)

            # Force forward slashes to fix issue with software unable
            # to work correctly with backslashes in specific scenarios
            # (e.g. escape characters in PLN-151 V-Ray UDIM)
            destination = destination.replace("\\", "/")

            resource['destination'] = destination

            # Collect transfers for the individual files of the resource
            # e.g. all individual files of a cache or UDIM textures.
            files = resource['files']
            for fsrc in files:
                fname = os.path.basename(fsrc)
                fdest = os.path.join(
                    resources_folder, fname
                ).replace("\\", "/")
                transfers.append([fsrc, fdest])

        instance.data["resources"] = resources
        instance.data["transfers"] = transfers
148 pype/plugins/global/publish/integrate_thumbnail.py Normal file
@@ -0,0 +1,148 @@
import os
import sys
import errno
import shutil
import copy

import six
import pyblish.api
from bson.objectid import ObjectId

from avalon import api, io


class IntegrateThumbnails(pyblish.api.InstancePlugin):
    """Integrate Thumbnails."""

    label = "Integrate Thumbnails"
    order = pyblish.api.IntegratorOrder + 0.01
    families = ["review"]

    def process(self, instance):

        if not os.environ.get("AVALON_THUMBNAIL_ROOT"):
            self.log.info("AVALON_THUMBNAIL_ROOT is not set."
                          " Skipping thumbnail integration.")
            return

        published_repres = instance.data.get("published_representations")
        if not published_repres:
            self.log.debug(
                "There are no published representation ids on the instance."
            )
            return

        project_name = api.Session["AVALON_PROJECT"]

        anatomy = instance.context.data["anatomy"]
        if "publish" not in anatomy.templates:
            raise AssertionError("Anatomy is missing the \"publish\" key!")

        if "thumbnail" not in anatomy.templates["publish"]:
            raise AssertionError((
                "There is no \"thumbnail\" template set for project \"{}\""
            ).format(project_name))

        thumbnail_template = anatomy.templates["publish"]["thumbnail"]

        io.install()

        thumb_repre = None
        for repre in published_repres:
            if repre["name"].lower() == "thumbnail":
                thumb_repre = repre
                break

        if not thumb_repre:
            self.log.debug(
                "There is no representation named \"thumbnail\""
            )
            return

        version = io.find_one({"_id": thumb_repre["parent"]})
        if not version:
            raise AssertionError(
                "Version with id {} does not exist".format(
                    str(thumb_repre["parent"])
                )
            )

        # Get full path to thumbnail file from representation
        src_full_path = os.path.normpath(thumb_repre["data"]["path"])
        if not os.path.exists(src_full_path):
            self.log.warning("Thumbnail file was not found. Path: {}".format(
                src_full_path
            ))
            return

        filename, file_extension = os.path.splitext(src_full_path)
        # Create id for mongo entity now to fill anatomy template
        thumbnail_id = ObjectId()

        # Prepare anatomy template fill data
        template_data = copy.deepcopy(thumb_repre["context"])
        template_data.update({
            "_id": str(thumbnail_id),
            "thumbnail_root": os.environ.get("AVALON_THUMBNAIL_ROOT"),
            "ext": file_extension,
            "thumbnail_type": "thumbnail"
        })

        anatomy_filled = anatomy.format(template_data)
        final_path = anatomy_filled.get("publish", {}).get("thumbnail")
        if not final_path:
            raise AssertionError((
                "Anatomy template was not filled with entered data"
                "\nTemplate: {} "
                "\nData: {}"
            ).format(thumbnail_template, str(template_data)))

        dst_full_path = os.path.normpath(final_path)
        self.log.debug(
            "Copying file .. {} -> {}".format(src_full_path, dst_full_path)
        )
        dirname = os.path.dirname(dst_full_path)
        try:
            os.makedirs(dirname)
        except OSError as e:
            if e.errno != errno.EEXIST:
                tp, value, tb = sys.exc_info()
                six.reraise(tp, value, tb)

        shutil.copy(src_full_path, dst_full_path)

        # Clean template data from keys that are dynamic
        template_data.pop("_id")
        template_data.pop("thumbnail_root")

        thumbnail_entity = {
            "_id": thumbnail_id,
            "type": "thumbnail",
            "schema": "pype:thumbnail-1.0",
            "data": {
                "template": thumbnail_template,
                "template_data": template_data
            }
        }
        # Create thumbnail entity
        io.insert_one(thumbnail_entity)
        self.log.debug(
            "Creating entity in database {}".format(str(thumbnail_entity))
        )
        # Set thumbnail id for version
        io.update_many(
            {"_id": version["_id"]},
            {"$set": {"data.thumbnail_id": thumbnail_id}}
        )
        self.log.debug("Setting thumbnail for version \"{}\" <{}>".format(
            version["name"], str(version["_id"])
        ))

        asset_entity = instance.data["assetEntity"]
        io.update_many(
            {"_id": asset_entity["_id"]},
            {"$set": {"data.thumbnail_id": thumbnail_id}}
        )
        self.log.debug("Setting thumbnail for asset \"{}\" <{}>".format(
            asset_entity["name"], str(version["_id"])
        ))
@@ -1,7 +1,7 @@
import os
import json
import re
import logging
from copy import copy

from avalon import api, io
from avalon.vendor import requests, clique
@@ -14,16 +14,15 @@ def _get_script():
    try:
        from pype.scripts import publish_filesequence
    except Exception:
        raise RuntimeError("Expected module 'publish_deadline'"
                           "to be available")
        assert False, "Expected module 'publish_deadline' to be available"

    module_path = publish_filesequence.__file__
    if module_path.endswith(".pyc"):
        module_path = module_path[:-len(".pyc")] + ".py"
        module_path = module_path[: -len(".pyc")] + ".py"

    module_path = os.path.normpath(module_path)
    mount_root = os.path.normpath(os.environ['PYPE_STUDIO_CORE_MOUNT'])
    network_root = os.path.normpath(os.environ['PYPE_STUDIO_CORE_PATH'])
    mount_root = os.path.normpath(os.environ["PYPE_STUDIO_CORE_MOUNT"])
    network_root = os.path.normpath(os.environ["PYPE_STUDIO_CORE_PATH"])

    module_path = module_path.replace(mount_root, network_root)

@@ -34,39 +33,29 @@ def _get_script():
def get_latest_version(asset_name, subset_name, family):
    # Get asset
    asset_name = io.find_one(
        {
            "type": "asset",
            "name": asset_name
        },
        projection={"name": True}
        {"type": "asset", "name": asset_name}, projection={"name": True}
    )

    subset = io.find_one(
        {
            "type": "subset",
            "name": subset_name,
            "parent": asset_name["_id"]
        },
        projection={"_id": True, "name": True}
        {"type": "subset", "name": subset_name, "parent": asset_name["_id"]},
        projection={"_id": True, "name": True},
    )

    # Check if subsets actually exists (pre-run check)
    assert subset, "No subsets found, please publish with `extendFrames` off"

    # Get version
    version_projection = {"name": True,
                          "data.startFrame": True,
                          "data.endFrame": True,
                          "parent": True}
    version_projection = {
        "name": True,
        "data.startFrame": True,
        "data.endFrame": True,
        "parent": True,
    }

    version = io.find_one(
        {
            "type": "version",
            "parent": subset["_id"],
            "data.families": family
        },
        {"type": "version", "parent": subset["_id"], "data.families": family},
        projection=version_projection,
        sort=[("name", -1)]
        sort=[("name", -1)],
    )

    assert version, "No version found, this is a bug"
@@ -87,8 +76,12 @@ def get_resources(version, extension=None):

    directory = api.get_representation_path(representation)
    print("Source: ", directory)
    resources = sorted([os.path.normpath(os.path.join(directory, fname))
                        for fname in os.listdir(directory)])
    resources = sorted(
        [
            os.path.normpath(os.path.join(directory, fname))
            for fname in os.listdir(directory)
        ]
    )

    return resources

@@ -138,8 +131,7 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
    - publishJobState (str, Optional): "Active" or "Suspended"
        This defaults to "Suspended"

    This requires a "frameStart" and "frameEnd" to be present in instance.data
    or in context.data.
    - expectedFiles (list or dict): explained below

    """
@@ -149,22 +141,39 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):

    hosts = ["fusion", "maya", "nuke"]

    families = [
        "render.farm",
        "renderlayer",
        "imagesequence"
    ]
    families = ["render.farm", "renderlayer", "imagesequence"]

    aov_filter = {"maya": ["beauty"]}

    enviro_filter = [
        "PATH",
        "PYTHONPATH",
        "FTRACK_API_USER",
        "FTRACK_API_KEY",
        "FTRACK_SERVER",
        "PYPE_ROOT",
        "PYPE_STUDIO_PROJECTS_PATH",
        "PYPE_STUDIO_PROJECTS_MOUNT"
    ]
        "PATH",
        "PYTHONPATH",
        "FTRACK_API_USER",
        "FTRACK_API_KEY",
        "FTRACK_SERVER",
        "PYPE_ROOT",
        "PYPE_METADATA_FILE",
        "PYPE_STUDIO_PROJECTS_PATH",
        "PYPE_STUDIO_PROJECTS_MOUNT",
    ]

    # pool used to do the publishing job
    deadline_pool = ""

    # regex for finding frame number in string
    R_FRAME_NUMBER = re.compile(r'.+\.(?P<frame>[0-9]+)\..+')

    # mapping of instance properties to be transferred to new instance
    # for every specified family
    instance_transfer = {
        "slate": ["slateFrame"],
        "review": ["lutPath"],
        "render.farm": ["bakeScriptPath", "bakeRenderPath",
                        "bakeWriteNodeName", "version"]
    }

    # list of family names to transfer to new family if present
    families_transfer = ["render3d", "render2d", "ftrack", "slate"]

    def _submit_deadline_post_job(self, instance, job):
        """
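`R_FRAME_NUMBER` extracts the frame component from dot-delimited file names; a
quick check of the pattern as defined above:

    import re

    R_FRAME_NUMBER = re.compile(r'.+\.(?P<frame>[0-9]+)\..+')
    match = R_FRAME_NUMBER.search("render.1001.exr")
    match.group("frame")  # -> "1001"
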
@@ -175,8 +184,7 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
        data = instance.data.copy()
        subset = data["subset"]
        job_name = "{batch} - {subset} [publish image sequence]".format(
            batch=job["Props"]["Name"],
            subset=subset
            batch=job["Props"]["Name"], subset=subset
        )

        metadata_filename = "{}_metadata.json".format(subset)
@@ -184,10 +192,10 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
        metadata_path = os.path.join(output_dir, metadata_filename)

        metadata_path = os.path.normpath(metadata_path)
        mount_root = os.path.normpath(os.environ['PYPE_STUDIO_PROJECTS_MOUNT'])
        network_root = os.path.normpath(os.environ['PYPE_STUDIO_PROJECTS_PATH'])

        mount_root = os.path.normpath(os.environ["PYPE_STUDIO_PROJECTS_MOUNT"])
        network_root = os.environ["PYPE_STUDIO_PROJECTS_PATH"]
        metadata_path = metadata_path.replace(mount_root, network_root)
        metadata_path = os.path.normpath(metadata_path)

        # Generate the payload for Deadline submission
        payload = {
@@ -195,54 +203,287 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
                "Plugin": "Python",
                "BatchName": job["Props"]["Batch"],
                "Name": job_name,
                "JobType": "Normal",
                "JobDependency0": job["_id"],
                "UserName": job["Props"]["User"],
                "Comment": instance.context.data.get("comment", ""),
                "Priority": job["Props"]["Pri"]
                "Priority": job["Props"]["Pri"],
                "Pool": self.deadline_pool,
                "OutputDirectory0": output_dir
            },
            "PluginInfo": {
                "Version": "3.6",
                "ScriptFile": _get_script(),
                "Arguments": '--paths "{}"'.format(metadata_path),
                "SingleFrameOnly": "True"
                "Arguments": "",
                "SingleFrameOnly": "True",
            },

            # Mandatory for Deadline, may be empty
            "AuxFiles": []
            "AuxFiles": [],
        }

        # Transfer the environment from the original job to this dependent
        # job so they use the same environment

        environment = job["Props"].get("Env", {})

        environment["PYPE_METADATA_FILE"] = metadata_path
        i = 0
        for index, key in enumerate(environment):
            self.log.info("KEY: {}".format(key))
            self.log.info("FILTER: {}".format(self.enviro_filter))

            if key.upper() in self.enviro_filter:
                payload["JobInfo"].update({
                    "EnvironmentKeyValue%d" % i: "{key}={value}".format(
                        key=key,
                        value=environment[key]
                    )
                })
                payload["JobInfo"].update(
                    {
                        "EnvironmentKeyValue%d"
                        % i: "{key}={value}".format(
                            key=key, value=environment[key]
                        )
                    }
                )
                i += 1

        # Avoid copied pools and remove secondary pool
        payload["JobInfo"]["Pool"] = "none"
        # remove secondary pool
        payload["JobInfo"].pop("SecondaryPool", None)

        self.log.info("Submitting..")
        self.log.info(json.dumps(payload, indent=4, sort_keys=True))
        self.log.info("Submitting Deadline job ...")
        # self.log.info(json.dumps(payload, indent=4, sort_keys=True))

        url = "{}/api/jobs".format(self.DEADLINE_REST_URL)
        response = requests.post(url, json=payload)
        if not response.ok:
            raise Exception(response.text)

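Deadline's job payload carries the environment as numbered
`EnvironmentKeyValue<N>` entries, which is all the loop above produces. A
reduced sketch of that encoding with hypothetical values:

    enviro_filter = ["PATH", "FTRACK_SERVER"]
    environment = {
        "PATH": "/usr/bin",
        "FTRACK_SERVER": "https://example.ftrackapp.com",
        "HOME": "/home/user",  # filtered out below
    }

    job_info = {}
    i = 0
    for key, value in environment.items():
        if key.upper() not in enviro_filter:
            continue
        job_info["EnvironmentKeyValue%d" % i] = "{}={}".format(key, value)
        i += 1
    # -> {"EnvironmentKeyValue0": "PATH=/usr/bin",
    #     "EnvironmentKeyValue1": "FTRACK_SERVER=https://example.ftrackapp.com"}
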
    def _copy_extend_frames(self, instance, representation):
        """
        This will copy all existing frames from subset's latest version back
        to render directory and rename them to what renderer is expecting.

        :param instance: instance to get required data from
        :type instance: pyblish.plugin.Instance
        """

        import speedcopy

        self.log.info("Preparing to copy ...")
        start = instance.data.get("startFrame")
        end = instance.data.get("endFrame")

        # get latest version of subset
        # this will stop if subset wasn't published yet
        version = get_latest_version(
            instance.data.get("asset"),
            instance.data.get("subset"), "render")
        # get its files based on extension
        subset_resources = get_resources(version, representation.get("ext"))
        r_col, _ = clique.assemble(subset_resources)

        # if override remove all frames we are expecting to be rendered
        # so we'll copy only those missing from current render
        if instance.data.get("overrideExistingFrame"):
            for frame in range(start, end + 1):
                if frame not in r_col.indexes:
                    continue
                r_col.indexes.remove(frame)

        # now we need to translate published names from representation
        # back. This is tricky, right now we'll just use same naming
        # and only switch frame numbers
        resource_files = []
        r_filename = os.path.basename(
            representation.get("files")[0])  # first file
        op = re.search(self.R_FRAME_NUMBER, r_filename)
        assert op is not None, "padding string wasn't found"
        pre = r_filename[:op.start("frame")]
        post = r_filename[op.end("frame"):]
        for frame in list(r_col):
            fn = re.search(self.R_FRAME_NUMBER, frame)
            # silencing linter as we need to compare to True, not to
            # type
            assert fn is not None, "padding string wasn't found"
            # list of tuples (source, destination)
            resource_files.append(
                (frame,
                 os.path.join(representation.get("stagingDir"),
                              "{}{}{}".format(pre,
                                              fn.group("frame"),
                                              post)))
            )

        # test if destination dir exists and create it if not
        output_dir = os.path.dirname(representation.get("files")[0])
        if not os.path.isdir(output_dir):
            os.makedirs(output_dir)

        # copy files
        for source in resource_files:
            speedcopy.copy(source[0], source[1])
            self.log.info(" > {}".format(source[1]))

        self.log.info(
            "Finished copying %i files" % len(resource_files))

    def _create_instances_for_aov(self, instance_data, exp_files):
        """
        This will create a new instance for every AOV it can detect in the
        expected files list.

        :param instance_data: skeleton data for instance (those needed later
                              by collector)
        :type instance_data: pyblish.plugin.Instance
        :param exp_files: list of expected files divided by aovs
        :type exp_files: list
        :returns: list of instances
        :rtype: list(publish.plugin.Instance)
        """

        task = os.environ["AVALON_TASK"]
        subset = instance_data["subset"]
        instances = []
        # go through aovs in expected files
        for aov, files in exp_files[0].items():
            cols, rem = clique.assemble(files)
            # we shouldn't have any remainders
            if rem:
                self.log.warning(
                    "skipping unexpected files found "
                    "in sequence: {}".format(rem))

            # but we really expect only one collection, nothing else makes sense
            assert len(cols) == 1, "only one image sequence type is expected"

            # create subset name `familyTaskSubset_AOV`
            subset_name = 'render{}{}{}{}_{}'.format(
                task[0].upper(), task[1:],
                subset[0].upper(), subset[1:],
                aov)

            staging = os.path.dirname(list(cols[0])[0])

            self.log.info("Creating data for: {}".format(subset_name))

            app = os.environ.get("AVALON_APP", "")

            preview = False
            if app in self.aov_filter.keys():
                if aov in self.aov_filter[app]:
                    preview = True

            new_instance = copy(instance_data)
            new_instance["subset"] = subset_name

            ext = cols[0].tail.lstrip(".")

            # create representation
            rep = {
                "name": ext,
                "ext": ext,
                "files": [os.path.basename(f) for f in list(cols[0])],
                "frameStart": int(instance_data.get("frameStartHandle")),
                "frameEnd": int(instance_data.get("frameEndHandle")),
                # If expectedFile are absolute, we need only filenames
                "stagingDir": staging,
                "anatomy_template": "render",
                "fps": new_instance.get("fps"),
                "tags": ["review"] if preview else []
            }

            self._solve_families(new_instance, preview)

            new_instance["representations"] = [rep]

            # if extending frames from existing version, copy files from there
            # into our destination directory
            if new_instance.get("extendFrames", False):
                self._copy_extend_frames(new_instance, rep)
            instances.append(new_instance)

        return instances

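The `familyTaskSubset_AOV` naming above is plain string casing and concatenation; in isolation (values invented):

task, subset, aov = "animation", "main", "beauty"
subset_name = 'render{}{}{}{}_{}'.format(
    task[0].upper(), task[1:],
    subset[0].upper(), subset[1:],
    aov)
print(subset_name)  # renderAnimationMain_beauty
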
    def _get_representations(self, instance, exp_files):
        """
        This will return representations of expected files if they are not
        in a hierarchy of AOVs. There should be only one sequence of files in
        most cases, but if not, we create a representation from each of them.

        :param instance: instance for which we are setting representations
        :type instance: pyblish.plugin.Instance
        :param exp_files: list of expected files
        :type exp_files: list
        :returns: list of representations
        :rtype: list(dict)
        """

        representations = []
        cols, rem = clique.assemble(exp_files)
        bake_render_path = instance.get("bakeRenderPath")

        # create representation for every collected sequence
        for c in cols:
            ext = c.tail.lstrip(".")
            preview = False
            # if filtered aov name is found in filename, toggle it for
            # preview video rendering
            for app in self.aov_filter:
                if os.environ.get("AVALON_APP", "") == app:
                    for aov in self.aov_filter[app]:
                        if re.match(
                            r".+(?:\.|_)({})(?:\.|_).*".format(aov),
                            list(c)[0]
                        ):
                            preview = True
                            break
                    break

            if bake_render_path:
                preview = False

            rep = {
                "name": ext,
                "ext": ext,
                "files": [os.path.basename(f) for f in list(c)],
                "frameStart": int(instance.get("frameStartHandle")),
                "frameEnd": int(instance.get("frameEndHandle")),
                # If expectedFile are absolute, we need only filenames
                "stagingDir": os.path.dirname(list(c)[0]),
                "anatomy_template": "render",
                "fps": instance.get("fps"),
                "tags": ["review", "preview"] if preview else [],
            }

            representations.append(rep)

            self._solve_families(instance, preview)

        # add remainders as representations
        for r in rem:
            ext = r.split(".")[-1]
            rep = {
                "name": ext,
                "ext": ext,
                "files": os.path.basename(r),
                "stagingDir": os.path.dirname(r),
                "anatomy_template": "publish",
            }
            if r in bake_render_path:
                rep.update({
                    "fps": instance.get("fps"),
                    "anatomy_template": "render",
                    "tags": ["review", "delete"]
                })
                # solve families with `preview` attributes
                self._solve_families(instance, True)
            representations.append(rep)

        return representations

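`clique.assemble()` is what splits the flat file list into sequences and remainders; a quick illustration with made-up names (the printed reprs are approximate):

import clique

files = ["foo_v01.0001.exr", "foo_v01.0002.exr", "notes.txt"]
cols, rem = clique.assemble(files)
print(cols)  # one collection for the frame pair: foo_v01.%04d.exr [1-2]
print(rem)   # ['notes.txt'] - single files that form no sequence
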
    def _solve_families(self, instance, preview=False):
        families = instance.get("families")
        # if we have one representation with preview tag
        # flag whole instance for review and for ftrack
        if preview:
            if "ftrack" not in families:
                if os.environ.get("FTRACK_SERVER"):
                    families.append("ftrack")
            if "review" not in families:
                families.append("review")
        instance["families"] = families

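A tiny standalone sketch of what `_solve_families` does to an instance's `families` (a plain dict stands in for the pyblish instance; the server URL is made up):

import os

os.environ.setdefault("FTRACK_SERVER", "https://studio.example.com")  # made-up

instance = {"families": ["render"]}
preview = True

families = instance.get("families")
if preview:
    if "ftrack" not in families:
        if os.environ.get("FTRACK_SERVER"):
            families.append("ftrack")
    if "review" not in families:
        families.append("review")
instance["families"] = families
print(instance["families"])  # ['render', 'ftrack', 'review']
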
    def process(self, instance):
        """
        Detect type of renderfarm submission and create and post a dependent job

@@ -252,210 +493,280 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
        :param instance: Instance data
        :type instance: dict
        """
        # Get a submission job
        data = instance.data.copy()
        context = instance.context
        self.context = context

        if hasattr(instance, "_log"):
            data['_log'] = instance._log
        render_job = data.pop("deadlineSubmissionJob", None)
        submission_type = "deadline"

        if not render_job:
            # No deadline job. Try Muster: musterSubmissionJob
            render_job = data.pop("musterSubmissionJob", None)
            submission_type = "muster"
            if not render_job:
                raise RuntimeError("Can't continue without valid Deadline "
                                   "or Muster submission prior to this "
                                   "plug-in.")
        assert render_job, (
            "Can't continue without valid Deadline "
            "or Muster submission prior to this "
            "plug-in."
        )

        if submission_type == "deadline":
            self.DEADLINE_REST_URL = os.environ.get("DEADLINE_REST_URL",
                                                    "http://localhost:8082")
            self.DEADLINE_REST_URL = os.environ.get(
                "DEADLINE_REST_URL", "http://localhost:8082"
            )
            assert self.DEADLINE_REST_URL, "Requires DEADLINE_REST_URL"

            self._submit_deadline_post_job(instance, render_job)

        asset = data.get("asset") or api.Session["AVALON_ASSET"]
        subset = data["subset"]
        subset = data.get("subset")

        # Get start/end frame from instance, if not available get from context
        context = instance.context
        start = instance.data.get("frameStart")
        if start is None:
            start = context.data["frameStart"]

        end = instance.data.get("frameEnd")
        if end is None:
            end = context.data["frameEnd"]

        # Add in regex for sequence filename
        # This assumes the output files start with the subset name and end
        # with a file extension. The "ext" key includes the dot.
        if "ext" in instance.data:
            ext = r"\." + re.escape(instance.data["ext"])
        else:
            ext = r"\.\D+"
        handle_start = instance.data.get("handleStart")
        if handle_start is None:
            handle_start = context.data["handleStart"]

        regex = r"^{subset}.*\d+{ext}$".format(subset=re.escape(subset),
                                               ext=ext)
        handle_end = instance.data.get("handleEnd")
        if handle_end is None:
            handle_end = context.data["handleEnd"]

        fps = instance.data.get("fps")
        if fps is None:
            fps = context.data["fps"]

        if data.get("extendFrames", False):
            start, end = self._extend_frames(
                asset,
                subset,
                start,
                end,
                data["overrideExistingFrame"])

        try:
            source = data['source']
            source = data["source"]
        except KeyError:
            source = context.data["currentFile"]

        source = source.replace(os.getenv("PYPE_STUDIO_PROJECTS_MOUNT"),
                                api.registered_root())

        source = source.replace(
            os.getenv("PYPE_STUDIO_PROJECTS_MOUNT"), api.registered_root()
        )
        relative_path = os.path.relpath(source, api.registered_root())
        source = os.path.join("{root}", relative_path).replace("\\", "/")

        # find subsets and version to attach render to
        attach_to = instance.data.get("attachTo")
        attach_subset_versions = []
        if attach_to:
            for subset in attach_to:
                for instance in context:
                    if instance.data["subset"] != subset["subset"]:
                        continue
                    attach_subset_versions.append(
                        {"version": instance.data["version"],
                         "subset": subset["subset"],
                         "family": subset["family"]})
        families = ["render"]

        # Write metadata for publish job
        metadata = {
        instance_skeleton_data = {
            "family": "render",
            "subset": subset,
            "families": families,
            "asset": asset,
            "frameStart": start,
            "frameEnd": end,
            "handleStart": handle_start,
            "handleEnd": handle_end,
            "frameStartHandle": start - handle_start,
            "frameEndHandle": end + handle_end,
            "fps": fps,
            "source": source,
            "extendFrames": data.get("extendFrames"),
            "overrideExistingFrame": data.get("overrideExistingFrame"),
            "pixelAspect": data.get("pixelAspect", 1),
            "resolutionWidth": data.get("resolutionWidth", 1920),
            "resolutionHeight": data.get("resolutionHeight", 1080),
        }

        # transfer specific families from original instance to new render
        for item in self.families_transfer:
            if item in instance.data.get("families", []):
                instance_skeleton_data["families"] += [item]

        # transfer specific properties from original instance based on
        # mapping dictionary `instance_transfer`
        for key, values in self.instance_transfer.items():
            if key in instance.data.get("families", []):
                for v in values:
                    instance_skeleton_data[v] = instance.data.get(v)

        # look into the instance data for representations that have the
        # tag `publish_on_farm` and include them
        for r in instance.data.get("representations", []):
            if "publish_on_farm" in r.get("tags"):
                # create representations attribute if not there
                if "representations" not in instance_skeleton_data.keys():
                    instance_skeleton_data["representations"] = []

                instance_skeleton_data["representations"].append(r)

        instances = None
        assert data.get("expectedFiles"), ("Submission from old Pype version"
                                           " - missing expectedFiles")

"""
|
||||
if content of `expectedFiles` are dictionaries, we will handle
|
||||
it as list of AOVs, creating instance from every one of them.
|
||||
|
||||
Example:
|
||||
--------
|
||||
|
||||
expectedFiles = [
|
||||
{
|
||||
"beauty": [
|
||||
"foo_v01.0001.exr",
|
||||
"foo_v01.0002.exr"
|
||||
],
|
||||
|
||||
"Z": [
|
||||
"boo_v01.0001.exr",
|
||||
"boo_v01.0002.exr"
|
||||
]
|
||||
}
|
||||
]
|
||||
|
||||
This will create instances for `beauty` and `Z` subset
|
||||
adding those files to their respective representations.
|
||||
|
||||
If we've got only list of files, we collect all filesequences.
|
||||
More then one doesn't probably make sense, but we'll handle it
|
||||
like creating one instance with multiple representations.
|
||||
|
||||
Example:
|
||||
--------
|
||||
|
||||
expectedFiles = [
|
||||
"foo_v01.0001.exr",
|
||||
"foo_v01.0002.exr",
|
||||
"xxx_v01.0001.exr",
|
||||
"xxx_v01.0002.exr"
|
||||
]
|
||||
|
||||
This will result in one instance with two representations:
|
||||
`foo` and `xxx`
|
||||
"""
|
||||
|
||||
        self.log.info(data.get("expectedFiles"))

        if isinstance(data.get("expectedFiles")[0], dict):
            # we cannot attach AOVs to other subsets as we consider every
            # AOV subset of its own.

            if len(data.get("attachTo")) > 0:
                assert len(data.get("expectedFiles")[0].keys()) == 1, (
                    "attaching multiple AOVs or renderable cameras to "
                    "subset is not supported")

            # create instances for every AOV we found in expected files.
            # note: this is done for every AOV and every render camera (if
            # there are multiple renderable cameras in scene)
            instances = self._create_instances_for_aov(
                instance_skeleton_data,
                data.get("expectedFiles"))
            self.log.info("got {} instance{}".format(
                len(instances),
                "s" if len(instances) > 1 else ""))

        else:
            representations = self._get_representations(
                instance_skeleton_data,
                data.get("expectedFiles")
            )

            if "representations" not in instance_skeleton_data.keys():
                instance_skeleton_data["representations"] = []

            # add representation
            instance_skeleton_data["representations"] += representations
            instances = [instance_skeleton_data]

        # if we are attaching to other subsets, create copies of existing
        # instances, change data to match that subset and replace
        # existing instances with modified data
        if instance.data.get("attachTo"):
            self.log.info("Attaching render to subset:")
            new_instances = []
            for at in instance.data.get("attachTo"):
                for i in instances:
                    new_i = copy(i)
                    new_i["version"] = at.get("version")
                    new_i["subset"] = at.get("subset")
                    new_i["append"] = True
                    new_i["families"].append(at.get("family"))
                    new_instances.append(new_i)
                    self.log.info("  - {} / v{}".format(
                        at.get("subset"), at.get("version")))
            instances = new_instances

        # publish job file
        publish_job = {
            "asset": asset,
            "regex": regex,
            "frameStart": start,
            "frameEnd": end,
            "fps": context.data.get("fps", None),
            "families": ["render"],
            "source": source,
            "user": context.data["user"],
            "version": context.data["version"],
            "version": context.data["version"],  # this is workfile version
            "intent": context.data.get("intent"),
            "comment": context.data.get("comment"),
            # Optional metadata (for debugging)
            "metadata": {
                "instance": data,
                "job": render_job,
                "session": api.Session.copy()
            }
            "job": render_job,
            "session": api.Session.copy(),
            "instances": instances
        }

        if api.Session["AVALON_APP"] == "nuke":
            metadata['subset'] = subset

        # pass Ftrack credentials in case of Muster
        if submission_type == "muster":
            ftrack = {
                "FTRACK_API_USER": os.environ.get("FTRACK_API_USER"),
                "FTRACK_API_KEY": os.environ.get("FTRACK_API_KEY"),
                "FTRACK_SERVER": os.environ.get("FTRACK_SERVER")
                "FTRACK_SERVER": os.environ.get("FTRACK_SERVER"),
            }
            metadata.update({"ftrack": ftrack})
            publish_job.update({"ftrack": ftrack})

        # Ensure output dir exists
        output_dir = instance.data["outputDir"]
        if not os.path.isdir(output_dir):
            os.makedirs(output_dir)

if data.get("extendFrames", False):
|
||||
|
||||
family = "render"
|
||||
override = data["overrideExistingFrame"]
|
||||
|
||||
# override = data.get("overrideExistingFrame", False)
|
||||
out_file = render_job.get("OutFile")
|
||||
if not out_file:
|
||||
raise RuntimeError("OutFile not found in render job!")
|
||||
|
||||
extension = os.path.splitext(out_file[0])[1]
|
||||
_ext = extension[1:]
|
||||
|
||||
# Frame comparison
|
||||
prev_start = None
|
||||
prev_end = None
|
||||
resource_range = range(int(start), int(end)+1)
|
||||
|
||||
# Gather all the subset files (one subset per render pass!)
|
||||
subset_names = [data["subset"]]
|
||||
subset_names.extend(data.get("renderPasses", []))
|
||||
resources = []
|
||||
for subset_name in subset_names:
|
||||
version = get_latest_version(asset_name=data["asset"],
|
||||
subset_name=subset_name,
|
||||
family=family)
|
||||
|
||||
# Set prev start / end frames for comparison
|
||||
if not prev_start and not prev_end:
|
||||
prev_start = version["data"]["frameStart"]
|
||||
prev_end = version["data"]["frameEnd"]
|
||||
|
||||
subset_resources = get_resources(version, _ext)
|
||||
resource_files = get_resource_files(subset_resources,
|
||||
resource_range,
|
||||
override)
|
||||
|
||||
resources.extend(resource_files)
|
||||
|
||||
updated_start = min(start, prev_start)
|
||||
updated_end = max(end, prev_end)
|
||||
|
||||
# Update metadata and instance start / end frame
|
||||
self.log.info("Updating start / end frame : "
|
||||
"{} - {}".format(updated_start, updated_end))
|
||||
|
||||
# TODO : Improve logic to get new frame range for the
|
||||
# publish job (publish_filesequence.py)
|
||||
# The current approach is not following Pyblish logic
|
||||
# which is based
|
||||
# on Collect / Validate / Extract.
|
||||
|
||||
# ---- Collect Plugins ---
|
||||
# Collect Extend Frames - Only run if extendFrames is toggled
|
||||
# # # Store in instance:
|
||||
# # # Previous rendered files per subset based on frames
|
||||
# # # --> Add to instance.data[resources]
|
||||
# # # Update publish frame range
|
||||
|
||||
# ---- Validate Plugins ---
|
||||
# Validate Extend Frames
|
||||
# # # Check if instance has the requirements to extend frames
|
||||
# There might have been some things which can be added to the list
|
||||
# Please do so when fixing this.
|
||||
|
||||
# Start frame
|
||||
metadata["frameStart"] = updated_start
|
||||
metadata["metadata"]["instance"]["frameStart"] = updated_start
|
||||
|
||||
# End frame
|
||||
metadata["frameEnd"] = updated_end
|
||||
metadata["metadata"]["instance"]["frameEnd"] = updated_end
|
||||
|
||||
metadata_filename = "{}_metadata.json".format(subset)
|
||||
|
||||
metadata_path = os.path.join(output_dir, metadata_filename)
|
||||
# convert log messages if they are `LogRecord` to their
|
||||
# string format to allow serializing as JSON later on.
|
||||
rendered_logs = []
|
||||
for log in metadata["metadata"]["instance"].get("_log", []):
|
||||
if isinstance(log, logging.LogRecord):
|
||||
rendered_logs.append(log.getMessage())
|
||||
else:
|
||||
rendered_logs.append(log)
|
||||
|
||||
metadata["metadata"]["instance"]["_log"] = rendered_logs
|
||||
self.log.info("Writing json file: {}".format(metadata_path))
|
||||
with open(metadata_path, "w") as f:
|
||||
json.dump(metadata, f, indent=4, sort_keys=True)
|
||||
json.dump(publish_job, f, indent=4, sort_keys=True)
|
||||
|
||||
# Copy files from previous render if extendFrame is True
|
||||
if data.get("extendFrames", False):
|
||||
def _extend_frames(self, asset, subset, start, end, override):
|
||||
"""
|
||||
This will get latest version of asset and update frame range based
|
||||
on minimum and maximuma values
|
||||
"""
|
||||
|
||||
self.log.info("Preparing to copy ..")
|
||||
import shutil
|
||||
# Frame comparison
|
||||
prev_start = None
|
||||
prev_end = None
|
||||
|
||||
dest_path = data["outputDir"]
|
||||
for source in resources:
|
||||
src_file = os.path.basename(source)
|
||||
dest = os.path.join(dest_path, src_file)
|
||||
shutil.copy(source, dest)
|
||||
version = get_latest_version(
|
||||
asset_name=asset,
|
||||
subset_name=subset,
|
||||
family='render'
|
||||
)
|
||||
|
||||
self.log.info("Finished copying %i files" % len(resources))
|
||||
# Set prev start / end frames for comparison
|
||||
if not prev_start and not prev_end:
|
||||
prev_start = version["data"]["frameStart"]
|
||||
prev_end = version["data"]["frameEnd"]
|
||||
|
||||
updated_start = min(start, prev_start)
|
||||
updated_end = max(end, prev_end)
|
||||
|
||||
self.log.info(
|
||||
"Updating start / end frame : "
|
||||
"{} - {}".format(updated_start, updated_end)
|
||||
)
|
||||
|
||||
return updated_start, updated_end
|
||||
|
|
|
|||
|
|
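The frame-range extension above boils down to a min/max merge of the new range with the previously published one; numerically (values invented):

start, end = 1010, 1050            # frames about to be rendered
prev_start, prev_end = 1001, 1020  # frames of the latest published version

updated_start = min(start, prev_start)  # 1001
updated_end = max(end, prev_end)        # 1050
print(updated_start, updated_end)
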
@@ -47,7 +47,7 @@ class ValidateFtrackAttributes(pyblish.api.InstancePlugin):

        host = pyblish.api.current_host()
        to_check = context.data["presets"].get(
            host, {}).get("ftrack_attributes")
            host, {}).get("ftrack_custom_attributes")
        if not to_check:
            self.log.warning("ftrack_attributes preset not found")
            return

@@ -1,13 +1,14 @@
import pyblish.api
import os
import subprocess
import pype.lib
try:
    import os.errno as errno
except ImportError:
    import errno


class ValidateFfmpegInstallef(pyblish.api.Validator):
class ValidateFFmpegInstalled(pyblish.api.Validator):
    """Validate availability of ffmpeg tool in PATH"""

    order = pyblish.api.ValidatorOrder

@@ -27,10 +28,8 @@ class ValidateFfmpegInstallef(pyblish.api.Validator):
        return True

    def process(self, instance):
        self.log.info("ffmpeg path: `{}`".format(
            os.environ.get("FFMPEG_PATH", "")))
        if self.is_tool(
                os.path.join(
                    os.environ.get("FFMPEG_PATH", ""), "ffmpeg")) is False:
        ffmpeg_path = pype.lib.get_ffmpeg_tool_path("ffmpeg")
        self.log.info("ffmpeg path: `{}`".format(ffmpeg_path))
        if self.is_tool(ffmpeg_path) is False:
            self.log.error("ffmpeg not found in PATH")
            raise RuntimeError('ffmpeg not installed.')

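The validator's `is_tool` helper falls outside this hunk; one common way such a check is implemented (a sketch under that assumption, not necessarily the plug-in's actual body):

import subprocess

def is_tool(name):
    """Return True if `name` can be executed (found on PATH or absolute)."""
    try:
        # launch the tool asking only for its version; we just care
        # whether the executable can be started at all
        subprocess.Popen([name, "-version"],
                         stdout=subprocess.PIPE,
                         stderr=subprocess.PIPE).communicate()
    except OSError:
        return False
    return True

print(is_tool("ffmpeg"))
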
@@ -1,43 +0,0 @@
import pyblish.api
import os


class ValidateTemplates(pyblish.api.ContextPlugin):
    """Check if all templates were filled"""

    label = "Validate Templates"
    order = pyblish.api.ValidatorOrder - 0.1
    hosts = ["maya", "houdini", "nuke"]

    def process(self, context):

        anatomy = context.data["anatomy"]
        if not anatomy:
            raise RuntimeError("Did not find anatomy")
        else:
            data = {
                "root": os.environ["PYPE_STUDIO_PROJECTS_PATH"],
                "project": {"name": "D001_projectsx",
                            "code": "prjX"},
                "ext": "exr",
                "version": 3,
                "task": "animation",
                "asset": "sh001",
                "app": "maya",
                "hierarchy": "ep101/sq01/sh010"}

            anatomy_filled = anatomy.format(data)
            self.log.info(anatomy_filled)

            data = {"root": os.environ["PYPE_STUDIO_PROJECTS_PATH"],
                    "project": {"name": "D001_projectsy",
                                "code": "prjY"},
                    "ext": "abc",
                    "version": 1,
                    "task": "lookdev",
                    "asset": "bob",
                    "app": "maya",
                    "hierarchy": "ep101/sq01/bob"}

            anatomy_filled = context.data["anatomy"].format(data)
            self.log.info(anatomy_filled["work"]["folder"])

pype/plugins/global/publish/validate_version.py (new file, 25 lines)

@@ -0,0 +1,25 @@
import pyblish.api


class ValidateVersion(pyblish.api.InstancePlugin):
    """Validate instance version.

    Pype does not allow overwriting previously published versions.
    """

    order = pyblish.api.ValidatorOrder

    label = "Validate Version"
    hosts = ["nuke", "maya", "blender"]

    def process(self, instance):
        version = instance.data.get("version")
        latest_version = instance.data.get("latestVersion")

        if latest_version is not None:
            msg = ("Version `{0}` that you are"
                   " trying to publish already"
                   " exists in the"
                   " database.").format(
                       version, latest_version)
            assert (int(version) > int(latest_version)), msg

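In other words, publishing only proceeds when the workfile version is strictly greater than the last published one; a stripped-down sketch of the same guard (numbers invented):

version, latest_version = 4, 3  # hypothetical instance data

if latest_version is not None:
    msg = ("Version `{0}` that you are trying to publish"
           " already exists in the database.").format(version)
    # passes for 4 > 3; an equal or lower version would raise AssertionError
    assert int(version) > int(latest_version), msg
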
@@ -2,43 +2,108 @@ import os
import json
import appdirs
import requests

from maya import cmds
import maya.app.renderSetup.model.renderSetup as renderSetup

import pype.maya.lib as lib
import avalon.maya


class CreateRenderGlobals(avalon.maya.Creator):
class CreateRender(avalon.maya.Creator):
    """Create render layer for export"""

    label = "Render Globals"
    family = "renderglobals"
    icon = "gears"
    defaults = ['Main']
    label = "Render"
    family = "rendering"
    icon = "eye"
    defaults = ["Main"]

    _token = None
    _user = None
    _password = None

    # renderSetup instance
    _rs = None

    _image_prefix_nodes = {
        'mentalray': 'defaultRenderGlobals.imageFilePrefix',
        'vray': 'vraySettings.fileNamePrefix',
        'arnold': 'defaultRenderGlobals.imageFilePrefix',
        'renderman': 'defaultRenderGlobals.imageFilePrefix',
        'redshift': 'defaultRenderGlobals.imageFilePrefix'
    }

    _image_prefixes = {
        'mentalray': 'maya/<Scene>/<RenderLayer>/<RenderLayer>_<RenderPass>',
        'vray': 'maya/<scene>/<Layer>/<Layer>',
        'arnold': 'maya/<Scene>/<RenderLayer>/<RenderLayer>_<RenderPass>',
        'renderman': 'maya/<Scene>/<layer>/<layer>_<aov>',
        'redshift': 'maya/<Scene>/<RenderLayer>/<RenderLayer>_<RenderPass>'
    }

    def __init__(self, *args, **kwargs):
        super(CreateRenderGlobals, self).__init__(*args, **kwargs)
        super(CreateRender, self).__init__(*args, **kwargs)

        # We won't be publishing this one
        self.data["id"] = "avalon.renderglobals"
    def process(self):
        exists = cmds.ls(self.name)
        if exists:
            return cmds.warning("%s already exists." % exists[0])

        use_selection = self.options.get("useSelection")
        with lib.undo_chunk():
            self._create_render_settings()
            instance = super(CreateRender, self).process()
            cmds.setAttr("{}.machineList".format(instance), lock=True)
            self._rs = renderSetup.instance()
            layers = self._rs.getRenderLayers()
            if use_selection:
                print(">>> processing existing layers")
                sets = []
                for layer in layers:
                    print("  - creating set for {}".format(layer.name()))
                    render_set = cmds.sets(n="LAYER_{}".format(layer.name()))
                    sets.append(render_set)
                cmds.sets(sets, forceElement=instance)

            # if no render layers are present, create a default one with
            # an asterisk selector
            if not layers:
                rl = self._rs.createRenderLayer('Main')
                cl = rl.createCollection("defaultCollection")
                cl.getSelector().setPattern('*')

            renderer = cmds.getAttr(
                'defaultRenderGlobals.currentRenderer').lower()
            # handle various renderman names
            if renderer.startswith('renderman'):
                renderer = 'renderman'

            cmds.setAttr(self._image_prefix_nodes[renderer],
                         self._image_prefixes[renderer],
                         type="string")

    def _create_render_settings(self):
        # get pools
        pools = []

        deadline_url = os.environ.get('DEADLINE_REST_URL', None)
        muster_url = os.environ.get('MUSTER_REST_URL', None)
        deadline_url = os.environ.get("DEADLINE_REST_URL", None)
        muster_url = os.environ.get("MUSTER_REST_URL", None)
        if deadline_url and muster_url:
            self.log.error("Both Deadline and Muster are enabled. "
                           "Cannot support both.")
            self.log.error(
                "Both Deadline and Muster are enabled. " "Cannot support both."
            )
            raise RuntimeError("Both Deadline and Muster are enabled")

        if deadline_url is None:
            self.log.warning("Deadline REST API url not found.")
        else:
            argument = "{}/api/pools?NamesOnly=true".format(deadline_url)
            response = self._requests_get(argument)
            try:
                response = self._requests_get(argument)
            except requests.exceptions.ConnectionError as e:
                msg = 'Cannot connect to deadline web service'
                self.log.error(msg)
                raise RuntimeError('{} - {}'.format(msg, e))
            if not response.ok:
                self.log.warning("No pools retrieved")
            else:

@@ -57,8 +122,8 @@ class CreateRenderGlobals(avalon.maya.Creator):
            try:
                pools = self._get_muster_pools()
            except requests.exceptions.HTTPError as e:
                if e.startswith('401'):
                    self.log.warning('access token expired')
                if e.startswith("401"):
                    self.log.warning("access token expired")
                    self._show_login()
                    raise RuntimeError("Access token expired")
            except requests.exceptions.ConnectionError:

@@ -66,20 +131,15 @@ class CreateRenderGlobals(avalon.maya.Creator):
                raise RuntimeError("Cannot connect to {}".format(muster_url))
            pool_names = []
            for pool in pools:
                self.log.info("  - pool: {}".format(pool['name']))
                pool_names.append(pool['name'])
                self.log.info("  - pool: {}".format(pool["name"]))
                pool_names.append(pool["name"])

            self.data["primaryPool"] = pool_names

        # We don't need subset or asset attributes
        # self.data.pop("subset", None)
        # self.data.pop("asset", None)
        # self.data.pop("active", None)

        self.data["suspendPublishJob"] = False
        self.data["extendFrames"] = False
        self.data["overrideExistingFrame"] = True
        self.data["useLegacyRenderLayers"] = True
        # self.data["useLegacyRenderLayers"] = True
        self.data["priority"] = 50
        self.data["framesPerTask"] = 1
        self.data["whitelist"] = False

@@ -88,20 +148,6 @@ class CreateRenderGlobals(avalon.maya.Creator):

        self.options = {"useSelection": False}  # Force no content

    def process(self):

        exists = cmds.ls(self.name)
        assert len(exists) <= 1, (
            "More than one renderglobal exists, this is a bug"
        )

        if exists:
            return cmds.warning("%s already exists." % exists[0])

        with lib.undo_chunk():
            super(CreateRenderGlobals, self).process()
            cmds.setAttr("{}.machineList".format(self.name), lock=True)

    def _load_credentials(self):
        """
        Load Muster credentials from file and set `MUSTER_USER`,

@@ -111,14 +157,12 @@ class CreateRenderGlobals(avalon.maya.Creator):

        Show login dialog if access token is invalid or missing.
        """
        app_dir = os.path.normpath(
            appdirs.user_data_dir('pype-app', 'pype')
        )
        file_name = 'muster_cred.json'
        app_dir = os.path.normpath(appdirs.user_data_dir("pype-app", "pype"))
        file_name = "muster_cred.json"
        fpath = os.path.join(app_dir, file_name)
        file = open(fpath, 'r')
        file = open(fpath, "r")
        muster_json = json.load(file)
        self._token = muster_json.get('token', None)
        self._token = muster_json.get("token", None)
        if not self._token:
            self._show_login()
            raise RuntimeError("Invalid access token for Muster")

@@ -131,26 +175,25 @@ class CreateRenderGlobals(avalon.maya.Creator):
        """
        Get render pools from muster
        """
        params = {
            'authToken': self._token
        }
        api_entry = '/api/pools/list'
        response = self._requests_get(
            self.MUSTER_REST_URL + api_entry, params=params)
        params = {"authToken": self._token}
        api_entry = "/api/pools/list"
        response = self._requests_get(self.MUSTER_REST_URL + api_entry,
                                      params=params)
        if response.status_code != 200:
            if response.status_code == 401:
                self.log.warning('Authentication token expired.')
                self.log.warning("Authentication token expired.")
                self._show_login()
            else:
                self.log.error(
                    'Cannot get pools from Muster: {}'.format(
                        response.status_code))
                raise Exception('Cannot get pools from Muster')
                    ("Cannot get pools from "
                     "Muster: {}").format(response.status_code)
                )
                raise Exception("Cannot get pools from Muster")
        try:
            pools = response.json()['ResponseData']['pools']
            pools = response.json()["ResponseData"]["pools"]
        except ValueError as e:
            self.log.error('Invalid response from Muster server {}'.format(e))
            raise Exception('Invalid response from Muster server')
            self.log.error("Invalid response from Muster server {}".format(e))
            raise Exception("Invalid response from Muster server")

        return pools

@@ -162,8 +205,8 @@ class CreateRenderGlobals(avalon.maya.Creator):
        self.log.debug(api_url)
        login_response = self._requests_post(api_url, timeout=1)
        if login_response.status_code != 200:
            self.log.error('Cannot show login form to Muster')
            raise Exception('Cannot show login form to Muster')
            self.log.error("Cannot show login form to Muster")
            raise Exception("Cannot show login form to Muster")

    def _requests_post(self, *args, **kwargs):
        """ Wrapper for requests, disabling SSL certificate validation if

@@ -175,8 +218,10 @@ class CreateRenderGlobals(avalon.maya.Creator):
        WARNING: disabling SSL certificate validation is defeating one line
        of defense SSL is providing and it is not recommended.
        """
        if 'verify' not in kwargs:
            kwargs['verify'] = False if os.getenv("PYPE_DONT_VERIFY_SSL", True) else True  # noqa
        if "verify" not in kwargs:
            kwargs["verify"] = (
                False if os.getenv("PYPE_DONT_VERIFY_SSL", True) else True
            )  # noqa
        return requests.post(*args, **kwargs)

    def _requests_get(self, *args, **kwargs):

@@ -189,6 +234,8 @@ class CreateRenderGlobals(avalon.maya.Creator):
        WARNING: disabling SSL certificate validation is defeating one line
        of defense SSL is providing and it is not recommended.
        """
        if 'verify' not in kwargs:
            kwargs['verify'] = False if os.getenv("PYPE_DONT_VERIFY_SSL", True) else True  # noqa
        if "verify" not in kwargs:
            kwargs["verify"] = (
                False if os.getenv("PYPE_DONT_VERIFY_SSL", True) else True
            )  # noqa
        return requests.get(*args, **kwargs)

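A note on the `verify` expression above: `os.getenv` returns a string when the variable is set, and the non-empty default `True` otherwise, so the ternary evaluates to `False` (verification off) in practically every case — only an explicitly empty `PYPE_DONT_VERIFY_SSL` re-enables it. If verify-by-default were the intent, an explicit parse could look like this sketch (not the plug-in's actual code):

import os

def ssl_verify():
    """Treat PYPE_DONT_VERIFY_SSL as an opt-out flag; unset means verify."""
    flag = os.getenv("PYPE_DONT_VERIFY_SSL", "")
    return flag.lower() not in ("1", "true", "yes")

print(ssl_verify())  # True unless the variable is explicitly set
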
@@ -140,9 +140,9 @@ class ImportMayaLoader(api.Loader):

        message = "Are you sure you want to import this?"
        state = QtWidgets.QMessageBox.warning(None,
                                              "Are you sure?",
                                              message,
                                              buttons=buttons,
                                              defaultButton=accept)
                                              "Are you sure?",
                                              message,
                                              buttons=buttons,
                                              defaultButton=accept)

        return state == accept

@@ -1,62 +0,0 @@
import pype.maya.plugin
import os
from pypeapp import config


class CameraLoader(pype.maya.plugin.ReferenceLoader):
    """Specific loader of Alembic for the pype.camera family"""

    families = ["camera"]
    label = "Reference camera"
    representations = ["abc", "ma"]
    order = -10
    icon = "code-fork"
    color = "orange"

    def process_reference(self, context, name, namespace, data):

        import maya.cmds as cmds
        # Get family type from the context

        try:
            family = context["representation"]["context"]["family"]
        except ValueError:
            family = "camera"

        cmds.loadPlugin("AbcImport.mll", quiet=True)
        groupName = "{}:{}".format(namespace, name)
        nodes = cmds.file(self.fname,
                          namespace=namespace,
                          sharedReferenceFile=False,
                          groupReference=True,
                          groupName="{}:{}".format(namespace, name),
                          reference=True,
                          returnNewNodes=True)

        cameras = cmds.ls(nodes, type="camera")

        presets = config.get_presets(project=os.environ['AVALON_PROJECT'])
        colors = presets['plugins']['maya']['load']['colors']

        c = colors.get(family)
        if c is not None:
            cmds.setAttr(groupName + ".useOutlinerColor", 1)
            cmds.setAttr(groupName + ".outlinerColor",
                         c[0], c[1], c[2])

        # Check the Maya version, lockTransform has been introduced since
        # Maya 2016.5 Ext 2
        version = int(cmds.about(version=True))
        if version >= 2016:
            for camera in cameras:
                cmds.camera(camera, edit=True, lockTransform=True)
        else:
            self.log.warning("This version of Maya does not support locking of"
                             " transforms of cameras.")

        self[:] = nodes

        return nodes

    def switch(self, container, representation):
        self.update(container, representation)

@@ -1,54 +0,0 @@
import pype.maya.plugin
import os
from pypeapp import config


class FBXLoader(pype.maya.plugin.ReferenceLoader):
    """Load the FBX"""

    families = ["fbx"]
    representations = ["fbx"]

    label = "Reference FBX"
    order = -10
    icon = "code-fork"
    color = "orange"

    def process_reference(self, context, name, namespace, data):

        import maya.cmds as cmds
        from avalon import maya

        try:
            family = context["representation"]["context"]["family"]
        except ValueError:
            family = "fbx"

        # Ensure FBX plug-in is loaded
        cmds.loadPlugin("fbxmaya", quiet=True)

        with maya.maintained_selection():
            nodes = cmds.file(self.fname,
                              namespace=namespace,
                              reference=True,
                              returnNewNodes=True,
                              groupReference=True,
                              groupName="{}:{}".format(namespace, name))

        groupName = "{}:{}".format(namespace, name)

        presets = config.get_presets(project=os.environ['AVALON_PROJECT'])
        colors = presets['plugins']['maya']['load']['colors']

        c = colors.get(family)
        if c is not None:
            cmds.setAttr(groupName + ".useOutlinerColor", 1)
            cmds.setAttr(groupName + ".outlinerColor",
                         c[0], c[1], c[2])

        self[:] = nodes

        return nodes

    def switch(self, container, representation):
        self.update(container, representation)

@@ -1,68 +0,0 @@
import pype.maya.plugin
from pypeapp import config
import os


class MayaAsciiLoader(pype.maya.plugin.ReferenceLoader):
    """Load the model"""

    families = ["mayaAscii",
                "setdress",
                "layout"]
    representations = ["ma"]

    label = "Reference Maya Ascii"
    order = -10
    icon = "code-fork"
    color = "orange"

    def process_reference(self, context, name, namespace, data):

        import maya.cmds as cmds
        from avalon import maya

        try:
            family = context["representation"]["context"]["family"]
        except ValueError:
            family = "model"

        with maya.maintained_selection():
            nodes = cmds.file(self.fname,
                              namespace=namespace,
                              reference=True,
                              returnNewNodes=True,
                              groupReference=True,
                              groupName="{}:{}".format(namespace, name))

        self[:] = nodes
        groupName = "{}:{}".format(namespace, name)

        presets = config.get_presets(project=os.environ['AVALON_PROJECT'])
        colors = presets['plugins']['maya']['load']['colors']

        c = colors.get(family)
        if c is not None:
            cmds.setAttr(groupName + ".useOutlinerColor", 1)
            cmds.setAttr(groupName + ".outlinerColor",
                         c[0], c[1], c[2])
        cmds.setAttr(groupName + ".displayHandle", 1)
        # get bounding box
        bbox = cmds.exactWorldBoundingBox(groupName)
        # get pivot position on world space
        pivot = cmds.xform(groupName, q=True, sp=True, ws=True)
        # center of bounding box
        cx = (bbox[0] + bbox[3]) / 2
        cy = (bbox[1] + bbox[4]) / 2
        cz = (bbox[2] + bbox[5]) / 2
        # add pivot position to calculate offset
        cx = cx + pivot[0]
        cy = cy + pivot[1]
        cz = cz + pivot[2]
        # set selection handle offset to center of bounding box
        cmds.setAttr(groupName + ".selectHandleX", cx)
        cmds.setAttr(groupName + ".selectHandleY", cy)
        cmds.setAttr(groupName + ".selectHandleZ", cz)
        return nodes

    def switch(self, container, representation):
        self.update(container, representation)

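The selection-handle placement above is plain arithmetic: `exactWorldBoundingBox` returns `[xmin, ymin, zmin, xmax, ymax, zmax]`, so the center is the midpoint of each axis pair, offset by the pivot. Outside Maya that reduces to (sample numbers invented):

bbox = [-1.0, 0.0, -2.0, 3.0, 4.0, 2.0]  # xmin, ymin, zmin, xmax, ymax, zmax
pivot = [0.5, 0.0, 0.0]                  # scale pivot in world space

cx = (bbox[0] + bbox[3]) / 2 + pivot[0]  # 1.5
cy = (bbox[1] + bbox[4]) / 2 + pivot[1]  # 2.0
cz = (bbox[2] + bbox[5]) / 2 + pivot[2]  # 0.0
print(cx, cy, cz)
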
@@ -1,4 +1,6 @@
import pype.maya.plugin
from avalon import api, maya
from maya import cmds
import os
from pypeapp import config

@@ -6,8 +8,15 @@ from pypeapp import config
class ReferenceLoader(pype.maya.plugin.ReferenceLoader):
    """Load the model"""

    families = ["model", "pointcache", "animation"]
    representations = ["ma", "abc"]
    families = ["model",
                "pointcache",
                "animation",
                "mayaAscii",
                "setdress",
                "layout",
                "camera",
                "rig"]
    representations = ["ma", "abc", "fbx"]
    tool_names = ["loader"]

    label = "Reference"

@@ -15,7 +24,7 @@ class ReferenceLoader(pype.maya.plugin.ReferenceLoader):
    icon = "code-fork"
    color = "orange"

    def process_reference(self, context, name, namespace, data):
    def process_reference(self, context, name, namespace, options):
        import maya.cmds as cmds
        from avalon import maya
        import pymel.core as pm

@@ -37,27 +46,29 @@ class ReferenceLoader(pype.maya.plugin.ReferenceLoader):
                              reference=True,
                              returnNewNodes=True)

        namespace = cmds.referenceQuery(nodes[0], namespace=True)
        # namespace = cmds.referenceQuery(nodes[0], namespace=True)

        shapes = cmds.ls(nodes, shapes=True, long=True)
        print(shapes)

        newNodes = (list(set(nodes) - set(shapes)))
        print(newNodes)

        current_namespace = pm.namespaceInfo(currentNamespace=True)

        if current_namespace != ":":
            groupName = current_namespace + ":" + groupName

        groupNode = pm.PyNode(groupName)
        roots = set()
        print(nodes)

        for node in newNodes:
            try:
                roots.add(pm.PyNode(node).getAllParents()[-2])
            except:
            except:  # noqa: E722
                pass
        for root in roots:
            root.setParent(world=True)

        groupNode.root().zeroTransformPivots()
        groupNode.zeroTransformPivots()
        for root in roots:
            root.setParent(groupNode)

@@ -90,23 +101,41 @@ class ReferenceLoader(pype.maya.plugin.ReferenceLoader):
        cmds.setAttr(groupName + ".selectHandleY", cy)
        cmds.setAttr(groupName + ".selectHandleZ", cz)

        if family == "rig":
            self._post_process_rig(name, namespace, context, options)
        else:
            if "translate" in options:
                cmds.setAttr(groupName + ".t", *options["translate"])

        return newNodes

    def switch(self, container, representation):
        self.update(container, representation)

    def _post_process_rig(self, name, namespace, context, options):

# for backwards compatibility
class AbcLoader(ReferenceLoader):
    label = "Deprecated loader (don't use)"
    families = ["pointcache", "animation"]
    representations = ["abc"]
    tool_names = []
        output = next((node for node in self if
                       node.endswith("out_SET")), None)
        controls = next((node for node in self if
                         node.endswith("controls_SET")), None)

        assert output, "No out_SET in rig, this is a bug."
        assert controls, "No controls_SET in rig, this is a bug."

# for backwards compatibility
class ModelLoader(ReferenceLoader):
    label = "Deprecated loader (don't use)"
    families = ["model", "pointcache"]
    representations = ["abc"]
    tool_names = []
        # Find the roots amongst the loaded nodes
        roots = cmds.ls(self[:], assemblies=True, long=True)
        assert roots, "No root nodes in rig, this is a bug."

        asset = api.Session["AVALON_ASSET"]
        dependency = str(context["representation"]["_id"])

        self.log.info("Creating subset: {}".format(namespace))

        # Create the animation instance
        with maya.maintained_selection():
            cmds.select([output, controls] + roots, noExpand=True)
            api.create(name=namespace,
                       asset=asset,
                       family="animation",
                       options={"useSelection": True},
                       data={"dependencies": dependency})

@@ -1,95 +0,0 @@
from maya import cmds

import pype.maya.plugin
from avalon import api, maya
import os
from pypeapp import config


class RigLoader(pype.maya.plugin.ReferenceLoader):
    """Specific loader for rigs

    This automatically creates an instance for animators upon load.

    """

    families = ["rig"]
    representations = ["ma"]

    label = "Reference rig"
    order = -10
    icon = "code-fork"
    color = "orange"

    def process_reference(self, context, name, namespace, data):

        try:
            family = context["representation"]["context"]["family"]
        except ValueError:
            family = "rig"

        groupName = "{}:{}".format(namespace, name)
        nodes = cmds.file(self.fname,
                          namespace=namespace,
                          reference=True,
                          returnNewNodes=True,
                          groupReference=True,
                          groupName=groupName)

        cmds.xform(groupName, pivots=(0, 0, 0))

        presets = config.get_presets(project=os.environ['AVALON_PROJECT'])
        colors = presets['plugins']['maya']['load']['colors']

        c = colors.get(family)
        if c is not None:
            cmds.setAttr(groupName + ".useOutlinerColor", 1)
            cmds.setAttr(groupName + ".outlinerColor",
                         c[0], c[1], c[2])

        shapes = cmds.ls(nodes, shapes=True, long=True)
        print(shapes)

        newNodes = (list(set(nodes) - set(shapes)))
        print(newNodes)

        # Store for post-process
        self[:] = newNodes
        if data.get("post_process", True):
            self._post_process(name, namespace, context, data)

        return newNodes

    def _post_process(self, name, namespace, context, data):

        # TODO(marcus): We are hardcoding the name "out_SET" here.
        #   Better register this keyword, so that it can be used
        #   elsewhere, such as in the Integrator plug-in,
        #   without duplication.

        output = next((node for node in self if
                       node.endswith("out_SET")), None)
        controls = next((node for node in self if
                         node.endswith("controls_SET")), None)

        assert output, "No out_SET in rig, this is a bug."
        assert controls, "No controls_SET in rig, this is a bug."

        # Find the roots amongst the loaded nodes
        roots = cmds.ls(self[:], assemblies=True, long=True)
        assert roots, "No root nodes in rig, this is a bug."

        asset = api.Session["AVALON_ASSET"]
        dependency = str(context["representation"]["_id"])

        # Create the animation instance
        with maya.maintained_selection():
            cmds.select([output, controls] + roots, noExpand=True)
            api.create(name=namespace,
                       asset=asset,
                       family="animation",
                       options={"useSelection": True},
                       data={"dependencies": dependency})

    def switch(self, container, representation):
        self.update(container, representation)

@@ -117,7 +117,7 @@ class VRayProxyLoader(api.Loader):
        vray_mesh = cmds.createNode('VRayMesh', name="{}_VRMS".format(name))
        mesh_shape = cmds.createNode("mesh", name="{}_GEOShape".format(name))
        vray_mat = cmds.shadingNode("VRayMeshMaterial", asShader=True,
                                    name="{}_VRMM".format(name))
                                    name="{}_VRMM".format(name))
        vray_mat_sg = cmds.sets(name="{}_VRSG".format(name),
                                empty=True,
                                renderable=True,

@@ -103,16 +103,22 @@ class CollectInstances(pyblish.api.ContextPlugin):
            # Store the exact members of the object set
            instance.data["setMembers"] = members

            # Define nice label
            name = cmds.ls(objset, long=False)[0]  # use short name
            label = "{0} ({1})".format(name,
                                       data["asset"])

            if "handles" in data:
                data["handleStart"] = data["handles"]
                data["handleEnd"] = data["handles"]

            # Append start frame and end frame to label if present
            if "frameStart" and "frameEnd" in data:
                label += "  [{0}-{1}]".format(int(data["frameStart"]),
                                              int(data["frameEnd"]))
            data["frameStartHandle"] = data["frameStart"] - data["handleStart"]
            data["frameEndHandle"] = data["frameEnd"] + data["handleEnd"]

            label += "  [{0}-{1}]".format(int(data["frameStartHandle"]),
                                          int(data["frameEndHandle"]))

            instance.data["label"] = label

@@ -122,7 +128,6 @@ class CollectInstances(pyblish.api.ContextPlugin):
            # user interface interested in visualising it.
            self.log.info("Found: \"%s\" " % instance.data["name"])
            self.log.debug("DATA: \"%s\" " % instance.data)


def sort_by_family(instance):
    """Sort by family"""

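The handle math introduced in this hunk widens the publish range on both sides; numerically (values invented):

data = {"frameStart": 1001, "frameEnd": 1100, "handles": 5}
if "handles" in data:
    data["handleStart"] = data["handles"]
    data["handleEnd"] = data["handles"]

data["frameStartHandle"] = data["frameStart"] - data["handleStart"]  # 996
data["frameEndHandle"] = data["frameEnd"] + data["handleEnd"]        # 1105
print(data["frameStartHandle"], data["frameEndHandle"])
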
pype/plugins/maya/publish/collect_render.py (new file, 911 lines)

@@ -0,0 +1,911 @@
"""
This collector will go through render layers in maya and prepare all data
needed to create instances and their representations for submission and
publishing on the farm.

Requires:
    instance -> families
    instance -> setMembers

    context -> currentFile
    context -> workspaceDir
    context -> user

    session -> AVALON_ASSET

Optional:

Provides:
    instance -> label
    instance -> subset
    instance -> attachTo
    instance -> setMembers
    instance -> publish
    instance -> frameStart
    instance -> frameEnd
    instance -> byFrameStep
    instance -> renderer
    instance -> family
    instance -> families
    instance -> asset
    instance -> time
    instance -> author
    instance -> source
    instance -> expectedFiles
    instance -> resolutionWidth
    instance -> resolutionHeight
    instance -> pixelAspect
"""

import re
import os
import types
import six
from abc import ABCMeta, abstractmethod

from maya import cmds
import maya.app.renderSetup.model.renderSetup as renderSetup

import pyblish.api

from avalon import maya, api
import pype.maya.lib as lib


R_SINGLE_FRAME = re.compile(r'^(-?)\d+$')
R_FRAME_RANGE = re.compile(r'^(?P<sf>(-?)\d+)-(?P<ef>(-?)\d+)$')
R_FRAME_NUMBER = re.compile(r'.+\.(?P<frame>[0-9]+)\..+')
R_LAYER_TOKEN = re.compile(
    r'.*%l.*|.*<layer>.*|.*<renderlayer>.*', re.IGNORECASE)
R_AOV_TOKEN = re.compile(r'.*%a.*|.*<aov>.*|.*<renderpass>.*', re.IGNORECASE)
R_SUBSTITUTE_AOV_TOKEN = re.compile(r'%a|<aov>|<renderpass>', re.IGNORECASE)
R_REMOVE_AOV_TOKEN = re.compile(r'_%a|_<aov>|_<renderpass>', re.IGNORECASE)
# to remove unused renderman tokens
R_CLEAN_FRAME_TOKEN = re.compile(r'\.?<f\d>\.?', re.IGNORECASE)
R_CLEAN_EXT_TOKEN = re.compile(r'\.?<ext>\.?', re.IGNORECASE)

R_SUBSTITUTE_LAYER_TOKEN = re.compile(
    r'%l|<layer>|<renderlayer>', re.IGNORECASE)
R_SUBSTITUTE_CAMERA_TOKEN = re.compile(r'%c|<camera>', re.IGNORECASE)
R_SUBSTITUTE_SCENE_TOKEN = re.compile(r'%s|<scene>', re.IGNORECASE)

RENDERER_NAMES = {
    'mentalray': 'MentalRay',
    'vray': 'V-Ray',
    'arnold': 'Arnold',
    'renderman': 'Renderman',
    'redshift': 'Redshift'
}

# not sure about the renderman image prefix
ImagePrefixes = {
    'mentalray': 'defaultRenderGlobals.imageFilePrefix',
    'vray': 'vraySettings.fileNamePrefix',
    'arnold': 'defaultRenderGlobals.imageFilePrefix',
    'renderman': 'rmanGlobals.imageFileFormat',
    'redshift': 'defaultRenderGlobals.imageFilePrefix'
}

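A quick illustration of how these token patterns are meant to be applied to a Maya image file prefix (the prefix string and substitutions are invented):

import re

R_SUBSTITUTE_LAYER_TOKEN = re.compile(r'%l|<layer>|<renderlayer>', re.IGNORECASE)
R_SUBSTITUTE_AOV_TOKEN = re.compile(r'%a|<aov>|<renderpass>', re.IGNORECASE)

prefix = "maya/<Scene>/<RenderLayer>/<RenderLayer>_<RenderPass>"
prefix = re.sub(R_SUBSTITUTE_LAYER_TOKEN, "rs_main", prefix)
prefix = re.sub(R_SUBSTITUTE_AOV_TOKEN, "beauty", prefix)
print(prefix)  # maya/<Scene>/rs_main/rs_main_beauty
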
class CollectMayaRender(pyblish.api.ContextPlugin):
|
||||
"""Gather all publishable render layers from renderSetup"""
|
||||
|
||||
order = pyblish.api.CollectorOrder + 0.01
|
||||
hosts = ["maya"]
|
||||
label = "Collect Render Layers"
|
||||
|
||||
def process(self, context):
|
||||
render_instance = None
|
||||
for instance in context:
|
||||
if 'rendering' in instance.data['families']:
|
||||
render_instance = instance
|
||||
render_instance.data["remove"] = True
|
||||
|
||||
# make sure workfile instance publishing is enabled
|
||||
if 'workfile' in instance.data['families']:
|
||||
instance.data["publish"] = True
|
||||
|
||||
if not render_instance:
|
||||
self.log.info("No render instance found, skipping render "
|
||||
"layer collection.")
|
||||
return
|
||||
|
||||
render_globals = render_instance
|
||||
collected_render_layers = render_instance.data['setMembers']
|
||||
filepath = context.data["currentFile"].replace("\\", "/")
|
||||
asset = api.Session["AVALON_ASSET"]
|
||||
workspace = context.data["workspaceDir"]
|
||||
|
||||
self._rs = renderSetup.instance()
|
||||
        maya_render_layers = {
            l.name(): l for l in self._rs.getRenderLayers()}

        self.maya_layers = maya_render_layers

        for layer in collected_render_layers:
            # every layer in set should start with `LAYER_` prefix
            try:
                expected_layer_name = re.search(
                    r"^LAYER_(.*)", layer).group(1)
            except AttributeError:
                # `re.search` returns None when the prefix is missing
                msg = "Invalid layer name in set [ {} ]".format(layer)
                self.log.warning(msg)
                continue

            self.log.info("processing %s" % layer)
            # check if layer is part of renderSetup
            if expected_layer_name not in maya_render_layers:
                msg = ("Render layer [ {} ] is not in "
                       "Render Setup".format(expected_layer_name))
                self.log.warning(msg)
                continue

            # check if layer is renderable
            if not maya_render_layers[expected_layer_name].isRenderable():
                msg = ("Render layer [ {} ] is not "
                       "renderable".format(expected_layer_name))
                self.log.warning(msg)
                continue

            # test if there are sets (subsets) to attach render to
            sets = cmds.sets(layer, query=True) or []
            attachTo = []
            if sets:
                for s in sets:
                    attachTo.append({
                        "version": None,  # we need integrator to get version
                        "subset": s,
                        "family": cmds.getAttr("{}.family".format(s))
                    })
                    self.log.info(" -> attach render to: {}".format(s))

            layer_name = "rs_{}".format(expected_layer_name)

            # collect all frames we are expecting to be rendered
            renderer = cmds.getAttr(
                'defaultRenderGlobals.currentRenderer').lower()
            # handle various renderman names
            if renderer.startswith('renderman'):
                renderer = 'renderman'

            # return all expected files for all cameras and aovs in given
            # frame range
            exp_files = ExpectedFiles().get(renderer, layer_name)
            assert exp_files, "no file names were generated, this is a bug"

            # if we want to attach render to subset, check if we have AOVs
            # in expectedFiles. If so, raise error as we cannot attach AOV
            # (considered to be subset on its own) to another subset
            if attachTo:
                assert len(exp_files[0].keys()) == 1, (
                    "attaching multiple AOVs or renderable cameras to "
                    "subset is not supported")

            # append full path
            full_exp_files = []
            aov_dict = {}

            # we either get AOVs or just a list of files. A list of files
            # can mean two things - there are no AOVs enabled or a multipass
            # EXR is produced. In either case we treat those as `beauty`.
            if isinstance(exp_files[0], dict):
                for aov, files in exp_files[0].items():
                    full_paths = []
                    for ef in files:
                        full_path = os.path.join(workspace, "renders", ef)
                        full_path = full_path.replace("\\", "/")
                        full_paths.append(full_path)
                    aov_dict[aov] = full_paths
            else:
                full_paths = []
                for ef in exp_files:
                    full_path = os.path.join(workspace, "renders", ef)
                    full_path = full_path.replace("\\", "/")
                    full_paths.append(full_path)
                aov_dict["beauty"] = full_paths

            full_exp_files.append(aov_dict)
            self.log.info(full_exp_files)
            self.log.info("collecting layer: {}".format(layer_name))
            # Get layer specific settings, might be overrides
            data = {
                "subset": expected_layer_name,
                "attachTo": attachTo,
                "setMembers": layer_name,
                "publish": True,
                "frameStart": int(
                    context.data["assetEntity"]["data"]["frameStart"]),
                "frameEnd": int(
                    context.data["assetEntity"]["data"]["frameEnd"]),
                "frameStartHandle": int(self.get_render_attribute(
                    "startFrame", layer=layer_name)),
                "frameEndHandle": int(self.get_render_attribute(
                    "endFrame", layer=layer_name)),
                "byFrameStep": int(self.get_render_attribute(
                    "byFrameStep", layer=layer_name)),
                "renderer": self.get_render_attribute(
                    "currentRenderer", layer=layer_name),
                "handleStart": int(
                    context.data["assetEntity"]["data"]["handleStart"]),
                "handleEnd": int(
                    context.data["assetEntity"]["data"]["handleEnd"]),

                # instance subset
                "family": "renderlayer",
                "families": ["renderlayer"],
                "asset": asset,
                "time": api.time(),
                "author": context.data["user"],

                # Add source to allow tracing back to the scene from
                # which it was submitted originally
                "source": filepath,
                "expectedFiles": full_exp_files,
                "resolutionWidth": cmds.getAttr("defaultResolution.width"),
                "resolutionHeight": cmds.getAttr("defaultResolution.height"),
                "pixelAspect": cmds.getAttr("defaultResolution.pixelAspect")
            }

            # Apply each user defined attribute as data
            for attr in cmds.listAttr(layer, userDefined=True) or list():
                try:
                    value = cmds.getAttr("{}.{}".format(layer, attr))
                except Exception:
                    # Some attributes cannot be read directly,
                    # such as mesh and color attributes. These
                    # are considered non-essential to this
                    # particular publishing pipeline.
                    value = None

                data[attr] = value

            # Include (optional) global settings
            # Get global overrides and translate to Deadline values
            overrides = self.parse_options(str(render_globals))
            data.update(**overrides)

            # Define nice label
            label = "{0} ({1})".format(expected_layer_name, data["asset"])
            label += " [{0}-{1}]".format(int(data["frameStartHandle"]),
                                         int(data["frameEndHandle"]))

            instance = context.create_instance(expected_layer_name)
            instance.data["label"] = label
            instance.data.update(data)

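    # A minimal sketch of the `expectedFiles` structure built above,
    # assuming one AOV plus the implicit beauty pass; paths are
    # illustrative, not taken from a real workspace:
    #
    #     [{"beauty": ["<workspace>/renders/scene_layer.0001.exr", ...],
    #       "diffuse": ["<workspace>/renders/scene_layer_diffuse.0001.exr",
    #                   ...]}]
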
    def parse_options(self, render_globals):
        """Get all overrides with a value, skip those without

        Here's the kicker. These globals override defaults in the submission
        integrator, but an empty value means no overriding is made.
        Otherwise, Frames would override the default frames set under globals.

        Args:
            render_globals (str): collection of render globals

        Returns:
            dict: only overrides with values
        """
        attributes = maya.read(render_globals)

        options = {"renderGlobals": {}}
        options["renderGlobals"]["Priority"] = attributes["priority"]

        # Check for specific pools
        pool_a, pool_b = self._discover_pools(attributes)
        options["renderGlobals"].update({"Pool": pool_a})
        if pool_b:
            options["renderGlobals"].update({"SecondaryPool": pool_b})

        # Machine list
        machine_list = attributes["machineList"]
        if machine_list:
            key = "Whitelist" if attributes["whitelist"] else "Blacklist"
            options["renderGlobals"][key] = machine_list

        # Suspend publish job
        state = "Suspended" if attributes["suspendPublishJob"] else "Active"
        options["publishJobState"] = state

        chunksize = attributes.get("framesPerTask", 1)
        options["renderGlobals"]["ChunkSize"] = chunksize

        # Override frames should be False if extendFrames is False. This is
        # to ensure it doesn't go off doing crazy unpredictable things
        override_frames = False
        extend_frames = attributes.get("extendFrames", False)
        if extend_frames:
            override_frames = attributes.get("overrideExistingFrame", False)

        options["extendFrames"] = extend_frames
        options["overrideExistingFrame"] = override_frames

        maya_render_plugin = "MayaBatch"
        if not attributes.get("useMayaBatch", True):
            maya_render_plugin = "MayaCmd"

        options["mayaRenderPlugin"] = maya_render_plugin

        return options

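    # A sketch of the shape `parse_options` returns, assuming typical
    # attribute values on the render globals node; the numbers are
    # illustrative only:
    #
    #     {"renderGlobals": {"Priority": 50, "Pool": "local",
    #                        "ChunkSize": 1},
    #      "publishJobState": "Active",
    #      "extendFrames": False,
    #      "overrideExistingFrame": False,
    #      "mayaRenderPlugin": "MayaBatch"}
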
    def _discover_pools(self, attributes):

        pool_a = None
        pool_b = None

        # Check for specific pools
        pool_b = []
        if "primaryPool" in attributes:
            pool_a = attributes["primaryPool"]
            if "secondaryPool" in attributes:
                pool_b = attributes["secondaryPool"]

        else:
            # Backwards compatibility
            pool_str = attributes.get("pools", None)
            if pool_str:
                pool_a, pool_b = pool_str.split(";")

        # Ensure empty entry token is caught
        if pool_b == "-":
            pool_b = None

        return pool_a, pool_b

    def _get_overrides(self, layer):
        rset = self.maya_layers[layer].renderSettingsCollectionInstance()
        return rset.getOverrides()

    def get_render_attribute(self, attr, layer):
        return lib.get_attr_in_layer(
            "defaultRenderGlobals.{}".format(attr), layer=layer)


class ExpectedFiles:

    def get(self, renderer, layer):
        if renderer.lower() == 'arnold':
            return ExpectedFilesArnold(layer).get_files()
        elif renderer.lower() == 'vray':
            return ExpectedFilesVray(layer).get_files()
        elif renderer.lower() == 'redshift':
            return ExpectedFilesRedshift(layer).get_files()
        elif renderer.lower() == 'mentalray':
            return ExpectedFilesMentalray(layer).get_files()
        elif renderer.lower() == 'renderman':
            return ExpectedFilesRenderman(layer).get_files()
        else:
            raise UnsupportedRendererException(
                "unsupported {}".format(renderer))

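# A minimal usage sketch of the dispatcher above, assuming the current
# renderer is Arnold and the Render Setup layer node is `rs_beauty`;
# the result is a list holding one dict of AOV name -> file sequence:
#
#     exp = ExpectedFiles().get("arnold", "rs_beauty")
#     # -> [{"beauty": ["scene_beauty.0001.exr", ...]}]

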
@six.add_metaclass(ABCMeta)
class AExpectedFiles:
    renderer = None
    layer = None

    def __init__(self, layer):
        self.layer = layer

    @abstractmethod
    def get_aovs(self):
        pass

    def get_renderer_prefix(self):
        try:
            file_prefix = cmds.getAttr(ImagePrefixes[self.renderer])
        except KeyError:
            raise UnsupportedRendererException(
                "Unsupported renderer {}".format(self.renderer))
        return file_prefix

    def _get_layer_data(self):
        # 1 - get scene name
        scene_dir, scene_basename = os.path.split(cmds.file(q=True, loc=True))
        scene_name, _ = os.path.splitext(scene_basename)

        # 2 - detect renderer
        renderer = self.renderer

        # 3 - get image prefix
        file_prefix = self.get_renderer_prefix()

        if not file_prefix:
            raise RuntimeError("Image prefix not set")

        default_ext = cmds.getAttr('defaultRenderGlobals.imfPluginKey')

        # 4 - get renderable cameras

        # if we have <camera> token in prefix path we'll expect output for
        # every renderable camera in layer.
        renderable_cameras = self.get_renderable_cameras()

        # 5 - get AOVs
        enabled_aovs = self.get_aovs()

        layer_name = self.layer
        if self.layer.startswith("rs_"):
            layer_name = self.layer[3:]
        start_frame = int(self.get_render_attribute('startFrame'))
        end_frame = int(self.get_render_attribute('endFrame'))
        frame_step = int(self.get_render_attribute('byFrameStep'))
        padding = int(self.get_render_attribute('extensionPadding'))

        scene_data = {
            "frameStart": start_frame,
            "frameEnd": end_frame,
            "frameStep": frame_step,
            "padding": padding,
            "cameras": renderable_cameras,
            "sceneName": scene_name,
            "layerName": layer_name,
            "renderer": renderer,
            "defaultExt": default_ext,
            "filePrefix": file_prefix,
            "enabledAOVs": enabled_aovs
        }
        return scene_data

    def _generate_single_file_sequence(self, layer_data):
        expected_files = []
        file_prefix = layer_data["filePrefix"]
        for cam in layer_data["cameras"]:
            mappings = (
                (R_SUBSTITUTE_SCENE_TOKEN, layer_data["sceneName"]),
                (R_SUBSTITUTE_LAYER_TOKEN, layer_data["layerName"]),
                (R_SUBSTITUTE_CAMERA_TOKEN, cam),
                # this is required to remove unfilled aov token, for example
                # in Redshift
                (R_REMOVE_AOV_TOKEN, ""),
                (R_CLEAN_FRAME_TOKEN, ""),
                (R_CLEAN_EXT_TOKEN, "")
            )

            for regex, value in mappings:
                file_prefix = re.sub(regex, value, file_prefix)

            for frame in range(
                    int(layer_data["frameStart"]),
                    int(layer_data["frameEnd"]) + 1,
                    int(layer_data["frameStep"])):
                expected_files.append(
                    '{}.{}.{}'.format(file_prefix,
                                      str(frame).rjust(
                                          layer_data["padding"], "0"),
                                      layer_data["defaultExt"]))
        return expected_files

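    # A sketch of the token substitution above, assuming a prefix built
    # from common Maya tokens; the R_* regexes are module-level constants
    # defined outside this hunk:
    #
    #     "<Scene>/<RenderLayer>/<Camera>"
    #     # for scene "shot010", layer "beauty" and camera "persp" this
    #     # becomes "shot010/beauty/persp"; frames 1-3 with padding 4 yield
    #     # ["shot010/beauty/persp.0001.exr", ...,
    #     #  "shot010/beauty/persp.0003.exr"]
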
    def _generate_aov_file_sequences(self, layer_data):
        expected_files = []
        aov_file_list = {}
        file_prefix = layer_data["filePrefix"]
        for aov in layer_data["enabledAOVs"]:
            for cam in layer_data["cameras"]:

                mappings = (
                    (R_SUBSTITUTE_SCENE_TOKEN, layer_data["sceneName"]),
                    (R_SUBSTITUTE_LAYER_TOKEN, layer_data["layerName"]),
                    (R_SUBSTITUTE_CAMERA_TOKEN, cam),
                    (R_SUBSTITUTE_AOV_TOKEN, aov[0]),
                    (R_CLEAN_FRAME_TOKEN, ""),
                    (R_CLEAN_EXT_TOKEN, "")
                )

                for regex, value in mappings:
                    file_prefix = re.sub(regex, value, file_prefix)

                aov_files = []
                for frame in range(
                        int(layer_data["frameStart"]),
                        int(layer_data["frameEnd"]) + 1,
                        int(layer_data["frameStep"])):
                    aov_files.append(
                        '{}.{}.{}'.format(
                            file_prefix,
                            str(frame).rjust(layer_data["padding"], "0"),
                            aov[1]))

                # if we have more than one renderable camera, append
                # camera name to AOV to allow per camera AOVs.
                aov_name = aov[0]
                if len(layer_data["cameras"]) > 1:
                    aov_name = "{}_{}".format(aov[0], cam)

                aov_file_list[aov_name] = aov_files
                file_prefix = layer_data["filePrefix"]

        expected_files.append(aov_file_list)
        return expected_files

    def get_files(self):
        """Return a list of expected files.

        It will translate render token strings ('<RenderPass>', etc.) to
        their values. This task is tricky as every renderer deals with this
        differently. It depends on the `get_aovs()` abstract method
        implemented for every supported renderer.
        """
        layer_data = self._get_layer_data()

        expected_files = []
        if layer_data.get("enabledAOVs"):
            expected_files = self._generate_aov_file_sequences(layer_data)
        else:
            expected_files = self._generate_single_file_sequence(layer_data)

        return expected_files

    def get_renderable_cameras(self):
        cam_parents = [cmds.listRelatives(x, ap=True)[-1]
                       for x in cmds.ls(cameras=True)]

        renderable_cameras = []
        for cam in cam_parents:
            renderable = False
            if self.maya_is_true(cmds.getAttr('{}.renderable'.format(cam))):
                renderable = True

            for override in self.get_layer_overrides(
                    '{}.renderable'.format(cam), self.layer):
                renderable = self.maya_is_true(override)

            if renderable:
                renderable_cameras.append(cam)
        return renderable_cameras

    def maya_is_true(self, attr_val):
        """Whether a Maya attribute evaluates to True.

        When querying an attribute value from an ambiguous object the
        Maya API will return a list of values, which need to be handled
        properly to evaluate correctly.
        """
        if isinstance(attr_val, types.BooleanType):
            return attr_val
        elif isinstance(attr_val, (types.ListType, types.GeneratorType)):
            return any(attr_val)
        else:
            return bool(attr_val)

    def get_layer_overrides(self, attr, layer):
        connections = cmds.listConnections(attr, plugs=True)
        if connections:
            for connection in connections:
                if connection:
                    node_name = connection.split('.')[0]
                    if cmds.nodeType(node_name) == 'renderLayer':
                        attr_name = '%s.value' % '.'.join(
                            connection.split('.')[:-1])
                        if node_name == layer:
                            yield cmds.getAttr(attr_name)

    def get_render_attribute(self, attr):
        return lib.get_attr_in_layer(
            "defaultRenderGlobals.{}".format(attr), layer=self.layer)


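# A quick illustration of `maya_is_true` semantics, using plain Python
# values to stand in for what `cmds.getAttr` may return; an ambiguous
# query yielding a list is truthy if any element is:
#
#     maya_is_true(True)           # -> True
#     maya_is_true([False, True])  # -> True
#     maya_is_true(0)              # -> False

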
class ExpectedFilesArnold(AExpectedFiles):

    # Arnold AOV driver extension mapping
    # Is there a better way?
    aiDriverExtension = {
        'jpeg': 'jpg',
        'exr': 'exr',
        'deepexr': 'exr',
        'png': 'png',
        'tiff': 'tif',
        'mtoa_shaders': 'ass',  # TODO: research what those last two should be
        'maya': ''
    }

    def __init__(self, layer):
        super(ExpectedFilesArnold, self).__init__(layer)
        self.renderer = 'arnold'

    def get_aovs(self):
        enabled_aovs = []
        try:
            if not (cmds.getAttr('defaultArnoldRenderOptions.aovMode')
                    and not cmds.getAttr('defaultArnoldDriver.mergeAOVs')):
                # AOVs are merged in multi-channel file
                return enabled_aovs
        except ValueError:
            # this occurs when the Render Settings window was not opened
            # yet. In that case there are no Arnold options created, so
            # querying for AOVs would fail. We terminate here as there are
            # no AOVs specified then. This state will most probably fail
            # later on some validator anyway.
            return enabled_aovs

        # AOVs are set to be rendered separately. We should expect
        # <RenderPass> token in path.

        ai_aovs = [n for n in cmds.ls(type='aiAOV')]

        for aov in ai_aovs:
            enabled = self.maya_is_true(
                cmds.getAttr('{}.enabled'.format(aov)))
            ai_driver = cmds.listConnections(
                '{}.outputs'.format(aov))[0]
            ai_translator = cmds.getAttr(
                '{}.aiTranslator'.format(ai_driver))
            try:
                aov_ext = self.aiDriverExtension[ai_translator]
            except KeyError:
                msg = ('Unrecognized Arnold '
                       'driver format for AOV - {}').format(
                    cmds.getAttr('{}.name'.format(aov))
                )
                raise AOVError(msg)

            for override in self.get_layer_overrides(
                    '{}.enabled'.format(aov), self.layer):
                enabled = self.maya_is_true(override)
            if enabled:
                # If the RGBA AOV is selected, Arnold translates it
                # to `beauty`
                aov_name = cmds.getAttr('%s.name' % aov)
                if aov_name == 'RGBA':
                    aov_name = 'beauty'
                enabled_aovs.append(
                    (
                        aov_name,
                        aov_ext
                    )
                )
        # Append 'beauty' as this is Arnold's default. If the <RenderPass>
        # token is specified and no AOVs are defined, this will be used.
        enabled_aovs.append(
            (
                u'beauty',
                cmds.getAttr('defaultRenderGlobals.imfPluginKey')
            )
        )
        return enabled_aovs


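# A sketch of what the Arnold `get_aovs` returns, assuming one enabled
# "diffuse" AOV with an exr driver; the trailing entry is the implicit
# default appended above, as (aov_name, extension) tuples:
#
#     [("diffuse", "exr"), ("beauty", "exr")]

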
class ExpectedFilesVray(AExpectedFiles):

    # V-Ray file extension mapping
    # 5 - exr
    # 6 - multichannel exr
    # 13 - deep exr

    def __init__(self, layer):
        super(ExpectedFilesVray, self).__init__(layer)
        self.renderer = 'vray'

    def get_renderer_prefix(self):
        prefix = super(ExpectedFilesVray, self).get_renderer_prefix()
        prefix = "{}_<aov>".format(prefix)
        return prefix

    def get_files(self):
        expected_files = super(ExpectedFilesVray, self).get_files()

        # we need to add one more sequence for plain beauty if AOVs are
        # enabled, as V-Ray outputs beauty without 'beauty' in the filename.
        layer_data = self._get_layer_data()
        if layer_data.get("enabledAOVs"):
            expected_files[0][u"beauty"] = self._generate_single_file_sequence(layer_data)  # noqa: E501

        return expected_files

    def get_aovs(self):
        enabled_aovs = []

        try:
            # really? do we set it in V-Ray just by selecting
            # multichannel exr?
            if cmds.getAttr(
                    "vraySettings.imageFormatStr") == "exr (multichannel)":
                # AOVs are merged in multi-channel file
                return enabled_aovs
        except ValueError:
            # this occurs when the Render Settings window was not opened
            # yet. In that case there are no V-Ray settings created, so
            # querying for AOVs would fail. We terminate here as there are
            # no AOVs specified then. This state will most probably fail
            # later on some validator anyway.
            return enabled_aovs

        default_ext = cmds.getAttr('vraySettings.imageFormatStr')
        if default_ext == "exr (multichannel)" or default_ext == "exr (deep)":
            default_ext = "exr"

        vr_aovs = [n for n in cmds.ls(
            type=["VRayRenderElement", "VRayRenderElementSet"])]

        # todo: find out how to detect multichannel exr for vray
        for aov in vr_aovs:
            enabled = self.maya_is_true(
                cmds.getAttr('{}.enabled'.format(aov)))
            for override in self.get_layer_overrides(
                    '{}.enabled'.format(aov), 'rs_{}'.format(self.layer)):
                enabled = self.maya_is_true(override)

            if enabled:
                # todo: find how vray sets format for AOVs
                enabled_aovs.append(
                    (
                        self._get_vray_aov_name(aov),
                        default_ext)
                )
        return enabled_aovs

    def _get_vray_aov_name(self, node):

        # Get render element pass type
        vray_node_attr = next(attr for attr in cmds.listAttr(node)
                              if attr.startswith("vray_name"))
        pass_type = vray_node_attr.rsplit("_", 1)[-1]

        # Support V-Ray extratex explicit name (if set by user)
        if pass_type == "extratex":
            explicit_attr = "{}.vray_explicit_name_extratex".format(node)
            explicit_name = cmds.getAttr(explicit_attr)
            if explicit_name:
                return explicit_name

        # Node type is in the attribute name, but we need to check the value
        # of the attribute as it can be changed
        return cmds.getAttr("{}.{}".format(node, vray_node_attr))


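# A sketch of the combined V-Ray result, assuming one enabled render
# element "reflect"; the plain beauty sequence injected by `get_files`
# above carries no AOV suffix in its file names:
#
#     [{"reflect": ["scene_layer_reflect.0001.exr", ...],
#       "beauty": ["scene_layer.0001.exr", ...]}]

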
class ExpectedFilesRedshift(AExpectedFiles):

    # mapping redshift extension dropdown values to strings
    ext_mapping = ['iff', 'exr', 'tif', 'png', 'tga', 'jpg']

    def __init__(self, layer):
        super(ExpectedFilesRedshift, self).__init__(layer)
        self.renderer = 'redshift'

    def get_renderer_prefix(self):
        prefix = super(ExpectedFilesRedshift, self).get_renderer_prefix()
        prefix = "{}_<aov>".format(prefix)
        return prefix

    def get_files(self):
        expected_files = super(ExpectedFilesRedshift, self).get_files()

        # we need to add one more sequence for plain beauty if AOVs are
        # enabled, as Redshift outputs beauty without 'beauty' in the
        # filename.
        layer_data = self._get_layer_data()
        if layer_data.get("enabledAOVs"):
            expected_files[0][u"beauty"] = self._generate_single_file_sequence(layer_data)  # noqa: E501

        return expected_files

    def get_aovs(self):
        enabled_aovs = []

        try:
            if self.maya_is_true(
                    cmds.getAttr("redshiftOptions.exrForceMultilayer")):
                # AOVs are merged in multi-channel file
                return enabled_aovs
        except ValueError:
            # this occurs when the Render Settings window was not opened
            # yet. In that case there are no Redshift options created, so
            # querying for AOVs would fail. We terminate here as there are
            # no AOVs specified then. This state will most probably fail
            # later on some validator anyway.
            return enabled_aovs

        default_ext = self.ext_mapping[
            cmds.getAttr('redshiftOptions.imageFormat')
        ]
        rs_aovs = [n for n in cmds.ls(type='RedshiftAOV')]

        # todo: find out how to detect multichannel exr for redshift
        for aov in rs_aovs:
            enabled = self.maya_is_true(
                cmds.getAttr('{}.enabled'.format(aov)))
            for override in self.get_layer_overrides(
                    '{}.enabled'.format(aov), self.layer):
                enabled = self.maya_is_true(override)

            if enabled:
                enabled_aovs.append(
                    (
                        cmds.getAttr('%s.name' % aov),
                        default_ext
                    )
                )

        return enabled_aovs


class ExpectedFilesRenderman(AExpectedFiles):

    def __init__(self, layer):
        super(ExpectedFilesRenderman, self).__init__(layer)
        self.renderer = 'renderman'

    def get_aovs(self):
        enabled_aovs = []

        default_ext = "exr"
        displays = cmds.listConnections("rmanGlobals.displays")
        for aov in displays:
            aov_name = str(aov)
            if aov_name == "rmanDefaultDisplay":
                aov_name = "beauty"

            enabled = self.maya_is_true(
                cmds.getAttr("{}.enable".format(aov)))
            for override in self.get_layer_overrides(
                    '{}.enable'.format(aov), self.layer):
                enabled = self.maya_is_true(override)

            if enabled:
                enabled_aovs.append(
                    (
                        aov_name,
                        default_ext
                    )
                )

        return enabled_aovs

    def get_files(self):
        """
        In Renderman we hack it by prepending a path. This path would
        normally be translated from `rmanGlobals.imageOutputDir`. We skip
        this and hardcode the prepended path we expect. There is no place
        for the user to mess around with this setting anyway, and it is
        enforced in the render settings validator.
        """
        layer_data = self._get_layer_data()
        new_aovs = {}

        expected_files = super(ExpectedFilesRenderman, self).get_files()
        # we always get beauty
        for aov, files in expected_files[0].items():
            new_files = []
            for file in files:
                new_file = "{}/{}/{}".format(layer_data["sceneName"],
                                             layer_data["layerName"],
                                             file)
                new_files.append(new_file)
            new_aovs[aov] = new_files

        return [new_aovs]


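# A sketch of the Renderman path prepend above, assuming scene "shot010"
# and layer "beauty"; every expected file is nested under
# <sceneName>/<layerName>:
#
#     "persp.0001.exr" -> "shot010/beauty/persp.0001.exr"

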
class ExpectedFilesMentalray(AExpectedFiles):

    def __init__(self, layer):
        raise UnimplementedRendererException('Mentalray not implemented')

    def get_aovs(self):
        return []


class AOVError(Exception):
    pass


class UnsupportedRendererException(Exception):
    pass


class UnimplementedRendererException(Exception):
    pass

@ -17,7 +17,7 @@ class CollectRenderableCamera(pyblish.api.InstancePlugin):
    def process(self, instance):
        layer = instance.data["setMembers"]

        self.log.info("layer: {}".format(layer))
        cameras = cmds.ls(type="camera", long=True)
        renderable = [c for c in cameras if
                      lib.get_attr_in_layer("%s.renderable" % c, layer=layer)]

@ -1,201 +0,0 @@
from maya import cmds

import pyblish.api

from avalon import maya, api
import pype.maya.lib as lib


class CollectMayaRenderlayers(pyblish.api.ContextPlugin):
    """Gather instances by active render layers"""

    order = pyblish.api.CollectorOrder + 0.01
    hosts = ["maya"]
    label = "Render Layers"

    def process(self, context):

        asset = api.Session["AVALON_ASSET"]
        filepath = context.data["currentFile"].replace("\\", "/")

        # Get render globals node
        try:
            render_globals = cmds.ls("renderglobalsMain")[0]
            for instance in context:
                self.log.debug(instance.name)
                if instance.data['family'] == 'workfile':
                    instance.data['publish'] = True
        except IndexError:
            self.log.info("Skipping renderlayer collection, no "
                          "renderGlobalsDefault found..")
            return
        # Get all valid renderlayers
        # This is how Maya populates the renderlayer display
        rlm_attribute = "renderLayerManager.renderLayerId"
        connected_layers = cmds.listConnections(rlm_attribute) or []
        valid_layers = set(connected_layers)

        # Get all renderlayers and check their state
        renderlayers = [i for i in cmds.ls(type="renderLayer") if
                        cmds.getAttr("{}.renderable".format(i)) and not
                        cmds.referenceQuery(i, isNodeReferenced=True)]

        # Sort by displayOrder
        def sort_by_display_order(layer):
            return cmds.getAttr("%s.displayOrder" % layer)

        renderlayers = sorted(renderlayers, key=sort_by_display_order)

        for layer in renderlayers:

            # Check if layer is in valid (linked) layers
            if layer not in valid_layers:
                self.log.warning("%s is invalid, skipping" % layer)
                continue

            if layer.endswith("defaultRenderLayer"):
                continue
            else:
                # Remove Maya render setup prefix `rs_`
                layername = layer.split("rs_", 1)[-1]

            # Get layer specific settings, might be overrides
            data = {
                "subset": layername,
                "setMembers": layer,
                "publish": True,
                "frameStart": self.get_render_attribute("startFrame",
                                                        layer=layer),
                "frameEnd": self.get_render_attribute("endFrame",
                                                      layer=layer),
                "byFrameStep": self.get_render_attribute("byFrameStep",
                                                         layer=layer),
                "renderer": self.get_render_attribute("currentRenderer",
                                                      layer=layer),

                # instance subset
                "family": "Render Layers",
                "families": ["renderlayer"],
                "asset": asset,
                "time": api.time(),
                "author": context.data["user"],

                # Add source to allow tracing back to the scene from
                # which was submitted originally
                "source": filepath
            }

            # Apply each user defined attribute as data
            for attr in cmds.listAttr(layer, userDefined=True) or list():
                try:
                    value = cmds.getAttr("{}.{}".format(layer, attr))
                except Exception:
                    # Some attributes cannot be read directly,
                    # such as mesh and color attributes. These
                    # are considered non-essential to this
                    # particular publishing pipeline.
                    value = None

                data[attr] = value

            # Include (optional) global settings
            # TODO(marcus): Take into account layer overrides
            # Get global overrides and translate to Deadline values
            overrides = self.parse_options(render_globals)
            data.update(**overrides)

            # Define nice label
            label = "{0} ({1})".format(layername, data["asset"])
            label += " [{0}-{1}]".format(int(data["frameStart"]),
                                         int(data["frameEnd"]))

            instance = context.create_instance(layername)
            instance.data["label"] = label
            instance.data.update(data)

    def get_render_attribute(self, attr, layer):
        return lib.get_attr_in_layer("defaultRenderGlobals.{}".format(attr),
                                     layer=layer)

    def parse_options(self, render_globals):
        """Get all overrides with a value, skip those without

        Here's the kicker. These globals override defaults in the submission
        integrator, but an empty value means no overriding is made.
        Otherwise, Frames would override the default frames set under globals.

        Args:
            render_globals (str): collection of render globals

        Returns:
            dict: only overrides with values
        """
        attributes = maya.read(render_globals)

        options = {"renderGlobals": {}}
        options["renderGlobals"]["Priority"] = attributes["priority"]

        # Check for specific pools
        pool_a, pool_b = self._discover_pools(attributes)
        options["renderGlobals"].update({"Pool": pool_a})
        if pool_b:
            options["renderGlobals"].update({"SecondaryPool": pool_b})

        legacy = attributes["useLegacyRenderLayers"]
        options["renderGlobals"]["UseLegacyRenderLayers"] = legacy

        # Machine list
        machine_list = attributes["machineList"]
        if machine_list:
            key = "Whitelist" if attributes["whitelist"] else "Blacklist"
            options['renderGlobals'][key] = machine_list

        # Suspend publish job
        state = "Suspended" if attributes["suspendPublishJob"] else "Active"
        options["publishJobState"] = state

        chunksize = attributes.get("framesPerTask", 1)
        options["renderGlobals"]["ChunkSize"] = chunksize

        # Override frames should be False if extendFrames is False. This is
        # to ensure it doesn't go off doing crazy unpredictable things
        override_frames = False
        extend_frames = attributes.get("extendFrames", False)
        if extend_frames:
            override_frames = attributes.get("overrideExistingFrame", False)

        options["extendFrames"] = extend_frames
        options["overrideExistingFrame"] = override_frames

        maya_render_plugin = "MayaBatch"
        if not attributes.get("useMayaBatch", True):
            maya_render_plugin = "MayaCmd"

        options["mayaRenderPlugin"] = maya_render_plugin

        return options

    def _discover_pools(self, attributes):

        pool_a = None
        pool_b = None

        # Check for specific pools
        pool_b = []
        if "primaryPool" in attributes:
            pool_a = attributes["primaryPool"]
            if "secondaryPool" in attributes:
                pool_b = attributes["secondaryPool"]

        else:
            # Backwards compatibility
            pool_str = attributes.get("pools", None)
            if pool_str:
                pool_a, pool_b = pool_str.split(";")

        # Ensure empty entry token is caught
        if pool_b == "-":
            pool_b = None

        return pool_a, pool_b

@ -54,8 +54,10 @@ class CollectReview(pyblish.api.InstancePlugin):
        self.log.debug('adding review family to {}'.format(reviewable_subset))
        data['review_camera'] = camera
        # data["publish"] = False
        data['startFrameReview'] = instance.data["frameStart"]
        data['endFrameReview'] = instance.data["frameEnd"]
        data['frameStartFtrack'] = instance.data["frameStartHandle"]
        data['frameEndFtrack'] = instance.data["frameEndHandle"]
        data['frameStartHandle'] = instance.data["frameStartHandle"]
        data['frameEndHandle'] = instance.data["frameEndHandle"]
        data["frameStart"] = instance.data["frameStart"]
        data["frameEnd"] = instance.data["frameEnd"]
        data['handles'] = instance.data['handles']

@ -69,8 +71,8 @@ class CollectReview(pyblish.api.InstancePlugin):
        else:
            instance.data['subset'] = task + 'Review'
        instance.data['review_camera'] = camera
        instance.data['startFrameReview'] = instance.data["frameStart"]
        instance.data['endFrameReview'] = instance.data["frameEnd"]
        instance.data['frameStartFtrack'] = instance.data["frameStartHandle"]
        instance.data['frameEndFtrack'] = instance.data["frameEndHandle"]

        # make ftrack publishable
        instance.data["families"] = ['ftrack']

@ -35,7 +35,7 @@ class CollectMayaScene(pyblish.api.ContextPlugin):
            "subset": subset,
            "asset": os.getenv("AVALON_ASSET", None),
            "label": subset,
-           "publish": False,
+           "publish": True,
            "family": 'workfile',
            "families": ['workfile'],
            "setMembers": [current_file]

28 pype/plugins/maya/publish/determine_future_version.py Normal file
@ -0,0 +1,28 @@
import pyblish


class DetermineFutureVersion(pyblish.api.InstancePlugin):
    """
    This will determine the version of the subset the render should be
    attached to.
    """
    label = "Determine Subset Version"
    order = pyblish.api.IntegratorOrder
    hosts = ["maya"]
    families = ["renderlayer"]

    def process(self, instance):
        context = instance.context
        attach_to_subsets = [s["subset"] for s in instance.data['attachTo']]

        if not attach_to_subsets:
            return

        for i in context:
            if i.data["subset"] in attach_to_subsets:
                # this will get the corresponding subset in the attachTo
                # list so we can set the version there
                sub = next(item for item in instance.data['attachTo']
                           if item["subset"] == i.data["subset"])

                sub["version"] = i.data.get("version", 1)
                self.log.info("render will be attached to {} v{}".format(
                    sub["subset"], sub["version"]
                ))

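# A minimal sketch of the attachTo resolution above, assuming the render
# instance attaches to a hypothetical "modelMain" subset that another
# instance in the same context publishes as version 3:
#
#     instance.data["attachTo"] = [
#         {"subset": "modelMain", "version": None, "family": "model"}]
#     # after DetermineFutureVersion runs:
#     # [{"subset": "modelMain", "version": 3, "family": "model"}]
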
@ -22,11 +22,11 @@ class ExtractAssembly(pype.api.Extractor):

    def process(self, instance):

-       parent_dir = self.staging_dir(instance)
+       staging_dir = self.staging_dir(instance)
        hierarchy_filename = "{}.abc".format(instance.name)
-       hierarchy_path = os.path.join(parent_dir, hierarchy_filename)
+       hierarchy_path = os.path.join(staging_dir, hierarchy_filename)
        json_filename = "{}.json".format(instance.name)
-       json_path = os.path.join(parent_dir, json_filename)
+       json_path = os.path.join(staging_dir, json_filename)

        self.log.info("Dumping scene data for debugging ..")
        with open(json_path, "w") as filepath:

@ -46,8 +46,24 @@ class ExtractAssembly(pype.api.Extractor):
                         "uvWrite": True,
                         "selection": True})

-       instance.data["files"] = [json_filename, hierarchy_filename]
+       if "representations" not in instance.data:
+           instance.data["representations"] = []
+
+       representation_abc = {
+           'name': 'abc',
+           'ext': 'abc',
+           'files': hierarchy_filename,
+           "stagingDir": staging_dir
+       }
+       instance.data["representations"].append(representation_abc)
+
+       representation_json = {
+           'name': 'json',
+           'ext': 'json',
+           'files': json_filename,
+           "stagingDir": staging_dir
+       }
+       instance.data["representations"].append(representation_json)
        # Remove data
        instance.data.pop("scenedata", None)

@ -94,11 +94,6 @@ class ExtractCameraMayaAscii(pype.api.Extractor):
        step = instance.data.get("step", 1.0)
        bake_to_worldspace = instance.data("bakeToWorldSpace", True)

-       # TODO: Implement a bake to non-world space
-       # Currently it will always bake the resulting camera to world-space
-       # and it does not allow to include the parent hierarchy, even though
-       # with `bakeToWorldSpace` set to False it should include its
-       # hierarchy to be correct with the family implementation.
        if not bake_to_worldspace:
            self.log.warning("Camera (Maya Ascii) export only supports world"
                             "space baked camera extractions. The disabled "

@ -113,7 +108,7 @@ class ExtractCameraMayaAscii(pype.api.Extractor):
                      framerange[1] + handles]

        # validate required settings
-       assert len(cameras) == 1, "Not a single camera found in extraction"
+       assert len(cameras) == 1, "Single camera must be found in extraction"
        assert isinstance(step, float), "Step must be a float value"
        camera = cameras[0]
        transform = cmds.listRelatives(camera, parent=True, fullPath=True)

@ -124,21 +119,24 @@ class ExtractCameraMayaAscii(pype.api.Extractor):
        path = os.path.join(dir_path, filename)

        # Perform extraction
-       self.log.info("Performing camera bakes for: {0}".format(transform))
        with avalon.maya.maintained_selection():
            with lib.evaluation("off"):
                with avalon.maya.suspended_refresh():
-                   baked = lib.bake_to_world_space(
-                       transform,
-                       frame_range=range_with_handles,
-                       step=step
-                   )
-                   baked_shapes = cmds.ls(baked,
-                                          type="camera",
-                                          dag=True,
-                                          shapes=True,
-                                          long=True)
+                   if bake_to_worldspace:
+                       self.log.info(
+                           "Performing camera bakes: {}".format(transform))
+                       baked = lib.bake_to_world_space(
+                           transform,
+                           frame_range=range_with_handles,
+                           step=step
+                       )
+                       baked_shapes = cmds.ls(baked,
+                                              type="camera",
+                                              dag=True,
+                                              shapes=True,
+                                              long=True)
+                   else:
+                       baked_shapes = cameras
        # Fix PLN-178: Don't allow background color to be non-black
        for cam in baked_shapes:
            attrs = {"backgroundColorR": 0.0,

@ -164,7 +162,8 @@ class ExtractCameraMayaAscii(pype.api.Extractor):
                              expressions=False)

        # Delete the baked hierarchy
-       cmds.delete(baked)
+       if bake_to_worldspace:
+           cmds.delete(baked)

        massage_ma_file(path)

Some files were not shown because too many files have changed in this diff.