Merge branch '2.x/develop' into feature/maya-tile-rendering

This commit is contained in:
Ondřej Samohel 2020-08-10 14:28:40 +02:00
commit 3040eed159
No known key found for this signature in database
GPG key ID: 8A29C663C672C2B7
37 changed files with 1101 additions and 724 deletions

View file

@@ -40,7 +40,8 @@ from .lib import (
     get_version_from_path,
     get_last_version_from_path,
     modified_environ,
-    add_tool_to_environment
+    add_tool_to_environment,
+    get_latest_version
 )
 # Special naming case for subprocess since it's a built-in method.
@@ -85,5 +86,6 @@ __all__ = [
     "modified_environ",
     "add_tool_to_environment",
-    "subprocess"
+    "subprocess",
+    "get_latest_version"
 ]

View file

@@ -151,27 +151,31 @@ def application_launch():
 def export_template(backdrops, nodes, filepath):
     func = """function func(args)
     {
+        // Add an extra node just so a new group can be created.
         var temp_node = node.add("Top", "temp_note", "NOTE", 0, 0, 0);
         var template_group = node.createGroup(temp_node, "temp_group");
         node.deleteNode( template_group + "/temp_note" );

-        selection.clearSelection();
-        for (var f = 0; f < args[1].length; f++)
-        {
-            selection.addNodeToSelection(args[1][f]);
-        }
-        Action.perform("copy()", "Node View");
-
+        // This will make Node View focus on the new group.
         selection.clearSelection();
         selection.addNodeToSelection(template_group);
         Action.perform("onActionEnterGroup()", "Node View");
-        Action.perform("paste()", "Node View");

         // Recreate backdrops in group.
         for (var i = 0 ; i < args[0].length; i++)
         {
-            MessageLog.trace(args[0][i]);
             Backdrop.addBackdrop(template_group, args[0][i]);
         };

+        // Copy-paste the selected nodes into the new group.
+        var drag_object = copyPaste.copy(args[1], 1, frame.numberOf, "");
+        copyPaste.pasteNewNodes(drag_object, template_group, "");
+
+        // Select all nodes within group and export as template.
         Action.perform( "selectAll()", "Node View" );
         copyPaste.createTemplateFromSelection(args[2], args[3]);

View file

@@ -158,6 +158,25 @@ class AExpectedFiles:
         """To be implemented by renderer class."""
         pass

+    def sanitize_camera_name(self, camera):
+        """Sanitize camera name.
+
+        Remove Maya illegal characters from camera name.
+
+        Args:
+            camera (str): Maya camera name.
+
+        Returns:
+            (str): sanitized camera name
+
+        Example:
+            >>> sanitize_camera_name('test:camera_01')
+            test_camera_01
+
+        """
+        sanitized = re.sub('[^0-9a-zA-Z_]+', '_', camera)
+        return sanitized
+
     def get_renderer_prefix(self):
         """Return prefix for specific renderer.

@@ -252,7 +271,7 @@ class AExpectedFiles:
         mappings = (
             (R_SUBSTITUTE_SCENE_TOKEN, layer_data["sceneName"]),
             (R_SUBSTITUTE_LAYER_TOKEN, layer_data["layerName"]),
-            (R_SUBSTITUTE_CAMERA_TOKEN, cam),
+            (R_SUBSTITUTE_CAMERA_TOKEN, self.sanitize_camera_name(cam)),
             # this is required to remove unfilled aov token, for example
             # in Redshift
             (R_REMOVE_AOV_TOKEN, ""),
@@ -287,7 +306,8 @@ class AExpectedFiles:
         mappings = (
             (R_SUBSTITUTE_SCENE_TOKEN, layer_data["sceneName"]),
             (R_SUBSTITUTE_LAYER_TOKEN, layer_data["layerName"]),
-            (R_SUBSTITUTE_CAMERA_TOKEN, cam),
+            (R_SUBSTITUTE_CAMERA_TOKEN,
+             self.sanitize_camera_name(cam)),
             (R_SUBSTITUTE_AOV_TOKEN, aov[0]),
             (R_CLEAN_FRAME_TOKEN, ""),
             (R_CLEAN_EXT_TOKEN, ""),
@@ -314,7 +334,8 @@ class AExpectedFiles:
             # camera name to AOV to allow per camera AOVs.
             aov_name = aov[0]
             if len(layer_data["cameras"]) > 1:
-                aov_name = "{}_{}".format(aov[0], cam)
+                aov_name = "{}_{}".format(aov[0],
+                                          self.sanitize_camera_name(cam))
             aov_file_list[aov_name] = aov_files

             file_prefix = layer_data["filePrefix"]
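
A quick standalone sketch of what the new sanitizing does: the regex collapses every run of characters outside [0-9a-zA-Z_] into a single underscore, so Maya-illegal characters such as ":" or "|" disappear from the camera token. Function name and sample values below are illustrative only.

    import re

    def sanitize_camera_name(camera):
        # Collapse any run of Maya-illegal characters into "_".
        return re.sub('[^0-9a-zA-Z_]+', '_', camera)

    print(sanitize_camera_name("test:camera_01"))        # test_camera_01
    print(sanitize_camera_name("render|main:camShape"))  # render_main_camShape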

View file

@@ -1445,7 +1445,7 @@ class ExporterReview:
         anlib.reset_selection()
         ipn_orig = None
         for v in [n for n in nuke.allNodes()
-                  if "Viewer" in n.Class()]:
+                  if "Viewer" == n.Class()]:
             ip = v['input_process'].getValue()
             ipn = v['input_process_node'].getValue()
             if "VIEWER_INPUT" not in ipn and ip:
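
The filter tightens from a substring test to an exact class match: `"Viewer" in n.Class()` would also pick up any node whose class merely contains the word, while `==` keeps only genuine Viewer nodes. A minimal illustration with a made-up class name:

    # Only the exact comparison rejects the hypothetical subclass name.
    for node_class in ("Viewer", "CustomViewerLut"):
        print(node_class, "Viewer" in node_class, "Viewer" == node_class)
    # Viewer True True
    # CustomViewerLut True False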

View file

@@ -0,0 +1,262 @@
{
"Hierarchy": {
"editable": "1",
"note": "{folder}/{sequence}/{shot}",
"icon": {
"path": "hierarchy.png"
},
"metadata": {
"folder": "FOLDER_NAME",
"shot": "{clip}",
"track": "{track}",
"sequence": "{sequence}",
"episode": "EPISODE_NAME",
"root": "{projectroot}"
}
},
"Source Resolution": {
"editable": "1",
"note": "Use source resolution",
"icon": {
"path": "resolution.png"
},
"metadata": {
"family": "resolution"
}
},
"Retiming": {
"editable": "1",
"note": "Clip has retime or TimeWarp effects (or multiple effects stacked on the clip)",
"icon": {
"path": "retiming.png"
},
"metadata": {
"family": "retiming",
"marginIn": 1,
"marginOut": 1
}
},
"Frame start": {
"editable": "1",
"note": "Starting frame for comps. \n\n> Use `value` and add either number or write `source` (if you want to preserve source frame numbering)",
"icon": {
"path": "icons:TagBackground.png"
},
"metadata": {
"family": "frameStart",
"value": "1001"
}
},
"[Lenses]": {
"Set lense here": {
"editable": "1",
"note": "Adjust parameters of your lense and then drop to clip. Remember! You can always overwrite on clip",
"icon": {
"path": "lense.png"
},
"metadata": {
"focalLengthMm": 57
}
}
},
"[Subsets]": {
"Audio": {
"editable": "1",
"note": "Export with Audio",
"icon": {
"path": "volume.png"
},
"metadata": {
"family": "audio",
"subset": "main"
}
},
"plateFg": {
"editable": "1",
"note": "Add to publish to \"forground\" subset. Change metadata subset name if different order number",
"icon": {
"path": "z_layer_fg.png"
},
"metadata": {
"family": "plate",
"subset": "Fg01"
}
},
"plateBg": {
"editable": "1",
"note": "Add to publish to \"background\" subset. Change metadata subset name if different order number",
"icon": {
"path": "z_layer_bg.png"
},
"metadata": {
"family": "plate",
"subset": "Bg01"
}
},
"plateRef": {
"editable": "1",
"note": "Add to publish to \"reference\" subset.",
"icon": {
"path": "icons:Reference.png"
},
"metadata": {
"family": "plate",
"subset": "Ref"
}
},
"plateMain": {
"editable": "1",
"note": "Add to publish to \"main\" subset.",
"icon": {
"path": "z_layer_main.png"
},
"metadata": {
"family": "plate",
"subset": "main"
}
},
"plateProxy": {
"editable": "1",
"note": "Add to publish to \"proxy\" subset.",
"icon": {
"path": "z_layer_main.png"
},
"metadata": {
"family": "plate",
"subset": "proxy"
}
},
"review": {
"editable": "1",
"note": "Upload to Ftrack as review component.",
"icon": {
"path": "review.png"
},
"metadata": {
"family": "review",
"track": "review"
}
}
},
"[Handles]": {
"start: add 20 frames": {
"editable": "1",
"note": "Adding frames to start of selected clip",
"icon": {
"path": "3_add_handles_start.png"
},
"metadata": {
"family": "handles",
"value": "20",
"args": "{'op':'add','where':'start'}"
}
},
"start: add 10 frames": {
"editable": "1",
"note": "Adding frames to start of selected clip",
"icon": {
"path": "3_add_handles_start.png"
},
"metadata": {
"family": "handles",
"value": "10",
"args": "{'op':'add','where':'start'}"
}
},
"start: add 5 frames": {
"editable": "1",
"note": "Adding frames to start of selected clip",
"icon": {
"path": "3_add_handles_start.png"
},
"metadata": {
"family": "handles",
"value": "5",
"args": "{'op':'add','where':'start'}"
}
},
"start: add 0 frames": {
"editable": "1",
"note": "Adding frames to start of selected clip",
"icon": {
"path": "3_add_handles_start.png"
},
"metadata": {
"family": "handles",
"value": "0",
"args": "{'op':'add','where':'start'}"
}
},
"end: add 20 frames": {
"editable": "1",
"note": "Adding frames to end of selected clip",
"icon": {
"path": "1_add_handles_end.png"
},
"metadata": {
"family": "handles",
"value": "20",
"args": "{'op':'add','where':'end'}"
}
},
"end: add 10 frames": {
"editable": "1",
"note": "Adding frames to end of selected clip",
"icon": {
"path": "1_add_handles_end.png"
},
"metadata": {
"family": "handles",
"value": "10",
"args": "{'op':'add','where':'end'}"
}
},
"end: add 5 frames": {
"editable": "1",
"note": "Adding frames to end of selected clip",
"icon": {
"path": "1_add_handles_end.png"
},
"metadata": {
"family": "handles",
"value": "5",
"args": "{'op':'add','where':'end'}"
}
},
"end: add 0 frames": {
"editable": "1",
"note": "Adding frames to end of selected clip",
"icon": {
"path": "1_add_handles_end.png"
},
"metadata": {
"family": "handles",
"value": "0",
"args": "{'op':'add','where':'end'}"
}
}
},
"NukeScript": {
"editable": "1",
"note": "Collecting track items to Nuke scripts.",
"icon": {
"path": "icons:TagNuke.png"
},
"metadata": {
"family": "nukescript",
"subset": "main"
}
},
"Comment": {
"editable": "1",
"note": "Comment on a shot.",
"icon": {
"path": "icons:TagComment.png"
},
"metadata": {
"family": "comment",
"subset": "main"
}
}
}

View file

@@ -1,16 +1,22 @@
 import re
 import os
+import json
 import hiero
-from pype.api import (
-    config,
-    Logger
-)
+from pype.api import Logger
 from avalon import io

 log = Logger().get_logger(__name__, "nukestudio")


+def tag_data():
+    current_dir = os.path.dirname(__file__)
+    json_path = os.path.join(current_dir, "tags.json")
+    with open(json_path, "r") as json_stream:
+        data = json.load(json_stream)
+    return data
+
+
 def create_tag(key, value):
     """
     Creating Tag object.
@@ -58,13 +64,9 @@ def add_tags_from_presets():
         return

     log.debug("Setting default tags on project: {}".format(project.name()))

-    # get all presets
-    presets = config.get_presets()
-
-    # get nukestudio tag.json from presets
-    nks_pres = presets["nukestudio"]
-    nks_pres_tags = nks_pres.get("tags", None)
+    # get nukestudio tags.json
+    nks_pres_tags = tag_data()

     # Get project task types.
     tasks = io.find_one({"type": "project"})["config"]["tasks"]

View file

@@ -1379,3 +1379,40 @@ def ffprobe_streams(path_to_file):
     popen_output = popen.communicate()[0]
     log.debug("FFprobe output: {}".format(popen_output))
     return json.loads(popen_output)["streams"]
+
+
+def get_latest_version(asset_name, subset_name):
+    """Retrieve latest version from `asset_name`, and `subset_name`.
+
+    Args:
+        asset_name (str): Name of asset.
+        subset_name (str): Name of subset.
+
+    """
+    # Get asset
+    asset_name = io.find_one(
+        {"type": "asset", "name": asset_name}, projection={"name": True}
+    )
+
+    subset = io.find_one(
+        {"type": "subset", "name": subset_name, "parent": asset_name["_id"]},
+        projection={"_id": True, "name": True},
+    )
+
+    # Check if subsets actually exists.
+    assert subset, "No subsets found."
+
+    # Get version
+    version_projection = {
+        "name": True,
+        "parent": True,
+    }
+
+    version = io.find_one(
+        {"type": "version", "parent": subset["_id"]},
+        projection=version_projection,
+        sort=[("name", -1)],
+    )
+
+    assert version, "No version found, this is a bug"
+
+    return version
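
A hedged usage sketch for the new helper (asset and subset names here are made up, and an installed avalon `io` connection is assumed, since the helper queries the project database directly). Note that the helper reuses the `asset_name` variable for the asset document it fetches:

    from pype.lib import get_latest_version

    version = get_latest_version(asset_name="sh010", subset_name="renderMain")
    # `version` is the raw version document, e.g. {"_id": ..., "name": 3, ...}
    print(version["name"])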

View file

@@ -105,11 +105,34 @@ class DeleteOldVersions(BaseAction):
             "value": False
         })

+        items.append(self.splitter_item)
+
+        items.append({
+            "type": "label",
+            "value": (
+                "<i>This will <b>NOT</b> delete any files and only return the "
+                "total size of the files.</i>"
+            )
+        })
+        items.append({
+            "type": "boolean",
+            "name": "only_calculate",
+            "label": "Only calculate size of files.",
+            "value": False
+        })
+
         return {
             "items": items,
             "title": self.inteface_title
         }

+    def sizeof_fmt(self, num, suffix='B'):
+        for unit in ['', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi']:
+            if abs(num) < 1024.0:
+                return "%3.1f%s%s" % (num, unit, suffix)
+            num /= 1024.0
+        return "%.1f%s%s" % (num, 'Yi', suffix)
+
     def launch(self, session, entities, event):
         values = event["data"].get("values")
         if not values:
@@ -117,6 +140,7 @@ class DeleteOldVersions(BaseAction):

         versions_count = int(values["last_versions_count"])
         force_to_remove = values["force_delete_publish_folder"]
+        only_calculate = values["only_calculate"]

         _val1 = "OFF"
         if force_to_remove:
@@ -318,10 +342,29 @@ class DeleteOldVersions(BaseAction):
                 "Folder does not exist. Deleting it's files skipped: {}"
             ).format(paths_msg))

+        # Size of files.
+        size = 0
+
+        if only_calculate:
+            if force_to_remove:
+                size = self.delete_whole_dir_paths(
+                    dir_paths.values(), delete=False
+                )
+            else:
+                size = self.delete_only_repre_files(
+                    dir_paths, file_paths_by_dir, delete=False
+                )
+
+            msg = "Total size of files: " + self.sizeof_fmt(size)
+            self.log.warning(msg)
+            return {"success": True, "message": msg}
+
         if force_to_remove:
-            self.delete_whole_dir_paths(dir_paths.values())
+            size = self.delete_whole_dir_paths(dir_paths.values())
         else:
-            self.delete_only_repre_files(dir_paths, file_paths_by_dir)
+            size = self.delete_only_repre_files(dir_paths, file_paths_by_dir)

         mongo_changes_bulk = []
         for version in versions:
@@ -383,17 +426,31 @@ class DeleteOldVersions(BaseAction):
                 "message": msg
             }

-        return True
+        msg = "Total size of files deleted: " + self.sizeof_fmt(size)
+        self.log.warning(msg)
+        return {"success": True, "message": msg}

-    def delete_whole_dir_paths(self, dir_paths):
+    def delete_whole_dir_paths(self, dir_paths, delete=True):
+        size = 0
+
         for dir_path in dir_paths:
             # Delete all files and folders in dir path
             for root, dirs, files in os.walk(dir_path, topdown=False):
                 for name in files:
-                    os.remove(os.path.join(root, name))
+                    file_path = os.path.join(root, name)
+                    size += os.path.getsize(file_path)
+                    if delete:
+                        os.remove(file_path)
+                        self.log.debug("Removed file: {}".format(file_path))

                 for name in dirs:
-                    os.rmdir(os.path.join(root, name))
+                    if delete:
+                        os.rmdir(os.path.join(root, name))
+
+            if not delete:
+                continue

             # Delete even the folder and its parent folders if they are empty
             while True:
@@ -406,7 +463,11 @@ class DeleteOldVersions(BaseAction):

             os.rmdir(os.path.join(dir_path))

-    def delete_only_repre_files(self, dir_paths, file_paths):
+        return size
+
+    def delete_only_repre_files(self, dir_paths, file_paths, delete=True):
+        size = 0
+
         for dir_id, dir_path in dir_paths.items():
             dir_files = os.listdir(dir_path)
             collections, remainders = clique.assemble(dir_files)
@@ -420,8 +481,13 @@ class DeleteOldVersions(BaseAction):
                         "File was not found: {}".format(file_path)
                     )
                     continue

-                os.remove(file_path)
-                self.log.debug("Removed file: {}".format(file_path))
+                size += os.path.getsize(file_path)
+
+                if delete:
+                    os.remove(file_path)
+                    self.log.debug("Removed file: {}".format(file_path))

                 remainders.remove(file_path_base)
                 continue
@@ -440,21 +506,34 @@ class DeleteOldVersions(BaseAction):
                     final_col.head = os.path.join(dir_path, final_col.head)
                     for _file_path in final_col:
                         if os.path.exists(_file_path):
-                            os.remove(_file_path)
+                            size += os.path.getsize(_file_path)
+                            if delete:
+                                os.remove(_file_path)
+                                self.log.debug(
+                                    "Removed file: {}".format(_file_path)
+                                )

                     _seq_path = final_col.format("{head}{padding}{tail}")
                     self.log.debug("Removed files: {}".format(_seq_path))
                     collections.remove(final_col)

                 elif os.path.exists(file_path):
-                    os.remove(file_path)
-                    self.log.debug("Removed file: {}".format(file_path))
+                    size += os.path.getsize(file_path)
+                    if delete:
+                        os.remove(file_path)
+                        self.log.debug("Removed file: {}".format(file_path))
                 else:
                     self.log.warning(
                         "File was not found: {}".format(file_path)
                     )

         # Delete as much as possible parent folders
+        if not delete:
+            return size
+
         for dir_path in dir_paths.values():
             while True:
                 if not os.path.exists(dir_path):
@@ -467,6 +546,8 @@ class DeleteOldVersions(BaseAction):
                 self.log.debug("Removed folder: {}".format(dir_path))
                 os.rmdir(dir_path)

+        return size
+
     def path_from_represenation(self, representation, anatomy):
         try:
             template = representation["data"]["template"]
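
The dry-run path returns the accumulated byte count, which `sizeof_fmt` renders human readable. The formatter is self-contained and easy to check in isolation (sample values illustrative):

    def sizeof_fmt(num, suffix='B'):
        # Divide by 1024 until the value fits the current unit.
        for unit in ['', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi']:
            if abs(num) < 1024.0:
                return "%3.1f%s%s" % (num, unit, suffix)
            num /= 1024.0
        return "%.1f%s%s" % (num, 'Yi', suffix)

    print(sizeof_fmt(0))            # 0.0B
    print(sizeof_fmt(1536))         # 1.5KiB
    print(sizeof_fmt(3 * 1024**3))  # 3.0GiB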

View file

@@ -81,13 +81,15 @@ class Delivery(BaseAction):
         anatomy = Anatomy(project_name)
         new_anatomies = []
         first = None
-        for key in (anatomy.templates.get("delivery") or {}):
-            new_anatomies.append({
-                "label": key,
-                "value": key
-            })
-            if first is None:
-                first = key
+        for key, template in (anatomy.templates.get("delivery") or {}).items():
+            # Use only keys with `{root}` or `{root[*]}` in value
+            if isinstance(template, str) and "{root" in template:
+                new_anatomies.append({
+                    "label": key,
+                    "value": key
+                })
+                if first is None:
+                    first = key

         skipped = False
         # Add message if there are any common components
@@ -293,6 +295,20 @@ class Delivery(BaseAction):
             repres_to_deliver.append(repre)

         anatomy = Anatomy(project_name)
+
+        format_dict = {}
+        if location_path:
+            location_path = location_path.replace("\\", "/")
+            root_names = anatomy.root_names_from_templates(
+                anatomy.templates["delivery"]
+            )
+            if root_names is None:
+                format_dict["root"] = location_path
+            else:
+                format_dict["root"] = {}
+                for name in root_names:
+                    format_dict["root"][name] = location_path
+
         for repre in repres_to_deliver:
             # Get destination repre path
             anatomy_data = copy.deepcopy(repre["context"])
@@ -339,25 +355,33 @@ class Delivery(BaseAction):
             repre_path = self.path_from_represenation(repre, anatomy)
             # TODO add backup solution where root of path from component
             # is replaced with root
-            if not frame:
-                self.process_single_file(
-                    repre_path, anatomy, anatomy_name, anatomy_data
-                )
+            args = (
+                repre_path,
+                anatomy,
+                anatomy_name,
+                anatomy_data,
+                format_dict
+            )
+            if not frame:
+                self.process_single_file(*args)
             else:
-                self.process_sequence(
-                    repre_path, anatomy, anatomy_name, anatomy_data
-                )
+                self.process_sequence(*args)

         self.db_con.uninstall()
         return self.report()

     def process_single_file(
-        self, repre_path, anatomy, anatomy_name, anatomy_data
+        self, repre_path, anatomy, anatomy_name, anatomy_data, format_dict
     ):
         anatomy_filled = anatomy.format(anatomy_data)
-        delivery_path = anatomy_filled["delivery"][anatomy_name]
+        if format_dict:
+            template_result = anatomy_filled["delivery"][anatomy_name]
+            delivery_path = template_result.rootless.format(**format_dict)
+        else:
+            delivery_path = anatomy_filled["delivery"][anatomy_name]
+
         delivery_folder = os.path.dirname(delivery_path)
         if not os.path.exists(delivery_folder):
             os.makedirs(delivery_folder)
@@ -365,7 +389,7 @@ class Delivery(BaseAction):
         self.copy_file(repre_path, delivery_path)

     def process_sequence(
-        self, repre_path, anatomy, anatomy_name, anatomy_data
+        self, repre_path, anatomy, anatomy_name, anatomy_data, format_dict
     ):
         dir_path, file_name = os.path.split(str(repre_path))

@@ -408,8 +432,12 @@ class Delivery(BaseAction):
         anatomy_data["frame"] = frame_indicator
         anatomy_filled = anatomy.format(anatomy_data)
-        delivery_path = anatomy_filled["delivery"][anatomy_name]
-        print(delivery_path)
+        if format_dict:
+            template_result = anatomy_filled["delivery"][anatomy_name]
+            delivery_path = template_result.rootless.format(**format_dict)
+        else:
+            delivery_path = anatomy_filled["delivery"][anatomy_name]

         delivery_folder = os.path.dirname(delivery_path)
         dst_head, dst_tail = delivery_path.split(frame_indicator)
         dst_padding = src_collection.padding
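
The `format_dict` mechanics rely on plain `str.format` over the rootless template result: the configured root token is swapped for the user-supplied location. A minimal sketch with an invented template and paths:

    # Rootless delivery template and override location are illustrative.
    rootless = "{root[delivery]}/{project}/{representation}/{file}"
    format_dict = {"root": {"delivery": "D:/handoff"}}

    print(rootless.format(
        project="ep101", representation="exr", file="sh010.exr", **format_dict
    ))
    # D:/handoff/ep101/exr/sh010.exr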

View file

@@ -81,11 +81,12 @@ def check_regex(name, entity_type, in_schema=None, schema_patterns=None):
 def get_pype_attr(session, split_hierarchical=True):
     custom_attributes = []
     hier_custom_attributes = []
+    # TODO remove deprecated "avalon" group from query
     cust_attrs_query = (
         "select id, entity_type, object_type_id, is_hierarchical, default"
         " from CustomAttributeConfiguration"
-        " where group.name = \"{}\""
-    ).format(CUST_ATTR_GROUP)
+        " where group.name in (\"avalon\", \"pype\")"
+    )
     all_avalon_attr = session.query(cust_attrs_query).all()
     for cust_attr in all_avalon_attr:
         if split_hierarchical and cust_attr["is_hierarchical"]:

View file

@@ -8,9 +8,9 @@ class LogsWindow(QtWidgets.QWidget):
         super(LogsWindow, self).__init__(parent)

         self.setStyleSheet(style.load_stylesheet())
-        self.resize(1200, 800)
+        self.resize(1400, 800)

-        logs_widget = LogsWidget(parent=self)
         log_detail = OutputWidget(parent=self)
+        logs_widget = LogsWidget(log_detail, parent=self)

         main_layout = QtWidgets.QHBoxLayout()
@@ -18,8 +18,6 @@ class LogsWindow(QtWidgets.QWidget):
         log_splitter.setOrientation(QtCore.Qt.Horizontal)
         log_splitter.addWidget(logs_widget)
         log_splitter.addWidget(log_detail)
-        log_splitter.setStretchFactor(0, 65)
-        log_splitter.setStretchFactor(1, 35)

         main_layout.addWidget(log_splitter)
@@ -28,10 +26,3 @@ class LogsWindow(QtWidgets.QWidget):

         self.setLayout(main_layout)
         self.setWindowTitle("Logs")
-
-        self.logs_widget.active_changed.connect(self.on_selection_changed)
-
-    def on_selection_changed(self):
-        index = self.logs_widget.selected_log()
-        node = index.data(self.logs_widget.model.NodeRole)
-        self.log_detail.set_detail(node)

View file

@@ -1,94 +0,0 @@
import contextlib
from Qt import QtCore
def _iter_model_rows(
model, column, include_root=False
):
"""Iterate over all row indices in a model"""
indices = [QtCore.QModelIndex()] # start iteration at root
for index in indices:
# Add children to the iterations
child_rows = model.rowCount(index)
for child_row in range(child_rows):
child_index = model.index(child_row, column, index)
indices.append(child_index)
if not include_root and not index.isValid():
continue
yield index
@contextlib.contextmanager
def preserve_states(
tree_view, column=0, role=None,
preserve_expanded=True, preserve_selection=True,
expanded_role=QtCore.Qt.DisplayRole, selection_role=QtCore.Qt.DisplayRole
):
"""Preserves row selection in QTreeView by column's data role.
This function is created to maintain the selection status of
the model items. When refresh is triggered the items which are expanded
will stay expanded and vice versa.
tree_view (QWidgets.QTreeView): the tree view nested in the application
column (int): the column to retrieve the data from
role (int): the role which dictates what will be returned
Returns:
None
"""
# When `role` is set then override both expanded and selection roles
if role:
expanded_role = role
selection_role = role
model = tree_view.model()
selection_model = tree_view.selectionModel()
flags = selection_model.Select | selection_model.Rows
expanded = set()
if preserve_expanded:
for index in _iter_model_rows(
model, column=column, include_root=False
):
if tree_view.isExpanded(index):
value = index.data(expanded_role)
expanded.add(value)
selected = None
if preserve_selection:
selected_rows = selection_model.selectedRows()
if selected_rows:
selected = set(row.data(selection_role) for row in selected_rows)
try:
yield
finally:
if expanded:
for index in _iter_model_rows(
model, column=0, include_root=False
):
value = index.data(expanded_role)
is_expanded = value in expanded
# skip if new index was created meanwhile
if is_expanded is None:
continue
tree_view.setExpanded(index, is_expanded)
if selected:
# Go through all indices, select the ones with similar data
for index in _iter_model_rows(
model, column=column, include_root=False
):
value = index.data(selection_role)
state = value in selected
if state:
tree_view.scrollTo(index) # Ensure item is visible
selection_model.select(index, flags)

View file

@@ -1,21 +1,20 @@
 import collections
-from Qt import QtCore
+from Qt import QtCore, QtGui

 from pype.api import Logger
 from pypeapp.lib.log import _bootstrap_mongo_log, LOG_COLLECTION_NAME

 log = Logger().get_logger("LogModel", "LoggingModule")


-class LogModel(QtCore.QAbstractItemModel):
-    COLUMNS = [
+class LogModel(QtGui.QStandardItemModel):
+    COLUMNS = (
         "process_name",
         "hostname",
         "hostip",
         "username",
         "system_name",
         "started"
-    ]
+    )

     colums_mapping = {
         "process_name": "Process Name",
         "process_id": "Process Id",
@@ -25,30 +24,53 @@ class LogModel(QtCore.QAbstractItemModel):
         "system_name": "System name",
         "started": "Started at"
     }
-    process_keys = [
+    process_keys = (
         "process_id", "hostname", "hostip",
         "username", "system_name", "process_name"
-    ]
-    log_keys = [
+    )
+    log_keys = (
         "timestamp", "level", "thread", "threadName", "message", "loggerName",
         "fileName", "module", "method", "lineNumber"
-    ]
+    )
     default_value = "- Not set -"

-    NodeRole = QtCore.Qt.UserRole + 1
+    ROLE_LOGS = QtCore.Qt.UserRole + 2
+    ROLE_PROCESS_ID = QtCore.Qt.UserRole + 3

     def __init__(self, parent=None):
         super(LogModel, self).__init__(parent)
-        self._root_node = Node()
-        self.log_by_process = None

         self.dbcon = None
         # Crash if connection is not possible to skip this module
         database = _bootstrap_mongo_log()
         if LOG_COLLECTION_NAME in database.list_collection_names():
             self.dbcon = database[LOG_COLLECTION_NAME]

-    def add_log(self, log):
-        node = Node(log)
-        self._root_node.add_child(node)
+    def headerData(self, section, orientation, role):
+        if (
+            role == QtCore.Qt.DisplayRole
+            and orientation == QtCore.Qt.Horizontal
+        ):
+            if section < len(self.COLUMNS):
+                key = self.COLUMNS[section]
+                return self.colums_mapping.get(key, key)
+
+        super(LogModel, self).headerData(section, orientation, role)
+
+    def add_process_logs(self, process_logs):
+        items = []
+        first_item = True
+        for key in self.COLUMNS:
+            display_value = str(process_logs[key])
+            item = QtGui.QStandardItem(display_value)
+            if first_item:
+                first_item = False
+                item.setData(process_logs["_logs"], self.ROLE_LOGS)
+                item.setData(process_logs["process_id"], self.ROLE_PROCESS_ID)
+            items.append(item)
+        self.appendRow(items)

     def refresh(self):
         self.log_by_process = collections.defaultdict(list)
@@ -65,16 +87,13 @@
                     continue

                 if process_id not in self.process_info:
-                    proc_dict = {}
+                    proc_dict = {"_logs": []}
                     for key in self.process_keys:
                         proc_dict[key] = (
                             item.get(key) or self.default_value
                         )
                     self.process_info[process_id] = proc_dict

-                if "_logs" not in self.process_info[process_id]:
-                    self.process_info[process_id]["_logs"] = []
-
                 log_item = {}
                 for key in self.log_keys:
                     log_item[key] = item.get(key) or self.default_value
@@ -89,114 +108,29 @@
                     item["_logs"], key=lambda item: item["timestamp"]
                 )
                 item["started"] = item["_logs"][0]["timestamp"]
-                self.add_log(item)
+                self.add_process_logs(item)

         self.endResetModel()

-    def data(self, index, role):
-        if not index.isValid():
-            return None
-
-        if role == QtCore.Qt.DisplayRole or role == QtCore.Qt.EditRole:
-            node = index.internalPointer()
-            column = index.column()
-
-            key = self.COLUMNS[column]
-            if key == "started":
-                return str(node.get(key, None))
-            return node.get(key, None)
-
-        if role == self.NodeRole:
-            return index.internalPointer()
-
-    def index(self, row, column, parent):
-        """Return index for row/column under parent"""
-
-        if not parent.isValid():
-            parent_node = self._root_node
-        else:
-            parent_node = parent.internalPointer()
-
-        child_item = parent_node.child(row)
-        if child_item:
-            return self.createIndex(row, column, child_item)
-        return QtCore.QModelIndex()
-
-    def rowCount(self, parent):
-        node = self._root_node
-        if parent.isValid():
-            node = parent.internalPointer()
-        return node.childCount()
-
-    def columnCount(self, parent):
-        return len(self.COLUMNS)
-
-    def parent(self, index):
-        return QtCore.QModelIndex()
-
-    def headerData(self, section, orientation, role):
-        if role == QtCore.Qt.DisplayRole:
-            if section < len(self.COLUMNS):
-                key = self.COLUMNS[section]
-                return self.colums_mapping.get(key, key)
-
-        super(LogModel, self).headerData(section, orientation, role)
-
-    def flags(self, index):
-        return (QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsSelectable)
-
-    def clear(self):
-        self.beginResetModel()
-        self._root_node = Node()
-        self.endResetModel()
-
-
-class Node(dict):
-    """A node that can be represented in a tree view.
-
-    The node can store data just like a dictionary.
-
-    >>> data = {"name": "John", "score": 10}
-    >>> node = Node(data)
-    >>> assert node["name"] == "John"
-
-    """
-    def __init__(self, data=None):
-        super(Node, self).__init__()
-        self._children = list()
-        self._parent = None
-
-        if data is not None:
-            assert isinstance(data, dict)
-            self.update(data)
-
-    def childCount(self):
-        return len(self._children)
-
-    def child(self, row):
-        if row >= len(self._children):
-            log.warning("Invalid row as child: {0}".format(row))
-            return
-        return self._children[row]
-
-    def children(self):
-        return self._children
-
-    def parent(self):
-        return self._parent
-
-    def row(self):
-        """
-        Returns:
-            int: Index of this node under parent"""
-        if self._parent is not None:
-            siblings = self.parent().children()
-            return siblings.index(self)
-
-    def add_child(self, child):
-        """Add a child to this node"""
-        child._parent = self
-        self._children.append(child)
+
+class LogsFilterProxy(QtCore.QSortFilterProxyModel):
+    def __init__(self, *args, **kwargs):
+        super(LogsFilterProxy, self).__init__(*args, **kwargs)
+        self.col_usernames = None
+        self.filter_usernames = set()
+
+    def update_users_filter(self, users):
+        self.filter_usernames = set()
+        for user in users or tuple():
+            self.filter_usernames.add(user)
+        self.invalidateFilter()
+
+    def filterAcceptsRow(self, source_row, source_parent):
+        if self.col_usernames is not None:
+            index = self.sourceModel().index(
+                source_row, self.col_usernames, source_parent
+            )
+            user = index.data(QtCore.Qt.DisplayRole)
+            if user not in self.filter_usernames:
+                return False
+        return True

View file

@@ -1,6 +1,6 @@
-from Qt import QtCore, QtWidgets, QtGui
-from PyQt5.QtCore import QVariant
+from Qt import QtCore, QtWidgets
+from avalon.vendor import qtawesome

-from .models import LogModel
+from .models import LogModel, LogsFilterProxy


 class SearchComboBox(QtWidgets.QComboBox):
@@ -50,37 +50,6 @@ class SearchComboBox(QtWidgets.QComboBox):

         return text

-
-class CheckableComboBox2(QtWidgets.QComboBox):
-    def __init__(self, parent=None):
-        super(CheckableComboBox, self).__init__(parent)
-        self.view().pressed.connect(self.handleItemPressed)
-        self._changed = False
-
-    def handleItemPressed(self, index):
-        item = self.model().itemFromIndex(index)
-        if item.checkState() == QtCore.Qt.Checked:
-            item.setCheckState(QtCore.Qt.Unchecked)
-        else:
-            item.setCheckState(QtCore.Qt.Checked)
-        self._changed = True
-
-    def hidePopup(self):
-        if not self._changed:
-            super(CheckableComboBox, self).hidePopup()
-        self._changed = False
-
-    def itemChecked(self, index):
-        item = self.model().item(index, self.modelColumn())
-        return item.checkState() == QtCore.Qt.Checked
-
-    def setItemChecked(self, index, checked=True):
-        item = self.model().item(index, self.modelColumn())
-        if checked:
-            item.setCheckState(QtCore.Qt.Checked)
-        else:
-            item.setCheckState(QtCore.Qt.Unchecked)
-

 class SelectableMenu(QtWidgets.QMenu):
     selection_changed = QtCore.Signal()
@@ -137,144 +106,108 @@ class CustomCombo(QtWidgets.QWidget):
             yield action


-class CheckableComboBox(QtWidgets.QComboBox):
-    def __init__(self, parent=None):
-        super(CheckableComboBox, self).__init__(parent)
-
-        view = QtWidgets.QTreeView()
-        view.header().hide()
-        view.setRootIsDecorated(False)
-
-        model = QtGui.QStandardItemModel()
-
-        view.pressed.connect(self.handleItemPressed)
-        self._changed = False
-
-        self.setView(view)
-        self.setModel(model)
-
-        self.view = view
-        self.model = model
-
-    def handleItemPressed(self, index):
-        item = self.model.itemFromIndex(index)
-        if item.checkState() == QtCore.Qt.Checked:
-            item.setCheckState(QtCore.Qt.Unchecked)
-        else:
-            item.setCheckState(QtCore.Qt.Checked)
-        self._changed = True
-
-    def hidePopup(self):
-        if not self._changed:
-            super(CheckableComboBox, self).hidePopup()
-        self._changed = False
-
-    def itemChecked(self, index):
-        item = self.model.item(index, self.modelColumn())
-        return item.checkState() == QtCore.Qt.Checked
-
-    def setItemChecked(self, index, checked=True):
-        item = self.model.item(index, self.modelColumn())
-        if checked:
-            item.setCheckState(QtCore.Qt.Checked)
-        else:
-            item.setCheckState(QtCore.Qt.Unchecked)
-
-    def addItems(self, items):
-        for text, checked in items:
-            text_item = QtGui.QStandardItem(text)
-            checked_item = QtGui.QStandardItem()
-            checked_item.setData(QVariant(checked), QtCore.Qt.CheckStateRole)
-            self.model.appendRow([text_item, checked_item])
-
-
 class LogsWidget(QtWidgets.QWidget):
     """A widget that lists the published subsets for an asset"""

-    active_changed = QtCore.Signal()
-
-    def __init__(self, parent=None):
+    def __init__(self, detail_widget, parent=None):
         super(LogsWidget, self).__init__(parent=parent)

         model = LogModel()
+        proxy_model = LogsFilterProxy()
+        proxy_model.setSourceModel(model)
+        proxy_model.col_usernames = model.COLUMNS.index("username")

         filter_layout = QtWidgets.QHBoxLayout()

         # user_filter = SearchComboBox(self, "Users")
         user_filter = CustomCombo("Users", self)
-        users = model.dbcon.distinct("user")
+        users = model.dbcon.distinct("username")
         user_filter.populate(users)
-        user_filter.selection_changed.connect(self.user_changed)
+        user_filter.selection_changed.connect(self._user_changed)
+        proxy_model.update_users_filter(users)

         level_filter = CustomCombo("Levels", self)
         # levels = [(level, True) for level in model.dbcon.distinct("level")]
         levels = model.dbcon.distinct("level")
         level_filter.addItems(levels)
+        level_filter.selection_changed.connect(self._level_changed)
+        detail_widget.update_level_filter(levels)

-        date_from_label = QtWidgets.QLabel("From:")
-        date_filter_from = QtWidgets.QDateTimeEdit()
-        date_from_layout = QtWidgets.QVBoxLayout()
-        date_from_layout.addWidget(date_from_label)
-        date_from_layout.addWidget(date_filter_from)
+        spacer = QtWidgets.QWidget()

-        # now = datetime.datetime.now()
-        # QtCore.QDateTime(
-        #     now.year,
-        #     now.month,
-        #     now.day,
-        #     now.hour,
-        #     now.minute,
-        #     second=0,
-        #     msec=0,
-        #     timeSpec=0
-        # )
-
-        date_to_label = QtWidgets.QLabel("To:")
-        date_filter_to = QtWidgets.QDateTimeEdit()
-        date_to_layout = QtWidgets.QVBoxLayout()
-        date_to_layout.addWidget(date_to_label)
-        date_to_layout.addWidget(date_filter_to)
+        icon = qtawesome.icon("fa.refresh", color="white")
+        refresh_btn = QtWidgets.QPushButton(icon, "")

         filter_layout.addWidget(user_filter)
         filter_layout.addWidget(level_filter)
-
-        filter_layout.addLayout(date_from_layout)
-        filter_layout.addLayout(date_to_layout)
+        filter_layout.addWidget(spacer, 1)
+        filter_layout.addWidget(refresh_btn)

         view = QtWidgets.QTreeView(self)
         view.setAllColumnsShowFocus(True)
+        view.setEditTriggers(QtWidgets.QAbstractItemView.NoEditTriggers)

         layout = QtWidgets.QVBoxLayout(self)
         layout.setContentsMargins(0, 0, 0, 0)
         layout.addLayout(filter_layout)
         layout.addWidget(view)

+        view.setModel(proxy_model)
         view.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
         view.setSortingEnabled(True)
         view.sortByColumn(
             model.COLUMNS.index("started"),
-            QtCore.Qt.AscendingOrder
+            QtCore.Qt.DescendingOrder
         )
-        view.setModel(model)
-        view.pressed.connect(self._on_activated)

-        # prepare
-        model.refresh()
+        view.selectionModel().selectionChanged.connect(self._on_index_change)
+        refresh_btn.clicked.connect(self._on_refresh_clicked)

         # Store to memory
         self.model = model
+        self.proxy_model = proxy_model
         self.view = view

         self.user_filter = user_filter
         self.level_filter = level_filter

-    def _on_activated(self, *args, **kwargs):
-        self.active_changed.emit()
+        self.detail_widget = detail_widget
+        self.refresh_btn = refresh_btn

-    def user_changed(self):
+        # prepare
+        self.refresh()
+
+    def refresh(self):
+        self.model.refresh()
+        self.detail_widget.refresh()
+
+    def _on_refresh_clicked(self):
+        self.refresh()
+
+    def _on_index_change(self, to_index, from_index):
+        index = self._selected_log()
+        if index:
+            logs = index.data(self.model.ROLE_LOGS)
+        else:
+            logs = []
+        self.detail_widget.set_detail(logs)
+
+    def _user_changed(self):
+        checked_values = set()
         for action in self.user_filter.items():
-            print(action)
+            if action.isChecked():
+                checked_values.add(action.text())
+        self.proxy_model.update_users_filter(checked_values)
+
+    def _level_changed(self):
+        checked_values = set()
+        for action in self.level_filter.items():
+            if action.isChecked():
+                checked_values.add(action.text())
+        self.detail_widget.update_level_filter(checked_values)

     def on_context_menu(self, point):
         # TODO will be any actions? it's ready
@@ -288,7 +221,7 @@ class LogsWidget(QtWidgets.QWidget):
         selection = self.view.selectionModel()
         rows = selection.selectedRows(column=0)

-    def selected_log(self):
+    def _selected_log(self):
         selection = self.view.selectionModel()
         rows = selection.selectedRows(column=0)
         if len(rows) == 1:
@@ -300,22 +233,55 @@ class OutputWidget(QtWidgets.QWidget):
     def __init__(self, parent=None):
         super(OutputWidget, self).__init__(parent=parent)
         layout = QtWidgets.QVBoxLayout(self)
+
+        show_timecode_checkbox = QtWidgets.QCheckBox("Show timestamp")
+
         output_text = QtWidgets.QTextEdit()
         output_text.setReadOnly(True)
         # output_text.setLineWrapMode(QtWidgets.QTextEdit.FixedPixelWidth)
+
+        layout.addWidget(show_timecode_checkbox)
         layout.addWidget(output_text)

+        show_timecode_checkbox.stateChanged.connect(
+            self.on_show_timecode_change
+        )
+
         self.setLayout(layout)
         self.output_text = output_text
+        self.show_timecode_checkbox = show_timecode_checkbox
+
+        self.refresh()
+
+    def refresh(self):
+        self.set_detail()
+
+    def show_timecode(self):
+        return self.show_timecode_checkbox.isChecked()
+
+    def on_show_timecode_change(self):
+        self.set_detail(self.las_logs)
+
+    def update_level_filter(self, levels):
+        self.filter_levels = set()
+        for level in levels or tuple():
+            self.filter_levels.add(level.lower())
+        self.set_detail(self.las_logs)

     def add_line(self, line):
         self.output_text.append(line)

-    def set_detail(self, node):
+    def set_detail(self, logs=None):
+        self.las_logs = logs
         self.output_text.clear()
-        for log in node["_logs"]:
+        if not logs:
+            return
+
+        show_timecode = self.show_timecode()
+        for log in logs:
             level = log["level"].lower()
+            if level not in self.filter_levels:
+                continue

             line_f = "<font color=\"White\">{message}"

             if level == "debug":
@@ -353,66 +319,13 @@ class OutputWidget(QtWidgets.QWidget):

             line = line_f.format(**log)

+            if show_timecode:
+                timestamp = log["timestamp"]
+                line = timestamp.strftime("%Y-%d-%m %H:%M:%S") + " " + line
+
             self.add_line(line)

             if not exc:
                 continue

             for _line in exc["stackTrace"].split("\n"):
                 self.add_line(_line)
-
-
-class LogDetailWidget(QtWidgets.QWidget):
-    """A Widget that display information about a specific version"""
-
-    data_rows = [
-        "user",
-        "message",
-        "level",
-        "logname",
-        "method",
-        "module",
-        "fileName",
-        "lineNumber",
-        "host",
-        "timestamp"
-    ]
-
-    html_text = u"""
-<h3>{user} - {timestamp}</h3>
-<b>User</b><br>{user}<br>
-<br><b>Level</b><br>{level}<br>
-<br><b>Message</b><br>{message}<br>
-<br><b>Log Name</b><br>{logname}<br><br><b>Method</b><br>{method}<br>
-<br><b>File</b><br>{fileName}<br>
-<br><b>Line</b><br>{lineNumber}<br>
-<br><b>Host</b><br>{host}<br>
-<br><b>Timestamp</b><br>{timestamp}<br>
-"""
-
-    def __init__(self, parent=None):
-        super(LogDetailWidget, self).__init__(parent=parent)
-
-        layout = QtWidgets.QVBoxLayout(self)
-
-        label = QtWidgets.QLabel("Detail")
-        detail_widget = QtWidgets.QTextEdit()
-        detail_widget.setReadOnly(True)
-
-        layout.addWidget(label)
-        layout.addWidget(detail_widget)
-
-        self.detail_widget = detail_widget
-
-        self.setEnabled(True)
-        self.set_detail(None)
-
-    def set_detail(self, detail_data):
-        if not detail_data:
-            self.detail_widget.setText("")
-            return
-
-        data = dict()
-        for row in self.data_rows:
-            value = detail_data.get(row) or "< Not set >"
-            data[row] = value
-
-        self.detail_widget.setHtml(self.html_text.format(**data))
View file

@@ -12,7 +12,7 @@ class CollectRenderPath(pyblish.api.InstancePlugin):

     # Presets
     anatomy_render_key = None
-    anatomy_publish_render_key = None
+    publish_render_metadata = None

     def process(self, instance):
         anatomy = instance.context.data["anatomy"]
@@ -28,7 +28,7 @@ class CollectRenderPath(pyblish.api.InstancePlugin):

         # get anatomy rendering keys
         anatomy_render_key = self.anatomy_render_key or "render"
-        anatomy_publish_render_key = self.anatomy_publish_render_key or "render"
+        publish_render_metadata = self.publish_render_metadata or "render"

         # get folder and path for rendering images from celaction
         render_dir = anatomy_filled[anatomy_render_key]["folder"]
@@ -46,8 +46,11 @@ class CollectRenderPath(pyblish.api.InstancePlugin):
         instance.data["path"] = render_path

         # get anatomy for published renders folder path
-        if anatomy_filled.get(anatomy_publish_render_key):
-            instance.data["publishRenderFolder"] = anatomy_filled[
-                anatomy_publish_render_key]["folder"]
+        if anatomy_filled.get(publish_render_metadata):
+            instance.data["publishRenderMetadataFolder"] = anatomy_filled[
+                publish_render_metadata]["folder"]
+            self.log.info("Metadata render path: `{}`".format(
+                instance.data["publishRenderMetadataFolder"]
+            ))

         self.log.info(f"Render output path set to: `{render_path}`")

View file

@@ -1,5 +1,5 @@
 import shutil
-import re
+import pype

 import pyblish.api
@@ -12,57 +12,9 @@ class VersionUpScene(pyblish.api.ContextPlugin):

     def process(self, context):
         current_file = context.data.get('currentFile')
-        v_up = get_version_up(current_file)
+        v_up = pype.lib.version_up(current_file)

         self.log.debug('Current file is: {}'.format(current_file))
         self.log.debug('Version up: {}'.format(v_up))

         shutil.copy2(current_file, v_up)
         self.log.info('Scene saved into new version: {}'.format(v_up))
-
-
-def version_get(string, prefix, suffix=None):
-    """Extract version information from filenames used by DD (and Weta, apparently)
-
-    These are _v# or /v# or .v# where v is a prefix string, in our case
-    we use "v" for render version and "c" for camera track version.
-    See the version.py and camera.py plugins for usage."""
-    if string is None:
-        raise ValueError("Empty version string - no match")
-
-    regex = r"[/_.]{}\d+".format(prefix)
-    matches = re.findall(regex, string, re.IGNORECASE)
-    if not len(matches):
-        msg = f"No `_{prefix}#` found in `{string}`"
-        raise ValueError(msg)
-    return (matches[-1:][0][1], re.search(r"\d+", matches[-1:][0]).group())
-
-
-def version_set(string, prefix, oldintval, newintval):
-    """Changes version information from filenames used by DD (and Weta, apparently)
-
-    These are _v# or /v# or .v# where v is a prefix string, in our case
-    we use "v" for render version and "c" for camera track version.
-    See the version.py and camera.py plugins for usage."""
-    regex = r"[/_.]{}\d+".format(prefix)
-    matches = re.findall(regex, string, re.IGNORECASE)
-    if not len(matches):
-        return ""
-
-    # Filter to retain only version strings with matching numbers
-    matches = filter(lambda s: int(s[2:]) == oldintval, matches)
-
-    # Replace all version strings with matching numbers
-    for match in matches:
-        # use expression instead of expr so 0 prefix does not make octal
-        fmt = "%%(#)0%dd" % (len(match) - 2)
-        newfullvalue = match[0] + prefix + str(fmt % {"#": newintval})
-        string = re.sub(match, newfullvalue, string)
-    return string
-
-
-def get_version_up(path):
-    """ Returns the next version of the path """
-    (prefix, v) = version_get(path, 'v')
-    v = int(v)
-    return version_set(path, prefix, v, v + 1)
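
For reference, the behaviour the removed helpers implemented (and which `pype.lib.version_up` is expected to cover) is a padded version bump. A simplified sketch, not the actual pype implementation, that bumps the first `v<digits>` token rather than the last:

    import re

    def bump_version(path):
        # Find a v-token preceded by /, _ or . and increment it, keeping padding.
        match = re.search(r"([/_.])v(\d+)", path, re.IGNORECASE)
        if not match:
            raise ValueError("No version token in {}".format(path))
        sep, digits = match.groups()
        bumped = "{}v{:0{}d}".format(sep, int(digits) + 1, len(digits))
        return path[:match.start()] + bumped + path[match.end():]

    print(bump_version("shot010_v012.scn"))  # shot010_v013.scn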

View file

@@ -1,11 +1,18 @@
+# -*- coding: utf-8 -*-
+"""Cleanup leftover files from publish."""
 import os
 import shutil

 import pyblish.api


 def clean_renders(instance):
-    transfers = instance.data.get("transfers", list())
+    """Delete renders after publishing.
+
+    Args:
+        instance (pyblish.api.Instance): Instance to work on.
+
+    """
+    transfers = instance.data.get("transfers", list())
     current_families = instance.data.get("families", list())
     instance_family = instance.data.get("family", None)
     dirnames = []
@@ -40,6 +47,7 @@ class CleanUp(pyblish.api.InstancePlugin):
     active = True

     def process(self, instance):
+        """Plugin entry point."""
         # Get the errored instances
         failed = []
         for result in instance.context.data["results"]:
@@ -52,7 +60,7 @@ class CleanUp(pyblish.api.InstancePlugin):
                 )
             )

-        self.log.info("Cleaning renders ...")
+        self.log.info("Performing cleanup on {}".format(instance))
         clean_renders(instance)

         if [ef for ef in self.exclude_families
@@ -60,16 +68,21 @@ class CleanUp(pyblish.api.InstancePlugin):
             return

         import tempfile
-        temp_root = tempfile.gettempdir()
         staging_dir = instance.data.get("stagingDir", None)
-        if not staging_dir or not os.path.exists(staging_dir):
-            self.log.info("No staging directory found: %s" % staging_dir)
+
+        if not staging_dir:
+            self.log.info("Staging dir not set.")
             return

+        temp_root = tempfile.gettempdir()
         if not os.path.normpath(staging_dir).startswith(temp_root):
             self.log.info("Skipping cleanup. Staging directory is not in the "
                           "temp folder: %s" % staging_dir)
             return

-        self.log.info("Removing staging directory ...")
+        if not os.path.exists(staging_dir):
+            self.log.info("No staging directory found: %s" % staging_dir)
+            return
+
+        self.log.info("Removing staging directory {}".format(staging_dir))
         shutil.rmtree(staging_dir)

View file

@@ -206,7 +206,8 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
     def _create_metadata_path(self, instance):
         ins_data = instance.data
         # Ensure output dir exists
-        output_dir = ins_data.get("publishRenderFolder", ins_data["outputDir"])
+        output_dir = ins_data.get(
+            "publishRenderMetadataFolder", ins_data["outputDir"])

         try:
             if not os.path.isdir(output_dir):

View file

@@ -0,0 +1,133 @@
# -*- coding: utf-8 -*-
"""Validate if instance asset is the same as context asset."""
from __future__ import absolute_import
import pyblish.api
import pype.api
class SelectInvalidInstances(pyblish.api.Action):
"""Select invalid instances in Outliner."""
label = "Select Instances"
icon = "briefcase"
on = "failed"
def process(self, context, plugin):
"""Process invalid validators and select invalid instances."""
# Get the errored instances
failed = []
for result in context.data["results"]:
if result["error"] is None:
continue
if result["instance"] is None:
continue
if result["instance"] in failed:
continue
if result["plugin"] != plugin:
continue
failed.append(result["instance"])
# Apply pyblish.logic to get the instances for the plug-in
instances = pyblish.api.instances_by_plugin(failed, plugin)
if instances:
self.log.info(
"Selecting invalid nodes: %s" % ", ".join(
[str(x) for x in instances]
)
)
self.select(instances)
else:
self.log.info("No invalid nodes found.")
self.deselect()
def select(self, instances):
if "nuke" in pyblish.api.registered_hosts():
import avalon.nuke.lib
import nuke
avalon.nuke.lib.select_nodes(
[nuke.toNode(str(x)) for x in instances]
)
if "maya" in pyblish.api.registered_hosts():
from maya import cmds
cmds.select(instances, replace=True, noExpand=True)
def deselect(self):
if "nuke" in pyblish.api.registered_hosts():
import avalon.nuke.lib
avalon.nuke.lib.reset_selection()
if "maya" in pyblish.api.registered_hosts():
from maya import cmds
cmds.select(deselect=True)
class RepairSelectInvalidInstances(pyblish.api.Action):
"""Repair the instance asset."""
label = "Repair"
icon = "wrench"
on = "failed"
def process(self, context, plugin):
# Get the errored instances
failed = []
for result in context.data["results"]:
if result["error"] is None:
continue
if result["instance"] is None:
continue
if result["instance"] in failed:
continue
if result["plugin"] != plugin:
continue
failed.append(result["instance"])
# Apply pyblish.logic to get the instances for the plug-in
instances = pyblish.api.instances_by_plugin(failed, plugin)
context_asset = context.data["assetEntity"]["name"]
for instance in instances:
self.set_attribute(instance, context_asset)
def set_attribute(self, instance, context_asset):
if "nuke" in pyblish.api.registered_hosts():
import nuke
nuke.toNode(
instance.data.get("name")
)["avalon:asset"].setValue(context_asset)
if "maya" in pyblish.api.registered_hosts():
from maya import cmds
cmds.setAttr(
instance.data.get("name") + ".asset",
context_asset,
type="string"
)
class ValidateInstanceInContext(pyblish.api.InstancePlugin):
"""Validator to check if instance asset match context asset.
When working in per-shot style you always publish data in context of
current asset (shot). This validator checks if this is so. It is optional
so it can be disabled when needed.
Action on this validator will select invalid instances in Outliner.
"""
order = pype.api.ValidateContentsOrder
label = "Instance in same Context"
optional = True
hosts = ["maya", "nuke"]
actions = [SelectInvalidInstances, RepairSelectInvalidInstances]
def process(self, instance):
asset = instance.data.get("asset")
context_asset = instance.context.data["assetEntity"]["name"]
msg = "{} has asset {}".format(instance.name, asset)
assert asset == context_asset, msg

View file

@ -72,19 +72,27 @@ class ExtractRender(pyblish.api.InstancePlugin):
self.log.info(output.decode("utf-8")) self.log.info(output.decode("utf-8"))
# Collect rendered files. # Collect rendered files.
self.log.debug(path)
files = os.listdir(path) files = os.listdir(path)
self.log.debug(files)
collections, remainder = clique.assemble(files, minimum_items=1) collections, remainder = clique.assemble(files, minimum_items=1)
assert not remainder, ( assert not remainder, (
"There should not be a remainder for {0}: {1}".format( "There should not be a remainder for {0}: {1}".format(
instance[0], remainder instance[0], remainder
) )
) )
assert len(collections) == 1, ( self.log.debug(collections)
"There should only be one image sequence in {}. Found: {}".format( if len(collections) > 1:
path, len(collections) for col in collections:
) if len(list(col)) > 1:
) collection = col
collection = collections[0] else:
# a single collection is the expected case here
collection = collections[0]
# Generate thumbnail. # Generate thumbnail.
thumbnail_path = os.path.join(path, "thumbnail.png") thumbnail_path = os.path.join(path, "thumbnail.png")
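
The new branch exists because `clique.assemble` with `minimum_items=1` turns every numbered file into its own collection, so a stray single frame can produce a second collection. A small sketch (assumes the `clique` package; file names are hypothetical):

    # Sketch of why more than one collection can appear (assumes the
    # clique package; file names are hypothetical).
    import clique

    files = ["render.0001.png", "render.0002.png", "render.0003.png",
             "beauty.0001.png"]
    collections, remainder = clique.assemble(files, minimum_items=1)

    # With minimum_items=1 the lone "beauty.0001.png" forms its own
    # single-item collection, so the plugin picks the collection that
    # actually holds the rendered sequence (more than one member).
    for col in collections:
        print(col, len(list(col)))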

View file

@@ -28,8 +28,11 @@ class ValidateSceneSettings(pyblish.api.InstancePlugin):
hosts = ["harmony"] hosts = ["harmony"]
actions = [ValidateSceneSettingsRepair] actions = [ValidateSceneSettingsRepair]
frame_check_filter = ["_ch_", "_pr_", "_intd_", "_extd_"]
def process(self, instance): def process(self, instance):
expected_settings = pype.hosts.harmony.get_asset_settings() expected_settings = pype.hosts.harmony.get_asset_settings()
self.log.info(expected_settings)
# Harmony is expected to start at 1. # Harmony is expected to start at 1.
frame_start = expected_settings["frameStart"] frame_start = expected_settings["frameStart"]
@@ -37,6 +40,14 @@ class ValidateSceneSettings(pyblish.api.InstancePlugin):
expected_settings["frameEnd"] = frame_end - frame_start + 1 expected_settings["frameEnd"] = frame_end - frame_start + 1
expected_settings["frameStart"] = 1 expected_settings["frameStart"] = 1
self.log.info(instance.context.data['anatomyData']['asset'])
if any(string in instance.context.data['anatomyData']['asset']
for string in self.frame_check_filter):
expected_settings.pop("frameEnd")
func = """function func() func = """function func()
{ {
return { return {
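
The substring filter added above simply skips the `frameEnd` check for assets whose name carries one of the tokens. A quick sketch with hypothetical asset names:

    # Quick sketch of the substring filter (asset names are hypothetical).
    frame_check_filter = ["_ch_", "_pr_", "_intd_", "_extd_"]

    for asset in ["ep01_sh010", "props_pr_table", "lib_intd_kitchen"]:
        skip = any(token in asset for token in frame_check_filter)
        print(asset, "-> frameEnd check skipped" if skip
              else "-> frameEnd check kept")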

View file

@@ -1,14 +1,25 @@
from avalon import api # -*- coding: utf-8 -*-
import maya.app.renderSetup.model.renderSetup as renderSetup """Load and update RenderSetup settings.
from avalon.maya import lib
from maya import cmds Working with RenderSetup settings in Maya is done using json files.
When this json is loaded, it will overwrite all settings on RenderSetup
instance.
"""
import json import json
import six
import sys
from avalon import api
from avalon.maya import lib
from pype.hosts.maya import lib as pypelib
from maya import cmds
import maya.app.renderSetup.model.renderSetup as renderSetup
class RenderSetupLoader(api.Loader): class RenderSetupLoader(api.Loader):
""" """Load json preset for RenderSetup overwriting current one."""
This will load json preset for RenderSetup, overwriting current one.
"""
families = ["rendersetup"] families = ["rendersetup"]
representations = ["json"] representations = ["json"]
@@ -19,7 +30,7 @@ class RenderSetupLoader(api.Loader):
color = "orange" color = "orange"
def load(self, context, name, namespace, data): def load(self, context, name, namespace, data):
"""Load RenderSetup settings."""
from avalon.maya.pipeline import containerise from avalon.maya.pipeline import containerise
# from pype.hosts.maya.lib import namespaced # from pype.hosts.maya.lib import namespaced
@@ -29,7 +40,7 @@ class RenderSetupLoader(api.Loader):
prefix="_" if asset[0].isdigit() else "", prefix="_" if asset[0].isdigit() else "",
suffix="_", suffix="_",
) )
self.log.info(">>> loading json [ {} ]".format(self.fname))
with open(self.fname, "r") as file: with open(self.fname, "r") as file:
renderSetup.instance().decode( renderSetup.instance().decode(
json.load(file), renderSetup.DECODE_AND_OVERWRITE, None) json.load(file), renderSetup.DECODE_AND_OVERWRITE, None)
@@ -42,9 +53,56 @@ class RenderSetupLoader(api.Loader):
if not nodes: if not nodes:
return return
self.log.info(">>> containerising [ {} ]".format(name))
return containerise( return containerise(
name=name, name=name,
namespace=namespace, namespace=namespace,
nodes=nodes, nodes=nodes,
context=context, context=context,
loader=self.__class__.__name__) loader=self.__class__.__name__)
def remove(self, container):
"""Remove RenderSetup settings instance."""
container_name = container["objectName"]
self.log.info("Removing '%s' from Maya.." % container["name"])
container_content = cmds.sets(container_name, query=True)
nodes = cmds.ls(container_content, long=True)
nodes.append(container_name)
try:
cmds.delete(nodes)
except ValueError:
# Already implicitly deleted by Maya upon removing reference
pass
def update(self, container, representation):
"""Update RenderSetup setting by overwriting existing settings."""
pypelib.show_message(
"Render setup update",
"Render setup setting will be overwritten by new version. All "
"setting specified by user not included in loaded version "
"will be lost.")
path = api.get_representation_path(representation)
with open(path, "r") as file:
try:
renderSetup.instance().decode(
json.load(file), renderSetup.DECODE_AND_OVERWRITE, None)
except Exception:
self.log.error("There were errors during loading")
six.reraise(*sys.exc_info())
# Update metadata
node = container["objectName"]
cmds.setAttr("{}.representation".format(node),
str(representation["_id"]),
type="string")
self.log.info("... updated")
def switch(self, container, representation):
"""Switch representations."""
self.update(container, representation)
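
For context, the json this loader decodes can be produced with the same RenderSetup API. A minimal export sketch, assuming the `encode()` counterpart documented by Autodesk (runs only inside a Maya session; the path is hypothetical):

    # Minimal export counterpart (runs only inside Maya; the path is
    # hypothetical).
    import json
    import maya.app.renderSetup.model.renderSetup as renderSetup

    path = "C:/temp/render_setup.json"
    with open(path, "w") as f:
        # encode() returns a plain dict of layers, collections and
        # overrides; decode(..., DECODE_AND_OVERWRITE, None) re-applies it.
        json.dump(renderSetup.instance().encode(None), f, indent=2)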

View file

@@ -19,6 +19,7 @@ class ExtractCameraAlembic(pype.api.Extractor):
label = "Camera (Alembic)" label = "Camera (Alembic)"
hosts = ["maya"] hosts = ["maya"]
families = ["camera"] families = ["camera"]
bake_attributes = []
def process(self, instance): def process(self, instance):
@@ -66,6 +67,14 @@ class ExtractCameraAlembic(pype.api.Extractor):
job_str += ' -file "{0}"'.format(path) job_str += ' -file "{0}"'.format(path)
# bake specified attributes in preset
assert isinstance(self.bake_attributes, (list, tuple)), (
"Attributes to bake must be specified as a list"
)
for attr in self.bake_attributes:
self.log.info("Adding {} attribute".format(attr))
job_str += " -attr {0}".format(attr)
with lib.evaluation("off"): with lib.evaluation("off"):
with avalon.maya.suspended_refresh(): with avalon.maya.suspended_refresh():
cmds.AbcExport(j=job_str, verbose=False) cmds.AbcExport(j=job_str, verbose=False)
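
With the new option, two baked attributes extend the AbcExport job string roughly like this (frame range, path and attribute names are hypothetical):

    # Rough shape of the assembled job string (frame range, path and
    # attribute names are hypothetical).
    bake_attributes = ["focalLength", "fStop"]

    job_str = "-frameRange 1001 1050 -worldSpace"
    job_str += ' -file "{0}"'.format("/tmp/cameraMain.abc")
    for attr in bake_attributes:
        job_str += " -attr {0}".format(attr)

    print(job_str)
    # -frameRange 1001 1050 -worldSpace -file "/tmp/cameraMain.abc"
    # -attr focalLength -attr fStop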

View file

@@ -1,3 +1,5 @@
# -*- coding: utf-8 -*-
"""Extract camera as Maya Scene."""
import os import os
from maya import cmds from maya import cmds
@@ -65,8 +67,8 @@ def unlock(plug):
cmds.disconnectAttr(source, destination) cmds.disconnectAttr(source, destination)
class ExtractCameraMayaAscii(pype.api.Extractor): class ExtractCameraMayaScene(pype.api.Extractor):
"""Extract a Camera as Maya Ascii. """Extract a Camera as Maya Scene.
This will create a duplicate of the camera that will be baked *with* This will create a duplicate of the camera that will be baked *with*
substeps and handles for the required frames. This temporary duplicate substeps and handles for the required frames. This temporary duplicate
@@ -81,13 +83,28 @@ class ExtractCameraMayaAscii(pype.api.Extractor):
""" """
label = "Camera (Maya Ascii)" label = "Camera (Maya Scene)"
hosts = ["maya"] hosts = ["maya"]
families = ["camera"] families = ["camera"]
scene_type = "ma"
def process(self, instance): def process(self, instance):
"""Plugin entry point."""
# get settings # get settings
ext_mapping = instance.context.data["presets"]["maya"].get("ext_mapping") # noqa: E501
if ext_mapping:
self.log.info("Looking in presets for scene type ...")
# use extension mapping for first family found
for family in self.families:
try:
self.scene_type = ext_mapping[family]
self.log.info(
"Using {} as scene type".format(self.scene_type))
break
except (AttributeError, KeyError):
# no preset found
pass
framerange = [instance.data.get("frameStart", 1), framerange = [instance.data.get("frameStart", 1),
instance.data.get("frameEnd", 1)] instance.data.get("frameEnd", 1)]
handles = instance.data.get("handles", 0) handles = instance.data.get("handles", 0)
@@ -95,7 +112,7 @@ class ExtractCameraMayaAscii(pype.api.Extractor):
bake_to_worldspace = instance.data("bakeToWorldSpace", True) bake_to_worldspace = instance.data("bakeToWorldSpace", True)
if not bake_to_worldspace: if not bake_to_worldspace:
self.log.warning("Camera (Maya Ascii) export only supports world" self.log.warning("Camera (Maya Scene) export only supports world"
"space baked camera extractions. The disabled " "space baked camera extractions. The disabled "
"bake to world space is ignored...") "bake to world space is ignored...")
@@ -115,7 +132,7 @@ class ExtractCameraMayaAscii(pype.api.Extractor):
# Define extract output file path # Define extract output file path
dir_path = self.staging_dir(instance) dir_path = self.staging_dir(instance)
filename = "{0}.ma".format(instance.name) filename = "{0}.{1}".format(instance.name, self.scene_type)
path = os.path.join(dir_path, filename) path = os.path.join(dir_path, filename)
# Perform extraction # Perform extraction
@@ -152,7 +169,7 @@ class ExtractCameraMayaAscii(pype.api.Extractor):
cmds.select(baked_shapes, noExpand=True) cmds.select(baked_shapes, noExpand=True)
cmds.file(path, cmds.file(path,
force=True, force=True,
typ="mayaAscii", typ="mayaAscii" if self.scene_type == "ma" else "mayaBinary", # noqa: E501
exportSelected=True, exportSelected=True,
preserveReferences=False, preserveReferences=False,
constructionHistory=False, constructionHistory=False,
@@ -164,15 +181,15 @@ class ExtractCameraMayaAscii(pype.api.Extractor):
# Delete the baked hierarchy # Delete the baked hierarchy
if bake_to_worldspace: if bake_to_worldspace:
cmds.delete(baked) cmds.delete(baked)
if self.scene_type == "ma":
massage_ma_file(path) massage_ma_file(path)
if "representations" not in instance.data: if "representations" not in instance.data:
instance.data["representations"] = [] instance.data["representations"] = []
representation = { representation = {
'name': 'ma', 'name': self.scene_type,
'ext': 'ma', 'ext': self.scene_type,
'files': filename, 'files': filename,
"stagingDir": dir_path, "stagingDir": dir_path,
} }
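
The `ext_mapping` preset consulted here (and by the other scene extractors below) is a plain family-to-extension dict. A sketch of the lookup with a hypothetical preset:

    # Sketch of the ext_mapping lookup (preset values are hypothetical).
    presets = {"maya": {"ext_mapping": {"camera": "mb", "model": "ma"}}}

    scene_type = "ma"  # class default
    ext_mapping = presets["maya"].get("ext_mapping")
    if ext_mapping:
        for family in ["camera"]:
            try:
                scene_type = ext_mapping[family]
                break
            except (AttributeError, KeyError):
                # family not mapped, keep the default
                pass

    print(scene_type)  # mb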

View file

@@ -1,3 +1,5 @@
# -*- coding: utf-8 -*-
"""Extract data as Maya scene (raw)."""
import os import os
from maya import cmds from maya import cmds
@@ -6,24 +8,37 @@ import avalon.maya
import pype.api import pype.api
class ExtractMayaAsciiRaw(pype.api.Extractor): class ExtractMayaSceneRaw(pype.api.Extractor):
"""Extract as Maya Ascii (raw) """Extract as Maya Scene (raw).
This will preserve all references, construction history, etc. This will preserve all references, construction history, etc.
""" """
label = "Maya ASCII (Raw)" label = "Maya Scene (Raw)"
hosts = ["maya"] hosts = ["maya"]
families = ["mayaAscii", families = ["mayaAscii",
"setdress", "setdress",
"layout"] "layout"]
scene_type = "ma"
def process(self, instance): def process(self, instance):
"""Plugin entry point."""
ext_mapping = instance.context.data["presets"]["maya"].get("ext_mapping") # noqa: E501
if ext_mapping:
self.log.info("Looking in presets for scene type ...")
# use extension mapping for first family found
for family in self.families:
try:
self.scene_type = ext_mapping[family]
self.log.info(
"Using {} as scene type".format(self.scene_type))
break
except (AttributeError, KeyError):
# no preset found
pass
# Define extract output file path # Define extract output file path
dir_path = self.staging_dir(instance) dir_path = self.staging_dir(instance)
filename = "{0}.ma".format(instance.name) filename = "{0}.{1}".format(instance.name, self.scene_type)
path = os.path.join(dir_path, filename) path = os.path.join(dir_path, filename)
# Whether to include all nodes in the instance (including those from # Whether to include all nodes in the instance (including those from
@@ -38,12 +53,12 @@ class ExtractMayaAsciiRaw(pype.api.Extractor):
members = instance[:] members = instance[:]
# Perform extraction # Perform extraction
self.log.info("Performing extraction..") self.log.info("Performing extraction ...")
with avalon.maya.maintained_selection(): with avalon.maya.maintained_selection():
cmds.select(members, noExpand=True) cmds.select(members, noExpand=True)
cmds.file(path, cmds.file(path,
force=True, force=True,
typ="mayaAscii", typ="mayaAscii" if self.scene_type == "ma" else "mayaBinary", # noqa: E501
exportSelected=True, exportSelected=True,
preserveReferences=True, preserveReferences=True,
constructionHistory=True, constructionHistory=True,
@@ -55,8 +70,8 @@ class ExtractMayaAsciiRaw(pype.api.Extractor):
instance.data["representations"] = [] instance.data["representations"] = []
representation = { representation = {
'name': 'ma', 'name': self.scene_type,
'ext': 'ma', 'ext': self.scene_type,
'files': filename, 'files': filename,
"stagingDir": dir_path "stagingDir": dir_path
} }

View file

@@ -1,3 +1,5 @@
# -*- coding: utf-8 -*-
"""Extract model as Maya Scene."""
import os import os
from maya import cmds from maya import cmds
@@ -8,7 +10,7 @@ from pype.hosts.maya import lib
class ExtractModel(pype.api.Extractor): class ExtractModel(pype.api.Extractor):
"""Extract as Model (Maya Ascii) """Extract as Model (Maya Scene).
Only extracts contents based on the original "setMembers" data to ensure Only extracts contents based on the original "setMembers" data to ensure
publishing the least amount of required shapes. From that it only takes publishing the least amount of required shapes. From that it only takes
@@ -22,19 +24,33 @@ class ExtractModel(pype.api.Extractor):
""" """
label = "Model (Maya ASCII)" label = "Model (Maya Scene)"
hosts = ["maya"] hosts = ["maya"]
families = ["model"] families = ["model"]
scene_type = "ma"
def process(self, instance): def process(self, instance):
"""Plugin entry point."""
ext_mapping = instance.context.data["presets"]["maya"].get("ext_mapping") # noqa: E501
if ext_mapping:
self.log.info("Looking in presets for scene type ...")
# use extension mapping for first family found
for family in self.families:
try:
self.scene_type = ext_mapping[family]
self.log.info(
"Using {} as scene type".format(self.scene_type))
break
except (AttributeError, KeyError):
# no preset found
pass
# Define extract output file path # Define extract output file path
stagingdir = self.staging_dir(instance) stagingdir = self.staging_dir(instance)
filename = "{0}.ma".format(instance.name) filename = "{0}.{1}".format(instance.name, self.scene_type)
path = os.path.join(stagingdir, filename) path = os.path.join(stagingdir, filename)
# Perform extraction # Perform extraction
self.log.info("Performing extraction..") self.log.info("Performing extraction ...")
# Get only the shape contents we need in such a way that we avoid # Get only the shape contents we need in such a way that we avoid
# taking along intermediateObjects # taking along intermediateObjects
@@ -59,7 +75,7 @@ class ExtractModel(pype.api.Extractor):
cmds.select(members, noExpand=True) cmds.select(members, noExpand=True)
cmds.file(path, cmds.file(path,
force=True, force=True,
typ="mayaAscii", typ="mayaAscii" if self.scene_type == "ma" else "mayaBinary", # noqa: E501
exportSelected=True, exportSelected=True,
preserveReferences=False, preserveReferences=False,
channels=False, channels=False,
@@ -73,8 +89,8 @@ class ExtractModel(pype.api.Extractor):
instance.data["representations"] = [] instance.data["representations"] = []
representation = { representation = {
'name': 'ma', 'name': self.scene_type,
'ext': 'ma', 'ext': self.scene_type,
'files': filename, 'files': filename,
"stagingDir": stagingdir, "stagingDir": stagingdir,
} }

View file

@@ -1,3 +1,5 @@
# -*- coding: utf-8 -*-
"""Extract rig as Maya Scene."""
import os import os
from maya import cmds from maya import cmds
@@ -7,26 +9,40 @@ import pype.api
class ExtractRig(pype.api.Extractor): class ExtractRig(pype.api.Extractor):
"""Extract rig as Maya Ascii""" """Extract rig as Maya Scene."""
label = "Extract Rig (Maya ASCII)" label = "Extract Rig (Maya Scene)"
hosts = ["maya"] hosts = ["maya"]
families = ["rig"] families = ["rig"]
scene_type = "ma"
def process(self, instance): def process(self, instance):
"""Plugin entry point."""
ext_mapping = instance.context.data["presets"]["maya"].get("ext_mapping") # noqa: E501
if ext_mapping:
self.log.info("Looking in presets for scene type ...")
# use extension mapping for first family found
for family in self.families:
try:
self.scene_type = ext_mapping[family]
self.log.info(
"Using {} as scene type".format(self.scene_type))
break
except (AttributeError, KeyError):
# no preset found
pass
# Define extract output file path # Define extract output file path
dir_path = self.staging_dir(instance) dir_path = self.staging_dir(instance)
filename = "{0}.ma".format(instance.name) filename = "{0}.{1}".format(instance.name, self.scene_type)
path = os.path.join(dir_path, filename) path = os.path.join(dir_path, filename)
# Perform extraction # Perform extraction
self.log.info("Performing extraction..") self.log.info("Performing extraction ...")
with avalon.maya.maintained_selection(): with avalon.maya.maintained_selection():
cmds.select(instance, noExpand=True) cmds.select(instance, noExpand=True)
cmds.file(path, cmds.file(path,
force=True, force=True,
typ="mayaAscii", typ="mayaAscii" if self.scene_type == "ma" else "mayaBinary", # noqa: E501
exportSelected=True, exportSelected=True,
preserveReferences=False, preserveReferences=False,
channels=True, channels=True,
@@ -38,12 +54,11 @@ class ExtractRig(pype.api.Extractor):
instance.data["representations"] = [] instance.data["representations"] = []
representation = { representation = {
'name': 'ma', 'name': self.scene_type,
'ext': 'ma', 'ext': self.scene_type,
'files': filename, 'files': filename,
"stagingDir": dir_path "stagingDir": dir_path
} }
instance.data["representations"].append(representation) instance.data["representations"].append(representation)
self.log.info("Extracted instance '%s' to: %s" % (instance.name, path)) self.log.info("Extracted instance '%s' to: %s" % (instance.name, path))

View file

@@ -1,3 +1,6 @@
# -*- coding: utf-8 -*-
"""Extract Yeti rig."""
import os import os
import json import json
import contextlib import contextlib
@@ -11,7 +14,7 @@ import pype.hosts.maya.lib as maya
@contextlib.contextmanager @contextlib.contextmanager
def disconnect_plugs(settings, members): def disconnect_plugs(settings, members):
"""Disconnect and store attribute connections."""
members = cmds.ls(members, long=True) members = cmds.ls(members, long=True)
original_connections = [] original_connections = []
try: try:
@@ -55,7 +58,7 @@ def disconnect_plugs(settings, members):
@contextlib.contextmanager @contextlib.contextmanager
def yetigraph_attribute_values(assumed_destination, resources): def yetigraph_attribute_values(assumed_destination, resources):
"""Get values from Yeti attributes in graph."""
try: try:
for resource in resources: for resource in resources:
if "graphnode" not in resource: if "graphnode" not in resource:
@@ -89,14 +92,28 @@ def yetigraph_attribute_values(assumed_destination, resources):
class ExtractYetiRig(pype.api.Extractor): class ExtractYetiRig(pype.api.Extractor):
"""Extract the Yeti rig to a MayaAscii and write the Yeti rig data""" """Extract the Yeti rig to a Maya Scene and write the Yeti rig data."""
label = "Extract Yeti Rig" label = "Extract Yeti Rig"
hosts = ["maya"] hosts = ["maya"]
families = ["yetiRig"] families = ["yetiRig"]
scene_type = "ma"
def process(self, instance): def process(self, instance):
"""Plugin entry point."""
ext_mapping = instance.context.data["presets"]["maya"].get("ext_mapping") # noqa: E501
if ext_mapping:
self.log.info("Looking in presets for scene type ...")
# use extension mapping for first family found
for family in self.families:
try:
self.scene_type = ext_mapping[family]
self.log.info(
"Using {} as scene type".format(self.scene_type))
break
except (AttributeError, KeyError):
# no preset found
pass
yeti_nodes = cmds.ls(instance, type="pgYetiMaya") yeti_nodes = cmds.ls(instance, type="pgYetiMaya")
if not yeti_nodes: if not yeti_nodes:
raise RuntimeError("No pgYetiMaya nodes found in the instance") raise RuntimeError("No pgYetiMaya nodes found in the instance")
@@ -106,7 +123,8 @@ class ExtractYetiRig(pype.api.Extractor):
settings_path = os.path.join(dirname, "yeti.rigsettings") settings_path = os.path.join(dirname, "yeti.rigsettings")
# Yeti related staging dirs # Yeti related staging dirs
maya_path = os.path.join(dirname, "yeti_rig.ma") maya_path = os.path.join(
dirname, "yeti_rig.{}".format(self.scene_type))
self.log.info("Writing metadata file") self.log.info("Writing metadata file")
@@ -153,7 +171,7 @@ class ExtractYetiRig(pype.api.Extractor):
cmds.file(maya_path, cmds.file(maya_path,
force=True, force=True,
exportSelected=True, exportSelected=True,
typ="mayaAscii", typ="mayaAscii" if self.scene_type == "ma" else "mayaBinary", # noqa: E501
preserveReferences=False, preserveReferences=False,
constructionHistory=True, constructionHistory=True,
shader=False) shader=False)
@@ -163,21 +181,21 @@ class ExtractYetiRig(pype.api.Extractor):
if "representations" not in instance.data: if "representations" not in instance.data:
instance.data["representations"] = [] instance.data["representations"] = []
self.log.info("rig file: {}".format("yeti_rig.ma")) self.log.info("rig file: {}".format(maya_path))
instance.data["representations"].append( instance.data["representations"].append(
{ {
'name': "ma", 'name': self.scene_type,
'ext': 'ma', 'ext': self.scene_type,
'files': "yeti_rig.ma", 'files': os.path.basename(maya_path),
'stagingDir': dirname 'stagingDir': dirname
} }
) )
self.log.info("settings file: {}".format("yeti.rigsettings")) self.log.info("settings file: {}".format(settings))
instance.data["representations"].append( instance.data["representations"].append(
{ {
'name': 'rigsettings', 'name': 'rigsettings',
'ext': 'rigsettings', 'ext': 'rigsettings',
'files': 'yeti.rigsettings', 'files': os.path.basename(settings_path),
'stagingDir': dirname 'stagingDir': dirname
} }
) )

View file

@@ -62,9 +62,16 @@ class ValidateAttributes(pyblish.api.ContextPlugin):
for family in families: for family in families:
for preset in presets[family]: for preset in presets[family]:
[node_name, attribute_name] = preset.split(".") [node_name, attribute_name] = preset.split(".")
attributes.update( try:
{node_name: {attribute_name: presets[family][preset]}} attributes[node_name].update(
) {attribute_name: presets[family][preset]}
)
except KeyError:
attributes.update({
node_name: {
attribute_name: presets[family][preset]
}
})
# Get invalid attributes. # Get invalid attributes.
nodes = pm.ls() nodes = pm.ls()
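
The try/except merge keeps a second attribute on the same node from overwriting the first. The same merge can be written with `dict.setdefault` - a sketch with hypothetical presets:

    # Equivalent merge written with setdefault (presets are hypothetical).
    presets = {"model": {"pCube1.castsShadows": True,
                         "pCube1.receiveShadows": False}}

    attributes = {}
    for family in ["model"]:
        for preset in presets[family]:
            node_name, attribute_name = preset.split(".")
            attributes.setdefault(node_name, {})[attribute_name] = \
                presets[family][preset]

    print(attributes)
    # {'pCube1': {'castsShadows': True, 'receiveShadows': False}}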

View file

@@ -1,108 +0,0 @@
# -*- coding: utf-8 -*-
"""Validate if instance asset is the same as context asset."""
from __future__ import absolute_import
import pyblish.api
from pype.action import get_errored_instances_from_context
import pype.api
class SelectInvalidInstances(pyblish.api.Action):
"""Select invalid instances in Outliner."""
label = "Show Instances"
icon = "briefcase"
on = "failed"
def process(self, context, plugin):
"""Process invalid validators and select invalid instances."""
try:
from maya import cmds
except ImportError:
raise ImportError("Current host is not Maya")
errored_instances = get_errored_instances_from_context(context)
# Apply pyblish.logic to get the instances for the plug-in
instances = pyblish.api.instances_by_plugin(errored_instances, plugin)
# Get the invalid nodes for the plug-ins
self.log.info("Finding invalid nodes..")
invalid = list()
for _instance in instances:
invalid_instances = plugin.get_invalid(context)
if invalid_instances:
if isinstance(invalid_instances, (list, tuple)):
invalid.extend(invalid_instances)
else:
self.log.warning("Plug-in returned to be invalid, "
"but has no selectable nodes.")
# Ensure unique (process each node only once)
invalid = list(set(invalid))
if invalid:
self.log.info("Selecting invalid nodes: %s" % ", ".join(invalid))
cmds.select(invalid, replace=True, noExpand=True)
else:
self.log.info("No invalid nodes found.")
cmds.select(deselect=True)
class RepairSelectInvalidInstances(pyblish.api.Action):
"""Repair the instance asset."""
label = "Repair"
icon = "wrench"
on = "failed"
def process(self, context, plugin):
from maya import cmds
# Get the errored instances
failed = []
for result in context.data["results"]:
if (result["error"] is not None and result["instance"] is not None
and result["instance"] not in failed):
failed.append(result["instance"])
# Apply pyblish.logic to get the instances for the plug-in
instances = pyblish.api.instances_by_plugin(failed, plugin)
context_asset = context.data["assetEntity"]["name"]
for instance in instances:
cmds.setAttr(instance.data.get("name") + ".asset",
context_asset, type="string")
class ValidateInstanceInContext(pyblish.api.ContextPlugin):
"""Validator to check if instance asset match context asset.
When working in per-shot style you always publish data in context of
current asset (shot). This validator checks if this is so. It is optional
so it can be disabled when needed.
Action on this validator will select invalid instances in Outliner.
"""
order = pype.api.ValidateContentsOrder
label = "Instance in same Context"
optional = True
actions = [SelectInvalidInstances, RepairSelectInvalidInstances]
@classmethod
def get_invalid(cls, context):
"""Get invalid instances."""
invalid = []
context_asset = context.data["assetEntity"]["name"]
cls.log.info("we are in {}".format(context_asset))
for instance in context:
asset = instance.data.get("asset")
if asset != context_asset:
cls.log.warning("{} has asset {}".format(instance.name, asset))
invalid.append(instance.name)
return invalid
def process(self, context):
"""Check instances."""
invalid = self.get_invalid(context)
if invalid:
raise AssertionError("Some instances doesn't share same context")

View file

@@ -106,7 +106,7 @@ class CollectNukeInstances(pyblish.api.ContextPlugin):
instance.data.update({ instance.data.update({
"subset": subset, "subset": subset,
"asset": os.environ["AVALON_ASSET"], "asset": avalon_knob_data["asset"],
"label": node.name(), "label": node.name(),
"name": node.name(), "name": node.name(),
"subset": subset, "subset": subset,

View file

@@ -1,4 +1,7 @@
import pyblish.api import pyblish.api
import pype.api
from avalon import io, api
import nuke import nuke
@@ -23,6 +26,21 @@ class CollectReview(pyblish.api.InstancePlugin):
if not node["review"].value(): if not node["review"].value():
return return
# Add audio to instance if it exists.
try:
version = pype.api.get_latest_version(
instance.context.data["assetEntity"]["name"], "audioMain"
)
representation = io.find_one(
{"type": "representation", "parent": version["_id"]}
)
instance.data["audio"] = [{
"offset": 0,
"filename": api.get_representation_path(representation)
}]
except AssertionError:
pass
instance.data["families"].append("review") instance.data["families"].append("review")
instance.data['families'].append('ftrack') instance.data['families'].append('ftrack')
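
`get_latest_version` raises `AssertionError` when the asset or subset does not exist, which is why the whole lookup is wrapped rather than checking for `None`. A hedged sketch of the lookup (requires a live Avalon session; the asset and subset names are hypothetical):

    # Hedged sketch of the audio lookup (needs a live Avalon session;
    # the asset and subset names are hypothetical).
    import pype.api
    from avalon import io, api

    try:
        version = pype.api.get_latest_version("sh010", "audioMain")
        representation = io.find_one(
            {"type": "representation", "parent": version["_id"]})
        audio = [{"offset": 0,
                  "filename": api.get_representation_path(representation)}]
    except AssertionError:
        # no audioMain subset published for this asset
        audio = []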

View file

@@ -152,7 +152,7 @@ class ExtractThumbnail(pype.api.Extractor):
ipn_orig = None ipn_orig = None
for v in [n for n in nuke.allNodes() for v in [n for n in nuke.allNodes()
if "Viewer" in n.Class()]: if "Viewer" == n.Class()]:
ip = v['input_process'].getValue() ip = v['input_process'].getValue()
ipn = v['input_process_node'].getValue() ipn = v['input_process_node'].getValue()
if "VIEWER_INPUT" not in ipn and ip: if "VIEWER_INPUT" not in ipn and ip:

View file

@@ -13,6 +13,7 @@ class ExtractImage(pype.api.Extractor):
label = "Extract Image" label = "Extract Image"
hosts = ["photoshop"] hosts = ["photoshop"]
families = ["image"] families = ["image"]
formats = ["png", "jpg"]
def process(self, instance): def process(self, instance):
@@ -32,20 +33,22 @@ class ExtractImage(pype.api.Extractor):
if layer.id not in extract_ids: if layer.id not in extract_ids:
layer.Visible = False layer.Visible = False
save_options = { save_options = {}
"png": photoshop.com_objects.PNGSaveOptions(), if "png" in self.formats:
"jpg": photoshop.com_objects.JPEGSaveOptions() save_options["png"] = photoshop.com_objects.PNGSaveOptions()
} if "jpg" in self.formats:
save_options["jpg"] = photoshop.com_objects.JPEGSaveOptions()
file_basename = os.path.splitext(
photoshop.app().ActiveDocument.Name
)[0]
for extension, save_option in save_options.items(): for extension, save_option in save_options.items():
_filename = "{}.{}".format(file_basename, extension)
files[extension] = _filename
full_filename = os.path.join(staging_dir, _filename)
photoshop.app().ActiveDocument.SaveAs( photoshop.app().ActiveDocument.SaveAs(
staging_dir, save_option, True full_filename, save_option, True
)
files[extension] = "{} copy.{}".format(
os.path.splitext(
photoshop.app().ActiveDocument.Name
)[0],
extension
) )
representations = [] representations = []
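
Passing the full path to `SaveAs` avoids the old reliance on Photoshop's "<name> copy.<ext>" output naming. A sketch of the new filename construction (document name and staging dir are hypothetical):

    # Sketch of the explicit output naming (document name and staging
    # dir are hypothetical).
    import os

    document_name = "sh010_image.psd"
    staging_dir = "/tmp/staging"

    file_basename = os.path.splitext(document_name)[0]
    files = {}
    for extension in ("png", "jpg"):
        _filename = "{}.{}".format(file_basename, extension)
        files[extension] = _filename
        full_filename = os.path.join(staging_dir, _filename)  # passed to SaveAs

    print(files)  # {'png': 'sh010_image.png', 'jpg': 'sh010_image.jpg'}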

View file

@@ -24,9 +24,10 @@ class ExtractReview(pype.api.Extractor):
layers.append(image_instance[0]) layers.append(image_instance[0])
# Perform extraction # Perform extraction
output_image = "{} copy.jpg".format( output_image = "{}.jpg".format(
os.path.splitext(photoshop.app().ActiveDocument.Name)[0] os.path.splitext(photoshop.app().ActiveDocument.Name)[0]
) )
output_image_path = os.path.join(staging_dir, output_image)
with photoshop.maintained_visibility(): with photoshop.maintained_visibility():
# Hide all other layers. # Hide all other layers.
extract_ids = [ extract_ids = [
@@ -39,7 +40,9 @@ class ExtractReview(pype.api.Extractor):
layer.Visible = False layer.Visible = False
photoshop.app().ActiveDocument.SaveAs( photoshop.app().ActiveDocument.SaveAs(
staging_dir, photoshop.com_objects.JPEGSaveOptions(), True output_image_path,
photoshop.com_objects.JPEGSaveOptions(),
True
) )
ffmpeg_path = pype.lib.get_ffmpeg_tool_path("ffmpeg") ffmpeg_path = pype.lib.get_ffmpeg_tool_path("ffmpeg")
@@ -56,7 +59,7 @@ class ExtractReview(pype.api.Extractor):
thumbnail_path = os.path.join(staging_dir, "thumbnail.jpg") thumbnail_path = os.path.join(staging_dir, "thumbnail.jpg")
args = [ args = [
ffmpeg_path, "-y", ffmpeg_path, "-y",
"-i", os.path.join(staging_dir, output_image), "-i", output_image_path,
"-vf", "scale=300:-1", "-vf", "scale=300:-1",
"-vframes", "1", "-vframes", "1",
thumbnail_path thumbnail_path
@@ -77,7 +80,7 @@ class ExtractReview(pype.api.Extractor):
mov_path = os.path.join(staging_dir, "review.mov") mov_path = os.path.join(staging_dir, "review.mov")
args = [ args = [
ffmpeg_path, "-y", ffmpeg_path, "-y",
"-i", os.path.join(staging_dir, output_image), "-i", output_image_path,
"-vf", "pad=ceil(iw/2)*2:ceil(ih/2)*2", "-vf", "pad=ceil(iw/2)*2:ceil(ih/2)*2",
"-vframes", "1", "-vframes", "1",
mov_path mov_path

View file

@@ -528,6 +528,9 @@ def burnins_from_data(
if pix_fmt: if pix_fmt:
ffmpeg_args.append("-pix_fmt {}".format(pix_fmt)) ffmpeg_args.append("-pix_fmt {}".format(pix_fmt))
# Use group one (same as `-intra` argument, which is deprecated)
ffmpeg_args.append("-g 1")
ffmpeg_args_str = " ".join(ffmpeg_args) ffmpeg_args_str = " ".join(ffmpeg_args)
burnin.render( burnin.render(
output_path, args=ffmpeg_args_str, overwrite=overwrite, **data output_path, args=ffmpeg_args_str, overwrite=overwrite, **data
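
`-g 1` sets the GOP size to one so every frame is encoded as a keyframe (the modern replacement for the deprecated `-intra` flag), which keeps burn-in movies frame-accurate when scrubbing. The assembled arguments end up roughly like this (codec and pixel format are hypothetical):

    # Rough shape of the final argument string (codec and pixel format
    # are hypothetical).
    ffmpeg_args = ["-codec:v libx264", "-pix_fmt yuv420p"]
    ffmpeg_args.append("-g 1")  # GOP of 1: every frame is a keyframe
    ffmpeg_args_str = " ".join(ffmpeg_args)

    print(ffmpeg_args_str)  # -codec:v libx264 -pix_fmt yuv420p -g 1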

View file

@@ -1 +1 @@
__version__ = "2.11.0" __version__ = "2.11.3"