Merge branch 'develop' into feature/nuke_in_pype3

This commit is contained in:
Milan Kolar 2021-01-22 12:14:12 +01:00
commit af12c6ddc0
16 changed files with 724 additions and 138 deletions

View file

@ -68,8 +68,8 @@ class CreateRenderPass(pipeline.Creator):
self.data["render_layer"] = render_layer
# Collect selected layer ids to be stored into instance
layer_ids = [layer["layer_id"] for layer in selected_layers]
self.data["layer_ids"] = layer_ids
layer_names = [layer["name"] for layer in selected_layers]
self.data["layer_names"] = layer_names
# Replace `beauty` in beauty's subset name with entered name
subset_name = self.subset_template.format(**{

View file

@ -1,3 +1,4 @@
import collections
from avalon.pipeline import get_representation_context
from avalon.vendor import qargparse
from avalon.tvpaint import lib, pipeline
@ -15,7 +16,7 @@ class LoadImage(pipeline.Loader):
color = "white"
import_script = (
"filepath = \"{}\"\n"
"filepath = '\"'\"{}\"'\"'\n"
"layer_name = \"{}\"\n"
"tv_loadsequence filepath {}PARSE layer_id\n"
"tv_layerrename layer_id layer_name"
@ -92,30 +93,55 @@ class LoadImage(pipeline.Loader):
"Loading probably failed during execution of george script."
)
layer_ids = [loaded_layer["layer_id"]]
layer_names = [loaded_layer["name"]]
namespace = namespace or layer_name
return pipeline.containerise(
name=name,
namespace=namespace,
layer_ids=layer_ids,
members=layer_names,
context=context,
loader=self.__class__.__name__
)
def _remove_layers(self, layer_ids, layers=None):
if not layer_ids:
self.log.warning("Got empty layer ids list.")
def _remove_layers(self, layer_names=None, layer_ids=None, layers=None):
if not layer_names and not layer_ids:
self.log.warning("Got empty layer names list.")
return
if layers is None:
layers = lib.layers_data()
available_ids = set(layer["layer_id"] for layer in layers)
layer_ids_to_remove = []
for layer_id in layer_ids:
if layer_id in available_ids:
layer_ids_to_remove.append(layer_id)
if layer_ids is None:
# Backwards compatibility (layer ids were stored instead of names)
layer_names_are_ids = True
for layer_name in layer_names:
if (
not isinstance(layer_name, int)
and not layer_name.isnumeric()
):
layer_names_are_ids = False
break
if layer_names_are_ids:
layer_ids = layer_names
layer_ids_to_remove = []
if layer_ids is not None:
for layer_id in layer_ids:
if layer_id in available_ids:
layer_ids_to_remove.append(layer_id)
else:
layers_by_name = collections.defaultdict(list)
for layer in layers:
layers_by_name[layer["name"]].append(layer)
for layer_name in layer_names:
layers = layers_by_name[layer_name]
if len(layers) == 1:
layer_ids_to_remove.append(layers[0]["layer_id"])
if not layer_ids_to_remove:
self.log.warning("No layers to delete.")
@ -128,16 +154,19 @@ class LoadImage(pipeline.Loader):
george_script = "\n".join(george_script_lines)
lib.execute_george_through_file(george_script)
def remove(self, container):
layer_ids = self.layer_ids_from_container(container)
self.log.warning("Layers to delete {}".format(layer_ids))
self._remove_layers(layer_ids)
def _remove_container(self, container, members=None):
if not container:
return
representation = container["representation"]
members = self.get_members_from_container(container)
current_containers = pipeline.ls()
pop_idx = None
for idx, cur_con in enumerate(current_containers):
cur_con_layer_ids = self.layer_ids_from_container(cur_con)
if cur_con_layer_ids == layer_ids:
cur_members = self.get_members_from_container(cur_con)
if (
cur_members == members
and cur_con["representation"] == representation
):
pop_idx = idx
break
@ -154,6 +183,12 @@ class LoadImage(pipeline.Loader):
pipeline.SECTION_NAME_CONTAINERS, current_containers
)
def remove(self, container):
members = self.get_members_from_container(container)
self.log.warning("Layers to delete {}".format(members))
self._remove_layers(members)
self._remove_container(container)
def switch(self, container, representation):
self.update(container, representation)
@ -166,39 +201,41 @@ class LoadImage(pipeline.Loader):
"""
# Create new containers first
context = get_representation_context(representation)
# Change `fname` to new representation
self.fname = self.filepath_from_context(context)
name = container["name"]
namespace = container["namespace"]
new_container = self.load(context, name, namespace, {})
new_layer_ids = self.layer_ids_from_container(new_container)
# Get layer ids from previous container
old_layer_ids = self.layer_ids_from_container(container)
old_layer_names = self.get_members_from_container(container)
layers = lib.layers_data()
layers_by_id = {
layer["layer_id"]: layer
for layer in layers
}
# Backwards compatibility (layer ids were stored instead of names)
old_layers_are_ids = True
for name in old_layer_names:
if isinstance(name, int) or name.isnumeric():
continue
old_layers_are_ids = False
break
old_layers = []
new_layers = []
for layer_id in old_layer_ids:
layer = layers_by_id.get(layer_id)
if layer:
old_layers.append(layer)
layers = lib.layers_data()
previous_layer_ids = set(layer["layer_id"] for layer in layers)
if old_layers_are_ids:
for layer in layers:
if layer["layer_id"] in old_layer_names:
old_layers.append(layer)
else:
layers_by_name = collections.defaultdict(list)
for layer in layers:
layers_by_name[layer["name"]].append(layer)
for layer_id in new_layer_ids:
layer = layers_by_id.get(layer_id)
if layer:
new_layers.append(layer)
for layer_name in old_layer_names:
layers = layers_by_name[layer_name]
if len(layers) == 1:
old_layers.append(layers[0])
# Prepare few data
new_start_position = None
new_group_id = None
layer_ids_to_remove = set()
for layer in old_layers:
layer_ids_to_remove.add(layer["layer_id"])
position = layer["position"]
group_id = layer["group_id"]
if new_start_position is None:
@ -213,6 +250,28 @@ class LoadImage(pipeline.Loader):
elif new_group_id != group_id:
new_group_id = -1
# Remove old container
self._remove_container(container)
# Remove old layers
self._remove_layers(layer_ids=layer_ids_to_remove)
# Change `fname` to new representation
self.fname = self.filepath_from_context(context)
name = container["name"]
namespace = container["namespace"]
new_container = self.load(context, name, namespace, {})
new_layer_names = self.get_members_from_container(new_container)
layers = lib.layers_data()
new_layers = []
for layer in layers:
if layer["layer_id"] in previous_layer_ids:
continue
if layer["name"] in new_layer_names:
new_layers.append(layer)
george_script_lines = []
# Group new layers to same group as previous container layers had
# - all old layers must be under same group
@ -246,6 +305,3 @@ class LoadImage(pipeline.Loader):
if george_script_lines:
george_script = "\n".join(george_script_lines)
lib.execute_george_through_file(george_script)
# Remove old container
self.remove(container)

View file

@ -0,0 +1,119 @@
import os
import tempfile
from avalon.tvpaint import lib, pipeline
class ImportSound(pipeline.Loader):
    """Load sound to TVPaint.

    Sound layers do not have ids but only a position index, so we can't
    reference them afterwards - we can't say which layer is which input.

    We might do that (in future) by input path, which may be an identifier
    if we'll allow only one loaded instance of the representation as an
    audio.

    This plugin does not work for all versions of TVPaint. Known working
    version is TVPaint 11.0.10 .

    It is allowed to load video files as sound but it does not check if video
    file contain any audio.
    """

    families = ["audio", "review", "plate"]
    representations = ["*"]

    label = "Import Sound"
    order = 1
    icon = "image"
    color = "white"

    # George script template - formatted with (sound path, output path).
    # The script writes "key|value" result lines into the output file.
    import_script_lines = (
        "sound_path = '\"'\"{}\"'\"'",
        "output_path = \"{}\"",
        # Try to get sound clip info to check if we are in TVPaint that can
        # load sound
        "tv_clipcurrentid",
        "clip_id = result",
        "tv_soundclipinfo clip_id 0",
        # Empty result -> this TVPaint can't query sound clips; write an
        # empty "success" value and stop the script
        "IF CMP(result,\"\")==1",
        (
            "tv_writetextfile \"strict\" \"append\" '\"'output_path'\"'"
            " 'success|'"
        ),
        "EXIT",
        "END",
        # Load the sound and store the command's result code
        "tv_soundclipnew sound_path",
        "line = 'success|'result",
        "tv_writetextfile \"strict\" \"append\" '\"'output_path'\"' line"
    )

    def load(self, context, name, namespace, options):
        """Import sound file of the representation into TVPaint.

        Communicates with TVPaint through a George script which writes its
        result into a temporary text file that is parsed afterwards.

        Raises:
            ValueError: When loading failed or the TVPaint version does not
                support loading of sound through George script.
        """
        # Create temp file for output of the George script
        output_file = tempfile.NamedTemporaryFile(
            mode="w", prefix="pype_tvp_", suffix=".txt", delete=False
        )
        output_file.close()
        # George scripts expect forward slashes in paths
        output_filepath = output_file.name.replace("\\", "/")

        # Prepare george script
        import_script = "\n".join(self.import_script_lines)
        george_script = import_script.format(
            self.fname.replace("\\", "/"),
            output_filepath
        )
        self.log.info("*** George script:\n{}\n***".format(george_script))
        # Execute george script
        lib.execute_george_through_file(george_script)

        # Read non-empty lines from the output file
        lines = []
        with open(output_filepath, "r") as file_stream:
            for line in file_stream:
                line = line.rstrip()
                if line:
                    lines.append(line)
        # Clean up temp file
        os.remove(output_filepath)

        # Parse "key|value" pairs written by the George script
        output = {}
        for line in lines:
            key, value = line.split("|")
            output[key] = value

        success = output.get("success")
        # Successfully loaded sound
        if success == "0":
            return

        # Empty value means the version check branch of the script fired
        if success == "":
            raise ValueError(
                "Your TVPaint version does not support loading of"
                " sound through George script. Please use manual load."
            )

        # No value at all - the script did not write its result
        if success is None:
            raise ValueError(
                "Unknown error happened during load."
                " Please report and try to use manual load."
            )

        # Possible errors by TVPaint documentation
        # https://www.tvpaint.com/doc/tvpaint-animation-11/george-commands#tv_soundclipnew
        if success == "-1":
            raise ValueError(
                "BUG: George command did not get enough arguments."
            )

        if success == "-2":
            # Meaning of this code is unclear - TODO confirm against
            # TVPaint documentation
            raise ValueError("No current clip without mixer.")

        if success == "-3":
            raise ValueError("TVPaint couldn't read the file.")

        if success == "-4":
            raise ValueError("TVPaint couldn't add the track.")

        raise ValueError("BUG: Unknown success value {}.".format(success))

View file

@ -148,17 +148,44 @@ class CollectInstances(pyblish.api.ContextPlugin):
))
layers_data = context.data["layersData"]
layers_by_id = {
layer["layer_id"]: layer
layers_by_name = {
layer["name"]: layer
for layer in layers_data
}
layer_ids = instance_data["layer_ids"]
if "layer_names" in instance_data:
layer_names = instance_data["layer_names"]
else:
# Backwards compatibility
# - not 100% working as it was found out that layer ids can't be
# used as unified identifier across multiple workstations
layers_by_id = {
layer["id"]: layer
for layer in layers_data
}
layer_ids = instance_data["layer_ids"]
layer_names = []
for layer_id in layer_ids:
layer = layers_by_id.get(layer_id)
if layer:
layer_names.append(layer["name"])
if not layer_names:
raise ValueError((
"Metadata contain old way of storing layers information."
" It is not possible to identify layers to publish with"
" these data. Please remove Render Pass instances with"
" Subset manager and use Creator tool to recreate them."
))
render_pass_layers = []
for layer_id in layer_ids:
layer = layers_by_id.get(layer_id)
for layer_name in layer_names:
layer = layers_by_name.get(layer_name)
# NOTE This is kind of validation before validators?
if not layer:
self.log.warning(f"Layer with id {layer_id} was not found.")
self.log.warning(
f"Layer with name {layer_name} was not found."
)
continue
render_pass_layers.append(layer)

View file

@ -89,7 +89,15 @@ class CollectWorkfileData(pyblish.api.ContextPlugin):
# Collect information about layers
self.log.info("Collecting layers data from workfile")
layers_data = lib.layers_data()
layers_by_name = {}
for layer in layers_data:
layer_name = layer["name"]
if layer_name not in layers_by_name:
layers_by_name[layer_name] = []
layers_by_name[layer_name].append(layer)
context.data["layersData"] = layers_data
context.data["layersByName"] = layers_by_name
self.log.debug(
"Layers data:\"{}".format(json.dumps(layers_data, indent=4))
)

View file

@ -62,17 +62,20 @@ class ExtractSequence(pyblish.api.Extractor):
for layer in layers
if layer["visible"]
]
layer_ids = [str(layer["layer_id"]) for layer in filtered_layers]
if not layer_ids:
layer_names = [str(layer["name"]) for layer in filtered_layers]
if not layer_names:
self.log.info(
f"None of the layers from the instance"
" are visible. Extraction skipped."
)
return
joined_layer_names = ", ".join(
["\"{}\"".format(name) for name in layer_names]
)
self.log.debug(
"Instance has {} layers with ids: {}".format(
len(layer_ids), ", ".join(layer_ids)
"Instance has {} layers with names: {}".format(
len(layer_names), joined_layer_names
)
)
# This is plugin attribe cleanup method

View file

@ -0,0 +1,43 @@
import pyblish.api
class ValidateLayersGroup(pyblish.api.InstancePlugin):
    """Validate layer names for publishing are unique for whole workfile.

    TVPaint allows multiple layers with the same name. When a name marked
    for publishing occurs more than once it is impossible to tell which
    layer should be published, so the instance is invalid.
    """

    label = "Validate Duplicated Layers Names"
    order = pyblish.api.ValidatorOrder
    families = ["renderPass"]

    def process(self, instance):
        # Workfile layers grouped by their name
        layers_by_name = instance.context.data["layersByName"]
        # Layer names of an instance marked for publishing
        layer_names = instance.data["layer_names"]
        # Collect names which occur more than once in the workfile
        duplicated_layer_names = []
        for layer_name in layer_names:
            # `get` may return None when the layer is missing from the
            # workfile - that case is reported by a different validator,
            # so it is not treated as a duplication here (previously this
            # raised TypeError on `len(None)`).
            layers = layers_by_name.get(layer_name)
            if layers and len(layers) > 1:
                duplicated_layer_names.append(layer_name)

        # Everything is OK and skip exception
        if not duplicated_layer_names:
            return

        layers_msg = ", ".join([
            "\"{}\"".format(layer_name)
            for layer_name in duplicated_layer_names
        ])
        # Raise an error
        raise AssertionError(
            (
                "Layers have duplicated names for instance {}."
                # Description what's wrong
                " There are layers with same name and one of them is marked"
                " for publishing so it is not possible to know which should"
                " be published. Please look for layers with names: {}"
            ).format(instance.data["label"], layers_msg)
        )

View file

@ -0,0 +1,42 @@
import pyblish.api
class ValidateMissingLayers(pyblish.api.InstancePlugin):
    """Validate existence of renderPass layers."""

    label = "Validate Missing Layers Names"
    order = pyblish.api.ValidatorOrder
    families = ["renderPass"]

    def process(self, instance):
        # Workfile layers grouped by their name
        layers_by_name = instance.context.data["layersByName"]
        # Layer names the instance wants to publish
        layer_names = instance.data["layer_names"]

        # Names for which no layer exists in the workfile
        missing_layer_names = [
            layer_name
            for layer_name in layer_names
            if not layers_by_name.get(layer_name)
        ]
        # All layers were found - nothing to report
        if not missing_layer_names:
            return

        layers_msg = ", ".join(
            "\"{}\"".format(layer_name)
            for layer_name in missing_layer_names
        )
        raise AssertionError(
            (
                "Layers were not found by name for instance \"{}\"."
                " Layer names marked for publishing are not available"
                " in layers list. Missing layer names: {}"
            ).format(instance.data["label"], layers_msg)
        )

View file

@ -9,25 +9,25 @@ class ValidateLayersGroup(pyblish.api.InstancePlugin):
"""
label = "Validate Layers Group"
order = pyblish.api.ValidatorOrder
order = pyblish.api.ValidatorOrder + 0.1
families = ["renderPass"]
def process(self, instance):
# Prepare layers
layers_data = instance.context.data["layersData"]
layers_by_id = {
layer["layer_id"]: layer
layers_by_name = {
layer["name"]: layer
for layer in layers_data
}
# Expected group id for instance layers
group_id = instance.data["group_id"]
# Layers ids of an instance
layer_ids = instance.data["layer_ids"]
layer_names = instance.data["layer_names"]
# Check if all layers from render pass are in right group
invalid_layers_by_group_id = collections.defaultdict(list)
for layer_id in layer_ids:
layer = layers_by_id.get(layer_id)
for layer_name in layer_names:
layer = layers_by_name.get(layer_name)
_group_id = layer["group_id"]
if _group_id != group_id:
invalid_layers_by_group_id[_group_id].append(layer)

View file

@ -159,6 +159,15 @@ class Anatomy:
"""Return PYPE_ROOT_* environments for current project in dict."""
return self._roots_obj.root_environments()
def root_environmets_fill_data(self, template=None):
"""Environment variable values in dictionary for rootless path.
Args:
template (str): Template for environment variable key fill.
By default is set to `"${}"`.
"""
return self.roots_obj.root_environmets_fill_data(template)
def find_root_template_from_path(self, *args, **kwargs):
"""Wrapper for Roots `find_root_template_from_path`."""
return self.roots_obj.find_root_template_from_path(*args, **kwargs)
@ -264,6 +273,78 @@ class Anatomy:
# NOTE does not care if there are different keys than "root"
return template_path.format(**{"root": self.roots})
@classmethod
def fill_root_with_path(cls, rootless_path, root_path):
"""Fill path without filled "root" key with passed path.
This is helper to fill root with different directory path than anatomy
has defined no matter if is single or multiroot.
Output path is same as input path if `rootless_path` does not contain
unfilled root key.
Args:
rootless_path (str): Path without filled "root" key. Example:
"{root[work]}/MyProject/..."
root_path (str): What should replace root key in `rootless_path`.
Returns:
str: Path with filled root.
"""
output = str(rootless_path)
for group in re.findall(cls.root_key_regex, rootless_path):
replacement = "{" + group + "}"
output = output.replace(replacement, root_path)
return output
def replace_root_with_env_key(self, filepath, template=None):
"""Replace root of path with environment key.
# Example:
## Project with roots:
```
{
"nas": {
"windows": P:/projects",
...
}
...
}
```
## Entered filepath
"P:/projects/project/asset/task/animation_v001.ma"
## Entered template
"<{}>"
## Output
"<PYPE_PROJECT_ROOT_NAS>/project/asset/task/animation_v001.ma"
Args:
filepath (str): Full file path where root should be replaced.
template (str): Optional template for environment key. Must
have one index format key.
Default value if not entered: "${}"
Returns:
str: Path where root is replaced with environment root key.
Raise:
ValueError: When project's roots were not found in entered path.
"""
success, rootless_path = self.find_root_template_from_path(filepath)
if not success:
raise ValueError(
"{}: Project's roots were not found in path: {}".format(
self.project_name, filepath
)
)
data = self.root_environmets_fill_data(template)
return rootless_path.format(**data)
class TemplateMissingKey(Exception):
"""Exception for cases when key does not exist in Anatomy."""
@ -1245,6 +1326,10 @@ class RootItem:
root_paths = list(self.cleaned_data.values())
mod_path = self.clean_path(path)
for root_path in root_paths:
# Skip empty paths
if not root_path:
continue
if mod_path.startswith(root_path):
result = True
replacement = "{" + self.full_key() + "}"
@ -1435,6 +1520,41 @@ class Roots:
output.update(self._root_environments(_keys, _value))
return output
def root_environmets_fill_data(self, template=None):
"""Environment variable values in dictionary for rootless path.
Args:
template (str): Template for environment variable key fill.
By default is set to `"${}"`.
"""
if template is None:
template = "${}"
return self._root_environmets_fill_data(template)
def _root_environmets_fill_data(self, template, keys=None, roots=None):
if keys is None and roots is None:
return {
"root": self._root_environmets_fill_data(
template, [], self.roots
)
}
if isinstance(roots, RootItem):
key_items = [Roots.env_prefix]
for _key in keys:
key_items.append(_key.upper())
key = "_".join(key_items)
return template.format(key)
output = {}
for key, value in roots.items():
_keys = list(keys)
_keys.append(key)
output[key] = self._root_environmets_fill_data(
template, _keys, value
)
return output
@property
def project_name(self):
"""Return project name which will be used for loading root values."""

View file

@ -0,0 +1,128 @@
import json
from pype.modules.ftrack.lib import ServerAction
def clone_review_session(session, entity):
    """Create a copy of an existing ftrack review session.

    Clones the review session entity itself, re-creates all of its
    invitees and re-links all of its reviewed objects, then commits
    everything in one go.
    """
    # New review session in the same project, named after the source
    new_review_session = session.create(
        "ReviewSession",
        {
            "name": f"Clone of {entity['name']}",
            "project": entity["project"]
        }
    )

    # Re-create every invitee on the new session
    for invitee in entity["review_session_invitees"]:
        session.create(
            "ReviewSessionInvitee",
            {
                "name": invitee["name"],
                "email": invitee["email"],
                "review_session": new_review_session
            }
        )

    # Re-link every reviewed object to the new session
    for session_object in entity["review_session_objects"]:
        session.create(
            "ReviewSessionObject",
            {
                "name": session_object["name"],
                "version": session_object["version"],
                "review_session": new_review_session,
                "asset_version": session_object["asset_version"]
            }
        )

    session.commit()
class CloneReviewSession(ServerAction):
    '''Generate Client Review action.

    `label` a descriptive string identifying your action.
    `variant` To group actions together, give them the same
    label and specify a unique variant per action.
    `identifier` a unique identifier for your action.
    `description` a verbose descriptive text for your action.
    '''

    label = "Clone Review Session"
    variant = None
    identifier = "clone-review-session"
    description = None
    settings_key = "clone_review_session"

    def discover(self, session, entities, event):
        '''Return true if we can handle the selected entities.

        *session* is a `ftrack_api.Session` instance

        *entities* is a list of tuples each containing the entity type and
        the entity id. If the entity is a hierarchical you will always get
        the entity type TypedContext, once retrieved through a get operation
        you will have the "real" entity type ie. example Shot, Sequence
        or Asset Build.

        *event* the unmodified original event
        '''
        # Only a single selected ReviewSession can be cloned
        is_valid = (
            len(entities) == 1
            and entities[0].entity_type == "ReviewSession"
        )
        if is_valid:
            # Role filtering from action settings
            is_valid = self.valid_roles(session, entities, event)
        return is_valid

    def launch(self, session, entities, event):
        '''Callback method for the custom action.

        Return either a bool (True if successful or False if the action
        failed) or a dictionary with the keys `message` and `success`, the
        message should be a string and will be displayed as feedback to the
        user, success should be a bool, True if successful or False if the
        action failed.

        *session* is a `ftrack_api.Session` instance

        *entities* is a list of tuples each containing the entity type and
        the entity id.

        *event* the unmodified original event
        '''
        user_id = event["source"]["user"]["id"]
        user = session.query("User where id is " + user_id).one()

        # Create a running job so the user sees progress in ftrack
        job = session.create(
            "Job",
            {
                "user": user,
                "status": "running",
                "data": json.dumps({
                    "description": "Cloning Review Session."
                })
            }
        )
        session.commit()

        try:
            clone_review_session(session, entities[0])
            job["status"] = "done"
            session.commit()
        except Exception:
            session.rollback()
            job["status"] = "failed"
            session.commit()
            # Fixed: previous message contained an unfilled "{}" placeholder
            # (logging does not format positional braces this way)
            self.log.error(
                "Cloning review session failed.", exc_info=True
            )

        # NOTE(review): success is reported even when cloning failed - the
        # job status carries the failure; confirm this is intended.
        return {
            "success": True,
            "message": "Action completed successfully"
        }
def register(session):
    '''Register action. Called when used as an event plugin.

    Args:
        session: `ftrack_api.Session` instance the action listens on.
    '''
    CloneReviewSession(session).register()

View file

@ -26,6 +26,14 @@
"Project Manager"
]
},
"clone_review_session": {
"enabled": true,
"role_list": [
"Pypeclub",
"Administrator",
"Project Manager"
]
},
"thumbnail_updates": {
"enabled": true,
"levels": 1

View file

@ -119,14 +119,11 @@ class ActionModel(QtGui.QStandardItemModel):
self.application_manager = ApplicationManager()
self._session = {}
self._groups = {}
self.default_icon = qtawesome.icon("fa.cube", color="white")
# Cache of available actions
self._registered_actions = list()
self.discover()
def discover(self):
"""Set up Actions cache. Run this for each new project."""
# Discover all registered actions
@ -178,14 +175,12 @@ class ActionModel(QtGui.QStandardItemModel):
return self.default_icon
return icon
def refresh(self):
def filter_actions(self):
# Validate actions based on compatibility
self.clear()
self._groups.clear()
self.discover()
actions = self.filter_compatible_actions(self._registered_actions)
self.beginResetModel()
@ -237,14 +232,17 @@ class ActionModel(QtGui.QStandardItemModel):
if icon is None:
icon = self.default_icon
item = QtGui.QStandardItem(icon, action.label)
item = QtGui.QStandardItem(icon, label)
item.setData(label, QtCore.Qt.ToolTipRole)
item.setData(actions, self.ACTION_ROLE)
item.setData(True, self.VARIANT_GROUP_ROLE)
items_by_order[order].append(item)
for action in single_actions:
icon = self.get_icon(action)
item = QtGui.QStandardItem(icon, lib.get_action_label(action))
label = lib.get_action_label(action)
item = QtGui.QStandardItem(icon, label)
item.setData(label, QtCore.Qt.ToolTipRole)
item.setData(action, self.ACTION_ROLE)
items_by_order[action.order].append(item)
@ -275,11 +273,6 @@ class ActionModel(QtGui.QStandardItemModel):
self.endResetModel()
def set_session(self, session):
assert isinstance(session, dict)
self._session = copy.deepcopy(session)
self.refresh()
def filter_compatible_actions(self, actions):
"""Collect all actions which are compatible with the environment
@ -294,8 +287,15 @@ class ActionModel(QtGui.QStandardItemModel):
"""
compatible = []
_session = copy.deepcopy(self.dbcon.Session)
session = {
key: value
for key, value in _session.items()
if value
}
for action in actions:
if action().is_compatible(self._session):
if action().is_compatible(session):
compatible.append(action)
# Sort by order and name

View file

@ -123,6 +123,12 @@ class ActionBar(QtWidgets.QWidget):
view.clicked.connect(self.on_clicked)
def discover_actions(self):
self.model.discover()
def filter_actions(self):
self.model.filter_actions()
def set_row_height(self, rows):
self.setMinimumHeight(rows * 75)

View file

@ -12,7 +12,11 @@ from avalon.tools.widgets import AssetWidget
from avalon.vendor import qtawesome
from .models import ProjectModel
from .widgets import (
ProjectBar, ActionBar, TasksWidget, ActionHistory, SlidePageWidget
ProjectBar,
ActionBar,
TasksWidget,
ActionHistory,
SlidePageWidget
)
from .flickcharm import FlickCharm
@ -119,6 +123,7 @@ class ProjectsPanel(QtWidgets.QWidget):
class AssetsPanel(QtWidgets.QWidget):
"""Assets page"""
back_clicked = QtCore.Signal()
session_changed = QtCore.Signal()
def __init__(self, dbcon, parent=None):
super(AssetsPanel, self).__init__(parent=parent)
@ -187,6 +192,8 @@ class AssetsPanel(QtWidgets.QWidget):
project_bar.project_changed.connect(self.on_project_changed)
assets_widget.selection_changed.connect(self.on_asset_changed)
assets_widget.refreshed.connect(self.on_asset_changed)
tasks_widget.task_changed.connect(self.on_task_change)
btn_back.clicked.connect(self.back_clicked)
# Force initial refresh for the assets since we might not be
@ -197,14 +204,19 @@ class AssetsPanel(QtWidgets.QWidget):
def set_project(self, project):
before = self.project_bar.get_current_project()
self.project_bar.set_project(project)
if project == before:
# Force a refresh on the assets if the project hasn't changed
if before == project:
self.assets_widget.refresh()
return
self.project_bar.set_project(project)
self.on_project_changed()
def on_project_changed(self):
project_name = self.project_bar.get_current_project()
self.dbcon.Session["AVALON_PROJECT"] = project_name
self.session_changed.emit()
self.assets_widget.refresh()
def on_asset_changed(self):
@ -216,28 +228,41 @@ class AssetsPanel(QtWidgets.QWidget):
print("Asset changed..")
asset_doc = self.assets_widget.get_active_asset_document()
if asset_doc:
self.tasks_widget.set_asset(asset_doc["_id"])
else:
self.tasks_widget.set_asset(None)
asset_name = None
asset_silo = None
def get_current_session(self):
# Check asset on current index and selected assets
asset_doc = self.assets_widget.get_active_asset_document()
session = copy.deepcopy(self.dbcon.Session)
# Clear some values that we are about to collect if available
session.pop("AVALON_SILO", None)
session.pop("AVALON_ASSET", None)
session.pop("AVALON_TASK", None)
selected_asset_docs = self.assets_widget.get_selected_assets()
# If there are not asset selected docs then active asset is not
# selected
if not selected_asset_docs:
asset_doc = None
elif asset_doc:
# If selected asset doc and current asset are not same than
# something bad happened
if selected_asset_docs[0]["_id"] != asset_doc["_id"]:
asset_doc = None
if asset_doc:
session["AVALON_ASSET"] = asset_doc["name"]
task_name = self.tasks_widget.get_current_task()
if task_name:
session["AVALON_TASK"] = task_name
asset_name = asset_doc["name"]
asset_silo = asset_doc.get("silo")
return session
self.dbcon.Session["AVALON_TASK"] = None
self.dbcon.Session["AVALON_ASSET"] = asset_name
self.dbcon.Session["AVALON_SILO"] = asset_silo
self.session_changed.emit()
asset_id = None
if asset_doc:
asset_id = asset_doc["_id"]
self.tasks_widget.set_asset(asset_id)
def on_task_change(self):
task_name = self.tasks_widget.get_current_task()
self.dbcon.Session["AVALON_TASK"] = task_name
self.session_changed.emit()
class LauncherWindow(QtWidgets.QDialog):
@ -323,14 +348,7 @@ class LauncherWindow(QtWidgets.QDialog):
action_history.trigger_history.connect(self.on_history_action)
project_panel.project_clicked.connect(self.on_project_clicked)
asset_panel.back_clicked.connect(self.on_back_clicked)
# Add some signals to propagate from the asset panel
for signal in (
asset_panel.project_bar.project_changed,
asset_panel.assets_widget.selection_changed,
asset_panel.tasks_widget.task_changed
):
signal.connect(self.on_session_changed)
asset_panel.session_changed.connect(self.on_session_changed)
# todo: Simplify this callback connection
asset_panel.project_bar.project_changed.connect(
@ -339,6 +357,11 @@ class LauncherWindow(QtWidgets.QDialog):
self.resize(520, 740)
def showEvent(self, event):
super().showEvent(event)
# TODO implement refresh/reset which will trigger updating
self.discover_actions()
def set_page(self, page):
current = self.page_slider.currentIndex()
if current == page and self._page == page:
@ -348,10 +371,6 @@ class LauncherWindow(QtWidgets.QDialog):
self._page = page
self.page_slider.slide_view(page, direction=direction)
def refresh(self):
self.asset_panel.assets_widget.refresh()
self.refresh_actions()
def echo(self, message):
self.message_label.setText(str(message))
QtCore.QTimer.singleShot(5000, lambda: self.message_label.setText(""))
@ -362,30 +381,30 @@ class LauncherWindow(QtWidgets.QDialog):
self.dbcon.Session["AVALON_PROJECT"] = project_name
# Update the Action plug-ins available for the current project
self.actions_bar.model.discover()
self.discover_actions()
def on_session_changed(self):
self.refresh_actions()
self.filter_actions()
def refresh_actions(self, delay=1):
tools_lib.schedule(self.on_refresh_actions, delay)
def discover_actions(self):
self.actions_bar.discover_actions()
self.filter_actions()
def filter_actions(self):
self.actions_bar.filter_actions()
def on_project_clicked(self, project_name):
self.dbcon.Session["AVALON_PROJECT"] = project_name
# Refresh projects
self.asset_panel.set_project(project_name)
self.set_page(1)
self.refresh_actions()
self.discover_actions()
def on_back_clicked(self):
self.dbcon.Session["AVALON_PROJECT"] = None
self.set_page(0)
self.project_panel.model.refresh() # Refresh projects
self.refresh_actions()
def on_refresh_actions(self):
session = self.get_current_session()
self.actions_bar.model.set_session(session)
self.actions_bar.model.refresh()
self.discover_actions()
def on_action_clicked(self, action):
self.echo("Running action: {}".format(action.name))
@ -404,33 +423,21 @@ class LauncherWindow(QtWidgets.QDialog):
# User is holding control, rerun the action
self.run_action(action, session=session)
def get_current_session(self):
if self._page == 1:
# Assets page
return self.asset_panel.get_current_session()
session = copy.deepcopy(self.dbcon.Session)
# Remove some potential invalid session values
# that we know are not set when not browsing in
# a project.
session.pop("AVALON_PROJECT", None)
session.pop("AVALON_ASSET", None)
session.pop("AVALON_SILO", None)
session.pop("AVALON_TASK", None)
return session
def run_action(self, action, session=None):
if session is None:
session = self.get_current_session()
session = copy.deepcopy(self.dbcon.Session)
filtered_session = {
key: value
for key, value in session.items()
if value
}
# Add to history
self.action_history.add_action(action, session)
self.action_history.add_action(action, filtered_session)
# Process the Action
try:
action().process(session)
action().process(filtered_session)
except Exception as exc:
self.log.warning("Action launch failed.", exc_info=True)
self.echo("Failed: {}".format(str(exc)))

View file

@ -104,6 +104,25 @@
}
]
},
{
"type": "dict",
"key": "clone_review_session",
"label": "Clone Review Session",
"checkbox_key": "enabled",
"children": [
{
"type": "boolean",
"key": "enabled",
"label": "Enabled"
},
{
"type": "list",
"key": "role_list",
"label": "Roles for action",
"object_type": "text"
}
]
},
{
"type": "dict",
"key": "thumbnail_updates",