Merge branch 'develop' into 3.0/refactoring

This commit is contained in:
Ondřej Samohel 2020-09-10 10:20:43 +02:00 committed by GitHub
commit ac9bc33e3e
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
120 changed files with 5567 additions and 2890 deletions

View file

@ -41,6 +41,7 @@ from .lib import (
get_last_version_from_path,
modified_environ,
add_tool_to_environment,
source_hash,
get_latest_version
)
@ -59,6 +60,7 @@ __all__ = [
# Resources
"resources",
# plugin classes
"Extractor",
# ordering
@ -85,6 +87,7 @@ __all__ = [
"get_last_version_from_path",
"modified_environ",
"add_tool_to_environment",
"source_hash",
"subprocess",
"get_latest_version"

View file

@ -1,7 +1,6 @@
import os
import re
import sys
import getpass
from collections import OrderedDict
from avalon import api, io, lib
@ -1060,310 +1059,6 @@ def get_write_node_template_attr(node):
return avalon.nuke.lib.fix_data_for_node_create(correct_data)
class BuildWorkfile(WorkfileSettings):
    """Build the first version of a workfile.

    Settings are taken from presets and the database. All subsets are
    loaded in their last version for the defined representations.
    """

    # Node-placement cursor (Nuke DAG coordinates) and layout constants.
    xpos = 0
    ypos = 0
    xpos_size = 80
    ypos_size = 90
    xpos_gap = 50
    ypos_gap = 50
    pos_layer = 10

    def __init__(self,
                 root_path=None,
                 root_node=None,
                 nodes=None,
                 to_script=None,
                 **kwargs):
        """Collect context data and resolve the workfile path.

        Arguments:
            root_path (str): currently unused; kept for backward
                compatibility of the signature
            root_node (nuke.Node): root node, forwarded to WorkfileSettings
            nodes (list): list of nuke.Node, forwarded to WorkfileSettings
            to_script (str): when set, `process` skips saving a new script

        Example:
            nodes_effects = {
                "plateMain": {
                    "nodes": [
                        [("Class", "Reformat"),
                         ("resize", "distort"),
                         ("flip", True)],
                        [("Class", "Grade"),
                         ("blackpoint", 0.5),
                         ("multiply", 0.4)]
                    ]
                },
            }
        """
        WorkfileSettings.__init__(self,
                                  root_node=root_node,
                                  nodes=nodes,
                                  **kwargs)
        self.to_script = to_script

        # Collect data for template formatting.
        self.data_tmp = {
            "project": {"name": self._project["name"],
                        "code": self._project["data"].get("code", "")},
            "asset": self._asset or os.environ["AVALON_ASSET"],
            "task": kwargs.get("task") or api.Session["AVALON_TASK"],
            "hierarchy": kwargs.get("hierarchy") or pype.get_hierarchy(),
            "version": kwargs.get("version", {}).get("name", 1),
            "user": getpass.getuser(),
            "comment": "firstBuild",
            "ext": "nk"
        }

        # Resolve workfile directory and file name from anatomy presets.
        anatomy = get_anatomy()
        anatomy_filled = anatomy.format(self.data_tmp)
        self.work_dir = anatomy_filled["work"]["folder"]
        self.work_file = anatomy_filled["work"]["file"]

    def save_script_as(self, path=None):
        """Save the current script to `path` (default: anatomy workfile)."""
        # First clear anything in the open window.
        nuke.scriptClear()

        if not path:
            # `dir` renamed to avoid shadowing the builtin.
            dir_path = self.work_dir
            path = os.path.join(
                self.work_dir,
                self.work_file).replace("\\", "/")
        else:
            dir_path = os.path.dirname(path)

        # Make sure the destination folder exists.
        if not os.path.exists(dir_path):
            os.makedirs(dir_path)

        # Save script to path.
        nuke.scriptSaveAs(path)

    def process(self,
                regex_filter=None,
                version=None,
                representations=None):
        """Build the workfile: viewer, render write and loaded reads.

        Args:
            regex_filter (raw string): regex pattern to filter out subsets
            version (int): particular version; None gets the last one
            representations (list): representation names to load; defaults
                to common image/video formats

        Raises:
            AssertionError: from `create_backdrop` when nodes are invalid
        """
        if representations is None:
            # Default kept out of the signature to avoid a shared
            # mutable default argument.
            representations = ["exr", "dpx", "lutJson", "mov",
                               "preview", "png", "jpeg", "jpg"]

        if not self.to_script:
            # Save the script first.
            self.save_script_as()

        # Create a viewer (or reuse the last existing one).
        viewer = self.get_nodes(nodes_filter=["Viewer"])
        if not viewer:
            vn = nuke.createNode("Viewer")
            vn["xpos"].setValue(self.xpos)
            vn["ypos"].setValue(self.ypos)
        else:
            vn = viewer[-1]

        # Move position.
        self.position_up()

        wn = self.write_create()
        wn["xpos"].setValue(self.xpos)
        wn["ypos"].setValue(self.ypos)
        wn["render"].setValue(True)
        vn.setInput(0, wn)

        # Adding backdrop under write.
        self.create_backdrop(label="Render write \n\n\n\nOUTPUT",
                             color='0xcc1102ff', layer=-1,
                             nodes=[wn])

        # Move position.
        self.position_up(4)

        # Set frame range for new viewer.
        self.reset_frame_range_handles()

        # Get all available representations.
        subsets = pype.get_subsets(self._asset,
                                   regex_filter=regex_filter,
                                   version=version,
                                   representations=representations)

        # Debug dump of what was found.
        for name, subset in subsets.items():
            log.debug("___________________")
            log.debug(name)
            log.debug(subset["version"])

        nodes_backdrop = list()
        for name, subset in subsets.items():
            # Lut subsets are paired to their source subset below.
            if "lut" in name:
                continue
            log.info("Building Loader to: `{}`".format(name))
            version = subset["version"]
            log.info("Version to: `{}`".format(version["name"]))

            # NOTE: "representaions" (sic) matches the key produced
            # upstream by pype.get_subsets - do not "fix" the spelling.
            representations = subset["representaions"]
            for repre in representations:
                rn = self.read_loader(repre)
                rn["xpos"].setValue(self.xpos)
                rn["ypos"].setValue(self.ypos)
                wn.setInput(0, rn)

                # Get additional (lut) nodes paired by subset name.
                lut_subset = [s for n, s in subsets.items()
                              if "lut{}".format(name.lower()) in n.lower()]
                log.debug(">> lut_subset: `{}`".format(lut_subset))

                if len(lut_subset) > 0:
                    lsub = lut_subset[0]
                    fxn = self.effect_loader(lsub["representaions"][-1])
                    fxn_ypos = fxn["ypos"].value()
                    fxn["ypos"].setValue(fxn_ypos - 100)
                    nodes_backdrop.append(fxn)

                nodes_backdrop.append(rn)
                # Move position.
                self.position_right()

        # Adding backdrop under all read nodes.
        self.create_backdrop(label="Loaded Reads",
                             color='0x2d7702ff', layer=-1,
                             nodes=nodes_backdrop)

    def read_loader(self, representation):
        """Load `representation` with the matching Loader plugin.

        "LoadSequence" is used for image sequences, "LoadMov" for movs.

        Arguments:
            representation (dict): avalon db entity
        """
        context = representation["context"]
        loader_name = "LoadSequence"
        if "mov" in context["representation"]:
            loader_name = "LoadMov"

        loader_plugin = None
        for Loader in api.discover(api.Loader):
            if Loader.__name__ != loader_name:
                continue
            loader_plugin = Loader

        return api.load(Loader=loader_plugin,
                        representation=representation["_id"])

    def effect_loader(self, representation):
        """Load `representation` with the "LoadLuts" effect Loader.

        Arguments:
            representation (dict): avalon db entity
        """
        loader_name = "LoadLuts"
        loader_plugin = None
        for Loader in api.discover(api.Loader):
            if Loader.__name__ != loader_name:
                continue
            loader_plugin = Loader

        return api.load(Loader=loader_plugin,
                        representation=representation["_id"])

    def write_create(self):
        """Create the render write node via the "CreateWriteRender" creator.

        Returns:
            The node returned by the creator plugin's `process()`.
        """
        task = self.data_tmp["task"]
        # Strip non-alphanumeric characters so the subset name is valid.
        sanitized_task = re.sub('[^0-9a-zA-Z]+', '', task)
        subset_name = "render{}Main".format(
            sanitized_task.capitalize())

        creator_name = "CreateWriteRender"
        creator_plugin = None
        for Creator in api.discover(api.Creator):
            if Creator.__name__ != creator_name:
                continue
            creator_plugin = Creator

        return creator_plugin(subset_name, self._asset).process()

    def create_backdrop(self, label="", color=None, layer=0,
                        nodes=None):
        """Create a Backdrop node wrapping `nodes`.

        Arguments:
            color (str): nuke compatible string with color code
            layer (int): layer of node, usually (self.pos_layer - 1)
            label (str): the message
            nodes (list): list of nodes to be wrapped into backdrop
        """
        assert isinstance(nodes, list), "`nodes` should be a list of nodes"
        layer = self.pos_layer + layer
        create_backdrop(label=label, color=color, layer=layer, nodes=nodes)

    def position_reset(self, xpos=0, ypos=0):
        """Reset the placement cursor to the given coordinates."""
        self.xpos = xpos
        self.ypos = ypos

    def position_right(self, multiply=1):
        """Move the placement cursor `multiply` steps to the right."""
        self.xpos += (self.xpos_size * multiply) + self.xpos_gap

    def position_left(self, multiply=1):
        """Move the placement cursor `multiply` steps to the left."""
        self.xpos -= (self.xpos_size * multiply) + self.xpos_gap

    def position_down(self, multiply=1):
        """Move the placement cursor down (larger ypos in the Nuke DAG).

        Fix: this previously decremented ypos, a copy-paste duplicate of
        `position_up` that made "down" move up.
        """
        self.ypos += (self.ypos_size * multiply) + self.ypos_gap

    def position_up(self, multiply=1):
        """Move the placement cursor up (smaller ypos in the Nuke DAG)."""
        self.ypos -= (self.ypos_size * multiply) + self.ypos_gap
class ExporterReview:
"""
Base class object for generating review data from Nuke

View file

@ -2,10 +2,12 @@ import nuke
from avalon.api import Session
from pype.hosts.nuke import lib
from ...lib import BuildWorkfile
from pype.api import Logger
log = Logger().get_logger(__name__, "nuke")
def install():
menubar = nuke.menu("Nuke")
menu = menubar.findItem(Session["AVALON_LABEL"])
@ -20,7 +22,11 @@ def install():
log.debug("Changing Item: {}".format(rm_item))
# rm_item[1].setEnabled(False)
menu.removeItem(rm_item[1].name())
menu.addCommand(new_name, lambda: workfile_settings().reset_resolution(), index=(rm_item[0]))
menu.addCommand(
new_name,
lambda: workfile_settings().reset_resolution(),
index=(rm_item[0])
)
# replace reset frame range from avalon core to pype's
name = "Reset Frame Range"
@ -31,33 +37,38 @@ def install():
log.debug("Changing Item: {}".format(rm_item))
# rm_item[1].setEnabled(False)
menu.removeItem(rm_item[1].name())
menu.addCommand(new_name, lambda: workfile_settings().reset_frame_range_handles(), index=(rm_item[0]))
menu.addCommand(
new_name,
lambda: workfile_settings().reset_frame_range_handles(),
index=(rm_item[0])
)
# add colorspace menu item
name = "Set colorspace"
name = "Set Colorspace"
menu.addCommand(
name, lambda: workfile_settings().set_colorspace(),
index=(rm_item[0]+2)
index=(rm_item[0] + 2)
)
log.debug("Adding menu item: {}".format(name))
# add workfile builder menu item
name = "Build First Workfile.."
name = "Build Workfile"
menu.addCommand(
name, lambda: lib.BuildWorkfile().process(),
index=(rm_item[0]+7)
name, lambda: BuildWorkfile().process(),
index=(rm_item[0] + 7)
)
log.debug("Adding menu item: {}".format(name))
# add item that applies all setting above
name = "Apply all settings"
name = "Apply All Settings"
menu.addCommand(
name, lambda: workfile_settings().set_context_settings(), index=(rm_item[0]+3)
name,
lambda: workfile_settings().set_context_settings(),
index=(rm_item[0] + 3)
)
log.debug("Adding menu item: {}".format(name))
def uninstall():
menubar = nuke.menu("Nuke")

View file

@ -7,16 +7,19 @@ import json
import collections
import logging
import itertools
import copy
import contextlib
import subprocess
import getpass
import inspect
import acre
import platform
from abc import ABCMeta, abstractmethod
from avalon import io, pipeline
import six
import avalon.api
from .api import config
from .api import config, Anatomy
log = logging.getLogger(__name__)
@ -743,8 +746,9 @@ class PypeHook:
def get_linked_assets(asset_entity):
    """Return linked (input) asset documents for `asset_entity`.

    Args:
        asset_entity (dict): Asset document whose "data" dict may contain
            an "inputs" list of linked asset ids.

    Returns:
        list: Asset documents resolved via `io.find_one`; empty when the
            asset has no inputs.
    """
    # Fix: a leftover `return []` stub made the lookup below unreachable.
    input_ids = asset_entity["data"].get("inputs", [])
    return [io.find_one({"_id": input_id}) for input_id in input_ids]
def map_subsets_by_family(subsets):
@ -1383,6 +1387,27 @@ def ffprobe_streams(path_to_file):
return json.loads(popen_output)["streams"]
def source_hash(filepath, *args):
    """Generate a simple identifier for a source file.

    Used to identify whether a source file (e.g. a texture) was already
    processed into the pipeline. The identifier is built from the source
    file name, its modification time and its size, which only tells us a
    specific source file was already published from the same location
    with the same modification date. We opt for this over a content hash
    (e.g. Avalanche C4) because it is much faster and predictable enough
    for all our production use cases.

    Args:
        filepath (str): The source file path. Additional positional
            arguments are appended so callers can include specific
            'processing' values in the identifier.

    Returns:
        str: Identifier with every "." replaced by "," because "." cannot
            be part of a key in a pymongo document.
    """
    stats = os.stat(filepath)
    parts = [
        os.path.basename(filepath),
        str(stats.st_mtime),
        str(stats.st_size),
    ]
    parts.extend(args)
    return "|".join(parts).replace(".", ",")
def get_latest_version(asset_name, subset_name):
"""Retrieve latest version from `asset_name`, and `subset_name`.
@ -1418,3 +1443,207 @@ def get_latest_version(asset_name, subset_name):
assert version, "No version found, this is a bug"
return version
class ApplicationLaunchFailed(Exception):
    """Raised when an application could not be launched."""
def launch_application(project_name, asset_name, task_name, app_name):
    """Launch application `app_name` in a project/asset/task context.

    Prepares the work directory, the Avalon environment variables and the
    tool environment (acre), then starts the application through its
    platform launcher script.

    Args:
        project_name (str): Avalon project name.
        asset_name (str): Avalon asset name.
        task_name (str): Avalon task name.
        app_name (str): Application name as known to `avalon.lib`.

    Returns:
        The popen object returned by `avalon.lib.launch`.

    Raises:
        ApplicationLaunchFailed: on anatomy errors, failed launch hook,
            missing or inaccessible launcher.
    """
    database = get_avalon_database()
    project_document = database[project_name].find_one({"type": "project"})
    asset_document = database[project_name].find_one({
        "type": "asset",
        "name": asset_name
    })

    # NOTE(review): assumes both documents exist; a missing asset would
    # raise TypeError on the subscript below - confirm callers validate.
    asset_doc_parents = asset_document["data"].get("parents")
    hierarchy = "/".join(asset_doc_parents)

    app_def = avalon.lib.get_application(app_name)
    app_label = app_def.get("ftrack_label", app_def.get("label", app_name))

    host_name = app_def["application_dir"]
    # Context data used to fill the anatomy "work" template.
    data = {
        "project": {
            "name": project_document["name"],
            "code": project_document["data"].get("code")
        },
        "task": task_name,
        "asset": asset_name,
        "app": host_name,
        "hierarchy": hierarchy
    }

    try:
        anatomy = Anatomy(project_name)
        anatomy_filled = anatomy.format(data)
        workdir = os.path.normpath(anatomy_filled["work"]["folder"])

    except Exception as exc:
        raise ApplicationLaunchFailed(
            "Error in anatomy.format: {}".format(str(exc))
        )

    # Create the work directory; an existing one is fine.
    try:
        os.makedirs(workdir)
    except FileExistsError:
        pass

    last_workfile_path = None
    extensions = avalon.api.HOST_WORKFILE_EXTENSIONS.get(host_name)
    if extensions:
        # Find last workfile
        file_template = anatomy.templates["work"]["file"]
        data.update({
            "version": 1,
            "user": os.environ.get("PYPE_USERNAME") or getpass.getuser(),
            "ext": extensions[0]
        })

        last_workfile_path = avalon.api.last_workfile(
            workdir, file_template, data, extensions, True
        )

    # set environments for Avalon
    prep_env = copy.deepcopy(os.environ)
    prep_env.update({
        "AVALON_PROJECT": project_name,
        "AVALON_ASSET": asset_name,
        "AVALON_TASK": task_name,
        "AVALON_APP": host_name,
        "AVALON_APP_NAME": app_name,
        "AVALON_HIERARCHY": hierarchy,
        "AVALON_WORKDIR": workdir
    })

    start_last_workfile = avalon.api.should_start_last_workfile(
        project_name, host_name, task_name
    )
    # Store boolean as "0"(False) or "1"(True)
    prep_env["AVALON_OPEN_LAST_WORKFILE"] = (
        str(int(bool(start_last_workfile)))
    )

    if (
        start_last_workfile
        and last_workfile_path
        and os.path.exists(last_workfile_path)
    ):
        prep_env["AVALON_LAST_WORKFILE"] = last_workfile_path

    prep_env.update(anatomy.roots_obj.root_environments())

    # collect all the 'environment' attributes from parents
    tools_attr = [prep_env["AVALON_APP"], prep_env["AVALON_APP_NAME"]]
    tools_env = asset_document["data"].get("tools_env") or []
    tools_attr.extend(tools_env)

    # Resolve the tool environment via acre and merge it over the
    # prepared process environment.
    tools_env = acre.get_tools(tools_attr)
    env = acre.compute(tools_env)
    env = acre.merge(env, current_env=dict(prep_env))

    # Get path to execute
    st_temp_path = os.environ["PYPE_CONFIG"]
    os_plat = platform.system().lower()

    # Path to folder with launchers
    path = os.path.join(st_temp_path, "launchers", os_plat)
    # Full path to executable launcher
    execfile = None

    # Run the optional pre-launch hook; a falsy return aborts the launch.
    launch_hook = app_def.get("launch_hook")
    if launch_hook:
        log.info("launching hook: {}".format(launch_hook))
        ret_val = execute_hook(launch_hook, env=env)
        if not ret_val:
            raise ApplicationLaunchFailed(
                "Hook didn't finish successfully {}".format(app_label)
            )

    if sys.platform == "win32":
        # Try each PATHEXT extension until an executable file is found.
        for ext in os.environ["PATHEXT"].split(os.pathsep):
            fpath = os.path.join(path.strip('"'), app_def["executable"] + ext)
            if os.path.isfile(fpath) and os.access(fpath, os.X_OK):
                execfile = fpath
                break

        # Run SW if was found executable
        if execfile is None:
            raise ApplicationLaunchFailed(
                "We didn't find launcher for {}".format(app_label)
            )

        popen = avalon.lib.launch(
            executable=execfile, args=[], environment=env
        )
    elif (
        sys.platform.startswith("linux")
        or sys.platform.startswith("darwin")
    ):
        execfile = os.path.join(path.strip('"'), app_def["executable"])
        # NOTE(review): `execfile` was just assigned by os.path.join above,
        # so this None check can never trigger; the isfile check below is
        # the effective guard. Left as-is - confirm before removing.
        # Run SW if was found executable
        if execfile is None:
            raise ApplicationLaunchFailed(
                "We didn't find launcher for {}".format(app_label)
            )

        if not os.path.isfile(execfile):
            raise ApplicationLaunchFailed(
                "Launcher doesn't exist - {}".format(execfile)
            )

        # Probe readability; PermissionError means we cannot read the
        # launcher script at all.
        try:
            fp = open(execfile)
        except PermissionError as perm_exc:
            raise ApplicationLaunchFailed(
                "Access denied on launcher {} - {}".format(execfile, perm_exc)
            )

        fp.close()

        # check executable permission
        if not os.access(execfile, os.X_OK):
            raise ApplicationLaunchFailed(
                "No executable permission - {}".format(execfile)
            )

        popen = avalon.lib.launch(  # noqa: F841
            "/usr/bin/env", args=["bash", execfile], environment=env
        )
    # NOTE(review): on an unmatched platform `popen` is unbound here and
    # this raises UnboundLocalError - presumably only win/linux/darwin
    # are supported; confirm.
    return popen
class ApplicationAction(avalon.api.Action):
    """Default application launcher.

    This is a convenience application Action that when "config" refers to
    a parsed application `.toml` this can launch the application.
    """

    config = None
    group = None
    variant = None
    # Session keys that must all be present for this action to apply.
    required_session_keys = (
        "AVALON_PROJECT",
        "AVALON_ASSET",
        "AVALON_TASK"
    )

    def is_compatible(self, session):
        """Return True only when every required key is in `session`."""
        return all(
            key in session
            for key in self.required_session_keys
        )

    def process(self, session, **kwargs):
        """Process the full Application action"""
        return launch_application(
            session["AVALON_PROJECT"],
            session["AVALON_ASSET"],
            session["AVALON_TASK"],
            self.name
        )

View file

@ -1,8 +1,6 @@
from .io_nonsingleton import DbConnector
from .rest_api import AdobeRestApi, PUBLISH_PATHS
__all__ = [
"PUBLISH_PATHS",
"DbConnector",
"AdobeRestApi"
]

View file

@ -1,460 +0,0 @@
"""
Wrapper around interactions with the database
Copy of io module in avalon-core.
- In this case not working as singleton with api.Session!
"""
import os
import time
import errno
import shutil
import logging
import tempfile
import functools
import contextlib
from avalon import schema
from avalon.vendor import requests
from avalon.io import extract_port_from_url
# Third-party dependencies
import pymongo
def auto_reconnect(func):
    """Retry a pymongo operation up to 3 times on ``AutoReconnect``.

    The decorated callable must receive the connector instance as its
    first positional argument (used for logging on reconnect).

    Raises:
        pymongo.errors.AutoReconnect: the last error, when all 3
            attempts fail.
    """
    @functools.wraps(func)
    def decorated(*args, **kwargs):
        # First positional argument is the connector (has a `log`).
        connector = args[0]
        last_error = None
        for _retry in range(3):
            try:
                return func(*args, **kwargs)
            except pymongo.errors.AutoReconnect as exc:
                # Fix: the original used a bare `raise` in the loop's
                # `else` clause, where no exception is active, which
                # raised RuntimeError instead of the connection error.
                last_error = exc
                connector.log.error("Reconnecting..")
                time.sleep(0.1)
        raise last_error
    return decorated
class DbConnector(object):
    """Mongo connector with its own, non-singleton Session.

    Mirrors the avalon-core `io` module but keeps a per-instance
    `Session` dict instead of the global `api.Session` (see the module
    docstring above).
    """

    log = logging.getLogger(__name__)

    def __init__(self):
        # Per-instance session; populated from environment in `install`.
        self.Session = {}
        self._mongo_client = None
        self._sentry_client = None
        self._sentry_logging_handler = None
        self._database = None
        self._is_installed = False

    def __getitem__(self, key):
        # gives direct access to collection without setting `active_table`
        return self._database[key]

    def __getattribute__(self, attr):
        # not all methods of PyMongo database are implemented with this it is
        # possible to use them too
        try:
            return super(DbConnector, self).__getattribute__(attr)
        except AttributeError:
            # Fall through to the active project's collection so any
            # pymongo Collection method works on this object.
            cur_proj = self.Session["AVALON_PROJECT"]
            return self._database[cur_proj].__getattribute__(attr)

    def install(self):
        """Establish a persistent connection to the database"""
        if self._is_installed:
            return
        logging.basicConfig()
        self.Session.update(self._from_environment())

        timeout = int(self.Session["AVALON_TIMEOUT"])
        mongo_url = self.Session["AVALON_MONGO"]
        kwargs = {
            "host": mongo_url,
            "serverSelectionTimeoutMS": timeout
        }

        port = extract_port_from_url(mongo_url)
        if port is not None:
            kwargs["port"] = int(port)

        self._mongo_client = pymongo.MongoClient(**kwargs)

        # Verify connectivity with up to 3 attempts; the timeout grows
        # by 50% after each failed attempt.
        for retry in range(3):
            try:
                t1 = time.time()
                self._mongo_client.server_info()

            except Exception:
                self.log.error("Retrying..")
                time.sleep(1)
                timeout *= 1.5

            else:
                break

        else:
            raise IOError(
                "ERROR: Couldn't connect to %s in "
                "less than %.3f ms" % (self.Session["AVALON_MONGO"], timeout))

        self.log.info("Connected to %s, delay %.3f s" % (
            self.Session["AVALON_MONGO"], time.time() - t1))

        self._install_sentry()

        self._database = self._mongo_client[self.Session["AVALON_DB"]]
        self._is_installed = True

    def _install_sentry(self):
        # Sentry is optional; only set up when configured and raven is
        # importable.
        if "AVALON_SENTRY" not in self.Session:
            return

        try:
            from raven import Client
            from raven.handlers.logging import SentryHandler
            from raven.conf import setup_logging
        except ImportError:
            # Note: There was a Sentry address in this Session
            return self.log.warning("Sentry disabled, raven not installed")

        client = Client(self.Session["AVALON_SENTRY"])

        # Transmit log messages to Sentry
        handler = SentryHandler(client)
        handler.setLevel(logging.WARNING)

        setup_logging(handler)

        self._sentry_client = client
        self._sentry_logging_handler = handler
        self.log.info(
            "Connected to Sentry @ %s" % self.Session["AVALON_SENTRY"]
        )

    def _from_environment(self):
        """Build a Session dict from AVALON_* environment variables."""
        Session = {
            item[0]: os.getenv(item[0], item[1])
            for item in (
                # Root directory of projects on disk
                ("AVALON_PROJECTS", None),

                # Name of current Project
                ("AVALON_PROJECT", ""),

                # Name of current Asset
                ("AVALON_ASSET", ""),

                # Name of current silo
                ("AVALON_SILO", ""),

                # Name of current task
                ("AVALON_TASK", None),

                # Name of current app
                ("AVALON_APP", None),

                # Path to working directory
                ("AVALON_WORKDIR", None),

                # Name of current Config
                # TODO(marcus): Establish a suitable default config
                ("AVALON_CONFIG", "no_config"),

                # Name of Avalon in graphical user interfaces
                # Use this to customise the visual appearance of Avalon
                # to better integrate with your surrounding pipeline
                ("AVALON_LABEL", "Avalon"),

                # Used during any connections to the outside world
                ("AVALON_TIMEOUT", "1000"),

                # Address to Asset Database
                ("AVALON_MONGO", "mongodb://localhost:27017"),

                # Name of database used in MongoDB
                ("AVALON_DB", "avalon"),

                # Address to Sentry
                ("AVALON_SENTRY", None),

                # Address to Deadline Web Service
                # E.g. http://192.167.0.1:8082
                ("AVALON_DEADLINE", None),

                # Enable features not necessarily stable. The user's own risk
                ("AVALON_EARLY_ADOPTER", None),

                # Address of central asset repository, contains
                # the following interface:
                #   /upload
                #   /download
                #   /manager (optional)
                ("AVALON_LOCATION", "http://127.0.0.1"),

                # Boolean of whether to upload published material
                # to central asset repository
                ("AVALON_UPLOAD", None),

                # Generic username and password
                ("AVALON_USERNAME", "avalon"),
                ("AVALON_PASSWORD", "secret"),

                # Unique identifier for instances in working files
                ("AVALON_INSTANCE_ID", "avalon.instance"),
                ("AVALON_CONTAINER_ID", "avalon.container"),

                # Enable debugging
                ("AVALON_DEBUG", None),

            ) if os.getenv(item[0], item[1]) is not None
        }

        Session["schema"] = "avalon-core:session-2.0"
        try:
            schema.validate(Session)
        except schema.ValidationError as e:
            # TODO(marcus): Make this mandatory
            self.log.warning(e)

        return Session

    def uninstall(self):
        """Close any connection to the database"""
        try:
            self._mongo_client.close()
        except AttributeError:
            pass

        self._mongo_client = None
        self._database = None
        self._is_installed = False

    def active_project(self):
        """Return the name of the active project"""
        return self.Session["AVALON_PROJECT"]

    def activate_project(self, project_name):
        # Switch which collection the pass-through methods below target.
        self.Session["AVALON_PROJECT"] = project_name

    def projects(self):
        """List available projects

        Returns:
            list of project documents

        """
        collection_names = self.collections()
        for project in collection_names:
            if project in ("system.indexes",):
                continue

            # Each collection will have exactly one project document
            document = self.find_project(project)

            if document is not None:
                yield document

    def locate(self, path):
        """Traverse a hierarchy from top-to-bottom

        Example:
            representation = locate(["hulk", "Bruce", "modelDefault", 1, "ma"])

        Returns:
            representation (ObjectId)

        """
        components = zip(
            ("project", "asset", "subset", "version", "representation"),
            path
        )

        parent = None
        for type_, name in components:
            # `None` or -1 as the version name means "latest".
            latest = (type_ == "version") and name in (None, -1)

            try:
                if latest:
                    parent = self.find_one(
                        filter={
                            "type": type_,
                            "parent": parent
                        },
                        projection={"_id": 1},
                        sort=[("name", -1)]
                    )["_id"]
                else:
                    parent = self.find_one(
                        filter={
                            "type": type_,
                            "name": name,
                            "parent": parent
                        },
                        projection={"_id": 1},
                    )["_id"]

            except TypeError:
                # find_one returned None somewhere along the path.
                return None

        return parent

    @auto_reconnect
    def collections(self):
        return self._database.collection_names()

    @auto_reconnect
    def find_project(self, project):
        return self._database[project].find_one({"type": "project"})

    @auto_reconnect
    def insert_one(self, item):
        assert isinstance(item, dict), "item must be of type <dict>"
        schema.validate(item)
        return self._database[self.Session["AVALON_PROJECT"]].insert_one(item)

    @auto_reconnect
    def insert_many(self, items, ordered=True):
        # check if all items are valid
        assert isinstance(items, list), "`items` must be of type <list>"
        for item in items:
            assert isinstance(item, dict), "`item` must be of type <dict>"
            schema.validate(item)

        return self._database[self.Session["AVALON_PROJECT"]].insert_many(
            items,
            ordered=ordered)

    @auto_reconnect
    def find(self, filter, projection=None, sort=None):
        return self._database[self.Session["AVALON_PROJECT"]].find(
            filter=filter,
            projection=projection,
            sort=sort
        )

    @auto_reconnect
    def find_one(self, filter, projection=None, sort=None):
        assert isinstance(filter, dict), "filter must be <dict>"

        return self._database[self.Session["AVALON_PROJECT"]].find_one(
            filter=filter,
            projection=projection,
            sort=sort
        )

    @auto_reconnect
    def save(self, *args, **kwargs):
        return self._database[self.Session["AVALON_PROJECT"]].save(
            *args, **kwargs)

    @auto_reconnect
    def replace_one(self, filter, replacement):
        return self._database[self.Session["AVALON_PROJECT"]].replace_one(
            filter, replacement)

    @auto_reconnect
    def update_many(self, filter, update):
        return self._database[self.Session["AVALON_PROJECT"]].update_many(
            filter, update)

    @auto_reconnect
    def distinct(self, *args, **kwargs):
        return self._database[self.Session["AVALON_PROJECT"]].distinct(
            *args, **kwargs)

    @auto_reconnect
    def drop(self, *args, **kwargs):
        return self._database[self.Session["AVALON_PROJECT"]].drop(
            *args, **kwargs)

    @auto_reconnect
    def delete_many(self, *args, **kwargs):
        return self._database[self.Session["AVALON_PROJECT"]].delete_many(
            *args, **kwargs)

    def parenthood(self, document):
        """Return the chain of parent documents for `document`."""
        assert document is not None, "This is a bug"

        parents = list()
        while document.get("parent") is not None:
            document = self.find_one({"_id": document["parent"]})
            if document is None:
                break

            # A master_version carries the data of the version it points
            # to; substitute that version's data.
            if document.get("type") == "master_version":
                _document = self.find_one({"_id": document["version_id"]})
                document["data"] = _document["data"]

            parents.append(document)

        return parents

    @contextlib.contextmanager
    def tempdir(self):
        # Temporary directory that is removed when the context exits.
        tempdir = tempfile.mkdtemp()
        try:
            yield tempdir
        finally:
            shutil.rmtree(tempdir)

    def download(self, src, dst):
        """Download `src` to `dst`

        Arguments:
            src (str): URL to source file
            dst (str): Absolute path to destination file

        Yields tuple (progress, error):
            progress (int): Between 0-100
            error (Exception): Any exception raised when first making connection

        """
        try:
            response = requests.get(
                src,
                stream=True,
                auth=requests.auth.HTTPBasicAuth(
                    self.Session["AVALON_USERNAME"],
                    self.Session["AVALON_PASSWORD"]
                )
            )
        except requests.ConnectionError as e:
            yield None, e
            return

        with self.tempdir() as dirname:
            tmp = os.path.join(dirname, os.path.basename(src))
            with open(tmp, "wb") as f:
                total_length = response.headers.get("content-length")

                if total_length is None:  # no content length header
                    f.write(response.content)
                else:
                    downloaded = 0
                    total_length = int(total_length)
                    for data in response.iter_content(chunk_size=4096):
                        downloaded += len(data)
                        f.write(data)

                        yield int(100.0 * downloaded / total_length), None

            try:
                os.makedirs(os.path.dirname(dst))
            except OSError as e:
                # An already existing destination directory is fine.
                if e.errno != errno.EEXIST:
                    raise

            shutil.copy(tmp, dst)

View file

@ -2,7 +2,7 @@ import os
import sys
import copy
from pype.modules.rest_api import RestApi, route, abort, CallbackResult
from .io_nonsingleton import DbConnector
from avalon.api import AvalonMongoDB
from pype.api import config, execute, Logger
log = Logger().get_logger("AdobeCommunicator")
@ -14,7 +14,7 @@ PUBLISH_PATHS = []
class AdobeRestApi(RestApi):
dbcon = DbConnector()
dbcon = AvalonMongoDB()
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)

View file

@ -1,10 +1,7 @@
import os
import argparse
from Qt import QtGui, QtWidgets
from Qt import QtWidgets
from avalon.tools import libraryloader
from pype.api import Logger
from avalon import io
from launcher import launcher_widget, lib as launcher_lib
from pype.tools.launcher import LauncherWindow, actions
class AvalonApps:
@ -12,7 +9,12 @@ class AvalonApps:
self.log = Logger().get_logger(__name__)
self.main_parent = main_parent
self.parent = parent
self.app_launcher = None
self.app_launcher = LauncherWindow()
# actions.register_default_actions()
actions.register_config_actions()
actions.register_environment_actions()
def process_modules(self, modules):
if "RestApiServer" in modules:
@ -32,23 +34,22 @@ class AvalonApps:
self.log.warning('Parent menu is not set')
return
icon = QtGui.QIcon(launcher_lib.resource("icon", "main.png"))
aShowLauncher = QtWidgets.QAction(icon, "&Launcher", parent_menu)
aLibraryLoader = QtWidgets.QAction("Library", parent_menu)
action_launcher = QtWidgets.QAction("Launcher", parent_menu)
action_library_loader = QtWidgets.QAction(
"Library loader", parent_menu
)
aShowLauncher.triggered.connect(self.show_launcher)
aLibraryLoader.triggered.connect(self.show_library_loader)
action_launcher.triggered.connect(self.show_launcher)
action_library_loader.triggered.connect(self.show_library_loader)
parent_menu.addAction(aShowLauncher)
parent_menu.addAction(aLibraryLoader)
parent_menu.addAction(action_launcher)
parent_menu.addAction(action_library_loader)
def show_launcher(self):
# if app_launcher don't exist create it/otherwise only show main window
if self.app_launcher is None:
io.install()
APP_PATH = launcher_lib.resource("qml", "main.qml")
self.app_launcher = launcher_widget.Launcher(APP_PATH)
self.app_launcher.window.show()
self.app_launcher.show()
self.app_launcher.raise_()
self.app_launcher.activateWindow()
def show_library_loader(self):
libraryloader.show(

View file

@ -4,14 +4,14 @@ import json
import bson
import bson.json_util
from pype.modules.rest_api import RestApi, abort, CallbackResult
from pype.modules.ftrack.lib.io_nonsingleton import DbConnector
from avalon.api import AvalonMongoDB
class AvalonRestApi(RestApi):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.dbcon = DbConnector()
self.dbcon = AvalonMongoDB()
self.dbcon.install()
@RestApi.route("/projects/<project_name>", url_prefix="/avalon", methods="GET")

View file

@ -409,6 +409,10 @@ class CustomAttributes(BaseAction):
))
)
)
# Make sure there is at least one item
if not app_definitions:
app_definitions.append({"empty": "< Empty >"})
return app_definitions
def applications_attribute(self, event):
@ -432,6 +436,10 @@ class CustomAttributes(BaseAction):
if usage:
tools_data.append({tool_name: tool_name})
# Make sure there is at least one item
if not tools_data:
tools_data.append({"empty": "< Empty >"})
tools_custom_attr_data = {
"label": "Tools",
"key": "tools_env",

View file

@ -5,7 +5,7 @@ from queue import Queue
from bson.objectid import ObjectId
from pype.modules.ftrack.lib import BaseAction, statics_icon
from pype.modules.ftrack.lib.io_nonsingleton import DbConnector
from avalon.api import AvalonMongoDB
class DeleteAssetSubset(BaseAction):
@ -21,7 +21,7 @@ class DeleteAssetSubset(BaseAction):
#: roles that are allowed to register this action
role_list = ["Pypeclub", "Administrator", "Project Manager"]
#: Db connection
dbcon = DbConnector()
dbcon = AvalonMongoDB()
splitter = {"type": "label", "value": "---"}
action_data_by_id = {}

View file

@ -6,7 +6,7 @@ import clique
from pymongo import UpdateOne
from pype.modules.ftrack.lib import BaseAction, statics_icon
from pype.modules.ftrack.lib.io_nonsingleton import DbConnector
from avalon.api import AvalonMongoDB
from pype.api import Anatomy
import avalon.pipeline
@ -24,7 +24,7 @@ class DeleteOldVersions(BaseAction):
role_list = ["Pypeclub", "Project Manager", "Administrator"]
icon = statics_icon("ftrack", "action_icons", "PypeAdmin.svg")
dbcon = DbConnector()
dbcon = AvalonMongoDB()
inteface_title = "Choose your preferences"
splitter_item = {"type": "label", "value": "---"}

View file

@ -1,5 +1,6 @@
import os
import copy
import json
import shutil
import collections
@ -9,10 +10,10 @@ from bson.objectid import ObjectId
from avalon import pipeline
from avalon.vendor import filelink
from pype.api import Anatomy
from pype.api import Anatomy, config
from pype.modules.ftrack.lib import BaseAction, statics_icon
from pype.modules.ftrack.lib.avalon_sync import CustAttrIdKey
from pype.modules.ftrack.lib.io_nonsingleton import DbConnector
from pype.modules.ftrack.lib.avalon_sync import CUST_ATTR_ID_KEY
from avalon.api import AvalonMongoDB
class Delivery(BaseAction):
@ -23,7 +24,7 @@ class Delivery(BaseAction):
role_list = ["Pypeclub", "Administrator", "Project manager"]
icon = statics_icon("ftrack", "action_icons", "Delivery.svg")
db_con = DbConnector()
db_con = AvalonMongoDB()
def discover(self, session, entities, event):
for entity in entities:
@ -41,36 +42,22 @@ class Delivery(BaseAction):
items = []
item_splitter = {"type": "label", "value": "---"}
# Prepare component names for processing
components = None
project = None
for entity in entities:
if project is None:
project_id = None
for ent_info in entity["link"]:
if ent_info["type"].lower() == "project":
project_id = ent_info["id"]
break
project_entity = self.get_project_from_entity(entities[0])
project_name = project_entity["full_name"]
self.db_con.install()
self.db_con.Session["AVALON_PROJECT"] = project_name
project_doc = self.db_con.find_one({"type": "project"})
if not project_doc:
return {
"success": False,
"message": (
"Didn't found project \"{}\" in avalon."
).format(project_name)
}
if project_id is None:
project = entity["asset"]["parent"]["project"]
else:
project = session.query((
"select id, full_name from Project where id is \"{}\""
).format(project_id)).one()
repre_names = self._get_repre_names(entities)
self.db_con.uninstall()
_components = set(
[component["name"] for component in entity["components"]]
)
if components is None:
components = _components
continue
components = components.intersection(_components)
if not components:
break
project_name = project["full_name"]
items.append({
"type": "hidden",
"name": "__project_name__",
@ -93,7 +80,7 @@ class Delivery(BaseAction):
skipped = False
# Add message if there are any common components
if not components or not new_anatomies:
if not repre_names or not new_anatomies:
skipped = True
items.append({
"type": "label",
@ -106,7 +93,7 @@ class Delivery(BaseAction):
"value": skipped
})
if not components:
if not repre_names:
if len(entities) == 1:
items.append({
"type": "label",
@ -143,12 +130,12 @@ class Delivery(BaseAction):
"type": "label"
})
for component in components:
for repre_name in repre_names:
items.append({
"type": "boolean",
"value": False,
"label": component,
"name": component
"label": repre_name,
"name": repre_name
})
items.append(item_splitter)
@ -198,27 +185,233 @@ class Delivery(BaseAction):
"title": title
}
def _get_repre_names(self, entities):
version_ids = self._get_interest_version_ids(entities)
repre_docs = self.db_con.find({
"type": "representation",
"parent": {"$in": version_ids}
})
return list(sorted(repre_docs.distinct("name")))
def _get_interest_version_ids(self, entities):
parent_ent_by_id = {}
subset_names = set()
version_nums = set()
for entity in entities:
asset = entity["asset"]
parent = asset["parent"]
parent_ent_by_id[parent["id"]] = parent
subset_name = asset["name"]
subset_names.add(subset_name)
version = entity["version"]
version_nums.add(version)
asset_docs_by_ftrack_id = self._get_asset_docs(parent_ent_by_id)
subset_docs = self._get_subset_docs(
asset_docs_by_ftrack_id, subset_names, entities
)
version_docs = self._get_version_docs(
asset_docs_by_ftrack_id, subset_docs, version_nums, entities
)
return [version_doc["_id"] for version_doc in version_docs]
def _get_version_docs(
self, asset_docs_by_ftrack_id, subset_docs, version_nums, entities
):
subset_docs_by_id = {
subset_doc["_id"]: subset_doc
for subset_doc in subset_docs
}
version_docs = list(self.db_con.find({
"type": "version",
"parent": {"$in": list(subset_docs_by_id.keys())},
"name": {"$in": list(version_nums)}
}))
version_docs_by_parent_id = collections.defaultdict(dict)
for version_doc in version_docs:
subset_doc = subset_docs_by_id[version_doc["parent"]]
asset_id = subset_doc["parent"]
subset_name = subset_doc["name"]
version = version_doc["name"]
if version_docs_by_parent_id[asset_id].get(subset_name) is None:
version_docs_by_parent_id[asset_id][subset_name] = {}
version_docs_by_parent_id[asset_id][subset_name][version] = (
version_doc
)
filtered_versions = []
for entity in entities:
asset = entity["asset"]
parent = asset["parent"]
asset_doc = asset_docs_by_ftrack_id[parent["id"]]
subsets_by_name = version_docs_by_parent_id.get(asset_doc["_id"])
if not subsets_by_name:
continue
subset_name = asset["name"]
version_docs_by_version = subsets_by_name.get(subset_name)
if not version_docs_by_version:
continue
version = entity["version"]
version_doc = version_docs_by_version.get(version)
if version_doc:
filtered_versions.append(version_doc)
return filtered_versions
def _get_subset_docs(
self, asset_docs_by_ftrack_id, subset_names, entities
):
asset_doc_ids = list()
for asset_doc in asset_docs_by_ftrack_id.values():
asset_doc_ids.append(asset_doc["_id"])
subset_docs = list(self.db_con.find({
"type": "subset",
"parent": {"$in": asset_doc_ids},
"name": {"$in": list(subset_names)}
}))
subset_docs_by_parent_id = collections.defaultdict(dict)
for subset_doc in subset_docs:
asset_id = subset_doc["parent"]
subset_name = subset_doc["name"]
subset_docs_by_parent_id[asset_id][subset_name] = subset_doc
filtered_subsets = []
for entity in entities:
asset = entity["asset"]
parent = asset["parent"]
asset_doc = asset_docs_by_ftrack_id[parent["id"]]
subsets_by_name = subset_docs_by_parent_id.get(asset_doc["_id"])
if not subsets_by_name:
continue
subset_name = asset["name"]
subset_doc = subsets_by_name.get(subset_name)
if subset_doc:
filtered_subsets.append(subset_doc)
return filtered_subsets
def _get_asset_docs(self, parent_ent_by_id):
asset_docs = list(self.db_con.find({
"type": "asset",
"data.ftrackId": {"$in": list(parent_ent_by_id.keys())}
}))
asset_docs_by_ftrack_id = {
asset_doc["data"]["ftrackId"]: asset_doc
for asset_doc in asset_docs
}
entities_by_mongo_id = {}
entities_by_names = {}
for ftrack_id, entity in parent_ent_by_id.items():
if ftrack_id not in asset_docs_by_ftrack_id:
parent_mongo_id = entity["custom_attributes"].get(
CUST_ATTR_ID_KEY
)
if parent_mongo_id:
entities_by_mongo_id[ObjectId(parent_mongo_id)] = entity
else:
entities_by_names[entity["name"]] = entity
expressions = []
if entities_by_mongo_id:
expression = {
"type": "asset",
"_id": {"$in": list(entities_by_mongo_id.keys())}
}
expressions.append(expression)
if entities_by_names:
expression = {
"type": "asset",
"name": {"$in": list(entities_by_names.keys())}
}
expressions.append(expression)
if expressions:
if len(expressions) == 1:
filter = expressions[0]
else:
filter = {"$or": expressions}
asset_docs = self.db_con.find(filter)
for asset_doc in asset_docs:
if asset_doc["_id"] in entities_by_mongo_id:
entity = entities_by_mongo_id[asset_doc["_id"]]
asset_docs_by_ftrack_id[entity["id"]] = asset_doc
elif asset_doc["name"] in entities_by_names:
entity = entities_by_names[asset_doc["name"]]
asset_docs_by_ftrack_id[entity["id"]] = asset_doc
return asset_docs_by_ftrack_id
def launch(self, session, entities, event):
if "values" not in event["data"]:
return
self.report_items = collections.defaultdict(list)
values = event["data"]["values"]
skipped = values.pop("__skipped__")
if skipped:
return None
component_names = []
user_id = event["source"]["user"]["id"]
user_entity = session.query(
"User where id is {}".format(user_id)
).one()
job = session.create("Job", {
"user": user_entity,
"status": "running",
"data": json.dumps({
"description": "Delivery processing."
})
})
session.commit()
try:
self.db_con.install()
self.real_launch(session, entities, event)
job["status"] = "done"
except Exception:
self.log.warning(
"Failed during processing delivery action.",
exc_info=True
)
finally:
if job["status"] != "done":
job["status"] = "failed"
session.commit()
self.db_con.uninstall()
def real_launch(self, session, entities, event):
self.log.info("Delivery action just started.")
report_items = collections.defaultdict(list)
values = event["data"]["values"]
location_path = values.pop("__location_path__")
anatomy_name = values.pop("__new_anatomies__")
project_name = values.pop("__project_name__")
repre_names = []
for key, value in values.items():
if value is True:
component_names.append(key)
repre_names.append(key)
if not component_names:
if not repre_names:
return {
"success": True,
"message": "Not selected components to deliver."
@ -230,64 +423,15 @@ class Delivery(BaseAction):
if not os.path.exists(location_path):
os.makedirs(location_path)
self.db_con.install()
self.db_con.Session["AVALON_PROJECT"] = project_name
repres_to_deliver = []
for entity in entities:
asset = entity["asset"]
subset_name = asset["name"]
version = entity["version"]
parent = asset["parent"]
parent_mongo_id = parent["custom_attributes"].get(CustAttrIdKey)
if parent_mongo_id:
parent_mongo_id = ObjectId(parent_mongo_id)
else:
asset_ent = self.db_con.find_one({
"type": "asset",
"data.ftrackId": parent["id"]
})
if not asset_ent:
ent_path = "/".join(
[ent["name"] for ent in parent["link"]]
)
msg = "Not synchronized entities to avalon"
self.report_items[msg].append(ent_path)
self.log.warning("{} <{}>".format(msg, ent_path))
continue
parent_mongo_id = asset_ent["_id"]
subset_ent = self.db_con.find_one({
"type": "subset",
"parent": parent_mongo_id,
"name": subset_name
})
version_ent = self.db_con.find_one({
"type": "version",
"name": version,
"parent": subset_ent["_id"]
})
repre_ents = self.db_con.find({
"type": "representation",
"parent": version_ent["_id"]
})
repres_by_name = {}
for repre in repre_ents:
repre_name = repre["name"]
repres_by_name[repre_name] = repre
for component in entity["components"]:
comp_name = component["name"]
if comp_name not in component_names:
continue
repre = repres_by_name.get(comp_name)
repres_to_deliver.append(repre)
self.log.debug("Collecting representations to process.")
version_ids = self._get_interest_version_ids(entities)
repres_to_deliver = list(self.db_con.find({
"type": "representation",
"parent": {"$in": version_ids},
"name": {"$in": repre_names}
}))
anatomy = Anatomy(project_name)
@ -304,9 +448,17 @@ class Delivery(BaseAction):
for name in root_names:
format_dict["root"][name] = location_path
datetime_data = config.get_datetime_data()
for repre in repres_to_deliver:
source_path = repre.get("data", {}).get("path")
debug_msg = "Processing representation {}".format(repre["_id"])
if source_path:
debug_msg += " with published path {}.".format(source_path)
self.log.debug(debug_msg)
# Get destination repre path
anatomy_data = copy.deepcopy(repre["context"])
anatomy_data.update(datetime_data)
anatomy_filled = anatomy.format_all(anatomy_data)
test_path = anatomy_filled["delivery"][anatomy_name]
@ -333,7 +485,7 @@ class Delivery(BaseAction):
"- Invalid value DataType: \"{}\"<br>"
).format(str(repre["_id"]), keys)
self.report_items[msg].append(sub_msg)
report_items[msg].append(sub_msg)
self.log.warning(
"{} Representation: \"{}\" Filled: <{}>".format(
msg, str(repre["_id"]), str(test_path)
@ -355,20 +507,19 @@ class Delivery(BaseAction):
anatomy,
anatomy_name,
anatomy_data,
format_dict
format_dict,
report_items
)
if not frame:
self.process_single_file(*args)
else:
self.process_sequence(*args)
self.db_con.uninstall()
return self.report()
return self.report(report_items)
def process_single_file(
self, repre_path, anatomy, anatomy_name, anatomy_data, format_dict
self, repre_path, anatomy, anatomy_name, anatomy_data, format_dict,
report_items
):
anatomy_filled = anatomy.format(anatomy_data)
if format_dict:
@ -384,7 +535,8 @@ class Delivery(BaseAction):
self.copy_file(repre_path, delivery_path)
def process_sequence(
self, repre_path, anatomy, anatomy_name, anatomy_data, format_dict
self, repre_path, anatomy, anatomy_name, anatomy_data, format_dict,
report_items
):
dir_path, file_name = os.path.split(str(repre_path))
@ -398,7 +550,7 @@ class Delivery(BaseAction):
if not file_name_items:
msg = "Source file was not found"
self.report_items[msg].append(repre_path)
report_items[msg].append(repre_path)
self.log.warning("{} <{}>".format(msg, repre_path))
return
@ -418,7 +570,7 @@ class Delivery(BaseAction):
if src_collection is None:
# TODO log error!
msg = "Source collection of files was not found"
self.report_items[msg].append(repre_path)
report_items[msg].append(repre_path)
self.log.warning("{} <{}>".format(msg, repre_path))
return
@ -491,10 +643,10 @@ class Delivery(BaseAction):
except OSError:
shutil.copyfile(src_path, dst_path)
def report(self):
def report(self, report_items):
items = []
title = "Delivery report"
for msg, _items in self.report_items.items():
for msg, _items in report_items.items():
if not _items:
continue

View file

@ -6,7 +6,7 @@ import json
from bson.objectid import ObjectId
from pype.modules.ftrack.lib import BaseAction, statics_icon
from pype.api import Anatomy
from pype.modules.ftrack.lib.io_nonsingleton import DbConnector
from avalon.api import AvalonMongoDB
from pype.modules.ftrack.lib.avalon_sync import CUST_ATTR_ID_KEY
@ -25,7 +25,7 @@ class StoreThumbnailsToAvalon(BaseAction):
icon = statics_icon("ftrack", "action_icons", "PypeAdmin.svg")
thumbnail_key = "AVALON_THUMBNAIL_ROOT"
db_con = DbConnector()
db_con = AvalonMongoDB()
def discover(self, session, entities, event):
for entity in entities:

View file

@ -41,9 +41,9 @@ class ThumbToParent(BaseAction):
parent = None
thumbid = None
if entity.entity_type.lower() == 'assetversion':
try:
parent = entity['task']
except Exception:
parent = entity['task']
if parent is None:
par_ent = entity['link'][-2]
parent = session.get(par_ent['type'], par_ent['id'])
else:
@ -51,7 +51,7 @@ class ThumbToParent(BaseAction):
parent = entity['parent']
except Exception as e:
msg = (
"Durin Action 'Thumb to Parent'"
"During Action 'Thumb to Parent'"
" went something wrong"
)
self.log.error(msg)
@ -62,7 +62,10 @@ class ThumbToParent(BaseAction):
parent['thumbnail_id'] = thumbid
status = 'done'
else:
status = 'failed'
raise Exception(
"Parent or thumbnail id not found. Parent: {}. "
"Thumbnail id: {}".format(parent, thumbid)
)
# inform the user that the job is done
job['status'] = status or 'done'

View file

@ -0,0 +1,437 @@
import json
import collections
import ftrack_api
from pype.modules.ftrack.lib import BaseAction
class PushFrameValuesToTaskAction(BaseAction):
"""Action for testing purpose or as base for new actions."""
# Ignore event handler by default
ignore_me = True
identifier = "admin.push_frame_values_to_task"
label = "Pype Admin"
variant = "- Push Frame values to Task"
entities_query = (
"select id, name, parent_id, link from TypedContext"
" where project_id is \"{}\" and object_type_id in ({})"
)
cust_attrs_query = (
"select id, key, object_type_id, is_hierarchical, default"
" from CustomAttributeConfiguration"
" where key in ({})"
)
cust_attr_value_query = (
"select value, entity_id from CustomAttributeValue"
" where entity_id in ({}) and configuration_id in ({})"
)
pushing_entity_types = {"Shot"}
hierarchical_custom_attribute_keys = {"frameStart", "frameEnd"}
custom_attribute_mapping = {
"frameStart": "fstart",
"frameEnd": "fend"
}
discover_role_list = {"Pypeclub", "Administrator", "Project Manager"}
def register(self):
modified_role_names = set()
for role_name in self.discover_role_list:
modified_role_names.add(role_name.lower())
self.discover_role_list = modified_role_names
self.session.event_hub.subscribe(
"topic=ftrack.action.discover",
self._discover,
priority=self.priority
)
launch_subscription = (
"topic=ftrack.action.launch and data.actionIdentifier={0}"
).format(self.identifier)
self.session.event_hub.subscribe(launch_subscription, self._launch)
def discover(self, session, entities, event):
""" Validation """
# Check if selection is valid
valid_selection = False
for ent in event["data"]["selection"]:
# Ignore entities that are not tasks or projects
if ent["entityType"].lower() == "show":
valid_selection = True
break
if not valid_selection:
return False
# Get user and check his roles
user_id = event.get("source", {}).get("user", {}).get("id")
if not user_id:
return False
user = session.query("User where id is \"{}\"".format(user_id)).first()
if not user:
return False
for role in user["user_security_roles"]:
lowered_role = role["security_role"]["name"].lower()
if lowered_role in self.discover_role_list:
return True
return False
def launch(self, session, entities, event):
self.log.debug("{}: Creating job".format(self.label))
user_entity = session.query(
"User where id is {}".format(event["source"]["user"]["id"])
).one()
job = session.create("Job", {
"user": user_entity,
"status": "running",
"data": json.dumps({
"description": "Propagation of Frame attribute values to task."
})
})
session.commit()
try:
project_entity = self.get_project_from_entity(entities[0])
result = self.propagate_values(session, project_entity, event)
job["status"] = "done"
session.commit()
return result
except Exception:
session.rollback()
job["status"] = "failed"
session.commit()
msg = "Pushing Custom attribute values to task Failed"
self.log.warning(msg, exc_info=True)
return {
"success": False,
"message": msg
}
finally:
if job["status"] == "running":
job["status"] = "failed"
session.commit()
def task_attributes(self, session):
task_object_type = session.query(
"ObjectType where name is \"Task\""
).one()
hier_attr_names = list(
self.custom_attribute_mapping.keys()
)
entity_type_specific_names = list(
self.custom_attribute_mapping.values()
)
joined_keys = self.join_keys(
hier_attr_names + entity_type_specific_names
)
attribute_entities = session.query(
self.cust_attrs_query.format(joined_keys)
).all()
hier_attrs = []
task_attrs = {}
for attr in attribute_entities:
attr_key = attr["key"]
if attr["is_hierarchical"]:
if attr_key in hier_attr_names:
hier_attrs.append(attr)
elif attr["object_type_id"] == task_object_type["id"]:
if attr_key in entity_type_specific_names:
task_attrs[attr_key] = attr["id"]
return task_attrs, hier_attrs
def join_keys(self, items):
return ",".join(["\"{}\"".format(item) for item in items])
def propagate_values(self, session, project_entity, event):
self.log.debug("Querying project's entities \"{}\".".format(
project_entity["full_name"]
))
pushing_entity_types = tuple(
ent_type.lower()
for ent_type in self.pushing_entity_types
)
destination_object_types = []
all_object_types = session.query("ObjectType").all()
for object_type in all_object_types:
lowered_name = object_type["name"].lower()
if (
lowered_name == "task"
or lowered_name in pushing_entity_types
):
destination_object_types.append(object_type)
destination_object_type_ids = tuple(
obj_type["id"]
for obj_type in destination_object_types
)
entities = session.query(self.entities_query.format(
project_entity["id"],
self.join_keys(destination_object_type_ids)
)).all()
entities_by_id = {
entity["id"]: entity
for entity in entities
}
self.log.debug("Filtering Task entities.")
task_entities_by_parent_id = collections.defaultdict(list)
non_task_entities = []
non_task_entity_ids = []
for entity in entities:
if entity.entity_type.lower() != "task":
non_task_entities.append(entity)
non_task_entity_ids.append(entity["id"])
continue
parent_id = entity["parent_id"]
if parent_id in entities_by_id:
task_entities_by_parent_id[parent_id].append(entity)
task_attr_id_by_keys, hier_attrs = self.task_attributes(session)
self.log.debug("Getting Custom attribute values from tasks' parents.")
hier_values_by_entity_id = self.get_hier_values(
session,
hier_attrs,
non_task_entity_ids
)
self.log.debug("Setting parents' values to task.")
task_missing_keys = self.set_task_attr_values(
session,
task_entities_by_parent_id,
hier_values_by_entity_id,
task_attr_id_by_keys
)
self.log.debug("Setting values to entities themselves.")
missing_keys_by_object_name = self.push_values_to_entities(
session,
non_task_entities,
hier_values_by_entity_id
)
if task_missing_keys:
missing_keys_by_object_name["Task"] = task_missing_keys
if missing_keys_by_object_name:
self.report(missing_keys_by_object_name, event)
return True
def report(self, missing_keys_by_object_name, event):
splitter = {"type": "label", "value": "---"}
title = "Push Custom Attribute values report:"
items = []
items.append({
"type": "label",
"value": "# Pushing values was not complete"
})
items.append({
"type": "label",
"value": (
"<p>It was due to missing custom"
" attribute configurations for specific entity type/s."
" These configurations are not created automatically.</p>"
)
})
log_message_items = []
log_message_item_template = (
"Entity type \"{}\" does not have created Custom Attribute/s: {}"
)
for object_name, missing_attr_names in (
missing_keys_by_object_name.items()
):
log_message_items.append(log_message_item_template.format(
object_name, self.join_keys(missing_attr_names)
))
items.append(splitter)
items.append({
"type": "label",
"value": "## Entity type: {}".format(object_name)
})
items.append({
"type": "label",
"value": "<p>{}</p>".format("<br>".join(missing_attr_names))
})
self.log.warning((
"Couldn't finish pushing attribute values because"
" few entity types miss Custom attribute configurations:\n{}"
).format("\n".join(log_message_items)))
self.show_interface(items, title, event)
def get_hier_values(self, session, hier_attrs, focus_entity_ids):
joined_entity_ids = self.join_keys(focus_entity_ids)
hier_attr_ids = self.join_keys(
tuple(hier_attr["id"] for hier_attr in hier_attrs)
)
hier_attrs_key_by_id = {
hier_attr["id"]: hier_attr["key"]
for hier_attr in hier_attrs
}
call_expr = [{
"action": "query",
"expression": self.cust_attr_value_query.format(
joined_entity_ids, hier_attr_ids
)
}]
if hasattr(session, "call"):
[values] = session.call(call_expr)
else:
[values] = session._call(call_expr)
values_per_entity_id = {}
for item in values["data"]:
entity_id = item["entity_id"]
key = hier_attrs_key_by_id[item["configuration_id"]]
if entity_id not in values_per_entity_id:
values_per_entity_id[entity_id] = {}
value = item["value"]
if value is not None:
values_per_entity_id[entity_id][key] = value
output = {}
for entity_id in focus_entity_ids:
value = values_per_entity_id.get(entity_id)
if value:
output[entity_id] = value
return output
def set_task_attr_values(
self,
session,
task_entities_by_parent_id,
hier_values_by_entity_id,
task_attr_id_by_keys
):
missing_keys = set()
for parent_id, values in hier_values_by_entity_id.items():
task_entities = task_entities_by_parent_id[parent_id]
for hier_key, value in values.items():
key = self.custom_attribute_mapping[hier_key]
if key not in task_attr_id_by_keys:
missing_keys.add(key)
continue
for task_entity in task_entities:
_entity_key = collections.OrderedDict({
"configuration_id": task_attr_id_by_keys[key],
"entity_id": task_entity["id"]
})
session.recorded_operations.push(
ftrack_api.operation.UpdateEntityOperation(
"ContextCustomAttributeValue",
_entity_key,
"value",
ftrack_api.symbol.NOT_SET,
value
)
)
session.commit()
return missing_keys
def push_values_to_entities(
self,
session,
non_task_entities,
hier_values_by_entity_id
):
object_types = session.query(
"ObjectType where name in ({})".format(
self.join_keys(self.pushing_entity_types)
)
).all()
object_type_names_by_id = {
object_type["id"]: object_type["name"]
for object_type in object_types
}
joined_keys = self.join_keys(
self.custom_attribute_mapping.values()
)
attribute_entities = session.query(
self.cust_attrs_query.format(joined_keys)
).all()
attrs_by_obj_id = {}
for attr in attribute_entities:
if attr["is_hierarchical"]:
continue
obj_id = attr["object_type_id"]
if obj_id not in object_type_names_by_id:
continue
if obj_id not in attrs_by_obj_id:
attrs_by_obj_id[obj_id] = {}
attr_key = attr["key"]
attrs_by_obj_id[obj_id][attr_key] = attr["id"]
entities_by_obj_id = collections.defaultdict(list)
for entity in non_task_entities:
entities_by_obj_id[entity["object_type_id"]].append(entity)
missing_keys_by_object_id = collections.defaultdict(set)
for obj_type_id, attr_keys in attrs_by_obj_id.items():
entities = entities_by_obj_id.get(obj_type_id)
if not entities:
continue
for entity in entities:
values = hier_values_by_entity_id.get(entity["id"])
if not values:
continue
for hier_key, value in values.items():
key = self.custom_attribute_mapping[hier_key]
if key not in attr_keys:
missing_keys_by_object_id[obj_type_id].add(key)
continue
_entity_key = collections.OrderedDict({
"configuration_id": attr_keys[key],
"entity_id": entity["id"]
})
session.recorded_operations.push(
ftrack_api.operation.UpdateEntityOperation(
"ContextCustomAttributeValue",
_entity_key,
"value",
ftrack_api.symbol.NOT_SET,
value
)
)
session.commit()
missing_keys_by_object_name = {}
for obj_id, missing_keys in missing_keys_by_object_id.items():
obj_name = object_type_names_by_id[obj_id]
missing_keys_by_object_name[obj_name] = missing_keys
return missing_keys_by_object_name
def register(session, plugins_presets={}):
PushFrameValuesToTaskAction(session, plugins_presets).register()

View file

@ -1,92 +1,220 @@
import ftrack_api
from pype.modules.ftrack import BaseEvent
import operator
import collections
from pype.modules.ftrack import BaseEvent
class NextTaskUpdate(BaseEvent):
def filter_entities_info(self, session, event):
# Filter if event contain relevant data
entities_info = event["data"].get("entities")
if not entities_info:
return
def get_next_task(self, task, session):
parent = task['parent']
# tasks = parent['tasks']
tasks = parent['children']
first_filtered_entities = []
for entity_info in entities_info:
# Care only about tasks
if entity_info.get("entityType") != "task":
continue
def sort_types(types):
data = {}
for t in types:
data[t] = t.get('sort')
# Care only about changes of status
changes = entity_info.get("changes") or {}
statusid_changes = changes.get("statusid") or {}
if (
statusid_changes.get("new") is None
or statusid_changes.get("old") is None
):
continue
data = sorted(data.items(), key=operator.itemgetter(1))
results = []
for item in data:
results.append(item[0])
return results
first_filtered_entities.append(entity_info)
types_sorted = sort_types(session.query('Type'))
next_types = None
for t in types_sorted:
if t['id'] == task['type_id']:
next_types = types_sorted[(types_sorted.index(t) + 1):]
status_ids = [
entity_info["changes"]["statusid"]["new"]
for entity_info in first_filtered_entities
]
statuses_by_id = self.get_statuses_by_id(
session, status_ids=status_ids
)
for nt in next_types:
for t in tasks:
if nt['id'] == t['type_id']:
return t
# Care only about tasks having status with state `Done`
filtered_entities = []
for entity_info in first_filtered_entities:
status_id = entity_info["changes"]["statusid"]["new"]
status_entity = statuses_by_id[status_id]
if status_entity["state"]["name"].lower() == "done":
filtered_entities.append(entity_info)
return None
return filtered_entities
def get_parents_by_id(self, session, entities_info):
parent_ids = [
"\"{}\"".format(entity_info["parentId"])
for entity_info in entities_info
]
parent_entities = session.query(
"TypedContext where id in ({})".format(", ".join(parent_ids))
).all()
return {
entity["id"]: entity
for entity in parent_entities
}
def get_tasks_by_id(self, session, parent_ids):
joined_parent_ids = ",".join([
"\"{}\"".format(parent_id)
for parent_id in parent_ids
])
task_entities = session.query(
"Task where parent_id in ({})".format(joined_parent_ids)
).all()
return {
entity["id"]: entity
for entity in task_entities
}
def get_statuses_by_id(self, session, task_entities=None, status_ids=None):
if task_entities is None and status_ids is None:
return {}
if status_ids is None:
status_ids = []
for task_entity in task_entities:
status_ids.append(task_entity["status_id"])
if not status_ids:
return {}
status_entities = session.query(
"Status where id in ({})".format(", ".join(status_ids))
).all()
return {
entity["id"]: entity
for entity in status_entities
}
def get_sorted_task_types(self, session):
data = {
_type: _type.get("sort")
for _type in session.query("Type").all()
if _type.get("sort") is not None
}
return [
item[0]
for item in sorted(data.items(), key=operator.itemgetter(1))
]
def launch(self, session, event):
'''Propagates status from version to task when changed'''
# self.log.info(event)
# start of event procedure ----------------------------------
entities_info = self.filter_entities_info(session, event)
if not entities_info:
return
for entity in event['data'].get('entities', []):
changes = entity.get('changes', None)
if changes is None:
continue
statusid_changes = changes.get('statusid', {})
if (
entity['entityType'] != 'task' or
'statusid' not in (entity.get('keys') or []) or
statusid_changes.get('new', None) is None or
statusid_changes.get('old', None) is None
):
parents_by_id = self.get_parents_by_id(session, entities_info)
tasks_by_id = self.get_tasks_by_id(
session, tuple(parents_by_id.keys())
)
tasks_to_parent_id = collections.defaultdict(list)
for task_entity in tasks_by_id.values():
tasks_to_parent_id[task_entity["parent_id"]].append(task_entity)
statuses_by_id = self.get_statuses_by_id(session, tasks_by_id.values())
next_status_name = "Ready"
next_status = session.query(
"Status where name is \"{}\"".format(next_status_name)
).first()
if not next_status:
self.log.warning("Couldn't find status with name \"{}\"".format(
next_status_name
))
return
for entity_info in entities_info:
parent_id = entity_info["parentId"]
task_id = entity_info["entityId"]
task_entity = tasks_by_id[task_id]
all_same_type_taks_done = True
for parents_task in tasks_to_parent_id[parent_id]:
if (
parents_task["id"] == task_id
or parents_task["type_id"] != task_entity["type_id"]
):
continue
parents_task_status = statuses_by_id[parents_task["status_id"]]
low_status_name = parents_task_status["name"].lower()
# Skip if task's status name "Omitted"
if low_status_name == "omitted":
continue
low_state_name = parents_task_status["state"]["name"].lower()
if low_state_name != "done":
all_same_type_taks_done = False
break
if not all_same_type_taks_done:
continue
task = session.get('Task', entity['entityId'])
# Prepare all task types
sorted_task_types = self.get_sorted_task_types(session)
sorted_task_types_len = len(sorted_task_types)
status = session.get('Status',
entity['changes']['statusid']['new'])
state = status['state']['name']
from_idx = None
for idx, task_type in enumerate(sorted_task_types):
if task_type["id"] == task_entity["type_id"]:
from_idx = idx + 1
break
next_task = self.get_next_task(task, session)
# Current task type is last in order
if from_idx is None or from_idx >= sorted_task_types_len:
continue
# Setting next task to Ready, if on NOT READY
if next_task and state == 'Done':
if next_task['status']['name'].lower() == 'not ready':
next_task_type_id = None
next_task_type_tasks = []
for idx in range(from_idx, sorted_task_types_len):
next_task_type = sorted_task_types[idx]
for parents_task in tasks_to_parent_id[parent_id]:
if next_task_type_id is None:
if parents_task["type_id"] != next_task_type["id"]:
continue
next_task_type_id = next_task_type["id"]
# Get path to task
path = task['name']
for p in task['ancestors']:
path = p['name'] + '/' + path
if parents_task["type_id"] == next_task_type_id:
next_task_type_tasks.append(parents_task)
# Setting next task status
try:
query = 'Status where name is "{}"'.format('Ready')
status_to_set = session.query(query).one()
next_task['status'] = status_to_set
session.commit()
self.log.info((
'>>> [ {} ] updated to [ Ready ]'
).format(path))
except Exception as e:
session.rollback()
self.log.warning((
'!!! [ {} ] status couldnt be set: [ {} ]'
).format(path, str(e)), exc_info=True)
if next_task_type_id is not None:
break
for next_task_entity in next_task_type_tasks:
if next_task_entity["status"]["name"].lower() != "not ready":
continue
ent_path = "/".join(
[ent["name"] for ent in next_task_entity["link"]]
)
try:
next_task_entity["status"] = next_status
session.commit()
self.log.info(
"\"{}\" updated status to \"{}\"".format(
ent_path, next_status_name
)
)
except Exception:
session.rollback()
self.log.warning(
"\"{}\" status couldnt be set to \"{}\"".format(
ent_path, next_status_name
),
exc_info=True
)
def register(session, plugins_presets):
'''Register plugin. Called when used as an plugin.'''
NextTaskUpdate(session, plugins_presets).register()

View file

@ -0,0 +1,230 @@
import collections
import ftrack_api
from pype.modules.ftrack import BaseEvent
class PushFrameValuesToTaskEvent(BaseEvent):
    """Push frame-range attribute changes from Shots onto their Tasks.

    Listens for changes of hierarchical attributes ``frameStart``/``frameEnd``
    on entities of the types in ``interest_entity_types`` and writes the new
    values into the (non-hierarchical) custom attributes ``fstart``/``fend``
    on the entity itself and on all of its child Task entities.
    """

    # Ignore event handler by default
    ignore_me = True

    # Query template for custom attribute configurations; formatted with
    # joined attribute keys and joined object type ids.
    cust_attrs_query = (
        "select id, key, object_type_id, is_hierarchical, default"
        " from CustomAttributeConfiguration"
        " where key in ({}) and object_type_id in ({})"
    )

    # Entity types whose attribute changes are processed.
    interest_entity_types = {"Shot"}
    # Hierarchical attribute keys that trigger processing.
    interest_attributes = {"frameStart", "frameEnd"}
    # Mapping of hierarchical attribute key -> target custom attribute key.
    interest_attr_mapping = {
        "frameStart": "fstart",
        "frameEnd": "fend"
    }

    # Class-level caches so ftrack object type ids are queried only once.
    _cached_task_object_id = None
    _cached_interest_object_ids = None

    @staticmethod
    def join_keys(keys):
        # Wrap each key in double quotes and join with commas for use
        # inside an ftrack query `in (...)` clause.
        return ",".join(["\"{}\"".format(key) for key in keys])

    @classmethod
    def task_object_id(cls, session):
        """Return (cached) id of the "Task" object type."""
        if cls._cached_task_object_id is None:
            task_object_type = session.query(
                "ObjectType where name is \"Task\""
            ).one()
            cls._cached_task_object_id = task_object_type["id"]
        return cls._cached_task_object_id

    @classmethod
    def interest_object_ids(cls, session):
        """Return (cached) object type ids of ``interest_entity_types``."""
        if cls._cached_interest_object_ids is None:
            object_types = session.query(
                "ObjectType where name in ({})".format(
                    cls.join_keys(cls.interest_entity_types)
                )
            ).all()
            cls._cached_interest_object_ids = tuple(
                object_type["id"]
                for object_type in object_types
            )
        return cls._cached_interest_object_ids

    def launch(self, session, event):
        """Process an ftrack update event and push attribute values.

        NOTE(review): structure reconstructed from a whitespace-mangled
        diff; commit is issued once per changed attribute key.
        """
        # {entity_id: {hierarchical_key: new_value}}
        interesting_data = self.extract_interesting_data(session, event)
        if not interesting_data:
            return

        entities = self.get_entities(session, interesting_data)
        if not entities:
            return

        entities_by_id = {
            entity["id"]: entity
            for entity in entities
        }
        # Drop changes of entities that are not of an interesting type.
        for entity_id in tuple(interesting_data.keys()):
            if entity_id not in entities_by_id:
                interesting_data.pop(entity_id)

        task_entities = self.get_task_entities(session, interesting_data)

        # {object_type_id: {attribute_key: configuration_id}}
        attrs_by_obj_id = self.attrs_configurations(session)
        if not attrs_by_obj_id:
            self.log.warning((
                "There is not created Custom Attributes {}"
                " for \"Task\" entity type."
            ).format(self.join_keys(self.interest_attributes)))
            return

        task_entities_by_parent_id = collections.defaultdict(list)
        for task_entity in task_entities:
            task_entities_by_parent_id[task_entity["parent_id"]].append(
                task_entity
            )

        # Collects attribute keys that have no configuration for a given
        # object type; reported once at the end.
        missing_keys_by_object_name = collections.defaultdict(set)
        for parent_id, values in interesting_data.items():
            # Targets are the changed entity itself plus its child tasks.
            entities = task_entities_by_parent_id.get(parent_id) or []
            entities.append(entities_by_id[parent_id])
            for hier_key, value in values.items():
                changed_ids = []
                for entity in entities:
                    key = self.interest_attr_mapping[hier_key]
                    entity_attrs_mapping = (
                        attrs_by_obj_id.get(entity["object_type_id"])
                    )
                    if not entity_attrs_mapping:
                        missing_keys_by_object_name[entity.entity_type].add(
                            key
                        )
                        continue

                    configuration_id = entity_attrs_mapping.get(key)
                    if not configuration_id:
                        missing_keys_by_object_name[entity.entity_type].add(
                            key
                        )
                        continue

                    changed_ids.append(entity["id"])
                    entity_key = collections.OrderedDict({
                        "configuration_id": configuration_id,
                        "entity_id": entity["id"]
                    })
                    if value is None:
                        # Removed value -> delete the stored attribute value.
                        op = ftrack_api.operation.DeleteEntityOperation(
                            "CustomAttributeValue",
                            entity_key
                        )
                    else:
                        op = ftrack_api.operation.UpdateEntityOperation(
                            "ContextCustomAttributeValue",
                            entity_key,
                            "value",
                            ftrack_api.symbol.NOT_SET,
                            value
                        )
                    # Push the raw operation; committed below.
                    session.recorded_operations.push(op)

                self.log.info((
                    "Changing Custom Attribute \"{}\" to value"
                    " \"{}\" on entities: {}"
                ).format(key, value, self.join_keys(changed_ids)))
                try:
                    session.commit()
                except Exception:
                    session.rollback()
                    self.log.warning(
                        "Changing of values failed.",
                        exc_info=True
                    )

        if not missing_keys_by_object_name:
            return

        msg_items = []
        for object_name, missing_keys in missing_keys_by_object_name.items():
            msg_items.append(
                "{}: ({})".format(object_name, self.join_keys(missing_keys))
            )
        self.log.warning((
            "Missing Custom Attribute configuration"
            " per specific object types: {}"
        ).format(", ".join(msg_items)))

    def extract_interesting_data(self, session, event):
        """Return {entity_id: {attr_key: new_value}} of relevant changes."""
        # Filter if event contain relevant data
        entities_info = event["data"].get("entities")
        if not entities_info:
            return

        interesting_data = {}
        for entity_info in entities_info:
            # Care only about tasks
            if entity_info.get("entityType") != "task":
                continue

            # Care only about entities that carry changes
            changes = entity_info.get("changes") or {}
            if not changes:
                continue

            # Care only about changes of specific keys
            entity_changes = {}
            for key in self.interest_attributes:
                if key in changes:
                    entity_changes[key] = changes[key]["new"]

            if not entity_changes:
                continue

            # Do not care about "Task" entity_type
            task_object_id = self.task_object_id(session)
            if entity_info.get("objectTypeId") == task_object_id:
                continue

            interesting_data[entity_info["entityId"]] = entity_changes
        return interesting_data

    def get_entities(self, session, interesting_data):
        """Query changed entities, keeping only interesting object types."""
        entities = session.query(
            "TypedContext where id in ({})".format(
                self.join_keys(interesting_data.keys())
            )
        ).all()
        output = []
        interest_object_ids = self.interest_object_ids(session)
        for entity in entities:
            if entity["object_type_id"] in interest_object_ids:
                output.append(entity)
        return output

    def get_task_entities(self, session, interesting_data):
        """Query child Task entities of all changed entities."""
        return session.query(
            "Task where parent_id in ({})".format(
                self.join_keys(interesting_data.keys())
            )
        ).all()

    def attrs_configurations(self, session):
        """Return {object_type_id: {attr_key: configuration_id}}."""
        object_ids = list(self.interest_object_ids(session))
        object_ids.append(self.task_object_id(session))

        attrs = session.query(self.cust_attrs_query.format(
            self.join_keys(self.interest_attr_mapping.values()),
            self.join_keys(object_ids)
        )).all()
        output = {}
        for attr in attrs:
            obj_id = attr["object_type_id"]
            if obj_id not in output:
                output[obj_id] = {}
            output[obj_id][attr["key"]] = attr["id"]
        return output
def register(session, plugins_presets):
    '''Register plugin. Called when used as a plugin.'''
    PushFrameValuesToTaskEvent(session, plugins_presets).register()

View file

@ -19,12 +19,12 @@ from pype.modules.ftrack.lib.avalon_sync import (
import ftrack_api
from pype.modules.ftrack import BaseEvent
from pype.modules.ftrack.lib.io_nonsingleton import DbConnector
from avalon.api import AvalonMongoDB
class SyncToAvalonEvent(BaseEvent):
dbcon = DbConnector()
dbcon = AvalonMongoDB()
interest_entTypes = ["show", "task"]
ignore_ent_types = ["Milestone"]

View file

@ -0,0 +1,222 @@
import collections
from pype.modules.ftrack import BaseEvent
class TaskToVersionStatus(BaseEvent):
    """Changes status of task's latest AssetVersions on its status change.

    Only AssetVersions whose asset type is listed in presets attribute
    ``asset_types_of_focus`` are updated, and only those that carry the
    task's highest version number.
    """

    # Attribute for caching session user id
    _cached_user_id = None

    # Presets usage - asset type names whose versions follow task status.
    # Filled from presets; normalized to lowercase strings in `register`.
    asset_types_of_focus = []

    def register(self, *args, **kwargs):
        """Validate and normalize presets, then register the handler.

        Raises:
            Exception: When presets did not set `asset_types_of_focus`.
        """
        # Skip registration if attribute `asset_types_of_focus` is not set
        modified_asset_types_of_focus = list()
        if self.asset_types_of_focus:
            # Accept a single string from presets as a one-item list
            if isinstance(self.asset_types_of_focus, str):
                self.asset_types_of_focus = [self.asset_types_of_focus]

            # Lowercase names for case-insensitive comparison later
            for asset_type_name in self.asset_types_of_focus:
                modified_asset_types_of_focus.append(
                    asset_type_name.lower()
                )

        if not modified_asset_types_of_focus:
            raise Exception((
                "Event handler \"{}\" does not"
                " have set presets for attribute \"{}\""
            ).format(self.__class__.__name__, "asset_types_of_focus"))

        self.asset_types_of_focus = modified_asset_types_of_focus

        return super(TaskToVersionStatus, self).register(*args, **kwargs)

    def is_event_invalid(self, session, event):
        """Return True when event was triggered by this session's user.

        Ignoring own events prevents feedback loops of status changes.
        """
        # Cache user id of currently running session
        if self._cached_user_id is None:
            session_user_entity = session.query(
                "User where username is \"{}\"".format(session.api_user)
            ).first()
            if not session_user_entity:
                self.log.warning(
                    "Couldn't query Ftrack user with username \"{}\"".format(
                        session.api_user
                    )
                )
                # Can't decide ownership - treat the event as valid
                return False
            self._cached_user_id = session_user_entity["id"]

        # Skip processing if current session user was the user who created
        # the event
        user_info = event["source"].get("user") or {}
        user_id = user_info.get("id")

        # Mark as invalid if user is unknown
        if user_id is None:
            return True
        return user_id == self._cached_user_id

    def filter_event_entities(self, event):
        """Return entity infos of tasks whose status actually changed."""
        # Filter if event contain relevant data
        entities_info = event["data"].get("entities")
        if not entities_info:
            return

        filtered_entities = []
        for entity_info in entities_info:
            # Care only about tasks
            if entity_info.get("entityType") != "task":
                continue

            # Care only about changes of status
            changes = entity_info.get("changes") or {}
            statusid_changes = changes.get("statusid") or {}
            if (
                statusid_changes.get("new") is None
                or statusid_changes.get("old") is None
            ):
                continue

            filtered_entities.append(entity_info)

        return filtered_entities

    def _get_ent_path(self, entity):
        # Human readable hierarchy path, e.g. "Project/Seq/Shot/Task".
        return "/".join(
            [ent["name"] for ent in entity["link"]]
        )

    def launch(self, session, event):
        '''Propagates status from task to its latest AssetVersions.'''
        if self.is_event_invalid(session, event):
            return

        filtered_entity_infos = self.filter_event_entities(event)
        if not filtered_entity_infos:
            return

        task_ids = [
            entity_info["entityId"]
            for entity_info in filtered_entity_infos
        ]
        joined_ids = ",".join(
            ["\"{}\"".format(entity_id) for entity_id in task_ids]
        )
        # Query tasks' AssetVersions (descending order is required by
        # `last_asset_version_by_task_id`)
        asset_versions = session.query((
            "AssetVersion where task_id in ({}) order by version descending"
        ).format(joined_ids)).all()

        last_asset_version_by_task_id = (
            self.last_asset_version_by_task_id(asset_versions, task_ids)
        )
        if not last_asset_version_by_task_id:
            return

        # Query Task entities for last asset versions
        joined_filtered_ids = ",".join([
            "\"{}\"".format(entity_id)
            for entity_id in last_asset_version_by_task_id.keys()
        ])
        task_entities = session.query(
            "Task where id in ({})".format(joined_filtered_ids)
        ).all()
        if not task_entities:
            return

        # Final process of changing statuses
        av_statuses_by_low_name = self.asset_version_statuses(task_entities[0])

        for task_entity in task_entities:
            task_id = task_entity["id"]
            task_path = self._get_ent_path(task_entity)
            task_status_name = task_entity["status"]["name"]
            task_status_name_low = task_status_name.lower()
            last_asset_versions = last_asset_version_by_task_id[task_id]
            for last_asset_version in last_asset_versions:
                self.log.debug((
                    "Trying to change status of last AssetVersion {}"
                    " for task \"{}\""
                ).format(last_asset_version["version"], task_path))

                new_asset_version_status = av_statuses_by_low_name.get(
                    task_status_name_low
                )
                # Skip if tasks status is not available to AssetVersion
                if not new_asset_version_status:
                    self.log.debug((
                        "AssetVersion does not have matching status to \"{}\""
                    ).format(task_status_name))
                    continue

                av_ent_path = task_path + " Asset {} AssetVersion {}".format(
                    last_asset_version["asset"]["name"],
                    last_asset_version["version"]
                )
                # Skip if current AssetVersion's status is same
                current_status_name = last_asset_version["status"]["name"]
                if current_status_name.lower() == task_status_name_low:
                    self.log.debug((
                        "AssetVersion already has set status \"{}\". \"{}\""
                    ).format(current_status_name, av_ent_path))
                    continue

                # Change the status
                try:
                    last_asset_version["status"] = new_asset_version_status
                    session.commit()
                    self.log.info("[ {} ] Status updated to [ {} ]".format(
                        av_ent_path, new_asset_version_status["name"]
                    ))
                except Exception:
                    session.rollback()
                    self.log.warning(
                        "[ {} ]Status couldn't be set to \"{}\"".format(
                            av_ent_path, new_asset_version_status["name"]
                        ),
                        exc_info=True
                    )

    def asset_version_statuses(self, entity):
        """Return {lowered status name: status} available to AssetVersion."""
        project_entity = self.get_project_from_entity(entity)
        project_schema = project_entity["project_schema"]
        # Get all available statuses for AssetVersion
        statuses = project_schema.get_statuses("AssetVersion")
        # map lowered status name with it's object
        av_statuses_by_low_name = {
            status["name"].lower(): status for status in statuses
        }
        return av_statuses_by_low_name

    def last_asset_version_by_task_id(self, asset_versions, task_ids):
        """Group latest AssetVersions of focused asset types by task id.

        Args:
            asset_versions: AssetVersions ordered by version descending.
            task_ids: Ids of tasks that may receive status propagation.

        Returns:
            collections.defaultdict: {task_id: [AssetVersion, ...]} with
                only versions equal to the task's highest version number.
        """
        last_asset_version_by_task_id = collections.defaultdict(list)
        last_version_by_task_id = {}
        poping_entity_ids = set(task_ids)
        for asset_version in asset_versions:
            asset_type_name_low = (
                asset_version["asset"]["type"]["name"].lower()
            )
            if asset_type_name_low not in self.asset_types_of_focus:
                continue

            task_id = asset_version["task_id"]
            last_version = last_version_by_task_id.get(task_id)
            if last_version is None:
                # First (= highest) version seen for this task
                last_version_by_task_id[task_id] = asset_version["version"]
            elif last_version != asset_version["version"]:
                # A lower version reached - this task is fully collected.
                # `discard` instead of `remove`: a task with several lower
                # versions hits this branch repeatedly and `remove` would
                # raise KeyError on the second one.
                poping_entity_ids.discard(task_id)
                if not poping_entity_ids:
                    break

            if task_id in poping_entity_ids:
                last_asset_version_by_task_id[task_id].append(asset_version)
        return last_asset_version_by_task_id
def register(session, plugins_presets):
    '''Register plugin. Called when used as a plugin.'''
    TaskToVersionStatus(session, plugins_presets).register()

View file

@ -4,7 +4,7 @@ import subprocess
from pype.modules.ftrack import BaseEvent
from pype.modules.ftrack.lib.avalon_sync import CUST_ATTR_ID_KEY
from pype.modules.ftrack.lib.io_nonsingleton import DbConnector
from avalon.api import AvalonMongoDB
from bson.objectid import ObjectId
@ -37,7 +37,7 @@ class UserAssigmentEvent(BaseEvent):
3) path to publish files of task user was (de)assigned to
"""
db_con = DbConnector()
db_con = AvalonMongoDB()
def error(self, *err):
for e in err:

View file

@ -5,7 +5,7 @@ import json
import collections
import copy
from pype.modules.ftrack.lib.io_nonsingleton import DbConnector
from avalon.api import AvalonMongoDB
import avalon
import avalon.api
@ -240,7 +240,7 @@ def get_hierarchical_attributes(session, entity, attr_names, attr_defaults={}):
class SyncEntitiesFactory:
dbcon = DbConnector()
dbcon = AvalonMongoDB()
project_query = (
"select full_name, name, custom_attributes"

View file

@ -1,16 +1,6 @@
import os
import sys
import copy
import platform
import avalon.lib
import acre
import getpass
from pype import lib as pypelib
from pype.api import config, Anatomy
from pype.api import config
from .ftrack_action_handler import BaseAction
from avalon.api import (
last_workfile, HOST_WORKFILE_EXTENSIONS, should_start_last_workfile
)
class AppAction(BaseAction):
@ -156,43 +146,23 @@ class AppAction(BaseAction):
entity = entities[0]
task_name = entity["name"]
project_name = entity["project"]["full_name"]
database = pypelib.get_avalon_database()
asset_name = entity["parent"]["name"]
asset_document = database[project_name].find_one({
"type": "asset",
"name": asset_name
})
hierarchy = ""
asset_doc_parents = asset_document["data"].get("parents")
if asset_doc_parents:
hierarchy = os.path.join(*asset_doc_parents)
application = avalon.lib.get_application(self.identifier)
host_name = application["application_dir"]
data = {
"project": {
"name": entity["project"]["full_name"],
"code": entity["project"]["name"]
},
"task": task_name,
"asset": asset_name,
"app": host_name,
"hierarchy": hierarchy
}
project_name = entity["project"]["full_name"]
try:
anatomy = Anatomy(project_name)
anatomy_filled = anatomy.format(data)
workdir = os.path.normpath(anatomy_filled["work"]["folder"])
pypelib.launch_application(
project_name, asset_name, task_name, self.identifier
)
except Exception as exc:
msg = "Error in anatomy.format: {}".format(
str(exc)
except pypelib.ApplicationLaunchFailed as exc:
self.log.error(str(exc))
return {
"success": False,
"message": str(exc)
}
except Exception:
msg = "Unexpected failure of application launch {}".format(
self.label
)
self.log.error(msg, exc_info=True)
return {
@ -200,160 +170,6 @@ class AppAction(BaseAction):
"message": msg
}
try:
os.makedirs(workdir)
except FileExistsError:
pass
last_workfile_path = None
extensions = HOST_WORKFILE_EXTENSIONS.get(host_name)
if extensions:
# Find last workfile
file_template = anatomy.templates["work"]["file"]
data.update({
"version": 1,
"user": getpass.getuser(),
"ext": extensions[0]
})
last_workfile_path = last_workfile(
workdir, file_template, data, extensions, True
)
# set environments for Avalon
prep_env = copy.deepcopy(os.environ)
prep_env.update({
"AVALON_PROJECT": project_name,
"AVALON_ASSET": asset_name,
"AVALON_TASK": task_name,
"AVALON_APP": host_name,
"AVALON_APP_NAME": self.identifier,
"AVALON_HIERARCHY": hierarchy,
"AVALON_WORKDIR": workdir
})
start_last_workfile = should_start_last_workfile(
project_name, host_name, task_name
)
# Store boolean as "0"(False) or "1"(True)
prep_env["AVALON_OPEN_LAST_WORKFILE"] = (
str(int(bool(start_last_workfile)))
)
if (
start_last_workfile
and last_workfile_path
and os.path.exists(last_workfile_path)
):
prep_env["AVALON_LAST_WORKFILE"] = last_workfile_path
prep_env.update(anatomy.roots_obj.root_environments())
# collect all parents from the task
parents = []
for item in entity['link']:
parents.append(session.get(item['type'], item['id']))
# collect all the 'environment' attributes from parents
tools_attr = [prep_env["AVALON_APP"], prep_env["AVALON_APP_NAME"]]
tools_env = asset_document["data"].get("tools_env") or []
tools_attr.extend(tools_env)
tools_env = acre.get_tools(tools_attr)
env = acre.compute(tools_env)
env = acre.merge(env, current_env=dict(prep_env))
# Get path to execute
st_temp_path = os.environ["PYPE_CONFIG"]
os_plat = platform.system().lower()
# Path to folder with launchers
path = os.path.join(st_temp_path, "launchers", os_plat)
# Full path to executable launcher
execfile = None
if application.get("launch_hook"):
hook = application.get("launch_hook")
self.log.info("launching hook: {}".format(hook))
ret_val = pypelib.execute_hook(
application.get("launch_hook"), env=env)
if not ret_val:
return {
'success': False,
'message': "Hook didn't finish successfully {0}"
.format(self.label)
}
if sys.platform == "win32":
for ext in os.environ["PATHEXT"].split(os.pathsep):
fpath = os.path.join(path.strip('"'), self.executable + ext)
if os.path.isfile(fpath) and os.access(fpath, os.X_OK):
execfile = fpath
break
# Run SW if was found executable
if execfile is None:
return {
"success": False,
"message": "We didn't find launcher for {0}".format(
self.label
)
}
popen = avalon.lib.launch(
executable=execfile, args=[], environment=env
)
elif (sys.platform.startswith("linux")
or sys.platform.startswith("darwin")):
execfile = os.path.join(path.strip('"'), self.executable)
if not os.path.isfile(execfile):
msg = "Launcher doesn't exist - {}".format(execfile)
self.log.error(msg)
return {
"success": False,
"message": msg
}
try:
fp = open(execfile)
except PermissionError as perm_exc:
msg = "Access denied on launcher {} - {}".format(
execfile, perm_exc
)
self.log.exception(msg, exc_info=True)
return {
"success": False,
"message": msg
}
fp.close()
# check executable permission
if not os.access(execfile, os.X_OK):
msg = "No executable permission - {}".format(execfile)
self.log.error(msg)
return {
"success": False,
"message": msg
}
# Run SW if was found executable
if execfile is None:
return {
"success": False,
"message": "We didn't found launcher for {0}".format(
self.label
)
}
popen = avalon.lib.launch( # noqa: F841
"/usr/bin/env", args=["bash", execfile], environment=env
)
# Change status of task to In progress
presets = config.get_presets()["ftrack"]["ftrack_config"]

View file

@ -1,460 +0,0 @@
"""
Wrapper around interactions with the database
Copy of io module in avalon-core.
- In this case not working as singleton with api.Session!
"""
import os
import time
import errno
import shutil
import logging
import tempfile
import functools
import contextlib
from avalon import schema
from avalon.vendor import requests
from avalon.io import extract_port_from_url
# Third-party dependencies
import pymongo
def auto_reconnect(func):
    """Decorator retrying a Mongo call up to 3 times on `AutoReconnect`.

    The wrapped callable must be a method (``args[0]`` is the instance,
    whose ``log`` attribute is used for error reporting). After three
    failed attempts the last ``AutoReconnect`` error is re-raised.
    """
    @functools.wraps(func)
    def decorated(*args, **kwargs):
        owner = args[0]
        last_exc = None
        for _retry in range(3):
            try:
                return func(*args, **kwargs)
            except pymongo.errors.AutoReconnect as exc:
                # Keep the exception: a bare `raise` after the loop would
                # fail with RuntimeError because the except block has
                # already exited by then.
                last_exc = exc
                owner.log.error("Reconnecting..")
                time.sleep(0.1)
        raise last_exc
    return decorated
class DbConnector(object):
    """Wrapper around interactions with the Avalon MongoDB.

    Copy of the ``io`` module in avalon-core, but NOT working as a
    singleton bound to ``api.Session`` - every instance keeps its own
    ``Session`` dict, mongo client and active project.
    """

    log = logging.getLogger(__name__)

    def __init__(self):
        # Session data filled from environment on `install`
        self.Session = {}
        self._mongo_client = None
        self._sentry_client = None
        self._sentry_logging_handler = None
        self._database = None
        self._is_installed = False

    def __getitem__(self, key):
        # gives direct access to collection without setting `active_table`
        return self._database[key]

    def __getattribute__(self, attr):
        # not all methods of PyMongo database are implemented with this it is
        # possible to use them too
        try:
            return super(DbConnector, self).__getattribute__(attr)
        except AttributeError:
            # Unknown attributes are proxied to the active project's
            # collection object.
            cur_proj = self.Session["AVALON_PROJECT"]
            return self._database[cur_proj].__getattribute__(attr)

    def install(self):
        """Establish a persistent connection to the database"""
        if self._is_installed:
            return

        logging.basicConfig()
        self.Session.update(self._from_environment())

        timeout = int(self.Session["AVALON_TIMEOUT"])
        mongo_url = self.Session["AVALON_MONGO"]
        kwargs = {
            "host": mongo_url,
            "serverSelectionTimeoutMS": timeout
        }

        # Explicit port keyword wins over one embedded in the URL
        port = extract_port_from_url(mongo_url)
        if port is not None:
            kwargs["port"] = int(port)

        self._mongo_client = pymongo.MongoClient(**kwargs)

        # Ping the server with 3 retries, growing the timeout each time
        for retry in range(3):
            try:
                t1 = time.time()
                self._mongo_client.server_info()
            except Exception:
                self.log.error("Retrying..")
                time.sleep(1)
                timeout *= 1.5
            else:
                break
        else:
            raise IOError(
                "ERROR: Couldn't connect to %s in "
                "less than %.3f ms" % (self.Session["AVALON_MONGO"], timeout))

        self.log.info("Connected to %s, delay %.3f s" % (
            self.Session["AVALON_MONGO"], time.time() - t1))

        self._install_sentry()

        self._database = self._mongo_client[self.Session["AVALON_DB"]]
        self._is_installed = True

    def _install_sentry(self):
        # Optional error reporting; active only when AVALON_SENTRY is set
        # and `raven` is importable.
        if "AVALON_SENTRY" not in self.Session:
            return

        try:
            from raven import Client
            from raven.handlers.logging import SentryHandler
            from raven.conf import setup_logging
        except ImportError:
            # Note: There was a Sentry address in this Session
            return self.log.warning("Sentry disabled, raven not installed")

        client = Client(self.Session["AVALON_SENTRY"])

        # Transmit log messages to Sentry
        handler = SentryHandler(client)
        handler.setLevel(logging.WARNING)

        setup_logging(handler)

        self._sentry_client = client
        self._sentry_logging_handler = handler
        self.log.info(
            "Connected to Sentry @ %s" % self.Session["AVALON_SENTRY"]
        )

    def _from_environment(self):
        """Build a Session dict from AVALON_* environment variables.

        Entries whose environment variable is unset and whose default is
        None are omitted entirely.
        """
        Session = {
            item[0]: os.getenv(item[0], item[1])
            for item in (
                # Root directory of projects on disk
                ("AVALON_PROJECTS", None),

                # Name of current Project
                ("AVALON_PROJECT", ""),

                # Name of current Asset
                ("AVALON_ASSET", ""),

                # Name of current silo
                ("AVALON_SILO", ""),

                # Name of current task
                ("AVALON_TASK", None),

                # Name of current app
                ("AVALON_APP", None),

                # Path to working directory
                ("AVALON_WORKDIR", None),

                # Name of current Config
                # TODO(marcus): Establish a suitable default config
                ("AVALON_CONFIG", "no_config"),

                # Name of Avalon in graphical user interfaces
                # Use this to customise the visual appearance of Avalon
                # to better integrate with your surrounding pipeline
                ("AVALON_LABEL", "Avalon"),

                # Used during any connections to the outside world
                ("AVALON_TIMEOUT", "1000"),

                # Address to Asset Database
                ("AVALON_MONGO", "mongodb://localhost:27017"),

                # Name of database used in MongoDB
                ("AVALON_DB", "avalon"),

                # Address to Sentry
                ("AVALON_SENTRY", None),

                # Address to Deadline Web Service
                # E.g. http://192.167.0.1:8082
                ("AVALON_DEADLINE", None),

                # Enable features not necessarily stable. The user's own risk
                ("AVALON_EARLY_ADOPTER", None),

                # Address of central asset repository, contains
                # the following interface:
                #   /upload
                #   /download
                #   /manager (optional)
                ("AVALON_LOCATION", "http://127.0.0.1"),

                # Boolean of whether to upload published material
                # to central asset repository
                ("AVALON_UPLOAD", None),

                # Generic username and password
                ("AVALON_USERNAME", "avalon"),
                ("AVALON_PASSWORD", "secret"),

                # Unique identifier for instances in working files
                ("AVALON_INSTANCE_ID", "avalon.instance"),
                ("AVALON_CONTAINER_ID", "avalon.container"),

                # Enable debugging
                ("AVALON_DEBUG", None),
            ) if os.getenv(item[0], item[1]) is not None
        }

        Session["schema"] = "avalon-core:session-2.0"
        try:
            schema.validate(Session)
        except schema.ValidationError as e:
            # TODO(marcus): Make this mandatory
            self.log.warning(e)

        return Session

    def uninstall(self):
        """Close any connection to the database"""
        try:
            self._mongo_client.close()
        except AttributeError:
            # Never installed; nothing to close
            pass

        self._mongo_client = None
        self._database = None
        self._is_installed = False

    def active_project(self):
        """Return the name of the active project"""
        return self.Session["AVALON_PROJECT"]

    def activate_project(self, project_name):
        # Switch the collection used by all project-scoped methods below
        self.Session["AVALON_PROJECT"] = project_name

    def projects(self):
        """List available projects

        Returns:
            list of project documents

        """
        collection_names = self.collections()
        for project in collection_names:
            # Skip Mongo system collections
            if project in ("system.indexes",):
                continue

            # Each collection will have exactly one project document
            document = self.find_project(project)

            if document is not None:
                yield document

    def locate(self, path):
        """Traverse a hierarchy from top-to-bottom

        Example:
            representation = locate(["hulk", "Bruce", "modelDefault", 1, "ma"])

        Returns:
            representation (ObjectId)

        """
        components = zip(
            ("project", "asset", "subset", "version", "representation"),
            path
        )

        parent = None
        for type_, name in components:
            # A version of None or -1 means "latest"
            latest = (type_ == "version") and name in (None, -1)

            try:
                if latest:
                    parent = self.find_one(
                        filter={
                            "type": type_,
                            "parent": parent
                        },
                        projection={"_id": 1},
                        sort=[("name", -1)]
                    )["_id"]
                else:
                    parent = self.find_one(
                        filter={
                            "type": type_,
                            "name": name,
                            "parent": parent
                        },
                        projection={"_id": 1},
                    )["_id"]

            except TypeError:
                # find_one returned None - path does not exist
                return None

        return parent

    @auto_reconnect
    def collections(self):
        # Names of all collections in the database
        return self._database.collection_names()

    @auto_reconnect
    def find_project(self, project):
        # The single "project" document of given collection (or None)
        return self._database[project].find_one({"type": "project"})

    @auto_reconnect
    def insert_one(self, item):
        assert isinstance(item, dict), "item must be of type <dict>"
        schema.validate(item)
        return self._database[self.Session["AVALON_PROJECT"]].insert_one(item)

    @auto_reconnect
    def insert_many(self, items, ordered=True):
        # check if all items are valid
        assert isinstance(items, list), "`items` must be of type <list>"
        for item in items:
            assert isinstance(item, dict), "`item` must be of type <dict>"
            schema.validate(item)

        return self._database[self.Session["AVALON_PROJECT"]].insert_many(
            items,
            ordered=ordered)

    @auto_reconnect
    def find(self, filter, projection=None, sort=None):
        return self._database[self.Session["AVALON_PROJECT"]].find(
            filter=filter,
            projection=projection,
            sort=sort
        )

    @auto_reconnect
    def find_one(self, filter, projection=None, sort=None):
        assert isinstance(filter, dict), "filter must be <dict>"

        return self._database[self.Session["AVALON_PROJECT"]].find_one(
            filter=filter,
            projection=projection,
            sort=sort
        )

    @auto_reconnect
    def save(self, *args, **kwargs):
        return self._database[self.Session["AVALON_PROJECT"]].save(
            *args, **kwargs)

    @auto_reconnect
    def replace_one(self, filter, replacement):
        return self._database[self.Session["AVALON_PROJECT"]].replace_one(
            filter, replacement)

    @auto_reconnect
    def update_many(self, filter, update):
        return self._database[self.Session["AVALON_PROJECT"]].update_many(
            filter, update)

    @auto_reconnect
    def distinct(self, *args, **kwargs):
        return self._database[self.Session["AVALON_PROJECT"]].distinct(
            *args, **kwargs)

    @auto_reconnect
    def drop(self, *args, **kwargs):
        return self._database[self.Session["AVALON_PROJECT"]].drop(
            *args, **kwargs)

    @auto_reconnect
    def delete_many(self, *args, **kwargs):
        return self._database[self.Session["AVALON_PROJECT"]].delete_many(
            *args, **kwargs)

    def parenthood(self, document):
        """Return list of parent documents from `document` up the hierarchy."""
        assert document is not None, "This is a bug"

        parents = list()

        while document.get("parent") is not None:
            document = self.find_one({"_id": document["parent"]})

            if document is None:
                break

            if document.get("type") == "master_version":
                # Master versions borrow their data from the source version
                _document = self.find_one({"_id": document["version_id"]})
                document["data"] = _document["data"]

            parents.append(document)

        return parents

    @contextlib.contextmanager
    def tempdir(self):
        # Temporary directory removed when the context exits
        tempdir = tempfile.mkdtemp()
        try:
            yield tempdir
        finally:
            shutil.rmtree(tempdir)

    def download(self, src, dst):
        """Download `src` to `dst`

        Arguments:
            src (str): URL to source file
            dst (str): Absolute path to destination file

        Yields tuple (progress, error):
            progress (int): Between 0-100
            error (Exception): Any exception raised when first making connection

        """
        try:
            response = requests.get(
                src,
                stream=True,
                auth=requests.auth.HTTPBasicAuth(
                    self.Session["AVALON_USERNAME"],
                    self.Session["AVALON_PASSWORD"]
                )
            )
        except requests.ConnectionError as e:
            yield None, e
            return

        with self.tempdir() as dirname:
            tmp = os.path.join(dirname, os.path.basename(src))
            with open(tmp, "wb") as f:
                total_length = response.headers.get("content-length")

                if total_length is None:  # no content length header
                    f.write(response.content)
                else:
                    downloaded = 0
                    total_length = int(total_length)
                    for data in response.iter_content(chunk_size=4096):
                        downloaded += len(data)
                        f.write(data)

                        yield int(100.0 * downloaded / total_length), None

            try:
                os.makedirs(os.path.dirname(dst))
            except OSError as e:
                # An already existing destination directory is fine.
                if e.errno != errno.EEXIST:
                    raise

            shutil.copy(tmp, dst)

View file

@ -2,7 +2,7 @@ import os
import time
import datetime
import threading
from Qt import QtCore, QtWidgets
from Qt import QtCore, QtWidgets, QtGui
import ftrack_api
from ..ftrack_server.lib import check_ftrack_url
@ -10,7 +10,7 @@ from ..ftrack_server import socket_thread
from ..lib import credentials
from . import login_dialog
from pype.api import Logger
from pype.api import Logger, resources
log = Logger().get_logger("FtrackModule", "ftrack")
@ -19,7 +19,7 @@ log = Logger().get_logger("FtrackModule", "ftrack")
class FtrackModule:
def __init__(self, main_parent=None, parent=None):
self.parent = parent
self.widget_login = login_dialog.Login_Dialog_ui(self)
self.thread_action_server = None
self.thread_socket_server = None
self.thread_timer = None
@ -29,8 +29,22 @@ class FtrackModule:
self.bool_action_thread_running = False
self.bool_timer_event = False
self.widget_login = login_dialog.CredentialsDialog()
self.widget_login.login_changed.connect(self.on_login_change)
self.widget_login.logout_signal.connect(self.on_logout)
self.action_credentials = None
self.icon_logged = QtGui.QIcon(
resources.get_resource("icons", "circle_green.png")
)
self.icon_not_logged = QtGui.QIcon(
resources.get_resource("icons", "circle_orange.png")
)
def show_login_widget(self):
self.widget_login.show()
self.widget_login.activateWindow()
self.widget_login.raise_()
def validate(self):
validation = False
@ -39,9 +53,10 @@ class FtrackModule:
ft_api_key = cred.get("api_key")
validation = credentials.check_credentials(ft_user, ft_api_key)
if validation:
self.widget_login.set_credentials(ft_user, ft_api_key)
credentials.set_env(ft_user, ft_api_key)
log.info("Connected to Ftrack successfully")
self.loginChange()
self.on_login_change()
return validation
@ -60,15 +75,28 @@ class FtrackModule:
return validation
# Necessary - login_dialog works with this method after logging in
def loginChange(self):
def on_login_change(self):
self.bool_logged = True
if self.action_credentials:
self.action_credentials.setIcon(self.icon_logged)
self.action_credentials.setToolTip(
"Logged as user \"{}\"".format(
self.widget_login.user_input.text()
)
)
self.set_menu_visibility()
self.start_action_server()
def logout(self):
def on_logout(self):
credentials.clear_credentials()
self.stop_action_server()
if self.action_credentials:
self.action_credentials.setIcon(self.icon_not_logged)
self.action_credentials.setToolTip("Logged out")
log.info("Logged out of Ftrack")
self.bool_logged = False
self.set_menu_visibility()
@ -218,43 +246,45 @@ class FtrackModule:
# Definition of Tray menu
def tray_menu(self, parent_menu):
# Menu for Tray App
self.menu = QtWidgets.QMenu('Ftrack', parent_menu)
self.menu.setProperty('submenu', 'on')
# Actions - server
self.smActionS = self.menu.addMenu("Action server")
self.aRunActionS = QtWidgets.QAction(
"Run action server", self.smActionS
)
self.aResetActionS = QtWidgets.QAction(
"Reset action server", self.smActionS
)
self.aStopActionS = QtWidgets.QAction(
"Stop action server", self.smActionS
)
self.aRunActionS.triggered.connect(self.start_action_server)
self.aResetActionS.triggered.connect(self.reset_action_server)
self.aStopActionS.triggered.connect(self.stop_action_server)
self.smActionS.addAction(self.aRunActionS)
self.smActionS.addAction(self.aResetActionS)
self.smActionS.addAction(self.aStopActionS)
tray_menu = QtWidgets.QMenu("Ftrack", parent_menu)
# Actions - basic
self.aLogin = QtWidgets.QAction("Login", self.menu)
self.aLogin.triggered.connect(self.validate)
self.aLogout = QtWidgets.QAction("Logout", self.menu)
self.aLogout.triggered.connect(self.logout)
action_credentials = QtWidgets.QAction("Credentials", tray_menu)
action_credentials.triggered.connect(self.show_login_widget)
if self.bool_logged:
icon = self.icon_logged
else:
icon = self.icon_not_logged
action_credentials.setIcon(icon)
tray_menu.addAction(action_credentials)
self.action_credentials = action_credentials
self.menu.addAction(self.aLogin)
self.menu.addAction(self.aLogout)
# Actions - server
tray_server_menu = tray_menu.addMenu("Action server")
self.action_server_run = QtWidgets.QAction(
"Run action server", tray_server_menu
)
self.action_server_reset = QtWidgets.QAction(
"Reset action server", tray_server_menu
)
self.action_server_stop = QtWidgets.QAction(
"Stop action server", tray_server_menu
)
self.action_server_run.triggered.connect(self.start_action_server)
self.action_server_reset.triggered.connect(self.reset_action_server)
self.action_server_stop.triggered.connect(self.stop_action_server)
tray_server_menu.addAction(self.action_server_run)
tray_server_menu.addAction(self.action_server_reset)
tray_server_menu.addAction(self.action_server_stop)
self.tray_server_menu = tray_server_menu
self.bool_logged = False
self.set_menu_visibility()
parent_menu.addMenu(self.menu)
parent_menu.addMenu(tray_menu)
def tray_start(self):
self.validate()
@ -264,19 +294,15 @@ class FtrackModule:
# Definition of visibility of each menu actions
def set_menu_visibility(self):
self.smActionS.menuAction().setVisible(self.bool_logged)
self.aLogin.setVisible(not self.bool_logged)
self.aLogout.setVisible(self.bool_logged)
self.tray_server_menu.menuAction().setVisible(self.bool_logged)
if self.bool_logged is False:
if self.bool_timer_event is True:
self.stop_timer_thread()
return
self.aRunActionS.setVisible(not self.bool_action_server_running)
self.aResetActionS.setVisible(self.bool_action_thread_running)
self.aStopActionS.setVisible(self.bool_action_server_running)
self.action_server_run.setVisible(not self.bool_action_server_running)
self.action_server_reset.setVisible(self.bool_action_thread_running)
self.action_server_stop.setVisible(self.bool_action_server_running)
if self.bool_timer_event is False:
self.start_timer_thread()

View file

@ -7,309 +7,314 @@ from pype.api import resources
from Qt import QtCore, QtGui, QtWidgets
class Login_Dialog_ui(QtWidgets.QWidget):
class CredentialsDialog(QtWidgets.QDialog):
SIZE_W = 300
SIZE_H = 230
loginSignal = QtCore.Signal(object, object, object)
_login_server_thread = None
inputs = []
buttons = []
labels = []
login_changed = QtCore.Signal()
logout_signal = QtCore.Signal()
def __init__(self, parent=None, is_event=False):
def __init__(self, parent=None):
super(CredentialsDialog, self).__init__(parent)
super(Login_Dialog_ui, self).__init__()
self.setWindowTitle("Pype - Ftrack Login")
self.parent = parent
self.is_event = is_event
self._login_server_thread = None
self._is_logged = False
self._in_advance_mode = False
if hasattr(parent, 'icon'):
self.setWindowIcon(self.parent.icon)
elif hasattr(parent, 'parent') and hasattr(parent.parent, 'icon'):
self.setWindowIcon(self.parent.parent.icon)
else:
icon = QtGui.QIcon(resources.pype_icon_filepath())
self.setWindowIcon(icon)
icon = QtGui.QIcon(resources.pype_icon_filepath())
self.setWindowIcon(icon)
self.setWindowFlags(
QtCore.Qt.WindowCloseButtonHint |
QtCore.Qt.WindowMinimizeButtonHint
)
self.loginSignal.connect(self.loginWithCredentials)
self._translate = QtCore.QCoreApplication.translate
self.font = QtGui.QFont()
self.font.setFamily("DejaVu Sans Condensed")
self.font.setPointSize(9)
self.font.setBold(True)
self.font.setWeight(50)
self.font.setKerning(True)
self.resize(self.SIZE_W, self.SIZE_H)
self.setMinimumSize(QtCore.QSize(self.SIZE_W, self.SIZE_H))
self.setMaximumSize(QtCore.QSize(self.SIZE_W+100, self.SIZE_H+100))
self.setMaximumSize(QtCore.QSize(self.SIZE_W + 100, self.SIZE_H + 100))
self.setStyleSheet(style.load_stylesheet())
self.setLayout(self._main())
self.setWindowTitle('Pype - Ftrack Login')
self.login_changed.connect(self._on_login)
def _main(self):
self.main = QtWidgets.QVBoxLayout()
self.main.setObjectName("main")
self.ui_init()
self.form = QtWidgets.QFormLayout()
self.form.setContentsMargins(10, 15, 10, 5)
self.form.setObjectName("form")
self.ftsite_label = QtWidgets.QLabel("FTrack URL:")
self.ftsite_label.setFont(self.font)
self.ftsite_label.setCursor(QtGui.QCursor(QtCore.Qt.ArrowCursor))
self.ftsite_label.setTextFormat(QtCore.Qt.RichText)
self.ftsite_label.setObjectName("user_label")
def ui_init(self):
self.ftsite_label = QtWidgets.QLabel("Ftrack URL:")
self.user_label = QtWidgets.QLabel("Username:")
self.api_label = QtWidgets.QLabel("API Key:")
self.ftsite_input = QtWidgets.QLineEdit()
self.ftsite_input.setEnabled(True)
self.ftsite_input.setFrame(True)
self.ftsite_input.setEnabled(False)
self.ftsite_input.setReadOnly(True)
self.ftsite_input.setObjectName("ftsite_input")
self.user_label = QtWidgets.QLabel("Username:")
self.user_label.setFont(self.font)
self.user_label.setCursor(QtGui.QCursor(QtCore.Qt.ArrowCursor))
self.user_label.setTextFormat(QtCore.Qt.RichText)
self.user_label.setObjectName("user_label")
self.ftsite_input.setCursor(QtGui.QCursor(QtCore.Qt.IBeamCursor))
self.user_input = QtWidgets.QLineEdit()
self.user_input.setEnabled(True)
self.user_input.setFrame(True)
self.user_input.setObjectName("user_input")
self.user_input.setPlaceholderText(
self._translate("main", "user.name")
)
self.user_input.setPlaceholderText("user.name")
self.user_input.textChanged.connect(self._user_changed)
self.api_label = QtWidgets.QLabel("API Key:")
self.api_label.setFont(self.font)
self.api_label.setCursor(QtGui.QCursor(QtCore.Qt.ArrowCursor))
self.api_label.setTextFormat(QtCore.Qt.RichText)
self.api_label.setObjectName("api_label")
self.api_input = QtWidgets.QLineEdit()
self.api_input.setEnabled(True)
self.api_input.setFrame(True)
self.api_input.setObjectName("api_input")
self.api_input.setPlaceholderText(self._translate(
"main", "e.g. xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx"
))
self.api_input.setPlaceholderText(
"e.g. xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx"
)
self.api_input.textChanged.connect(self._api_changed)
input_layout = QtWidgets.QFormLayout()
input_layout.setContentsMargins(10, 15, 10, 5)
input_layout.addRow(self.ftsite_label, self.ftsite_input)
input_layout.addRow(self.user_label, self.user_input)
input_layout.addRow(self.api_label, self.api_input)
self.btn_advanced = QtWidgets.QPushButton("Advanced")
self.btn_advanced.clicked.connect(self._on_advanced_clicked)
self.btn_simple = QtWidgets.QPushButton("Simple")
self.btn_simple.clicked.connect(self._on_simple_clicked)
self.btn_login = QtWidgets.QPushButton("Login")
self.btn_login.setToolTip(
"Set Username and API Key with entered values"
)
self.btn_login.clicked.connect(self._on_login_clicked)
self.btn_ftrack_login = QtWidgets.QPushButton("Ftrack login")
self.btn_ftrack_login.setToolTip("Open browser for Login to Ftrack")
self.btn_ftrack_login.clicked.connect(self._on_ftrack_login_clicked)
self.btn_logout = QtWidgets.QPushButton("Logout")
self.btn_logout.clicked.connect(self._on_logout_clicked)
self.btn_close = QtWidgets.QPushButton("Close")
self.btn_close.setToolTip("Close this window")
self.btn_close.clicked.connect(self._close_widget)
btns_layout = QtWidgets.QHBoxLayout()
btns_layout.addWidget(self.btn_advanced)
btns_layout.addWidget(self.btn_simple)
btns_layout.addStretch(1)
btns_layout.addWidget(self.btn_ftrack_login)
btns_layout.addWidget(self.btn_login)
btns_layout.addWidget(self.btn_logout)
btns_layout.addWidget(self.btn_close)
self.note_label = QtWidgets.QLabel((
"NOTE: Click on \"{}\" button to log with your default browser"
" or click on \"{}\" button to enter API key manually."
).format(self.btn_ftrack_login.text(), self.btn_advanced.text()))
self.note_label.setWordWrap(True)
self.note_label.hide()
self.error_label = QtWidgets.QLabel("")
self.error_label.setFont(self.font)
self.error_label.setTextFormat(QtCore.Qt.RichText)
self.error_label.setObjectName("error_label")
self.error_label.setWordWrap(True)
self.error_label.hide()
self.form.addRow(self.ftsite_label, self.ftsite_input)
self.form.addRow(self.user_label, self.user_input)
self.form.addRow(self.api_label, self.api_input)
self.form.addRow(self.error_label)
label_layout = QtWidgets.QVBoxLayout()
label_layout.setContentsMargins(10, 5, 10, 5)
label_layout.addWidget(self.note_label)
label_layout.addWidget(self.error_label)
self.btnGroup = QtWidgets.QHBoxLayout()
self.btnGroup.addStretch(1)
self.btnGroup.setObjectName("btnGroup")
main = QtWidgets.QVBoxLayout(self)
main.addLayout(input_layout)
main.addLayout(label_layout)
main.addStretch(1)
main.addLayout(btns_layout)
self.btnEnter = QtWidgets.QPushButton("Login")
self.btnEnter.setToolTip(
'Set Username and API Key with entered values'
)
self.btnEnter.clicked.connect(self.enter_credentials)
self.fill_ftrack_url()
self.btnClose = QtWidgets.QPushButton("Close")
self.btnClose.setToolTip('Close this window')
self.btnClose.clicked.connect(self._close_widget)
self.set_is_logged(self._is_logged)
self.btnFtrack = QtWidgets.QPushButton("Ftrack")
self.btnFtrack.setToolTip('Open browser for Login to Ftrack')
self.btnFtrack.clicked.connect(self.open_ftrack)
self.setLayout(main)
self.btnGroup.addWidget(self.btnFtrack)
self.btnGroup.addWidget(self.btnEnter)
self.btnGroup.addWidget(self.btnClose)
def fill_ftrack_url(self):
url = os.getenv("FTRACK_SERVER")
checked_url = self.check_url(url)
self.main.addLayout(self.form)
self.main.addLayout(self.btnGroup)
if checked_url is None:
checked_url = ""
self.btn_login.setEnabled(False)
self.btn_ftrack_login.setEnabled(False)
self.inputs.append(self.api_input)
self.inputs.append(self.user_input)
self.inputs.append(self.ftsite_input)
self.api_input.setEnabled(False)
self.user_input.setEnabled(False)
self.ftsite_input.setEnabled(False)
self.enter_site()
return self.main
self.ftsite_input.setText(checked_url)
def enter_site(self):
try:
url = os.getenv('FTRACK_SERVER')
newurl = self.checkUrl(url)
def set_advanced_mode(self, is_advanced):
self._in_advance_mode = is_advanced
if newurl is None:
self.btnEnter.setEnabled(False)
self.btnFtrack.setEnabled(False)
for input in self.inputs:
input.setEnabled(False)
newurl = url
self.error_label.setVisible(False)
self.ftsite_input.setText(newurl)
is_logged = self._is_logged
except Exception:
self.setError("FTRACK_SERVER is not set in templates")
self.btnEnter.setEnabled(False)
self.btnFtrack.setEnabled(False)
for input in self.inputs:
input.setEnabled(False)
self.note_label.setVisible(not is_logged and not is_advanced)
self.btn_ftrack_login.setVisible(not is_logged and not is_advanced)
self.btn_advanced.setVisible(not is_logged and not is_advanced)
def setError(self, msg):
self.btn_login.setVisible(not is_logged and is_advanced)
self.btn_simple.setVisible(not is_logged and is_advanced)
self.user_label.setVisible(is_logged or is_advanced)
self.user_input.setVisible(is_logged or is_advanced)
self.api_label.setVisible(is_logged or is_advanced)
self.api_input.setVisible(is_logged or is_advanced)
if is_advanced:
self.user_input.setFocus()
else:
self.btn_ftrack_login.setFocus()
def set_is_logged(self, is_logged):
self._is_logged = is_logged
self.user_input.setReadOnly(is_logged)
self.api_input.setReadOnly(is_logged)
self.user_input.setCursor(QtGui.QCursor(QtCore.Qt.IBeamCursor))
self.api_input.setCursor(QtGui.QCursor(QtCore.Qt.IBeamCursor))
self.btn_logout.setVisible(is_logged)
self.set_advanced_mode(self._in_advance_mode)
def set_error(self, msg):
self.error_label.setText(msg)
self.error_label.show()
def _on_logout_clicked(self):
self.user_input.setText("")
self.api_input.setText("")
self.set_is_logged(False)
self.logout_signal.emit()
def _on_simple_clicked(self):
self.set_advanced_mode(False)
def _on_advanced_clicked(self):
self.set_advanced_mode(True)
def _user_changed(self):
self.user_input.setStyleSheet("")
self._not_invalid_input(self.user_input)
def _api_changed(self):
self.api_input.setStyleSheet("")
self._not_invalid_input(self.api_input)
def _invalid_input(self, entity):
entity.setStyleSheet("border: 1px solid red;")
def _not_invalid_input(self, input_widget):
input_widget.setStyleSheet("")
def enter_credentials(self):
def _invalid_input(self, input_widget):
input_widget.setStyleSheet("border: 1px solid red;")
def _on_login(self):
self.set_is_logged(True)
self._close_widget()
def _on_login_clicked(self):
username = self.user_input.text().strip()
apiKey = self.api_input.text().strip()
msg = "You didn't enter "
api_key = self.api_input.text().strip()
missing = []
if username == "":
missing.append("Username")
self._invalid_input(self.user_input)
if apiKey == "":
if api_key == "":
missing.append("API Key")
self._invalid_input(self.api_input)
if len(missing) > 0:
self.setError("{0} {1}".format(msg, " and ".join(missing)))
self.set_error("You didn't enter {}".format(" and ".join(missing)))
return
verification = credentials.check_credentials(username, apiKey)
if verification:
credentials.save_credentials(username, apiKey, self.is_event)
credentials.set_env(username, apiKey)
if self.parent is not None:
self.parent.loginChange()
self._close_widget()
else:
if not self.login_with_credentials(username, api_key):
self._invalid_input(self.user_input)
self._invalid_input(self.api_input)
self.setError(
self.set_error(
"We're unable to sign in to Ftrack with these credentials"
)
def open_ftrack(self):
url = self.ftsite_input.text()
self.loginWithCredentials(url, None, None)
def checkUrl(self, url):
url = url.strip('/ ')
def _on_ftrack_login_clicked(self):
url = self.check_url(self.ftsite_input.text())
if not url:
self.setError("There is no URL set in Templates")
return
if 'http' not in url:
if url.endswith('ftrackapp.com'):
url = 'https://' + url
else:
url = 'https://{0}.ftrackapp.com'.format(url)
try:
result = requests.get(
url,
# Old python API will not work with redirect.
allow_redirects=False
)
except requests.exceptions.RequestException:
self.setError(
'The server URL set in Templates could not be reached.'
)
return
if (
result.status_code != 200 or 'FTRACK_VERSION' not in result.headers
):
self.setError(
'The server URL set in Templates is not a valid ftrack server.'
)
return
return url
def loginWithCredentials(self, url, username, apiKey):
url = url.strip('/ ')
if not url:
self.setError(
'You need to specify a valid server URL, '
'for example https://server-name.ftrackapp.com'
)
return
if 'http' not in url:
if url.endswith('ftrackapp.com'):
url = 'https://' + url
else:
url = 'https://{0}.ftrackapp.com'.format(url)
try:
result = requests.get(
url,
# Old python API will not work with redirect.
allow_redirects=False
)
except requests.exceptions.RequestException:
self.setError(
'The server URL you provided could not be reached.'
)
return
if (
result.status_code != 200 or 'FTRACK_VERSION' not in result.headers
):
self.setError(
'The server URL you provided is not a valid ftrack server.'
)
return
# If there is an existing server thread running we need to stop it.
if self._login_server_thread:
self._login_server_thread.quit()
self._login_server_thread.join()
self._login_server_thread = None
# If credentials are not properly set, try to get them using a http
# server.
if not username or not apiKey:
self._login_server_thread = login_tools.LoginServerThread()
self._login_server_thread.loginSignal.connect(self.loginSignal)
self._login_server_thread.start(url)
self._login_server_thread = login_tools.LoginServerThread(
url, self._result_of_ftrack_thread
)
self._login_server_thread.start()
def _result_of_ftrack_thread(self, username, api_key):
if not self.login_with_credentials(username, api_key):
self._invalid_input(self.api_input)
self.set_error((
"Somthing happened with Ftrack login."
" Try enter Username and API key manually."
))
def login_with_credentials(self, username, api_key):
verification = credentials.check_credentials(username, api_key)
if verification:
credentials.save_credentials(username, api_key, False)
credentials.set_env(username, api_key)
self.set_credentials(username, api_key)
self.login_changed.emit()
return verification
def set_credentials(self, username, api_key, is_logged=True):
self.user_input.setText(username)
self.api_input.setText(api_key)
self.error_label.hide()
self._not_invalid_input(self.ftsite_input)
self._not_invalid_input(self.user_input)
self._not_invalid_input(self.api_input)
if is_logged is not None:
self.set_is_logged(is_logged)
def check_url(self, url):
if url is not None:
url = url.strip("/ ")
if not url:
self.set_error((
"You need to specify a valid server URL, "
"for example https://server-name.ftrackapp.com"
))
return
verification = credentials.check_credentials(username, apiKey)
if "http" not in url:
if url.endswith("ftrackapp.com"):
url = "https://" + url
else:
url = "https://{}.ftrackapp.com".format(url)
try:
result = requests.get(
url,
# Old python API will not work with redirect.
allow_redirects=False
)
except requests.exceptions.RequestException:
self.set_error(
"Specified URL could not be reached."
)
return
if verification is True:
credentials.save_credentials(username, apiKey, self.is_event)
credentials.set_env(username, apiKey)
if self.parent is not None:
self.parent.loginChange()
self._close_widget()
if (
result.status_code != 200
or "FTRACK_VERSION" not in result.headers
):
self.set_error(
"Specified URL does not lead to a valid Ftrack server."
)
return
return url
def closeEvent(self, event):
event.ignore()

View file

@ -2,7 +2,7 @@ from http.server import BaseHTTPRequestHandler, HTTPServer
from urllib import parse
import webbrowser
import functools
from Qt import QtCore
import threading
from pype.api import resources
@ -55,20 +55,17 @@ class LoginServerHandler(BaseHTTPRequestHandler):
)
class LoginServerThread(QtCore.QThread):
class LoginServerThread(threading.Thread):
'''Login server thread.'''
# Login signal.
loginSignal = QtCore.Signal(object, object, object)
def start(self, url):
'''Start thread.'''
def __init__(self, url, callback):
self.url = url
super(LoginServerThread, self).start()
self.callback = callback
super(LoginServerThread, self).__init__()
def _handle_login(self, api_user, api_key):
'''Login to server with *api_user* and *api_key*.'''
self.loginSignal.emit(self.url, api_user, api_key)
self.callback(api_user, api_key)
def run(self):
'''Listen for events.'''

View file

@ -8,9 +8,9 @@ class LogsWindow(QtWidgets.QWidget):
super(LogsWindow, self).__init__(parent)
self.setStyleSheet(style.load_stylesheet())
self.resize(1200, 800)
logs_widget = LogsWidget(parent=self)
self.resize(1400, 800)
log_detail = OutputWidget(parent=self)
logs_widget = LogsWidget(log_detail, parent=self)
main_layout = QtWidgets.QHBoxLayout()
@ -18,8 +18,6 @@ class LogsWindow(QtWidgets.QWidget):
log_splitter.setOrientation(QtCore.Qt.Horizontal)
log_splitter.addWidget(logs_widget)
log_splitter.addWidget(log_detail)
log_splitter.setStretchFactor(0, 65)
log_splitter.setStretchFactor(1, 35)
main_layout.addWidget(log_splitter)
@ -28,10 +26,3 @@ class LogsWindow(QtWidgets.QWidget):
self.setLayout(main_layout)
self.setWindowTitle("Logs")
self.logs_widget.active_changed.connect(self.on_selection_changed)
def on_selection_changed(self):
index = self.logs_widget.selected_log()
node = index.data(self.logs_widget.model.NodeRole)
self.log_detail.set_detail(node)

View file

@ -1,94 +0,0 @@
import contextlib
from Qt import QtCore
def _iter_model_rows(
model, column, include_root=False
):
"""Iterate over all row indices in a model"""
indices = [QtCore.QModelIndex()] # start iteration at root
for index in indices:
# Add children to the iterations
child_rows = model.rowCount(index)
for child_row in range(child_rows):
child_index = model.index(child_row, column, index)
indices.append(child_index)
if not include_root and not index.isValid():
continue
yield index
@contextlib.contextmanager
def preserve_states(
tree_view, column=0, role=None,
preserve_expanded=True, preserve_selection=True,
expanded_role=QtCore.Qt.DisplayRole, selection_role=QtCore.Qt.DisplayRole
):
"""Preserves row selection in QTreeView by column's data role.
This function is created to maintain the selection status of
the model items. When refresh is triggered the items which are expanded
will stay expanded and vise versa.
tree_view (QWidgets.QTreeView): the tree view nested in the application
column (int): the column to retrieve the data from
role (int): the role which dictates what will be returned
Returns:
None
"""
# When `role` is set then override both expanded and selection roles
if role:
expanded_role = role
selection_role = role
model = tree_view.model()
selection_model = tree_view.selectionModel()
flags = selection_model.Select | selection_model.Rows
expanded = set()
if preserve_expanded:
for index in _iter_model_rows(
model, column=column, include_root=False
):
if tree_view.isExpanded(index):
value = index.data(expanded_role)
expanded.add(value)
selected = None
if preserve_selection:
selected_rows = selection_model.selectedRows()
if selected_rows:
selected = set(row.data(selection_role) for row in selected_rows)
try:
yield
finally:
if expanded:
for index in _iter_model_rows(
model, column=0, include_root=False
):
value = index.data(expanded_role)
is_expanded = value in expanded
# skip if new index was created meanwhile
if is_expanded is None:
continue
tree_view.setExpanded(index, is_expanded)
if selected:
# Go through all indices, select the ones with similar data
for index in _iter_model_rows(
model, column=column, include_root=False
):
value = index.data(selection_role)
state = value in selected
if state:
tree_view.scrollTo(index) # Ensure item is visible
selection_model.select(index, flags)

View file

@ -1,21 +1,20 @@
import collections
from Qt import QtCore
from Qt import QtCore, QtGui
from pype.api import Logger
from pypeapp.lib.log import _bootstrap_mongo_log, LOG_COLLECTION_NAME
log = Logger().get_logger("LogModel", "LoggingModule")
class LogModel(QtCore.QAbstractItemModel):
COLUMNS = [
class LogModel(QtGui.QStandardItemModel):
COLUMNS = (
"process_name",
"hostname",
"hostip",
"username",
"system_name",
"started"
]
)
colums_mapping = {
"process_name": "Process Name",
"process_id": "Process Id",
@ -25,30 +24,53 @@ class LogModel(QtCore.QAbstractItemModel):
"system_name": "System name",
"started": "Started at"
}
process_keys = [
process_keys = (
"process_id", "hostname", "hostip",
"username", "system_name", "process_name"
]
log_keys = [
)
log_keys = (
"timestamp", "level", "thread", "threadName", "message", "loggerName",
"fileName", "module", "method", "lineNumber"
]
)
default_value = "- Not set -"
NodeRole = QtCore.Qt.UserRole + 1
ROLE_LOGS = QtCore.Qt.UserRole + 2
ROLE_PROCESS_ID = QtCore.Qt.UserRole + 3
def __init__(self, parent=None):
super(LogModel, self).__init__(parent)
self._root_node = Node()
self.log_by_process = None
self.dbcon = None
# Crash if connection is not possible to skip this module
database = _bootstrap_mongo_log()
if LOG_COLLECTION_NAME in database.list_collection_names():
self.dbcon = database[LOG_COLLECTION_NAME]
def add_log(self, log):
node = Node(log)
self._root_node.add_child(node)
def headerData(self, section, orientation, role):
if (
role == QtCore.Qt.DisplayRole
and orientation == QtCore.Qt.Horizontal
):
if section < len(self.COLUMNS):
key = self.COLUMNS[section]
return self.colums_mapping.get(key, key)
super(LogModel, self).headerData(section, orientation, role)
def add_process_logs(self, process_logs):
items = []
first_item = True
for key in self.COLUMNS:
display_value = str(process_logs[key])
item = QtGui.QStandardItem(display_value)
if first_item:
first_item = False
item.setData(process_logs["_logs"], self.ROLE_LOGS)
item.setData(process_logs["process_id"], self.ROLE_PROCESS_ID)
items.append(item)
self.appendRow(items)
def refresh(self):
self.log_by_process = collections.defaultdict(list)
@ -65,16 +87,13 @@ class LogModel(QtCore.QAbstractItemModel):
continue
if process_id not in self.process_info:
proc_dict = {}
proc_dict = {"_logs": []}
for key in self.process_keys:
proc_dict[key] = (
item.get(key) or self.default_value
)
self.process_info[process_id] = proc_dict
if "_logs" not in self.process_info[process_id]:
self.process_info[process_id]["_logs"] = []
log_item = {}
for key in self.log_keys:
log_item[key] = item.get(key) or self.default_value
@ -89,114 +108,29 @@ class LogModel(QtCore.QAbstractItemModel):
item["_logs"], key=lambda item: item["timestamp"]
)
item["started"] = item["_logs"][0]["timestamp"]
self.add_log(item)
self.add_process_logs(item)
self.endResetModel()
def data(self, index, role):
if not index.isValid():
return None
if role == QtCore.Qt.DisplayRole or role == QtCore.Qt.EditRole:
node = index.internalPointer()
column = index.column()
class LogsFilterProxy(QtCore.QSortFilterProxyModel):
def __init__(self, *args, **kwargs):
super(LogsFilterProxy, self).__init__(*args, **kwargs)
self.col_usernames = None
self.filter_usernames = set()
key = self.COLUMNS[column]
if key == "started":
return str(node.get(key, None))
return node.get(key, None)
def update_users_filter(self, users):
self.filter_usernames = set()
for user in users or tuple():
self.filter_usernames.add(user)
self.invalidateFilter()
if role == self.NodeRole:
return index.internalPointer()
def index(self, row, column, parent):
"""Return index for row/column under parent"""
if not parent.isValid():
parent_node = self._root_node
else:
parent_node = parent.internalPointer()
child_item = parent_node.child(row)
if child_item:
return self.createIndex(row, column, child_item)
return QtCore.QModelIndex()
def rowCount(self, parent):
node = self._root_node
if parent.isValid():
node = parent.internalPointer()
return node.childCount()
def columnCount(self, parent):
return len(self.COLUMNS)
def parent(self, index):
return QtCore.QModelIndex()
def headerData(self, section, orientation, role):
if role == QtCore.Qt.DisplayRole:
if section < len(self.COLUMNS):
key = self.COLUMNS[section]
return self.colums_mapping.get(key, key)
super(LogModel, self).headerData(section, orientation, role)
def flags(self, index):
return (QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsSelectable)
def clear(self):
self.beginResetModel()
self._root_node = Node()
self.endResetModel()
class Node(dict):
"""A node that can be represented in a tree view.
The node can store data just like a dictionary.
>>> data = {"name": "John", "score": 10}
>>> node = Node(data)
>>> assert node["name"] == "John"
"""
def __init__(self, data=None):
super(Node, self).__init__()
self._children = list()
self._parent = None
if data is not None:
assert isinstance(data, dict)
self.update(data)
def childCount(self):
return len(self._children)
def child(self, row):
if row >= len(self._children):
log.warning("Invalid row as child: {0}".format(row))
return
return self._children[row]
def children(self):
return self._children
def parent(self):
return self._parent
def row(self):
"""
Returns:
int: Index of this node under parent"""
if self._parent is not None:
siblings = self.parent().children()
return siblings.index(self)
def add_child(self, child):
"""Add a child to this node"""
child._parent = self
self._children.append(child)
def filterAcceptsRow(self, source_row, source_parent):
if self.col_usernames is not None:
index = self.sourceModel().index(
source_row, self.col_usernames, source_parent
)
user = index.data(QtCore.Qt.DisplayRole)
if user not in self.filter_usernames:
return False
return True

View file

@ -1,6 +1,6 @@
from Qt import QtCore, QtWidgets, QtGui
from PyQt5.QtCore import QVariant
from .models import LogModel
from Qt import QtCore, QtWidgets
from avalon.vendor import qtawesome
from .models import LogModel, LogsFilterProxy
class SearchComboBox(QtWidgets.QComboBox):
@ -50,37 +50,6 @@ class SearchComboBox(QtWidgets.QComboBox):
return text
class CheckableComboBox2(QtWidgets.QComboBox):
def __init__(self, parent=None):
super(CheckableComboBox, self).__init__(parent)
self.view().pressed.connect(self.handleItemPressed)
self._changed = False
def handleItemPressed(self, index):
item = self.model().itemFromIndex(index)
if item.checkState() == QtCore.Qt.Checked:
item.setCheckState(QtCore.Qt.Unchecked)
else:
item.setCheckState(QtCore.Qt.Checked)
self._changed = True
def hidePopup(self):
if not self._changed:
super(CheckableComboBox, self).hidePopup()
self._changed = False
def itemChecked(self, index):
item = self.model().item(index, self.modelColumn())
return item.checkState() == QtCore.Qt.Checked
def setItemChecked(self, index, checked=True):
item = self.model().item(index, self.modelColumn())
if checked:
item.setCheckState(QtCore.Qt.Checked)
else:
item.setCheckState(QtCore.Qt.Unchecked)
class SelectableMenu(QtWidgets.QMenu):
selection_changed = QtCore.Signal()
@ -137,144 +106,108 @@ class CustomCombo(QtWidgets.QWidget):
yield action
class CheckableComboBox(QtWidgets.QComboBox):
def __init__(self, parent=None):
super(CheckableComboBox, self).__init__(parent)
view = QtWidgets.QTreeView()
view.header().hide()
view.setRootIsDecorated(False)
model = QtGui.QStandardItemModel()
view.pressed.connect(self.handleItemPressed)
self._changed = False
self.setView(view)
self.setModel(model)
self.view = view
self.model = model
def handleItemPressed(self, index):
item = self.model.itemFromIndex(index)
if item.checkState() == QtCore.Qt.Checked:
item.setCheckState(QtCore.Qt.Unchecked)
else:
item.setCheckState(QtCore.Qt.Checked)
self._changed = True
def hidePopup(self):
if not self._changed:
super(CheckableComboBox, self).hidePopup()
self._changed = False
def itemChecked(self, index):
item = self.model.item(index, self.modelColumn())
return item.checkState() == QtCore.Qt.Checked
def setItemChecked(self, index, checked=True):
item = self.model.item(index, self.modelColumn())
if checked:
item.setCheckState(QtCore.Qt.Checked)
else:
item.setCheckState(QtCore.Qt.Unchecked)
def addItems(self, items):
for text, checked in items:
text_item = QtGui.QStandardItem(text)
checked_item = QtGui.QStandardItem()
checked_item.setData(QVariant(checked), QtCore.Qt.CheckStateRole)
self.model.appendRow([text_item, checked_item])
class LogsWidget(QtWidgets.QWidget):
"""A widget that lists the published subsets for an asset"""
active_changed = QtCore.Signal()
def __init__(self, parent=None):
def __init__(self, detail_widget, parent=None):
super(LogsWidget, self).__init__(parent=parent)
model = LogModel()
proxy_model = LogsFilterProxy()
proxy_model.setSourceModel(model)
proxy_model.col_usernames = model.COLUMNS.index("username")
filter_layout = QtWidgets.QHBoxLayout()
# user_filter = SearchComboBox(self, "Users")
user_filter = CustomCombo("Users", self)
users = model.dbcon.distinct("user")
users = model.dbcon.distinct("username")
user_filter.populate(users)
user_filter.selection_changed.connect(self.user_changed)
user_filter.selection_changed.connect(self._user_changed)
proxy_model.update_users_filter(users)
level_filter = CustomCombo("Levels", self)
# levels = [(level, True) for level in model.dbcon.distinct("level")]
levels = model.dbcon.distinct("level")
level_filter.addItems(levels)
level_filter.selection_changed.connect(self._level_changed)
date_from_label = QtWidgets.QLabel("From:")
date_filter_from = QtWidgets.QDateTimeEdit()
detail_widget.update_level_filter(levels)
date_from_layout = QtWidgets.QVBoxLayout()
date_from_layout.addWidget(date_from_label)
date_from_layout.addWidget(date_filter_from)
spacer = QtWidgets.QWidget()
# now = datetime.datetime.now()
# QtCore.QDateTime(
# now.year,
# now.month,
# now.day,
# now.hour,
# now.minute,
# second=0,
# msec=0,
# timeSpec=0
# )
date_to_label = QtWidgets.QLabel("To:")
date_filter_to = QtWidgets.QDateTimeEdit()
date_to_layout = QtWidgets.QVBoxLayout()
date_to_layout.addWidget(date_to_label)
date_to_layout.addWidget(date_filter_to)
icon = qtawesome.icon("fa.refresh", color="white")
refresh_btn = QtWidgets.QPushButton(icon, "")
filter_layout.addWidget(user_filter)
filter_layout.addWidget(level_filter)
filter_layout.addLayout(date_from_layout)
filter_layout.addLayout(date_to_layout)
filter_layout.addWidget(spacer, 1)
filter_layout.addWidget(refresh_btn)
view = QtWidgets.QTreeView(self)
view.setAllColumnsShowFocus(True)
view.setEditTriggers(QtWidgets.QAbstractItemView.NoEditTriggers)
layout = QtWidgets.QVBoxLayout(self)
layout.setContentsMargins(0, 0, 0, 0)
layout.addLayout(filter_layout)
layout.addWidget(view)
view.setModel(proxy_model)
view.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
view.setSortingEnabled(True)
view.sortByColumn(
model.COLUMNS.index("started"),
QtCore.Qt.AscendingOrder
QtCore.Qt.DescendingOrder
)
view.setModel(model)
view.pressed.connect(self._on_activated)
# prepare
model.refresh()
view.selectionModel().selectionChanged.connect(self._on_index_change)
refresh_btn.clicked.connect(self._on_refresh_clicked)
# Store to memory
self.model = model
self.proxy_model = proxy_model
self.view = view
self.user_filter = user_filter
self.level_filter = level_filter
def _on_activated(self, *args, **kwargs):
self.active_changed.emit()
self.detail_widget = detail_widget
self.refresh_btn = refresh_btn
def user_changed(self):
# prepare
self.refresh()
def refresh(self):
self.model.refresh()
self.detail_widget.refresh()
def _on_refresh_clicked(self):
self.refresh()
def _on_index_change(self, to_index, from_index):
index = self._selected_log()
if index:
logs = index.data(self.model.ROLE_LOGS)
else:
logs = []
self.detail_widget.set_detail(logs)
def _user_changed(self):
checked_values = set()
for action in self.user_filter.items():
print(action)
if action.isChecked():
checked_values.add(action.text())
self.proxy_model.update_users_filter(checked_values)
def _level_changed(self):
checked_values = set()
for action in self.level_filter.items():
if action.isChecked():
checked_values.add(action.text())
self.detail_widget.update_level_filter(checked_values)
def on_context_menu(self, point):
# TODO will be any actions? it's ready
@ -288,7 +221,7 @@ class LogsWidget(QtWidgets.QWidget):
selection = self.view.selectionModel()
rows = selection.selectedRows(column=0)
def selected_log(self):
def _selected_log(self):
selection = self.view.selectionModel()
rows = selection.selectedRows(column=0)
if len(rows) == 1:
@ -300,22 +233,55 @@ class OutputWidget(QtWidgets.QWidget):
def __init__(self, parent=None):
super(OutputWidget, self).__init__(parent=parent)
layout = QtWidgets.QVBoxLayout(self)
show_timecode_checkbox = QtWidgets.QCheckBox("Show timestamp")
output_text = QtWidgets.QTextEdit()
output_text.setReadOnly(True)
# output_text.setLineWrapMode(QtWidgets.QTextEdit.FixedPixelWidth)
layout.addWidget(show_timecode_checkbox)
layout.addWidget(output_text)
show_timecode_checkbox.stateChanged.connect(
self.on_show_timecode_change
)
self.setLayout(layout)
self.output_text = output_text
self.show_timecode_checkbox = show_timecode_checkbox
self.refresh()
def refresh(self):
self.set_detail()
def show_timecode(self):
return self.show_timecode_checkbox.isChecked()
def on_show_timecode_change(self):
self.set_detail(self.las_logs)
def update_level_filter(self, levels):
self.filter_levels = set()
for level in levels or tuple():
self.filter_levels.add(level.lower())
self.set_detail(self.las_logs)
def add_line(self, line):
self.output_text.append(line)
def set_detail(self, node):
def set_detail(self, logs=None):
self.las_logs = logs
self.output_text.clear()
for log in node["_logs"]:
if not logs:
return
show_timecode = self.show_timecode()
for log in logs:
level = log["level"].lower()
if level not in self.filter_levels:
continue
line_f = "<font color=\"White\">{message}"
if level == "debug":
@ -353,66 +319,13 @@ class OutputWidget(QtWidgets.QWidget):
line = line_f.format(**log)
if show_timecode:
timestamp = log["timestamp"]
line = timestamp.strftime("%Y-%d-%m %H:%M:%S") + " " + line
self.add_line(line)
if not exc:
continue
for _line in exc["stackTrace"].split("\n"):
self.add_line(_line)
class LogDetailWidget(QtWidgets.QWidget):
"""A Widget that display information about a specific version"""
data_rows = [
"user",
"message",
"level",
"logname",
"method",
"module",
"fileName",
"lineNumber",
"host",
"timestamp"
]
html_text = u"""
<h3>{user} - {timestamp}</h3>
<b>User</b><br>{user}<br>
<br><b>Level</b><br>{level}<br>
<br><b>Message</b><br>{message}<br>
<br><b>Log Name</b><br>{logname}<br><br><b>Method</b><br>{method}<br>
<br><b>File</b><br>{fileName}<br>
<br><b>Line</b><br>{lineNumber}<br>
<br><b>Host</b><br>{host}<br>
<br><b>Timestamp</b><br>{timestamp}<br>
"""
def __init__(self, parent=None):
super(LogDetailWidget, self).__init__(parent=parent)
layout = QtWidgets.QVBoxLayout(self)
label = QtWidgets.QLabel("Detail")
detail_widget = QtWidgets.QTextEdit()
detail_widget.setReadOnly(True)
layout.addWidget(label)
layout.addWidget(detail_widget)
self.detail_widget = detail_widget
self.setEnabled(True)
self.set_detail(None)
def set_detail(self, detail_data):
if not detail_data:
self.detail_widget.setText("")
return
data = dict()
for row in self.data_rows:
value = detail_data.get(row) or "< Not set >"
data[row] = value
self.detail_widget.setHtml(self.html_text.format(**data))

View file

@ -1,14 +1,5 @@
PUBLISH_PATHS = []
from .standalonepublish_module import StandAlonePublishModule
from .app import (
show,
cli
)
__all__ = [
"show",
"cli"
]
def tray_init(tray_widget, main_widget):
return StandAlonePublishModule(main_widget, tray_widget)

View file

@ -1,5 +0,0 @@
from . import cli
if __name__ == '__main__':
import sys
sys.exit(cli(sys.argv[1:]))

View file

@ -1,133 +0,0 @@
import os
import sys
import json
import tempfile
import random
import string
from avalon import io
import pype
from pype.api import execute, Logger
import pyblish.api
log = Logger().get_logger("standalonepublisher")
def set_context(project, asset, task, app):
''' Sets context for pyblish (must be done before pyblish is launched)
:param project: Name of `Project` where instance should be published
:type project: str
:param asset: Name of `Asset` where instance should be published
:type asset: str
'''
os.environ["AVALON_PROJECT"] = project
io.Session["AVALON_PROJECT"] = project
os.environ["AVALON_ASSET"] = asset
io.Session["AVALON_ASSET"] = asset
if not task:
task = ''
os.environ["AVALON_TASK"] = task
io.Session["AVALON_TASK"] = task
io.install()
av_project = io.find_one({'type': 'project'})
av_asset = io.find_one({
"type": 'asset',
"name": asset
})
parents = av_asset['data']['parents']
hierarchy = ''
if parents and len(parents) > 0:
hierarchy = os.path.sep.join(parents)
os.environ["AVALON_HIERARCHY"] = hierarchy
io.Session["AVALON_HIERARCHY"] = hierarchy
os.environ["AVALON_PROJECTCODE"] = av_project['data'].get('code', '')
io.Session["AVALON_PROJECTCODE"] = av_project['data'].get('code', '')
io.Session["current_dir"] = os.path.normpath(os.getcwd())
os.environ["AVALON_APP"] = app
io.Session["AVALON_APP"] = app
io.uninstall()
def publish(data, gui=True):
# cli pyblish seems like better solution
return cli_publish(data, gui)
def cli_publish(data, gui=True):
from . import PUBLISH_PATHS
PUBLISH_SCRIPT_PATH = os.path.join(os.path.dirname(__file__), "publish.py")
io.install()
# Create hash name folder in temp
chars = "".join([random.choice(string.ascii_letters) for i in range(15)])
staging_dir = tempfile.mkdtemp(chars)
# create also json and fill with data
json_data_path = staging_dir + os.path.basename(staging_dir) + '.json'
with open(json_data_path, 'w') as outfile:
json.dump(data, outfile)
envcopy = os.environ.copy()
envcopy["PYBLISH_HOSTS"] = "standalonepublisher"
envcopy["SAPUBLISH_INPATH"] = json_data_path
envcopy["PYBLISHGUI"] = "pyblish_pype"
envcopy["PUBLISH_PATHS"] = os.pathsep.join(PUBLISH_PATHS)
if data.get("family", "").lower() == "editorial":
envcopy["PYBLISH_SUSPEND_LOGS"] = "1"
result = execute(
[sys.executable, PUBLISH_SCRIPT_PATH],
env=envcopy
)
result = {}
if os.path.exists(json_data_path):
with open(json_data_path, "r") as f:
result = json.load(f)
log.info(f"Publish result: {result}")
io.uninstall()
return False
def main(env):
from avalon.tools import publish
# Registers pype's Global pyblish plugins
pype.install()
# Register additional paths
addition_paths_str = env.get("PUBLISH_PATHS") or ""
addition_paths = addition_paths_str.split(os.pathsep)
for path in addition_paths:
path = os.path.normpath(path)
if not os.path.exists(path):
continue
pyblish.api.register_plugin_path(path)
# Register project specific plugins
project_name = os.environ["AVALON_PROJECT"]
project_plugins_paths = env.get("PYPE_PROJECT_PLUGINS") or ""
for path in project_plugins_paths.split(os.pathsep):
plugin_path = os.path.join(path, project_name, "plugins")
if os.path.exists(plugin_path):
pyblish.api.register_plugin_path(plugin_path)
return publish.show()
if __name__ == "__main__":
result = main(os.environ)
sys.exit(not bool(result))

View file

@ -1,21 +1,22 @@
import os
from .app import show
from .widgets import QtWidgets
import sys
import subprocess
import pype
from . import PUBLISH_PATHS
from pype import tools
class StandAlonePublishModule:
def __init__(self, main_parent=None, parent=None):
self.main_parent = main_parent
self.parent_widget = parent
PUBLISH_PATHS.clear()
PUBLISH_PATHS.append(os.path.sep.join(
[pype.PLUGINS_DIR, "standalonepublisher", "publish"]
))
self.publish_paths = [
os.path.join(
pype.PLUGINS_DIR, "standalonepublisher", "publish"
)
]
def tray_menu(self, parent_menu):
from Qt import QtWidgets
self.run_action = QtWidgets.QAction(
"Publish", parent_menu
)
@ -24,9 +25,17 @@ class StandAlonePublishModule:
def process_modules(self, modules):
if "FtrackModule" in modules:
PUBLISH_PATHS.append(os.path.sep.join(
[pype.PLUGINS_DIR, "ftrack", "publish"]
self.publish_paths.append(os.path.join(
pype.PLUGINS_DIR, "ftrack", "publish"
))
def show(self):
show(self.main_parent, False)
standalone_publisher_tool_path = os.path.join(
os.path.dirname(tools.__file__),
"standalonepublish"
)
subprocess.Popen([
sys.executable,
standalone_publisher_tool_path,
os.pathsep.join(self.publish_paths).replace("\\", "/")
])

View file

@ -1,113 +0,0 @@
from xml.dom import minidom
from . import QtGui, QtCore, QtWidgets
from PyQt5 import QtSvg, QtXml
class SvgResizable(QtSvg.QSvgWidget):
clicked = QtCore.Signal()
def __init__(self, filepath, width=None, height=None, fill=None):
super().__init__()
self.xmldoc = minidom.parse(filepath)
itemlist = self.xmldoc.getElementsByTagName('svg')
for element in itemlist:
if fill:
element.setAttribute('fill', str(fill))
# TODO auto scale if only one is set
if width is not None and height is not None:
self.setMaximumSize(width, height)
self.setMinimumSize(width, height)
xml_string = self.xmldoc.toxml()
svg_bytes = bytearray(xml_string, encoding='utf-8')
self.load(svg_bytes)
def change_color(self, color):
element = self.xmldoc.getElementsByTagName('svg')[0]
element.setAttribute('fill', str(color))
xml_string = self.xmldoc.toxml()
svg_bytes = bytearray(xml_string, encoding='utf-8')
self.load(svg_bytes)
def mousePressEvent(self, event):
self.clicked.emit()
class SvgButton(QtWidgets.QFrame):
clicked = QtCore.Signal()
def __init__(
self, filepath, width=None, height=None, fills=[],
parent=None, checkable=True
):
super().__init__(parent)
self.checkable = checkable
self.checked = False
xmldoc = minidom.parse(filepath)
element = xmldoc.getElementsByTagName('svg')[0]
c_actual = '#777777'
if element.hasAttribute('fill'):
c_actual = element.getAttribute('fill')
self.store_fills(fills, c_actual)
self.installEventFilter(self)
self.svg_widget = SvgResizable(filepath, width, height, self.c_normal)
xmldoc = minidom.parse(filepath)
layout = QtWidgets.QHBoxLayout(self)
layout.setSpacing(0)
layout.setContentsMargins(0, 0, 0, 0)
layout.addWidget(self.svg_widget)
if width is not None and height is not None:
self.setMaximumSize(width, height)
self.setMinimumSize(width, height)
def store_fills(self, fills, actual):
if len(fills) == 0:
fills = [actual, actual, actual, actual]
elif len(fills) == 1:
fills = [fills[0], fills[0], fills[0], fills[0]]
elif len(fills) == 2:
fills = [fills[0], fills[1], fills[1], fills[1]]
elif len(fills) == 3:
fills = [fills[0], fills[1], fills[2], fills[2]]
self.c_normal = fills[0]
self.c_hover = fills[1]
self.c_active = fills[2]
self.c_active_hover = fills[3]
def eventFilter(self, object, event):
if event.type() == QtCore.QEvent.Enter:
self.hoverEnterEvent(event)
return True
elif event.type() == QtCore.QEvent.Leave:
self.hoverLeaveEvent(event)
return True
elif event.type() == QtCore.QEvent.MouseButtonRelease:
self.mousePressEvent(event)
return False
def change_checked(self, hover=True):
if self.checkable:
self.checked = not self.checked
if hover:
self.hoverEnterEvent()
else:
self.hoverLeaveEvent()
def hoverEnterEvent(self, event=None):
color = self.c_hover
if self.checked:
color = self.c_active_hover
self.svg_widget.change_color(color)
def hoverLeaveEvent(self, event=None):
color = self.c_normal
if self.checked:
color = self.c_active
self.svg_widget.change_color(color)
def mousePressEvent(self, event=None):
self.clicked.emit()

View file

@ -34,6 +34,7 @@ class ExtractCelactionDeadline(pyblish.api.InstancePlugin):
]
def process(self, instance):
instance.data["toBeRenderedOn"] = "deadline"
context = instance.context
DEADLINE_REST_URL = os.environ.get("DEADLINE_REST_URL")

View file

@ -22,7 +22,7 @@ class FusionSubmitDeadline(pyblish.api.InstancePlugin):
families = ["saver.deadline"]
def process(self, instance):
instance.data["toBeRenderedOn"] = "deadline"
context = instance.context
key = "__hasRun{}".format(self.__class__.__name__)

View file

@ -47,12 +47,16 @@ class CleanUp(pyblish.api.InstancePlugin):
temp_root = tempfile.gettempdir()
staging_dir = instance.data.get("stagingDir", None)
if not staging_dir:
self.log.info("Staging dir not set.")
return
if not os.path.normpath(staging_dir).startswith(temp_root):
self.log.info("Skipping cleanup. Staging directory is not in the "
"temp folder: %s" % staging_dir)
return
if not staging_dir or not os.path.exists(staging_dir):
if not os.path.exists(staging_dir):
self.log.info("No staging directory found: %s" % staging_dir)
return

View file

@ -78,6 +78,11 @@ class ExtractHierarchyToAvalon(pyblish.api.ContextPlugin):
if entity:
# Do not override data, only update
cur_entity_data = entity.get("data") or {}
new_tasks = data.pop("tasks", [])
if "tasks" in cur_entity_data and new_tasks:
for task_name in new_tasks:
if task_name not in cur_entity_data["tasks"]:
cur_entity_data["tasks"].append(task_name)
cur_entity_data.update(data)
data = cur_entity_data
else:

View file

@ -26,6 +26,10 @@ class ExtractJpegEXR(pyblish.api.InstancePlugin):
if instance.data.get("multipartExr") is True:
return
# Skip review when requested.
if not instance.data.get("review", True):
return
# get representation and loop them
representations = instance.data["representations"]
@ -44,10 +48,11 @@ class ExtractJpegEXR(pyblish.api.InstancePlugin):
continue
if not isinstance(repre['files'], (list, tuple)):
continue
input_file = repre['files']
else:
input_file = repre['files'][0]
stagingdir = os.path.normpath(repre.get("stagingDir"))
input_file = repre['files'][0]
# input_file = (
# collections[0].format('{head}{padding}{tail}') % start

View file

@ -50,6 +50,10 @@ class ExtractReview(pyblish.api.InstancePlugin):
to_height = 1080
def process(self, instance):
# Skip review when requested.
if not instance.data.get("review", True):
return
# ffmpeg doesn't support multipart exrs
if instance.data.get("multipartExr") is True:
instance_label = (

View file

@ -6,11 +6,15 @@ import copy
import clique
import errno
import six
import re
import shutil
from pymongo import DeleteOne, InsertOne
import pyblish.api
from avalon import io
from avalon.vendor import filelink
import pype.api
from datetime import datetime
# this is needed until speedcopy for linux is fixed
if sys.platform == "win32":
@ -44,6 +48,7 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
"frameStart"
"frameEnd"
'fps'
"data": additional metadata for each representation.
"""
label = "Integrate Asset New"
@ -95,18 +100,28 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
default_template_name = "publish"
template_name_profiles = None
def process(self, instance):
# file_url : file_size of all published and uploaded files
integrated_file_sizes = {}
TMP_FILE_EXT = 'tmp' # suffix to denote temporary files, use without '.'
def process(self, instance):
self.integrated_file_sizes = {}
if [ef for ef in self.exclude_families
if instance.data["family"] in ef]:
return
self.register(instance)
self.log.info("Integrating Asset in to the database ...")
self.log.info("instance.data: {}".format(instance.data))
if instance.data.get('transfer', True):
self.integrate(instance)
try:
self.register(instance)
self.log.info("Integrated Asset in to the database ...")
self.log.info("instance.data: {}".format(instance.data))
self.handle_destination_files(self.integrated_file_sizes,
'finalize')
except Exception:
# clean destination
self.log.critical("Error when registering", exc_info=True)
self.handle_destination_files(self.integrated_file_sizes, 'remove')
six.reraise(*sys.exc_info())
def register(self, instance):
# Required environment variables
@ -269,13 +284,21 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
representations = []
destination_list = []
orig_transfers = []
if 'transfers' not in instance.data:
instance.data['transfers'] = []
else:
orig_transfers = list(instance.data['transfers'])
template_name = self.template_name_from_instance(instance)
published_representations = {}
for idx, repre in enumerate(instance.data["representations"]):
# reset transfers for next representation
# instance.data['transfers'] is used as a global variable
# in current codebase
instance.data['transfers'] = list(orig_transfers)
if "delete" in repre.get("tags", []):
continue
@ -459,13 +482,15 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
if repre_id is None:
repre_id = io.ObjectId()
data = repre.get("data") or {}
data.update({'path': dst, 'template': template})
representation = {
"_id": repre_id,
"schema": "pype:representation-2.0",
"type": "representation",
"parent": version_id,
"name": repre['name'],
"data": {'path': dst, 'template': template},
"data": data,
"dependencies": instance.data.get("dependencies", "").split(),
# Imprint shortcut to context
@ -481,6 +506,24 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
dst_padding_exp % int(repre.get("frameStart"))
)
# any file that should be physically copied is expected in
# 'transfers' or 'hardlinks'
if instance.data.get('transfers', False) or \
instance.data.get('hardlinks', False):
# could throw exception, will be caught in 'process'
# all integration to DB is being done together lower,
# so no rollback needed
self.log.debug("Integrating source files to destination ...")
self.integrated_file_sizes.update(self.integrate(instance))
self.log.debug("Integrated files {}".
format(self.integrated_file_sizes))
# get 'files' info for representation and all attached resources
self.log.debug("Preparing files information ...")
representation["files"] = self.get_files_info(
instance,
self.integrated_file_sizes)
self.log.debug("__ representation: {}".format(representation))
destination_list.append(dst)
self.log.debug("__ destination_list: {}".format(destination_list))
@ -518,10 +561,19 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
Args:
instance: the instance to integrate
Returns:
integrated_file_sizes: dictionary of destination file url and
its size in bytes
"""
transfers = instance.data.get("transfers", list())
# store destination url and size for reporting and rollback
integrated_file_sizes = {}
transfers = list(instance.data.get("transfers", list()))
for src, dest in transfers:
self.copy_file(src, dest)
if os.path.normpath(src) != os.path.normpath(dest):
dest = self.get_dest_temp_url(dest)
self.copy_file(src, dest)
# TODO needs to be updated during site implementation
integrated_file_sizes[dest] = os.path.getsize(dest)
# Produce hardlinked copies
# Note: hardlink can only be produced between two files on the same
@ -530,8 +582,15 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
# to ensure publishes remain safe and non-edited.
hardlinks = instance.data.get("hardlinks", list())
for src, dest in hardlinks:
self.log.debug("Hardlinking file .. {} -> {}".format(src, dest))
self.hardlink_file(src, dest)
dest = self.get_dest_temp_url(dest)
self.log.debug("Hardlinking file ... {} -> {}".format(src, dest))
if not os.path.exists(dest):
self.hardlink_file(src, dest)
# TODO needs to be updated during site implementation
integrated_file_sizes[dest] = os.path.getsize(dest)
return integrated_file_sizes
def copy_file(self, src, dst):
""" Copy given source to destination
@ -544,7 +603,7 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
"""
src = os.path.normpath(src)
dst = os.path.normpath(dst)
self.log.debug("Copying file .. {} -> {}".format(src, dst))
self.log.debug("Copying file ... {} -> {}".format(src, dst))
dirname = os.path.dirname(dst)
try:
os.makedirs(dirname)
@ -553,20 +612,21 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
pass
else:
self.log.critical("An unexpected error occurred.")
raise
six.reraise(*sys.exc_info())
# copy file with speedcopy and check if size of files are simetrical
while True:
import shutil
try:
copyfile(src, dst)
except shutil.SameFileError as sfe:
self.log.critical("files are the same {} to {}".format(src, dst))
except shutil.SameFileError:
self.log.critical("files are the same {} to {}".format(src,
dst))
os.remove(dst)
try:
shutil.copyfile(src, dst)
self.log.debug("Copying files with shutil...")
except (OSError) as e:
except OSError as e:
self.log.critical("Cannot copy {} to {}".format(src, dst))
self.log.critical(e)
six.reraise(*sys.exc_info())
@ -583,7 +643,7 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
pass
else:
self.log.critical("An unexpected error occurred.")
raise
six.reraise(*sys.exc_info())
filelink.create(src, dst, filelink.HARDLINK)
@ -596,7 +656,7 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
})
if subset is None:
self.log.info("Subset '%s' not found, creating.." % subset_name)
self.log.info("Subset '%s' not found, creating ..." % subset_name)
self.log.debug("families. %s" % instance.data.get('families'))
self.log.debug(
"families. %s" % type(instance.data.get('families')))
@ -666,16 +726,7 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
else:
source = context.data["currentFile"]
anatomy = instance.context.data["anatomy"]
success, rootless_path = (
anatomy.find_root_template_from_path(source)
)
if success:
source = rootless_path
else:
self.log.warning((
"Could not find root path for remapping \"{}\"."
" This may cause issues on farm."
).format(source))
source = self.get_rootless_path(anatomy, source)
self.log.debug("Source: {}".format(source))
version_data = {
@ -774,3 +825,167 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
).format(family, task_name, template_name))
return template_name
def get_rootless_path(self, anatomy, path):
""" Returns, if possible, path without absolute portion from host
(eg. 'c:\' or '/opt/..')
This information is host dependent and shouldn't be captured.
Example:
'c:/projects/MyProject1/Assets/publish...' >
'{root}/MyProject1/Assets...'
Args:
anatomy: anatomy part from instance
path: path (absolute)
Returns:
path: modified path if possible, or unmodified path
+ warning logged
"""
success, rootless_path = (
anatomy.find_root_template_from_path(path)
)
if success:
path = rootless_path
else:
self.log.warning((
"Could not find root path for remapping \"{}\"."
" This may cause issues on farm."
).format(path))
return path
def get_files_info(self, instance, integrated_file_sizes):
""" Prepare 'files' portion for attached resources and main asset.
Combining records from 'transfers' and 'hardlinks' parts from
instance.
All attached resources should be added, currently without
Context info.
Arguments:
instance: the current instance being published
integrated_file_sizes: dictionary of destination path (absolute)
and its file size
Returns:
output_resources: array of dictionaries to be added to 'files' key
in representation
"""
resources = list(instance.data.get("transfers", []))
resources.extend(list(instance.data.get("hardlinks", [])))
self.log.debug("get_resource_files_info.resources:{}".
format(resources))
output_resources = []
anatomy = instance.context.data["anatomy"]
for _src, dest in resources:
path = self.get_rootless_path(anatomy, dest)
dest = self.get_dest_temp_url(dest)
file_hash = pype.api.source_hash(dest)
if self.TMP_FILE_EXT and \
',{}'.format(self.TMP_FILE_EXT) in file_hash:
file_hash = file_hash.replace(',{}'.format(self.TMP_FILE_EXT),
'')
file_info = self.prepare_file_info(path,
integrated_file_sizes[dest],
file_hash)
output_resources.append(file_info)
return output_resources
def get_dest_temp_url(self, dest):
""" Enhance destination path with TMP_FILE_EXT to denote temporary
file.
Temporary files will be renamed after successful registration
into DB and full copy to destination
Arguments:
dest: destination url of published file (absolute)
Returns:
dest: destination path + '.TMP_FILE_EXT'
"""
if self.TMP_FILE_EXT and '.{}'.format(self.TMP_FILE_EXT) not in dest:
dest += '.{}'.format(self.TMP_FILE_EXT)
return dest
def prepare_file_info(self, path, size=None, file_hash=None, sites=None):
""" Prepare information for one file (asset or resource)
Arguments:
path: destination url of published file (rootless)
size(optional): size of file in bytes
file_hash(optional): hash of file for synchronization validation
sites(optional): array of published locations,
['studio': {'created_dt':date}] by default
keys expected ['studio', 'site1', 'gdrive1']
Returns:
rec: dictionary with filled info
"""
rec = {
"_id": io.ObjectId(),
"path": path
}
if size:
rec["size"] = size
if file_hash:
rec["hash"] = file_hash
if sites:
rec["sites"] = sites
else:
meta = {"created_dt": datetime.now()}
rec["sites"] = {"studio": meta}
return rec
def handle_destination_files(self, integrated_file_sizes, mode):
""" Clean destination files
Called when error happened during integrating to DB or to disk
OR called to rename uploaded files from temporary name to final to
highlight publishing in progress/broken
Used to clean unwanted files
Arguments:
integrated_file_sizes: dictionary, file urls as keys, size as value
mode: 'remove' - clean files,
'finalize' - rename files,
remove TMP_FILE_EXT suffix denoting temp file
"""
if integrated_file_sizes:
for file_url, _file_size in integrated_file_sizes.items():
if not os.path.exists(file_url):
self.log.debug(
"File {} was not found.".format(file_url)
)
continue
try:
if mode == 'remove':
self.log.debug("Removing file {}".format(file_url))
os.remove(file_url)
if mode == 'finalize':
new_name = re.sub(
r'\.{}$'.format(self.TMP_FILE_EXT),
'',
file_url
)
if os.path.exists(new_name):
self.log.debug(
"Overwriting file {} to {}".format(
file_url, new_name
)
)
shutil.copy(file_url, new_name)
else:
self.log.debug(
"Renaming file {} to {}".format(
file_url, new_name
)
)
os.rename(file_url, new_name)
except OSError:
self.log.error("Cannot {} file {}".format(mode, file_url),
exc_info=True)
six.reraise(*sys.exc_info())

View file

@ -236,7 +236,7 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
return (metadata_path, roothless_mtdt_p)
def _submit_deadline_post_job(self, instance, job):
def _submit_deadline_post_job(self, instance, job, instances):
"""Submit publish job to Deadline.
Deadline specific code separated from :meth:`process` for sake of
@ -256,7 +256,6 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
"Plugin": "Python",
"BatchName": job["Props"]["Batch"],
"Name": job_name,
"JobDependency0": job["_id"],
"UserName": job["Props"]["User"],
"Comment": instance.context.data.get("comment", ""),
@ -280,11 +279,20 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
"AuxFiles": [],
}
# add assembly jobs as dependencies
if instance.data.get("tileRendering"):
self.log.info("Adding tile assembly jobs as dependencies...")
job_index = 0
for assembly_id in instance.data.get("assemblySubmissionJobs"):
payload["JobInfo"]["JobDependency{}".format(job_index)] = assembly_id # noqa: E501
job_index += 1
else:
payload["JobInfo"]["JobDependency0"] = job["_id"]
# Transfer the environment from the original job to this dependent
# job so they use the same environment
metadata_path, roothless_metadata_path = self._create_metadata_path(
instance)
environment = job["Props"].get("Env", {})
environment["PYPE_METADATA_FILE"] = roothless_metadata_path
environment["AVALON_PROJECT"] = io.Session["AVALON_PROJECT"]
@ -420,12 +428,12 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
"to render, don't know what to do "
"with them.")
col = rem[0]
_, ext = os.path.splitext(col)
ext = os.path.splitext(col)[1].lstrip(".")
else:
# but we really expect only one collection.
# Nothing else make sense.
assert len(cols) == 1, "only one image sequence type is expected" # noqa: E501
_, ext = os.path.splitext(cols[0].tail)
ext = cols[0].tail.lstrip(".")
col = list(cols[0])
self.log.debug(col)
@ -639,25 +647,6 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
if hasattr(instance, "_log"):
data['_log'] = instance._log
render_job = data.pop("deadlineSubmissionJob", None)
submission_type = "deadline"
if not render_job:
# No deadline job. Try Muster: musterSubmissionJob
render_job = data.pop("musterSubmissionJob", None)
submission_type = "muster"
assert render_job, (
"Can't continue without valid Deadline "
"or Muster submission prior to this "
"plug-in."
)
if submission_type == "deadline":
self.DEADLINE_REST_URL = os.environ.get(
"DEADLINE_REST_URL", "http://localhost:8082"
)
assert self.DEADLINE_REST_URL, "Requires DEADLINE_REST_URL"
self._submit_deadline_post_job(instance, render_job)
asset = data.get("asset") or api.Session["AVALON_ASSET"]
subset = data.get("subset")
@ -728,7 +717,8 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
"pixelAspect": data.get("pixelAspect", 1),
"resolutionWidth": data.get("resolutionWidth", 1920),
"resolutionHeight": data.get("resolutionHeight", 1080),
"multipartExr": data.get("multipartExr", False)
"multipartExr": data.get("multipartExr", False),
"jobBatchName": data.get("jobBatchName", "")
}
if "prerender" in instance.data["families"]:
@ -872,6 +862,66 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
at.get("subset"), at.get("version")))
instances = new_instances
r''' SUBMiT PUBLiSH JOB 2 D34DLiN3
____
' ' .---. .---. .--. .---. .--..--..--..--. .---.
| | --= \ | . \/ _|/ \| . \ || || \ |/ _|
| JOB | --= / | | || __| .. | | | |;_ || \ || __|
| | |____./ \.__|._||_.|___./|_____|||__|\__|\.___|
._____.
'''
render_job = None
if instance.data.get("toBeRenderedOn") == "deadline":
render_job = data.pop("deadlineSubmissionJob", None)
submission_type = "deadline"
if instance.data.get("toBeRenderedOn") == "muster":
render_job = data.pop("musterSubmissionJob", None)
submission_type = "muster"
if not render_job and instance.data.get("tileRendering") is False:
raise AssertionError(("Cannot continue without valid Deadline "
"or Muster submission."))
if not render_job:
import getpass
render_job = {}
self.log.info("Faking job data ...")
render_job["Props"] = {}
# Render job doesn't exist because we do not have prior submission.
# We still use data from it so lets fake it.
#
# Batch name reflect original scene name
if instance.data.get("assemblySubmissionJobs"):
render_job["Props"]["Batch"] = instance.data.get(
"jobBatchName")
else:
render_job["Props"]["Batch"] = os.path.splitext(
os.path.basename(context.data.get("currentFile")))[0]
# User is deadline user
render_job["Props"]["User"] = context.data.get(
"deadlineUser", getpass.getuser())
# Priority is now not handled at all
render_job["Props"]["Pri"] = instance.data.get("priority")
render_job["Props"]["Env"] = {
"FTRACK_API_USER": os.environ.get("FTRACK_API_USER"),
"FTRACK_API_KEY": os.environ.get("FTRACK_API_KEY"),
"FTRACK_SERVER": os.environ.get("FTRACK_SERVER"),
}
if submission_type == "deadline":
self.DEADLINE_REST_URL = os.environ.get(
"DEADLINE_REST_URL", "http://localhost:8082"
)
assert self.DEADLINE_REST_URL, "Requires DEADLINE_REST_URL"
self._submit_deadline_post_job(instance, render_job, instances)
# publish job file
publish_job = {
"asset": asset,
@ -883,7 +933,7 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
"version": context.data["version"], # this is workfile version
"intent": context.data.get("intent"),
"comment": context.data.get("comment"),
"job": render_job,
"job": render_job or None,
"session": api.Session.copy(),
"instances": instances
}

View file

@ -31,7 +31,7 @@ func
class ImportAudioLoader(api.Loader):
"""Import audio."""
families = ["shot"]
families = ["shot", "audio"]
representations = ["wav"]
label = "Import Audio"

View file

@ -230,7 +230,7 @@ class ImageSequenceLoader(api.Loader):
"""Load images
Stores the imported asset in a container named after the asset.
"""
families = ["shot", "render", "image", "plate"]
families = ["shot", "render", "image", "plate", "reference"]
representations = ["jpeg", "png", "jpg"]
def load(self, context, name=None, namespace=None, data=None):

View file

@ -40,6 +40,9 @@ class CreateRender(avalon.maya.Creator):
vrscene (bool): Submit as ``vrscene`` file for standalone V-Ray
renderer.
ass (bool): Submit as ``ass`` file for standalone Arnold renderer.
tileRendering (bool): Instance is set to tile rendering mode. We
won't submit an actual render, but we'll make the publish job
wait for the Tile Assembly job to finish and then publish.
See Also:
https://pype.club/docs/artist_hosts_maya#creating-basic-render-setup
@ -172,6 +175,7 @@ class CreateRender(avalon.maya.Creator):
self.data["primaryPool"] = pool_names
self.data["suspendPublishJob"] = False
self.data["review"] = True
self.data["extendFrames"] = False
self.data["overrideExistingFrame"] = True
# self.data["useLegacyRenderLayers"] = True
@ -181,6 +185,9 @@ class CreateRender(avalon.maya.Creator):
self.data["machineList"] = ""
self.data["useMayaBatch"] = False
self.data["vrayScene"] = False
self.data["tileRendering"] = False
self.data["tilesX"] = 2
self.data["tilesY"] = 2
# Disable for now as this feature is not working yet
# self.data["assScene"] = False
@ -189,8 +196,8 @@ class CreateRender(avalon.maya.Creator):
def _load_credentials(self):
"""Load Muster credentials.
Load Muster credentials from file and set ```MUSTER_USER``,
```MUSTER_PASSWORD``, ``MUSTER_REST_URL`` is loaded from presets.
Load Muster credentials from file and set ``MUSTER_USER``,
``MUSTER_PASSWORD``, ``MUSTER_REST_URL`` is loaded from presets.
Raises:
RuntimeError: If loaded credentials are invalid.

View file

@ -11,6 +11,8 @@ class CreateReview(avalon.maya.Creator):
family = "review"
icon = "video-camera"
defaults = ['Main']
keepImages = False
isolate = False
def __init__(self, *args, **kwargs):
super(CreateReview, self).__init__(*args, **kwargs)
@ -21,4 +23,7 @@ class CreateReview(avalon.maya.Creator):
for key, value in animation_data.items():
data[key] = value
data["isolate"] = self.isolate
data["keepImages"] = self.keepImages
self.data = data

View file

@ -1,6 +1,9 @@
from maya import cmds, mel
import pymel.core as pc
from avalon import api
from avalon.maya.pipeline import containerise
from avalon.maya import lib
class AudioLoader(api.Loader):
@ -24,4 +27,48 @@ class AudioLoader(api.Loader):
displaySound=True
)
return [sound_node]
asset = context["asset"]["name"]
namespace = namespace or lib.unique_namespace(
asset + "_",
prefix="_" if asset[0].isdigit() else "",
suffix="_",
)
return containerise(
name=name,
namespace=namespace,
nodes=[sound_node],
context=context,
loader=self.__class__.__name__
)
    def update(self, container, representation):
        """Point the container's audio node at a new representation.

        Args:
            container (dict): Avalon container data; ``objectName`` is the
                Maya objectSet holding the loaded nodes.
            representation (dict): Representation document to switch to.
        """
        # Find the audio node among the container set members.
        # If several exist, the last one found wins.
        audio_node = None
        for node in pc.PyNode(container["objectName"]).members():
            if node.nodeType() == "audio":
                audio_node = node
        assert audio_node is not None, "Audio node not found."
        path = api.get_representation_path(representation)
        # Repoint the audio node to the new file.
        audio_node.filename.set(path)
        # Keep the container metadata in sync with the new representation id.
        cmds.setAttr(
            container["objectName"] + ".representation",
            str(representation["_id"]),
            type="string"
        )
    def switch(self, container, representation):
        """Switching a representation is identical to updating it here."""
        self.update(container, representation)
    def remove(self, container):
        """Delete the container set and all of its members from the scene.

        Args:
            container (dict): Avalon container data with ``objectName``
                and ``namespace`` keys.
        """
        members = cmds.sets(container['objectName'], query=True)
        # Unlock members first so the delete is not blocked by node locks.
        cmds.lockNode(members, lock=False)
        cmds.delete([container['objectName']] + members)
        # Clean up the namespace
        try:
            cmds.namespace(removeNamespace=container['namespace'],
                           deleteNamespaceContent=True)
        except RuntimeError:
            # Namespace may already be gone or not empty in an
            # unexpected way; removal is best-effort.
            pass

View file

@ -1,4 +1,9 @@
import pymel.core as pc
import maya.cmds as cmds
from avalon import api
from avalon.maya.pipeline import containerise
from avalon.maya import lib
from Qt import QtWidgets
@ -7,15 +12,19 @@ class ImagePlaneLoader(api.Loader):
families = ["plate", "render"]
label = "Create imagePlane on selected camera."
representations = ["mov", "exr"]
representations = ["mov", "exr", "preview"]
icon = "image"
color = "orange"
def load(self, context, name, namespace, data):
import pymel.core as pc
new_nodes = []
image_plane_depth = 1000
asset = context['asset']['name']
namespace = namespace or lib.unique_namespace(
asset + "_",
prefix="_" if asset[0].isdigit() else "",
suffix="_",
)
# Getting camera from selection.
selection = pc.ls(selection=True)
@ -74,12 +83,16 @@ class ImagePlaneLoader(api.Loader):
image_plane_shape.frameOut.set(end_frame)
image_plane_shape.useFrameExtension.set(1)
if context["representation"]["name"] == "mov":
movie_representations = ["mov", "preview"]
if context["representation"]["name"] in movie_representations:
# Need to get "type" by string, because its a method as well.
pc.Attribute(image_plane_shape + ".type").set(2)
# Ask user whether to use sequence or still image.
if context["representation"]["name"] == "exr":
# Ensure OpenEXRLoader plugin is loaded.
pc.loadPlugin("OpenEXRLoader.mll", quiet=True)
reply = QtWidgets.QMessageBox.information(
None,
"Frame Hold.",
@ -93,11 +106,51 @@ class ImagePlaneLoader(api.Loader):
)
image_plane_shape.frameExtension.set(start_frame)
# Ensure OpenEXRLoader plugin is loaded.
pc.loadPlugin("OpenEXRLoader.mll", quiet=True)
new_nodes.extend(
[image_plane_transform.name(), image_plane_shape.name()]
[
image_plane_transform.longName().split("|")[-1],
image_plane_shape.longName().split("|")[-1]
]
)
return new_nodes
for node in new_nodes:
pc.rename(node, "{}:{}".format(namespace, node))
return containerise(
name=name,
namespace=namespace,
nodes=new_nodes,
context=context,
loader=self.__class__.__name__
)
    def update(self, container, representation):
        """Point the container's image plane at a new representation.

        Args:
            container (dict): Avalon container data; ``objectName`` is the
                Maya objectSet holding the loaded nodes.
            representation (dict): Representation document to switch to.
        """
        # Find the imagePlane node among the container set members.
        # If several exist, the last one found wins.
        image_plane_shape = None
        for node in pc.PyNode(container["objectName"]).members():
            if node.nodeType() == "imagePlane":
                image_plane_shape = node
        assert image_plane_shape is not None, "Image plane not found."
        path = api.get_representation_path(representation)
        # Repoint the image plane to the new file.
        image_plane_shape.imageName.set(path)
        # Keep the container metadata in sync with the new representation id.
        cmds.setAttr(
            container["objectName"] + ".representation",
            str(representation["_id"]),
            type="string"
        )
    def switch(self, container, representation):
        """Switching a representation is identical to updating it here."""
        self.update(container, representation)
    def remove(self, container):
        """Delete the container set and all of its members from the scene.

        Args:
            container (dict): Avalon container data with ``objectName``
                and ``namespace`` keys.
        """
        members = cmds.sets(container['objectName'], query=True)
        # Unlock members first so the delete is not blocked by node locks.
        cmds.lockNode(members, lock=False)
        cmds.delete([container['objectName']] + members)
        # Clean up the namespace
        try:
            cmds.namespace(removeNamespace=container['namespace'],
                           deleteNamespaceContent=True)
        except RuntimeError:
            # Namespace may already be gone or not empty in an
            # unexpected way; removal is best-effort.
            pass

View file

@ -216,6 +216,7 @@ class CollectMayaRender(pyblish.api.ContextPlugin):
"attachTo": attach_to,
"setMembers": layer_name,
"multipartExr": ef.multipart,
"review": render_instance.data.get("review") or False,
"publish": True,
"handleStart": handle_start,
@ -242,6 +243,10 @@ class CollectMayaRender(pyblish.api.ContextPlugin):
"resolutionWidth": cmds.getAttr("defaultResolution.width"),
"resolutionHeight": cmds.getAttr("defaultResolution.height"),
"pixelAspect": cmds.getAttr("defaultResolution.pixelAspect"),
"tileRendering": render_instance.data.get("tileRendering") or False, # noqa: E501
"tilesX": render_instance.data.get("tilesX") or 2,
"tilesY": render_instance.data.get("tilesY") or 2,
"priority": render_instance.data.get("priority")
}
# Apply each user defined attribute as data

View file

@ -13,6 +13,7 @@ class CollectReview(pyblish.api.InstancePlugin):
order = pyblish.api.CollectorOrder + 0.3
label = 'Collect Review Data'
families = ["review"]
legacy = True
def process(self, instance):
@ -63,13 +64,23 @@ class CollectReview(pyblish.api.InstancePlugin):
data['handles'] = instance.data.get('handles', None)
data['step'] = instance.data['step']
data['fps'] = instance.data['fps']
data["isolate"] = instance.data["isolate"]
cmds.setAttr(str(instance) + '.active', 1)
self.log.debug('data {}'.format(instance.context[i].data))
instance.context[i].data.update(data)
instance.data['remove'] = True
i += 1
else:
instance.data['subset'] = task + 'Review'
if self.legacy:
instance.data['subset'] = task + 'Review'
else:
subset = "{}{}{}".format(
task,
instance.data["subset"][0].upper(),
instance.data["subset"][1:]
)
instance.data['subset'] = subset
instance.data['review_camera'] = camera
instance.data['frameStartFtrack'] = instance.data["frameStartHandle"]
instance.data['frameEndFtrack'] = instance.data["frameEndHandle"]

View file

@ -21,27 +21,6 @@ COPY = 1
HARDLINK = 2
def source_hash(filepath, *args):
    """Generate a simple identifier for a source file.

    Used to detect whether a source file (e.g. a texture) has already
    been processed into the pipeline. The identifier is derived from the
    file's base name, modification time and size, so the same file
    published again from the same location with the same modification
    date produces the same hash. This is much faster and predictable
    enough for production use compared to a content hash such as
    Avalanche C4.

    Args:
        filepath (str): The source file path.
        *args: Optional extra values (e.g. processing flags) folded into
            the identifier.

    Returns:
        str: The identifier, with dots replaced by commas so it can be
            used as a pymongo dictionary key.
    """
    stat_parts = [
        os.path.basename(filepath),
        str(os.path.getmtime(filepath)),
        str(os.path.getsize(filepath)),
    ]
    stat_parts.extend(args)
    joined = "|".join(stat_parts)
    # "." cannot appear in a pymongo dict key, so swap it for ",".
    return joined.replace(".", ",")
def find_paths_by_hash(texture_hash):
# Find the texture hash key in the dictionary and all paths that
# originate from it.
@ -363,7 +342,7 @@ class ExtractLook(pype.api.Extractor):
args = []
if do_maketx:
args.append("maketx")
texture_hash = source_hash(filepath, *args)
texture_hash = pype.api.source_hash(filepath, *args)
# If source has been published before with the same settings,
# then don't reprocess but hardlink from the original

View file

@ -53,7 +53,6 @@ class ExtractPlayblast(pype.api.Extractor):
preset['camera'] = camera
preset['format'] = "image"
# preset['compression'] = "qt"
preset['quality'] = 95
preset['compression'] = "png"
preset['start_frame'] = start
@ -77,6 +76,11 @@ class ExtractPlayblast(pype.api.Extractor):
pm.currentTime(refreshFrameInt - 1, edit=True)
pm.currentTime(refreshFrameInt, edit=True)
# Isolate view is requested by having objects in the set besides a
# camera.
if instance.data.get("isolate"):
preset["isolate"] = instance.data["setMembers"]
with maintained_time():
filename = preset.get("filename", "%TEMP%")
@ -102,6 +106,10 @@ class ExtractPlayblast(pype.api.Extractor):
if "representations" not in instance.data:
instance.data["representations"] = []
tags = ["review"]
if not instance.data.get("keepImages"):
tags.append("delete")
representation = {
'name': 'png',
'ext': 'png',
@ -111,7 +119,7 @@ class ExtractPlayblast(pype.api.Extractor):
"frameEnd": end,
'fps': fps,
'preview': True,
'tags': ['review', 'delete']
'tags': tags
}
instance.data["representations"].append(representation)

View file

@ -77,6 +77,11 @@ class ExtractThumbnail(pype.api.Extractor):
pm.currentTime(refreshFrameInt - 1, edit=True)
pm.currentTime(refreshFrameInt, edit=True)
# Isolate view is requested by having objects in the set besides a
# camera.
if instance.data.get("isolate"):
preset["isolate"] = instance.data["setMembers"]
with maintained_time():
filename = preset.get("filename", "%TEMP%")

View file

@ -16,11 +16,16 @@ Attributes:
"""
from __future__ import print_function
import os
import json
import getpass
import copy
import re
import hashlib
from datetime import datetime
import itertools
from collections import OrderedDict
import clique
import requests
@ -45,6 +50,7 @@ payload_skeleton = {
"Plugin": "MayaPype",
"Frames": "{start}-{end}x{step}",
"Comment": None,
"Priority": 50,
},
"PluginInfo": {
"SceneFile": None, # Input
@ -60,6 +66,98 @@ payload_skeleton = {
}
def _format_tiles(
filename, index, tiles_x, tiles_y,
width, height, prefix):
"""Generate tile entries for Deadline tile job.
Returns two dictionaries - one that can be directly used in Deadline
job, second that can be used for Deadline Assembly job configuration
file.
This will format tile names:
Example::
{
"OutputFilename0Tile0": "_tile_1x1_4x4_Main_beauty.1001.exr",
"OutputFilename0Tile1": "_tile_2x1_4x4_Main_beauty.1001.exr"
}
And add tile prefixes like:
Example::
Image prefix is:
`maya/<Scene>/<RenderLayer>/<RenderLayer>_<RenderPass>`
Result for tile 0 for 4x4 will be:
`maya/<Scene>/<RenderLayer>/_tile_1x1_4x4_<RenderLayer>_<RenderPass>`
Calculating coordinates is tricky as in Job they are defined as top,
left, bottom, right with zero being in top-left corner. But Assembler
configuration file takes tile coordinates as X, Y, Width and Height and
zero is bottom left corner.
Args:
filename (str): Filename to process as tiles.
index (int): Index of that file if it is sequence.
tiles_x (int): Number of tiles in X.
tiles_y (int): Number if tikes in Y.
width (int): Width resolution of final image.
height (int): Height resolution of final image.
prefix (str): Image prefix.
Returns:
(dict, dict): Tuple of two dictionaires - first can be used to
extend JobInfo, second has tiles x, y, width and height
used for assembler configuration.
"""
tile = 0
out = {"JobInfo": {}, "PluginInfo": {}}
cfg = OrderedDict()
w_space = width / tiles_x
h_space = height / tiles_y
cfg["TilesCropped"] = "False"
for tile_x in range(1, tiles_x + 1):
for tile_y in reversed(range(1, tiles_y + 1)):
tile_prefix = "_tile_{}x{}_{}x{}_".format(
tile_x, tile_y,
tiles_x,
tiles_y
)
out_tile_index = "OutputFilename{}Tile{}".format(
str(index), tile
)
new_filename = "{}/{}{}".format(
os.path.dirname(filename),
tile_prefix,
os.path.basename(filename)
)
out["JobInfo"][out_tile_index] = new_filename
out["PluginInfo"]["RegionPrefix{}".format(str(tile))] = \
"/{}".format(tile_prefix).join(prefix.rsplit("/", 1))
out["PluginInfo"]["RegionTop{}".format(tile)] = int(height) - (tile_y * h_space) # noqa: E501
out["PluginInfo"]["RegionBottom{}".format(tile)] = int(height) - ((tile_y - 1) * h_space) - 1 # noqa: E501
out["PluginInfo"]["RegionLeft{}".format(tile)] = (tile_x - 1) * w_space # noqa: E501
out["PluginInfo"]["RegionRight{}".format(tile)] = (tile_x * w_space) - 1 # noqa: E501
cfg["Tile{}".format(tile)] = new_filename
cfg["Tile{}Tile".format(tile)] = new_filename
cfg["Tile{}FileName".format(tile)] = new_filename
cfg["Tile{}X".format(tile)] = (tile_x - 1) * w_space
cfg["Tile{}Y".format(tile)] = int(height) - (tile_y * h_space)
cfg["Tile{}Width".format(tile)] = w_space
cfg["Tile{}Height".format(tile)] = h_space
tile += 1
return out, cfg
def get_renderer_variables(renderlayer, root):
"""Retrieve the extension which has been set in the VRay settings.
@ -86,7 +184,8 @@ def get_renderer_variables(renderlayer, root):
gin="#" * int(padding),
lut=True,
layer=renderlayer or lib.get_current_renderlayer())[0]
filename_0 = filename_0.replace('_<RenderPass>', '_beauty')
filename_0 = re.sub('_<RenderPass>', '_beauty',
filename_0, flags=re.IGNORECASE)
prefix_attr = "defaultRenderGlobals.imageFilePrefix"
if renderer == "vray":
renderlayer = renderlayer.split("_")[-1]
@ -162,9 +261,11 @@ class MayaSubmitDeadline(pyblish.api.InstancePlugin):
optional = True
use_published = True
tile_assembler_plugin = "PypeTileAssembler"
def process(self, instance):
"""Plugin entry point."""
instance.data["toBeRenderedOn"] = "deadline"
self._instance = instance
self._deadline_url = os.environ.get(
"DEADLINE_REST_URL", "http://localhost:8082")
@ -173,6 +274,7 @@ class MayaSubmitDeadline(pyblish.api.InstancePlugin):
context = instance.context
workspace = context.data["workspaceDir"]
anatomy = context.data['anatomy']
instance.data["toBeRenderedOn"] = "deadline"
filepath = None
@ -299,10 +401,13 @@ class MayaSubmitDeadline(pyblish.api.InstancePlugin):
payload_skeleton["JobInfo"]["Name"] = jobname
# Arbitrary username, for visualisation in Monitor
payload_skeleton["JobInfo"]["UserName"] = deadline_user
# Set job priority
payload_skeleton["JobInfo"]["Priority"] = self._instance.data.get(
"priority", 50)
# Optional, enable double-click to preview rendered
# frames from Deadline Monitor
payload_skeleton["JobInfo"]["OutputDirectory0"] = \
os.path.dirname(output_filename_0)
os.path.dirname(output_filename_0).replace("\\", "/")
payload_skeleton["JobInfo"]["OutputFilename0"] = \
output_filename_0.replace("\\", "/")
@ -369,9 +474,8 @@ class MayaSubmitDeadline(pyblish.api.InstancePlugin):
# Add list of expected files to job ---------------------------------
exp = instance.data.get("expectedFiles")
output_filenames = {}
exp_index = 0
output_filenames = {}
if isinstance(exp[0], dict):
# we have aovs and we need to iterate over them
@ -383,44 +487,246 @@ class MayaSubmitDeadline(pyblish.api.InstancePlugin):
assert len(rem) == 1, ("Found multiple non related files "
"to render, don't know what to do "
"with them.")
payload['JobInfo']['OutputFilename' + str(exp_index)] = rem[0] # noqa: E501
output_file = rem[0]
if not instance.data.get("tileRendering"):
payload['JobInfo']['OutputFilename' + str(exp_index)] = output_file # noqa: E501
else:
output_file = col.format('{head}{padding}{tail}')
payload['JobInfo']['OutputFilename' + str(exp_index)] = output_file # noqa: E501
output_filenames[exp_index] = output_file
output_file = col[0].format('{head}{padding}{tail}')
if not instance.data.get("tileRendering"):
payload['JobInfo']['OutputFilename' + str(exp_index)] = output_file # noqa: E501
output_filenames['OutputFilename' + str(exp_index)] = output_file # noqa: E501
exp_index += 1
else:
col, rem = clique.assemble(files)
col, rem = clique.assemble(exp)
if not col and rem:
# we couldn't find any collections but have
# individual files.
assert len(rem) == 1, ("Found multiple non related files "
"to render, don't know what to do "
"with them.")
payload['JobInfo']['OutputFilename' + str(exp_index)] = rem[0] # noqa: E501
output_file = rem[0]
if not instance.data.get("tileRendering"):
payload['JobInfo']['OutputFilename' + str(exp_index)] = output_file # noqa: E501
else:
output_file = col.format('{head}{padding}{tail}')
payload['JobInfo']['OutputFilename' + str(exp_index)] = output_file # noqa: E501
output_file = col[0].format('{head}{padding}{tail}')
if not instance.data.get("tileRendering"):
payload['JobInfo']['OutputFilename' + str(exp_index)] = output_file # noqa: E501
output_filenames['OutputFilename' + str(exp_index)] = output_file
plugin = payload["JobInfo"]["Plugin"]
self.log.info("using render plugin : {}".format(plugin))
self.preflight_check(instance)
# Submit job to farm ------------------------------------------------
self.log.info("Submitting ...")
self.log.debug(json.dumps(payload, indent=4, sort_keys=True))
# E.g. http://192.168.0.1:8082/api/jobs
url = "{}/api/jobs".format(self._deadline_url)
response = self._requests_post(url, json=payload)
if not response.ok:
raise Exception(response.text)
# Store output dir for unified publisher (filesequence)
instance.data["outputDir"] = os.path.dirname(output_filename_0)
instance.data["deadlineSubmissionJob"] = response.json()
self.preflight_check(instance)
# Prepare tiles data ------------------------------------------------
if instance.data.get("tileRendering"):
# if we have sequence of files, we need to create tile job for
# every frame
payload["JobInfo"]["TileJob"] = True
payload["JobInfo"]["TileJobTilesInX"] = instance.data.get("tilesX")
payload["JobInfo"]["TileJobTilesInY"] = instance.data.get("tilesY")
payload["PluginInfo"]["ImageHeight"] = instance.data.get("resolutionHeight") # noqa: E501
payload["PluginInfo"]["ImageWidth"] = instance.data.get("resolutionWidth") # noqa: E501
payload["PluginInfo"]["RegionRendering"] = True
assembly_payload = {
"AuxFiles": [],
"JobInfo": {
"BatchName": payload["JobInfo"]["BatchName"],
"Frames": 1,
"Name": "{} - Tile Assembly Job".format(
payload["JobInfo"]["Name"]),
"OutputDirectory0":
payload["JobInfo"]["OutputDirectory0"].replace(
"\\", "/"),
"Plugin": self.tile_assembler_plugin,
"MachineLimit": 1
},
"PluginInfo": {
"CleanupTiles": 1,
"ErrorOnMissing": True
}
}
assembly_payload["JobInfo"].update(output_filenames)
assembly_payload["JobInfo"]["Priority"] = self._instance.data.get(
"priority", 50)
assembly_payload["JobInfo"]["UserName"] = deadline_user
frame_payloads = []
assembly_payloads = []
R_FRAME_NUMBER = re.compile(r".+\.(?P<frame>[0-9]+)\..+") # noqa: N806, E501
REPL_FRAME_NUMBER = re.compile(r"(.+\.)([0-9]+)(\..+)") # noqa: N806, E501
if isinstance(exp[0], dict):
# we have aovs and we need to iterate over them
# get files from `beauty`
files = exp[0].get("beauty")
# assembly files are used for assembly jobs as we need to put
# together all AOVs
assembly_files = list(
itertools.chain.from_iterable(
[f for _, f in exp[0].items()]))
if not files:
# if beauty doesn't exists, use first aov we found
files = exp[0].get(list(exp[0].keys())[0])
else:
files = exp
assembly_files = files
frame_jobs = {}
file_index = 1
for file in files:
frame = re.search(R_FRAME_NUMBER, file).group("frame")
new_payload = copy.deepcopy(payload)
new_payload["JobInfo"]["Name"] = \
"{} (Frame {} - {} tiles)".format(
payload["JobInfo"]["Name"],
frame,
instance.data.get("tilesX") * instance.data.get("tilesY") # noqa: E501
)
self.log.info(
"... preparing job {}".format(
new_payload["JobInfo"]["Name"]))
new_payload["JobInfo"]["TileJobFrame"] = frame
tiles_data = _format_tiles(
file, 0,
instance.data.get("tilesX"),
instance.data.get("tilesY"),
instance.data.get("resolutionWidth"),
instance.data.get("resolutionHeight"),
payload["PluginInfo"]["OutputFilePrefix"]
)[0]
new_payload["JobInfo"].update(tiles_data["JobInfo"])
new_payload["PluginInfo"].update(tiles_data["PluginInfo"])
job_hash = hashlib.sha256("{}_{}".format(file_index, file))
frame_jobs[frame] = job_hash.hexdigest()
new_payload["JobInfo"]["ExtraInfo0"] = job_hash.hexdigest()
new_payload["JobInfo"]["ExtraInfo1"] = file
frame_payloads.append(new_payload)
file_index += 1
file_index = 1
for file in assembly_files:
frame = re.search(R_FRAME_NUMBER, file).group("frame")
new_assembly_payload = copy.deepcopy(assembly_payload)
new_assembly_payload["JobInfo"]["Name"] = \
"{} (Frame {})".format(
assembly_payload["JobInfo"]["Name"],
frame)
new_assembly_payload["JobInfo"]["OutputFilename0"] = re.sub(
REPL_FRAME_NUMBER,
"\\1{}\\3".format("#" * len(frame)), file)
new_assembly_payload["PluginInfo"]["Renderer"] = self._instance.data["renderer"] # noqa: E501
new_assembly_payload["JobInfo"]["ExtraInfo0"] = frame_jobs[frame] # noqa: E501
new_assembly_payload["JobInfo"]["ExtraInfo1"] = file
assembly_payloads.append(new_assembly_payload)
file_index += 1
self.log.info(
"Submitting tile job(s) [{}] ...".format(len(frame_payloads)))
url = "{}/api/jobs".format(self._deadline_url)
tiles_count = instance.data.get("tilesX") * instance.data.get("tilesY") # noqa: E501
for tile_job in frame_payloads:
response = self._requests_post(url, json=tile_job)
if not response.ok:
raise Exception(response.text)
job_id = response.json()["_id"]
hash = response.json()["Props"]["Ex0"]
for assembly_job in assembly_payloads:
if assembly_job["JobInfo"]["ExtraInfo0"] == hash:
assembly_job["JobInfo"]["JobDependency0"] = job_id
for assembly_job in assembly_payloads:
file = assembly_job["JobInfo"]["ExtraInfo1"]
# write assembly job config files
now = datetime.now()
config_file = os.path.join(
os.path.dirname(output_filename_0),
"{}_config_{}.txt".format(
os.path.splitext(file)[0],
now.strftime("%Y_%m_%d_%H_%M_%S")
)
)
try:
if not os.path.isdir(os.path.dirname(config_file)):
os.makedirs(os.path.dirname(config_file))
except OSError:
# directory is not available
self.log.warning(
"Path is unreachable: `{}`".format(
os.path.dirname(config_file)))
# add config file as job auxFile
assembly_job["AuxFiles"] = [config_file]
with open(config_file, "w") as cf:
print("TileCount={}".format(tiles_count), file=cf)
print("ImageFileName={}".format(file), file=cf)
print("ImageWidth={}".format(
instance.data.get("resolutionWidth")), file=cf)
print("ImageHeight={}".format(
instance.data.get("resolutionHeight")), file=cf)
tiles = _format_tiles(
file, 0,
instance.data.get("tilesX"),
instance.data.get("tilesY"),
instance.data.get("resolutionWidth"),
instance.data.get("resolutionHeight"),
payload["PluginInfo"]["OutputFilePrefix"]
)[1]
sorted(tiles)
for k, v in tiles.items():
print("{}={}".format(k, v), file=cf)
job_idx = 1
instance.data["assemblySubmissionJobs"] = []
for ass_job in assembly_payloads:
self.log.info("submitting assembly job {} of {}".format(
job_idx, len(assembly_payloads)
))
self.log.debug(json.dumps(ass_job, indent=4, sort_keys=True))
response = self._requests_post(url, json=ass_job)
if not response.ok:
raise Exception(response.text)
instance.data["assemblySubmissionJobs"].append(
response.json()["_id"])
job_idx += 1
instance.data["jobBatchName"] = payload["JobInfo"]["BatchName"]
self.log.info("Setting batch name on instance: {}".format(
instance.data["jobBatchName"]))
else:
# Submit job to farm --------------------------------------------
self.log.info("Submitting ...")
self.log.debug(json.dumps(payload, indent=4, sort_keys=True))
# E.g. http://192.168.0.1:8082/api/jobs
url = "{}/api/jobs".format(self._deadline_url)
response = self._requests_post(url, json=payload)
if not response.ok:
raise Exception(response.text)
instance.data["deadlineSubmissionJob"] = response.json()
def _get_maya_payload(self, data):
payload = copy.deepcopy(payload_skeleton)

View file

@ -249,6 +249,7 @@ class MayaSubmitMuster(pyblish.api.InstancePlugin):
Authenticate with Muster, collect all data, prepare path for post
render publish job and submit job to farm.
"""
instance.data["toBeRenderedOn"] = "muster"
# setup muster environment
self.MUSTER_REST_URL = os.environ.get("MUSTER_REST_URL")

View file

@ -29,6 +29,12 @@ class ValidateFrameRange(pyblish.api.InstancePlugin):
def process(self, instance):
context = instance.context
if instance.data.get("tileRendering"):
self.log.info((
"Skipping frame range validation because "
"tile rendering is enabled."
))
return
frame_start_handle = int(context.data.get("frameStartHandle"))
frame_end_handle = int(context.data.get("frameEndHandle"))

View file

@ -0,0 +1,233 @@
import re
import nuke
from avalon.vendor import qargparse
from avalon import api, io
from pype.hosts.nuke import presets
class LoadImage(api.Loader):
"""Load still image into Nuke"""
families = [
"render2d", "source", "plate",
"render", "prerender", "review",
"image"
]
representations = ["exr", "dpx", "jpg", "jpeg", "png", "psd"]
label = "Load Image"
order = -10
icon = "image"
color = "white"
options = [
qargparse.Integer(
"frame_number",
label="Frame Number",
default=int(nuke.root()["first_frame"].getValue()),
min=1,
max=999999,
help="What frame is reading from?"
)
]
def load(self, context, name, namespace, options):
from avalon.nuke import (
containerise,
viewer_update_and_undo_stop
)
self.log.info("__ options: `{}`".format(options))
frame_number = options.get("frame_number", 1)
version = context['version']
version_data = version.get("data", {})
repr_id = context["representation"]["_id"]
self.log.info("version_data: {}\n".format(version_data))
self.log.debug(
"Representation id `{}` ".format(repr_id))
last = first = int(frame_number)
# Fallback to asset name when namespace is None
if namespace is None:
namespace = context['asset']['name']
file = self.fname
if not file:
repr_id = context["representation"]["_id"]
self.log.warning(
"Representation id `{}` is failing to load".format(repr_id))
return
file = file.replace("\\", "/")
repr_cont = context["representation"]["context"]
frame = repr_cont.get("frame")
if frame:
padding = len(frame)
file = file.replace(
frame,
format(frame_number, "0{}".format(padding)))
read_name = "Read_{0}_{1}_{2}".format(
repr_cont["asset"],
repr_cont["subset"],
repr_cont["representation"])
# Create the Loader with the filename path set
with viewer_update_and_undo_stop():
r = nuke.createNode(
"Read",
"name {}".format(read_name))
r["file"].setValue(file)
# Set colorspace defined in version data
colorspace = context["version"]["data"].get("colorspace")
if colorspace:
r["colorspace"].setValue(str(colorspace))
# load nuke presets for Read's colorspace
read_clrs_presets = presets.get_colorspace_preset().get(
"nuke", {}).get("read", {})
# check if any colorspace presets for read is mathing
preset_clrsp = next((read_clrs_presets[k]
for k in read_clrs_presets
if bool(re.search(k, file))),
None)
if preset_clrsp is not None:
r["colorspace"].setValue(str(preset_clrsp))
r["origfirst"].setValue(first)
r["first"].setValue(first)
r["origlast"].setValue(last)
r["last"].setValue(last)
# add additional metadata from the version to imprint Avalon knob
add_keys = ["source", "colorspace", "author", "fps", "version"]
data_imprint = {
"frameStart": first,
"frameEnd": last
}
for k in add_keys:
if k == 'version':
data_imprint.update({k: context["version"]['name']})
else:
data_imprint.update(
{k: context["version"]['data'].get(k, str(None))})
data_imprint.update({"objectName": read_name})
r["tile_color"].setValue(int("0x4ecd25ff", 16))
return containerise(r,
name=name,
namespace=namespace,
context=context,
loader=self.__class__.__name__,
data=data_imprint)
def switch(self, container, representation):
self.update(container, representation)
def update(self, container, representation):
    """Update the Loader's path

    Nuke automatically tries to reset some variables when changing
    the loader's path to a new file. These automatic changes are to its
    inputs:

    Resolves the new representation's file path (restoring the frame
    token for sequences from the node's current first frame), applies
    it to the Read node, refreshes the frame range knobs and the
    imprinted Avalon metadata, and colors the tile green when the node
    points at the latest version, orange otherwise.

    Args:
        container (dict): container data of the loaded node.
        representation (dict): representation document to switch to.
    """
    from avalon.nuke import (
        update_container
    )

    node = nuke.toNode(container["objectName"])
    frame_number = node["first"].value()

    assert node.Class() == "Read", "Must be Read"

    repr_cont = representation["context"]

    file = api.get_representation_path(representation)

    if not file:
        repr_id = representation["_id"]
        self.log.warning(
            "Representation id `{}` is failing to load".format(repr_id))
        return

    file = file.replace("\\", "/")

    frame = repr_cont.get("frame")
    if frame:
        padding = len(frame)
        file = file.replace(
            frame,
            format(frame_number, "0{}".format(padding)))

    # Apply the resolved path to the node; previously the computed
    # path was never set, so the node kept the old representation.
    node["file"].setValue(file)

    # Get start frame from version data
    version = io.find_one({
        "type": "version",
        "_id": representation["parent"]
    })

    # get all versions in list
    versions = io.find({
        "type": "version",
        "parent": version["parent"]
    }).distinct('name')

    max_version = max(versions)

    version_data = version.get("data", {})

    last = first = int(frame_number)

    # Set the global in to the start frame of the sequence
    node["origfirst"].setValue(first)
    node["first"].setValue(first)
    node["origlast"].setValue(last)
    node["last"].setValue(last)

    updated_dict = {
        "representation": str(representation["_id"]),
        "frameStart": str(first),
        "frameEnd": str(last),
        "version": str(version.get("name")),
        "colorspace": version_data.get("colorspace"),
        "source": version_data.get("source"),
        "fps": str(version_data.get("fps")),
        "author": version_data.get("author"),
        "outputDir": version_data.get("outputDir"),
    }

    # change color of node: orange when outdated, green when latest
    if version.get("name") not in [max_version]:
        node["tile_color"].setValue(int("0xd84f20ff", 16))
    else:
        node["tile_color"].setValue(int("0x4ecd25ff", 16))

    # Update the imprinted representation
    update_container(
        node,
        updated_dict
    )
    self.log.info("updated to version: {}".format(version.get("name")))
def remove(self, container):
    """Delete the container's Read node from the Nuke script."""
    from avalon.nuke import viewer_update_and_undo_stop

    read_node = nuke.toNode(container['objectName'])
    assert read_node.Class() == "Read", "Must be Read"

    # Suspend viewer refreshes and undo recording while deleting.
    with viewer_update_and_undo_stop():
        nuke.delete(read_node)

View file

@ -70,7 +70,7 @@ def loader_shift(node, frame, relative=True):
class LoadSequence(api.Loader):
"""Load image sequence into Nuke"""
families = ["render2d", "source", "plate", "render", "prerender"]
families = ["render2d", "source", "plate", "render", "prerender", "review"]
representations = ["exr", "dpx", "jpg", "jpeg", "png"]
label = "Load sequence"
@ -120,12 +120,12 @@ class LoadSequence(api.Loader):
if "#" not in file:
frame = repr_cont.get("frame")
padding = len(frame)
file = file.replace(frame, "#"*padding)
file = file.replace(frame, "#" * padding)
read_name = "Read_{0}_{1}_{2}".format(
repr_cont["asset"],
repr_cont["subset"],
repr_cont["representation"])
repr_cont["asset"],
repr_cont["subset"],
repr_cont["representation"])
# Create the Loader with the filename path set
with viewer_update_and_undo_stop():
@ -250,7 +250,7 @@ class LoadSequence(api.Loader):
if "#" not in file:
frame = repr_cont.get("frame")
padding = len(frame)
file = file.replace(frame, "#"*padding)
file = file.replace(frame, "#" * padding)
# Get start frame from version data
version = io.find_one({
@ -276,10 +276,10 @@ class LoadSequence(api.Loader):
last = version_data.get("frameEnd")
if first is None:
self.log.warning("Missing start frame for updated version"
"assuming starts at frame 0 for: "
"{} ({})".format(
node['name'].value(), representation))
self.log.warning(
"Missing start frame for updated version"
"assuming starts at frame 0 for: "
"{} ({})".format(node['name'].value(), representation))
first = 0
first -= self.handle_start

View file

@ -28,6 +28,7 @@ class NukeSubmitDeadline(pyblish.api.InstancePlugin):
deadline_chunk_size = 1
def process(self, instance):
instance.data["toBeRenderedOn"] = "deadline"
families = instance.data["families"]
node = instance[0]

View file

@ -63,10 +63,14 @@ class CollectReviews(api.InstancePlugin):
self.log.debug("Track item on plateMain")
rev_inst = None
for inst in instance.context[:]:
if inst.data["track"] in track:
rev_inst = inst
self.log.debug("Instance review: {}".format(
rev_inst.data["name"]))
if inst.data["track"] != track:
continue
if inst.data["item"].name() != instance.data["item"].name():
continue
rev_inst = inst
break
if rev_inst is None:
raise RuntimeError((
@ -82,7 +86,7 @@ class CollectReviews(api.InstancePlugin):
ext = os.path.splitext(file)[-1][1:]
# change label
instance.data["label"] = "{0} - {1} - ({2}) - review".format(
instance.data["label"] = "{0} - {1} - ({2})".format(
instance.data['asset'], instance.data["subset"], ext
)
@ -99,7 +103,7 @@ class CollectReviews(api.InstancePlugin):
"step": 1,
"fps": rev_inst.data.get("fps"),
"name": "preview",
"tags": ["preview"],
"tags": ["preview", "ftrackreview"],
"ext": ext
}

View file

@ -524,6 +524,10 @@ def burnins_from_data(
profile_name = profile_name.replace(" ", "_").lower()
ffmpeg_args.append("-profile:v {}".format(profile_name))
bit_rate = burnin._streams[0].get("bit_rate")
if bit_rate:
ffmpeg_args.append("-b:v {}".format(bit_rate))
pix_fmt = burnin._streams[0].get("pix_fmt")
if pix_fmt:
ffmpeg_args.append("-pix_fmt {}".format(pix_fmt))

View file

@ -0,0 +1,236 @@
import pymongo
import bson
import random
class TestPerformance():
    '''
    Class for testing performance of representation and their 'files' parts.

    Discussion is if embedded array:
        'files' : [ {'_id': '1111', 'path':'....},
                    {'_id'...}]
    OR documents:
        'files' : {
            '1111': {'path':'....'},
            '2222': {'path':'...'}
        }
    is faster.

    Current results: without additional partial index documents is 3x faster
    With index is array 50x faster than document

    Partial index something like:
        db.getCollection('performance_test').createIndex
            ({'files._id': 1},
             {partialFilterExpresion: {'files': {'$exists': true}})
    !DIDN'T work for me, had to create manually in Compass
    '''

    # Connection details for the local test database
    MONGO_URL = 'mongodb://localhost:27017'
    MONGO_DB = 'performance_test'
    MONGO_COLLECTION = 'performance_test'

    inserted_ids = []

    def __init__(self, version='array'):
        '''
        It creates and fills collection, based on value of 'version'.

        :param version: 'array' - files as embedded array,
                        'doc' - as document
        '''
        self.client = pymongo.MongoClient(self.MONGO_URL)
        self.db = self.client[self.MONGO_DB]
        self.collection_name = self.MONGO_COLLECTION

        self.version = version

        # The 'doc' variant gets its own collection with a '_doc' suffix
        if self.version != 'array':
            self.collection_name = self.MONGO_COLLECTION + '_doc'

        self.collection = self.db[self.collection_name]

        self.ids = []  # for testing
        self.inserted_ids = []

    def prepare(self, no_of_records=100000):
        '''
        Produce 'no_of_records' of representations with 'files' segment.
        It depends on 'version' value in constructor, 'array' or 'doc'.

        :param no_of_records: number of representation documents to insert
        :return: None
        '''
        print('Purging {} collection'.format(self.collection_name))
        self.collection.delete_many({})

        # Shared parent id for all generated representations
        id = bson.objectid.ObjectId()

        insert_recs = []
        for i in range(no_of_records):
            file_id = bson.objectid.ObjectId()
            file_id2 = bson.objectid.ObjectId()
            file_id3 = bson.objectid.ObjectId()

            # Remember generated file ids so `run` can query for them later
            self.inserted_ids.extend([file_id, file_id2, file_id3])

            document = {
                "files": self.get_files(self.version, i,
                                        file_id, file_id2, file_id3),
                "context": {
                    "subset": "workfileLookdev",
                    "username": "petrk",
                    "task": "lookdev",
                    "family": "workfile",
                    "hierarchy": "Assets",
                    "project": {"code": "test", "name": "Test"},
                    "version": 1,
                    "asset": "Cylinder",
                    "representation": "mb",
                    "root": "C:/projects"
                },
                "dependencies": [],
                "name": "mb",
                "parent": {"oid": '{}'.format(id)},
                "data": {
                    "path": "C:\\projects\\Test\\Assets\\Cylinder\\publish\\workfile\\workfileLookdev\\v001\\test_Cylinder_workfileLookdev_v001.mb",
                    "template": "{root}\\{project[name]}\\{hierarchy}\\{asset}\\publish\\{family}\\{subset}\\v{version:0>3}\\{project[code]}_{asset}_{subset}_v{version:0>3}<_{output}><.{frame:0>4}>.{representation}"
                },
                "type": "representation",
                "schema": "pype:representation-2.0"
            }

            insert_recs.append(document)

        print('Prepared {} records in {} collection'.
              format(no_of_records, self.collection_name))

        self.collection.insert_many(insert_recs)
        # TODO refactor to produce real array and not needing ugly regex
        # Store all generated ids in a marker document for `run` to read back
        self.collection.insert_one({"inserted_id": self.inserted_ids})
        print('-' * 50)

    def run(self, queries=1000, loops=3):
        '''
        Run X'queries' that are searching collection Y'loops' times.

        :param queries: how many times do ..find(...)
        :param loops: loop of testing X queries
        :return: None
        '''
        print('Testing version {} on {}'.format(self.version,
                                                self.collection_name))

        # Read back the marker document written by `prepare`
        inserted_ids = list(self.collection.
                            find({"inserted_id": {"$exists": True}}))
        import re
        # Extract the hex ids from the stringified document
        self.ids = re.findall("'[0-9a-z]*'", str(inserted_ids))

        import time
        found_cnt = 0

        for _ in range(loops):
            start = time.time()
            for _ in range(queries):
                # Pick a random known file id and strip the quotes
                val = random.choice(self.ids)
                val = val.replace("'", '')

                if (self.version == 'array'):
                    # prepared for partial index, without 'files': exists
                    # wont engage
                    found = self.collection.\
                        find_one({'files': {"$exists": True},
                                  'files._id': "{}".format(val)})
                else:
                    # document variant: the file id is a key of 'files'
                    key = "files.{}".format(val)
                    found = self.collection.find_one({key: {"$exists": True}})

                if found:
                    found_cnt += 1

            end = time.time()
            print('duration per loop {}'.format(end - start))
            print("found_cnt {}".format(found_cnt))

    def get_files(self, mode, i, file_id, file_id2, file_id3):
        '''
        Wrapper to decide if 'array' or document version should be used.

        :param mode: 'array'|'doc'
        :param i: step number
        :param file_id: ObjectId of first dummy file
        :param file_id2: ..
        :param file_id3: ..
        :return: list or dict with three dummy file records
        '''
        if mode == 'array':
            return self.get_files_array(i, file_id, file_id2, file_id3)
        else:
            return self.get_files_doc(i, file_id, file_id2, file_id3)

    def get_files_array(self, i, file_id, file_id2, file_id3):
        # 'files' as an embedded array of sub-documents with '_id' fields
        return [
            {
                "path": "c:/Test/Assets/Cylinder/publish/workfile/"
                        "workfileLookdev/v001/"
                        "test_CylinderA_workfileLookdev_v{0:03}.mb".format(i),
                "_id": '{}'.format(file_id),
                "hash": "temphash",
                "sites": ["studio"],
                "size": 87236
            },
            {
                "path": "c:/Test/Assets/Cylinder/publish/workfile/"
                        "workfileLookdev/v001/"
                        "test_CylinderB_workfileLookdev_v{0:03}.mb".format(i),
                "_id": '{}'.format(file_id2),
                "hash": "temphash",
                "sites": ["studio"],
                "size": 87236
            },
            {
                "path": "c:/Test/Assets/Cylinder/publish/workfile/"
                        "workfileLookdev/v001/"
                        "test_CylinderC_workfileLookdev_v{0:03}.mb".format(i),
                "_id": '{}'.format(file_id3),
                "hash": "temphash",
                "sites": ["studio"],
                "size": 87236
            }
        ]

    def get_files_doc(self, i, file_id, file_id2, file_id3):
        # 'files' as a sub-document keyed by the file id
        ret = {}
        ret['{}'.format(file_id)] = {
            "path": "c:/Test/Assets/Cylinder/publish/workfile/workfileLookdev/"
                    "v001/test_CylinderA_workfileLookdev_v{0:03}.mb".format(i),
            "hash": "temphash",
            "sites": ["studio"],
            "size": 87236
        }
        ret['{}'.format(file_id2)] = {
            "path": "c:/Test/Assets/Cylinder/publish/workfile/workfileLookdev/"
                    "v001/test_CylinderB_workfileLookdev_v{0:03}.mb".format(i),
            "hash": "temphash",
            "sites": ["studio"],
            "size": 87236
        }
        ret['{}'.format(file_id3)] = {
            "path": "c:/Test/Assets/Cylinder/publish/workfile/workfileLookdev/"
                    "v001/test_CylinderC_workfileLookdev_v{0:03}.mb".format(i),
            "hash": "temphash",
            "sites": ["studio"],
            "size": 87236
        }
        return ret
if __name__ == '__main__':
    # Benchmark both storage layouts back to back, separated by a ruler.
    for index, layout in enumerate(('array', 'doc')):
        if index:
            print('-' * 50)
        tester = TestPerformance(layout)
        tester.prepare()  # enable to prepare data
        tester.run(1000, 3)

View file

@ -0,0 +1,7 @@
"""Public interface of the launcher tool package."""
from .window import LauncherWindow
from . import actions

# Names exported via `from <package> import *`.
__all__ = [
    "LauncherWindow",
    "actions"
]

View file

@ -0,0 +1,104 @@
import os
import importlib
from avalon import api, lib
class ProjectManagerAction(api.Action):
    """Launcher action that opens Avalon's Project Manager tool."""

    name = "projectmanager"
    label = "Project Manager"
    icon = "gear"
    order = 999     # at the end

    def is_compatible(self, session):
        """Only offered once a project is selected in the session."""
        return "AVALON_PROJECT" in session

    def process(self, session, **kwargs):
        """Spawn the project manager tool in a separate python process."""
        launch_args = [
            "-u", "-m", "avalon.tools.projectmanager",
            session['AVALON_PROJECT']
        ]
        return lib.launch(executable="python", args=launch_args)
class LoaderAction(api.Action):
    """Launcher action that opens the Avalon Loader for the project."""

    name = "loader"
    label = "Loader"
    icon = "cloud-download"
    order = 998

    def is_compatible(self, session):
        """Only offered once a project is selected in the session."""
        return "AVALON_PROJECT" in session

    def process(self, session, **kwargs):
        """Spawn the loader tool in a separate python process."""
        launch_args = [
            "-u", "-m", "avalon.tools.loader", session['AVALON_PROJECT']
        ]
        return lib.launch(executable="python", args=launch_args)
class LoaderLibrary(api.Action):
    """Launcher action that opens the project-independent Library Loader."""

    name = "loader_os"
    label = "Library Loader"
    icon = "book"
    order = 997     # at the end

    def is_compatible(self, session):
        """Always available; the library loader needs no project."""
        return True

    def process(self, session, **kwargs):
        """Spawn the library loader tool in a separate python process."""
        launch_args = ["-u", "-m", "avalon.tools.libraryloader"]
        return lib.launch(executable="python", args=launch_args)
def register_default_actions():
    """Register default actions for Launcher"""
    for action_cls in (ProjectManagerAction, LoaderAction, LoaderLibrary):
        api.register_plugin(api.Action, action_cls)
def register_config_actions():
    """Register actions from the configuration for Launcher"""
    config = importlib.import_module(os.environ["AVALON_CONFIG"])

    # The configuration module may choose not to expose launcher actions.
    register = getattr(config, "register_launcher_actions", None)
    if register is None:
        print(
            "Current configuration `%s` has no 'register_launcher_actions'"
            % config.__name__
        )
        return

    register()
def register_environment_actions():
    """Register actions from AVALON_ACTIONS for Launcher."""
    paths = os.environ.get("AVALON_ACTIONS")
    if not paths:
        return

    for path in paths.split(os.pathsep):
        api.register_plugin_path(api.Action, path)

        # Run "register" if found.
        for module in lib.modules_from_path(path):
            if "register" not in dir(module):
                continue

            try:
                module.register()
            except Exception as e:
                # Best-effort: report the failure but keep registering
                # the remaining modules.
                print(
                    "Register method in {0} failed: {1}".format(
                        module, str(e)
                    )
                )

View file

@ -0,0 +1,50 @@
from Qt import QtCore, QtWidgets, QtGui
class ActionDelegate(QtWidgets.QStyledItemDelegate):
    """Item delegate that paints a small "stacked lines" badge on items
    flagged by any of the configured group roles."""

    extender_lines = 2
    extender_bg_brush = QtGui.QBrush(QtGui.QColor(100, 100, 100, 160))
    extender_fg = QtGui.QColor(255, 255, 255, 160)

    def __init__(self, group_roles, *args, **kwargs):
        super(ActionDelegate, self).__init__(*args, **kwargs)
        self.group_roles = group_roles

    def paint(self, painter, option, index):
        super(ActionDelegate, self).paint(painter, option, index)

        # Only decorate items flagged truthy by one of the group roles.
        if not any(index.data(role) for role in self.group_roles):
            return

        badge_width = int(option.decorationSize.width() / 2)
        badge_height = int(option.decorationSize.height() / 2)

        # Rounded badge near the item's top-left corner
        extender_rect = QtCore.QRectF(
            option.rect.x() + (option.rect.width() / 10),
            option.rect.y() + (option.rect.height() / 10),
            badge_width,
            badge_height
        )
        path = QtGui.QPainterPath()
        path.addRoundedRect(extender_rect, 2, 2)

        painter.fillPath(path, self.extender_bg_brush)
        painter.setPen(self.extender_fg)
        painter.drawPath(path)

        # Evenly spaced horizontal lines inside the badge
        divider = (2 * self.extender_lines) + 1
        line_height = badge_height / divider
        line_width = badge_width - (badge_width / 5)
        pos_x = extender_rect.x() + badge_width / 10
        pos_y = extender_rect.y() + line_height
        for _ in range(self.extender_lines):
            line_rect = QtCore.QRectF(
                pos_x, pos_y, line_width, round(line_height)
            )
            painter.fillRect(line_rect, self.extender_fg)
            pos_y += 2 * line_height

View file

@ -0,0 +1,304 @@
"""
This based on the flickcharm-python code from:
https://code.google.com/archive/p/flickcharm-python/
Which states:
This is a Python (PyQt) port of Ariya Hidayat's elegant FlickCharm
hack which adds kinetic scrolling to any scrollable Qt widget.
Licensed under GNU GPL version 2 or later.
It has been altered to fix edge cases where clicks and drags would not
propagate correctly under some conditions. It also allows a small "dead zone"
threshold in which it will still propagate the user pressed click if he or she
travelled only very slightly with the cursor.
"""
import copy
from Qt import QtWidgets, QtCore, QtGui
class FlickData(object):
    """Per-viewport scrolling state used by FlickCharm."""

    # State machine values (see FlickCharm.eventFilter for transitions)
    Steady = 0
    Pressed = 1
    ManualScroll = 2
    AutoScroll = 3
    Stop = 4

    def __init__(self):
        self.state = FlickData.Steady
        self.widget = None
        # Cursor and scrollbar bookkeeping
        self.pressPos = QtCore.QPoint(0, 0)
        self.dragPos = QtCore.QPoint(0, 0)
        self.offset = QtCore.QPoint(0, 0)
        self.speed = QtCore.QPoint(0, 0)
        self.travelled = 0
        # Events re-posted by the charm that must pass through untouched
        self.ignored = []
class FlickCharm(QtCore.QObject):
    """Make scrollable widgets flickable.

    For example:
        charm = FlickCharm()
        charm.activateOn(widget)

    It can `activateOn` multiple widgets with a single FlickCharm instance.

    Be aware that the FlickCharm object must be kept around for it not
    to get garbage collected and losing the flickable behavior.

    Flick away!
    """
    def __init__(self, parent=None):
        super(FlickCharm, self).__init__(parent=parent)
        # Per-viewport FlickData, keyed by the viewport widget
        self.flickData = {}
        self.ticker = QtCore.QBasicTimer()

        # The flick button to use
        self.button = QtCore.Qt.LeftButton

        # The time taken per update tick of flicking behavior
        self.tick_time = 20

        # Allow a item click/press directly when AutoScroll is slower than
        # this threshold velocity
        self.click_in_autoscroll_threshold = 10

        # Allow an item click/press to propagate as opposed to scrolling
        # when the cursor travelled less than this amount of pixels
        # Note: back & forth motion increases the value too
        self.travel_threshold = 20

        self.max_speed = 64  # max scroll speed
        self.drag = 1  # higher drag will stop autoscroll faster

    def activateOn(self, widget):
        """Install flick behavior on *widget* (a scroll-area-like widget)."""
        viewport = widget.viewport()
        viewport.installEventFilter(self)
        widget.installEventFilter(self)

        self.flickData[viewport] = FlickData()
        self.flickData[viewport].widget = widget
        self.flickData[viewport].state = FlickData.Steady

    def deactivateFrom(self, widget):
        """Remove flick behavior previously installed with `activateOn`."""
        viewport = widget.viewport()
        viewport.removeEventFilter(self)
        widget.removeEventFilter(self)

        self.flickData.pop(viewport)

    def eventFilter(self, obj, event):
        """State machine turning mouse events into manual/kinetic scrolling.

        Returns True when the event was consumed by the flick behavior,
        False to let Qt deliver it normally.
        """
        if not obj.isWidgetType():
            return False

        eventType = event.type()
        # Only mouse press/release/move participate in the state machine
        if eventType != QtCore.QEvent.MouseButtonPress and \
                eventType != QtCore.QEvent.MouseButtonRelease and \
                eventType != QtCore.QEvent.MouseMove:
            return False

        # Keyboard modifiers disable flicking entirely
        if event.modifiers() != QtCore.Qt.NoModifier:
            return False

        if obj not in self.flickData:
            return False

        data = self.flickData[obj]
        # Events we re-posted ourselves (see Pressed branch below) must
        # pass through to the widget untouched exactly once.
        found, newIgnored = removeAll(data.ignored, event)
        if found:
            data.ignored = newIgnored
            return False

        if data.state == FlickData.Steady:
            if eventType == QtCore.QEvent.MouseButtonPress:
                if event.buttons() == self.button:
                    self._set_press_pos_and_offset(event, data)
                    data.state = FlickData.Pressed
                    return True

        elif data.state == FlickData.Pressed:
            if eventType == QtCore.QEvent.MouseButtonRelease:
                # User didn't actually scroll but clicked in
                # the widget. Let the original press and release
                # event be evaluated on the Widget
                data.state = FlickData.Steady

                event1 = QtGui.QMouseEvent(QtCore.QEvent.MouseButtonPress,
                                           data.pressPos,
                                           QtCore.Qt.LeftButton,
                                           QtCore.Qt.LeftButton,
                                           QtCore.Qt.NoModifier)

                # Copy the current event
                event2 = QtGui.QMouseEvent(QtCore.QEvent.MouseButtonRelease,
                                           event.pos(),
                                           event.button(),
                                           event.buttons(),
                                           event.modifiers())

                data.ignored.append(event1)
                data.ignored.append(event2)
                QtWidgets.QApplication.postEvent(obj, event1)
                QtWidgets.QApplication.postEvent(obj, event2)
                return True

            elif eventType == QtCore.QEvent.MouseMove:
                # Movement while pressed: start a manual drag-scroll
                data.state = FlickData.ManualScroll
                data.dragPos = QtGui.QCursor.pos()
                if not self.ticker.isActive():
                    self.ticker.start(self.tick_time, self)
                return True

        elif data.state == FlickData.ManualScroll:
            if eventType == QtCore.QEvent.MouseMove:
                pos = event.pos()
                delta = pos - data.pressPos
                # Total distance travelled distinguishes click from drag
                data.travelled += delta.manhattanLength()
                setScrollOffset(data.widget, data.offset - delta)
                return True

            elif eventType == QtCore.QEvent.MouseButtonRelease:
                if data.travelled <= self.travel_threshold:
                    # If the user travelled less than the threshold
                    # don't go into autoscroll mode but assume the user
                    # intended to click instead
                    return self._propagate_click(obj, event, data)
                data.state = FlickData.AutoScroll
                return True

        elif data.state == FlickData.AutoScroll:
            if eventType == QtCore.QEvent.MouseButtonPress:
                # Allow pressing when auto scroll is already slower than
                # the click in autoscroll threshold
                velocity = data.speed.manhattanLength()
                if velocity <= self.click_in_autoscroll_threshold:
                    self._set_press_pos_and_offset(event, data)
                    data.state = FlickData.Pressed
                else:
                    # Fast autoscroll: a press merely stops the motion
                    data.state = FlickData.Stop
                    data.speed = QtCore.QPoint(0, 0)
                return True

            elif eventType == QtCore.QEvent.MouseButtonRelease:
                data.state = FlickData.Steady
                data.speed = QtCore.QPoint(0, 0)
                return True

        elif data.state == FlickData.Stop:
            if eventType == QtCore.QEvent.MouseButtonRelease:
                data.state = FlickData.Steady
                # If the user had a very limited scroll smaller than the
                # threshold consider it a regular press and release.
                if data.travelled < self.travel_threshold:
                    return self._propagate_click(obj, event, data)
                return True

            elif eventType == QtCore.QEvent.MouseMove:
                # Reset the press position and offset to allow us to "continue"
                # the scroll from the new point the user clicked and then held
                # down to continue scrolling after AutoScroll.
                self._set_press_pos_and_offset(event, data)

                data.state = FlickData.ManualScroll
                data.dragPos = QtGui.QCursor.pos()
                if not self.ticker.isActive():
                    self.ticker.start(self.tick_time, self)
                return True

        return False

    def _set_press_pos_and_offset(self, event, data):
        """Store current event position on Press"""
        data.state = FlickData.Pressed
        data.pressPos = copy.copy(event.pos())
        data.offset = scrollOffset(data.widget)
        data.travelled = 0

    def _propagate_click(self, obj, event, data):
        """Propagate from Pressed state with MouseButtonRelease event.

        Use only on button release in certain states to propagate a click,
        for example when the user dragged only a slight distance under the
        travel threshold.
        """
        data.state = FlickData.Pressed
        data.pressPos = copy.copy(event.pos())
        data.offset = scrollOffset(data.widget)
        data.travelled = 0

        # Re-enter the filter so the Pressed branch re-posts the click
        self.eventFilter(obj, event)
        return True

    def timerEvent(self, event):
        """Advance drag speed tracking and kinetic auto-scrolling per tick."""
        count = 0
        for data in self.flickData.values():
            if data.state == FlickData.ManualScroll:
                count += 1
                cursorPos = QtGui.QCursor.pos()
                # Speed is cursor displacement since the previous tick
                data.speed = cursorPos - data.dragPos
                data.dragPos = cursorPos

            elif data.state == FlickData.AutoScroll:
                count += 1
                data.speed = deaccelerate(data.speed,
                                          a=self.drag,
                                          maxVal=self.max_speed)
                p = scrollOffset(data.widget)
                new_p = p - data.speed
                setScrollOffset(data.widget, new_p)

                if scrollOffset(data.widget) == p:
                    # If this scroll resulted in no change on the widget
                    # we reached the end of the list and set the speed to
                    # zero.
                    data.speed = QtCore.QPoint(0, 0)

                if data.speed == QtCore.QPoint(0, 0):
                    data.state = FlickData.Steady

        # No viewport needs ticking anymore; stop the timer
        if count == 0:
            self.ticker.stop()

        super(FlickCharm, self).timerEvent(event)
def scrollOffset(widget):
    """Return the widget's current scrollbar positions as a QPoint."""
    return QtCore.QPoint(
        widget.horizontalScrollBar().value(),
        widget.verticalScrollBar().value()
    )
def setScrollOffset(widget, p):
    """Apply QPoint *p* as the widget's scrollbar positions."""
    horizontal_bar = widget.horizontalScrollBar()
    vertical_bar = widget.verticalScrollBar()
    horizontal_bar.setValue(p.x())
    vertical_bar.setValue(p.y())
def deaccelerate(speed, a=1, maxVal=64):
    """Clamp *speed* to +/-maxVal per axis and shrink its magnitude by *a*."""
    def _slow(value):
        # Clamp first, then step the value towards zero without crossing it
        clamped = max(min(value, maxVal), -maxVal)
        if clamped > 0:
            return max(0, clamped - a)
        if clamped < 0:
            return min(0, clamped + a)
        return clamped

    return QtCore.QPoint(_slow(speed.x()), _slow(speed.y()))
def removeAll(list, val):
    """Return (found, new list) where every element equal to *val* is dropped.

    `found` is True when at least one element was removed; the input list
    is never mutated.
    """
    kept = [element for element in list if element != val]
    return len(kept) != len(list), kept

113
pype/tools/launcher/lib.py Normal file
View file

@ -0,0 +1,113 @@
"""Utility script for updating database with configuration files
Until assets are created entirely in the database, this script
provides a bridge between the file-based project inventory and configuration.
- Migrating an old project:
$ python -m avalon.inventory --extract --silo-parent=f02_prod
$ python -m avalon.inventory --upload
- Managing an existing project:
1. Run `python -m avalon.inventory --load`
2. Update the .inventory.toml or .config.toml
3. Run `python -m avalon.inventory --save`
"""
import os
from Qt import QtGui
from avalon import lib
from avalon.vendor import qtawesome
from pype.api import resources
from pype.lib import ApplicationAction
ICON_CACHE = {}
NOT_FOUND = type("NotFound", (object, ), {})
def get_application_actions(project):
    """Define dynamic Application classes for project using `.toml` files

    Args:
        project (dict): project document from the database

    Returns:
        list: dynamically created ApplicationAction subclasses
    """
    actions = []
    for app in project["config"]["apps"]:
        try:
            app_name = app["name"]
            app_definition = lib.get_application(app_name)
        except Exception as exc:
            # Skip apps whose definition cannot be loaded
            print("Unable to load application: %s - %s" % (app['name'], exc))
            continue

        # Get from app definition, if not there from app in project
        attributes = {
            "name": app_name,
            "label": (
                app_definition.get("label") or app.get("label") or app_name
            ),
            "label_variant": app_definition.get("label_variant"),
            "group": app_definition.get("group") or app.get("group"),
            "icon": app_definition.get("icon", app.get("icon", "folder-o")),
            "color": app_definition.get("color", app.get("color", None)),
            "order": app_definition.get("order", app.get("order", 0)),
            "config": app_definition.copy()
        }

        actions.append(
            type("app_{}".format(app_name), (ApplicationAction,), attributes)
        )

    return actions
def get_action_icon(action):
    """Return a QIcon for *action*, or None when no icon can be resolved.

    Resolution order: cached value, resource file on disk, qtawesome
    FontAwesome name. Failed font lookups are cached as NOT_FOUND so
    they are not retried (file and font icons found on disk are cached;
    successful font icons are rebuilt per call with the action's color).
    """
    icon_name = action.icon
    if not icon_name:
        return None

    global ICON_CACHE

    cached = ICON_CACHE.get(icon_name)
    if cached is NOT_FOUND:
        # A previous lookup already failed; don't retry or re-print.
        return None
    if cached:
        return cached

    # Try the icon name as a bundled resource file first
    icon_path = resources.get_resource(icon_name)
    if os.path.exists(icon_path):
        file_icon = QtGui.QIcon(icon_path)
        ICON_CACHE[icon_name] = file_icon
        return file_icon

    # Fall back to a FontAwesome icon tinted with the action's color
    icon = cached
    try:
        icon_color = getattr(action, "color", None) or "white"
        icon = qtawesome.icon(
            "fa.{}".format(icon_name), color=icon_color
        )
    except Exception:
        ICON_CACHE[icon_name] = NOT_FOUND
        print("Can't load icon \"{}\"".format(icon_name))
    return icon
def get_action_label(action):
    """Compose a display label: "<label> <label_variant>", falling back to
    the action's name when no label is set."""
    label = getattr(action, "label", None)
    if not label:
        return action.name

    variant = getattr(action, "label_variant", None)
    if variant:
        return "{} {}".format(label, variant)
    return label

View file

@ -0,0 +1,306 @@
import copy
import logging
import collections
from . import lib
from Qt import QtCore, QtGui
from avalon.vendor import qtawesome
from avalon import style, api
log = logging.getLogger(__name__)
class TaskModel(QtGui.QStandardItemModel):
    """A model listing the tasks combined for a list of assets"""

    def __init__(self, dbcon, parent=None):
        super(TaskModel, self).__init__(parent=parent)
        self.dbcon = dbcon
        self._num_assets = 0

        self.default_icon = qtawesome.icon(
            "fa.male", color=style.colors.default
        )
        self.no_task_icon = qtawesome.icon(
            "fa.exclamation-circle", color=style.colors.mid
        )

        # Map of task name -> icon from the project configuration
        self._icons = {}

        self._get_task_icons()

    def _get_task_icons(self):
        """Cache icons configured per task name on the current project."""
        if not self.dbcon.Session.get("AVALON_PROJECT"):
            return

        # Get the project configured icons from database
        project = self.dbcon.find_one({"type": "project"})
        for task in project["config"].get("tasks") or []:
            icon_name = task.get("icon")
            if icon_name:
                self._icons[task["name"]] = qtawesome.icon(
                    "fa.{}".format(icon_name), color=style.colors.default
                )

    def set_assets(self, asset_ids=None, asset_docs=None):
        """Set assets to track by their database id

        Arguments:
            asset_ids (list): List of asset ids.
            asset_docs (list): List of asset entities from MongoDB.

        Raises:
            AssertionError: when `asset_ids` contains ids not found in
                the database.
        """
        if asset_docs is None:
            asset_docs = []
            if asset_ids is not None:
                # find assets in db by query
                asset_docs = list(self.dbcon.find({
                    "type": "asset",
                    "_id": {"$in": asset_ids}
                }))

        # Only verify found ids when ids were actually passed in; the
        # previous implementation iterated `asset_ids` unconditionally
        # and raised TypeError when only `asset_docs` was provided.
        if asset_ids is not None:
            db_assets_ids = tuple(
                asset_doc["_id"] for asset_doc in asset_docs
            )
            not_found = tuple(
                str(asset_id)
                for asset_id in asset_ids
                if asset_id not in db_assets_ids
            )
            assert not not_found, "Assets not found by id: {0}".format(
                ", ".join(not_found)
            )

        self.clear()

        if not asset_docs:
            return

        # Union of task names across all assets
        task_names = set()
        for asset_doc in asset_docs:
            asset_tasks = asset_doc.get("data", {}).get("tasks") or set()
            task_names.update(asset_tasks)

        self.beginResetModel()

        if not task_names:
            # Placeholder row signalling no tasks are assigned
            item = QtGui.QStandardItem(self.no_task_icon, "No task")
            item.setEnabled(False)
            self.appendRow(item)

        else:
            for task_name in sorted(task_names):
                icon = self._icons.get(task_name, self.default_icon)
                item = QtGui.QStandardItem(icon, task_name)
                self.appendRow(item)

        self.endResetModel()

    def headerData(self, section, orientation, role):
        """Label the single column "Tasks"."""
        if (
            role == QtCore.Qt.DisplayRole
            and orientation == QtCore.Qt.Horizontal
            and section == 0
        ):
            return "Tasks"
        return super(TaskModel, self).headerData(section, orientation, role)
class ActionModel(QtGui.QStandardItemModel):
    """Model of launchable actions grouped/ordered for the launcher view.

    Items carry their action class(es) in ACTION_ROLE; GROUP_ROLE and
    VARIANT_GROUP_ROLE flag items that expand into multiple actions.
    """

    # Custom item data roles
    ACTION_ROLE = QtCore.Qt.UserRole
    GROUP_ROLE = QtCore.Qt.UserRole + 1
    VARIANT_GROUP_ROLE = QtCore.Qt.UserRole + 2

    def __init__(self, dbcon, parent=None):
        super(ActionModel, self).__init__(parent=parent)
        self.dbcon = dbcon

        self._session = {}
        self._groups = {}
        self.default_icon = qtawesome.icon("fa.cube", color="white")
        # Cache of available actions
        self._registered_actions = list()
        self.discover()

    def discover(self):
        """Set up Actions cache. Run this for each new project."""
        if not self.dbcon.Session.get("AVALON_PROJECT"):
            self._registered_actions = list()
            return

        # Discover all registered actions
        actions = api.discover(api.Action)

        # Get available project actions and the application actions
        project_doc = self.dbcon.find_one({"type": "project"})
        app_actions = lib.get_application_actions(project_doc)
        actions.extend(app_actions)

        self._registered_actions = actions

    def get_icon(self, action, skip_default=False):
        """Resolve the action's icon; fall back to the default cube icon
        unless `skip_default` is set."""
        icon = lib.get_action_icon(action)
        if not icon and not skip_default:
            return self.default_icon
        return icon

    def refresh(self):
        """Rebuild model rows from actions compatible with the session."""
        # Validate actions based on compatibility
        self.clear()

        self._groups.clear()
        actions = self.filter_compatible_actions(self._registered_actions)

        self.beginResetModel()

        # Partition actions into singles, label-variant groups and
        # explicitly named groups.
        single_actions = []
        varianted_actions = collections.defaultdict(list)
        grouped_actions = collections.defaultdict(list)
        for action in actions:
            # Groups
            group_name = getattr(action, "group", None)

            # Label variants
            label = getattr(action, "label", None)
            label_variant = getattr(action, "label_variant", None)
            if label_variant and not label:
                print((
                    "Invalid action \"{}\" has set `label_variant` to \"{}\""
                    ", but doesn't have set `label` attribute"
                ).format(action.name, label_variant))
                action.label_variant = None
                label_variant = None

            if group_name:
                grouped_actions[group_name].append(action)

            elif label_variant:
                varianted_actions[label].append(action)
            else:
                single_actions.append(action)

        items_by_order = collections.defaultdict(list)
        # Variant groups with a single member collapse back to singles
        for label, actions in tuple(varianted_actions.items()):
            if len(actions) == 1:
                varianted_actions.pop(label)
                single_actions.append(actions[0])
                continue

            icon = None
            order = None
            for action in actions:
                # First member with an icon provides the group icon
                if icon is None:
                    _icon = lib.get_action_icon(action)
                    if _icon:
                        icon = _icon
                # Group sorts by its lowest member order
                if order is None or action.order < order:
                    order = action.order

            if icon is None:
                icon = self.default_icon

            # All members share `label`, so the last action's label
            # equals the group label.
            item = QtGui.QStandardItem(icon, action.label)
            item.setData(actions, self.ACTION_ROLE)
            item.setData(True, self.VARIANT_GROUP_ROLE)
            items_by_order[order].append(item)

        for action in single_actions:
            icon = self.get_icon(action)
            item = QtGui.QStandardItem(icon, lib.get_action_label(action))
            item.setData(action, self.ACTION_ROLE)
            items_by_order[action.order].append(item)

        for group_name, actions in grouped_actions.items():
            icon = None
            order = None
            for action in actions:
                if order is None or action.order < order:
                    order = action.order

                if icon is None:
                    _icon = lib.get_action_icon(action)
                    if _icon:
                        icon = _icon

            if icon is None:
                icon = self.default_icon

            item = QtGui.QStandardItem(icon, group_name)
            item.setData(actions, self.ACTION_ROLE)
            item.setData(True, self.GROUP_ROLE)
            items_by_order[order].append(item)

        # Append rows ordered by the computed order value
        for order in sorted(items_by_order.keys()):
            for item in items_by_order[order]:
                self.appendRow(item)

        self.endResetModel()

    def set_session(self, session):
        """Store a copy of the launcher session and refresh the rows."""
        assert isinstance(session, dict)
        self._session = copy.deepcopy(session)
        self.refresh()

    def filter_compatible_actions(self, actions):
        """Collect all actions which are compatible with the environment

        Each compatible action will be translated to a dictionary to ensure
        the action can be visualized in the launcher.

        Args:
            actions (list): list of classes

        Returns:
            list: compatible actions sorted on (order, name)
        """
        compatible = []
        for action in actions:
            if action().is_compatible(self._session):
                compatible.append(action)

        # Sort by order and name
        return sorted(
            compatible,
            key=lambda action: (action.order, action.name)
        )
class ProjectModel(QtGui.QStandardItemModel):
    """List of projects"""

    def __init__(self, dbcon, parent=None):
        super(ProjectModel, self).__init__(parent=parent)

        self.dbcon = dbcon
        self.hide_invisible = False
        self.project_icon = qtawesome.icon("fa.map", color="white")

    def refresh(self):
        """Rebuild rows from the current project documents."""
        self.clear()
        self.beginResetModel()
        for project_doc in self.get_projects():
            self.appendRow(
                QtGui.QStandardItem(self.project_icon, project_doc["name"])
            )
        self.endResetModel()

    def get_projects(self):
        """Return project documents sorted by name, honoring visibility."""
        sorted_docs = sorted(
            self.dbcon.projects(), key=lambda doc: doc["name"]
        )
        if not self.hide_invisible:
            return sorted_docs

        # Drop projects explicitly flagged as not visible
        return [
            doc for doc in sorted_docs
            if doc["data"].get("visible", True)
        ]

View file

@ -0,0 +1,444 @@
import copy
import collections
from Qt import QtWidgets, QtCore, QtGui
from avalon.vendor import qtawesome
from .delegates import ActionDelegate
from . import lib
from .models import TaskModel, ActionModel, ProjectModel
from .flickcharm import FlickCharm
class ProjectBar(QtWidgets.QWidget):
    """Horizontal bar with a combobox to switch the active project."""

    # Emitted with the new combobox index whenever selection changes.
    project_changed = QtCore.Signal(int)

    def __init__(self, dbcon, parent=None):
        super(ProjectBar, self).__init__(parent)

        self.dbcon = dbcon

        self.model = ProjectModel(self.dbcon)
        self.model.hide_invisible = True

        self.project_combobox = QtWidgets.QComboBox()
        self.project_combobox.setModel(self.model)
        self.project_combobox.setRootModelIndex(QtCore.QModelIndex())

        layout = QtWidgets.QHBoxLayout(self)
        layout.setContentsMargins(0, 0, 0, 0)
        layout.addWidget(self.project_combobox)

        self.setSizePolicy(
            QtWidgets.QSizePolicy.MinimumExpanding,
            QtWidgets.QSizePolicy.Maximum
        )

        # Initialize
        self.refresh()

        # Signals
        self.project_combobox.currentIndexChanged.connect(self.project_changed)

        # Set current project by default if it's set.
        project_name = self.dbcon.Session.get("AVALON_PROJECT")
        if project_name:
            self.set_project(project_name)

    def get_current_project(self):
        """Return the currently selected project name (combobox text)."""
        return self.project_combobox.currentText()

    def set_project(self, project_name):
        """Select *project_name* in the combobox if the model contains it."""
        index = self.project_combobox.findText(project_name)
        if index >= 0:
            self.project_combobox.setCurrentIndex(index)

    def refresh(self):
        """Reload the project list, trying to keep the previous selection."""
        prev_project_name = self.get_current_project()

        # Refresh without signals: the model reset changes the index several
        # times and each change would otherwise fire `project_changed`.
        self.project_combobox.blockSignals(True)
        self.model.refresh()
        self.set_project(prev_project_name)
        self.project_combobox.blockSignals(False)

        # Emit once with the final index after the refresh settled.
        self.project_changed.emit(self.project_combobox.currentIndex())
class ActionBar(QtWidgets.QWidget):
    """Icon bar of launchable actions.

    Plain items trigger directly on click.  Group items (generic groups and
    variant groups) open a popup menu from which the concrete action is
    picked.
    """

    # Emitted with the action class the user picked.
    action_clicked = QtCore.Signal(object)

    def __init__(self, dbcon, parent=None):
        super(ActionBar, self).__init__(parent)

        self.dbcon = dbcon

        layout = QtWidgets.QHBoxLayout(self)
        layout.setContentsMargins(8, 0, 8, 0)

        view = QtWidgets.QListView(self)
        view.setProperty("mode", "icon")
        view.setObjectName("IconView")
        view.setViewMode(QtWidgets.QListView.IconMode)
        view.setResizeMode(QtWidgets.QListView.Adjust)
        view.setSelectionMode(QtWidgets.QListView.NoSelection)
        view.setEditTriggers(QtWidgets.QListView.NoEditTriggers)
        view.setWrapping(True)
        view.setGridSize(QtCore.QSize(70, 75))
        view.setIconSize(QtCore.QSize(30, 30))
        view.setSpacing(0)
        view.setWordWrap(True)

        model = ActionModel(self.dbcon, self)
        view.setModel(model)

        # TODO better group delegate
        delegate = ActionDelegate(
            [model.GROUP_ROLE, model.VARIANT_GROUP_ROLE],
            self
        )
        view.setItemDelegate(delegate)

        layout.addWidget(view)

        self.model = model
        self.view = view

        # Make view flickable
        flick = FlickCharm(parent=view)
        flick.activateOn(view)

        self.set_row_height(1)

        view.clicked.connect(self.on_clicked)

    def set_row_height(self, rows):
        # 75px matches the grid-cell height configured on the view.
        self.setMinimumHeight(rows * 75)

    def on_clicked(self, index):
        """Trigger the clicked action, or pop up a menu for action groups.

        Variant groups list each variant flat.  Generic groups additionally
        nest same-labelled variants into submenus while preserving the order
        in which labels first appeared.
        """
        if not index.isValid():
            return

        is_group = index.data(self.model.GROUP_ROLE)
        is_variant_group = index.data(self.model.VARIANT_GROUP_ROLE)
        if not is_group and not is_variant_group:
            action = index.data(self.model.ACTION_ROLE)
            self.action_clicked.emit(action)
            return

        # Group item: ACTION_ROLE holds a list of action classes.
        actions = index.data(self.model.ACTION_ROLE)
        menu = QtWidgets.QMenu(self)
        actions_mapping = {}

        if is_variant_group:
            for action in actions:
                menu_action = QtWidgets.QAction(
                    lib.get_action_label(action)
                )
                menu.addAction(menu_action)
                actions_mapping[menu_action] = action
        else:
            # Label variants: bucket actions by their base label while
            # remembering first-appearance order in `orders`.
            by_variant_label = collections.defaultdict(list)
            orders = []
            for action in actions:
                label = getattr(action, "label", None)
                label_variant = getattr(action, "label_variant", None)
                # A variant without a base label is treated as no variant.
                if label_variant and not label:
                    label_variant = None

                if not label_variant:
                    orders.append(action)
                    continue

                if label not in orders:
                    orders.append(label)
                by_variant_label[label].append(action)

            for action_item in orders:
                variant_actions = by_variant_label.get(action_item)
                if not variant_actions:
                    # `action_item` is a plain action (no variants).
                    action = action_item
                elif len(variant_actions) == 1:
                    action = variant_actions[0]
                else:
                    action = None

                if action:
                    menu_action = QtWidgets.QAction(
                        lib.get_action_label(action)
                    )
                    menu.addAction(menu_action)
                    actions_mapping[menu_action] = action
                    continue

                # Multiple variants under one label: nest them in a submenu.
                # BUGFIX: the submenu title must be this group's label
                # (`action_item`), not the stale `label` loop variable left
                # over from the bucketing loop above.
                sub_menu = QtWidgets.QMenu(action_item, menu)
                for action in variant_actions:
                    menu_action = QtWidgets.QAction(
                        lib.get_action_label(action)
                    )
                    sub_menu.addAction(menu_action)
                    actions_mapping[menu_action] = action
                menu.addMenu(sub_menu)

        result = menu.exec_(QtGui.QCursor.pos())
        if result:
            action = actions_mapping[result]
            self.action_clicked.emit(action)
class TasksWidget(QtWidgets.QWidget):
    """Widget showing active Tasks"""

    # Emitted whenever the task selection (or the asset context) changes.
    task_changed = QtCore.Signal()

    # Selection flags: add whole rows to the current selection.
    selection_mode = (
        QtCore.QItemSelectionModel.Select | QtCore.QItemSelectionModel.Rows
    )

    def __init__(self, dbcon, parent=None):
        super(TasksWidget, self).__init__(parent)

        self.dbcon = dbcon

        view = QtWidgets.QTreeView(self)
        view.setIndentation(0)
        view.setEditTriggers(QtWidgets.QTreeView.NoEditTriggers)
        model = TaskModel(self.dbcon)
        view.setModel(model)

        layout = QtWidgets.QVBoxLayout(self)
        layout.addWidget(view)

        view.selectionModel().selectionChanged.connect(self.task_changed)

        self.model = model
        self.view = view

        # Remembered across asset switches so a same-named task can be
        # re-selected on the next asset.
        self._last_selected_task = None

    def set_asset(self, asset_id):
        """Show tasks of *asset_id*; ``None`` clears the task list."""
        if asset_id is None:
            # Asset deselected
            self.model.set_assets()
            return

        # Try and preserve the last selected task and reselect it
        # after switching assets. If there's no currently selected
        # asset keep whatever the "last selected" was prior to it.
        current = self.get_current_task()
        if current:
            self._last_selected_task = current

        self.model.set_assets([asset_id])

        if self._last_selected_task:
            self.select_task(self._last_selected_task)

        # Force a task changed emit.
        self.task_changed.emit()

    def select_task(self, task_name):
        """Select a task by name.

        If the task does not exist in the current model then selection is only
        cleared.

        Args:
            task_name (str): Name of the task to select.
        """
        # Clear selection
        self.view.selectionModel().clearSelection()

        # Select the task
        for row in range(self.model.rowCount()):
            index = self.model.index(row, 0)
            _task_name = index.data(QtCore.Qt.DisplayRole)
            if _task_name == task_name:
                self.view.selectionModel().select(index, self.selection_mode)

                # Set the currently active index
                self.view.setCurrentIndex(index)
                break

    def get_current_task(self):
        """Return name of task at current index (selected)

        Returns:
            str: Name of the current task, or ``None`` when nothing is
                selected.
        """
        index = self.view.currentIndex()
        if self.view.selectionModel().isSelected(index):
            return index.data(QtCore.Qt.DisplayRole)
class ActionHistory(QtWidgets.QPushButton):
    """Button that pops up a list of recently launched actions."""

    # Emitted with an (action, session) tuple picked from the popup.
    trigger_history = QtCore.Signal(tuple)

    def __init__(self, parent=None):
        super(ActionHistory, self).__init__(parent=parent)

        # Maximum number of remembered entries; older ones get dropped.
        self.max_history = 15

        self.setFixedWidth(25)
        self.setFixedHeight(25)

        self.setIcon(qtawesome.icon("fa.history", color="#CCCCCC"))
        self.setIconSize(QtCore.QSize(15, 15))

        # List of (action, session) tuples, oldest first.
        self._history = []

        self.clicked.connect(self.show_history)

    def show_history(self):
        """Open a frameless popup listing the history, newest entry first."""
        # Show history popup
        if not self._history:
            return

        widget = QtWidgets.QListWidget()
        widget.setSelectionMode(widget.NoSelection)
        widget.setStyleSheet("""
        * {
            font-family: "Courier New";
        }
        """)

        largest_label_num_chars = 0
        # NOTE(review): despite the name, this is the character count of the
        # longest action label; it is used below as a fixed column width.
        largest_action_label = max(len(x[0].label) for x in self._history)

        # Custom item-data role carrying the (action, session) payload.
        action_session_role = QtCore.Qt.UserRole + 1

        for action, session in reversed(self._history):
            project = session.get("AVALON_PROJECT")
            asset = session.get("AVALON_ASSET")
            task = session.get("AVALON_TASK")
            breadcrumb = " > ".join(x for x in [project, asset, task] if x)

            # Pad the action column to a uniform width for alignment
            # (monospace font is set on the widget above).
            m = "{{action:{0}}} | {{breadcrumb}}".format(largest_action_label)
            label = m.format(action=action.label, breadcrumb=breadcrumb)

            icon = lib.get_action_icon(action)
            item = QtWidgets.QListWidgetItem(icon, label)
            item.setData(action_session_role, (action, session))

            largest_label_num_chars = max(largest_label_num_chars, len(label))

            widget.addItem(item)

        # Show history
        dialog = QtWidgets.QDialog(parent=self)
        dialog.setWindowTitle("Action History")
        dialog.setWindowFlags(
            QtCore.Qt.FramelessWindowHint | QtCore.Qt.Popup
        )
        dialog.setSizePolicy(
            QtWidgets.QSizePolicy.Ignored,
            QtWidgets.QSizePolicy.Ignored
        )

        layout = QtWidgets.QVBoxLayout(dialog)
        layout.setContentsMargins(0, 0, 0, 0)
        layout.addWidget(widget)

        def on_clicked(index):
            # Re-trigger the picked entry and dismiss the popup.
            data = index.data(action_session_role)
            self.trigger_history.emit(data)
            dialog.close()

        widget.clicked.connect(on_clicked)

        # padding + icon + text
        width = 40 + (largest_label_num_chars * 7)
        entry_height = 21
        height = entry_height * len(self._history)

        # Anchor the popup so it opens above and left of the cursor.
        point = QtGui.QCursor().pos()
        dialog.setGeometry(
            point.x() - width,
            point.y() - height,
            width,
            height
        )
        dialog.exec_()

        # Keep a reference so the list widget is not garbage collected.
        self.widget_popup = widget

    def add_action(self, action, session):
        """Append an (action, deep-copied session) entry, de-duplicated."""
        key = (action, copy.deepcopy(session))

        # Remove entry if already exists
        if key in self._history:
            self._history.remove(key)
        self._history.append(key)

        # Slice the end of the list if we exceed the max history
        if len(self._history) > self.max_history:
            self._history = self._history[-self.max_history:]

    def clear_history(self):
        """Drop all remembered entries."""
        self._history.clear()
class SlidePageWidget(QtWidgets.QStackedWidget):
    """Stacked widget that nicely slides between its pages"""

    # Unit offsets describing where the incoming page starts relative to
    # the viewport (multiplied by the widget size below).
    directions = {
        "left": QtCore.QPoint(-1, 0),
        "right": QtCore.QPoint(1, 0),
        "up": QtCore.QPoint(0, 1),
        "down": QtCore.QPoint(0, -1)
    }

    def slide_view(self, index, direction="right"):
        """Animate a slide to page *index* entering from *direction*.

        No-op when *index* is already current; logs and returns on an
        unknown direction.
        """
        if self.currentIndex() == index:
            return

        offset_direction = self.directions.get(direction)
        if offset_direction is None:
            print("BUG: invalid slide direction: {}".format(direction))
            return

        width = self.frameRect().width()
        height = self.frameRect().height()
        offset = QtCore.QPoint(
            offset_direction.x() * width,
            offset_direction.y() * height
        )

        # Place the incoming page off-screen and raise it above the rest.
        new_page = self.widget(index)
        new_page.setGeometry(0, 0, width, height)
        curr_pos = new_page.pos()
        new_page.move(curr_pos + offset)
        new_page.show()
        new_page.raise_()

        current_page = self.currentWidget()

        # Animate both pages in parallel over 250 ms: the old page slides
        # out while the new one slides in, easing out at the end.
        b_pos = QtCore.QByteArray(b"pos")

        anim_old = QtCore.QPropertyAnimation(current_page, b_pos, self)
        anim_old.setDuration(250)
        anim_old.setStartValue(curr_pos)
        anim_old.setEndValue(curr_pos - offset)
        anim_old.setEasingCurve(QtCore.QEasingCurve.OutQuad)

        anim_new = QtCore.QPropertyAnimation(new_page, b_pos, self)
        anim_new.setDuration(250)
        anim_new.setStartValue(curr_pos + offset)
        anim_new.setEndValue(curr_pos)
        anim_new.setEasingCurve(QtCore.QEasingCurve.OutQuad)

        anim_group = QtCore.QParallelAnimationGroup(self)
        anim_group.addAnimation(anim_old)
        anim_group.addAnimation(anim_new)

        def slide_finished():
            # Only switch the stacked index once the animation has ended.
            self.setCurrentWidget(new_page)

        anim_group.finished.connect(slide_finished)
        anim_group.start()

View file

@ -0,0 +1,467 @@
import copy
import logging
from Qt import QtWidgets, QtCore, QtGui
from avalon import style
from avalon.api import AvalonMongoDB
from pype.api import resources
from avalon.tools import lib as tools_lib
from avalon.tools.widgets import AssetWidget
from avalon.vendor import qtawesome
from .models import ProjectModel
from .widgets import (
ProjectBar, ActionBar, TasksWidget, ActionHistory, SlidePageWidget
)
from .flickcharm import FlickCharm
class IconListView(QtWidgets.QListView):
    """Styled ListView that allows to toggle between icon and list mode.

    Toggling between the two modes is done by Right Mouse Click.
    """

    IconMode = 0
    ListMode = 1

    def __init__(self, parent=None, mode=ListMode):
        super(IconListView, self).__init__(parent=parent)

        # Workaround for scrolling being super slow or fast when
        # toggling between the two visual modes
        self.setVerticalScrollMode(self.ScrollPerPixel)
        self.setObjectName("IconView")

        self._mode = None
        self.set_mode(mode)

    def set_mode(self, mode):
        """Apply view settings for *mode* (IconMode or ListMode).

        No-op when *mode* is already active.
        """
        if mode == self._mode:
            return
        self._mode = mode

        if mode == self.IconMode:
            self.setViewMode(QtWidgets.QListView.IconMode)
            self.setResizeMode(QtWidgets.QListView.Adjust)
            self.setWrapping(True)
            self.setWordWrap(True)
            self.setGridSize(QtCore.QSize(151, 90))
            self.setIconSize(QtCore.QSize(50, 50))
            self.setSpacing(0)
            self.setAlternatingRowColors(False)

            self.setProperty("mode", "icon")
            self.style().polish(self)

            self.verticalScrollBar().setSingleStep(30)

        # BUGFIX: was `elif self.ListMode:` — `ListMode` is the constant 1,
        # so the branch was always truthy; compare the requested mode.
        elif mode == self.ListMode:
            self.setProperty("mode", "list")
            self.style().polish(self)

            self.setViewMode(QtWidgets.QListView.ListMode)
            self.setResizeMode(QtWidgets.QListView.Adjust)
            self.setWrapping(False)
            self.setWordWrap(False)
            self.setIconSize(QtCore.QSize(20, 20))
            self.setGridSize(QtCore.QSize(100, 25))
            self.setSpacing(0)
            self.setAlternatingRowColors(False)

            # BUGFIX: QAbstractSlider.setSingleStep takes an int; passing
            # the float 33.33 raises TypeError under PyQt5's strict API.
            self.verticalScrollBar().setSingleStep(33)

    def mousePressEvent(self, event):
        """Right click toggles between the two modes; otherwise default."""
        if event.button() == QtCore.Qt.RightButton:
            self.set_mode(int(not self._mode))
        return super(IconListView, self).mousePressEvent(event)
class ProjectsPanel(QtWidgets.QWidget):
    """Page listing all visible projects as clickable icons."""

    # Emitted with the project name that was clicked.
    project_clicked = QtCore.Signal(str)

    def __init__(self, dbcon, parent=None):
        super(ProjectsPanel, self).__init__(parent=parent)

        main_layout = QtWidgets.QVBoxLayout(self)

        self.dbcon = dbcon
        self.dbcon.install()

        project_view = IconListView(parent=self)
        project_view.setSelectionMode(QtWidgets.QListView.NoSelection)

        # Enable touch-style drag scrolling on the view.
        charm = FlickCharm(parent=self)
        charm.activateOn(project_view)

        project_model = ProjectModel(self.dbcon)
        project_model.hide_invisible = True
        project_model.refresh()
        project_view.setModel(project_model)

        main_layout.addWidget(project_view)

        project_view.clicked.connect(self.on_clicked)

        self.model = project_model
        self.view = project_view

    def on_clicked(self, index):
        """Emit `project_clicked` with the project name stored at *index*."""
        if not index.isValid():
            return
        self.project_clicked.emit(index.data(QtCore.Qt.DisplayRole))
class AssetsPanel(QtWidgets.QWidget):
    """Assets page"""

    # Emitted when the back arrow is pressed (return to the projects page).
    back_clicked = QtCore.Signal()

    def __init__(self, dbcon, parent=None):
        super(AssetsPanel, self).__init__(parent=parent)
        self.dbcon = dbcon

        # project bar
        project_bar_widget = QtWidgets.QWidget(self)
        layout = QtWidgets.QHBoxLayout(project_bar_widget)
        layout.setSpacing(4)

        btn_back_icon = qtawesome.icon("fa.angle-left", color="white")
        btn_back = QtWidgets.QPushButton(project_bar_widget)
        btn_back.setIcon(btn_back_icon)
        btn_back.setFixedWidth(23)
        btn_back.setFixedHeight(23)

        project_bar = ProjectBar(self.dbcon, project_bar_widget)

        layout.addWidget(btn_back)
        layout.addWidget(project_bar)

        # assets
        assets_proxy_widgets = QtWidgets.QWidget(self)
        assets_proxy_widgets.setContentsMargins(0, 0, 0, 0)
        assets_layout = QtWidgets.QVBoxLayout(assets_proxy_widgets)

        assets_widget = AssetWidget(
            dbcon=self.dbcon, parent=assets_proxy_widgets
        )

        # Make assets view flickable
        flick = FlickCharm(parent=self)
        flick.activateOn(assets_widget.view)
        assets_widget.view.setVerticalScrollMode(
            assets_widget.view.ScrollPerPixel
        )
        assets_layout.addWidget(assets_widget)

        # tasks
        tasks_widget = TasksWidget(self.dbcon, self)

        # Horizontal splitter: assets on the left, tasks on the right.
        body = QtWidgets.QSplitter()
        body.setContentsMargins(0, 0, 0, 0)
        body.setSizePolicy(
            QtWidgets.QSizePolicy.Expanding,
            QtWidgets.QSizePolicy.Expanding
        )
        body.setOrientation(QtCore.Qt.Horizontal)
        body.addWidget(assets_proxy_widgets)
        body.addWidget(tasks_widget)
        body.setStretchFactor(0, 100)
        body.setStretchFactor(1, 65)

        # main layout
        layout = QtWidgets.QVBoxLayout(self)
        layout.setContentsMargins(0, 0, 0, 0)
        layout.setSpacing(0)
        layout.addWidget(project_bar_widget)
        layout.addWidget(body)

        self.project_bar = project_bar
        self.assets_widget = assets_widget
        self.tasks_widget = tasks_widget

        # signals
        project_bar.project_changed.connect(self.on_project_changed)
        assets_widget.selection_changed.connect(self.on_asset_changed)
        btn_back.clicked.connect(self.back_clicked)

        # Force initial refresh for the assets since we might not be
        # trigging a Project switch if we click the project that was set
        # prior to launching the Launcher
        # todo: remove this behavior when AVALON_PROJECT is not required
        assets_widget.refresh()

    def set_project(self, project):
        """Switch the project bar to *project*, refreshing when unchanged."""
        before = self.project_bar.get_current_project()
        self.project_bar.set_project(project)
        if project == before:
            # Force a refresh on the assets if the project hasn't changed
            self.assets_widget.refresh()

    def on_project_changed(self):
        """Write the new project into the session and refresh the assets."""
        project_name = self.project_bar.get_current_project()
        self.dbcon.Session["AVALON_PROJECT"] = project_name

        self.assets_widget.refresh()

        # Force asset change callback to ensure tasks are correctly reset
        tools_lib.schedule(self.on_asset_changed, 0.05, channel="assets")

    def on_asset_changed(self):
        """Callback on asset selection changed

        This updates the task view.
        """
        print("Asset changed..")
        asset_doc = self.assets_widget.get_active_asset_document()
        if asset_doc:
            self.tasks_widget.set_asset(asset_doc["_id"])
        else:
            self.tasks_widget.set_asset(None)

    def get_current_session(self):
        """Return a copy of the session extended with asset/task selection.

        Returns:
            dict: Deep-copied session with AVALON_ASSET / AVALON_TASK set
                from the current UI selection (when available).
        """
        asset_doc = self.assets_widget.get_active_asset_document()
        session = copy.deepcopy(self.dbcon.Session)

        # Clear some values that we are about to collect if available
        session.pop("AVALON_SILO", None)
        session.pop("AVALON_ASSET", None)
        session.pop("AVALON_TASK", None)

        if asset_doc:
            session["AVALON_ASSET"] = asset_doc["name"]
            task_name = self.tasks_widget.get_current_task()
            if task_name:
                session["AVALON_TASK"] = task_name

        return session
class LauncherWindow(QtWidgets.QDialog):
    """Launcher interface"""

    def __init__(self, parent=None):
        super(LauncherWindow, self).__init__(parent)
        self.log = logging.getLogger(
            ".".join([__name__, self.__class__.__name__])
        )
        self.dbcon = AvalonMongoDB()

        self.setWindowTitle("Launcher")
        self.setFocusPolicy(QtCore.Qt.StrongFocus)
        self.setAttribute(QtCore.Qt.WA_DeleteOnClose, False)

        icon = QtGui.QIcon(resources.pype_icon_filepath())
        self.setWindowIcon(icon)
        self.setStyleSheet(style.load_stylesheet())

        # Allow minimize
        self.setWindowFlags(
            self.windowFlags() | QtCore.Qt.WindowMinimizeButtonHint
        )

        # Page 0 lists projects; page 1 shows assets/tasks of the project.
        project_panel = ProjectsPanel(self.dbcon)
        asset_panel = AssetsPanel(self.dbcon)

        page_slider = SlidePageWidget()
        page_slider.addWidget(project_panel)
        page_slider.addWidget(asset_panel)

        # actions
        actions_bar = ActionBar(self.dbcon, self)

        # statusbar
        statusbar = QtWidgets.QWidget()
        layout = QtWidgets.QHBoxLayout(statusbar)

        message_label = QtWidgets.QLabel()
        message_label.setFixedHeight(15)

        action_history = ActionHistory()
        action_history.setStatusTip("Show Action History")

        layout.addWidget(message_label)
        layout.addWidget(action_history)

        # Vertically split Pages and Actions
        body = QtWidgets.QSplitter()
        body.setContentsMargins(0, 0, 0, 0)
        body.setSizePolicy(
            QtWidgets.QSizePolicy.Expanding,
            QtWidgets.QSizePolicy.Expanding
        )
        body.setOrientation(QtCore.Qt.Vertical)
        body.addWidget(page_slider)
        body.addWidget(actions_bar)

        # Set useful default sizes and set stretch
        # for the pages so that is the only one that
        # stretches on UI resize.
        body.setStretchFactor(0, 10)
        body.setSizes([580, 160])

        layout = QtWidgets.QVBoxLayout(self)
        layout.addWidget(body)
        layout.addWidget(statusbar)
        layout.setSpacing(0)
        layout.setContentsMargins(0, 0, 0, 0)

        self.message_label = message_label
        self.project_panel = project_panel
        self.asset_panel = asset_panel
        self.actions_bar = actions_bar
        self.action_history = action_history
        self.page_slider = page_slider
        # Index of the page currently shown (0 = projects, 1 = assets).
        self._page = 0

        # signals
        actions_bar.action_clicked.connect(self.on_action_clicked)
        action_history.trigger_history.connect(self.on_history_action)
        project_panel.project_clicked.connect(self.on_project_clicked)
        asset_panel.back_clicked.connect(self.on_back_clicked)

        # Add some signals to propagate from the asset panel
        for signal in (
            asset_panel.project_bar.project_changed,
            asset_panel.assets_widget.selection_changed,
            asset_panel.tasks_widget.task_changed
        ):
            signal.connect(self.on_session_changed)

        # todo: Simplify this callback connection
        asset_panel.project_bar.project_changed.connect(
            self.on_project_changed
        )

        self.resize(520, 740)

    def set_page(self, page):
        """Slide to *page* (0 or 1) unless it is already shown."""
        current = self.page_slider.currentIndex()
        if current == page and self._page == page:
            return

        direction = "right" if page > current else "left"
        self._page = page
        self.page_slider.slide_view(page, direction=direction)

    def refresh(self):
        """Refresh assets and re-discover available actions."""
        self.asset_panel.assets_widget.refresh()
        self.refresh_actions()

    def echo(self, message):
        """Show *message* in the statusbar for five seconds."""
        self.message_label.setText(str(message))
        QtCore.QTimer.singleShot(5000, lambda: self.message_label.setText(""))
        self.log.debug(message)

    def on_project_changed(self):
        """Sync the session project and re-discover action plug-ins."""
        project_name = self.asset_panel.project_bar.get_current_project()
        self.dbcon.Session["AVALON_PROJECT"] = project_name

        # Update the Action plug-ins available for the current project
        self.actions_bar.model.discover()

    def on_session_changed(self):
        self.refresh_actions()

    def refresh_actions(self, delay=1):
        # Debounced: several calls within *delay* collapse into one refresh.
        tools_lib.schedule(self.on_refresh_actions, delay)

    def on_project_clicked(self, project_name):
        """Enter *project_name*: switch session, page and actions."""
        self.dbcon.Session["AVALON_PROJECT"] = project_name
        # Refresh projects
        self.asset_panel.project_bar.refresh()
        self.asset_panel.set_project(project_name)
        self.set_page(1)
        self.refresh_actions()

    def on_back_clicked(self):
        """Return to the projects page."""
        self.set_page(0)
        self.project_panel.model.refresh()  # Refresh projects
        self.refresh_actions()

    def on_refresh_actions(self):
        session = self.get_current_session()
        self.actions_bar.model.set_session(session)
        self.actions_bar.model.refresh()

    def on_action_clicked(self, action):
        self.echo("Running action: {}".format(action.name))
        self.run_action(action)

    def on_history_action(self, history_data):
        """Handle a pick from the action-history popup.

        Ctrl+click restores the stored session location in the UI; a plain
        click reruns the action with its stored session.
        """
        action, session = history_data
        app = QtWidgets.QApplication.instance()
        modifiers = app.keyboardModifiers()

        is_control_down = QtCore.Qt.ControlModifier & modifiers
        if is_control_down:
            # Revert to that "session" location
            self.set_session(session)
        else:
            # Control is NOT held: rerun the action in its stored session.
            self.run_action(action, session=session)

    def get_current_session(self):
        """Return the session dict matching the currently visible page."""
        if self._page == 1:
            # Assets page
            return self.asset_panel.get_current_session()

        session = copy.deepcopy(self.dbcon.Session)

        # Remove some potential invalid session values
        # that we know are not set when not browsing in
        # a project.
        session.pop("AVALON_PROJECT", None)
        session.pop("AVALON_ASSET", None)
        session.pop("AVALON_SILO", None)
        session.pop("AVALON_TASK", None)
        return session

    def run_action(self, action, session=None):
        """Record *action* in history and execute it against *session*."""
        if session is None:
            session = self.get_current_session()

        # Add to history
        self.action_history.add_action(action, session)

        # Process the Action
        try:
            action().process(session)
        except Exception as exc:
            self.log.warning("Action launch failed.", exc_info=True)
            self.echo("Failed: {}".format(str(exc)))

    def set_session(self, session):
        """Restore UI selection (project, silo, asset, task) from *session*."""
        project_name = session.get("AVALON_PROJECT")
        silo = session.get("AVALON_SILO")
        asset_name = session.get("AVALON_ASSET")
        task_name = session.get("AVALON_TASK")

        if project_name:
            # Force the "in project" view.
            self.page_slider.slide_view(1, direction="right")
            index = self.asset_panel.project_bar.project_combobox.findText(
                project_name
            )
            if index >= 0:
                self.asset_panel.project_bar.project_combobox.setCurrentIndex(
                    index
                )

        if silo:
            self.asset_panel.assets_widget.set_silo(silo)

        if asset_name:
            self.asset_panel.assets_widget.select_assets([asset_name])

        if task_name:
            # requires a forced refresh first
            self.asset_panel.on_asset_changed()
            self.asset_panel.tasks_widget.select_task(task_name)

View file

@ -250,6 +250,8 @@ class Controller(QtCore.QObject):
self.processing["current_group_order"] is not None
and plugin.order > self.processing["current_group_order"]
):
current_group_order = self.processing["current_group_order"]
new_next_group_order = None
new_current_group_order = self.processing["next_group_order"]
if new_current_group_order is not None:
@ -270,12 +272,13 @@ class Controller(QtCore.QObject):
if self.collect_state == 0:
self.collect_state = 1
self.switch_toggleability.emit(True)
self.passed_group.emit(new_current_group_order)
self.passed_group.emit(current_group_order)
yield IterationBreak("Collected")
self.passed_group.emit(new_current_group_order)
if self.errored:
yield IterationBreak("Last group errored")
else:
self.passed_group.emit(current_group_order)
if self.errored:
yield IterationBreak("Last group errored")
if self.collect_state == 1:
self.collect_state = 2

View file

@ -440,9 +440,6 @@ class PluginModel(QtGui.QStandardItemModel):
if label is None:
label = "Other"
if order is None:
order = 99999999999999
group_item = self.group_items.get(label)
if not group_item:
group_item = GroupItem(label, order=order)
@ -873,13 +870,18 @@ class ArtistProxy(QtCore.QAbstractProxyModel):
self.rowsInserted.emit(self.parent(), new_from, new_to + 1)
def _remove_rows(self, parent_row, from_row, to_row):
removed_rows = []
increment_num = self.mapping_from[parent_row][from_row]
to_end_index = len(self.mapping_from[parent_row]) - 1
for _idx in range(0, parent_row):
to_end_index += len(self.mapping_from[_idx])
removed_rows = 0
_emit_last = None
for row_num in reversed(range(from_row, to_row + 1)):
row = self.mapping_from[parent_row].pop(row_num)
_emit_last = row
removed_rows.append(row)
removed_rows += 1
_emit_first = int(increment_num)
mapping_from_len = len(self.mapping_from)
@ -899,11 +901,8 @@ class ArtistProxy(QtCore.QAbstractProxyModel):
self.mapping_from[idx_i][idx_j] = increment_num
increment_num += 1
first_to_row = None
for row in removed_rows:
if first_to_row is None:
first_to_row = row
self.mapping_to.pop(row)
for idx in range(removed_rows):
self.mapping_to.pop(to_end_index - idx)
return (_emit_first, _emit_last)

View file

@ -779,10 +779,10 @@ class Window(QtWidgets.QDialog):
for group_item in self.plugin_model.group_items.values():
# TODO check only plugins from the group
if (
group_item.publish_states & GroupStates.HasFinished
or (order is not None and group_item.order >= order)
):
if group_item.publish_states & GroupStates.HasFinished:
continue
if order != group_item.order:
continue
if group_item.publish_states & GroupStates.HasError:

View file

@ -0,0 +1,8 @@
# Expose the standalone publish tool entry points at package level.
from .app import (
    show,
    cli
)

# Public API of this package.
__all__ = [
    "show",
    "cli"
]

View file

@ -0,0 +1,24 @@
import os
import sys
import app
import signal

from Qt import QtWidgets
from avalon import style

if __name__ == "__main__":
    qt_app = QtWidgets.QApplication([])
    # app.setQuitOnLastWindowClosed(False)
    qt_app.setStyleSheet(style.load_stylesheet())

    def signal_handler(sig, frame):
        # Allow terminating the GUI from the terminal (Ctrl+C / SIGTERM).
        print("You pressed Ctrl+C. Process ended.")
        qt_app.quit()

    signal.signal(signal.SIGINT, signal_handler)
    signal.signal(signal.SIGTERM, signal_handler)

    # The last CLI argument carries pyblish plugin paths joined with
    # os.pathsep (consumed by app.Window's `pyblish_paths` parameter).
    window = app.Window(sys.argv[-1].split(os.pathsep))
    window.show()

    sys.exit(qt_app.exec_())

View file

@ -1,18 +1,8 @@
import os
import sys
import json
from subprocess import Popen
from bson.objectid import ObjectId
from pype import lib as pypelib
from avalon.vendor.Qt import QtWidgets, QtCore
from avalon import api, style, schema
from avalon.tools import lib as parentlib
from .widgets import *
# Move this to pype lib?
from avalon.tools.libraryloader.io_nonsingleton import DbConnector
from Qt import QtWidgets, QtCore
from widgets import AssetWidget, FamilyWidget, ComponentsWidget, ShadowWidget
from avalon.api import AvalonMongoDB
module = sys.modules[__name__]
module.window = None
class Window(QtWidgets.QDialog):
"""Main window of Standalone publisher.
@ -20,7 +10,7 @@ class Window(QtWidgets.QDialog):
:param parent: Main widget that cares about all GUIs
:type parent: QtWidgets.QMainWindow
"""
_db = DbConnector()
_db = AvalonMongoDB()
_jobs = {}
valid_family = False
valid_components = False
@ -28,14 +18,15 @@ class Window(QtWidgets.QDialog):
WIDTH = 1100
HEIGHT = 500
def __init__(self, parent=None):
def __init__(self, pyblish_paths, parent=None):
super(Window, self).__init__(parent=parent)
self._db.install()
self.pyblish_paths = pyblish_paths
self.setWindowTitle("Standalone Publish")
self.setFocusPolicy(QtCore.Qt.StrongFocus)
self.setAttribute(QtCore.Qt.WA_DeleteOnClose)
self.setStyleSheet(style.load_stylesheet())
# Validators
self.valid_parent = False
@ -99,8 +90,14 @@ class Window(QtWidgets.QDialog):
def resizeEvent(self, event=None):
''' Helps resize shadow widget
'''
position_x = (self.frameGeometry().width()-self.shadow_widget.frameGeometry().width())/2
position_y = (self.frameGeometry().height()-self.shadow_widget.frameGeometry().height())/2
position_x = (
self.frameGeometry().width()
- self.shadow_widget.frameGeometry().width()
) / 2
position_y = (
self.frameGeometry().height()
- self.shadow_widget.frameGeometry().height()
) / 2
self.shadow_widget.move(position_x, position_y)
w = self.frameGeometry().width()
h = self.frameGeometry().height()
@ -144,7 +141,10 @@ class Window(QtWidgets.QDialog):
- files/folders in clipboard (tested only on Windows OS)
- copied path of file/folder in clipboard ('c:/path/to/folder')
'''
if event.key() == QtCore.Qt.Key_V and event.modifiers() == QtCore.Qt.ControlModifier:
if (
event.key() == QtCore.Qt.Key_V
and event.modifiers() == QtCore.Qt.ControlModifier
):
clip = QtWidgets.QApplication.clipboard()
self.widget_components.process_mime_data(clip)
super().keyPressEvent(event)
@ -190,29 +190,3 @@ class Window(QtWidgets.QDialog):
data.update(self.widget_components.collect_data())
return data
def show(parent=None, debug=False):
try:
module.window.close()
del module.window
except (RuntimeError, AttributeError):
pass
with parentlib.application():
window = Window(parent)
window.show()
module.window = window
def cli(args):
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("project")
parser.add_argument("asset")
args = parser.parse_args(args)
# project = args.project
# asset = args.asset
show()

View file

@ -0,0 +1,35 @@
import os
import sys
import pype
import pyblish.api
def main(env):
    """Register pyblish plug-in paths from *env* and open the publish UI.

    Args:
        env (mapping): Environment mapping providing ``PUBLISH_PATHS`` and
            optionally ``PYPE_PROJECT_PLUGINS`` (os.pathsep-joined paths).

    Returns:
        Result of ``avalon.tools.publish.show()``.
    """
    from avalon.tools import publish

    # Registers pype's Global pyblish plugins
    pype.install()

    # Register additional plug-in paths that actually exist on disk.
    publish_paths = (env.get("PUBLISH_PATHS") or "").split(os.pathsep)
    for raw_path in publish_paths:
        normalized = os.path.normpath(raw_path)
        if os.path.exists(normalized):
            pyblish.api.register_plugin_path(normalized)

    # Register project specific plugins
    project_name = os.environ["AVALON_PROJECT"]
    project_plugin_roots = env.get("PYPE_PROJECT_PLUGINS") or ""
    for root in project_plugin_roots.split(os.pathsep):
        candidate = os.path.join(root, project_name, "plugins")
        if os.path.exists(candidate):
            pyblish.api.register_plugin_path(candidate)

    return publish.show()


if __name__ == "__main__":
    result = main(os.environ)
    sys.exit(not bool(result))

View file

@ -1,6 +1,4 @@
from avalon.vendor.Qt import *
from avalon.vendor import qtawesome
from avalon import style
from Qt import QtCore
HelpRole = QtCore.Qt.UserRole + 2
FamilyRole = QtCore.Qt.UserRole + 3
@ -8,9 +6,6 @@ ExistsRole = QtCore.Qt.UserRole + 4
PluginRole = QtCore.Qt.UserRole + 5
PluginKeyRole = QtCore.Qt.UserRole + 6
from ..resources import get_resource
from .button_from_svgs import SvgResizable, SvgButton
from .model_node import Node
from .model_tree import TreeModel
from .model_asset import AssetModel, _iter_model_rows

View file

@ -1,8 +1,9 @@
import logging
import collections
from . import QtCore, QtGui
from Qt import QtCore, QtGui
from . import TreeModel, Node
from . import style, qtawesome
from avalon.vendor import qtawesome
from avalon import style
log = logging.getLogger(__name__)

View file

@ -1,4 +1,4 @@
from . import QtCore
from Qt import QtCore
class ExactMatchesFilterProxyModel(QtCore.QSortFilterProxyModel):

View file

@ -1,6 +1,7 @@
from . import QtCore, TreeModel
from . import Node
from . import qtawesome, style
from Qt import QtCore
from . import Node, TreeModel
from avalon.vendor import qtawesome
from avalon import style
class TasksTemplateModel(TreeModel):

View file

@ -1,4 +1,4 @@
from . import QtCore
from Qt import QtCore
from . import Node

View file

@ -1,4 +1,4 @@
from . import QtWidgets, QtCore
from Qt import QtWidgets, QtCore
class DeselectableTreeView(QtWidgets.QTreeView):

View file

Before

Width:  |  Height:  |  Size: 730 B

After

Width:  |  Height:  |  Size: 730 B

Before After
Before After

View file

Before

Width:  |  Height:  |  Size: 803 B

After

Width:  |  Height:  |  Size: 803 B

Before After
Before After

View file

Before

Width:  |  Height:  |  Size: 484 B

After

Width:  |  Height:  |  Size: 484 B

Before After
Before After

View file

Before

Width:  |  Height:  |  Size: 257 KiB

After

Width:  |  Height:  |  Size: 257 KiB

Before After
Before After

View file

Before

Width:  |  Height:  |  Size: 5 KiB

After

Width:  |  Height:  |  Size: 5 KiB

Before After
Before After

View file

Before

Width:  |  Height:  |  Size: 8.4 KiB

After

Width:  |  Height:  |  Size: 8.4 KiB

Before After
Before After

View file

Before

Width:  |  Height:  |  Size: 1 KiB

After

Width:  |  Height:  |  Size: 1 KiB

Before After
Before After

View file

Before

Width:  |  Height:  |  Size: 41 KiB

After

Width:  |  Height:  |  Size: 41 KiB

Before After
Before After

View file

Before

Width:  |  Height:  |  Size: 1.6 KiB

After

Width:  |  Height:  |  Size: 1.6 KiB

Before After
Before After

View file

Before

Width:  |  Height:  |  Size: 1.6 KiB

After

Width:  |  Height:  |  Size: 1.6 KiB

Before After
Before After

View file

Before

Width:  |  Height:  |  Size: 1.6 KiB

After

Width:  |  Height:  |  Size: 1.6 KiB

Before After
Before After

View file

Before

Width:  |  Height:  |  Size: 1.6 KiB

After

Width:  |  Height:  |  Size: 1.6 KiB

Before After
Before After

Some files were not shown because too many files have changed in this diff Show more