Mirror of https://github.com/ynput/ayon-core.git (synced 2026-01-02 08:54:53 +01:00)

Merge branch 'release/2.14.0' into develop

Commit 038b28f3b5: 54 changed files with 3950 additions and 735 deletions
@@ -1,7 +1,8 @@
pr-wo-labels=False
exclude-labels=duplicate,question,invalid,wontfix,weekly-digest
author=False
unreleased=False
unreleased=True
since-tag=2.11.0
release-branch=master
enhancement-label=**Enhancements:**
issues=False
|||
CHANGELOG.md (68 changed lines)
|
|
@@ -1,5 +1,69 @@
# Changelog

## [2.13.6](https://github.com/pypeclub/pype/tree/2.13.6) (2020-11-15)

[Full Changelog](https://github.com/pypeclub/pype/compare/2.13.5...2.13.6)

**Fixed bugs:**

- Maya workfile version wasn't syncing with renders properly [\#711](https://github.com/pypeclub/pype/pull/711)
- Maya: Fix for publishing multiple cameras with review from the same scene [\#710](https://github.com/pypeclub/pype/pull/710)

## [2.13.5](https://github.com/pypeclub/pype/tree/2.13.5) (2020-11-12)

[Full Changelog](https://github.com/pypeclub/pype/compare/2.13.4...2.13.5)

**Enhancements:**

- 3.0 lib refactor [\#664](https://github.com/pypeclub/pype/issues/664)

**Fixed bugs:**

- Wrong thumbnail file was picked when publishing sequence in standalone publisher [\#703](https://github.com/pypeclub/pype/pull/703)
- Fix: Burnin data pass and FFmpeg tool check [\#701](https://github.com/pypeclub/pype/pull/701)

## [2.13.4](https://github.com/pypeclub/pype/tree/2.13.4) (2020-11-09)

[Full Changelog](https://github.com/pypeclub/pype/compare/2.13.3...2.13.4)

**Enhancements:**

- AfterEffects integration with Websocket [\#663](https://github.com/pypeclub/pype/issues/663)

**Fixed bugs:**

- Photoshop uhiding hidden layers [\#688](https://github.com/pypeclub/pype/issues/688)
- \#688 - Fix publishing hidden layers [\#692](https://github.com/pypeclub/pype/pull/692)

**Closed issues:**

- Nuke Favorite directories "shot dir" "project dir" - not working [\#684](https://github.com/pypeclub/pype/issues/684)

**Merged pull requests:**

- Nuke Favorite directories "shot dir" "project dir" - not working \#684 [\#685](https://github.com/pypeclub/pype/pull/685)

## [2.13.3](https://github.com/pypeclub/pype/tree/2.13.3) (2020-11-03)

[Full Changelog](https://github.com/pypeclub/pype/compare/2.13.2...2.13.3)

**Enhancements:**

- TV paint base integration [\#612](https://github.com/pypeclub/pype/issues/612)

**Fixed bugs:**

- Fix ffmpeg executable path with spaces [\#680](https://github.com/pypeclub/pype/pull/680)
- Hotfix: Added default version number [\#679](https://github.com/pypeclub/pype/pull/679)

## [2.13.2](https://github.com/pypeclub/pype/tree/2.13.2) (2020-10-28)

[Full Changelog](https://github.com/pypeclub/pype/compare/2.13.1...2.13.2)

**Fixed bugs:**

- Nuke: wrong conditions when fixing legacy write nodes [\#665](https://github.com/pypeclub/pype/pull/665)

## [2.13.1](https://github.com/pypeclub/pype/tree/2.13.1) (2020-10-23)

[Full Changelog](https://github.com/pypeclub/pype/compare/2.13.0...2.13.1)

@@ -12,6 +76,7 @@

- Layer name is not propagating to metadata in Photoshop [\#654](https://github.com/pypeclub/pype/issues/654)
- Loader in Photoshop fails with "can't set attribute" [\#650](https://github.com/pypeclub/pype/issues/650)
- Nuke Load mp4 wrong frame range [\#661](https://github.com/pypeclub/pype/issues/661)
- Hiero: Review video file adding one frame to the end [\#659](https://github.com/pypeclub/pype/issues/659)

## [2.13.0](https://github.com/pypeclub/pype/tree/2.13.0) (2020-10-18)

@@ -60,7 +125,6 @@

**Merged pull requests:**

- Audio file existence check [\#614](https://github.com/pypeclub/pype/pull/614)
- Avalon module without Qt [\#581](https://github.com/pypeclub/pype/pull/581)
- Ftrack module without Qt [\#577](https://github.com/pypeclub/pype/pull/577)

@@ -135,6 +199,7 @@

**Merged pull requests:**

- Audio file existence check [\#614](https://github.com/pypeclub/pype/pull/614)
- NKS small fixes [\#587](https://github.com/pypeclub/pype/pull/587)
- Standalone publisher editorial plugins interfering [\#580](https://github.com/pypeclub/pype/pull/580)

@@ -185,7 +250,6 @@
**Fixed bugs:**

- Maya: Fix tile order for Draft Tile Assembler [\#511](https://github.com/pypeclub/pype/pull/511)
- NukeStudio: Fix comment tag collection and integration. [\#508](https://github.com/pypeclub/pype/pull/508)
- Remove extra dash [\#501](https://github.com/pypeclub/pype/pull/501)
- Fix: strip dot from repre names in single frame renders [\#498](https://github.com/pypeclub/pype/pull/498)
- Better handling of destination during integrating [\#485](https://github.com/pypeclub/pype/pull/485)
@@ -378,14 +378,8 @@ class AExpectedFiles:
            renderable = False
            if self.maya_is_true(cmds.getAttr("{}.renderable".format(cam))):
                renderable = True

            for override in self.get_layer_overrides(
                "{}.renderable".format(cam), self.layer
            ):
                renderable = self.maya_is_true(override)

            if renderable:
                renderable_cameras.append(cam)

        return renderable_cameras

    def maya_is_true(self, attr_val):
@@ -2,6 +2,7 @@ import os
import logging

from avalon.tvpaint.communication_server import register_localization_file
from avalon.tvpaint import pipeline
import avalon.api
import pyblish.api
from pype import PLUGINS_DIR

@@ -13,6 +14,23 @@ LOAD_PATH = os.path.join(PLUGINS_DIR, "tvpaint", "load")
CREATE_PATH = os.path.join(PLUGINS_DIR, "tvpaint", "create")


def on_instance_toggle(instance, old_value, new_value):
    instance_id = instance.data["uuid"]
    found_idx = None
    current_instances = pipeline.list_instances()
    for idx, workfile_instance in enumerate(current_instances):
        if workfile_instance["uuid"] == instance_id:
            found_idx = idx
            break

    if found_idx is None:
        return

    if "active" in current_instances[found_idx]:
        current_instances[found_idx]["active"] = new_value
        pipeline._write_instances(current_instances)


def install():
    log.info("Pype - Installing TVPaint integration")
    current_dir = os.path.dirname(os.path.abspath(__file__))

@@ -23,6 +41,12 @@ def install():
    avalon.api.register_plugin_path(avalon.api.Loader, LOAD_PATH)
    avalon.api.register_plugin_path(avalon.api.Creator, CREATE_PATH)

    registered_callbacks = (
        pyblish.api.registered_callbacks().get("instanceToggled") or []
    )
    if on_instance_toggle not in registered_callbacks:
        pyblish.api.register_callback("instanceToggled", on_instance_toggle)


def uninstall():
    log.info("Pype - Uninstalling TVPaint integration")
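For context, "instanceToggled" is a pyblish signal that GUI front ends emit when an artist checks or unchecks an instance; the callback registered above then writes the new state back into the TVPaint workfile metadata. A minimal sketch of that flow, with illustrative values; the emit call is an assumption based on pyblish's generic callback API and is not part of this commit:

import pyblish.api

context = pyblish.api.Context()
instance = context.create_instance("renderPassMain")
# The TVPaint creator stores a matching "uuid" in the workfile metadata.
instance.data["uuid"] = "hypothetical-workfile-instance-uuid"

# A GUI (pyblish-qml / pyblish-lite) emits the signal on a checkbox change;
# every callback registered for "instanceToggled" receives these kwargs.
pyblish.api.emit(
    "instanceToggled",
    instance=instance,
    old_value=True,
    new_value=False
)
# on_instance_toggle() looks the instance up by "uuid" in
# pipeline.list_instances() and persists the new "active" value.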
pype/lib/abstract_collect_render.py (new file, 257 lines)

@@ -0,0 +1,257 @@
# -*- coding: utf-8 -*-
|
||||
"""Collect render template.
|
||||
|
||||
TODO: use @dataclass when times come.
|
||||
|
||||
"""
|
||||
from abc import abstractmethod
|
||||
|
||||
import attr
|
||||
import six
|
||||
|
||||
from avalon import api
|
||||
import pyblish.api
|
||||
|
||||
from .abstract_metaplugins import AbstractMetaContextPlugin
|
||||
|
||||
|
||||
@attr.s
|
||||
class RenderInstance(object):
|
||||
"""Data collected by collectors.
|
||||
|
||||
This data class later on passed to collected instances.
|
||||
Those attributes are required later on.
|
||||
|
||||
"""
|
||||
|
||||
# metadata
|
||||
version = attr.ib() # instance version
|
||||
time = attr.ib() # time of instance creation (avalon.api.time())
|
||||
source = attr.ib() # path to source scene file
|
||||
label = attr.ib() # label to show in GUI
|
||||
subset = attr.ib() # subset name
|
||||
asset = attr.ib() # asset name (AVALON_ASSET)
|
||||
attachTo = attr.ib() # subset name to attach render to
|
||||
setMembers = attr.ib() # list of nodes/members producing render output
|
||||
publish = attr.ib() # bool, True to publish instance
|
||||
name = attr.ib() # instance name
|
||||
|
||||
# format settings
|
||||
resolutionWidth = attr.ib() # resolution width (1920)
|
||||
resolutionHeight = attr.ib() # resolution height (1080)
|
||||
pixelAspect = attr.ib() # pixel aspect (1.0)
|
||||
|
||||
# time settings
|
||||
frameStart = attr.ib() # start frame
|
||||
frameEnd = attr.ib()  # end frame
|
||||
frameStep = attr.ib() # frame step
|
||||
|
||||
# --------------------
|
||||
# With default values
|
||||
# metadata
|
||||
renderer = attr.ib(default="") # renderer - can be used in Deadline
|
||||
review = attr.ib(default=False)  # generate review from instance (bool)
|
||||
priority = attr.ib(default=50) # job priority on farm
|
||||
|
||||
family = attr.ib(default="renderlayer")
|
||||
families = attr.ib(default=["renderlayer"]) # list of families
|
||||
|
||||
# format settings
|
||||
multipartExr = attr.ib(default=False) # flag for multipart exrs
|
||||
convertToScanline = attr.ib(default=False) # flag for exr conversion
|
||||
|
||||
tileRendering = attr.ib(default=False) # bool: treat render as tiles
|
||||
tilesX = attr.ib(default=0) # number of tiles in X
|
||||
tilesY = attr.ib(default=0) # number of tiles in Y
|
||||
|
||||
# submit_publish_job
|
||||
toBeRenderedOn = attr.ib(default=None)
|
||||
deadlineSubmissionJob = attr.ib(default=None)
|
||||
anatomyData = attr.ib(default=None)
|
||||
outputDir = attr.ib(default=None)
|
||||
|
||||
@frameStart.validator
def check_frame_start(self, _, value):
    """Validate that frame start is not larger than frame end."""
    if value > self.frameEnd:
        raise ValueError(
            "frameStart must be smaller than or equal to frameEnd")
|
||||
|
||||
@frameEnd.validator
def check_frame_end(self, _, value):
    """Validate that frame end is not smaller than frame start."""
    if value < self.frameStart:
        raise ValueError(
            "frameEnd must be greater than or equal to frameStart")
|
||||
|
||||
@tilesX.validator
def check_tiles_x(self, _, value):
    """Validate that tile count in X is not less than 1."""
    if not self.tileRendering:
        return
    if value < 1:
        raise ValueError("tile X size cannot be less than 1")
|
||||
|
||||
if value == 1 and self.tilesY == 1:
    raise ValueError("both tile X and Y sizes are set to 1")
|
||||
|
||||
@tilesY.validator
def check_tiles_y(self, _, value):
    """Validate that tile count in Y is not less than 1."""
    if not self.tileRendering:
        return
    if value < 1:
        raise ValueError("tile Y size cannot be less than 1")
|
||||
|
||||
if value == 1 and self.tilesX == 1:
    raise ValueError("both tile X and Y sizes are set to 1")
|
||||
|
||||
|
||||
@six.add_metaclass(AbstractMetaContextPlugin)
|
||||
class AbstractCollectRender(pyblish.api.ContextPlugin):
|
||||
"""Gather all publishable render layers from renderSetup."""
|
||||
|
||||
order = pyblish.api.CollectorOrder + 0.01
|
||||
label = "Collect Render"
|
||||
sync_workfile_version = False
|
||||
|
||||
def __init__(self, *args, **kwargs):
|
||||
"""Constructor."""
|
||||
super(AbstractCollectRender, self).__init__(*args, **kwargs)
|
||||
self._file_path = None
|
||||
self._asset = api.Session["AVALON_ASSET"]
|
||||
self._context = None
|
||||
|
||||
def process(self, context):
|
||||
"""Entry point to collector."""
|
||||
self._context = context
|
||||
for instance in context:
|
||||
# make sure workfile instance publishing is enabled
|
||||
try:
|
||||
if "workfile" in instance.data["families"]:
|
||||
instance.data["publish"] = True
|
||||
if "renderFarm" in instance.data["families"]:
|
||||
instance.data["remove"] = True
|
||||
except KeyError:
|
||||
# be tolerant if 'families' is missing.
|
||||
pass
|
||||
|
||||
self._file_path = context.data["currentFile"].replace("\\", "/")
|
||||
|
||||
render_instances = self.get_instances(context)
|
||||
for render_instance in render_instances:
|
||||
exp_files = self.get_expected_files(render_instance)
|
||||
assert exp_files, "no file names were generated, this is a bug"
|
||||
|
||||
# if we want to attach render to subset, check if we have AOV's
|
||||
# in expectedFiles. If so, raise error as we cannot attach AOV
|
||||
# (considered to be subset on its own) to another subset
|
||||
if render_instance.attachTo:
|
||||
assert isinstance(exp_files, list), (
|
||||
"attaching multiple AOVs or renderable cameras to "
|
||||
"subset is not supported"
|
||||
)
|
||||
|
||||
frame_start_render = int(render_instance.frameStart)
|
||||
frame_end_render = int(render_instance.frameEnd)
|
||||
|
||||
if (int(context.data['frameStartHandle']) == frame_start_render
|
||||
and int(context.data['frameEndHandle']) == frame_end_render): # noqa: W503, E501
|
||||
|
||||
handle_start = context.data['handleStart']
|
||||
handle_end = context.data['handleEnd']
|
||||
frame_start = context.data['frameStart']
|
||||
frame_end = context.data['frameEnd']
|
||||
frame_start_handle = context.data['frameStartHandle']
|
||||
frame_end_handle = context.data['frameEndHandle']
|
||||
else:
|
||||
handle_start = 0
|
||||
handle_end = 0
|
||||
frame_start = frame_start_render
|
||||
frame_end = frame_end_render
|
||||
frame_start_handle = frame_start_render
|
||||
frame_end_handle = frame_end_render
|
||||
|
||||
data = {
|
||||
"handleStart": handle_start,
|
||||
"handleEnd": handle_end,
|
||||
"frameStart": frame_start,
|
||||
"frameEnd": frame_end,
|
||||
"frameStartHandle": frame_start_handle,
|
||||
"frameEndHandle": frame_end_handle,
|
||||
"byFrameStep": int(render_instance.frameStep),
|
||||
|
||||
"author": context.data["user"],
|
||||
# Add source to allow tracing back to the scene from
# which it was originally submitted
|
||||
"expectedFiles": exp_files,
|
||||
}
|
||||
if self.sync_workfile_version:
|
||||
data["version"] = context.data["version"]
|
||||
|
||||
# add additional data
|
||||
data = self.add_additional_data(data)
|
||||
render_instance_dict = attr.asdict(render_instance)
|
||||
|
||||
instance = context.create_instance(render_instance.name)
|
||||
instance.data["label"] = render_instance.label
|
||||
instance.data.update(render_instance_dict)
|
||||
instance.data.update(data)
|
||||
|
||||
self.post_collecting_action()
|
||||
|
||||
@abstractmethod
|
||||
def get_instances(self, context):
|
||||
"""Get all renderable instances and their data.
|
||||
|
||||
Args:
|
||||
context (pyblish.api.Context): Context object.
|
||||
|
||||
Returns:
|
||||
list of :class:`RenderInstance`: All collected renderable instances
|
||||
(like render layers, write nodes, etc.)
|
||||
|
||||
"""
|
||||
pass
|
||||
|
||||
@abstractmethod
|
||||
def get_expected_files(self, render_instance):
|
||||
"""Get list of expected files.
|
||||
|
||||
Returns:
|
||||
list: expected files. This can be either simple list of files with
|
||||
their paths, or list of dictionaries, where key is name of AOV
|
||||
for example and value is list of files for that AOV.
|
||||
|
||||
Example::
|
||||
|
||||
['/path/to/file.001.exr', '/path/to/file.002.exr']
|
||||
|
||||
or as dictionary:
|
||||
|
||||
[
|
||||
{
|
||||
"beauty": ['/path/to/beauty.001.exr', ...],
|
||||
"mask": ['/path/to/mask.001.exr']
|
||||
}
|
||||
]
|
||||
|
||||
"""
|
||||
pass
|
||||
|
||||
def add_additional_data(self, data):
|
||||
"""Add additional data to collected instance.
|
||||
|
||||
This can be overridden by host implementation to add custom
|
||||
additional data.
|
||||
|
||||
"""
|
||||
return data
|
||||
|
||||
def post_collecting_action(self):
|
||||
"""Execute some code after collection is done.
|
||||
|
||||
This is useful for example for restoring current render layer.
|
||||
|
||||
"""
|
||||
pass
|
||||
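To show how a host integration is meant to hook into the collector above, here is a minimal, hypothetical subclass; the host queries, paths and values are placeholders and are not part of this commit:

from pype.lib.abstract_collect_render import (
    AbstractCollectRender,
    RenderInstance,
)


class CollectExampleRender(AbstractCollectRender):
    """Hypothetical host collector built on the abstract class above."""

    def get_instances(self, context):
        # A real implementation would query the host (render layers,
        # write nodes, ...); the values below are placeholders.
        instance = RenderInstance(
            version=1,
            time="20201115T120000Z",
            source=context.data["currentFile"],
            label="exampleLayer - renderlayer",
            subset="renderExampleLayer",
            asset="sh010",
            attachTo=None,
            setMembers=["exampleLayer"],
            publish=True,
            name="exampleLayer",
            resolutionWidth=1920,
            resolutionHeight=1080,
            pixelAspect=1.0,
            frameStart=1001,
            frameEnd=1010,
            frameStep=1,
        )
        return [instance]

    def get_expected_files(self, render_instance):
        # Flat list variant; the AOV dictionary variant is equally valid.
        return [
            "/path/to/renders/exampleLayer.{:04d}.exr".format(frame)
            for frame in range(
                int(render_instance.frameStart),
                int(render_instance.frameEnd) + 1,
            )
        ]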
pype/lib/abstract_expected_files.py (new file, 53 lines)

@@ -0,0 +1,53 @@
# -*- coding: utf-8 -*-
"""Abstract ExpectedFiles class definition."""
from abc import ABCMeta, abstractmethod
import six


@six.add_metaclass(ABCMeta)
class ExpectedFiles:
    """Class grouping functionality for all supported renderers.

    Attributes:
        multipart (bool): Flag if multipart exrs are used.

    """

    multipart = False

    @abstractmethod
    def get(self, render_instance):
        """Get expected files for given renderer and render layer.

        This method should return all files we are expecting to be rendered
        from the host. Usually `render_instance` corresponds to a *render
        layer*. The result can be either a flat list of file paths, or a
        list of dictionaries where each key is, for example, an AOV name or
        a channel.

        Example::

            ['/path/to/file.001.exr', '/path/to/file.002.exr']

            or as dictionary:

            [
                {
                    "beauty": ['/path/to/beauty.001.exr', ...],
                    "mask": ['/path/to/mask.001.exr']
                }
            ]


        Args:
            render_instance (:class:`RenderInstance`): Data passed from
                collector to determine files. This should be an instance of
                :class:`abstract_collect_render.RenderInstance`.

        Returns:
            list: Full paths to expected rendered files.
            list of dict: Paths to expected rendered files categorized by
                AOVs, etc.

        """
        raise NotImplementedError()
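A hypothetical renderer-specific implementation of the abstract class above might return the per-AOV dictionary variant like this (class name and path convention are placeholders, not part of this commit):

from pype.lib.abstract_expected_files import ExpectedFiles


class ExpectedFilesExampleRenderer(ExpectedFiles):
    """Hypothetical per-renderer implementation."""

    multipart = False

    def get(self, render_instance):
        frames = range(
            int(render_instance.frameStart),
            int(render_instance.frameEnd) + 1,
        )
        # Dictionary variant: one key per AOV, each with its file sequence.
        return [{
            aov: [
                "/path/to/renders/{}_{}.{:04d}.exr".format(
                    render_instance.name, aov, frame
                )
                for frame in frames
            ]
            for aov in ("beauty", "mask")
        }]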
pype/lib/abstract_metaplugins.py (new file, 10 lines)

@@ -0,0 +1,10 @@
from abc import ABCMeta
from pyblish.plugin import MetaPlugin, ExplicitMetaPlugin


class AbstractMetaInstancePlugin(ABCMeta, MetaPlugin):
    pass


class AbstractMetaContextPlugin(ABCMeta, ExplicitMetaPlugin):
    pass
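These combined metaclasses exist so the abstract plugin base classes in this commit can use abc's @abstractmethod: a subclass's metaclass must derive from the metaclasses of all its bases, so mixing ABCMeta with pyblish's plugin metaclasses avoids a metaclass conflict while keeping abstract-method enforcement. A small illustration mirroring how AbstractCollectRender uses it (the plugin itself is hypothetical, not part of this commit):

from abc import abstractmethod

import six
import pyblish.api

from pype.lib.abstract_metaplugins import AbstractMetaContextPlugin


@six.add_metaclass(AbstractMetaContextPlugin)
class AbstractExampleCollector(pyblish.api.ContextPlugin):
    """Hypothetical abstract context plugin with an enforced method."""

    @abstractmethod
    def gather(self, context):
        pass


# Instantiating AbstractExampleCollector directly raises TypeError until a
# subclass implements `gather`, because ABCMeta is part of the combined
# metaclass; the pyblish side keeps the class a regular plugin.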
pype/lib/abstract_submit_deadline.py (new file, 626 lines)

@@ -0,0 +1,626 @@
# -*- coding: utf-8 -*-
|
||||
"""Abstract package for submitting jobs to Deadline.
|
||||
|
||||
It provides Deadline JobInfo data class.
|
||||
|
||||
"""
|
||||
import os
|
||||
from abc import abstractmethod
|
||||
import platform
|
||||
import getpass
|
||||
from collections import OrderedDict
|
||||
|
||||
import six
|
||||
import attr
|
||||
import requests
|
||||
|
||||
import pyblish.api
|
||||
from .abstract_metaplugins import AbstractMetaInstancePlugin
|
||||
|
||||
|
||||
@attr.s
|
||||
class DeadlineJobInfo(object):
|
||||
"""Mapping of all Deadline *JobInfo* attributes.
|
||||
|
||||
This contains all JobInfo attributes plus their default values.
|
||||
Attributes set to `None` shouldn't be posted to Deadline, as the only
required one is `Plugin`. The default values Deadline uses are stated
in the comments.
|
||||
|
||||
.. seealso::
|
||||
https://docs.thinkboxsoftware.com/products/deadline/10.1/1_User%20Manual/manual/manual-submission.html
|
||||
|
||||
"""
|
||||
|
||||
# Required
|
||||
# ----------------------------------------------
|
||||
Plugin = attr.ib()
|
||||
|
||||
# General
|
||||
Frames = attr.ib(default=None) # default: 0
|
||||
Name = attr.ib(default="Untitled")
|
||||
Comment = attr.ib(default=None) # default: empty
|
||||
Department = attr.ib(default=None) # default: empty
|
||||
BatchName = attr.ib(default=None) # default: empty
|
||||
UserName = attr.ib(default=getpass.getuser())
|
||||
MachineName = attr.ib(default=platform.node())
|
||||
Pool = attr.ib(default=None) # default: "none"
|
||||
SecondaryPool = attr.ib(default=None)
|
||||
Group = attr.ib(default=None) # default: "none"
|
||||
Priority = attr.ib(default=50)
|
||||
ChunkSize = attr.ib(default=1)
|
||||
ConcurrentTasks = attr.ib(default=1)
|
||||
LimitConcurrentTasksToNumberOfCpus = attr.ib(
|
||||
default=None) # default: "true"
|
||||
OnJobComplete = attr.ib(default="Nothing")
|
||||
SynchronizeAllAuxiliaryFiles = attr.ib(default=None) # default: false
|
||||
ForceReloadPlugin = attr.ib(default=None) # default: false
|
||||
Sequential = attr.ib(default=None) # default: false
|
||||
SuppressEvents = attr.ib(default=None) # default: false
|
||||
Protected = attr.ib(default=None) # default: false
|
||||
InitialStatus = attr.ib(default="Active")
|
||||
NetworkRoot = attr.ib(default=None)
|
||||
|
||||
# Timeouts
|
||||
# ----------------------------------------------
|
||||
MinRenderTimeSeconds = attr.ib(default=None) # Default: 0
|
||||
MinRenderTimeMinutes = attr.ib(default=None) # Default: 0
|
||||
TaskTimeoutSeconds = attr.ib(default=None) # Default: 0
|
||||
TaskTimeoutMinutes = attr.ib(default=None) # Default: 0
|
||||
StartJobTimeoutSeconds = attr.ib(default=None) # Default: 0
|
||||
StartJobTimeoutMinutes = attr.ib(default=None) # Default: 0
|
||||
InitializePluginTimeoutSeconds = attr.ib(default=None) # Default: 0
|
||||
# can be one of <Error/Notify/ErrorAndNotify/Complete>
|
||||
OnTaskTimeout = attr.ib(default=None) # Default: Error
|
||||
EnableTimeoutsForScriptTasks = attr.ib(default=None) # Default: false
|
||||
EnableFrameTimeouts = attr.ib(default=None) # Default: false
|
||||
EnableAutoTimeout = attr.ib(default=None) # Default: false
|
||||
|
||||
# Interruptible
|
||||
# ----------------------------------------------
|
||||
Interruptible = attr.ib(default=None) # Default: false
|
||||
InterruptiblePercentage = attr.ib(default=None)
|
||||
RemTimeThreshold = attr.ib(default=None)
|
||||
|
||||
# Notifications
|
||||
# ----------------------------------------------
|
||||
# can be comma separated list of users
|
||||
NotificationTargets = attr.ib(default=None) # Default: blank
|
||||
ClearNotificationTargets = attr.ib(default=None) # Default: false
|
||||
# A comma separated list of additional email addresses
|
||||
NotificationEmails = attr.ib(default=None) # Default: blank
|
||||
OverrideNotificationMethod = attr.ib(default=None) # Default: false
|
||||
EmailNotification = attr.ib(default=None) # Default: false
|
||||
PopupNotification = attr.ib(default=None) # Default: false
|
||||
# String with `[EOL]` used for end of line
|
||||
NotificationNote = attr.ib(default=None) # Default: blank
|
||||
|
||||
# Machine Limit
|
||||
# ----------------------------------------------
|
||||
MachineLimit = attr.ib(default=None) # Default: 0
|
||||
MachineLimitProgress = attr.ib(default=None) # Default: -1.0
|
||||
Whitelist = attr.ib(default=None) # Default: blank
|
||||
Blacklist = attr.ib(default=None) # Default: blank
|
||||
|
||||
# Limits
|
||||
# ----------------------------------------------
|
||||
# comma separated list of limit groups
|
||||
LimitGroups = attr.ib(default=None) # Default: blank
|
||||
|
||||
# Dependencies
|
||||
# ----------------------------------------------
|
||||
# comma separated list of job IDs
|
||||
JobDependencies = attr.ib(default=None) # Default: blank
|
||||
JobDependencyPercentage = attr.ib(default=None) # Default: -1
|
||||
IsFrameDependent = attr.ib(default=None) # Default: false
|
||||
FrameDependencyOffsetStart = attr.ib(default=None) # Default: 0
|
||||
FrameDependencyOffsetEnd = attr.ib(default=None) # Default: 0
|
||||
ResumeOnCompleteDependencies = attr.ib(default=None) # Default: true
|
||||
ResumeOnDeletedDependencies = attr.ib(default=None) # Default: false
|
||||
ResumeOnFailedDependencies = attr.ib(default=None) # Default: false
|
||||
# comma separated list of asset paths
|
||||
RequiredAssets = attr.ib(default=None) # Default: blank
|
||||
# comma separated list of script paths
|
||||
ScriptDependencies = attr.ib(default=None) # Default: blank
|
||||
|
||||
# Failure Detection
|
||||
# ----------------------------------------------
|
||||
OverrideJobFailureDetection = attr.ib(default=None) # Default: false
|
||||
FailureDetectionJobErrors = attr.ib(default=None) # 0..x
|
||||
OverrideTaskFailureDetection = attr.ib(default=None) # Default: false
|
||||
FailureDetectionTaskErrors = attr.ib(default=None) # 0..x
|
||||
IgnoreBadJobDetection = attr.ib(default=None) # Default: false
|
||||
SendJobErrorWarning = attr.ib(default=None) # Default: false
|
||||
|
||||
# Cleanup
|
||||
# ----------------------------------------------
|
||||
DeleteOnComplete = attr.ib(default=None) # Default: false
|
||||
ArchiveOnComplete = attr.ib(default=None) # Default: false
|
||||
OverrideAutoJobCleanup = attr.ib(default=None) # Default: false
|
||||
OverrideJobCleanup = attr.ib(default=None)
|
||||
JobCleanupDays = attr.ib(default=None) # Default: false
|
||||
# <ArchiveJobs/DeleteJobs>
|
||||
OverrideJobCleanupType = attr.ib(default=None)
|
||||
|
||||
# Scheduling
|
||||
# ----------------------------------------------
|
||||
# <None/Once/Daily/Custom>
|
||||
ScheduledType = attr.ib(default=None) # Default: None
|
||||
# <dd/MM/yyyy HH:mm>
|
||||
ScheduledStartDateTime = attr.ib(default=None)
|
||||
ScheduledDays = attr.ib(default=None) # Default: 1
|
||||
# <dd:hh:mm:ss>
|
||||
JobDelay = attr.ib(default=None)
|
||||
# <Day of the Week><Start/Stop>Time=<HH:mm:ss>
|
||||
Scheduled = attr.ib(default=None)
|
||||
|
||||
# Scripts
|
||||
# ----------------------------------------------
|
||||
# all accept path to script
|
||||
PreJobScript = attr.ib(default=None) # Default: blank
|
||||
PostJobScript = attr.ib(default=None) # Default: blank
|
||||
PreTaskScript = attr.ib(default=None) # Default: blank
|
||||
PostTaskScript = attr.ib(default=None) # Default: blank
|
||||
|
||||
# Event Opt-Ins
|
||||
# ----------------------------------------------
|
||||
# comma separated list of plugins
|
||||
EventOptIns = attr.ib(default=None) # Default: blank
|
||||
|
||||
# Environment
|
||||
# ----------------------------------------------
|
||||
_environmentKeyValue = attr.ib(factory=list)
|
||||
|
||||
@property
|
||||
def EnvironmentKeyValue(self): # noqa: N802
|
||||
"""Return all environment key values formatted for Deadline.
|
||||
|
||||
Returns:
|
||||
dict: as `{'EnvironmentKeyValue0': 'key=value'}`
|
||||
|
||||
"""
|
||||
out = {}
|
||||
for index, v in enumerate(self._environmentKeyValue):
|
||||
out["EnvironmentKeyValue{}".format(index)] = v
|
||||
return out
|
||||
|
||||
@EnvironmentKeyValue.setter
|
||||
def EnvironmentKeyValue(self, val): # noqa: N802
|
||||
self._environmentKeyValue.append(val)
|
||||
|
||||
IncludeEnvironment = attr.ib(default=None) # Default: false
|
||||
UseJobEnvironmentOnly = attr.ib(default=None) # Default: false
|
||||
CustomPluginDirectory = attr.ib(default=None) # Default: blank
|
||||
|
||||
# Job Extra Info
|
||||
# ----------------------------------------------
|
||||
_extraInfos = attr.ib(factory=list)
|
||||
_extraInfoKeyValues = attr.ib(factory=list)
|
||||
|
||||
@property
|
||||
def ExtraInfo(self): # noqa: N802
|
||||
"""Return all ExtraInfo values formatted for Deadline.
|
||||
|
||||
Returns:
|
||||
dict: as `{'ExtraInfo0': 'value'}`
|
||||
|
||||
"""
|
||||
out = {}
|
||||
for index, v in enumerate(self._extraInfos):
|
||||
out["ExtraInfo{}".format(index)] = v
|
||||
return out
|
||||
|
||||
@ExtraInfo.setter
|
||||
def ExtraInfo(self, val): # noqa: N802
|
||||
self._extraInfos.append(val)
|
||||
|
||||
@property
|
||||
def ExtraInfoKeyValue(self): # noqa: N802
|
||||
"""Return all ExtraInfoKeyValue values formatted for Deadline.
|
||||
|
||||
Returns:
|
||||
dict: as {'ExtraInfoKeyValue0': 'key=value'}`
|
||||
|
||||
"""
|
||||
out = {}
|
||||
for index, v in enumerate(self._extraInfoKeyValues):
|
||||
out["ExtraInfoKeyValue{}".format(index)] = v
|
||||
return out
|
||||
|
||||
@ExtraInfoKeyValue.setter
|
||||
def ExtraInfoKeyValue(self, val): # noqa: N802
|
||||
self._extraInfoKeyValues.append(val)
|
||||
|
||||
# Task Extra Info Names
|
||||
# ----------------------------------------------
|
||||
OverrideTaskExtraInfoNames = attr.ib(default=None) # Default: false
|
||||
_taskExtraInfos = attr.ib(factory=list)
|
||||
|
||||
@property
|
||||
def TaskExtraInfoName(self): # noqa: N802
|
||||
"""Return all TaskExtraInfoName values formatted for Deadline.
|
||||
|
||||
Returns:
|
||||
dict: as `{'TaskExtraInfoName0': 'value'}`
|
||||
|
||||
"""
|
||||
out = {}
|
||||
for index, v in enumerate(self._taskExtraInfos):
|
||||
out["TaskExtraInfoName{}".format(index)] = v
|
||||
return out
|
||||
|
||||
@TaskExtraInfoName.setter
|
||||
def TaskExtraInfoName(self, val): # noqa: N802
|
||||
self._taskExtraInfos.append(val)
|
||||
|
||||
# Output
|
||||
# ----------------------------------------------
|
||||
_outputFilename = attr.ib(factory=list)
|
||||
_outputFilenameTile = attr.ib(factory=list)
|
||||
_outputDirectory = attr.ib(factory=list)
|
||||
|
||||
@property
|
||||
def OutputFilename(self): # noqa: N802
|
||||
"""Return all OutputFilename values formatted for Deadline.
|
||||
|
||||
Returns:
|
||||
dict: as `{'OutputFilename0': 'filename'}`
|
||||
|
||||
"""
|
||||
out = {}
|
||||
for index, v in enumerate(self._outputFilename):
|
||||
out["OutputFilename{}".format(index)] = v
|
||||
return out
|
||||
|
||||
@OutputFilename.setter
|
||||
def OutputFilename(self, val): # noqa: N802
|
||||
self._outputFilename.append(val)
|
||||
|
||||
@property
|
||||
def OutputFilenameTile(self): # noqa: N802
|
||||
"""Return all OutputFilename#Tile values formatted for Deadline.
|
||||
|
||||
Returns:
|
||||
dict: as `{'OutputFilename0Tile': 'tile'}`
|
||||
|
||||
"""
|
||||
out = {}
|
||||
for index, v in enumerate(self._outputFilenameTile):
|
||||
out["OutputFilename{}Tile".format(index)] = v
|
||||
return out
|
||||
|
||||
@OutputFilenameTile.setter
|
||||
def OutputFilenameTile(self, val): # noqa: N802
|
||||
self._outputFilenameTile.append(val)
|
||||
|
||||
@property
|
||||
def OutputDirectory(self): # noqa: N802
|
||||
"""Return all OutputDirectory values formatted for Deadline.
|
||||
|
||||
Returns:
|
||||
dict: as `{'OutputDirectory0': 'dir'}`
|
||||
|
||||
"""
|
||||
out = {}
|
||||
for index, v in enumerate(self._outputDirectory):
|
||||
out["OutputDirectory{}".format(index)] = v
|
||||
return out
|
||||
|
||||
@OutputDirectory.setter
|
||||
def OutputDirectory(self, val): # noqa: N802
|
||||
self._outputDirectory.append(val)
|
||||
|
||||
# Tile Job
|
||||
# ----------------------------------------------
|
||||
TileJob = attr.ib(default=None) # Default: false
|
||||
TileJobFrame = attr.ib(default=None) # Default: 0
|
||||
TileJobTilesInX = attr.ib(default=None) # Default: 0
|
||||
TileJobTilesInY = attr.ib(default=None) # Default: 0
|
||||
TileJobTileCount = attr.ib(default=None) # Default: 0
|
||||
|
||||
# Maintenance Job
|
||||
# ----------------------------------------------
|
||||
MaintenanceJob = attr.ib(default=None) # Default: false
|
||||
MaintenanceJobStartFrame = attr.ib(default=None) # Default: 0
|
||||
MaintenanceJobEndFrame = attr.ib(default=None) # Default: 0
|
||||
|
||||
def serialize(self):
|
||||
"""Return all data serialized as dictionary.
|
||||
|
||||
Returns:
|
||||
OrderedDict: all serialized data.
|
||||
|
||||
"""
|
||||
def filter_data(a, v):
|
||||
if a.name.startswith("_"):
|
||||
return False
|
||||
if v is None:
|
||||
return False
|
||||
return True
|
||||
|
||||
serialized = attr.asdict(
|
||||
self, dict_factory=OrderedDict, filter=filter_data)
|
||||
serialized.update(self.EnvironmentKeyValue)
|
||||
serialized.update(self.ExtraInfo)
|
||||
serialized.update(self.ExtraInfoKeyValue)
|
||||
serialized.update(self.TaskExtraInfoName)
|
||||
serialized.update(self.OutputFilename)
|
||||
serialized.update(self.OutputFilenameTile)
|
||||
serialized.update(self.OutputDirectory)
|
||||
return serialized
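As a quick usage illustration (not part of this commit): each assignment to one of the list-backed properties above appends another `<Key>{index}` entry, and serialize() merges them into the flat dictionary Deadline expects.

job = DeadlineJobInfo(Plugin="MayaBatch")
job.EnvironmentKeyValue = "AVALON_PROJECT=example"  # appends despite looking like plain assignment
job.OutputFilename = "render.####.exr"
job.OutputDirectory = "/path/to/renders"

print(job.serialize())
# OrderedDict containing, among the non-None defaults:
#   "Plugin": "MayaBatch", "Name": "Untitled", "Priority": 50,
#   "EnvironmentKeyValue0": "AVALON_PROJECT=example",
#   "OutputFilename0": "render.####.exr",
#   "OutputDirectory0": "/path/to/renders"
# Attributes left at None and the private backing lists are filtered out.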
|
||||
|
||||
|
||||
@six.add_metaclass(AbstractMetaInstancePlugin)
|
||||
class AbstractSubmitDeadline(pyblish.api.InstancePlugin):
|
||||
"""Class abstracting access to Deadline."""
|
||||
|
||||
label = "Submit to Deadline"
|
||||
order = pyblish.api.IntegratorOrder + 0.1
|
||||
use_published = True
|
||||
asset_dependencies = False
|
||||
|
||||
def __init__(self, *args, **kwargs):
|
||||
super(AbstractSubmitDeadline, self).__init__(*args, **kwargs)
|
||||
self._instance = None
|
||||
self._deadline_url = None
|
||||
self.scene_path = None
|
||||
self.job_info = None
|
||||
self.plugin_info = None
|
||||
self.aux_files = None
|
||||
|
||||
def process(self, instance):
|
||||
"""Plugin entry point."""
|
||||
self._instance = instance
|
||||
context = instance.context
|
||||
self._deadline_url = os.environ.get(
|
||||
"DEADLINE_REST_URL", "http://localhost:8082")
|
||||
assert self._deadline_url, "Requires DEADLINE_REST_URL"
|
||||
|
||||
file_path = None
|
||||
if self.use_published:
|
||||
file_path = self.from_published_scene()
|
||||
|
||||
# fallback if nothing was set
|
||||
if not file_path:
|
||||
self.log.warning("Falling back to workfile")
|
||||
file_path = context.data["currentFile"]
|
||||
|
||||
self.scene_path = file_path
|
||||
self.log.info("Using {} for render/export.".format(file_path))
|
||||
|
||||
self.job_info = self.get_job_info()
|
||||
self.plugin_info = self.get_plugin_info()
|
||||
self.aux_files = self.get_aux_files()
|
||||
|
||||
self.process_submission()
|
||||
|
||||
def process_submission(self):
|
||||
"""Process data for submission.
|
||||
|
||||
This takes Deadline JobInfo, PluginInfo, AuxFile, creates payload
|
||||
from them and submits it to Deadline.
|
||||
|
||||
Returns:
|
||||
str: Deadline job ID
|
||||
|
||||
"""
|
||||
payload = self.assemble_payload()
|
||||
return self.submit(payload)
|
||||
|
||||
@abstractmethod
|
||||
def get_job_info(self):
|
||||
"""Return filled Deadline JobInfo.
|
||||
|
||||
This is host/plugin specific implementation of how to fill data in.
|
||||
|
||||
See:
|
||||
:class:`DeadlineJobInfo`
|
||||
|
||||
Returns:
|
||||
:class:`DeadlineJobInfo`: Filled Deadline JobInfo.
|
||||
|
||||
"""
|
||||
pass
|
||||
|
||||
@abstractmethod
|
||||
def get_plugin_info(self):
|
||||
"""Return filled Deadline PluginInfo.
|
||||
|
||||
This is host/plugin specific implementation of how to fill data in.
|
||||
|
||||
See:
|
||||
:class:`DeadlineJobInfo`
|
||||
|
||||
Returns:
|
||||
dict: Filled Deadline PluginInfo.
|
||||
|
||||
"""
|
||||
pass
|
||||
|
||||
def get_aux_files(self):
|
||||
"""Return list of auxiliary files for Deadline job.
|
||||
|
||||
If needed, this should be overridden. Otherwise return an empty list,
as the field must be present in the Deadline submission even when empty.
|
||||
|
||||
Returns:
|
||||
list: List of files.
|
||||
|
||||
"""
|
||||
return []
|
||||
|
||||
def from_published_scene(self, replace_in_path=True):
|
||||
"""Switch work scene for published scene.
|
||||
|
||||
If rendering/exporting from published scenes is enabled, this will
|
||||
replace paths from working scene to published scene.
|
||||
|
||||
Args:
|
||||
replace_in_path (bool): if True, it will try to find
|
||||
old scene name in path of expected files and replace it
|
||||
with name of published scene.
|
||||
|
||||
Returns:
|
||||
str: Published scene path.
|
||||
None: if no published scene is found.
|
||||
|
||||
Note:
|
||||
Published scene path is determined from the project Anatomy, because
at the time this plugin runs the scene may not be published yet.
|
||||
|
||||
"""
|
||||
anatomy = self._instance.context.data['anatomy']
|
||||
file_path = None
|
||||
for i in self._instance.context:
|
||||
if "workfile" in i.data["families"] \
|
||||
or i.data["family"] == "workfile":
|
||||
# test if there is instance of workfile waiting
|
||||
# to be published.
|
||||
assert i.data["publish"] is True, (
|
||||
"Workfile (scene) must be published along")
|
||||
# determine published path from Anatomy.
|
||||
template_data = i.data.get("anatomyData")
|
||||
rep = i.data.get("representations")[0].get("ext")
|
||||
template_data["representation"] = rep
|
||||
template_data["ext"] = rep
|
||||
template_data["comment"] = None
|
||||
anatomy_filled = anatomy.format(template_data)
|
||||
template_filled = anatomy_filled["publish"]["path"]
|
||||
file_path = os.path.normpath(template_filled)
|
||||
|
||||
self.log.info("Using published scene for render {}".format(
|
||||
file_path))
|
||||
|
||||
if not os.path.exists(file_path):
    self.log.error("published scene does not exist!")
    raise RuntimeError(
        "Published scene \"{}\" was not found".format(file_path))
|
||||
|
||||
if not replace_in_path:
|
||||
return file_path
|
||||
|
||||
# now we need to switch scene in expected files
|
||||
# because <scene> token will now point to published
|
||||
# scene file and that might differ from current one
|
||||
new_scene = os.path.splitext(
|
||||
os.path.basename(file_path))[0]
|
||||
orig_scene = os.path.splitext(
|
||||
os.path.basename(
|
||||
self._instance.context.data["currentFile"]))[0]
|
||||
exp = self._instance.data.get("expectedFiles")
|
||||
|
||||
if isinstance(exp[0], dict):
|
||||
# we have aovs and we need to iterate over them
|
||||
new_exp = {}
|
||||
for aov, files in exp[0].items():
|
||||
replaced_files = []
|
||||
for f in files:
|
||||
replaced_files.append(
|
||||
f.replace(orig_scene, new_scene)
|
||||
)
|
||||
new_exp[aov] = replaced_files
|
||||
self._instance.data["expectedFiles"] = [new_exp]
|
||||
else:
|
||||
new_exp = []
|
||||
for f in exp:
|
||||
new_exp.append(
|
||||
f.replace(orig_scene, new_scene)
|
||||
)
|
||||
self._instance.data["expectedFiles"] = [new_exp]
|
||||
self.log.info("Scene name was switched {} -> {}".format(
|
||||
orig_scene, new_scene
|
||||
))
|
||||
|
||||
return file_path
|
||||
|
||||
def assemble_payload(
|
||||
self, job_info=None, plugin_info=None, aux_files=None):
|
||||
"""Assemble payload data from its various parts.
|
||||
|
||||
Args:
|
||||
job_info (DeadlineJobInfo): Deadline JobInfo. You can use
|
||||
:class:`DeadlineJobInfo` for it.
|
||||
plugin_info (dict): Deadline PluginInfo. Plugin specific options.
|
||||
aux_files (list, optional): List of auxiliary file to submit with
|
||||
the job.
|
||||
|
||||
Returns:
|
||||
dict: Deadline Payload.
|
||||
|
||||
"""
|
||||
job = job_info or self.job_info
|
||||
return {
|
||||
"JobInfo": job.serialize(),
|
||||
"PluginInfo": plugin_info or self.plugin_info,
|
||||
"AuxFiles": aux_files or self.aux_files
|
||||
}
|
||||
|
||||
def submit(self, payload):
|
||||
"""Submit payload to Deadline API end-point.
|
||||
|
||||
This takes payload in the form of JSON file and POST it to
|
||||
Deadline jobs end-point.
|
||||
|
||||
Args:
|
||||
payload (dict): dict to become json in deadline submission.
|
||||
|
||||
Returns:
|
||||
str: resulting Deadline job id.
|
||||
|
||||
Throws:
|
||||
RuntimeError: if submission fails.
|
||||
|
||||
"""
|
||||
url = "{}/api/jobs".format(self._deadline_url)
|
||||
response = self._requests_post(url, json=payload)
|
||||
if not response.ok:
|
||||
self.log.error("Submission failed!")
|
||||
self.log.error(response.status_code)
|
||||
self.log.error(response.content)
|
||||
self.log.debug(payload)
|
||||
raise RuntimeError(response.text)
|
||||
|
||||
result = response.json()
|
||||
# for submit publish job
|
||||
self._instance.data["deadlineSubmissionJob"] = result
|
||||
|
||||
return result["_id"]
|
||||
|
||||
def _requests_post(self, *args, **kwargs):
|
||||
"""Wrap request post method.
|
||||
|
||||
Disables SSL certificate validation if the ``PYPE_DONT_VERIFY_SSL`` environment
|
||||
variable is found. This is useful when Deadline or Muster server are
|
||||
running with self-signed certificates and their certificate is not
|
||||
added to trusted certificates on client machines.
|
||||
|
||||
Warning:
|
||||
Disabling SSL certificate validation is defeating one line
|
||||
of defense SSL is providing and it is not recommended.
|
||||
|
||||
"""
|
||||
if 'verify' not in kwargs:
|
||||
kwargs['verify'] = not os.getenv("PYPE_DONT_VERIFY_SSL")
|
||||
# add 10sec timeout before bailing out
|
||||
kwargs['timeout'] = 10
|
||||
return requests.post(*args, **kwargs)
|
||||
|
||||
def _requests_get(self, *args, **kwargs):
|
||||
"""Wrap request get method.
|
||||
|
||||
Disables SSL certificate validation if the ``PYPE_DONT_VERIFY_SSL`` environment
|
||||
variable is found. This is useful when Deadline or Muster server are
|
||||
running with self-signed certificates and their certificate is not
|
||||
added to trusted certificates on client machines.
|
||||
|
||||
Warning:
|
||||
Disabling SSL certificate validation is defeating one line
|
||||
of defense SSL is providing and it is not recommended.
|
||||
|
||||
"""
|
||||
if 'verify' not in kwargs:
|
||||
kwargs['verify'] = not os.getenv("PYPE_DONT_VERIFY_SSL")
|
||||
# add 10sec timeout before bailing out
|
||||
kwargs['timeout'] = 10
|
||||
return requests.get(*args, **kwargs)
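A minimal host-side submitter built on the class above could look like the following sketch; the Deadline plugin name, data keys and paths are placeholders, not part of this commit:

import os

from pype.lib.abstract_submit_deadline import (
    AbstractSubmitDeadline,
    DeadlineJobInfo,
)


class ExampleSubmitDeadline(AbstractSubmitDeadline):
    """Hypothetical submitter for an 'Example' Deadline plugin."""

    label = "Submit Example to Deadline"
    families = ["renderlayer"]

    def get_job_info(self):
        # `self._instance` and `self.scene_path` are set by process() in
        # the base class before this method is called.
        job_info = DeadlineJobInfo(Plugin="Example")
        job_info.Name = self._instance.data["name"]
        job_info.BatchName = os.path.basename(self.scene_path)
        job_info.Frames = "{}-{}".format(
            self._instance.data["frameStart"],
            self._instance.data["frameEnd"],
        )
        job_info.OutputDirectory = self._instance.data["outputDir"]
        return job_info

    def get_plugin_info(self):
        # Keys here are defined by whichever Deadline plugin is targeted.
        return {
            "SceneFile": self.scene_path,
            "Version": "1.0",
        }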
|
||||
|
|
@ -27,6 +27,9 @@ class NextTaskUpdate(BaseEvent):
|
|||
|
||||
first_filtered_entities.append(entity_info)
|
||||
|
||||
if not first_filtered_entities:
|
||||
return first_filtered_entities
|
||||
|
||||
status_ids = [
|
||||
entity_info["changes"]["statusid"]["new"]
|
||||
for entity_info in first_filtered_entities
|
||||
|
|
@ -34,10 +37,16 @@ class NextTaskUpdate(BaseEvent):
|
|||
statuses_by_id = self.get_statuses_by_id(
|
||||
session, status_ids=status_ids
|
||||
)
|
||||
# Make sure `entity_type` is "Task"
|
||||
task_object_type = session.query(
|
||||
"select id, name from ObjectType where name is \"Task\""
|
||||
).one()
|
||||
|
||||
# Care only about tasks having status with state `Done`
|
||||
filtered_entities = []
|
||||
for entity_info in first_filtered_entities:
|
||||
if entity_info["objectTypeId"] != task_object_type["id"]:
|
||||
continue
|
||||
status_id = entity_info["changes"]["statusid"]["new"]
|
||||
status_entity = statuses_by_id[status_id]
|
||||
if status_entity["state"]["name"].lower() == "done":
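For orientation, the entity_info dictionaries filtered in handlers like this one come from the ftrack event payload and carry roughly this shape (keys taken from the handlers in this commit, values purely illustrative):

entity_info = {
    "entityType": "task",
    "entityId": "hypothetical-task-id",
    "objectTypeId": "hypothetical-object-type-id",
    "parentId": "hypothetical-parent-id",
    "changes": {
        "statusid": {
            "old": "previous-status-id",
            "new": "new-status-id",
        },
    },
}
# NextTaskUpdate keeps only task entities whose new status maps to a "Done"
# state; the handlers below do similar filtering before pushing values or
# statuses to parent entities.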
|
||||
|
|
|
|||
|
|
@ -1,4 +1,6 @@
|
|||
import collections
|
||||
import datetime
|
||||
|
||||
import ftrack_api
|
||||
from pype.modules.ftrack import BaseEvent
|
||||
|
||||
|
|
@ -10,17 +12,24 @@ class PushFrameValuesToTaskEvent(BaseEvent):
|
|||
cust_attrs_query = (
|
||||
"select id, key, object_type_id, is_hierarchical, default"
|
||||
" from CustomAttributeConfiguration"
|
||||
" where key in ({}) and object_type_id in ({})"
|
||||
" where key in ({}) and"
|
||||
" (object_type_id in ({}) or is_hierarchical is true)"
|
||||
)
|
||||
|
||||
cust_attr_query = (
|
||||
"select value, entity_id from ContextCustomAttributeValue "
|
||||
"where entity_id in ({}) and configuration_id in ({})"
|
||||
)
|
||||
|
||||
interest_entity_types = {"Shot"}
|
||||
interest_attributes = {"frameStart", "frameEnd"}
|
||||
interest_attr_mapping = {
|
||||
"frameStart": "fstart",
|
||||
"frameEnd": "fend"
|
||||
}
|
||||
_cached_task_object_id = None
|
||||
_cached_interest_object_ids = None
|
||||
_cached_user_id = None
|
||||
_cached_changes = []
|
||||
_max_delta = 30
|
||||
|
||||
# Configurable (lists)
|
||||
interest_entity_types = {"Shot"}
|
||||
interest_attributes = {"frameStart", "frameEnd"}
|
||||
|
||||
@staticmethod
|
||||
def join_keys(keys):
|
||||
|
|
@ -49,8 +58,18 @@ class PushFrameValuesToTaskEvent(BaseEvent):
|
|||
)
|
||||
return cls._cached_interest_object_ids
|
||||
|
||||
def session_user_id(self, session):
|
||||
if self._cached_user_id is None:
|
||||
user = session.query(
|
||||
"User where username is \"{}\"".format(session.api_user)
|
||||
).one()
|
||||
self._cached_user_id = user["id"]
|
||||
return self._cached_user_id
|
||||
|
||||
def launch(self, session, event):
|
||||
interesting_data = self.extract_interesting_data(session, event)
|
||||
interesting_data, changed_keys_by_object_id = (
|
||||
self.extract_interesting_data(session, event)
|
||||
)
|
||||
if not interesting_data:
|
||||
return
|
||||
|
||||
|
|
@ -66,92 +85,165 @@ class PushFrameValuesToTaskEvent(BaseEvent):
|
|||
if entity_id not in entities_by_id:
|
||||
interesting_data.pop(entity_id)
|
||||
|
||||
task_entities = self.get_task_entities(session, interesting_data)
|
||||
attrs_by_obj_id, hier_attrs = self.attrs_configurations(session)
|
||||
|
||||
task_object_id = self.task_object_id(session)
|
||||
task_attrs = attrs_by_obj_id.get(task_object_id)
|
||||
# Skip keys that are not both in hierachical and type specific
|
||||
for object_id, keys in changed_keys_by_object_id.items():
|
||||
object_id_attrs = attrs_by_obj_id.get(object_id)
|
||||
for key in keys:
|
||||
if key not in hier_attrs:
|
||||
attrs_by_obj_id[object_id].pop(key)
|
||||
continue
|
||||
|
||||
if (
|
||||
(not object_id_attrs or key not in object_id_attrs)
|
||||
and (not task_attrs or key not in task_attrs)
|
||||
):
|
||||
hier_attrs.pop(key)
|
||||
|
||||
# Clean up empty values
|
||||
for key, value in tuple(attrs_by_obj_id.items()):
|
||||
if not value:
|
||||
attrs_by_obj_id.pop(key)
|
||||
|
||||
attrs_by_obj_id = self.attrs_configurations(session)
|
||||
if not attrs_by_obj_id:
|
||||
self.log.warning((
|
||||
"There is not created Custom Attributes {}"
|
||||
" for \"Task\" entity type."
|
||||
).format(self.join_keys(self.interest_attributes)))
|
||||
"There is not created Custom Attributes {} "
|
||||
" for entity types: {}"
|
||||
).format(
|
||||
self.join_keys(self.interest_attributes),
|
||||
self.join_keys(self.interest_entity_types)
|
||||
))
|
||||
return
|
||||
|
||||
task_entities_by_parent_id = collections.defaultdict(list)
|
||||
# Prepare task entities
|
||||
task_entities = []
|
||||
# If task entity does not contain changed attribute then skip
|
||||
if task_attrs:
|
||||
task_entities = self.get_task_entities(session, interesting_data)
|
||||
|
||||
task_entities_by_id = {}
|
||||
parent_id_by_task_id = {}
|
||||
for task_entity in task_entities:
|
||||
task_entities_by_parent_id[task_entity["parent_id"]].append(
|
||||
task_entity
|
||||
)
|
||||
task_entities_by_id[task_entity["id"]] = task_entity
|
||||
parent_id_by_task_id[task_entity["id"]] = task_entity["parent_id"]
|
||||
|
||||
missing_keys_by_object_name = collections.defaultdict(set)
|
||||
for parent_id, values in interesting_data.items():
|
||||
entities = task_entities_by_parent_id.get(parent_id) or []
|
||||
entities.append(entities_by_id[parent_id])
|
||||
changed_keys = set()
|
||||
for keys in changed_keys_by_object_id.values():
|
||||
changed_keys |= set(keys)
|
||||
|
||||
for hier_key, value in values.items():
|
||||
changed_ids = []
|
||||
for entity in entities:
|
||||
key = self.interest_attr_mapping[hier_key]
|
||||
entity_attrs_mapping = (
|
||||
attrs_by_obj_id.get(entity["object_type_id"])
|
||||
attr_id_to_key = {}
|
||||
for attr_confs in attrs_by_obj_id.values():
|
||||
for key in changed_keys:
|
||||
custom_attr_id = attr_confs.get(key)
|
||||
if custom_attr_id:
|
||||
attr_id_to_key[custom_attr_id] = key
|
||||
|
||||
for key in changed_keys:
|
||||
custom_attr_id = hier_attrs.get(key)
|
||||
if custom_attr_id:
|
||||
attr_id_to_key[custom_attr_id] = key
|
||||
|
||||
entity_ids = (
|
||||
set(interesting_data.keys()) | set(task_entities_by_id.keys())
|
||||
)
|
||||
attr_ids = set(attr_id_to_key.keys())
|
||||
|
||||
current_values_by_id = self.current_values(
|
||||
session, attr_ids, entity_ids, task_entities_by_id, hier_attrs
|
||||
)
|
||||
|
||||
for entity_id, current_values in current_values_by_id.items():
|
||||
parent_id = parent_id_by_task_id.get(entity_id)
|
||||
if not parent_id:
|
||||
parent_id = entity_id
|
||||
values = interesting_data[parent_id]
|
||||
|
||||
for attr_id, old_value in current_values.items():
|
||||
attr_key = attr_id_to_key.get(attr_id)
|
||||
if not attr_key:
|
||||
continue
|
||||
|
||||
# Convert new value from string
|
||||
new_value = values.get(attr_key)
|
||||
if new_value is not None and old_value is not None:
|
||||
try:
|
||||
new_value = type(old_value)(new_value)
|
||||
except Exception:
|
||||
self.log.warning((
|
||||
"Couldn't convert from {} to {}."
|
||||
" Skipping update values."
|
||||
).format(type(new_value), type(old_value)))
|
||||
if new_value == old_value:
|
||||
continue
|
||||
|
||||
entity_key = collections.OrderedDict({
|
||||
"configuration_id": attr_id,
|
||||
"entity_id": entity_id
|
||||
})
|
||||
self._cached_changes.append({
|
||||
"attr_key": attr_key,
|
||||
"entity_id": entity_id,
|
||||
"value": new_value,
|
||||
"time": datetime.datetime.now()
|
||||
})
|
||||
if new_value is None:
|
||||
op = ftrack_api.operation.DeleteEntityOperation(
|
||||
"CustomAttributeValue",
|
||||
entity_key
|
||||
)
|
||||
else:
|
||||
op = ftrack_api.operation.UpdateEntityOperation(
|
||||
"ContextCustomAttributeValue",
|
||||
entity_key,
|
||||
"value",
|
||||
ftrack_api.symbol.NOT_SET,
|
||||
new_value
|
||||
)
|
||||
if not entity_attrs_mapping:
|
||||
missing_keys_by_object_name[entity.entity_type].add(
|
||||
key
|
||||
)
|
||||
continue
|
||||
|
||||
configuration_id = entity_attrs_mapping.get(key)
|
||||
if not configuration_id:
|
||||
missing_keys_by_object_name[entity.entity_type].add(
|
||||
key
|
||||
)
|
||||
continue
|
||||
|
||||
changed_ids.append(entity["id"])
|
||||
entity_key = collections.OrderedDict({
|
||||
"configuration_id": configuration_id,
|
||||
"entity_id": entity["id"]
|
||||
})
|
||||
if value is None:
|
||||
op = ftrack_api.operation.DeleteEntityOperation(
|
||||
"CustomAttributeValue",
|
||||
entity_key
|
||||
)
|
||||
else:
|
||||
op = ftrack_api.operation.UpdateEntityOperation(
|
||||
"ContextCustomAttributeValue",
|
||||
entity_key,
|
||||
"value",
|
||||
ftrack_api.symbol.NOT_SET,
|
||||
value
|
||||
)
|
||||
|
||||
session.recorded_operations.push(op)
|
||||
session.recorded_operations.push(op)
|
||||
self.log.info((
|
||||
"Changing Custom Attribute \"{}\" to value"
|
||||
" \"{}\" on entities: {}"
|
||||
).format(key, value, self.join_keys(changed_ids)))
|
||||
try:
|
||||
session.commit()
|
||||
except Exception:
|
||||
session.rollback()
|
||||
self.log.warning(
|
||||
"Changing of values failed.",
|
||||
exc_info=True
|
||||
)
|
||||
if not missing_keys_by_object_name:
|
||||
return
|
||||
" \"{}\" on entity: {}"
|
||||
).format(attr_key, new_value, entity_id))
|
||||
try:
|
||||
session.commit()
|
||||
except Exception:
|
||||
session.rollback()
|
||||
self.log.warning("Changing of values failed.", exc_info=True)
|
||||
|
||||
msg_items = []
|
||||
for object_name, missing_keys in missing_keys_by_object_name.items():
|
||||
msg_items.append(
|
||||
"{}: ({})".format(object_name, self.join_keys(missing_keys))
|
||||
def current_values(
|
||||
self, session, attr_ids, entity_ids, task_entities_by_id, hier_attrs
|
||||
):
|
||||
current_values_by_id = {}
|
||||
if not attr_ids or not entity_ids:
|
||||
return current_values_by_id
|
||||
joined_conf_ids = self.join_keys(attr_ids)
|
||||
joined_entity_ids = self.join_keys(entity_ids)
|
||||
|
||||
call_expr = [{
|
||||
"action": "query",
|
||||
"expression": self.cust_attr_query.format(
|
||||
joined_entity_ids, joined_conf_ids
|
||||
)
|
||||
}]
|
||||
if hasattr(session, "call"):
|
||||
[values] = session.call(call_expr)
|
||||
else:
|
||||
[values] = session._call(call_expr)
|
||||
|
||||
self.log.warning((
|
||||
"Missing Custom Attribute configuration"
|
||||
" per specific object types: {}"
|
||||
).format(", ".join(msg_items)))
|
||||
for item in values["data"]:
|
||||
entity_id = item["entity_id"]
|
||||
attr_id = item["configuration_id"]
|
||||
if entity_id in task_entities_by_id and attr_id in hier_attrs:
|
||||
continue
|
||||
|
||||
if entity_id not in current_values_by_id:
|
||||
current_values_by_id[entity_id] = {}
|
||||
current_values_by_id[entity_id][attr_id] = item["value"]
|
||||
return current_values_by_id
|
||||
|
||||
def extract_interesting_data(self, session, event):
|
||||
# Filter if event contains relevant data
|
||||
|
|
@ -159,7 +251,18 @@ class PushFrameValuesToTaskEvent(BaseEvent):
|
|||
if not entities_info:
|
||||
return
|
||||
|
||||
# for key, value in event["data"].items():
|
||||
# self.log.info("{}: {}".format(key, value))
|
||||
session_user_id = self.session_user_id(session)
|
||||
user_data = event["data"].get("user")
|
||||
changed_by_session = False
|
||||
if user_data and user_data.get("userid") == session_user_id:
|
||||
changed_by_session = True
|
||||
|
||||
current_time = datetime.datetime.now()
|
||||
|
||||
interesting_data = {}
|
||||
changed_keys_by_object_id = {}
|
||||
for entity_info in entities_info:
|
||||
# Care only about tasks
|
||||
if entity_info.get("entityType") != "task":
|
||||
|
|
@ -176,16 +279,47 @@ class PushFrameValuesToTaskEvent(BaseEvent):
|
|||
if key in changes:
|
||||
entity_changes[key] = changes[key]["new"]
|
||||
|
||||
entity_id = entity_info["entityId"]
|
||||
if changed_by_session:
|
||||
for key, new_value in tuple(entity_changes.items()):
|
||||
for cached in tuple(self._cached_changes):
|
||||
if (
|
||||
cached["entity_id"] != entity_id
|
||||
or cached["attr_key"] != key
|
||||
):
|
||||
continue
|
||||
|
||||
cached_value = cached["value"]
|
||||
try:
|
||||
new_value = type(cached_value)(new_value)
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
if cached_value == new_value:
|
||||
self._cached_changes.remove(cached)
|
||||
entity_changes.pop(key)
|
||||
break
|
||||
|
||||
delta = (current_time - cached["time"]).seconds
|
||||
if delta > self._max_delta:
|
||||
self._cached_changes.remove(cached)
|
||||
|
||||
if not entity_changes:
|
||||
continue
|
||||
|
||||
# Do not care about "Task" entity_type
|
||||
task_object_id = self.task_object_id(session)
|
||||
if entity_info.get("objectTypeId") == task_object_id:
|
||||
object_id = entity_info.get("objectTypeId")
|
||||
if not object_id or object_id == task_object_id:
|
||||
continue
|
||||
|
||||
interesting_data[entity_info["entityId"]] = entity_changes
|
||||
return interesting_data
|
||||
interesting_data[entity_id] = entity_changes
|
||||
if object_id not in changed_keys_by_object_id:
|
||||
changed_keys_by_object_id[object_id] = set()
|
||||
|
||||
changed_keys_by_object_id[object_id] |= set(entity_changes.keys())
|
||||
|
||||
return interesting_data, changed_keys_by_object_id
|
||||
|
||||
def get_entities(self, session, interesting_data):
|
||||
entities = session.query(
|
||||
|
|
@ -213,17 +347,21 @@ class PushFrameValuesToTaskEvent(BaseEvent):
|
|||
object_ids.append(self.task_object_id(session))
|
||||
|
||||
attrs = session.query(self.cust_attrs_query.format(
|
||||
self.join_keys(self.interest_attr_mapping.values()),
|
||||
self.join_keys(self.interest_attributes),
|
||||
self.join_keys(object_ids)
|
||||
)).all()
|
||||
|
||||
output = {}
|
||||
hiearchical = {}
|
||||
for attr in attrs:
|
||||
if attr["is_hierarchical"]:
|
||||
hiearchical[attr["key"]] = attr["id"]
|
||||
continue
|
||||
obj_id = attr["object_type_id"]
|
||||
if obj_id not in output:
|
||||
output[obj_id] = {}
|
||||
output[obj_id][attr["key"]] = attr["id"]
|
||||
return output
|
||||
return output, hiearchical
|
||||
|
||||
|
||||
def register(session, plugins_presets):
|
||||
|
|
|
|||
pype/modules/ftrack/events/event_task_to_parent_status.py (new file, 399 lines)

@@ -0,0 +1,399 @@
import collections
|
||||
from pype.modules.ftrack import BaseEvent
|
||||
|
||||
|
||||
class TaskStatusToParent(BaseEvent):
|
||||
# Parent types where we care about changing of status
|
||||
parent_types = ["shot", "asset build"]
|
||||
|
||||
# All parent's tasks must have status name in `task_statuses` key to apply
|
||||
# status name in `new_status`
|
||||
parent_status_match_all_task_statuses = [
|
||||
{
|
||||
"new_status": "approved",
|
||||
"task_statuses": [
|
||||
"approved", "omitted"
|
||||
]
|
||||
}
|
||||
]
|
||||
|
||||
# Task's status was changed to something in `task_statuses` to apply
|
||||
# `new_status` on its parent
|
||||
# - this is done only if `parent_status_match_all_task_statuses` filtering
|
||||
# didn't find a matching status
|
||||
parent_status_by_task_status = [
|
||||
{
|
||||
"new_status": "in progress",
|
||||
"task_statuses": [
|
||||
"in progress"
|
||||
]
|
||||
}
|
||||
]
|
||||
|
||||
def register(self, *args, **kwargs):
|
||||
result = super(TaskStatusToParent, self).register(*args, **kwargs)
|
||||
# Clean up presetable attributes
|
||||
_new_all_match = []
|
||||
if self.parent_status_match_all_task_statuses:
|
||||
for item in self.parent_status_match_all_task_statuses:
|
||||
_new_all_match.append({
|
||||
"new_status": item["new_status"].lower(),
|
||||
"task_statuses": [
|
||||
status_name.lower()
|
||||
for status_name in item["task_statuses"]
|
||||
]
|
||||
})
|
||||
self.parent_status_match_all_task_statuses = _new_all_match
|
||||
|
||||
_new_single_match = []
|
||||
if self.parent_status_by_task_status:
|
||||
for item in self.parent_status_by_task_status:
|
||||
_new_single_match.append({
|
||||
"new_status": item["new_status"].lower(),
|
||||
"task_statuses": [
|
||||
status_name.lower()
|
||||
for status_name in item["task_statuses"]
|
||||
]
|
||||
})
|
||||
self.parent_status_by_task_status = _new_single_match
|
||||
|
||||
self.parent_types = [
|
||||
parent_type.lower()
|
||||
for parent_type in self.parent_types
|
||||
]
|
||||
|
||||
return result
|
||||
|
||||
def filter_entities_info(self, session, event):
|
||||
# Filter if event contain relevant data
|
||||
entities_info = event["data"].get("entities")
|
||||
if not entities_info:
|
||||
return
|
||||
|
||||
filtered_entities = []
|
||||
for entity_info in entities_info:
|
||||
# Care only about tasks
|
||||
if entity_info.get("entityType") != "task":
|
||||
continue
|
||||
|
||||
# Care only about changes of status
|
||||
changes = entity_info.get("changes") or {}
|
||||
statusid_changes = changes.get("statusid") or {}
|
||||
if (
|
||||
statusid_changes.get("new") is None
|
||||
or statusid_changes.get("old") is None
|
||||
):
|
||||
continue
|
||||
|
||||
filtered_entities.append(entity_info)
|
||||
|
||||
if not filtered_entities:
|
||||
return
|
||||
|
||||
status_ids = [
|
||||
entity_info["changes"]["statusid"]["new"]
|
||||
for entity_info in filtered_entities
|
||||
]
|
||||
statuses_by_id = self.get_statuses_by_id(
|
||||
session, status_ids=status_ids
|
||||
)
|
||||
|
||||
# Care only about tasks having status with state `Done`
|
||||
output = []
|
||||
for entity_info in filtered_entities:
|
||||
status_id = entity_info["changes"]["statusid"]["new"]
|
||||
entity_info["status_entity"] = statuses_by_id[status_id]
|
||||
output.append(entity_info)
|
||||
return output
|
||||
|
||||
def get_parents_by_id(self, session, entities_info, object_types):
|
||||
task_type_id = None
|
||||
valid_object_type_ids = []
|
||||
for object_type in object_types:
|
||||
object_name_low = object_type["name"].lower()
|
||||
if object_name_low == "task":
|
||||
task_type_id = object_type["id"]
|
||||
|
||||
if object_name_low in self.parent_types:
|
||||
valid_object_type_ids.append(object_type["id"])
|
||||
|
||||
parent_ids = [
|
||||
"\"{}\"".format(entity_info["parentId"])
|
||||
for entity_info in entities_info
|
||||
if entity_info["objectTypeId"] == task_type_id
|
||||
]
|
||||
if not parent_ids:
|
||||
return {}
|
||||
|
||||
parent_entities = session.query((
|
||||
"TypedContext where id in ({}) and object_type_id in ({})"
|
||||
).format(
|
||||
", ".join(parent_ids), ", ".join(valid_object_type_ids))
|
||||
).all()
|
||||
|
||||
return {
|
||||
entity["id"]: entity
|
||||
for entity in parent_entities
|
||||
}
|
||||
|
||||
def get_tasks_by_id(self, session, parent_ids):
|
||||
joined_parent_ids = ",".join([
|
||||
"\"{}\"".format(parent_id)
|
||||
for parent_id in parent_ids
|
||||
])
|
||||
task_entities = session.query(
|
||||
"Task where parent_id in ({})".format(joined_parent_ids)
|
||||
).all()
|
||||
|
||||
return {
|
||||
entity["id"]: entity
|
||||
for entity in task_entities
|
||||
}
|
||||
|
||||
def get_statuses_by_id(self, session, task_entities=None, status_ids=None):
|
||||
if task_entities is None and status_ids is None:
|
||||
return {}
|
||||
|
||||
if status_ids is None:
|
||||
status_ids = []
|
||||
for task_entity in task_entities:
|
||||
status_ids.append(task_entity["status_id"])
|
||||
|
||||
if not status_ids:
|
||||
return {}
|
||||
|
||||
status_entities = session.query(
|
||||
"Status where id in ({})".format(", ".join(status_ids))
|
||||
).all()
|
||||
|
||||
return {
|
||||
entity["id"]: entity
|
||||
for entity in status_entities
|
||||
}
|
||||
|
||||
def launch(self, session, event):
|
||||
'''Propagates status from version to task when changed'''
|
||||
|
||||
entities_info = self.filter_entities_info(session, event)
|
||||
if not entities_info:
|
||||
return
|
||||
|
||||
object_types = session.query("select id, name from ObjectType").all()
|
||||
parents_by_id = self.get_parents_by_id(
|
||||
session, entities_info, object_types
|
||||
)
|
||||
if not parents_by_id:
|
||||
return
|
||||
tasks_by_id = self.get_tasks_by_id(
|
||||
session, tuple(parents_by_id.keys())
|
||||
)
|
||||
|
||||
# Just collect them in one variable
|
||||
entities_by_id = {}
|
||||
for entity_id, entity in parents_by_id.items():
|
||||
entities_by_id[entity_id] = entity
|
||||
for entity_id, entity in tasks_by_id.items():
|
||||
entities_by_id[entity_id] = entity
|
||||
|
||||
# Map task entities by their parents
|
||||
tasks_by_parent_id = collections.defaultdict(list)
|
||||
for task_entity in tasks_by_id.values():
|
||||
tasks_by_parent_id[task_entity["parent_id"]].append(task_entity)
|
||||
|
||||
# Found status entities for all queried entities
|
||||
statuses_by_id = self.get_statuses_by_id(
|
||||
session,
|
||||
entities_by_id.values()
|
||||
)
|
||||
|
||||
# New status determination logic
|
||||
new_statuses_by_parent_id = self.new_status_by_all_task_statuses(
|
||||
parents_by_id.keys(), tasks_by_parent_id, statuses_by_id
|
||||
)
|
||||
|
||||
# Check if there are remaining any parents that does not have
|
||||
# determined new status yet
|
||||
remainder_tasks_by_parent_id = collections.defaultdict(list)
|
||||
for entity_info in entities_info:
|
||||
parent_id = entity_info["parentId"]
|
||||
if (
|
||||
# Skip if already has determined new status
|
||||
parent_id in new_statuses_by_parent_id
|
||||
# Skip if parent is not in parent mapping
|
||||
# - if was not found or parent type is not interesting
|
||||
or parent_id not in parents_by_id
|
||||
):
|
||||
continue
|
||||
|
||||
remainder_tasks_by_parent_id[parent_id].append(
|
||||
entities_by_id[entity_info["entityId"]]
|
||||
)
|
||||
|
||||
# Try to find new status for remained parents
|
||||
new_statuses_by_parent_id.update(
|
||||
self.new_status_by_remainders(
|
||||
remainder_tasks_by_parent_id,
|
||||
statuses_by_id
|
||||
)
|
||||
)
|
||||
|
||||
# Make sure new_status is set to valid value
|
||||
for parent_id in tuple(new_statuses_by_parent_id.keys()):
|
||||
new_status_name = new_statuses_by_parent_id[parent_id]
|
||||
if not new_status_name:
|
||||
new_statuses_by_parent_id.pop(parent_id)
|
||||
|
||||
# If there are not new statuses then just skip
|
||||
if not new_statuses_by_parent_id:
|
||||
return
|
||||
|
||||
# Get project schema from any available entity
|
||||
_entity = None
|
||||
for _ent in entities_by_id.values():
|
||||
_entity = _ent
|
||||
break
|
||||
|
||||
project_entity = self.get_project_from_entity(_entity)
|
||||
project_schema = project_entity["project_schema"]
|
||||
|
||||
# Map type names by lowere type names
|
||||
types_mapping = {
|
||||
_type.lower(): _type
|
||||
for _type in session.types
|
||||
}
|
||||
# Map object type id by lowered and modified object type name
|
||||
object_type_mapping = {}
|
||||
for object_type in object_types:
|
||||
mapping_name = object_type["name"].lower().replace(" ", "")
|
||||
object_type_mapping[object_type["id"]] = mapping_name
|
||||
|
||||
statuses_by_obj_id = {}
|
||||
for parent_id, new_status_name in new_statuses_by_parent_id.items():
|
||||
if not new_status_name:
|
||||
continue
|
||||
parent_entity = entities_by_id[parent_id]
|
||||
obj_id = parent_entity["object_type_id"]
|
||||
|
||||
# Find statuses for entity type by object type name
|
||||
# in project's schema and cache them
|
||||
if obj_id not in statuses_by_obj_id:
|
||||
mapping_name = object_type_mapping[obj_id]
|
||||
mapped_name = types_mapping.get(mapping_name)
|
||||
statuses = project_schema.get_statuses(mapped_name)
|
||||
statuses_by_obj_id[obj_id] = {
|
||||
status["name"].lower(): status
|
||||
for status in statuses
|
||||
}
|
||||
|
||||
statuses_by_name = statuses_by_obj_id[obj_id]
|
||||
new_status = statuses_by_name.get(new_status_name)
|
||||
ent_path = "/".join(
|
||||
[ent["name"] for ent in parent_entity["link"]]
|
||||
)
|
||||
if not new_status:
|
||||
self.log.warning((
|
||||
"\"{}\" Couldn't change status to \"{}\"."
|
||||
" Status is not available for entity type \"{}\"."
|
||||
).format(
|
||||
ent_path, new_status_name, parent_entity.entity_type
|
||||
))
|
||||
continue
|
||||
|
||||
current_status_name = parent_entity["status"]["name"]
|
||||
# Do nothing if status is already set
|
||||
if new_status["name"] == current_status_name:
|
||||
self.log.debug(
|
||||
"\"{}\" Status \"{}\" already set.".format(
|
||||
ent_path, current_status_name
|
||||
)
|
||||
)
|
||||
continue
|
||||
|
||||
try:
|
||||
parent_entity["status"] = new_status
|
||||
session.commit()
|
||||
self.log.info(
|
||||
"\"{}\" changed status to \"{}\"".format(
|
||||
ent_path, new_status["name"]
|
||||
)
|
||||
)
|
||||
except Exception:
|
||||
session.rollback()
|
||||
self.log.warning(
|
||||
"\"{}\" status couldnt be set to \"{}\"".format(
|
||||
ent_path, new_status["name"]
|
||||
),
|
||||
exc_info=True
|
||||
)
|
||||
|
||||
def new_status_by_all_task_statuses(
|
||||
self, parent_ids, tasks_by_parent_id, statuses_by_id
|
||||
):
|
||||
"""All statuses of parent entity must match specific status names.
|
||||
|
||||
Only if all task statuses match the condition parent's status name is
|
||||
determined.
|
||||
"""
|
||||
output = {}
|
||||
for parent_id in parent_ids:
|
||||
task_statuses_lowered = set()
|
||||
for task_entity in tasks_by_parent_id[parent_id]:
|
||||
task_status = statuses_by_id[task_entity["status_id"]]
|
||||
low_status_name = task_status["name"].lower()
|
||||
task_statuses_lowered.add(low_status_name)
|
||||
|
||||
new_status = None
|
||||
for item in self.parent_status_match_all_task_statuses:
|
||||
valid_item = True
|
||||
for status_name_low in task_statuses_lowered:
|
||||
if status_name_low not in item["task_statuses"]:
|
||||
valid_item = False
|
||||
break
|
||||
|
||||
if valid_item:
|
||||
new_status = item["new_status"]
|
||||
break
|
||||
|
||||
if new_status is not None:
|
||||
output[parent_id] = new_status
|
||||
|
||||
return output
|
||||
|
||||
def new_status_by_remainders(
|
||||
self, remainder_tasks_by_parent_id, statuses_by_id
|
||||
):
|
||||
"""By new task status can be determined new status of parent."""
|
||||
output = {}
|
||||
if not remainder_tasks_by_parent_id:
|
||||
return output
|
||||
|
||||
for parent_id, task_entities in remainder_tasks_by_parent_id.items():
|
||||
if not task_entities:
|
||||
continue
|
||||
|
||||
# For cases there are multiple tasks in changes
|
||||
# - task status which match any new status item by order in the
|
||||
# list `parent_status_by_task_status` is preffered
|
||||
best_order = len(self.parent_status_by_task_status)
|
||||
best_order_status = None
|
||||
for task_entity in task_entities:
|
||||
task_status = statuses_by_id[task_entity["status_id"]]
|
||||
low_status_name = task_status["name"].lower()
|
||||
for order, item in enumerate(
|
||||
self.parent_status_by_task_status
|
||||
):
|
||||
if order >= best_order:
|
||||
break
|
||||
|
||||
if low_status_name in item["task_statuses"]:
|
||||
best_order = order
|
||||
best_order_status = item["new_status"]
|
||||
break
|
||||
|
||||
if best_order_status:
|
||||
output[parent_id] = best_order_status
|
||||
return output
|
||||
|
||||
|
||||
def register(session, plugins_presets):
|
||||
TaskStatusToParent(session, plugins_presets).register()
|
||||
|
|
@@ -205,6 +205,19 @@ class AfterEffectsServerStub():
                                            item_id=item.id,
                                            path=path, item_name=item_name))

    def rename_item(self, item, item_name):
        """ Replace item with item_name

            Args:
                item (dict):
                item_name (string): label on item in Project list

        """
        self.websocketserver.call(self.client.call
                                  ('AfterEffects.rename_item',
                                   item_id=item.id,
                                   item_name=item_name))

    def delete_item(self, item):
        """ Deletes FootageItem with new file
            Args:

@@ -234,6 +247,43 @@ class AfterEffectsServerStub():
                                   color_idx=color_idx
                                   ))

    def get_work_area(self, item_id):
        """ Get work area information for render purposes
            Args:
                item_id (int):

        """
        res = self.websocketserver.call(self.client.call
                                        ('AfterEffects.get_work_area',
                                         item_id=item_id
                                         ))

        records = self._to_records(res)
        if records:
            return records.pop()

        log.debug("Couldn't get work area")

    def set_work_area(self, item, start, duration, frame_rate):
        """
            Set work area to predefined values (from Ftrack).
            Work area directs what gets rendered.
            Beware of rounding, AE expects seconds, not frames directly.

            Args:
                item (dict):
                start (float): workAreaStart in seconds
                duration (float): in seconds
                frame_rate (float): frames per second
        """
        self.websocketserver.call(self.client.call
                                  ('AfterEffects.set_work_area',
                                   item_id=item.id,
                                   start=start,
                                   duration=duration,
                                   frame_rate=frame_rate
                                   ))

    def save(self):
        """
            Saves active document
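Note on the rounding warning in set_work_area above: a minimal sketch of the frame-to-seconds conversion a caller would do first, assuming the frame range and fps come from Ftrack (all values below are illustrative, not part of this changeset):

    frame_start = 1001            # assumed Ftrack frame values
    frame_end = 1100
    fps = 25.0
    start = frame_start / fps                       # workAreaStart in seconds
    duration = (frame_end - frame_start + 1) / fps  # length in seconds
    # stub.set_work_area(item, start, duration, fps)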
@@ -11,42 +11,37 @@ class CreateRender(api.Creator):
    """Render folder for publish."""

    name = "renderDefault"
    label = "Render"
    label = "Render on Farm"
    family = "render"

    def process(self):
        # Photoshop can have multiple LayerSets with the same name, which does
        # not work with Avalon.
        txt = "Instance with name \"{}\" already exists.".format(self.name)
        stub = aftereffects.stub()  # only after After Effects is up
        for layer in stub.get_items(comps=True,
                                    folders=False,
                                    footages=False):
            if self.name.lower() == layer.name.lower():
                msg = Qt.QtWidgets.QMessageBox()
                msg.setIcon(Qt.QtWidgets.QMessageBox.Warning)
                msg.setText(txt)
                msg.exec_()
                return False
        log.debug("options:: {}".format(self.options))
        print("options:: {}".format(self.options))
        if (self.options or {}).get("useSelection"):
            log.debug("useSelection")
            print("useSelection")
            items = stub.get_selected_items(comps=True,
                                            folders=False,
                                            footages=False)
        else:
            items = stub.get_items(comps=True,
                                   folders=False,
                                   footages=False)
        log.debug("items:: {}".format(items))
        print("items:: {}".format(items))
            self._show_msg("Please select only single composition at time.")
            return False

        if not items:
            raise ValueError("Nothing to create. Select composition " +
                             "if 'useSelection' or create at least " +
                             "one composition.")
            self._show_msg("Nothing to create. Select composition " +
                           "if 'useSelection' or create at least " +
                           "one composition.")
            return False

        for item in items:
            txt = "Instance with name \"{}\" already exists.".format(self.name)
            if self.name.lower() == item.name.lower():
                self._show_msg(txt)
                return False

            stub.imprint(item, self.data)
            stub.set_label_color(item.id, 14)  # Cyan options 0 - 16
            stub.rename_item(item, self.data["subset"])

    def _show_msg(self, txt):
        msg = Qt.QtWidgets.QMessageBox()
        msg.setIcon(Qt.QtWidgets.QMessageBox.Warning)
        msg.setText(txt)
        msg.exec_()
18  pype/plugins/aftereffects/publish/collect_current_file.py  Normal file

@@ -0,0 +1,18 @@
import os

import pyblish.api

from avalon import aftereffects


class CollectCurrentFile(pyblish.api.ContextPlugin):
    """Inject the current working file into context"""

    order = pyblish.api.CollectorOrder - 0.5
    label = "Current File"
    hosts = ["aftereffects"]

    def process(self, context):
        context.data["currentFile"] = os.path.normpath(
            aftereffects.stub().get_active_document_full_name()
        ).replace("\\", "/")
144  pype/plugins/aftereffects/publish/collect_render.py  Normal file

@@ -0,0 +1,144 @@
from pype.lib import abstract_collect_render
from pype.lib.abstract_collect_render import RenderInstance
import pyblish.api
import attr
import os

from avalon import aftereffects


@attr.s
class AERenderInstance(RenderInstance):
    # extend generic, composition name is needed
    comp_name = attr.ib(default=None)


class CollectAERender(abstract_collect_render.AbstractCollectRender):

    order = pyblish.api.CollectorOrder + 0.498
    label = "Collect After Effects Render Layers"
    hosts = ["aftereffects"]

    padding_width = 6
    rendered_extension = 'png'

    def get_instances(self, context):
        instances = []

        current_file = context.data["currentFile"]
        version = context.data["version"]
        asset_entity = context.data["assetEntity"]
        project_entity = context.data["projectEntity"]

        compositions = aftereffects.stub().get_items(True)
        compositions_by_id = {item.id: item for item in compositions}
        for item_id, inst in aftereffects.stub().get_metadata().items():
            schema = inst.get('schema')
            # loaded asset container skip it
            if schema and 'container' in schema:
                continue

            work_area_info = aftereffects.stub().get_work_area(int(item_id))
            frameStart = round(float(work_area_info.workAreaStart) *
                               float(work_area_info.frameRate))

            frameEnd = round(float(work_area_info.workAreaStart) *
                             float(work_area_info.frameRate) +
                             float(work_area_info.workAreaDuration) *
                             float(work_area_info.frameRate))

            if inst["family"] == "render" and inst["active"]:
                instance = AERenderInstance(
                    family="render.farm",  # other way integrate would catch it
                    families=["render.farm"],
                    version=version,
                    time="",
                    source=current_file,
                    label="{} - farm".format(inst["subset"]),
                    subset=inst["subset"],
                    asset=context.data["assetEntity"]["name"],
                    attachTo=False,
                    setMembers='',
                    publish=True,
                    renderer='aerender',
                    name=inst["subset"],
                    resolutionWidth=asset_entity["data"].get(
                        "resolutionWidth",
                        project_entity["data"]["resolutionWidth"]),
                    resolutionHeight=asset_entity["data"].get(
                        "resolutionHeight",
                        project_entity["data"]["resolutionHeight"]),
                    pixelAspect=1,
                    tileRendering=False,
                    tilesX=0,
                    tilesY=0,
                    frameStart=frameStart,
                    frameEnd=frameEnd,
                    frameStep=1,
                    toBeRenderedOn='deadline'
                )

                comp = compositions_by_id.get(int(item_id))
                if not comp:
                    raise ValueError("There is no composition for item {}".
                                     format(item_id))
                instance.comp_name = comp.name
                instance._anatomy = context.data["anatomy"]
                instance.anatomyData = context.data["anatomyData"]

                instance.outputDir = self._get_output_dir(instance)

                instances.append(instance)

        return instances

    def get_expected_files(self, render_instance):
        """
            Returns list of rendered files that should be created by
            Deadline. These are not published directly, they are source
            for later 'submit_publish_job'.

        Args:
            render_instance (RenderInstance): to pull anatomy and parts used
                in url

        Returns:
            (list) of absolute urls to rendered file
        """
        start = render_instance.frameStart
        end = render_instance.frameEnd

        base_dir = self._get_output_dir(render_instance)
        expected_files = []
        for frame in range(start, end + 1):
            path = os.path.join(base_dir, "{}_{}_{}.{}.{}".format(
                render_instance.asset,
                render_instance.subset,
                "v{:03d}".format(render_instance.version),
                str(frame).zfill(self.padding_width),
                self.rendered_extension
            ))
            expected_files.append(path)

        return expected_files

    def _get_output_dir(self, render_instance):
        """
            Returns dir path of rendered files, used in submit_publish_job
            for metadata.json location.
            Should be in separate folder inside of work area.

        Args:
            render_instance (RenderInstance):

        Returns:
            (str): absolute path to rendered files
        """
        # render to folder of workfile
        base_dir = os.path.dirname(render_instance.source)
        file_name, _ = os.path.splitext(
            os.path.basename(render_instance.source))
        base_dir = os.path.join(base_dir, 'renders', 'aftereffects', file_name)

        # for submit_publish_job
        return base_dir
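For orientation, a minimal sketch of the file name that get_expected_files builds for a single frame; the asset, subset and version values are made up for illustration:

    asset, subset, version, frame = "sh010", "renderCompositingDefault", 1, 1
    name = "{}_{}_{}.{}.{}".format(
        asset, subset, "v{:03d}".format(version), str(frame).zfill(6), "png")
    # -> sh010_renderCompositingDefault_v001.000001.png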
70  pype/plugins/aftereffects/publish/collect_workfile.py  Normal file

@@ -0,0 +1,70 @@
import os
from avalon import api
import pyblish.api


class CollectWorkfile(pyblish.api.ContextPlugin):
    """ Adds the AE render instances """

    label = "Collect After Effects Workfile Instance"
    order = pyblish.api.CollectorOrder + 0.1

    def process(self, context):
        task = api.Session["AVALON_TASK"]
        current_file = context.data["currentFile"]
        staging_dir = os.path.dirname(current_file)
        scene_file = os.path.basename(current_file)
        version = context.data["version"]
        asset_entity = context.data["assetEntity"]
        project_entity = context.data["projectEntity"]

        shared_instance_data = {
            "asset": asset_entity["name"],
            "frameStart": asset_entity["data"]["frameStart"],
            "frameEnd": asset_entity["data"]["frameEnd"],
            "handleStart": asset_entity["data"]["handleStart"],
            "handleEnd": asset_entity["data"]["handleEnd"],
            "fps": asset_entity["data"]["fps"],
            "resolutionWidth": asset_entity["data"].get(
                "resolutionWidth",
                project_entity["data"]["resolutionWidth"]),
            "resolutionHeight": asset_entity["data"].get(
                "resolutionHeight",
                project_entity["data"]["resolutionHeight"]),
            "pixelAspect": 1,
            "step": 1,
            "version": version
        }

        # workfile instance
        family = "workfile"
        subset = family + task.capitalize()
        # Create instance
        instance = context.create_instance(subset)

        # creating instance data
        instance.data.update({
            "subset": subset,
            "label": scene_file,
            "family": family,
            "families": [family, "ftrack"],
            "representations": list()
        })

        # adding basic script data
        instance.data.update(shared_instance_data)

        # creating representation
        representation = {
            'name': 'aep',
            'ext': 'aep',
            'files': scene_file,
            "stagingDir": staging_dir,
        }

        instance.data["representations"].append(representation)

        self.log.info('Publishing After Effects workfile')

        for i in context:
            self.log.debug(f"{i.data['families']}")
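A small illustration of the workfile subset naming used above, assuming the Avalon task is called "compositing" (hypothetical value):

    family = "workfile"
    task = "compositing"                 # assumed AVALON_TASK value
    subset = family + task.capitalize()  # -> "workfileCompositing"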
30  pype/plugins/aftereffects/publish/increment_workfile.py  Normal file

@@ -0,0 +1,30 @@
import pyblish.api
from pype.action import get_errored_plugins_from_data
from pype.lib import version_up

from avalon import aftereffects


class IncrementWorkfile(pyblish.api.InstancePlugin):
    """Increment the current workfile.

    Saves the current scene with an increased version number.
    """

    label = "Increment Workfile"
    order = pyblish.api.IntegratorOrder + 9.0
    hosts = ["aftereffects"]
    families = ["workfile"]
    optional = True

    def process(self, instance):
        errored_plugins = get_errored_plugins_from_data(instance.context)
        if errored_plugins:
            raise RuntimeError(
                "Skipping incrementing current file because publishing failed."
            )

        scene_path = version_up(instance.context.data["currentFile"])
        aftereffects.stub().saveAs(scene_path, True)

        self.log.info("Incremented workfile to: {}".format(scene_path))
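Rough illustration of the version bump, using a hypothetical workfile path; version_up from pype.lib increments the version token in the file name before the scene is saved again through the AfterEffects stub:

    current = "C:/projects/demo/work/sh010_compositing_v001.aep"  # hypothetical
    # version_up(current) would yield ".../sh010_compositing_v002.aep",
    # which is then passed to aftereffects.stub().saveAs(scene_path, True)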
@@ -0,0 +1,97 @@
from pype.lib import abstract_submit_deadline
from pype.lib.abstract_submit_deadline import DeadlineJobInfo
import pyblish.api
import os
import attr
import getpass
from avalon import api


@attr.s
class DeadlinePluginInfo():
    Comp = attr.ib(default=None)
    SceneFile = attr.ib(default=None)
    OutputFilePath = attr.ib(default=None)
    Output = attr.ib(default=None)
    StartupDirectory = attr.ib(default=None)
    Arguments = attr.ib(default=None)
    ProjectPath = attr.ib(default=None)
    AWSAssetFile0 = attr.ib(default=None)
    Version = attr.ib(default=None)


class AfterEffectsSubmitDeadline(abstract_submit_deadline.AbstractSubmitDeadline):

    label = "Submit AE to Deadline"
    order = pyblish.api.IntegratorOrder
    hosts = ["aftereffects"]
    families = ["render.farm"]  # cannot be "render" as that is integrated
    use_published = False

    def get_job_info(self):
        dln_job_info = DeadlineJobInfo(Plugin="AfterEffects")

        context = self._instance.context

        dln_job_info.Name = self._instance.data["name"]
        dln_job_info.BatchName = os.path.basename(self._instance.
                                                  data["source"])
        dln_job_info.Plugin = "AfterEffects"
        dln_job_info.UserName = context.data.get(
            "deadlineUser", getpass.getuser())
        frame_range = "{}-{}".format(self._instance.data["frameStart"],
                                     self._instance.data["frameEnd"])
        dln_job_info.Frames = frame_range
        dln_job_info.OutputFilename = \
            os.path.basename(self._instance.data["expectedFiles"][0])
        dln_job_info.OutputDirectory = \
            os.path.dirname(self._instance.data["expectedFiles"][0])
        dln_job_info.JobDelay = "00:00:00"

        keys = [
            "FTRACK_API_KEY",
            "FTRACK_API_USER",
            "FTRACK_SERVER",
            "AVALON_PROJECT",
            "AVALON_ASSET",
            "AVALON_TASK",
            "PYPE_USERNAME",
            "PYPE_DEV",
            "PYPE_LOG_NO_COLORS"
        ]

        environment = dict({key: os.environ[key] for key in keys
                            if key in os.environ}, **api.Session)
        for key in keys:
            val = environment.get(key)
            if val:
                dln_job_info.EnvironmentKeyValue = "{key}={value}".format(
                    key=key,
                    value=val)

        return dln_job_info

    def get_plugin_info(self):
        deadline_plugin_info = DeadlinePluginInfo()
        context = self._instance.context
        script_path = context.data["currentFile"]

        render_path = self._instance.data["expectedFiles"][0]
        # replace frame info ('000001') with Deadline's required '[#######]'
        # expects filename in format project_asset_subset_version.FRAME.ext
        render_dir = os.path.dirname(render_path)
        file_name = os.path.basename(render_path)
        arr = file_name.split('.')
        assert len(arr) == 3, \
            "Unable to parse frames from {}".format(file_name)
        hashed = '[{}]'.format(len(arr[1]) * "#")

        render_path = os.path.join(render_dir,
                                   '{}.{}.{}'.format(arr[0], hashed, arr[2]))

        deadline_plugin_info.Comp = self._instance.data["comp_name"]
        deadline_plugin_info.Version = "17.5"
        deadline_plugin_info.SceneFile = script_path
        deadline_plugin_info.Output = render_path.replace("\\", "/")

        return attr.asdict(deadline_plugin_info)
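A quick sketch of the padding substitution done in get_plugin_info, with a made-up file name:

    file_name = "demo_sh010_renderComp_v001.000001.png"   # illustrative only
    arr = file_name.split('.')
    hashed = '[{}]'.format(len(arr[1]) * "#")
    # '{}.{}.{}'.format(arr[0], hashed, arr[2])
    # -> demo_sh010_renderComp_v001.[######].png  (what Deadline expects)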
@@ -30,7 +30,8 @@ class ExtractReview(pyblish.api.InstancePlugin):
        "premiere",
        "harmony",
        "standalonepublisher",
        "fusion"
        "fusion",
        "tvpaint"
    ]

    # Supported extensions

@@ -121,11 +122,24 @@ class ExtractReview(pyblish.api.InstancePlugin):

        # Loop through representations
        for repre in tuple(instance.data["representations"]):
            repre_name = str(repre.get("name"))
            tags = repre.get("tags") or []
            if "review" not in tags or "thumbnail" in tags:
            if "review" not in tags:
                self.log.debug((
                    "Repre: {} - Didn't found \"review\" in tags. Skipping"
                ).format(repre_name))
                continue

            if "thumbnail" in tags:
                self.log.debug((
                    "Repre: {} - Found \"thumbnail\" in tags. Skipping"
                ).format(repre_name))
                continue

            if "passing" in tags:
                self.log.debug((
                    "Repre: {} - Found \"passing\" in tags. Skipping"
                ).format(repre_name))
                continue

            input_ext = repre["ext"]
@@ -91,7 +91,8 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
        "harmony.template",
        "harmony.palette",
        "editorial",
        "background"
        "background",
        "camerarig"
    ]
    exclude_families = ["clip"]
    db_representation_context_keys = [
@@ -128,12 +128,14 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
    order = pyblish.api.IntegratorOrder + 0.2
    icon = "tractor"

    hosts = ["fusion", "maya", "nuke", "celaction"]
    hosts = ["fusion", "maya", "nuke", "celaction", "aftereffects"]

    families = ["render.farm", "prerener",
                "renderlayer", "imagesequence", "vrayscene"]

    aov_filter = {"maya": ["beauty"]}
    aov_filter = {"maya": [r".+(?:\.|_)([Bb]eauty)(?:\.|_).*"],
                  "aftereffects": [r".*"],  # for everything from AE
                  "celaction": [r".*"]}

    enviro_filter = [
        "FTRACK_API_USER",

@@ -447,8 +449,12 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):

            preview = False
            if app in self.aov_filter.keys():
                if aov in self.aov_filter[app]:
                    preview = True
                for aov_pattern in self.aov_filter[app]:
                    if re.match(aov_pattern,
                                aov
                                ):
                        preview = True
                        break

            new_instance = copy(instance_data)
            new_instance["subset"] = subset_name

@@ -511,7 +517,7 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
        """
        representations = []
        collections, remainders = clique.assemble(exp_files)
        bake_render_path = instance.get("bakeRenderPath")
        bake_render_path = instance.get("bakeRenderPath", [])

        # create representation for every collected sequence
        for collection in collections:

@@ -519,23 +525,19 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
            preview = False
            # if filtered aov name is found in filename, toggle it for
            # preview video rendering
            for app in self.aov_filter:
            for app in self.aov_filter.keys():
                if os.environ.get("AVALON_APP", "") == app:
                    for aov in self.aov_filter[app]:
                        if re.match(
                            r".+(?:\.|_)({})(?:\.|_).*".format(aov),
                            aov,
                            list(collection)[0]
                        ):
                            preview = True
                            break
                    break

            if bake_render_path:
                preview = False

            if "celaction" in pyblish.api.registered_hosts():
                preview = True

            staging = os.path.dirname(list(collection)[0])
            success, rootless_staging_dir = (
                self.anatomy.find_root_template_from_path(staging)

@@ -557,7 +559,7 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
            # If expectedFile are absolute, we need only filenames
            "stagingDir": staging,
            "fps": instance.get("fps"),
            "tags": ["review", "preview"] if preview else [],
            "tags": ["review"] if preview else [],
        }

        # poor man exclusion

@@ -709,8 +711,7 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
        "resolutionWidth": data.get("resolutionWidth", 1920),
        "resolutionHeight": data.get("resolutionHeight", 1080),
        "multipartExr": data.get("multipartExr", False),
        "jobBatchName": data.get("jobBatchName", ""),
        "review": data.get("review", True)
        "jobBatchName": data.get("jobBatchName", "")
    }

    if "prerender" in instance.data["families"]:
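To show how the new regex-based aov_filter behaves, a minimal sketch using the Maya pattern above (file names are invented for the example):

    import re
    pattern = r".+(?:\.|_)([Bb]eauty)(?:\.|_).*"
    re.match(pattern, "sh010_beauty.0001.exr")    # matches -> preview = True
    re.match(pattern, "sh010_specular.0001.exr")  # no match -> preview stays False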
@@ -5,7 +5,7 @@ from pype.hosts.maya import lib
class CreateCamera(avalon.maya.Creator):
    """Single baked camera"""

    name = "cameraDefault"
    name = "cameraMain"
    label = "Camera"
    family = "camera"
    icon = "video-camera"

@@ -22,3 +22,13 @@ class CreateCamera(avalon.maya.Creator):
        # Bake to world space by default, when this is False it will also
        # include the parent hierarchy in the baked results
        self.data['bakeToWorldSpace'] = True


class CreateCameraRig(avalon.maya.Creator):
    """Complex hierarchy with camera."""

    name = "camerarigMain"
    label = "Camera Rig"
    family = "camerarig"
    icon = "video-camera"
    defaults = ['Main']
@@ -15,7 +15,8 @@ class ReferenceLoader(pype.hosts.maya.plugin.ReferenceLoader):
                "setdress",
                "layout",
                "camera",
                "rig"]
                "rig",
                "camerarig"]
    representations = ["ma", "abc", "fbx", "mb"]
    tool_names = ["loader"]
@@ -39,20 +39,20 @@ class VRayProxyLoader(api.Loader):
        with lib.maintained_selection():
            cmds.namespace(addNamespace=namespace)
            with namespaced(namespace, new=False):
                nodes = self.create_vray_proxy(name,
                nodes, group_node = self.create_vray_proxy(name,
                                               filename=self.fname)

        self[:] = nodes
        if not nodes:
            return

        # colour the group node
        presets = config.get_presets(project=os.environ['AVALON_PROJECT'])
        colors = presets['plugins']['maya']['load']['colors']

        c = colors.get(family)
        if c is not None:
            cmds.setAttr("{0}_{1}.useOutlinerColor".format(name, "GRP"), 1)
            cmds.setAttr("{0}_{1}.outlinerColor".format(name, "GRP"),
            cmds.setAttr("{0}.useOutlinerColor".format(group_node), 1)
            cmds.setAttr("{0}.outlinerColor".format(group_node),
                         c[0], c[1], c[2])

        return containerise(

@@ -158,4 +158,4 @@ class VRayProxyLoader(api.Loader):
        cmds.refresh()
        cmds.setAttr("{}.geomType".format(vray_mesh), 2)

        return nodes
        return nodes, group_node
@@ -18,7 +18,8 @@ class ExtractMayaSceneRaw(pype.api.Extractor):
    hosts = ["maya"]
    families = ["mayaAscii",
                "setdress",
                "layout"]
                "layout",
                "camerarig"]
    scene_type = "ma"

    def process(self, instance):
@@ -0,0 +1,21 @@
"""
Optional:
    instance.data["remove"] -> marker for removing
"""
import pyblish.api


class CollectClearInstances(pyblish.api.InstancePlugin):
    """Clear all marked instances"""

    order = pyblish.api.CollectorOrder + 0.4999
    label = "Clear Instances"
    hosts = ["standalonepublisher"]

    def process(self, instance):
        self.log.debug(
            f"Instance: `{instance}` | "
            f"families: `{instance.data['families']}`")
        if instance.data.get("remove"):
            self.log.info(f"Removing: {instance}")
            instance.context.remove(instance)
@@ -1,3 +1,19 @@
"""
Optional:
    presets -> extensions (
        example of use:
            [".mov", ".mp4"]
    )
    presets -> source_dir (
        example of use:
            "C:/pathToFolder"
            "{root}/{project[name]}/inputs"
            "{root[work]}/{project[name]}/inputs"
            "./input"
            "../input"
    )
"""

import os
import opentimelineio as otio
import pyblish.api

@@ -33,8 +49,10 @@ class CollectEditorial(pyblish.api.InstancePlugin):

    # presets
    extensions = [".mov", ".mp4"]
    source_dir = None

    def process(self, instance):
        root_dir = None
        # remove context test attribute
        if instance.context.data.get("subsetNamesCheck"):
            instance.context.data.pop("subsetNamesCheck")

@@ -53,19 +71,42 @@ class CollectEditorial(pyblish.api.InstancePlugin):
        # get video file path
        video_path = None
        basename = os.path.splitext(os.path.basename(file_path))[0]
        for f in os.listdir(staging_dir):
            self.log.debug(f"__ test file: `{f}`")
            # filter out by not sharing the same name
            if os.path.splitext(f)[0] not in basename:
                continue
            # filter out by respected extensions
            if os.path.splitext(f)[1] not in self.extensions:
                continue
            video_path = os.path.join(
                staging_dir, f
            )
            self.log.debug(f"__ video_path: `{video_path}`")
        instance.data["editorialVideoPath"] = video_path

        if self.source_dir:
            source_dir = self.source_dir.replace("\\", "/")
            if ("./" in source_dir) or ("../" in source_dir):
                # get current working dir
                cwd = os.getcwd()
                # set cwd to staging dir for absolute path solving
                os.chdir(staging_dir)
                root_dir = os.path.abspath(source_dir)
                # set back original cwd
                os.chdir(cwd)
            elif "{" in source_dir:
                root_dir = source_dir
            else:
                root_dir = os.path.normpath(source_dir)

        if root_dir:
            # source data will need to be searched for later
            instance.data["editorialSourceRoot"] = root_dir
            instance.data["editorialSourcePath"] = None
        else:
            # source data are already found
            for f in os.listdir(staging_dir):
                # filter out by not sharing the same name
                if os.path.splitext(f)[0] not in basename:
                    continue
                # filter out by respected extensions
                if os.path.splitext(f)[1] not in self.extensions:
                    continue
                video_path = os.path.join(
                    staging_dir, f
                )
                self.log.debug(f"__ video_path: `{video_path}`")
            instance.data["editorialSourceRoot"] = staging_dir
            instance.data["editorialSourcePath"] = video_path

        instance.data["stagingDir"] = staging_dir

        # get editorial sequence file into otio timeline object
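A short sketch of how a relative source_dir preset resolves against the staging directory, mirroring the os.chdir logic above (paths are hypothetical):

    import os
    staging_dir = "/tmp/pyblish_tmp_abc123"   # assumed staging dir
    source_dir = "../input"                   # preset value
    cwd = os.getcwd()
    os.chdir(staging_dir)                     # resolve relative to staging dir
    root_dir = os.path.abspath(source_dir)    # -> "/tmp/input"
    os.chdir(cwd)                             # restore original cwd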
@ -2,7 +2,7 @@ import pyblish.api
|
|||
import re
|
||||
import os
|
||||
from avalon import io
|
||||
|
||||
from copy import deepcopy
|
||||
|
||||
class CollectHierarchyInstance(pyblish.api.ContextPlugin):
|
||||
"""Collecting hierarchy context from `parents` and `hierarchy` data
|
||||
|
|
@ -60,7 +60,7 @@ class CollectHierarchyInstance(pyblish.api.ContextPlugin):
|
|||
|
||||
def create_hierarchy(self, instance):
|
||||
parents = list()
|
||||
hierarchy = ""
|
||||
hierarchy = list()
|
||||
visual_hierarchy = [instance.context.data["assetEntity"]]
|
||||
while True:
|
||||
visual_parent = io.find_one(
|
||||
|
|
@ -81,27 +81,74 @@ class CollectHierarchyInstance(pyblish.api.ContextPlugin):
|
|||
})
|
||||
|
||||
if self.shot_add_hierarchy:
|
||||
parent_template_patern = re.compile(r"\{([a-z]*?)\}")
|
||||
# fill the parents parts from presets
|
||||
shot_add_hierarchy = self.shot_add_hierarchy.copy()
|
||||
hierarchy_parents = shot_add_hierarchy["parents"].copy()
|
||||
for parent in hierarchy_parents:
|
||||
hierarchy_parents[parent] = hierarchy_parents[parent].format(
|
||||
**instance.data["anatomyData"])
|
||||
|
||||
# fill parent keys data template from anatomy data
|
||||
for parent_key in hierarchy_parents:
|
||||
hierarchy_parents[parent_key] = hierarchy_parents[
|
||||
parent_key].format(**instance.data["anatomyData"])
|
||||
|
||||
for _index, _parent in enumerate(
|
||||
shot_add_hierarchy["parents_path"].split("/")):
|
||||
parent_filled = _parent.format(**hierarchy_parents)
|
||||
parent_key = parent_template_patern.findall(_parent).pop()
|
||||
|
||||
# in case SP context is set to the same folder
|
||||
if (_index == 0) and ("folder" in parent_key) \
|
||||
and (parents[-1]["entityName"] == parent_filled):
|
||||
self.log.debug(f" skiping : {parent_filled}")
|
||||
continue
|
||||
|
||||
# in case first parent is project then start parents from start
|
||||
if (_index == 0) and ("project" in parent_key):
|
||||
self.log.debug("rebuilding parents from scratch")
|
||||
project_parent = parents[0]
|
||||
parents = [project_parent]
|
||||
self.log.debug(f"project_parent: {project_parent}")
|
||||
self.log.debug(f"parents: {parents}")
|
||||
continue
|
||||
|
||||
prnt = self.convert_to_entity(
|
||||
parent, hierarchy_parents[parent])
|
||||
parent_key, parent_filled)
|
||||
parents.append(prnt)
|
||||
hierarchy.append(parent_filled)
|
||||
|
||||
hierarchy = shot_add_hierarchy[
|
||||
"parents_path"].format(**hierarchy_parents)
|
||||
# convert hierarchy to string
|
||||
hierarchy = "/".join(hierarchy)
|
||||
|
||||
# assing to instance data
|
||||
instance.data["hierarchy"] = hierarchy
|
||||
instance.data["parents"] = parents
|
||||
|
||||
# print
|
||||
self.log.debug(f"Hierarchy: {hierarchy}")
|
||||
self.log.debug(f"parents: {parents}")
|
||||
|
||||
if self.shot_add_tasks:
|
||||
instance.data["tasks"] = self.shot_add_tasks
|
||||
tasks_to_add = dict()
|
||||
project_tasks = io.find_one({"type": "project"})["config"]["tasks"]
|
||||
for task_name, task_data in self.shot_add_tasks.items():
|
||||
try:
|
||||
if task_data["type"] in project_tasks.keys():
|
||||
tasks_to_add.update({task_name: task_data})
|
||||
else:
|
||||
raise KeyError(
|
||||
"Wrong FtrackTaskType `{}` for `{}` is not"
|
||||
" existing in `{}``".format(
|
||||
task_data["type"],
|
||||
task_name,
|
||||
list(project_tasks.keys())))
|
||||
except KeyError as error:
|
||||
raise KeyError(
|
||||
"Wrong presets: `{0}`".format(error)
|
||||
)
|
||||
|
||||
instance.data["tasks"] = tasks_to_add
|
||||
else:
|
||||
instance.data["tasks"] = list()
|
||||
instance.data["tasks"] = dict()
|
||||
|
||||
# updating hierarchy data
|
||||
instance.data["anatomyData"].update({
|
||||
|
|
@ -117,7 +164,8 @@ class CollectHierarchyInstance(pyblish.api.ContextPlugin):
|
|||
def processing_instance(self, instance):
|
||||
self.log.info(f"_ instance: {instance}")
|
||||
# adding anatomyData for burnins
|
||||
instance.data["anatomyData"] = instance.context.data["anatomyData"]
|
||||
instance.data["anatomyData"] = deepcopy(
|
||||
instance.context.data["anatomyData"])
|
||||
|
||||
asset = instance.data["asset"]
|
||||
assets_shared = instance.context.data.get("assetsShared")
|
||||
|
|
@ -133,9 +181,6 @@ class CollectHierarchyInstance(pyblish.api.ContextPlugin):
|
|||
shot_name = instance.data["asset"]
|
||||
self.log.debug(f"Shot Name: {shot_name}")
|
||||
|
||||
if instance.data["hierarchy"] not in shot_name:
|
||||
self.log.warning("wrong parent")
|
||||
|
||||
label = f"{shot_name} ({frame_start}-{frame_end})"
|
||||
instance.data["label"] = label
|
||||
|
||||
|
|
@ -150,7 +195,8 @@ class CollectHierarchyInstance(pyblish.api.ContextPlugin):
|
|||
"asset": instance.data["asset"],
|
||||
"hierarchy": instance.data["hierarchy"],
|
||||
"parents": instance.data["parents"],
|
||||
"tasks": instance.data["tasks"]
|
||||
"tasks": instance.data["tasks"],
|
||||
"anatomyData": instance.data["anatomyData"]
|
||||
})
|
||||
|
||||
|
||||
|
|
@ -194,6 +240,7 @@ class CollectHierarchyContext(pyblish.api.ContextPlugin):
|
|||
instance.data["parents"] = s_asset_data["parents"]
|
||||
instance.data["hierarchy"] = s_asset_data["hierarchy"]
|
||||
instance.data["tasks"] = s_asset_data["tasks"]
|
||||
instance.data["anatomyData"] = s_asset_data["anatomyData"]
|
||||
|
||||
# generate hierarchy data only on shot instances
|
||||
if 'shot' not in instance.data.get('family', ''):
|
||||
|
|
@ -224,7 +271,9 @@ class CollectHierarchyContext(pyblish.api.ContextPlugin):
|
|||
|
||||
in_info['tasks'] = instance.data['tasks']
|
||||
|
||||
from pprint import pformat
|
||||
parents = instance.data.get('parents', [])
|
||||
self.log.debug(f"parents: {pformat(parents)}")
|
||||
|
||||
actual = {name: in_info}
|
||||
|
||||
|
|
@ -240,4 +289,5 @@ class CollectHierarchyContext(pyblish.api.ContextPlugin):
|
|||
|
||||
# adding hierarchy context to instance
|
||||
context.data["hierarchyContext"] = final_context
|
||||
self.log.debug(f"hierarchyContext: {pformat(final_context)}")
|
||||
self.log.info("Hierarchy instance collected")
|
||||
|
|
|
|||
|
|
@@ -23,6 +23,7 @@ class CollectInstanceData(pyblish.api.InstancePlugin):

    def process(self, instance):
        fps = instance.context.data["fps"]

        instance.data.update({
            "fps": fps
        })
@ -0,0 +1,266 @@
|
|||
import os
|
||||
import re
|
||||
import tempfile
|
||||
import pyblish.api
|
||||
from copy import deepcopy
|
||||
import clique
|
||||
|
||||
|
||||
class CollectInstanceResources(pyblish.api.InstancePlugin):
|
||||
"""Collect instance's resources"""
|
||||
|
||||
# must be after `CollectInstances`
|
||||
order = pyblish.api.CollectorOrder + 0.011
|
||||
label = "Collect Instance Resources"
|
||||
hosts = ["standalonepublisher"]
|
||||
families = ["clip"]
|
||||
|
||||
def process(self, instance):
|
||||
self.context = instance.context
|
||||
self.log.info(f"Processing instance: {instance}")
|
||||
self.new_instances = []
|
||||
subset_files = dict()
|
||||
subset_dirs = list()
|
||||
anatomy = self.context.data["anatomy"]
|
||||
anatomy_data = deepcopy(self.context.data["anatomyData"])
|
||||
anatomy_data.update({"root": anatomy.roots})
|
||||
|
||||
subset = instance.data["subset"]
|
||||
clip_name = instance.data["clipName"]
|
||||
|
||||
editorial_source_root = instance.data["editorialSourceRoot"]
|
||||
editorial_source_path = instance.data["editorialSourcePath"]
|
||||
|
||||
# if `editorial_source_path` then loop trough
|
||||
if editorial_source_path:
|
||||
# add family if mov or mp4 found which is longer for
|
||||
# cutting `trimming` to enable `ExtractTrimmingVideoAudio` plugin
|
||||
staging_dir = os.path.normpath(
|
||||
tempfile.mkdtemp(prefix="pyblish_tmp_")
|
||||
)
|
||||
instance.data["stagingDir"] = staging_dir
|
||||
instance.data["families"] += ["trimming"]
|
||||
return
|
||||
|
||||
# if template patern in path then fill it with `anatomy_data`
|
||||
if "{" in editorial_source_root:
|
||||
editorial_source_root = editorial_source_root.format(
|
||||
**anatomy_data)
|
||||
|
||||
self.log.debug(f"root: {editorial_source_root}")
|
||||
# loop `editorial_source_root` and find clip name in folders
|
||||
# and look for any subset name alternatives
|
||||
for root, dirs, _files in os.walk(editorial_source_root):
|
||||
# search only for directories related to clip name
|
||||
correct_clip_dir = None
|
||||
for _d_search in dirs:
|
||||
# avoid all non clip dirs
|
||||
if _d_search not in clip_name:
|
||||
continue
|
||||
# found correct dir for clip
|
||||
correct_clip_dir = _d_search
|
||||
|
||||
# continue if clip dir was not found
|
||||
if not correct_clip_dir:
|
||||
continue
|
||||
|
||||
clip_dir_path = os.path.join(root, correct_clip_dir)
|
||||
subset_files_items = list()
|
||||
# list content of clip dir and search for subset items
|
||||
for subset_item in os.listdir(clip_dir_path):
|
||||
# avoid all items which are not defined as subsets by name
|
||||
if subset not in subset_item:
|
||||
continue
|
||||
|
||||
subset_item_path = os.path.join(
|
||||
clip_dir_path, subset_item)
|
||||
# if it is dir store it to `subset_dirs` list
|
||||
if os.path.isdir(subset_item_path):
|
||||
subset_dirs.append(subset_item_path)
|
||||
|
||||
# if it is file then store it to `subset_files` list
|
||||
if os.path.isfile(subset_item_path):
|
||||
subset_files_items.append(subset_item_path)
|
||||
|
||||
if subset_files_items:
|
||||
subset_files.update({clip_dir_path: subset_files_items})
|
||||
|
||||
# break the loop if correct_clip_dir was captured
|
||||
# no need to cary on if corect folder was found
|
||||
if correct_clip_dir:
|
||||
break
|
||||
|
||||
if subset_dirs:
|
||||
# look all dirs and check for subset name alternatives
|
||||
for _dir in subset_dirs:
|
||||
instance_data = deepcopy(
|
||||
{k: v for k, v in instance.data.items()})
|
||||
sub_dir = os.path.basename(_dir)
|
||||
# if subset name is only alternative then create new instance
|
||||
if sub_dir != subset:
|
||||
instance_data = self.duplicate_instance(
|
||||
instance_data, subset, sub_dir)
|
||||
|
||||
# create all representations
|
||||
self.create_representations(
|
||||
os.listdir(_dir), instance_data, _dir)
|
||||
|
||||
if sub_dir == subset:
|
||||
self.new_instances.append(instance_data)
|
||||
# instance.data.update(instance_data)
|
||||
|
||||
if subset_files:
|
||||
unique_subset_names = list()
|
||||
root_dir = list(subset_files.keys()).pop()
|
||||
files_list = subset_files[root_dir]
|
||||
search_patern = f"({subset}[A-Za-z0-9]+)(?=[\\._\\s])"
|
||||
for _file in files_list:
|
||||
patern = re.compile(search_patern)
|
||||
match = patern.findall(_file)
|
||||
if not match:
|
||||
continue
|
||||
match_subset = match.pop()
|
||||
if match_subset in unique_subset_names:
|
||||
continue
|
||||
unique_subset_names.append(match_subset)
|
||||
|
||||
self.log.debug(f"unique_subset_names: {unique_subset_names}")
|
||||
|
||||
for _un_subs in unique_subset_names:
|
||||
instance_data = self.duplicate_instance(
|
||||
instance.data, subset, _un_subs)
|
||||
|
||||
# create all representations
|
||||
self.create_representations(
|
||||
[os.path.basename(f) for f in files_list
|
||||
if _un_subs in f],
|
||||
instance_data, root_dir)
|
||||
|
||||
# remove the original instance as it had been used only
|
||||
# as template and is duplicated
|
||||
self.context.remove(instance)
|
||||
|
||||
# create all instances in self.new_instances into context
|
||||
for new_instance in self.new_instances:
|
||||
_new_instance = self.context.create_instance(
|
||||
new_instance["name"])
|
||||
_new_instance.data.update(new_instance)
|
||||
|
||||
def duplicate_instance(self, instance_data, subset, new_subset):
|
||||
|
||||
new_instance_data = dict()
|
||||
for _key, _value in instance_data.items():
|
||||
new_instance_data[_key] = _value
|
||||
if not isinstance(_value, str):
|
||||
continue
|
||||
if subset in _value:
|
||||
new_instance_data[_key] = _value.replace(
|
||||
subset, new_subset)
|
||||
|
||||
self.log.info(f"Creating new instance: {new_instance_data['name']}")
|
||||
self.new_instances.append(new_instance_data)
|
||||
return new_instance_data
|
||||
|
||||
def create_representations(
|
||||
self, files_list, instance_data, staging_dir):
|
||||
""" Create representations from Collection object
|
||||
"""
|
||||
# collecting frames for later frame start/end reset
|
||||
frames = list()
|
||||
# break down Collection object to collections and reminders
|
||||
collections, remainder = clique.assemble(files_list)
|
||||
# add staging_dir to instance_data
|
||||
instance_data["stagingDir"] = staging_dir
|
||||
# add representations to instance_data
|
||||
instance_data["representations"] = list()
|
||||
|
||||
collection_head_name = None
|
||||
# loop trough collections and create representations
|
||||
for _collection in collections:
|
||||
ext = _collection.tail
|
||||
collection_head_name = _collection.head
|
||||
frame_start = list(_collection.indexes)[0]
|
||||
frame_end = list(_collection.indexes)[-1]
|
||||
repre_data = {
|
||||
"frameStart": frame_start,
|
||||
"frameEnd": frame_end,
|
||||
"name": ext[1:],
|
||||
"ext": ext[1:],
|
||||
"files": [item for item in _collection],
|
||||
"stagingDir": staging_dir
|
||||
}
|
||||
|
||||
if "review" in instance_data["families"]:
|
||||
repre_data.update({
|
||||
"thumbnail": True,
|
||||
"frameStartFtrack": frame_start,
|
||||
"frameEndFtrack": frame_end,
|
||||
"step": 1,
|
||||
"fps": self.context.data.get("fps"),
|
||||
"name": "review",
|
||||
"tags": ["review", "ftrackreview", "delete"],
|
||||
})
|
||||
instance_data["representations"].append(repre_data)
|
||||
|
||||
# add to frames for frame range reset
|
||||
frames.append(frame_start)
|
||||
frames.append(frame_end)
|
||||
|
||||
# loop trough reminders and create representations
|
||||
for _reminding_file in remainder:
|
||||
ext = os.path.splitext(_reminding_file)[-1]
|
||||
if ext not in instance_data["extensions"]:
|
||||
continue
|
||||
if collection_head_name and (
|
||||
(collection_head_name + ext[1:]) not in _reminding_file
|
||||
) and (ext in [".mp4", ".mov"]):
|
||||
self.log.info(f"Skipping file: {_reminding_file}")
|
||||
continue
|
||||
frame_start = 1
|
||||
frame_end = 1
|
||||
|
||||
repre_data = {
|
||||
"name": ext[1:],
|
||||
"ext": ext[1:],
|
||||
"files": _reminding_file,
|
||||
"stagingDir": staging_dir
|
||||
}
|
||||
|
||||
# exception for thumbnail
|
||||
if "thumb" in _reminding_file:
|
||||
repre_data.update({
|
||||
'name': "thumbnail",
|
||||
'thumbnail': True
|
||||
})
|
||||
|
||||
# exception for mp4 preview
|
||||
if ".mp4" in _reminding_file:
|
||||
frame_start = 0
|
||||
frame_end = (
|
||||
(instance_data["frameEnd"] - instance_data["frameStart"])
|
||||
+ 1)
|
||||
# add review ftrack family into families
|
||||
for _family in ["review", "ftrack"]:
|
||||
if _family not in instance_data["families"]:
|
||||
instance_data["families"].append(_family)
|
||||
repre_data.update({
|
||||
"frameStart": frame_start,
|
||||
"frameEnd": frame_end,
|
||||
"frameStartFtrack": frame_start,
|
||||
"frameEndFtrack": frame_end,
|
||||
"step": 1,
|
||||
"fps": self.context.data.get("fps"),
|
||||
"name": "review",
|
||||
"tags": ["review", "ftrackreview", "delete"],
|
||||
})
|
||||
|
||||
# add to frames for frame range reset only if no collection
|
||||
if not collections:
|
||||
frames.append(frame_start)
|
||||
frames.append(frame_end)
|
||||
|
||||
instance_data["representations"].append(repre_data)
|
||||
|
||||
# reset frame start / end
|
||||
instance_data["frameStart"] = min(frames)
|
||||
instance_data["frameEnd"] = max(frames)
|
||||
|
|
@ -1,15 +1,14 @@
|
|||
import os
|
||||
import opentimelineio as otio
|
||||
import tempfile
|
||||
import pyblish.api
|
||||
from pype import lib as plib
|
||||
|
||||
|
||||
class CollectClipInstances(pyblish.api.InstancePlugin):
|
||||
"""Collect Clips instances from editorial's OTIO sequence"""
|
||||
class CollectInstances(pyblish.api.InstancePlugin):
|
||||
"""Collect instances from editorial's OTIO sequence"""
|
||||
|
||||
order = pyblish.api.CollectorOrder + 0.01
|
||||
label = "Collect Clips"
|
||||
label = "Collect Instances"
|
||||
hosts = ["standalonepublisher"]
|
||||
families = ["editorial"]
|
||||
|
||||
|
|
@ -18,31 +17,31 @@ class CollectClipInstances(pyblish.api.InstancePlugin):
|
|||
"referenceMain": {
|
||||
"family": "review",
|
||||
"families": ["clip", "ftrack"],
|
||||
# "ftrackFamily": "review",
|
||||
"extension": ".mp4"
|
||||
"extensions": [".mp4"]
|
||||
},
|
||||
"audioMain": {
|
||||
"family": "audio",
|
||||
"families": ["clip", "ftrack"],
|
||||
# "ftrackFamily": "audio",
|
||||
"extension": ".wav",
|
||||
# "version": 1
|
||||
"extensions": [".wav"],
|
||||
},
|
||||
"shotMain": {
|
||||
"family": "shot",
|
||||
"families": []
|
||||
}
|
||||
}
|
||||
timeline_frame_offset = None # if 900000 for edl default then -900000
|
||||
timeline_frame_start = 900000 # starndard edl default (10:00:00:00)
|
||||
timeline_frame_offset = None
|
||||
custom_start_frame = None
|
||||
|
||||
def process(self, instance):
|
||||
staging_dir = os.path.normpath(
|
||||
tempfile.mkdtemp(prefix="pyblish_tmp_")
|
||||
)
|
||||
# get context
|
||||
context = instance.context
|
||||
|
||||
instance_data_filter = [
|
||||
"editorialSourceRoot",
|
||||
"editorialSourcePath"
|
||||
]
|
||||
|
||||
# attribute for checking duplicates during creation
|
||||
if not context.data.get("assetNameCheck"):
|
||||
context.data["assetNameCheck"] = list()
|
||||
|
|
@ -68,15 +67,19 @@ class CollectClipInstances(pyblish.api.InstancePlugin):
|
|||
handle_start = int(asset_data["handleStart"])
|
||||
handle_end = int(asset_data["handleEnd"])
|
||||
|
||||
instances = []
|
||||
for track in tracks:
|
||||
self.log.debug(f"track.name: {track.name}")
|
||||
try:
|
||||
track_start_frame = (
|
||||
abs(track.source_range.start_time.value)
|
||||
)
|
||||
self.log.debug(f"track_start_frame: {track_start_frame}")
|
||||
track_start_frame -= self.timeline_frame_start
|
||||
except AttributeError:
|
||||
track_start_frame = 0
|
||||
|
||||
self.log.debug(f"track_start_frame: {track_start_frame}")
|
||||
|
||||
for clip in track.each_child():
|
||||
if clip.name is None:
|
||||
continue
|
||||
|
|
@ -103,7 +106,10 @@ class CollectClipInstances(pyblish.api.InstancePlugin):
|
|||
|
||||
# frame ranges data
|
||||
clip_in = clip.range_in_parent().start_time.value
|
||||
clip_in += track_start_frame
|
||||
clip_out = clip.range_in_parent().end_time_inclusive().value
|
||||
clip_out += track_start_frame
|
||||
self.log.info(f"clip_in: {clip_in} | clip_out: {clip_out}")
|
||||
|
||||
# add offset in case there is any
|
||||
if self.timeline_frame_offset:
|
||||
|
|
@ -131,14 +137,11 @@ class CollectClipInstances(pyblish.api.InstancePlugin):
|
|||
|
||||
# create shared new instance data
|
||||
instance_data = {
|
||||
"stagingDir": staging_dir,
|
||||
|
||||
# shared attributes
|
||||
"asset": name,
|
||||
"assetShareName": name,
|
||||
"editorialVideoPath": instance.data[
|
||||
"editorialVideoPath"],
|
||||
"item": clip,
|
||||
"clipName": clip_name,
|
||||
|
||||
# parent time properties
|
||||
"trackStartFrame": track_start_frame,
|
||||
|
|
@ -167,6 +170,10 @@ class CollectClipInstances(pyblish.api.InstancePlugin):
|
|||
"frameEndH": frame_end + handle_end
|
||||
}
|
||||
|
||||
for data_key in instance_data_filter:
|
||||
instance_data.update({
|
||||
data_key: instance.data.get(data_key)})
|
||||
|
||||
# adding subsets to context as instances
|
||||
for subset, properities in self.subsets.items():
|
||||
# adding reviewable instance
|
||||
|
|
@ -174,14 +181,20 @@ class CollectClipInstances(pyblish.api.InstancePlugin):
|
|||
subset_instance_data.update(properities)
|
||||
subset_instance_data.update({
|
||||
# unique attributes
|
||||
"name": f"{subset}_{name}",
|
||||
"label": f"{subset} {name} ({clip_in}-{clip_out})",
|
||||
"name": f"{name}_{subset}",
|
||||
"label": f"{name} {subset} ({clip_in}-{clip_out})",
|
||||
"subset": subset
|
||||
})
|
||||
instances.append(instance.context.create_instance(
|
||||
**subset_instance_data))
|
||||
# create new instance
|
||||
_instance = instance.context.create_instance(
|
||||
**subset_instance_data)
|
||||
self.log.debug(
|
||||
f"Instance: `{_instance}` | "
|
||||
f"families: `{subset_instance_data['families']}`")
|
||||
|
||||
context.data["assetsShared"][name] = {
|
||||
"_clipIn": clip_in,
|
||||
"_clipOut": clip_out
|
||||
}
|
||||
|
||||
self.log.debug("Instance: `{}` | families: `{}`")
|
||||
|
|
@ -1,92 +0,0 @@
|
|||
import os
|
||||
import clique
|
||||
import pype.api
|
||||
|
||||
from pprint import pformat
|
||||
|
||||
|
||||
class ExtractShotData(pype.api.Extractor):
|
||||
"""Extract shot "mov" and "wav" files."""
|
||||
|
||||
label = "Extract Shot Data"
|
||||
hosts = ["standalonepublisher"]
|
||||
families = ["clip"]
|
||||
|
||||
# presets
|
||||
|
||||
def process(self, instance):
|
||||
representation = instance.data.get("representations")
|
||||
self.log.debug(f"_ representation: {representation}")
|
||||
|
||||
if not representation:
|
||||
instance.data["representations"] = list()
|
||||
|
||||
# get ffmpet path
|
||||
ffmpeg_path = pype.lib.get_ffmpeg_tool_path("ffmpeg")
|
||||
|
||||
# get staging dir
|
||||
staging_dir = self.staging_dir(instance)
|
||||
self.log.info("Staging dir set to: `{}`".format(staging_dir))
|
||||
|
||||
# Generate mov file.
|
||||
fps = instance.data["fps"]
|
||||
video_file_path = instance.data["editorialVideoPath"]
|
||||
ext = instance.data.get("extension", ".mov")
|
||||
|
||||
clip_trimed_path = os.path.join(
|
||||
staging_dir, instance.data["name"] + ext)
|
||||
#
|
||||
# # check video file metadata
|
||||
# input_data = plib.ffprobe_streams(video_file_path)[0]
|
||||
# self.log.debug(f"__ input_data: `{input_data}`")
|
||||
|
||||
start = float(instance.data["clipInH"])
|
||||
dur = float(instance.data["clipDurationH"])
|
||||
|
||||
if ext in ".wav":
|
||||
start += 0.5
|
||||
|
||||
args = [
|
||||
"\"{}\"".format(ffmpeg_path),
|
||||
"-ss", str(start / fps),
|
||||
"-i", f"\"{video_file_path}\"",
|
||||
"-t", str(dur / fps)
|
||||
]
|
||||
if ext in [".mov", ".mp4"]:
|
||||
args.extend([
|
||||
"-crf", "18",
|
||||
"-pix_fmt", "yuv420p"])
|
||||
elif ext in ".wav":
|
||||
args.extend([
|
||||
"-vn -acodec pcm_s16le",
|
||||
"-ar 48000 -ac 2"
|
||||
])
|
||||
|
||||
# add output path
|
||||
args.append(f"\"{clip_trimed_path}\"")
|
||||
|
||||
self.log.info(f"Processing: {args}")
|
||||
ffmpeg_args = " ".join(args)
|
||||
output = pype.api.subprocess(ffmpeg_args, shell=True)
|
||||
self.log.info(output)
|
||||
|
||||
repr = {
|
||||
"name": ext[1:],
|
||||
"ext": ext[1:],
|
||||
"files": os.path.basename(clip_trimed_path),
|
||||
"stagingDir": staging_dir,
|
||||
"frameStart": int(instance.data["frameStart"]),
|
||||
"frameEnd": int(instance.data["frameEnd"]),
|
||||
"frameStartFtrack": int(instance.data["frameStartH"]),
|
||||
"frameEndFtrack": int(instance.data["frameEndH"]),
|
||||
"fps": fps,
|
||||
}
|
||||
|
||||
if ext[1:] in ["mov", "mp4"]:
|
||||
repr.update({
|
||||
"thumbnail": True,
|
||||
"tags": ["review", "ftrackreview", "delete"]})
|
||||
|
||||
instance.data["representations"].append(repr)
|
||||
|
||||
self.log.debug(f"Instance data: {pformat(instance.data)}")
|
||||
|
|
@ -46,6 +46,7 @@ class ExtractThumbnailSP(pyblish.api.InstancePlugin):
|
|||
files_len = 1
|
||||
file = files
|
||||
|
||||
staging_dir = None
|
||||
is_jpeg = False
|
||||
if file.endswith(".jpeg") or file.endswith(".jpg"):
|
||||
is_jpeg = True
|
||||
|
|
@ -106,7 +107,7 @@ class ExtractThumbnailSP(pyblish.api.InstancePlugin):
|
|||
thumbnail_repre.pop("thumbnail")
|
||||
|
||||
filename = os.path.basename(full_thumbnail_path)
|
||||
staging_dir = os.path.dirname(full_thumbnail_path)
|
||||
staging_dir = staging_dir or os.path.dirname(full_thumbnail_path)
|
||||
|
||||
# create new thumbnail representation
|
||||
representation = {
|
||||
|
|
@ -121,4 +122,5 @@ class ExtractThumbnailSP(pyblish.api.InstancePlugin):
|
|||
if not is_jpeg:
|
||||
representation["tags"].append("delete")
|
||||
|
||||
self.log.info(f"New representation {representation}")
|
||||
instance.data["representations"].append(representation)
|
||||
|
|
|
|||
|
|
@ -0,0 +1,105 @@
|
|||
import os
|
||||
import pyblish.api
|
||||
import pype.api
|
||||
|
||||
from pprint import pformat
|
||||
|
||||
|
||||
class ExtractTrimVideoAudio(pype.api.Extractor):
|
||||
"""Trim with ffmpeg "mov" and "wav" files."""
|
||||
|
||||
# must be before `ExtractThumbnailSP`
|
||||
order = pyblish.api.ExtractorOrder - 0.01
|
||||
label = "Extract Trim Video/Audio"
|
||||
hosts = ["standalonepublisher"]
|
||||
families = ["clip", "trimming"]
|
||||
|
||||
# run only if the instance has both families
|
||||
match = pyblish.api.Subset
|
||||
|
||||
# presets
|
||||
|
||||
def process(self, instance):
|
||||
representation = instance.data.get("representations")
|
||||
self.log.debug(f"_ representation: {representation}")
|
||||
|
||||
if not representation:
|
||||
instance.data["representations"] = list()
|
||||
|
||||
# get ffmpeg path
|
||||
ffmpeg_path = pype.lib.get_ffmpeg_tool_path("ffmpeg")
|
||||
|
||||
# get staging dir
|
||||
staging_dir = self.staging_dir(instance)
|
||||
self.log.info("Staging dir set to: `{}`".format(staging_dir))
|
||||
|
||||
# Generate mov file.
|
||||
fps = instance.data["fps"]
|
||||
video_file_path = instance.data["editorialSourcePath"]
|
||||
extensions = instance.data.get("extensions", [".mov"])
|
||||
|
||||
for ext in extensions:
|
||||
self.log.info("Processing ext: `{}`".format(ext))
|
||||
|
||||
clip_trimed_path = os.path.join(
|
||||
staging_dir, instance.data["name"] + ext)
|
||||
# # check video file metadata
|
||||
# input_data = plib.ffprobe_streams(video_file_path)[0]
|
||||
# self.log.debug(f"__ input_data: `{input_data}`")
|
||||
|
||||
start = float(instance.data["clipInH"])
|
||||
dur = float(instance.data["clipDurationH"])
|
||||
|
||||
if ext == ".wav":
|
||||
# offset start time to work around an ffmpeg bug
|
||||
start += 0.5
|
||||
# remove "review" from families
|
||||
instance.data["families"] = [
|
||||
fml for fml in instance.data["families"]
|
||||
if "trimming" not in fml
|
||||
]
|
||||
|
||||
args = [
|
||||
ffmpeg_path,
|
||||
"-ss", str(start / fps),
|
||||
"-i", f"\"{video_file_path}\"",
|
||||
"-t", str(dur / fps)
|
||||
]
|
||||
if ext in [".mov", ".mp4"]:
|
||||
args.extend([
|
||||
"-crf", "18",
|
||||
"-pix_fmt", "yuv420p"])
|
||||
elif ext == ".wav":
|
||||
args.extend([
|
||||
"-vn -acodec pcm_s16le",
|
||||
"-ar 48000 -ac 2"
|
||||
])
|
||||
|
||||
# add output path
|
||||
args.append(f"\"{clip_trimed_path}\"")
|
||||
|
||||
self.log.info(f"Processing: {args}")
|
||||
ffmpeg_args = " ".join(args)
|
||||
output = pype.api.subprocess(ffmpeg_args, shell=True)
|
||||
self.log.info(output)
|
||||
|
||||
repr = {
|
||||
"name": ext[1:],
|
||||
"ext": ext[1:],
|
||||
"files": os.path.basename(clip_trimed_path),
|
||||
"stagingDir": staging_dir,
|
||||
"frameStart": int(instance.data["frameStart"]),
|
||||
"frameEnd": int(instance.data["frameEnd"]),
|
||||
"frameStartFtrack": int(instance.data["frameStartH"]),
|
||||
"frameEndFtrack": int(instance.data["frameEndH"]),
|
||||
"fps": fps,
|
||||
}
|
||||
|
||||
if ext in [".mov", ".mp4"]:
|
||||
repr.update({
|
||||
"thumbnail": True,
|
||||
"tags": ["review", "ftrackreview", "delete"]})
|
||||
|
||||
instance.data["representations"].append(repr)
|
||||
|
||||
self.log.debug(f"Instance data: {pformat(instance.data)}")
|
||||
|
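The extractor above effectively runs a plain `ffmpeg -ss <start> -i <source> -t <duration>` trim, converting frames to seconds via the clip fps. A standalone sketch of the same command; the paths and frame values below are illustrative only:

import subprocess

fps = 25.0
clip_in_h = 100           # first frame including handles (illustrative)
clip_duration_h = 50      # duration in frames including handles (illustrative)
source = "/path/to/editorial_source.mov"   # hypothetical source path
output = "/tmp/shot010_trimmed.mov"        # hypothetical output path

cmd = [
    "ffmpeg",
    "-ss", str(clip_in_h / fps),        # seek to start, in seconds
    "-i", source,
    "-t", str(clip_duration_h / fps),   # duration, in seconds
    "-crf", "18", "-pix_fmt", "yuv420p",
    output,
]
subprocess.run(cmd, check=True)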
|
@ -7,7 +7,10 @@ class ValidateEditorialResources(pyblish.api.InstancePlugin):
|
|||
|
||||
label = "Validate Editorial Resources"
|
||||
hosts = ["standalonepublisher"]
|
||||
families = ["clip"]
|
||||
families = ["clip", "trimming"]
|
||||
|
||||
# run only if the instance has both families
|
||||
match = pyblish.api.Subset
|
||||
|
||||
order = pype.api.ValidateContentsOrder
|
||||
|
||||
|
|
@ -15,6 +18,6 @@ class ValidateEditorialResources(pyblish.api.InstancePlugin):
|
|||
self.log.debug(
|
||||
f"Instance: {instance}, Families: "
|
||||
f"{[instance.data['family']] + instance.data['families']}")
|
||||
check_file = instance.data["editorialVideoPath"]
|
||||
check_file = instance.data["editorialSourcePath"]
|
||||
msg = f"Missing \"{check_file}\"."
|
||||
assert check_file, msg
|
||||
|
|
|
|||
pype/plugins/tvpaint/create/create_render_layer.py (new file, 150 lines)
|
|
@ -0,0 +1,150 @@
|
|||
from avalon.tvpaint import pipeline, lib
|
||||
|
||||
|
||||
class CreateRenderlayer(pipeline.Creator):
|
||||
"""Mark layer group as one instance."""
|
||||
name = "render_layer"
|
||||
label = "RenderLayer"
|
||||
family = "renderLayer"
|
||||
icon = "cube"
|
||||
defaults = ["Main"]
|
||||
|
||||
rename_group = True
|
||||
|
||||
subset_template = "{family}_{name}"
|
||||
rename_script_template = (
|
||||
"tv_layercolor \"setcolor\""
|
||||
" {clip_id} {group_id} {r} {g} {b} \"{name}\""
|
||||
)
|
||||
|
||||
def process(self):
|
||||
self.log.debug("Query data from workfile.")
|
||||
instances = pipeline.list_instances()
|
||||
layers_data = lib.layers_data()
|
||||
|
||||
self.log.debug("Checking for selection groups.")
|
||||
# Collect group ids from selection
|
||||
group_ids = set()
|
||||
for layer in layers_data:
|
||||
if layer["selected"]:
|
||||
group_ids.add(layer["group_id"])
|
||||
|
||||
# Raise if there is no selection
|
||||
if not group_ids:
|
||||
raise AssertionError("Nothing is selected.")
|
||||
|
||||
# This creator should run only on one group
|
||||
if len(group_ids) > 1:
|
||||
raise AssertionError("More than one group is in selection.")
|
||||
|
||||
group_id = tuple(group_ids)[0]
|
||||
# If group id is `0` it is `default` group which is invalid
|
||||
if group_id == 0:
|
||||
raise AssertionError(
|
||||
"Selection is not in group. Can't mark selection as Beauty."
|
||||
)
|
||||
|
||||
self.log.debug(f"Selected group id is \"{group_id}\".")
|
||||
self.data["group_id"] = group_id
|
||||
|
||||
family = self.data["family"]
|
||||
# Extract entered name
|
||||
name = self.data["subset"][len(family):]
|
||||
self.log.info(f"Extracted name from subset name \"{name}\".")
|
||||
self.data["name"] = name
|
||||
|
||||
# Change subset name by template
|
||||
subset_name = self.subset_template.format(**{
|
||||
"family": self.family,
|
||||
"name": name
|
||||
})
|
||||
self.log.info(f"New subset name \"{subset_name}\".")
|
||||
self.data["subset"] = subset_name
|
||||
|
||||
# Check for instances of same group
|
||||
existing_instance = None
|
||||
existing_instance_idx = None
|
||||
# Check if subset name is not already taken
|
||||
same_subset_instance = None
|
||||
same_subset_instance_idx = None
|
||||
for idx, instance in enumerate(instances):
|
||||
if instance["family"] == family:
|
||||
if instance["group_id"] == group_id:
|
||||
existing_instance = instance
|
||||
existing_instance_idx = idx
|
||||
elif instance["subset"] == subset_name:
|
||||
same_subset_instance = instance
|
||||
same_subset_instance_idx = idx
|
||||
|
||||
if (
|
||||
same_subset_instance_idx is not None
|
||||
and existing_instance_idx is not None
|
||||
):
|
||||
break
|
||||
|
||||
if same_subset_instance_idx is not None:
|
||||
if self._ask_user_subset_override(same_subset_instance):
|
||||
instances.pop(same_subset_instance_idx)
|
||||
else:
|
||||
return
|
||||
|
||||
if existing_instance is not None:
|
||||
self.log.info(
|
||||
f"Beauty instance for group id {group_id} already exists"
|
||||
", overriding"
|
||||
)
|
||||
instances[existing_instance_idx] = self.data
|
||||
else:
|
||||
instances.append(self.data)
|
||||
|
||||
self.write_instances(instances)
|
||||
|
||||
if not self.rename_group:
|
||||
self.log.info("Group rename function is turned off. Skipping")
|
||||
return
|
||||
|
||||
self.log.debug("Querying groups data from workfile.")
|
||||
groups_data = lib.groups_data()
|
||||
|
||||
self.log.debug("Changing name of the group.")
|
||||
selected_group = None
|
||||
for group_data in groups_data:
|
||||
if group_data["group_id"] == group_id:
|
||||
selected_group = group_data
|
||||
|
||||
# Rename TVPaint group (keep color same)
|
||||
# - groups can't contain spaces
|
||||
new_group_name = name.replace(" ", "_")
|
||||
rename_script = self.rename_script_template.format(
|
||||
clip_id=selected_group["clip_id"],
|
||||
group_id=selected_group["group_id"],
|
||||
r=selected_group["red"],
|
||||
g=selected_group["green"],
|
||||
b=selected_group["blue"],
|
||||
name=new_group_name
|
||||
)
|
||||
lib.execute_george_through_file(rename_script)
|
||||
|
||||
self.log.info(
|
||||
f"Name of group with index {group_id}"
|
||||
f" was changed to \"{new_group_name}\"."
|
||||
)
|
||||
|
||||
def _ask_user_subset_override(self, instance):
|
||||
from Qt.QtWidgets import QMessageBox
|
||||
|
||||
title = "Subset \"{}\" already exist".format(instance["subset"])
|
||||
text = (
|
||||
"Instance with subset name \"{}\" already exists."
|
||||
"\n\nDo you want to override existing?"
|
||||
).format(instance["subset"])
|
||||
|
||||
dialog = QMessageBox()
|
||||
dialog.setWindowTitle(title)
|
||||
dialog.setText(text)
|
||||
dialog.setStandardButtons(QMessageBox.Yes | QMessageBox.No)
|
||||
dialog.setDefaultButton(QMessageBox.Yes)
|
||||
dialog.exec_()
|
||||
if dialog.result() == QMessageBox.Yes:
|
||||
return True
|
||||
return False
|
||||
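The subset renaming in the creator above is a plain string operation: the family prefix is stripped from the subset the artist entered and the creator's own template is applied. A worked example with an illustrative entered subset:

family = "renderLayer"
entered_subset = "renderLayerMain"        # what the artist typed (illustrative)
name = entered_subset[len(family):]       # -> "Main"
subset_template = "{family}_{name}"
subset = subset_template.format(family=family, name=name)
print(subset)                             # -> "renderLayer_Main"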
pype/plugins/tvpaint/create/create_render_pass.py (new file, 105 lines)
|
|
@ -0,0 +1,105 @@
|
|||
from avalon.tvpaint import pipeline, lib
|
||||
|
||||
|
||||
class CreateRenderPass(pipeline.Creator):
|
||||
"""Render pass is combination of one or more layers from same group.
|
||||
|
||||
Requirement to create Render Pass is to have already created beauty
|
||||
instance. Beauty instance is used as base for subset name.
|
||||
"""
|
||||
name = "render_pass"
|
||||
label = "RenderPass"
|
||||
family = "renderPass"
|
||||
icon = "cube"
|
||||
defaults = ["Main"]
|
||||
|
||||
subset_template = "{family}_{render_layer}_{pass}"
|
||||
|
||||
def process(self):
|
||||
self.log.debug("Query data from workfile.")
|
||||
instances = pipeline.list_instances()
|
||||
layers_data = lib.layers_data()
|
||||
|
||||
self.log.debug("Checking selection.")
|
||||
# Get all selected layers and their group ids
|
||||
group_ids = set()
|
||||
selected_layers = []
|
||||
for layer in layers_data:
|
||||
if layer["selected"]:
|
||||
selected_layers.append(layer)
|
||||
group_ids.add(layer["group_id"])
|
||||
|
||||
# Raise if nothing is selected
|
||||
if not selected_layers:
|
||||
raise AssertionError("Nothing is selected.")
|
||||
|
||||
# Raise if layers from multiple groups are selected
|
||||
if len(group_ids) != 1:
|
||||
raise AssertionError("More than one group is in selection.")
|
||||
|
||||
group_id = tuple(group_ids)[0]
|
||||
self.log.debug(f"Selected group id is \"{group_id}\".")
|
||||
|
||||
# Find beauty instance for selected layers
|
||||
beauty_instance = None
|
||||
for instance in instances:
|
||||
if (
|
||||
instance["family"] == "renderLayer"
|
||||
and instance["group_id"] == group_id
|
||||
):
|
||||
beauty_instance = instance
|
||||
break
|
||||
|
||||
# Beauty is required for this creator so raise if was not found
|
||||
if beauty_instance is None:
|
||||
raise AssertionError("Beauty pass does not exist yet.")
|
||||
|
||||
render_layer = beauty_instance["name"]
|
||||
|
||||
# Extract entered name
|
||||
family = self.data["family"]
|
||||
name = self.data["subset"]
|
||||
# Is this the right way to get the name?
|
||||
name = name[len(family):]
|
||||
self.log.info(f"Extracted name from subset name \"{name}\".")
|
||||
|
||||
self.data["group_id"] = group_id
|
||||
self.data["pass"] = name
|
||||
self.data["render_layer"] = render_layer
|
||||
|
||||
# Collect selected layer ids to be stored into instance
|
||||
layer_ids = [layer["layer_id"] for layer in selected_layers]
|
||||
self.data["layer_ids"] = layer_ids
|
||||
|
||||
# Replace `beauty` in beauty's subset name with entered name
|
||||
subset_name = self.subset_template.format(**{
|
||||
"family": family,
|
||||
"render_layer": render_layer,
|
||||
"pass": name
|
||||
})
|
||||
self.data["subset"] = subset_name
|
||||
self.log.info(f"New subset name is \"{subset_name}\".")
|
||||
|
||||
# Check if same instance already exists
|
||||
existing_instance = None
|
||||
existing_instance_idx = None
|
||||
for idx, instance in enumerate(instances):
|
||||
if (
|
||||
instance["family"] == family
|
||||
and instance["group_id"] == group_id
|
||||
and instance["pass"] == name
|
||||
):
|
||||
existing_instance = instance
|
||||
existing_instance_idx = idx
|
||||
break
|
||||
|
||||
if existing_instance is not None:
|
||||
self.log.info(
|
||||
f"Render pass instance for group id {group_id}"
|
||||
f" and name \"{name}\" already exists, overriding."
|
||||
)
|
||||
instances[existing_instance_idx] = self.data
|
||||
else:
|
||||
instances.append(self.data)
|
||||
|
||||
self.write_instances(instances)
|
||||
pype/plugins/tvpaint/create/create_review.py (new file, 18 lines)
|
|
@ -0,0 +1,18 @@
|
|||
from avalon.tvpaint import pipeline
|
||||
|
||||
|
||||
class CreateReview(pipeline.Creator):
|
||||
"""Review for global review of all layers."""
|
||||
name = "review"
|
||||
label = "Review"
|
||||
family = "review"
|
||||
icon = "cube"
|
||||
defaults = ["Main"]
|
||||
|
||||
def process(self):
|
||||
instances = pipeline.list_instances()
|
||||
for instance in instances:
|
||||
if instance["family"] == self.family:
|
||||
self.log.info("Review family is already Created.")
|
||||
return
|
||||
super(CreateReview, self).process()
|
||||
|
|
@ -1,9 +1,8 @@
|
|||
from avalon import api
|
||||
from avalon.vendor import qargparse
|
||||
from avalon.tvpaint import CommunicatorWrapper
|
||||
from avalon.tvpaint import lib, pipeline
|
||||
|
||||
|
||||
class ImportImage(api.Loader):
|
||||
class ImportImage(pipeline.Loader):
|
||||
"""Load image or image sequence to TVPaint as new layer."""
|
||||
|
||||
families = ["render", "image", "background", "plate"]
|
||||
|
|
@ -80,4 +79,4 @@ class ImportImage(api.Loader):
|
|||
layer_name,
|
||||
load_options_str
|
||||
)
|
||||
return CommunicatorWrapper.execute_george_through_file(george_script)
|
||||
return lib.execute_george_through_file(george_script)
|
||||
|
|
|
|||
pype/plugins/tvpaint/load/load_reference_image.py (new file, 244 lines)
|
|
@ -0,0 +1,244 @@
|
|||
from avalon.pipeline import get_representation_context
|
||||
from avalon.vendor import qargparse
|
||||
from avalon.tvpaint import lib, pipeline
|
||||
|
||||
|
||||
class LoadImage(pipeline.Loader):
|
||||
"""Load image or image sequence to TVPaint as new layer."""
|
||||
|
||||
families = ["render", "image", "background", "plate"]
|
||||
representations = ["*"]
|
||||
|
||||
label = "Load Image"
|
||||
order = 1
|
||||
icon = "image"
|
||||
color = "white"
|
||||
|
||||
import_script = (
|
||||
"filepath = \"{}\"\n"
|
||||
"layer_name = \"{}\"\n"
|
||||
"tv_loadsequence filepath {}PARSE layer_id\n"
|
||||
"tv_layerrename layer_id layer_name"
|
||||
)
|
||||
|
||||
defaults = {
|
||||
"stretch": True,
|
||||
"timestretch": True,
|
||||
"preload": True
|
||||
}
|
||||
|
||||
options = [
|
||||
qargparse.Boolean(
|
||||
"stretch",
|
||||
label="Stretch to project size",
|
||||
default=True,
|
||||
help="Stretch loaded image/s to project resolution?"
|
||||
),
|
||||
qargparse.Boolean(
|
||||
"timestretch",
|
||||
label="Stretch to timeline length",
|
||||
default=True,
|
||||
help="Clip loaded image/s to timeline length?"
|
||||
),
|
||||
qargparse.Boolean(
|
||||
"preload",
|
||||
label="Preload loaded image/s",
|
||||
default=True,
|
||||
help="Preload image/s?"
|
||||
)
|
||||
]
|
||||
|
||||
def load(self, context, name, namespace, options):
|
||||
stretch = options.get("stretch", self.defaults["stretch"])
|
||||
timestretch = options.get("timestretch", self.defaults["timestretch"])
|
||||
preload = options.get("preload", self.defaults["preload"])
|
||||
|
||||
load_options = []
|
||||
if stretch:
|
||||
load_options.append("\"STRETCH\"")
|
||||
if timestretch:
|
||||
load_options.append("\"TIMESTRETCH\"")
|
||||
if preload:
|
||||
load_options.append("\"PRELOAD\"")
|
||||
|
||||
load_options_str = ""
|
||||
for load_option in load_options:
|
||||
load_options_str += (load_option + " ")
|
||||
|
||||
# Prepare layer name
|
||||
asset_name = context["asset"]["name"]
|
||||
subset_name = context["subset"]["name"]
|
||||
layer_name = self.get_unique_layer_name(asset_name, subset_name)
|
||||
|
||||
# Fill import script with filename and layer name
|
||||
# - filename must not contain backslashes
|
||||
george_script = self.import_script.format(
|
||||
self.fname.replace("\\", "/"),
|
||||
layer_name,
|
||||
load_options_str
|
||||
)
|
||||
|
||||
lib.execute_george_through_file(george_script)
|
||||
|
||||
loaded_layer = None
|
||||
layers = lib.layers_data()
|
||||
for layer in layers:
|
||||
if layer["name"] == layer_name:
|
||||
loaded_layer = layer
|
||||
break
|
||||
|
||||
if loaded_layer is None:
|
||||
raise AssertionError(
|
||||
"Loading probably failed during execution of george script."
|
||||
)
|
||||
|
||||
layer_ids = [loaded_layer["layer_id"]]
|
||||
namespace = namespace or layer_name
|
||||
return pipeline.containerise(
|
||||
name=name,
|
||||
namespace=namespace,
|
||||
layer_ids=layer_ids,
|
||||
context=context,
|
||||
loader=self.__class__.__name__
|
||||
)
|
||||
|
||||
def _remove_layers(self, layer_ids, layers=None):
|
||||
if not layer_ids:
|
||||
return
|
||||
|
||||
if layers is None:
|
||||
layers = lib.layers_data()
|
||||
|
||||
available_ids = set(layer["layer_id"] for layer in layers)
|
||||
layer_ids_to_remove = []
|
||||
|
||||
for layer_id in layer_ids:
|
||||
if layer_id in available_ids:
|
||||
layer_ids_to_remove.append(layer_id)
|
||||
|
||||
if not layer_ids_to_remove:
|
||||
return
|
||||
|
||||
george_script_lines = []
|
||||
for layer_id in layer_ids_to_remove:
|
||||
line = "tv_layerkill {}".format(layer_id)
|
||||
george_script_lines.append(line)
|
||||
george_script = "\n".join(george_script_lines)
|
||||
lib.execute_george_through_file(george_script)
|
||||
|
||||
def remove(self, container):
|
||||
layer_ids = self.layer_ids_from_container(container)
|
||||
self._remove_layers(layer_ids)
|
||||
|
||||
current_containers = pipeline.ls()
|
||||
pop_idx = None
|
||||
for idx, cur_con in enumerate(current_containers):
|
||||
if cur_con["objectName"] == container["objectName"]:
|
||||
pop_idx = idx
|
||||
break
|
||||
|
||||
if pop_idx is None:
|
||||
self.log.warning(
|
||||
"Didn't found container in workfile containers. {}".format(
|
||||
container
|
||||
)
|
||||
)
|
||||
return
|
||||
|
||||
current_containers.pop(pop_idx)
|
||||
pipeline.write_workfile_metadata(
|
||||
pipeline.SECTION_NAME_CONTAINERS, current_containers
|
||||
)
|
||||
|
||||
def switch(self, container, representation):
|
||||
self.update(container, representation)
|
||||
|
||||
def update(self, container, representation):
|
||||
"""Replace container with different version.
|
||||
|
||||
New layers are loaded first. Then data from the old layers is applied
|
||||
to the new layers. When that is done, the old layers are
|
||||
removed.
|
||||
"""
|
||||
# Create new containers first
|
||||
context = get_representation_context(representation)
|
||||
name = container["name"]
|
||||
namespace = container["namespace"]
|
||||
new_container = self.load(context, name, namespace, {})
|
||||
new_layer_ids = self.layer_ids_from_container(new_container)
|
||||
|
||||
# Get layer ids from previous container
|
||||
old_layer_ids = self.layer_ids_from_container(container)
|
||||
|
||||
layers = lib.layers_data()
|
||||
layers_by_id = {
|
||||
layer["layer_id"]: layer
|
||||
for layer in layers
|
||||
}
|
||||
|
||||
old_layers = []
|
||||
new_layers = []
|
||||
for layer_id in old_layer_ids:
|
||||
layer = layers_by_id.get(layer_id)
|
||||
if layer:
|
||||
old_layers.append(layer)
|
||||
|
||||
for layer_id in new_layer_ids:
|
||||
layer = layers_by_id.get(layer_id)
|
||||
if layer:
|
||||
new_layers.append(layer)
|
||||
|
||||
# Prepare a few values from the old layers
|
||||
new_start_position = None
|
||||
new_group_id = None
|
||||
for layer in old_layers:
|
||||
position = layer["position"]
|
||||
group_id = layer["group_id"]
|
||||
if new_start_position is None:
|
||||
new_start_position = position
|
||||
elif new_start_position > position:
|
||||
new_start_position = position
|
||||
|
||||
if new_group_id is None:
|
||||
new_group_id = group_id
|
||||
elif new_group_id < 0:
|
||||
continue
|
||||
elif new_group_id != group_id:
|
||||
new_group_id = -1
|
||||
|
||||
george_script_lines = []
|
||||
# Group new layers to same group as previous container layers had
|
||||
# - all old layers must be under same group
|
||||
if new_group_id is not None and new_group_id > 0:
|
||||
for layer in new_layers:
|
||||
line = "tv_layercolor \"set\" {} {}".format(
|
||||
layer["layer_id"], new_group_id
|
||||
)
|
||||
george_script_lines.append(line)
|
||||
|
||||
# Rename new layer to have same name
|
||||
# - only if both old and new have one layer
|
||||
if len(old_layers) == 1 and len(new_layers) == 1:
|
||||
layer_name = old_layers[0]["name"]
|
||||
george_script_lines.append(
|
||||
"tv_layerrename {} \"{}\"".format(
|
||||
new_layers[0]["layer_id"], layer_name
|
||||
)
|
||||
)
|
||||
|
||||
# Change position of new layer
|
||||
# - this must be done before remove old layers
|
||||
if len(new_layers) == 1 and new_start_position is not None:
|
||||
new_layer = new_layers[0]
|
||||
george_script_lines.extend([
|
||||
"tv_layerset {}".format(new_layer["layer_id"]),
|
||||
"tv_layermove {}".format(new_start_position)
|
||||
])
|
||||
|
||||
# Execute george scripts if there are any
|
||||
if george_script_lines:
|
||||
george_script = "\n".join(george_script_lines)
|
||||
lib.execute_george_through_file(george_script)
|
||||
|
||||
# Remove old container
|
||||
self.remove(container)
|
||||
pype/plugins/tvpaint/publish/collect_instances.py (new file, 175 lines)
|
|
@ -0,0 +1,175 @@
|
|||
import json
|
||||
import copy
|
||||
import pyblish.api
|
||||
from avalon import io
|
||||
|
||||
|
||||
class CollectInstances(pyblish.api.ContextPlugin):
|
||||
label = "Collect Instances"
|
||||
order = pyblish.api.CollectorOrder - 1
|
||||
hosts = ["tvpaint"]
|
||||
|
||||
def process(self, context):
|
||||
workfile_instances = context.data["workfileInstances"]
|
||||
|
||||
self.log.debug("Collected ({}) instances:\n{}".format(
|
||||
len(workfile_instances),
|
||||
json.dumps(workfile_instances, indent=4)
|
||||
))
|
||||
|
||||
for instance_data in workfile_instances:
|
||||
instance_data["fps"] = context.data["fps"]
|
||||
|
||||
# Store workfile instance data to instance data
|
||||
instance_data["originData"] = copy.deepcopy(instance_data)
|
||||
# Global instance data modifications
|
||||
# Fill families
|
||||
family = instance_data["family"]
|
||||
# Add `review` family for thumbnail integration
|
||||
instance_data["families"] = [family, "review"]
|
||||
|
||||
# Instance name
|
||||
subset_name = instance_data["subset"]
|
||||
name = instance_data.get("name", subset_name)
|
||||
instance_data["name"] = name
|
||||
|
||||
active = instance_data.get("active", True)
|
||||
instance_data["active"] = active
|
||||
instance_data["publish"] = active
|
||||
# Add representations key
|
||||
instance_data["representations"] = []
|
||||
|
||||
# Different instance creation based on family
|
||||
instance = None
|
||||
if family == "review":
|
||||
# Change subset name
|
||||
task_name = io.Session["AVALON_TASK"]
|
||||
new_subset_name = "{}{}".format(family, task_name.capitalize())
|
||||
instance_data["subset"] = new_subset_name
|
||||
|
||||
instance = context.create_instance(**instance_data)
|
||||
instance.data["layers"] = context.data["layersData"]
|
||||
# Add ftrack family
|
||||
instance.data["families"].append("ftrack")
|
||||
|
||||
elif family == "renderLayer":
|
||||
instance = self.create_render_layer_instance(
|
||||
context, instance_data
|
||||
)
|
||||
elif family == "renderPass":
|
||||
instance = self.create_render_pass_instance(
|
||||
context, instance_data
|
||||
)
|
||||
else:
|
||||
raise AssertionError(
|
||||
"Instance with unknown family \"{}\": {}".format(
|
||||
family, instance_data
|
||||
)
|
||||
)
|
||||
|
||||
if instance is None:
|
||||
continue
|
||||
|
||||
frame_start = context.data["frameStart"]
|
||||
frame_end = frame_start
|
||||
for layer in instance.data["layers"]:
|
||||
_frame_end = layer["frame_end"]
|
||||
if _frame_end > frame_end:
|
||||
frame_end = _frame_end
|
||||
|
||||
instance.data["frameStart"] = frame_start
|
||||
instance.data["frameEnd"] = frame_end
|
||||
|
||||
self.log.debug("Created instance: {}\n{}".format(
|
||||
instance, json.dumps(instance.data, indent=4)
|
||||
))
|
||||
|
||||
def create_render_layer_instance(self, context, instance_data):
|
||||
name = instance_data["name"]
|
||||
# Change label
|
||||
subset_name = instance_data["subset"]
|
||||
instance_data["label"] = "{}_Beauty".format(name)
|
||||
|
||||
# Change subset name
|
||||
# Final family of an instance will be `render`
|
||||
new_family = "render"
|
||||
task_name = io.Session["AVALON_TASK"]
|
||||
new_subset_name = "{}{}_{}_Beauty".format(
|
||||
new_family, task_name.capitalize(), name
|
||||
)
|
||||
instance_data["subset"] = new_subset_name
|
||||
self.log.debug("Changed subset name \"{}\"->\"{}\"".format(
|
||||
subset_name, new_subset_name
|
||||
))
|
||||
|
||||
# Get all visible layers from the group
|
||||
layers_data = context.data["layersData"]
|
||||
group_id = instance_data["group_id"]
|
||||
group_layers = []
|
||||
for layer in layers_data:
|
||||
if layer["group_id"] == group_id and layer["visible"]:
|
||||
group_layers.append(layer)
|
||||
|
||||
if not group_layers:
|
||||
# Should be handled here?
|
||||
self.log.warning((
|
||||
f"Group with id {group_id} does not contain any layers."
|
||||
f" Instance \"{name}\" not created."
|
||||
))
|
||||
return None
|
||||
|
||||
instance_data["layers"] = group_layers
|
||||
|
||||
# Add ftrack family
|
||||
instance_data["families"].append("ftrack")
|
||||
|
||||
return context.create_instance(**instance_data)
|
||||
|
||||
def create_render_pass_instance(self, context, instance_data):
|
||||
pass_name = instance_data["pass"]
|
||||
self.log.info(
|
||||
"Creating render pass instance. \"{}\"".format(pass_name)
|
||||
)
|
||||
# Change label
|
||||
render_layer = instance_data["render_layer"]
|
||||
instance_data["label"] = "{}_{}".format(render_layer, pass_name)
|
||||
|
||||
# Change subset name
|
||||
# Final family of an instance will be `render`
|
||||
new_family = "render"
|
||||
old_subset_name = instance_data["subset"]
|
||||
task_name = io.Session["AVALON_TASK"]
|
||||
new_subset_name = "{}{}_{}_{}".format(
|
||||
new_family, task_name.capitalize(), render_layer, pass_name
|
||||
)
|
||||
instance_data["subset"] = new_subset_name
|
||||
self.log.debug("Changed subset name \"{}\"->\"{}\"".format(
|
||||
old_subset_name, new_subset_name
|
||||
))
|
||||
|
||||
layers_data = context.data["layersData"]
|
||||
layers_by_id = {
|
||||
layer["layer_id"]: layer
|
||||
for layer in layers_data
|
||||
}
|
||||
|
||||
layer_ids = instance_data["layer_ids"]
|
||||
render_pass_layers = []
|
||||
for layer_id in layer_ids:
|
||||
layer = layers_by_id.get(layer_id)
|
||||
if not layer:
|
||||
self.log.warning(f"Layer with id {layer_id} was not found.")
|
||||
continue
|
||||
|
||||
render_pass_layers.append(layer)
|
||||
|
||||
if not render_pass_layers:
|
||||
name = instance_data["name"]
|
||||
self.log.warning(
|
||||
f"None of the layers from the RenderPass \"{name}\""
|
||||
" exist anymore. Instance not created."
|
||||
)
|
||||
return None
|
||||
|
||||
instance_data["layers"] = render_pass_layers
|
||||
return context.create_instance(**instance_data)
|
||||
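Both helpers above rebuild the publish subset name from the task name and the creator data, with the final family always being `render`. A worked example of the two formats; the task and layer names are illustrative:

task_name = "animation"
name = "L010"            # render layer name stored by the creator (illustrative)
pass_name = "shadow"     # render pass name (illustrative)

render_layer_subset = "{}{}_{}_Beauty".format(
    "render", task_name.capitalize(), name)
print(render_layer_subset)   # -> "renderAnimation_L010_Beauty"

render_pass_subset = "{}{}_{}_{}".format(
    "render", task_name.capitalize(), name, pass_name)
print(render_pass_subset)    # -> "renderAnimation_L010_shadow"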
pype/plugins/tvpaint/publish/collect_workfile_data.py (new file, 66 lines)
|
|
@ -0,0 +1,66 @@
|
|||
import json
|
||||
|
||||
import pyblish.api
|
||||
from avalon.tvpaint import pipeline, lib
|
||||
|
||||
|
||||
class CollectWorkfileData(pyblish.api.ContextPlugin):
|
||||
label = "Collect Workfile Data"
|
||||
order = pyblish.api.CollectorOrder - 1.01
|
||||
hosts = ["tvpaint"]
|
||||
|
||||
def process(self, context):
|
||||
self.log.info("Collecting instance data from workfile")
|
||||
instance_data = pipeline.list_instances()
|
||||
self.log.debug(
|
||||
"Instance data:\"{}".format(json.dumps(instance_data, indent=4))
|
||||
)
|
||||
context.data["workfileInstances"] = instance_data
|
||||
|
||||
self.log.info("Collecting layers data from workfile")
|
||||
layers_data = lib.layers_data()
|
||||
self.log.debug(
|
||||
"Layers data:\"{}".format(json.dumps(layers_data, indent=4))
|
||||
)
|
||||
context.data["layersData"] = layers_data
|
||||
|
||||
self.log.info("Collecting groups data from workfile")
|
||||
group_data = lib.groups_data()
|
||||
self.log.debug(
|
||||
"Group data:\"{}".format(json.dumps(group_data, indent=4))
|
||||
)
|
||||
context.data["groupsData"] = group_data
|
||||
|
||||
self.log.info("Collecting scene data from workfile")
|
||||
workfile_info_parts = lib.execute_george("tv_projectinfo").split(" ")
|
||||
|
||||
frame_start = int(workfile_info_parts.pop(-1))
|
||||
field_order = workfile_info_parts.pop(-1)
|
||||
frame_rate = float(workfile_info_parts.pop(-1))
|
||||
pixel_aspect = float(workfile_info_parts.pop(-1))
|
||||
height = int(workfile_info_parts.pop(-1))
|
||||
width = int(workfile_info_parts.pop(-1))
|
||||
workfile_path = " ".join(workfile_info_parts).replace("\"", "")
|
||||
|
||||
# TODO This is not a proper way of getting the last frame
|
||||
# - but don't know better
|
||||
last_frame = frame_start
|
||||
for layer in layers_data:
|
||||
frame_end = layer["frame_end"]
|
||||
if frame_end > last_frame:
|
||||
last_frame = frame_end
|
||||
|
||||
scene_data = {
|
||||
"currentFile": workfile_path,
|
||||
"sceneWidth": width,
|
||||
"sceneHeight": height,
|
||||
"pixelAspect": pixel_apsect,
|
||||
"frameStart": frame_start,
|
||||
"frameEnd": last_frame,
|
||||
"fps": frame_rate,
|
||||
"fieldOrder": field_order
|
||||
}
|
||||
self.log.debug(
|
||||
"Scene data: {}".format(json.dumps(scene_data, indent=4))
|
||||
)
|
||||
context.data.update(scene_data)
|
||||
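The `tv_projectinfo` parsing above assumes a space separated reply with the project path first, so the known fields are popped from the end and whatever is left is joined back into the path (which may itself contain spaces). A sketch with a made-up reply string:

reply = '"C:/projects/show/ep01 sc01.tvpp" 1920 1080 1.0 25.0 NONE 0'
parts = reply.split(" ")
frame_start = int(parts.pop(-1))      # 0
field_order = parts.pop(-1)           # "NONE"
frame_rate = float(parts.pop(-1))     # 25.0
pixel_aspect = float(parts.pop(-1))   # 1.0
height = int(parts.pop(-1))           # 1080
width = int(parts.pop(-1))            # 1920
workfile_path = " ".join(parts).replace('"', "")
print(workfile_path)  # "C:/projects/show/ep01 sc01.tvpp" - spaces survive the join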
pype/plugins/tvpaint/publish/extract_sequence.py (new file, 351 lines)
|
|
@ -0,0 +1,351 @@
|
|||
import os
|
||||
import shutil
|
||||
import tempfile
|
||||
|
||||
import pyblish.api
|
||||
from avalon.tvpaint import lib
|
||||
|
||||
|
||||
class ExtractSequence(pyblish.api.Extractor):
|
||||
label = "Extract Sequence"
|
||||
hosts = ["tvpaint"]
|
||||
families = ["review", "renderPass", "renderLayer"]
|
||||
|
||||
save_mode_to_ext = {
|
||||
"avi": ".avi",
|
||||
"bmp": ".bmp",
|
||||
"cin": ".cin",
|
||||
"deep": ".dip",
|
||||
"dps": ".dps",
|
||||
"dpx": ".dpx",
|
||||
"flc": ".fli",
|
||||
"gif": ".gif",
|
||||
"ilbm": ".iff",
|
||||
"jpeg": ".jpg",
|
||||
"pcx": ".pcx",
|
||||
"png": ".png",
|
||||
"psd": ".psd",
|
||||
"qt": ".qt",
|
||||
"rtv": ".rtv",
|
||||
"sun": ".ras",
|
||||
"tiff": ".tiff",
|
||||
"tga": ".tga",
|
||||
"vpb": ".vpb"
|
||||
}
|
||||
sequential_save_mode = {
|
||||
"bmp",
|
||||
"dpx",
|
||||
"ilbm",
|
||||
"jpeg",
|
||||
"png",
|
||||
"sun",
|
||||
"tiff",
|
||||
"tga"
|
||||
}
|
||||
|
||||
default_save_mode = "\"PNG\""
|
||||
save_mode_for_family = {
|
||||
"review": "\"PNG\"",
|
||||
"renderPass": "\"PNG\"",
|
||||
"renderLayer": "\"PNG\"",
|
||||
}
|
||||
|
||||
def process(self, instance):
|
||||
self.log.info(
|
||||
"* Processing instance \"{}\"".format(instance.data["label"])
|
||||
)
|
||||
|
||||
# Get all layers and filter out not visible
|
||||
layers = instance.data["layers"]
|
||||
filtered_layers = [
|
||||
layer
|
||||
for layer in layers
|
||||
if layer["visible"]
|
||||
]
|
||||
layer_ids = [str(layer["layer_id"]) for layer in filtered_layers]
|
||||
if not layer_ids:
|
||||
self.log.info(
|
||||
f"None of the layers from the instance"
|
||||
" are visible. Extraction skipped."
|
||||
)
|
||||
return
|
||||
|
||||
self.log.debug(
|
||||
"Instance has {} layers with ids: {}".format(
|
||||
len(layer_ids), ", ".join(layer_ids)
|
||||
)
|
||||
)
|
||||
# This is a plugin attribute cleanup method
|
||||
self._prepare_save_modes()
|
||||
|
||||
family_lowered = instance.data["family"].lower()
|
||||
save_mode = self.save_mode_for_family.get(
|
||||
family_lowered, self.default_save_mode
|
||||
)
|
||||
save_mode_type = self._get_save_mode_type(save_mode)
|
||||
|
||||
if save_mode_type not in self.sequential_save_mode:
|
||||
raise AssertionError((
|
||||
"Plugin can export only sequential frame output"
|
||||
" but save mode for family \"{}\" is not for sequence > {} <"
|
||||
).format(instance.data["family"], save_mode))
|
||||
|
||||
frame_start = instance.data["frameStart"]
|
||||
frame_end = instance.data["frameEnd"]
|
||||
|
||||
filename_template = self._get_filename_template(
|
||||
save_mode_type, save_mode, frame_end
|
||||
)
|
||||
ext = os.path.splitext(filename_template)[1].replace(".", "")
|
||||
|
||||
self.log.debug(
|
||||
"Using save mode > {} < and file template \"{}\"".format(
|
||||
save_mode, filename_template
|
||||
)
|
||||
)
|
||||
|
||||
# Save to staging dir
|
||||
output_dir = instance.data.get("stagingDir")
|
||||
if not output_dir:
|
||||
# Create temp folder if staging dir is not set
|
||||
output_dir = tempfile.mkdtemp().replace("\\", "/")
|
||||
instance.data["stagingDir"] = output_dir
|
||||
|
||||
self.log.debug(
|
||||
"Files will be rendered to folder: {}".format(output_dir)
|
||||
)
|
||||
|
||||
thumbnail_filename = "thumbnail"
|
||||
|
||||
# Render output
|
||||
output_files_by_frame = self.render(
|
||||
save_mode, filename_template, output_dir,
|
||||
filtered_layers, frame_start, frame_end, thumbnail_filename
|
||||
)
|
||||
thumbnail_fullpath = output_files_by_frame.pop(
|
||||
thumbnail_filename, None
|
||||
)
|
||||
|
||||
# Fill gaps in sequence
|
||||
self.fill_missing_frames(
|
||||
output_files_by_frame,
|
||||
frame_start,
|
||||
frame_end,
|
||||
filename_template
|
||||
)
|
||||
|
||||
# Fill tags and new families
|
||||
tags = []
|
||||
if family_lowered in ("review", "renderlayer"):
|
||||
tags.append("review")
|
||||
|
||||
repre_files = [
|
||||
os.path.basename(filepath)
|
||||
for filepath in output_files_by_frame.values()
|
||||
]
|
||||
new_repre = {
|
||||
"name": ext,
|
||||
"ext": ext,
|
||||
"files": repre_files,
|
||||
"stagingDir": output_dir,
|
||||
"frameStart": frame_start,
|
||||
"frameEnd": frame_end,
|
||||
"tags": tags
|
||||
}
|
||||
self.log.debug("Creating new representation: {}".format(new_repre))
|
||||
|
||||
instance.data["representations"].append(new_repre)
|
||||
|
||||
if family_lowered in ("renderpass", "renderlayer"):
|
||||
# Change family to render
|
||||
instance.data["family"] = "render"
|
||||
|
||||
if not thumbnail_fullpath:
|
||||
return
|
||||
|
||||
# Create thumbnail representation
|
||||
thumbnail_repre = {
|
||||
"name": "thumbnail",
|
||||
"ext": ext,
|
||||
"files": os.path.basename(thumbnail_fullpath),
|
||||
"stagingDir": output_dir,
|
||||
"tags": ["thumbnail"]
|
||||
}
|
||||
instance.data["representations"].append(thumbnail_repre)
|
||||
|
||||
def _prepare_save_modes(self):
|
||||
"""Lower family names in keys and skip empty values."""
|
||||
new_specifications = {}
|
||||
for key, value in self.save_mode_for_family.items():
|
||||
if value:
|
||||
new_specifications[key.lower()] = value
|
||||
else:
|
||||
self.log.warning((
|
||||
"Save mode for family \"{}\" has empty value."
|
||||
" The family will use default save mode: > {} <."
|
||||
).format(key, self.default_save_mode))
|
||||
self.save_mode_for_family = new_specifications
|
||||
|
||||
def _get_save_mode_type(self, save_mode):
|
||||
"""Extract type of save mode.
|
||||
|
||||
Helps to define output files extension.
|
||||
"""
|
||||
save_mode_type = (
|
||||
save_mode.lower()
|
||||
.split(" ")[0]
|
||||
.replace("\"", "")
|
||||
)
|
||||
self.log.debug("Save mode type is \"{}\"".format(save_mode_type))
|
||||
return save_mode_type
|
||||
|
||||
def _get_filename_template(self, save_mode_type, save_mode, frame_end):
|
||||
"""Get filetemplate for rendered files.
|
||||
|
||||
This is a simple template containing `{frame}{ext}` for sequential outputs
|
||||
and `single_file{ext}` for single file output. Output is rendered to
|
||||
a temporary folder so the filename should not matter, as the integrator renames
|
||||
them.
|
||||
"""
|
||||
ext = self.save_mode_to_ext.get(save_mode_type)
|
||||
if ext is None:
|
||||
raise AssertionError((
|
||||
"Couldn't find file extension for TVPaint's save mode: > {} <"
|
||||
).format(save_mode))
|
||||
|
||||
frame_padding = 4
|
||||
frame_end_str_len = len(str(frame_end))
|
||||
if frame_end_str_len > frame_padding:
|
||||
frame_padding = frame_end_str_len
|
||||
|
||||
return "{{frame:0>{}}}".format(frame_padding) + ext
|
||||
|
||||
def render(
|
||||
self, save_mode, filename_template, output_dir, layers,
|
||||
first_frame, last_frame, thumbnail_filename
|
||||
):
|
||||
""" Export images from TVPaint.
|
||||
|
||||
Args:
|
||||
save_mode (str): Argument for `tv_savemode` george script function.
|
||||
More about save mode in documentation.
|
||||
filename_template (str): Filename template of an output. Template
|
||||
should already contain extension. Template may contain only
|
||||
keyword argument `{frame}` or index argument (for same value).
|
||||
Extension in template must match `save_mode`.
|
||||
layers (list): List of layers to be exported.
|
||||
first_frame (int): Starting frame from which export will begin.
|
||||
last_frame (int): On which frame export will end.
|
||||
|
||||
Returns:
|
||||
dict: Mapping frame to output filepath.
|
||||
"""
|
||||
|
||||
# Add save mode arguments to function
|
||||
save_mode = "tv_SaveMode {}".format(save_mode)
|
||||
|
||||
# Map layers by position
|
||||
layers_by_position = {
|
||||
layer["position"]: layer
|
||||
for layer in layers
|
||||
}
|
||||
|
||||
# Sort layer positions in reverse order
|
||||
sorted_positions = list(reversed(sorted(layers_by_position.keys())))
|
||||
if not sorted_positions:
|
||||
return
|
||||
|
||||
# Create temporary layer
|
||||
new_layer_id = lib.execute_george("tv_layercreate _tmp_layer")
|
||||
|
||||
# Merge layers to temp layer
|
||||
george_script_lines = []
|
||||
# Set duplicated layer as current
|
||||
george_script_lines.append("tv_layerset {}".format(new_layer_id))
|
||||
for position in sorted_positions:
|
||||
layer = layers_by_position[position]
|
||||
george_script_lines.append(
|
||||
"tv_layermerge {}".format(layer["layer_id"])
|
||||
)
|
||||
|
||||
lib.execute_george_through_file("\n".join(george_script_lines))
|
||||
|
||||
# Frames with keyframe
|
||||
exposure_frames = lib.get_exposure_frames(
|
||||
new_layer_id, first_frame, last_frame
|
||||
)
|
||||
|
||||
# TODO what if there are no exposure frames?
|
||||
# - this forces the first frame to always be included
|
||||
if first_frame not in exposure_frames:
|
||||
exposure_frames.insert(0, first_frame)
|
||||
|
||||
# Restart george script lines
|
||||
george_script_lines = []
|
||||
george_script_lines.append(save_mode)
|
||||
|
||||
all_output_files = {}
|
||||
for frame in exposure_frames:
|
||||
filename = filename_template.format(frame, frame=frame)
|
||||
dst_path = "/".join([output_dir, filename])
|
||||
all_output_files[frame] = os.path.normpath(dst_path)
|
||||
|
||||
# Go to frame
|
||||
george_script_lines.append("tv_layerImage {}".format(frame))
|
||||
# Store image to output
|
||||
george_script_lines.append("tv_saveimage \"{}\"".format(dst_path))
|
||||
|
||||
# Export thumbnail
|
||||
if thumbnail_filename:
|
||||
basename, ext = os.path.splitext(thumbnail_filename)
|
||||
if not ext:
|
||||
ext = ".png"
|
||||
thumbnail_fullpath = "/".join([output_dir, basename + ext])
|
||||
all_output_files[thumbnail_filename] = thumbnail_fullpath
|
||||
# Force save mode to png for thumbnail
|
||||
george_script_lines.append("tv_SaveMode \"PNG\"")
|
||||
# Go to frame
|
||||
george_script_lines.append("tv_layerImage {}".format(first_frame))
|
||||
# Store image to output
|
||||
george_script_lines.append(
|
||||
"tv_saveimage \"{}\"".format(thumbnail_fullpath)
|
||||
)
|
||||
|
||||
# Delete temporary layer
|
||||
george_script_lines.append("tv_layerkill {}".format(new_layer_id))
|
||||
|
||||
lib.execute_george_through_file("\n".join(george_script_lines))
|
||||
|
||||
return all_output_files
|
||||
|
||||
def fill_missing_frames(
|
||||
self, filepaths_by_frame, first_frame, last_frame, filename_template
|
||||
):
|
||||
"""Fill not rendered frames with previous frame.
|
||||
|
||||
Extractor is rendering only frames with keyframes (exposure frames) to
|
||||
get output faster which means there may be gaps between frames.
|
||||
This function fills the missing frames.
|
||||
"""
|
||||
output_dir = None
|
||||
previous_frame_filepath = None
|
||||
for frame in range(first_frame, last_frame + 1):
|
||||
if frame in filepaths_by_frame:
|
||||
previous_frame_filepath = filepaths_by_frame[frame]
|
||||
continue
|
||||
|
||||
elif previous_frame_filepath is None:
|
||||
self.log.warning(
|
||||
"No frames to fill. Seems like nothing was exported."
|
||||
)
|
||||
break
|
||||
|
||||
if output_dir is None:
|
||||
output_dir = os.path.dirname(previous_frame_filepath)
|
||||
|
||||
filename = filename_template.format(frame=frame)
|
||||
space_filepath = os.path.normpath(
|
||||
os.path.join(output_dir, filename)
|
||||
)
|
||||
filepaths_by_frame[frame] = space_filepath
|
||||
shutil.copy(previous_frame_filepath, space_filepath)
|
||||
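Because only exposure (keyframe) frames are exported, the gap filling above simply copies the last rendered image forward frame by frame. A condensed, self-contained sketch of that loop; the frame numbers and directory are illustrative:

import os
import shutil
import tempfile

workdir = tempfile.mkdtemp()
rendered = {}
for frame in (1, 4):  # pretend only these exposure frames were rendered
    path = os.path.join(workdir, "{:0>4}.png".format(frame))
    open(path, "wb").close()
    rendered[frame] = path

previous = None
for frame in range(1, 7):
    if frame in rendered:
        previous = rendered[frame]
        continue
    target = os.path.join(workdir, "{:0>4}.png".format(frame))
    shutil.copy(previous, target)  # frames 2-3 copy 0001.png, frames 5-6 copy 0004.png
    rendered[frame] = target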
pype/plugins/tvpaint/publish/validate_frame_range.py (new file, 76 lines)
|
|
@ -0,0 +1,76 @@
|
|||
import collections
|
||||
import pyblish.api
|
||||
|
||||
|
||||
class ValidateLayersGroup(pyblish.api.InstancePlugin):
|
||||
"""Validate group ids of renderPass layers.
|
||||
|
||||
Validates that all layers are in same group as they were during creation.
|
||||
"""
|
||||
|
||||
label = "Validate Layers Group"
|
||||
order = pyblish.api.ValidatorOrder
|
||||
families = ["renderPass"]
|
||||
|
||||
def process(self, instance):
|
||||
# Prepare layers
|
||||
layers_data = instance.context.data["layersData"]
|
||||
layers_by_id = {
|
||||
layer["layer_id"]: layer
|
||||
for layer in layers_data
|
||||
}
|
||||
|
||||
# Expected group id for instance layers
|
||||
group_id = instance.data["group_id"]
|
||||
# Layers ids of an instance
|
||||
layer_ids = instance.data["layer_ids"]
|
||||
# Check if all layers from render pass are in right group
|
||||
invalid_layers_by_group_id = collections.defaultdict(list)
|
||||
for layer_id in layer_ids:
|
||||
layer = layers_by_id.get(layer_id)
|
||||
_group_id = layer["group_id"]
|
||||
if _group_id != group_id:
|
||||
invalid_layers_by_group_id[_group_id].append(layer)
|
||||
|
||||
# Everything is OK and skip exception
|
||||
if not invalid_layers_by_group_id:
|
||||
return
|
||||
|
||||
# Exception message preparations
|
||||
groups_data = instance.context.data["groupsData"]
|
||||
groups_by_id = {
|
||||
group["group_id"]: group
|
||||
for group in groups_data
|
||||
}
|
||||
correct_group = groups_by_id[group_id]
|
||||
|
||||
per_group_msgs = []
|
||||
for _group_id, layers in invalid_layers_by_group_id.items():
|
||||
_group = groups_by_id[_group_id]
|
||||
layers_msgs = []
|
||||
for layer in layers:
|
||||
layers_msgs.append(
|
||||
"\"{}\" (id: {})".format(layer["name"], layer["layer_id"])
|
||||
)
|
||||
per_group_msgs.append(
|
||||
"Group \"{}\" (id: {}) < {} >".format(
|
||||
_group["name"],
|
||||
_group["group_id"],
|
||||
", ".join(layers_msgs)
|
||||
)
|
||||
)
|
||||
|
||||
# Raise an error
|
||||
raise AssertionError((
|
||||
# Short message
|
||||
"Layers in wrong group."
|
||||
# Description what's wrong
|
||||
" Layers from render pass \"{}\" must be in group {} (id: {})."
|
||||
# Detailed message
|
||||
" Layers in wrong group: {}"
|
||||
).format(
|
||||
instance.data["label"],
|
||||
correct_group["name"],
|
||||
correct_group["group_id"],
|
||||
" | ".join(per_group_msgs)
|
||||
))
|
||||
|
|
@ -49,6 +49,14 @@ class ProjectBar(QtWidgets.QWidget):
|
|||
|
||||
def set_project(self, project_name):
|
||||
index = self.project_combobox.findText(project_name)
|
||||
if index < 0:
|
||||
# Try refresh combobox model
|
||||
self.project_combobox.blockSignals(True)
|
||||
self.model.refresh()
|
||||
self.project_combobox.blockSignals(False)
|
||||
|
||||
index = self.project_combobox.findText(project_name)
|
||||
|
||||
if index >= 0:
|
||||
self.project_combobox.setCurrentIndex(index)
|
||||
|
||||
|
|
|
|||
|
|
@ -207,7 +207,7 @@ class AssetsPanel(QtWidgets.QWidget):
|
|||
self.assets_widget.refresh()
|
||||
|
||||
# Force asset change callback to ensure tasks are correctly reset
|
||||
tools_lib.schedule(self.on_asset_changed, 0.05, channel="assets")
|
||||
self.assets_widget.refreshed.connect(self.on_asset_changed)
|
||||
|
||||
def on_asset_changed(self):
|
||||
"""Callback on asset selection changed
|
||||
|
|
@ -375,7 +375,6 @@ class LauncherWindow(QtWidgets.QDialog):
|
|||
def on_project_clicked(self, project_name):
|
||||
self.dbcon.Session["AVALON_PROJECT"] = project_name
|
||||
# Refresh projects
|
||||
self.asset_panel.project_bar.refresh()
|
||||
self.asset_panel.set_project(project_name)
|
||||
self.set_page(1)
|
||||
self.refresh_actions()
|
||||
|
|
|
|||
|
|
@ -222,10 +222,6 @@ QToolButton {
|
|||
background: #444;
|
||||
}
|
||||
|
||||
#Header #ArtistTab {
|
||||
background-image: url("img/tab-home.png");
|
||||
}
|
||||
|
||||
#Header #TerminalTab {
|
||||
background-image: url("img/tab-terminal.png");
|
||||
}
|
||||
|
|
|
|||
|
|
@ -522,168 +522,6 @@ class PluginDelegate(QtWidgets.QStyledItemDelegate):

        return QtCore.QSize(option.rect.width(), 20)


class ArtistDelegate(QtWidgets.QStyledItemDelegate):
    """Delegate used on Artist page"""

    def paint(self, painter, option, index):
        """Paint checkbox and text
         _______________________________________________
        |       |  label              | duration  |arrow|
        |toggle |_____________________|           | to  |
        |       |  families           |           |persp|
        |_______|_____________________|___________|_____|
        """
        # Layout
        spacing = 10

        body_rect = QtCore.QRectF(option.rect).adjusted(2, 2, -8, -2)
        content_rect = body_rect.adjusted(5, 5, -5, -5)

        perspective_rect = QtCore.QRectF(body_rect)
        perspective_rect.setWidth(35)
        perspective_rect.setHeight(35)
        perspective_rect.translate(
            content_rect.width() - (perspective_rect.width() / 2) + 10,
            (content_rect.height() / 2) - (perspective_rect.height() / 2)
        )

        toggle_rect = QtCore.QRectF(body_rect)
        toggle_rect.setWidth(7)
        toggle_rect.adjust(1, 1, 0, -1)

        icon_rect = QtCore.QRectF(content_rect)
        icon_rect.translate(toggle_rect.width() + spacing, 3)
        icon_rect.setWidth(35)
        icon_rect.setHeight(35)

        duration_rect = QtCore.QRectF(content_rect)
        duration_rect.translate(content_rect.width() - 50, 0)

        # Colors
        check_color = colors["idle"]

        publish_states = index.data(Roles.PublishFlagsRole)
        if publish_states is None:
            return

        if publish_states & InstanceStates.InProgress:
            check_color = colors["active"]

        elif publish_states & InstanceStates.HasError:
            check_color = colors["error"]

        elif publish_states & InstanceStates.HasWarning:
            check_color = colors["warning"]

        elif publish_states & InstanceStates.HasFinished:
            check_color = colors["ok"]

        elif not index.data(Roles.IsEnabledRole):
            check_color = colors["inactive"]

        perspective_icon = icons["angle-right"]

        if not index.data(QtCore.Qt.CheckStateRole):
            font_color = colors["inactive"]
        else:
            font_color = colors["idle"]

        if (
            option.state
            & (
                QtWidgets.QStyle.State_MouseOver
                or QtWidgets.QStyle.State_Selected
            )
        ):
            perspective_color = colors["idle"]
        else:
            perspective_color = colors["inactive"]
        # Maintan reference to state, so we can restore it once we're done
        painter.save()

        # Draw background
        painter.fillRect(body_rect, colors["hover"])

        # Draw icon
        icon = index.data(QtCore.Qt.DecorationRole)

        painter.setFont(fonts["largeAwesome"])
        painter.setPen(QtGui.QPen(font_color))
        painter.drawText(icon_rect, icon)

        # Draw label
        painter.setFont(fonts["h3"])
        label_rect = QtCore.QRectF(content_rect)
        label_x_offset = icon_rect.width() + spacing
        label_rect.translate(
            label_x_offset,
            0
        )
        metrics = painter.fontMetrics()
        label_rect.setHeight(metrics.lineSpacing())
        label_rect.setWidth(
            content_rect.width()
            - label_x_offset
            - perspective_rect.width()
        )
        # Elide label
        label = index.data(QtCore.Qt.DisplayRole)
        label = metrics.elidedText(
            label, QtCore.Qt.ElideRight, label_rect.width()
        )
        painter.drawText(label_rect, label)

        # Draw families
        painter.setFont(fonts["h5"])
        painter.setPen(QtGui.QPen(colors["inactive"]))

        families = ", ".join(index.data(Roles.FamiliesRole))
        families = painter.fontMetrics().elidedText(
            families, QtCore.Qt.ElideRight, label_rect.width()
        )

        families_rect = QtCore.QRectF(label_rect)
        families_rect.translate(0, label_rect.height() + spacing)

        painter.drawText(families_rect, families)

        painter.setFont(fonts["largeAwesome"])
        painter.setPen(QtGui.QPen(perspective_color))
        painter.drawText(perspective_rect, perspective_icon)

        # Draw checkbox
        pen = QtGui.QPen(check_color, 1)
        painter.setPen(pen)

        if index.data(Roles.IsOptionalRole):
            painter.drawRect(toggle_rect)

            if index.data(QtCore.Qt.CheckStateRole):
                painter.fillRect(toggle_rect, check_color)

        elif (
            index.data(QtCore.Qt.CheckStateRole)
        ):
            painter.fillRect(toggle_rect, check_color)

        if option.state & QtWidgets.QStyle.State_MouseOver:
            painter.fillRect(body_rect, colors["hover"])

        if option.state & QtWidgets.QStyle.State_Selected:
            painter.fillRect(body_rect, colors["selected"])

        painter.setPen(colors["outline"])
        painter.drawRect(body_rect)

        # Ok, we're done, tidy up.
        painter.restore()

    def sizeHint(self, option, index):
        return QtCore.QSize(option.rect.width(), 80)


class TerminalItem(QtWidgets.QStyledItemDelegate):
    """Delegate used exclusively for the Terminal"""

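Editor's note: for readers less familiar with Qt item delegates, the removed ArtistDelegate follows the standard contract where paint() draws the whole row inside option.rect and sizeHint() reports a fixed row height. A minimal sketch of that contract (illustrative only, not the commit's code; class and variable names are invented):

from Qt import QtCore, QtWidgets  # Qt.py binding wrapper, as used by this module


class RowSketchDelegate(QtWidgets.QStyledItemDelegate):
    """Illustrative row delegate; not part of the commit."""

    def paint(self, painter, option, index):
        painter.save()
        # Compose sub-rects from option.rect, exactly as ArtistDelegate did.
        body_rect = QtCore.QRectF(option.rect).adjusted(2, 2, -2, -2)
        painter.drawText(
            body_rect,
            QtCore.Qt.AlignTop | QtCore.Qt.AlignLeft,
            str(index.data(QtCore.Qt.DisplayRole) or "")
        )
        painter.restore()

    def sizeHint(self, option, index):
        # Fixed row height, mirroring the delegates in this file.
        return QtCore.QSize(option.rect.width(), 80)
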
Binary file not shown.
Before Width: | Height: | Size: 313 B

@ -717,15 +717,18 @@ class InstanceModel(QtGui.QStandardItemModel):

    def append(self, instance):
        new_item = InstanceItem(instance)
        families = new_item.data(Roles.FamiliesRole)
        group_item = self.group_items.get(families[0])
        if not group_item:
            group_item = GroupItem(families[0])
            self.appendRow(group_item)
            self.group_items[families[0]] = group_item
            self.group_created.emit(group_item.index())
        if new_item.is_context:
            self.appendRow(new_item)
        else:
            families = new_item.data(Roles.FamiliesRole)
            group_item = self.group_items.get(families[0])
            if not group_item:
                group_item = GroupItem(families[0])
                self.appendRow(group_item)
                self.group_items[families[0]] = group_item
                self.group_created.emit(group_item.index())

            group_item.appendRow(new_item)
        group_item.appendRow(new_item)
        instance_id = instance.id
        self.instance_items[instance_id] = new_item

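Editor's note: the reworked append() keeps the context instance as a top-level row and nests every other instance under a group row keyed by its first family. A minimal sketch of that resulting tree shape using plain QStandardItems (illustrative only; InstanceItem, GroupItem and the role names belong to this module and are not reproduced here):

from Qt import QtGui  # Qt.py binding wrapper, as used elsewhere in this diff

model = QtGui.QStandardItemModel()
group_items = {}


def append_sketch(label, family, is_context=False):
    item = QtGui.QStandardItem(label)
    if is_context:
        model.appendRow(item)          # the context row stays at the top level
        return
    group = group_items.get(family)
    if group is None:
        group = QtGui.QStandardItem(family)
        model.appendRow(group)         # one group row per family
        group_items[family] = group
    group.appendRow(item)              # instances nest under their family group


append_sketch("context", "", is_context=True)
append_sketch("modelMain", "model")
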
@ -842,162 +845,20 @@ class InstanceModel(QtGui.QStandardItemModel):
        )


class ArtistProxy(QtCore.QAbstractProxyModel):
class InstanceSortProxy(QtCore.QSortFilterProxyModel):
    def __init__(self, *args, **kwargs):
        self.mapping_from = []
        self.mapping_to = []
        super(ArtistProxy, self).__init__(*args, **kwargs)
        super(InstanceSortProxy, self).__init__(*args, **kwargs)
        # Do not care about lower/upper case
        self.setSortCaseSensitivity(QtCore.Qt.CaseInsensitive)

    def on_rows_inserted(self, parent_index, from_row, to_row):
        if not parent_index.isValid():
            return

        parent_row = parent_index.row()
        if parent_row >= len(self.mapping_from):
            self.mapping_from.append(list())

        new_from = None
        new_to = None
        for row_num in range(from_row, to_row + 1):
            new_row = len(self.mapping_to)
            new_to = new_row
            if new_from is None:
                new_from = new_row

            self.mapping_from[parent_row].insert(row_num, new_row)
            self.mapping_to.insert(new_row, [parent_row, row_num])

        self.rowsInserted.emit(self.parent(), new_from, new_to + 1)

    def _remove_rows(self, parent_row, from_row, to_row):
        increment_num = self.mapping_from[parent_row][from_row]

        to_end_index = len(self.mapping_from[parent_row]) - 1
        for _idx in range(0, parent_row):
            to_end_index += len(self.mapping_from[_idx])

        removed_rows = 0
        _emit_last = None
        for row_num in reversed(range(from_row, to_row + 1)):
            row = self.mapping_from[parent_row].pop(row_num)
            _emit_last = row
            removed_rows += 1

        _emit_first = int(increment_num)
        mapping_from_len = len(self.mapping_from)
        mapping_from_parent_len = len(self.mapping_from[parent_row])
        if parent_row < mapping_from_len:
            for idx in range(from_row, mapping_from_parent_len):
                self.mapping_from[parent_row][idx] = increment_num
                increment_num += 1

        if parent_row < mapping_from_len - 1:
            for idx_i in range(parent_row + 1, mapping_from_len):
                sub_values = self.mapping_from[idx_i]
                if not sub_values:
                    continue

                for idx_j in range(0, len(sub_values)):
                    self.mapping_from[idx_i][idx_j] = increment_num
                    increment_num += 1

        for idx in range(removed_rows):
            self.mapping_to.pop(to_end_index - idx)

        return (_emit_first, _emit_last)

    def on_rows_removed(self, parent_index, from_row, to_row):
        if parent_index.isValid():
            parent_row = parent_index.row()
            _emit_first, _emit_last = self._remove_rows(
                parent_row, from_row, to_row
            )
            self.rowsRemoved.emit(self.parent(), _emit_first, _emit_last)

        else:
            removed_rows = False
            emit_first = None
            emit_last = None
            for row_num in reversed(range(from_row, to_row + 1)):
                remaining_rows = self.mapping_from[row_num]
                if remaining_rows:
                    removed_rows = True
                    _emit_first, _emit_last = self._remove_rows(
                        row_num, 0, len(remaining_rows) - 1
                    )
                    if emit_first is None:
                        emit_first = _emit_first
                    emit_last = _emit_last

                self.mapping_from.pop(row_num)

            diff = to_row - from_row + 1
            mapping_to_len = len(self.mapping_to)
            if from_row < mapping_to_len:
                for idx in range(from_row, mapping_to_len):
                    self.mapping_to[idx][0] -= diff

            if removed_rows:
                self.rowsRemoved.emit(self.parent(), emit_first, emit_last)

    def on_reset(self):
        self.modelReset.emit()
        self.mapping_from = []
        self.mapping_to = []

    def setSourceModel(self, source_model):
        super(ArtistProxy, self).setSourceModel(source_model)
        source_model.rowsInserted.connect(self.on_rows_inserted)
        source_model.rowsRemoved.connect(self.on_rows_removed)
        source_model.modelReset.connect(self.on_reset)
        source_model.dataChanged.connect(self.on_data_changed)

    def on_data_changed(self, from_index, to_index, roles=None):
        proxy_from_index = self.mapFromSource(from_index)
        if from_index == to_index:
            proxy_to_index = proxy_from_index
        else:
            proxy_to_index = self.mapFromSource(to_index)

        args = [proxy_from_index, proxy_to_index]
        if Qt.__binding__ not in ("PyQt4", "PySide"):
            args.append(roles or [])
        self.dataChanged.emit(*args)

    def columnCount(self, parent=QtCore.QModelIndex()):
        # This is not right for global proxy, but in this case it is enough
        return self.sourceModel().columnCount()

    def rowCount(self, parent=QtCore.QModelIndex()):
        if parent.isValid():
            return 0
        return len(self.mapping_to)

    def mapFromSource(self, index):
        if not index.isValid():
            return QtCore.QModelIndex()

        parent_index = index.parent()
        if not parent_index.isValid():
            return QtCore.QModelIndex()

        parent_idx = self.mapping_from[parent_index.row()]
        my_row = parent_idx[index.row()]
        return self.index(my_row, index.column())

    def mapToSource(self, index):
        if not index.isValid() or index.row() > len(self.mapping_to):
            return self.sourceModel().index(index.row(), index.column())

        parent_row, item_row = self.mapping_to[index.row()]
        parent_index = self.sourceModel().index(parent_row, 0)
        return self.sourceModel().index(item_row, 0, parent_index)

    def index(self, row, column, parent=QtCore.QModelIndex()):
        return self.createIndex(row, column, QtCore.QModelIndex())

    def parent(self, index=None):
        return QtCore.QModelIndex()

    def lessThan(self, x_index, y_index):
        x_type = x_index.data(Roles.TypeRole)
        y_type = y_index.data(Roles.TypeRole)
        if x_type != y_type:
            if x_type == GroupType:
                return False
            return True
        return super(InstanceSortProxy, self).lessThan(x_index, y_index)


class TerminalDetailItem(QtGui.QStandardItem):

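Editor's note: the hand-rolled ArtistProxy, which flattened the two-level instance tree for the removed Artist list, is replaced by a QSortFilterProxyModel subclass. Its lessThan() sorts by item type first: when the two rows differ in type, a group row is reported as not-less-than the other row, so in ascending order non-group rows (such as the context item) land above the family groups; rows of the same type fall back to the default, case-insensitive comparison. A self-contained sketch of that idea (illustrative only; the role constant and class name below are invented stand-ins for Roles.TypeRole / GroupType / InstanceSortProxy):

from Qt import QtCore, QtGui  # Qt.py binding wrapper, as used elsewhere in this diff

GROUP_TYPE_ROLE = QtCore.Qt.UserRole + 1  # stand-in for Roles.TypeRole / GroupType


class TypeAwareSortProxy(QtCore.QSortFilterProxyModel):
    """Illustrative re-statement of the lessThan idea; not the commit's class."""

    def lessThan(self, x_index, y_index):
        x_is_group = bool(x_index.data(GROUP_TYPE_ROLE))
        y_is_group = bool(y_index.data(GROUP_TYPE_ROLE))
        if x_is_group != y_is_group:
            # Group rows always sort after non-group rows (e.g. the context item).
            return not x_is_group
        return super(TypeAwareSortProxy, self).lessThan(x_index, y_index)


model = QtGui.QStandardItemModel()
for label, is_group in (("context", False), ("model", True), ("render", True)):
    item = QtGui.QStandardItem(label)
    item.setData(is_group, GROUP_TYPE_ROLE)
    model.appendRow(item)

proxy = TypeAwareSortProxy()
proxy.setSortCaseSensitivity(QtCore.Qt.CaseInsensitive)
proxy.setSourceModel(model)
proxy.sort(0, QtCore.Qt.AscendingOrder)
print([proxy.index(row, 0).data() for row in range(proxy.rowCount())])
# -> ['context', 'model', 'render']
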
@ -8,7 +8,7 @@ UseLabel = True

# Customize which tab to start on. Possible choices are: "artist", "overview"
# and "terminal".
InitialTab = "artist"
InitialTab = "overview"

# Customize the window size.
WindowSize = (430, 600)

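Editor's note: with the Artist page removed, the default tab moves to Overview. A small stand-in showing how the value is consumed (the real lookup, `settings.InitialTab or "overview"`, appears in the window hunk further below; the SimpleNamespace here is only a placeholder for the settings module):

from types import SimpleNamespace

# Placeholder for the settings module shown above (illustrative only).
settings = SimpleNamespace(InitialTab="overview", WindowSize=(430, 600))

current_page = settings.InitialTab or "overview"
assert current_page in ("artist", "overview", "terminal")
print(current_page)  # -> overview
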
@ -11,61 +11,6 @@ def _import_widgets():
    from . import widgets


class ArtistView(QtWidgets.QListView):
    # An item is requesting to be toggled, with optional forced-state
    toggled = QtCore.Signal(QtCore.QModelIndex, object)
    show_perspective = QtCore.Signal(QtCore.QModelIndex)

    def __init__(self, parent=None):
        super(ArtistView, self).__init__(parent)

        self.horizontalScrollBar().hide()
        self.viewport().setAttribute(QtCore.Qt.WA_Hover, True)
        self.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
        self.setSelectionMode(QtWidgets.QAbstractItemView.ExtendedSelection)
        self.setResizeMode(QtWidgets.QListView.Adjust)
        self.setVerticalScrollMode(QtWidgets.QListView.ScrollPerPixel)

    def event(self, event):
        if not event.type() == QtCore.QEvent.KeyPress:
            return super(ArtistView, self).event(event)

        elif event.key() == QtCore.Qt.Key_Space:
            for index in self.selectionModel().selectedIndexes():
                self.toggled.emit(index, None)

            return True

        elif event.key() == QtCore.Qt.Key_Backspace:
            for index in self.selectionModel().selectedIndexes():
                self.toggled.emit(index, False)

            return True

        elif event.key() == QtCore.Qt.Key_Return:
            for index in self.selectionModel().selectedIndexes():
                self.toggled.emit(index, True)

            return True

        return super(ArtistView, self).event(event)

    def focusOutEvent(self, event):
        self.selectionModel().clear()

    def mouseReleaseEvent(self, event):
        if event.button() == QtCore.Qt.LeftButton:
            indexes = self.selectionModel().selectedIndexes()
            if len(indexes) <= 1 and event.pos().x() < 20:
                for index in indexes:
                    self.toggled.emit(index, None)
            if len(indexes) == 1 and event.pos().x() > self.width() - 40:
                for index in indexes:
                    self.show_perspective.emit(index)

        return super(ArtistView, self).mouseReleaseEvent(event)


class OverviewView(QtWidgets.QTreeView):
    # An item is requesting to be toggled, with optional forced-state
    toggled = QtCore.Signal(QtCore.QModelIndex, object)

@ -160,6 +105,8 @@ class PluginView(OverviewView):
class InstanceView(OverviewView):
    def __init__(self, *args, **kwargs):
        super(InstanceView, self).__init__(*args, **kwargs)
        self.setSortingEnabled(True)
        self.sortByColumn(0, QtCore.Qt.AscendingOrder)
        self.viewport().setMouseTracking(True)
        self._pressed_group_index = None
        self._pressed_expander = None

@ -97,7 +97,6 @@ class Window(QtWidgets.QDialog):
        header_widget = QtWidgets.QWidget(parent=main_widget)

        header_tab_widget = QtWidgets.QWidget(header_widget)
        header_tab_artist = QtWidgets.QRadioButton(header_tab_widget)
        header_tab_overview = QtWidgets.QRadioButton(header_tab_widget)
        header_tab_terminal = QtWidgets.QRadioButton(header_tab_widget)
        header_spacer = QtWidgets.QWidget(header_tab_widget)

@ -125,7 +124,6 @@ class Window(QtWidgets.QDialog):
        layout_tab = QtWidgets.QHBoxLayout(header_tab_widget)
        layout_tab.setContentsMargins(0, 0, 0, 0)
        layout_tab.setSpacing(0)
        layout_tab.addWidget(header_tab_artist, 0)
        layout_tab.addWidget(header_tab_overview, 0)
        layout_tab.addWidget(header_tab_terminal, 0)
        layout_tab.addWidget(button_suspend_logs_widget, 0)

@ -141,27 +139,6 @@ class Window(QtWidgets.QDialog):

        header_widget.setLayout(layout)

        # Artist Page
        instance_model = model.InstanceModel(controller)

        artist_page = QtWidgets.QWidget()

        artist_view = view.ArtistView()
        artist_view.show_perspective.connect(self.toggle_perspective_widget)
        artist_proxy = model.ArtistProxy()
        artist_proxy.setSourceModel(instance_model)
        artist_view.setModel(artist_proxy)

        artist_delegate = delegate.ArtistDelegate()
        artist_view.setItemDelegate(artist_delegate)

        layout = QtWidgets.QVBoxLayout(artist_page)
        layout.addWidget(artist_view)
        layout.setContentsMargins(5, 5, 5, 5)
        layout.setSpacing(0)

        artist_page.setLayout(layout)

        # Overview Page
        # TODO add parent
        overview_page = QtWidgets.QWidget()

@ -172,8 +149,12 @@ class Window(QtWidgets.QDialog):
        overview_instance_delegate = delegate.InstanceDelegate(
            parent=overview_instance_view
        )
        instance_model = model.InstanceModel(controller)
        instance_sort_proxy = model.InstanceSortProxy()
        instance_sort_proxy.setSourceModel(instance_model)

        overview_instance_view.setItemDelegate(overview_instance_delegate)
        overview_instance_view.setModel(instance_model)
        overview_instance_view.setModel(instance_sort_proxy)

        overview_plugin_view = view.PluginView(
            animated=settings.Animated, parent=overview_page

@ -223,7 +204,6 @@ class Window(QtWidgets.QDialog):
        body_widget = QtWidgets.QWidget(main_widget)
        layout = QtWidgets.QHBoxLayout(body_widget)
        layout.setContentsMargins(5, 5, 5, 1)
        layout.addWidget(artist_page)
        layout.addWidget(overview_page)
        layout.addWidget(terminal_page)

@ -361,12 +341,10 @@ class Window(QtWidgets.QDialog):
            "Footer": footer_widget,

            # Pages
            "Artist": artist_page,
            "Overview": overview_page,
            "Terminal": terminal_page,

            # Tabs
            "ArtistTab": header_tab_artist,
            "OverviewTab": header_tab_overview,
            "TerminalTab": header_tab_terminal,

@ -399,7 +377,6 @@ class Window(QtWidgets.QDialog):
            pages_widget,
            header_widget,
            body_widget,
            artist_page,
            comment_box,
            overview_page,
            terminal_page,

@ -415,9 +392,6 @@ class Window(QtWidgets.QDialog):
            _widget.setAttribute(QtCore.Qt.WA_StyledBackground)

        # Signals
        header_tab_artist.toggled.connect(
            lambda: self.on_tab_changed("artist")
        )
        header_tab_overview.toggled.connect(
            lambda: self.on_tab_changed("overview")
        )

@ -450,7 +424,6 @@ class Window(QtWidgets.QDialog):
            QtCore.Qt.DirectConnection
        )

        artist_view.toggled.connect(self.on_instance_toggle)
        overview_instance_view.toggled.connect(self.on_instance_toggle)
        overview_plugin_view.toggled.connect(self.on_plugin_toggle)

@ -466,9 +439,7 @@ class Window(QtWidgets.QDialog):
            self.on_plugin_action_menu_requested
        )

        instance_model.group_created.connect(
            overview_instance_view.expand
        )
        instance_model.group_created.connect(self.on_instance_group_created)

        self.main_widget = main_widget

@ -490,9 +461,7 @@ class Window(QtWidgets.QDialog):
        self.plugin_model = plugin_model
        self.plugin_proxy = plugin_proxy
        self.instance_model = instance_model

        self.artist_proxy = artist_proxy
        self.artist_view = artist_view
        self.instance_sort_proxy = instance_sort_proxy

        self.presets_button = presets_button

@ -510,17 +479,15 @@ class Window(QtWidgets.QDialog):
        self.perspective_widget = perspective_widget

        self.tabs = {
            "artist": header_tab_artist,
            "overview": header_tab_overview,
            "terminal": header_tab_terminal
        }
        self.pages = (
            ("artist", artist_page),
            ("overview", overview_page),
            ("terminal", terminal_page)
        )

        current_page = settings.InitialTab or "artist"
        current_page = settings.InitialTab or "overview"
        self.comment_main_widget.setVisible(
            not current_page == "terminal"
        )

@ -620,6 +587,10 @@ class Window(QtWidgets.QDialog):

        self.update_compatibility()

    def on_instance_group_created(self, index):
        _index = self.instance_sort_proxy.mapFromSource(index)
        self.overview_instance_view.expand(_index)

    def on_plugin_toggle(self, index, state=None):
        """An item is requesting to be toggled"""
        if not index.data(Roles.IsOptionalRole):

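Editor's note: because the overview instance view now sits behind the sort proxy, an index coming straight from InstanceModel has to be translated into proxy space before it is handed to view methods such as expand(); the same pattern repeats in on_passed_group in the next hunk. A small sketch of that translation step (function and parameter names are illustrative, not from the commit):

def expand_source_index(view, sort_proxy, source_index):
    """Expand a group row given an index from the source model (sketch only)."""
    proxy_index = sort_proxy.mapFromSource(source_index)
    if proxy_index.isValid():
        view.expand(proxy_index)
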
@ -1016,11 +987,14 @@ class Window(QtWidgets.QDialog):

    def on_passed_group(self, order):
        for group_item in self.instance_model.group_items.values():
            if self.overview_instance_view.isExpanded(group_item.index()):
            group_index = self.instance_sort_proxy.mapFromSource(
                group_item.index()
            )
            if self.overview_instance_view.isExpanded(group_index):
                continue

            if group_item.publish_states & GroupStates.HasError:
                self.overview_instance_view.expand(group_item.index())
                self.overview_instance_view.expand(group_index)

        for group_item in self.plugin_model.group_items.values():
            # TODO check only plugins from the group

@ -1030,19 +1004,16 @@ class Window(QtWidgets.QDialog):
            if order != group_item.order:
                continue

            group_index = self.plugin_proxy.mapFromSource(group_item.index())
            if group_item.publish_states & GroupStates.HasError:
                self.overview_plugin_view.expand(
                    self.plugin_proxy.mapFromSource(group_item.index())
                )
                self.overview_plugin_view.expand(group_index)
                continue

            group_item.setData(
                {GroupStates.HasFinished: True},
                Roles.PublishFlagsRole
            )
            self.overview_plugin_view.collapse(
                self.plugin_proxy.mapFromSource(group_item.index())
            )
            self.overview_plugin_view.collapse(group_index)

    def on_was_stopped(self):
        errored = self.controller.errored

@ -1122,11 +1093,6 @@ class Window(QtWidgets.QDialog):
        for instance_id in existing_ids:
            self.instance_model.remove(instance_id)

        if result.get("error"):
            # Toggle from artist to overview tab on error
            if self.tabs["artist"].isChecked():
                self.tabs["overview"].toggle()

        result["records"] = self.terminal_model.prepare_records(
            result,
            self._suspend_logs

@ -1274,7 +1240,6 @@ class Window(QtWidgets.QDialog):
        self.terminal_proxy.deleteLater()
        self.plugin_proxy.deleteLater()

        self.artist_view.setModel(None)
        self.overview_instance_view.setModel(None)
        self.overview_plugin_view.setModel(None)
        self.terminal_view.setModel(None)

@ -1 +1 @@
__version__ = "2.13.6"
__version__ = "2.14.0"