Merge branch 'release/2.14.2'

unknown 2020-12-04 21:21:22 +01:00
commit 461dc0eb48
18 changed files with 511 additions and 113 deletions

View file

@ -183,11 +183,11 @@ PypeHarmony.color = function(rgba) {
/**
* get all dependencies for given node.
* @function
* @param {string} node node path.
* @param {string} _node node path.
* @return {array} List of dependent nodes.
*/
PypeHarmony.getDependencies = function(node) {
var target_node = node;
PypeHarmony.getDependencies = function(_node) {
var target_node = _node;
var numInput = node.numberOfInputPorts(target_node);
var dependencies = [];
for (var i = 0 ; i < numInput; i++) {

View file

@ -25,7 +25,12 @@ from .applications import (
_subprocess
)
from .plugin_tools import filter_pyblish_plugins, source_hash
from .plugin_tools import (
filter_pyblish_plugins,
source_hash,
get_unique_layer_name,
get_background_layers
)
from .path_tools import (
version_up,
@ -57,6 +62,8 @@ __all__ = [
"ApplicationAction",
"filter_pyblish_plugins",
"get_unique_layer_name",
"get_background_layers",
"version_up",
"get_version_from_path",

View file

@ -73,14 +73,14 @@ class RenderInstance(object):
@frameStart.validator
def check_frame_start(self, _, value):
"""Validate if frame start is not larger then end."""
if value >= self.frameEnd:
if value > self.frameEnd:
raise ValueError("frameStart must be smaller "
"or equal then frameEnd")
@frameEnd.validator
def check_frame_end(self, _, value):
"""Validate if frame end is not less then start."""
if value <= self.frameStart:
if value < self.frameStart:
raise ValueError("frameEnd must be smaller "
"or equal then frameStart")

View file

@ -3,6 +3,8 @@
import os
import inspect
import logging
import re
import json
from ..api import config
@ -78,3 +80,57 @@ def source_hash(filepath, *args):
time = str(os.path.getmtime(filepath))
size = str(os.path.getsize(filepath))
return "|".join([file_name, time, size] + list(args)).replace(".", ",")
def get_unique_layer_name(layers, name):
"""
Checks existing layer names and, if 'name' is already present, increases
the numeric suffix by one (i.e. creates a unique layer name for the Loader)
Args:
layers (list): of strings, names only
name (string): checked value
Returns:
(string): name_00X (without version)
"""
names = {}
for layer in layers:
layer_name = re.sub(r'_\d{3}$', '', layer)
if layer_name in names.keys():
names[layer_name] = names[layer_name] + 1
else:
names[layer_name] = 1
occurrences = names.get(name, 0)
return "{}_{:0>3d}".format(name, occurrences + 1)
def get_background_layers(file_url):
"""
Pulls file names from the background json file and enriches them with the
folder path so AE is able to import the files.
Order is important and follows the order in the json.
Args:
file_url (str): abs url of background json
Returns:
(list): of abs paths to images
"""
with open(file_url) as json_file:
data = json.load(json_file)
layers = list()
bg_folder = os.path.dirname(file_url)
for child in data['children']:
if child.get("filename"):
layers.append(os.path.join(bg_folder, child.get("filename")).
replace("\\", "/"))
else:
for layer in child['children']:
if layer.get("filename"):
layers.append(os.path.join(bg_folder,
layer.get("filename")).
replace("\\", "/"))
return layers
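For orientation, a quick usage sketch of the two helpers above; the layer names and the json path are invented, not taken from a real publish.

existing = ["Hero_lodMain_001", "Hero_lodMain_002", "Tree_lodMain_001"]
get_unique_layer_name(existing, "Hero_lodMain")    # -> "Hero_lodMain_003"
get_unique_layer_name(existing, "Castle_lodMain")  # -> "Castle_lodMain_001"

# get_background_layers("C:/publish/bgMain/v001/bgMain.json") walks the json's
# 'children' (and nested 'children') in order and returns absolute,
# forward-slashed image paths, e.g.:
# ["C:/publish/bgMain/v001/sky.png", "C:/publish/bgMain/v001/hills.png"]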

View file

@ -4,13 +4,31 @@ from pype.modules.websocket_server import WebSocketServer
Used anywhere the solution needs to call client methods.
"""
import json
from collections import namedtuple
import attr
import logging
log = logging.getLogger(__name__)
@attr.s
class AEItem(object):
"""
Object denoting an Item in AE. Each item is created in AE by any Loader,
but contains the same fields, which are used in later processing.
"""
# metadata
id = attr.ib() # id created by AE, could be used for querying
name = attr.ib() # name of item
item_type = attr.ib(default=None) # item type (footage, folder, comp)
# all imported elements, single for
# regular image, array for Backgrounds
members = attr.ib(factory=list)
workAreaStart = attr.ib(default=None)
workAreaDuration = attr.ib(default=None)
frameRate = attr.ib(default=None)
file_name = attr.ib(default=None)
class AfterEffectsServerStub():
"""
Stub for calling functions on the client (AfterEffects js) side.
@ -34,22 +52,14 @@ class AfterEffectsServerStub():
('AfterEffects.open', path=path)
)
def read(self, layer, layers_meta=None):
"""
Parses layer metadata from Label field of active document
Args:
layer: <namedTuple Layer("id":XX, "name":"YYY")
layers_meta: full list from Headline (for performance in loops)
Returns:
"""
if layers_meta is None:
layers_meta = self.get_metadata()
return layers_meta.get(str(layer.id))
def get_metadata(self):
"""
Get stored JSON with metadata from AE.Metadata.Label field
Get complete stored JSON with metadata from AE.Metadata.Label
field.
It contains containers loaded by any Loader OR instances created
by Creator.
Returns:
(dict)
"""
@ -57,54 +67,85 @@ class AfterEffectsServerStub():
('AfterEffects.get_metadata')
)
try:
layers_data = json.loads(res)
metadata = json.loads(res)
except json.decoder.JSONDecodeError:
raise ValueError("Unparsable metadata {}".format(res))
return layers_data or {}
return metadata or []
def imprint(self, layer, data, all_layers=None, layers_meta=None):
def read(self, item, layers_meta=None):
"""
Save layer metadata to Label field of metadata of active document
Parses item metadata from Label field of active document.
Used as filter to pick metadata for specific 'item' only.
Args:
layer (namedtuple): Layer("id": XXX, "name":'YYY')
item (AEItem): pulled info from AE
layers_meta (dict): full list from Headline
(load and inject for better performance in loops)
Returns:
(dict):
"""
if layers_meta is None:
layers_meta = self.get_metadata()
for item_meta in layers_meta:
if 'container' in item_meta.get('id') and \
str(item.id) == str(item_meta.get('members')[0]):
return item_meta
log.debug("Couldn't find layer metadata")
def imprint(self, item, data, all_items=None, items_meta=None):
"""
Save item metadata to the Label metadata field of the active document
Args:
item (AEItem):
data(string): json representation for a single item
all_layers (list of namedtuples): for performance, could be
all_items (list of item): for performance, could be
injected for usage in loop, if not, single call will be
triggered
layers_meta(string): json representation from Headline
items_meta(string): json representation from Headline
(for performance - provide only if imprint is in
loop - value should be same)
Returns: None
"""
if not layers_meta:
layers_meta = self.get_metadata()
if not items_meta:
items_meta = self.get_metadata()
# json.dumps stores integer ids as strings, so
# anticipating it here.
if str(layer.id) in layers_meta and layers_meta[str(layer.id)]:
if data:
layers_meta[str(layer.id)].update(data)
result_meta = []
# fix existing
is_new = True
for item_meta in items_meta:
if item_meta.get('members') \
and str(item.id) == str(item_meta.get('members')[0]):
is_new = False
if data:
item_meta.update(data)
result_meta.append(item_meta)
else:
layers_meta.pop(str(layer.id))
else:
layers_meta[str(layer.id)] = data
result_meta.append(item_meta)
if is_new:
result_meta.append(data)
# Ensure only valid ids are stored.
if not all_layers:
if not all_items:
# loaders create FootageItem now
all_layers = self.get_items(comps=True,
folders=False,
footages=True)
item_ids = [int(item.id) for item in all_layers]
cleaned_data = {}
for id in layers_meta:
if int(id) in item_ids:
cleaned_data[id] = layers_meta[id]
all_items = self.get_items(comps=True,
folders=True,
footages=True)
item_ids = [int(item.id) for item in all_items]
cleaned_data = []
for meta in result_meta:
# for creation of instance OR loaded container
if 'instance' in meta.get('id') or \
int(meta.get('members')[0]) in item_ids:
cleaned_data.append(meta)
payload = json.dumps(cleaned_data, indent=4)
self.websocketserver.call(self.client.call
('AfterEffects.imprint', payload=payload)
)
('AfterEffects.imprint', payload=payload))
def get_active_document_full_name(self):
"""
@ -130,8 +171,10 @@ class AfterEffectsServerStub():
"""
Get all items from Project panel according to arguments.
There are multiple different types:
CompItem (could have multiple layers - source for Creator)
FolderItem (collection type, currently not used
CompItem (could have multiple layers - source for Creator,
will be rendered)
FolderItem (collection type, currently used for Background
loading)
FootageItem (imported file - created by Loader)
Args:
comps (bool): return CompItems
@ -218,15 +261,15 @@ class AfterEffectsServerStub():
item_id=item.id,
item_name=item_name))
def delete_item(self, item):
""" Deletes FootageItem with new file
def delete_item(self, item_id):
""" Deletes *Item in a file
Args:
item (dict):
item_id (int):
"""
self.websocketserver.call(self.client.call
('AfterEffects.delete_item',
item_id=item.id
item_id=item_id
))
def is_saved(self):
@ -340,12 +383,95 @@ class AfterEffectsServerStub():
def close(self):
self.client.close()
def import_background(self, comp_id, comp_name, files):
"""
Imports background images into an existing or new composition.
If comp_id is not provided, a new composition is created and its basic
values (width, height, frame rate) are taken from the first imported
image.
All images from the background json are imported as FootageItems and a
separate layer is created for each of them under the composition.
Order of imported 'files' is important.
Args:
comp_id (int): id of existing composition (null if new)
comp_name (str): used when new composition
files (list): list of absolute paths to import and
add as layers
Returns:
(AEItem): object with id of created folder, all imported images
"""
res = self.websocketserver.call(self.client.call
('AfterEffects.import_background',
comp_id=comp_id,
comp_name=comp_name,
files=files))
records = self._to_records(res)
if records:
return records.pop()
log.debug("Import background failed.")
def reload_background(self, comp_id, comp_name, files):
"""
Reloads background images into an existing composition.
For safety, it actually deletes the complete folder with the imported
images and the created composition.
Args:
comp_id (int): id of existing composition to be overwritten
comp_name (str): new name of composition (could be same as old
if version up only)
files (list): list of absolute paths to import and
add as layers
Returns:
(AEItem): object with id of created folder, all imported images
"""
res = self.websocketserver.call(self.client.call
('AfterEffects.reload_background',
comp_id=comp_id,
comp_name=comp_name,
files=files))
records = self._to_records(res)
if records:
return records.pop()
log.debug("Reload of background failed.")
def add_item_as_layer(self, comp_id, item_id):
"""
Adds already imported FootageItem ('item_id') as a new
layer to composition ('comp_id').
Args:
comp_id (int): id of target composition
item_id (int): FootageItem.id of an already imported item
(the comp was already found previously)
"""
res = self.websocketserver.call(self.client.call
('AfterEffects.add_item_as_layer',
comp_id=comp_id,
item_id=item_id))
records = self._to_records(res)
if records:
return records.pop()
log.debug("Adding new layer failed.")
def _to_records(self, res):
"""
Converts string json representation into list of named tuples for
Converts string json representation into list of AEItem for
dot notation access to work.
Returns: <list of named tuples>
res(string): - json representation
Returns: <list of AEItem>
res(string): - json representation
"""
if not res:
return []
@ -358,9 +484,19 @@ class AfterEffectsServerStub():
return []
ret = []
# convert to namedtuple to use dot donation
if isinstance(layers_data, dict): # TODO refactore
# convert to AEItem to use dot notation
if isinstance(layers_data, dict):
layers_data = [layers_data]
for d in layers_data:
ret.append(namedtuple('Layer', d.keys())(*d.values()))
# currently implemented and expected fields
item = AEItem(d.get('id'),
d.get('name'),
d.get('type'),
d.get('members'),
d.get('workAreaStart'),
d.get('workAreaDuration'),
d.get('frameRate'),
d.get('file_name'))
ret.append(item)
return ret
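To make the refactor easier to follow, a hedged sketch of the two metadata shapes the stub moves between; the ids, names and member ids below are illustrative only, not taken from a real scene.

# Before: a dict keyed by the AE layer id (json keys are strings).
old_metadata = {
    "101": {"id": "pyblish.avalon.instance", "subset": "renderMain"}
}

# After: a flat list of dicts; the AE item id(s) live in 'members'.
# read() returns the container entry whose members[0] matches the queried
# item; imprint() updates or appends entries, then keeps instance entries
# plus container entries whose first member still exists in the project.
new_metadata = [
    {"id": "pyblish.avalon.instance", "subset": "renderMain", "members": [101]},
    {"id": "pyblish.avalon.container", "name": "bgMain_001", "members": [205, 206]}
]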

View file

@ -35,7 +35,7 @@ class CreateRender(api.Creator):
if self.name.lower() == item.name.lower():
self._show_msg(txt)
return False
self.data["members"] = [item.id]
stub.imprint(item, self.data)
stub.set_label_color(item.id, 14) # Cyan options 0 - 16
stub.rename_item(item, self.data["subset"])

View file

@ -0,0 +1,99 @@
import re
from avalon import api, aftereffects
from pype.lib import get_background_layers, get_unique_layer_name
stub = aftereffects.stub()
class BackgroundLoader(api.Loader):
"""
Load images from the Background family.
Creates a separate folder for each background with all images imported
from the background json AND an automatically created composition with
layers, one layer per image.
For each load a container is created and stored in the project (.aep)
metadata.
"""
families = ["background"]
representations = ["json"]
def load(self, context, name=None, namespace=None, data=None):
items = stub.get_items(comps=True)
existing_items = [layer.name for layer in items]
comp_name = get_unique_layer_name(
existing_items,
"{}_{}".format(context["asset"]["name"], name))
layers = get_background_layers(self.fname)
comp = stub.import_background(None, comp_name, layers)
if not comp:
self.log.warning(
"Import background failed.")
self.log.warning("Check host app for alert error.")
return
self[:] = [comp]
namespace = namespace or comp_name
return aftereffects.containerise(
name,
namespace,
comp,
context,
self.__class__.__name__
)
def update(self, container, representation):
""" Switch asset or change version """
context = representation.get("context", {})
_ = container.pop("layer")
# without iterator number (_001, 002...)
namespace_from_container = re.sub(r'_\d{3}$', '',
container["namespace"])
comp_name = "{}_{}".format(context["asset"], context["subset"])
# switching assets
if namespace_from_container != comp_name:
items = stub.get_items(comps=True)
existing_items = [layer.name for layer in items]
comp_name = get_unique_layer_name(
existing_items,
"{}_{}".format(context["asset"], context["subset"]))
else: # switching version - keep same name
comp_name = container["namespace"]
path = api.get_representation_path(representation)
layers = get_background_layers(path)
comp = stub.reload_background(container["members"][1],
comp_name,
layers)
# update container
container["representation"] = str(representation["_id"])
container["name"] = context["subset"]
container["namespace"] = comp_name
container["members"] = comp.members
stub.imprint(comp, container)
def remove(self, container):
"""
Removes element from scene: deletes layer + removes from file
metadata.
Args:
container (dict): container to be removed - used to get layer_id
"""
print("!!!! container:: {}".format(container))
layer = container.pop("layer")
stub.imprint(layer, {})
stub.delete_item(layer.id)
def switch(self, container, representation):
self.update(container, representation)

View file

@ -1,5 +1,5 @@
from avalon import api, aftereffects
from pype.plugins import lib
from pype import lib
import re
stub = aftereffects.stub()
@ -21,9 +21,10 @@ class FileLoader(api.Loader):
representations = ["*"]
def load(self, context, name=None, namespace=None, data=None):
comp_name = lib.get_unique_layer_name(stub.get_items(comps=True),
context["asset"]["name"],
name)
layers = stub.get_items(comps=True, folders=True, footages=True)
existing_layers = [layer.name for layer in layers]
comp_name = lib.get_unique_layer_name(
existing_layers, "{}_{}".format(context["asset"]["name"], name))
import_options = {}
@ -77,9 +78,11 @@ class FileLoader(api.Loader):
layer_name = "{}_{}".format(context["asset"], context["subset"])
# switching assets
if namespace_from_container != layer_name:
layer_name = lib.get_unique_layer_name(stub.get_items(comps=True),
context["asset"],
context["subset"])
layers = stub.get_items(comps=True)
existing_layers = [layer.name for layer in layers]
layer_name = lib.get_unique_layer_name(
existing_layers,
"{}_{}".format(context["asset"], context["subset"]))
else: # switching version - keep same name
layer_name = container["namespace"]
path = api.get_representation_path(representation)

View file

@ -33,12 +33,16 @@ class CollectAERender(abstract_collect_render.AbstractCollectRender):
compositions = aftereffects.stub().get_items(True)
compositions_by_id = {item.id: item for item in compositions}
for item_id, inst in aftereffects.stub().get_metadata().items():
for inst in aftereffects.stub().get_metadata():
schema = inst.get('schema')
# loaded asset container, skip it
if schema and 'container' in schema:
continue
if not inst["members"]:
raise ValueError("Couldn't find id, unable to publish. " +
"Please recreate instance.")
item_id = inst["members"][0]
work_area_info = aftereffects.stub().get_work_area(int(item_id))
frameStart = work_area_info.workAreaStart
@ -110,7 +114,10 @@ class CollectAERender(abstract_collect_render.AbstractCollectRender):
# pull file name from Render Queue Output module
render_q = aftereffects.stub().get_render_info()
if not render_q:
raise ValueError("No file extension set in Render Queue")
_, ext = os.path.splitext(os.path.basename(render_q.file_name))
base_dir = self._get_output_dir(render_instance)
expected_files = []
if "#" not in render_q.file_name: # single frame (mov)W

View file

@ -105,3 +105,13 @@ class AfterEffectsSubmitDeadline(abstract_submit_deadline.AbstractSubmitDeadline
deadline_plugin_info.Output = render_path.replace("\\", "/")
return attr.asdict(deadline_plugin_info)
def from_published_scene(self):
""" Do not overwrite expected files.
'Use published' is set to True, so rendering will be triggered
from the published scene (in the 'publish' folder). The default
implementation of the abstract class renames expected (i.e. rendered)
files accordingly, which is not needed here.
"""
return super().from_published_scene(False)

View file

@ -348,6 +348,8 @@ class ExtractReview(pyblish.api.InstancePlugin):
+ 1
)
duration_seconds = float(output_frames_len / temp_data["fps"])
if temp_data["input_is_sequence"]:
# Set start frame of input sequence (just frame in filename)
# - definition of input filepath
@ -375,33 +377,39 @@ class ExtractReview(pyblish.api.InstancePlugin):
# Change output's duration and start point if should not contain
# handles
start_sec = 0
if temp_data["without_handles"] and temp_data["handles_are_set"]:
# Set start time without handles
# - check if handle_start is bigger than 0 to avoid zero division
if temp_data["handle_start"] > 0:
start_sec = float(temp_data["handle_start"]) / temp_data["fps"]
ffmpeg_input_args.append("-ss {:0.2f}".format(start_sec))
ffmpeg_input_args.append("-ss {:0.10f}".format(start_sec))
# Set output duration in seconds
duration_sec = float(output_frames_len / temp_data["fps"])
ffmpeg_output_args.append("-t {:0.2f}".format(duration_sec))
ffmpeg_output_args.append("-t {:0.10}".format(duration_seconds))
# Set frame range of output when input or output is sequence
elif temp_data["input_is_sequence"] or temp_data["output_is_sequence"]:
elif temp_data["output_is_sequence"]:
ffmpeg_output_args.append("-frames:v {}".format(output_frames_len))
# Add duration of an input sequence if output is video
if (
temp_data["input_is_sequence"]
and not temp_data["output_is_sequence"]
):
ffmpeg_input_args.append("-to {:0.10f}".format(
duration_seconds + start_sec
))
# Add video/image input path
ffmpeg_input_args.append(
"-i \"{}\"".format(temp_data["full_input_path"])
)
# Use shortest input
ffmpeg_output_args.append("-shortest")
# Add audio arguments if there are any. Skipped when the output is images.
if not temp_data["output_ext_is_image"] and temp_data["with_audio"]:
audio_in_args, audio_filters, audio_out_args = self.audio_args(
instance, temp_data
instance, temp_data, duration_seconds
)
ffmpeg_input_args.extend(audio_in_args)
ffmpeg_audio_filters.extend(audio_filters)
@ -616,7 +624,7 @@ class ExtractReview(pyblish.api.InstancePlugin):
self.log.debug("Input path {}".format(full_input_path))
self.log.debug("Output path {}".format(full_output_path))
def audio_args(self, instance, temp_data):
def audio_args(self, instance, temp_data, duration_seconds):
"""Prepares FFMpeg arguments for audio inputs."""
audio_in_args = []
audio_filters = []
@ -639,11 +647,19 @@ class ExtractReview(pyblish.api.InstancePlugin):
audio_in_args.append(
"-ss {}".format(offset_seconds)
)
elif offset_seconds < 0:
audio_in_args.append(
"-itsoffset {}".format(abs(offset_seconds))
)
# Audio duration is offset from `-ss`
audio_duration = duration_seconds + offset_seconds
# Set audio duration
audio_in_args.append("-to {:0.10f}".format(audio_duration))
# Add audio input path
audio_in_args.append("-i \"{}\"".format(audio["filename"]))
# NOTE: These were changed from input to output arguments.
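As a rough, hypothetical illustration (frame counts, offsets and paths invented) of the argument lists these hunks end up assembling for a sequence-to-video review with handles trimmed:

fps = 25.0
handle_start = 2
output_frames_len = 50
start_sec = handle_start / fps                      # seek past handle frames
duration_sec = output_frames_len / fps

ffmpeg_input_args = [
    "-ss {:0.10f}".format(start_sec),               # skip handles on the input
    "-to {:0.10f}".format(duration_sec + start_sec),  # stop reading the input sequence here
    '-i "C:/work/render/beauty.%04d.exr"',
]
ffmpeg_output_args = [
    "-t {:0.10f}".format(duration_sec),             # clamp output duration
    "-shortest",                                    # end with the shortest stream
    '"C:/work/review/beauty_h264.mp4"',
]

audio_offset = 1.5                                  # positive offset -> "-ss", negative -> "-itsoffset"
audio_in_args = [
    "-ss {}".format(audio_offset),
    "-to {:0.10f}".format(duration_sec + audio_offset),
    '-i "C:/work/audio/shot010.wav"',
]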

View file

@ -38,7 +38,7 @@ class ExtractPalette(pype.api.Extractor):
os.path.basename(palette_file)
.split(".plt")[0] + "_swatches.png"
)
self.log.info(f"Temporary humbnail path {tmp_thumb_path}")
self.log.info(f"Temporary thumbnail path {tmp_thumb_path}")
palette_version = str(instance.data.get("version")).zfill(3)
@ -52,6 +52,11 @@ class ExtractPalette(pype.api.Extractor):
palette_version,
palette_file,
tmp_thumb_path)
except OSError as e:
# FIXME: this happens on Mac where PIL cannot access fonts
# for some reason.
self.log.warning("Thumbnail generation failed")
self.log.warning(e)
except ValueError:
self.log.error("Unsupported palette type for thumbnail.")

View file

@ -31,7 +31,11 @@ class ExtractTemplate(pype.api.Extractor):
for backdrop in self.get_backdrops(dependency):
backdrops[backdrop["title"]["text"]] = backdrop
unique_backdrops = [backdrops[x] for x in set(backdrops.keys())]
if not unique_backdrops:
self.log.error(("No backdrops detected for template. "
"Please move template instance node onto "
"some backdrop and try again."))
raise AssertionError("No backdrop detected")
# Get non-connected nodes within backdrops.
all_nodes = instance.context.data.get("allNodes")
for node in [x for x in all_nodes if x not in dependencies]:

View file

@ -1,26 +0,0 @@
import re
def get_unique_layer_name(layers, asset_name, subset_name):
"""
Gets all layer names and if 'name' is present in them, increases
suffix by 1 (eg. creates unique layer name - for Loader)
Args:
layers (list): of namedtuples, expects 'name' field present
asset_name (string): in format asset_subset (Hero)
subset_name (string): (LOD)
Returns:
(string): name_00X (without version)
"""
name = "{}_{}".format(asset_name, subset_name)
names = {}
for layer in layers:
layer_name = re.sub(r'_\d{3}$', '', layer.name)
if layer_name in names.keys():
names[layer_name] = names[layer_name] + 1
else:
names[layer_name] = 1
occurrences = names.get(name, 0)
return "{}_{:0>3d}".format(name, occurrences + 1)

View file

@ -104,6 +104,7 @@ class LoadImage(pipeline.Loader):
def _remove_layers(self, layer_ids, layers=None):
if not layer_ids:
self.log.warning("Got empty layer ids list.")
return
if layers is None:
@ -117,6 +118,7 @@ class LoadImage(pipeline.Loader):
layer_ids_to_remove.append(layer_id)
if not layer_ids_to_remove:
self.log.warning("No layers to delete.")
return
george_script_lines = []
@ -128,12 +130,14 @@ class LoadImage(pipeline.Loader):
def remove(self, container):
layer_ids = self.layer_ids_from_container(container)
self.log.warning("Layers to delete {}".format(layer_ids))
self._remove_layers(layer_ids)
current_containers = pipeline.ls()
pop_idx = None
for idx, cur_con in enumerate(current_containers):
if cur_con["objectName"] == container["objectName"]:
cur_con_layer_ids = self.layer_ids_from_container(cur_con)
if cur_con_layer_ids == layer_ids:
pop_idx = idx
break

View file

@ -1,6 +1,8 @@
import os
import json
import pyblish.api
import avalon.api
from avalon.tvpaint import pipeline, lib
@ -10,26 +12,64 @@ class CollectWorkfileData(pyblish.api.ContextPlugin):
hosts = ["tvpaint"]
def process(self, context):
current_project_id = lib.execute_george("tv_projectcurrentid")
lib.execute_george("tv_projectselect {}".format(current_project_id))
# Collect and store current context to have reference
current_context = {
"project": avalon.api.Session["AVALON_PROJECT"],
"asset": avalon.api.Session["AVALON_ASSET"],
"task": avalon.api.Session["AVALON_TASK"]
}
context.data["previous_context"] = current_context
self.log.debug("Current context is: {}".format(current_context))
# Collect context from workfile metadata
self.log.info("Collecting workfile context")
workfile_context = pipeline.get_current_workfile_context()
if workfile_context:
# Change current context with context from workfile
key_map = (
("AVALON_ASSET", "asset"),
("AVALON_TASK", "task")
)
for env_key, key in key_map:
avalon.api.Session[env_key] = workfile_context[key]
os.environ[env_key] = workfile_context[key]
else:
# Handle older workfiles or workfiles without metadata
self.log.warning(
"Workfile does not contain information about context."
" Using current Session context."
)
workfile_context = current_context.copy()
context.data["workfile_context"] = workfile_context
self.log.info("Context changed to: {}".format(workfile_context))
# Collect instances
self.log.info("Collecting instance data from workfile")
instance_data = pipeline.list_instances()
context.data["workfileInstances"] = instance_data
self.log.debug(
"Instance data:\"{}".format(json.dumps(instance_data, indent=4))
)
context.data["workfileInstances"] = instance_data
# Collect information about layers
self.log.info("Collecting layers data from workfile")
layers_data = lib.layers_data()
context.data["layersData"] = layers_data
self.log.debug(
"Layers data:\"{}".format(json.dumps(layers_data, indent=4))
)
context.data["layersData"] = layers_data
# Collect information about groups
self.log.info("Collecting groups data from workfile")
group_data = lib.groups_data()
context.data["groupsData"] = group_data
self.log.debug(
"Group data:\"{}".format(json.dumps(group_data, indent=4))
)
context.data["groupsData"] = group_data
self.log.info("Collecting scene data from workfile")
workfile_info_parts = lib.execute_george("tv_projectinfo").split(" ")

View file

@ -0,0 +1,37 @@
import os
import pyblish.api
class ValidateWorkfileProjectName(pyblish.api.ContextPlugin):
"""Validate project name stored in workfile metadata.
It is not possible to publish from a different project than the one set
in the environment variable "AVALON_PROJECT".
"""
label = "Validate Workfile Project Name"
order = pyblish.api.ValidatorOrder
def process(self, context):
workfile_context = context.data["workfile_context"]
workfile_project_name = workfile_context["project"]
env_project_name = os.environ["AVALON_PROJECT"]
if workfile_project_name == env_project_name:
self.log.info((
"Both workfile project and environment project are same. {}"
).format(env_project_name))
return
# Raise an error
raise AssertionError((
# Short message
"Workfile from different Project ({})."
# Description what's wrong
" It is not possible to publish when TVPaint was launched in"
"context of different project. Current context project is \"{}\"."
" Launch TVPaint in context of project \"{}\" and then publish."
).format(
workfile_project_name,
env_project_name,
workfile_project_name,
))

View file

@ -1 +1 @@
__version__ = "2.14.1"
__version__ = "2.14.2"