removed webpublisher host

This commit is contained in:
Jakub Trllo 2024-02-05 17:30:54 +01:00
parent 052b77449a
commit 1bf3decc1c
17 changed files with 0 additions and 2746 deletions


@ -1,6 +0,0 @@
Webpublisher
-------------
Plugins meant for processing Webpublisher batches.
Publishing is triggered by calling `openpype_console modules webpublisher publish` with appropriate arguments.
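For example, a hypothetical invocation (values are illustrative; the options match the `publish` command defined in the addon):

    openpype_console modules webpublisher publish /data/batches/my_batch --project MyProject --user artist@example.com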


@ -1,10 +0,0 @@
from .addon import (
WebpublisherAddon,
WEBPUBLISHER_ROOT_DIR,
)
__all__ = (
"WebpublisherAddon",
"WEBPUBLISHER_ROOT_DIR",
)


@ -1,102 +0,0 @@
import os
from ayon_core.modules import click_wrap, OpenPypeModule, IHostAddon
WEBPUBLISHER_ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
class WebpublisherAddon(OpenPypeModule, IHostAddon):
name = "webpublisher"
host_name = "webpublisher"
def initialize(self, module_settings):
self.enabled = True
def headless_publish(self, log, close_plugin_name=None, is_test=False):
"""Runs publish in a opened host with a context.
Close Python process at the end.
"""
from .lib import get_webpublish_conn, publish_and_log, publish_in_test
if is_test:
publish_in_test(log, close_plugin_name)
return
dbcon = get_webpublish_conn()
_id = os.environ.get("BATCH_LOG_ID")
if not _id:
log.warning("Unable to store log records, "
"batch will be unfinished!")
return
publish_and_log(
dbcon, _id, log, close_plugin_name=close_plugin_name
)
def cli(self, click_group):
click_group.add_command(cli_main.to_click_obj())
@click_wrap.group(
WebpublisherAddon.name,
help="Webpublisher related commands.")
def cli_main():
pass
@cli_main.command()
@click_wrap.argument("path")
@click_wrap.option("-u", "--user", help="User email address")
@click_wrap.option("-p", "--project", help="Project")
@click_wrap.option("-t", "--targets", help="Targets", default=None,
multiple=True)
def publish(project, path, user=None, targets=None):
"""Start publishing (Inner command).
Publish collects json from paths provided as an argument.
More than one path is allowed.
"""
from .publish_functions import cli_publish
cli_publish(project, path, user, targets)
@cli_main.command()
@click_wrap.argument("path")
@click_wrap.option("-p", "--project", help="Project")
@click_wrap.option("-h", "--host", help="Host")
@click_wrap.option("-u", "--user", help="User email address")
@click_wrap.option("-t", "--targets", help="Targets", default=None,
multiple=True)
def publishfromapp(project, path, host, user=None, targets=None):
"""Start publishing through application (Inner command).
Publish collects json from paths provided as an argument.
More than one path is allowed.
"""
from .publish_functions import cli_publish_from_app
cli_publish_from_app(project, path, host, user, targets)
@cli_main.command()
@click_wrap.option("-e", "--executable", help="Executable")
@click_wrap.option("-u", "--upload_dir", help="Upload dir")
@click_wrap.option("-h", "--host", help="Host", default=None)
@click_wrap.option("-p", "--port", help="Port", default=None)
def webserver(executable, upload_dir, host=None, port=None):
"""Start service for communication with Webpublish Front end.
OP must be congigured on a machine, eg. OPENPYPE_MONGO filled AND
FTRACK_BOT_API_KEY provided with api key from Ftrack.
Expect "pype.club" user created on Ftrack.
"""
from .webserver_service import run_webserver
run_webserver(executable, upload_dir, host, port)
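A minimal sketch of the environment the `webserver` command above expects before it can reach the database and Ftrack; the variable names come from the docstring and from `lib.get_webpublish_conn`, all values are illustrative:

    import os

    os.environ["OPENPYPE_MONGO"] = "mongodb://localhost:27017"
    os.environ["OPENPYPE_DATABASE_NAME"] = "openpype"      # read by get_webpublish_conn
    os.environ["FTRACK_BOT_API_KEY"] = "<ftrack api key>"  # illustrative placeholder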


@ -1,23 +0,0 @@
import os
import logging
import pyblish.api
from ayon_core.host import HostBase
from ayon_core.hosts.webpublisher import WEBPUBLISHER_ROOT_DIR
log = logging.getLogger("ayon_core.hosts.webpublisher")
class WebpublisherHost(HostBase):
name = "webpublisher"
def install(self):
print("Installing Pype config...")
pyblish.api.register_host(self.name)
publish_plugin_dir = os.path.join(
WEBPUBLISHER_ROOT_DIR, "plugins", "publish"
)
pyblish.api.register_plugin_path(publish_plugin_dir)
self.log.info(publish_plugin_dir)
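A minimal sketch of how this host gets registered before a headless publish, mirroring what `cli_publish` in `publish_functions.py` does (import paths taken from that module):

    from ayon_core.pipeline import install_host
    from ayon_core.hosts.webpublisher.api import WebpublisherHost

    # Registers the "webpublisher" pyblish host and its publish plugin path.
    install_host(WebpublisherHost())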


@ -1,308 +0,0 @@
import os
from datetime import datetime
import collections
import json
from bson.objectid import ObjectId
import pyblish.util
import pyblish.api
from ayon_core.client.mongo import OpenPypeMongoConnection
from ayon_core.settings import get_project_settings
from ayon_core.lib import Logger
from ayon_core.lib.profiles_filtering import filter_profiles
ERROR_STATUS = "error"
IN_PROGRESS_STATUS = "in_progress"
REPROCESS_STATUS = "reprocess"
SENT_REPROCESSING_STATUS = "sent_for_reprocessing"
FINISHED_REPROCESS_STATUS = "republishing_finished"
FINISHED_OK_STATUS = "finished_ok"
log = Logger.get_logger(__name__)
def parse_json(path):
"""Parses json file at 'path' location
Returns:
(dict) or None if unparsable
Raises:
AssertionError if 'path' doesn't exist
"""
path = path.strip('\"')
assert os.path.isfile(path), (
"Path to json file doesn't exist. \"{}\"".format(path)
)
data = None
with open(path, "r") as json_file:
try:
data = json.load(json_file)
except Exception as exc:
log.error(
"Error loading json: {} - Exception: {}".format(path, exc)
)
return data
def get_batch_asset_task_info(ctx):
"""Parses context data from webpublisher's batch metadata
Returns:
(tuple): asset, task_name (Optional), task_type
"""
task_type = "default_task_type"
task_name = None
asset = None
if ctx["type"] == "task":
items = ctx["path"].split('/')
asset = items[-2]
task_name = ctx["name"]
task_type = ctx["attributes"]["type"]
else:
asset = ctx["name"]
return asset, task_name, task_type
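A minimal sketch of the two context shapes this helper handles; the field names follow the code above, the values are illustrative:

    # "task" entry: asset is the second-to-last item of the context path.
    task_ctx = {
        "type": "task",
        "path": "/shots/sh010/animation",
        "name": "animation",
        "attributes": {"type": "Animation"},
    }
    assert get_batch_asset_task_info(task_ctx) == ("sh010", "animation", "Animation")

    # Any other entry type: only the asset name is known.
    asset_ctx = {"type": "asset", "name": "sh010"}
    assert get_batch_asset_task_info(asset_ctx) == ("sh010", None, "default_task_type")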
def find_close_plugin(close_plugin_name, log):
if close_plugin_name:
plugins = pyblish.api.discover()
for plugin in plugins:
if plugin.__name__ == close_plugin_name:
return plugin
log.debug("Close plugin not found, app might not close.")
def publish_in_test(log, close_plugin_name=None):
"""Loops through all plugins, logs to console. Used for tests.
Args:
log (Logger)
close_plugin_name (Optional[str]): Name of plugin with responsibility
to close application.
"""
# Error exit as soon as any error occurs.
error_format = "Failed {plugin.__name__}: {error} -- {error.traceback}"
close_plugin = find_close_plugin(close_plugin_name, log)
for result in pyblish.util.publish_iter():
for record in result["records"]:
# Why do we log again? pyblish logger is logging to stdout...
log.info("{}: {}".format(result["plugin"].label, record.msg))
if not result["error"]:
continue
# QUESTION We don't break on error?
error_message = error_format.format(**result)
log.error(error_message)
if close_plugin: # close host app explicitly after error
context = pyblish.api.Context()
close_plugin().process(context)
def get_webpublish_conn():
"""Get connection to OP 'webpublishes' collection."""
mongo_client = OpenPypeMongoConnection.get_mongo_client()
database_name = os.environ["OPENPYPE_DATABASE_NAME"]
return mongo_client[database_name]["webpublishes"]
def start_webpublish_log(dbcon, batch_id, user):
"""Start new log record for 'batch_id'
Args:
dbcon (OpenPypeMongoConnection)
batch_id (str)
user (str)
Returns
(ObjectId) from DB
"""
return dbcon.insert_one({
"batch_id": batch_id,
"start_date": datetime.now(),
"user": user,
"status": IN_PROGRESS_STATUS,
"progress": 0 # integer 0-100, percentage
}).inserted_id
def publish_and_log(dbcon, _id, log, close_plugin_name=None, batch_id=None):
"""Loops through all plugins, logs ok and fails into OP DB.
Args:
dbcon (OpenPypeMongoConnection)
_id (str) - id of current job in DB
log (ayon_core.lib.Logger)
batch_id (str) - id sent from frontend
close_plugin_name (str): name of plugin with responsibility to
close host app
"""
# Error exit as soon as any error occurs.
error_format = "Failed {plugin.__name__}: {error} -- {error.traceback}\n"
error_format += "-" * 80 + "\n"
close_plugin = find_close_plugin(close_plugin_name, log)
if isinstance(_id, str):
_id = ObjectId(_id)
log_lines = []
processed = 0
log_every = 5
for result in pyblish.util.publish_iter():
for record in result["records"]:
log_lines.append("{}: {}".format(
result["plugin"].label, record.msg))
processed += 1
if result["error"]:
log.error(error_format.format(**result))
log_lines = [error_format.format(**result)] + log_lines
dbcon.update_one(
{"_id": _id},
{"$set":
{
"finish_date": datetime.now(),
"status": ERROR_STATUS,
"log": os.linesep.join(log_lines)
}}
)
if close_plugin: # close host app explicitly after error
context = pyblish.api.Context()
close_plugin().process(context)
return
elif processed % log_every == 0:
# pyblish returns progress in 0.0 - 2.0
progress = min(round(result["progress"] / 2 * 100), 99)
dbcon.update_one(
{"_id": _id},
{"$set":
{
"progress": progress,
"log": os.linesep.join(log_lines)
}}
)
# final update
if batch_id:
dbcon.update_many(
{"batch_id": batch_id, "status": SENT_REPROCESSING_STATUS},
{
"$set":
{
"finish_date": datetime.now(),
"status": FINISHED_REPROCESS_STATUS,
}
}
)
dbcon.update_one(
{"_id": _id},
{
"$set":
{
"finish_date": datetime.now(),
"status": FINISHED_OK_STATUS,
"progress": 100,
"log": os.linesep.join(log_lines)
}
}
)
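A hedged sketch of a single 'webpublishes' record over its lifetime, combining the fields written by `start_webpublish_log` with the updates made above (all values are illustrative):

    from datetime import datetime

    record = {
        "batch_id": "my_batch_id",
        "user": "artist@example.com",
        "status": "finished_ok",                 # one of the *_STATUS constants above
        "progress": 100,                         # integer 0-100
        "start_date": datetime(2024, 2, 5, 17, 30),
        "finish_date": datetime(2024, 2, 5, 17, 45),
        "log": "CollectBatchData: ...",          # joined log lines
    }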
def fail_batch(_id, dbcon, msg):
"""Set current batch as failed as there is some problem.
Raises:
ValueError
"""
dbcon.update_one(
{"_id": _id},
{"$set":
{
"finish_date": datetime.now(),
"status": ERROR_STATUS,
"log": msg
}}
)
raise ValueError(msg)
def find_variant_key(application_manager, host):
"""Searches for latest installed variant for 'host'
Args:
application_manager (ApplicationManager)
host (str)
Returns
(string) (optional)
Raises:
(ValueError) if no variant found
"""
app_group = application_manager.app_groups.get(host)
if not app_group or not app_group.enabled:
raise ValueError("No application {} configured".format(host))
found_variant_key = None
# finds most up-to-date variant if any installed
sorted_variants = collections.OrderedDict(
sorted(app_group.variants.items()))
for variant_key, variant in sorted_variants.items():
for executable in variant.executables:
if executable.exists():
found_variant_key = variant_key
if not found_variant_key:
raise ValueError("No executable for {} found".format(host))
return found_variant_key
def get_task_data(batch_dir):
"""Return parsed data from first task manifest.json
Used for `publishfromapp` command where batch contains only
single task with publishable workfile.
Returns:
(dict)
Throws:
(ValueError) if batch or task manifest not found or broken
"""
batch_data = parse_json(os.path.join(batch_dir, "manifest.json"))
if not batch_data:
raise ValueError(
"Cannot parse batch meta in {} folder".format(batch_dir))
task_dir_name = batch_data["tasks"][0]
task_data = parse_json(os.path.join(batch_dir, task_dir_name,
"manifest.json"))
if not task_data:
raise ValueError(
"Cannot parse batch meta in {} folder".format(task_data))
return task_data
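A hedged sketch of the batch folder layout `get_task_data` expects; folder names and manifest values are illustrative, the keys follow this file and the publish plugins:

    # /batches/my_batch/manifest.json:
    #     {"batch": "my_batch", "variant": "Main", "context": {...},
    #      "tasks": ["task_abc"]}
    # /batches/my_batch/task_abc/manifest.json:
    #     {"files": ["scene.psd"], "user": "artist@example.com",
    #      "context": {...}, "task": "task_abc", "batch": "my_batch"}
    task_data = get_task_data("/batches/my_batch")
    print(task_data["files"], task_data["user"])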
def get_timeout(project_name, host_name, task_type):
"""Returns timeout(seconds) from Setting profile."""
filter_data = {
"task_types": task_type,
"hosts": host_name
}
timeout_profiles = (get_project_settings(project_name)["webpublisher"]
["timeout_profiles"])
matching_item = filter_profiles(timeout_profiles, filter_data)
timeout = 3600
if matching_item:
timeout = matching_item["timeout"]
return timeout
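A hedged sketch of the `timeout_profiles` setting consumed above; the key names follow `filter_data` and the lookup in this function, the values are illustrative:

    timeout_profiles = [
        {"task_types": ["Animation"], "hosts": ["photoshop"], "timeout": 7200},
    ]
    # When no profile matches, get_timeout falls back to 3600 seconds.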


@ -1,93 +0,0 @@
"""Parses batch context from json and continues in publish process.
Provides:
context -> Loaded batch file.
- asset
- task (task name)
- taskType
- project_name
- variant
"""
import os
import pyblish.api
from ayon_core.pipeline import legacy_io
from openpype_modules.webpublisher.lib import (
parse_json,
get_batch_asset_task_info,
get_webpublish_conn,
IN_PROGRESS_STATUS
)
class CollectBatchData(pyblish.api.ContextPlugin):
"""Collect batch data from json stored in 'OPENPYPE_PUBLISH_DATA' env dir.
The directory must contain 'manifest.json' file where batch data should be
stored.
"""
# must be really early, context values are only in json file
order = pyblish.api.CollectorOrder - 0.495
label = "Collect batch data"
hosts = ["webpublisher"]
def process(self, context):
batch_dir = os.environ.get("OPENPYPE_PUBLISH_DATA")
assert batch_dir, (
"Missing `OPENPYPE_PUBLISH_DATA`")
assert os.path.exists(batch_dir), \
"Folder {} doesn't exist".format(batch_dir)
project_name = os.environ.get("AVALON_PROJECT")
if project_name is None:
raise AssertionError(
"Environment `AVALON_PROJECT` was not found."
"Could not set project `root` which may cause issues."
)
batch_data = parse_json(os.path.join(batch_dir, "manifest.json"))
context.data["batchDir"] = batch_dir
context.data["batchData"] = batch_data
asset_name, task_name, task_type = get_batch_asset_task_info(
batch_data["context"]
)
os.environ["AVALON_ASSET"] = asset_name
legacy_io.Session["AVALON_ASSET"] = asset_name
os.environ["AVALON_TASK"] = task_name
legacy_io.Session["AVALON_TASK"] = task_name
context.data["asset"] = asset_name
context.data["task"] = task_name
context.data["taskType"] = task_type
context.data["project_name"] = project_name
context.data["variant"] = batch_data["variant"]
self._set_ctx_path(batch_data)
def _set_ctx_path(self, batch_data):
dbcon = get_webpublish_conn()
batch_id = batch_data["batch"]
ctx_path = batch_data["context"]["path"]
self.log.info("ctx_path: {}".format(ctx_path))
self.log.info("batch_id: {}".format(batch_id))
if ctx_path and batch_id:
self.log.info("Updating log record")
dbcon.update_one(
{
"batch_id": batch_id,
"status": IN_PROGRESS_STATUS
},
{
"$set": {
"path": ctx_path
}
}
)


@ -1,27 +0,0 @@
"""
Requires:
Nothing
Provides:
Instance
"""
import pyblish.api
from pprint import pformat
class CollectFPS(pyblish.api.InstancePlugin):
"""
Adds fps from context to instance because of ExtractReview
"""
label = "Collect fps"
order = pyblish.api.CollectorOrder + 0.49
hosts = ["webpublisher"]
def process(self, instance):
instance_fps = instance.data.get("fps")
if instance_fps is None:
instance.data["fps"] = instance.context.data["fps"]
self.log.debug(f"instance.data: {pformat(instance.data)}")


@ -1,351 +0,0 @@
"""Create instances from batch data and continues in publish process.
Requires:
CollectBatchData
Provides:
context, instances -> All data from previous publishing process.
"""
import os
import clique
import tempfile
import math
import pyblish.api
from ayon_core.client import (
get_asset_by_name,
get_last_version_by_subset_name
)
from ayon_core.lib import (
prepare_template_data,
get_ffprobe_streams,
convert_ffprobe_fps_value,
)
from ayon_core.pipeline.create import get_subset_name
from openpype_modules.webpublisher.lib import parse_json
from ayon_core.pipeline.version_start import get_versioning_start
class CollectPublishedFiles(pyblish.api.ContextPlugin):
"""
This collector will try to find json files in the provided
`OPENPYPE_PUBLISH_DATA` directory. Those files _MUST_ share the same context.
This covers 'basic' webpublishes, e.g. an artist uses Standalone Publisher to
publish rendered frames or assets.
This is not applicable for 'studio' processing where a host application is
called to process the uploaded workfile and render frames itself.
For each task type, configure what properties the resulting instance should
have based on the uploaded files:
- uploading a sequence of 'png' files >> creates an instance of the 'render'
family; adding 'review' to 'Families' and 'Create review' to Tags will
produce a review.
Single files (>> image) and sequences (>> render) may be configured
differently.
"""
# must be really early, context values are only in json file
order = pyblish.api.CollectorOrder - 0.490
label = "Collect rendered frames"
hosts = ["webpublisher"]
targets = ["filespublish"]
# from Settings
task_type_to_family = []
sync_next_version = False # find max version to be published, use for all
def process(self, context):
batch_dir = context.data["batchDir"]
task_subfolders = []
for folder_name in os.listdir(batch_dir):
full_path = os.path.join(batch_dir, folder_name)
if os.path.isdir(full_path):
task_subfolders.append(full_path)
self.log.info("task_sub:: {}".format(task_subfolders))
project_name = context.data["project_name"]
asset_name = context.data["asset"]
asset_doc = get_asset_by_name(project_name, asset_name)
task_name = context.data["task"]
task_type = context.data["taskType"]
project_name = context.data["project_name"]
variant = context.data["variant"]
next_versions = []
instances = []
for task_dir in task_subfolders:
task_data = parse_json(os.path.join(task_dir,
"manifest.json"))
self.log.info("task_data:: {}".format(task_data))
is_sequence = len(task_data["files"]) > 1
first_file = task_data["files"][0]
_, extension = os.path.splitext(first_file)
extension = extension.lower()
family, families, tags = self._get_family(
self.task_type_to_family,
task_type,
is_sequence,
extension.replace(".", ''))
subset_name = get_subset_name(
family,
variant,
task_name,
asset_doc,
project_name=project_name,
host_name="webpublisher",
project_settings=context.data["project_settings"]
)
version = self._get_next_version(
project_name,
asset_doc,
task_name,
task_type,
family,
subset_name,
context
)
next_versions.append(version)
instance = context.create_instance(subset_name)
instance.data["asset"] = asset_name
instance.data["subset"] = subset_name
# set configurable result family
instance.data["family"] = family
# set configurable additional families
instance.data["families"] = families
instance.data["version"] = version
instance.data["stagingDir"] = tempfile.mkdtemp()
instance.data["source"] = "webpublisher"
# to convert from email provided into Ftrack username
instance.data["user_email"] = task_data["user"]
if is_sequence:
instance.data["representations"] = self._process_sequence(
task_data["files"], task_dir, tags
)
instance.data["frameStart"] = \
instance.data["representations"][0]["frameStart"]
instance.data["frameEnd"] = \
instance.data["representations"][0]["frameEnd"]
else:
frame_start = asset_doc["data"]["frameStart"]
instance.data["frameStart"] = frame_start
instance.data["frameEnd"] = asset_doc["data"]["frameEnd"]
instance.data["representations"] = self._get_single_repre(
task_dir, task_data["files"], tags
)
if family != 'workfile':
file_url = os.path.join(task_dir, task_data["files"][0])
try:
no_of_frames = self._get_number_of_frames(file_url)
if no_of_frames:
frame_end = (
int(frame_start) + math.ceil(no_of_frames)
)
frame_end = math.ceil(frame_end) - 1
instance.data["frameEnd"] = frame_end
self.log.debug("frameEnd:: {}".format(
instance.data["frameEnd"]))
except Exception:
self.log.warning("Unable to count frames duration.")
instance.data["handleStart"] = asset_doc["data"]["handleStart"]
instance.data["handleEnd"] = asset_doc["data"]["handleEnd"]
if "review" in tags:
first_file_path = os.path.join(task_dir, first_file)
instance.data["thumbnailSource"] = first_file_path
instances.append(instance)
self.log.info("instance.data:: {}".format(instance.data))
if not self.sync_next_version:
return
# overwrite specific version with same version for all
max_next_version = max(next_versions)
for inst in instances:
inst.data["version"] = max_next_version
self.log.debug("overwritten version:: {}".format(max_next_version))
def _get_subset_name(self, family, subset_template, task_name, variant):
fill_pairs = {
"variant": variant,
"family": family,
"task": task_name
}
subset = subset_template.format(**prepare_template_data(fill_pairs))
return subset
def _get_single_repre(self, task_dir, files, tags):
_, ext = os.path.splitext(files[0])
ext = ext.lower()
repre_data = {
"name": ext[1:],
"ext": ext[1:],
"files": files[0],
"stagingDir": task_dir,
"tags": tags
}
self.log.info("single file repre_data.data:: {}".format(repre_data))
return [repre_data]
def _process_sequence(self, files, task_dir, tags):
"""Prepare representation for sequence of files."""
collections, remainder = clique.assemble(files)
assert len(collections) == 1, \
"Too many collections in {}".format(files)
frame_start = list(collections[0].indexes)[0]
frame_end = list(collections[0].indexes)[-1]
ext = collections[0].tail
ext = ext.lower()
repre_data = {
"frameStart": frame_start,
"frameEnd": frame_end,
"name": ext[1:],
"ext": ext[1:],
"files": files,
"stagingDir": task_dir,
"tags": tags # configurable tags from Settings
}
self.log.info("sequences repre_data.data:: {}".format(repre_data))
return [repre_data]
def _get_family(self, settings, task_type, is_sequence, extension):
"""Guess family based on input data.
Args:
settings (dict): configuration per task_type
task_type (str): Animation|Art etc
is_sequence (bool): single file or sequence
extension (str): without '.'
Returns:
(family, [families], tags) tuple
Raises:
AssertionError: If no matching family is found.
"""
task_type = task_type.lower()
lower_cased_task_types = {}
for t_type, task in settings.items():
lower_cased_task_types[t_type.lower()] = task
task_obj = lower_cased_task_types.get(task_type)
assert task_obj, "No family configuration for '{}'".format(task_type)
found_family = None
families_config = []
# backward compatibility, should be removed pretty soon
if isinstance(task_obj, dict):
for family, config in task_obj.items():
config["result_family"] = family
families_config.append(config)
else:
families_config = task_obj
for config in families_config:
if is_sequence != config["is_sequence"]:
continue
extensions = config.get("extensions") or []
lower_extensions = set()
for ext in extensions:
if ext:
ext = ext.lower()
if ext.startswith("."):
ext = ext[1:]
lower_extensions.add(ext)
# all extensions setting
if not lower_extensions or extension in lower_extensions:
found_family = config["result_family"]
break
msg = "No family found for combination of " +\
"task_type: {}, is_sequence:{}, extension: {}".format(
task_type, is_sequence, extension)
assert found_family, msg
return (found_family,
config["families"],
config["tags"])
def _get_next_version(
self,
project_name,
asset_doc,
task_name,
task_type,
family,
subset_name,
context
):
"""Returns version number or 1 for 'asset' and 'subset'"""
version_doc = get_last_version_by_subset_name(
project_name,
subset_name,
asset_doc["_id"],
fields=["name"]
)
if version_doc:
version = int(version_doc["name"]) + 1
else:
version = get_versioning_start(
project_name,
"webpublisher",
task_name=task_name,
task_type=task_type,
family=family,
subset=subset_name,
project_settings=context.data["project_settings"]
)
return version
def _get_number_of_frames(self, file_url):
"""Return duration in frames"""
try:
streams = get_ffprobe_streams(file_url, self.log)
except Exception as exc:
raise AssertionError((
"FFprobe couldn't read information about input file: \"{}\"."
" Error message: {}"
).format(file_url, str(exc)))
first_video_stream = None
for stream in streams:
if "width" in stream and "height" in stream:
first_video_stream = stream
break
if first_video_stream:
nb_frames = stream.get("nb_frames")
if nb_frames:
try:
return int(nb_frames)
except ValueError:
self.log.warning(
"nb_frames {} not convertible".format(nb_frames))
duration = stream.get("duration")
frame_rate = convert_ffprobe_fps_value(
stream.get("r_frame_rate", '0/0')
)
self.log.debug("duration:: {} frame_rate:: {}".format(
duration, frame_rate))
try:
return float(duration) * float(frame_rate)
except ValueError:
self.log.warning(
"{} or {} cannot be converted".format(duration,
frame_rate))
self.log.warning("Cannot get number of frames")


@ -1,259 +0,0 @@
"""
Requires:
CollectTVPaintWorkfileData
Provides:
Instances
"""
import os
import re
import copy
import pyblish.api
from ayon_core.pipeline.create import get_subset_name
class CollectTVPaintInstances(pyblish.api.ContextPlugin):
label = "Collect TVPaint Instances"
order = pyblish.api.CollectorOrder + 0.2
hosts = ["webpublisher"]
targets = ["tvpaint_worker"]
workfile_family = "workfile"
workfile_variant = ""
review_family = "review"
review_variant = "Main"
render_pass_family = "renderPass"
render_layer_family = "renderLayer"
render_layer_pass_name = "beauty"
# Set by settings
# Regex must contain 'layer' and 'pass' groups which are extracted from
# the layer name when instances are created
layer_name_regex = r"(?P<layer>L[0-9]{3}_\w+)_(?P<pass>.+)"
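# e.g. "L010_character_beauty" -> render layer "L010_character", render pass "beauty"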
def process(self, context):
# Prepare compiled regex
layer_name_regex = re.compile(self.layer_name_regex)
layers_data = context.data["layersData"]
host_name = "tvpaint"
task_name = context.data.get("task")
asset_doc = context.data["assetEntity"]
project_doc = context.data["projectEntity"]
project_name = project_doc["name"]
new_instances = []
# Workfile instance
workfile_subset_name = get_subset_name(
self.workfile_family,
self.workfile_variant,
task_name,
asset_doc,
project_name,
host_name,
project_settings=context.data["project_settings"]
)
workfile_instance = self._create_workfile_instance(
context, workfile_subset_name
)
new_instances.append(workfile_instance)
# Review instance
review_subset_name = get_subset_name(
self.review_family,
self.review_variant,
task_name,
asset_doc,
project_name,
host_name,
project_settings=context.data["project_settings"]
)
review_instance = self._create_review_instance(
context, review_subset_name
)
new_instances.append(review_instance)
# Get render layers and passes from TVPaint layers
# - it's based on regex extraction
layers_by_layer_and_pass = {}
for layer in layers_data:
# Filter only visible layers
if not layer["visible"]:
continue
result = layer_name_regex.search(layer["name"])
# Layer name not matching layer name regex
# should raise an exception?
if result is None:
continue
render_layer = result.group("layer")
render_pass = result.group("pass")
render_pass_mapping = layers_by_layer_and_pass.get(
render_layer
)
if render_pass_mapping is None:
render_pass_mapping = {}
layers_by_layer_and_pass[render_layer] = render_pass_mapping
if render_pass not in render_pass_mapping:
render_pass_mapping[render_pass] = []
render_pass_mapping[render_pass].append(copy.deepcopy(layer))
layers_by_render_layer = {}
for render_layer, render_passes in layers_by_layer_and_pass.items():
render_layer_layers = []
layers_by_render_layer[render_layer] = render_layer_layers
for render_pass, layers in render_passes.items():
render_layer_layers.extend(copy.deepcopy(layers))
dynamic_data = {
"render_pass": render_pass,
"render_layer": render_layer,
# Override family for subset name
"family": "render"
}
subset_name = get_subset_name(
self.render_pass_family,
render_pass,
task_name,
asset_doc,
project_name,
host_name,
dynamic_data=dynamic_data,
project_settings=context.data["project_settings"]
)
instance = self._create_render_pass_instance(
context, layers, subset_name
)
new_instances.append(instance)
for render_layer, layers in layers_by_render_layer.items():
variant = render_layer
dynamic_data = {
"render_pass": self.render_layer_pass_name,
"render_layer": render_layer,
# Override family for subset name
"family": "render"
}
subset_name = get_subset_name(
self.render_layer_family,
variant,
task_name,
asset_doc,
project_name,
host_name,
dynamic_data=dynamic_data,
project_settings=context.data["project_settings"]
)
instance = self._create_render_layer_instance(
context, layers, subset_name
)
new_instances.append(instance)
# Set data same for all instances
frame_start = context.data.get("frameStart")
frame_end = context.data.get("frameEnd")
for instance in new_instances:
if (
instance.data.get("frameStart") is None
or instance.data.get("frameEnd") is None
):
instance.data["frameStart"] = frame_start
instance.data["frameEnd"] = frame_end
if instance.data.get("asset") is None:
instance.data["asset"] = asset_doc["name"]
if instance.data.get("task") is None:
instance.data["task"] = task_name
if "representations" not in instance.data:
instance.data["representations"] = []
if "source" not in instance.data:
instance.data["source"] = "webpublisher"
def _create_workfile_instance(self, context, subset_name):
workfile_path = context.data["workfilePath"]
staging_dir = os.path.dirname(workfile_path)
filename = os.path.basename(workfile_path)
ext = os.path.splitext(filename)[-1]
return context.create_instance(**{
"name": subset_name,
"label": subset_name,
"subset": subset_name,
"family": self.workfile_family,
"families": [],
"stagingDir": staging_dir,
"representations": [{
"name": ext.lstrip("."),
"ext": ext.lstrip("."),
"files": filename,
"stagingDir": staging_dir
}]
})
def _create_review_instance(self, context, subset_name):
staging_dir = self._create_staging_dir(context, subset_name)
layers_data = context.data["layersData"]
# Filter hidden layers
filtered_layers_data = [
copy.deepcopy(layer)
for layer in layers_data
if layer["visible"]
]
return context.create_instance(**{
"name": subset_name,
"label": subset_name,
"subset": subset_name,
"family": self.review_family,
"families": [],
"layers": filtered_layers_data,
"stagingDir": staging_dir
})
def _create_render_pass_instance(self, context, layers, subset_name):
staging_dir = self._create_staging_dir(context, subset_name)
# Global instance data modifications
# Fill families
return context.create_instance(**{
"name": subset_name,
"subset": subset_name,
"label": subset_name,
"family": "render",
# Add `review` family for thumbnail integration
"families": [self.render_pass_family, "review"],
"representations": [],
"layers": layers,
"stagingDir": staging_dir
})
def _create_render_layer_instance(self, context, layers, subset_name):
staging_dir = self._create_staging_dir(context, subset_name)
# Global instance data modifications
# Fill families
return context.create_instance(**{
"name": subset_name,
"subset": subset_name,
"label": subset_name,
"family": "render",
# Add `review` family for thumbnail integration
"families": [self.render_layer_family, "review"],
"representations": [],
"layers": layers,
"stagingDir": staging_dir
})
def _create_staging_dir(self, context, subset_name):
context_staging_dir = context.data["contextStagingDir"]
staging_dir = os.path.join(context_staging_dir, subset_name)
if not os.path.exists(staging_dir):
os.makedirs(staging_dir)
return staging_dir


@ -1,142 +0,0 @@
"""
Requires:
CollectPublishedFiles
CollectModules
Provides:
workfilePath - Path to tvpaint workfile
sceneData - Scene data loaded from the workfile
groupsData -
layersData
layersExposureFrames
layersPrePostBehavior
"""
import os
import uuid
import json
import shutil
import pyblish.api
from ayon_core.hosts.tvpaint.worker import (
SenderTVPaintCommands,
CollectSceneData
)
from openpype_modules.webpublisher.lib import parse_json
class CollectTVPaintWorkfileData(pyblish.api.ContextPlugin):
label = "Collect TVPaint Workfile data"
order = pyblish.api.CollectorOrder - 0.4
hosts = ["webpublisher"]
targets = ["tvpaint_worker"]
def process(self, context):
# Get JobQueue module
modules = context.data["openPypeModules"]
job_queue_module = modules["job_queue"]
jobs_root = job_queue_module.get_jobs_root()
if not jobs_root:
raise ValueError("Job Queue root is not set.")
context.data["jobsRoot"] = jobs_root
context_staging_dir = self._create_context_staging_dir(jobs_root)
workfile_path = self._extract_workfile_path(
context, context_staging_dir
)
context.data["contextStagingDir"] = context_staging_dir
context.data["workfilePath"] = workfile_path
# Prepare tvpaint command
collect_scene_data_command = CollectSceneData()
# Create TVPaint sender commands
commands = SenderTVPaintCommands(workfile_path, job_queue_module)
commands.add_command(collect_scene_data_command)
# Send job and wait for answer
commands.send_job_and_wait()
collected_data = collect_scene_data_command.result()
layers_data = collected_data["layers_data"]
groups_data = collected_data["groups_data"]
scene_data = collected_data["scene_data"]
exposure_frames_by_layer_id = (
collected_data["exposure_frames_by_layer_id"]
)
pre_post_beh_by_layer_id = (
collected_data["pre_post_beh_by_layer_id"]
)
# Store results
# scene data store the same way as TVPaint collector
scene_data = {
"sceneWidth": scene_data["width"],
"sceneHeight": scene_data["height"],
"scenePixelAspect": scene_data["pixel_aspect"],
"sceneFps": scene_data["fps"],
"sceneFieldOrder": scene_data["field_order"],
"sceneMarkIn": scene_data["mark_in"],
# scene_data["mark_in_state"],
"sceneMarkInState": scene_data["mark_in_set"],
"sceneMarkOut": scene_data["mark_out"],
# scene_data["mark_out_state"],
"sceneMarkOutState": scene_data["mark_out_set"],
"sceneStartFrame": scene_data["start_frame"],
"sceneBgColor": scene_data["bg_color"]
}
context.data["sceneData"] = scene_data
# Store only raw data
context.data["groupsData"] = groups_data
context.data["layersData"] = layers_data
context.data["layersExposureFrames"] = exposure_frames_by_layer_id
context.data["layersPrePostBehavior"] = pre_post_beh_by_layer_id
self.log.debug(
(
"Collected data"
"\nScene data: {}"
"\nLayers data: {}"
"\nExposure frames: {}"
"\nPre/Post behavior: {}"
).format(
json.dumps(scene_data, indent=4),
json.dumps(layers_data, indent=4),
json.dumps(exposure_frames_by_layer_id, indent=4),
json.dumps(pre_post_beh_by_layer_id, indent=4)
)
)
def _create_context_staging_dir(self, jobs_root):
if not os.path.exists(jobs_root):
os.makedirs(jobs_root)
random_folder_name = str(uuid.uuid4())
full_path = os.path.join(jobs_root, random_folder_name)
if not os.path.exists(full_path):
os.makedirs(full_path)
return full_path
def _extract_workfile_path(self, context, context_staging_dir):
"""Find first TVPaint file in tasks and use it."""
batch_dir = context.data["batchDir"]
batch_data = context.data["batchData"]
src_workfile_path = None
for task_id in batch_data["tasks"]:
if src_workfile_path is not None:
break
task_dir = os.path.join(batch_dir, task_id)
task_manifest_path = os.path.join(task_dir, "manifest.json")
task_data = parse_json(task_manifest_path)
task_files = task_data["files"]
for filename in task_files:
_, ext = os.path.splitext(filename)
if ext.lower() == ".tvpp":
src_workfile_path = os.path.join(task_dir, filename)
break
# Copy workfile to job queue work root
new_workfile_path = os.path.join(
context_staging_dir, os.path.basename(src_workfile_path)
)
shutil.copy(src_workfile_path, new_workfile_path)
return new_workfile_path


@ -1,535 +0,0 @@
import os
import copy
from ayon_core.hosts.tvpaint.worker import (
SenderTVPaintCommands,
ExecuteSimpleGeorgeScript,
ExecuteGeorgeScript
)
import pyblish.api
from ayon_core.hosts.tvpaint.lib import (
calculate_layers_extraction_data,
get_frame_filename_template,
fill_reference_frames,
composite_rendered_layers,
rename_filepaths_by_frame_start
)
from PIL import Image
class ExtractTVPaintSequences(pyblish.api.Extractor):
label = "Extract TVPaint Sequences"
hosts = ["webpublisher"]
targets = ["tvpaint_worker"]
# Context plugin does not have families filtering
families_filter = ["review", "renderPass", "renderLayer"]
job_queue_root_key = "jobs_root"
# Modifiable with settings
review_bg = [255, 255, 255, 255]
def process(self, context):
# Get workfile path
workfile_path = context.data["workfilePath"]
jobs_root = context.data["jobsRoot"]
jobs_root_slashed = jobs_root.replace("\\", "/")
# Prepare scene data
scene_data = context.data["sceneData"]
scene_mark_in = scene_data["sceneMarkIn"]
scene_mark_out = scene_data["sceneMarkOut"]
scene_start_frame = scene_data["sceneStartFrame"]
scene_bg_color = scene_data["sceneBgColor"]
# Prepare layers behavior
behavior_by_layer_id = context.data["layersPrePostBehavior"]
exposure_frames_by_layer_id = context.data["layersExposureFrames"]
# Handles are not stored per instance but on Context
handle_start = context.data["handleStart"]
handle_end = context.data["handleEnd"]
# Get JobQueue module
modules = context.data["openPypeModules"]
job_queue_module = modules["job_queue"]
tvpaint_commands = SenderTVPaintCommands(
workfile_path, job_queue_module
)
# Change scene Start Frame to 0 to prevent frame index issues
# - issue is that TVPaint versions deal with frame indexes in a
# different way when Start Frame is not `0`
# NOTE It will be set back after rendering
tvpaint_commands.add_command(
ExecuteSimpleGeorgeScript("tv_startframe 0")
)
root_key_replacement = "{" + self.job_queue_root_key + "}"
after_render_instances = []
for instance in context:
instance_families = set(instance.data.get("families", []))
instance_families.add(instance.data["family"])
valid = False
for family in instance_families:
if family in self.families_filter:
valid = True
break
if not valid:
continue
self.log.info("* Preparing commands for instance \"{}\"".format(
instance.data["label"]
))
# Get all layers and filter out not visible
layers = instance.data["layers"]
filtered_layers = [layer for layer in layers if layer["visible"]]
if not filtered_layers:
self.log.info(
"None of the layers from the instance"
" are visible. Extraction skipped."
)
continue
joined_layer_names = ", ".join([
"\"{}\"".format(str(layer["name"]))
for layer in filtered_layers
])
self.log.debug(
"Instance has {} layers with names: {}".format(
len(filtered_layers), joined_layer_names
)
)
# Staging dir must be created during collection
staging_dir = instance.data["stagingDir"].replace("\\", "/")
job_root_template = staging_dir.replace(
jobs_root_slashed, root_key_replacement
)
# Frame start/end may be stored as float
frame_start = int(instance.data["frameStart"])
frame_end = int(instance.data["frameEnd"])
# Prepare output frames
output_frame_start = frame_start - handle_start
output_frame_end = frame_end + handle_end
# Change output frame start to 0 if handles make it a negative number
if output_frame_start < 0:
self.log.warning((
"Frame start with handles has negative value."
" Changed to \"0\". Frames start: {}, Handle Start: {}"
).format(frame_start, handle_start))
output_frame_start = 0
# Create copy of scene Mark In/Out
mark_in, mark_out = scene_mark_in, scene_mark_out
# Fix possible changes of output frame
mark_out, output_frame_end = self._fix_range_changes(
mark_in, mark_out, output_frame_start, output_frame_end
)
filename_template = get_frame_filename_template(
max(scene_mark_out, output_frame_end)
)
# -----------------------------------------------------------------
self.log.debug(
"Files will be rendered to folder: {}".format(staging_dir)
)
output_filepaths_by_frame_idx = {}
for frame_idx in range(mark_in, mark_out + 1):
filename = filename_template.format(frame=frame_idx)
filepath = os.path.join(staging_dir, filename)
output_filepaths_by_frame_idx[frame_idx] = filepath
# Prepare data for post render processing
post_render_data = {
"output_dir": staging_dir,
"layers": filtered_layers,
"output_filepaths_by_frame_idx": output_filepaths_by_frame_idx,
"instance": instance,
"is_layers_render": False,
"output_frame_start": output_frame_start,
"output_frame_end": output_frame_end
}
# Store them to list
after_render_instances.append(post_render_data)
# Review rendering
if instance.data["family"] == "review":
self.add_render_review_command(
tvpaint_commands, mark_in, mark_out, scene_bg_color,
job_root_template, filename_template
)
continue
# Layers rendering
extraction_data_by_layer_id = calculate_layers_extraction_data(
filtered_layers,
exposure_frames_by_layer_id,
behavior_by_layer_id,
mark_in,
mark_out
)
filepaths_by_layer_id = self.add_render_command(
tvpaint_commands,
job_root_template,
staging_dir,
filtered_layers,
extraction_data_by_layer_id
)
# Add more data to post render processing
post_render_data.update({
"is_layers_render": True,
"extraction_data_by_layer_id": extraction_data_by_layer_id,
"filepaths_by_layer_id": filepaths_by_layer_id
})
# Change scene frame Start back to previous value
tvpaint_commands.add_command(
ExecuteSimpleGeorgeScript(
"tv_startframe {}".format(scene_start_frame)
)
)
self.log.info("Sending the job and waiting for response...")
tvpaint_commands.send_job_and_wait()
self.log.info("Render job finished")
for post_render_data in after_render_instances:
self._post_render_processing(post_render_data, mark_in, mark_out)
def _fix_range_changes(
self, mark_in, mark_out, output_frame_start, output_frame_end
):
# Check Marks range and output range
output_range = output_frame_end - output_frame_start
marks_range = mark_out - mark_in
# Lower Mark Out if mark range is bigger than output
# - do not render unused frames
if output_range < marks_range:
new_mark_out = mark_out - (marks_range - output_range)
self.log.warning((
"Lowering render range to {} frames. Changed Mark Out {} -> {}"
).format(marks_range + 1, mark_out, new_mark_out))
# Assign new mark out to variable
mark_out = new_mark_out
# Lower output frame end so representation has right `frameEnd` value
elif output_range > marks_range:
new_output_frame_end = (
output_frame_end - (output_range - marks_range)
)
self.log.warning((
"Lowering representation range to {} frames."
" Changed frame end {} -> {}"
).format(output_range + 1, mark_out, new_output_frame_end))
output_frame_end = new_output_frame_end
return mark_out, output_frame_end
def _post_render_processing(self, post_render_data, mark_in, mark_out):
# Unpack values
instance = post_render_data["instance"]
output_filepaths_by_frame_idx = (
post_render_data["output_filepaths_by_frame_idx"]
)
is_layers_render = post_render_data["is_layers_render"]
output_dir = post_render_data["output_dir"]
layers = post_render_data["layers"]
output_frame_start = post_render_data["output_frame_start"]
output_frame_end = post_render_data["output_frame_end"]
# Trigger post processing of layers rendering
# - only a few frames were rendered; this completes the sequence
# - multiple layers can be in a single instance; they must be composited
# over each other
if is_layers_render:
self._finish_layer_render(
layers,
post_render_data["extraction_data_by_layer_id"],
post_render_data["filepaths_by_layer_id"],
mark_in,
mark_out,
output_filepaths_by_frame_idx
)
# Create thumbnail
thumbnail_filepath = os.path.join(output_dir, "thumbnail.jpg")
thumbnail_src_path = output_filepaths_by_frame_idx[mark_in]
self._create_thumbnail(thumbnail_src_path, thumbnail_filepath)
# Rename filepaths to final frames
repre_files = self._rename_output_files(
output_filepaths_by_frame_idx,
mark_in,
mark_out,
output_frame_start
)
# Fill tags and new families
family_lowered = instance.data["family"].lower()
tags = []
if family_lowered in ("review", "renderlayer"):
tags.append("review")
# Sequence of one frame
single_file = len(repre_files) == 1
if single_file:
repre_files = repre_files[0]
# Extension is hardcoded
# - changing the extension would require code changes
new_repre = {
"name": "png",
"ext": "png",
"files": repre_files,
"stagingDir": output_dir,
"tags": tags
}
if not single_file:
new_repre["frameStart"] = output_frame_start
new_repre["frameEnd"] = output_frame_end
self.log.debug("Creating new representation: {}".format(new_repre))
instance.data["representations"].append(new_repre)
if family_lowered in ("renderpass", "renderlayer"):
# Change family to render
instance.data["family"] = "render"
thumbnail_ext = os.path.splitext(thumbnail_filepath)[1]
# Create thumbnail representation
thumbnail_repre = {
"name": "thumbnail",
"ext": thumbnail_ext.replace(".", ""),
"outputName": "thumb",
"files": os.path.basename(thumbnail_filepath),
"stagingDir": output_dir,
"tags": ["thumbnail"]
}
instance.data["representations"].append(thumbnail_repre)
def _rename_output_files(
self, filepaths_by_frame, mark_in, mark_out, output_frame_start
):
new_filepaths_by_frame = rename_filepaths_by_frame_start(
filepaths_by_frame, mark_in, mark_out, output_frame_start
)
repre_filenames = []
for filepath in new_filepaths_by_frame.values():
repre_filenames.append(os.path.basename(filepath))
if mark_in < output_frame_start:
repre_filenames = list(reversed(repre_filenames))
return repre_filenames
def add_render_review_command(
self,
tvpaint_commands,
mark_in,
mark_out,
scene_bg_color,
job_root_template,
filename_template
):
""" Export images from TVPaint using `tv_savesequence` command.
Args:
output_dir (str): Directory where files will be stored.
mark_in (int): Starting frame index from which export will begin.
mark_out (int): On which frame index export will end.
scene_bg_color (list): Bg color set in scene. Result of george
script command `tv_background`.
"""
self.log.debug("Preparing data for rendering.")
bg_color = self._get_review_bg_color()
first_frame_filepath = "/".join([
job_root_template,
filename_template.format(frame=mark_in)
])
george_script_lines = [
# Change bg color to color from settings
"tv_background \"color\" {} {} {}".format(*bg_color),
"tv_SaveMode \"PNG\"",
"export_path = \"{}\"".format(
first_frame_filepath.replace("\\", "/")
),
"tv_savesequence '\"'export_path'\"' {} {}".format(
mark_in, mark_out
)
]
if scene_bg_color:
# Change bg color back to previous scene bg color
_scene_bg_color = copy.deepcopy(scene_bg_color)
bg_type = _scene_bg_color.pop(0)
orig_color_command = [
"tv_background",
"\"{}\"".format(bg_type)
]
orig_color_command.extend(_scene_bg_color)
george_script_lines.append(" ".join(orig_color_command))
tvpaint_commands.add_command(
ExecuteGeorgeScript(
george_script_lines,
root_dir_key=self.job_queue_root_key
)
)
def add_render_command(
self,
tvpaint_commands,
job_root_template,
staging_dir,
layers,
extraction_data_by_layer_id
):
""" Export images from TVPaint.
Args:
output_dir (str): Directory where files will be stored.
mark_in (int): Starting frame index from which export will begin.
mark_out (int): On which frame index export will end.
layers (list): List of layers to be exported.
Returns:
tuple: With 2 items first is list of filenames second is path to
thumbnail.
"""
# Map layers by position
layers_by_id = {
layer["layer_id"]: layer
for layer in layers
}
# Render layers
filepaths_by_layer_id = {}
for layer_id, render_data in extraction_data_by_layer_id.items():
layer = layers_by_id[layer_id]
frame_references = render_data["frame_references"]
filenames_by_frame_index = render_data["filenames_by_frame_index"]
filepaths_by_frame = {}
command_filepath_by_frame = {}
for frame_idx, ref_idx in frame_references.items():
# None reference is skipped because it does not have a source
if ref_idx is None:
filepaths_by_frame[frame_idx] = None
continue
filename = filenames_by_frame_index[frame_idx]
filepaths_by_frame[frame_idx] = os.path.join(
staging_dir, filename
)
if frame_idx == ref_idx:
command_filepath_by_frame[frame_idx] = "/".join(
[job_root_template, filename]
)
self._add_render_layer_command(
tvpaint_commands, layer, command_filepath_by_frame
)
filepaths_by_layer_id[layer_id] = filepaths_by_frame
return filepaths_by_layer_id
def _add_render_layer_command(
self, tvpaint_commands, layer, filepaths_by_frame
):
george_script_lines = [
# Set current layer by position
"tv_layergetid {}".format(layer["position"]),
"layer_id = result",
"tv_layerset layer_id",
"tv_SaveMode \"PNG\""
]
for frame_idx, filepath in filepaths_by_frame.items():
if filepath is None:
continue
# Go to frame
george_script_lines.append("tv_layerImage {}".format(frame_idx))
# Store image to output
george_script_lines.append(
"tv_saveimage \"{}\"".format(filepath.replace("\\", "/"))
)
tvpaint_commands.add_command(
ExecuteGeorgeScript(
george_script_lines,
root_dir_key=self.job_queue_root_key
)
)
def _finish_layer_render(
self,
layers,
extraction_data_by_layer_id,
filepaths_by_layer_id,
mark_in,
mark_out,
output_filepaths_by_frame_idx
):
# Fill frames between `frame_start_index` and `frame_end_index`
self.log.debug("Filling frames not rendered frames.")
for layer_id, render_data in extraction_data_by_layer_id.items():
frame_references = render_data["frame_references"]
filepaths_by_frame = filepaths_by_layer_id[layer_id]
fill_reference_frames(frame_references, filepaths_by_frame)
# Prepare final filepaths where compositing should store result
self.log.info("Started compositing of layer frames.")
composite_rendered_layers(
layers, filepaths_by_layer_id,
mark_in, mark_out,
output_filepaths_by_frame_idx
)
def _create_thumbnail(self, thumbnail_src_path, thumbnail_filepath):
if not os.path.exists(thumbnail_src_path):
return
source_img = Image.open(thumbnail_src_path)
# Composite background only on rgba images
# - just making sure
if source_img.mode.lower() == "rgba":
bg_color = self._get_review_bg_color()
self.log.debug("Adding thumbnail background color {}.".format(
" ".join([str(val) for val in bg_color])
))
bg_image = Image.new("RGBA", source_img.size, bg_color)
thumbnail_obj = Image.alpha_composite(bg_image, source_img)
thumbnail_obj.convert("RGB").save(thumbnail_filepath)
else:
self.log.info((
"Source for thumbnail has mode \"{}\" (Expected: RGBA)."
" Can't use thubmanail background color."
).format(source_img.mode))
source_img.save(thumbnail_filepath)
def _get_review_bg_color(self):
red = green = blue = 255
if self.review_bg:
if len(self.review_bg) == 4:
red, green, blue, _ = self.review_bg
elif len(self.review_bg) == 3:
red, green, blue = self.review_bg
return (red, green, blue)


@ -1,31 +0,0 @@
# -*- coding: utf-8 -*-
"""Cleanup leftover files from publish."""
import os
import shutil
import pyblish.api
class CleanUpJobRoot(pyblish.api.ContextPlugin):
"""Cleans up the job root directory after a successful publish.
Remove all files in job root as all of them should be published.
"""
order = pyblish.api.IntegratorOrder + 1
label = "Clean Up Job Root"
optional = True
active = True
def process(self, context):
context_staging_dir = context.data.get("contextStagingDir")
if not context_staging_dir:
self.log.info("Key 'contextStagingDir' is empty.")
elif not os.path.exists(context_staging_dir):
self.log.info((
"Job root directory for this publish does not"
" exists anymore \"{}\"."
).format(context_staging_dir))
else:
self.log.info("Deleting job root with all files.")
shutil.rmtree(context_staging_dir)


@ -1,36 +0,0 @@
import pyblish.api
class ValidateWorkfileData(pyblish.api.ContextPlugin):
"""Validate mark in and out are enabled and it's duration.
Mark In/Out does not have to match frameStart and frameEnd but duration is
important.
"""
label = "Validate Workfile Data"
order = pyblish.api.ValidatorOrder
targets = ["tvpaint_worker"]
def process(self, context):
# Data collected in `CollectContextEntities`
frame_start = context.data["frameStart"]
frame_end = context.data["frameEnd"]
handle_start = context.data["handleStart"]
handle_end = context.data["handleEnd"]
scene_data = context.data["sceneData"]
scene_mark_in = scene_data["sceneMarkIn"]
scene_mark_out = scene_data["sceneMarkOut"]
expected_range = (
(frame_end - frame_start + 1)
+ handle_start
+ handle_end
)
marks_range = scene_mark_out - scene_mark_in + 1
if expected_range != marks_range:
raise AssertionError((
"Wrong Mark In/Out range."
" Expected range is {} frames got {} frames"
).format(expected_range, marks_range))
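A small worked example of the duration check above, with illustrative numbers:

    frame_start, frame_end = 1001, 1010
    handle_start = handle_end = 0
    expected_range = (frame_end - frame_start + 1) + handle_start + handle_end  # 10

    scene_mark_in, scene_mark_out = 0, 9
    marks_range = scene_mark_out - scene_mark_in + 1                            # 10
    assert expected_range == marks_range  # validation passes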


@ -1,206 +0,0 @@
import os
import time
import pyblish.api
import pyblish.util
from ayon_core.lib import Logger
from ayon_core.lib.applications import (
ApplicationManager,
LaunchTypes,
)
from ayon_core.pipeline import install_host
from ayon_core.hosts.webpublisher.api import WebpublisherHost
from .lib import (
get_batch_asset_task_info,
get_webpublish_conn,
start_webpublish_log,
publish_and_log,
fail_batch,
find_variant_key,
get_task_data,
get_timeout,
IN_PROGRESS_STATUS
)
def cli_publish(project_name, batch_path, user_email, targets):
"""Start headless publishing.
Used to publish rendered assets, workfiles etc. via Webpublisher.
Eventually should be yanked out to the Webpublisher cli.
Publish uses json manifests from the passed batch path argument.
Args:
project_name (str): project to publish (only single context is
expected per call of 'publish')
batch_path (str): Path batch folder. Contains subfolders with
resources (workfile, another subfolder 'renders' etc.)
user_email (string): email address for webpublisher - used to
find Ftrack user with same email
targets (list): Pyblish targets
(to choose validator for example)
Raises:
RuntimeError: When there is no path to process.
"""
if not batch_path:
raise RuntimeError("No publish paths specified")
log = Logger.get_logger("Webpublish")
log.info("Webpublish command")
# Register target and host
webpublisher_host = WebpublisherHost()
os.environ["OPENPYPE_PUBLISH_DATA"] = batch_path
os.environ["AVALON_PROJECT"] = project_name
os.environ["AVALON_APP"] = webpublisher_host.name
os.environ["USER_EMAIL"] = user_email
os.environ["HEADLESS_PUBLISH"] = 'true' # to use in app lib
if targets:
if isinstance(targets, str):
targets = [targets]
for target in targets:
pyblish.api.register_target(target)
install_host(webpublisher_host)
log.info("Running publish ...")
_, batch_id = os.path.split(batch_path)
dbcon = get_webpublish_conn()
_id = start_webpublish_log(dbcon, batch_id, user_email)
task_data = get_task_data(batch_path)
if not task_data["context"]:
msg = "Batch manifest must contain context data"
msg += "Create new batch and set context properly."
fail_batch(_id, dbcon, msg)
publish_and_log(dbcon, _id, log, batch_id=batch_id)
log.info("Publish finished.")
def cli_publish_from_app(
project_name, batch_path, host_name, user_email, targets
):
"""Opens installed variant of 'host' and run remote publish there.
Eventually should be yanked out to Webpublisher cli.
Currently implemented and tested for Photoshop where customer
wants to process uploaded .psd file and publish collected layers
from there. Triggered by Webpublisher.
Checks if no other batches are running (status =='in_progress). If
so, it sleeps for SLEEP (this is separate process),
waits for WAIT_FOR seconds altogether.
Requires installed host application on the machine.
Runs publish process as user would, in automatic fashion.
Args:
project_name (str): project to publish (only single context is
expected per call of publish
batch_path (str): Path batch folder. Contains subfolders with
resources (workfile, another subfolder 'renders' etc.)
host_name (str): 'photoshop'
user_email (string): email address for webpublisher - used to
find Ftrack user with same email
targets (list): Pyblish targets
(to choose validator for example)
"""
log = Logger.get_logger("PublishFromApp")
log.info("Webpublish photoshop command")
task_data = get_task_data(batch_path)
workfile_path = os.path.join(batch_path,
task_data["task"],
task_data["files"][0])
print("workfile_path {}".format(workfile_path))
batch_id = task_data["batch"]
dbcon = get_webpublish_conn()
# safer to start logging here, launch might be broken altogether
_id = start_webpublish_log(dbcon, batch_id, user_email)
batches_in_progress = list(dbcon.find({"status": IN_PROGRESS_STATUS}))
if len(batches_in_progress) > 1:
running_batches = [str(batch["_id"])
for batch in batches_in_progress
if batch["_id"] != _id]
msg = "There are still running batches {}\n". \
format("\n".join(running_batches))
msg += "Ask admin to check them and reprocess current batch"
fail_batch(_id, dbcon, msg)
if not task_data["context"]:
msg = "Batch manifest must contain context data"
msg += "Create new batch and set context properly."
fail_batch(_id, dbcon, msg)
asset_name, task_name, task_type = get_batch_asset_task_info(
task_data["context"])
application_manager = ApplicationManager()
found_variant_key = find_variant_key(application_manager, host_name)
app_name = "{}/{}".format(host_name, found_variant_key)
data = {
"last_workfile_path": workfile_path,
"start_last_workfile": True,
"project_name": project_name,
"asset_name": asset_name,
"task_name": task_name,
"launch_type": LaunchTypes.automated,
}
launch_context = application_manager.create_launch_context(
app_name, **data)
launch_context.run_prelaunch_hooks()
# must have for proper launch of app
env = launch_context.env
print("env:: {}".format(env))
env["OPENPYPE_PUBLISH_DATA"] = batch_path
# must pass identifier to update log lines for a batch
env["BATCH_LOG_ID"] = str(_id)
env["HEADLESS_PUBLISH"] = 'true' # to use in app lib
env["USER_EMAIL"] = user_email
os.environ.update(env)
# Why is this here? Registered host in this process does not affect
# registered host in launched process.
pyblish.api.register_host(host_name)
if targets:
if isinstance(targets, str):
targets = [targets]
current_targets = os.environ.get("PYBLISH_TARGETS", "").split(
os.pathsep)
for target in targets:
current_targets.append(target)
os.environ["PYBLISH_TARGETS"] = os.pathsep.join(
set(current_targets))
launched_app = application_manager.launch_with_context(launch_context)
timeout = get_timeout(project_name, host_name, task_type)
time_start = time.time()
while launched_app.poll() is None:
time.sleep(0.5)
if time.time() - time_start > timeout:
launched_app.terminate()
msg = "Timeout reached"
fail_batch(_id, dbcon, msg)


@ -1,6 +0,0 @@
from .webserver import run_webserver
__all__ = (
"run_webserver",
)


@ -1,429 +0,0 @@
"""Routes and etc. for webpublisher API."""
import os
import json
import datetime
import collections
import subprocess
from bson.objectid import ObjectId
from aiohttp.web_response import Response
from ayon_core.client import (
get_projects,
get_assets,
)
from ayon_core.lib import Logger
from ayon_core.settings import get_project_settings
from openpype_modules.webserver.base_routes import RestApiEndpoint
from openpype_modules.webpublisher import WebpublisherAddon
from openpype_modules.webpublisher.lib import (
get_webpublish_conn,
get_task_data,
ERROR_STATUS,
REPROCESS_STATUS
)
log = Logger.get_logger("WebpublishRoutes")
class ResourceRestApiEndpoint(RestApiEndpoint):
def __init__(self, resource):
self.resource = resource
super(ResourceRestApiEndpoint, self).__init__()
class WebpublishApiEndpoint(ResourceRestApiEndpoint):
@property
def dbcon(self):
return self.resource.dbcon
class JsonApiResource:
"""Resource for json manipulation.
    All resources handling sending output to REST should inherit from
    this class.
    """
@staticmethod
def json_dump_handler(value):
if isinstance(value, datetime.datetime):
return value.isoformat()
if isinstance(value, ObjectId):
return str(value)
if isinstance(value, set):
return list(value)
raise TypeError(value)
@classmethod
def encode(cls, data):
return json.dumps(
data,
indent=4,
default=cls.json_dump_handler
).encode("utf-8")
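
# Illustrative sketch only: what the encoder above handles that plain
# json.dumps would reject (datetime, ObjectId, set). Values are made up;
# 'datetime' and 'ObjectId' are the imports from the top of this file.
def _example_encode():
    payload = {
        "_id": ObjectId("507f1f77bcf86cd799439011"),
        "created": datetime.datetime(2024, 2, 5, 17, 30),
        "targets": {"webpublish", "automated"},
    }
    # returns indented JSON as UTF-8 encoded bytes
    return JsonApiResource.encode(payload)
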
class RestApiResource(JsonApiResource):
"""Resource carrying needed info and Avalon DB connection for publish."""
def __init__(self, server_manager, executable, upload_dir,
studio_task_queue=None):
self.server_manager = server_manager
self.upload_dir = upload_dir
self.executable = executable
if studio_task_queue is None:
            studio_task_queue = collections.deque()
self.studio_task_queue = studio_task_queue
class WebpublishRestApiResource(JsonApiResource):
"""Resource carrying OP DB connection for storing batch info into DB."""
def __init__(self):
self.dbcon = get_webpublish_conn()
class ProjectsEndpoint(ResourceRestApiEndpoint):
"""Returns list of dict with project info (id, name)."""
async def get(self) -> Response:
output = []
for project_doc in get_projects():
ret_val = {
"id": project_doc["_id"],
"name": project_doc["name"]
}
output.append(ret_val)
return Response(
status=200,
body=self.resource.encode(output),
content_type="application/json"
)
class HiearchyEndpoint(ResourceRestApiEndpoint):
"""Returns dictionary with context tree from assets."""
async def get(self, project_name) -> Response:
query_projection = {
"_id": 1,
"data.tasks": 1,
"data.visualParent": 1,
"data.entityType": 1,
"name": 1,
"type": 1,
}
asset_docs = get_assets(project_name, fields=query_projection.keys())
asset_docs_by_id = {
asset_doc["_id"]: asset_doc
for asset_doc in asset_docs
}
asset_docs_by_parent_id = collections.defaultdict(list)
for asset_doc in asset_docs_by_id.values():
parent_id = asset_doc["data"].get("visualParent")
asset_docs_by_parent_id[parent_id].append(asset_doc)
assets = collections.defaultdict(list)
for parent_id, children in asset_docs_by_parent_id.items():
for child in children:
node = assets.get(child["_id"])
if not node:
node = Node(child["_id"],
child["data"].get("entityType", "Folder"),
child["name"])
assets[child["_id"]] = node
tasks = child["data"].get("tasks", {})
for t_name, t_con in tasks.items():
task_node = TaskNode("task", t_name)
task_node["attributes"]["type"] = t_con.get("type")
task_node.parent = node
parent_node = assets.get(parent_id)
if not parent_node:
asset_doc = asset_docs_by_id.get(parent_id)
if asset_doc: # regular node
parent_node = Node(parent_id,
asset_doc["data"].get("entityType",
"Folder"),
asset_doc["name"])
else: # root
parent_node = Node(parent_id,
"project",
project_name)
assets[parent_id] = parent_node
node.parent = parent_node
roots = [x for x in assets.values() if x.parent is None]
return Response(
status=200,
body=self.resource.encode(roots[0]),
content_type="application/json"
)
class Node(dict):
"""Node element in context tree."""
def __init__(self, uid, node_type, name):
self._parent = None # pointer to parent Node
self["type"] = node_type
self["name"] = name
self['id'] = uid # keep reference to id #
self['children'] = [] # collection of pointers to child Nodes
@property
def parent(self):
return self._parent # simply return the object at the _parent pointer
@parent.setter
def parent(self, node):
self._parent = node
# add this node to parent's list of children
node['children'].append(self)
class TaskNode(Node):
"""Special node type only for Tasks."""
def __init__(self, node_type, name):
self._parent = None
self["type"] = node_type
self["name"] = name
self["attributes"] = {}
class BatchPublishEndpoint(WebpublishApiEndpoint):
"""Triggers headless publishing of batch."""
async def post(self, request) -> Response:
# Validate existence of openpype executable
openpype_app = self.resource.executable
if not openpype_app or not os.path.exists(openpype_app):
msg = "Non existent OpenPype executable {}".format(openpype_app)
raise RuntimeError(msg)
log.info("BatchPublishEndpoint called")
content = await request.json()
        # Each filter has extensions which are checked against the first task item
        # - the first filter whose extensions match the first task is used
        # - a filter defines the command and can extend the arguments dictionary
        # This is used only if 'studio_processing' is enabled on the batch
studio_processing_filters = [
# TVPaint filter
{
"extensions": [".tvpp"],
"command": "publish",
"arguments": {
"targets": ["tvpaint_worker", "webpublish"]
},
"add_to_queue": False
},
# Photoshop filter
{
"extensions": [".psd", ".psb"],
"command": "publishfromapp",
"arguments": {
# Command 'publishfromapp' requires --host argument
"host": "photoshop",
# Make sure targets are set to None for cases that default
# would change
# - targets argument is not used in 'publishfromapp'
"targets": ["automated", "webpublish"]
},
            # should publish be handled by a queue, e.g. only a
            # single process running concurrently?
"add_to_queue": True
}
]
batch_dir = os.path.join(self.resource.upload_dir, content["batch"])
# Default command and arguments
command = "publish"
add_args = {
# All commands need 'project' and 'user'
"project": content["project_name"],
"user": content["user"],
"targets": ["filespublish", "webpublish"]
}
add_to_queue = False
if content.get("studio_processing"):
log.info("Post processing called for {}".format(batch_dir))
task_data = get_task_data(batch_dir)
for process_filter in studio_processing_filters:
filter_extensions = process_filter.get("extensions") or []
for file_name in task_data["files"]:
file_ext = os.path.splitext(file_name)[-1].lower()
if file_ext in filter_extensions:
# Change command
command = process_filter["command"]
# Update arguments
add_args.update(
process_filter.get("arguments") or {}
)
add_to_queue = process_filter["add_to_queue"]
break
args = [
openpype_app,
"module",
WebpublisherAddon.name,
command,
batch_dir
]
for key, value in add_args.items():
# Skip key values where value is None
if value is None:
continue
arg_key = "--{}".format(key)
if not isinstance(value, (tuple, list)):
value = [value]
for item in value:
args += [arg_key, item]
log.info("args:: {}".format(args))
if add_to_queue:
log.debug("Adding to queue")
self.resource.studio_task_queue.append(args)
else:
subprocess.Popen(args)
return Response(
status=200,
content_type="application/json"
)
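
# Illustrative sketch only: the dict-to-CLI flattening performed above,
# extracted as a helper. None values are skipped and list values are
# repeated ("--targets a --targets b"). The helper name is hypothetical.
def _args_from_dict(openpype_app, command, batch_dir, add_args):
    args = [openpype_app, "module", WebpublisherAddon.name, command, batch_dir]
    for key, value in add_args.items():
        if value is None:
            continue
        arg_key = "--{}".format(key)
        if not isinstance(value, (tuple, list)):
            value = [value]
        for item in value:
            args += [arg_key, item]
    return args
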
class TaskPublishEndpoint(WebpublishApiEndpoint):
"""Prepared endpoint triggered after each task - for future development."""
async def post(self, request) -> Response:
return Response(
status=200,
body=self.resource.encode([]),
content_type="application/json"
)
class BatchStatusEndpoint(WebpublishApiEndpoint):
"""Returns dict with info for batch_id.
Uses 'WebpublishRestApiResource'.
"""
async def get(self, batch_id) -> Response:
output = self.dbcon.find_one({"batch_id": batch_id})
if output:
status = 200
else:
output = {"msg": "Batch id {} not found".format(batch_id),
"status": "queued",
"progress": 0}
status = 404
body = self.resource.encode(output)
return Response(
status=status,
body=body,
content_type="application/json"
)
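
# Illustrative sketch only (not part of the removed module): querying the
# endpoint above from a client. Host and port match the run_webserver
# defaults in the webserver module below; the batch id is made up.
def _example_batch_status(batch_id="abc123"):
    import requests
    resp = requests.get(
        "http://localhost:8079/api/batch_status/{}".format(batch_id))
    # 200 with the stored record, or 404 with a "queued" placeholder
    return resp.status_code, resp.json()
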
class UserReportEndpoint(WebpublishApiEndpoint):
"""Returns list of dict with batch info for user (email address).
Uses 'WebpublishRestApiResource'.
"""
async def get(self, user) -> Response:
output = list(self.dbcon.find({"user": user},
projection={"log": False}))
if output:
status = 200
else:
output = {"msg": "User {} not found".format(user)}
status = 404
body = self.resource.encode(output)
return Response(
status=status,
body=body,
content_type="application/json"
)
class ConfiguredExtensionsEndpoint(WebpublishApiEndpoint):
"""Returns dict of extensions which have mapping to family.
Returns:
{
"file_exts": [],
"sequence_exts": []
}
"""
async def get(self, project_name=None) -> Response:
sett = get_project_settings(project_name)
configured = {
"file_exts": set(),
"sequence_exts": set(),
# workfiles that could have "Studio Processing" hardcoded for now
"studio_exts": set(["psd", "psb", "tvpp", "tvp"])
}
collect_conf = sett["webpublisher"]["publish"]["CollectPublishedFiles"]
configs = collect_conf.get("task_type_to_family", [])
mappings = []
for _, conf_mappings in configs.items():
if isinstance(conf_mappings, dict):
conf_mappings = conf_mappings.values()
for conf_mapping in conf_mappings:
mappings.append(conf_mapping)
for mapping in mappings:
if mapping["is_sequence"]:
configured["sequence_exts"].update(mapping["extensions"])
else:
configured["file_exts"].update(mapping["extensions"])
return Response(
status=200,
body=self.resource.encode(dict(configured)),
content_type="application/json"
)
class BatchReprocessEndpoint(WebpublishApiEndpoint):
"""Marks latest 'batch_id' for reprocessing, returns 404 if not found.
Uses 'WebpublishRestApiResource'.
"""
async def post(self, batch_id) -> Response:
batches = self.dbcon.find({"batch_id": batch_id,
"status": ERROR_STATUS}).sort("_id", -1)
if batches:
self.dbcon.update_one(
{"_id": batches[0]["_id"]},
{"$set": {"status": REPROCESS_STATUS}}
)
output = [{"msg": "Batch id {} set to reprocess".format(batch_id)}]
status = 200
else:
output = [{"msg": "Batch id {} not found".format(batch_id)}]
status = 404
body = self.resource.encode(output)
return Response(
status=status,
body=body,
content_type="application/json"
)
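
# Illustrative sketch only: asking the endpoint above to mark a failed batch
# for reprocessing. The webserver loop (see 'reprocess_failed' in the
# webserver module below) then resubmits it. The batch id is made up.
def _example_reprocess(batch_id="abc123"):
    import requests
    resp = requests.post(
        "http://localhost:8079/api/webpublish/reprocess/{}".format(batch_id))
    return resp.status_code  # 200 when an errored batch was found, else 404
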

View file

@@ -1,182 +0,0 @@
import collections
import time
import os
from datetime import datetime
import requests
import json
import subprocess
from ayon_core.client import OpenPypeMongoConnection
from ayon_core.modules import ModulesManager
from ayon_core.lib import Logger
from openpype_modules.webpublisher.lib import (
ERROR_STATUS,
REPROCESS_STATUS,
SENT_REPROCESSING_STATUS
)
from .webpublish_routes import (
RestApiResource,
WebpublishRestApiResource,
HiearchyEndpoint,
ProjectsEndpoint,
ConfiguredExtensionsEndpoint,
BatchPublishEndpoint,
BatchReprocessEndpoint,
BatchStatusEndpoint,
TaskPublishEndpoint,
UserReportEndpoint
)
log = Logger.get_logger("webserver_gui")
def run_webserver(executable, upload_dir, host=None, port=None):
"""Runs webserver in command line, adds routes."""
if not host:
host = "localhost"
if not port:
port = 8079
manager = ModulesManager()
webserver_module = manager.modules_by_name["webserver"]
server_manager = webserver_module.create_new_server_manager(port, host)
webserver_url = server_manager.url
# queue for publishfromapp tasks
studio_task_queue = collections.deque()
resource = RestApiResource(server_manager,
upload_dir=upload_dir,
executable=executable,
studio_task_queue=studio_task_queue)
projects_endpoint = ProjectsEndpoint(resource)
server_manager.add_route(
"GET",
"/api/projects",
projects_endpoint.dispatch
)
hiearchy_endpoint = HiearchyEndpoint(resource)
server_manager.add_route(
"GET",
"/api/hierarchy/{project_name}",
hiearchy_endpoint.dispatch
)
configured_ext_endpoint = ConfiguredExtensionsEndpoint(resource)
server_manager.add_route(
"GET",
"/api/webpublish/configured_ext/{project_name}",
configured_ext_endpoint.dispatch
)
# triggers publish
webpublisher_task_publish_endpoint = BatchPublishEndpoint(resource)
server_manager.add_route(
"POST",
"/api/webpublish/batch",
webpublisher_task_publish_endpoint.dispatch
)
webpublisher_batch_publish_endpoint = TaskPublishEndpoint(resource)
server_manager.add_route(
"POST",
"/api/webpublish/task",
webpublisher_batch_publish_endpoint.dispatch
)
# reporting
webpublish_resource = WebpublishRestApiResource()
batch_status_endpoint = BatchStatusEndpoint(webpublish_resource)
server_manager.add_route(
"GET",
"/api/batch_status/{batch_id}",
batch_status_endpoint.dispatch
)
user_status_endpoint = UserReportEndpoint(webpublish_resource)
server_manager.add_route(
"GET",
"/api/publishes/{user}",
user_status_endpoint.dispatch
)
batch_reprocess_endpoint = BatchReprocessEndpoint(webpublish_resource)
server_manager.add_route(
"POST",
"/api/webpublish/reprocess/{batch_id}",
batch_reprocess_endpoint.dispatch
)
server_manager.start_server()
last_reprocessed = time.time()
while True:
if time.time() - last_reprocessed > 20:
reprocess_failed(upload_dir, webserver_url)
last_reprocessed = time.time()
if studio_task_queue:
args = studio_task_queue.popleft()
subprocess.call(args) # blocking call
time.sleep(1.0)
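
# Illustrative sketch only: the kind of payload the front end would post to
# the running server to trigger BatchPublishEndpoint. The field names follow
# the endpoint code above; the values are made up.
def _example_trigger_batch(webserver_url="http://localhost:8079"):
    import requests
    payload = {
        "batch": "batch_folder_name",   # folder name inside the upload dir
        "project_name": "demo_project",
        "user": "artist@example.com",
        "studio_processing": False,
    }
    return requests.post(
        "{}/api/webpublish/batch".format(webserver_url), json=payload)
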
def reprocess_failed(upload_dir, webserver_url):
# log.info("check_reprocesable_records")
mongo_client = OpenPypeMongoConnection.get_mongo_client()
database_name = os.environ["OPENPYPE_DATABASE_NAME"]
dbcon = mongo_client[database_name]["webpublishes"]
results = dbcon.find({"status": REPROCESS_STATUS})
reprocessed_batches = set()
for batch in results:
if batch["batch_id"] in reprocessed_batches:
continue
batch_url = os.path.join(upload_dir,
batch["batch_id"],
"manifest.json")
log.info("batch:: {} {}".format(os.path.exists(batch_url), batch_url))
if not os.path.exists(batch_url):
msg = "Manifest {} not found".format(batch_url)
print(msg)
dbcon.update_one(
{"_id": batch["_id"]},
{"$set":
{
"finish_date": datetime.now(),
"status": ERROR_STATUS,
"progress": 100,
"log": batch.get("log") + msg
}}
)
continue
server_url = "{}/api/webpublish/batch".format(webserver_url)
with open(batch_url) as f:
data = json.loads(f.read())
dbcon.update_many(
{
"batch_id": batch["batch_id"],
"status": {"$in": [ERROR_STATUS, REPROCESS_STATUS]}
},
{
"$set": {
"finish_date": datetime.now(),
"status": SENT_REPROCESSING_STATUS,
"progress": 100
}
}
)
try:
r = requests.post(server_url, json=data)
log.info("response{}".format(r))
except Exception:
log.info("exception", exc_info=True)
reprocessed_batches.add(batch["batch_id"])