removed tvpaint addon

This commit is contained in:
Jakub Trllo 2024-06-26 18:45:39 +02:00
parent 10adfbab0e
commit f67845991f
70 changed files with 0 additions and 9310 deletions

View file

@ -1,15 +0,0 @@
from .version import __version__
from .addon import (
get_launch_script_path,
TVPaintAddon,
TVPAINT_ROOT_DIR,
)
__all__ = (
"__version__",
"get_launch_script_path",
"TVPaintAddon",
"TVPAINT_ROOT_DIR",
)

View file

@ -1,40 +0,0 @@
import os
from ayon_core.addon import AYONAddon, IHostAddon
from .version import __version__
TVPAINT_ROOT_DIR = os.path.dirname(os.path.abspath(__file__))


def get_launch_script_path():
    """Return absolute path to the TVPaint launch script.

    The script lives in the ``api`` subfolder of this addon.
    """
    script_parts = (TVPAINT_ROOT_DIR, "api", "launch_script.py")
    return os.path.join(*script_parts)
class TVPaintAddon(AYONAddon, IHostAddon):
    """AYON addon integrating TVPaint as a host application."""

    name = "tvpaint"
    version = __version__
    host_name = "tvpaint"

    def add_implementation_envs(self, env, _app):
        """Modify environments to contain all required for implementation."""
        defaults = {
            "AYON_LOG_NO_COLORS": "1"
        }
        for env_key, default_value in defaults.items():
            if env.get(env_key):
                continue
            env[env_key] = default_value

    def get_launch_hook_paths(self, app):
        """Paths to launch hooks, only for TVPaint applications."""
        if app.host_name == self.host_name:
            return [os.path.join(TVPAINT_ROOT_DIR, "hooks")]
        return []

    def get_workfile_extensions(self):
        """Extensions of TVPaint workfiles."""
        return [".tvpp"]

View file

@ -1,11 +0,0 @@
from .communication_server import CommunicationWrapper
from .pipeline import (
TVPaintHost,
)
__all__ = (
"CommunicationWrapper",
"TVPaintHost",
)

View file

@ -1,925 +0,0 @@
import os
import json
import time
import subprocess
import collections
import asyncio
import logging
import socket
import platform
import filecmp
import tempfile
import threading
import shutil
from contextlib import closing
from aiohttp import web
from aiohttp_json_rpc import JsonRpc
from aiohttp_json_rpc.protocol import (
encode_request, encode_error, decode_msg, JsonRpcMsgTyp
)
from aiohttp_json_rpc.exceptions import RpcError
from ayon_core.lib import emit_event
from ayon_tvpaint.tvpaint_plugin import get_plugin_files_path
# Module-level logger; DEBUG level so the communication layer is verbose.
log = logging.getLogger(__name__)
log.setLevel(logging.DEBUG)
class CommunicationWrapper:
    """Singleton-like holder of the active TVPaint communicator."""

    # TODO add logs and exceptions
    communicator = None
    log = logging.getLogger("CommunicationWrapper")

    @classmethod
    def create_qt_communicator(cls, *args, **kwargs):
        """Create communicator for Artist usage."""
        qt_communicator = QtCommunicator(*args, **kwargs)
        cls.set_communicator(qt_communicator)
        return qt_communicator

    @classmethod
    def set_communicator(cls, communicator):
        """Store communicator; only the first one set is kept."""
        if cls.communicator:
            cls.log.warning("Communicator was set multiple times.")
        else:
            cls.communicator = communicator

    @classmethod
    def client(cls):
        """Return websocket client of communicator, or None when unset."""
        if cls.communicator:
            return cls.communicator.client()
        return None

    @classmethod
    def execute_george(cls, george_script):
        """Execute passed george script in TVPaint."""
        if cls.communicator:
            return cls.communicator.execute_george(george_script)
        return None
class WebSocketServer:
    """Websocket server used as communication point with TVPaint plugin."""

    def __init__(self):
        event_loop = asyncio.new_event_loop()

        self.client = None
        self.loop = event_loop
        self.app = web.Application(loop=event_loop)
        self.port = self.find_free_port()
        self.websocket_thread = WebsocketServerThread(
            self, self.port, loop=event_loop
        )

    @property
    def server_is_running(self):
        """Is the underlying server thread serving requests."""
        return self.websocket_thread.server_is_running

    def add_route(self, *args, **kwargs):
        """Register a route on the aiohttp application."""
        self.app.router.add_route(*args, **kwargs)

    @staticmethod
    def find_free_port():
        """Ask the OS for an ephemeral free port and return its number."""
        with closing(
            socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        ) as sock:
            sock.bind(("", 0))
            sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
            return sock.getsockname()[1]

    def start(self):
        """Start the server thread."""
        self.websocket_thread.start()

    def stop(self):
        """Stop the server thread (best-effort, never raises)."""
        try:
            if not self.websocket_thread.is_running:
                return
            log.debug("Stopping websocket server")
            self.websocket_thread.is_running = False
            self.websocket_thread.stop()
        except Exception:
            log.warning(
                "Error has happened during Killing websocket server",
                exc_info=True
            )
class WebsocketServerThread(threading.Thread):
    """ Listener for websocket rpc requests.

    It would be probably better to "attach" this to main thread (as for
    example Harmony needs to run something on main thread), but currently
    it creates separate thread and separate asyncio event loop
    """
    def __init__(self, module, port, loop):
        super(WebsocketServerThread, self).__init__()
        # Flags toggled/read across threads to control and observe the server
        self.is_running = False
        self.server_is_running = False
        self.port = port
        # 'module' is the owning WebSocketServer (provides 'app')
        self.module = module
        self.loop = loop
        self.runner = None
        self.site = None
        # Tasks queued by other code; awaited in 'check_shutdown'
        self.tasks = []

    def run(self):
        self.is_running = True
        try:
            log.debug("Starting websocket server")
            self.loop.run_until_complete(self.start_server())
            log.info(
                "Running Websocket server on URL:"
                " \"ws://localhost:{}\"".format(self.port)
            )
            # Watchdog coroutine that will shut the server down when
            # 'is_running' is set to False from another thread
            asyncio.ensure_future(self.check_shutdown(), loop=self.loop)
            self.server_is_running = True
            self.loop.run_forever()
        except Exception:
            log.warning(
                "Websocket Server service has failed", exc_info=True
            )
        finally:
            self.server_is_running = False
            # optional
            self.loop.close()
        self.is_running = False
        log.info("Websocket server stopped")

    async def start_server(self):
        """ Starts runner and TCPsite """
        self.runner = web.AppRunner(self.module.app)
        await self.runner.setup()
        self.site = web.TCPSite(self.runner, "localhost", self.port)
        await self.site.start()

    def stop(self):
        """Sets is_running flag to false, 'check_shutdown' shuts server down"""
        self.is_running = False

    async def check_shutdown(self):
        """ Future that is running and checks if server should be running
        periodically.
        """
        while self.is_running:
            # Drain queued tasks before the next liveness check
            while self.tasks:
                task = self.tasks.pop(0)
                log.debug("waiting for task {}".format(task))
                await task
                # NOTE(review): 'task.result' is not called here, so this
                # logs the bound method, not the value — likely meant
                # 'task.result()'; left unchanged to preserve behavior.
                log.debug("returned value {}".format(task.result))
            await asyncio.sleep(0.5)
        log.debug("## Server shutdown started")
        await self.site.stop()
        log.debug("# Site stopped")
        await self.runner.cleanup()
        log.debug("# Server runner stopped")
        # Cancel all remaining tasks on this loop and wait for them to finish
        tasks = [
            task for task in asyncio.all_tasks()
            if task is not asyncio.current_task()
        ]
        list(map(lambda task: task.cancel(), tasks))  # cancel all the tasks
        results = await asyncio.gather(*tasks, return_exceptions=True)
        log.debug(f"Finished awaiting cancelled tasks, results: {results}...")
        await self.loop.shutdown_asyncgens()
        # to really make sure everything else has time to stop
        await asyncio.sleep(0.07)
        self.loop.stop()
class BaseTVPaintRpc(JsonRpc):
    """JSON-RPC handler extended with server -> client request support.

    Args:
        communication_obj: Communicator that owns this RPC object; used to
            execute callbacks in the main thread.
        route_name (str): Route under which methods are registered.
    """
    def __init__(self, communication_obj, route_name="", **kwargs):
        super().__init__(**kwargs)
        # Per client host: next request id, awaited ids, received responses
        self.requests_ids = collections.defaultdict(lambda: 0)
        self.waiting_requests = collections.defaultdict(list)
        self.responses = collections.defaultdict(list)
        self.route_name = route_name
        self.communication_obj = communication_obj

    async def _handle_rpc_msg(self, http_request, raw_msg):
        # This is duplicated code from super but there is no way how to do it
        # to be able handle server->client requests
        host = http_request.host
        if host in self.waiting_requests:
            try:
                _raw_message = raw_msg.data
                msg = decode_msg(_raw_message)
            except RpcError as error:
                await self._ws_send_str(http_request, encode_error(error))
                return
            # Intercept responses to requests sent from this server; stash
            # them for the polling loop in 'send_request'
            if msg.type in (JsonRpcMsgTyp.RESULT, JsonRpcMsgTyp.ERROR):
                msg_data = json.loads(_raw_message)
                if msg_data.get("id") in self.waiting_requests[host]:
                    self.responses[host].append(msg_data)
                    return
        return await super()._handle_rpc_msg(http_request, raw_msg)

    def client_connected(self):
        # TODO This is poor check. Add check it is client from TVPaint
        if self.clients:
            return True
        return False

    def send_notification(self, client, method, params=None):
        """Send notification (no response expected) to a client."""
        if params is None:
            params = []
        asyncio.run_coroutine_threadsafe(
            client.ws.send_str(encode_request(method, params=params)),
            loop=self.loop
        )

    def send_request(self, client, method, params=None, timeout=0):
        """Send request to a client and block until a response arrives.

        Args:
            client: Connected websocket client.
            method (str): RPC method name.
            params (list): Method parameters.
            timeout (float): Seconds to wait before raising; 0 waits forever.

        Returns:
            Value of "result" from the response, or None when the websocket
            closed while waiting.
        """
        if params is None:
            params = []
        client_host = client.host
        request_id = self.requests_ids[client_host]
        self.requests_ids[client_host] += 1
        self.waiting_requests[client_host].append(request_id)
        log.debug("Sending request to client {} ({}, {}) id: {}".format(
            client_host, method, params, request_id
        ))
        future = asyncio.run_coroutine_threadsafe(
            client.ws.send_str(encode_request(method, request_id, params)),
            loop=self.loop
        )
        # Block until the message was actually sent
        result = future.result()
        not_found = object()
        response = not_found
        start = time.time()
        # Poll 'self.responses' until the matching response id shows up
        while True:
            if client.ws.closed:
                return None
            for _response in self.responses[client_host]:
                _id = _response.get("id")
                if _id == request_id:
                    response = _response
                    break
            if response is not not_found:
                break
            if timeout > 0 and (time.time() - start) > timeout:
                raise Exception("Timeout passed")
                return
            time.sleep(0.1)
        if response is not_found:
            raise Exception("Connection closed")
        self.responses[client_host].remove(response)
        error = response.get("error")
        result = response.get("result")
        if error:
            raise Exception("Error happened: {}".format(error))
        return result
class QtTVPaintRpc(BaseTVPaintRpc):
    """RPC handler exposing AYON Qt tools to the TVPaint client.

    Each registered method only enqueues the tool's "show" callback for the
    Qt main thread and returns immediately (``wait=False``) so the websocket
    client is not blocked by the dialog.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        from ayon_core.tools.utils import host_tools
        self.tools_helper = host_tools.HostToolsHelper()

        route_name = self.route_name
        # Register methods
        self.add_methods(
            (route_name, self.workfiles_tool),
            (route_name, self.loader_tool),
            (route_name, self.publish_tool),
            (route_name, self.scene_inventory_tool),
            (route_name, self.library_loader_tool),
            (route_name, self.experimental_tools)
        )

    # Panel routes for tools
    async def workfiles_tool(self):
        """Open Workfiles tool in the main thread."""
        log.info("Triggering Workfile tool")
        item = MainThreadItem(self.tools_helper.show_workfiles)
        self._execute_in_main_thread(item, wait=False)
        return

    async def loader_tool(self):
        """Open Loader tool in the main thread."""
        log.info("Triggering Loader tool")
        item = MainThreadItem(self.tools_helper.show_loader)
        self._execute_in_main_thread(item, wait=False)
        return

    async def publish_tool(self):
        """Open Publisher tool in the main thread."""
        log.info("Triggering Publish tool")
        item = MainThreadItem(self.tools_helper.show_publisher_tool)
        self._execute_in_main_thread(item, wait=False)
        return

    async def scene_inventory_tool(self):
        """Open Scene Inventory tool.

        Function can't confirm if tool was opened because one part of
        SceneInventory initialization is calling websocket request to host but
        host can't response because is waiting for response from this call.
        """
        log.info("Triggering Scene inventory tool")
        item = MainThreadItem(self.tools_helper.show_scene_inventory)
        # Do not wait for result of callback
        self._execute_in_main_thread(item, wait=False)
        return

    async def library_loader_tool(self):
        """Open Library loader tool in the main thread."""
        log.info("Triggering Library loader tool")
        item = MainThreadItem(self.tools_helper.show_library_loader)
        self._execute_in_main_thread(item, wait=False)
        return

    async def experimental_tools(self):
        """Open Experimental tools dialog in the main thread."""
        # Fixed copy-pasted log message that claimed "Library loader tool"
        log.info("Triggering Experimental tools dialog")
        item = MainThreadItem(self.tools_helper.show_experimental_tools_dialog)
        self._execute_in_main_thread(item, wait=False)
        return

    async def _async_execute_in_main_thread(self, item, **kwargs):
        await self.communication_obj.async_execute_in_main_thread(
            item, **kwargs
        )

    def _execute_in_main_thread(self, item, **kwargs):
        return self.communication_obj.execute_in_main_thread(item, **kwargs)
class MainThreadItem:
    """Structure to store information about callback in main thread.

    Item should be used to execute callback in main thread which may be
    needed for execution of Qt objects.

    Item stores the callback (callable), its arguments and keyword arguments
    and holds information about its processing state.
    """
    not_set = object()
    sleep_time = 0.1

    def __init__(self, callback, *args, **kwargs):
        self.callback = callback
        self.args = args
        self.kwargs = kwargs
        # Processing state: 'done' flips once, result/exception hold output
        self.done = False
        self.exception = self.not_set
        self.result = self.not_set

    def execute(self):
        """Execute callback and store its result.

        Must be called from the main thread. Marks the item as `done` when
        finished; stores the callback's output, or the raised exception.
        """
        log.debug("Executing process in main thread")
        if self.done:
            log.warning("- item is already processed")
            return
        log.info("Running callback: {}".format(str(self.callback)))
        try:
            self.result = self.callback(*self.args, **self.kwargs)
        except Exception as exc:
            self.exception = exc
        finally:
            self.done = True

    def wait(self):
        """Block current thread until the callback was executed.

        Returns:
            object: Output of callback. May be any type or object.

        Raises:
            Exception: Reraise any exception that happened during callback
                execution.
        """
        while not self.done:
            time.sleep(self.sleep_time)
        if self.exception is not self.not_set:
            raise self.exception
        return self.result

    async def async_wait(self):
        """Asynchronously wait for result from main thread.

        Returns:
            object: Output of callback. May be any type or object.

        Raises:
            Exception: Reraise any exception that happened during callback
                execution.
        """
        while not self.done:
            await asyncio.sleep(self.sleep_time)
        if self.exception is not self.not_set:
            raise self.exception
        return self.result
class BaseCommunicator:
    """Handle lifetime of a TVPaint process and its websocket server.

    Starts the websocket server, launches TVPaint as a subprocess and then
    mediates requests/notifications between the two sides.
    """
    def __init__(self):
        # Subprocess handle of launched TVPaint (set in '_launch_tv_paint')
        self.process = None
        # Websocket server and its RPC handler (created in 'launch')
        self.websocket_server = None
        self.websocket_rpc = None
        # Exit code used when communicator stops (defaults to 0 in '_exit')
        self.exit_code = None
        self._connected_client = None

    @property
    def server_is_running(self):
        # Server cannot be running before 'launch' created it
        if self.websocket_server is None:
            return False
        return self.websocket_server.server_is_running

    def _windows_file_process(self, src_dst_mapping, to_remove):
        """Windows specific file processing asking for admin permissions.

        It is required to have administration permissions to modify plugin
        files in TVPaint installation folder.

        Method requires `pywin32` python module.

        Args:
            src_dst_mapping (list, tuple, set): Mapping of source file to
                destination. Both must be full path. Each item must be iterable
                of size 2 `(C:/src/file.dll, C:/dst/file.dll)`.
            to_remove (list): Fullpath to files that should be removed.
        """
        import pythoncom
        from win32comext.shell import shell
        # Create temp folder where plugin files are temporary copied
        # - reason is that copy to TVPaint requires administration permissions
        #   but admin may not have access to source folder
        tmp_dir = os.path.normpath(
            tempfile.mkdtemp(prefix="tvpaint_copy_")
        )
        # Copy source to temp folder and create new mapping
        dst_folders = collections.defaultdict(list)
        new_src_dst_mapping = []
        for old_src, dst in src_dst_mapping:
            new_src = os.path.join(tmp_dir, os.path.split(old_src)[1])
            shutil.copy(old_src, new_src)
            new_src_dst_mapping.append((new_src, dst))
        # Group destination files by their target folder
        for src, dst in new_src_dst_mapping:
            src = os.path.normpath(src)
            dst = os.path.normpath(dst)
            dst_filename = os.path.basename(dst)
            dst_folder_path = os.path.dirname(dst)
            dst_folders[dst_folder_path].append((dst_filename, src))
        # create an instance of IFileOperation
        fo = pythoncom.CoCreateInstance(
            shell.CLSID_FileOperation,
            None,
            pythoncom.CLSCTX_ALL,
            shell.IID_IFileOperation
        )
        # Add delete command to file operation object
        for filepath in to_remove:
            item = shell.SHCreateItemFromParsingName(
                filepath, None, shell.IID_IShellItem
            )
            fo.DeleteItem(item)
        # here you can use SetOperationFlags, progress Sinks, etc.
        for folder_path, items in dst_folders.items():
            # create an instance of IShellItem for the target folder
            folder_item = shell.SHCreateItemFromParsingName(
                folder_path, None, shell.IID_IShellItem
            )
            for _dst_filename, source_file_path in items:
                # create an instance of IShellItem for the source item
                copy_item = shell.SHCreateItemFromParsingName(
                    source_file_path, None, shell.IID_IShellItem
                )
                # queue the copy operation
                fo.CopyItem(copy_item, folder_item, _dst_filename, None)
        # commit (this is the point where a UAC elevation prompt may appear)
        fo.PerformOperations()
        # Remove temp folder
        shutil.rmtree(tmp_dir)

    def _prepare_windows_plugin(self, launch_args):
        """Copy plugin to TVPaint plugins and set PATH to dependencies.

        Check if plugin in TVPaint's plugins exist and match to plugin
        version to current implementation version. Based on 64-bit or 32-bit
        version of the plugin. Path to libraries required for plugin is added
        to PATH variable.
        """
        host_executable = launch_args[0]
        executable_file = os.path.basename(host_executable)
        # Executable filename is the only available hint about the bitness
        if "64bit" in executable_file:
            subfolder = "windows_x64"
        elif "32bit" in executable_file:
            subfolder = "windows_x86"
        else:
            raise ValueError(
                "Can't determine if executable "
                "leads to 32-bit or 64-bit TVPaint!"
            )
        plugin_files_path = get_plugin_files_path()
        # Folder for right windows plugin files
        source_plugins_dir = os.path.join(plugin_files_path, subfolder)
        # Path to libraries (.dll) required for plugin library
        # - additional libraries can be copied to TVPaint installation folder
        #   (next to executable) or added to PATH environment variable
        additional_libs_folder = os.path.join(
            source_plugins_dir,
            "additional_libraries"
        )
        additional_libs_folder = additional_libs_folder.replace("\\", "/")
        if (
            os.path.exists(additional_libs_folder)
            and additional_libs_folder not in os.environ["PATH"]
        ):
            os.environ["PATH"] += (os.pathsep + additional_libs_folder)
        # Path to TVPaint's plugins folder (where we want to add our plugin)
        host_plugins_path = os.path.join(
            os.path.dirname(host_executable),
            "plugins"
        )
        # Files that must be copied to TVPaint's plugin folder
        plugin_dir = os.path.join(source_plugins_dir, "plugin")
        to_copy = []
        to_remove = []
        # Remove old plugin name
        deprecated_filepath = os.path.join(
            host_plugins_path, "AvalonPlugin.dll"
        )
        if os.path.exists(deprecated_filepath):
            to_remove.append(deprecated_filepath)
        for filename in os.listdir(plugin_dir):
            src_full_path = os.path.join(plugin_dir, filename)
            dst_full_path = os.path.join(host_plugins_path, filename)
            # A destination that will be overwritten is not deprecated
            if dst_full_path in to_remove:
                to_remove.remove(dst_full_path)
            # Copy only when missing or content differs
            if (
                not os.path.exists(dst_full_path)
                or not filecmp.cmp(src_full_path, dst_full_path)
            ):
                to_copy.append((src_full_path, dst_full_path))
        # Skip copy if everything is done
        if not to_copy and not to_remove:
            return
        # Try to copy
        try:
            self._windows_file_process(to_copy, to_remove)
        except Exception:
            log.error("Plugin copy failed", exc_info=True)
        # Validate copy was done
        invalid_copy = []
        for src, dst in to_copy:
            if not os.path.exists(dst) or not filecmp.cmp(src, dst):
                invalid_copy.append((src, dst))
        # Validate delete was done
        invalid_remove = []
        for filepath in to_remove:
            if os.path.exists(filepath):
                invalid_remove.append(filepath)
        if not invalid_remove and not invalid_copy:
            return
        msg_parts = []
        if invalid_remove:
            msg_parts.append(
                "Failed to remove files: {}".format(", ".join(invalid_remove))
            )
        if invalid_copy:
            _invalid = [
                "\"{}\" -> \"{}\"".format(src, dst)
                for src, dst in invalid_copy
            ]
            msg_parts.append(
                "Failed to copy files: {}".format(", ".join(_invalid))
            )
        raise RuntimeError(" & ".join(msg_parts))

    def _launch_tv_paint(self, launch_args):
        """Start TVPaint as a detached subprocess.

        NOTE(review): 'DETACHED_PROCESS'/'CREATE_NEW_PROCESS_GROUP' exist
        only on Windows builds of Python — confirm this method is never
        reached on other platforms.
        """
        flags = (
            subprocess.DETACHED_PROCESS
            | subprocess.CREATE_NEW_PROCESS_GROUP
        )
        env = os.environ.copy()
        # Remove QuickTime from PATH on windows
        # - quicktime overrides TVPaint's ffmpeg encode/decode which may
        #   cause issues on loading
        if platform.system().lower() == "windows":
            new_path = []
            for path in env["PATH"].split(os.pathsep):
                if path and "quicktime" not in path.lower():
                    new_path.append(path)
            env["PATH"] = os.pathsep.join(new_path)
        kwargs = {
            "env": env,
            "creationflags": flags
        }
        self.process = subprocess.Popen(launch_args, **kwargs)

    def _create_routes(self):
        """Create RPC handler and register it on the websocket server."""
        self.websocket_rpc = BaseTVPaintRpc(
            self, loop=self.websocket_server.loop
        )
        self.websocket_server.add_route(
            "*", "/", self.websocket_rpc.handle_request
        )

    def _start_webserver(self):
        self.websocket_server.start()
        # Make sure RPC is using same loop as websocket server
        while not self.websocket_server.server_is_running:
            time.sleep(0.1)

    def _stop_webserver(self):
        self.websocket_server.stop()

    def _exit(self, exit_code=None):
        """Stop the webserver and remember the exit code (defaults to 0)."""
        self._stop_webserver()
        if exit_code is not None:
            self.exit_code = exit_code
        if self.exit_code is None:
            self.exit_code = 0

    def stop(self):
        """Stop communication and currently running python process."""
        log.info("Stopping communication")
        self._exit()

    def launch(self, launch_args):
        """Prepare all required data and launch host.

        First is prepared websocket server as communication point for host,
        when server is ready to use host is launched as subprocess.
        """
        if platform.system().lower() == "windows":
            self._prepare_windows_plugin(launch_args)
        # Launch TVPaint and the websocket server.
        log.info("Launching TVPaint")
        self.websocket_server = WebSocketServer()
        self._create_routes()
        # Plugin inside TVPaint reads this variable to know where to connect
        os.environ["WEBSOCKET_URL"] = "ws://localhost:{}".format(
            self.websocket_server.port
        )
        log.info("Added request handler for url: {}".format(
            os.environ["WEBSOCKET_URL"]
        ))
        self._start_webserver()
        # Start TVPaint when server is running
        self._launch_tv_paint(launch_args)
        log.info("Waiting for client connection")
        while True:
            if self.process.poll() is not None:
                log.debug("Host process is not alive. Exiting")
                self._exit(1)
                return
            if self.websocket_rpc.client_connected():
                log.info("Client has connected")
                break
            time.sleep(0.5)
        self._on_client_connect()
        emit_event("application.launched")

    def _on_client_connect(self):
        self._initial_textfile_write()

    def _initial_textfile_write(self):
        """Show popup about Write to file at start of TVPaint."""
        tmp_file = tempfile.NamedTemporaryFile(
            mode="w", prefix="a_tvp_", suffix=".txt", delete=False
        )
        tmp_file.close()
        tmp_filepath = tmp_file.name.replace("\\", "/")
        george_script = (
            "tv_writetextfile \"strict\" \"append\" \"{}\" \"empty\""
        ).format(tmp_filepath)
        result = CommunicationWrapper.execute_george(george_script)
        # Remove the file
        os.remove(tmp_filepath)
        if result is None:
            log.warning(
                "Host was probably closed before plugin was initialized."
            )
        elif result.lower() == "forbidden":
            log.warning("User didn't confirm saving files.")

    def _client(self):
        """Find first connected (not closed) websocket client, or None."""
        if not self.websocket_rpc:
            log.warning("Communicator's server did not start yet.")
            return None
        for client in self.websocket_rpc.clients:
            if not client.ws.closed:
                return client
        log.warning("Client is not yet connected to Communicator.")
        return None

    def client(self):
        """Return cached connected client; refresh when missing/closed."""
        if not self._connected_client or self._connected_client.ws.closed:
            self._connected_client = self._client()
        return self._connected_client

    def send_request(self, method, params=None):
        """Send request to connected client and wait for the response."""
        client = self.client()
        if not client:
            return
        return self.websocket_rpc.send_request(
            client, method, params
        )

    def send_notification(self, method, params=None):
        """Send notification (no response expected) to connected client."""
        client = self.client()
        if not client:
            return
        self.websocket_rpc.send_notification(
            client, method, params
        )

    def execute_george(self, george_script):
        """Execute passed george script in TVPaint."""
        return self.send_request(
            "execute_george", [george_script]
        )

    def execute_george_through_file(self, george_script):
        """Execute george script with temp file.

        Allows to execute multiline george script without stopping websocket
        client.

        On windows make sure script does not contain paths with backwards
        slashes in paths, TVPaint won't execute properly in that case.

        Args:
            george_script (str): George script to execute. May be multilined.
        """
        temporary_file = tempfile.NamedTemporaryFile(
            mode="w", prefix="a_tvp_", suffix=".grg", delete=False
        )
        temporary_file.write(george_script)
        temporary_file.close()
        temp_file_path = temporary_file.name.replace("\\", "/")
        self.execute_george("tv_runscript {}".format(temp_file_path))
        os.remove(temp_file_path)
class QtCommunicator(BaseCommunicator):
    """Communicator which dispatches callbacks through a Qt application."""

    label = os.getenv("AYON_MENU_LABEL") or "AYON"
    title = "{} Tools".format(label)
    # Definition of the in-host menu sent to the TVPaint plugin
    menu_definitions = {
        "title": title,
        "menu_items": [
            {"callback": callback, "label": item_label, "help": item_help}
            for callback, item_label, item_help in (
                ("workfiles_tool", "Workfiles", "Open workfiles tool"),
                ("loader_tool", "Load", "Open loader tool"),
                ("scene_inventory_tool", "Scene inventory",
                 "Open scene inventory tool"),
                ("publish_tool", "Publish", "Open publisher"),
                ("library_loader_tool", "Library",
                 "Open library loader tool"),
                ("experimental_tools", "Experimental tools",
                 "Open experimental tools dialog"),
            )
        ]
    }

    def __init__(self, qt_app):
        super().__init__()
        self.callback_queue = collections.deque()
        self.qt_app = qt_app

    def _create_routes(self):
        # Qt aware RPC exposes the tool methods to the client
        self.websocket_rpc = QtTVPaintRpc(
            self, loop=self.websocket_server.loop
        )
        self.websocket_server.add_route(
            "*", "/", self.websocket_rpc.handle_request
        )

    def execute_in_main_thread(self, main_thread_item, wait=True):
        """Add `MainThreadItem` to callback queue and wait for result."""
        self.callback_queue.append(main_thread_item)
        if not wait:
            return None
        return main_thread_item.wait()

    async def async_execute_in_main_thread(self, main_thread_item, wait=True):
        """Add `MainThreadItem` to callback queue and wait for result."""
        self.callback_queue.append(main_thread_item)
        if not wait:
            return None
        return await main_thread_item.async_wait()

    def main_thread_listen(self):
        """Pop next `MainThreadItem` from queue.

        Must be called from main thread.

        Method checks if host process is still running as it may cause
        issues if not.
        """
        # check if host still running
        if self.process.poll() is not None:
            self._exit()
            return None
        if not self.callback_queue:
            return None
        return self.callback_queue.popleft()

    def _on_client_connect(self):
        super()._on_client_connect()
        self._build_menu()

    def _build_menu(self):
        """Ask the TVPaint plugin to build the AYON menu."""
        self.send_request(
            "define_menu", [self.menu_definitions]
        )

    def _exit(self, *args, **kwargs):
        super()._exit(*args, **kwargs)
        emit_event("application.exit")
        self.qt_app.exit(self.exit_code)

View file

@ -1,85 +0,0 @@
import os
import sys
import signal
import traceback
import ctypes
import platform
import logging
from qtpy import QtWidgets, QtCore, QtGui
from ayon_core import style
from ayon_core.pipeline import install_host
from ayon_tvpaint.api import (
TVPaintHost,
CommunicationWrapper,
)
log = logging.getLogger(__name__)
def safe_excepthook(*args):
    """Print tracebacks of uncaught exceptions without crashing the server."""
    traceback.print_exception(*args)
def main(launch_args):
    """Start TVPaint communication server inside a Qt event loop.

    Args:
        launch_args (list[str]): Arguments used to launch the TVPaint
            executable (first item is the executable path).
    """
    # Be sure server won't crash at any moment but just print traceback
    sys.excepthook = safe_excepthook
    # Create QtApplication for tools
    # - QApplication is also main thread/event loop of the server
    qt_app = QtWidgets.QApplication([])
    tvpaint_host = TVPaintHost()
    # Execute pipeline installation
    install_host(tvpaint_host)
    # Create Communicator object and trigger launch
    # - this must be done before anything is processed
    communicator = CommunicationWrapper.create_qt_communicator(qt_app)
    communicator.launch(launch_args)

    def process_in_main_thread():
        """Execution of `MainThreadItem`."""
        item = communicator.main_thread_listen()
        if item:
            item.execute()

    # Periodically drain the communicator's callback queue on the Qt loop
    timer = QtCore.QTimer()
    timer.setInterval(100)
    timer.timeout.connect(process_in_main_thread)
    timer.start()

    # Register terminal signal handler
    def signal_handler(*_args):
        print("You pressed Ctrl+C. Process ended.")
        communicator.stop()

    signal.signal(signal.SIGINT, signal_handler)
    signal.signal(signal.SIGTERM, signal_handler)
    qt_app.setQuitOnLastWindowClosed(False)
    qt_app.setStyleSheet(style.load_stylesheet())
    # Load avalon icon
    icon_path = style.app_icon_path()
    if icon_path:
        icon = QtGui.QIcon(icon_path)
        qt_app.setWindowIcon(icon)
    # Set application name to be able show application icon in task bar
    if platform.system().lower() == "windows":
        ctypes.windll.shell32.SetCurrentProcessExplicitAppUserModelID(
            u"WebsocketServer"
        )
    # Run Qt application event processing
    sys.exit(qt_app.exec_())
if __name__ == "__main__":
    # Forward CLI arguments to 'main'; the first argument may be this
    # script's own path when launched directly with the interpreter.
    args = list(sys.argv)
    if os.path.abspath(__file__) == os.path.normpath(args[0]):
        # Pop path to script
        args.pop(0)
    main(args)

View file

@ -1,542 +0,0 @@
import os
import logging
import tempfile
from .communication_server import CommunicationWrapper
# Logger for this module's george helper functions
log = logging.getLogger(__name__)
def execute_george(george_script, communicator=None):
    """Execute single-line george script in the connected TVPaint.

    Falls back to the globally registered communicator when none is passed.
    """
    communicator = communicator or CommunicationWrapper.communicator
    return communicator.execute_george(george_script)
def execute_george_through_file(george_script, communicator=None):
    """Execute george script with temp file.

    Allows to execute multiline george script without stopping websocket
    client.

    On windows make sure script does not contain paths with backwards
    slashes in paths, TVPaint won't execute properly in that case.

    Args:
        george_script (str): George script to execute. May be multilined.
    """
    communicator = communicator or CommunicationWrapper.communicator
    return communicator.execute_george_through_file(george_script)
def parse_layers_data(data):
    """Parse layers data loaded in 'get_layers_data'.

    Each non-empty line contains 15 fields joined with "|".

    Args:
        data (str): Raw content of the george output file.

    Returns:
        list[dict]: One dictionary per layer line.
    """
    layers = []
    for raw_line in data.split("\n"):
        raw_line = raw_line.strip()
        if not raw_line:
            continue
        (
            layer_id, group_id, visible, position, _opacity, name,
            layer_type, frame_start, frame_end, prelighttable,
            postlighttable, selected, editable, sencil_state, is_current
        ) = raw_line.split("|")
        layers.append({
            "layer_id": int(layer_id),
            "group_id": int(group_id),
            "visible": visible == "ON",
            "position": int(position),
            # Opacity from 'tv_layerinfo' is always set to '0' so it's unusable
            # "opacity": int(_opacity),
            "name": name,
            "type": layer_type,
            "frame_start": int(frame_start),
            "frame_end": int(frame_end),
            "prelighttable": prelighttable == "1",
            "postlighttable": postlighttable == "1",
            "selected": selected == "1",
            "editable": editable == "1",
            "sencil_state": sencil_state,
            "is_current": is_current == "1"
        })
    return layers
def get_layers_data_george_script(output_filepath, layer_ids=None):
    """Build george script collecting layer information into a file.

    Args:
        output_filepath (str): File where george writes one line per layer.
        layer_ids (list[int] | None): Collect only these layers; all layers
            are collected when not passed.

    Returns:
        str: Multiline george script.
    """
    output_filepath = output_filepath.replace("\\", "/")
    script_lines = [
        # Variable containing full path to output file
        f"output_path = \"{output_filepath}\"",
        # Get Current Layer ID
        "tv_LayerCurrentID",
        "current_layer_id = result"
    ]
    # Lines that collect and store information of a single 'layer_id'
    layer_data_getter = (
        # Get information about layer's group
        "tv_layercolor \"get\" layer_id",
        "group_id = result",
        "tv_LayerInfo layer_id",
        (
            "PARSE result visible position opacity name"
            " type startFrame endFrame prelighttable postlighttable"
            " selected editable sencilState"
        ),
        # Check if layer ID match `tv_LayerCurrentID`
        "is_current=0",
        "IF CMP(current_layer_id, layer_id)==1",
        # - mark layer as selected if layer id match to current layer id
        "is_current=1",
        "selected=1",
        "END",
        # Prepare line with data separated by "|"
        (
            "line = layer_id'|'group_id'|'visible'|'position'|'opacity'|'"
            "name'|'type'|'startFrame'|'endFrame'|'prelighttable'|'"
            "postlighttable'|'selected'|'editable'|'sencilState'|'is_current"
        ),
        # Write data to output file
        "tv_writetextfile \"strict\" \"append\" '\"'output_path'\"' line",
    )
    if layer_ids is None:
        # Collect data for all layers when none are specified
        script_lines.extend((
            # Layer loop variables
            "loop = 1",
            "idx = 0",
            # Layers loop
            "WHILE loop",
            "tv_LayerGetID idx",
            "layer_id = result",
            "idx = idx + 1",
            # Stop loop if layer_id is "NONE"
            "IF CMP(layer_id, \"NONE\")==1",
            "loop = 0",
            "ELSE",
            *layer_data_getter,
            "END",
            "END"
        ))
    else:
        for _layer_id in layer_ids:
            script_lines.append(f"layer_id = {_layer_id}")
            script_lines.extend(layer_data_getter)
    return "\n".join(script_lines)
def layers_data(layer_ids=None, communicator=None):
    """Backwards compatible alias of 'get_layers_data'."""
    return get_layers_data(layer_ids, communicator)
def get_layers_data(layer_ids=None, communicator=None):
    """Collect all layers information from currently opened workfile.

    Args:
        layer_ids (int | list[int] | None): Limit collection to these layer
            ids; a single int is accepted too.
        communicator: Optional explicit communicator.

    Returns:
        list[dict]: Parsed layer information.
    """
    tmp_file = tempfile.NamedTemporaryFile(
        mode="w", prefix="a_tvp_", suffix=".txt", delete=False
    )
    tmp_file.close()
    # Single layer id may be passed directly
    if isinstance(layer_ids, int):
        layer_ids = [layer_ids]
    output_filepath = tmp_file.name
    george_script = get_layers_data_george_script(output_filepath, layer_ids)
    execute_george_through_file(george_script, communicator)
    with open(output_filepath, "r") as stream:
        file_content = stream.read()
    output = parse_layers_data(file_content)
    os.remove(output_filepath)
    return output
def parse_group_data(data):
    """Parse group data collected in 'get_groups_data'.

    Args:
        data (str): Raw content of the george output file, one group per
            non-empty line with 6 fields joined by "|".

    Returns:
        list[dict]: One dictionary per group line.
    """
    groups = []
    for raw_group in data.split("\n"):
        raw_group = raw_group.strip()
        if not raw_group:
            continue
        parts = raw_group.split("|")
        # Re-join trailing parts until exactly 6 remain; the group name is
        # the last field and may itself contain the separator
        while len(parts) > 6:
            tail = parts.pop(-1)
            parts[-1] = "|".join([parts[-1], tail])
        clip_id, group_id, red, green, blue, name = parts
        groups.append({
            "group_id": int(group_id),
            "name": name,
            "clip_id": int(clip_id),
            "red": int(red),
            "green": int(green),
            "blue": int(blue),
        })
    return groups
def groups_data(communicator=None):
    """Backwards compatible alias of 'get_groups_data'."""
    return get_groups_data(communicator)
def get_groups_data(communicator=None):
    """Information about groups from current workfile.

    Args:
        communicator (BaseCommunicator): Communicator used for communication
            with TVPaint.

    Returns:
        list[dict]: Group items parsed by 'parse_group_data'.
    """
    output_file = tempfile.NamedTemporaryFile(
        mode="w", prefix="a_tvp_", suffix=".txt", delete=False
    )
    output_file.close()
    # George requires forward slashes in paths
    output_filepath = output_file.name.replace("\\", "/")
    george_script_lines = (
        # Variable containing full path to output file
        "output_path = \"{}\"".format(output_filepath),
        # NOTE(review): 'empty' does not seem to be used by the lines
        #   below - confirm whether it can be removed
        "empty = 0",
        # Loop over 26 groups which is ATM maximum possible (in 11.7)
        # - ref: https://www.tvpaint.com/forum/viewtopic.php?t=13880
        "FOR idx = 1 TO 26",
        # Receive information about groups
        "tv_layercolor \"getcolor\" 0 idx",
        "PARSE result clip_id group_index c_red c_green c_blue group_name",
        # Create and add line to output file
        "line = clip_id'|'group_index'|'c_red'|'c_green'|'c_blue'|'group_name",
        "tv_writetextfile \"strict\" \"append\" '\"'output_path'\"' line",
        "END",
    )
    george_script = "\n".join(george_script_lines)
    execute_george_through_file(george_script, communicator)
    with open(output_filepath, "r") as stream:
        data = stream.read()
    output = parse_group_data(data)
    os.remove(output_filepath)
    return output
def get_layers_pre_post_behavior(layer_ids, communicator=None):
    """Collect data about pre and post behavior of layer ids.

    Pre and Post behaviors is enumerator of possible values:
    - "none"
    - "repeat"
    - "pingpong"
    - "hold"

    Example output:
    ```json
    {
        0: {
            "pre": "none",
            "post": "repeat"
        }
    }
    ```

    Args:
        layer_ids (Union[int, list]): Layer id or multiple layer ids.
        communicator (BaseCommunicator): Communicator used for communication
            with TVPaint.

    Returns:
        dict: Key is layer id value is dictionary with "pre" and "post" keys.
    """
    # Skip if is empty
    if not layer_ids:
        return {}
    # Auto convert to list
    if not isinstance(layer_ids, (list, set, tuple)):
        layer_ids = [layer_ids]
    # Prepare temp file
    output_file = tempfile.NamedTemporaryFile(
        mode="w", prefix="a_tvp_", suffix=".txt", delete=False
    )
    output_file.close()
    # George requires forward slashes in paths
    output_filepath = output_file.name.replace("\\", "/")
    george_script_lines = [
        # Variable containing full path to output file
        "output_path = \"{}\"".format(output_filepath),
    ]
    # Query both behaviors for each layer and store as single '|' line
    for layer_id in layer_ids:
        george_script_lines.extend([
            "layer_id = {}".format(layer_id),
            "tv_layerprebehavior layer_id",
            "pre_beh = result",
            "tv_layerpostbehavior layer_id",
            "post_beh = result",
            "line = layer_id'|'pre_beh'|'post_beh",
            "tv_writetextfile \"strict\" \"append\" '\"'output_path'\"' line"
        ])
    george_script = "\n".join(george_script_lines)
    execute_george_through_file(george_script, communicator)
    # Read data
    with open(output_filepath, "r") as stream:
        data = stream.read()
    # Remove temp file
    os.remove(output_filepath)
    # Parse data
    output = {}
    raw_lines = data.split("\n")
    for raw_line in raw_lines:
        line = raw_line.strip()
        if not line:
            continue
        parts = line.split("|")
        # Skip malformed lines
        if len(parts) != 3:
            continue
        layer_id, pre_beh, post_beh = parts
        output[int(layer_id)] = {
            "pre": pre_beh.lower(),
            "post": post_beh.lower()
        }
    return output
def get_layers_exposure_frames(layer_ids, layers_data=None, communicator=None):
    """Get exposure frames.

    Easily said returns frames where keyframes are. Recognized with george
    function `tv_exposureinfo` returning "Head".

    Args:
        layer_ids (list): Ids of a layers for which exposure frames should
            look for.
        layers_data (list): Precollected layers data. If are not passed then
            'get_layers_data' is used.
        communicator (BaseCommunicator): Communicator used for communication
            with TVPaint.

    Returns:
        dict: Frames where exposure is set to "Head" by layer id.
    """
    if layers_data is None:
        layers_data = get_layers_data(layer_ids)
    # Map precollected layers by their id
    _layers_by_id = {
        layer["layer_id"]: layer
        for layer in layers_data
    }
    # Keep requested order and fill missing layers with None
    layers_by_id = {
        layer_id: _layers_by_id.get(layer_id)
        for layer_id in layer_ids
    }
    tmp_file = tempfile.NamedTemporaryFile(
        mode="w", prefix="a_tvp_", suffix=".txt", delete=False
    )
    tmp_file.close()
    # George requires forward slashes in paths
    tmp_output_path = tmp_file.name.replace("\\", "/")
    george_script_lines = [
        "output_path = \"{}\"".format(tmp_output_path)
    ]
    output = {}
    # George writes layer id as string - keep mapping back to original value
    layer_id_mapping = {}
    for layer_id, layer_data in layers_by_id.items():
        layer_id_mapping[str(layer_id)] = layer_id
        output[layer_id] = []
        # Skip layers that were not found in layers data
        if not layer_data:
            continue
        first_frame = layer_data["frame_start"]
        last_frame = layer_data["frame_end"]
        # Check exposure of each frame in layer's range and append "Head"
        #   frames to a single line starting with the layer id
        george_script_lines.extend([
            "line = \"\"",
            "layer_id = {}".format(layer_id),
            "line = line''layer_id",
            "tv_layerset layer_id",
            "frame = {}".format(first_frame),
            "WHILE (frame <= {})".format(last_frame),
            "tv_exposureinfo frame",
            "exposure = result",
            "IF (CMP(exposure, \"Head\") == 1)",
            "line = line'|'frame",
            "END",
            "frame = frame + 1",
            "END",
            "tv_writetextfile \"strict\" \"append\" '\"'output_path'\"' line"
        ])
    execute_george_through_file("\n".join(george_script_lines), communicator)
    with open(tmp_output_path, "r") as stream:
        data = stream.read()
    os.remove(tmp_output_path)
    lines = []
    for line in data.split("\n"):
        line = line.strip()
        if line:
            lines.append(line)
    # First '|' item on each line is layer id followed by exposure frames
    for line in lines:
        line_items = list(line.split("|"))
        layer_id = line_items.pop(0)
        _layer_id = layer_id_mapping[layer_id]
        output[_layer_id] = [int(frame) for frame in line_items]
    return output
def get_exposure_frames(
    layer_id, first_frame=None, last_frame=None, communicator=None
):
    """Get exposure frames.

    Easily said returns frames where keyframes are. Recognized with george
    function `tv_exposureinfo` returning "Head".

    Args:
        layer_id (int): Id of a layer for which exposure frames should
            look for.
        first_frame (int): From which frame will look for exposure frames.
            Used layers first frame if not entered.
        last_frame (int): Last frame where will look for exposure frames.
            Used layers last frame if not entered.
        communicator (BaseCommunicator): Communicator used for communication
            with TVPaint.

    Returns:
        list: Frames where exposure is set to "Head".
    """
    # Collect layer's own range when boundaries were not passed
    if first_frame is None or last_frame is None:
        layer = layers_data(layer_id)[0]
        if first_frame is None:
            first_frame = layer["frame_start"]
        if last_frame is None:
            last_frame = layer["frame_end"]
    tmp_file = tempfile.NamedTemporaryFile(
        mode="w", prefix="a_tvp_", suffix=".txt", delete=False
    )
    tmp_file.close()
    # George requires forward slashes in paths
    tmp_output_path = tmp_file.name.replace("\\", "/")
    # Check exposure of each frame in range and join "Head" frames with '|'
    george_script_lines = [
        "tv_layerset {}".format(layer_id),
        "output_path = \"{}\"".format(tmp_output_path),
        "output = \"\"",
        "frame = {}".format(first_frame),
        "WHILE (frame <= {})".format(last_frame),
        "tv_exposureinfo frame",
        "exposure = result",
        "IF (CMP(exposure, \"Head\") == 1)",
        "IF (CMP(output, \"\") == 1)",
        "output = output''frame",
        "ELSE",
        "output = output'|'frame",
        "END",
        "END",
        "frame = frame + 1",
        "END",
        "tv_writetextfile \"strict\" \"append\" '\"'output_path'\"' output"
    ]
    execute_george_through_file("\n".join(george_script_lines), communicator)
    with open(tmp_output_path, "r") as stream:
        data = stream.read()
    os.remove(tmp_output_path)
    lines = []
    for line in data.split("\n"):
        line = line.strip()
        if line:
            lines.append(line)
    exposure_frames = []
    for line in lines:
        for frame in line.split("|"):
            exposure_frames.append(int(frame))
    return exposure_frames
def get_scene_data(communicator=None):
    """Scene data of currently opened scene.

    Result contains resolution, pixel aspect, fps mark in/out with states,
    frame start and background color.

    Args:
        communicator (BaseCommunicator): Communicator used for communication
            with TVPaint.

    Returns:
        dict: Scene data collected in many ways.
    """
    workfile_info = execute_george("tv_projectinfo", communicator)
    # Result is a single space separated line - consume values from the end
    workfile_info_parts = workfile_info.split(" ")
    # Project frame start - not used
    workfile_info_parts.pop(-1)
    field_order = workfile_info_parts.pop(-1)
    frame_rate = float(workfile_info_parts.pop(-1))
    pixel_apsect = float(workfile_info_parts.pop(-1))
    height = int(workfile_info_parts.pop(-1))
    width = int(workfile_info_parts.pop(-1))
    # Marks return as "{frame - 1} {state} ", example "0 set".
    result = execute_george("tv_markin", communicator)
    mark_in_frame, mark_in_state, _ = result.split(" ")
    result = execute_george("tv_markout", communicator)
    mark_out_frame, mark_out_state, _ = result.split(" ")
    start_frame = execute_george("tv_startframe", communicator)
    return {
        "width": width,
        "height": height,
        "pixel_aspect": pixel_apsect,
        "fps": frame_rate,
        "field_order": field_order,
        "mark_in": int(mark_in_frame),
        "mark_in_state": mark_in_state,
        "mark_in_set": mark_in_state == "set",
        "mark_out": int(mark_out_frame),
        "mark_out_state": mark_out_state,
        "mark_out_set": mark_out_state == "set",
        "start_frame": int(start_frame),
        "bg_color": get_scene_bg_color(communicator)
    }
def get_scene_bg_color(communicator=None):
    """Background color set on scene.

    Is important for review exporting where scene bg color is used as
    background.

    Returns:
        Union[list, None]: Space-split parts of 'tv_background' result or
            None when nothing was returned.
    """
    output_file = tempfile.NamedTemporaryFile(
        mode="w", prefix="a_tvp_", suffix=".txt", delete=False
    )
    output_file.close()
    # George requires forward slashes in paths
    output_filepath = output_file.name.replace("\\", "/")
    george_script_lines = [
        # Variable containing full path to output file
        "output_path = \"{}\"".format(output_filepath),
        "tv_background",
        "bg_color = result",
        # Write data to output file
        "tv_writetextfile \"strict\" \"append\" '\"'output_path'\"' bg_color"
    ]
    george_script = "\n".join(george_script_lines)
    execute_george_through_file(george_script, communicator)
    with open(output_filepath, "r") as stream:
        data = stream.read()
    os.remove(output_filepath)
    data = data.strip()
    if not data:
        return None
    # Raw space-split parts of the george result
    return data.split(" ")

View file

@ -1,518 +0,0 @@
import os
import json
import tempfile
import logging
import requests
import ayon_api
import pyblish.api
from ayon_tvpaint import TVPAINT_ROOT_DIR
from ayon_core.host import HostBase, IWorkfileHost, ILoadHost, IPublishHost
from ayon_core.settings import get_current_project_settings
from ayon_core.lib import register_event_callback
from ayon_core.pipeline import (
register_loader_plugin_path,
register_creator_plugin_path,
AVALON_CONTAINER_ID,
)
from ayon_core.pipeline.context_tools import get_global_context
from .lib import (
execute_george,
execute_george_through_file
)
log = logging.getLogger(__name__)
# Workfile "ini"-like section under which all AYON metadata are stored
METADATA_SECTION = "avalon"
# Metadata keys within 'METADATA_SECTION' for each data category
SECTION_NAME_CONTEXT = "context"
SECTION_NAME_CREATE_CONTEXT = "create_context"
SECTION_NAME_INSTANCES = "instances"
SECTION_NAME_CONTAINERS = "containers"
# Maximum length of metadata chunk string
# TODO find out the max (500 is safe enough)
TVPAINT_CHUNK_LENGTH = 500
"""TVPaint's Metadata
Metadata are stored to TVPaint's workfile.
Workfile works similar to .ini file but has few limitation. Most important
limitation is that value under key has limited length. Due to this limitation
each metadata section/key stores number of "subkeys" that are related to
the section.
Example:
Metadata key `"instances"` may have stored value "2". In that case it is
expected that there are also keys `["instances0", "instances1"]`.
Workfile data looks like:
```
[avalon]
instances0=[{{__dq__}id{__dq__}: {__dq__}ayon.create.instance{__dq__...
instances1=...more data...
instances=2
```
"""
class TVPaintHost(HostBase, IWorkfileHost, ILoadHost, IPublishHost):
    """AYON host integration for TVPaint."""
    name = "tvpaint"

    def install(self):
        """Install TVPaint-specific functionality."""
        log.info("AYON - Installing TVPaint integration")
        # Create workdir folder if does not exist yet
        workdir = os.getenv("AYON_WORKDIR")
        if not os.path.exists(workdir):
            os.makedirs(workdir)
        # Register publish, load and create plugin paths
        plugins_dir = os.path.join(TVPAINT_ROOT_DIR, "plugins")
        publish_dir = os.path.join(plugins_dir, "publish")
        load_dir = os.path.join(plugins_dir, "load")
        create_dir = os.path.join(plugins_dir, "create")
        pyblish.api.register_host("tvpaint")
        pyblish.api.register_plugin_path(publish_dir)
        register_loader_plugin_path(load_dir)
        register_creator_plugin_path(create_dir)
        # React to application lifecycle events
        register_event_callback("application.launched", self.initial_launch)
        register_event_callback("application.exit", self.application_exit)

    def get_current_project_name(self):
        """
        Returns:
            Union[str, None]: Current project name.
        """
        return self.get_current_context().get("project_name")

    def get_current_folder_path(self):
        """
        Returns:
            Union[str, None]: Current folder path.
        """
        return self.get_current_context().get("folder_path")

    def get_current_task_name(self):
        """
        Returns:
            Union[str, None]: Current task name.
        """
        return self.get_current_context().get("task_name")

    def get_current_context(self):
        """Context stored in workfile metadata with global context fallback.

        Returns:
            dict: Context with "project_name", "folder_path", "task_name".
        """
        context = get_current_workfile_context()
        if not context:
            return get_global_context()
        if "project_name" in context:
            # Fill 'folder_path' from legacy 'asset_name' key
            if "asset_name" in context:
                context["folder_path"] = context["asset_name"]
            return context
        # This is legacy way how context was stored
        return {
            "project_name": context.get("project"),
            "folder_path": context.get("asset"),
            "task_name": context.get("task")
        }

    # --- Create ---
    def get_context_data(self):
        """Create context data stored in workfile metadata."""
        return get_workfile_metadata(SECTION_NAME_CREATE_CONTEXT, {})

    def update_context_data(self, data, changes):
        """Store create context data into workfile metadata."""
        return write_workfile_metadata(SECTION_NAME_CREATE_CONTEXT, data)

    def list_instances(self):
        """List all created instances from current workfile."""
        return list_instances()

    def write_instances(self, data):
        """Store instances data into workfile metadata."""
        return write_instances(data)

    # --- Workfile ---
    def open_workfile(self, filepath):
        """Open workfile in TVPaint using george script."""
        george_script = "tv_LoadProject '\"'\"{}\"'\"'".format(
            filepath.replace("\\", "/")
        )
        return execute_george_through_file(george_script)

    def save_workfile(self, filepath=None):
        """Save current workfile, optionally to a different path."""
        if not filepath:
            filepath = self.get_current_workfile()
        # Store current global context into workfile metadata
        context = get_global_context()
        save_current_workfile_context(context)
        # Execute george script to save workfile.
        # NOTE(review): unlike 'open_workfile' the path is not wrapped in
        #   quotes - paths containing spaces may fail. Confirm.
        george_script = "tv_SaveProject {}".format(filepath.replace("\\", "/"))
        return execute_george(george_script)

    def work_root(self, session):
        """Work root directory taken from session data."""
        return session["AYON_WORKDIR"]

    def get_current_workfile(self):
        """Path to currently opened workfile reported by TVPaint."""
        return execute_george("tv_GetProjectName")

    def workfile_has_unsaved_changes(self):
        # Not available in TVPaint - None means "unknown"
        return None

    def get_workfile_extensions(self):
        """Supported workfile extensions."""
        return [".tvpp"]

    # --- Load ---
    def get_containers(self):
        """Loaded containers stored in workfile metadata."""
        return get_containers()

    def initial_launch(self):
        """Callback triggered on application launch."""
        # Setup project settings if its the template that's launched.
        # TODO also check for template creation when it's possible to define
        #   templates
        last_workfile = os.environ.get("AYON_LAST_WORKFILE")
        if not last_workfile or os.path.exists(last_workfile):
            return
        log.info("Setting up project...")
        global_context = get_global_context()
        project_name = global_context.get("project_name")
        folder_path = global_context.get("folder_path")
        if not project_name or not folder_path:
            return
        folder_entity = ayon_api.get_folder_by_path(project_name, folder_path)
        set_context_settings(project_name, folder_entity)

    def application_exit(self):
        """Logic related to TimerManager.

        Todo:
            This should be handled out of TVPaint integration logic.
        """
        data = get_current_project_settings()
        stop_timer = data["tvpaint"]["stop_timer_on_application_exit"]
        if not stop_timer:
            return
        # Stop application timer.
        webserver_url = os.environ.get("AYON_WEBSERVER_URL")
        rest_api_url = "{}/timers_manager/stop_timer".format(webserver_url)
        requests.post(rest_api_url)
def containerise(
    name, namespace, members, context, loader, current_containers=None
):
    """Add new container to workfile metadata.

    Args:
        name (str): Container name.
        namespace (str): Container namespace.
        members (list): List of members that were loaded and belongs
            to the container (layer names).
        context (dict): Representation context the container points to.
        loader (LoaderPlugin): Loader which created the container.
        current_containers (list): Preloaded containers. Should be used only
            on update/switch when containers were modified during the process.

    Returns:
        dict: Container data stored to workfile metadata.
    """
    new_container = {
        "schema": "openpype:container-2.0",
        "id": AVALON_CONTAINER_ID,
        "members": members,
        "name": name,
        "namespace": namespace,
        "loader": str(loader),
        "representation": context["representation"]["id"]
    }
    containers = current_containers
    if containers is None:
        containers = get_containers()
    # Add container to containers list and store them back to metadata
    containers.append(new_container)
    write_workfile_metadata(SECTION_NAME_CONTAINERS, containers)
    return new_container
def split_metadata_string(text, chunk_length=None):
    """Split string into chunks of a maximum length.

    Example:
        ```python
        text = "ABCDEFGHIJKLM"
        result = split_metadata_string(text, 3)
        print(result)
        >>> ['ABC', 'DEF', 'GHI', 'JKL', 'M']
        ```

    Args:
        text (str): Text that will be split into chunks.
        chunk_length (int): Single chunk size. Default chunk_length is
            set to global variable `TVPAINT_CHUNK_LENGTH`.

    Returns:
        list: List of string chunks. Empty list when 'text' is empty.
    """
    if chunk_length is None:
        chunk_length = TVPAINT_CHUNK_LENGTH
    # Slice the text into consecutive 'chunk_length' sized pieces - the
    #   last chunk may be shorter
    return [
        text[start_idx:start_idx + chunk_length]
        for start_idx in range(0, len(text), chunk_length)
    ]
def get_workfile_metadata_string_for_keys(metadata_keys):
    """Read metadata for specific keys from current project workfile.

    All values from entered keys are stored to single string without
    separator.

    Function is designed to help get all values for one metadata key at once.
    So order of passed keys matters.

    Args:
        metadata_keys (list, str): Metadata keys for which data should be
            retrieved. Order of keys matters! It is possible to enter only
            single key as string.

    Returns:
        str: Concatenated values of all entered keys.
    """
    # Add ability to pass only single key
    if isinstance(metadata_keys, str):
        metadata_keys = [metadata_keys]
    output_file = tempfile.NamedTemporaryFile(
        mode="w", prefix="a_tvp_", suffix=".txt", delete=False
    )
    output_file.close()
    # George requires forward slashes in paths
    output_filepath = output_file.name.replace("\\", "/")
    george_script_parts = []
    george_script_parts.append(
        "output_path = \"{}\"".format(output_filepath)
    )
    # Store data for each index of metadata key
    for metadata_key in metadata_keys:
        george_script_parts.append(
            "tv_readprojectstring \"{}\" \"{}\" \"\"".format(
                METADATA_SECTION, metadata_key
            )
        )
        george_script_parts.append(
            "tv_writetextfile \"strict\" \"append\" '\"'output_path'\"' result"
        )
    # Execute the script
    george_script = "\n".join(george_script_parts)
    execute_george_through_file(george_script)
    # Load data from temp file
    with open(output_filepath, "r") as stream:
        file_content = stream.read()
    # Remove `\n` from content
    output_string = file_content.replace("\n", "")
    # Delete temp file
    os.remove(output_filepath)
    return output_string
def get_workfile_metadata_string(metadata_key):
    """Read metadata for specific key from current project workfile.

    Returns:
        Union[str, None]: Stored metadata string or None when nothing is
            stored under the key.
    """
    value = get_workfile_metadata_string_for_keys([metadata_key])
    if not value:
        return None
    stripped_value = value.strip()
    if not stripped_value:
        return None
    # NOTE Backwards compatibility when metadata key did not store range of
    #   key indexes but the value itself
    # NOTE We don't have to care about negative values with `isdecimal` check
    if stripped_value.isdecimal():
        # Key stores number of chunk sub-keys -> read and join them
        sub_keys = [
            "{}{}".format(metadata_key, idx)
            for idx in range(int(stripped_value))
        ]
        metadata_string = get_workfile_metadata_string_for_keys(sub_keys)
    else:
        metadata_string = value
    # Replace quotes placeholders with their values
    return (
        metadata_string
        .replace("{__sq__}", "'")
        .replace("{__dq__}", "\"")
    )
def get_workfile_metadata(metadata_key, default=None):
    """Read and parse metadata for specific key from current workfile.

    Pipeline use function to store loaded and created instances within keys
    stored in `SECTION_NAME_INSTANCES` and `SECTION_NAME_CONTAINERS`
    constants.

    Args:
        metadata_key (str): Key defying which key should read. It is expected
            value contain json serializable string.
        default: Value returned when metadata are missing or not parseable.
            Empty list is used when not passed.

    Returns:
        Parsed json value stored under the key or 'default'.
    """
    if default is None:
        default = []
    json_string = get_workfile_metadata_string(metadata_key)
    if not json_string:
        return default
    try:
        return json.loads(json_string)
    except json.decoder.JSONDecodeError:
        # TODO remove when backwards compatibility of storing metadata
        #   will be removed
        print((
            "Fixed invalid metadata in workfile."
            " Not serializable string was: {}"
        ).format(json_string))
        write_workfile_metadata(metadata_key, default)
    return default
def write_workfile_metadata(metadata_key, value):
    """Write metadata for specific key into current project workfile.

    George script has specific way how to work with quotes which should be
    solved automatically with this function.

    Args:
        metadata_key (str): Key defying under which key value will be stored.
        value (dict,list,str): Data to store they must be json serializable.
    """
    if isinstance(value, (dict, list)):
        value = json.dumps(value)
    if not value:
        value = ""
    # Handle quotes in dumped json string
    # - replace single and double quotes with placeholders
    value = (
        value
        .replace("'", "{__sq__}")
        .replace("\"", "{__dq__}")
    )
    # Store value in limited-length chunks (see 'TVPAINT_CHUNK_LENGTH')
    chunks = split_metadata_string(value)
    chunks_len = len(chunks)
    write_template = "tv_writeprojectstring \"{}\" \"{}\" \"{}\""
    george_script_parts = []
    # Add information about chunks length to metadata key itself
    george_script_parts.append(
        write_template.format(METADATA_SECTION, metadata_key, chunks_len)
    )
    # Add chunk values to indexed metadata keys
    for idx, chunk_value in enumerate(chunks):
        sub_key = "{}{}".format(metadata_key, idx)
        george_script_parts.append(
            write_template.format(METADATA_SECTION, sub_key, chunk_value)
        )
    george_script = "\n".join(george_script_parts)
    return execute_george_through_file(george_script)
def get_current_workfile_context():
    """Return context in which was workfile saved.

    Returns:
        dict: Stored context data. Empty dict when not stored yet.
    """
    return get_workfile_metadata(SECTION_NAME_CONTEXT, {})
def save_current_workfile_context(context):
    """Save context which was used to create a workfile.

    Args:
        context (dict): Context data to store into workfile metadata.
    """
    return write_workfile_metadata(SECTION_NAME_CONTEXT, context)
def list_instances():
    """List all created instances from current workfile.

    Returns:
        list: Instances data stored in workfile metadata.
    """
    return get_workfile_metadata(SECTION_NAME_INSTANCES)
def write_instances(data):
    """Store instances data into current workfile metadata."""
    return write_workfile_metadata(SECTION_NAME_INSTANCES, data)
def get_containers():
    """Loaded containers stored in current workfile metadata.

    Returns:
        list[dict]: Containers with backwards compatible "objectName" key.
    """
    containers = get_workfile_metadata(SECTION_NAME_CONTAINERS)
    if not containers:
        return containers
    # Fill backwards compatible "objectName" key from stored members
    for container in containers:
        if "objectName" in container or "members" not in container:
            continue
        members = container["members"]
        if isinstance(members, list):
            members = "|".join([str(member) for member in members])
        container["objectName"] = members
    return containers
def set_context_settings(project_name, folder_entity):
    """Set workfile settings by folder entity attributes.

    Change fps, resolution and frame start/end.

    Args:
        project_name (str): Project name.
        folder_entity (dict[str, Any]): Folder entity.
    """
    if not folder_entity:
        return
    folder_attributes = folder_entity["attrib"]
    width = folder_attributes.get("resolutionWidth")
    height = folder_attributes.get("resolutionHeight")
    if width is None or height is None:
        print("Resolution was not found!")
    else:
        execute_george(
            "tv_resizepage {} {} 0".format(width, height)
        )
    framerate = folder_attributes.get("fps")
    if framerate is not None:
        execute_george(
            "tv_framerate {} \"timestretch\"".format(framerate)
        )
    else:
        print("Framerate was not found!")
    frame_start = folder_attributes.get("frameStart")
    frame_end = folder_attributes.get("frameEnd")
    if frame_start is None or frame_end is None:
        print("Frame range was not found!")
        return
    # Fall back to 0 when handles are not set - '.get' may return None
    #   which would crash the arithmetic below
    handle_start = folder_attributes.get("handleStart") or 0
    handle_end = folder_attributes.get("handleEnd") or 0
    # Always start from 0 Mark In and set only Mark Out
    mark_in = 0
    mark_out = mark_in + (frame_end - frame_start) + handle_start + handle_end
    execute_george("tv_markin {} set".format(mark_in))
    execute_george("tv_markout {} set".format(mark_out))

View file

@ -1,205 +0,0 @@
import re
from ayon_core.pipeline import LoaderPlugin
from ayon_core.pipeline.create import (
CreatedInstance,
get_product_name,
AutoCreator,
Creator,
)
from ayon_core.pipeline.create.creator_plugins import cache_and_get_instances
from .lib import get_layers_data
# Shared-data key under which creators cache instances from workfile
SHARED_DATA_KEY = "ayon.tvpaint.instances"
class TVPaintCreatorCommon:
    """Common logic shared by TVPaint creator and auto-creator plugins."""
    @property
    def product_template_product_type(self):
        """Product type used to filter product name templates."""
        return self.product_type

    def _cache_and_get_instances(self):
        # Instances are cached under shared data key so all creators read
        #   workfile metadata only once per collection
        return cache_and_get_instances(
            self, SHARED_DATA_KEY, self.host.list_instances
        )

    def _collect_create_instances(self):
        """Create 'CreatedInstance' objects from workfile metadata."""
        instances_by_identifier = self._cache_and_get_instances()
        for instance_data in instances_by_identifier[self.identifier]:
            instance = CreatedInstance.from_existing(instance_data, self)
            self._add_instance_to_context(instance)

    def _update_create_instances(self, update_list):
        """Propagate instance changes into workfile metadata."""
        if not update_list:
            return
        cur_instances = self.host.list_instances()
        cur_instances_by_id = {}
        for instance_data in cur_instances:
            instance_id = instance_data.get("instance_id")
            if instance_id:
                cur_instances_by_id[instance_id] = instance_data
        for instance, changes in update_list:
            instance_data = changes.new_value
            cur_instance_data = cur_instances_by_id.get(instance.id)
            if cur_instance_data is None:
                cur_instances.append(instance_data)
                continue
            # Remove keys that are not part of new data anymore
            for key in set(cur_instance_data) - set(instance_data):
                cur_instance_data.pop(key)
            cur_instance_data.update(instance_data)
        self.host.write_instances(cur_instances)

    def _custom_get_product_name(
        self,
        project_name,
        folder_entity,
        task_entity,
        variant,
        host_name=None,
        instance=None
    ):
        """Product name resolved with creator's dynamic data."""
        dynamic_data = self.get_dynamic_data(
            project_name,
            folder_entity,
            task_entity,
            variant,
            host_name,
            instance
        )
        task_name = task_type = None
        if task_entity:
            task_name = task_entity["name"]
            task_type = task_entity["taskType"]
        return get_product_name(
            project_name,
            task_name,
            task_type,
            host_name,
            self.product_type,
            variant,
            dynamic_data=dynamic_data,
            project_settings=self.project_settings,
            product_type_filter=self.product_template_product_type
        )
class TVPaintCreator(Creator, TVPaintCreatorCommon):
    """Base class for TVPaint creator plugins."""
    settings_category = "tvpaint"

    def collect_instances(self):
        self._collect_create_instances()

    def update_instances(self, update_list):
        self._update_create_instances(update_list)

    def remove_instances(self, instances):
        """Remove instances from workfile metadata and create context."""
        ids_to_remove = {
            instance.id
            for instance in instances
        }
        cur_instances = self.host.list_instances()
        changed = False
        new_instances = []
        for instance_data in cur_instances:
            if instance_data.get("instance_id") in ids_to_remove:
                changed = True
            else:
                new_instances.append(instance_data)
        # Write metadata only when an instance was actually removed
        if changed:
            self.host.write_instances(new_instances)
        for instance in instances:
            self._remove_instance_from_context(instance)

    def get_dynamic_data(self, *args, **kwargs):
        # Change folder and name by current workfile context
        create_context = self.create_context
        folder_path = create_context.get_current_folder_path()
        task_name = create_context.get_current_task_name()
        output = {}
        if folder_path:
            folder_name = folder_path.rsplit("/")[-1]
            # Legacy "asset" key kept for backwards compatibility
            output["asset"] = folder_name
            output["folder"] = {"name": folder_name}
        if task_name:
            output["task"] = task_name
        return output

    def get_product_name(self, *args, **kwargs):
        return self._custom_get_product_name(*args, **kwargs)

    def _store_new_instance(self, new_instance):
        """Append new instance to workfile metadata and create context."""
        instances_data = self.host.list_instances()
        instances_data.append(new_instance.data_to_store())
        self.host.write_instances(instances_data)
        self._add_instance_to_context(new_instance)
class TVPaintAutoCreator(AutoCreator, TVPaintCreatorCommon):
    """Base class for TVPaint auto-creator plugins."""
    settings_category = "tvpaint"

    def collect_instances(self):
        self._collect_create_instances()

    def update_instances(self, update_list):
        self._update_create_instances(update_list)

    def get_product_name(self, *args, **kwargs):
        return self._custom_get_product_name(*args, **kwargs)
class Loader(LoaderPlugin):
    """Base class for TVPaint loader plugins."""
    hosts = ["tvpaint"]
    settings_category = "tvpaint"

    @staticmethod
    def get_members_from_container(container):
        """Layer ids stored on container.

        Returns:
            list[int]: Layer ids that belong to the container.
        """
        if "members" not in container and "objectName" in container:
            # Backwards compatibility
            layer_ids_str = container.get("objectName")
            return [
                int(layer_id) for layer_id in layer_ids_str.split("|")
            ]
        return container["members"]

    def get_unique_layer_name(self, namespace, name):
        """Layer name with counter as suffix.

        Find higher 3 digit suffix from all layer names in scene matching
        regex `{namespace}_{name}_{suffix}`. Higher 3 digit suffix is used
        as base for next number if scene does not contain layer matching
        regex `0` is used as base.

        Args:
            namespace (str): Usually folder name.
            name (str): Name of loaded product.

        Returns:
            str: `{namespace}_{name}_{higher suffix + 1}`
        """
        layer_name_base = "{}_{}".format(namespace, name)
        counter_regex = re.compile(r"_(\d{3})$")
        higher_counter = 0
        for layer in get_layers_data():
            layer_name = layer["name"]
            if not layer_name.startswith(layer_name_base):
                continue
            number_subpart = layer_name[len(layer_name_base):]
            groups = counter_regex.findall(number_subpart)
            # Suffix must match exactly once
            if len(groups) != 1:
                continue
            counter = int(groups[0])
            if counter > higher_counter:
                higher_counter = counter
                continue
        return "{}_{:0>3d}".format(layer_name_base, higher_counter + 1)

View file

@ -1,42 +0,0 @@
from ayon_core.lib import get_ayon_launcher_args
from ayon_applications import PreLaunchHook, LaunchTypes
class TvpaintPrelaunchHook(PreLaunchHook):
    """Launch arguments preparation.

    Hook add python executable and script path to tvpaint implementation
    before tvpaint executable and add last workfile path to launch arguments.

    Existence of last workfile is checked. If workfile does not exist tries
    to copy templated workfile from predefined path.
    """
    app_groups = {"tvpaint"}
    launch_types = {LaunchTypes.local}

    def execute(self):
        """Rewrite launch arguments to start TVPaint via launch script."""
        # Pop tvpaint executable
        executable_path = self.launch_context.launch_args.pop(0)
        # Pop rest of launch arguments - There should not be other arguments!
        remainders = []
        while self.launch_context.launch_args:
            remainders.append(self.launch_context.launch_args.pop(0))
        new_launch_args = get_ayon_launcher_args(
            "run", self.launch_script_path(), executable_path
        )
        # Append as whole list as these arguments should not be separated
        self.launch_context.launch_args.append(new_launch_args)
        if remainders:
            self.log.warning((
                "There are unexpected launch arguments in TVPaint launch. {}"
            ).format(str(remainders)))
            self.launch_context.launch_args.extend(remainders)

    def launch_script_path(self):
        """Path to python script which starts TVPaint implementation."""
        # Lazy import to avoid addon import during hook discovery
        from ayon_tvpaint import get_launch_script_path
        return get_launch_script_path()

View file

@ -1,684 +0,0 @@
import os
import shutil
import collections
from PIL import Image, ImageDraw
def backwards_id_conversion(data_by_layer_id):
    """Convert non-string layer id keys to strings in-place.

    Older metadata stored layer ids as integers. Mapping is modified
    in-place, nothing is returned.
    """
    non_string_keys = [
        key
        for key in data_by_layer_id
        if not isinstance(key, str)
    ]
    for key in non_string_keys:
        data_by_layer_id[str(key)] = data_by_layer_id.pop(key)
def get_frame_filename_template(frame_end, filename_prefix=None, ext=None):
    """File template with '{frame}' formatting key for rendered files.

    Frame is padded to at least 4 digits, or more when 'frame_end' has more
    digits. Output is rendered to temporary folder so filename should not
    matter as integrator change them.
    """
    frame_padding = max(4, len(str(frame_end)))
    return "{}{{frame:0>{}}}{}".format(
        filename_prefix or "",
        frame_padding,
        ext or ".png"
    )
def get_layer_pos_filename_template(range_end, filename_prefix=None, ext=None):
    """Frame filename template extended with '{pos}' key for layer position."""
    prefix_with_pos = "{}pos_{{pos}}.".format(filename_prefix or "")
    return get_frame_filename_template(range_end, prefix_with_pos, ext)
def _calculate_pre_behavior_copy(
range_start, exposure_frames, pre_beh,
layer_frame_start, layer_frame_end,
output_idx_by_frame_idx
):
"""Calculate frames before first exposure frame based on pre behavior.
Function may skip whole processing if first exposure frame is before
layer's first frame. In that case pre behavior does not make sense.
Args:
range_start(int): First frame of range which should be rendered.
exposure_frames(list): List of all exposure frames on layer.
pre_beh(str): Pre behavior of layer (enum of 4 strings).
layer_frame_start(int): First frame of layer.
layer_frame_end(int): Last frame of layer.
output_idx_by_frame_idx(dict): References to already prepared frames
and where result will be stored.
"""
# Check if last layer frame is after range end
if layer_frame_start < range_start:
return
first_exposure_frame = min(exposure_frames)
# Skip if last exposure frame is after range end
if first_exposure_frame < range_start:
return
# Calculate frame count of layer
frame_count = layer_frame_end - layer_frame_start + 1
if pre_beh == "none":
# Just fill all frames from last exposure frame to range end with None
for frame_idx in range(range_start, layer_frame_start):
output_idx_by_frame_idx[frame_idx] = None
elif pre_beh == "hold":
# Keep first frame for whole time
for frame_idx in range(range_start, layer_frame_start):
output_idx_by_frame_idx[frame_idx] = first_exposure_frame
elif pre_beh == "repeat":
# Loop backwards from last frame of layer
for frame_idx in reversed(range(range_start, layer_frame_start)):
eq_frame_idx_offset = (
(layer_frame_end - frame_idx) % frame_count
)
eq_frame_idx = layer_frame_start + (
layer_frame_end - eq_frame_idx_offset
)
output_idx_by_frame_idx[frame_idx] = eq_frame_idx
elif pre_beh == "pingpong":
half_seq_len = frame_count - 1
seq_len = half_seq_len * 2
for frame_idx in reversed(range(range_start, layer_frame_start)):
eq_frame_idx_offset = (layer_frame_start - frame_idx) % seq_len
if eq_frame_idx_offset > half_seq_len:
eq_frame_idx_offset = (seq_len - eq_frame_idx_offset)
eq_frame_idx = layer_frame_start + eq_frame_idx_offset
output_idx_by_frame_idx[frame_idx] = eq_frame_idx
def _calculate_post_behavior_copy(
range_end, exposure_frames, post_beh,
layer_frame_start, layer_frame_end,
output_idx_by_frame_idx
):
"""Calculate frames after last frame of layer based on post behavior.
Function may skip whole processing if last layer frame is after range_end.
In that case post behavior does not make sense.
Args:
range_end(int): Last frame of range which should be rendered.
exposure_frames(list): List of all exposure frames on layer.
post_beh(str): Post behavior of layer (enum of 4 strings).
layer_frame_start(int): First frame of layer.
layer_frame_end(int): Last frame of layer.
output_idx_by_frame_idx(dict): References to already prepared frames
and where result will be stored.
"""
# Check if last layer frame is after range end
if layer_frame_end >= range_end:
return
last_exposure_frame = max(exposure_frames)
# Skip if last exposure frame is after range end
# - this is probably irrelevant with layer frame end check?
if last_exposure_frame >= range_end:
return
# Calculate frame count of layer
frame_count = layer_frame_end - layer_frame_start + 1
if post_beh == "none":
# Just fill all frames from last exposure frame to range end with None
for frame_idx in range(layer_frame_end + 1, range_end + 1):
output_idx_by_frame_idx[frame_idx] = None
elif post_beh == "hold":
# Keep last exposure frame to the end
for frame_idx in range(layer_frame_end + 1, range_end + 1):
output_idx_by_frame_idx[frame_idx] = last_exposure_frame
elif post_beh == "repeat":
# Loop backwards from last frame of layer
for frame_idx in range(layer_frame_end + 1, range_end + 1):
eq_frame_idx = layer_frame_start + (frame_idx % frame_count)
output_idx_by_frame_idx[frame_idx] = eq_frame_idx
elif post_beh == "pingpong":
half_seq_len = frame_count - 1
seq_len = half_seq_len * 2
for frame_idx in range(layer_frame_end + 1, range_end + 1):
eq_frame_idx_offset = (frame_idx - layer_frame_end) % seq_len
if eq_frame_idx_offset > half_seq_len:
eq_frame_idx_offset = seq_len - eq_frame_idx_offset
eq_frame_idx = layer_frame_end - eq_frame_idx_offset
output_idx_by_frame_idx[frame_idx] = eq_frame_idx
def _calculate_in_range_frames(
range_start, range_end,
exposure_frames, layer_frame_end,
output_idx_by_frame_idx
):
"""Calculate frame references in defined range.
Function may skip whole processing if last layer frame is after range_end.
In that case post behavior does not make sense.
Args:
range_start(int): First frame of range which should be rendered.
range_end(int): Last frame of range which should be rendered.
exposure_frames(list): List of all exposure frames on layer.
layer_frame_end(int): Last frame of layer.
output_idx_by_frame_idx(dict): References to already prepared frames
and where result will be stored.
"""
# Calculate in range frames
in_range_frames = []
for frame_idx in exposure_frames:
if range_start <= frame_idx <= range_end:
output_idx_by_frame_idx[frame_idx] = frame_idx
in_range_frames.append(frame_idx)
if in_range_frames:
first_in_range_frame = min(in_range_frames)
# Calculate frames from first exposure frames to range end or last
# frame of layer (post behavior should be calculated since that time)
previous_exposure = first_in_range_frame
for frame_idx in range(first_in_range_frame, range_end + 1):
if frame_idx > layer_frame_end:
break
if frame_idx in exposure_frames:
previous_exposure = frame_idx
else:
output_idx_by_frame_idx[frame_idx] = previous_exposure
# There can be frames before first exposure frame in range
# First check if we don't alreade have first range frame filled
if range_start in output_idx_by_frame_idx:
return
first_exposure_frame = max(exposure_frames)
last_exposure_frame = max(exposure_frames)
# Check if is first exposure frame smaller than defined range
# if not then skip
if first_exposure_frame >= range_start:
return
# Check is if last exposure frame is also before range start
# in that case we can't use fill frames before out range
if last_exposure_frame < range_start:
return
closest_exposure_frame = first_exposure_frame
for frame_idx in exposure_frames:
if frame_idx >= range_start:
break
if frame_idx > closest_exposure_frame:
closest_exposure_frame = frame_idx
output_idx_by_frame_idx[closest_exposure_frame] = closest_exposure_frame
for frame_idx in range(range_start, range_end + 1):
if frame_idx in output_idx_by_frame_idx:
break
output_idx_by_frame_idx[frame_idx] = closest_exposure_frame
def _cleanup_frame_references(output_idx_by_frame_idx):
"""Cleanup frame references to frame reference.
Cleanup not direct references to rendered frame.
```
// Example input
{
1: 1,
2: 1,
3: 2
}
// Result
{
1: 1,
2: 1,
3: 1 // Changed reference to final rendered frame
}
```
Result is dictionary where keys leads to frame that should be rendered.
"""
for frame_idx in tuple(output_idx_by_frame_idx.keys()):
reference_idx = output_idx_by_frame_idx[frame_idx]
# Skip transparent frames
if reference_idx is None or reference_idx == frame_idx:
continue
real_reference_idx = reference_idx
_tmp_reference_idx = reference_idx
while True:
_temp = output_idx_by_frame_idx[_tmp_reference_idx]
if _temp == _tmp_reference_idx:
real_reference_idx = _tmp_reference_idx
break
_tmp_reference_idx = _temp
if real_reference_idx != reference_idx:
output_idx_by_frame_idx[frame_idx] = real_reference_idx
def _cleanup_out_range_frames(output_idx_by_frame_idx, range_start, range_end):
"""Cleanup frame references to frames out of passed range.
First available frame in range is used
```
// Example input. Range 2-3
{
1: 1,
2: 1,
3: 1
}
// Result
{
2: 2, // Redirect to self as is first that reference out range
3: 2 // Redirect to first redirected frame
}
```
Result is dictionary where keys leads to frame that should be rendered.
"""
in_range_frames_by_out_frames = collections.defaultdict(set)
out_range_frames = set()
for frame_idx in tuple(output_idx_by_frame_idx.keys()):
# Skip frames that are already out of range
if frame_idx < range_start or frame_idx > range_end:
out_range_frames.add(frame_idx)
continue
reference_idx = output_idx_by_frame_idx[frame_idx]
# Skip transparent frames
if reference_idx is None:
continue
# Skip references in range
if reference_idx < range_start or reference_idx > range_end:
in_range_frames_by_out_frames[reference_idx].add(frame_idx)
for reference_idx in tuple(in_range_frames_by_out_frames.keys()):
frame_indexes = in_range_frames_by_out_frames.pop(reference_idx)
new_reference = None
for frame_idx in frame_indexes:
if new_reference is None:
new_reference = frame_idx
output_idx_by_frame_idx[frame_idx] = new_reference
# Finally remove out of range frames
for frame_idx in out_range_frames:
output_idx_by_frame_idx.pop(frame_idx)
def calculate_layer_frame_references(
    range_start, range_end,
    layer_frame_start,
    layer_frame_end,
    exposure_frames,
    pre_beh, post_beh
):
    """Calculate frame references for one layer based on it's data.

    Output is dictionary where key is frame index referencing to rendered
    frame index. If frame index should be rendered then is referencing
    to self.
    ```
    // Example output
    {
        1: 1, // Reference to self - will be rendered
        2: 1, // Reference to frame 1 - will be copied
        3: 1, // Reference to frame 1 - will be copied
        4: 4, // Reference to self - will be rendered
        ...
        20: 4 // Reference to frame 4 - will be copied
        21: None // Has reference to None - transparent image
    }
    ```

    Args:
        range_start(int): First frame of range which should be rendered.
        range_end(int): Last frame of range which should be rendered.
        layer_frame_start(int): First frame of layer.
        layer_frame_end(int): Last frame of layer.
        exposure_frames(list): List of all exposure frames on layer.
        pre_beh(str): Pre behavior of layer (enum of 4 strings).
        post_beh(str): Post behavior of layer (enum of 4 strings).

    Returns:
        dict: Frame references by frame index restricted to passed range
            ('None' value means transparent frame).
    """
    # Output variable
    output_idx_by_frame_idx = {}
    # Skip if layer does not have any exposure frames
    if not exposure_frames:
        return output_idx_by_frame_idx
    # First calculate in range frames
    _calculate_in_range_frames(
        range_start, range_end,
        exposure_frames, layer_frame_end,
        output_idx_by_frame_idx
    )
    # Calculate frames by pre behavior of layer
    _calculate_pre_behavior_copy(
        range_start, exposure_frames, pre_beh,
        layer_frame_start, layer_frame_end,
        output_idx_by_frame_idx
    )
    # Calculate frames by post behavior of layer
    _calculate_post_behavior_copy(
        range_end, exposure_frames, post_beh,
        layer_frame_start, layer_frame_end,
        output_idx_by_frame_idx
    )
    # Cleanup of referenced frames (collapse chained references)
    _cleanup_frame_references(output_idx_by_frame_idx)
    # Remove frames out of range
    _cleanup_out_range_frames(output_idx_by_frame_idx, range_start, range_end)
    return output_idx_by_frame_idx
def calculate_layers_extraction_data(
    layers_data,
    exposure_frames_by_layer_id,
    behavior_by_layer_id,
    range_start,
    range_end,
    skip_not_visible=True,
    filename_prefix=None,
    ext=None
):
    """Calculate extraction data for passed layers data.

    ```
    {
        <layer_id>: {
            "frame_references": {...},
            "filenames_by_frame_index": {...}
        },
        ...
    }
    ```
    Frame references contains frame index reference to rendered frame index.
    Filename by frame index represents filename under which should be frame
    stored. Directory is not handled here because each usage may need
    different approach.

    Args:
        layers_data(list): Layers data loaded from TVPaint.
        exposure_frames_by_layer_id(dict): Exposure frames of layers stored
            by layer id.
        behavior_by_layer_id(dict): Pre and Post behavior of layers stored by
            layer id.
        range_start(int): First frame of rendered range.
        range_end(int): Last frame of rendered range.
        skip_not_visible(bool): Skip calculations for hidden layers (Skipped
            by default).
        filename_prefix(str): Prefix before filename.
        ext(str): Extension which filenames will have ('.png' is default).

    Returns:
        dict: Prepared data for rendering by layer id.
    """
    # Make sure layer ids are strings
    # backwards compatibility when layer ids were integers
    backwards_id_conversion(exposure_frames_by_layer_id)
    backwards_id_conversion(behavior_by_layer_id)

    layer_template = get_layer_pos_filename_template(
        range_end, filename_prefix, ext
    )
    output = {}
    for layer_data in layers_data:
        if skip_not_visible and not layer_data["visible"]:
            continue

        orig_layer_id = layer_data["layer_id"]
        layer_id = str(orig_layer_id)

        # Skip if does not have any exposure frames (empty layer)
        # NOTE(review): raises 'KeyError' when layer id is missing in
        #   'exposure_frames_by_layer_id' - confirm callers always collect
        #   exposure frames for all layers
        exposure_frames = exposure_frames_by_layer_id[layer_id]
        if not exposure_frames:
            continue

        layer_position = layer_data["position"]
        layer_frame_start = layer_data["frame_start"]
        layer_frame_end = layer_data["frame_end"]

        layer_behavior = behavior_by_layer_id[layer_id]
        pre_behavior = layer_behavior["pre"]
        post_behavior = layer_behavior["post"]

        frame_references = calculate_layer_frame_references(
            range_start, range_end,
            layer_frame_start,
            layer_frame_end,
            exposure_frames,
            pre_behavior, post_behavior
        )
        # All values in 'frame_references' reference to a frame that must be
        # rendered out
        frames_to_render = set(frame_references.values())
        # Remove 'None' reference (transparent image)
        if None in frames_to_render:
            frames_to_render.remove(None)

        # Skip layer if has nothing to render
        if not frames_to_render:
            continue

        # All filenames that should be as output (not final output)
        filename_frames = (
            set(range(range_start, range_end + 1))
            | frames_to_render
        )
        filenames_by_frame_index = {}
        for frame_idx in filename_frames:
            filenames_by_frame_index[frame_idx] = layer_template.format(
                pos=layer_position,
                frame=frame_idx
            )
        # Store objects under the layer id
        output[orig_layer_id] = {
            "frame_references": frame_references,
            "filenames_by_frame_index": filenames_by_frame_index
        }
    return output
def create_transparent_image_from_source(src_filepath, dst_filepath):
    """Create transparent image of same type and size as source image.

    Args:
        src_filepath(str): Path to image used as size/mode template.
        dst_filepath(str): Path where the transparent image is saved.
    """
    # NOTE(review): the 4-channel fill assumes the source image has an alpha
    #   channel (e.g. RGBA) - confirm rendered sources are always RGBA
    img_obj = Image.open(src_filepath)
    painter = ImageDraw.Draw(img_obj)
    # Paint whole canvas with fully transparent color
    painter.rectangle((0, 0, *img_obj.size), fill=(0, 0, 0, 0))
    img_obj.save(dst_filepath)
def fill_reference_frames(frame_references, filepaths_by_frame):
    """Create file for each frame referencing a rendered frame.

    Args:
        frame_references(dict): Mapping of frame index to frame index that
            holds the rendered content. 'None' (transparent frame) and
            self-references are skipped.
        filepaths_by_frame(dict): Filepaths by frame index.
    """
    # Hardlinks are cheaper than full copies when the platform supports
    #   them - resolve the capability once instead of per frame
    can_hardlink = hasattr(os, "link")
    for frame_idx, ref_idx in frame_references.items():
        # Frame referencing to self should be rendered and used as source
        # and reference indexes with None can't be filled
        if ref_idx is None or frame_idx == ref_idx:
            continue

        # Get source and destination filepath
        src_filepath = filepaths_by_frame[ref_idx]
        dst_filepath = filepaths_by_frame[frame_idx]
        if can_hardlink:
            os.link(src_filepath, dst_filepath)
        else:
            shutil.copy(src_filepath, dst_filepath)
def copy_render_file(src_path, dst_path):
    """Duplicate a rendered image on disk.

    Uses a hardlink when the platform offers one, full copy otherwise.
    """
    link_func = getattr(os, "link", None)
    if link_func is not None:
        link_func(src_path, dst_path)
    else:
        shutil.copy(src_path, dst_path)
def cleanup_rendered_layers(filepaths_by_layer_id):
    """Delete all files for each individual layer files after compositing."""
    # Deduplicate filepaths across all layers and frames
    unique_filepaths = {
        filepath
        for filepaths_by_frame in filepaths_by_layer_id.values()
        for filepath in filepaths_by_frame.values()
    }
    for filepath in unique_filepaths:
        # 'None' marks transparent frames without a file on disk
        if filepath is not None and os.path.exists(filepath):
            os.remove(filepath)
def composite_rendered_layers(
    layers_data, filepaths_by_layer_id,
    range_start, range_end,
    dst_filepaths_by_frame, cleanup=True
):
    """Composite multiple rendered layers by their position.

    Result is single frame sequence with transparency matching content
    created in TVPaint. Missing source filepaths are replaced with
    transparent images but at least one image must be rendered and exist.

    Function can be used even if single layer was created to fill transparent
    filepaths.

    Args:
        layers_data(list): Layers data loaded from TVPaint.
        filepaths_by_layer_id(dict): Rendered filepaths stored by frame index
            per layer id. Used as source for compositing.
        range_start(int): First frame of rendered range.
        range_end(int): Last frame of rendered range.
        dst_filepaths_by_frame(dict): Output filepaths by frame where final
            image after compositing will be stored. Path must not clash with
            source filepaths.
        cleanup(bool): Remove all source filepaths when done with
            compositing.
    """
    # Prepare layers by their position
    # - position tells in which order will compositing happen
    layer_ids_by_position = {}
    for layer in layers_data:
        layer_position = layer["position"]
        layer_ids_by_position[layer_position] = layer["layer_id"]

    # Sort layer positions (iterated from highest position to lowest)
    sorted_positions = tuple(reversed(sorted(layer_ids_by_position.keys())))
    # Prepare variable where filepaths without any rendered content
    # - transparent will be created
    transparent_filepaths = set()
    # Store first final filepath (used as template for transparent images)
    first_dst_filepath = None
    for frame_idx in range(range_start, range_end + 1):
        dst_filepath = dst_filepaths_by_frame[frame_idx]
        src_filepaths = []
        for layer_position in sorted_positions:
            layer_id = layer_ids_by_position[layer_position]
            filepaths_by_frame = filepaths_by_layer_id[layer_id]
            src_filepath = filepaths_by_frame.get(frame_idx)
            if src_filepath is not None:
                src_filepaths.append(src_filepath)

        # No layer has content for this frame - transparent output
        if not src_filepaths:
            transparent_filepaths.add(dst_filepath)
            continue

        # Store first destination filepath to be used for transparent images
        if first_dst_filepath is None:
            first_dst_filepath = dst_filepath

        if len(src_filepaths) == 1:
            # Single source - move it (on cleanup) or copy it directly
            src_filepath = src_filepaths[0]
            if cleanup:
                os.rename(src_filepath, dst_filepath)
            else:
                copy_render_file(src_filepath, dst_filepath)
        else:
            composite_images(src_filepaths, dst_filepath)

    # Store first transparent filepath to be able copy it
    # NOTE(review): 'first_dst_filepath' stays 'None' when no frame had
    #   content - confirm at least one rendered frame always exists
    transparent_filepath = None
    for dst_filepath in transparent_filepaths:
        if transparent_filepath is None:
            transparent_filepath = dst_filepath
            create_transparent_image_from_source(
                first_dst_filepath, dst_filepath
            )
        else:
            copy_render_file(transparent_filepath, dst_filepath)
    # Remove all files that were used as source for compositing
    if cleanup:
        cleanup_rendered_layers(filepaths_by_layer_id)
def composite_images(input_image_paths, output_filepath):
    """Composite images in order from passed list.

    First image is used as base and remaining images are alpha composited
    over it in order.

    Raises:
        ValueError: When entered list is empty.
    """
    if not input_image_paths:
        raise ValueError("Nothing to composite.")

    paths_iter = iter(input_image_paths)
    base_image = Image.open(next(paths_iter))
    for image_filepath in paths_iter:
        base_image.alpha_composite(Image.open(image_filepath))
    base_image.save(output_filepath)
def rename_filepaths_by_frame_start(
    filepaths_by_frame, range_start, range_end, new_frame_start
):
    """Change frames in filenames of finished images to new frame start.

    Args:
        filepaths_by_frame(dict): Filepaths by source frame index.
        range_start(int): First frame of rendered range.
        range_end(int): Last frame of rendered range.
        new_frame_start(int): Frame index which should be used as first
            frame of output files.

    Returns:
        dict: New filepaths by output frame index.
    """
    # Calculate frame end
    new_frame_end = range_end + (new_frame_start - range_start)
    # Create filename template
    filename_template = get_frame_filename_template(
        max(range_end, new_frame_end)
    )
    # Use different ranges based on Mark In and output Frame Start values
    # - this is to make sure that filename renaming won't affect files that
    #   are not renamed yet
    if range_start < new_frame_start:
        # Rename from the back when frame numbers increase so a rename
        #   can't overwrite a file that still awaits renaming
        source_range = range(range_end, range_start - 1, -1)
        output_range = range(new_frame_end, new_frame_start - 1, -1)
    else:
        # This is less possible situation as frame start will be in most
        # cases higher than Mark In.
        source_range = range(range_start, range_end + 1)
        output_range = range(new_frame_start, new_frame_end + 1)

    # Rename files (skipped when source and destination filename match)
    new_dst_filepaths = {}
    for src_frame, dst_frame in zip(source_range, output_range):
        src_filepath = os.path.normpath(filepaths_by_frame[src_frame])
        dirpath, src_filename = os.path.split(src_filepath)
        dst_filename = filename_template.format(frame=dst_frame)
        dst_filepath = os.path.join(dirpath, dst_filename)
        if src_filename != dst_filename:
            os.rename(src_filepath, dst_filepath)

        new_dst_filepaths[dst_frame] = dst_filepath
    return new_dst_filepaths

View file

@ -1,150 +0,0 @@
import collections
from ayon_core.pipeline.create.creator_plugins import (
ProductConvertorPlugin,
cache_and_get_instances,
)
from ayon_tvpaint.api.plugin import SHARED_DATA_KEY
from ayon_tvpaint.api.lib import get_groups_data
class TVPaintLegacyConverted(ProductConvertorPlugin):
    """Conversion of legacy instances in scene to new creators.

    This convertor handles only instances created by core creators.
    All instances that would be created using auto-creators are removed as at
    the moment of finding them would there already be existing instances.
    """
    identifier = "tvpaint.legacy.converter"

    def find_instances(self):
        """Add convertor item when any legacy instance is in workfile."""
        instances_by_identifier = cache_and_get_instances(
            self, SHARED_DATA_KEY, self.host.list_instances
        )
        # Instances without 'creator_identifier' are considered legacy
        if instances_by_identifier[None]:
            self.add_convertor_item("Convert legacy instances")

    def convert(self):
        """Convert legacy instances and store them back to workfile."""
        current_instances = self.host.list_instances()
        to_convert = collections.defaultdict(list)
        converted = False
        for instance in current_instances:
            # Instances with an identifier are already converted
            if instance.get("creator_identifier") is not None:
                continue
            converted = True
            family = instance.get("family")
            if family in (
                "renderLayer",
                "renderPass",
                "renderScene",
                "review",
                "workfile",
            ):
                to_convert[family].append(instance)
            else:
                # Unknown legacy families are marked for removal
                instance["keep"] = False

        # Skip if nothing was changed
        if not converted:
            self.remove_convertor_item()
            return

        # Render layers must be converted before render passes which
        #   reference them by instance id
        self._convert_render_layers(
            to_convert["renderLayer"], current_instances)
        self._convert_render_passes(
            to_convert["renderPass"], current_instances)
        self._convert_render_scenes(
            to_convert["renderScene"], current_instances)
        self._convert_workfiles(
            to_convert["workfile"], current_instances)
        self._convert_reviews(
            to_convert["review"], current_instances)

        new_instances = [
            instance
            for instance in current_instances
            if instance.get("keep") is not False
        ]
        self.host.write_instances(new_instances)
        # remove legacy item if all is fine
        self.remove_convertor_item()

    def _convert_render_layers(self, render_layers, current_instances):
        """Convert legacy render layer instances.

        Args:
            render_layers(list): Legacy render layer instances.
            current_instances(list): All instances from workfile.
        """
        if not render_layers:
            return

        # Look for possible existing render layers in scene
        render_layers_by_group_id = {}
        for instance in current_instances:
            if instance.get("creator_identifier") == "render.layer":
                # Bugfix: group id is stored in 'creator_attributes'
                #   ('creator_identifier' is a string and indexing it with
                #   a string raised 'TypeError')
                group_id = instance["creator_attributes"]["group_id"]
                render_layers_by_group_id[group_id] = instance

        groups_by_id = {
            group["group_id"]: group
            for group in get_groups_data()
        }
        for render_layer in render_layers:
            group_id = render_layer.pop("group_id")
            # Just remove legacy instance if group is already occupied
            if group_id in render_layers_by_group_id:
                render_layer["keep"] = False
                continue

            # Add identifier
            render_layer["creator_identifier"] = "render.layer"
            # Change 'uuid' to 'instance_id'
            render_layer["instance_id"] = render_layer.pop("uuid")
            # Fill creator attributes
            render_layer["creator_attributes"] = {
                "group_id": group_id
            }
            render_layer["productType"] = "render"
            group = groups_by_id[group_id]
            # Use group name for variant
            # Bugfix: variant belongs to the converted instance (was set on
            #   the throwaway group data)
            render_layer["variant"] = group["name"]

    def _convert_render_passes(self, render_passes, current_instances):
        """Convert legacy render pass instances.

        Args:
            render_passes(list): Legacy render pass instances.
            current_instances(list): All instances from workfile.
        """
        if not render_passes:
            return

        # Render passes must have available render layers so we look for
        # render layers first
        # - '_convert_render_layers' must be called before this method
        render_layers_by_group_id = {}
        for instance in current_instances:
            if instance.get("creator_identifier") == "render.layer":
                group_id = instance["creator_attributes"]["group_id"]
                render_layers_by_group_id[group_id] = instance

        for render_pass in render_passes:
            group_id = render_pass.pop("group_id")
            render_layer = render_layers_by_group_id.get(group_id)
            # Remove passes without matching render layer
            if not render_layer:
                render_pass["keep"] = False
                continue

            render_pass["creator_identifier"] = "render.pass"
            render_pass["instance_id"] = render_pass.pop("uuid")
            render_pass["productType"] = "render"
            render_pass["creator_attributes"] = {
                "render_layer_instance_id": render_layer["instance_id"]
            }
            render_pass["variant"] = render_pass.pop("pass")
            render_pass.pop("renderlayer")

    # Rest of instances are just marked for deletion
    # - they are auto-created by new creators
    def _convert_render_scenes(self, render_scenes, current_instances):
        for render_scene in render_scenes:
            render_scene["keep"] = False

    def _convert_workfiles(self, workfiles, current_instances):
        for workfile in workfiles:
            workfile["keep"] = False

    def _convert_reviews(self, reviews, current_instances):
        for review in reviews:
            review["keep"] = False

View file

@ -1,92 +0,0 @@
import ayon_api
from ayon_core.pipeline import CreatedInstance
from ayon_tvpaint.api.plugin import TVPaintAutoCreator
class TVPaintReviewCreator(TVPaintAutoCreator):
    """Auto-creator of review product instance.

    Keeps a single review instance in the workfile and updates its context
    (folder path, task and product name) to the current context.
    """
    product_type = "review"
    identifier = "scene.review"
    label = "Review"
    icon = "ei.video"

    # Settings
    active_on_create = True

    def apply_settings(self, project_settings):
        # Fill attributes from addon project settings
        plugin_settings = (
            project_settings["tvpaint"]["create"]["create_review"]
        )
        self.default_variant = plugin_settings["default_variant"]
        self.default_variants = plugin_settings["default_variants"]
        self.active_on_create = plugin_settings["active_on_create"]

    def create(self):
        """Create review instance or sync existing one to current context."""
        # Look for existing instance created by this creator
        existing_instance = None
        for instance in self.create_context.instances:
            if instance.creator_identifier == self.identifier:
                existing_instance = instance
                break

        create_context = self.create_context
        host_name = create_context.host_name
        project_name = create_context.get_current_project_name()
        folder_path = create_context.get_current_folder_path()
        task_name = create_context.get_current_task_name()

        existing_folder_path = None
        if existing_instance is not None:
            existing_folder_path = existing_instance["folderPath"]

        if existing_instance is None:
            # Create new instance with current context
            folder_entity = ayon_api.get_folder_by_path(
                project_name, folder_path
            )
            task_entity = ayon_api.get_task_by_name(
                project_name, folder_entity["id"], task_name
            )
            product_name = self.get_product_name(
                project_name,
                folder_entity,
                task_entity,
                self.default_variant,
                host_name
            )
            data = {
                "folderPath": folder_path,
                "task": task_name,
                "variant": self.default_variant,
            }
            # Respect 'active_on_create' setting
            if not self.active_on_create:
                data["active"] = False

            new_instance = CreatedInstance(
                self.product_type, product_name, data, self
            )
            instances_data = self.host.list_instances()
            instances_data.append(new_instance.data_to_store())
            self.host.write_instances(instances_data)
            self._add_instance_to_context(new_instance)
        elif (
            existing_folder_path != folder_path
            or existing_instance["task"] != task_name
        ):
            # Update instance context when current context changed
            folder_entity = ayon_api.get_folder_by_path(
                project_name, folder_path
            )
            task_entity = ayon_api.get_task_by_name(
                project_name, folder_entity["id"], task_name
            )
            product_name = self.get_product_name(
                project_name,
                folder_entity,
                task_entity,
                existing_instance["variant"],
                host_name,
                existing_instance
            )
            existing_instance["folderPath"] = folder_path
            existing_instance["task"] = task_name
            existing_instance["productName"] = product_name

View file

@ -1,85 +0,0 @@
import ayon_api
from ayon_core.pipeline import CreatedInstance
from ayon_tvpaint.api.plugin import TVPaintAutoCreator
class TVPaintWorkfileCreator(TVPaintAutoCreator):
    """Auto-creator of workfile product instance.

    Keeps a single workfile instance in the workfile and updates its context
    (folder path, task and product name) to the current context.
    """
    product_type = "workfile"
    identifier = "workfile"
    label = "Workfile"
    icon = "fa.file-o"

    def apply_settings(self, project_settings):
        # Fill attributes from addon project settings
        plugin_settings = (
            project_settings["tvpaint"]["create"]["create_workfile"]
        )
        self.default_variant = plugin_settings["default_variant"]
        self.default_variants = plugin_settings["default_variants"]

    def create(self):
        """Create workfile instance or sync existing to current context."""
        # Look for existing instance created by this creator
        existing_instance = None
        for instance in self.create_context.instances:
            if instance.creator_identifier == self.identifier:
                existing_instance = instance
                break

        create_context = self.create_context
        host_name = create_context.host_name
        project_name = create_context.get_current_project_name()
        folder_path = create_context.get_current_folder_path()
        task_name = create_context.get_current_task_name()

        existing_folder_path = None
        if existing_instance is not None:
            existing_folder_path = existing_instance["folderPath"]

        if existing_instance is None:
            # Create new instance with current context
            folder_entity = ayon_api.get_folder_by_path(
                project_name, folder_path
            )
            task_entity = ayon_api.get_task_by_name(
                project_name, folder_entity["id"], task_name
            )
            product_name = self.get_product_name(
                project_name,
                folder_entity,
                task_entity,
                self.default_variant,
                host_name
            )
            data = {
                "folderPath": folder_path,
                "task": task_name,
                "variant": self.default_variant
            }
            new_instance = CreatedInstance(
                self.product_type, product_name, data, self
            )
            instances_data = self.host.list_instances()
            instances_data.append(new_instance.data_to_store())
            self.host.write_instances(instances_data)
            self._add_instance_to_context(new_instance)
        elif (
            existing_folder_path != folder_path
            or existing_instance["task"] != task_name
        ):
            # Update instance context when current context changed
            folder_entity = ayon_api.get_folder_by_path(
                project_name, folder_path
            )
            task_entity = ayon_api.get_task_by_name(
                project_name, folder_entity["id"], task_name
            )
            product_name = self.get_product_name(
                project_name,
                folder_entity,
                task_entity,
                existing_instance["variant"],
                host_name,
                existing_instance
            )
            existing_instance["folderPath"] = folder_path
            existing_instance["task"] = task_name
            existing_instance["productName"] = product_name

View file

@ -1,87 +0,0 @@
from ayon_core.lib.attribute_definitions import BoolDef
from ayon_tvpaint.api import plugin
from ayon_tvpaint.api.lib import execute_george_through_file
class ImportImage(plugin.Loader):
    """Load image or image sequence to TVPaint as new layer.

    The created layer is named '<folder>_<name>_v###' based on loaded
    context.
    """

    product_types = {"render", "image", "background", "plate", "review"}
    representations = {"*"}
    settings_category = "tvpaint"

    label = "Import Image"
    order = 1
    icon = "image"
    color = "white"

    # George script template importing the file and renaming the new layer
    import_script = (
        "filepath = \"{}\"\n"
        "layer_name = \"{}\"\n"
        "tv_loadsequence filepath {}PARSE layer_id\n"
        "tv_layerrename layer_id layer_name"
    )

    # Default values of options from 'get_options'
    defaults = {
        "stretch": True,
        "timestretch": True,
        "preload": True
    }

    @classmethod
    def get_options(cls, contexts):
        """Build loader option definitions shown in load dialog."""
        option_items = (
            (
                "stretch",
                "Stretch to project size",
                "Stretch loaded image/s to project resolution?"
            ),
            (
                "timestretch",
                "Stretch to timeline length",
                "Clip loaded image/s to timeline length?"
            ),
            (
                "preload",
                "Preload loaded image/s",
                "Preload image/s?"
            ),
        )
        return [
            BoolDef(
                key,
                label=label,
                default=cls.defaults[key],
                tooltip=tooltip
            )
            for key, label, tooltip in option_items
        ]

    def load(self, context, name, namespace, options):
        """Run george script that imports the representation."""
        # Collect george load options from passed (or default) values
        load_options = []
        if options.get("stretch", self.defaults["stretch"]):
            load_options.append("\"STRETCH\"")
        if options.get("timestretch", self.defaults["timestretch"]):
            load_options.append("\"TIMESTRETCH\"")
        if options.get("preload", self.defaults["preload"]):
            load_options.append("\"PRELOAD\"")

        load_options_str = " ".join(load_options)
        if load_options_str:
            load_options_str += " "

        # Prepare layer name from folder, product name and version
        layer_name = "{}_{}_v{:0>3}".format(
            context["folder"]["name"],
            name,
            context["version"]["name"]
        )
        # Fill import script with filename and layer name
        # - filename must not contain backwards slashes
        path = self.filepath_from_context(context).replace("\\", "/")
        george_script = self.import_script.format(
            path,
            layer_name,
            load_options_str
        )
        return execute_george_through_file(george_script)

View file

@ -1,319 +0,0 @@
import collections
from ayon_core.lib.attribute_definitions import BoolDef
from ayon_core.pipeline import registered_host
from ayon_tvpaint.api import plugin
from ayon_tvpaint.api.lib import (
get_layers_data,
execute_george_through_file,
)
from ayon_tvpaint.api.pipeline import (
write_workfile_metadata,
SECTION_NAME_CONTAINERS,
containerise,
)
class LoadImage(plugin.Loader):
    """Load image or image sequence to TVPaint as new layer."""

    # Product types and representations this loader can handle
    product_types = {"render", "image", "background", "plate", "review"}
    representations = {"*"}
    settings_category = "tvpaint"

    label = "Load Image"
    order = 1
    icon = "image"
    color = "white"

    # George script template importing the file and renaming the new layer
    # NOTE(review): the nested quoting around the filepath differs from
    #   'ImportImage' - presumably it wraps the path in literal double
    #   quotes for george; confirm with paths containing spaces
    import_script = (
        "filepath = '\"'\"{}\"'\"'\n"
        "layer_name = \"{}\"\n"
        "tv_loadsequence filepath {}PARSE layer_id\n"
        "tv_layerrename layer_id layer_name"
    )

    # Default values of options from 'get_options'
    defaults = {
        "stretch": True,
        "timestretch": True,
        "preload": True
    }
@classmethod
def get_options(cls, contexts):
return [
BoolDef(
"stretch",
label="Stretch to project size",
default=cls.defaults["stretch"],
tooltip="Stretch loaded image/s to project resolution?"
),
BoolDef(
"timestretch",
label="Stretch to timeline length",
default=cls.defaults["timestretch"],
tooltip="Clip loaded image/s to timeline length?"
),
BoolDef(
"preload",
label="Preload loaded image/s",
default=cls.defaults["preload"],
tooltip="Preload image/s?"
)
]
    def load(self, context, name, namespace, options):
        """Load representation as a new layer and containerize it.

        Args:
            context(dict): Representation context.
            name(str): Product name used for container.
            namespace(str): Optional namespace (layer name is used when
                not passed).
            options(dict): Loader options from 'get_options'.
        """
        # Read options with fallback to class defaults
        stretch = options.get("stretch", self.defaults["stretch"])
        timestretch = options.get("timestretch", self.defaults["timestretch"])
        preload = options.get("preload", self.defaults["preload"])

        # Collect george load options based on option values
        load_options = []
        if stretch:
            load_options.append("\"STRETCH\"")
        if timestretch:
            load_options.append("\"TIMESTRETCH\"")
        if preload:
            load_options.append("\"PRELOAD\"")

        load_options_str = ""
        for load_option in load_options:
            load_options_str += (load_option + " ")

        # Prepare layer name
        folder_name = context["folder"]["name"]
        product_name = context["product"]["name"]
        layer_name = self.get_unique_layer_name(folder_name, product_name)

        path = self.filepath_from_context(context)
        # Fill import script with filename and layer name
        # - filename must not contain backwards slashes
        george_script = self.import_script.format(
            path.replace("\\", "/"),
            layer_name,
            load_options_str
        )

        execute_george_through_file(george_script)

        # Find the freshly created layer by its name
        loaded_layer = None
        layers = get_layers_data()
        for layer in layers:
            if layer["name"] == layer_name:
                loaded_layer = layer
                break

        if loaded_layer is None:
            raise AssertionError(
                "Loading probably failed during execution of george script."
            )

        # Store layer name as container member
        layer_names = [loaded_layer["name"]]
        namespace = namespace or layer_name
        return containerise(
            name=name,
            namespace=namespace,
            members=layer_names,
            context=context,
            loader=self.__class__.__name__
        )
    def _remove_layers(self, layer_names=None, layer_ids=None, layers=None):
        """Remove layers from workfile by names or ids.

        Args:
            layer_names(list): Names of layers to remove.
            layer_ids(list): Ids of layers to remove (has priority over
                names).
            layers(list): Pre-queried layers data (queried when not passed).
        """
        if not layer_names and not layer_ids:
            self.log.warning("Got empty layer names list.")
            return

        if layers is None:
            layers = get_layers_data()

        available_ids = set(layer["layer_id"] for layer in layers)
        if layer_ids is None:
            # Backwards compatibility (layer ids were stored instead of
            #   names) - all numeric "names" are treated as ids
            layer_names_are_ids = True
            for layer_name in layer_names:
                if (
                    not isinstance(layer_name, int)
                    and not layer_name.isnumeric()
                ):
                    layer_names_are_ids = False
                    break
            if layer_names_are_ids:
                layer_ids = layer_names

        layer_ids_to_remove = []
        if layer_ids is not None:
            # Keep only ids that still exist in the workfile
            for layer_id in layer_ids:
                if layer_id in available_ids:
                    layer_ids_to_remove.append(layer_id)
        else:
            # Match layers by name
            # - ambiguous names (multiple layers with same name) are skipped
            layers_by_name = collections.defaultdict(list)
            for layer in layers:
                layers_by_name[layer["name"]].append(layer)
            for layer_name in layer_names:
                layers = layers_by_name[layer_name]
                if len(layers) == 1:
                    layer_ids_to_remove.append(layers[0]["layer_id"])

        if not layer_ids_to_remove:
            self.log.warning("No layers to delete.")
            return

        # Kill all matched layers with single george script
        george_script_lines = []
        for layer_id in layer_ids_to_remove:
            line = "tv_layerkill {}".format(layer_id)
            george_script_lines.append(line)
        george_script = "\n".join(george_script_lines)
        execute_george_through_file(george_script)
def _remove_container(self, container):
    """Remove the matching container entry from workfile metadata.

    Matching is done on both the member list and the representation id.
    """
    if not container:
        return

    representation = container["representation"]
    members = self.get_members_from_container(container)

    host = registered_host()
    containers = host.get_containers()

    matching_idx = None
    for idx, existing in enumerate(containers):
        existing_members = self.get_members_from_container(existing)
        same_repre = existing["representation"] == representation
        if existing_members == members and same_repre:
            matching_idx = idx
            break

    if matching_idx is None:
        self.log.warning(
            "Didn't find container in workfile containers. {}".format(
                container
            )
        )
        return

    containers.pop(matching_idx)
    write_workfile_metadata(SECTION_NAME_CONTAINERS, containers)
def remove(self, container):
    """Remove the container's layers and its metadata entry."""
    layer_names = self.get_members_from_container(container)
    self.log.warning("Layers to delete {}".format(layer_names))
    self._remove_layers(layer_names)
    self._remove_container(container)
def switch(self, container, representation):
    """Switch container to a different representation.

    Delegates to ``update`` which handles replacing the loaded layers.
    """
    self.update(container, representation)
def update(self, container, context):
    """Replace container with different version.

    New layers are loaded as first step. Then is tried to change data in
    new layers with data from old layers. When that is done old layers are
    removed.

    Args:
        container (dict): Container metadata stored in the workfile.
        context (dict): Load context of the new representation.
    """
    # Create new containers first
    # Get layer ids from previous container
    old_layer_names = self.get_members_from_container(container)

    # Backwards compatibility (layer ids were stored instead of names)
    old_layers_are_ids = True
    for name in old_layer_names:
        if isinstance(name, int) or name.isnumeric():
            continue
        old_layers_are_ids = False
        break

    old_layers = []
    layers = get_layers_data()
    # Snapshot of ids before load - used later to find the newly
    #   created layers
    previous_layer_ids = set(layer["layer_id"] for layer in layers)
    if old_layers_are_ids:
        for layer in layers:
            if layer["layer_id"] in old_layer_names:
                old_layers.append(layer)
    else:
        layers_by_name = collections.defaultdict(list)
        for layer in layers:
            layers_by_name[layer["name"]].append(layer)

        for layer_name in old_layer_names:
            # NOTE names that match multiple layers are skipped because
            #   it is ambiguous which layer belongs to the container
            layers = layers_by_name[layer_name]
            if len(layers) == 1:
                old_layers.append(layers[0])

    # Prepare few data
    # - topmost position and common group id of the old layers
    new_start_position = None
    new_group_id = None
    layer_ids_to_remove = set()
    for layer in old_layers:
        layer_ids_to_remove.add(layer["layer_id"])
        position = layer["position"]
        group_id = layer["group_id"]
        if new_start_position is None:
            new_start_position = position
        elif new_start_position > position:
            new_start_position = position

        # Group id `-1` marks that old layers were in different groups
        #   so the new layers are not regrouped
        if new_group_id is None:
            new_group_id = group_id
        elif new_group_id < 0:
            continue
        elif new_group_id != group_id:
            new_group_id = -1

    # Remove old container
    self._remove_container(container)
    # Remove old layers
    self._remove_layers(layer_ids=layer_ids_to_remove)

    name = container["name"]
    namespace = container["namespace"]
    new_container = self.load(context, name, namespace, {})
    new_layer_names = self.get_members_from_container(new_container)

    layers = get_layers_data()

    new_layers = []
    for layer in layers:
        if layer["layer_id"] in previous_layer_ids:
            continue
        if layer["name"] in new_layer_names:
            new_layers.append(layer)

    george_script_lines = []
    # Group new layers to same group as previous container layers had
    # - all old layers must be under same group
    if new_group_id is not None and new_group_id > 0:
        for layer in new_layers:
            line = "tv_layercolor \"set\" {} {}".format(
                layer["layer_id"], new_group_id
            )
            george_script_lines.append(line)

    # Rename new layer to have same name
    # - only if both old and new have one layer
    if len(old_layers) == 1 and len(new_layers) == 1:
        layer_name = old_layers[0]["name"]
        george_script_lines.append(
            "tv_layerrename {} \"{}\"".format(
                new_layers[0]["layer_id"], layer_name
            )
        )

    # Change position of new layer
    # - this must be done before remove old layers
    if len(new_layers) == 1 and new_start_position is not None:
        new_layer = new_layers[0]
        george_script_lines.extend([
            "tv_layerset {}".format(new_layer["layer_id"]),
            "tv_layermove {}".format(new_start_position)
        ])

    # Execute george scripts if there are any
    if george_script_lines:
        george_script = "\n".join(george_script_lines)
        execute_george_through_file(george_script)

View file

@ -1,123 +0,0 @@
import os
import tempfile
from ayon_tvpaint.api import plugin
from ayon_tvpaint.api.lib import (
execute_george_through_file,
)
class ImportSound(plugin.Loader):
    """Load sound to TVPaint.

    Sound layers does not have ids but only position index so we can't
    reference them as we can't say which is which input.

    We might do that (in future) by input path. Which may be identifier if
    we'll allow only one loaded instance of the representation as an audio.

    This plugin does not work for all version of TVPaint. Known working
    version is TVPaint 11.0.10 .

    It is allowed to load video files as sound but it does not check if video
    file contain any audio.
    """

    product_types = {"audio", "review", "plate"}
    representations = {"*"}

    label = "Import Sound"
    order = 1
    icon = "image"
    color = "white"

    import_script_lines = (
        "sound_path = '\"'\"{}\"'\"'",
        "output_path = \"{}\"",
        # Try to get sound clip info to check if we are in TVPaint that can
        # load sound
        "tv_clipcurrentid",
        "clip_id = result",
        "tv_soundclipinfo clip_id 0",
        "IF CMP(result,\"\")==1",
        (
            "tv_writetextfile \"strict\" \"append\" '\"'output_path'\"'"
            " 'success|'"
        ),
        "EXIT",
        "END",
        "tv_soundclipnew sound_path",
        "line = 'success|'result",
        "tv_writetextfile \"strict\" \"append\" '\"'output_path'\"' line"
    )

    def load(self, context, name, namespace, options):
        """Import sound file through george script and validate result.

        Raises:
            ValueError: When TVPaint reports a failure or the used TVPaint
                version does not support loading sound via george script.
        """
        # Create temp file for output
        output_file = tempfile.NamedTemporaryFile(
            mode="w", prefix="ayon_tvp_", suffix=".txt", delete=False
        )
        output_file.close()
        output_filepath = output_file.name.replace("\\", "/")

        # Prepare george script
        path = self.filepath_from_context(context).replace("\\", "/")
        import_script = "\n".join(self.import_script_lines)
        george_script = import_script.format(
            path,
            output_filepath
        )
        self.log.info("*** George script:\n{}\n***".format(george_script))

        # FIX: temp file was leaked when george execution or reading of the
        #   output file raised - cleanup now happens in `finally`
        try:
            # Execute george script
            execute_george_through_file(george_script)

            # Read output file
            lines = []
            with open(output_filepath, "r") as file_stream:
                for line in file_stream:
                    line = line.rstrip()
                    if line:
                        lines.append(line)
        finally:
            # Clean up temp file
            os.remove(output_filepath)

        # Lines are in "key|value" format
        output = {}
        for line in lines:
            key, value = line.split("|")
            output[key] = value

        self._validate_result(output.get("success"))

    def _validate_result(self, success):
        """Translate george script result value into success or exception."""
        # Successfully loaded sound
        if success == "0":
            return
        if success == "":
            raise ValueError(
                "Your TVPaint version does not support loading of"
                " sound through George script. Please use manual load."
            )
        if success is None:
            raise ValueError(
                "Unknown error happened during load."
                " Please report and try to use manual load."
            )
        # Possible errors by TVPaint documentation
        # https://www.tvpaint.com/doc/tvpaint-animation-11/george-commands#tv_soundclipnew
        if success == "-1":
            raise ValueError(
                "BUG: George command did not get enough arguments."
            )
        if success == "-2":
            # Who know what does that mean?
            raise ValueError("No current clip without mixer.")
        if success == "-3":
            raise ValueError("TVPaint couldn't read the file.")
        if success == "-4":
            raise ValueError("TVPaint couldn't add the track.")
        raise ValueError("BUG: Unknown success value {}.".format(success))

View file

@ -1,115 +0,0 @@
import os
from ayon_core.pipeline import (
registered_host,
get_current_context,
Anatomy,
)
from ayon_core.pipeline.workfile import (
get_workfile_template_key_from_context,
get_last_workfile_with_version,
)
from ayon_core.pipeline.template_data import get_template_data_with_names
from ayon_tvpaint.api import plugin
from ayon_tvpaint.api.lib import (
execute_george_through_file,
)
from ayon_tvpaint.api.pipeline import (
get_current_workfile_context,
)
from ayon_core.pipeline.version_start import get_versioning_start
class LoadWorkfile(plugin.Loader):
    """Load workfile."""

    product_types = {"workfile"}
    representations = {"tvpp"}

    label = "Load Workfile"

    def load(self, context, name, namespace, options):
        """Open published workfile and save it as a new work version.

        Raises:
            FileExistsError: When the representation file is not on disk.
                NOTE(review): ``FileNotFoundError`` would be the fitting
                exception type, kept for backward compatibility with
                callers that may catch it.
        """
        # Load context of current workfile as first thing
        # - which context and extension has
        filepath = self.filepath_from_context(context)
        filepath = filepath.replace("\\", "/")
        if not os.path.exists(filepath):
            raise FileExistsError(
                "The loaded file does not exist. Try downloading it first."
            )

        host = registered_host()
        current_file = host.get_current_workfile()
        work_context = get_current_workfile_context()

        george_script = "tv_LoadProject '\"'\"{}\"'\"'".format(
            filepath
        )
        execute_george_through_file(george_script)

        # Save workfile.
        host_name = "tvpaint"
        # FIX: new-style context keys must be read from 'work_context'
        #   (workfile metadata), not from the load 'context' which does not
        #   contain "project_name"/"folder_path"/"task_name" keys
        if "project_name" in work_context:
            project_name = work_context["project_name"]
            folder_path = work_context["folder_path"]
            task_name = work_context["task_name"]
        else:
            # Backwards compatible key names in workfile metadata
            project_name = work_context.get("project")
            folder_path = work_context.get("asset")
            task_name = work_context.get("task")

        # For cases when there is workfile without work_context
        if not folder_path:
            current_context = get_current_context()
            project_name = current_context["project_name"]
            folder_path = current_context["folder_path"]
            task_name = current_context["task_name"]

        template_key = get_workfile_template_key_from_context(
            project_name,
            folder_path,
            task_name,
            host_name,
        )
        anatomy = Anatomy(project_name)

        data = get_template_data_with_names(
            project_name, folder_path, task_name, host_name
        )
        data["root"] = anatomy.roots

        work_template = anatomy.get_template_item("work", template_key)

        # Define saving file extension
        extensions = host.get_workfile_extensions()
        if current_file:
            # Match the extension of current file
            _, extension = os.path.splitext(current_file)
        else:
            # Fall back to the first extension supported for this host.
            extension = extensions[0]

        data["ext"] = extension.lstrip(".")

        work_root = work_template["directory"].format_strict(data)
        version = get_last_workfile_with_version(
            work_root, work_template["file"].template, data, extensions
        )[1]
        if version is None:
            version = get_versioning_start(
                project_name,
                "tvpaint",
                task_name=task_name,
                task_type=data["task"]["type"],
                product_type="workfile"
            )
        else:
            version += 1

        data["version"] = version

        filename = work_template["file"].format_strict(data)
        path = os.path.join(work_root, filename)
        host.save_workfile(path)

View file

@ -1,38 +0,0 @@
import pyblish.api
class CollectOutputFrameRange(pyblish.api.InstancePlugin):
    """Collect frame start/end from context.

    When instances are collected context does not contain `frameStart` and
    `frameEnd` keys yet. They are collected in global plugin
    `CollectContextEntities`.
    """
    label = "Collect output frame range"
    order = pyblish.api.CollectorOrder + 0.4999
    hosts = ["tvpaint"]
    families = ["review", "render"]

    settings_category = "tvpaint"

    def process(self, instance):
        """Derive fps and frame range from folder attribs and scene marks."""
        folder_entity = instance.data.get("folderEntity")
        if not folder_entity:
            return

        attrib = folder_entity["attrib"]
        context_data = instance.context.data

        # Duration comes from scene mark in/out distance
        duration = (
            context_data["sceneMarkOut"] - context_data["sceneMarkIn"]
        )
        frame_start = attrib["frameStart"]
        frame_end = frame_start + duration

        instance.data["fps"] = attrib["fps"]
        instance.data["frameStart"] = frame_start
        instance.data["frameEnd"] = frame_end

        self.log.info(
            "Set frames {}-{} on instance {} ".format(
                frame_start, frame_end, instance.data["productName"]
            )
        )

View file

@ -1,115 +0,0 @@
import copy
import pyblish.api
from ayon_core.lib import prepare_template_data
class CollectRenderInstances(pyblish.api.InstancePlugin):
    """Fill render/review instances with layers and final product names."""

    label = "Collect Render Instances"
    order = pyblish.api.CollectorOrder - 0.4
    hosts = ["tvpaint"]
    families = ["render", "review"]

    settings_category = "tvpaint"

    ignore_render_pass_transparency = False

    def process(self, instance):
        creator_identifier = instance.data["creator_identifier"]

        # Dispatch by creator identifier
        collector_by_identifier = {
            "render.layer": self._collect_data_for_render_layer,
            "render.pass": self._collect_data_for_render_pass,
            "render.scene": self._collect_data_for_render_scene,
        }
        collector = collector_by_identifier.get(creator_identifier)
        if collector is None:
            # Review instances get layers only - no name/label handling
            if creator_identifier == "scene.review":
                self._collect_data_for_review(instance)
            return

        collector(instance)

        context_data = instance.context.data
        product_name = instance.data["productName"]
        instance.data["name"] = product_name
        instance.data["label"] = "{} [{}-{}]".format(
            product_name,
            context_data["sceneMarkIn"] + 1,
            context_data["sceneMarkOut"] + 1
        )

    def _collect_data_for_render_layer(self, instance):
        """Collect layers that belong to the instance's group id."""
        instance.data["families"].append("renderLayer")

        creator_attributes = instance.data["creator_attributes"]
        if creator_attributes["mark_for_review"]:
            instance.data["families"].append("review")

        group_id = creator_attributes["group_id"]
        instance.data["layers"] = [
            copy.deepcopy(layer)
            for layer in instance.context.data["layersData"]
            if layer["group_id"] == group_id
        ]

    def _collect_data_for_render_pass(self, instance):
        """Collect named layers and link the parent render layer data."""
        instance.data["families"].append("renderPass")

        creator_attributes = instance.data["creator_attributes"]
        if creator_attributes["mark_for_review"]:
            instance.data["families"].append("review")

        layer_names = set(instance.data["layer_names"])
        instance.data["layers"] = [
            copy.deepcopy(layer)
            for layer in instance.context.data["layersData"]
            if layer["name"] in layer_names
        ]
        instance.data["ignoreLayersTransparency"] = (
            self.ignore_render_pass_transparency
        )

        render_layer_id = creator_attributes["render_layer_instance_id"]
        render_layer_data = next(
            (
                in_data
                for in_data in instance.context.data["workfileInstances"]
                if (
                    in_data.get("creator_identifier") == "render.layer"
                    and in_data["instance_id"] == render_layer_id
                )
            ),
            None
        )
        instance.data["renderLayerData"] = copy.deepcopy(render_layer_data)
        # Invalid state
        if render_layer_data is None:
            return

        fill_data = prepare_template_data(
            {"renderlayer": render_layer_data["variant"]}
        )
        instance.data["productName"] = (
            instance.data["productName"].format(**fill_data)
        )

    def _collect_data_for_render_scene(self, instance):
        """Collect all layers and fill render pass name into product name."""
        instance.data["families"].append("renderScene")

        creator_attributes = instance.data["creator_attributes"]
        if creator_attributes["mark_for_review"]:
            instance.data["families"].append("review")

        instance.data["layers"] = copy.deepcopy(
            instance.context.data["layersData"]
        )

        fill_data = prepare_template_data(
            {"renderpass": creator_attributes["render_pass_name"]}
        )
        instance.data["productName"] = (
            instance.data["productName"].format(**fill_data)
        )

    def _collect_data_for_review(self, instance):
        """Review uses all layers from the scene."""
        instance.data["layers"] = copy.deepcopy(
            instance.context.data["layersData"]
        )

View file

@ -1,34 +0,0 @@
import os
import json
import pyblish.api
class CollectWorkfile(pyblish.api.InstancePlugin):
    """Add current workfile as representation of workfile instance."""

    label = "Collect Workfile"
    order = pyblish.api.CollectorOrder - 0.4
    hosts = ["tvpaint"]
    families = ["workfile"]

    settings_category = "tvpaint"

    def process(self, instance):
        current_file = instance.context.data["currentFile"]

        self.log.info(
            "Workfile path used for workfile product: {}".format(current_file)
        )

        staging_dir, filename = os.path.split(current_file)
        ext = os.path.splitext(filename)[1].lstrip(".")

        instance.data["representations"].append({
            "name": ext,
            "ext": ext,
            "files": filename,
            "stagingDir": staging_dir
        })

        self.log.info("Collected workfile instance: {}".format(
            json.dumps(instance.data, indent=4)
        ))

View file

@ -1,221 +0,0 @@
import os
import json
import tempfile
import pyblish.api
from ayon_tvpaint.api.lib import (
execute_george,
execute_george_through_file,
get_layers_data,
get_groups_data,
)
from ayon_tvpaint.api.pipeline import (
SECTION_NAME_CONTEXT,
SECTION_NAME_INSTANCES,
SECTION_NAME_CONTAINERS,
get_workfile_metadata_string,
write_workfile_metadata,
get_current_workfile_context,
list_instances,
)
class ResetTVPaintWorkfileMetadata(pyblish.api.Action):
    """Fix invalid metadata in workfile."""
    label = "Reset invalid workfile metadata"
    on = "failed"

    def process(self, context, plugin):
        """Validate each metadata section and reset those that can't parse.

        FIX: previous implementation did ``return json.loads(json_string)``
        which exited on the first parseable section - remaining sections
        were never validated and the reset branch was unreachable for them.
        Parsing is now done only to validate the stored JSON and the loop
        always continues to the next section.
        """
        metadata_keys = {
            SECTION_NAME_CONTEXT: {},
            SECTION_NAME_INSTANCES: [],
            SECTION_NAME_CONTAINERS: []
        }
        for metadata_key, default in metadata_keys.items():
            json_string = get_workfile_metadata_string(metadata_key)
            if not json_string:
                continue

            try:
                # Parse only to validate the stored value
                json.loads(json_string)
            except Exception:
                self.log.warning(
                    (
                        "Couldn't parse metadata from key \"{}\"."
                        " Will reset to default value \"{}\"."
                        " Loaded value was: {}"
                    ).format(metadata_key, default, json_string),
                    exc_info=True
                )
                write_workfile_metadata(metadata_key, default)
class CollectWorkfileData(pyblish.api.ContextPlugin):
    """Collect context, instances, layers, groups and scene information.

    Queries TVPaint through george commands and stores the results on the
    pyblish context for later plugins.
    """

    label = "Collect Workfile Data"
    order = pyblish.api.CollectorOrder - 0.45
    hosts = ["tvpaint"]
    actions = [ResetTVPaintWorkfileMetadata]

    settings_category = "tvpaint"

    def process(self, context):
        # Make sure the current project is selected in TVPaint
        current_project_id = execute_george("tv_projectcurrentid")
        execute_george("tv_projectselect {}".format(current_project_id))

        # Collect and store current context to have reference
        current_context = {
            "project_name": context.data["projectName"],
            "folder_path": context.data["folderPath"],
            "task_name": context.data["task"]
        }
        self.log.debug("Current context is: {}".format(current_context))

        # Collect context from workfile metadata
        self.log.info("Collecting workfile context")

        workfile_context = get_current_workfile_context()
        # Convert old-style metadata keys to the new naming
        if "project" in workfile_context:
            workfile_context = {
                "project_name": workfile_context.get("project"),
                "folder_path": workfile_context.get("asset"),
                "task_name": workfile_context.get("task"),
            }
        # Store workfile context to pyblish context
        context.data["workfile_context"] = workfile_context
        if workfile_context:
            # Change current context with context from workfile
            key_map = (
                ("AYON_FOLDER_PATH", "folder_path"),
                ("AYON_TASK_NAME", "task_name")
            )
            for env_key, key in key_map:
                os.environ[env_key] = workfile_context[key]
            self.log.info("Context changed to: {}".format(workfile_context))

            folder_path = workfile_context["folder_path"]
            task_name = workfile_context["task_name"]

        else:
            folder_path = current_context["folder_path"]
            task_name = current_context["task_name"]
            # Handle older workfiles or workfiles without metadata
            self.log.warning((
                "Workfile does not contain information about context."
                " Using current Session context."
            ))

        # Store context folder path
        context.data["folderPath"] = folder_path
        context.data["task"] = task_name
        self.log.info(
            "Context is set to Folder: \"{}\" and Task: \"{}\"".format(
                folder_path, task_name
            )
        )

        # Collect instances
        self.log.info("Collecting instance data from workfile")
        instance_data = list_instances()
        context.data["workfileInstances"] = instance_data
        self.log.debug(
            "Instance data:\"{}".format(json.dumps(instance_data, indent=4))
        )

        # Collect information about layers
        self.log.info("Collecting layers data from workfile")
        layers_data = get_layers_data()
        layers_by_name = {}
        for layer in layers_data:
            layer_name = layer["name"]
            if layer_name not in layers_by_name:
                layers_by_name[layer_name] = []
            layers_by_name[layer_name].append(layer)
        context.data["layersData"] = layers_data
        context.data["layersByName"] = layers_by_name

        self.log.debug(
            "Layers data:\"{}".format(json.dumps(layers_data, indent=4))
        )

        # Collect information about groups
        self.log.info("Collecting groups data from workfile")
        group_data = get_groups_data()
        context.data["groupsData"] = group_data
        self.log.debug(
            "Group data:\"{}".format(json.dumps(group_data, indent=4))
        )

        self.log.info("Collecting scene data from workfile")
        # tv_projectinfo returns space separated values; path may contain
        #   spaces so values are popped from the end first
        workfile_info_parts = execute_george("tv_projectinfo").split(" ")

        # Project frame start - not used
        workfile_info_parts.pop(-1)
        field_order = workfile_info_parts.pop(-1)
        frame_rate = float(workfile_info_parts.pop(-1))
        pixel_apsect = float(workfile_info_parts.pop(-1))
        height = int(workfile_info_parts.pop(-1))
        width = int(workfile_info_parts.pop(-1))
        workfile_path = " ".join(workfile_info_parts).replace("\"", "")

        # Marks return as "{frame - 1} {state} ", example "0 set".
        result = execute_george("tv_markin")
        mark_in_frame, mark_in_state, _ = result.split(" ")

        result = execute_george("tv_markout")
        mark_out_frame, mark_out_state, _ = result.split(" ")

        scene_data = {
            "currentFile": workfile_path,
            "sceneWidth": width,
            "sceneHeight": height,
            "scenePixelAspect": pixel_apsect,
            "sceneFps": frame_rate,
            "sceneFieldOrder": field_order,
            "sceneMarkIn": int(mark_in_frame),
            "sceneMarkInState": mark_in_state == "set",
            "sceneMarkOut": int(mark_out_frame),
            "sceneMarkOutState": mark_out_state == "set",
            "sceneStartFrame": int(execute_george("tv_startframe")),
            "sceneBgColor": self._get_bg_color()
        }
        self.log.debug(
            "Scene data: {}".format(json.dumps(scene_data, indent=4))
        )
        context.data.update(scene_data)

    def _get_bg_color(self):
        """Background color set on scene.

        Is important for review exporting where scene bg color is used as
        background.

        Returns:
            Union[list[str], None]: Space-split result of ``tv_background``
                or None when nothing was written to the output file.
        """
        # George can't return long values directly; write result to a
        #   temp file and read it back
        output_file = tempfile.NamedTemporaryFile(
            mode="w", prefix="a_tvp_", suffix=".txt", delete=False
        )
        output_file.close()
        output_filepath = output_file.name.replace("\\", "/")
        george_script_lines = [
            # Variable containing full path to output file
            "output_path = \"{}\"".format(output_filepath),
            "tv_background",
            "bg_color = result",
            # Write data to output file
            (
                "tv_writetextfile"
                " \"strict\" \"append\" '\"'output_path'\"' bg_color"
            )
        ]
        george_script = "\n".join(george_script_lines)
        execute_george_through_file(george_script)

        with open(output_filepath, "r") as stream:
            data = stream.read()
        os.remove(output_filepath)

        data = data.strip()
        if not data:
            return None
        return data.split(" ")

View file

@ -1,103 +0,0 @@
"""Plugin converting png files from ExtractSequence into exrs.
Requires:
ExtractSequence - source of PNG
ExtractReview - review was already created so we can convert to any exr
"""
import os
import json
import pyblish.api
from ayon_core.lib import (
get_oiio_tool_args,
ToolNotFoundError,
run_subprocess,
)
from ayon_core.pipeline import KnownPublishError
class ExtractConvertToEXR(pyblish.api.InstancePlugin):
    """Convert PNG representations rendered by ExtractSequence into EXRs.

    Optionally replaces the source PNG representations and schedules the
    source files for cleanup.
    """

    # Offset to get after ExtractSequence plugin.
    order = pyblish.api.ExtractorOrder + 0.1
    label = "Extract Sequence EXR"
    hosts = ["tvpaint"]
    families = ["render"]

    settings_category = "tvpaint"

    enabled = False

    # Replace source PNG files or just add
    replace_pngs = True
    # EXR compression
    exr_compression = "ZIP"

    def process(self, instance):
        repres = instance.data.get("representations")
        if not repres:
            return

        try:
            oiio_args = get_oiio_tool_args("oiiotool")
        except ToolNotFoundError:
            # Raise an exception when oiiotool is not available
            # - this can currently happen on MacOS machines
            raise KnownPublishError(
                "OpenImageIO tool is not available on this machine."
            )

        new_repres = []
        # FIX: iterate over a copy - 'repres' aliases
        #   instance.data["representations"] and removing items from a list
        #   while iterating it skips elements
        for repre in tuple(repres):
            if repre["name"] != "png":
                continue

            self.log.info(
                "Processing representation: {}".format(
                    json.dumps(repre, sort_keys=True, indent=4)
                )
            )

            src_filepaths = set()
            new_filenames = []
            for src_filename in repre["files"]:
                dst_filename = os.path.splitext(src_filename)[0] + ".exr"
                new_filenames.append(dst_filename)

                src_filepath = os.path.join(repre["stagingDir"], src_filename)
                dst_filepath = os.path.join(repre["stagingDir"], dst_filename)

                src_filepaths.add(src_filepath)

                args = oiio_args + [
                    src_filepath,
                    "--compression", self.exr_compression,
                    # TODO how to define color conversion?
                    "--colorconvert", "sRGB", "linear",
                    "-o", dst_filepath
                ]
                run_subprocess(args)

            new_repres.append(
                {
                    "name": "exr",
                    "ext": "exr",
                    "files": new_filenames,
                    "stagingDir": repre["stagingDir"],
                    "tags": list(repre["tags"])
                }
            )

            if self.replace_pngs:
                instance.data["representations"].remove(repre)

                for filepath in src_filepaths:
                    instance.context.data["cleanupFullPaths"].append(filepath)

        instance.data["representations"].extend(new_repres)
        self.log.info(
            "Representations: {}".format(
                json.dumps(
                    instance.data["representations"], sort_keys=True, indent=4
                )
            )
        )

View file

@ -1,449 +0,0 @@
import os
import copy
import tempfile
from PIL import Image
import pyblish.api
from ayon_core.pipeline.publish import (
KnownPublishError,
get_publish_instance_families,
)
from ayon_tvpaint.api.lib import (
execute_george,
execute_george_through_file,
get_layers_pre_post_behavior,
get_layers_exposure_frames,
)
from ayon_tvpaint.lib import (
calculate_layers_extraction_data,
get_frame_filename_template,
fill_reference_frames,
composite_rendered_layers,
rename_filepaths_by_frame_start,
)
class ExtractSequence(pyblish.api.InstancePlugin):
label = "Extract Sequence"
order = pyblish.api.ExtractorOrder
hosts = ["tvpaint"]
families = ["review", "render"]
settings_category = "tvpaint"
# Modifiable with settings
review_bg = [255, 255, 255, 1.0]
def process(self, instance):
self.log.info(
"* Processing instance \"{}\"".format(instance.data["label"])
)
# Get all layers and filter out not visible
layers = instance.data["layers"]
filtered_layers = [
layer
for layer in layers
if layer["visible"]
]
layer_names = [str(layer["name"]) for layer in filtered_layers]
if not layer_names:
self.log.info(
"None of the layers from the instance"
" are visible. Extraction skipped."
)
return
joined_layer_names = ", ".join(
["\"{}\"".format(name) for name in layer_names]
)
self.log.debug(
"Instance has {} layers with names: {}".format(
len(layer_names), joined_layer_names
)
)
ignore_layers_transparency = instance.data.get(
"ignoreLayersTransparency", False
)
mark_in = instance.context.data["sceneMarkIn"]
mark_out = instance.context.data["sceneMarkOut"]
# Change scene Start Frame to 0 to prevent frame index issues
# - issue is that TVPaint versions deal with frame indexes in a
# different way when Start Frame is not `0`
# NOTE It will be set back after rendering
scene_start_frame = instance.context.data["sceneStartFrame"]
execute_george("tv_startframe 0")
# Frame start/end may be stored as float
frame_start = int(instance.data["frameStart"])
# Handles are not stored per instance but on Context
handle_start = instance.context.data["handleStart"]
scene_bg_color = instance.context.data["sceneBgColor"]
# Prepare output frames
output_frame_start = frame_start - handle_start
# Change output frame start to 0 if handles cause it's negative number
if output_frame_start < 0:
self.log.warning((
"Frame start with handles has negative value."
" Changed to \"0\". Frames start: {}, Handle Start: {}"
).format(frame_start, handle_start))
output_frame_start = 0
# Calculate frame end
output_frame_end = output_frame_start + (mark_out - mark_in)
# Save to staging dir
output_dir = instance.data.get("stagingDir")
if not output_dir:
# Create temp folder if staging dir is not set
output_dir = (
tempfile.mkdtemp(prefix="tvpaint_render_")
).replace("\\", "/")
instance.data["stagingDir"] = output_dir
self.log.debug(
"Files will be rendered to folder: {}".format(output_dir)
)
if instance.data["productType"] == "review":
result = self.render_review(
output_dir, mark_in, mark_out, scene_bg_color
)
else:
# Render output
result = self.render(
output_dir,
mark_in,
mark_out,
filtered_layers,
ignore_layers_transparency
)
output_filepaths_by_frame_idx, thumbnail_fullpath = result
# Change scene frame Start back to previous value
execute_george("tv_startframe {}".format(scene_start_frame))
# Sequence of one frame
if not output_filepaths_by_frame_idx:
self.log.warning("Extractor did not create any output.")
return
repre_files = self._rename_output_files(
output_filepaths_by_frame_idx,
mark_in,
mark_out,
output_frame_start
)
# Fill tags and new families from project settings
instance_families = get_publish_instance_families(instance)
tags = []
if "review" in instance_families:
tags.append("review")
# Sequence of one frame
single_file = len(repre_files) == 1
if single_file:
repre_files = repre_files[0]
# Extension is hardcoded
# - changing extension would require change code
new_repre = {
"name": "png",
"ext": "png",
"files": repre_files,
"stagingDir": output_dir,
"tags": tags
}
if not single_file:
new_repre["frameStart"] = output_frame_start
new_repre["frameEnd"] = output_frame_end
self.log.debug("Creating new representation: {}".format(new_repre))
instance.data["representations"].append(new_repre)
if not thumbnail_fullpath:
return
thumbnail_ext = os.path.splitext(
thumbnail_fullpath
)[1].replace(".", "")
# Create thumbnail representation
thumbnail_repre = {
"name": "thumbnail",
"ext": thumbnail_ext,
"outputName": "thumb",
"files": os.path.basename(thumbnail_fullpath),
"stagingDir": output_dir,
"tags": ["thumbnail"]
}
instance.data["representations"].append(thumbnail_repre)
def _rename_output_files(
self, filepaths_by_frame, mark_in, mark_out, output_frame_start
):
new_filepaths_by_frame = rename_filepaths_by_frame_start(
filepaths_by_frame, mark_in, mark_out, output_frame_start
)
repre_filenames = []
for filepath in new_filepaths_by_frame.values():
repre_filenames.append(os.path.basename(filepath))
if mark_in < output_frame_start:
repre_filenames = list(reversed(repre_filenames))
return repre_filenames
def render_review(
self, output_dir, mark_in, mark_out, scene_bg_color
):
""" Export images from TVPaint using `tv_savesequence` command.
Args:
output_dir (str): Directory where files will be stored.
mark_in (int): Starting frame index from which export will begin.
mark_out (int): On which frame index export will end.
scene_bg_color (list): Bg color set in scene. Result of george
script command `tv_background`.
Returns:
tuple: With 2 items first is list of filenames second is path to
thumbnail.
"""
filename_template = get_frame_filename_template(mark_out)
self.log.debug("Preparing data for rendering.")
first_frame_filepath = os.path.join(
output_dir,
filename_template.format(frame=mark_in)
)
bg_color = self._get_review_bg_color()
george_script_lines = [
# Change bg color to color from settings
"tv_background \"color\" {} {} {}".format(*bg_color),
"tv_SaveMode \"PNG\"",
"export_path = \"{}\"".format(
first_frame_filepath.replace("\\", "/")
),
"tv_savesequence '\"'export_path'\"' {} {}".format(
mark_in, mark_out
)
]
if scene_bg_color:
# Change bg color back to previous scene bg color
_scene_bg_color = copy.deepcopy(scene_bg_color)
bg_type = _scene_bg_color.pop(0)
orig_color_command = [
"tv_background",
"\"{}\"".format(bg_type)
]
orig_color_command.extend(_scene_bg_color)
george_script_lines.append(" ".join(orig_color_command))
execute_george_through_file("\n".join(george_script_lines))
first_frame_filepath = None
output_filepaths_by_frame_idx = {}
for frame_idx in range(mark_in, mark_out + 1):
filename = filename_template.format(frame=frame_idx)
filepath = os.path.join(output_dir, filename)
output_filepaths_by_frame_idx[frame_idx] = filepath
if not os.path.exists(filepath):
raise KnownPublishError(
"Output was not rendered. File was not found {}".format(
filepath
)
)
if first_frame_filepath is None:
first_frame_filepath = filepath
thumbnail_filepath = None
if first_frame_filepath and os.path.exists(first_frame_filepath):
thumbnail_filepath = os.path.join(output_dir, "thumbnail.jpg")
source_img = Image.open(first_frame_filepath)
if source_img.mode.lower() != "rgb":
source_img = source_img.convert("RGB")
source_img.save(thumbnail_filepath)
return output_filepaths_by_frame_idx, thumbnail_filepath
def render(
self, output_dir, mark_in, mark_out, layers, ignore_layer_opacity
):
""" Export images from TVPaint.
Args:
output_dir (str): Directory where files will be stored.
mark_in (int): Starting frame index from which export will begin.
mark_out (int): On which frame index export will end.
layers (list): List of layers to be exported.
ignore_layer_opacity (bool): Layer's opacity will be ignored.
Returns:
tuple: With 2 items first is list of filenames second is path to
thumbnail.
"""
self.log.debug("Preparing data for rendering.")
# Map layers by position
layers_by_position = {}
layers_by_id = {}
layer_ids = []
for layer in layers:
layer_id = layer["layer_id"]
position = layer["position"]
layers_by_position[position] = layer
layers_by_id[layer_id] = layer
layer_ids.append(layer_id)
# Sort layer positions in reverse order
sorted_positions = list(reversed(sorted(layers_by_position.keys())))
if not sorted_positions:
return [], None
self.log.debug("Collecting pre/post behavior of individual layers.")
behavior_by_layer_id = get_layers_pre_post_behavior(layer_ids)
exposure_frames_by_layer_id = get_layers_exposure_frames(
layer_ids, layers
)
extraction_data_by_layer_id = calculate_layers_extraction_data(
layers,
exposure_frames_by_layer_id,
behavior_by_layer_id,
mark_in,
mark_out
)
# Render layers
filepaths_by_layer_id = {}
for layer_id, render_data in extraction_data_by_layer_id.items():
layer = layers_by_id[layer_id]
filepaths_by_layer_id[layer_id] = self._render_layer(
render_data, layer, output_dir, ignore_layer_opacity
)
# Prepare final filepaths where compositing should store result
output_filepaths_by_frame = {}
thumbnail_src_filepath = None
finale_template = get_frame_filename_template(mark_out)
for frame_idx in range(mark_in, mark_out + 1):
filename = finale_template.format(frame=frame_idx)
filepath = os.path.join(output_dir, filename)
output_filepaths_by_frame[frame_idx] = filepath
if thumbnail_src_filepath is None:
thumbnail_src_filepath = filepath
self.log.info("Started compositing of layer frames.")
composite_rendered_layers(
layers, filepaths_by_layer_id,
mark_in, mark_out,
output_filepaths_by_frame
)
self.log.info("Compositing finished")
thumbnail_filepath = None
if thumbnail_src_filepath and os.path.exists(thumbnail_src_filepath):
source_img = Image.open(thumbnail_src_filepath)
thumbnail_filepath = os.path.join(output_dir, "thumbnail.jpg")
# Composite background only on rgba images
# - just making sure
if source_img.mode.lower() == "rgba":
bg_color = self._get_review_bg_color()
self.log.debug("Adding thumbnail background color {}.".format(
" ".join([str(val) for val in bg_color])
))
bg_image = Image.new("RGBA", source_img.size, bg_color)
thumbnail_obj = Image.alpha_composite(bg_image, source_img)
thumbnail_obj.convert("RGB").save(thumbnail_filepath)
else:
self.log.info((
"Source for thumbnail has mode \"{}\" (Expected: RGBA)."
" Can't use thubmanail background color."
).format(source_img.mode))
source_img.save(thumbnail_filepath)
return output_filepaths_by_frame, thumbnail_filepath
def _get_review_bg_color(self):
    """Return review background color as an (R, G, B) tuple.

    Falls back to white when ``self.review_bg`` is not set or has an
    unexpected length. A 4-component value is treated as RGBA and the
    alpha component is discarded.
    """
    color = (255, 255, 255)
    if self.review_bg:
        if len(self.review_bg) == 4:
            color = tuple(self.review_bg[:3])
        elif len(self.review_bg) == 3:
            color = tuple(self.review_bg)
    return color
def _render_layer(
    self, render_data, layer, output_dir, ignore_layer_opacity
):
    """Render exposure frames of a single layer using a george script.

    Args:
        render_data (dict): Extraction data with "frame_references"
            and "filenames_by_frame_index".
        layer (dict): Layer information ("layer_id", "name").
        output_dir (str): Directory where images are stored.
        ignore_layer_opacity (bool): Render at full layer density.

    Returns:
        dict: Output filepath by frame index. Frames without a source
            reference map to ``None``.
    """
    frame_references = render_data["frame_references"]
    filenames_by_frame_index = render_data["filenames_by_frame_index"]
    layer_id = layer["layer_id"]

    script_lines = [
        "tv_layerset {}".format(layer_id),
        "tv_SaveMode \"PNG\""
    ]
    if ignore_layer_opacity:
        # Force full density for render, remember original to restore
        script_lines.extend([
            "tv_layerdensity 100",
            "orig_opacity = result",
        ])

    filepaths_by_frame = {}
    rendered_frames = []
    for frame_idx, ref_idx in frame_references.items():
        # Frame without a reference has no source image to render
        if ref_idx is None:
            filepaths_by_frame[frame_idx] = None
            continue

        dst_path = "/".join(
            [output_dir, filenames_by_frame_index[frame_idx]]
        )
        filepaths_by_frame[frame_idx] = dst_path
        # Only self-referencing frames are rendered; the rest are
        # filled from their reference afterwards
        if frame_idx != ref_idx:
            continue
        rendered_frames.append(str(frame_idx))
        # Jump to the frame and save its image
        script_lines.append("tv_layerImage {}".format(frame_idx))
        script_lines.append("tv_saveimage \"{}\"".format(dst_path))

    if ignore_layer_opacity:
        # Restore the layer density captured before rendering
        script_lines.append("tv_layerdensity orig_opacity")

    self.log.debug("Rendering Exposure frames {} of layer {} ({})".format(
        ",".join(rendered_frames), layer_id, layer["name"]
    ))
    # Let TVPaint render the layer's images
    execute_george_through_file("\n".join(script_lines))

    self.log.debug("Filling frames not rendered frames.")
    fill_reference_frames(frame_references, filepaths_by_frame)

    return filepaths_by_frame

View file

@ -1,22 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<root>
<error id="main">
<title>Product context</title>
<description>## Invalid product context
Context of the given product doesn't match your current scene.
### How to repair?
You can fix this with the "Repair" button on the right. This will use the '{expected_folder}' folder path and overwrite the '{found_folder}' folder path in scene metadata.
After that restart publishing with Reload button.
</description>
<detail>
### How could this happen?
The product was created in different scene with different context
or the scene file was copy pasted from different context.
</detail>
</error>
</root>

View file

@ -1,22 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<root>
<error id="main">
<title>Layer names</title>
<description>## Duplicated layer names
Can't determine which layers should be published because there are duplicated layer names in the scene.
### Duplicated layer names
{layer_names}
*Check layer names for all products in list on left side.*
### How to repair?
Hide/rename/remove layers that should not be published.
If all of them should be published then you have duplicated product names in the scene. In that case you have to recreate them and use a different variant name.
</description>
</error>
</root>

View file

@ -1,20 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<root>
<error id="main">
<title>Layers visibility</title>
<description>## All layers are not visible
Layers visibility was changed during publishing which caused that all layers for product "{instance_name}" are hidden.
### Layer names for **{instance_name}**
{layer_names}
*Check layer names for all products in the list on the left side.*
### How to repair?
Reset publishing and do not change visibility of layers after hitting publish button.
</description>
</error>
</root>

View file

@ -1,21 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<root>
<error id="main">
<title>Frame range</title>
<description>## Invalid render frame range
Scene frame range which will be rendered is defined by MarkIn and MarkOut. Expected frame range is {expected_frame_range} and current frame range is {current_frame_range}.
It is also required that MarkIn and MarkOut are enabled in the scene. Their color is highlighted on the timeline when they are enabled.
- MarkIn is {mark_in_enable_state}
- MarkOut is {mark_out_enable_state}
### How to repair?
You can fix this with the "Repair" button on the right. That will change MarkOut to {expected_mark_out}.
Or you can manually modify MarkIn and MarkOut in the scene timeline.
</description>
</error>
</root>

View file

@ -1,18 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<root>
<error id="main">
<title>Missing layers</title>
<description>## Missing layers for render pass
Render pass product "{instance_name}" has stored layer names that belong to it's rendering scope but layers were not found in scene.
### Missing layer names
{layer_names}
### How to repair?
Find layers that belong to product {instance_name} and rename them back to expected layer names or remove the product and create new with right layers.
</description>
</error>
</root>

View file

@ -1,18 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<root>
<error id="main">
<title>Overused Color group</title>
<description>## One Color group is used by multiple Render Layers
Single color group used by multiple Render Layers would cause clashes of rendered TVPaint layers. The same layers would be used for output files of both groups.
### Groups used by multiple Render Layers
{groups_information}
### How to repair?
Refresh, go to 'Publish' tab and go through Render Layers and change their groups to not clash each other. If you reach limit of TVPaint color groups there is nothing you can do about it to fix the issue.
</description>
</error>
</root>

View file

@ -1,14 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<root>
<error id="main">
<title>Render pass group</title>
<description>## Invalid group of Render Pass layers
Layers of Render Pass {instance_name} belong to Render Group which is defined by TVPaint color group {expected_group}. But the layers are not in the group.
### How to repair?
Change the color group to {expected_group} on layers {layer_names}.
</description>
</error>
</root>

View file

@ -1,26 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<root>
<error id="main">
<title>Scene settings</title>
<description>## Invalid scene settings
Scene settings do not match to expected values.
**FPS**
- Expected value: {expected_fps}
- Current value: {current_fps}
**Resolution**
- Expected value: {expected_width}x{expected_height}
- Current value: {current_width}x{current_height}
**Pixel ratio**
- Expected value: {expected_pixel_ratio}
- Current value: {current_pixel_ratio}
### How to repair?
FPS and Pixel ratio can be modified in scene setting. Wrong resolution can be fixed with changing resolution of scene but due to TVPaint limitations it is possible that you will need to create new scene.
</description>
</error>
</root>

View file

@ -1,14 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<root>
<error id="main">
<title>First frame</title>
<description>## MarkIn is not set to 0
MarkIn in your scene must start from frame index 0 but MarkIn is set to {current_start_frame}.
### How to repair?
You can modify MarkIn manually or hit the "Repair" button on the right which will change MarkIn to 0 (does not change MarkOut).
</description>
</error>
</root>

View file

@ -1,19 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<root>
<error id="main">
<title>Missing metadata</title>
<description>## Your scene miss context metadata
Your scene does not contain metadata about {missing_metadata}.
### How to repair?
Resave the scene using Workfiles tool or hit the "Repair" button on the right.
</description>
<detail>
### How this could happen?
You're using scene file that was not created using Workfiles tool.
</detail>
</error>
</root>

View file

@ -1,24 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<root>
<error id="main">
<title>Project name</title>
<description>## Your scene is from different project
It is not possible to publish into project "{workfile_project_name}" when TVPaint was opened with project "{env_project_name}" in context.
### How to repair?
If the workfile belongs to project "{env_project_name}" then use Workfiles tool to resave it.
Otherwise close TVPaint and launch it again from project you want to publish in.
</description>
<detail>
### How this could happen?
You've opened a workfile from a different project. You've opened TVPaint on a task from "{env_project_name}", then opened TVPaint again on a task from "{workfile_project_name}" without closing TVPaint. Because only one TVPaint instance can run at a time, the project context did not change.
### Why it is important?
Because project may affect how TVPaint works or change publishing behavior it is dangerous to allow change project context in many ways. For example publishing will not run as expected.
</detail>
</error>
</root>

View file

@ -1,25 +0,0 @@
import pyblish.api
from ayon_core.lib import version_up
from ayon_core.pipeline import registered_host
class IncrementWorkfileVersion(pyblish.api.ContextPlugin):
    """Increment current workfile version after successful publish."""

    order = pyblish.api.IntegratorOrder + 1
    label = "Increment Workfile Version"
    optional = True
    hosts = ["tvpaint"]
    settings_category = "tvpaint"

    def process(self, context):
        """Save the current workfile under an incremented version.

        Raises:
            RuntimeError: When any previous publish plugin failed.
        """
        # Do not use 'assert' for runtime validation - assertions are
        # stripped when Python runs with optimizations ('-O').
        if not all(
            result["success"] for result in context.data["results"]
        ):
            raise RuntimeError(
                "Publishing not successful so version is not increased."
            )

        host = registered_host()
        path = context.data["currentFile"]
        host.save_workfile(version_up(path))
        self.log.info('Incrementing workfile version')

View file

@ -1,79 +0,0 @@
import pyblish.api
from ayon_core.pipeline import (
PublishXmlValidationError,
OptionalPyblishPluginMixin,
)
from ayon_tvpaint.api.pipeline import (
list_instances,
write_instances,
)
class FixFolderPaths(pyblish.api.Action):
    """Repair the folder paths.

    Overwrite folder path in instance metadata stored in the workfile
    with the current context folder path.
    """

    label = "Repair"
    icon = "wrench"
    on = "failed"

    def process(self, context, plugin):
        context_folder_path = context.data["folderPath"]

        updated_items = []
        for item in list_instances():
            item_folder_path = item.get("folderPath")
            # Replace only filled paths that differ from the context
            if (
                item_folder_path
                and item_folder_path != context_folder_path
            ):
                item["folderPath"] = context_folder_path
            updated_items.append(item)

        write_instances(updated_items)
class ValidateAssetName(
    OptionalPyblishPluginMixin,
    pyblish.api.ContextPlugin
):
    """Validate folder path present on instance.

    Folder path on instance should be the same as context's.
    """

    label = "Validate Folder Paths"
    order = pyblish.api.ValidatorOrder
    hosts = ["tvpaint"]
    actions = [FixFolderPaths]
    settings_category = "tvpaint"

    def process(self, context):
        if not self.is_active(context.data):
            return

        context_folder_path = context.data["folderPath"]
        for instance in context:
            folder_path = instance.data.get("folderPath")
            # Valid only when the path is filled and matches context
            is_valid = bool(folder_path) and (
                folder_path == context_folder_path
            )
            if is_valid:
                continue

            instance_label = (
                instance.data.get("label") or instance.data["name"]
            )
            raise PublishXmlValidationError(
                self,
                (
                    "Different folder path on instance then context's."
                    " Instance \"{}\" has folder path: \"{}\""
                    " Context folder path is: \"{}\""
                ).format(
                    instance_label, folder_path, context_folder_path
                ),
                formatting_data={
                    "expected_folder": context_folder_path,
                    "found_folder": folder_path
                }
            )

View file

@ -1,55 +0,0 @@
import pyblish.api
from ayon_core.pipeline import PublishXmlValidationError
class ValidateLayersGroup(pyblish.api.InstancePlugin):
    """Validate layer names for publishing are unique for whole workfile."""

    label = "Validate Duplicated Layers Names"
    order = pyblish.api.ValidatorOrder
    families = ["renderPass"]
    settings_category = "tvpaint"

    def process(self, instance):
        # Layers in scene mapped by their name
        layers_by_name = instance.context.data["layersByName"]
        # Layer names belonging to the instance
        layer_names = instance.data["layer_names"]

        # Missing layers are handled by a different validator; only
        # names that resolve to more than one layer are invalid here.
        duplicated_layer_names = [
            layer_name
            for layer_name in layer_names
            if layers_by_name.get(layer_name) is not None
            and len(layers_by_name[layer_name]) > 1
        ]
        if not duplicated_layer_names:
            return

        layers_msg = ", ".join(
            "\"{}\"".format(layer_name)
            for layer_name in duplicated_layer_names
        )
        detail_lines = [
            "- {}".format(layer_name)
            for layer_name in set(duplicated_layer_names)
        ]
        raise PublishXmlValidationError(
            self,
            (
                "Layers have duplicated names for instance {}."
                " There are layers with same name and one of them is marked"
                " for publishing so it is not possible to know which should"
                " be published. Please look for layers with names: {}"
            ).format(instance.data["label"], layers_msg),
            formatting_data={
                "layer_names": "<br/>".join(detail_lines)
            }
        )

View file

@ -1,43 +0,0 @@
import pyblish.api
from ayon_core.pipeline import PublishXmlValidationError
# TODO @iLLiCiTiT add repair action to disable instances?
class ValidateLayersVisiblity(pyblish.api.InstancePlugin):
    """Validate existence of renderPass layers."""

    label = "Validate Layers Visibility"
    order = pyblish.api.ValidatorOrder
    families = ["review", "render"]
    settings_category = "tvpaint"

    def process(self, instance):
        layers = instance.data.get("layers")
        # Empty layer list is validated by a different plugin
        if not layers:
            return

        layer_names = set()
        for layer in layers:
            layer_names.add(layer["name"])
            # A single visible layer makes the instance valid
            if layer["visible"]:
                return

        instance_label = (
            instance.data.get("label") or instance.data["name"]
        )
        raise PublishXmlValidationError(
            self,
            "All layers of instance \"{}\" are not visible.".format(
                instance_label
            ),
            formatting_data={
                "instance_name": instance_label,
                "layer_names": "<br/>".join(
                    "- {}".format(layer_name)
                    for layer_name in layer_names
                )
            }
        )

View file

@ -1,118 +0,0 @@
import json
import pyblish.api
from ayon_core.pipeline import (
PublishXmlValidationError,
OptionalPyblishPluginMixin,
)
from ayon_tvpaint.api.lib import execute_george
class ValidateMarksRepair(pyblish.api.Action):
    """Repair the marks."""

    label = "Repair"
    icon = "wrench"
    on = "failed"

    def process(self, context, plugin):
        expected = ValidateMarks.get_expected_data(context)
        # Apply expected MarkIn/MarkOut back to the scene
        for command in (
            "tv_markin {} set".format(expected["markIn"]),
            "tv_markout {} set".format(expected["markOut"]),
        ):
            execute_george(command)
class ValidateMarks(
    OptionalPyblishPluginMixin,
    pyblish.api.ContextPlugin
):
    """Validate mark in and out are enabled and it's duration.

    Mark In/Out does not have to match frameStart and frameEnd but duration is
    important.
    """

    label = "Validate Mark In/Out"
    order = pyblish.api.ValidatorOrder
    optional = True
    actions = [ValidateMarksRepair]
    settings_category = "tvpaint"

    @staticmethod
    def get_expected_data(context):
        """Calculate expected MarkIn/MarkOut values from context data."""
        scene_mark_in = context.data["sceneMarkIn"]

        # Data collected in `CollectContextEntities`
        frame_end = context.data["frameEnd"]
        frame_start = context.data["frameStart"]
        handle_start = context.data["handleStart"]
        handle_end = context.data["handleEnd"]

        # Expected Mark out is Mark In + duration - 1
        expected_mark_out = (
            scene_mark_in
            + (frame_end - frame_start)
            + handle_start + handle_end
        )
        return {
            "markIn": scene_mark_in,
            "markInState": True,
            "markOut": expected_mark_out,
            "markOutState": True
        }

    def process(self, context):
        if not self.is_active(context.data):
            return

        current_data = {
            "markIn": context.data["sceneMarkIn"],
            "markInState": context.data["sceneMarkInState"],
            "markOut": context.data["sceneMarkOut"],
            "markOutState": context.data["sceneMarkOutState"]
        }
        expected_data = self.get_expected_data(context)

        # Collect keys whose current value differs from expectation
        invalid = {
            key: {
                "current": current_data[key],
                "expected": expected_data[key],
            }
            for key in current_data
            if current_data[key] != expected_data[key]
        }
        if not invalid:
            return

        current_frame_range = (
            current_data["markOut"] - current_data["markIn"]
        ) + 1
        expected_frame_range = (
            expected_data["markOut"] - expected_data["markIn"]
        ) + 1

        mark_in_enable_state = (
            "enabled" if current_data["markInState"] else "disabled"
        )
        mark_out_enable_state = (
            "enabled" if current_data["markOutState"] else "disabled"
        )

        raise PublishXmlValidationError(
            self,
            "Marks does not match database:\n{}".format(
                json.dumps(invalid, sort_keys=True, indent=4)
            ),
            formatting_data={
                "current_frame_range": str(current_frame_range),
                "expected_frame_range": str(expected_frame_range),
                "mark_in_enable_state": mark_in_enable_state,
                "mark_out_enable_state": mark_out_enable_state,
                "expected_mark_out": expected_data["markOut"]
            }
        )

View file

@ -1,57 +0,0 @@
import pyblish.api
from ayon_core.pipeline import PublishXmlValidationError
class ValidateMissingLayers(pyblish.api.InstancePlugin):
    """Validate existence of renderPass layers."""

    label = "Validate Missing Layers Names"
    order = pyblish.api.ValidatorOrder
    families = ["renderPass"]
    settings_category = "tvpaint"

    def process(self, instance):
        # Layers in scene mapped by their name
        layers_by_name = instance.context.data["layersByName"]
        # Layer names belonging to the instance
        layer_names = instance.data["layer_names"]

        # Names that do not resolve to any scene layer are missing
        missing_layer_names = [
            layer_name
            for layer_name in layer_names
            if not layers_by_name.get(layer_name)
        ]
        if not missing_layer_names:
            return

        layers_msg = ", ".join(
            "\"{}\"".format(layer_name)
            for layer_name in missing_layer_names
        )
        instance_label = (
            instance.data.get("label") or instance.data["name"]
        )
        description_layer_names = "<br/>".join(
            "- {}".format(layer_name)
            for layer_name in missing_layer_names
        )
        raise PublishXmlValidationError(
            self,
            (
                "Layers were not found by name for instance \"{}\"."
                " Layer names marked for publishing are not available"
                " in layers list. Missing layer names: {}"
            ).format(instance.data["label"], layers_msg),
            formatting_data={
                "instance_name": instance_label,
                "layer_names": description_layer_names
            }
        )

View file

@ -1,78 +0,0 @@
import collections
import pyblish.api
from ayon_core.pipeline import PublishXmlValidationError
class ValidateRenderLayerGroups(pyblish.api.ContextPlugin):
    """Validate group ids of renderLayer products.

    Validate that there are not 2 render layers using the same group.
    """

    label = "Validate Render Layers Group"
    order = pyblish.api.ValidatorOrder + 0.1
    settings_category = "tvpaint"

    def process(self, context):
        # Group renderLayer instances by their color group id
        render_layers_by_group_id = collections.defaultdict(list)
        for instance in context:
            families = instance.data.get("families")
            if families and "renderLayer" in families:
                group_id = instance.data["creator_attributes"]["group_id"]
                render_layers_by_group_id[group_id].append(instance)

        # Any group used by more than one instance is a clash
        duplicated_instances = [
            (group_id, instances)
            for group_id, instances in render_layers_by_group_id.items()
            if len(instances) > 1
        ]
        if not duplicated_instances:
            return

        # Exception message preparations
        groups_by_id = {
            group["group_id"]: group
            for group in context.data["groupsData"]
        }

        per_group_msgs = []
        groups_information_lines = []
        for group_id, instances in duplicated_instances:
            group = groups_by_id[group_id]
            group_label = "Group \"{}\" ({})".format(
                group["name"],
                group["group_id"],
            )
            line_join_product_names = "\n".join([
                f" - {instance['productName']}"
                for instance in instances
            ])
            joined_product_names = ", ".join([
                f"\"{instance['productName']}\""
                for instance in instances
            ])
            per_group_msgs.append(
                "{} < {} >".format(group_label, joined_product_names)
            )
            groups_information_lines.append(
                "<b>{}</b>\n{}".format(
                    group_label, line_join_product_names
                )
            )

        raise PublishXmlValidationError(
            self,
            (
                "More than one Render Layer is using the same TVPaint"
                " group color. {}"
            ).format(" | ".join(per_group_msgs)),
            formatting_data={
                "groups_information": "\n".join(groups_information_lines)
            }
        )

View file

@ -1,91 +0,0 @@
import collections
import pyblish.api
from ayon_core.pipeline import PublishXmlValidationError
class ValidateLayersGroup(pyblish.api.InstancePlugin):
    """Validate group ids of renderPass layers.

    Validates that all layers are in same group as they were during creation.
    """

    label = "Validate Layers Group"
    order = pyblish.api.ValidatorOrder + 0.1
    families = ["renderPass"]
    settings_category = "tvpaint"

    def process(self, instance):
        # Prepare layers mapped by name for quick lookup
        layers_data = instance.context.data["layersData"]
        layers_by_name = {
            layer["name"]: layer
            for layer in layers_data
        }
        # Expected group id for instance layers
        group_id = instance.data["group_id"]
        # Layers ids of an instance
        layer_names = instance.data["layer_names"]
        # Check if all layers from render pass are in right group
        invalid_layers_by_group_id = collections.defaultdict(list)
        invalid_layer_names = set()
        for layer_name in layer_names:
            layer = layers_by_name.get(layer_name)
            # Bugfix: a missing layer returned 'None' and the following
            # subscript crashed with 'TypeError'. Missing layers are
            # reported by the missing-layers validator, so skip them.
            if layer is None:
                continue
            _group_id = layer["group_id"]
            if _group_id != group_id:
                invalid_layers_by_group_id[_group_id].append(layer)
                invalid_layer_names.add(layer_name)

        # Everything is OK and skip exception
        if not invalid_layers_by_group_id:
            return

        # Exception message preparations
        groups_data = instance.context.data["groupsData"]
        groups_by_id = {
            group["group_id"]: group
            for group in groups_data
        }
        correct_group = groups_by_id[group_id]

        per_group_msgs = []
        for _group_id, layers in invalid_layers_by_group_id.items():
            _group = groups_by_id[_group_id]
            layers_msgs = []
            for layer in layers:
                layers_msgs.append(
                    "\"{}\" (id: {})".format(layer["name"], layer["layer_id"])
                )
            per_group_msgs.append(
                "Group \"{}\" (id: {}) < {} >".format(
                    _group["name"],
                    _group["group_id"],
                    ", ".join(layers_msgs)
                )
            )

        # Raise an error
        raise PublishXmlValidationError(
            self,
            (
                # Short message
                "Layers in wrong group."
                # Description what's wrong
                " Layers from render pass \"{}\" must be in group {} (id: {})."
                # Detailed message
                " Layers in wrong group: {}"
            ).format(
                instance.data["label"],
                correct_group["name"],
                correct_group["group_id"],
                " | ".join(per_group_msgs)
            ),
            formatting_data={
                "instance_name": (
                    instance.data.get("label") or instance.data["name"]
                ),
                "expected_group": correct_group["name"],
                "layer_names": ", ".join(invalid_layer_names)
            }
        )

View file

@ -1,60 +0,0 @@
import json
import pyblish.api
from ayon_core.pipeline import (
PublishXmlValidationError,
OptionalPyblishPluginMixin,
)
# TODO @iLliCiTiT add fix action for fps
class ValidateProjectSettings(
    OptionalPyblishPluginMixin,
    pyblish.api.ContextPlugin
):
    """Validate scene settings against database."""

    label = "Validate Scene Settings"
    order = pyblish.api.ValidatorOrder
    settings_category = "tvpaint"
    optional = True

    def process(self, context):
        if not self.is_active(context.data):
            return

        folder_attributes = context.data["folderEntity"]["attrib"]
        scene_data = {
            "fps": context.data.get("sceneFps"),
            "resolutionWidth": context.data.get("sceneWidth"),
            "resolutionHeight": context.data.get("sceneHeight"),
            "pixelAspect": context.data.get("scenePixelAspect")
        }

        # Compare each scene value against the folder attribute
        invalid = {}
        for key, current_value in scene_data.items():
            expected_value = folder_attributes[key]
            if current_value != expected_value:
                invalid[key] = {
                    "current": current_value,
                    "expected": expected_value,
                }

        if not invalid:
            return

        raise PublishXmlValidationError(
            self,
            "Scene settings does not match database:\n{}".format(
                json.dumps(invalid, sort_keys=True, indent=4)
            ),
            formatting_data={
                "expected_fps": folder_attributes["fps"],
                "current_fps": scene_data["fps"],
                "expected_width": folder_attributes["resolutionWidth"],
                "expected_height": folder_attributes["resolutionHeight"],
                "current_width": scene_data["resolutionWidth"],
                "current_height": scene_data["resolutionHeight"],
                "expected_pixel_ratio": folder_attributes["pixelAspect"],
                "current_pixel_ratio": scene_data["pixelAspect"]
            }
        )

View file

@ -1,48 +0,0 @@
import pyblish.api
from ayon_core.pipeline import (
PublishXmlValidationError,
OptionalPyblishPluginMixin,
)
from ayon_tvpaint.api.lib import execute_george
class RepairStartFrame(pyblish.api.Action):
    """Repair start frame."""

    label = "Repair"
    icon = "wrench"
    on = "failed"

    def process(self, context, plugin):
        # Move the scene start frame back to 0
        execute_george("tv_startframe 0")
class ValidateStartFrame(
    OptionalPyblishPluginMixin,
    pyblish.api.ContextPlugin
):
    """Validate start frame being at frame 0."""

    label = "Validate Start Frame"
    order = pyblish.api.ValidatorOrder
    hosts = ["tvpaint"]
    actions = [RepairStartFrame]
    settings_category = "tvpaint"
    optional = True

    def process(self, context):
        if not self.is_active(context.data):
            return

        # NOTE(review): presumably 'execute_george' returns an integer
        # here; if it returns a string the comparison to 0 never
        # matches - confirm against the george helper implementation.
        start_frame = execute_george("tv_startframe")
        if start_frame == 0:
            return

        raise PublishXmlValidationError(
            self,
            "Start frame has to be frame 0.",
            formatting_data={
                "current_start_frame": start_frame
            }
        )

View file

@ -1,65 +0,0 @@
import pyblish.api
from ayon_core.pipeline import (
PublishXmlValidationError,
PublishValidationError,
registered_host,
)
class ValidateWorkfileMetadataRepair(pyblish.api.Action):
    """Store current context into workfile metadata."""

    label = "Use current context"
    icon = "wrench"
    on = "failed"

    def process(self, context, _plugin):
        """Save current workfile which should trigger storing of metadata."""
        host = registered_host()
        # Saving the workfile stores the context metadata in the scene
        host.save_workfile(context.data["currentFile"])
class ValidateWorkfileMetadata(pyblish.api.ContextPlugin):
    """Validate if workfile contains required metadata for publishing."""

    label = "Validate Workfile Metadata"
    order = pyblish.api.ValidatorOrder
    families = ["workfile"]
    actions = [ValidateWorkfileMetadataRepair]
    settings_category = "tvpaint"

    # Context keys that must be present and filled in workfile metadata
    required_keys = {"project_name", "folder_path", "task_name"}

    def process(self, context):
        workfile_context = context.data["workfile_context"]
        if not workfile_context:
            raise PublishValidationError(
                "Current workfile is missing whole metadata about context.",
                "Missing context",
                (
                    "Current workfile is missing metadata about task."
                    " To fix this issue save the file using Workfiles tool."
                )
            )

        missing_keys = [
            key
            for key in self.required_keys
            if not workfile_context.get(key)
        ]
        if not missing_keys:
            return

        raise PublishXmlValidationError(
            self,
            "Current workfile is missing metadata about {}.".format(
                ", ".join(missing_keys)
            ),
            formatting_data={
                "missing_metadata": ", ".join(missing_keys)
            }
        )

View file

@ -1,55 +0,0 @@
import pyblish.api
from ayon_core.pipeline import PublishXmlValidationError
class ValidateWorkfileProjectName(pyblish.api.ContextPlugin):
    """Validate project name stored in workfile metadata.

    It is not possible to publish from different project than is set in
    environment variable "AYON_PROJECT_NAME".
    """

    label = "Validate Workfile Project Name"
    order = pyblish.api.ValidatorOrder
    settings_category = "tvpaint"

    def process(self, context):
        workfile_context = context.data.get("workfile_context")
        # If workfile context is missing than project is matching to
        # global project
        if not workfile_context:
            self.log.info(
                "Workfile context (\"workfile_context\") is not filled."
            )
            return

        workfile_project_name = workfile_context["project_name"]
        env_project_name = context.data["projectName"]
        if workfile_project_name == env_project_name:
            self.log.info((
                "Both workfile project and environment project are same. {}"
            ).format(env_project_name))
            return

        # Raise an error
        raise PublishXmlValidationError(
            self,
            (
                # Short message
                "Workfile from different Project ({})."
                # Description what's wrong
                # Bugfix: added the missing leading space - the pieces
                # previously concatenated to "launched incontext".
                " It is not possible to publish when TVPaint was launched in"
                " context of different project. Current context project is"
                " \"{}\". Launch TVPaint in context of project \"{}\""
                " and then publish."
            ).format(
                workfile_project_name,
                env_project_name,
                workfile_project_name,
            ),
            formatting_data={
                "workfile_project_name": workfile_project_name,
                # Bugfix: the XML template formats "{env_project_name}";
                # the previous "expected_project_name" key was never used
                # and left the placeholder unfilled.
                "env_project_name": env_project_name
            }
        )

View file

@ -1,6 +0,0 @@
import os
def get_plugin_files_path():
    """Return absolute path to directory with TVPaint plugin files."""
    return os.path.join(
        os.path.dirname(os.path.abspath(__file__)),
        "plugin_files"
    )

View file

@ -1,56 +0,0 @@
# Build configuration for the TVPaint plugin shared library (C/C++).
cmake_minimum_required(VERSION 3.17)
project(OpenPypePlugin C CXX)
set(CMAKE_CXX_STANDARD 17)
set(CMAKE_CXX_EXTENSIONS OFF)
set(IP_ENABLE_UNICODE OFF)
set(IP_ENABLE_DOCTEST OFF)
if(MSVC)
    # Export all symbols so no per-symbol __declspec annotations are needed.
    set(CMAKE_WINDOWS_EXPORT_ALL_SYMBOLS ON)
    add_definitions(-D_CRT_SECURE_NO_WARNINGS)
    # Define WIN64 or WIN32 for TVPaint SDK
    if(CMAKE_SIZEOF_VOID_P EQUAL 8)
        message("64bit")
        add_definitions(-DWIN64)
    elseif(CMAKE_SIZEOF_VOID_P EQUAL 4)
        message("32bit")
        add_definitions(-DWIN32)
    endif()
endif()
# TODO better options
# NOTE(review): 'option()' declares boolean cache entries but these hold
# paths; 'set(<var> "" CACHE PATH "...")' would be more appropriate - confirm.
option(BOOST_ROOT "Path to root of Boost" "")
option(OPENSSL_INCLUDE "OpenSSL include path" "")
option(OPENSSL_LIB_DIR "OpenSSL lib path" "")
option(WEBSOCKETPP_INCLUDE "Websocketpp include path" "")
option(JSONRPCPP_INCLUDE "Jsonrpcpp include path" "")
# Use static boost libraries
set(Boost_USE_STATIC_LIBS ON)
find_package(Boost COMPONENTS random chrono date_time regex REQUIRED)
include_directories(
    "${TVPAINT_SDK_INCLUDE}"
    "${OPENSSL_INCLUDE}"
    "${WEBSOCKETPP_INCLUDE}"
    "${JSONRPCPP_INCLUDE}"
    "${Boost_INCLUDE_DIRS}"
)
link_directories(
    "${OPENSSL_LIB_DIR}"
    "${Boost_LIBRARY_DIRS}"
)
# jsonrpcpp is header-only; expose it as an INTERFACE target.
add_library(jsonrpcpp INTERFACE)
add_library(${PROJECT_NAME} SHARED library.cpp library.def "${TVPAINT_SDK_LIB}/dllx.c")
target_link_libraries(${PROJECT_NAME} ${Boost_LIBRARIES})
target_link_libraries(${PROJECT_NAME} jsonrpcpp)

View file

@ -1,34 +0,0 @@
README for TVPaint Avalon plugin
================================
Introduction
------------
This project is dedicated to integrate Avalon functionality to TVPaint.
This implementation is using TVPaint plugin (C/C++) which can communicate with python process. The communication should allow to trigger tools or pipeline functions from TVPaint and accept requests from python process at the same time.
Current implementation is based on websocket protocol, using json-rpc communication (specification 2.0). Project is in beta stage, tested only on Windows.
To be able to load plugin, environment variable `WEBSOCKET_URL` must be set otherwise plugin won't load at all. Plugin should not affect TVPaint if python server crash, but buttons won't work.
## Requirements - Python server
- python >= 3.6
- aiohttp
- aiohttp-json-rpc
### Windows
- pywin32 - required only for plugin installation
## Requirements - Plugin compilation
- TVPaint SDK - Ask for SDK on TVPaint support.
- Boost 1.72.0 - Boost is used across other plugins (Should be possible to use different version with CMakeLists modification)
- Websocket++/Websocketpp - Websocket library (https://github.com/zaphoyd/websocketpp)
- OpenSSL library - Required by Websocketpp
- jsonrpcpp - C++ library handling json-rpc 2.0 (https://github.com/badaix/jsonrpcpp)
- nlohmann/json - Required for jsonrpcpp (https://github.com/nlohmann/json)
### jsonrpcpp
This library bundles `nlohmann/json`, but the current `master` ships an old version with a bug, so on Windows the library probably cannot be used without the latest `nlohmann/json`.
## TODO
- modify code and CMake to be able to compile on MacOS/Linux
- separate websocket logic from plugin logic
- hide buttons and show error message if server is closed

View file

@ -1,807 +0,0 @@
#ifdef _WIN32
// Include <winsock2.h> before <windows.h>
#include <winsock2.h>
#endif
#include <cstdio>
#include <cstdlib>
#include <iostream>
#include <cstring>
#include <map>
#include <string>
#include <queue>
#include "plugdllx.h"
#include <boost/chrono.hpp>
#include <websocketpp/config/asio_no_tls_client.hpp>
#include <websocketpp/client.hpp>
#include "json.hpp"
#include "jsonrpcpp.hpp"
// All functions not exported should be static.
// All global variables should be static.
// mReq Identification of the requester. (=0 closed, !=0 requester ID)
// Global plugin state shared between the TVPaint callbacks and the
// websocket client thread.
static struct {
    bool firstParams;
    DWORD mReq;
    void* mLocalFile;
    PIFilter *current_filter;
    // Id counter for client requests
    int client_request_id;
    // There are new menu items
    bool newMenuItems;
    // Menu item definitions received from connection
    nlohmann::json menuItems;
    // Menu items used in requester by their ID
    nlohmann::json menuItemsById;
    std::list<int> menuItemsIds;
    // Messages from server before processing.
    // - messages can't be process at the moment of receive as client is running in thread
    std::queue<std::string> messages;
    // Responses to requests mapped by request id
    std::map<int, jsonrpcpp::Response> responses;
} Data = {
    // Aggregate initializer covers the first eight members in order;
    // the remaining containers are value-initialized (empty).
    true,
    0,
    nullptr,
    nullptr,
    1,
    false,
    nlohmann::json::object(),
    nlohmann::json::object()
};
// Json rpc 2.0 parser - for handling messages and callbacks
jsonrpcpp::Parser parser;

// Plain (no TLS) asio websocket client type.
typedef websocketpp::client<websocketpp::config::asio_client> client;
// Tracks a single websocket connection: its status, message dispatch and
// outgoing sends. Handlers run on the websocketpp client thread.
class connection_metadata {
private:
    websocketpp::connection_hdl m_hdl;
    client *m_endpoint;
    // "Connecting" -> "Open" / "Failed" / "Closed" (set by handlers below).
    std::string m_status;
public:
    typedef websocketpp::lib::shared_ptr<connection_metadata> ptr;

    connection_metadata(websocketpp::connection_hdl hdl, client *endpoint)
        : m_hdl(hdl), m_status("Connecting") {
        m_endpoint = endpoint;
    }

    // Lifecycle handlers registered in websocket_endpoint::connect.
    void on_open(client *c, websocketpp::connection_hdl hdl) {
        m_status = "Open";
    }

    void on_fail(client *c, websocketpp::connection_hdl hdl) {
        m_status = "Failed";
    }

    void on_close(client *c, websocketpp::connection_hdl hdl) {
        m_status = "Closed";
    }

    // Called for every incoming frame on the client thread.
    void on_message(websocketpp::connection_hdl, client::message_ptr msg) {
        std::string json_str;
        if (msg->get_opcode() == websocketpp::frame::opcode::text) {
            json_str = msg->get_payload();
        } else {
            // Binary frames are not expected; hex-dump so parsing can fail
            // gracefully instead of crashing on raw bytes.
            json_str = websocketpp::utility::to_hex(msg->get_payload());
        }
        process_message(json_str);
    }

    // Parse one JSON-RPC message. Responses are stored to Data.responses
    // (consumed by Communicator::call_method); requests/notifications are
    // queued to Data.messages and processed later on TVPaint ticks.
    void process_message(std::string msg) {
        std::cout << "--> " << msg << "\n";
        try {
            jsonrpcpp::entity_ptr entity = parser.do_parse(msg);
            if (!entity) {
                // Return error code?
            } else if (entity->is_response()) {
                jsonrpcpp::Response response = jsonrpcpp::Response(entity->to_json());
                Data.responses[response.id().int_id()] = response;
            } else if (entity->is_request() || entity->is_notification()) {
                Data.messages.push(msg);
            }
        }
        // Malformed requests are answered with the prepared error payload.
        catch (const jsonrpcpp::RequestException &e) {
            std::string message = e.to_json().dump();
            std::cout << "<-- " << e.to_json().dump() << "\n";
            send(message);
        }
        catch (const jsonrpcpp::ParseErrorException &e) {
            std::string message = e.to_json().dump();
            std::cout << "<-- " << message << "\n";
            send(message);
        }
        catch (const jsonrpcpp::RpcException &e) {
            std::cerr << "RpcException: " << e.what() << "\n";
            std::string message = jsonrpcpp::ParseErrorException(e.what()).to_json().dump();
            std::cout << "<-- " << message << "\n";
            send(message);
        }
        catch (const std::exception &e) {
            std::cerr << "Exception: " << e.what() << "\n";
        }
    }

    // Send a raw text frame; silently skipped when connection is not open.
    void send(std::string message) {
        if (get_status() != "Open") {
            return;
        }
        websocketpp::lib::error_code ec;
        m_endpoint->send(m_hdl, message, websocketpp::frame::opcode::text, ec);
        if (ec) {
            std::cout << "> Error sending message: " << ec.message() << std::endl;
            return;
        }
    }

    // Convenience wrappers serializing JSON-RPC entities before sending.
    void send_notification(jsonrpcpp::Notification *notification) {
        send(notification->to_json().dump());
    }

    void send_response(jsonrpcpp::Response *response) {
        send(response->to_json().dump());
    }

    void send_request(jsonrpcpp::Request *request) {
        send(request->to_json().dump());
    }

    websocketpp::connection_hdl get_hdl() const {
        return m_hdl;
    }

    std::string get_status() const {
        return m_status;
    }
};
// Owns the websocketpp client, its asio worker thread and the metadata of
// the single active connection. Only one connection at a time is supported.
class websocket_endpoint {
private:
    client m_endpoint;
    connection_metadata::ptr client_metadata;
    // Thread running client::run (the asio event loop).
    websocketpp::lib::shared_ptr<websocketpp::lib::thread> m_thread;
    bool thread_is_running = false;
public:
    websocket_endpoint() {
        // Silence websocketpp's default logging channels.
        m_endpoint.clear_access_channels(websocketpp::log::alevel::all);
        m_endpoint.clear_error_channels(websocketpp::log::elevel::all);
    }

    ~websocket_endpoint() {
        close_connection();
    }

    // Stop the event loop, close an open connection and join the thread.
    void close_connection() {
        m_endpoint.stop_perpetual();
        if (connected())
        {
            // Close client
            close(websocketpp::close::status::normal, "");
        }
        if (thread_is_running) {
            // Join thread
            m_thread->join();
            thread_is_running = false;
        }
    }

    bool connected()
    {
        return (client_metadata && client_metadata->get_status() == "Open");
    }

    // Start the asio loop in its thread and initiate connection to 'uri'.
    // Returns 1 when the connection was initiated, 0 when already
    // connected, -1 on initialization error.
    // NOTE(review): calling connect() again after a failed attempt calls
    // init_asio() on the same endpoint a second time - confirm websocketpp
    // tolerates that before reusing this object for reconnects.
    int connect(std::string const &uri) {
        if (client_metadata && client_metadata->get_status() == "Open") {
            std::cout << "> Already connected" << std::endl;
            return 0;
        }
        m_endpoint.init_asio();
        m_endpoint.start_perpetual();
        m_thread.reset(new websocketpp::lib::thread(&client::run, &m_endpoint));
        thread_is_running = true;

        websocketpp::lib::error_code ec;
        client::connection_ptr con = m_endpoint.get_connection(uri, ec);
        if (ec) {
            std::cout << "> Connect initialization error: " << ec.message() << std::endl;
            return -1;
        }
        client_metadata = websocketpp::lib::make_shared<connection_metadata>(con->get_handle(), &m_endpoint);

        // Route all connection events to the metadata object.
        con->set_open_handler(websocketpp::lib::bind(
            &connection_metadata::on_open,
            client_metadata,
            &m_endpoint,
            websocketpp::lib::placeholders::_1
        ));
        con->set_fail_handler(websocketpp::lib::bind(
            &connection_metadata::on_fail,
            client_metadata,
            &m_endpoint,
            websocketpp::lib::placeholders::_1
        ));
        con->set_close_handler(websocketpp::lib::bind(
            &connection_metadata::on_close,
            client_metadata,
            &m_endpoint,
            websocketpp::lib::placeholders::_1
        ));
        con->set_message_handler(websocketpp::lib::bind(
            &connection_metadata::on_message,
            client_metadata,
            websocketpp::lib::placeholders::_1,
            websocketpp::lib::placeholders::_2
        ));

        m_endpoint.connect(con);
        return 1;
    }

    void close(websocketpp::close::status::value code, std::string reason) {
        if (!client_metadata || client_metadata->get_status() != "Open") {
            std::cout << "> Not connected yet" << std::endl;
            return;
        }
        websocketpp::lib::error_code ec;
        m_endpoint.close(client_metadata->get_hdl(), code, reason, ec);
        if (ec) {
            std::cout << "> Error initiating close: " << ec.message() << std::endl;
        }
    }

    void send(std::string message) {
        if (!client_metadata || client_metadata->get_status() != "Open") {
            std::cout << "> Not connected yet" << std::endl;
            return;
        }
        client_metadata->send(message);
    }

    // The send_* helpers below assume client_metadata is set (connect ran).
    void send_notification(jsonrpcpp::Notification *notification) {
        client_metadata->send_notification(notification);
    }

    void send_response(jsonrpcpp::Response *response) {
        client_metadata->send(response->to_json().dump());
    }

    void send_response(std::shared_ptr<jsonrpcpp::Entity> response) {
        client_metadata->send(response->to_json().dump());
    }

    void send_request(jsonrpcpp::Request *request) {
        client_metadata->send_request(request);
    }
};
// High level wrapper over the websocket endpoint: tracks whether the
// AYON/avalon server is reachable and exposes JSON-RPC calls to the plugin.
class Communicator {
private:
    // URL to websocket server
    std::string websocket_url;
    // Should be avalon plugin available?
    // - this may change during processing if websocket url is not set or server is down
    bool server_available;
public:
    Communicator(std::string url);
    // NOTE(review): declared but never defined in this file - calling it
    // would fail at link time; confirm it can be removed.
    Communicator();
    websocket_endpoint endpoint;
    bool is_connected();
    bool is_usable();
    void connect();
    // Forward one queued server request/notification to parser callbacks.
    void process_requests();
    // Blocking JSON-RPC call; waits for the matching response id.
    jsonrpcpp::Response call_method(std::string method_name, nlohmann::json params);
    // Fire-and-forget JSON-RPC notification.
    void call_notification(std::string method_name, nlohmann::json params);
};
// Create a communicator pointed at 'url'.
// An empty URL means the integration server can never be reached, so the
// communicator starts (and stays) unusable.
Communicator::Communicator(std::string url) {
    websocket_url = url;
    server_available = !url.empty();
}
// True while the websocket connection is actually open.
bool Communicator::is_connected(){
    return endpoint.connected();
}
// True when a server URL was provided and no connect attempt has failed.
bool Communicator::is_usable(){
    return server_available;
}
// Try to open the websocket connection.
// A failed initialization (-1 from the endpoint) permanently flags the
// server as unavailable; 0 ("already connected") and 1 keep it available.
void Communicator::connect()
{
    if (!server_available) {
        return;
    }
    server_available = (endpoint.connect(websocket_url) != -1);
}
// Send a JSON-RPC notification (no response expected).
// Silently dropped when the server is unavailable or disconnected.
void Communicator::call_notification(std::string method_name, nlohmann::json params) {
    if (!server_available || !is_connected()) {return;}

    jsonrpcpp::Notification notification = {method_name, params};
    endpoint.send_notification(&notification);
}
// Send a JSON-RPC request and block until its response arrives.
// Returns a default-constructed Response when the server is unreachable.
// NOTE(review): there is no timeout - this busy-waits (100ms polls)
// forever if the server dies before answering; confirm this is intended.
jsonrpcpp::Response Communicator::call_method(std::string method_name, nlohmann::json params) {
    jsonrpcpp::Response response;
    if (!server_available || !is_connected())
    {
        return response;
    }
    int request_id = Data.client_request_id++;
    jsonrpcpp::Request request = {request_id, method_name, params};
    endpoint.send_request(&request);

    bool found = false;
    while (!found) {
        // Data.responses is filled by connection_metadata::process_message
        // running on the websocket client thread.
        std::map<int, jsonrpcpp::Response>::iterator iter = Data.responses.find(request_id);
        if (iter != Data.responses.end()) {
            //element found == was found response
            response = iter->second;
            Data.responses.erase(request_id);
            found = true;
        } else {
            std::this_thread::sleep_for(std::chrono::milliseconds(100));
        }
    }
    return response;
}
// Process one queued server message (called repeatedly on TVPaint ticks).
// Registered callbacks (define_menu/execute_george) run inside parser.parse.
void Communicator::process_requests() {
    if (!server_available || !is_connected() || Data.messages.empty()) {return;}

    std::string msg = Data.messages.front();
    Data.messages.pop();
    std::cout << "Parsing: " << msg << std::endl;
    // TODO: add try->except block
    auto response = parser.parse(msg);
    if (response->is_response()) {
        endpoint.send_response(response);
    } else {
        // No callback handled it - answer with JSON-RPC "method not found".
        // NOTE(review): assumes the entity casts to a Request; a queued
        // notification without callback would yield a null pointer here.
        jsonrpcpp::request_ptr request = std::dynamic_pointer_cast<jsonrpcpp::Request>(response);
        jsonrpcpp::Error error("Method \"" + request->method() + "\" not found", -32601);
        jsonrpcpp::Response _response(request->id(), error);
        endpoint.send_response(&_response);
    }
}
// JSON-RPC callback: store the menu definition sent by the server and flag
// the requester for rebuild (performed later in newMenuItemsProcess).
jsonrpcpp::response_ptr define_menu(const jsonrpcpp::Id &id, const jsonrpcpp::Parameter &params) {
    /* Define plugin menu.

    Menu is defined with json with "title" and "menu_items".
    Each item in "menu_items" must have keys:
    - "callback" - callback called with RPC when button is clicked
    - "label" - label of button
    - "help" - tooltip of button
    ```
    {
        "title": "< Menu title>",
        "menu_items": [
            {
                "callback": "workfiles_tool",
                "label": "Workfiles",
                "help": "Open workfiles tool"
            },
            ...
        ]
    }
    ```
    */
    Data.menuItems = params.to_json()[0];
    Data.newMenuItems = true;

    std::string output;
    return std::make_shared<jsonrpcpp::Response>(id, output);
}
// JSON-RPC callback: run a george script in TVPaint and return its output.
jsonrpcpp::response_ptr execute_george(const jsonrpcpp::Id &id, const jsonrpcpp::Parameter &params) {
    const char *george_script;
    // Output buffer for TVSendCmd; results longer than 1024 bytes would be
    // truncated by TVPaint - assumed sufficient for george output.
    char cmd_output[1024] = {0};
    char empty_char = {0};
    std::string std_george_script;
    std::string output;

    nlohmann::json json_params = params.to_json();
    std_george_script = json_params[0];
    george_script = std_george_script.c_str();
    // Result of `TVSendCmd` is int with length of output string
    TVSendCmd(Data.current_filter, george_script, cmd_output);

    // Copy buffer contents up to the first NUL into the response string.
    for (int i = 0; i < sizeof(cmd_output); i++)
    {
        if (cmd_output[i] == empty_char){
            break;
        }
        output += cmd_output[i];
    }
    return std::make_shared<jsonrpcpp::Response>(id, output);
}
// Register the methods the server may call on this plugin.
void register_callbacks(){
    parser.register_request_callback("define_menu", define_menu);
    parser.register_request_callback("execute_george", execute_george);
}

// Global communicator; stays nullptr unless PI_Open finds WEBSOCKET_URL.
Communicator* communication = nullptr;
////////////////////////////////////////////////////////////////////////////////////////
// Return localized string number 'iNum' from the loaded localisation file,
// falling back to 'iDefault' when the file or the entry is missing/empty.
static char* GetLocalString( PIFilter* iFilter, int iNum, char* iDefault )
{
    char* str;

    if( Data.mLocalFile == NULL )
        return iDefault;

    str = TVGetLocalString( iFilter, Data.mLocalFile, iNum );
    if( str == NULL || strlen( str ) == 0 )
        return iDefault;

    return str;
}
/**************************************************************************************/
// Localisation
// numbers (like 10011) are IDs in the localized file.
// strings are the default values to use when the ID is not found
// in the localized file (or the localized file doesn't exist).
// Resolve the plugin menu label from the AYON_MENU_LABEL environment
// variable, falling back to "OpenPype" when the variable is unset or empty.
//
// Fix: the previous code compared the getenv() result to the literal ""
// with `!=`, which is a pointer comparison that is effectively always true,
// so an empty value produced an empty label. Emptiness is now checked on
// the string content.
std::string label_from_evn()
{
    std::string _plugin_label = "OpenPype";
    const char *env_value = std::getenv("AYON_MENU_LABEL");
    if (env_value != nullptr && env_value[0] != '\0')
    {
        _plugin_label = env_value;
    }
    return _plugin_label;
}
// Menu label resolved once at DLL load time.
std::string plugin_label = label_from_evn();

// Localized strings with defaults (IDs resolved via GetLocalString).
#define TXT_REQUESTER GetLocalString( iFilter, 100, "OpenPype Tools" )
#define TXT_REQUESTER_ERROR GetLocalString( iFilter, 30001, "Can't Open Requester !" )
////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////
// The functions directly called by Aura through the plugin interface
/**************************************************************************************/
// "About" function.
// "About" function.
// Opens a simple warning popup showing the plugin name and version.
void FAR PASCAL PI_About( PIFilter* iFilter )
{
    char text[256];

    // snprintf bounds the write to the buffer size - the previous sprintf
    // had no bound and could overflow on a long PIName.
    snprintf( text, sizeof(text), "%s %d,%d", iFilter->PIName, iFilter->PIVersion, iFilter->PIRevision );

    // Just open a warning popup with the filter name and version.
    // You can open a much nicer requester if you want.
    TVWarning( iFilter, text );
}
/**************************************************************************************/
// Function called at Aura startup, when the filter is loaded.
// Should do as little as possible to keep Aura's startup time small.
// Function called at Aura startup, when the filter is loaded.
// Should do as little as possible to keep Aura's startup time small.
// Returns 1 (OK) unconditionally.
int FAR PASCAL PI_Open( PIFilter* iFilter )
{
    Data.current_filter = iFilter;
    char tmp[256];

    strcpy( iFilter->PIName, plugin_label.c_str() );
    iFilter->PIVersion = 1;
    iFilter->PIRevision = 0;

    // If this plugin was the one open at Aura shutdown, re-open it
    TVReadUserString( iFilter, iFilter->PIName, "Open", tmp, "0", 255 );
    if( atoi( tmp ) )
    {
        PI_Parameters( iFilter, NULL ); // NULL as iArg means "open the requester"
    }

    // The websocket client is only created when the launcher exported
    // WEBSOCKET_URL; otherwise the plugin runs without a connection.
    char *env_value = std::getenv("WEBSOCKET_URL");
    if (env_value != NULL) {
        communication = new Communicator(env_value);
        communication->connect();
        register_callbacks();
    }
    return  1; // OK
}
/**************************************************************************************/
// Aura shutdown: we make all the necessary cleanup
// Aura shutdown: we make all the necessary cleanup.
// Closes the localisation file, the requester and the websocket connection.
void FAR PASCAL PI_Close( PIFilter* iFilter )
{
    if( Data.mLocalFile )
    {
        TVCloseLocalFile( iFilter, Data.mLocalFile );
    }
    if( Data.mReq )
    {
        TVCloseReq( iFilter, Data.mReq );
    }
    if (communication != nullptr) {
        communication->endpoint.close_connection();
        delete communication;
        // Fix: reset the global so no later callback can dereference the
        // freed Communicator (previously left dangling).
        communication = nullptr;
    }
}
// Rebuild the requester content from Data.menuItems.
// Returns 1 when the menu was (re)built, 0 when there is no requester.
int newMenuItemsProcess(PIFilter* iFilter) {
    // Menu items defined with `define_menu` should be propagated.
    // Change flag that there are new menu items (avoid infinite loop)
    Data.newMenuItems = false;
    // Skip if requester does not exists
    if (Data.mReq == 0) {
        return 0;
    }

    // Remove all previous menu items
    for (int menu_id : Data.menuItemsIds)
    {
        TVRemoveButtonReq(iFilter, Data.mReq, menu_id);
    }
    // Clear caches
    Data.menuItemsById.clear();
    Data.menuItemsIds.clear();

    // We use a variable to contains the vertical position of the buttons.
    // Each time we create a button, we add its size to this variable.
    // This makes it very easy to add/remove/displace buttons in a requester.
    int x_pos = 9;
    int y_pos = 5;
    // Menu width
    int menu_width = 185;
    // Single menu item width
    int btn_width = menu_width - 19;
    // Single row height (btn height is 18)
    int row_height = 20;
    // Additional height to menu
    int height_offset = 5;

    // This is a very simple requester, so we create it's content right here instead
    // of waiting for the PICBREQ_OPEN message...
    // Not recommended for more complex requesters. (see the other examples)
    const char *menu_title = TXT_REQUESTER;
    if (Data.menuItems.contains("title"))
    {
        // Pointer into Data.menuItems storage - valid while menuItems lives.
        menu_title = Data.menuItems["title"].get<nlohmann::json::string_t*>()->c_str();
    }
    // Sets the title of the requester.
    TVSetReqTitle( iFilter, Data.mReq, menu_title );

    // Resize menu
    // First get current position and sizes (we only need the position)
    int current_x = 0;
    int current_y = 0;
    int current_width = 0;
    int current_height = 0;
    TVInfoReq(iFilter, Data.mReq, &current_x, &current_y, &current_width, &current_height);
    // Calculate new height
    int menu_height = (row_height * Data.menuItems["menu_items"].size()) + height_offset;
    // Resize
    TVResizeReq(iFilter, Data.mReq, current_x, current_y, menu_width, menu_height);

    // Add menu items
    // Button ids are multiples of 10 (10, 20, ...).
    int item_counter = 1;
    for (auto& item : Data.menuItems["menu_items"].items())
    {
        int item_id = item_counter * 10;
        item_counter ++;
        std::string item_id_str = std::to_string(item_id);
        nlohmann::json item_data = item.value();
        const char *item_label = item_data["label"].get<nlohmann::json::string_t*>()->c_str();
        const char *help_text = item_data["help"].get<nlohmann::json::string_t*>()->c_str();
        std::string item_callback = item_data["callback"].get<std::string>();

        TVAddButtonReq(iFilter, Data.mReq, x_pos, y_pos, btn_width, 0, item_id, PIRBF_BUTTON_NORMAL|PIRBF_BUTTON_ACTION, item_label);
        TVSetButtonInfoText( iFilter, Data.mReq, item_id, help_text );

        y_pos += row_height;
        // Remember callback name and id for button handling in PI_Msg.
        Data.menuItemsById[std::to_string(item_id)] = item_callback;
        Data.menuItemsIds.push_back(item_id);
    }
    return 1;
}
/**************************************************************************************/
// we have something to do !
// we have something to do !
// NULL iArg means "open the requester (or bring it to front)".
int FAR PASCAL PI_Parameters( PIFilter* iFilter, char* iArg )
{
    if( !iArg )
    {
        // If the requester is not open, we open it.
        if( Data.mReq == 0)
        {
            // Create empty requester because menu items are defined with
            // `define_menu` callback
            DWORD req = TVOpenFilterReqEx(
                iFilter,
                185,
                20,
                NULL,
                NULL,
                PIRF_STANDARD_REQ | PIRF_COLLAPSABLE_REQ,
                FILTERREQ_NO_TBAR
            );
            if( req == 0 )
            {
                TVWarning( iFilter, TXT_REQUESTER_ERROR );
                return 0;
            }
            Data.mReq = req;

            // This is a very simple requester, so we create it's content right here instead
            // of waiting for the PICBREQ_OPEN message...
            // Not recommended for more complex requesters. (see the other examples)

            // Sets the title of the requester.
            TVSetReqTitle( iFilter, Data.mReq, TXT_REQUESTER );
            // Request to listen to ticks
            TVGrabTicks(iFilter, req, PITICKS_FLAG_ON);
            // On the very first open there are no menu items from the
            // server yet; later opens rebuild the items immediately.
            if ( Data.firstParams == true ) {
                Data.firstParams = false;
            } else {
                newMenuItemsProcess(iFilter);
            }
        }
        else
        {
            // If it is already open, we just put it on front of all other requesters.
            TVReqToFront( iFilter, Data.mReq );
        }
    }
    return 1;
}
/**************************************************************************************/
// something happened that needs our attention.
// Global variable where current button up data are stored
// Global variable where current button up data are stored
std::string button_up_item_id_str;
// Requester event dispatcher: button clicks, close and periodic ticks.
int FAR PASCAL PI_Msg( PIFilter* iFilter, INTPTR iEvent, INTPTR iReq, INTPTR* iArgs )
{
    Data.current_filter = iFilter;
    // what did happen ?
    switch( iEvent )
    {
        // The user just 'clicked' on a normal button
        case PICBREQ_BUTTON_UP:
            // iArgs[0] holds the clicked button id; map it to the callback
            // name stored by newMenuItemsProcess.
            button_up_item_id_str = std::to_string(iArgs[0]);
            if (Data.menuItemsById.contains(button_up_item_id_str))
            {
                std::string callback_name = Data.menuItemsById[button_up_item_id_str].get<std::string>();
                // NOTE(review): relies on 'communication' being set - menu
                // items only exist when a server connection defined them.
                communication->call_method(callback_name, nlohmann::json::array());
            }
            TVExecute( iFilter );
            break;

        // The requester was just closed.
        case PICBREQ_CLOSE:
            // requester doesn't exists anymore
            Data.mReq = 0;
            char tmp[256];
            // Save the requester state (opened or closed)
            // iArgs[4] contains a flag which tells us if the requester
            // has been closed by the user (flag=0) or by Aura's shutdown (flag=1).
            // If it was by Aura's shutdown, that means this requester was the
            // last one open, so we should reopen this one the next time Aura
            // is started. Else we won't open it next time.
            sprintf( tmp, "%d", (int)(iArgs[4]) );
            // Save it in Aura's init file.
            TVWriteUserString( iFilter, iFilter->PIName, "Open", tmp );
            break;

        case PICBREQ_TICKS:
            // Periodic tick: rebuild the menu when new items arrived and
            // pump one queued server message.
            if (Data.newMenuItems)
            {
                newMenuItemsProcess(iFilter);
            }
            if (communication != nullptr) {
                communication->process_requests();
            }
    }
    return  1;
}
/**************************************************************************************/
// Start of the 'execution' of the filter for a new sequence.
// - iNumImages contains the total number of frames to be processed.
// Here you should allocate memory that is used for all frames,
// and precompute all the stuff that doesn't change from frame to frame.
int FAR PASCAL PI_SequenceStart( PIFilter* iFilter, int iNumImages )
{
    // Nothing to allocate/precompute for this plugin.
    // 1 means 'continue', 0 means 'error, abort' (like 'not enough memory')
    return 1;
}
// Here you should cleanup what you've done in PI_SequenceStart.
// Nothing was allocated there, so this is intentionally empty.
void FAR PASCAL PI_SequenceFinish( PIFilter* iFilter )
{}
/**************************************************************************************/
// This is called before each frame.
// Here you should allocate memory and precompute all the stuff you can.
int FAR PASCAL PI_Start( PIFilter* iFilter, double iPos, double iSize )
{
    // No per-frame preparation needed; always continue.
    return 1;
}
void FAR PASCAL PI_Finish( PIFilter* iFilter )
{
    // nothing special to cleanup (nothing allocated in PI_Start)
}
/**************************************************************************************/
// 'Execution' of the filter.
int FAR PASCAL PI_Work( PIFilter* iFilter )
{
    // This plugin does no image processing; report success.
    return 1;
}

View file

@ -1,10 +0,0 @@
; Module-definition file for the TVPaint "Avalonplugin" DLL.
; Lists the plugin entry points TVPaint resolves at load time.
; NOTE(review): PI_SequenceStart and PI_SequenceFinish are defined in the
; plugin source but not exported here - confirm whether TVPaint requires
; them in the export table.
LIBRARY Avalonplugin
EXPORTS
PI_Msg
PI_Open
PI_About
PI_Parameters
PI_Start
PI_Work
PI_Finish
PI_Close

View file

@ -1,3 +0,0 @@
# -*- coding: utf-8 -*-
"""Package declaring AYON addon 'tvpaint' version."""
# Single source of truth for the addon version (semver string).
__version__ = "0.2.2"

View file

@ -1,21 +0,0 @@
# Public API of the TVPaint worker package.
from .worker_job import (
    JobFailed,
    ExecuteSimpleGeorgeScript,
    ExecuteGeorgeScript,
    CollectSceneData,
    SenderTVPaintCommands,
    ProcessTVPaintCommands
)
from .worker import main


# Explicit public interface of the package.
__all__ = (
    "JobFailed",
    "ExecuteSimpleGeorgeScript",
    "ExecuteGeorgeScript",
    "CollectSceneData",
    "SenderTVPaintCommands",
    "ProcessTVPaintCommands",
    "main"
)

View file

@ -1,156 +0,0 @@
import os
import signal
import time
import tempfile
import shutil
import asyncio
from ayon_tvpaint.api.communication_server import (
BaseCommunicator,
CommunicationWrapper
)
from ayon_jobqueue.job_workers import WorkerJobsConnection
from .worker_job import ProcessTVPaintCommands
class TVPaintWorkerCommunicator(BaseCommunicator):
    """Modified communicator which cares about processing jobs.

    Received jobs are sent to TVPaint by parsing 'ProcessTVPaintCommands'.

    Args:
        server_url (str): URL of the job queue server to register at.
    """
    def __init__(self, server_url):
        super().__init__()

        # Return code of 'main_loop'; set to 0 on a clean 'stop()'.
        self.return_code = 1
        self._server_url = server_url
        self._worker_connection = None

    def _start_webserver(self):
        """Create connection to workers server before TVPaint server."""
        loop = self.websocket_server.loop
        self._worker_connection = WorkerJobsConnection(
            self._server_url, "tvpaint", loop
        )
        # Worker registration happens later in '_on_client_connect'.
        asyncio.ensure_future(
            self._worker_connection.main_loop(register_worker=False),
            loop=loop
        )

        super()._start_webserver()

    def _open_init_file(self):
        """Open init TVPaint file.

        File triggers dialog missing path to audio file which must be closed
        once and is ignored for rest of running process.
        """
        current_dir = os.path.dirname(os.path.abspath(__file__))
        init_filepath = os.path.join(current_dir, "init_file.tvpp")
        # Work on a temp copy so the bundled init file is never modified.
        with tempfile.NamedTemporaryFile(
            mode="w", prefix="a_tvp_", suffix=".tvpp"
        ) as tmp_file:
            tmp_filepath = tmp_file.name.replace("\\", "/")
        shutil.copy(init_filepath, tmp_filepath)

        george_script = "tv_LoadProject '\"'\"{}\"'\"'".format(tmp_filepath)
        self.execute_george_through_file(george_script)
        self.execute_george("tv_projectclose")
        os.remove(tmp_filepath)

    def _on_client_connect(self, *args, **kwargs):
        super()._on_client_connect(*args, **kwargs)
        self._open_init_file()
        # Register as "ready to work" worker
        self._worker_connection.register_as_worker()

    def stop(self):
        """Stop worker connection and TVPaint server."""
        self._worker_connection.stop()
        self.return_code = 0
        super().stop()

    @property
    def current_job(self):
        """Retrieve job which should be processed."""
        if self._worker_connection:
            return self._worker_connection.current_job
        return None

    def _check_process(self):
        # Returns True while the TVPaint process is still alive; once it
        # exits, disconnect the worker and shut everything down.
        if self.process is None:
            return True

        if self.process.poll() is not None:
            asyncio.ensure_future(
                self._worker_connection.disconnect(),
                loop=self.websocket_server.loop
            )
            self._exit()
            return False
        return True

    def _process_job(self):
        # Process a single pending job (if any) and report the result back.
        job = self.current_job
        if job is None:
            return

        # Prepare variables used for sending
        success = False
        message = "Unknown function"
        data = None
        job_data = job["data"]
        workfile = job_data["workfile"]
        # Currently can process only "commands" function
        if job_data.get("function") == "commands":
            try:
                commands = ProcessTVPaintCommands(
                    workfile, job_data["commands"], self
                )
                commands.execute()
                data = commands.response_data()
                success = True
                message = "Executed"

            except Exception as exc:
                message = "Error on worker: {}".format(str(exc))

        self._worker_connection.finish_job(success, message, data)

    def main_loop(self):
        """Main loop where jobs are processed.

        Server is stopped by killing this process or TVPaint process.

        Returns:
            int: Process return code.
        """

        while self.server_is_running:
            if self._check_process():
                self._process_job()
            time.sleep(1)

        return self.return_code
def _start_tvpaint(tvpaint_executable_path, server_url):
    """Create the worker communicator and launch TVPaint through it."""
    worker_communicator = TVPaintWorkerCommunicator(server_url)
    CommunicationWrapper.set_communicator(worker_communicator)
    worker_communicator.launch([tvpaint_executable_path])
def main(tvpaint_executable_path, server_url):
    """Run the TVPaint worker until it is stopped.

    Registers SIGINT/SIGTERM handlers that stop the communicator, launches
    TVPaint and then blocks in the communicator main loop.

    Returns:
        int: Process return code (1 when communicator was not created).
    """

    def signal_handler(*_args):
        print("Termination signal received. Stopping.")
        current = CommunicationWrapper.communicator
        if current is not None:
            current.stop()

    # React to both interactive interrupts and service termination.
    for signal_type in (signal.SIGINT, signal.SIGTERM):
        signal.signal(signal_type, signal_handler)

    _start_tvpaint(tvpaint_executable_path, server_url)

    communicator = CommunicationWrapper.communicator
    if communicator is None:
        print("Communicator is not set")
        return 1

    return communicator.main_loop()

View file

@ -1,537 +0,0 @@
import os
import tempfile
import inspect
import copy
import json
import time
from uuid import uuid4
from abc import ABCMeta, abstractmethod, abstractproperty

import six

from ayon_core.lib import Logger
# NOTE(review): 'AddonsManger' looks like a misspelling of 'AddonsManager'
# (and 'ayon_core.addons' of 'ayon_core.addon') - verify this import
# resolves against the ayon_core version in use.
from ayon_core.addons import AddonsManger

# Prefix of temporary files created by worker commands.
TMP_FILE_PREFIX = "opw_tvp_"
class JobFailed(Exception):
    """Raised when job was sent and finished unsuccessfully."""

    def __init__(self, job_status):
        """Build a readable error from the job status payload.

        Args:
            job_status (dict): Status with "state" and "message" keys.
        """
        state = job_status["state"]
        message = job_status["message"] or "Unknown issue"
        self.job_status = job_status
        super().__init__(
            "Job didn't finish properly."
            " Job state: \"{}\" | Job message: \"{}\"".format(state, message)
        )
@six.add_metaclass(ABCMeta)
class BaseCommand:
    """Abstract TVPaint command which can be executed through worker.

    Each command must have unique name and implemented 'execute' and
    'from_existing' methods.

    Command also have id which is created on command creation.

    The idea is that command is just a data container on sender side send
    through server to a worker where is replicated one by one, executed and
    result sent back to sender through server.
    """
    @abstractproperty
    def name(self):
        """Command name (must be unique)."""
        pass

    def __init__(self, data=None):
        # Deep copy incoming data so the caller's dict is never mutated.
        if data is None:
            data = {}
        else:
            data = copy.deepcopy(data)
        # Use 'id' from data when replicating on process side
        command_id = data.get("id")
        if command_id is None:
            command_id = str(uuid4())
        data["id"] = command_id
        data["command"] = self.name

        self._parent = None
        self._result = None
        self._command_data = data
        self._done = False

    def job_queue_root(self):
        """Access to job queue root.

        Job queue root is shared access point to files shared across senders
        and workers.
        """
        if self._parent is None:
            return None
        return self._parent.job_queue_root()

    def set_parent(self, parent):
        # Parent is expected to be a 'TVPaintCommands' instance.
        self._parent = parent

    @property
    def id(self):
        """Command id."""
        return self._command_data["id"]

    @property
    def parent(self):
        """Parent of command expected type of 'TVPaintCommands'."""
        return self._parent

    @property
    def communicator(self):
        """TVPaint communicator.

        Available only on worker side.
        """
        return self._parent.communicator

    @property
    def done(self):
        """Is command done."""
        return self._done

    def set_done(self):
        """Change state of done."""
        self._done = True

    def set_result(self, result):
        """Set result of executed command."""
        self._result = result

    def result(self):
        """Result of command (deep copied so callers can't mutate it)."""
        return copy.deepcopy(self._result)

    def response_data(self):
        """Data send as response to sender."""
        return {
            "id": self.id,
            "result": self._result,
            "done": self._done
        }

    def command_data(self):
        """Raw command data."""
        return copy.deepcopy(self._command_data)

    @abstractmethod
    def execute(self):
        """Execute command on worker side."""
        pass

    @classmethod
    @abstractmethod
    def from_existing(cls, data):
        """Recreate object based on passed data."""
        pass

    def execute_george(self, george_script):
        """Execute george script in TVPaint."""
        return self.parent.execute_george(george_script)

    def execute_george_through_file(self, george_script):
        """Execute george script through temp file in TVPaint."""
        return self.parent.execute_george_through_file(george_script)
class ExecuteSimpleGeorgeScript(BaseCommand):
    """Execute simple george script in TVPaint.

    Args:
        script(str): Script that will be executed.
    """
    name = "execute_george_simple"

    def __init__(self, script, data=None):
        data = data or {}
        # Keep the script in raw data so the command can be replicated.
        data["script"] = script
        self._script = script
        super().__init__(data)

    def execute(self):
        # Output of the george call becomes this command's result.
        self._result = self.execute_george(self._script)

    @classmethod
    def from_existing(cls, data):
        """Recreate the command from raw data on the worker side."""
        return cls(data.pop("script"), data)
class ExecuteGeorgeScript(BaseCommand):
    """Execute multiline george script in TVPaint.

    Args:
        script_lines(list): Lines that will be executed in george script
            through temp george file.
        tmp_file_keys(list): List of formatting keys in george script that
            require replacement with path to a temp file where result will be
            stored. The content of file is stored to result by the key.
        root_dir_key(str): Formatting key that will be replaced in george
            script with job queue root which can be different on worker side.
        data(dict): Raw data about command.
    """
    name = "execute_george_through_file"

    def __init__(
        self, script_lines, tmp_file_keys=None, root_dir_key=None, data=None
    ):
        data = data or {}
        if not tmp_file_keys:
            tmp_file_keys = data.get("tmp_file_keys") or []

        data["script_lines"] = script_lines
        data["tmp_file_keys"] = tmp_file_keys
        data["root_dir_key"] = root_dir_key

        self._script_lines = script_lines
        self._tmp_file_keys = tmp_file_keys
        self._root_dir_key = root_dir_key
        super().__init__(data)

    def execute(self):
        filepath_by_key = {}
        script = self._script_lines
        if isinstance(script, list):
            script = "\n".join(script)

        # Replace temporary files in george script
        # Files are created closed (delete=False) so TVPaint can write them.
        for key in self._tmp_file_keys:
            output_file = tempfile.NamedTemporaryFile(
                mode="w", prefix=TMP_FILE_PREFIX, suffix=".txt", delete=False
            )
            output_file.close()
            format_key = "{" + key + "}"
            # George expects forward slashes even on Windows.
            output_path = output_file.name.replace("\\", "/")
            script = script.replace(format_key, output_path)
            filepath_by_key[key] = output_path

        # Replace job queue root in script
        if self._root_dir_key:
            job_queue_root = self.job_queue_root()
            format_key = "{" + self._root_dir_key + "}"
            script = script.replace(
                format_key, job_queue_root.replace("\\", "/")
            )

        # Execute the script
        self.execute_george_through_file(script)

        # Store result of temporary files and remove them afterwards.
        result = {}
        for key, filepath in filepath_by_key.items():
            with open(filepath, "r") as stream:
                data = stream.read()
            result[key] = data
            os.remove(filepath)

        self._result = result

    @classmethod
    def from_existing(cls, data):
        """Recreate the object from data."""
        script_lines = data.pop("script_lines")
        tmp_file_keys = data.pop("tmp_file_keys", None)
        root_dir_key = data.pop("root_dir_key", None)
        return cls(script_lines, tmp_file_keys, root_dir_key, data)
class CollectSceneData(BaseCommand):
    """Helper command which will collect all useful info about workfile.

    Result is dictionary with all layers data, exposure frames by layer ids,
    pre/post behavior of layers by their ids, group information and scene
    data.
    """
    name = "collect_scene_data"

    def execute(self):
        # Imported lazily - these helpers are only importable on the worker
        # side where the TVPaint integration is available.
        from ayon_tvpaint.api.lib import (
            get_layers_data,
            get_groups_data,
            get_layers_pre_post_behavior,
            get_layers_exposure_frames,
            get_scene_data
        )

        groups_data = get_groups_data(communicator=self.communicator)
        layers_data = get_layers_data(communicator=self.communicator)
        layer_ids = [
            layer_data["layer_id"]
            for layer_data in layers_data
        ]
        pre_post_beh_by_layer_id = get_layers_pre_post_behavior(
            layer_ids, communicator=self.communicator
        )
        exposure_frames_by_layer_id = get_layers_exposure_frames(
            layer_ids, layers_data, communicator=self.communicator
        )

        self._result = {
            "layers_data": layers_data,
            "exposure_frames_by_layer_id": exposure_frames_by_layer_id,
            "pre_post_beh_by_layer_id": pre_post_beh_by_layer_id,
            "groups_data": groups_data,
            "scene_data": get_scene_data(self.communicator)
        }

    @classmethod
    def from_existing(cls, data):
        return cls(data)
@six.add_metaclass(ABCMeta)
class TVPaintCommands:
    """Wrapper around TVPaint commands to be able send multiple commands.

    Commands may send one or multiple commands at once. Also gives api access
    for commands info.

    Base for sender and receiver which are extending the logic for their
    purposes. One of differences is preparation of workfile path.

    Args:
        workfile(str): Path to workfile.
        job_queue_module(JobQueueModule): Object of OpenPype module JobQueue.
    """
    def __init__(self, workfile, job_queue_module=None):
        self._log = None
        self._commands = []
        self._command_classes_by_name = None
        if job_queue_module is None:
            manager = AddonsManger()
            job_queue_module = manager["job_queue"]
        self._job_queue_module = job_queue_module

        self._workfile = self._prepare_workfile(workfile)

    @abstractmethod
    def _prepare_workfile(self, workfile):
        """Modification of workfile path on initialization to match platform."""
        pass

    def job_queue_root(self):
        """Job queue root for current platform using current settings."""
        return self._job_queue_module.get_jobs_root_from_settings()

    @property
    def log(self):
        """Access to logger object (created lazily)."""
        if self._log is None:
            self._log = Logger.get_logger(self.__class__.__name__)
        return self._log

    @property
    def classes_by_name(self):
        """Prepare commands classes for validation and recreation of commands.

        It is expected that all commands are defined in this python file so
        we're looking for all implementation of BaseCommand in globals.
        """
        if self._command_classes_by_name is None:
            command_classes_by_name = {}
            for attr in globals().values():
                if (
                    not inspect.isclass(attr)
                    or not issubclass(attr, BaseCommand)
                    or attr is BaseCommand
                ):
                    continue

                if inspect.isabstract(attr):
                    self.log.debug(
                        "Skipping abstract class {}".format(attr.__name__)
                    )
                    # Bugfix: abstract classes were logged as skipped but
                    # still registered (the 'continue' was missing), which
                    # put classes with an abstract 'name' into the mapping.
                    continue
                command_classes_by_name[attr.name] = attr
            self._command_classes_by_name = command_classes_by_name
        return self._command_classes_by_name

    def add_command(self, command):
        """Add command to process."""
        command.set_parent(self)
        self._commands.append(command)

    def result(self):
        """Result of commands in list in which they were processed."""
        return [
            command.result()
            for command in self._commands
        ]

    def response_data(self):
        """Data which should be send from worker."""
        return [
            command.response_data()
            for command in self._commands
        ]
class SenderTVPaintCommands(TVPaintCommands):
    """Sender implementation of TVPaint Commands."""

    def _prepare_workfile(self, workfile):
        """Remove job queue root from workfile path.

        It is expected that worker will add it's root before passed workfile.
        """
        normalized = workfile.replace("\\", "/")
        root = self.job_queue_root().replace("\\", "/")
        if root not in normalized:
            raise ValueError((
                "Workfile is not located in JobQueue root."
                " Workfile path: \"{}\". JobQueue root: \"{}\""
            ).format(workfile, root))
        return normalized.replace(root, "")

    def commands_data(self):
        """Serialized commands data to be able recreate them on worker."""
        return [item.command_data() for item in self._commands]

    def to_job_data(self):
        """Convert commands to job data before sending to workers server."""
        return {
            "workfile": self._workfile,
            "function": "commands",
            "commands": self.commands_data()
        }

    def set_result(self, result):
        """Distribute worker results back onto the matching commands."""
        commands_by_id = {item.id: item for item in self._commands}
        for result_item in result:
            command = commands_by_id[result_item["id"]]
            command.set_result(result_item["result"])
            command.set_done()

    def _send_job(self):
        """Send job to a workers server and return its id."""
        job_data = self.to_job_data()
        self.log.debug("Sending job to JobQueue server.\n{}".format(
            json.dumps(job_data, indent=4)
        ))
        job_id = self._job_queue_module.send_job("tvpaint", job_data)
        self.log.info((
            "Job sent to JobQueue server and got id \"{}\"."
            " Waiting for finishing the job."
        ).format(job_id))
        return job_id

    def send_job_and_wait(self):
        """Send job to workers server and wait for response.

        Result of job is stored into the object.

        Raises:
            JobFailed: When job was finished but not successfully.
        """
        job_id = self._send_job()
        # Poll once per second until the server marks the job as done.
        job_status = self._job_queue_module.get_job_status(job_id)
        while not job_status["done"]:
            time.sleep(1)
            job_status = self._job_queue_module.get_job_status(job_id)

        # "done" only means finished; state tells whether it succeeded.
        if job_status["state"] != "done":
            raise JobFailed(job_status)

        self.set_result(job_status["result"])
        self.log.debug("Job is done and result is stored.")
class ProcessTVPaintCommands(TVPaintCommands):
    """Worker side of TVPaint Commands.

    It is expected this object is created only on worker's side from existing
    data loaded from job.

    Workfile path logic is based on 'SenderTVPaintCommands'.
    """

    def __init__(self, workfile, commands, communicator):
        super(ProcessTVPaintCommands, self).__init__(workfile)

        self._communicator = communicator

        self.commands_from_data(commands)

    def _prepare_workfile(self, workfile):
        """Prepend job queue root before passed workfile."""
        root = self.job_queue_root().replace("\\", "/")
        joined = "/".join([root, workfile.replace("\\", "/")])
        # Collapse duplicated slashes possibly created by the join.
        while "//" in joined:
            joined = joined.replace("//", "/")
        return os.path.normpath(joined)

    @property
    def communicator(self):
        """Access to TVPaint communicator."""
        return self._communicator

    def commands_from_data(self, commands_data):
        """Recreate commands from passed data."""
        for item in commands_data:
            command_class = self.classes_by_name[item["command"]]
            self.add_command(command_class.from_existing(item))

    def execute_george(self, george_script):
        """Helper method to execute george script."""
        return self.communicator.execute_george(george_script)

    def execute_george_through_file(self, george_script):
        """Helper method to execute george script through temp file."""
        tmp = tempfile.NamedTemporaryFile(
            mode="w", prefix=TMP_FILE_PREFIX, suffix=".grg", delete=False
        )
        tmp.write(george_script)
        tmp.close()

        tmp_path = tmp.name.replace("\\", "/")
        self.execute_george("tv_runscript {}".format(tmp_path))

        os.remove(tmp_path)

    def _open_workfile(self):
        """Open workfile in TVPaint."""
        workfile = self._workfile
        print("Opening workfile {}".format(workfile))
        george_script = "tv_LoadProject '\"'\"{}\"'\"'".format(workfile)
        self.execute_george_through_file(george_script)

    def _close_workfile(self):
        """Close workfile in TVPaint."""
        print("Closing workfile")
        self.execute_george_through_file("tv_projectclose")

    def execute(self):
        """Open workfile, run all commands in order and close the workfile."""
        self._open_workfile()

        # TODO maybe stop processing when command fails?
        print("Commands execution started ({})".format(len(self._commands)))
        for command in self._commands:
            command.execute()
            command.set_done()

        self._close_workfile()

View file

@ -1,11 +0,0 @@
# AYON addon package definition for TVPaint.
name = "tvpaint"
title = "TVPaint"
version = "0.2.2"
# Directory with the client-side (desktop) code of the addon.
client_dir = "ayon_tvpaint"

# Addons that must be installed on the server for this addon to work.
ayon_required_addons = {
    "core": ">0.3.2",
}
# Version constraints for addons that are optional but interoperate.
ayon_compatible_addons = {
    "jobqueue": ">=1.1.0",
}

View file

@ -1,6 +0,0 @@
# Addon project metadata.
[project]
name="tvpaint"
description="AYON TVPaint addon."

# Python packages installed into the addon runtime environment.
[ayon.runtimeDependencies]
aiohttp_json_rpc = "*"

View file

@ -1,13 +0,0 @@
from typing import Type
from ayon_server.addons import BaseServerAddon
from .settings import TvpaintSettings, DEFAULT_VALUES
class TvpaintAddon(BaseServerAddon):
    """Server-side addon exposing TVPaint settings model."""

    settings_model: Type[TvpaintSettings] = TvpaintSettings

    async def get_default_settings(self):
        """Return settings model pre-filled with the addon defaults."""
        model_cls = self.get_settings_model()
        return model_cls(**DEFAULT_VALUES)

View file

@ -1,10 +0,0 @@
from .main import (
TvpaintSettings,
DEFAULT_VALUES,
)
__all__ = (
"TvpaintSettings",
"DEFAULT_VALUES",
)

View file

@ -1,136 +0,0 @@
from ayon_server.settings import BaseSettingsModel, SettingsField
class CreateWorkfileModel(BaseSettingsModel):
    """Settings for the workfile product creator."""
    enabled: bool = SettingsField(True)
    default_variant: str = SettingsField(title="Default variant")
    default_variants: list[str] = SettingsField(
        default_factory=list, title="Default variants")
class CreateReviewModel(BaseSettingsModel):
    """Settings for the review product creator."""
    enabled: bool = SettingsField(True)
    # Whether the created instance is active right after creation.
    active_on_create: bool = SettingsField(True, title="Active by default")
    default_variant: str = SettingsField(title="Default variant")
    default_variants: list[str] = SettingsField(
        default_factory=list, title="Default variants")
class CreateRenderSceneModel(BaseSettingsModel):
    """Settings for the render-scene product creator."""
    enabled: bool = SettingsField(True)
    active_on_create: bool = SettingsField(True, title="Active by default")
    # Whether created instances are marked for review by default.
    mark_for_review: bool = SettingsField(True, title="Review by default")
    default_pass_name: str = SettingsField(title="Default beauty pass")
    default_variant: str = SettingsField(title="Default variant")
    default_variants: list[str] = SettingsField(
        default_factory=list, title="Default variants")
class CreateRenderLayerModel(BaseSettingsModel):
    """Settings for the render-layer product creator."""
    mark_for_review: bool = SettingsField(True, title="Review by default")
    default_pass_name: str = SettingsField(title="Default beauty pass")
    default_variant: str = SettingsField(title="Default variant")
    default_variants: list[str] = SettingsField(
        default_factory=list, title="Default variants")
class CreateRenderPassModel(BaseSettingsModel):
    """Settings for the render-pass product creator."""
    mark_for_review: bool = SettingsField(True, title="Review by default")
    default_variant: str = SettingsField(title="Default variant")
    default_variants: list[str] = SettingsField(
        default_factory=list, title="Default variants")
class AutoDetectCreateRenderModel(BaseSettingsModel):
    """The creator tries to auto-detect Render Layers and Render Passes in scene.

    For Render Layers is used group name as a variant and for Render Passes is
    used TVPaint layer name.

    Group names can be renamed by their used order in scene. The renaming
    template where can be used '{group_index}' formatting key which is
    filled by "used position index of group".
    - Template: 'L{group_index}'
    - Group offset: '10'
    - Group padding: '3'

    Would create group names "L010", "L020", ...
    """
    enabled: bool = SettingsField(True)
    # Whether groups may be renamed using 'group_name_template'.
    allow_group_rename: bool = SettingsField(title="Allow group rename")
    # Template using the '{group_index}' formatting key described above.
    group_name_template: str = SettingsField(title="Group name template")
    # Offset multiplier for the group index ('ge=1' enforces minimum of 1).
    group_idx_offset: int = SettingsField(
        1, title="Group index Offset", ge=1
    )
    # Zero-padding width of the formatted group index.
    group_idx_padding: int = SettingsField(
        4, title="Group index Padding", ge=1
    )
class CreatePluginsModel(BaseSettingsModel):
    """Grouping of all TVPaint creator plugin settings."""
    create_workfile: CreateWorkfileModel = SettingsField(
        default_factory=CreateWorkfileModel,
        title="Create Workfile"
    )
    create_review: CreateReviewModel = SettingsField(
        default_factory=CreateReviewModel,
        title="Create Review"
    )
    # Bug fix: 'default_factory' originally pointed to 'CreateReviewModel',
    # which lacks the 'mark_for_review' and 'default_pass_name' fields
    # declared on 'CreateRenderSceneModel'.
    create_render_scene: CreateRenderSceneModel = SettingsField(
        default_factory=CreateRenderSceneModel,
        title="Create Render Scene"
    )
    create_render_layer: CreateRenderLayerModel = SettingsField(
        default_factory=CreateRenderLayerModel,
        title="Create Render Layer"
    )
    create_render_pass: CreateRenderPassModel = SettingsField(
        default_factory=CreateRenderPassModel,
        title="Create Render Pass"
    )
    auto_detect_render: AutoDetectCreateRenderModel = SettingsField(
        default_factory=AutoDetectCreateRenderModel,
        title="Auto-Detect Create Render",
    )
# Default values for 'CreatePluginsModel' used by the server addon.
DEFAULT_CREATE_SETTINGS = {
    "create_workfile": {
        "enabled": True,
        "default_variant": "Main",
        "default_variants": []
    },
    "create_review": {
        "enabled": True,
        "active_on_create": True,
        "default_variant": "Main",
        "default_variants": []
    },
    "create_render_scene": {
        "enabled": True,
        "active_on_create": False,
        "mark_for_review": True,
        "default_pass_name": "beauty",
        "default_variant": "Main",
        "default_variants": []
    },
    "create_render_layer": {
        "mark_for_review": False,
        "default_pass_name": "beauty",
        "default_variant": "Main",
        "default_variants": []
    },
    "create_render_pass": {
        "mark_for_review": False,
        "default_variant": "Main",
        "default_variants": []
    },
    "auto_detect_render": {
        "enabled": False,
        "allow_group_rename": True,
        "group_name_template": "L{group_index}",
        "group_idx_offset": 10,
        "group_idx_padding": 3
    }
}

View file

@ -1,17 +0,0 @@
from ayon_server.settings import BaseSettingsModel, SettingsField
class FiltersSubmodel(BaseSettingsModel):
    """Single named search/replace value edited as a text area."""
    _layout = "compact"
    name: str = SettingsField(title="Name")
    value: str = SettingsField(
        "",
        title="Textarea",
        widget="textarea",
    )
class PublishFiltersModel(BaseSettingsModel):
    """List of environment search/replace values for publishing."""
    env_search_replace_values: list[FiltersSubmodel] = SettingsField(
        default_factory=list
    )

View file

@ -1,63 +0,0 @@
from pydantic import validator
from ayon_server.settings import BaseSettingsModel, SettingsField
from ayon_server.settings.validators import ensure_unique_names
class ImageIOConfigModel(BaseSettingsModel):
    """[DEPRECATED] Addon OCIO config settings. Please set the OCIO config
    path in the Core addon profiles here
    (ayon+settings://core/imageio/ocio_config_profiles).
    """
    # Kept only for backward compatibility of stored settings.
    override_global_config: bool = SettingsField(
        False,
        title="Override global OCIO config",
        description=(
            "DEPRECATED functionality. Please set the OCIO config path in the "
            "Core addon profiles here (ayon+settings://core/imageio/"
            "ocio_config_profiles)."
        ),
    )
    filepath: list[str] = SettingsField(
        default_factory=list,
        title="Config path",
        description=(
            "DEPRECATED functionality. Please set the OCIO config path in the "
            "Core addon profiles here (ayon+settings://core/imageio/"
            "ocio_config_profiles)."
        ),
    )
class ImageIOFileRuleModel(BaseSettingsModel):
    """Single colorspace file rule matched by regex and extension."""
    name: str = SettingsField("", title="Rule name")
    pattern: str = SettingsField("", title="Regex pattern")
    colorspace: str = SettingsField("", title="Colorspace name")
    ext: str = SettingsField("", title="File extension")
class ImageIOFileRulesModel(BaseSettingsModel):
    """Host-level colorspace file rules."""
    activate_host_rules: bool = SettingsField(False)
    rules: list[ImageIOFileRuleModel] = SettingsField(
        default_factory=list,
        title="Rules"
    )

    # Rule names must be unique so lookups are unambiguous.
    @validator("rules")
    def validate_unique_outputs(cls, value):
        ensure_unique_names(value)
        return value
class TVPaintImageIOModel(BaseSettingsModel):
    """Color management (ImageIO) settings for the TVPaint host."""
    activate_host_color_management: bool = SettingsField(
        True, title="Enable Color Management"
    )
    # Deprecated - see 'ImageIOConfigModel' docstring.
    ocio_config: ImageIOConfigModel = SettingsField(
        default_factory=ImageIOConfigModel,
        title="OCIO config"
    )
    file_rules: ImageIOFileRulesModel = SettingsField(
        default_factory=ImageIOFileRulesModel,
        title="File Rules"
    )

View file

@ -1,64 +0,0 @@
from ayon_server.settings import (
BaseSettingsModel,
SettingsField,
)
from .imageio import TVPaintImageIOModel
from .workfile_builder import WorkfileBuilderPlugin
from .create_plugins import CreatePluginsModel, DEFAULT_CREATE_SETTINGS
from .publish_plugins import (
PublishPluginsModel,
LoadPluginsModel,
DEFAULT_PUBLISH_SETTINGS,
)
class TvpaintSettings(BaseSettingsModel):
    """Root settings model of the TVPaint addon."""
    imageio: TVPaintImageIOModel = SettingsField(
        default_factory=TVPaintImageIOModel,
        title="Color Management (ImageIO)"
    )
    stop_timer_on_application_exit: bool = SettingsField(
        title="Stop timer on application exit")
    create: CreatePluginsModel = SettingsField(
        default_factory=CreatePluginsModel,
        title="Create plugins"
    )
    publish: PublishPluginsModel = SettingsField(
        default_factory=PublishPluginsModel,
        title="Publish plugins")
    load: LoadPluginsModel = SettingsField(
        default_factory=LoadPluginsModel,
        title="Load plugins")
    workfile_builder: WorkfileBuilderPlugin = SettingsField(
        default_factory=WorkfileBuilderPlugin,
        title="Workfile Builder"
    )
# Default values for 'TvpaintSettings' used by the server addon.
DEFAULT_VALUES = {
    "stop_timer_on_application_exit": False,
    "create": DEFAULT_CREATE_SETTINGS,
    "publish": DEFAULT_PUBLISH_SETTINGS,
    "load": {
        "LoadImage": {
            "defaults": {
                "stretch": True,
                "timestretch": True,
                "preload": True
            }
        },
        "ImportImage": {
            "defaults": {
                "stretch": True,
                "timestretch": True,
                "preload": True
            }
        }
    },
    "workfile_builder": {
        "create_first_version": False,
        "custom_templates": []
    },
    # NOTE(review): 'TvpaintSettings' declares no 'filters' field - confirm
    # this key is consumed elsewhere or is leftover.
    "filters": []
}

View file

@ -1,133 +0,0 @@
from ayon_server.settings import BaseSettingsModel, SettingsField
from ayon_server.types import ColorRGBA_uint8
class CollectRenderInstancesModel(BaseSettingsModel):
    """Settings for the 'CollectRenderInstances' publish plugin."""
    ignore_render_pass_transparency: bool = SettingsField(
        title="Ignore Render Pass opacity"
    )
class ExtractSequenceModel(BaseSettingsModel):
    """Review BG color is used for whole scene review and for thumbnails."""
    # RGBA with 0-255 channels and float alpha.
    review_bg: ColorRGBA_uint8 = SettingsField(
        (255, 255, 255, 1.0),
        title="Review BG color")
    # Alternative RGB-only variant kept for reference:
    # review_bg: ColorRGB_uint8 = SettingsField(
    #     (255, 255, 255),
    #     title="Review BG color")
class ValidatePluginModel(BaseSettingsModel):
    """Common enabled/optional/active toggles for validator plugins."""
    enabled: bool = True
    optional: bool = SettingsField(True, title="Optional")
    active: bool = SettingsField(True, title="Active")
def compression_enum():
    """Return enum items for the EXR compression setting.

    Each item maps ``value`` (stored setting value) to ``label`` (UI text).
    All labels equal their values except "none", labeled "None".
    """
    codecs = (
        "ZIP", "ZIPS", "DWAA", "DWAB", "PIZ",
        "RLE", "PXR24", "B44", "B44A",
    )
    items = [{"value": codec, "label": codec} for codec in codecs]
    items.append({"value": "none", "label": "None"})
    return items
class ExtractConvertToEXRModel(BaseSettingsModel):
    """WARNING: This plugin does not work on MacOS (using OIIO tool)."""
    enabled: bool = False
    # Replace source PNG representations with the converted EXRs.
    replace_pngs: bool = True
    exr_compression: str = SettingsField(
        "ZIP",
        enum_resolver=compression_enum,
        title="EXR Compression"
    )
class LoadImageDefaultModel(BaseSettingsModel):
    """Default option values of the image load dialog."""
    _layout = "expanded"
    stretch: bool = SettingsField(title="Stretch")
    timestretch: bool = SettingsField(title="TimeStretch")
    preload: bool = SettingsField(title="Preload")
class LoadImageModel(BaseSettingsModel):
    """Settings wrapper for image loader plugins."""
    defaults: LoadImageDefaultModel = SettingsField(
        default_factory=LoadImageDefaultModel
    )
class PublishPluginsModel(BaseSettingsModel):
    """Settings for TVPaint publish plugins, keyed by plugin class name."""
    CollectRenderInstances: CollectRenderInstancesModel = SettingsField(
        default_factory=CollectRenderInstancesModel,
        title="Collect Render Instances")
    ExtractSequence: ExtractSequenceModel = SettingsField(
        default_factory=ExtractSequenceModel,
        title="Extract Sequence")
    ValidateProjectSettings: ValidatePluginModel = SettingsField(
        default_factory=ValidatePluginModel,
        title="Validate Project Settings")
    ValidateMarks: ValidatePluginModel = SettingsField(
        default_factory=ValidatePluginModel,
        title="Validate MarkIn/Out")
    ValidateStartFrame: ValidatePluginModel = SettingsField(
        default_factory=ValidatePluginModel,
        title="Validate Scene Start Frame")
    ValidateAssetName: ValidatePluginModel = SettingsField(
        default_factory=ValidatePluginModel,
        title="Validate Folder Name")
    ExtractConvertToEXR: ExtractConvertToEXRModel = SettingsField(
        default_factory=ExtractConvertToEXRModel,
        title="Extract Convert To EXR")
class LoadPluginsModel(BaseSettingsModel):
    """Settings for TVPaint load plugins, keyed by plugin class name."""
    LoadImage: LoadImageModel = SettingsField(
        default_factory=LoadImageModel,
        title="Load Image")
    ImportImage: LoadImageModel = SettingsField(
        default_factory=LoadImageModel,
        title="Import Image")
# Default values for 'PublishPluginsModel' used by the server addon.
DEFAULT_PUBLISH_SETTINGS = {
    "CollectRenderInstances": {
        "ignore_render_pass_transparency": False
    },
    "ExtractSequence": {
        # "review_bg": [255, 255, 255]
        "review_bg": [255, 255, 255, 1.0]
    },
    "ValidateProjectSettings": {
        "enabled": True,
        "optional": True,
        "active": True
    },
    "ValidateMarks": {
        "enabled": True,
        "optional": True,
        "active": True
    },
    "ValidateStartFrame": {
        "enabled": False,
        "optional": True,
        "active": True
    },
    "ValidateAssetName": {
        "enabled": True,
        "optional": True,
        "active": True
    },
    "ExtractConvertToEXR": {
        "enabled": False,
        "replace_pngs": True,
        "exr_compression": "ZIP"
    }
}

View file

@ -1,29 +0,0 @@
from ayon_server.settings import (
BaseSettingsModel,
SettingsField,
MultiplatformPathModel,
task_types_enum,
)
class CustomBuilderTemplate(BaseSettingsModel):
    """Workfile template path applied to selected task types."""
    task_types: list[str] = SettingsField(
        default_factory=list,
        title="Task types",
        enum_resolver=task_types_enum
    )
    template_path: MultiplatformPathModel = SettingsField(
        default_factory=MultiplatformPathModel
    )
class WorkfileBuilderPlugin(BaseSettingsModel):
    """Workfile builder settings (first-version creation and templates)."""
    _title = "Workfile Builder"
    create_first_version: bool = SettingsField(
        False,
        title="Create first workfile"
    )
    # Bug fix: the field is a list, so the default factory must produce a
    # list. Originally 'default_factory=CustomBuilderTemplate', which would
    # default to a single model instance instead of an empty list.
    custom_templates: list[CustomBuilderTemplate] = SettingsField(
        default_factory=list
    )