Merge branch 'develop' into maya_new_publisher

# Conflicts:
#	openpype/hosts/maya/api/lib.py
#	openpype/hosts/maya/api/plugin.py
#	openpype/hosts/maya/plugins/create/create_animation.py
#	openpype/hosts/maya/plugins/create/create_review.py
#	openpype/hosts/maya/plugins/load/load_reference.py
#	openpype/modules/deadline/plugins/publish/submit_maya_deadline.py
Toke Stuart Jepsen 2023-05-16 10:23:11 +01:00
commit 544c043ece
451 changed files with 15825 additions and 3076 deletions

View file

@@ -311,6 +311,7 @@ def _load_modules():
# Look for OpenPype modules in paths defined with `get_module_dirs`
# - dynamically imported OpenPype modules and addons
module_dirs = get_module_dirs()
# Add current directory first
# - has small differences in import logic
current_dir = os.path.abspath(os.path.dirname(__file__))
@@ -318,8 +319,11 @@ def _load_modules():
module_dirs.insert(0, hosts_dir)
module_dirs.insert(0, current_dir)
addons_dir = os.path.join(os.path.dirname(current_dir), "addons")
module_dirs.append(addons_dir)
processed_paths = set()
for dirpath in module_dirs:
for dirpath in frozenset(module_dirs):
# Skip already processed paths
if dirpath in processed_paths:
continue
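The guard above is a simple seen-set skip; a tiny order-preserving sketch of the same pattern, written as a standalone helper rather than the OpenPype function itself:

```python
def unique_dirs(module_dirs):
    """Return directories in their original order, dropping repeats."""
    processed_paths = set()
    result = []
    for dirpath in module_dirs:
        # Skip already processed paths, mirroring the guard in the hunk above.
        if dirpath in processed_paths:
            continue
        processed_paths.add(dirpath)
        result.append(dirpath)
    return result
```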

View file

@@ -538,8 +538,8 @@ class AbstractSubmitDeadline(pyblish.api.InstancePlugin,
template_data["comment"] = None
anatomy = instance.context.data['anatomy']
anatomy_filled = anatomy.format(template_data)
template_filled = anatomy_filled["publish"]["path"]
template_obj = anatomy.templates_obj["publish"]["path"]
template_filled = template_obj.format_strict(template_data)
file_path = os.path.normpath(template_filled)
self.log.info("Using published scene for render {}".format(file_path))

View file

@@ -170,6 +170,7 @@ class MayaSubmitDeadline(abstract_submit_deadline.AbstractSubmitDeadline,
job_info.Pool = instance.data.get("primaryPool")
job_info.SecondaryPool = instance.data.get("secondaryPool")
job_info.Comment = context.data.get("comment")
job_info.Priority = instance.data.get("priority", self.priority)
if self.group != "none" and self.group:
job_info.Group = self.group
@@ -366,6 +367,11 @@ class MayaSubmitDeadline(abstract_submit_deadline.AbstractSubmitDeadline,
job_info = copy.deepcopy(payload_job_info)
plugin_info = copy.deepcopy(payload_plugin_info)
# Force plugin reload for vray because the region does not get flushed
# between tile renders.
if plugin_info["Renderer"] == "vray":
job_info.ForceReloadPlugin = True
# If we have a sequence of files, we need to create a tile job for
# every frame
job_info.TileJob = True
@@ -477,6 +483,7 @@ class MayaSubmitDeadline(abstract_submit_deadline.AbstractSubmitDeadline,
assembly_payloads = []
output_dir = self.job_info.OutputDirectory[0]
config_files = []
for file in assembly_files:
frame = re.search(R_FRAME_NUMBER, file).group("frame")
@@ -502,6 +509,7 @@ class MayaSubmitDeadline(abstract_submit_deadline.AbstractSubmitDeadline,
datetime.now().strftime("%Y_%m_%d_%H_%M_%S")
)
)
config_files.append(config_file)
try:
if not os.path.isdir(output_dir):
os.makedirs(output_dir)
@@ -510,8 +518,6 @@ class MayaSubmitDeadline(abstract_submit_deadline.AbstractSubmitDeadline,
self.log.warning("Path is unreachable: "
"`{}`".format(output_dir))
assembly_plugin_info["ConfigFile"] = config_file
with open(config_file, "w") as cf:
print("TileCount={}".format(tiles_count), file=cf)
print("ImageFileName={}".format(file), file=cf)
@@ -520,6 +526,10 @@ class MayaSubmitDeadline(abstract_submit_deadline.AbstractSubmitDeadline,
print("ImageHeight={}".format(
instance.data.get("resolutionHeight")), file=cf)
reversed_y = False
if plugin_info["Renderer"] == "arnold":
reversed_y = True
with open(config_file, "a") as cf:
# Need to reverse the order of the y tiles, because image
# coordinates are calculated from the bottom-left corner.
@@ -530,7 +540,7 @@ class MayaSubmitDeadline(abstract_submit_deadline.AbstractSubmitDeadline,
instance.data.get("resolutionWidth"),
instance.data.get("resolutionHeight"),
payload_plugin_info["OutputFilePrefix"],
reversed_y=True
reversed_y=reversed_y
)[1]
for k, v in sorted(tiles.items()):
print("{}={}".format(k, v), file=cf)
@@ -558,6 +568,11 @@ class MayaSubmitDeadline(abstract_submit_deadline.AbstractSubmitDeadline,
instance.data["assemblySubmissionJobs"] = assembly_job_ids
# Remove config files to avoid confusion about where data is coming
# from in Deadline.
for config_file in config_files:
os.remove(config_file)
def _get_maya_payload(self, data):
job_info = copy.deepcopy(self.job_info)
@@ -956,8 +971,6 @@ def _format_tiles(
out["PluginInfo"]["RegionRight{}".format(tile)] = right
# Tile config
cfg["Tile{}".format(tile)] = new_filename
cfg["Tile{}Tile".format(tile)] = new_filename
cfg["Tile{}FileName".format(tile)] = new_filename
cfg["Tile{}X".format(tile)] = left
cfg["Tile{}Y".format(tile)] = top

View file

@@ -445,7 +445,7 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin,
"Finished copying %i files" % len(resource_files))
def _create_instances_for_aov(
self, instance_data, exp_files, additional_data
self, instance_data, exp_files, additional_data, do_not_add_review
):
"""Create instance for each AOV found.
@@ -456,6 +456,8 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin,
instance_data (pyblish.plugin.Instance): skeleton data for instance
(those needed later by the collector)
exp_files (list): list of expected files divided by aovs
additional_data (dict):
do_not_add_review (bool): explicitly skip review
Returns:
list of instances
@@ -521,8 +523,6 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin,
app = os.environ.get("AVALON_APP", "")
preview = False
if isinstance(col, list):
render_file_name = os.path.basename(col[0])
else:
@@ -539,6 +539,8 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin,
new_instance = deepcopy(instance_data)
new_instance["subset"] = subset_name
new_instance["subsetGroup"] = group_name
preview = preview and not do_not_add_review
if preview:
new_instance["review"] = True
@@ -598,7 +600,7 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin,
self.log.debug("instances:{}".format(instances))
return instances
def _get_representations(self, instance, exp_files):
def _get_representations(self, instance, exp_files, do_not_add_review):
"""Create representations for file sequences.
This will return representations of expected files if they are not
@@ -609,6 +611,7 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin,
instance (dict): instance data for which we are
setting representations
exp_files (list): list of expected files
do_not_add_review (bool): explicitly skip review
Returns:
list of representations
@@ -658,6 +661,7 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin,
if instance.get("slate"):
frame_start -= 1
preview = preview and not do_not_add_review
rep = {
"name": ext,
"ext": ext,
@@ -712,6 +716,7 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin,
preview = match_aov_pattern(
host_name, self.aov_filter, remainder
)
preview = preview and not do_not_add_review
if preview:
rep.update({
"fps": instance.get("fps"),
@@ -827,8 +832,12 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin,
families = [family]
# pass review to families if marked as review
do_not_add_review = False
if data.get("review"):
families.append("review")
elif data.get("review") == False:
self.log.debug("Instance has review explicitly disabled.")
do_not_add_review = True
instance_skeleton_data = {
"family": family,
@@ -984,7 +993,8 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin,
instances = self._create_instances_for_aov(
instance_skeleton_data,
data.get("expectedFiles"),
additional_data
additional_data,
do_not_add_review
)
self.log.info("got {} instance{}".format(
len(instances),
@@ -993,7 +1003,8 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin,
else:
representations = self._get_representations(
instance_skeleton_data,
data.get("expectedFiles")
data.get("expectedFiles"),
do_not_add_review
)
if "representations" not in instance_skeleton_data.keys():
@@ -1209,10 +1220,11 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin,
template_data["family"] = "render"
template_data["version"] = version
anatomy_filled = anatomy.format(template_data)
if "folder" in anatomy.templates["render"]:
publish_folder = anatomy_filled["render"]["folder"]
render_templates = anatomy.templates_obj["render"]
if "folder" in render_templates:
publish_folder = render_templates["folder"].format_strict(
template_data
)
else:
# Handle the deprecated situation when the `folder` key is not underneath
# `publish` anatomy
@@ -1222,8 +1234,7 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin,
" key underneath `publish` (in global or for project `{}`)."
).format(project_name))
file_path = anatomy_filled["render"]["path"]
# Directory
file_path = render_templates["path"].format_strict(template_data)
publish_folder = os.path.dirname(file_path)
return publish_folder
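A condensed sketch of the publish-folder resolution in the hunk above, assuming the same `render_templates` object and `template_data` dict (the logging in the deprecated branch is omitted):

```python
import os

def resolve_publish_folder(render_templates, template_data):
    # Prefer an explicit "folder" template; otherwise fall back to the
    # directory of the formatted "path" template (deprecated layout).
    if "folder" in render_templates:
        return render_templates["folder"].format_strict(template_data)
    file_path = render_templates["path"].format_strict(template_data)
    return os.path.dirname(file_path)
```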

View file

@@ -378,7 +378,7 @@ class IntegrateHierarchyToFtrack(pyblish.api.ContextPlugin):
existing_tasks.append(task_name_low)
for instance in instances_by_task_name[task_name_low]:
instance["ftrackTask"] = child
instance.data["ftrackTask"] = child
for task_name in tasks:
task_type = tasks[task_name]["type"]

View file

@@ -9,7 +9,7 @@ class IntegrateKitsuNote(pyblish.api.ContextPlugin):
order = pyblish.api.IntegratorOrder
label = "Kitsu Note and Status"
families = ["render", "kitsu"]
families = ["render", "image", "online", "plate", "kitsu"]
# status settings
set_status_note = False
@@ -52,8 +52,9 @@ class IntegrateKitsuNote(pyblish.api.ContextPlugin):
for instance in context:
# Check if the instance is a review by checking its families.
# Allow a match on the primary family or any of the families.
families = set([instance.data["family"]] +
instance.data.get("families", []))
families = set(
[instance.data["family"]] + instance.data.get("families", [])
)
if "review" not in families:
continue

View file

@@ -8,11 +8,10 @@ class IntegrateKitsuReview(pyblish.api.InstancePlugin):
order = pyblish.api.IntegratorOrder + 0.01
label = "Kitsu Review"
families = ["render", "kitsu"]
families = ["render", "image", "online", "plate", "kitsu"]
optional = True
def process(self, instance):
# Check comment has been created
comment_id = instance.data.get("kitsu_comment", {}).get("id")
if not comment_id:

View file

@@ -0,0 +1,186 @@
import os
import shutil
from openpype.client.entities import (
get_representations,
get_project
)
from openpype.lib import PreLaunchHook
from openpype.lib.profiles_filtering import filter_profiles
from openpype.modules.sync_server.sync_server import (
download_last_published_workfile,
)
from openpype.pipeline.template_data import get_template_data
from openpype.pipeline.workfile.path_resolving import (
get_workfile_template_key,
)
from openpype.settings.lib import get_project_settings
class CopyLastPublishedWorkfile(PreLaunchHook):
"""Copy last published workfile as first workfile.
Prelaunch hook works only if last workfile leads to not existing file.
- That is possible only if it's first version.
"""
# Before `AddLastWorkfileToLaunchArgs`
order = -1
# Any DCC could be used, except TrayPublisher and other specials
app_groups = ["blender", "photoshop", "tvpaint", "aftereffects",
"nuke", "nukeassist", "nukex", "hiero", "nukestudio",
"maya", "harmony", "celaction", "flame", "fusion",
"houdini", "tvpaint"]
def execute(self):
"""Check if local workfile doesn't exist, else copy it.
1- Check if setting for this feature is enabled
2- Check if workfile in work area doesn't exist
3- Check if published workfile exists and is copied locally in publish
4- Substitute copied published workfile as first workfile
with incremented version by +1
Returns:
None: This is a void method.
"""
sync_server = self.modules_manager.get("sync_server")
if not sync_server or not sync_server.enabled:
self.log.debug("Sync server module is not enabled or available")
return
# Check there is no workfile available
last_workfile = self.data.get("last_workfile_path")
if os.path.exists(last_workfile):
self.log.debug(
"Last workfile exists. Skipping {} process.".format(
self.__class__.__name__
)
)
return
# Get data
project_name = self.data["project_name"]
asset_name = self.data["asset_name"]
task_name = self.data["task_name"]
task_type = self.data["task_type"]
host_name = self.application.host_name
# Check that the settings have this feature enabled
project_settings = get_project_settings(project_name)
profiles = project_settings["global"]["tools"]["Workfiles"][
"last_workfile_on_startup"
]
filter_data = {
"tasks": task_name,
"task_types": task_type,
"hosts": host_name,
}
last_workfile_settings = filter_profiles(profiles, filter_data)
if not last_workfile_settings:
return
use_last_published_workfile = last_workfile_settings.get(
"use_last_published_workfile"
)
if use_last_published_workfile is None:
self.log.info(
(
"Seems like old version of settings is used."
' Can\'t access custom templates in host "{}".'.format(
host_name
)
)
)
return
elif use_last_published_workfile is False:
self.log.info(
(
'Project "{}" has turned off to use last published'
' workfile as first workfile for host "{}"'.format(
project_name, host_name
)
)
)
return
max_retries = int((sync_server.sync_project_settings[project_name]
["config"]
["retry_cnt"]))
self.log.info("Trying to fetch last published workfile...")
asset_doc = self.data.get("asset_doc")
anatomy = self.data.get("anatomy")
context_filters = {
"asset": asset_name,
"family": "workfile",
"task": {"name": task_name, "type": task_type}
}
workfile_representations = list(get_representations(
project_name,
context_filters=context_filters
))
if not workfile_representations:
self.log.debug(
'No published workfile for task "{}" and host "{}".'.format(
task_name, host_name
)
)
return
filtered_repres = filter(
lambda r: r["context"].get("version") is not None,
workfile_representations
)
workfile_representation = max(
filtered_repres, key=lambda r: r["context"]["version"]
)
# Copy file and substitute path
last_published_workfile_path = download_last_published_workfile(
host_name,
project_name,
task_name,
workfile_representation,
max_retries,
anatomy=anatomy
)
if not last_published_workfile_path:
self.log.debug(
"Couldn't download {}".format(last_published_workfile_path)
)
return
project_doc = self.data["project_doc"]
project_settings = self.data["project_settings"]
template_key = get_workfile_template_key(
task_name, host_name, project_name, project_settings
)
# Get workfile data
workfile_data = get_template_data(
project_doc, asset_doc, task_name, host_name
)
extension = last_published_workfile_path.split(".")[-1]
workfile_data["version"] = (
workfile_representation["context"]["version"] + 1)
workfile_data["ext"] = extension
anatomy_result = anatomy.format(workfile_data)
local_workfile_path = anatomy_result[template_key]["path"]
# Copy last published workfile to local workfile directory
shutil.copy(
last_published_workfile_path,
local_workfile_path,
)
self.data["last_workfile_path"] = local_workfile_path
# Keep source filepath for further path conformation
self.data["source_filepath"] = last_published_workfile_path

View file

@@ -3,10 +3,15 @@ import os
import asyncio
import threading
import concurrent.futures
from concurrent.futures._base import CancelledError
from time import sleep
from .providers import lib
from openpype.client.entity_links import get_linked_representation_id
from openpype.lib import Logger
from openpype.lib.local_settings import get_local_site_id
from openpype.modules.base import ModulesManager
from openpype.pipeline import Anatomy
from openpype.pipeline.load.utils import get_representation_path_with_anatomy
from .utils import SyncStatus, ResumableError
@@ -189,6 +194,98 @@ def _site_is_working(module, project_name, site_name, site_config):
return handler.is_active()
def download_last_published_workfile(
host_name: str,
project_name: str,
task_name: str,
workfile_representation: dict,
max_retries: int,
anatomy: Anatomy = None,
) -> str:
"""Download the last published workfile
Args:
host_name (str): Host name.
project_name (str): Project name.
task_name (str): Task name.
workfile_representation (dict): Workfile representation.
max_retries (int): number of attempts before the download is considered failed
anatomy (Anatomy, optional): Anatomy (Used for optimization).
Defaults to None.
Returns:
str: Local path to the last published workfile.
"""
if not anatomy:
anatomy = Anatomy(project_name)
# Get sync server module
sync_server = ModulesManager().modules_by_name.get("sync_server")
if not sync_server or not sync_server.enabled:
print("Sync server module is disabled or unavailable.")
return
if not workfile_representation:
print(
"Not published workfile for task '{}' and host '{}'.".format(
task_name, host_name
)
)
return
last_published_workfile_path = get_representation_path_with_anatomy(
workfile_representation, anatomy
)
if (not last_published_workfile_path or
not os.path.exists(last_published_workfile_path)):
return
# If representation isn't available on remote site, then return.
if not sync_server.is_representation_on_site(
project_name,
workfile_representation["_id"],
sync_server.get_remote_site(project_name),
):
print(
"Representation for task '{}' and host '{}'".format(
task_name, host_name
)
)
return
# Get local site
local_site_id = get_local_site_id()
# Add workfile representation to local site
representation_ids = {workfile_representation["_id"]}
representation_ids.update(
get_linked_representation_id(
project_name, repre_id=workfile_representation["_id"]
)
)
for repre_id in representation_ids:
if not sync_server.is_representation_on_site(project_name, repre_id,
local_site_id):
sync_server.add_site(
project_name,
repre_id,
local_site_id,
force=True,
priority=99
)
sync_server.reset_timer()
print("Starting to download:{}".format(last_published_workfile_path))
# While representation unavailable locally, wait.
while not sync_server.is_representation_on_site(
project_name, workfile_representation["_id"], local_site_id,
max_retries=max_retries
):
sleep(5)
return last_published_workfile_path
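A hedged usage sketch of the helper above, mirroring the call made by the `CopyLastPublishedWorkfile` hook earlier in this commit (project, task and retry values here are placeholders):

```python
from openpype.modules.sync_server.sync_server import (
    download_last_published_workfile,
)

# `workfile_representation` is a representation document obtained via
# get_representations(), as in the prelaunch hook above.
path = download_last_published_workfile(
    host_name="maya",
    project_name="MyProject",
    task_name="modeling",
    workfile_representation=workfile_representation,
    max_retries=3,
)
if path:
    print("Localized workfile: {}".format(path))
```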
class SyncServerThread(threading.Thread):
"""
Separate thread running synchronization server with asyncio loop.
@@ -358,7 +455,6 @@ class SyncServerThread(threading.Thread):
duration = time.time() - start_time
self.log.debug("One loop took {:.2f}s".format(duration))
delay = self.module.get_loop_delay(project_name)
self.log.debug(
"Waiting for {} seconds to new loop".format(delay)
@@ -370,8 +466,8 @@ class SyncServerThread(threading.Thread):
self.log.warning(
"ConnectionResetError in sync loop, trying next loop",
exc_info=True)
except CancelledError:
# just stopping server
except asyncio.exceptions.CancelledError:
# cancelling timer
pass
except ResumableError:
self.log.warning(

View file

@@ -838,6 +838,18 @@ class SyncServerModule(OpenPypeModule, ITrayModule):
return ret_dict
def get_launch_hook_paths(self):
"""Implementation for applications launch hooks.
Returns:
(str): Full absolute path to the directory with hooks for the module.
"""
return os.path.join(
os.path.dirname(os.path.abspath(__file__)),
"launch_hooks"
)
# Needs to be refactored after Settings are updated
# # Methods for Settings to get appropriate values to fill forms
# def get_configurable_items(self, scope=None):
@@ -1045,9 +1057,23 @@ class SyncServerModule(OpenPypeModule, ITrayModule):
self.sync_server_thread.reset_timer()
def is_representation_on_site(
self, project_name, representation_id, site_name
self, project_name, representation_id, site_name, max_retries=None
):
"""Checks if 'representation_id' has all files avail. on 'site_name'"""
"""Checks if 'representation_id' has all files avail. on 'site_name'
Args:
project_name (str)
representation_id (str)
site_name (str)
max_retries (int, optional): provide only if the method is used in a
while loop, to bail out after repeated failures
Returns:
(bool): True if 'representation_id' has all files correctly on the
'site_name'
Raises:
(ValueError): only if 'max_retries' is provided and the upload/download
failed too many times, to avoid an infinite loop.
"""
representation = get_representation_by_id(project_name,
representation_id,
fields=["_id", "files"])
@@ -1060,6 +1086,11 @@ class SyncServerModule(OpenPypeModule, ITrayModule):
if site["name"] != site_name:
continue
if max_retries:
tries = self._get_tries_count_from_rec(site)
if tries >= max_retries:
raise ValueError("Failed too many times")
if (site.get("progress") or site.get("error") or
not site.get("created_dt")):
return False
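Per the new docstring, `max_retries` is meant for polling loops like the one in `download_last_published_workfile`; a hedged sketch of catching the bail-out (the `sync_server`, project, representation and site variables are assumed to exist as in this diff, and `log` is a placeholder logger):

```python
from time import sleep

try:
    # Wait until all files of the representation are present locally.
    while not sync_server.is_representation_on_site(
        project_name, representation_id, local_site_id,
        max_retries=3,
    ):
        sleep(5)
except ValueError:
    # Too many failed sync attempts were recorded for the site; stop waiting.
    log.warning("Giving up on representation {}".format(representation_id))
```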