diff --git a/openpype/api.py b/openpype/api.py index 0466eb7f78..b60cd21d2b 100644 --- a/openpype/api.py +++ b/openpype/api.py @@ -11,7 +11,6 @@ from .lib import ( PypeLogger, Logger, Anatomy, - config, execute, run_subprocess, version_up, @@ -72,7 +71,6 @@ __all__ = [ "PypeLogger", "Logger", "Anatomy", - "config", "execute", "get_default_components", "ApplicationManager", diff --git a/openpype/hosts/blender/plugins/publish/validate_camera_zero_keyframe.py b/openpype/hosts/blender/plugins/publish/validate_camera_zero_keyframe.py index 9ac0561ff3..84b9dd1a6e 100644 --- a/openpype/hosts/blender/plugins/publish/validate_camera_zero_keyframe.py +++ b/openpype/hosts/blender/plugins/publish/validate_camera_zero_keyframe.py @@ -3,7 +3,7 @@ from typing import List import bpy import pyblish.api -import openpype.api + import openpype.hosts.blender.api.action from openpype.pipeline.publish import ValidateContentsOrder diff --git a/openpype/hosts/blender/plugins/publish/validate_mesh_has_uv.py b/openpype/hosts/blender/plugins/publish/validate_mesh_has_uv.py index 83146c641e..cee855671d 100644 --- a/openpype/hosts/blender/plugins/publish/validate_mesh_has_uv.py +++ b/openpype/hosts/blender/plugins/publish/validate_mesh_has_uv.py @@ -3,14 +3,15 @@ from typing import List import bpy import pyblish.api -import openpype.api + +from openpype.pipeline.publish import ValidateContentsOrder import openpype.hosts.blender.api.action class ValidateMeshHasUvs(pyblish.api.InstancePlugin): """Validate that the current mesh has UV's.""" - order = openpype.api.ValidateContentsOrder + order = ValidateContentsOrder hosts = ["blender"] families = ["model"] category = "geometry" diff --git a/openpype/hosts/blender/plugins/publish/validate_mesh_no_negative_scale.py b/openpype/hosts/blender/plugins/publish/validate_mesh_no_negative_scale.py index 329a8d80c3..45ac08811d 100644 --- a/openpype/hosts/blender/plugins/publish/validate_mesh_no_negative_scale.py +++ b/openpype/hosts/blender/plugins/publish/validate_mesh_no_negative_scale.py @@ -3,14 +3,15 @@ from typing import List import bpy import pyblish.api -import openpype.api + +from openpype.pipeline.publish import ValidateContentsOrder import openpype.hosts.blender.api.action class ValidateMeshNoNegativeScale(pyblish.api.Validator): """Ensure that meshes don't have a negative scale.""" - order = openpype.api.ValidateContentsOrder + order = ValidateContentsOrder hosts = ["blender"] families = ["model"] category = "geometry" diff --git a/openpype/hosts/blender/plugins/publish/validate_no_colons_in_name.py b/openpype/hosts/blender/plugins/publish/validate_no_colons_in_name.py index 3d7c5294f6..f5dc9fdd5c 100644 --- a/openpype/hosts/blender/plugins/publish/validate_no_colons_in_name.py +++ b/openpype/hosts/blender/plugins/publish/validate_no_colons_in_name.py @@ -3,7 +3,7 @@ from typing import List import bpy import pyblish.api -import openpype.api + import openpype.hosts.blender.api.action from openpype.pipeline.publish import ValidateContentsOrder diff --git a/openpype/hosts/blender/plugins/publish/validate_transform_zero.py b/openpype/hosts/blender/plugins/publish/validate_transform_zero.py index 249b14743b..742826d3d9 100644 --- a/openpype/hosts/blender/plugins/publish/validate_transform_zero.py +++ b/openpype/hosts/blender/plugins/publish/validate_transform_zero.py @@ -4,7 +4,7 @@ import mathutils import bpy import pyblish.api -import openpype.api + import openpype.hosts.blender.api.action from openpype.pipeline.publish import ValidateContentsOrder diff --git 
a/openpype/hosts/maya/api/lib.py b/openpype/hosts/maya/api/lib.py index 292b95da84..7e15a91eca 100644 --- a/openpype/hosts/maya/api/lib.py +++ b/openpype/hosts/maya/api/lib.py @@ -2459,182 +2459,120 @@ def bake_to_world_space(nodes, def load_capture_preset(data=None): + """Convert OpenPype Extract Playblast settings to `capture` arguments + + Input data is the settings from: + `project_settings/maya/publish/ExtractPlayblast/capture_preset` + + Args: + data (dict): Capture preset settings from OpenPype settings + + Returns: + dict: `capture.capture` compatible keyword arguments + + """ + import capture - preset = data - options = dict() + viewport_options = dict() + viewport2_options = dict() + camera_options = dict() - # CODEC - id = 'Codec' - for key in preset[id]: - options[str(key)] = preset[id][key] + # Straight key-value match from settings to capture arguments + options.update(data["Codec"]) + options.update(data["Generic"]) + options.update(data["Resolution"]) - # GENERIC - id = 'Generic' - for key in preset[id]: - options[str(key)] = preset[id][key] - - # RESOLUTION - id = 'Resolution' - options['height'] = preset[id]['height'] - options['width'] = preset[id]['width'] + camera_options.update(data['Camera Options']) + viewport_options.update(data["Renderer"]) # DISPLAY OPTIONS - id = 'Display Options' disp_options = {} - for key in preset[id]: + for key, value in data['Display Options'].items(): if key.startswith('background'): - disp_options[key] = preset['Display Options'][key] - if len(disp_options[key]) == 4: - disp_options[key][0] = (float(disp_options[key][0])/255) - disp_options[key][1] = (float(disp_options[key][1])/255) - disp_options[key][2] = (float(disp_options[key][2])/255) - disp_options[key].pop() + # Convert background, backgroundTop, backgroundBottom colors + if len(value) == 4: + # Ignore alpha + convert RGB to float + value = [ + float(value[0]) / 255, + float(value[1]) / 255, + float(value[2]) / 255 + ] + disp_options[key] = value else: disp_options['displayGradient'] = True options['display_options'] = disp_options - # VIEWPORT OPTIONS - temp_options = {} - id = 'Renderer' - for key in preset[id]: - temp_options[str(key)] = preset[id][key] + # Viewport Options has a mixture of Viewport2 Options and Viewport Options + # to pass along to capture. 
So we'll need to differentiate between the two + VIEWPORT2_OPTIONS = { + "textureMaxResolution", + "renderDepthOfField", + "ssaoEnable", + "ssaoSamples", + "ssaoAmount", + "ssaoRadius", + "ssaoFilterRadius", + "hwFogStart", + "hwFogEnd", + "hwFogAlpha", + "hwFogFalloff", + "hwFogColorR", + "hwFogColorG", + "hwFogColorB", + "hwFogDensity", + "motionBlurEnable", + "motionBlurSampleCount", + "motionBlurShutterOpenFraction", + "lineAAEnable" + } + for key, value in data['Viewport Options'].items(): - temp_options2 = {} - id = 'Viewport Options' - for key in preset[id]: + # There are some keys we want to ignore + if key in {"override_viewport_options", "high_quality"}: + continue + + # First handle special cases where we do value conversion to + # separate option values if key == 'textureMaxResolution': - if preset[id][key] > 0: - temp_options2['textureMaxResolution'] = preset[id][key] - temp_options2['enableTextureMaxRes'] = True - temp_options2['textureMaxResMode'] = 1 + viewport2_options['textureMaxResolution'] = value + if value > 0: + viewport2_options['enableTextureMaxRes'] = True + viewport2_options['textureMaxResMode'] = 1 else: - temp_options2['textureMaxResolution'] = preset[id][key] - temp_options2['enableTextureMaxRes'] = False - temp_options2['textureMaxResMode'] = 0 + viewport2_options['enableTextureMaxRes'] = False + viewport2_options['textureMaxResMode'] = 0 - if key == 'multiSample': - if preset[id][key] > 0: - temp_options2['multiSampleEnable'] = True - temp_options2['multiSampleCount'] = preset[id][key] - else: - temp_options2['multiSampleEnable'] = False - temp_options2['multiSampleCount'] = preset[id][key] + elif key == 'multiSample': + viewport2_options['multiSampleEnable'] = value > 0 + viewport2_options['multiSampleCount'] = value - if key == 'renderDepthOfField': - temp_options2['renderDepthOfField'] = preset[id][key] + elif key == 'alphaCut': + viewport2_options['transparencyAlgorithm'] = 5 + viewport2_options['transparencyQuality'] = 1 - if key == 'ssaoEnable': - if preset[id][key] is True: - temp_options2['ssaoEnable'] = True - else: - temp_options2['ssaoEnable'] = False + elif key == 'hwFogFalloff': + # Settings enum value string to integer + viewport2_options['hwFogFalloff'] = int(value) - if key == 'ssaoSamples': - temp_options2['ssaoSamples'] = preset[id][key] - - if key == 'ssaoAmount': - temp_options2['ssaoAmount'] = preset[id][key] - - if key == 'ssaoRadius': - temp_options2['ssaoRadius'] = preset[id][key] - - if key == 'hwFogDensity': - temp_options2['hwFogDensity'] = preset[id][key] - - if key == 'ssaoFilterRadius': - temp_options2['ssaoFilterRadius'] = preset[id][key] - - if key == 'alphaCut': - temp_options2['transparencyAlgorithm'] = 5 - temp_options2['transparencyQuality'] = 1 - - if key == 'headsUpDisplay': - temp_options['headsUpDisplay'] = True - - if key == 'fogging': - temp_options['fogging'] = preset[id][key] or False - - if key == 'hwFogStart': - temp_options2['hwFogStart'] = preset[id][key] - - if key == 'hwFogEnd': - temp_options2['hwFogEnd'] = preset[id][key] - - if key == 'hwFogAlpha': - temp_options2['hwFogAlpha'] = preset[id][key] - - if key == 'hwFogFalloff': - temp_options2['hwFogFalloff'] = int(preset[id][key]) - - if key == 'hwFogColorR': - temp_options2['hwFogColorR'] = preset[id][key] - - if key == 'hwFogColorG': - temp_options2['hwFogColorG'] = preset[id][key] - - if key == 'hwFogColorB': - temp_options2['hwFogColorB'] = preset[id][key] - - if key == 'motionBlurEnable': - if preset[id][key] is True: - temp_options2['motionBlurEnable'] 
= True - else: - temp_options2['motionBlurEnable'] = False - - if key == 'motionBlurSampleCount': - temp_options2['motionBlurSampleCount'] = preset[id][key] - - if key == 'motionBlurShutterOpenFraction': - temp_options2['motionBlurShutterOpenFraction'] = preset[id][key] - - if key == 'lineAAEnable': - if preset[id][key] is True: - temp_options2['lineAAEnable'] = True - else: - temp_options2['lineAAEnable'] = False + # Then handle Viewport 2.0 Options + elif key in VIEWPORT2_OPTIONS: + viewport2_options[key] = value + # Then assume remainder is Viewport Options else: - temp_options[str(key)] = preset[id][key] + viewport_options[key] = value - for key in ['override_viewport_options', - 'high_quality', - 'alphaCut', - 'gpuCacheDisplayFilter', - 'multiSample', - 'ssaoEnable', - 'ssaoSamples', - 'ssaoAmount', - 'ssaoFilterRadius', - 'ssaoRadius', - 'hwFogStart', - 'hwFogEnd', - 'hwFogAlpha', - 'hwFogFalloff', - 'hwFogColorR', - 'hwFogColorG', - 'hwFogColorB', - 'hwFogDensity', - 'textureMaxResolution', - 'motionBlurEnable', - 'motionBlurSampleCount', - 'motionBlurShutterOpenFraction', - 'lineAAEnable', - 'renderDepthOfField' - ]: - temp_options.pop(key, None) - - options['viewport_options'] = temp_options - options['viewport2_options'] = temp_options2 + options['viewport_options'] = viewport_options + options['viewport2_options'] = viewport2_options + options['camera_options'] = camera_options # use active sound track scene = capture.parse_active_scene() options['sound'] = scene['sound'] - # options['display_options'] = temp_options - return options diff --git a/openpype/hosts/maya/api/lib_renderproducts.py b/openpype/hosts/maya/api/lib_renderproducts.py index 1e883ea43f..1ab771cfe6 100644 --- a/openpype/hosts/maya/api/lib_renderproducts.py +++ b/openpype/hosts/maya/api/lib_renderproducts.py @@ -80,7 +80,7 @@ IMAGE_PREFIXES = { "mayahardware2": "defaultRenderGlobals.imageFilePrefix" } -RENDERMAN_IMAGE_DIR = "maya//" +RENDERMAN_IMAGE_DIR = "/" def has_tokens(string, tokens): diff --git a/openpype/hosts/maya/api/lib_rendersettings.py b/openpype/hosts/maya/api/lib_rendersettings.py index b0a283da5a..2b996702c3 100644 --- a/openpype/hosts/maya/api/lib_rendersettings.py +++ b/openpype/hosts/maya/api/lib_rendersettings.py @@ -29,7 +29,7 @@ class RenderSettings(object): _image_prefixes = { 'vray': get_current_project_settings()["maya"]["RenderSettings"]["vray_renderer"]["image_prefix"], # noqa 'arnold': get_current_project_settings()["maya"]["RenderSettings"]["arnold_renderer"]["image_prefix"], # noqa - 'renderman': 'maya///{aov_separator}', + 'renderman': '//{aov_separator}', 'redshift': get_current_project_settings()["maya"]["RenderSettings"]["redshift_renderer"]["image_prefix"] # noqa } diff --git a/openpype/hosts/maya/plugins/publish/extract_layout.py b/openpype/hosts/maya/plugins/publish/extract_layout.py index 0f499b09b1..a801d99f42 100644 --- a/openpype/hosts/maya/plugins/publish/extract_layout.py +++ b/openpype/hosts/maya/plugins/publish/extract_layout.py @@ -34,14 +34,15 @@ class ExtractLayout(publish.Extractor): for asset in cmds.sets(str(instance), query=True): # Find the container grp_name = asset.split(':')[0] - containers = cmds.ls(f"{grp_name}*_CON") + containers = cmds.ls("{}*_CON".format(grp_name)) assert len(containers) == 1, \ - f"More than one container found for {asset}" + "More than one container found for {}".format(asset) container = containers[0] - representation_id = cmds.getAttr(f"{container}.representation") + representation_id = cmds.getAttr( + 
"{}.representation".format(container)) representation = get_representation_by_id( project_name, @@ -56,7 +57,8 @@ class ExtractLayout(publish.Extractor): json_element = { "family": family, - "instance_name": cmds.getAttr(f"{container}.name"), + "instance_name": cmds.getAttr( + "{}.namespace".format(container)), "representation": str(representation_id), "version": str(version_id) } diff --git a/openpype/hosts/maya/plugins/publish/extract_playblast.py b/openpype/hosts/maya/plugins/publish/extract_playblast.py index 8df7edc3f4..1b5b8d34e4 100644 --- a/openpype/hosts/maya/plugins/publish/extract_playblast.py +++ b/openpype/hosts/maya/plugins/publish/extract_playblast.py @@ -77,8 +77,10 @@ class ExtractPlayblast(publish.Extractor): preset['height'] = asset_height preset['start_frame'] = start preset['end_frame'] = end - camera_option = preset.get("camera_option", {}) - camera_option["depthOfField"] = cmds.getAttr( + + # Enforce persisting camera depth of field + camera_options = preset.setdefault("camera_options", {}) + camera_options["depthOfField"] = cmds.getAttr( "{0}.depthOfField".format(camera)) stagingdir = self.staging_dir(instance) diff --git a/openpype/hosts/maya/plugins/publish/extract_thumbnail.py b/openpype/hosts/maya/plugins/publish/extract_thumbnail.py index 854301ea48..712159c2be 100644 --- a/openpype/hosts/maya/plugins/publish/extract_thumbnail.py +++ b/openpype/hosts/maya/plugins/publish/extract_thumbnail.py @@ -1,5 +1,6 @@ import os import glob +import tempfile import capture @@ -81,9 +82,17 @@ class ExtractThumbnail(publish.Extractor): elif asset_width and asset_height: preset['width'] = asset_width preset['height'] = asset_height - stagingDir = self.staging_dir(instance) + + # Create temp directory for thumbnail + # - this is to avoid "override" of source file + dst_staging = tempfile.mkdtemp(prefix="pyblish_tmp_") + self.log.debug( + "Create temp directory {} for thumbnail".format(dst_staging) + ) + # Store new staging to cleanup paths + instance.context.data["cleanupFullPaths"].append(dst_staging) filename = "{0}".format(instance.name) - path = os.path.join(stagingDir, filename) + path = os.path.join(dst_staging, filename) self.log.info("Outputting images to %s" % path) @@ -137,7 +146,7 @@ class ExtractThumbnail(publish.Extractor): 'name': 'thumbnail', 'ext': 'jpg', 'files': thumbnail, - "stagingDir": stagingDir, + "stagingDir": dst_staging, "thumbnail": True } instance.data["representations"].append(representation) diff --git a/openpype/hosts/maya/plugins/publish/submit_maya_muster.py b/openpype/hosts/maya/plugins/publish/submit_maya_muster.py index df06220fd3..1a6463fb9d 100644 --- a/openpype/hosts/maya/plugins/publish/submit_maya_muster.py +++ b/openpype/hosts/maya/plugins/publish/submit_maya_muster.py @@ -118,7 +118,7 @@ def preview_fname(folder, scene, layer, padding, ext): """ # Following hardcoded "/_/" - output = "maya/{scene}/{layer}/{layer}.{number}.{ext}".format( + output = "{scene}/{layer}/{layer}.{number}.{ext}".format( scene=scene, layer=layer, number="#" * padding, diff --git a/openpype/hosts/maya/plugins/publish/validate_rendersettings.py b/openpype/hosts/maya/plugins/publish/validate_rendersettings.py index ca44e98605..94e2633593 100644 --- a/openpype/hosts/maya/plugins/publish/validate_rendersettings.py +++ b/openpype/hosts/maya/plugins/publish/validate_rendersettings.py @@ -22,10 +22,10 @@ def get_redshift_image_format_labels(): class ValidateRenderSettings(pyblish.api.InstancePlugin): """Validates the global render settings - * File Name Prefix must 
start with: `maya/` + * File Name Prefix must start with: `` all other token are customizable but sane values for Arnold are: - `maya///_` + `//_` token is supported also, useful for multiple renderable cameras per render layer. @@ -64,12 +64,12 @@ class ValidateRenderSettings(pyblish.api.InstancePlugin): } ImagePrefixTokens = { - 'mentalray': 'maya///{aov_separator}', # noqa: E501 - 'arnold': 'maya///{aov_separator}', # noqa: E501 - 'redshift': 'maya///', - 'vray': 'maya///', + 'mentalray': '//{aov_separator}', # noqa: E501 + 'arnold': '//{aov_separator}', # noqa: E501 + 'redshift': '//', + 'vray': '//', 'renderman': '{aov_separator}..', - 'mayahardware2': 'maya///', + 'mayahardware2': '//', } _aov_chars = { @@ -80,7 +80,7 @@ class ValidateRenderSettings(pyblish.api.InstancePlugin): redshift_AOV_prefix = "/{aov_separator}" # noqa: E501 - renderman_dir_prefix = "maya//" + renderman_dir_prefix = "/" R_AOV_TOKEN = re.compile( r'%a||', re.IGNORECASE) @@ -90,8 +90,8 @@ class ValidateRenderSettings(pyblish.api.InstancePlugin): R_SCENE_TOKEN = re.compile(r'%s|', re.IGNORECASE) DEFAULT_PADDING = 4 - VRAY_PREFIX = "maya///" - DEFAULT_PREFIX = "maya///_" + VRAY_PREFIX = "//" + DEFAULT_PREFIX = "//_" def process(self, instance): @@ -123,7 +123,6 @@ class ValidateRenderSettings(pyblish.api.InstancePlugin): prefix = prefix.replace( "{aov_separator}", instance.data.get("aovSeparator", "_")) - required_prefix = "maya/" default_prefix = cls.ImagePrefixTokens[renderer] if not anim_override: @@ -131,15 +130,6 @@ class ValidateRenderSettings(pyblish.api.InstancePlugin): cls.log.error("Animation needs to be enabled. Use the same " "frame for start and end to render single frame") - if renderer != "renderman" and not prefix.lower().startswith( - required_prefix): - invalid = True - cls.log.error( - ("Wrong image prefix [ {} ] " - " - doesn't start with: '{}'").format( - prefix, required_prefix) - ) - if not re.search(cls.R_LAYER_TOKEN, prefix): invalid = True cls.log.error("Wrong image prefix [ {} ] - " diff --git a/openpype/hosts/resolve/api/pipeline.py b/openpype/hosts/resolve/api/pipeline.py index 1c8d9dc01c..899cb825bb 100644 --- a/openpype/hosts/resolve/api/pipeline.py +++ b/openpype/hosts/resolve/api/pipeline.py @@ -244,7 +244,7 @@ def on_pyblish_instance_toggled(instance, old_value, new_value): log.info("instance toggle: {}, old_value: {}, new_value:{} ".format( instance, old_value, new_value)) - from openpype.hosts.resolve import ( + from openpype.hosts.resolve.api import ( set_publish_attribute ) diff --git a/openpype/hosts/resolve/api/plugin.py b/openpype/hosts/resolve/api/plugin.py index 3995077d21..0ed7beee59 100644 --- a/openpype/hosts/resolve/api/plugin.py +++ b/openpype/hosts/resolve/api/plugin.py @@ -4,13 +4,15 @@ import uuid import qargparse from Qt import QtWidgets, QtCore +from openpype.settings import get_current_project_settings +from openpype.pipeline.context_tools import get_current_project_asset from openpype.pipeline import ( LegacyCreator, LoaderPlugin, ) -from openpype.pipeline.context_tools import get_current_project_asset -from openpype.hosts import resolve + from . 
import lib +from .menu import load_stylesheet class CreatorWidget(QtWidgets.QDialog): @@ -86,7 +88,7 @@ class CreatorWidget(QtWidgets.QDialog): ok_btn.clicked.connect(self._on_ok_clicked) cancel_btn.clicked.connect(self._on_cancel_clicked) - stylesheet = resolve.api.menu.load_stylesheet() + stylesheet = load_stylesheet() self.setStyleSheet(stylesheet) def _on_ok_clicked(self): @@ -438,7 +440,7 @@ class ClipLoader: source_in = int(_clip_property("Start")) source_out = int(_clip_property("End")) - resolve.swap_clips( + lib.swap_clips( timeline_item, media_pool_item, source_in, @@ -504,7 +506,7 @@ class Creator(LegacyCreator): def __init__(self, *args, **kwargs): super(Creator, self).__init__(*args, **kwargs) - from openpype.settings import get_current_project_settings + resolve_p_settings = get_current_project_settings().get("resolve") self.presets = {} if resolve_p_settings: @@ -512,13 +514,13 @@ class Creator(LegacyCreator): self.__class__.__name__, {}) # adding basic current context resolve objects - self.project = resolve.get_current_project() - self.timeline = resolve.get_current_timeline() + self.project = lib.get_current_project() + self.timeline = lib.get_current_timeline() if (self.options or {}).get("useSelection"): - self.selected = resolve.get_current_timeline_items(filter=True) + self.selected = lib.get_current_timeline_items(filter=True) else: - self.selected = resolve.get_current_timeline_items(filter=False) + self.selected = lib.get_current_timeline_items(filter=False) self.widget = CreatorWidget diff --git a/openpype/hosts/traypublisher/plugins/publish/collect_movie_batch.py b/openpype/hosts/traypublisher/plugins/publish/collect_movie_batch.py index f37e04d1c9..3d93e2c927 100644 --- a/openpype/hosts/traypublisher/plugins/publish/collect_movie_batch.py +++ b/openpype/hosts/traypublisher/plugins/publish/collect_movie_batch.py @@ -35,12 +35,12 @@ class CollectMovieBatch( "stagingDir": os.path.dirname(file_url), "tags": [] } + instance.data["representations"].append(repre) if creator_attributes["add_review_family"]: repre["tags"].append("review") instance.data["families"].append("review") - - instance.data["representations"].append(repre) + instance.data["thumbnailSource"] = file_url instance.data["source"] = file_url diff --git a/openpype/hosts/traypublisher/plugins/publish/collect_simple_instances.py b/openpype/hosts/traypublisher/plugins/publish/collect_simple_instances.py index c0ae694c3c..d91694ef69 100644 --- a/openpype/hosts/traypublisher/plugins/publish/collect_simple_instances.py +++ b/openpype/hosts/traypublisher/plugins/publish/collect_simple_instances.py @@ -1,5 +1,6 @@ import os import tempfile +from pathlib import Path import clique import pyblish.api @@ -72,6 +73,8 @@ class CollectSettingsSimpleInstances(pyblish.api.InstancePlugin): instance.data["source"] = source instance.data["sourceFilepaths"] = list(set(source_filepaths)) + instance.data["originalBasename"] = Path( + instance.data["sourceFilepaths"][0]).stem self.log.debug( ( @@ -148,8 +151,11 @@ class CollectSettingsSimpleInstances(pyblish.api.InstancePlugin): )) return + item_dir = review_file_item["directory"] + first_filepath = os.path.join(item_dir, filenames[0]) + filepaths = { - os.path.join(review_file_item["directory"], filename) + os.path.join(item_dir, filename) for filename in filenames } source_filepaths.extend(filepaths) @@ -176,6 +182,8 @@ class CollectSettingsSimpleInstances(pyblish.api.InstancePlugin): if "review" not in instance.data["families"]: instance.data["families"].append("review") + 
instance.data["thumbnailSource"] = first_filepath + review_representation["tags"].append("review") self.log.debug("Representation {} was marked for review. {}".format( review_representation["name"], review_path diff --git a/openpype/hosts/traypublisher/plugins/publish/extract_thumbnail.py b/openpype/hosts/traypublisher/plugins/publish/extract_thumbnail.py new file mode 100644 index 0000000000..7781bb7b3e --- /dev/null +++ b/openpype/hosts/traypublisher/plugins/publish/extract_thumbnail.py @@ -0,0 +1,173 @@ +"""Create instance thumbnail from "thumbnailSource" on 'instance.data'. + +Output is a new representation named "thumbnail" on the instance. If the +instance already has such a representation, the process is skipped. + +This way a collector can point to a file from which the thumbnail should be +generated. This is a different approach than the global thumbnail plugin, +which has specific logic that does not support this source-based workflow. + +Todos: + No size handling. Size of input is used for output thumbnail which can + cause issues. +""" + +import os +import tempfile + +import pyblish.api +from openpype.lib import ( + get_ffmpeg_tool_path, + get_oiio_tools_path, + is_oiio_supported, + + run_subprocess, +) + + +class ExtractThumbnailFromSource(pyblish.api.InstancePlugin): + """Create jpg thumbnail for instance based on 'thumbnailSource'. + + Thumbnail source must be a single image or video filepath. + """ + + label = "Extract Thumbnail (from source)" + # Before 'ExtractThumbnail' in global plugins + order = pyblish.api.ExtractorOrder - 0.00001 + hosts = ["traypublisher"] + + def process(self, instance): + subset_name = instance.data["subset"] + self.log.info( + "Processing instance with subset name {}".format(subset_name) + ) + + thumbnail_source = instance.data.get("thumbnailSource") + if not thumbnail_source: + self.log.debug("Thumbnail source not filled. Skipping.") + return + + elif not os.path.exists(thumbnail_source): + self.log.debug( + "Thumbnail source file was not found {}. Skipping.".format( + thumbnail_source)) + return + + # Check if a thumbnail was already created + if self._already_has_thumbnail(instance): + self.log.info("Thumbnail representation already present.") + return + + # Create temp directory for thumbnail + # - this is to avoid "override" of source file + dst_staging = tempfile.mkdtemp(prefix="pyblish_tmp_") + self.log.debug( + "Create temp directory {} for thumbnail".format(dst_staging) + ) + # Store new staging to cleanup paths + instance.context.data["cleanupFullPaths"].append(dst_staging) + + thumbnail_created = False + oiio_supported = is_oiio_supported() + + self.log.info("Thumbnail source: {}".format(thumbnail_source)) + src_basename = os.path.basename(thumbnail_source) + dst_filename = os.path.splitext(src_basename)[0] + ".jpg" + full_output_path = os.path.join(dst_staging, dst_filename) + + if oiio_supported: + self.log.info("Trying to convert with OIIO") + # If the input can be read by OIIO then use the OIIO method for + # conversion, otherwise use ffmpeg + thumbnail_created = self.create_thumbnail_oiio( + thumbnail_source, full_output_path + ) + + # Try to use FFMPEG if OIIO is not supported or for cases when + # oiiotool isn't available + if not thumbnail_created: + if oiio_supported: + self.log.info(( + "Converting with FFMPEG because input" + " can't be read by OIIO." 
+ )) + + thumbnail_created = self.create_thumbnail_ffmpeg( + thumbnail_source, full_output_path + ) + + # Skip representation and try next one if it wasn't created + if not thumbnail_created: + self.log.warning("Thumbnail has not been created.") + return + + new_repre = { + "name": "thumbnail", + "ext": "jpg", + "files": dst_filename, + "stagingDir": dst_staging, + "thumbnail": True, + "tags": ["thumbnail"] + } + + # adding representation + self.log.debug( + "Adding thumbnail representation: {}".format(new_repre) + ) + instance.data["representations"].append(new_repre) + + def _already_has_thumbnail(self, instance): + if "representations" not in instance.data: + self.log.warning( + "Instance does not have 'representations' key filled" + ) + instance.data["representations"] = [] + + for repre in instance.data["representations"]: + if repre["name"] == "thumbnail": + return True + return False + + def create_thumbnail_oiio(self, src_path, dst_path): + self.log.info("outputting {}".format(dst_path)) + oiio_tool_path = get_oiio_tools_path() + oiio_cmd = [ + oiio_tool_path, + "-a", src_path, + "-o", dst_path + ] + self.log.info("Running: {}".format(" ".join(oiio_cmd))) + try: + run_subprocess(oiio_cmd, logger=self.log) + return True + except Exception: + self.log.warning( + "Failed to create thumbnail using oiiotool", + exc_info=True + ) + return False + + def create_thumbnail_ffmpeg(self, src_path, dst_path): + ffmpeg_path = get_ffmpeg_tool_path("ffmpeg") + + max_int = str(2147483647) + ffmpeg_cmd = [ + ffmpeg_path, + "-y", + "-analyzeduration", max_int, + "-probesize", max_int, + "-i", src_path, + "-vframes", "1", + dst_path + ] + + self.log.info("Running: {}".format(" ".join(ffmpeg_cmd))) + try: + run_subprocess(ffmpeg_cmd, logger=self.log) + return True + except Exception: + self.log.warning( + "Failed to create thumbnail using ffmpeg", + exc_info=True + ) + return False diff --git a/openpype/hosts/unreal/plugins/load/load_alembic_staticmesh.py b/openpype/hosts/unreal/plugins/load/load_alembic_staticmesh.py index 50e498dbb0..a5b9cbd1fc 100644 --- a/openpype/hosts/unreal/plugins/load/load_alembic_staticmesh.py +++ b/openpype/hosts/unreal/plugins/load/load_alembic_staticmesh.py @@ -20,15 +20,11 @@ class StaticMeshAlembicLoader(plugin.Loader): icon = "cube" color = "orange" - def get_task(self, filename, asset_dir, asset_name, replace): + @staticmethod + def get_task(filename, asset_dir, asset_name, replace, default_conversion): task = unreal.AssetImportTask() options = unreal.AbcImportSettings() sm_settings = unreal.AbcStaticMeshSettings() - conversion_settings = unreal.AbcConversionSettings( - preset=unreal.AbcConversionPreset.CUSTOM, - flip_u=False, flip_v=False, - rotation=[0.0, 0.0, 0.0], - scale=[1.0, 1.0, 1.0]) task.set_editor_property('filename', filename) task.set_editor_property('destination_path', asset_dir) @@ -44,13 +40,20 @@ class StaticMeshAlembicLoader(plugin.Loader): sm_settings.set_editor_property('merge_meshes', True) + if not default_conversion: + conversion_settings = unreal.AbcConversionSettings( + preset=unreal.AbcConversionPreset.CUSTOM, + flip_u=False, flip_v=False, + rotation=[0.0, 0.0, 0.0], + scale=[1.0, 1.0, 1.0]) + options.conversion_settings = conversion_settings + options.static_mesh_settings = sm_settings - options.conversion_settings = conversion_settings task.options = options return task - def load(self, context, name, namespace, data): + def load(self, context, name, namespace, options): """Load and containerise representation into Content Browser. 
This is two step process. First, import FBX to temporary path and @@ -82,6 +85,10 @@ class StaticMeshAlembicLoader(plugin.Loader): asset_name = "{}".format(name) version = context.get('version').get('name') + default_conversion = False + if options.get("default_conversion"): + default_conversion = options.get("default_conversion") + tools = unreal.AssetToolsHelpers().get_asset_tools() asset_dir, container_name = tools.create_unique_asset_name( f"{root}/{asset}/{name}_v{version:03d}", suffix="") @@ -91,7 +98,8 @@ class StaticMeshAlembicLoader(plugin.Loader): if not unreal.EditorAssetLibrary.does_directory_exist(asset_dir): unreal.EditorAssetLibrary.make_directory(asset_dir) - task = self.get_task(self.fname, asset_dir, asset_name, False) + task = self.get_task( + self.fname, asset_dir, asset_name, False, default_conversion) unreal.AssetToolsHelpers.get_asset_tools().import_asset_tasks([task]) # noqa: E501 diff --git a/openpype/hosts/unreal/plugins/load/load_layout_existing.py b/openpype/hosts/unreal/plugins/load/load_layout_existing.py new file mode 100644 index 0000000000..3ce99f8ef6 --- /dev/null +++ b/openpype/hosts/unreal/plugins/load/load_layout_existing.py @@ -0,0 +1,418 @@ +import json +from pathlib import Path + +import unreal +from unreal import EditorLevelLibrary + +from bson.objectid import ObjectId + +from openpype import pipeline +from openpype.pipeline import ( + discover_loader_plugins, + loaders_from_representation, + load_container, + get_representation_path, + AVALON_CONTAINER_ID, + legacy_io, +) +from openpype.api import get_current_project_settings +from openpype.hosts.unreal.api import plugin +from openpype.hosts.unreal.api import pipeline as upipeline + + +class ExistingLayoutLoader(plugin.Loader): + """ + Load Layout for an existing scene, and match the existing assets. 
+ """ + + families = ["layout"] + representations = ["json"] + + label = "Load Layout on Existing Scene" + icon = "code-fork" + color = "orange" + ASSET_ROOT = "/Game/OpenPype" + + @staticmethod + def _create_container( + asset_name, asset_dir, asset, representation, parent, family + ): + container_name = f"{asset_name}_CON" + + container = None + if not unreal.EditorAssetLibrary.does_asset_exist( + f"{asset_dir}/{container_name}" + ): + container = upipeline.create_container(container_name, asset_dir) + else: + ar = unreal.AssetRegistryHelpers.get_asset_registry() + obj = ar.get_asset_by_object_path( + f"{asset_dir}/{container_name}.{container_name}") + container = obj.get_asset() + + data = { + "schema": "openpype:container-2.0", + "id": AVALON_CONTAINER_ID, + "asset": asset, + "namespace": asset_dir, + "container_name": container_name, + "asset_name": asset_name, + # "loader": str(self.__class__.__name__), + "representation": representation, + "parent": parent, + "family": family + } + + upipeline.imprint( + "{}/{}".format(asset_dir, container_name), data) + + return container.get_path_name() + + @staticmethod + def _get_current_level(): + ue_version = unreal.SystemLibrary.get_engine_version().split('.') + ue_major = ue_version[0] + + if ue_major == '4': + return EditorLevelLibrary.get_editor_world() + elif ue_major == '5': + return unreal.LevelEditorSubsystem().get_current_level() + + raise NotImplementedError( + f"Unreal version {ue_major} not supported") + + def _get_transform(self, ext, import_data, lasset): + conversion = unreal.Matrix.IDENTITY.transform() + fbx_tuning = unreal.Matrix.IDENTITY.transform() + + basis = unreal.Matrix( + lasset.get('basis')[0], + lasset.get('basis')[1], + lasset.get('basis')[2], + lasset.get('basis')[3] + ).transform() + transform = unreal.Matrix( + lasset.get('transform_matrix')[0], + lasset.get('transform_matrix')[1], + lasset.get('transform_matrix')[2], + lasset.get('transform_matrix')[3] + ).transform() + + # Check for the conversion settings. We cannot access + # the alembic conversion settings, so we assume that + # the maya ones have been applied. + if ext == '.fbx': + loc = import_data.import_translation + rot = import_data.import_rotation.to_vector() + scale = import_data.import_uniform_scale + conversion = unreal.Transform( + location=[loc.x, loc.y, loc.z], + rotation=[rot.x, rot.y, rot.z], + scale=[-scale, scale, scale] + ) + fbx_tuning = unreal.Transform( + rotation=[180.0, 0.0, 90.0], + scale=[1.0, 1.0, 1.0] + ) + elif ext == '.abc': + # This is the standard conversion settings for + # alembic files from Maya. 
+ conversion = unreal.Transform( + location=[0.0, 0.0, 0.0], + rotation=[0.0, 0.0, 0.0], + scale=[1.0, -1.0, 1.0] + ) + + new_transform = (basis.inverse() * transform * basis) + return fbx_tuning * conversion.inverse() * new_transform + + def _spawn_actor(self, obj, lasset): + actor = EditorLevelLibrary.spawn_actor_from_object( + obj, unreal.Vector(0.0, 0.0, 0.0) + ) + + actor.set_actor_label(lasset.get('instance_name')) + smc = actor.get_editor_property('static_mesh_component') + mesh = smc.get_editor_property('static_mesh') + import_data = mesh.get_editor_property('asset_import_data') + filename = import_data.get_first_filename() + path = Path(filename) + + transform = self._get_transform( + path.suffix, import_data, lasset) + + actor.set_actor_transform(transform, False, True) + + @staticmethod + def _get_fbx_loader(loaders, family): + name = "" + if family == 'rig': + name = "SkeletalMeshFBXLoader" + elif family == 'model' or family == 'staticMesh': + name = "StaticMeshFBXLoader" + elif family == 'camera': + name = "CameraLoader" + + if name == "": + return None + + for loader in loaders: + if loader.__name__ == name: + return loader + + return None + + @staticmethod + def _get_abc_loader(loaders, family): + name = "" + if family == 'rig': + name = "SkeletalMeshAlembicLoader" + elif family == 'model': + name = "StaticMeshAlembicLoader" + + if name == "": + return None + + for loader in loaders: + if loader.__name__ == name: + return loader + + return None + + def _load_asset(self, representation, version, instance_name, family): + valid_formats = ['fbx', 'abc'] + + repr_data = legacy_io.find_one({ + "type": "representation", + "parent": ObjectId(version), + "name": {"$in": valid_formats} + }) + repr_format = repr_data.get('name') + + all_loaders = discover_loader_plugins() + loaders = loaders_from_representation( + all_loaders, representation) + + loader = None + + if repr_format == 'fbx': + loader = self._get_fbx_loader(loaders, family) + elif repr_format == 'abc': + loader = self._get_abc_loader(loaders, family) + + if not loader: + self.log.error(f"No valid loader found for {representation}") + return [] + + # This option is necessary to avoid importing the assets with a + # different conversion compared to the other assets. For ABC files, + # it is in fact impossible to access the conversion settings. So, + # we must assume that the Maya conversion settings have been applied. + options = { + "default_conversion": True + } + + assets = load_container( + loader, + representation, + namespace=instance_name, + options=options + ) + + return assets + + def _process(self, lib_path): + data = get_current_project_settings() + delete_unmatched = data["unreal"]["delete_unmatched_assets"] + + ar = unreal.AssetRegistryHelpers.get_asset_registry() + + actors = EditorLevelLibrary.get_all_level_actors() + + with open(lib_path, "r") as fp: + data = json.load(fp) + + layout_data = [] + + # Get all the representations in the JSON from the database. 
for element in data: + if element.get('representation'): + layout_data.append(( + pipeline.legacy_io.find_one({ + "_id": ObjectId(element.get('representation')) + }), + element + )) + + containers = [] + actors_matched = [] + + for (repr_data, lasset) in layout_data: + if not repr_data: + raise AssertionError("Representation not found") + if (not repr_data.get('data') or + not repr_data.get('data').get('path')): + raise AssertionError("Representation does not have path") + if not repr_data.get('context'): + raise AssertionError("Representation does not have context") + + # For every actor in the scene, check if it has a representation in + # those we got from the JSON. If so, create a container for it. + # Otherwise, remove it from the scene. + found = False + + for actor in actors: + if not actor.get_class().get_name() == 'StaticMeshActor': + continue + if actor in actors_matched: + continue + + # Get the original path of the file from which the asset has + # been imported. + smc = actor.get_editor_property('static_mesh_component') + mesh = smc.get_editor_property('static_mesh') + import_data = mesh.get_editor_property('asset_import_data') + filename = import_data.get_first_filename() + path = Path(filename) + + if (not path.name or + path.name not in repr_data.get('data').get('path')): + continue + + actor.set_actor_label(lasset.get('instance_name')) + + mesh_path = Path(mesh.get_path_name()).parent.as_posix() + + # Create the container for the asset. + asset = repr_data.get('context').get('asset') + subset = repr_data.get('context').get('subset') + container = self._create_container( + f"{asset}_{subset}", mesh_path, asset, + repr_data.get('_id'), repr_data.get('parent'), + repr_data.get('context').get('family') + ) + containers.append(container) + + # Set the transform for the actor. + transform = self._get_transform( + path.suffix, import_data, lasset) + actor.set_actor_transform(transform, False, True) + + actors_matched.append(actor) + found = True + break + + # If an actor has not been found for this representation, + # we check if it has been loaded already by checking all the + # loaded containers. If so, we add it to the scene. Otherwise, + # we load it. + if found: + continue + + all_containers = upipeline.ls() + + loaded = False + + for container in all_containers: + repr = container.get('representation') + + if not repr == str(repr_data.get('_id')): + continue + + asset_dir = container.get('namespace') + + filter = unreal.ARFilter( + class_names=["StaticMesh"], + package_paths=[asset_dir], + recursive_paths=False) + assets = ar.get_assets(filter) + + for asset in assets: + obj = asset.get_asset() + self._spawn_actor(obj, lasset) + + loaded = True + break + + # If the asset has not been loaded yet, we load it. + if loaded: + continue + + assets = self._load_asset( + lasset.get('representation'), + lasset.get('version'), + lasset.get('instance_name'), + lasset.get('family') + ) + + for asset in assets: + obj = ar.get_asset_by_object_path(asset).get_asset() + if not obj.get_class().get_name() == 'StaticMesh': + continue + self._spawn_actor(obj, lasset) + + break + + # Check if an actor was not matched to a representation. + # If so, remove it from the scene. 
+ for actor in actors: + if not actor.get_class().get_name() == 'StaticMeshActor': + continue + if actor not in actors_matched: + self.log.warning(f"Actor {actor.get_name()} not matched.") + if delete_unmatched: + EditorLevelLibrary.destroy_actor(actor) + + return containers + + def load(self, context, name, namespace, options): + print("Loading Layout and Match Assets") + + asset = context.get('asset').get('name') + asset_name = f"{asset}_{name}" if asset else name + container_name = f"{asset}_{name}_CON" + + curr_level = self._get_current_level() + + if not curr_level: + raise AssertionError("Current level not saved") + + containers = self._process(self.fname) + + curr_level_path = Path( + curr_level.get_outer().get_path_name()).parent.as_posix() + + if not unreal.EditorAssetLibrary.does_asset_exist( + f"{curr_level_path}/{container_name}" + ): + upipeline.create_container( + container=container_name, path=curr_level_path) + + data = { + "schema": "openpype:container-2.0", + "id": AVALON_CONTAINER_ID, + "asset": asset, + "namespace": curr_level_path, + "container_name": container_name, + "asset_name": asset_name, + "loader": str(self.__class__.__name__), + "representation": context["representation"]["_id"], + "parent": context["representation"]["parent"], + "family": context["representation"]["context"]["family"], + "loaded_assets": containers + } + upipeline.imprint(f"{curr_level_path}/{container_name}", data) + + def update(self, container, representation): + asset_dir = container.get('namespace') + + source_path = get_representation_path(representation) + containers = self._process(source_path) + + data = { + "representation": str(representation["_id"]), + "parent": str(representation["parent"]), + "loaded_assets": containers + } + upipeline.imprint( + "{}/{}".format(asset_dir, container.get('container_name')), data) diff --git a/openpype/lib/__init__.py b/openpype/lib/__init__.py index 17aafc3e8b..a64b7c2911 100644 --- a/openpype/lib/__init__.py +++ b/openpype/lib/__init__.py @@ -203,19 +203,6 @@ from .path_tools import ( get_project_basic_paths, ) -from .editorial import ( - is_overlapping_otio_ranges, - otio_range_to_frame_range, - otio_range_with_handles, - get_media_range_with_retimes, - convert_to_padded_path, - trim_media_range, - range_from_frames, - frames_to_secons, - frames_to_timecode, - make_sequence_collection -) - from .openpype_version import ( op_version_control_available, get_openpype_version, @@ -383,16 +370,6 @@ __all__ = [ "validate_mongo_connection", "OpenPypeMongoConnection", - "is_overlapping_otio_ranges", - "otio_range_with_handles", - "convert_to_padded_path", - "otio_range_to_frame_range", - "get_media_range_with_retimes", - "trim_media_range", - "range_from_frames", - "frames_to_secons", - "frames_to_timecode", - "make_sequence_collection", "create_project_folders", "create_workdir_extra_folders", "get_project_basic_paths", diff --git a/openpype/lib/abstract_collect_render.py b/openpype/lib/abstract_collect_render.py deleted file mode 100644 index e4ff87aa0f..0000000000 --- a/openpype/lib/abstract_collect_render.py +++ /dev/null @@ -1,33 +0,0 @@ -# -*- coding: utf-8 -*- -"""Content was moved to 'openpype.pipeline.publish.abstract_collect_render'. - -Please change your imports as soon as possible. 
- -File will be probably removed in OpenPype 3.14.* -""" - -import warnings -from openpype.pipeline.publish import AbstractCollectRender, RenderInstance - - -class CollectRenderDeprecated(DeprecationWarning): - pass - - -warnings.simplefilter("always", CollectRenderDeprecated) -warnings.warn( - ( - "Content of 'abstract_collect_render' was moved." - "\nUsing deprecated source of 'abstract_collect_render'. Content was" - " move to 'openpype.pipeline.publish.abstract_collect_render'." - " Please change your imports as soon as possible." - ), - category=CollectRenderDeprecated, - stacklevel=4 -) - - -__all__ = ( - "AbstractCollectRender", - "RenderInstance" -) diff --git a/openpype/lib/abstract_expected_files.py b/openpype/lib/abstract_expected_files.py deleted file mode 100644 index f24d844fe5..0000000000 --- a/openpype/lib/abstract_expected_files.py +++ /dev/null @@ -1,32 +0,0 @@ -# -*- coding: utf-8 -*- -"""Content was moved to 'openpype.pipeline.publish.abstract_expected_files'. - -Please change your imports as soon as possible. - -File will be probably removed in OpenPype 3.14.* -""" - -import warnings -from openpype.pipeline.publish import ExpectedFiles - - -class ExpectedFilesDeprecated(DeprecationWarning): - pass - - -warnings.simplefilter("always", ExpectedFilesDeprecated) -warnings.warn( - ( - "Content of 'abstract_expected_files' was moved." - "\nUsing deprecated source of 'abstract_expected_files'. Content was" - " move to 'openpype.pipeline.publish.abstract_expected_files'." - " Please change your imports as soon as possible." - ), - category=ExpectedFilesDeprecated, - stacklevel=4 -) - - -__all__ = ( - "ExpectedFiles", -) diff --git a/openpype/lib/abstract_metaplugins.py b/openpype/lib/abstract_metaplugins.py deleted file mode 100644 index 346b5d86b3..0000000000 --- a/openpype/lib/abstract_metaplugins.py +++ /dev/null @@ -1,35 +0,0 @@ -"""Content was moved to 'openpype.pipeline.publish.publish_plugins'. - -Please change your imports as soon as possible. - -File will be probably removed in OpenPype 3.14.* -""" - -import warnings -from openpype.pipeline.publish import ( - AbstractMetaInstancePlugin, - AbstractMetaContextPlugin -) - - -class MetaPluginsDeprecated(DeprecationWarning): - pass - - -warnings.simplefilter("always", MetaPluginsDeprecated) -warnings.warn( - ( - "Content of 'abstract_metaplugins' was moved." - "\nUsing deprecated source of 'abstract_metaplugins'. Content was" - " moved to 'openpype.pipeline.publish.publish_plugins'." - " Please change your imports as soon as possible." - ), - category=MetaPluginsDeprecated, - stacklevel=4 -) - - -__all__ = ( - "AbstractMetaInstancePlugin", - "AbstractMetaContextPlugin", -) diff --git a/openpype/lib/config.py b/openpype/lib/config.py deleted file mode 100644 index 26822649e4..0000000000 --- a/openpype/lib/config.py +++ /dev/null @@ -1,41 +0,0 @@ -import warnings -import functools - - -class ConfigDeprecatedWarning(DeprecationWarning): - pass - - -def deprecated(func): - """Mark functions as deprecated. - - It will result in a warning being emitted when the function is used. - """ - - @functools.wraps(func) - def new_func(*args, **kwargs): - warnings.simplefilter("always", ConfigDeprecatedWarning) - warnings.warn( - ( - "Deprecated import of function '{}'." - " Class was moved to 'openpype.lib.dateutils.{}'." - " Please change your imports." 
- ).format(func.__name__), - category=ConfigDeprecatedWarning - ) - return func(*args, **kwargs) - return new_func - - -@deprecated -def get_datetime_data(datetime_obj=None): - from .dateutils import get_datetime_data - - return get_datetime_data(datetime_obj) - - -@deprecated -def get_formatted_current_time(): - from .dateutils import get_formatted_current_time - - return get_formatted_current_time() diff --git a/openpype/lib/editorial.py b/openpype/lib/editorial.py deleted file mode 100644 index 49220b4f15..0000000000 --- a/openpype/lib/editorial.py +++ /dev/null @@ -1,102 +0,0 @@ -"""Code related to editorial utility functions was moved -to 'openpype.pipeline.editorial' please change your imports as soon as -possible. File will be probably removed in OpenPype 3.14.* -""" - -import warnings -import functools - - -class EditorialDeprecatedWarning(DeprecationWarning): - pass - - -def editorial_deprecated(func): - """Mark functions as deprecated. - - It will result in a warning being emitted when the function is used. - """ - - @functools.wraps(func) - def new_func(*args, **kwargs): - warnings.simplefilter("always", EditorialDeprecatedWarning) - warnings.warn( - ( - "Call to deprecated function '{}'." - " Function was moved to 'openpype.pipeline.editorial'." - ).format(func.__name__), - category=EditorialDeprecatedWarning, - stacklevel=2 - ) - return func(*args, **kwargs) - return new_func - - -@editorial_deprecated -def otio_range_to_frame_range(*args, **kwargs): - from openpype.pipeline.editorial import otio_range_to_frame_range - - return otio_range_to_frame_range(*args, **kwargs) - - -@editorial_deprecated -def otio_range_with_handles(*args, **kwargs): - from openpype.pipeline.editorial import otio_range_with_handles - - return otio_range_with_handles(*args, **kwargs) - - -@editorial_deprecated -def is_overlapping_otio_ranges(*args, **kwargs): - from openpype.pipeline.editorial import is_overlapping_otio_ranges - - return is_overlapping_otio_ranges(*args, **kwargs) - - -@editorial_deprecated -def convert_to_padded_path(*args, **kwargs): - from openpype.pipeline.editorial import convert_to_padded_path - - return convert_to_padded_path(*args, **kwargs) - - -@editorial_deprecated -def trim_media_range(*args, **kwargs): - from openpype.pipeline.editorial import trim_media_range - - return trim_media_range(*args, **kwargs) - - -@editorial_deprecated -def range_from_frames(*args, **kwargs): - from openpype.pipeline.editorial import range_from_frames - - return range_from_frames(*args, **kwargs) - - -@editorial_deprecated -def frames_to_secons(*args, **kwargs): - from openpype.pipeline.editorial import frames_to_seconds - - return frames_to_seconds(*args, **kwargs) - - -@editorial_deprecated -def frames_to_timecode(*args, **kwargs): - from openpype.pipeline.editorial import frames_to_timecode - - return frames_to_timecode(*args, **kwargs) - - -@editorial_deprecated -def make_sequence_collection(*args, **kwargs): - from openpype.pipeline.editorial import make_sequence_collection - - return make_sequence_collection(*args, **kwargs) - - -@editorial_deprecated -def get_media_range_with_retimes(*args, **kwargs): - from openpype.pipeline.editorial import get_media_range_with_retimes - - return get_media_range_with_retimes(*args, **kwargs) diff --git a/openpype/modules/deadline/plugins/publish/submit_maya_deadline.py b/openpype/modules/deadline/plugins/publish/submit_maya_deadline.py index 890d10595f..c669f9a814 100644 --- a/openpype/modules/deadline/plugins/publish/submit_maya_deadline.py +++ 
b/openpype/modules/deadline/plugins/publish/submit_maya_deadline.py @@ -48,7 +48,7 @@ def _validate_deadline_bool_value(instance, attribute, value): @attr.s -class MayaPluginInfo(): +class MayaPluginInfo(object): SceneFile = attr.ib(default=None) # Input OutputFilePath = attr.ib(default=None) # Output directory and filename OutputFilePrefix = attr.ib(default=None) @@ -63,7 +63,7 @@ class MayaPluginInfo(): @attr.s -class PythonPluginInfo(): +class PythonPluginInfo(object): ScriptFile = attr.ib() Version = attr.ib(default="3.6") Arguments = attr.ib(default=None) @@ -71,7 +71,7 @@ class PythonPluginInfo(): @attr.s -class VRayPluginInfo(): +class VRayPluginInfo(object): InputFilename = attr.ib(default=None) # Input SeparateFilesPerFrame = attr.ib(default=None) VRayEngine = attr.ib(default="V-Ray") @@ -82,7 +82,7 @@ class VRayPluginInfo(): @attr.s -class ArnoldPluginInfo(): +class ArnoldPluginInfo(object): ArnoldFile = attr.ib(default=None) @@ -762,10 +762,10 @@ def _format_tiles( Example:: Image prefix is: - `maya///_` + `//_` Result for tile 0 for 4x4 will be: - `maya///_tile_1x1_4x4__` + `//_tile_1x1_4x4__` Calculating coordinates is tricky as in Job they are defined as top, left, bottom, right with zero being in top-left corner. But Assembler diff --git a/openpype/modules/ftrack/plugins/publish/integrate_ftrack_instances.py b/openpype/modules/ftrack/plugins/publish/integrate_ftrack_instances.py index 96f573fe25..53c6e69ac0 100644 --- a/openpype/modules/ftrack/plugins/publish/integrate_ftrack_instances.py +++ b/openpype/modules/ftrack/plugins/publish/integrate_ftrack_instances.py @@ -169,7 +169,7 @@ class IntegrateFtrackInstance(pyblish.api.InstancePlugin): thumbnail_item["thumbnail"] = True # Create copy of item before setting location - if "delete" not in repre["tags"]: + if "delete" not in repre.get("tags", []): src_components_to_add.append(copy.deepcopy(thumbnail_item)) # Create copy of first thumbnail if first_thumbnail_component is None: @@ -284,7 +284,7 @@ class IntegrateFtrackInstance(pyblish.api.InstancePlugin): not_first_components.append(review_item) # Create copy of item before setting location - if "delete" not in repre["tags"]: + if "delete" not in repre.get("tags", []): src_components_to_add.append(copy.deepcopy(review_item)) # Set location diff --git a/openpype/modules/ftrack/python2_vendor/ftrack-python-api/source/ftrack_api/session.py b/openpype/modules/ftrack/python2_vendor/ftrack-python-api/source/ftrack_api/session.py index 1a5da44432..78f9d135b7 100644 --- a/openpype/modules/ftrack/python2_vendor/ftrack-python-api/source/ftrack_api/session.py +++ b/openpype/modules/ftrack/python2_vendor/ftrack-python-api/source/ftrack_api/session.py @@ -13,10 +13,9 @@ import functools import itertools import distutils.version import hashlib -import tempfile +import appdirs import threading import atexit -import warnings import requests import requests.auth @@ -241,7 +240,7 @@ class Session(object): ) self._auto_connect_event_hub_thread = None - if auto_connect_event_hub in (None, True): + if auto_connect_event_hub is True: # Connect to event hub in background thread so as not to block main # session usage waiting for event hub connection. self._auto_connect_event_hub_thread = threading.Thread( @@ -252,9 +251,7 @@ class Session(object): # To help with migration from auto_connect_event_hub default changing # from True to False. 
- self._event_hub._deprecation_warning_auto_connect = ( - auto_connect_event_hub is None - ) + self._event_hub._deprecation_warning_auto_connect = False # Register to auto-close session on exit. atexit.register(WeakMethod(self.close)) @@ -271,8 +268,9 @@ class Session(object): # rebuilding types)? if schema_cache_path is not False: if schema_cache_path is None: + schema_cache_path = appdirs.user_cache_dir() schema_cache_path = os.environ.get( - 'FTRACK_API_SCHEMA_CACHE_PATH', tempfile.gettempdir() + 'FTRACK_API_SCHEMA_CACHE_PATH', schema_cache_path ) schema_cache_path = os.path.join( diff --git a/openpype/pipeline/create/legacy_create.py b/openpype/pipeline/create/legacy_create.py index 2764b3cb95..82e5de7a8f 100644 --- a/openpype/pipeline/create/legacy_create.py +++ b/openpype/pipeline/create/legacy_create.py @@ -9,7 +9,9 @@ import os import logging import collections -from openpype.lib import get_subset_name +from openpype.client import get_asset_by_id + +from .subset_name import get_subset_name class LegacyCreator(object): @@ -147,11 +149,15 @@ class LegacyCreator(object): variant, task_name, asset_id, project_name, host_name ) + asset_doc = get_asset_by_id( + project_name, asset_id, fields=["data.tasks"] + ) + return get_subset_name( cls.family, variant, task_name, - asset_id, + asset_doc, project_name, host_name, dynamic_data=dynamic_data diff --git a/openpype/pipeline/workfile/path_resolving.py b/openpype/pipeline/workfile/path_resolving.py index 1243e84148..801cb7223c 100644 --- a/openpype/pipeline/workfile/path_resolving.py +++ b/openpype/pipeline/workfile/path_resolving.py @@ -265,6 +265,10 @@ def get_last_workfile_with_version( if not match: continue + if not match.groups(): + output_filenames.append(filename) + continue + file_version = int(match.group(1)) if version is None or file_version > version: output_filenames[:] = [] diff --git a/openpype/settings/defaults/project_settings/maya.json b/openpype/settings/defaults/project_settings/maya.json index 00e47d37da..2a0d10c2b2 100644 --- a/openpype/settings/defaults/project_settings/maya.json +++ b/openpype/settings/defaults/project_settings/maya.json @@ -21,7 +21,7 @@ "viewTransform": "sRGB gamma" } }, - "mel_workspace": "workspace -fr \"shaders\" \"renderData/shaders\";\nworkspace -fr \"images\" \"renders\";\nworkspace -fr \"particles\" \"particles\";\nworkspace -fr \"mayaAscii\" \"\";\nworkspace -fr \"mayaBinary\" \"\";\nworkspace -fr \"scene\" \"\";\nworkspace -fr \"alembicCache\" \"cache/alembic\";\nworkspace -fr \"renderData\" \"renderData\";\nworkspace -fr \"sourceImages\" \"sourceimages\";\nworkspace -fr \"fileCache\" \"cache/nCache\";\n", + "mel_workspace": "workspace -fr \"shaders\" \"renderData/shaders\";\nworkspace -fr \"images\" \"renders/maya\";\nworkspace -fr \"particles\" \"particles\";\nworkspace -fr \"mayaAscii\" \"\";\nworkspace -fr \"mayaBinary\" \"\";\nworkspace -fr \"scene\" \"\";\nworkspace -fr \"alembicCache\" \"cache/alembic\";\nworkspace -fr \"renderData\" \"renderData\";\nworkspace -fr \"sourceImages\" \"sourceimages\";\nworkspace -fr \"fileCache\" \"cache/nCache\";\n", "ext_mapping": { "model": "ma", "mayaAscii": "ma", @@ -56,12 +56,12 @@ }, "RenderSettings": { "apply_render_settings": true, - "default_render_image_folder": "renders", + "default_render_image_folder": "renders/maya", "enable_all_lights": true, "aov_separator": "underscore", "reset_current_frame": false, "arnold_renderer": { - "image_prefix": "maya///_", + "image_prefix": "//_", "image_format": "exr", "multilayer_exr": true, "tiled": 
true, @@ -69,14 +69,14 @@ "additional_options": [] }, "vray_renderer": { - "image_prefix": "maya///", + "image_prefix": "//", "engine": "1", "image_format": "exr", "aov_list": [], "additional_options": [] }, "redshift_renderer": { - "image_prefix": "maya///", + "image_prefix": "//", "primary_gi_engine": "0", "secondary_gi_engine": "0", "image_format": "exr", diff --git a/openpype/settings/defaults/project_settings/unreal.json b/openpype/settings/defaults/project_settings/unreal.json index c5f5cdf719..391e2415a5 100644 --- a/openpype/settings/defaults/project_settings/unreal.json +++ b/openpype/settings/defaults/project_settings/unreal.json @@ -1,5 +1,6 @@ { "level_sequences_for_layouts": false, + "delete_unmatched_assets": false, "project_setup": { "dev_mode": true } diff --git a/openpype/settings/entities/schemas/projects_schema/schema_project_unreal.json b/openpype/settings/entities/schemas/projects_schema/schema_project_unreal.json index d26b5c1ccf..09e5791ac4 100644 --- a/openpype/settings/entities/schemas/projects_schema/schema_project_unreal.json +++ b/openpype/settings/entities/schemas/projects_schema/schema_project_unreal.json @@ -10,6 +10,11 @@ "key": "level_sequences_for_layouts", "label": "Generate level sequences when loading layouts" }, + { + "type": "boolean", + "key": "delete_unmatched_assets", + "label": "Delete assets that are not matched" + }, { "type": "dict", "collapsible": true,
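For reference, a minimal usage sketch of the refactored load_capture_preset() above, assuming a running Maya session with the maya-capture module importable as capture. The preset values, camera name and output path are hypothetical placeholders rather than OpenPype defaults, which normally come from project_settings/maya/publish/ExtractPlayblast/capture_preset.

import capture
from openpype.hosts.maya.api.lib import load_capture_preset

# Illustrative capture preset; real values come from OpenPype settings.
capture_preset = {
    "Codec": {"format": "image", "compression": "png", "quality": 95},
    "Generic": {"off_screen": True},
    "Resolution": {"width": 1920, "height": 1080},
    "Camera Options": {"displayGateMask": False},
    "Renderer": {"rendererName": "vp2Renderer"},
    "Display Options": {
        # Background colors are converted to 0-1 floats, alpha is dropped
        "background": [125, 125, 125, 255],
        "displayGradient": True,
    },
    "Viewport Options": {
        "override_viewport_options": True,  # ignored by the converter
        "high_quality": True,               # ignored by the converter
        "multiSample": 8,                   # becomes multiSampleEnable/Count
        "textureMaxResolution": 1024,       # enables the Viewport 2.0 texture clamp
        "ssaoEnable": False,                # passed through as a Viewport 2.0 option
    },
}

options = load_capture_preset(capture_preset)

# Forward the grouped options to maya-capture; camera and filename are
# hypothetical and would normally be filled by the playblast extractor.
capture.capture(
    camera="persp",
    filename="/tmp/review",
    width=options["width"],
    height=options["height"],
    camera_options=options["camera_options"],
    display_options=options["display_options"],
    viewport_options=options["viewport_options"],
    viewport2_options=options["viewport2_options"],
    sound=options["sound"],
)

Splitting the flat "Viewport Options" settings into viewport_options and viewport2_options mirrors how maya-capture applies modelEditor flags and hardwareRenderingGlobals attributes separately, which is why the refactor introduces the VIEWPORT2_OPTIONS set.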