Mirror of https://github.com/ynput/ayon-core.git, synced 2025-12-24 21:04:40 +01:00

[Automated] Merged develop into main

This commit is contained in: commit c309536191

36 changed files with 1109 additions and 559 deletions
@@ -146,6 +146,7 @@ def _get_assets(
     project_name,
     asset_ids=None,
     asset_names=None,
+    parent_ids=None,
     standard=True,
     archived=False,
     fields=None
@@ -161,6 +162,7 @@ def _get_assets(
         project_name (str): Name of project where to look for queried entities.
         asset_ids (list[str|ObjectId]): Asset ids that should be found.
         asset_names (list[str]): Names of assets that should be found.
+        parent_ids (list[str|ObjectId]): Parent asset ids.
         standard (bool): Query standard assets (type 'asset').
         archived (bool): Query archived assets (type 'archived_asset').
         fields (list[str]): Fields that should be returned. All fields are
@@ -196,6 +198,12 @@ def _get_assets(
             return []
         query_filter["name"] = {"$in": list(asset_names)}

+    if parent_ids is not None:
+        parent_ids = _convert_ids(parent_ids)
+        if not parent_ids:
+            return []
+        query_filter["data.visualParent"] = {"$in": parent_ids}
+
     conn = _get_project_connection(project_name)

     return conn.find(query_filter, _prepare_fields(fields))
@@ -205,6 +213,7 @@ def get_assets(
     project_name,
     asset_ids=None,
     asset_names=None,
+    parent_ids=None,
     archived=False,
     fields=None
 ):
@@ -219,6 +228,7 @@ def get_assets(
         project_name (str): Name of project where to look for queried entities.
         asset_ids (list[str|ObjectId]): Asset ids that should be found.
         asset_names (list[str]): Names of assets that should be found.
+        parent_ids (list[str|ObjectId]): Parent asset ids.
         archived (bool): Add also archived assets.
         fields (list[str]): Fields that should be returned. All fields are
             returned if 'None' is passed.
@@ -229,7 +239,13 @@ def get_assets(
     """

     return _get_assets(
-        project_name, asset_ids, asset_names, True, archived, fields
+        project_name,
+        asset_ids,
+        asset_names,
+        parent_ids,
+        True,
+        archived,
+        fields
     )
@@ -237,6 +253,7 @@ def get_archived_assets(
     project_name,
     asset_ids=None,
     asset_names=None,
+    parent_ids=None,
     fields=None
 ):
     """Archived assets for specified project by passed filters.
@@ -250,6 +267,7 @@ def get_archived_assets(
         project_name (str): Name of project where to look for queried entities.
         asset_ids (list[str|ObjectId]): Asset ids that should be found.
         asset_names (list[str]): Names of assets that should be found.
+        parent_ids (list[str|ObjectId]): Parent asset ids.
         fields (list[str]): Fields that should be returned. All fields are
             returned if 'None' is passed.
@@ -259,7 +277,7 @@ def get_archived_assets(
     """

     return _get_assets(
-        project_name, asset_ids, asset_names, False, True, fields
+        project_name, asset_ids, asset_names, parent_ids, False, True, fields
     )

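The hunks above thread a new optional `parent_ids` filter through the asset queries. A minimal usage sketch, assuming `get_assets` is imported from `openpype.client` as elsewhere in this merge; the project name and id below are illustrative only:

```python
from openpype.client import get_assets

# Hypothetical values for illustration only.
project_name = "demo_project"
parent_id = "6234f3a7c1b2d9a1e4f5b6c7"

# Only children of the given parent are returned; limiting 'fields'
# keeps the Mongo query light, mirroring the diff's own usage.
child_assets = get_assets(
    project_name,
    parent_ids=[parent_id],
    fields=["_id", "name", "data.clipIn", "data.clipOut"]
)
for asset_doc in child_assets:
    print(asset_doc["name"])
```
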
@@ -1,8 +1,12 @@
 import re
+from types import NoneType
 import pyblish
 import openpype.hosts.flame.api as opfapi
 from openpype.hosts.flame.otio import flame_export
-import openpype.lib as oplib
+from openpype.pipeline.editorial import (
+    is_overlapping_otio_ranges,
+    get_media_range_with_retimes
+)

 # # developer reload modules
 from pprint import pformat
@@ -75,6 +79,12 @@ class CollectTimelineInstances(pyblish.api.ContextPlugin):
                 marker_data["handleEnd"]
             )

+            # make sure there is 0 rather than NoneType
+            if isinstance(head, NoneType):
+                head = 0
+            if isinstance(tail, NoneType):
+                tail = 0
+
             # make sure value is absolute
             if head != 0:
                 head = abs(head)
@@ -125,7 +135,8 @@ class CollectTimelineInstances(pyblish.api.ContextPlugin):
                 "flameAddTasks": self.add_tasks,
                 "tasks": {
                     task["name"]: {"type": task["type"]}
-                    for task in self.add_tasks}
+                    for task in self.add_tasks},
+                "representations": []
             })
             self.log.debug("__ inst_data: {}".format(pformat(inst_data)))
@@ -271,7 +282,7 @@ class CollectTimelineInstances(pyblish.api.ContextPlugin):

         # HACK: it is here to serve for versions below 2021.1
         if not any([head, tail]):
-            retimed_attributes = oplib.get_media_range_with_retimes(
+            retimed_attributes = get_media_range_with_retimes(
                 otio_clip, handle_start, handle_end)
             self.log.debug(
                 ">> retimed_attributes: {}".format(retimed_attributes))
@@ -370,7 +381,7 @@ class CollectTimelineInstances(pyblish.api.ContextPlugin):
                 continue
             if otio_clip.name not in segment.name.get_value():
                 continue
-            if oplib.is_overlapping_otio_ranges(
+            if is_overlapping_otio_ranges(
                     parent_range, timeline_range, strict=True):

                 # add pypedata marker to otio_clip metadata

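A caveat on the added import: `types.NoneType` is only exposed by the standard library on Python 2 and on Python 3.10+, so the `from types import NoneType` line fails on 3.3-3.9. A plain `is None` check is an equivalent, version-agnostic sketch of the same normalization:

```python
def normalize_handles(head, tail):
    """Return handle values with None coerced to 0 and sign stripped.

    Equivalent to the diff's NoneType/abs() handling, written without
    the 'types.NoneType' import (unavailable on Python 3.3-3.9).
    """
    head = 0 if head is None else abs(head)
    tail = 0 if tail is None else abs(tail)
    return head, tail

assert normalize_handles(None, -5) == (0, 5)
```
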
@@ -23,6 +23,8 @@ class ExtractSubsetResources(openpype.api.Extractor):
     hosts = ["flame"]

+    # plugin defaults
+    keep_original_representation = False
+
     default_presets = {
         "thumbnail": {
             "active": True,
@@ -45,7 +47,9 @@ class ExtractSubsetResources(openpype.api.Extractor):
     export_presets_mapping = {}

     def process(self, instance):
-        if "representations" not in instance.data:
+
+        if not self.keep_original_representation:
+            # remove previous representation if not needed
             instance.data["representations"] = []

         # flame objects
@@ -82,7 +86,11 @@ class ExtractSubsetResources(openpype.api.Extractor):
         # add default preset type for thumbnail and reviewable video
         # update them with settings and override in case the same
         # are found in there
-        export_presets = deepcopy(self.default_presets)
+        _preset_keys = [k.split('_')[0] for k in self.export_presets_mapping]
+        export_presets = {
+            k: v for k, v in deepcopy(self.default_presets).items()
+            if k not in _preset_keys
+        }
         export_presets.update(self.export_presets_mapping)

         # loop all preset names and
@@ -218,9 +226,14 @@ class ExtractSubsetResources(openpype.api.Extractor):
             opfapi.export_clip(
                 export_dir_path, exporting_clip, preset_path, **export_kwargs)

+            repr_name = unique_name
             # make sure only first segment is used if underscore in name
             # HACK: `ftrackreview_withLUT` will result only in `ftrackreview`
-            repr_name = unique_name.split("_")[0]
+            if (
+                "thumbnail" in unique_name
+                or "ftrackreview" in unique_name
+            ):
+                repr_name = unique_name.split("_")[0]

             # create representation data
             representation_data = {
@@ -259,7 +272,7 @@ class ExtractSubsetResources(openpype.api.Extractor):
                 if os.path.splitext(f)[-1] == ".mov"
             ]
             # then try if thumbnail is not in unique name
-            or unique_name == "thumbnail"
+            or repr_name == "thumbnail"
         ):
             representation_data["files"] = files.pop()
         else:

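The `_preset_keys` change above keeps a built-in default preset only when the project settings do not already define a preset with the same base name (the part before the first underscore). A small, self-contained sketch of that merge, with made-up preset names:

```python
from copy import deepcopy

default_presets = {"thumbnail": {"active": True}, "ftrackreview": {"active": True}}
export_presets_mapping = {"ftrackreview_withLUT": {"active": True}}

# The base name before the first underscore decides whether a default is overridden.
_preset_keys = [k.split("_")[0] for k in export_presets_mapping]
export_presets = {
    k: v for k, v in deepcopy(default_presets).items()
    if k not in _preset_keys
}
export_presets.update(export_presets_mapping)

# The 'ftrackreview' default was dropped in favour of the settings preset.
assert set(export_presets) == {"thumbnail", "ftrackreview_withLUT"}
```
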
@@ -1,5 +1,5 @@
 import pyblish
-import openpype
+from openpype.pipeline.editorial import is_overlapping_otio_ranges
 from openpype.hosts.hiero import api as phiero
 from openpype.hosts.hiero.api.otio import hiero_export
 import hiero
@@ -275,7 +275,7 @@ class PrecollectInstances(pyblish.api.ContextPlugin):
             parent_range = otio_audio.range_in_parent()

             # if any overlapping clip found then return True
-            if openpype.lib.is_overlapping_otio_ranges(
+            if is_overlapping_otio_ranges(
                     parent_range, timeline_range, strict=False):
                 return True

@@ -304,7 +304,7 @@ class PrecollectInstances(pyblish.api.ContextPlugin):
                 continue
             self.log.debug("__ parent_range: {}".format(parent_range))
             self.log.debug("__ timeline_range: {}".format(timeline_range))
-            if openpype.lib.is_overlapping_otio_ranges(
+            if is_overlapping_otio_ranges(
                     parent_range, timeline_range, strict=True):

                 # add pypedata marker to otio_clip metadata

@@ -4,6 +4,7 @@ from contextlib import contextmanager

 import six

+from openpype.client import get_asset_by_name
 from openpype.api import get_asset
 from openpype.pipeline import legacy_io

@@ -74,16 +75,13 @@ def generate_ids(nodes, asset_id=None):
     """

     if asset_id is None:
+        project_name = legacy_io.active_project()
+        asset_name = legacy_io.Session["AVALON_ASSET"]
         # Get the asset ID from the database for the asset of current context
-        asset_data = legacy_io.find_one(
-            {
-                "type": "asset",
-                "name": legacy_io.Session["AVALON_ASSET"]
-            },
-            projection={"_id": True}
-        )
-        assert asset_data, "No current asset found in Session"
-        asset_id = asset_data['_id']
+        asset_doc = get_asset_by_name(project_name, asset_name, fields=["_id"])
+
+        assert asset_doc, "No current asset found in Session"
+        asset_id = asset_doc['_id']

     node_ids = []
     for node in nodes:
@@ -430,26 +428,29 @@ def maintained_selection():
 def reset_framerange():
     """Set frame range to current asset"""

+    project_name = legacy_io.active_project()
     asset_name = legacy_io.Session["AVALON_ASSET"]
-    asset = legacy_io.find_one({"name": asset_name, "type": "asset"})
+    # Get the asset ID from the database for the asset of current context
+    asset_doc = get_asset_by_name(project_name, asset_name)
+    asset_data = asset_doc["data"]

-    frame_start = asset["data"].get("frameStart")
-    frame_end = asset["data"].get("frameEnd")
+    frame_start = asset_data.get("frameStart")
+    frame_end = asset_data.get("frameEnd")
     # Backwards compatibility
     if frame_start is None or frame_end is None:
-        frame_start = asset["data"].get("edit_in")
-        frame_end = asset["data"].get("edit_out")
+        frame_start = asset_data.get("edit_in")
+        frame_end = asset_data.get("edit_out")

     if frame_start is None or frame_end is None:
         log.warning("No edit information found for %s" % asset_name)
         return

-    handles = asset["data"].get("handles") or 0
-    handle_start = asset["data"].get("handleStart")
+    handles = asset_data.get("handles") or 0
+    handle_start = asset_data.get("handleStart")
     if handle_start is None:
         handle_start = handles

-    handle_end = asset["data"].get("handleEnd")
+    handle_end = asset_data.get("handleEnd")
     if handle_end is None:
         handle_end = handles

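The recurring pattern in this merge is replacing raw `legacy_io.find_one` Mongo filters with the typed helpers from `openpype.client`. A minimal before/after sketch under that assumption; the asset name is illustrative:

```python
from openpype.client import get_asset_by_name
from openpype.pipeline import legacy_io

project_name = legacy_io.active_project()

# Before: an ad-hoc Mongo filter against the active project's collection.
# asset_doc = legacy_io.find_one(
#     {"type": "asset", "name": "sh010"}, projection={"_id": True}
# )

# After: an explicit project name and a 'fields' projection.
asset_doc = get_asset_by_name(project_name, "sh010", fields=["_id"])
if asset_doc:
    print(asset_doc["_id"])
```
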
@@ -6,6 +6,7 @@ import logging
 from Qt import QtWidgets, QtCore, QtGui

 from openpype import style
+from openpype.client import get_asset_by_name
 from openpype.pipeline import legacy_io
 from openpype.tools.utils.assets_widget import SingleSelectAssetsWidget

@@ -46,10 +47,8 @@ class SelectAssetDialog(QtWidgets.QWidget):
         select_id = None
         name = self._parm.eval()
         if name:
-            db_asset = legacy_io.find_one(
-                {"name": name, "type": "asset"},
-                {"_id": True}
-            )
+            project_name = legacy_io.active_project()
+            db_asset = get_asset_by_name(project_name, name, fields=["_id"])
             if db_asset:
                 select_id = db_asset["_id"]

@@ -1,6 +1,10 @@
 # -*- coding: utf-8 -*-
 import hou

+from openpype.client import (
+    get_asset_by_name,
+    get_subsets,
+)
 from openpype.pipeline import legacy_io
 from openpype.hosts.houdini.api import lib
 from openpype.hosts.houdini.api import plugin

@@ -23,20 +27,16 @@ class CreateHDA(plugin.Creator):
         # type: (str) -> bool
         """Check if existing subset name versions already exist."""
         # Get all subsets of the current asset
-        asset_id = legacy_io.find_one(
-            {"name": self.data["asset"], "type": "asset"},
-            projection={"_id": True}
-        )['_id']
-        subset_docs = legacy_io.find(
-            {
-                "type": "subset",
-                "parent": asset_id
-            },
-            {"name": 1}
-        )
+        project_name = legacy_io.active_project()
+        asset_doc = get_asset_by_name(
+            project_name, self.data["asset"], fields=["_id"]
+        )
+        subset_docs = get_subsets(
+            project_name, asset_ids=[asset_doc["_id"]], fields=["name"]
+        )
-        existing_subset_names = set(subset_docs.distinct("name"))
         existing_subset_names_low = {
-            _name.lower() for _name in existing_subset_names
+            subset_doc["name"].lower()
+            for subset_doc in subset_docs
         }
         return subset_name.lower() in existing_subset_names_low

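A compact sketch of the case-insensitive existence check the new code performs, using plain dictionaries in place of Mongo cursors:

```python
subset_docs = [{"name": "usdModelMain"}, {"name": "hdaRig"}]

def subset_exists(subset_name, subset_docs):
    # Lower-case both sides so 'HdaRig' and 'hdaRig' collide, as in the plugin.
    existing = {doc["name"].lower() for doc in subset_docs}
    return subset_name.lower() in existing

assert subset_exists("HDARig", subset_docs)
assert not subset_exists("pointcache", subset_docs)
```
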
@@ -44,7 +44,8 @@ class BgeoLoader(load.LoaderPlugin):

         # Explicitly create a file node
         file_node = container.createNode("file", node_name=node_name)
-        file_node.setParms({"file": self.format_path(self.fname, is_sequence)})
+        file_node.setParms(
+            {"file": self.format_path(self.fname, context["representation"])})

         # Set display on last node
         file_node.setDisplayFlag(True)

@@ -62,15 +63,15 @@ class BgeoLoader(load.LoaderPlugin):
         )

     @staticmethod
-    def format_path(path, is_sequence):
+    def format_path(path, representation):
         """Format file path correctly for single bgeo or bgeo sequence."""
         if not os.path.exists(path):
             raise RuntimeError("Path does not exist: %s" % path)

+        is_sequence = bool(representation["context"].get("frame"))
         # The path is either a single file or sequence in a folder.
         if not is_sequence:
             filename = path
             print("single")
         else:
             filename = re.sub(r"(.*)\.(\d+)\.(bgeo.*)", "\\1.$F4.\\3", path)

@@ -94,9 +95,9 @@ class BgeoLoader(load.LoaderPlugin):

         # Update the file path
         file_path = get_representation_path(representation)
-        file_path = self.format_path(file_path)
+        file_path = self.format_path(file_path, representation)

-        file_node.setParms({"fileName": file_path})
+        file_node.setParms({"file": file_path})

         # Update attribute
         node.setParms({"representation": str(representation["_id"])})

@@ -40,7 +40,8 @@ class VdbLoader(load.LoaderPlugin):

         # Explicitly create a file node
         file_node = container.createNode("file", node_name=node_name)
-        file_node.setParms({"file": self.format_path(self.fname)})
+        file_node.setParms(
+            {"file": self.format_path(self.fname, context["representation"])})

         # Set display on last node
         file_node.setDisplayFlag(True)

@@ -57,30 +58,20 @@ class VdbLoader(load.LoaderPlugin):
             suffix="",
         )

-    def format_path(self, path):
+    @staticmethod
+    def format_path(path, representation):
         """Format file path correctly for single vdb or vdb sequence."""
         if not os.path.exists(path):
             raise RuntimeError("Path does not exist: %s" % path)

+        is_sequence = bool(representation["context"].get("frame"))
         # The path is either a single file or sequence in a folder.
-        is_single_file = os.path.isfile(path)
-        if is_single_file:
+        if not is_sequence:
             filename = path
         else:
-            # The path points to the publish .vdb sequence folder so we
-            # find the first file in there that ends with .vdb
-            files = sorted(os.listdir(path))
-            first = next((x for x in files if x.endswith(".vdb")), None)
-            if first is None:
-                raise RuntimeError(
-                    "Couldn't find first .vdb file of "
-                    "sequence in: %s" % path
-                )
+            filename = re.sub(r"(.*)\.(\d+)\.vdb$", "\\1.$F4.vdb", path)

-            # Set <frame>.vdb to $F.vdb
-            first = re.sub(r"\.(\d+)\.vdb$", ".$F.vdb", first)
-
-            filename = os.path.join(path, first)
+        filename = os.path.join(path, filename)

         filename = os.path.normpath(filename)
         filename = filename.replace("\\", "/")

@@ -100,7 +91,7 @@ class VdbLoader(load.LoaderPlugin):

         # Update the file path
         file_path = get_representation_path(representation)
-        file_path = self.format_path(file_path)
+        file_path = self.format_path(file_path, representation)

         file_node.setParms({"file": file_path})

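Both loaders now derive `is_sequence` from the representation's `frame` context and rewrite the frame number to Houdini's `$F4` token with a regex. The substitution in isolation, on an illustrative path:

```python
import re

path = "/projects/demo/publish/cache.v001.1001.bgeo.sc"  # illustrative path

# Replace the numeric frame component with Houdini's padded frame variable.
houdini_path = re.sub(r"(.*)\.(\d+)\.(bgeo.*)", r"\1.$F4.\3", path)
assert houdini_path == "/projects/demo/publish/cache.v001.$F4.bgeo.sc"
```
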
@@ -1,5 +1,6 @@
 import pyblish.api

+from openpype.client import get_subset_by_name, get_asset_by_name
 from openpype.pipeline import legacy_io
 import openpype.lib.usdlib as usdlib

@@ -50,10 +51,8 @@ class CollectUsdBootstrap(pyblish.api.InstancePlugin):

         self.log.debug("Add bootstrap for: %s" % bootstrap)

-        asset = legacy_io.find_one({
-            "name": instance.data["asset"],
-            "type": "asset"
-        })
+        project_name = legacy_io.active_project()
+        asset = get_asset_by_name(project_name, instance.data["asset"])
         assert asset, "Asset must exist: %s" % asset

         # Check which are not about to be created and don't exist yet
@@ -70,7 +69,7 @@ class CollectUsdBootstrap(pyblish.api.InstancePlugin):

         self.log.debug("Checking required bootstrap: %s" % required)
         for subset in required:
-            if self._subset_exists(instance, subset, asset):
+            if self._subset_exists(project_name, instance, subset, asset):
                 continue

             self.log.debug(
@@ -93,7 +92,7 @@ class CollectUsdBootstrap(pyblish.api.InstancePlugin):
         for key in ["asset"]:
             new.data[key] = instance.data[key]

-    def _subset_exists(self, instance, subset, asset):
+    def _subset_exists(self, project_name, instance, subset, asset):
         """Return whether subset exists in current context or in database."""
         # Allow it to be created during this publish session
         context = instance.context

@@ -106,9 +105,8 @@ class CollectUsdBootstrap(pyblish.api.InstancePlugin):

         # Or, if they already exist in the database we can
         # skip them too.
-        return bool(
-            legacy_io.find_one(
-                {"name": subset, "type": "subset", "parent": asset["_id"]},
-                {"_id": True}
-            )
-        )
+        if get_subset_by_name(
+            project_name, subset, asset["_id"], fields=["_id"]
+        ):
+            return True
+        return False

@@ -7,6 +7,12 @@ from collections import deque
 import pyblish.api
 import openpype.api

+from openpype.client import (
+    get_asset_by_name,
+    get_subset_by_name,
+    get_last_version_by_subset_id,
+    get_representation_by_name,
+)
 from openpype.pipeline import (
     get_representation_path,
     legacy_io,

@@ -244,11 +250,14 @@ class ExtractUSDLayered(openpype.api.Extractor):

         # Set up the dependency for publish if they have new content
         # compared to previous publishes
+        project_name = legacy_io.active_project()
         for dependency in active_dependencies:
             dependency_fname = dependency.data["usdFilename"]

             filepath = os.path.join(staging_dir, dependency_fname)
-            similar = self._compare_with_latest_publish(dependency, filepath)
+            similar = self._compare_with_latest_publish(
+                project_name, dependency, filepath
+            )
             if similar:
                 # Deactivate this dependency
                 self.log.debug(

@@ -268,7 +277,7 @@ class ExtractUSDLayered(openpype.api.Extractor):
         instance.data["files"] = []
         instance.data["files"].append(fname)

-    def _compare_with_latest_publish(self, dependency, new_file):
+    def _compare_with_latest_publish(self, project_name, dependency, new_file):
         import filecmp

         _, ext = os.path.splitext(new_file)

@@ -276,35 +285,29 @@ class ExtractUSDLayered(openpype.api.Extractor):
         # Compare this dependency with the latest published version
         # to detect whether we should make this into a new publish
         # version. If not, skip it.
-        asset = legacy_io.find_one(
-            {"name": dependency.data["asset"], "type": "asset"}
+        asset = get_asset_by_name(
+            project_name, dependency.data["asset"], fields=["_id"]
         )
-        subset = legacy_io.find_one(
-            {
-                "name": dependency.data["subset"],
-                "type": "subset",
-                "parent": asset["_id"],
-            }
+        subset = get_subset_by_name(
+            project_name,
+            dependency.data["subset"],
+            asset["_id"],
+            fields=["_id"]
         )
         if not subset:
             # Subset doesn't exist yet. Definitely new file
             self.log.debug("No existing subset..")
             return False

-        version = legacy_io.find_one(
-            {"type": "version", "parent": subset["_id"], },
-            sort=[("name", -1)]
+        version = get_last_version_by_subset_id(
+            project_name, subset["_id"], fields=["_id"]
         )
         if not version:
             self.log.debug("No existing version..")
             return False

-        representation = legacy_io.find_one(
-            {
-                "name": ext.lstrip("."),
-                "type": "representation",
-                "parent": version["_id"],
-            }
+        representation = get_representation_by_name(
+            project_name, ext.lstrip("."), version["_id"]
         )
         if not representation:
             self.log.debug("No existing representation..")

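The refactored `_compare_with_latest_publish` walks the publish hierarchy with one client helper per level. A condensed sketch of that chain under the same signatures the diff uses; the project and names are illustrative:

```python
from openpype.client import (
    get_asset_by_name,
    get_subset_by_name,
    get_last_version_by_subset_id,
    get_representation_by_name,
)

def find_latest_usd_repre(project_name, asset_name, subset_name):
    """Return the latest 'usd' representation document, or None."""
    asset = get_asset_by_name(project_name, asset_name, fields=["_id"])
    if not asset:
        return None
    subset = get_subset_by_name(
        project_name, subset_name, asset["_id"], fields=["_id"]
    )
    if not subset:
        return None
    version = get_last_version_by_subset_id(
        project_name, subset["_id"], fields=["_id"]
    )
    if not version:
        return None
    return get_representation_by_name(project_name, "usd", version["_id"])
```
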
@@ -2,6 +2,7 @@ import re

 import pyblish.api

+from openpype.client import get_subset_by_name
 import openpype.api
 from openpype.pipeline import legacy_io

@@ -15,31 +16,23 @@ class ValidateUSDShadeModelExists(pyblish.api.InstancePlugin):
     label = "USD Shade model exists"

     def process(self, instance):
-        asset = instance.data["asset"]
+        project_name = legacy_io.active_project()
+        asset_name = instance.data["asset"]
         subset = instance.data["subset"]

         # Assume shading variation starts after a dot separator
         shade_subset = subset.split(".", 1)[0]
         model_subset = re.sub("^usdShade", "usdModel", shade_subset)

-        asset_doc = legacy_io.find_one(
-            {"name": asset, "type": "asset"},
-            {"_id": True}
-        )
+        asset_doc = instance.data.get("assetEntity")
         if not asset_doc:
-            raise RuntimeError("Asset does not exist: %s" % asset)
+            raise RuntimeError("Asset document is not filled on instance.")

-        subset_doc = legacy_io.find_one(
-            {
-                "name": model_subset,
-                "type": "subset",
-                "parent": asset_doc["_id"],
-            },
-            {"_id": True}
+        subset_doc = get_subset_by_name(
+            project_name, model_subset, asset_doc["_id"], fields=["_id"]
         )
         if not subset_doc:
             raise RuntimeError(
                 "USD Model subset not found: "
-                "%s (%s)" % (model_subset, asset)
+                "%s (%s)" % (model_subset, asset_name)
             )

@@ -4,19 +4,9 @@ import husdoutputprocessors.base as base

 import colorbleed.usdlib as usdlib

-from openpype.pipeline import (
-    legacy_io,
-    registered_root,
-)
-
-
-def _get_project_publish_template():
-    """Return publish template from database for current project"""
-    project = legacy_io.find_one(
-        {"type": "project"},
-        projection={"config.template.publish": True}
-    )
-    return project["config"]["template"]["publish"]
+from openpype.client import get_asset_by_name
+from openpype.api import Anatomy
+from openpype.pipeline import legacy_io


 class AvalonURIOutputProcessor(base.OutputProcessorBase):

@@ -35,7 +25,6 @@ class AvalonURIOutputProcessor(base.OutputProcessorBase):
         ever created in a Houdini session. Therefore be very careful
         about what data gets put in this object.
         """
-        self._template = None
         self._use_publish_paths = False
         self._cache = dict()

@@ -60,14 +49,11 @@ class AvalonURIOutputProcessor(base.OutputProcessorBase):
         return self._parameters

     def beginSave(self, config_node, t):
-        self._template = _get_project_publish_template()
-
         parm = self._parms["use_publish_paths"]
         self._use_publish_paths = config_node.parm(parm).evalAtTime(t)
         self._cache.clear()

     def endSave(self):
-        self._template = None
         self._use_publish_paths = None
         self._cache.clear()

@@ -138,22 +124,19 @@ class AvalonURIOutputProcessor(base.OutputProcessorBase):
         """

         PROJECT = legacy_io.Session["AVALON_PROJECT"]
-        asset_doc = legacy_io.find_one({
-            "name": asset,
-            "type": "asset"
-        })
+        anatomy = Anatomy(PROJECT)
+        asset_doc = get_asset_by_name(PROJECT, asset)
         if not asset_doc:
             raise RuntimeError("Invalid asset name: '%s'" % asset)

-        root = registered_root()
-        path = self._template.format(**{
-            "root": root,
+        formatted_anatomy = anatomy.format({
             "project": PROJECT,
             "asset": asset_doc["name"],
             "subset": subset,
             "representation": ext,
             "version": 0  # stub version zero
         })
+        path = formatted_anatomy["publish"]["path"]

         # Remove the version folder
         subset_folder = os.path.dirname(os.path.dirname(path))

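Instead of reading a raw publish template from the project document, the processor now formats paths through `Anatomy`. A hedged sketch that mirrors the call shape in the hunk above; the project name is illustrative, and the exact template keys available depend on the project's anatomy settings:

```python
from openpype.api import Anatomy

anatomy = Anatomy("demo_project")  # hypothetical project
formatted = anatomy.format({
    "project": "demo_project",
    "asset": "sh010",
    "subset": "usdModelMain",
    "representation": "usd",
    "version": 0,  # stub version, as in the diff
})
# The formatted result is indexed by template group and key.
publish_path = formatted["publish"]["path"]
```
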
@@ -4,7 +4,7 @@ import re
 import os
 import contextlib
 from opentimelineio import opentime
-import openpype
+from openpype.pipeline.editorial import is_overlapping_otio_ranges

 from ..otio import davinci_export as otio_export

@@ -824,7 +824,7 @@ def get_otio_clip_instance_data(otio_timeline, timeline_item_data):
             continue
         if otio_clip.name not in timeline_item.GetName():
             continue
-        if openpype.lib.is_overlapping_otio_ranges(
+        if is_overlapping_otio_ranges(
                 parent_range, timeline_range, strict=True):

             # add pypedata marker to otio_clip metadata

@@ -1,6 +1,5 @@
 import unreal

-from openpype.pipeline import legacy_io
 from openpype.hosts.unreal.api import pipeline
 from openpype.hosts.unreal.api.plugin import Creator

@@ -6,7 +6,7 @@ import unreal
 from unreal import EditorAssetLibrary
 from unreal import EditorLevelLibrary
 from unreal import EditorLevelUtils

+from openpype.client import get_assets, get_asset_by_name
 from openpype.pipeline import (
     AVALON_CONTAINER_ID,
     legacy_io,

@@ -24,14 +24,6 @@ class CameraLoader(plugin.Loader):
     icon = "cube"
     color = "orange"

-    def _get_data(self, asset_name):
-        asset_doc = legacy_io.find_one({
-            "type": "asset",
-            "name": asset_name
-        })
-
-        return asset_doc.get("data")
-
     def _set_sequence_hierarchy(
         self, seq_i, seq_j, min_frame_j, max_frame_j
     ):

@@ -177,6 +169,19 @@ class CameraLoader(plugin.Loader):
         EditorLevelLibrary.save_all_dirty_levels()
         EditorLevelLibrary.load_level(level)

+        project_name = legacy_io.active_project()
+        # TODO refactor
+        #   - creation of hierarchy should be a function in unreal integration
+        #   - it's used in multiple loaders but must not be loader's logic
+        #   - hard to say what the purpose of the loop is
+        #   - variables do not match their meaning
+        #   - why is the scene stored to sequences?
+        #   - asset documents vs. elements
+        #   - cleanup variable names in the whole function
+        #       - e.g. 'asset', 'asset_name', 'asset_data', 'asset_doc'
+        #   - really inefficient queries of asset documents
+        #   - existing asset in scene is considered as "with correct values"
+        #   - variable 'elements' is modified during its loop
         # Get all the sequences in the hierarchy. It will create them, if
         # they don't exist.
         sequences = []

@@ -201,26 +206,30 @@ class CameraLoader(plugin.Loader):
                 factory=unreal.LevelSequenceFactoryNew()
             )

-            asset_data = legacy_io.find_one({
-                "type": "asset",
-                "name": h.split('/')[-1]
-            })
-
-            id = asset_data.get('_id')
+            asset_data = get_asset_by_name(
+                project_name,
+                h.split('/')[-1],
+                fields=["_id", "data.fps"]
+            )

             start_frames = []
             end_frames = []

-            elements = list(
-                legacy_io.find({"type": "asset", "data.visualParent": id}))
+            elements = list(get_assets(
+                project_name,
+                parent_ids=[asset_data["_id"]],
+                fields=["_id", "data.clipIn", "data.clipOut"]
+            ))

             for e in elements:
                 start_frames.append(e.get('data').get('clipIn'))
                 end_frames.append(e.get('data').get('clipOut'))

-                elements.extend(legacy_io.find({
-                    "type": "asset",
-                    "data.visualParent": e.get('_id')
-                }))
+                elements.extend(get_assets(
+                    project_name,
+                    parent_ids=[e["_id"]],
+                    fields=["_id", "data.clipIn", "data.clipOut"]
+                ))

             min_frame = min(start_frames)
             max_frame = max(end_frames)

@@ -256,7 +265,7 @@ class CameraLoader(plugin.Loader):
                 sequences[i], sequences[i + 1],
                 frame_ranges[i + 1][0], frame_ranges[i + 1][1])

-        data = self._get_data(asset)
+        data = get_asset_by_name(project_name, asset)["data"]
         cam_seq.set_display_rate(
             unreal.FrameRate(data.get("fps"), 1.0))
         cam_seq.set_playback_start(0)

@@ -1,6 +1,5 @@
 # -*- coding: utf-8 -*-
 """Loader for layouts."""
 import os
 import json
 from pathlib import Path

@@ -12,6 +11,7 @@ from unreal import AssetToolsHelpers
 from unreal import FBXImportType
 from unreal import MathLibrary as umath

+from openpype.client import get_asset_by_name, get_assets
 from openpype.pipeline import (
     discover_loader_plugins,
     loaders_from_representation,

@@ -88,15 +88,6 @@ class LayoutLoader(plugin.Loader):

         return None

-    @staticmethod
-    def _get_data(asset_name):
-        asset_doc = legacy_io.find_one({
-            "type": "asset",
-            "name": asset_name
-        })
-
-        return asset_doc.get("data")
-
     @staticmethod
     def _set_sequence_hierarchy(
         seq_i, seq_j, max_frame_i, min_frame_j, max_frame_j, map_paths

@@ -364,26 +355,30 @@ class LayoutLoader(plugin.Loader):
                 factory=unreal.LevelSequenceFactoryNew()
             )

-            asset_data = legacy_io.find_one({
-                "type": "asset",
-                "name": h_dir.split('/')[-1]
-            })
-
-            id = asset_data.get('_id')
+            project_name = legacy_io.active_project()
+            asset_data = get_asset_by_name(
+                project_name,
+                h_dir.split('/')[-1],
+                fields=["_id", "data.fps"]
+            )

             start_frames = []
             end_frames = []

-            elements = list(
-                legacy_io.find({"type": "asset", "data.visualParent": id}))
+            elements = list(get_assets(
+                project_name,
+                parent_ids=[asset_data["_id"]],
+                fields=["_id", "data.clipIn", "data.clipOut"]
+            ))
             for e in elements:
                 start_frames.append(e.get('data').get('clipIn'))
                 end_frames.append(e.get('data').get('clipOut'))

-                elements.extend(legacy_io.find({
-                    "type": "asset",
-                    "data.visualParent": e.get('_id')
-                }))
+                elements.extend(get_assets(
+                    project_name,
+                    parent_ids=[e["_id"]],
+                    fields=["_id", "data.clipIn", "data.clipOut"]
+                ))

             min_frame = min(start_frames)
             max_frame = max(end_frames)

@@ -659,7 +654,8 @@ class LayoutLoader(plugin.Loader):
                 frame_ranges[i + 1][0], frame_ranges[i + 1][1],
                 [level])

-        data = self._get_data(asset)
+        project_name = legacy_io.active_project()
+        data = get_asset_by_name(project_name, asset)["data"]
         shot.set_display_rate(
             unreal.FrameRate(data.get("fps"), 1.0))
         shot.set_playback_start(0)

@@ -9,6 +9,7 @@ import unreal
 from unreal import EditorLevelLibrary as ell
 from unreal import EditorAssetLibrary as eal

+from openpype.client import get_representation_by_name
 import openpype.api
 from openpype.pipeline import legacy_io

@@ -34,6 +35,7 @@ class ExtractLayout(openpype.api.Extractor):
             "Wrong level loaded"

         json_data = []
+        project_name = legacy_io.active_project()

         for member in instance[:]:
             actor = ell.get_actor_reference(member)

@@ -57,17 +59,13 @@ class ExtractLayout(openpype.api.Extractor):
                 self.log.error("AssetContainer not found.")
                 return

-            parent = eal.get_metadata_tag(asset_container, "parent")
+            parent_id = eal.get_metadata_tag(asset_container, "parent")
             family = eal.get_metadata_tag(asset_container, "family")

-            self.log.info("Parent: {}".format(parent))
-            blend = legacy_io.find_one(
-                {
-                    "type": "representation",
-                    "parent": ObjectId(parent),
-                    "name": "blend"
-                },
-                projection={"_id": True})
+            self.log.info("Parent: {}".format(parent_id))
+            blend = get_representation_by_name(
+                project_name, "blend", parent_id, fields=["_id"]
+            )
             blend_id = blend["_id"]

             json_element = {}

@@ -1,289 +1,102 @@
-import os
-import re
-import clique
-from .import_utils import discover_host_vendor_module
-
-try:
-    import opentimelineio as otio
-    from opentimelineio import opentime as _ot
-except ImportError:
-    if not os.environ.get("AVALON_APP"):
-        raise
-    otio = discover_host_vendor_module("opentimelineio")
-    _ot = discover_host_vendor_module("opentimelineio.opentime")
-
-
-def otio_range_to_frame_range(otio_range):
-    start = _ot.to_frames(
-        otio_range.start_time, otio_range.start_time.rate)
-    end = start + _ot.to_frames(
-        otio_range.duration, otio_range.duration.rate)
-    return start, end
-
-
-def otio_range_with_handles(otio_range, instance):
-    handle_start = instance.data["handleStart"]
-    handle_end = instance.data["handleEnd"]
-    handles_duration = handle_start + handle_end
-    fps = float(otio_range.start_time.rate)
-    start = _ot.to_frames(otio_range.start_time, fps)
-    duration = _ot.to_frames(otio_range.duration, fps)
-
-    return _ot.TimeRange(
-        start_time=_ot.RationalTime((start - handle_start), fps),
-        duration=_ot.RationalTime((duration + handles_duration), fps)
-    )
-
-
-def is_overlapping_otio_ranges(test_otio_range, main_otio_range, strict=False):
-    test_start, test_end = otio_range_to_frame_range(test_otio_range)
-    main_start, main_end = otio_range_to_frame_range(main_otio_range)
-    covering_exp = bool(
-        (test_start <= main_start) and (test_end >= main_end)
-    )
-    inside_exp = bool(
-        (test_start >= main_start) and (test_end <= main_end)
-    )
-    overlaying_right_exp = bool(
-        (test_start <= main_end) and (test_end >= main_end)
-    )
-    overlaying_left_exp = bool(
-        (test_end >= main_start) and (test_start <= main_start)
-    )
-
-    if not strict:
-        return any((
-            covering_exp,
-            inside_exp,
-            overlaying_right_exp,
-            overlaying_left_exp
-        ))
-    else:
-        return covering_exp
-
-
-def convert_to_padded_path(path, padding):
-    """
-    Return correct padding in sequence string
-
-    Args:
-        path (str): path url or simple file name
-        padding (int): number of padding
-
-    Returns:
-        type: string with reformatted path
-
-    Example:
-        convert_to_padded_path("plate.%d.exr") > plate.%04d.exr
-
-    """
-    if "%d" in path:
-        path = re.sub("%d", "%0{padding}d".format(padding=padding), path)
-    return path
-
-
-def trim_media_range(media_range, source_range):
-    """
-    Trim input media range with clip source range.
-
-    Args:
-        media_range (otio._ot._ot.TimeRange): available range of media
-        source_range (otio._ot._ot.TimeRange): clip required range
-
-    Returns:
-        otio._ot._ot.TimeRange: trimmed media range
-
-    """
-    rw_media_start = _ot.RationalTime(
-        media_range.start_time.value + source_range.start_time.value,
-        media_range.start_time.rate
-    )
-    rw_media_duration = _ot.RationalTime(
-        source_range.duration.value,
-        media_range.duration.rate
-    )
-    return _ot.TimeRange(
-        rw_media_start, rw_media_duration)
-
-
-def range_from_frames(start, duration, fps):
-    """
-    Returns otio time range.
-
-    Args:
-        start (int): frame start
-        duration (int): frame duration
-        fps (float): frame rate
-
-    Returns:
-        otio._ot._ot.TimeRange: created range
-
-    """
-    return _ot.TimeRange(
-        _ot.RationalTime(start, fps),
-        _ot.RationalTime(duration, fps)
-    )
-
-
-def frames_to_secons(frames, framerate):
-    """
-    Returns seconds.
-
-    Args:
-        frames (int): frame
-        framerate (float): frame rate
-
-    Returns:
-        float: second value
-
-    """
-    rt = _ot.from_frames(frames, framerate)
-    return _ot.to_seconds(rt)
-
-
-def frames_to_timecode(frames, framerate):
-    rt = _ot.from_frames(frames, framerate)
-    return _ot.to_timecode(rt)
-
-
-def make_sequence_collection(path, otio_range, metadata):
-    """
-    Make collection from path otio range and otio metadata.
-
-    Args:
-        path (str): path to image sequence with `%d`
-        otio_range (otio._ot._ot.TimeRange): range to be used
-        metadata (dict): data where padding value can be found
-
-    Returns:
-        list: dir_path (str): path to sequence, collection object
-
-    """
-    if "%" not in path:
-        return None
-    file_name = os.path.basename(path)
-    dir_path = os.path.dirname(path)
-    head = file_name.split("%")[0]
-    tail = os.path.splitext(file_name)[-1]
-    first, last = otio_range_to_frame_range(otio_range)
-    collection = clique.Collection(
-        head=head, tail=tail, padding=metadata["padding"])
-    collection.indexes.update([i for i in range(first, last)])
-    return dir_path, collection
-
-
-def _sequence_resize(source, length):
-    step = float(len(source) - 1) / (length - 1)
-    for i in range(length):
-        low, ratio = divmod(i * step, 1)
-        high = low + 1 if ratio > 0 else low
-        yield (1 - ratio) * source[int(low)] + ratio * source[int(high)]
-
-
-def get_media_range_with_retimes(otio_clip, handle_start, handle_end):
-    source_range = otio_clip.source_range
-    available_range = otio_clip.available_range()
-    media_in = available_range.start_time.value
-    media_out = available_range.end_time_inclusive().value
-
-    # modifiers
-    time_scalar = 1.
-    offset_in = 0
-    offset_out = 0
-    time_warp_nodes = []
-
-    # Check for speed effects and adjust playback speed accordingly
-    for effect in otio_clip.effects:
-        if isinstance(effect, otio.schema.LinearTimeWarp):
-            time_scalar = effect.time_scalar
-
-        elif isinstance(effect, otio.schema.FreezeFrame):
-            # For freeze frame, playback speed must be set after range
-            time_scalar = 0.
-
-        elif isinstance(effect, otio.schema.TimeEffect):
-            # For freeze frame, playback speed must be set after range
-            name = effect.name
-            effect_name = effect.effect_name
-            if "TimeWarp" not in effect_name:
-                continue
-            metadata = effect.metadata
-            lookup = metadata.get("lookup")
-            if not lookup:
-                continue
-
-            # time warp node
-            tw_node = {
-                "Class": "TimeWarp",
-                "name": name
-            }
-            tw_node.update(metadata)
-            tw_node["lookup"] = list(lookup)
-
-            # get first and last frame offsets
-            offset_in += lookup[0]
-            offset_out += lookup[-1]
-
-            # add to timewarp nodes
-            time_warp_nodes.append(tw_node)
-
-    # multiply by time scalar
-    offset_in *= time_scalar
-    offset_out *= time_scalar
-
-    # flip offset if reversed speed
-    if time_scalar < 0:
-        _offset_in = offset_out
-        _offset_out = offset_in
-        offset_in = _offset_in
-        offset_out = _offset_out
-
-    # scale handles
-    handle_start *= abs(time_scalar)
-    handle_end *= abs(time_scalar)
-
-    # flip handles if reversed speed
-    if time_scalar < 0:
-        _handle_start = handle_end
-        _handle_end = handle_start
-        handle_start = _handle_start
-        handle_end = _handle_end
-
-    source_in = source_range.start_time.value
-
-    media_in_trimmed = (
-        media_in + source_in + offset_in)
-    media_out_trimmed = (
-        media_in + source_in + (
-            ((source_range.duration.value - 1) * abs(
-                time_scalar)) + offset_out))
-
-    # calculate available handles
-    if (media_in_trimmed - media_in) < handle_start:
-        handle_start = (media_in_trimmed - media_in)
-    if (media_out - media_out_trimmed) < handle_end:
-        handle_end = (media_out - media_out_trimmed)
-
-    # create version data
-    version_data = {
-        "versionData": {
-            "retime": True,
-            "speed": time_scalar,
-            "timewarps": time_warp_nodes,
-            "handleStart": round(handle_start),
-            "handleEnd": round(handle_end)
-        }
-    }
-
-    returning_dict = {
-        "mediaIn": media_in_trimmed,
-        "mediaOut": media_out_trimmed,
-        "handleStart": round(handle_start),
-        "handleEnd": round(handle_end)
-    }
-
-    # add version data only if retime
-    if time_warp_nodes or time_scalar != 1.:
-        returning_dict.update(version_data)
-
-    return returning_dict
+"""Code related to editorial utility functions was moved
+to 'openpype.pipeline.editorial', please change your imports as soon as
+possible. The file will probably be removed in OpenPype 3.14.*
+"""
+
+import warnings
+import functools
+
+
+class EditorialDeprecatedWarning(DeprecationWarning):
+    pass
+
+
+def editorial_deprecated(func):
+    """Mark functions as deprecated.
+
+    It will result in a warning being emitted when the function is used.
+    """
+
+    @functools.wraps(func)
+    def new_func(*args, **kwargs):
+        warnings.simplefilter("always", EditorialDeprecatedWarning)
+        warnings.warn(
+            (
+                "Call to deprecated function '{}'."
+                " Function was moved to 'openpype.pipeline.editorial'."
+            ).format(func.__name__),
+            category=EditorialDeprecatedWarning,
+            stacklevel=2
+        )
+        return func(*args, **kwargs)
+    return new_func
+
+
+@editorial_deprecated
+def otio_range_to_frame_range(*args, **kwargs):
+    from openpype.pipeline.editorial import otio_range_to_frame_range
+
+    return otio_range_to_frame_range(*args, **kwargs)
+
+
+@editorial_deprecated
+def otio_range_with_handles(*args, **kwargs):
+    from openpype.pipeline.editorial import otio_range_with_handles
+
+    return otio_range_with_handles(*args, **kwargs)
+
+
+@editorial_deprecated
+def is_overlapping_otio_ranges(*args, **kwargs):
+    from openpype.pipeline.editorial import is_overlapping_otio_ranges
+
+    return is_overlapping_otio_ranges(*args, **kwargs)
+
+
+@editorial_deprecated
+def convert_to_padded_path(*args, **kwargs):
+    from openpype.pipeline.editorial import convert_to_padded_path
+
+    return convert_to_padded_path(*args, **kwargs)
+
+
+@editorial_deprecated
+def trim_media_range(*args, **kwargs):
+    from openpype.pipeline.editorial import trim_media_range
+
+    return trim_media_range(*args, **kwargs)
+
+
+@editorial_deprecated
+def range_from_frames(*args, **kwargs):
+    from openpype.pipeline.editorial import range_from_frames
+
+    return range_from_frames(*args, **kwargs)
+
+
+@editorial_deprecated
+def frames_to_secons(*args, **kwargs):
+    from openpype.pipeline.editorial import frames_to_seconds
+
+    return frames_to_seconds(*args, **kwargs)
+
+
+@editorial_deprecated
+def frames_to_timecode(*args, **kwargs):
+    from openpype.pipeline.editorial import frames_to_timecode
+
+    return frames_to_timecode(*args, **kwargs)
+
+
+@editorial_deprecated
+def make_sequence_collection(*args, **kwargs):
+    from openpype.pipeline.editorial import make_sequence_collection
+
+    return make_sequence_collection(*args, **kwargs)
+
+
+@editorial_deprecated
+def get_media_range_with_retimes(*args, **kwargs):
+    from openpype.pipeline.editorial import get_media_range_with_retimes
+
+    return get_media_range_with_retimes(*args, **kwargs)

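The module above is reduced to thin wrappers that emit an `EditorialDeprecatedWarning` and forward to `openpype.pipeline.editorial`. The same decorator pattern in a self-contained sketch, with illustrative names:

```python
import functools
import warnings

class MovedWarning(DeprecationWarning):
    pass

def deprecated(func):
    """Emit a warning on every call, then delegate to the wrapped function."""
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        warnings.simplefilter("always", MovedWarning)
        warnings.warn(
            "Call to deprecated function '{}'.".format(func.__name__),
            category=MovedWarning,
            stacklevel=2,
        )
        return func(*args, **kwargs)
    return wrapper

@deprecated
def old_name(x):
    return x * 2

old_name(2)  # emits a MovedWarning to stderr and returns 4
```
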
@@ -1,25 +0,0 @@
-import os
-import sys
-import importlib
-from .log import PypeLogger as Logger
-
-log = Logger().get_logger(__name__)
-
-
-def discover_host_vendor_module(module_name):
-    host = os.environ["AVALON_APP"]
-    pype_root = os.environ["OPENPYPE_REPOS_ROOT"]
-    main_module = module_name.split(".")[0]
-    module_path = os.path.join(
-        pype_root, "hosts", host, "vendor", main_module)
-
-    log.debug(
-        "Importing module from host vendor path: `{}`".format(module_path))
-
-    if not os.path.exists(module_path):
-        log.warning(
-            "Path not existing: `{}`".format(module_path))
-        return None
-
-    sys.path.insert(1, module_path)
-    return importlib.import_module(module_name)

@@ -6,6 +6,7 @@ import logging
 import six
 import platform

+from openpype.client import get_project
 from openpype.settings import get_project_settings

 from .anatomy import Anatomy

@@ -171,45 +172,73 @@ def get_last_version_from_path(path_dir, filter):
     return None


-def compute_paths(basic_paths_items, project_root):
+def concatenate_splitted_paths(split_paths, anatomy):
     pattern_array = re.compile(r"\[.*\]")
-    project_root_key = "__project_root__"
     output = []
-    for path_items in basic_paths_items:
+    for path_items in split_paths:
         clean_items = []
         if isinstance(path_items, str):
             path_items = [path_items]

         for path_item in path_items:
-            matches = re.findall(pattern_array, path_item)
-            if len(matches) > 0:
-                path_item = path_item.replace(matches[0], "")
-            if path_item == project_root_key:
-                path_item = project_root
+            if not re.match(r"{.+}", path_item):
+                path_item = re.sub(pattern_array, "", path_item)
             clean_items.append(path_item)

+        # backward compatibility
+        if "__project_root__" in path_items:
+            for root, root_path in anatomy.roots.items():
+                if not os.path.exists(str(root_path)):
+                    log.debug("Root {} path {} does not exist on this \
+                        computer!".format(root, root_path))
+                    continue
+                clean_items = ["{{root[{}]}}".format(root),
+                               r"{project[name]}"] + clean_items[1:]
+                output.append(os.path.normpath(os.path.sep.join(clean_items)))
+            continue
+
         output.append(os.path.normpath(os.path.sep.join(clean_items)))

     return output


+def get_format_data(anatomy):
+    project_doc = get_project(anatomy.project_name, fields=["data.code"])
+    project_code = project_doc["data"]["code"]
+
+    return {
+        "root": anatomy.roots,
+        "project": {
+            "name": anatomy.project_name,
+            "code": project_code
+        },
+    }
+
+
+def fill_paths(path_list, anatomy):
+    format_data = get_format_data(anatomy)
+    filled_paths = []
+
+    for path in path_list:
+        new_path = path.format(**format_data)
+        filled_paths.append(new_path)
+
+    return filled_paths
+
+
 def create_project_folders(basic_paths, project_name):
     anatomy = Anatomy(project_name)
-    roots_paths = []
-    if isinstance(anatomy.roots, dict):
-        for root in anatomy.roots.values():
-            roots_paths.append(root.value)
-    else:
-        roots_paths.append(anatomy.roots.value)
-
-    for root_path in roots_paths:
-        project_root = os.path.join(root_path, project_name)
-        full_paths = compute_paths(basic_paths, project_root)
-        # Create folders
-        for path in full_paths:
-            full_path = path.format(project_root=project_root)
-            if os.path.exists(full_path):
-                log.debug(
-                    "Folder already exists: {}".format(full_path)
-                )
-            else:
-                log.debug("Creating folder: {}".format(full_path))
-                os.makedirs(full_path)
+    concat_paths = concatenate_splitted_paths(basic_paths, anatomy)
+    filled_paths = fill_paths(concat_paths, anatomy)
+
+    # Create folders
+    for path in filled_paths:
+        if os.path.exists(path):
+            log.debug("Folder already exists: {}".format(path))
+        else:
+            log.debug("Creating folder: {}".format(path))
+            os.makedirs(path)


 def _list_path_items(folder_structure):

@@ -308,6 +337,7 @@ class HostDirmap:
             on_dirmap_enabled: run host code for enabling dirmap
             do_dirmap: run host code to do actual remapping
     """
+
     def __init__(self, host_name, project_settings, sync_module=None):
        self.host_name = host_name
        self.project_settings = project_settings

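The rewritten folder creation formats templated segments such as `{root[work]}/{project[name]}` with the data built by `get_format_data`. A small sketch of that fill step, using a hand-built stand-in for the anatomy data (the root and project values are illustrative):

```python
# Stand-in for get_format_data(anatomy); the keys mirror the diff.
format_data = {
    "root": {"work": "/mnt/projects"},
    "project": {"name": "demo_project", "code": "demo"},
}

paths = [
    "{root[work]}/{project[name]}/assets",
    "{root[work]}/{project[name]}/editorial",
]
# str.format resolves the dict lookups embedded in the template keys.
filled = [path.format(**format_data) for path in paths]
assert filled[0] == "/mnt/projects/demo_project/assets"
```
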
@@ -1,6 +1,7 @@
 import os
 import attr
 from bson.objectid import ObjectId
+import datetime

 from Qt import QtCore
 from Qt.QtCore import Qt

@@ -413,6 +414,23 @@ class _SyncRepresentationModel(QtCore.QAbstractTableModel):
             return index
         return None

+    def _convert_date(self, date_value, current_date):
+        """Converts 'date_value' to string.
+
+        Value of date_value might contain a date in the future, used to sort
+        queued items nicely next to the last downloaded ones.
+        """
+        try:
+            converted_date = None
+            # ignore date in the future - for sorting only
+            if date_value and date_value < current_date:
+                converted_date = date_value.strftime("%Y%m%dT%H%M%SZ")
+        except (AttributeError, TypeError):
+            # ignore unparseable values
+            pass
+
+        return converted_date
+

 class SyncRepresentationSummaryModel(_SyncRepresentationModel):
     """

@@ -560,7 +578,7 @@ class SyncRepresentationSummaryModel(_SyncRepresentationModel):
         remote_provider = lib.translate_provider_for_icon(self.sync_server,
                                                           self.project,
                                                           remote_site)

+        current_date = datetime.datetime.now()
         for repre in result.get("paginatedResults"):
             files = repre.get("files", [])
             if isinstance(files, dict):  # aggregate returns dictionary

@@ -570,14 +588,10 @@ class SyncRepresentationSummaryModel(_SyncRepresentationModel):
             if not files:
                 continue

-            local_updated = remote_updated = None
-            if repre.get('updated_dt_local'):
-                local_updated = \
-                    repre.get('updated_dt_local').strftime("%Y%m%dT%H%M%SZ")
-
-            if repre.get('updated_dt_remote'):
-                remote_updated = \
-                    repre.get('updated_dt_remote').strftime("%Y%m%dT%H%M%SZ")
+            local_updated = self._convert_date(repre.get('updated_dt_local'),
+                                               current_date)
+            remote_updated = self._convert_date(repre.get('updated_dt_remote'),
+                                                current_date)

             avg_progress_remote = lib.convert_progress(
                 repre.get('avg_progress_remote', '0'))

@@ -645,6 +659,8 @@ class SyncRepresentationSummaryModel(_SyncRepresentationModel):
         if limit == 0:
             limit = SyncRepresentationSummaryModel.PAGE_SIZE

+        # replace null with value in the future for better sorting
+        dummy_max_date = datetime.datetime(2099, 1, 1)
         aggr = [
             {"$match": self.get_match_part()},
             {'$unwind': '$files'},

@@ -687,7 +703,7 @@ class SyncRepresentationSummaryModel(_SyncRepresentationModel):
                     {'$cond': [
                         {'$size': "$order_remote.last_failed_dt"},
                         "$order_remote.last_failed_dt",
-                        []
+                        [dummy_max_date]
                     ]}
                 ]}},
                 'updated_dt_local': {'$first': {

@@ -696,7 +712,7 @@ class SyncRepresentationSummaryModel(_SyncRepresentationModel):
                     {'$cond': [
                         {'$size': "$order_local.last_failed_dt"},
                         "$order_local.last_failed_dt",
-                        []
+                        [dummy_max_date]
                     ]}
                 ]}},
                 'files_size': {'$ifNull': ["$files.size", 0]},

@@ -1039,6 +1055,7 @@ class SyncRepresentationDetailModel(_SyncRepresentationModel):
                                                           self.project,
                                                           remote_site)

+        current_date = datetime.datetime.now()
         for repre in result.get("paginatedResults"):
             # log.info("!!! repre:: {}".format(repre))
             files = repre.get("files", [])

@@ -1046,16 +1063,12 @@ class SyncRepresentationDetailModel(_SyncRepresentationModel):
                 files = [files]

             for file in files:
-                local_updated = remote_updated = None
-                if repre.get('updated_dt_local'):
-                    local_updated = \
-                        repre.get('updated_dt_local').strftime(
-                            "%Y%m%dT%H%M%SZ")
-
-                if repre.get('updated_dt_remote'):
-                    remote_updated = \
-                        repre.get('updated_dt_remote').strftime(
-                            "%Y%m%dT%H%M%SZ")
+                local_updated = self._convert_date(
+                    repre.get('updated_dt_local'),
+                    current_date)
+                remote_updated = self._convert_date(
+                    repre.get('updated_dt_remote'),
+                    current_date)

                 remote_progress = lib.convert_progress(
                     repre.get('progress_remote', '0'))

@@ -1104,6 +1117,7 @@ class SyncRepresentationDetailModel(_SyncRepresentationModel):
         if limit == 0:
             limit = SyncRepresentationSummaryModel.PAGE_SIZE

+        dummy_max_date = datetime.datetime(2099, 1, 1)
         aggr = [
             {"$match": self.get_match_part()},
             {"$unwind": "$files"},

@@ -1147,7 +1161,7 @@ class SyncRepresentationDetailModel(_SyncRepresentationModel):
                         '$cond': [
                             {'$size': "$order_remote.last_failed_dt"},
                             "$order_remote.last_failed_dt",
-                            []
+                            [dummy_max_date]
                         ]
                     }
                 ]

@@ -1160,7 +1174,7 @@ class SyncRepresentationDetailModel(_SyncRepresentationModel):
                         '$cond': [
                             {'$size': "$order_local.last_failed_dt"},
                             "$order_local.last_failed_dt",
-                            []
+                            [dummy_max_date]
                         ]
                    }
                ]

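The `dummy_max_date` change swaps the empty-array fallback in the Mongo `$cond` for a far-future timestamp, so records that never failed sort after real dates instead of as nulls; `_convert_date` then hides such future dates from display. A sketch of the combined effect:

```python
import datetime

dummy_max_date = datetime.datetime(2099, 1, 1)
dates = [
    datetime.datetime(2022, 5, 1),   # real sync timestamp
    dummy_max_date,                  # "never failed" placeholder
    datetime.datetime(2022, 4, 1),
]

# Placeholders sort after every real date, keeping queued items together.
assert sorted(dates)[-1] == dummy_max_date

def convert_date(date_value, current_date):
    """Mirror of _convert_date: future placeholders render as None."""
    if date_value and date_value < current_date:
        return date_value.strftime("%Y%m%dT%H%M%SZ")
    return None

now = datetime.datetime.now()
assert convert_date(dummy_max_date, now) is None
assert convert_date(datetime.datetime(2022, 5, 1), now) == "20220501T000000Z"
```
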
283
openpype/modules/webserver/cors_middleware.py
Normal file
283
openpype/modules/webserver/cors_middleware.py
Normal file
|
|
@ -0,0 +1,283 @@
|
|||
r"""
|
||||
===============
|
||||
CORS Middleware
|
||||
===============
|
||||
.. versionadded:: 0.2.0
|
||||
Dealing with CORS headers for aiohttp applications.
|
||||
**IMPORTANT:** There is a `aiohttp-cors
|
||||
<https://pypi.org/project/aiohttp_cors/>`_ library, which handles CORS
|
||||
headers by attaching additional handlers to aiohttp application for
|
||||
OPTIONS (preflight) requests. In same time this CORS middleware mimics the
|
||||
logic of `django-cors-headers <https://pypi.org/project/django-cors-headers>`_,
|
||||
where all handling done in the middleware without any additional handlers. This
|
||||
approach allows aiohttp application to respond with CORS headers for OPTIONS or
|
||||
wildcard handlers, which is not possible with ``aiohttp-cors`` due to
|
||||
https://github.com/aio-libs/aiohttp-cors/issues/241 issue.
|
||||
For detailed information about CORS (Cross Origin Resource Sharing) please
|
||||
visit:
|
||||
- `Wikipedia <https://en.m.wikipedia.org/wiki/Cross-origin_resource_sharing>`_
|
||||
- Or `MDN <https://developer.mozilla.org/en-US/docs/Web/HTTP/CORS>`_
|
||||
Configuration
|
||||
=============
|
||||
**IMPORTANT:** By default, CORS middleware do not allow any origins to access
|
||||
content from your aiohttp appliction. Which means, you need carefully check
|
||||
possible options and provide custom values for your needs.
|
||||
Usage
|
||||
=====
|
||||
.. code-block:: python
|
||||
import re
|
||||
from aiohttp import web
|
||||
from aiohttp_middlewares import cors_middleware
|
||||
from aiohttp_middlewares.cors import DEFAULT_ALLOW_HEADERS
|
||||
# Unsecure configuration to allow all CORS requests
|
||||
app = web.Application(
|
||||
middlewares=[cors_middleware(allow_all=True)]
|
||||
)
|
||||
# Allow CORS requests from URL http://localhost:3000
|
||||
app = web.Application(
|
||||
middlewares=[
|
||||
cors_middleware(origins=["http://localhost:3000"])
|
||||
]
|
||||
)
|
||||
# Allow CORS requests from all localhost urls
|
||||
app = web.Application(
|
||||
middlewares=[
|
||||
cors_middleware(
|
||||
origins=[re.compile(r"^https?\:\/\/localhost")]
|
||||
)
|
||||
]
|
||||
)
|
||||
# Allow CORS requests from https://frontend.myapp.com as well
|
||||
# as allow credentials
|
||||
CORS_ALLOW_ORIGINS = ["https://frontend.myapp.com"]
|
||||
app = web.Application(
|
||||
middlewares=[
|
||||
cors_middleware(
|
||||
origins=CORS_ALLOW_ORIGINS,
|
||||
allow_credentials=True,
|
||||
)
|
||||
]
|
||||
)
|
||||
# Allow CORS requests only for API urls
|
||||
app = web.Application(
|
||||
middelwares=[
|
||||
cors_middleware(
|
||||
origins=CORS_ALLOW_ORIGINS,
|
||||
urls=[re.compile(r"^\/api")],
|
||||
)
|
||||
]
|
||||
)
|
||||
# Allow CORS requests for POST & PATCH methods, and for all
|
||||
# default headers and `X-Client-UID`
|
||||
app = web.Application(
|
||||
middlewares=[
|
||||
cors_middleware(
|
||||
origings=CORS_ALLOW_ORIGINS,
|
||||
allow_methods=("POST", "PATCH"),
|
||||
allow_headers=DEFAULT_ALLOW_HEADERS
|
||||
+ ("X-Client-UID",),
|
||||
)
|
||||
]
|
||||
)
|
||||
"""
|
||||
|
||||
import logging
|
||||
import re
|
||||
from typing import Pattern, Tuple
|
||||
|
||||
from aiohttp import web
|
||||
|
||||
from aiohttp_middlewares.annotations import (
|
||||
Handler,
|
||||
Middleware,
|
||||
StrCollection,
|
||||
UrlCollection,
|
||||
)
|
||||
from aiohttp_middlewares.utils import match_path
|
||||
|
||||
|
||||
ACCESS_CONTROL = "Access-Control"
|
||||
ACCESS_CONTROL_ALLOW = f"{ACCESS_CONTROL}-Allow"
|
||||
ACCESS_CONTROL_ALLOW_CREDENTIALS = f"{ACCESS_CONTROL_ALLOW}-Credentials"
|
||||
ACCESS_CONTROL_ALLOW_HEADERS = f"{ACCESS_CONTROL_ALLOW}-Headers"
|
||||
ACCESS_CONTROL_ALLOW_METHODS = f"{ACCESS_CONTROL_ALLOW}-Methods"
|
||||
ACCESS_CONTROL_ALLOW_ORIGIN = f"{ACCESS_CONTROL_ALLOW}-Origin"
|
||||
ACCESS_CONTROL_EXPOSE_HEADERS = f"{ACCESS_CONTROL}-Expose-Headers"
|
||||
ACCESS_CONTROL_MAX_AGE = f"{ACCESS_CONTROL}-Max-Age"
|
||||
ACCESS_CONTROL_REQUEST_METHOD = f"{ACCESS_CONTROL}-Request-Method"
|
||||
|
||||
DEFAULT_ALLOW_HEADERS = (
|
||||
"accept",
|
||||
"accept-encoding",
|
||||
"authorization",
|
||||
"content-type",
|
||||
"dnt",
|
||||
"origin",
|
||||
"user-agent",
|
||||
"x-csrftoken",
|
||||
"x-requested-with",
|
||||
)
|
||||
DEFAULT_ALLOW_METHODS = ("DELETE", "GET", "OPTIONS", "PATCH", "POST", "PUT")
|
||||
DEFAULT_URLS: Tuple[Pattern[str]] = (re.compile(r".*"),)
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def cors_middleware(
|
||||
*,
|
||||
allow_all: bool = False,
|
||||
origins: UrlCollection = None,
|
||||
urls: UrlCollection = None,
|
||||
expose_headers: StrCollection = None,
|
||||
allow_headers: StrCollection = DEFAULT_ALLOW_HEADERS,
|
||||
allow_methods: StrCollection = DEFAULT_ALLOW_METHODS,
|
||||
allow_credentials: bool = False,
|
||||
max_age: int = None,
|
||||
) -> Middleware:
|
||||
"""Middleware to provide CORS headers for aiohttp applications.
|
||||
:param allow_all:
|
||||
When enabled, allow any Origin to access content from your aiohttp web
|
||||
application. **Please be careful with enabling this option as it may
|
||||
result in security issues for your application.** By default: ``False``
|
||||
:param origins:
|
||||
Allow content access for given list of origins. Support supplying
|
||||
strings for exact origin match or regex instances. By default: ``None``
|
||||
:param urls:
|
||||
Allow contect access for given list of URLs in aiohttp application.
|
||||
By default: *apply CORS headers for all URLs*
|
||||
:param expose_headers:
|
||||
List of headers to be exposed with every CORS request. By default:
|
||||
``None``
|
||||
:param allow_headers:
|
||||
List of allowed headers. By default:
|
||||
.. code-block:: python
|
||||
(
|
||||
"accept",
|
||||
"accept-encoding",
|
||||
"authorization",
|
||||
"content-type",
|
||||
"dnt",
|
||||
"origin",
|
||||
"user-agent",
|
||||
"x-csrftoken",
|
||||
"x-requested-with",
|
||||
)
|
||||
:param allow_methods:
|
||||
List of allowed methods. By default:
|
||||
.. code-block:: python
|
||||
("DELETE", "GET", "OPTIONS", "PATCH", "POST", "PUT")
|
||||
:param allow_credentials:
|
||||
When enabled apply allow credentials header in response, which results
|
||||
in sharing cookies on shared resources. **Please be careful with
|
||||
allowing credentials for CORS requests.** By default: ``False``
|
||||
:param max_age: Access control max age in seconds. By default: ``None``
|
||||
"""
|
||||
check_urls: UrlCollection = DEFAULT_URLS if urls is None else urls
|
||||
|
||||
@web.middleware
|
||||
async def middleware(
|
||||
request: web.Request, handler: Handler
|
||||
) -> web.StreamResponse:
|
||||
# Initial vars
|
||||
request_method = request.method
|
||||
request_path = request.rel_url.path
|
||||
|
||||
# Is this an OPTIONS request
|
||||
is_options_request = request_method == "OPTIONS"
|
||||
|
||||
# Is this a preflight request
|
||||
is_preflight_request = (
|
||||
is_options_request
|
||||
and ACCESS_CONTROL_REQUEST_METHOD in request.headers
|
||||
)
|
||||
|
||||
# Log extra data
|
||||
log_extra = {
|
||||
"is_preflight_request": is_preflight_request,
|
||||
"method": request_method.lower(),
|
||||
"path": request_path,
|
||||
}
|
||||
|
||||
# Check whether CORS should be enabled for given URL or not. By default
|
||||
# CORS enabled for all URLs
|
||||
if not match_items(check_urls, request_path):
|
||||
logger.debug(
|
||||
"Request should not be processed via CORS middleware",
|
||||
extra=log_extra,
|
||||
)
|
||||
return await handler(request)
|
||||
|
||||
# If this is a preflight request - generate empty response
|
||||
if is_preflight_request:
|
||||
response = web.StreamResponse()
|
||||
# Otherwise - call actual handler
|
||||
else:
|
||||
response = await handler(request)
|
||||
|
||||
# Now check origin heaer
|
||||
origin = request.headers.get("Origin")
|
||||
# Empty origin - do nothing
|
||||
if not origin:
|
||||
logger.debug(
|
||||
"Request does not have Origin header. CORS headers not "
|
||||
"available for given requests",
|
||||
extra=log_extra,
|
||||
)
|
||||
return response
|
||||
|
||||
# Set allow credentials header if necessary
|
||||
if allow_credentials:
|
||||
response.headers[ACCESS_CONTROL_ALLOW_CREDENTIALS] = "true"
|
||||
|
||||
# Check whether current origin satisfies CORS policy
|
||||
if not allow_all and not (origins and match_items(origins, origin)):
|
||||
logger.debug(
|
||||
"CORS headers not allowed for given Origin", extra=log_extra
|
||||
)
|
||||
return response
|
||||
|
||||
# Now start supplying CORS headers
|
||||
# First one is Access-Control-Allow-Origin
|
||||
if allow_all and not allow_credentials:
|
||||
cors_origin = "*"
|
||||
else:
|
||||
cors_origin = origin
|
||||
response.headers[ACCESS_CONTROL_ALLOW_ORIGIN] = cors_origin
|
||||
|
||||
# Then Access-Control-Expose-Headers
|
||||
if expose_headers:
|
||||
response.headers[ACCESS_CONTROL_EXPOSE_HEADERS] = ", ".join(
|
||||
expose_headers
|
||||
)
|
||||
|
||||
# Now, if this is an options request, respond with extra Allow headers
|
||||
if is_options_request:
|
||||
response.headers[ACCESS_CONTROL_ALLOW_HEADERS] = ", ".join(
|
||||
allow_headers
|
||||
)
|
||||
response.headers[ACCESS_CONTROL_ALLOW_METHODS] = ", ".join(
|
||||
allow_methods
|
||||
)
|
||||
if max_age is not None:
|
||||
response.headers[ACCESS_CONTROL_MAX_AGE] = str(max_age)
|
||||
|
||||
# If this is preflight request - do not allow other middlewares to
|
||||
# process this request
|
||||
if is_preflight_request:
|
||||
logger.debug(
|
||||
"Provide CORS headers with empty response for preflight "
|
||||
"request",
|
||||
extra=log_extra,
|
||||
)
|
||||
raise web.HTTPOk(text="", headers=response.headers)
|
||||
|
||||
# Otherwise return normal response
|
||||
logger.debug("Provide CORS headers for request", extra=log_extra)
|
||||
return response
|
||||
|
||||
return middleware
|
||||
|
||||
|
||||
def match_items(items: UrlCollection, value: str) -> bool:
|
||||
"""Go through all items and try to match item with given value."""
|
||||
return any(match_path(item, value) for item in items)
|
||||
|
|
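A quick way to see the new module in action: a hedged sketch using aiohttp's test utilities (not part of the commit; the route path and origin are invented) showing that a preflight OPTIONS request is answered by the middleware alone, with no handler registered:

```python
import asyncio
import re

from aiohttp import web
from aiohttp.test_utils import TestClient, TestServer

from openpype.modules.webserver.cors_middleware import cors_middleware


async def main():
    # No routes at all - the middleware still answers preflights.
    app = web.Application(
        middlewares=[
            cors_middleware(origins=[re.compile(r"^https?://localhost")])
        ]
    )

    async with TestClient(TestServer(app)) as client:
        resp = await client.options(
            "/anything",  # hypothetical path, nothing is registered here
            headers={
                "Origin": "http://localhost:3000",
                "Access-Control-Request-Method": "GET",
            },
        )
        print(resp.status)  # 200, raised as web.HTTPOk by the middleware
        print(resp.headers.get("Access-Control-Allow-Origin"))
        # http://localhost:3000


asyncio.run(main())
```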
@@ -1,15 +1,18 @@
+import re
 import threading
 import asyncio

 from aiohttp import web

 from openpype.lib import PypeLogger
+from .cors_middleware import cors_middleware

 log = PypeLogger.get_logger("WebServer")


 class WebServerManager:
     """Manager that takes care of the web server thread."""

     def __init__(self, port=None, host=None):
         self.port = port or 8079
         self.host = host or "localhost"

@@ -18,7 +21,13 @@ class WebServerManager:
         self.handlers = {}
         self.on_stop_callbacks = []

-        self.app = web.Application()
+        self.app = web.Application(
+            middlewares=[
+                cors_middleware(
+                    origins=[re.compile(r"^https?\:\/\/localhost")]
+                )
+            ]
+        )

         # add route with multiple methods for single "external app"
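For reference, the origin pattern wired in above is anchored only at the start, so it accepts any localhost port and either scheme while rejecting foreign hosts. A tiny sanity check (not in the commit):

```python
import re

# Same pattern as passed to cors_middleware above.
pattern = re.compile(r"^https?\:\/\/localhost")
for origin in (
    "http://localhost:3000",
    "https://localhost:8079",
    "http://example.com",
):
    print(origin, "->", bool(pattern.match(origin)))
# http://localhost:3000 -> True
# https://localhost:8079 -> True
# http://example.com -> False
```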
openpype/pipeline/editorial.py (new file, 282 lines)

@@ -0,0 +1,282 @@
import os
import re
import clique

import opentimelineio as otio
from opentimelineio import opentime as _ot


def otio_range_to_frame_range(otio_range):
    start = _ot.to_frames(
        otio_range.start_time, otio_range.start_time.rate)
    end = start + _ot.to_frames(
        otio_range.duration, otio_range.duration.rate)
    return start, end


def otio_range_with_handles(otio_range, instance):
    handle_start = instance.data["handleStart"]
    handle_end = instance.data["handleEnd"]
    handles_duration = handle_start + handle_end
    fps = float(otio_range.start_time.rate)
    start = _ot.to_frames(otio_range.start_time, fps)
    duration = _ot.to_frames(otio_range.duration, fps)

    return _ot.TimeRange(
        start_time=_ot.RationalTime((start - handle_start), fps),
        duration=_ot.RationalTime((duration + handles_duration), fps)
    )


def is_overlapping_otio_ranges(test_otio_range, main_otio_range, strict=False):
    test_start, test_end = otio_range_to_frame_range(test_otio_range)
    main_start, main_end = otio_range_to_frame_range(main_otio_range)
    covering_exp = bool(
        (test_start <= main_start) and (test_end >= main_end)
    )
    inside_exp = bool(
        (test_start >= main_start) and (test_end <= main_end)
    )
    overlaying_right_exp = bool(
        (test_start <= main_end) and (test_end >= main_end)
    )
    overlaying_left_exp = bool(
        (test_end >= main_start) and (test_start <= main_start)
    )

    if not strict:
        return any((
            covering_exp,
            inside_exp,
            overlaying_right_exp,
            overlaying_left_exp
        ))
    else:
        return covering_exp


def convert_to_padded_path(path, padding):
    """
    Return correct padding in sequence string

    Args:
        path (str): path url or simple file name
        padding (int): number of padding

    Returns:
        type: string with reformatted path

    Example:
        convert_to_padded_path("plate.%d.exr") > plate.%04d.exr

    """
    if "%d" in path:
        path = re.sub("%d", "%0{padding}d".format(padding=padding), path)
    return path


def trim_media_range(media_range, source_range):
    """
    Trim input media range with clip source range.

    Args:
        media_range (otio._ot._ot.TimeRange): available range of media
        source_range (otio._ot._ot.TimeRange): clip required range

    Returns:
        otio._ot._ot.TimeRange: trimmed media range

    """
    rw_media_start = _ot.RationalTime(
        media_range.start_time.value + source_range.start_time.value,
        media_range.start_time.rate
    )
    rw_media_duration = _ot.RationalTime(
        source_range.duration.value,
        media_range.duration.rate
    )
    return _ot.TimeRange(
        rw_media_start, rw_media_duration)


def range_from_frames(start, duration, fps):
    """
    Returns otio time range.

    Args:
        start (int): frame start
        duration (int): frame duration
        fps (float): frame rate

    Returns:
        otio._ot._ot.TimeRange: created range

    """
    return _ot.TimeRange(
        _ot.RationalTime(start, fps),
        _ot.RationalTime(duration, fps)
    )


def frames_to_seconds(frames, framerate):
    """
    Returning seconds.

    Args:
        frames (int): frame
        framerate (float): frame rate

    Returns:
        float: second value
    """

    rt = _ot.from_frames(frames, framerate)
    return _ot.to_seconds(rt)


def frames_to_timecode(frames, framerate):
    rt = _ot.from_frames(frames, framerate)
    return _ot.to_timecode(rt)


def make_sequence_collection(path, otio_range, metadata):
    """
    Make collection from path otio range and otio metadata.

    Args:
        path (str): path to image sequence with `%d`
        otio_range (otio._ot._ot.TimeRange): range to be used
        metadata (dict): data where padding value can be found

    Returns:
        list: dir_path (str): path to sequence, collection object

    """
    if "%" not in path:
        return None
    file_name = os.path.basename(path)
    dir_path = os.path.dirname(path)
    head = file_name.split("%")[0]
    tail = os.path.splitext(file_name)[-1]
    first, last = otio_range_to_frame_range(otio_range)
    collection = clique.Collection(
        head=head, tail=tail, padding=metadata["padding"])
    collection.indexes.update([i for i in range(first, last)])
    return dir_path, collection


def _sequence_resize(source, length):
    step = float(len(source) - 1) / (length - 1)
    for i in range(length):
        low, ratio = divmod(i * step, 1)
        high = low + 1 if ratio > 0 else low
        yield (1 - ratio) * source[int(low)] + ratio * source[int(high)]


def get_media_range_with_retimes(otio_clip, handle_start, handle_end):
    source_range = otio_clip.source_range
    available_range = otio_clip.available_range()
    media_in = available_range.start_time.value
    media_out = available_range.end_time_inclusive().value

    # modifiers
    time_scalar = 1.
    offset_in = 0
    offset_out = 0
    time_warp_nodes = []

    # Check for speed effects and adjust playback speed accordingly
    for effect in otio_clip.effects:
        if isinstance(effect, otio.schema.LinearTimeWarp):
            time_scalar = effect.time_scalar

        elif isinstance(effect, otio.schema.FreezeFrame):
            # For freeze frame, playback speed must be set after range
            time_scalar = 0.

        elif isinstance(effect, otio.schema.TimeEffect):
            # For generic time effects, only TimeWarp lookups are handled
            name = effect.name
            effect_name = effect.effect_name
            if "TimeWarp" not in effect_name:
                continue
            metadata = effect.metadata
            lookup = metadata.get("lookup")
            if not lookup:
                continue

            # time warp node
            tw_node = {
                "Class": "TimeWarp",
                "name": name
            }
            tw_node.update(metadata)
            tw_node["lookup"] = list(lookup)

            # get first and last frame offsets
            offset_in += lookup[0]
            offset_out += lookup[-1]

            # add to timewarp nodes
            time_warp_nodes.append(tw_node)

    # multiply by time scalar
    offset_in *= time_scalar
    offset_out *= time_scalar

    # flip offsets if speed is reversed
    if time_scalar < 0:
        _offset_in = offset_out
        _offset_out = offset_in
        offset_in = _offset_in
        offset_out = _offset_out

    # scale handles
    handle_start *= abs(time_scalar)
    handle_end *= abs(time_scalar)

    # flip handles if speed is reversed
    if time_scalar < 0:
        _handle_start = handle_end
        _handle_end = handle_start
        handle_start = _handle_start
        handle_end = _handle_end

    source_in = source_range.start_time.value

    media_in_trimmed = (
        media_in + source_in + offset_in)
    media_out_trimmed = (
        media_in + source_in + (
            ((source_range.duration.value - 1) * abs(
                time_scalar)) + offset_out))

    # calculate available handles
    if (media_in_trimmed - media_in) < handle_start:
        handle_start = (media_in_trimmed - media_in)
    if (media_out - media_out_trimmed) < handle_end:
        handle_end = (media_out - media_out_trimmed)

    # create version data
    version_data = {
        "versionData": {
            "retime": True,
            "speed": time_scalar,
            "timewarps": time_warp_nodes,
            "handleStart": round(handle_start),
            "handleEnd": round(handle_end)
        }
    }

    returning_dict = {
        "mediaIn": media_in_trimmed,
        "mediaOut": media_out_trimmed,
        "handleStart": round(handle_start),
        "handleEnd": round(handle_end)
    }

    # add version data only if retimed
    if time_warp_nodes or time_scalar != 1.:
        returning_dict.update(version_data)

    return returning_dict
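To illustrate how the new helpers compose, here is a small sketch (assumes `opentimelineio` is installed; the frame numbers are invented) trimming a media range the way the collect/extract plugins below do:

```python
from openpype.pipeline.editorial import (
    otio_range_to_frame_range,
    range_from_frames,
    trim_media_range,
)

fps = 24.0
# Plate available on disk as frames 1001-1100.
media_range = range_from_frames(1001, 100, fps)
# Clip uses a 50-frame slice starting 10 frames into the media.
source_range = range_from_frames(10, 50, fps)

trimmed = trim_media_range(media_range, source_range)
print(otio_range_to_frame_range(trimmed))  # (1011, 1061)
```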
@@ -8,8 +8,11 @@ Requires:
 # import os
 import opentimelineio as otio
 import pyblish.api
-import openpype.lib
 from pprint import pformat
+from openpype.pipeline.editorial import (
+    otio_range_to_frame_range,
+    otio_range_with_handles
+)


 class CollectOtioFrameRanges(pyblish.api.InstancePlugin):

@@ -31,9 +34,9 @@ class CollectOtioFrameRanges(pyblish.api.InstancePlugin):
         otio_tl_range = otio_clip.range_in_parent()
         otio_src_range = otio_clip.source_range
         otio_avalable_range = otio_clip.available_range()
-        otio_tl_range_handles = openpype.lib.otio_range_with_handles(
+        otio_tl_range_handles = otio_range_with_handles(
             otio_tl_range, instance)
-        otio_src_range_handles = openpype.lib.otio_range_with_handles(
+        otio_src_range_handles = otio_range_with_handles(
             otio_src_range, instance)

         # get source available start frame

@@ -42,7 +45,7 @@ class CollectOtioFrameRanges(pyblish.api.InstancePlugin):
             otio_avalable_range.start_time.rate)

         # convert to frames
-        range_convert = openpype.lib.otio_range_to_frame_range
+        range_convert = otio_range_to_frame_range
         tl_start, tl_end = range_convert(otio_tl_range)
         tl_start_h, tl_end_h = range_convert(otio_tl_range_handles)
         src_start, src_end = range_convert(otio_src_range)
@@ -10,7 +10,11 @@ import os
 import clique
 import opentimelineio as otio
 import pyblish.api
-import openpype.lib as oplib
+from openpype.pipeline.editorial import (
+    get_media_range_with_retimes,
+    range_from_frames,
+    make_sequence_collection
+)


 class CollectOtioSubsetResources(pyblish.api.InstancePlugin):

@@ -42,7 +46,7 @@ class CollectOtioSubsetResources(pyblish.api.InstancePlugin):
         available_duration = otio_avalable_range.duration.value

         # get available range trimmed with processed retimes
-        retimed_attributes = oplib.get_media_range_with_retimes(
+        retimed_attributes = get_media_range_with_retimes(
             otio_clip, handle_start, handle_end)
         self.log.debug(
             ">> retimed_attributes: {}".format(retimed_attributes))

@@ -64,7 +68,7 @@ class CollectOtioSubsetResources(pyblish.api.InstancePlugin):
         a_frame_end_h = media_out + handle_end

         # create trimmed otio time range
-        trimmed_media_range_h = oplib.range_from_frames(
+        trimmed_media_range_h = range_from_frames(
             a_frame_start_h, (a_frame_end_h - a_frame_start_h) + 1,
             media_fps
         )

@@ -144,7 +148,7 @@ class CollectOtioSubsetResources(pyblish.api.InstancePlugin):
             # in case it is file sequence but not new OTIO schema
             # `ImageSequenceReference`
             path = media_ref.target_url
-            collection_data = oplib.make_sequence_collection(
+            collection_data = make_sequence_collection(
                 path, trimmed_media_range_h, metadata)
             self.staging_dir, collection = collection_data
@@ -19,6 +19,13 @@ import clique
 import opentimelineio as otio
 from pyblish import api
 import openpype
+from openpype.pipeline.editorial import (
+    otio_range_to_frame_range,
+    trim_media_range,
+    range_from_frames,
+    frames_to_seconds,
+    make_sequence_collection
+)


 class ExtractOTIOReview(openpype.api.Extractor):

@@ -161,7 +168,7 @@ class ExtractOTIOReview(openpype.api.Extractor):
             dirname = media_ref.target_url_base
             head = media_ref.name_prefix
             tail = media_ref.name_suffix
-            first, last = openpype.lib.otio_range_to_frame_range(
+            first, last = otio_range_to_frame_range(
                 available_range)
             collection = clique.Collection(
                 head=head,

@@ -180,7 +187,7 @@ class ExtractOTIOReview(openpype.api.Extractor):
             # in case it is file sequence but not new OTIO schema
             # `ImageSequenceReference`
             path = media_ref.target_url
-            collection_data = openpype.lib.make_sequence_collection(
+            collection_data = make_sequence_collection(
                 path, available_range, metadata)
             dir_path, collection = collection_data

@@ -305,8 +312,8 @@ class ExtractOTIOReview(openpype.api.Extractor):
             duration = avl_durtation

         # return correct trimmed range
-        return openpype.lib.trim_media_range(
-            avl_range, openpype.lib.range_from_frames(start, duration, fps)
+        return trim_media_range(
+            avl_range, range_from_frames(start, duration, fps)
         )

     def _render_seqment(self, sequence=None,

@@ -357,8 +364,8 @@ class ExtractOTIOReview(openpype.api.Extractor):
         frame_start = otio_range.start_time.value
         input_fps = otio_range.start_time.rate
         frame_duration = otio_range.duration.value
-        sec_start = openpype.lib.frames_to_secons(frame_start, input_fps)
-        sec_duration = openpype.lib.frames_to_secons(
+        sec_start = frames_to_seconds(frame_start, input_fps)
+        sec_duration = frames_to_seconds(
             frame_duration, input_fps
         )

@@ -370,8 +377,7 @@ class ExtractOTIOReview(openpype.api.Extractor):
             ])

         elif gap:
-            sec_duration = openpype.lib.frames_to_secons(
-                gap, self.actual_fps)
+            sec_duration = frames_to_seconds(gap, self.actual_fps)

             # form command for rendering gap files
             command.extend([
@@ -9,6 +9,7 @@ import os
 from pyblish import api
 import openpype
 from copy import deepcopy
+from openpype.pipeline.editorial import frames_to_seconds


 class ExtractOTIOTrimmingVideo(openpype.api.Extractor):

@@ -81,8 +82,8 @@ class ExtractOTIOTrimmingVideo(openpype.api.Extractor):
         frame_start = otio_range.start_time.value
         input_fps = otio_range.start_time.rate
         frame_duration = otio_range.duration.value - 1
-        sec_start = openpype.lib.frames_to_secons(frame_start, input_fps)
-        sec_duration = openpype.lib.frames_to_secons(frame_duration, input_fps)
+        sec_start = frames_to_seconds(frame_start, input_fps)
+        sec_duration = frames_to_seconds(frame_duration, input_fps)

         # form command for rendering gap files
         command.extend([
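The renamed helper (`frames_to_secons` → `frames_to_seconds`) behaves as before; a quick check, assuming `opentimelineio` is available:

```python
from openpype.pipeline.editorial import frames_to_seconds, frames_to_timecode

# 48 frames at 24 fps is exactly two seconds.
print(frames_to_seconds(48, 24.0))   # 2.0
print(frames_to_timecode(48, 24.0))  # 00:00:02:00
```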
@@ -401,7 +401,7 @@
             ]
         }
     },
-    "project_folder_structure": "{\"__project_root__\": {\"prod\": {}, \"resources\": {\"footage\": {\"plates\": {}, \"offline\": {}}, \"audio\": {}, \"art_dept\": {}}, \"editorial\": {}, \"assets[ftrack.Library]\": {\"characters[ftrack]\": {}, \"locations[ftrack]\": {}}, \"shots[ftrack.Sequence]\": {\"scripts\": {}, \"editorial[ftrack.Folder]\": {}}}}",
+    "project_folder_structure": "{\"__project_root__\": {\"prod\": {}, \"resources\": {\"footage\": {\"plates\": {}, \"offline\": {}}, \"audio\": {}, \"art_dept\": {}}, \"editorial\": {}, \"assets\": {\"characters\": {}, \"locations\": {}}, \"shots\": {}}}",
     "sync_server": {
         "enabled": false,
         "config": {
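The escaped one-line value is hard to review; pretty-printed (a readability aid, not part of the commit), the new default folder structure without the ftrack markers reads:

```python
import json

# New default value of "project_folder_structure" from the hunk above.
new_value = (
    '{"__project_root__": {"prod": {}, "resources": '
    '{"footage": {"plates": {}, "offline": {}}, "audio": {}, '
    '"art_dept": {}}, "editorial": {}, "assets": '
    '{"characters": {}, "locations": {}}, "shots": {}}}'
)
print(json.dumps(json.loads(new_value), indent=4))
```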
@@ -1418,3 +1418,6 @@ InViewButton, InViewButton:disabled {
 InViewButton:hover {
     background: rgba(255, 255, 255, 37);
 }
+SupportLabel {
+    color: {color:font-disabled};
+}
@@ -977,7 +977,12 @@ class CreateDialog(QtWidgets.QDialog):
             elif variant:
                 self.variant_hints_menu.addAction(variant)

-        self.variant_input.setText(default_variant or "Main")
+        variant_text = default_variant or "Main"
+        # Make sure subset name is updated to new plugin
+        if variant_text == self.variant_input.text():
+            self._on_variant_change()
+        else:
+            self.variant_input.setText(variant_text)

     def _on_variant_widget_resize(self):
         self.variant_hints_btn.setFixedHeight(self.variant_input.height())
@@ -26,26 +26,110 @@ IS_SEQUENCE_ROLE = QtCore.Qt.UserRole + 7
 EXT_ROLE = QtCore.Qt.UserRole + 8


+class SupportLabel(QtWidgets.QLabel):
+    pass
+
+
 class DropEmpty(QtWidgets.QWidget):
-    _drop_enabled_text = "Drag & Drop\n(drop files here)"
-    _empty_extensions = "Any file"
-
-    def __init__(self, parent):
+    def __init__(self, single_item, allow_sequences, parent):
         super(DropEmpty, self).__init__(parent)
-        label_widget = QtWidgets.QLabel(self._drop_enabled_text, self)
-        label_widget.setAlignment(QtCore.Qt.AlignCenter)
-        label_widget.setAttribute(QtCore.Qt.WA_TranslucentBackground)
+        drop_label_widget = QtWidgets.QLabel("Drag & Drop files here", self)

-        layout = QtWidgets.QHBoxLayout(self)
+        items_label_widget = SupportLabel(self)
+        items_label_widget.setWordWrap(True)
+
+        layout = QtWidgets.QVBoxLayout(self)
         layout.setContentsMargins(0, 0, 0, 0)
-        layout.addSpacing(10)
+        layout.addSpacing(20)
         layout.addWidget(
-            label_widget,
-            alignment=QtCore.Qt.AlignCenter
+            drop_label_widget, 0, alignment=QtCore.Qt.AlignCenter
         )
-        layout.addSpacing(30)
-
-        self._label_widget = label_widget
+        layout.addStretch(1)
+        layout.addWidget(
+            items_label_widget, 0, alignment=QtCore.Qt.AlignCenter
+        )
+        layout.addSpacing(10)
+
+        for widget in (
+            drop_label_widget,
+            items_label_widget,
+        ):
+            widget.setAlignment(QtCore.Qt.AlignCenter)
+            widget.setAttribute(QtCore.Qt.WA_TranslucentBackground)
+
+        self._single_item = single_item
+        self._allow_sequences = allow_sequences
+        self._allowed_extensions = set()
+        self._allow_folders = None
+
+        self._drop_label_widget = drop_label_widget
+        self._items_label_widget = items_label_widget
+
+        self.set_allow_folders(False)
+
+    def set_extensions(self, extensions):
+        if extensions:
+            extensions = {
+                ext.replace(".", "")
+                for ext in extensions
+            }
+        if extensions == self._allowed_extensions:
+            return
+        self._allowed_extensions = extensions
+
+        self._update_items_label()
+
+    def set_allow_folders(self, allowed):
+        if self._allow_folders == allowed:
+            return
+
+        self._allow_folders = allowed
+        self._update_items_label()
+
+    def _update_items_label(self):
+        allowed_items = []
+        if self._allow_folders:
+            allowed_items.append("folder")
+
+        if self._allowed_extensions:
+            allowed_items.append("file")
+            if self._allow_sequences:
+                allowed_items.append("sequence")
+
+        if not self._single_item:
+            allowed_items = [item + "s" for item in allowed_items]
+
+        if not allowed_items:
+            self._items_label_widget.setText(
+                "It is not allowed to add anything here!"
+            )
+            return
+
+        items_label = "Multiple "
+        if self._single_item:
+            items_label = "Single "
+
+        if len(allowed_items) == 1:
+            allowed_items_label = allowed_items[0]
+        elif len(allowed_items) == 2:
+            allowed_items_label = " or ".join(allowed_items)
+        else:
+            last_item = allowed_items.pop(-1)
+            # join the two last items ("file or sequence"); note that
+            # str.join takes a single iterable, so pass them as a tuple
+            new_last_item = " or ".join((allowed_items.pop(-1), last_item))
+            allowed_items.append(new_last_item)
+            allowed_items_label = ", ".join(allowed_items)
+
+        items_label += allowed_items_label
+        if self._allowed_extensions:
+            items_label += " of\n{}".format(
+                ", ".join(sorted(self._allowed_extensions))
+            )
+
+        self._items_label_widget.setText(items_label)
+
     def paintEvent(self, event):
         super(DropEmpty, self).paintEvent(event)
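The label logic above is easy to verify without Qt; a standalone sketch (not in the commit; the function name is made up) including the corrected `" or "` join for three item kinds:

```python
def items_label(single_item, allow_folders, allow_sequences, extensions):
    # Mirrors DropEmpty._update_items_label from the hunk above.
    allowed_items = []
    if allow_folders:
        allowed_items.append("folder")
    if extensions:
        allowed_items.append("file")
        if allow_sequences:
            allowed_items.append("sequence")

    if not single_item:
        allowed_items = [item + "s" for item in allowed_items]

    if not allowed_items:
        return "It is not allowed to add anything here!"

    label = "Single " if single_item else "Multiple "
    if len(allowed_items) == 1:
        label += allowed_items[0]
    elif len(allowed_items) == 2:
        label += " or ".join(allowed_items)
    else:
        # Join the two last items with "or", the rest with commas.
        last_item = allowed_items.pop(-1)
        allowed_items[-1] = " or ".join((allowed_items[-1], last_item))
        label += ", ".join(allowed_items)

    if extensions:
        label += " of\n{}".format(", ".join(sorted(extensions)))
    return label


print(items_label(False, True, True, {"exr", "mov"}))
# Multiple folders, files or sequences of
# exr, mov
```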
@@ -188,7 +272,12 @@ class FilesProxyModel(QtCore.QSortFilterProxyModel):

     def set_allowed_extensions(self, extensions=None):
         if extensions is not None:
-            extensions = set(extensions)
+            _extensions = set()
+            for ext in set(extensions):
+                if not ext.startswith("."):
+                    ext = ".{}".format(ext)
+                _extensions.add(ext.lower())
+            extensions = _extensions

         if self._allowed_extensions != extensions:
             self._allowed_extensions = extensions
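A tiny illustration (not in the commit) of the normalization the proxy model now applies: leading dots are enforced and case is folded, so mixed inputs compare consistently:

```python
# Hypothetical input mixing bare and dotted, upper- and lower-case.
extensions = {"EXR", ".mov"}
normalized = set()
for ext in extensions:
    if not ext.startswith("."):
        ext = ".{}".format(ext)
    normalized.add(ext.lower())
print(sorted(normalized))  # ['.exr', '.mov']
```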
@@ -444,7 +533,7 @@ class FilesWidget(QtWidgets.QFrame):
         super(FilesWidget, self).__init__(parent)
         self.setAcceptDrops(True)

-        empty_widget = DropEmpty(self)
+        empty_widget = DropEmpty(single_item, allow_sequences, self)

         files_model = FilesModel(single_item, allow_sequences)
         files_proxy_model = FilesProxyModel()
@@ -519,6 +608,8 @@ class FilesWidget(QtWidgets.QFrame):
     def set_filters(self, folders_allowed, exts_filter):
         self._files_proxy_model.set_allow_folders(folders_allowed)
         self._files_proxy_model.set_allowed_extensions(exts_filter)
+        self._empty_widget.set_extensions(exts_filter)
+        self._empty_widget.set_allow_folders(folders_allowed)

     def _on_rows_inserted(self, parent_index, start_row, end_row):
         for row in range(start_row, end_row + 1):
poetry.lock (generated, 17 lines changed)
@@ -46,6 +46,19 @@ python-versions = ">=3.5"
 [package.dependencies]
 aiohttp = ">=3,<4"

+[[package]]
+name = "aiohttp-middlewares"
+version = "2.0.0"
+description = "Collection of useful middlewares for aiohttp applications."
+category = "main"
+optional = false
+python-versions = ">=3.7,<4.0"
+
+[package.dependencies]
+aiohttp = ">=3.8.1,<4.0.0"
+async-timeout = ">=4.0.2,<5.0.0"
+yarl = ">=1.5.1,<2.0.0"
+
 [[package]]
 name = "aiosignal"
 version = "1.2.0"

@@ -1783,6 +1796,10 @@ aiohttp-json-rpc = [
     {file = "aiohttp-json-rpc-0.13.3.tar.gz", hash = "sha256:6237a104478c22c6ef96c7227a01d6832597b414e4b79a52d85593356a169e99"},
     {file = "aiohttp_json_rpc-0.13.3-py3-none-any.whl", hash = "sha256:4fbd197aced61bd2df7ae3237ead7d3e08833c2ccf48b8581e1828c95ebee680"},
 ]
+aiohttp-middlewares = [
+    {file = "aiohttp-middlewares-2.0.0.tar.gz", hash = "sha256:e08ba04dc0e8fe379aa5e9444a68485c275677ee1e18c55cbb855de0c3629502"},
+    {file = "aiohttp_middlewares-2.0.0-py3-none-any.whl", hash = "sha256:29cf1513176b4013844711975ff520e26a8a5d8f9fefbbddb5e91224a86b043e"},
+]
 aiosignal = [
     {file = "aiosignal-1.2.0-py3-none-any.whl", hash = "sha256:26e62109036cd181df6e6ad646f91f0dcfd05fe16d0cb924138ff2ab75d64e3a"},
     {file = "aiosignal-1.2.0.tar.gz", hash = "sha256:78ed67db6c7b7ced4f98e495e572106d5c432a93e1ddd1bf475e1dc05f5b7df2"},
@@ -68,6 +68,7 @@ slack-sdk = "^3.6.0"
 requests = "^2.25.1"
 pysftp = "^0.2.9"
 dropbox = "^11.20.0"
+aiohttp-middlewares = "^2.0.0"


 [tool.poetry.dev-dependencies]

@@ -154,4 +155,4 @@ exclude = [
 ignore = ["website", "docs", ".git"]

 reportMissingImports = true
-reportMissingTypeStubs = false
+reportMissingTypeStubs = false
@@ -214,7 +214,7 @@ $ brew install cmake
 3) Install [pyenv](https://github.com/pyenv/pyenv):
 ```shell
 $ brew install pyenv
-$ echo 'eval "$(pypenv init -)"' >> ~/.zshrc
+$ echo 'eval "$(pyenv init -)"' >> ~/.zshrc
 $ pyenv init
 $ exec "$SHELL"
 $ PATH=$(pyenv root)/shims:$PATH