Merge branch 'develop' into feature/collect_audio_speed_enhancement

Jakub Trllo 2022-12-06 09:58:44 +01:00
commit 2c36e10f6c
205 changed files with 5581 additions and 2134 deletions

View file

@@ -188,7 +188,7 @@ class CollectAnatomyInstanceData(pyblish.api.ContextPlugin):
for subset_doc in subset_docs:
subset_id = subset_doc["_id"]
last_version_doc = last_version_docs_by_subset_id.get(subset_id)
-            if last_version_docs_by_subset_id is None:
+            if last_version_doc is None:
continue
asset_id = subset_doc["parent"]
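
Note: the one-line fix above replaces a check of the lookup dict itself
(which is never None inside the loop) with a check of the per-subset
result, so subsets without a published version are now skipped as
intended. A minimal sketch of the corrected pattern, with illustrative
data standing in for the database documents:

    # Illustrative stand-ins; real docs come from the project database.
    last_version_docs_by_subset_id = {
        "subset_a": {"_id": "version_1"},
        # "subset_b" has no published version, so it has no entry here.
    }
    subset_docs = [{"_id": "subset_a"}, {"_id": "subset_b"}]

    for subset_doc in subset_docs:
        last_version_doc = last_version_docs_by_subset_id.get(
            subset_doc["_id"])
        # The old condition tested the dict, which is always truthy here;
        # testing the lookup result skips "subset_b" as intended.
        if last_version_doc is None:
            continue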

View file

@@ -1,19 +1,123 @@
"""
Requires:
None
Provides:
context -> comment (str)
"""Collect comment and add option to enter comment per instance.
Combination of plugins. One define optional input for instances in Publisher
UI (CollectInstanceCommentDef) and second cares that each instance during
collection has available "comment" key in data (CollectComment).
Plugin 'CollectInstanceCommentDef' define "comment" attribute which won't be
filled with any value if instance does not match families filter or when
plugin is disabled.
Plugin 'CollectComment' makes sure that each instance in context has
available "comment" key in data which can be set to 'str' or 'None' if is not
set.
- In case instance already has filled comment the plugin's logic is skipped
- The comment is always set and value should be always 'str' even if is empty
Why are separated:
- 'CollectInstanceCommentDef' can have specific settings to show comment
attribute only to defined families in publisher UI
- 'CollectComment' will run all the time
Todos:
The comment per instance is not sent via farm.
"""
import pyblish.api
+from openpype.lib.attribute_definitions import TextDef
+from openpype.pipeline.publish import OpenPypePyblishPluginMixin
-class CollectComment(pyblish.api.ContextPlugin):
-    """This plug-ins displays the comment dialog box per default"""
+class CollectInstanceCommentDef(
+    pyblish.api.ContextPlugin,
+    OpenPypePyblishPluginMixin
+):
+    label = "Comment per instance"
targets = ["local"]
+    # Disable plugin by default
+    families = []
+    enabled = False
-    label = "Collect Comment"
-    order = pyblish.api.CollectorOrder
+    def process(self, instance):
+        pass
+    @classmethod
+    def apply_settings(cls, project_setting, _):
+        plugin_settings = project_setting["global"]["publish"].get(
+            "collect_comment_per_instance"
+        )
+        if not plugin_settings:
+            return
+        if plugin_settings.get("enabled") is not None:
+            cls.enabled = plugin_settings["enabled"]
+        if plugin_settings.get("families") is not None:
+            cls.families = plugin_settings["families"]
+    @classmethod
+    def get_attribute_defs(cls):
+        return [
+            TextDef("comment", label="Comment")
+        ]
+class CollectComment(
+    pyblish.api.ContextPlugin,
+    OpenPypePyblishPluginMixin
+):
+    """Collect comment per each instance.
+    The plugin makes sure each instance to be published has a "comment"
+    set in data so any later plugin can use it directly.
+    """
+    label = "Collect Instance Comment"
+    order = pyblish.api.CollectorOrder + 0.49
def process(self, context):
-        comment = (context.data.get("comment") or "").strip()
-        context.data["comment"] = comment
+        context_comment = self.cleanup_comment(context.data.get("comment"))
+        # Set it back
+        context.data["comment"] = context_comment
+        for instance in context:
+            instance_label = str(instance)
+            # Check if comment is already set
+            instance_comment = self.cleanup_comment(
+                instance.data.get("comment"))
+            # If comment on instance is not set then look for attributes
+            if not instance_comment:
+                attr_values = self.get_attr_values_from_data_for_plugin(
+                    CollectInstanceCommentDef, instance.data
+                )
+                instance_comment = self.cleanup_comment(
+                    attr_values.get("comment")
+                )
+            # Use context comment if all comment options on the instance
+            # are empty
+            if not instance_comment:
+                instance_comment = context_comment
+            instance.data["comment"] = instance_comment
+            if instance_comment:
+                msg_end = " has comment set to: \"{}\"".format(
+                    instance_comment)
+            else:
+                msg_end = " does not have a comment set"
+            self.log.debug("Instance {} {}".format(instance_label, msg_end))
+    def cleanup_comment(self, comment):
+        """Cleanup comment value.
+        Args:
+            comment (Union[str, None]): Comment value from data.
+        Returns:
+            str: Cleaned comment which is stripped, or empty string if
+                input was 'None'.
+        """
+        if comment:
+            return comment.strip()
+        return ""

View file

@@ -468,7 +468,7 @@ class ExtractBurnin(publish.Extractor):
burnin_data.update({
"version": int(version),
"comment": context.data.get("comment") or ""
"comment": instance.data["comment"]
})
intent_label = context.data.get("intent") or ""

View file

@@ -1,9 +1,8 @@
+import collections
+from copy import deepcopy
import pyblish.api
from openpype.client import (
-    get_project,
-    get_asset_by_id,
-    get_asset_by_name,
+    get_assets,
get_archived_assets
)
from openpype.pipeline import legacy_io
@@ -17,7 +16,6 @@ class ExtractHierarchyToAvalon(pyblish.api.ContextPlugin):
families = ["clip", "shot"]
def process(self, context):
-        # processing starts here
if "hierarchyContext" not in context.data:
self.log.info("skipping IntegrateHierarchyToAvalon")
return
@@ -25,161 +23,236 @@ class ExtractHierarchyToAvalon(pyblish.api.ContextPlugin):
if not legacy_io.Session:
legacy_io.install()
-        project_name = legacy_io.active_project()
hierarchy_context = self._get_active_assets(context)
self.log.debug("__ hierarchy_context: {}".format(hierarchy_context))
-        self.project = None
-        self.import_to_avalon(context, project_name, hierarchy_context)
+        project_name = context.data["projectName"]
+        asset_names = self.extract_asset_names(hierarchy_context)
+        asset_docs_by_name = {}
+        for asset_doc in get_assets(project_name, asset_names=asset_names):
+            name = asset_doc["name"]
+            asset_docs_by_name[name] = asset_doc
+        archived_asset_docs_by_name = collections.defaultdict(list)
+        for asset_doc in get_archived_assets(
+            project_name, asset_names=asset_names
+        ):
+            name = asset_doc["name"]
+            archived_asset_docs_by_name[name].append(asset_doc)
+        project_doc = None
+        hierarchy_queue = collections.deque()
+        for name, data in hierarchy_context.items():
+            hierarchy_queue.append((name, data, None))
+        while hierarchy_queue:
+            item = hierarchy_queue.popleft()
+            name, entity_data, parent = item
-    def import_to_avalon(
-        self,
-        context,
-        project_name,
-        input_data,
-        parent=None,
-    ):
-        for name in input_data:
-            self.log.info("input_data[name]: {}".format(input_data[name]))
-            entity_data = input_data[name]
entity_type = entity_data["entity_type"]
-            data = {}
-            data["entityType"] = entity_type
-            # Custom attributes.
-            for k, val in entity_data.get("custom_attributes", {}).items():
-                data[k] = val
-            if entity_type.lower() != "project":
-                data["inputs"] = entity_data.get("inputs", [])
-                # Tasks.
-                tasks = entity_data.get("tasks", {})
-                if tasks is not None or len(tasks) > 0:
-                    data["tasks"] = tasks
-                parents = []
-                visualParent = None
-                # do not store project"s id as visualParent
-                if self.project is not None:
-                    if self.project["_id"] != parent["_id"]:
-                        visualParent = parent["_id"]
-                        parents.extend(
-                            parent.get("data", {}).get("parents", [])
-                        )
-                        parents.append(parent["name"])
-                data["visualParent"] = visualParent
-                data["parents"] = parents
-            update_data = True
-            # Process project
if entity_type.lower() == "project":
-                entity = get_project(project_name)
-                # TODO: should be in validator?
-                assert (entity is not None), "Did not find project in DB"
-                # get data from already existing project
-                cur_entity_data = entity.get("data") or {}
-                cur_entity_data.update(data)
-                data = cur_entity_data
-                self.project = entity
-            # Raise error if project or parent are not set
-            elif self.project is None or parent is None:
-                raise AssertionError(
-                    "Collected items are not in right order!"
+                new_parent = project_doc = self.sync_project(
+                    context,
+                    entity_data
)
-            # Else process assset
else:
-                entity = get_asset_by_name(project_name, name)
-                if entity:
-                    # Do not override data, only update
-                    cur_entity_data = entity.get("data") or {}
-                    entity_tasks = cur_entity_data["tasks"] or {}
-                    # create tasks as dict by default
-                    if not entity_tasks:
-                        cur_entity_data["tasks"] = entity_tasks
-                    new_tasks = data.pop("tasks", {})
-                    if "tasks" not in cur_entity_data and not new_tasks:
-                        continue
-                    for task_name in new_tasks:
-                        if task_name in entity_tasks.keys():
-                            continue
-                        cur_entity_data["tasks"][task_name] = new_tasks[
-                            task_name]
-                    cur_entity_data.update(data)
-                    data = cur_entity_data
-                else:
-                    # Skip updating data
-                    update_data = False
-                    archived_entities = get_archived_assets(
-                        project_name,
-                        asset_names=[name]
-                    )
-                    unarchive_entity = None
-                    for archived_entity in archived_entities:
-                        archived_parents = (
-                            archived_entity
-                            .get("data", {})
-                            .get("parents")
-                        )
-                        if data["parents"] == archived_parents:
-                            unarchive_entity = archived_entity
-                            break
-                    if unarchive_entity is None:
-                        # Create entity if doesn"t exist
-                        entity = self.create_avalon_asset(
-                            name, data
-                        )
-                    else:
-                        # Unarchive if entity was archived
-                        entity = self.unarchive_entity(unarchive_entity, data)
+                new_parent = self.sync_asset(
+                    name,
+                    entity_data,
+                    parent,
+                    project_doc,
+                    asset_docs_by_name,
+                    archived_asset_docs_by_name
+                )
# make sure all relative instances have correct avalon data
self._set_avalon_data_to_relative_instances(
context,
project_name,
-                    entity
+                    new_parent
)
-            if update_data:
-                # Update entity data with input data
-                legacy_io.update_many(
-                    {"_id": entity["_id"]},
-                    {"$set": {"data": data}}
+            children = entity_data.get("childs")
+            if not children:
+                continue
+            for child_name, child_data in children.items():
+                hierarchy_queue.append((child_name, child_data, new_parent))
+    def extract_asset_names(self, hierarchy_context):
+        """Extract all possible asset names from hierarchy context.
+        Args:
+            hierarchy_context (Dict[str, Any]): Nested hierarchy structure.
+        Returns:
+            Set[str]: All asset names from the hierarchy structure.
+        """
+        hierarchy_queue = collections.deque()
+        for name, data in hierarchy_context.items():
+            hierarchy_queue.append((name, data))
+        asset_names = set()
+        while hierarchy_queue:
+            item = hierarchy_queue.popleft()
+            name, data = item
+            if data["entity_type"].lower() != "project":
+                asset_names.add(name)
+            children = data.get("childs")
+            if children:
+                for child_name, child_data in children.items():
+                    hierarchy_queue.append((child_name, child_data))
+        return asset_names
+    def sync_project(self, context, entity_data):
+        project_doc = context.data["projectEntity"]
+        if "data" not in project_doc:
+            project_doc["data"] = {}
+        current_data = project_doc["data"]
+        changes = {}
+        entity_type = entity_data["entity_type"]
+        if current_data.get("entityType") != entity_type:
+            changes["entityType"] = entity_type
+        # Custom attributes.
+        attributes = entity_data.get("custom_attributes") or {}
+        for key, value in attributes.items():
+            if key not in current_data or current_data[key] != value:
+                update_key = "data.{}".format(key)
+                changes[update_key] = value
+                current_data[key] = value
+        if changes:
+            # Update entity data with input data
+            legacy_io.update_one(
+                {"_id": project_doc["_id"]},
+                {"$set": changes}
+            )
+        return project_doc
+    def sync_asset(
+        self,
+        asset_name,
+        entity_data,
+        parent,
+        project,
+        asset_docs_by_name,
+        archived_asset_docs_by_name
+    ):
+        # Prepare data for new asset or for update comparison
+        data = {
+            "entityType": entity_data["entity_type"]
+        }
+        # Custom attributes.
+        attributes = entity_data.get("custom_attributes") or {}
+        for key, value in attributes.items():
+            data[key] = value
+        data["inputs"] = entity_data.get("inputs") or []
+        # Parents and visual parent are empty if parent is project
+        parents = []
+        parent_id = None
+        if project["_id"] != parent["_id"]:
+            parent_id = parent["_id"]
+            # Use parent's parents as source value
+            parents.extend(parent["data"]["parents"])
+            # Add parent's name to parents
+            parents.append(parent["name"])
+        data["visualParent"] = parent_id
+        data["parents"] = parents
+        asset_doc = asset_docs_by_name.get(asset_name)
+        # --- Create/Unarchive asset and end ---
+        if not asset_doc:
+            # Just use tasks from entity data as they are
+            # - this is different from the case when tasks are updated
+            data["tasks"] = entity_data.get("tasks") or {}
+            archived_asset_doc = None
+            for archived_entity in archived_asset_docs_by_name[asset_name]:
+                archived_parents = (
+                    archived_entity
+                    .get("data", {})
+                    .get("parents")
+                )
+                if data["parents"] == archived_parents:
+                    archived_asset_doc = archived_entity
+                    break
+            # Create entity if doesn't exist
+            if archived_asset_doc is None:
+                return self.create_avalon_asset(
+                    asset_name, data, project
+                )
-            if "childs" in entity_data:
-                self.import_to_avalon(
-                    context, project_name, entity_data["childs"], entity
-                )
+            return self.unarchive_entity(
+                archived_asset_doc, data, project
+            )
-    def unarchive_entity(self, entity, data):
+        # --- Update existing asset ---
+        # Make sure current entity has "data" key
+        if "data" not in asset_doc:
+            asset_doc["data"] = {}
+        cur_entity_data = asset_doc["data"]
+        cur_entity_tasks = cur_entity_data.get("tasks") or {}
+        # Tasks
+        data["tasks"] = {}
+        new_tasks = entity_data.get("tasks") or {}
+        for task_name, task_info in new_tasks.items():
+            task_info = deepcopy(task_info)
+            if task_name in cur_entity_tasks:
+                src_task_info = deepcopy(cur_entity_tasks[task_name])
+                src_task_info.update(task_info)
+                task_info = src_task_info
+            data["tasks"][task_name] = task_info
+        changes = {}
+        for key, value in data.items():
+            if key not in cur_entity_data or value != cur_entity_data[key]:
+                update_key = "data.{}".format(key)
+                changes[update_key] = value
+                cur_entity_data[key] = value
+        # Update asset in database if necessary
+        if changes:
+            # Update entity data with input data
+            legacy_io.update_one(
+                {"_id": asset_doc["_id"]},
+                {"$set": changes}
+            )
+        return asset_doc
+    def unarchive_entity(self, archived_doc, data, project):
# Unarchived asset should not use same data
-        new_entity = {
-            "_id": entity["_id"],
+        asset_doc = {
+            "_id": archived_doc["_id"],
"schema": "openpype:asset-3.0",
"name": entity["name"],
"parent": self.project["_id"],
"name": archived_doc["name"],
"parent": project["_id"],
"type": "asset",
"data": data
}
legacy_io.replace_one(
{"_id": entity["_id"]},
new_entity
{"_id": archived_doc["_id"]},
asset_doc
)
-        return new_entity
+        return asset_doc
-    def create_avalon_asset(self, name, data):
+    def create_avalon_asset(self, name, data, project):
asset_doc = {
"schema": "openpype:asset-3.0",
"name": name,
"parent": self.project["_id"],
"parent": project["_id"],
"type": "asset",
"data": data
}
@@ -194,27 +267,27 @@ class ExtractHierarchyToAvalon(pyblish.api.ContextPlugin):
project_name,
asset_doc
):
+        asset_name = asset_doc["name"]
+        new_parents = asset_doc["data"]["parents"]
+        hierarchy = "/".join(new_parents)
+        parent_name = project_name
+        if new_parents:
+            parent_name = new_parents[-1]
for instance in context:
-            # Skip instance if has filled asset entity
-            if instance.data.get("assetEntity"):
+            # Skip if instance asset does not match
+            instance_asset_name = instance.data.get("asset")
+            if asset_name != instance_asset_name:
continue
-            asset_name = asset_doc["name"]
-            inst_asset_name = instance.data["asset"]
-            if asset_name == inst_asset_name:
-                instance.data["assetEntity"] = asset_doc
+            instance_asset_doc = instance.data.get("assetEntity")
+            # Update asset entity with new possible changes of asset document
+            instance.data["assetEntity"] = asset_doc
-                # get parenting data
-                parents = asset_doc["data"].get("parents") or list()
-                # equire only relative parent
-                parent_name = project_name
-                if parents:
-                    parent_name = parents[-1]
-                # update avalon data on instance
+            # Update anatomy data if asset was not set on instance
+            if not instance_asset_doc:
instance.data["anatomyData"].update({
"hierarchy": "/".join(parents),
"hierarchy": hierarchy,
"task": {},
"parent": parent_name
})
@@ -241,7 +314,7 @@ class ExtractHierarchyToAvalon(pyblish.api.ContextPlugin):
hierarchy_context = context.data["hierarchyContext"]
active_assets = []
-        # filter only the active publishing insatnces
+        # filter only the active publishing instances
for instance in context:
if instance.data.get("publish") is False:
continue
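
Note: the refactor above replaces the recursive import_to_avalon with an
iterative breadth-first walk. A minimal sketch of how extract_asset_names
traverses the nested "childs" layout; the hierarchy below is illustrative:

    import collections

    # Illustrative "hierarchyContext" shape: project -> entities via "childs"
    hierarchy_context = {
        "MyProject": {
            "entity_type": "Project",
            "childs": {
                "seq01": {
                    "entity_type": "Sequence",
                    "childs": {
                        "sh010": {"entity_type": "Shot"},
                    },
                },
            },
        },
    }

    def extract_asset_names(hierarchy_context):
        # Breadth-first walk collecting every non-project name
        hierarchy_queue = collections.deque(hierarchy_context.items())
        asset_names = set()
        while hierarchy_queue:
            name, data = hierarchy_queue.popleft()
            if data["entity_type"].lower() != "project":
                asset_names.add(name)
            hierarchy_queue.extend((data.get("childs") or {}).items())
        return asset_names

    print(extract_asset_names(hierarchy_context))  # {'seq01', 'sh010'}

The same walk drives the main while loop in process, with the parent
document carried in each queued tuple. Note also that sync_project and
sync_asset collect dotted "data.<key>" paths into a changes dict so a
single MongoDB $set only touches fields that actually changed.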

View file

@@ -179,7 +179,7 @@ class ExtractReview(pyblish.api.InstancePlugin):
single_frame_image = False
if len(input_filepaths) == 1:
ext = os.path.splitext(input_filepaths[0])[-1]
-            single_frame_image = ext in IMAGE_EXTENSIONS
+            single_frame_image = ext.lower() in IMAGE_EXTENSIONS
filtered_defs = []
for output_def in output_defs:
@@ -501,7 +501,7 @@ class ExtractReview(pyblish.api.InstancePlugin):
first_sequence_frame += handle_start
ext = os.path.splitext(repre["files"][0])[1].replace(".", "")
-        if ext in self.alpha_exts:
+        if ext.lower() in self.alpha_exts:
input_allow_bg = True
return {
@@ -598,8 +598,12 @@ class ExtractReview(pyblish.api.InstancePlugin):
if temp_data["input_is_sequence"]:
# Set start frame of input sequence (just frame in filename)
# - definition of input filepath
+            # - add handle start if output should be without handles
+            start_number = temp_data["first_sequence_frame"]
+            if temp_data["without_handles"] and temp_data["handles_are_set"]:
+                start_number += temp_data["handle_start"]
ffmpeg_input_args.extend([
"-start_number", str(temp_data["first_sequence_frame"])
"-start_number", str(start_number)
])
# TODO add fps mapping `{fps: fraction}` ?
@@ -609,49 +613,50 @@ class ExtractReview(pyblish.api.InstancePlugin):
# "23.976": "24000/1001"
# }
# Add framerate to input when input is sequence
-            ffmpeg_input_args.append(
-                "-framerate {}".format(temp_data["fps"])
-            )
+            ffmpeg_input_args.extend([
+                "-framerate", str(temp_data["fps"])
+            ])
+            # Add duration of an input sequence if output is video
+            if not temp_data["output_is_sequence"]:
+                ffmpeg_input_args.extend([
+                    "-to", "{:0.10f}".format(duration_seconds)
+                ])
if temp_data["output_is_sequence"]:
# Set start frame of output sequence (just frame in filename)
# - this is definition of an output
-            ffmpeg_output_args.append(
-                "-start_number {}".format(temp_data["output_frame_start"])
-            )
+            ffmpeg_output_args.extend([
+                "-start_number", str(temp_data["output_frame_start"])
+            ])
# Change output's duration and start point if should not contain
# handles
-        start_sec = 0
if temp_data["without_handles"] and temp_data["handles_are_set"]:
-            # Set start time without handles
-            # - check if handle_start is bigger than 0 to avoid zero division
-            if temp_data["handle_start"] > 0:
-                start_sec = float(temp_data["handle_start"]) / temp_data["fps"]
-                ffmpeg_input_args.append("-ss {:0.10f}".format(start_sec))
+            # Set output duration in seconds
+            ffmpeg_output_args.extend([
+                "-t", "{:0.10}".format(duration_seconds)
+            ])
-            # Set output duration inn seconds
-            ffmpeg_output_args.append("-t {:0.10}".format(duration_seconds))
+            # Add -ss (start offset in seconds) if input is not sequence
+            if not temp_data["input_is_sequence"]:
+                start_sec = float(temp_data["handle_start"]) / temp_data["fps"]
+                # Set start time without handles
+                # - Skip if start sec is 0.0
+                if start_sec > 0.0:
+                    ffmpeg_input_args.extend([
+                        "-ss", "{:0.10f}".format(start_sec)
+                    ])
# Set frame range of output when input or output is sequence
elif temp_data["output_is_sequence"]:
ffmpeg_output_args.append("-frames:v {}".format(output_frames_len))
# Add duration of an input sequence if output is video
if (
temp_data["input_is_sequence"]
and not temp_data["output_is_sequence"]
):
ffmpeg_input_args.append("-to {:0.10f}".format(
duration_seconds + start_sec
))
ffmpeg_output_args.extend([
"-frames:v", str(output_frames_len)
])
# Add video/image input path
-        ffmpeg_input_args.append(
-            "-i {}".format(
-                path_to_subprocess_arg(temp_data["full_input_path"])
-            )
-        )
+        ffmpeg_input_args.extend([
+            "-i", path_to_subprocess_arg(temp_data["full_input_path"])
+        ])
# Add audio arguments if there are any. Skipped when output are images.
if not temp_data["output_ext_is_image"] and temp_data["with_audio"]:
@@ -934,6 +939,8 @@ class ExtractReview(pyblish.api.InstancePlugin):
if output_ext.startswith("."):
output_ext = output_ext[1:]
output_ext = output_ext.lower()
+        # Store extension to representation
+        new_repre["ext"] = output_ext

View file

@@ -129,7 +129,8 @@ class IntegrateAsset(pyblish.api.InstancePlugin):
"mvUsd",
"mvUsdComposition",
"mvUsdOverride",
"simpleUnrealTexture"
"simpleUnrealTexture",
"online"
]
default_template_name = "publish"
@@ -290,6 +291,9 @@ class IntegrateAsset(pyblish.api.InstancePlugin):
instance)
for src, dst in prepared["transfers"]:
+            if src == dst:
+                continue
# todo: add support for hardlink transfers
file_transactions.add(src, dst)
@@ -768,7 +772,7 @@ class IntegrateAsset(pyblish.api.InstancePlugin):
"time": context.data["time"],
"author": context.data["user"],
"source": source,
"comment": context.data.get("comment"),
"comment": instance.data["comment"],
"machine": context.data.get("machine"),
"fps": instance.data.get("fps", context.data.get("fps"))
}
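
Note: with CollectComment guaranteeing the key, the version document
assembled here can carry a different comment per instance while the other
fields still come from context. An illustrative payload; every value below
is a placeholder, not taken from the commit:

    version_data = {
        "time": "20221206T095844Z",          # context
        "author": "jakub.trllo",             # context
        "source": "{root}/work/scene_v012",  # source of the instance
        "comment": "Tweaked audio speed",    # now read per instance
        "machine": "workstation-01",         # context
        "fps": 25,                           # instance, context fallback
    }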

View file

@@ -968,7 +968,7 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
"time": context.data["time"],
"author": context.data["user"],
"source": source,
"comment": context.data.get("comment"),
"comment": instance.data["comment"],
"machine": context.data.get("machine"),
"fps": context.data.get(
"fps", instance.data.get("fps")