Merge remote-tracking branch 'upstream/develop' into substance_integration

This commit is contained in:
Roy Nieterau 2023-03-03 12:44:04 +01:00
commit 6d01fb8e14
463 changed files with 14322 additions and 5656 deletions

View file

@ -34,12 +34,24 @@ class AddSyncSite(load.LoaderPlugin):
return self._sync_server
def load(self, context, name=None, namespace=None, data=None):
self.log.info("Adding {} to representation: {}".format(
data["site_name"], data["_id"]))
family = context["representation"]["context"]["family"]
project_name = data["project_name"]
repre_id = data["_id"]
""""Adds site skeleton information on representation_id
Looks for loaded containers for workfile, adds them site skeleton too
(eg. they should be downloaded too).
Args:
context (dict):
name (str):
namespace (str):
data (dict): expects {"site_name": SITE_NAME_TO_ADD}
"""
# self.log wont propagate
project_name = context["project"]["name"]
repre_doc = context["representation"]
family = repre_doc["context"]["family"]
repre_id = repre_doc["_id"]
site_name = data["site_name"]
print("Adding {} to representation: {}".format(
data["site_name"], repre_id))
self.sync_server.add_site(project_name, repre_id, site_name,
force=True)
@ -52,6 +64,8 @@ class AddSyncSite(load.LoaderPlugin):
)
for link_repre_id in links:
try:
print("Adding {} to linked representation: {}".format(
data["site_name"], link_repre_id))
self.sync_server.add_site(project_name, link_repre_id,
site_name,
force=False)

View file

@ -3,7 +3,10 @@ from openpype.pipeline import load
class RemoveSyncSite(load.LoaderPlugin):
"""Remove sync site and its files on representation"""
"""Remove sync site and its files on representation.
Removes files only on local site!
"""
representations = ["*"]
families = ["*"]
@ -24,13 +27,18 @@ class RemoveSyncSite(load.LoaderPlugin):
return self._sync_server
def load(self, context, name=None, namespace=None, data=None):
self.log.info("Removing {} on representation: {}".format(
data["site_name"], data["_id"]))
self.sync_server.remove_site(data["project_name"],
data["_id"],
data["site_name"],
project_name = context["project"]["name"]
repre_doc = context["representation"]
repre_id = repre_doc["_id"]
site_name = data["site_name"]
print("Removing {} on representation: {}".format(site_name, repre_id))
self.sync_server.remove_site(project_name,
repre_id,
site_name,
True)
self.log.debug("Site added.")
self.log.debug("Site removed.")
def filepath_from_context(self, context):
"""No real file loading"""

View file

@ -0,0 +1,80 @@
import pyblish.api
from openpype.lib.attribute_definitions import (
TextDef,
BoolDef
)
from openpype.pipeline.publish import OpenPypePyblishPluginMixin
from openpype.client.entities import (
get_last_version_by_subset_name,
get_representations
)
class CollectFramesFixDef(
    pyblish.api.InstancePlugin,
    OpenPypePyblishPluginMixin
):
    """Provides text field to insert frame(s) to be rerendered.

    Published files of last version of an instance subset are collected into
    instance.data["last_version_published_files"]. All these but frames
    mentioned in text field will be reused for new version.
    """
    order = pyblish.api.CollectorOrder + 0.495
    label = "Collect Frames to Fix"
    targets = ["local"]
    hosts = ["nuke"]
    families = ["render", "prerender"]
    enabled = True

    def process(self, instance):
        """Collect last published files when frames to fix were requested.

        Args:
            instance (pyblish.api.Instance): Instance with "subset", "asset"
                and "projectEntity" in its data.
        """
        attribute_values = self.get_attr_values_from_data(instance.data)
        frames_to_fix = attribute_values.get("frames_to_fix")
        rewrite_version = attribute_values.get("rewrite_version")
        # Nothing to do when artist did not fill any frames
        if not frames_to_fix:
            return

        instance.data["frames_to_fix"] = frames_to_fix

        subset_name = instance.data["subset"]
        asset_name = instance.data["asset"]
        project_entity = instance.data["projectEntity"]
        project_name = project_entity["name"]

        version = get_last_version_by_subset_name(project_name,
                                                  subset_name,
                                                  asset_name=asset_name)
        if not version:
            self.log.warning("No last version found, "
                             "re-render not possible")
            return

        representations = get_representations(project_name,
                                              version_ids=[version["_id"]])
        published_files = []
        for repre in representations:
            # Reuse only representations of families handled by this plugin
            if repre["context"]["family"] not in self.families:
                continue

            # 'files' may be missing or None on malformed representations
            for file_info in repre.get("files") or []:
                published_files.append(file_info["path"])

        instance.data["last_version_published_files"] = published_files
        self.log.debug("last_version_published_files::{}".format(
            instance.data["last_version_published_files"]))

        if rewrite_version:
            instance.data["version"] = version["name"]
            # limits triggering version validator
            # 'latestVersion' might not be collected yet - remove safely
            instance.data.pop("latestVersion", None)

    @classmethod
    def get_attribute_defs(cls):
        """Expose publisher attribute definitions for artist input."""
        return [
            TextDef("frames_to_fix", label="Frames to fix",
                    placeholder="5,10-15",
                    regex="[0-9,-]+"),
            BoolDef("rewrite_version", label="Rewrite latest version",
                    default=False),
        ]

View file

@ -32,7 +32,7 @@ class CollectFromCreateContext(pyblish.api.ContextPlugin):
thumbnail_paths_by_instance_id.get(None)
)
project_name = create_context.project_name
project_name = create_context.get_current_project_name()
if project_name:
context.data["projectName"] = project_name
@ -53,11 +53,15 @@ class CollectFromCreateContext(pyblish.api.ContextPlugin):
context.data.update(create_context.context_data_to_store())
context.data["newPublishing"] = True
# Update context data
for key in ("AVALON_PROJECT", "AVALON_ASSET", "AVALON_TASK"):
value = create_context.dbcon.Session.get(key)
if value is not None:
legacy_io.Session[key] = value
os.environ[key] = value
asset_name = create_context.get_current_asset_name()
task_name = create_context.get_current_task_name()
for key, value in (
("AVALON_PROJECT", project_name),
("AVALON_ASSET", asset_name),
("AVALON_TASK", task_name)
):
legacy_io.Session[key] = value
os.environ[key] = value
def create_instance(
self,

View file

@ -14,16 +14,19 @@ from openpype.pipeline.editorial import (
range_from_frames,
make_sequence_collection
)
from openpype.pipeline.publish import (
get_publish_template_name
)
class CollectOtioSubsetResources(pyblish.api.InstancePlugin):
"""Get Resources for a subset version"""
label = "Collect OTIO Subset Resources"
order = pyblish.api.CollectorOrder - 0.077
order = pyblish.api.CollectorOrder + 0.491
families = ["clip"]
hosts = ["resolve", "hiero", "flame"]
def process(self, instance):
if "audio" in instance.data["family"]:
@ -35,14 +38,21 @@ class CollectOtioSubsetResources(pyblish.api.InstancePlugin):
if not instance.data.get("versionData"):
instance.data["versionData"] = {}
template_name = self.get_template_name(instance)
anatomy = instance.context.data["anatomy"]
publish_template_category = anatomy.templates[template_name]
template = os.path.normpath(publish_template_category["path"])
self.log.debug(
">> template: {}".format(template))
handle_start = instance.data["handleStart"]
handle_end = instance.data["handleEnd"]
# get basic variables
otio_clip = instance.data["otioClip"]
otio_avalable_range = otio_clip.available_range()
media_fps = otio_avalable_range.start_time.rate
available_duration = otio_avalable_range.duration.value
otio_available_range = otio_clip.available_range()
media_fps = otio_available_range.start_time.rate
available_duration = otio_available_range.duration.value
# get available range trimmed with processed retimes
retimed_attributes = get_media_range_with_retimes(
@ -84,6 +94,11 @@ class CollectOtioSubsetResources(pyblish.api.InstancePlugin):
frame_start = instance.data["frameStart"]
frame_end = frame_start + (media_out - media_in)
# Fit start /end frame to media in /out
if "{originalBasename}" in template:
frame_start = media_in
frame_end = media_out
# add to version data start and end range data
# for loader plugins to be correctly displayed and loaded
instance.data["versionData"].update({
@ -153,7 +168,6 @@ class CollectOtioSubsetResources(pyblish.api.InstancePlugin):
repre = self._create_representation(
frame_start, frame_end, collection=collection)
instance.data["originalBasename"] = collection.format("{head}")
else:
_trim = False
dirname, filename = os.path.split(media_ref.target_url)
@ -168,8 +182,6 @@ class CollectOtioSubsetResources(pyblish.api.InstancePlugin):
repre = self._create_representation(
frame_start, frame_end, file=filename, trim=_trim)
instance.data["originalBasename"] = os.path.splitext(filename)[0]
instance.data["originalDirname"] = self.staging_dir
if repre:
@ -225,3 +237,26 @@ class CollectOtioSubsetResources(pyblish.api.InstancePlugin):
if kwargs.get("trim") is True:
representation_data["tags"] = ["trim"]
return representation_data
def get_template_name(self, instance):
    """Resolve anatomy template name to be used for integration.

    Anatomy data is pre-filled by Collectors; task information inside
    it is optional, so missing task name/type are passed as None.
    """
    context_data = instance.context.data
    # Task can be optional in anatomy data
    task_info = instance.data["anatomyData"].get("task") or {}

    return get_publish_template_name(
        context_data["projectName"],
        context_data["hostName"],
        instance.data["family"],
        task_name=task_info.get("name"),
        task_type=task_info.get("type"),
        project_settings=context_data["project_settings"],
        logger=self.log
    )

View file

@ -61,7 +61,8 @@ class CollectResourcesPath(pyblish.api.InstancePlugin):
"background",
"effect",
"staticMesh",
"skeletalMesh"
"skeletalMesh",
"xgen"
]
def process(self, instance):

View file

@ -0,0 +1,368 @@
import os
import copy
import clique
import pyblish.api
from openpype.pipeline import publish
from openpype.lib import (
is_oiio_supported,
)
from openpype.lib.transcoding import (
convert_colorspace,
get_transcode_temp_directory,
)
from openpype.lib.profiles_filtering import filter_profiles
class ExtractOIIOTranscode(publish.Extractor):
    """
    Extractor to convert colors from one colorspace to different.

    Expects "colorspaceData" on representation. This dictionary is collected
    previously and denotes that representation files should be converted.
    This dict contains source colorspace information, collected by hosts.

    Target colorspace is selected by profiles in the Settings, based on:
    - families
    - host
    - task types
    - task names
    - subset names

    Can produce one or more representations (with different extensions) based
    on output definition in format:
        "output_name: {
            "extension": "png",
            "colorspace": "ACES - ACEScg",
            "display": "",
            "view": "",
            "tags": [],
            "custom_tags": []
        }

    If 'extension' is empty original representation extension is used.
    'output_name' will be used as name of new representation. In case of value
    'passthrough' name of original representation will be used.

    'colorspace' denotes target colorspace to be transcoded into. Could be
    empty if transcoding should be only into display and viewer colorspace.
    (In that case both 'display' and 'view' must be filled.)
    """

    label = "Transcode color spaces"
    order = pyblish.api.ExtractorOrder + 0.019

    optional = True

    # Supported extensions
    supported_exts = ["exr", "jpg", "jpeg", "png", "dpx"]

    # Configurable by Settings
    profiles = None
    options = None

    def process(self, instance):
        if not self.profiles:
            self.log.debug("No profiles present for color transcode")
            return

        if "representations" not in instance.data:
            self.log.debug("No representations, skipping.")
            return

        if not is_oiio_supported():
            self.log.warning("OIIO not supported, no transcoding possible.")
            return

        profile = self._get_profile(instance)
        if not profile:
            return

        new_representations = []
        repres = instance.data["representations"]
        for idx, repre in enumerate(list(repres)):
            self.log.debug("repre ({}): `{}`".format(idx + 1, repre["name"]))
            if not self._repre_is_valid(repre):
                continue

            added_representations = False
            added_review = False

            colorspace_data = repre["colorspaceData"]
            source_colorspace = colorspace_data["colorspace"]
            config_path = colorspace_data.get("config", {}).get("path")
            if not config_path or not os.path.exists(config_path):
                self.log.warning("Config file doesn't exist, skipping")
                continue

            for output_name, output_def in profile.get("outputs", {}).items():
                new_repre = copy.deepcopy(repre)

                original_staging_dir = new_repre["stagingDir"]
                new_staging_dir = get_transcode_temp_directory()
                new_repre["stagingDir"] = new_staging_dir

                # Standardize to a list of file names
                if isinstance(new_repre["files"], list):
                    files_to_convert = copy.deepcopy(new_repre["files"])
                else:
                    files_to_convert = [new_repre["files"]]

                output_extension = output_def["extension"]
                output_extension = output_extension.replace('.', '')
                self._rename_in_representation(new_repre,
                                               files_to_convert,
                                               output_name,
                                               output_extension)

                transcoding_type = output_def["transcoding_type"]

                target_colorspace = view = display = None
                if transcoding_type == "colorspace":
                    # Fall back to source colorspace if not set in Settings
                    target_colorspace = (output_def["colorspace"] or
                                         colorspace_data.get("colorspace"))
                else:
                    view = output_def["view"] or colorspace_data.get("view")
                    display = (output_def["display"] or
                               colorspace_data.get("display"))

                # both could be already collected by DCC,
                # but could be overwritten when transcoding
                if view:
                    new_repre["colorspaceData"]["view"] = view
                if display:
                    new_repre["colorspaceData"]["display"] = display
                if target_colorspace:
                    new_repre["colorspaceData"]["colorspace"] = \
                        target_colorspace

                additional_command_args = (output_def["oiiotool_args"]
                                           ["additional_command_args"])

                files_to_convert = self._translate_to_sequence(
                    files_to_convert)
                for file_name in files_to_convert:
                    input_path = os.path.join(original_staging_dir,
                                              file_name)
                    output_path = self._get_output_file_path(input_path,
                                                             new_staging_dir,
                                                             output_extension)
                    convert_colorspace(
                        input_path,
                        output_path,
                        config_path,
                        source_colorspace,
                        target_colorspace,
                        view,
                        display,
                        additional_command_args,
                        self.log
                    )

                # cleanup temporary transcoded files
                for file_name in new_repre["files"]:
                    transcoded_file_path = os.path.join(new_staging_dir,
                                                        file_name)
                    instance.context.data["cleanupFullPaths"].append(
                        transcoded_file_path)

                custom_tags = output_def.get("custom_tags")
                if custom_tags:
                    if new_repre.get("custom_tags") is None:
                        new_repre["custom_tags"] = []
                    new_repre["custom_tags"].extend(custom_tags)

                # Add additional tags from output definition to representation
                if new_repre.get("tags") is None:
                    new_repre["tags"] = []
                for tag in output_def["tags"]:
                    if tag not in new_repre["tags"]:
                        new_repre["tags"].append(tag)

                    if tag == "review":
                        added_review = True

                new_representations.append(new_repre)
                added_representations = True

            if added_representations:
                self._mark_original_repre_for_deletion(repre, profile,
                                                       added_review)

        # Purge representations tagged for deletion (keep thumbnails)
        for repre in tuple(instance.data["representations"]):
            tags = repre.get("tags") or []
            if "delete" in tags and "thumbnail" not in tags:
                instance.data["representations"].remove(repre)
        instance.data["representations"].extend(new_representations)

    # NOTE: original source contained two identical definitions of
    # '_rename_in_representation'; the duplicate was removed as the second
    # definition silently shadowed the first.
    def _rename_in_representation(self, new_repre, files_to_convert,
                                  output_name, output_extension):
        """Replace old extension with new one everywhere in representation.

        Args:
            new_repre (dict)
            files_to_convert (list): of filenames from repre["files"],
                standardized to always list
            output_name (str): key of output definition from Settings,
                if "<passthrough>" token used, keep original repre name
            output_extension (str): extension from output definition
        """
        if output_name != "passthrough":
            new_repre["name"] = output_name

        if not output_extension:
            return

        new_repre["ext"] = output_extension

        renamed_files = []
        for file_name in files_to_convert:
            file_name, _ = os.path.splitext(file_name)
            file_name = '{}.{}'.format(file_name,
                                       output_extension)
            renamed_files.append(file_name)
        new_repre["files"] = renamed_files

    def _translate_to_sequence(self, files_to_convert):
        """Returns original list or list with filename formatted in single
        sequence format.

        Uses clique to find frame sequence, in this case it merges all frames
        into sequence format (FRAMESTART-FRAMEEND#) and returns it.
        If sequence not found, it returns original list

        Args:
            files_to_convert (list): list of file names
        Returns:
            (list) of [file.1001-1010#.exr] or [fileA.exr, fileB.exr]
        """
        pattern = [clique.PATTERNS["frames"]]
        collections, remainder = clique.assemble(
            files_to_convert, patterns=pattern,
            assume_padded_when_ambiguous=True)

        if collections:
            if len(collections) > 1:
                raise ValueError(
                    "Too many collections {}".format(collections))

            collection = collections[0]
            frames = list(collection.indexes)
            frame_str = "{}-{}#".format(frames[0], frames[-1])
            file_name = "{}{}{}".format(collection.head, frame_str,
                                        collection.tail)
            files_to_convert = [file_name]

        return files_to_convert

    def _get_output_file_path(self, input_path, output_dir,
                              output_extension):
        """Create output file name path."""
        file_name = os.path.basename(input_path)
        file_name, input_extension = os.path.splitext(file_name)
        if not output_extension:
            output_extension = input_extension.replace(".", "")
        new_file_name = '{}.{}'.format(file_name,
                                       output_extension)
        return os.path.join(output_dir, new_file_name)

    def _get_profile(self, instance):
        """Returns profile if and how repre should be color transcoded."""
        host_name = instance.context.data["hostName"]
        family = instance.data["family"]
        task_data = instance.data["anatomyData"].get("task", {})
        task_name = task_data.get("name")
        task_type = task_data.get("type")
        subset = instance.data["subset"]
        filtering_criteria = {
            "hosts": host_name,
            "families": family,
            "task_names": task_name,
            "task_types": task_type,
            "subsets": subset
        }
        profile = filter_profiles(self.profiles, filtering_criteria,
                                  logger=self.log)

        if not profile:
            self.log.info((
                "Skipped instance. None of profiles in presets are for"
                " Host: \"{}\" | Families: \"{}\" | Task \"{}\""
                " | Task type \"{}\" | Subset \"{}\" "
            ).format(host_name, family, task_name, task_type, subset))

        self.log.debug("profile: {}".format(profile))
        return profile

    def _repre_is_valid(self, repre):
        """Validation if representation should be processed.

        Args:
            repre (dict): Representation which should be checked.

        Returns:
            bool: False if can't be processed else True.
        """
        if repre.get("ext") not in self.supported_exts:
            self.log.debug((
                "Representation '{}' of unsupported extension. Skipped."
            ).format(repre["name"]))
            return False

        if not repre.get("files"):
            self.log.debug((
                "Representation '{}' have empty files. Skipped."
            ).format(repre["name"]))
            return False

        if not repre.get("colorspaceData"):
            # Bug fix: original message lacked '.format(...)' and logged
            # a literal '{}' placeholder
            self.log.debug((
                "Representation '{}' has no colorspace data. Skipped."
            ).format(repre["name"]))
            return False

        return True

    def _mark_original_repre_for_deletion(self, repre, profile, added_review):
        """If new transcoded representation created, delete old."""
        if not repre.get("tags"):
            repre["tags"] = []

        delete_original = profile["delete_original"]

        if delete_original:
            if "delete" not in repre["tags"]:
                repre["tags"].append("delete")

        if added_review and "review" in repre["tags"]:
            repre["tags"].remove("review")

View file

@ -169,7 +169,7 @@ class ExtractReview(pyblish.api.InstancePlugin):
"Skipped representation. All output definitions from"
" selected profile does not match to representation's"
" custom tags. \"{}\""
).format(str(tags)))
).format(str(custom_tags)))
continue
outputs_per_representations.append((repre, outputs))

View file

@ -506,6 +506,43 @@ class IntegrateAsset(pyblish.api.InstancePlugin):
return version_doc
def _validate_repre_files(self, files, is_sequence_representation):
    """Validate representation files before transfer preparation.

    Check if files contain only filenames instead of full paths and check
    if sequence don't contain more than one sequence or has remainders.

    Args:
        files (Union[str, List[str]]): Files from representation.
        is_sequence_representation (bool): Files are for sequence.

    Raises:
        KnownPublishError: If validations don't pass.
    """
    # Nothing to validate on empty input
    if not files:
        return

    # Normalize to a list so both single file and sequence share checks
    filenames = files if is_sequence_representation else [files]

    # Representations must carry plain file names, never absolute paths
    for filename in filenames:
        if os.path.isabs(filename):
            raise KnownPublishError("Given file names contain full paths")

    if not is_sequence_representation:
        return

    # Sequence must resolve to exactly one collection with no leftovers
    src_collections, remainders = clique.assemble(filenames)
    if len(filenames) < 2 or len(src_collections) != 1 or remainders:
        raise KnownPublishError((
            "Files of representation does not contain proper"
            " sequence files.\nCollected collections: {}"
            "\nCollected remainders: {}"
        ).format(
            ", ".join([str(col) for col in src_collections]),
            ", ".join([str(rem) for rem in remainders])
        ))
def prepare_representation(self, repre,
template_name,
existing_repres_by_name,
@ -534,6 +571,9 @@ class IntegrateAsset(pyblish.api.InstancePlugin):
template_data["representation"] = repre["name"]
template_data["ext"] = repre["ext"]
# allow overwriting existing version
template_data["version"] = version["name"]
# add template data for colorspaceData
if repre.get("colorspaceData"):
colorspace = repre["colorspaceData"]["colorspace"]
@ -584,7 +624,7 @@ class IntegrateAsset(pyblish.api.InstancePlugin):
is_udim = bool(repre.get("udim"))
# handle publish in place
if "originalDirname" in template:
if "{originalDirname}" in template:
# store as originalDirname only original value without project root
# if instance collected originalDirname is present, it should be
# used for all represe
@ -603,24 +643,64 @@ class IntegrateAsset(pyblish.api.InstancePlugin):
template_data["originalDirname"] = without_root
is_sequence_representation = isinstance(files, (list, tuple))
if is_sequence_representation:
# Collection of files (sequence)
if any(os.path.isabs(fname) for fname in files):
raise KnownPublishError("Given file names contain full paths")
self._validate_repre_files(files, is_sequence_representation)
# Output variables of conditions below:
# - transfers (List[Tuple[str, str]]): src -> dst filepaths to copy
# - repre_context (Dict[str, Any]): context data used to fill template
# - template_data (Dict[str, Any]): source data used to fill template
# - to add required data to 'repre_context' not used for
# formatting
# - anatomy_filled (Dict[str, Any]): filled anatomy of last file
# - to fill 'publishDir' on instance.data -> not ideal
# Treat template with 'orignalBasename' in special way
if "{originalBasename}" in template:
# Remove 'frame' from template data
template_data.pop("frame", None)
# Find out first frame string value
first_index_padded = None
if not is_udim and is_sequence_representation:
col = clique.assemble(files)[0][0]
sorted_frames = tuple(sorted(col.indexes))
# First frame used for end value
first_frame = sorted_frames[0]
# Get last frame for padding
last_frame = sorted_frames[-1]
# Use padding from collection of length of last frame as string
padding = max(col.padding, len(str(last_frame)))
first_index_padded = get_frame_padded(
frame=first_frame,
padding=padding
)
# Convert files to list for single file as remaining part is only
# transfers creation (iteration over files)
if not is_sequence_representation:
files = [files]
repre_context = None
transfers = []
for src_file_name in files:
template_data["originalBasename"], _ = os.path.splitext(
src_file_name)
anatomy_filled = anatomy.format(template_data)
dst = anatomy_filled[template_name]["path"]
src = os.path.join(stagingdir, src_file_name)
transfers.append((src, dst))
if repre_context is None:
repre_context = dst.used_values
if not is_udim and first_index_padded is not None:
repre_context["frame"] = first_index_padded
elif is_sequence_representation:
# Collection of files (sequence)
src_collections, remainders = clique.assemble(files)
if len(files) < 2 or len(src_collections) != 1 or remainders:
raise KnownPublishError((
"Files of representation does not contain proper"
" sequence files.\nCollected collections: {}"
"\nCollected remainders: {}"
).format(
", ".join([str(col) for col in src_collections]),
", ".join([str(rem) for rem in remainders])
))
src_collection = src_collections[0]
template_data["originalBasename"] = src_collection.head[:-1]
destination_indexes = list(src_collection.indexes)
# Use last frame for minimum padding
# - that should cover both 'udim' and 'frame' minimum padding
@ -642,11 +722,8 @@ class IntegrateAsset(pyblish.api.InstancePlugin):
# In case source are published in place we need to
# skip renumbering
repre_frame_start = repre.get("frameStart")
if (
"originalBasename" not in template
and repre_frame_start is not None
):
index_frame_start = int(repre["frameStart"])
if repre_frame_start is not None:
index_frame_start = int(repre_frame_start)
# Shift destination sequence to the start frame
destination_indexes = [
index_frame_start + idx
@ -702,15 +779,6 @@ class IntegrateAsset(pyblish.api.InstancePlugin):
else:
# Single file
fname = files
if os.path.isabs(fname):
self.log.error(
"Filename in representation is filepath {}".format(fname)
)
raise KnownPublishError(
"This is a bug. Representation file name is full path"
)
template_data["originalBasename"], _ = os.path.splitext(fname)
# Manage anatomy template data
template_data.pop("frame", None)
if is_udim:
@ -722,7 +790,7 @@ class IntegrateAsset(pyblish.api.InstancePlugin):
dst = os.path.normpath(template_filled)
# Single file transfer
src = os.path.join(stagingdir, fname)
src = os.path.join(stagingdir, files)
transfers = [(src, dst)]
# todo: Are we sure the assumption each representation

View file

@ -386,6 +386,25 @@ class IntegrateHeroVersion(pyblish.api.InstancePlugin):
repre["_id"] = old_repre["_id"]
update_data = prepare_representation_update_data(
old_repre, repre)
# Keep previously synchronized sites up-to-date
# by comparing old and new sites and adding old sites
# if missing in new ones
old_repre_files_sites = [
f.get("sites", []) for f in old_repre.get("files", [])
]
for i, file in enumerate(repre.get("files", [])):
repre_sites_names = {
s["name"] for s in file.get("sites", [])
}
for site in old_repre_files_sites[i]:
if site["name"] not in repre_sites_names:
# Pop the date to tag for sync
site.pop("created_dt", None)
file["sites"].append(site)
update_data["files"][i] = file
op_session.update_entity(
project_name,
old_repre["type"],