Merge remote-tracking branch 'origin/develop' into feature/PYPE-570-maya-renderlayer-creator

Ondrej Samohel 2020-02-18 20:58:45 +01:00
commit 0253722254
51 changed files with 2854 additions and 2250 deletions

View file

@@ -12,13 +12,32 @@ class IntegrateFtrackNote(pyblish.api.InstancePlugin):
families = ["ftrack"]
optional = True
# Can be set in presets:
# - Allows only `intent` and `comment` keys
note_with_intent_template = "{intent}: {comment}"
# - note label must exist in Ftrack
note_labels = []
def process(self, instance):
comment = (instance.context.data.get("comment") or "").strip()
if not comment:
self.log.info("Comment is not set.")
return
self.log.debug("Comment is set to {}".format(comment))
self.log.debug("Comment is set to `{}`".format(comment))
intent = instance.context.data.get("intent")
if intent:
msg = "Intent is set to `{}` and was added to comment.".format(
intent
)
comment = self.note_with_intent_template.format(**{
"intent": intent,
"comment": comment
})
else:
msg = "Intent is not set."
self.log.debug(msg)
asset_versions_key = "ftrackIntegratedAssetVersions"
asset_versions = instance.data.get(asset_versions_key)
@@ -37,8 +56,22 @@ class IntegrateFtrackNote(pyblish.api.InstancePlugin):
)
)
labels = []
if self.note_labels:
all_labels = session.query("NoteLabel").all()
labels_by_low_name = {lab["name"].lower(): lab for lab in all_labels}
for _label in self.note_labels:
label = labels_by_low_name.get(_label.lower())
if not label:
self.log.warning(
"Note Label `{}` was not found.".format(_label)
)
continue
labels.append(label)
for asset_version in asset_versions:
asset_version.create_note(comment, author=user)
asset_version.create_note(comment, author=user, labels=labels)
try:
session.commit()
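For orientation, a minimal standalone sketch of how the intent template and the case-insensitive label matching above behave; the preset values and the lowered server-side names here are made up:

note_with_intent_template = "{intent}: {comment}"
note_labels = ["For Client", "Internal"]  # hypothetical preset; must exist in Ftrack

comment = "fixed flicker on the arm"
intent = "For review"

# The intent is prepended to the comment via the template.
note_text = note_with_intent_template.format(intent=intent, comment=comment)
assert note_text == "For review: fixed flicker on the arm"

# Labels are matched against Ftrack names lowered on both sides.
existing_low = {"for client": "...", "internal": "...", "cbb": "..."}
labels = [
    existing_low[lbl.lower()]
    for lbl in note_labels
    if lbl.lower() in existing_low
]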

View file

@@ -18,7 +18,7 @@ def open(filepath):
class Openfile(api.Loader):
"""Open Image Sequence with system default"""
families = ["write"]
families = ["render2d"]
representations = ["*"]
label = "Open"

View file

@@ -1,10 +1,24 @@
"""
"""Collect Anatomy and global anatomy data.
Requires:
None
session -> AVALON_TASK
projectEntity, assetEntity -> collect_avalon_entities *(pyblish.api.CollectorOrder)
username -> collect_pype_user *(pyblish.api.CollectorOrder + 0.001)
datetimeData -> collect_datetime_data *(pyblish.api.CollectorOrder)
Optional:
comment -> collect_comment *(pyblish.api.CollectorOrder)
intent -> collected in pyblish-lite
Provides:
context -> anatomy (pypeapp.Anatomy)
context -> anatomyData
"""
import os
import json
from avalon import api, lib
from pypeapp import Anatomy
import pyblish.api
@@ -12,9 +26,52 @@ import pyblish.api
class CollectAnatomy(pyblish.api.ContextPlugin):
"""Collect Anatomy into Context"""
order = pyblish.api.CollectorOrder
order = pyblish.api.CollectorOrder + 0.002
label = "Collect Anatomy"
def process(self, context):
context.data['anatomy'] = Anatomy()
self.log.info("Anatomy templates collected...")
root_path = api.registered_root()
task_name = api.Session["AVALON_TASK"]
project_entity = context.data["projectEntity"]
asset_entity = context.data["assetEntity"]
project_name = project_entity["name"]
context.data["anatomy"] = Anatomy(project_name)
self.log.info(
"Anatomy object collected for project \"{}\".".format(project_name)
)
hierarchy_items = asset_entity["data"]["parents"]
hierarchy = ""
if hierarchy_items:
hierarchy = os.path.join(*hierarchy_items)
context_data = {
"root": root_path,
"project": {
"name": project_name,
"code": project_entity["data"].get("code")
},
"asset": asset_entity["name"],
"hierarchy": hierarchy.replace("\\", "/"),
"task": task_name,
"username": context.data["user"]
}
avalon_app_name = os.environ.get("AVALON_APP_NAME")
if avalon_app_name:
application_def = lib.get_application(avalon_app_name)
app_dir = application_def.get("application_dir")
if app_dir:
context_data["app"] = app_dir
datetime_data = context.data.get("datetimeData") or {}
context_data.update(datetime_data)
context.data["anatomyData"] = context_data
self.log.info("Global anatomy Data collected")
self.log.debug(json.dumps(context_data, indent=4))
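As a side note on the hierarchy handling above: the asset's parents are joined with the OS path separator and then normalized to forward slashes, so the template value is stable across platforms. A tiny sketch with made-up parents:

import os

parents = ["shots", "sq01", "sh010"]  # hypothetical asset_entity["data"]["parents"]
hierarchy = os.path.join(*parents) if parents else ""
hierarchy = hierarchy.replace("\\", "/")  # -> "shots/sq01/sh010" even on Windows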

View file

@@ -0,0 +1,46 @@
"""Collect Anatomy and global anatomy data.
Requires:
session -> AVALON_PROJECT, AVALON_ASSET
Provides:
context -> projectEntity - project entity from database
context -> assetEntity - asset entity from database
"""
from avalon import io, api
import pyblish.api
class CollectAvalonEntities(pyblish.api.ContextPlugin):
"""Collect Anatomy into Context"""
order = pyblish.api.CollectorOrder
label = "Collect Avalon Entities"
def process(self, context):
project_name = api.Session["AVALON_PROJECT"]
asset_name = api.Session["AVALON_ASSET"]
project_entity = io.find_one({
"type": "project",
"name": project_name
})
assert project_entity, (
"Project '{0}' was not found."
).format(project_name)
self.log.debug("Collected Project entity \"{}\"".format(project_entity))
asset_entity = io.find_one({
"type": "asset",
"name": asset_name,
"parent": project_entity["_id"]
})
assert asset_entity, (
"No asset found by the name '{0}' in project '{1}'"
).format(asset_name, project_name)
self.log.debug("Collected Asset entity \"{}\"".format(asset_entity))
context.data["projectEntity"] = project_entity
context.data["assetEntity"] = asset_entity

View file

@@ -217,12 +217,10 @@ class CollectRenderedFrames(pyblish.api.ContextPlugin):
# Get family from the data
families = data.get("families", ["render"])
if "render" not in families:
families.append("render")
if "ftrack" not in families:
families.append("ftrack")
if "write" in instance_family:
families.append("write")
if families_data and "render2d" in families_data:
families.append("render2d")
if families_data and "slate" in families_data:
families.append("slate")
@@ -332,8 +330,30 @@ class CollectRenderedFrames(pyblish.api.ContextPlugin):
}
)
if "representations" not in new_instance.data:
new_instance.data["representations"] = []
if "representations" not in instance.data:
instance.data["representations"] = []
for collection in collections:
self.log.info(" - {}".format(str(collection)))
ext = collection.tail.lstrip(".")
if "slate" in instance.data["families"]:
frame_start += 1
representation = {
"name": ext,
"ext": "{}".format(ext),
"files": list(collection),
"frameStart": frame_start,
"frameEnd": frame_end,
"stagingDir": root,
"anatomy_template": "render",
"fps": fps,
"tags": ["review"] if not baked_mov_path else ["thumb-nuke"],
}
instance.data["representations"].append(
representation)
if regex:
for collection in collections:
@@ -403,52 +423,16 @@ class CollectRenderedFrames(pyblish.api.ContextPlugin):
# we have no subset so we take every collection and create one
# from it
if regex:
for collection in collections:
new_instance = context.create_instance(str(collection))
self.log.info(
"Creating subset from: %s" % str(collection))
# Ensure each instance gets a unique reference to the data
data = copy.deepcopy(data)
# Ensure each instance gets a unique
# reference to the data
data = copy.deepcopy(data)
# If no subset provided, get it from collection's head
subset = data.get("subset", collection.head.rstrip("_. "))
# If no subset provided, get it from collection's head
subset = data.get(
"subset", collection.head.rstrip("_. "))
# If no start or end frame provided,
# get it from collection
indices = list(collection.indexes)
start = data.get("frameStart", indices[0])
end = data.get("frameEnd", indices[-1])
ext = list(collection)[0].split(".")[-1]
if "review" not in families:
families.append("review")
new_instance.data.update(
{
"name": str(collection),
"family": families[0],
"families": list(families),
"subset": subset,
"asset": data.get(
"asset", api.Session["AVALON_ASSET"]),
"stagingDir": root,
"frameStart": start,
"frameEnd": end,
"fps": fps,
"source": data.get("source", ""),
"pixelAspect": pixel_aspect,
"resolutionWidth": resolution_width,
"resolutionHeight": resolution_height,
"version": version
}
)
if lut_path:
new_instance.data.update({"lutPath": lut_path})
# If no start or end frame provided, get it from collection
indices = list(collection.indexes)
start = int(data.get("frameStart", indices[0]))
end = int(data.get("frameEnd", indices[-1]))
new_instance.append(collection)
new_instance.context.data["fps"] = fps
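The collection handling in this plugin comes from the clique library; a short sketch of how a flat file list turns into the head/tail/indexes used above (file names invented):

import clique

files = ["shot_v01.1001.exr", "shot_v01.1002.exr", "shot_v01.1003.exr"]
collections, remainder = clique.assemble(files)
col = collections[0]

ext = col.tail.lstrip(".")       # "exr"
indices = list(col.indexes)      # [1001, 1002, 1003]
subset = col.head.rstrip("_. ")  # "shot_v01"
path = col.format("{head}{padding}{tail}")  # "shot_v01.%d.exr" for this unpadded set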

View file

@@ -0,0 +1,123 @@
"""
Requires:
context -> anatomyData
context -> projectEntity
context -> assetEntity
instance -> asset
instance -> subset
instance -> family
Optional:
instance -> version
instance -> resolutionWidth
instance -> resolutionHeight
instance -> fps
Provides:
instance -> projectEntity
instance -> assetEntity
instance -> anatomyData
instance -> version
instance -> latestVersion
"""
import copy
import json
from avalon import io
import pyblish.api
class CollectInstanceAnatomyData(pyblish.api.InstancePlugin):
"""Fill templates with data needed for publish"""
order = pyblish.api.CollectorOrder + 0.49
label = "Collect instance anatomy data"
def process(self, instance):
# get all the stuff from the database
anatomy_data = copy.deepcopy(instance.context.data["anatomyData"])
project_entity = instance.context.data["projectEntity"]
context_asset_entity = instance.context.data["assetEntity"]
asset_name = instance.data["asset"]
# Check if asset name is the same as what is in context
# - they may be different, e.g. in NukeStudio
if context_asset_entity["name"] == asset_name:
asset_entity = context_asset_entity
else:
asset_entity = io.find_one({
"type": "asset",
"name": asset_name,
"parent": project_entity["_id"]
})
subset_name = instance.data["subset"]
version_number = instance.data.get("version")
latest_version = None
if asset_entity:
subset_entity = io.find_one({
"type": "subset",
"name": subset_name,
"parent": asset_entity["_id"]
})
if subset_entity is None:
self.log.debug("Subset entity does not exist yet.")
else:
version_entity = io.find_one(
{
"type": "version",
"parent": subset_entity["_id"]
},
sort=[("name", -1)]
)
if version_entity:
latest_version = version_entity["name"]
# If version is not specified for instance or context
if version_number is None:
# TODO we should be able to change default version by studio
# preferences (like start with version number `0`)
version_number = 1
# use latest version (+1) if already any exist
if latest_version is not None:
version_number += int(latest_version)
anatomy_updates = {
"asset": asset_name,
"family": instance.data["family"],
"subset": subset_name,
"version": version_number
}
task_name = instance.data.get("task")
if task_name:
anatomy_updates["task"] = task_name
# Version is not taken from context data since it may differ per instance
anatomy_data.update(anatomy_updates)
resolution_width = instance.data.get("resolutionWidth")
if resolution_width:
anatomy_data["resolution_width"] = resolution_width
resolution_height = instance.data.get("resolutionHeight")
if resolution_height:
anatomy_data["resolution_height"] = resolution_height
fps = instance.data.get("fps")
if fps:
anatomy_data["fps"] = fps
instance.data["projectEntity"] = project_entity
instance.data["assetEntity"] = asset_entity
instance.data["anatomyData"] = anatomy_data
instance.data["latestVersion"] = latest_version
# TODO should the version number be set here?
instance.data["version"] = version_number
self.log.info("Instance anatomy Data collected")
self.log.debug(json.dumps(anatomy_data, indent=4))
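The version logic above reduces to: use the instance's explicit version if there is one, otherwise latest + 1, falling back to 1 when nothing was published yet. Condensed into a standalone sketch:

def resolve_version(explicit, latest):
    # explicit: instance's own version or None
    # latest: newest version name found in the database, or None
    if explicit is not None:
        return explicit
    version = 1
    if latest is not None:
        version += int(latest)
    return version

assert resolve_version(None, None) == 1   # first publish
assert resolve_version(None, 4) == 5      # latest + 1
assert resolve_version(12, 4) == 12       # explicit version wins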

View file

@@ -1,24 +0,0 @@
"""
Requires:
None
Provides:
context -> projectData
"""
import pyblish.api
import pype.api as pype
class CollectProjectData(pyblish.api.ContextPlugin):
"""Collecting project data from avalon db"""
label = "Collect Project Data"
order = pyblish.api.CollectorOrder - 0.1
hosts = ["nukestudio"]
def process(self, context):
# get project data from avalon db
context.data["projectData"] = pype.get_project()["data"]
return

View file

@@ -0,0 +1,60 @@
"""
Requires:
context -> anatomy
context -> anatomyData
Provides:
instance -> publishDir
instance -> resourcesDir
"""
import os
import copy
import pyblish.api
from avalon import api
class CollectResourcesPath(pyblish.api.InstancePlugin):
"""Generate directory path where the files and resources will be stored"""
label = "Collect Resources Path"
order = pyblish.api.CollectorOrder + 0.495
def process(self, instance):
anatomy = instance.context.data["anatomy"]
template_data = copy.deepcopy(instance.data["anatomyData"])
# This is for cases of Deprecated anatomy without `folder`
# TODO remove when all clients have solved this issue
template_data.update({
"frame": "FRAME_TEMP",
"representation": "TEMP"
})
anatomy_filled = anatomy.format(template_data)
if "folder" in anatomy.templates["publish"]:
publish_folder = anatomy_filled["publish"]["folder"]
else:
# handle deprecated anatomy where the `folder` key is missing
# underneath `publish`
project_name = api.Session["AVALON_PROJECT"]
self.log.warning((
"Deprecation warning: Anatomy does not have the `folder`"
" key set underneath `publish` (in global or for project `{}`)."
).format(project_name))
file_path = anatomy_filled["publish"]["path"]
# Directory
publish_folder = os.path.dirname(file_path)
publish_folder = os.path.normpath(publish_folder)
resources_folder = os.path.join(publish_folder, "resources")
instance.data["publishDir"] = publish_folder
instance.data["resourcesDir"] = resources_folder
self.log.debug("publishDir: \"{}\"".format(publish_folder))
self.log.debug("resourcesDir: \"{}\"".format(resources_folder))

View file

@@ -1,119 +0,0 @@
"""
Requires:
session -> AVALON_PROJECT
context -> anatomy (pypeapp.Anatomy)
instance -> subset
instance -> asset
instance -> family
Provides:
instance -> template
instance -> assumedTemplateData
instance -> assumedDestination
"""
import os
from avalon import io, api
import pyblish.api
class CollectTemplates(pyblish.api.InstancePlugin):
"""Fill templates with data needed for publish"""
order = pyblish.api.CollectorOrder + 0.1
label = "Collect and fill Templates"
hosts = ["maya", "nuke", "standalonepublisher"]
def process(self, instance):
# get all the stuff from the database
subset_name = instance.data["subset"]
asset_name = instance.data["asset"]
project_name = api.Session["AVALON_PROJECT"]
project = io.find_one(
{
"type": "project",
"name": project_name
},
projection={"config": True, "data": True}
)
template = project["config"]["template"]["publish"]
anatomy = instance.context.data['anatomy']
asset = io.find_one({
"type": "asset",
"name": asset_name,
"parent": project["_id"]
})
assert asset, ("No asset found by the name '{}' "
"in project '{}'".format(asset_name, project_name))
silo = asset.get('silo')
subset = io.find_one({
"type": "subset",
"name": subset_name,
"parent": asset["_id"]
})
# assume there is no version yet, we start at `1`
version = None
version_number = 1
if subset is not None:
version = io.find_one(
{
"type": "version",
"parent": subset["_id"]
},
sort=[("name", -1)]
)
# if there is a subset there ought to be version
if version is not None:
version_number += int(version["name"])
hierarchy = asset['data']['parents']
if hierarchy:
# hierarchy = os.path.sep.join(hierarchy)
hierarchy = os.path.join(*hierarchy)
else:
hierarchy = ""
template_data = {"root": api.Session["AVALON_PROJECTS"],
"project": {"name": project_name,
"code": project['data']['code']},
"silo": silo,
"family": instance.data['family'],
"asset": asset_name,
"subset": subset_name,
"version": version_number,
"hierarchy": hierarchy.replace("\\", "/"),
"representation": "TEMP"}
# Add datetime data to template data
datetime_data = instance.context.data.get("datetimeData") or {}
template_data.update(datetime_data)
resolution_width = instance.data.get("resolutionWidth")
resolution_height = instance.data.get("resolutionHeight")
fps = instance.data.get("fps")
if resolution_width:
template_data["resolution_width"] = resolution_width
if resolution_width:
template_data["resolution_height"] = resolution_height
if resolution_width:
template_data["fps"] = fps
instance.data["template"] = template
instance.data["assumedTemplateData"] = template_data
# We take the parent folder of representation 'filepath'
instance.data["assumedDestination"] = os.path.dirname(
(anatomy.format(template_data))["publish"]["path"]
)
self.log.info("Assumed Destination has been created...")
self.log.debug("__ assumedTemplateData: `{}`".format(instance.data["assumedTemplateData"]))
self.log.debug("__ template: `{}`".format(instance.data["template"]))

View file

@@ -32,21 +32,15 @@ class ExtractBurnin(pype.api.Extractor):
frame_end = int(instance.data.get("frameEnd") or 1)
duration = frame_end - frame_start + 1
prep_data = {
"username": instance.context.data['user'],
"asset": os.environ['AVALON_ASSET'],
"task": os.environ['AVALON_TASK'],
prep_data = copy.deepcopy(instance.data["anatomyData"])
prep_data.update({
"frame_start": frame_start,
"frame_end": frame_end,
"duration": duration,
"version": int(version),
"comment": instance.context.data.get("comment", ""),
"intent": instance.context.data.get("intent", "")
}
# Add datetime data to preparation data
datetime_data = instance.context.data.get("datetimeData") or {}
prep_data.update(datetime_data)
})
slate_frame_start = frame_start
slate_frame_end = frame_end
@@ -64,10 +58,6 @@ class ExtractBurnin(pype.api.Extractor):
"slate_duration": slate_duration
})
# Update data with template data
template_data = instance.data.get("assumedTemplateData") or {}
prep_data.update(template_data)
# get anatomy project
anatomy = instance.context.data['anatomy']
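The refactor above swaps a hand-built dict for a deep copy of the shared anatomyData, so burnin-specific keys never leak back into the context. Sketched with made-up values:

import copy

anatomy_data = {"asset": "sh010", "task": "comp", "username": "artist"}

prep_data = copy.deepcopy(anatomy_data)  # never mutate the shared dict
prep_data.update({
    "frame_start": 1001,
    "frame_end": 1100,
    "duration": 100,
})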

View file

@@ -10,7 +10,7 @@ class ExtractJpegEXR(pyblish.api.InstancePlugin):
label = "Extract Jpeg EXR"
hosts = ["shell"]
order = pyblish.api.ExtractorOrder
families = ["imagesequence", "render", "write", "source"]
families = ["imagesequence", "render", "render2d", "source"]
enabled = False
def process(self, instance):
@@ -27,8 +27,12 @@ class ExtractJpegEXR(pyblish.api.InstancePlugin):
for repre in representations:
self.log.debug(repre)
if 'review' not in repre['tags']:
return
valid = 'review' in repre['tags'] or "thumb-nuke" in repre['tags']
if not valid:
continue
if not isinstance(repre['files'], list):
continue
stagingdir = os.path.normpath(repre.get("stagingDir"))
input_file = repre['files'][0]
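The change from return to continue above matters once an instance carries several representations: return aborted the whole loop at the first representation without a review tag, while continue just skips it. A toy illustration with invented representations:

repres = [
    {"tags": ["thumbnail"], "files": ["thumb.jpg"]},
    {"tags": ["review"], "files": ["b.1001.exr", "b.1002.exr"]},
]

processed = []
for repre in repres:
    if "review" not in repre["tags"] and "thumb-nuke" not in repre["tags"]:
        continue  # skip this one, keep checking the rest
    if not isinstance(repre["files"], list):
        continue
    processed.append(repre)

assert len(processed) == 1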

View file

@@ -43,320 +43,328 @@ class ExtractReview(pyblish.api.InstancePlugin):
# filter out mov and img sequences
representations_new = representations[:]
for repre in representations:
if repre['ext'] in self.ext_filter:
tags = repre.get("tags", [])
if "thumbnail" in tags:
continue
self.log.info("Try repre: {}".format(repre))
if "review" in tags:
staging_dir = repre["stagingDir"]
# iterating preset output profiles
for name, profile in output_profiles.items():
repre_new = repre.copy()
ext = profile.get("ext", None)
p_tags = profile.get('tags', [])
self.log.info("p_tags: `{}`".format(p_tags))
# adding control for presets to be sequence
# or single file
is_sequence = ("sequence" in p_tags) and (ext in (
"png", "jpg", "jpeg"))
self.log.debug("Profile name: {}".format(name))
if not ext:
ext = "mov"
self.log.warning(
str("`ext` attribute not in output "
"profile. Setting to default ext: `mov`"))
self.log.debug(
"instance.families: {}".format(
instance.data['families']))
self.log.debug(
"profile.families: {}".format(profile['families']))
if any(item in instance.data['families'] for item in profile['families']):
if isinstance(repre["files"], list):
collections, remainder = clique.assemble(
repre["files"])
full_input_path = os.path.join(
staging_dir, collections[0].format(
'{head}{padding}{tail}')
)
filename = collections[0].format('{head}')
if filename.endswith('.'):
filename = filename[:-1]
else:
full_input_path = os.path.join(
staging_dir, repre["files"])
filename = repre["files"].split(".")[0]
repr_file = filename + "_{0}.{1}".format(name, ext)
full_output_path = os.path.join(
staging_dir, repr_file)
if is_sequence:
filename_base = filename + "_{0}".format(name)
repr_file = filename_base + ".%08d.{0}".format(
ext)
repre_new["sequence_file"] = repr_file
full_output_path = os.path.join(
staging_dir, filename_base, repr_file)
self.log.info("input {}".format(full_input_path))
self.log.info("output {}".format(full_output_path))
new_tags = [x for x in tags if x != "delete"]
# add families
[instance.data["families"].append(t)
for t in p_tags
if t not in instance.data["families"]]
# add profile tags to new_tags
[new_tags.append(t) for t in p_tags
if t not in new_tags]
self.log.info("new_tags: `{}`".format(new_tags))
input_args = []
# overrides output file
input_args.append("-y")
# preset's input data
input_args.extend(profile.get('input', []))
# necessary input data
# adds start arg only if image sequence
if isinstance(repre["files"], list):
input_args.append(
"-start_number {0} -framerate {1}".format(
start_frame, fps))
input_args.append("-i {}".format(full_input_path))
for audio in instance.data.get("audio", []):
offset_frames = (
instance.data.get("startFrameReview") -
audio["offset"]
)
offset_seconds = offset_frames / fps
if offset_seconds > 0:
input_args.append("-ss")
else:
input_args.append("-itsoffset")
input_args.append(str(abs(offset_seconds)))
input_args.extend(
["-i", audio["filename"]]
)
# Need to merge audio if there are more
# than 1 input.
if len(instance.data["audio"]) > 1:
input_args.extend(
[
"-filter_complex",
"amerge",
"-ac",
"2"
]
)
output_args = []
codec_args = profile.get('codec', [])
output_args.extend(codec_args)
# preset's output data
output_args.extend(profile.get('output', []))
# defining image ratios
resolution_ratio = float(resolution_width / (
resolution_height * pixel_aspect))
delivery_ratio = float(to_width) / float(to_height)
self.log.debug(resolution_ratio)
self.log.debug(delivery_ratio)
# get scale factor
scale_factor = to_height / (
resolution_height * pixel_aspect)
self.log.debug(scale_factor)
# letter_box
lb = profile.get('letter_box', 0)
if lb != 0:
ffmpet_width = to_width
ffmpet_height = to_height
if "reformat" not in p_tags:
lb /= pixel_aspect
if resolution_ratio != delivery_ratio:
ffmpet_width = resolution_width
ffmpet_height = int(
resolution_height * pixel_aspect)
else:
if resolution_ratio != delivery_ratio:
lb /= scale_factor
else:
lb /= pixel_aspect
output_args.append(str(
"-filter:v scale={0}x{1}:flags=lanczos,"
"setsar=1,drawbox=0:0:iw:"
"round((ih-(iw*(1/{2})))/2):t=fill:"
"c=black,drawbox=0:ih-round((ih-(iw*("
"1/{2})))/2):iw:round((ih-(iw*(1/{2})))"
"/2):t=fill:c=black").format(
ffmpet_width, ffmpet_height, lb))
# In case audio is longer than video.
output_args.append("-shortest")
# output filename
output_args.append(full_output_path)
self.log.debug(
"__ pixel_aspect: `{}`".format(pixel_aspect))
self.log.debug(
"__ resolution_width: `{}`".format(
resolution_width))
self.log.debug(
"__ resolution_height: `{}`".format(
resolution_height))
# scaling non-square pixels and 1920 width
if "reformat" in p_tags:
if resolution_ratio < delivery_ratio:
self.log.debug("lower then delivery")
width_scale = int(to_width * scale_factor)
width_half_pad = int((
to_width - width_scale)/2)
height_scale = to_height
height_half_pad = 0
else:
self.log.debug("heigher then delivery")
width_scale = to_width
width_half_pad = 0
scale_factor = float(to_width) / float(
resolution_width)
self.log.debug(scale_factor)
height_scale = int(
resolution_height * scale_factor)
height_half_pad = int(
(to_height - height_scale)/2)
self.log.debug(
"__ width_scale: `{}`".format(width_scale))
self.log.debug(
"__ width_half_pad: `{}`".format(
width_half_pad))
self.log.debug(
"__ height_scale: `{}`".format(
height_scale))
self.log.debug(
"__ height_half_pad: `{}`".format(
height_half_pad))
scaling_arg = str(
"scale={0}x{1}:flags=lanczos,"
"pad={2}:{3}:{4}:{5}:black,setsar=1"
).format(width_scale, height_scale,
to_width, to_height,
width_half_pad,
height_half_pad
)
vf_back = self.add_video_filter_args(
output_args, scaling_arg)
# add it to output_args
output_args.insert(0, vf_back)
# baking lut file application
lut_path = instance.data.get("lutPath")
if lut_path and ("bake-lut" in p_tags):
# removing gamma info as it is all baked into the LUT
gamma = next((g for g in input_args
if "-gamma" in g), None)
if gamma:
input_args.remove(gamma)
# create lut argument
lut_arg = "lut3d=file='{}'".format(
lut_path.replace(
"\\", "/").replace(":/", "\\:/")
)
lut_arg += ",colormatrix=bt601:bt709"
vf_back = self.add_video_filter_args(
output_args, lut_arg)
# add it to output_args
output_args.insert(0, vf_back)
self.log.info("Added Lut to ffmpeg command")
self.log.debug(
"_ output_args: `{}`".format(output_args))
if is_sequence:
stg_dir = os.path.dirname(full_output_path)
if not os.path.exists(stg_dir):
self.log.debug(
"creating dir: {}".format(stg_dir))
os.mkdir(stg_dir)
mov_args = [
os.path.join(
os.environ.get(
"FFMPEG_PATH",
""), "ffmpeg"),
" ".join(input_args),
" ".join(output_args)
]
subprcs_cmd = " ".join(mov_args)
# run subprocess
self.log.debug("Executing: {}".format(subprcs_cmd))
output = pype.api.subprocess(subprcs_cmd)
self.log.debug("Output: {}".format(output))
# create representation data
repre_new.update({
'name': name,
'ext': ext,
'files': repr_file,
"tags": new_tags,
"outputName": name,
"codec": codec_args,
"_profile": profile,
"resolutionHeight": resolution_height,
"resolutionWidth": resolution_width,
})
if is_sequence:
repre_new.update({
"stagingDir": stg_dir,
"files": os.listdir(stg_dir)
})
if repre_new.get('preview'):
repre_new.pop("preview")
if repre_new.get('thumbnail'):
repre_new.pop("thumbnail")
# adding representation
self.log.debug("Adding: {}".format(repre_new))
representations_new.append(repre_new)
else:
continue
else:
if repre['ext'] not in self.ext_filter:
continue
tags = repre.get("tags", [])
if "thumbnail" in tags:
continue
self.log.info("Try repre: {}".format(repre))
if "review" not in tags:
continue
staging_dir = repre["stagingDir"]
# iterating preset output profiles
for name, profile in output_profiles.items():
repre_new = repre.copy()
ext = profile.get("ext", None)
p_tags = profile.get('tags', [])
self.log.info("p_tags: `{}`".format(p_tags))
# adding control for presets to be sequence
# or single file
is_sequence = ("sequence" in p_tags) and (ext in (
"png", "jpg", "jpeg"))
self.log.debug("Profile name: {}".format(name))
if not ext:
ext = "mov"
self.log.warning(
str("`ext` attribute not in output "
"profile. Setting to default ext: `mov`"))
self.log.debug(
"instance.families: {}".format(
instance.data['families']))
self.log.debug(
"profile.families: {}".format(profile['families']))
profile_family_check = False
for _family in profile['families']:
if _family in instance.data['families']:
profile_family_check = True
break
if not profile_family_check:
continue
if isinstance(repre["files"], list):
collections, remainder = clique.assemble(
repre["files"])
full_input_path = os.path.join(
staging_dir, collections[0].format(
'{head}{padding}{tail}')
)
filename = collections[0].format('{head}')
if filename.endswith('.'):
filename = filename[:-1]
else:
full_input_path = os.path.join(
staging_dir, repre["files"])
filename = repre["files"].split(".")[0]
repr_file = filename + "_{0}.{1}".format(name, ext)
full_output_path = os.path.join(
staging_dir, repr_file)
if is_sequence:
filename_base = filename + "_{0}".format(name)
repr_file = filename_base + ".%08d.{0}".format(
ext)
repre_new["sequence_file"] = repr_file
full_output_path = os.path.join(
staging_dir, filename_base, repr_file)
self.log.info("input {}".format(full_input_path))
self.log.info("output {}".format(full_output_path))
new_tags = [x for x in tags if x != "delete"]
# add families
[instance.data["families"].append(t)
for t in p_tags
if t not in instance.data["families"]]
# add profile tags to new_tags
[new_tags.append(t) for t in p_tags
if t not in new_tags]
self.log.info("new_tags: `{}`".format(new_tags))
input_args = []
# overrides output file
input_args.append("-y")
# preset's input data
input_args.extend(profile.get('input', []))
# necessary input data
# adds start arg only if image sequence
if isinstance(repre["files"], list):
input_args.append(
"-start_number {0} -framerate {1}".format(
start_frame, fps))
input_args.append("-i {}".format(full_input_path))
for audio in instance.data.get("audio", []):
offset_frames = (
instance.data.get("startFrameReview") -
audio["offset"]
)
offset_seconds = offset_frames / fps
if offset_seconds > 0:
input_args.append("-ss")
else:
input_args.append("-itsoffset")
input_args.append(str(abs(offset_seconds)))
input_args.extend(
["-i", audio["filename"]]
)
# Need to merge audio if there are more
# than 1 input.
if len(instance.data["audio"]) > 1:
input_args.extend(
[
"-filter_complex",
"amerge",
"-ac",
"2"
]
)
output_args = []
codec_args = profile.get('codec', [])
output_args.extend(codec_args)
# preset's output data
output_args.extend(profile.get('output', []))
# defining image ratios
resolution_ratio = float(resolution_width / (
resolution_height * pixel_aspect))
delivery_ratio = float(to_width) / float(to_height)
self.log.debug(resolution_ratio)
self.log.debug(delivery_ratio)
# get scale factor
scale_factor = to_height / (
resolution_height * pixel_aspect)
self.log.debug(scale_factor)
# letter_box
lb = profile.get('letter_box', 0)
if lb != 0:
ffmpet_width = to_width
ffmpet_height = to_height
if "reformat" not in p_tags:
lb /= pixel_aspect
if resolution_ratio != delivery_ratio:
ffmpet_width = resolution_width
ffmpet_height = int(
resolution_height * pixel_aspect)
else:
if resolution_ratio != delivery_ratio:
lb /= scale_factor
else:
lb /= pixel_aspect
output_args.append(str(
"-filter:v scale={0}x{1}:flags=lanczos,"
"setsar=1,drawbox=0:0:iw:"
"round((ih-(iw*(1/{2})))/2):t=fill:"
"c=black,drawbox=0:ih-round((ih-(iw*("
"1/{2})))/2):iw:round((ih-(iw*(1/{2})))"
"/2):t=fill:c=black").format(
ffmpet_width, ffmpet_height, lb))
# In case audio is longer than video.
output_args.append("-shortest")
# output filename
output_args.append(full_output_path)
self.log.debug(
"__ pixel_aspect: `{}`".format(pixel_aspect))
self.log.debug(
"__ resolution_width: `{}`".format(
resolution_width))
self.log.debug(
"__ resolution_height: `{}`".format(
resolution_height))
# scaling non-square pixels and 1920 width
if "reformat" in p_tags:
if resolution_ratio < delivery_ratio:
self.log.debug("lower then delivery")
width_scale = int(to_width * scale_factor)
width_half_pad = int((
to_width - width_scale)/2)
height_scale = to_height
height_half_pad = 0
else:
self.log.debug("heigher then delivery")
width_scale = to_width
width_half_pad = 0
scale_factor = float(to_width) / float(
resolution_width)
self.log.debug(scale_factor)
height_scale = int(
resolution_height * scale_factor)
height_half_pad = int(
(to_height - height_scale)/2)
self.log.debug(
"__ width_scale: `{}`".format(width_scale))
self.log.debug(
"__ width_half_pad: `{}`".format(
width_half_pad))
self.log.debug(
"__ height_scale: `{}`".format(
height_scale))
self.log.debug(
"__ height_half_pad: `{}`".format(
height_half_pad))
scaling_arg = str(
"scale={0}x{1}:flags=lanczos,"
"pad={2}:{3}:{4}:{5}:black,setsar=1"
).format(width_scale, height_scale,
to_width, to_height,
width_half_pad,
height_half_pad
)
vf_back = self.add_video_filter_args(
output_args, scaling_arg)
# add it to output_args
output_args.insert(0, vf_back)
# baking lut file application
lut_path = instance.data.get("lutPath")
if lut_path and ("bake-lut" in p_tags):
# removing gamma info as it is all baked into the LUT
gamma = next((g for g in input_args
if "-gamma" in g), None)
if gamma:
input_args.remove(gamma)
# create lut argument
lut_arg = "lut3d=file='{}'".format(
lut_path.replace(
"\\", "/").replace(":/", "\\:/")
)
lut_arg += ",colormatrix=bt601:bt709"
vf_back = self.add_video_filter_args(
output_args, lut_arg)
# add it to output_args
output_args.insert(0, vf_back)
self.log.info("Added Lut to ffmpeg command")
self.log.debug(
"_ output_args: `{}`".format(output_args))
if is_sequence:
stg_dir = os.path.dirname(full_output_path)
if not os.path.exists(stg_dir):
self.log.debug(
"creating dir: {}".format(stg_dir))
os.mkdir(stg_dir)
mov_args = [
os.path.join(
os.environ.get(
"FFMPEG_PATH",
""), "ffmpeg"),
" ".join(input_args),
" ".join(output_args)
]
subprcs_cmd = " ".join(mov_args)
# run subprocess
self.log.debug("Executing: {}".format(subprcs_cmd))
output = pype.api.subprocess(subprcs_cmd)
self.log.debug("Output: {}".format(output))
# create representation data
repre_new.update({
'name': name,
'ext': ext,
'files': repr_file,
"tags": new_tags,
"outputName": name,
"codec": codec_args,
"_profile": profile,
"resolutionHeight": resolution_height,
"resolutionWidth": resolution_width,
})
if is_sequence:
repre_new.update({
"stagingDir": stg_dir,
"files": os.listdir(stg_dir)
})
if repre_new.get('preview'):
repre_new.pop("preview")
if repre_new.get('thumbnail'):
repre_new.pop("thumbnail")
# adding representation
self.log.debug("Adding: {}".format(repre_new))
representations_new.append(repre_new)
# rebuild the list in place: calling remove() while iterating
# the same list would skip elements
representations_new[:] = [
repre for repre in representations_new
if "delete" not in repre.get("tags", [])
]
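For orientation, the ffmpeg invocation assembled above has roughly the shape below; the paths, fps and sizes are placeholders, and the filter string mirrors the scale/pad/setsar chain built by the plugin:

import os

ffmpeg = os.path.join(os.environ.get("FFMPEG_PATH", ""), "ffmpeg")
input_args = [
    "-y",                                # overwrite existing output
    "-start_number 1001 -framerate 25",  # only added for image sequences
    "-i /stage/shot_v01.%04d.exr",
]
output_args = [
    "-filter:v scale=1920x1080:flags=lanczos,setsar=1",
    "/stage/shot_v01_h264.mov",
]
cmd = " ".join([ffmpeg] + input_args + output_args)
# the plugin then runs this through pype.api.subprocess(cmd)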

View file

@@ -1,417 +0,0 @@
import os
import logging
import shutil
import errno
import pyblish.api
from avalon import api, io
from avalon.vendor import filelink
log = logging.getLogger(__name__)
class IntegrateAsset(pyblish.api.InstancePlugin):
"""Resolve any dependency issies
This plug-in resolves any paths which, if not updated might break
the published file.
The order of families is important, when working with lookdev you want to
first publish the texture, update the texture paths in the nodes and then
publish the shading network. Same goes for file dependent assets.
"""
label = "Integrate Asset"
order = pyblish.api.IntegratorOrder
families = []
exclude_families = ["clip"]
def process(self, instance):
if [ef for ef in self.exclude_families
if instance.data["family"] in ef]:
return
self.register(instance)
self.log.info("Integrating Asset in to the database ...")
if instance.data.get('transfer', True):
self.integrate(instance)
def register(self, instance):
# Required environment variables
PROJECT = api.Session["AVALON_PROJECT"]
ASSET = instance.data.get("asset") or api.Session["AVALON_ASSET"]
LOCATION = api.Session["AVALON_LOCATION"]
context = instance.context
# Atomicity
#
# Guarantee atomic publishes - each asset contains
# an identical set of members.
# __
# / o
# / \
# | o |
# \ /
# o __/
#
assert all(result["success"] for result in context.data["results"]), (
"Atomicity not held, aborting.")
# Assemble
#
# |
# v
# ---> <----
# ^
# |
#
stagingdir = instance.data.get("stagingDir")
assert stagingdir, ("Incomplete instance \"%s\": "
"Missing reference to staging area." % instance)
# extra check if stagingDir actually exists and is available
self.log.debug("Establishing staging directory @ %s" % stagingdir)
# Ensure at least one file is set up for transfer in staging dir.
files = instance.data.get("files", [])
assert files, "Instance has no files to transfer"
assert isinstance(files, (list, tuple)), (
"Instance 'files' must be a list, got: {0}".format(files)
)
project = io.find_one({"type": "project"})
asset = io.find_one({
"type": "asset",
"name": ASSET,
"parent": project["_id"]
})
assert all([project, asset]), ("Could not find current project or "
"asset '%s'" % ASSET)
subset = self.get_subset(asset, instance)
# get next version
latest_version = io.find_one(
{
"type": "version",
"parent": subset["_id"]
},
{"name": True},
sort=[("name", -1)]
)
next_version = 1
if latest_version is not None:
next_version += latest_version["name"]
self.log.info("Verifying version from assumed destination")
assumed_data = instance.data["assumedTemplateData"]
assumed_version = assumed_data["version"]
if assumed_version != next_version:
raise AttributeError("Assumed version 'v{0:03d}' does not match"
"next version in database "
"('v{1:03d}')".format(assumed_version,
next_version))
self.log.debug("Next version: v{0:03d}".format(next_version))
version_data = self.create_version_data(context, instance)
version = self.create_version(subset=subset,
version_number=next_version,
locations=[LOCATION],
data=version_data)
self.log.debug("Creating version ...")
version_id = io.insert_one(version).inserted_id
# Write to disk
# _
# | |
# _| |_
# ____\ /
# |\ \ / \
# \ \ v \
# \ \________.
# \|________|
#
root = api.registered_root()
hierarchy = ""
parents = io.find_one({
"type": 'asset',
"name": ASSET
})['data']['parents']
if parents and len(parents) > 0:
# hierarchy = os.path.sep.join(hierarchy)
hierarchy = os.path.join(*parents)
template_data = {"root": root,
"project": {"name": PROJECT,
"code": project['data']['code']},
"silo": asset['silo'],
"asset": ASSET,
"family": instance.data['family'],
"subset": subset["name"],
"version": int(version["name"]),
"hierarchy": hierarchy}
# template_publish = project["config"]["template"]["publish"]
anatomy = instance.context.data['anatomy']
# Find the representations to transfer amongst the files
# Each should be a single representation (as such, a single extension)
representations = []
destination_list = []
if 'transfers' not in instance.data:
instance.data['transfers'] = []
for files in instance.data["files"]:
# Collection
# _______
# |______|\
# | |\|
# | ||
# | ||
# | ||
# |_______|
#
if isinstance(files, list):
collection = files
# Assert that each member has identical suffix
_, ext = os.path.splitext(collection[0])
assert all(ext == os.path.splitext(name)[1]
for name in collection), (
"Files had varying suffixes, this is a bug"
)
assert not any(os.path.isabs(name) for name in collection)
template_data["representation"] = ext[1:]
for fname in collection:
src = os.path.join(stagingdir, fname)
anatomy_filled = anatomy.format(template_data)
dst = anatomy_filled["publish"]["path"]
instance.data["transfers"].append([src, dst])
template = anatomy.templates["publish"]["path"]
else:
# Single file
# _______
# | |\
# | |
# | |
# | |
# |_______|
#
fname = files
assert not os.path.isabs(fname), (
"Given file name is a full path"
)
_, ext = os.path.splitext(fname)
template_data["representation"] = ext[1:]
src = os.path.join(stagingdir, fname)
anatomy_filled = anatomy.format(template_data)
dst = anatomy_filled["publish"]["path"]
instance.data["transfers"].append([src, dst])
template = anatomy.templates["publish"]["path"]
representation = {
"schema": "pype:representation-2.0",
"type": "representation",
"parent": version_id,
"name": ext[1:],
"data": {'path': dst, 'template': template},
"dependencies": instance.data.get("dependencies", "").split(),
# Imprint shortcut to context
# for performance reasons.
"context": {
"root": root,
"project": {"name": PROJECT,
"code": project['data']['code']},
'task': api.Session["AVALON_TASK"],
"silo": asset['silo'],
"asset": ASSET,
"family": instance.data['family'],
"subset": subset["name"],
"version": version["name"],
"hierarchy": hierarchy,
"representation": ext[1:]
}
}
destination_list.append(dst)
instance.data['destination_list'] = destination_list
representations.append(representation)
self.log.info("Registering {} items".format(len(representations)))
io.insert_many(representations)
def integrate(self, instance):
"""Move the files
Through `instance.data["transfers"]`
Args:
instance: the instance to integrate
"""
transfers = instance.data.get("transfers", list())
for src, dest in transfers:
self.log.info("Copying file .. {} -> {}".format(src, dest))
self.copy_file(src, dest)
# Produce hardlinked copies
# Note: hardlink can only be produced between two files on the same
# server/disk and editing one of the two will edit both files at once.
# As such it is recommended to only make hardlinks between static files
# to ensure publishes remain safe and non-edited.
hardlinks = instance.data.get("hardlinks", list())
for src, dest in hardlinks:
self.log.info("Hardlinking file .. {} -> {}".format(src, dest))
self.hardlink_file(src, dest)
def copy_file(self, src, dst):
""" Copy given source to destination
Arguments:
src (str): the source file which needs to be copied
dst (str): the destination of the source file
Returns:
None
"""
dirname = os.path.dirname(dst)
try:
os.makedirs(dirname)
except OSError as e:
if e.errno == errno.EEXIST:
pass
else:
self.log.critical("An unexpected error occurred.")
raise
shutil.copy(src, dst)
def hardlink_file(self, src, dst):
dirname = os.path.dirname(dst)
try:
os.makedirs(dirname)
except OSError as e:
if e.errno == errno.EEXIST:
pass
else:
self.log.critical("An unexpected error occurred.")
raise
filelink.create(src, dst, filelink.HARDLINK)
def get_subset(self, asset, instance):
subset = io.find_one({
"type": "subset",
"parent": asset["_id"],
"name": instance.data["subset"]
})
if subset is None:
subset_name = instance.data["subset"]
self.log.info("Subset '%s' not found, creating.." % subset_name)
_id = io.insert_one({
"schema": "avalon-core:subset-2.0",
"type": "subset",
"name": subset_name,
"data": {},
"parent": asset["_id"]
}).inserted_id
subset = io.find_one({"_id": _id})
return subset
def create_version(self, subset, version_number, locations, data=None):
""" Copy given source to destination
Args:
subset (dict): the registered subset of the asset
version_number (int): the version number
locations (list): the currently registered locations
Returns:
dict: collection of data to create a version
"""
# Imprint currently registered location
version_locations = [location for location in locations if
location is not None]
return {"schema": "avalon-core:version-2.0",
"type": "version",
"parent": subset["_id"],
"name": version_number,
"locations": version_locations,
"data": data}
def create_version_data(self, context, instance):
"""Create the data collection for the version
Args:
context: the current context
instance: the current instance being published
Returns:
dict: the required information with instance.data as key
"""
families = []
current_families = instance.data.get("families", list())
instance_family = instance.data.get("family", None)
if instance_family is not None:
families.append(instance_family)
families += current_families
self.log.debug("Registered root: {}".format(api.registered_root()))
# create relative source path for DB
try:
source = instance.data['source']
except KeyError:
source = context.data["currentFile"]
relative_path = os.path.relpath(source, api.registered_root())
source = os.path.join("{root}", relative_path).replace("\\", "/")
self.log.debug("Source: {}".format(source))
version_data = {"families": families,
"time": context.data["time"],
"author": context.data["user"],
"source": source,
"comment": context.data.get("comment"),
"machine": context.data.get("machine"),
"fps": context.data.get("fps")}
# Include optional data if present in
optionals = [
"frameStart", "frameEnd", "step", "handles", "sourceHashes"
]
for key in optionals:
if key in instance.data:
version_data[key] = instance.data[key]
return version_data

View file

@@ -1,147 +0,0 @@
import pyblish.api
import os
from avalon import io, api
class IntegrateAssumedDestination(pyblish.api.InstancePlugin):
"""Generate the assumed destination path where the file will be stored"""
label = "Integrate Assumed Destination"
order = pyblish.api.IntegratorOrder - 0.05
families = ["clip", "projectfile", "plate"]
def process(self, instance):
anatomy = instance.context.data['anatomy']
self.create_destination_template(instance, anatomy)
template_data = instance.data["assumedTemplateData"]
# self.log.info(anatomy.templates)
anatomy_filled = anatomy.format(template_data)
# self.log.info(anatomy_filled)
mock_template = anatomy_filled["publish"]["path"]
# For now assume resources end up in a "resources" folder in the
# published folder
mock_destination = os.path.join(os.path.dirname(mock_template),
"resources")
# Clean the path
mock_destination = os.path.abspath(
os.path.normpath(mock_destination)).replace("\\", "/")
# Define resource destination and transfers
resources = instance.data.get("resources", list())
transfers = instance.data.get("transfers", list())
for resource in resources:
# Add destination to the resource
source_filename = os.path.basename(
resource["source"]).replace("\\", "/")
destination = os.path.join(mock_destination, source_filename)
# Force forward slashes to fix issue with software unable
# to work correctly with backslashes in specific scenarios
# (e.g. escape characters in PLN-151 V-Ray UDIM)
destination = destination.replace("\\", "/")
resource['destination'] = destination
# Collect transfers for the individual files of the resource
# e.g. all individual files of a cache or UDIM textures.
files = resource['files']
for fsrc in files:
fname = os.path.basename(fsrc)
fdest = os.path.join(
mock_destination, fname).replace("\\", "/")
transfers.append([fsrc, fdest])
instance.data["resources"] = resources
instance.data["transfers"] = transfers
def create_destination_template(self, instance, anatomy):
"""Create a filepath based on the current data available
Example template:
{root}/{project}/{asset}/publish/{subset}/v{version:0>3}/
{subset}.{representation}
Args:
instance: the instance to publish
Returns:
file path (str)
"""
# get all the stuff from the database
subset_name = instance.data["subset"]
self.log.info(subset_name)
asset_name = instance.data["asset"]
project_name = api.Session["AVALON_PROJECT"]
a_template = anatomy.templates
project = io.find_one(
{"type": "project", "name": project_name},
projection={"config": True, "data": True}
)
template = a_template['publish']['path']
# anatomy = instance.context.data['anatomy']
asset = io.find_one({
"type": "asset",
"name": asset_name,
"parent": project["_id"]
})
assert asset, ("No asset found by the name '{}' "
"in project '{}'".format(asset_name, project_name))
subset = io.find_one({
"type": "subset",
"name": subset_name,
"parent": asset["_id"]
})
# assume there is no version yet, we start at `1`
version = None
version_number = 1
if subset is not None:
version = io.find_one(
{
"type": "version",
"parent": subset["_id"]
},
sort=[("name", -1)]
)
# if there is a subset there ought to be version
if version is not None:
version_number += version["name"]
if instance.data.get('version'):
version_number = int(instance.data.get('version'))
padding = int(a_template['render']['padding'])
hierarchy = asset['data']['parents']
if hierarchy:
# hierarchy = os.path.sep.join(hierarchy)
hierarchy = "/".join(hierarchy)
template_data = {"root": api.Session["AVALON_PROJECTS"],
"project": {"name": project_name,
"code": project['data']['code']},
"family": instance.data['family'],
"asset": asset_name,
"subset": subset_name,
"frame": ('#' * padding),
"version": version_number,
"hierarchy": hierarchy,
"representation": "TEMP"}
instance.data["assumedTemplateData"] = template_data
self.log.info(template_data)
instance.data["template"] = template

View file

@@ -2,8 +2,11 @@ import os
from os.path import getsize
import logging
import sys
import copy
import clique
import errno
from pymongo import DeleteOne, InsertOne
import pyblish.api
from avalon import api, io
from avalon.vendor import filelink
@@ -100,144 +103,147 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
def register(self, instance):
# Required environment variables
PROJECT = api.Session["AVALON_PROJECT"]
ASSET = instance.data.get("asset") or api.Session["AVALON_ASSET"]
TASK = instance.data.get("task") or api.Session["AVALON_TASK"]
LOCATION = api.Session["AVALON_LOCATION"]
anatomy_data = instance.data["anatomyData"]
io.install()
context = instance.context
# Atomicity
#
# Guarantee atomic publishes - each asset contains
# an identical set of members.
# __
# / o
# / \
# | o |
# \ /
# o __/
#
# for result in context.data["results"]:
# if not result["success"]:
# self.log.debug(result)
# exc_type, exc_value, exc_traceback = result["error_info"]
# extracted_traceback = traceback.extract_tb(exc_traceback)[-1]
# self.log.debug(
# "Error at line {}: \"{}\"".format(
# extracted_traceback[1], result["error"]
# )
# )
# assert all(result["success"] for result in context.data["results"]),(
# "Atomicity not held, aborting.")
# Assemble
#
# |
# v
# ---> <----
# ^
# |
#
project_entity = instance.data["projectEntity"]
asset_name = instance.data["asset"]
asset_entity = instance.data.get("assetEntity")
if not asset_entity:
asset_entity = io.find_one({
"type": "asset",
"name": asset_name,
"parent": project_entity["_id"]
})
assert asset_entity, (
"No asset found by the name \"{0}\" in project \"{1}\""
).format(asset_name, project_entity["name"])
instance.data["assetEntity"] = asset_entity
# update anatomy data with asset specific keys
# - name should already been set
hierarchy = ""
parents = asset_entity["data"]["parents"]
if parents:
hierarchy = "/".join(parents)
anatomy_data["hierarchy"] = hierarchy
task_name = instance.data.get("task")
if task_name:
anatomy_data["task"] = task_name
stagingdir = instance.data.get("stagingDir")
if not stagingdir:
self.log.info('''{} is missing reference to staging
directory Will try to get it from
representation'''.format(instance))
self.log.info((
"{0} is missing reference to staging directory."
" Will try to get it from representation."
).format(instance))
# extra check if stagingDir actually exists and is available
self.log.debug("Establishing staging directory @ %s" % stagingdir)
else:
self.log.debug(
"Establishing staging directory @ {0}".format(stagingdir)
)
# Ensure at least one file is set up for transfer in staging dir.
repres = instance.data.get("representations", None)
repres = instance.data.get("representations")
assert repres, "Instance has no files to transfer"
assert isinstance(repres, (list, tuple)), (
"Instance 'files' must be a list, got: {0}".format(repres)
"Instance 'files' must be a list, got: {0} {1}".format(
str(type(repres)), str(repres)
)
)
# FIXME: io is not initialized at this point for shell host
io.install()
project = io.find_one({"type": "project"})
subset = self.get_subset(asset_entity, instance)
asset = io.find_one({
"type": "asset",
"name": ASSET,
"parent": project["_id"]
})
assert all([project, asset]), ("Could not find current project or "
"asset '%s'" % ASSET)
subset = self.get_subset(asset, instance)
# get next version
latest_version = io.find_one(
{
"type": "version",
"parent": subset["_id"]
},
{"name": True},
sort=[("name", -1)]
)
next_version = 1
if latest_version is not None:
next_version += latest_version["name"]
if instance.data.get('version'):
next_version = int(instance.data.get('version'))
self.log.debug("Next version: v{0:03d}".format(next_version))
version_number = instance.data["version"]
self.log.debug("Next version: v{}".format(version_number))
version_data = self.create_version_data(context, instance)
version_data_instance = instance.data.get('versionData')
if version_data_instance:
version_data.update(version_data_instance)
version = self.create_version(subset=subset,
version_number=next_version,
locations=[LOCATION],
data=version_data)
# TODO rename method from `create_version` to
# `prepare_version` or similar...
version = self.create_version(
subset=subset,
version_number=version_number,
data=version_data
)
self.log.debug("Creating version ...")
new_repre_names_low = [_repre["name"].lower() for _repre in repres]
existing_version = io.find_one({
'type': 'version',
'parent': subset["_id"],
'name': next_version
'name': version_number
})
if existing_version is None:
version_id = io.insert_one(version).inserted_id
else:
# Check if instance have set `append` mode which cause that
# only replicated representations are set to archive
append_repres = instance.data.get("append", False)
# Update version data
# TODO query by _id and
io.update_many({
'type': 'version',
'parent': subset["_id"],
'name': next_version
}, {'$set': version}
)
'name': version_number
}, {
'$set': version
})
version_id = existing_version['_id']
# Find representations of existing version and archive them
current_repres = list(io.find({
"type": "representation",
"parent": version_id
}))
bulk_writes = []
for repre in current_repres:
if append_repres:
# archive only duplicated representations
if repre["name"].lower() not in new_repre_names_low:
continue
# Representation must change type,
# `_id` must be stored to other key and replaced with new
# - that is because new representations should have same ID
repre_id = repre["_id"]
bulk_writes.append(DeleteOne({"_id": repre_id}))
repre["orig_id"] = repre_id
repre["_id"] = io.ObjectId()
repre["type"] = "archived_representation"
bulk_writes.append(InsertOne(repre))
# bulk updates
if bulk_writes:
io._database[io.Session["AVALON_PROJECT"]].bulk_write(
bulk_writes
)
existing_repres = list(io.find({
"parent": version_id,
"type": "archived_representation"
}))
instance.data['version'] = version['name']
# Write to disk
# _
# | |
# _| |_
# ____\ /
# |\ \ / \
# \ \ v \
# \ \________.
# \|________|
#
root = api.registered_root()
hierarchy = ""
parents = io.find_one({
"type": 'asset',
"name": ASSET
})['data']['parents']
if parents and len(parents) > 0:
# hierarchy = os.path.sep.join(hierarchy)
hierarchy = os.path.join(*parents)
intent = context.data.get("intent")
if intent is not None:
anatomy_data["intent"] = intent
anatomy = instance.context.data['anatomy']
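The archiving block above relies on pymongo bulk operations: each clashing representation is deleted under its original _id and re-inserted as an archived_representation with a fresh id, which frees the original ids for the incoming representations. A condensed sketch (client, database and documents are illustrative):

from bson import ObjectId
from pymongo import DeleteOne, InsertOne, MongoClient

collection = MongoClient()["avalon"]["demo_project"]  # hypothetical collection
current_repres = [{"_id": ObjectId(), "name": "exr", "type": "representation"}]

bulk_writes = []
for repre in current_repres:
    bulk_writes.append(DeleteOne({"_id": repre["_id"]}))
    repre["orig_id"] = repre["_id"]  # keep the original id around
    repre["_id"] = ObjectId()        # archived copy gets a new id
    repre["type"] = "archived_representation"
    bulk_writes.append(InsertOne(repre))

if bulk_writes:
    collection.bulk_write(bulk_writes)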
@@ -250,31 +256,10 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
instance.data['transfers'] = []
for idx, repre in enumerate(instance.data["representations"]):
# Collection
# _______
# |______|\
# | |\|
# | ||
# | ||
# | ||
# |_______|
#
# create template data for Anatomy
template_data = {"root": root,
"project": {"name": PROJECT,
"code": project['data']['code']},
"silo": asset.get('silo'),
"task": TASK,
"asset": ASSET,
"family": instance.data['family'],
"subset": subset["name"],
"version": int(version["name"]),
"hierarchy": hierarchy}
# Add datetime data to template data
datetime_data = context.data.get("datetimeData") or {}
template_data.update(datetime_data)
template_data = copy.deepcopy(anatomy_data)
if intent is not None:
template_data["intent"] = intent
resolution_width = repre.get("resolutionWidth")
resolution_height = repre.get("resolutionHeight")
@@ -292,6 +277,7 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
stagingdir = repre['stagingDir']
if repre.get('anatomy_template'):
template_name = repre['anatomy_template']
template = os.path.normpath(
anatomy.templates[template_name]["path"])
@@ -322,7 +308,6 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
template_filled = anatomy_filled[template_name]["path"]
if repre_context is None:
repre_context = template_filled.used_values
test_dest_files.append(
os.path.normpath(template_filled)
)
@@ -338,16 +323,21 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
index_frame_start = None
if repre.get("frameStart"):
frame_start_padding = anatomy.templates["render"]["padding"]
frame_start_padding = (
anatomy.templates["render"]["padding"]
)
index_frame_start = int(repre.get("frameStart"))
# exception for slate workflow
if "slate" in instance.data["families"]:
if index_frame_start and "slate" in instance.data["families"]:
index_frame_start -= 1
dst_padding_exp = src_padding_exp
dst_start_frame = None
for i in src_collection.indexes:
# TODO 1.) do not count padding in each index iteration
# 2.) do not count dst_padding from src_padding before
# index_frame_start check
src_padding = src_padding_exp % i
src_file_name = "{0}{1}{2}".format(
@@ -375,7 +365,6 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
if not dst_start_frame:
dst_start_frame = dst_padding
dst = "{0}{1}{2}".format(
dst_head,
dst_start_frame,
@@ -419,8 +408,21 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
continue
repre_context[key] = template_data[key]
# Use previous representation's id if there are any
repre_id = None
repre_name_low = repre["name"].lower()
for _repre in existing_repres:
# NOTE should we check lowered names?
if repre_name_low == _repre["name"]:
repre_id = _repre["orig_id"]
break
# Create new id if existing representations does not match
if repre_id is None:
repre_id = io.ObjectId()
representation = {
"_id": io.ObjectId(),
"_id": repre_id,
"schema": "pype:representation-2.0",
"type": "representation",
"parent": version_id,
@@ -437,7 +439,9 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
representation["context"]["output"] = repre['outputName']
if sequence_repre and repre.get("frameStart"):
representation['context']['frame'] = src_padding_exp % int(repre.get("frameStart"))
representation['context']['frame'] = (
src_padding_exp % int(repre.get("frameStart"))
)
self.log.debug("__ representation: {}".format(representation))
destination_list.append(dst)
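src_padding_exp above is a printf-style pattern derived from the anatomy's render padding; assuming a padding value of 4, frame strings come out like this:

padding = 4  # assumed value of anatomy.templates["render"]["padding"]
src_padding_exp = "%0{}d".format(padding)

assert src_padding_exp % 7 == "0007"
assert src_padding_exp % 1001 == "1001"

# slate workflow: the published sequence starts one frame earlier
index_frame_start = 1001 - 1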
@@ -446,6 +450,13 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
representations.append(representation)
self.log.debug("__ representations: {}".format(representations))
# Remove old representations if there are any (before insertion of new)
if existing_repres:
repre_ids_to_remove = []
for repre in existing_repres:
repre_ids_to_remove.append(repre["_id"])
io.delete_many({"_id": {"$in": repre_ids_to_remove}})
self.log.debug("__ representations: {}".format(representations))
for rep in instance.data["representations"]:
self.log.debug("__ represNAME: {}".format(rep['name']))
@@ -548,14 +559,14 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
filelink.create(src, dst, filelink.HARDLINK)
def get_subset(self, asset, instance):
subset_name = instance.data["subset"]
subset = io.find_one({
"type": "subset",
"parent": asset["_id"],
"name": instance.data["subset"]
"name": subset_name
})
if subset is None:
subset_name = instance.data["subset"]
self.log.info("Subset '%s' not found, creating.." % subset_name)
self.log.debug("families. %s" % instance.data.get('families'))
self.log.debug(
@@ -584,26 +595,21 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
return subset
def create_version(self, subset, version_number, locations, data=None):
def create_version(self, subset, version_number, data=None):
""" Copy given source to destination
Args:
subset (dict): the registered subset of the asset
version_number (int): the version number
locations (list): the currently registered locations
Returns:
dict: collection of data to create a version
"""
# Imprint currently registered location
version_locations = [location for location in locations if
location is not None]
return {"schema": "pype:version-3.0",
"type": "version",
"parent": subset["_id"],
"name": version_number,
"locations": version_locations,
"data": data}
def create_version_data(self, context, instance):
@@ -646,6 +652,10 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
"fps": context.data.get(
"fps", instance.data.get("fps"))}
intent = context.data.get("intent")
if intent is not None:
version_data["intent"] = intent
# Include optional data if present in instance.data
optionals = [
"frameStart", "frameEnd", "step", "handles",

View file

@@ -1,423 +0,0 @@
import os
import logging
import shutil
import clique
import errno
import pyblish.api
from avalon import api, io
log = logging.getLogger(__name__)
class IntegrateFrames(pyblish.api.InstancePlugin):
"""Resolve any dependency issies
This plug-in resolves any paths which, if not updated might break
the published file.
The order of families is important, when working with lookdev you want to
first publish the texture, update the texture paths in the nodes and then
publish the shading network. Same goes for file dependent assets.
"""
label = "Integrate Frames"
order = pyblish.api.IntegratorOrder
families = ["imagesequence"]
family_targets = [".frames", ".local", ".review", "imagesequence", "render", "source"]
exclude_families = ["clip"]
def process(self, instance):
if [ef for ef in self.exclude_families
if instance.data["family"] in ef]:
return
families = [f for f in instance.data["families"]
for search in self.family_targets
if search in f]
if not families:
return
self.register(instance)
# self.log.info("Integrating Asset in to the database ...")
# self.log.info("instance.data: {}".format(instance.data))
if instance.data.get('transfer', True):
self.integrate(instance)
def register(self, instance):
# Required environment variables
PROJECT = api.Session["AVALON_PROJECT"]
ASSET = instance.data.get("asset") or api.Session["AVALON_ASSET"]
LOCATION = api.Session["AVALON_LOCATION"]
context = instance.context
# Atomicity
#
# Guarantee atomic publishes - each asset contains
# an identical set of members.
# __
# / o
# / \
# | o |
# \ /
# o __/
#
assert all(result["success"] for result in context.data["results"]), (
"Atomicity not held, aborting.")
# Assemble
#
# |
# v
# ---> <----
# ^
# |
#
stagingdir = instance.data.get("stagingDir")
assert stagingdir, ("Incomplete instance \"%s\": "
"Missing reference to staging area." % instance)
# extra check if stagingDir actually exists and is available
self.log.debug("Establishing staging directory @ %s" % stagingdir)
project = io.find_one({"type": "project"})
asset = io.find_one({
"type": "asset",
"name": ASSET,
"parent": project["_id"]
})
assert all([project, asset]), ("Could not find current project or "
"asset '%s'" % ASSET)
subset = self.get_subset(asset, instance)
# get next version
latest_version = io.find_one(
{
"type": "version",
"parent": subset["_id"]
},
{"name": True},
sort=[("name", -1)]
)
next_version = 1
if latest_version is not None:
next_version += latest_version["name"]
self.log.info("Verifying version from assumed destination")
assumed_data = instance.data["assumedTemplateData"]
assumed_version = assumed_data["version"]
if assumed_version != next_version:
raise AttributeError("Assumed version 'v{0:03d}' does not match "
"next version in database "
"('v{1:03d}')".format(assumed_version,
next_version))
if instance.data.get('version'):
next_version = int(instance.data.get('version'))
self.log.debug("Next version: v{0:03d}".format(next_version))
version_data = self.create_version_data(context, instance)
version = self.create_version(subset=subset,
version_number=next_version,
locations=[LOCATION],
data=version_data)
self.log.debug("Creating version ...")
version_id = io.insert_one(version).inserted_id
# Write to disk
# _
# | |
# _| |_
# ____\ /
# |\ \ / \
# \ \ v \
# \ \________.
# \|________|
#
root = api.registered_root()
hierarchy = ""
parents = io.find_one({"type": 'asset', "name": ASSET})[
'data']['parents']
if parents and len(parents) > 0:
# hierarchy = os.path.sep.join(hierarchy)
hierarchy = os.path.join(*parents)
template_data = {"root": root,
"project": {"name": PROJECT,
"code": project['data']['code']},
"silo": asset.get('silo'),
"task": api.Session["AVALON_TASK"],
"asset": ASSET,
"family": instance.data['family'],
"subset": subset["name"],
"version": int(version["name"]),
"hierarchy": hierarchy}
# template_publish = project["config"]["template"]["publish"]
anatomy = instance.context.data['anatomy']
# Find the representations to transfer amongst the files
# Each should be a single representation (as such, a single extension)
representations = []
destination_list = []
if 'transfers' not in instance.data:
instance.data['transfers'] = []
for files in instance.data["files"]:
# Collection
# _______
# |______|\
# | |\|
# | ||
# | ||
# | ||
# |_______|
#
if isinstance(files, list):
src_collections, remainder = clique.assemble(files)
src_collection = src_collections[0]
# Assert that each member has identical suffix
src_head = src_collection.format("{head}")
src_tail = ext = src_collection.format("{tail}")
test_dest_files = list()
for i in [1, 2]:
template_data["representation"] = src_tail[1:]
template_data["frame"] = src_collection.format(
"{padding}") % i
anatomy_filled = anatomy.format(template_data)
test_dest_files.append(anatomy_filled["render"]["path"])
dst_collections, remainder = clique.assemble(test_dest_files)
dst_collection = dst_collections[0]
dst_head = dst_collection.format("{head}")
dst_tail = dst_collection.format("{tail}")
for i in src_collection.indexes:
src_padding = src_collection.format("{padding}") % i
src_file_name = "{0}{1}{2}".format(
src_head, src_padding, src_tail)
dst_padding = dst_collection.format("{padding}") % i
dst = "{0}{1}{2}".format(dst_head, dst_padding, dst_tail)
src = os.path.join(stagingdir, src_file_name)
instance.data["transfers"].append([src, dst])
else:
# Single file
# _______
# | |\
# | |
# | |
# | |
# |_______|
#
template_data.pop("frame", None)
fname = files
self.log.info("fname: {}".format(fname))
assert not os.path.isabs(fname), (
"Given file name is a full path"
)
_, ext = os.path.splitext(fname)
template_data["representation"] = ext[1:]
src = os.path.join(stagingdir, fname)
anatomy_filled = anatomy.format(template_data)
dst = anatomy_filled["render"]["path"]
instance.data["transfers"].append([src, dst])
if ext[1:] not in ["jpeg", "jpg", "mov", "mp4", "wav"]:
template_data["frame"] = "#" * int(anatomy_filled["render"]["padding"])
anatomy_filled = anatomy.format(template_data)
path_to_save = anatomy_filled["render"]["path"]
template = anatomy.templates["render"]["path"]
self.log.debug("path_to_save: {}".format(path_to_save))
representation = {
"schema": "pype:representation-2.0",
"type": "representation",
"parent": version_id,
"name": ext[1:],
"data": {'path': path_to_save, 'template': template},
"dependencies": instance.data.get("dependencies", "").split(),
# Imprint shortcut to context
# for performance reasons.
"context": {
"root": root,
"project": {
"name": PROJECT,
"code": project['data']['code']
},
"task": api.Session["AVALON_TASK"],
"silo": asset['silo'],
"asset": ASSET,
"family": instance.data['family'],
"subset": subset["name"],
"version": int(version["name"]),
"hierarchy": hierarchy,
"representation": ext[1:]
}
}
destination_list.append(dst)
instance.data['destination_list'] = destination_list
representations.append(representation)
self.log.info("Registering {} items".format(len(representations)))
io.insert_many(representations)
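# A self-contained sketch of the clique pattern used throughout register(),
# with hypothetical file names:
#     import clique
#     collections, remainder = clique.assemble(
#         ["render.1001.exr", "render.1002.exr", "render.1003.exr"]
#     )
#     col = collections[0]
#     head = col.format("{head}")        # "render."
#     tail = col.format("{tail}")        # ".exr"
#     padding = col.format("{padding}")  # "%04d"
#     for index in col.indexes:          # 1001, 1002, 1003
#         file_name = "{0}{1}{2}".format(head, padding % index, tail)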
def integrate(self, instance):
"""Move the files
Through `instance.data["transfers"]`
Args:
instance: the instance to integrate
"""
transfers = instance.data["transfers"]
for src, dest in transfers:
src = os.path.normpath(src)
dest = os.path.normpath(dest)
if src in dest:
continue
self.log.info("Copying file .. {} -> {}".format(src, dest))
self.copy_file(src, dest)
def copy_file(self, src, dst):
""" Copy given source to destination
Arguments:
src (str): the source file which needs to be copied
dst (str): the destination of the source file
Returns:
None
"""
dirname = os.path.dirname(dst)
try:
os.makedirs(dirname)
except OSError as e:
if e.errno == errno.EEXIST:
pass
else:
self.log.critical("An unexpected error occurred.")
raise
shutil.copy(src, dst)
def get_subset(self, asset, instance):
subset = io.find_one({
"type": "subset",
"parent": asset["_id"],
"name": instance.data["subset"]
})
if subset is None:
subset_name = instance.data["subset"]
self.log.info("Subset '%s' not found, creating.." % subset_name)
_id = io.insert_one({
"schema": "pype:subset-2.0",
"type": "subset",
"name": subset_name,
"data": {},
"parent": asset["_id"]
}).inserted_id
subset = io.find_one({"_id": _id})
return subset
def create_version(self, subset, version_number, locations, data=None):
""" Copy given source to destination
Args:
subset (dict): the registered subset of the asset
version_number (int): the version number
locations (list): the currently registered locations
Returns:
dict: collection of data to create a version
"""
# Imprint currently registered location
version_locations = [location for location in locations if
location is not None]
return {"schema": "pype:version-2.0",
"type": "version",
"parent": subset["_id"],
"name": version_number,
"locations": version_locations,
"data": data}
def create_version_data(self, context, instance):
"""Create the data collection for the version
Args:
context: the current context
instance: the current instance being published
Returns:
dict: the required information with instance.data as key
"""
families = []
current_families = instance.data.get("families", list())
instance_family = instance.data.get("family", None)
if instance_family is not None:
families.append(instance_family)
families += current_families
try:
source = instance.data['source']
except KeyError:
source = context.data["currentFile"]
relative_path = os.path.relpath(source, api.registered_root())
source = os.path.join("{root}", relative_path).replace("\\", "/")
version_data = {"families": families,
"time": context.data["time"],
"author": context.data["user"],
"source": source,
"comment": context.data.get("comment")}
# Include optional data if present in instance.data
optionals = ["frameStart", "frameEnd", "step",
"handles", "colorspace", "fps", "outputDir"]
for key in optionals:
if key in instance.data:
version_data[key] = instance.data.get(key, None)
return version_data

View file

@@ -0,0 +1,49 @@
import os
import pyblish.api
class IntegrateResourcesPath(pyblish.api.InstancePlugin):
"""Generate directory path where the files and resources will be stored"""
label = "Integrate Resources Path"
order = pyblish.api.IntegratorOrder - 0.05
families = ["clip", "projectfile", "plate"]
def process(self, instance):
resources = instance.data.get("resources") or []
transfers = instance.data.get("transfers") or []
if not resources and not transfers:
self.log.debug(
"Instance does not have `resources` and `transfers`"
)
return
resources_folder = instance.data["resourcesDir"]
# Define resource destination and transfers
for resource in resources:
# Add destination to the resource
source_filename = os.path.basename(
resource["source"]).replace("\\", "/")
destination = os.path.join(resources_folder, source_filename)
# Force forward slashes to fix issue with software unable
# to work correctly with backslashes in specific scenarios
# (e.g. escape characters in PLN-151 V-Ray UDIM)
destination = destination.replace("\\", "/")
resource['destination'] = destination
# Collect transfers for the individual files of the resource
# e.g. all individual files of a cache or UDIM textures.
files = resource['files']
for fsrc in files:
fname = os.path.basename(fsrc)
fdest = os.path.join(
resources_folder, fname
).replace("\\", "/")
transfers.append([fsrc, fdest])
instance.data["resources"] = resources
instance.data["transfers"] = transfers

View file

@@ -1,43 +0,0 @@
import pyblish.api
import os
class ValidateTemplates(pyblish.api.ContextPlugin):
"""Check if all templates were filled"""
label = "Validate Templates"
order = pyblish.api.ValidatorOrder - 0.1
hosts = ["maya", "houdini", "nuke"]
def process(self, context):
anatomy = context.data["anatomy"]
if not anatomy:
raise RuntimeError("Did not find anatomy")
else:
data = {
"root": os.environ["PYPE_STUDIO_PROJECTS_PATH"],
"project": {"name": "D001_projectsx",
"code": "prjX"},
"ext": "exr",
"version": 3,
"task": "animation",
"asset": "sh001",
"app": "maya",
"hierarchy": "ep101/sq01/sh010"}
anatomy_filled = anatomy.format(data)
self.log.info(anatomy_filled)
data = {"root": os.environ["PYPE_STUDIO_PROJECTS_PATH"],
"project": {"name": "D001_projectsy",
"code": "prjY"},
"ext": "abc",
"version": 1,
"task": "lookdev",
"asset": "bob",
"app": "maya",
"hierarchy": "ep101/sq01/bob"}
anatomy_filled = context.data["anatomy"].format(data)
self.log.info(anatomy_filled["work"]["folder"])

View file

@@ -35,7 +35,7 @@ class CollectMayaScene(pyblish.api.ContextPlugin):
"subset": subset,
"asset": os.getenv("AVALON_ASSET", None),
"label": subset,
"publish": False,
"publish": True,
"family": 'workfile',
"families": ['workfile'],
"setMembers": [current_file]

View file

@@ -1,6 +1,7 @@
import os
import sys
import json
import copy
import tempfile
import contextlib
import subprocess
@@ -330,10 +331,9 @@ class ExtractLook(pype.api.Extractor):
maya_path))
def resource_destination(self, instance, filepath, do_maketx):
anatomy = instance.context.data["anatomy"]
self.create_destination_template(instance, anatomy)
resources_dir = instance.data["resourcesDir"]
# Compute destination location
basename, ext = os.path.splitext(os.path.basename(filepath))
@@ -343,7 +343,7 @@ class ExtractLook(pype.api.Extractor):
ext = ".tx"
return os.path.join(
instance.data["assumedDestination"], "resources", basename + ext
resources_dir, basename + ext
)
def _process_texture(self, filepath, do_maketx, staging, linearise, force):
@@ -407,97 +407,3 @@ class ExtractLook(pype.api.Extractor):
return converted, COPY, texture_hash
return filepath, COPY, texture_hash
def create_destination_template(self, instance, anatomy):
"""Create a filepath based on the current data available
Example template:
{root}/{project}/{silo}/{asset}/publish/{subset}/v{version:0>3}/
{subset}.{representation}
Args:
instance: the instance to publish
Returns:
file path (str)
"""
# get all the stuff from the database
subset_name = instance.data["subset"]
self.log.info(subset_name)
asset_name = instance.data["asset"]
project_name = api.Session["AVALON_PROJECT"]
a_template = anatomy.templates
project = io.find_one(
{
"type": "project",
"name": project_name
},
projection={"config": True, "data": True}
)
template = a_template["publish"]["path"]
# anatomy = instance.context.data['anatomy']
asset = io.find_one({
"type": "asset",
"name": asset_name,
"parent": project["_id"]
})
assert asset, ("No asset found by the name '{}' "
"in project '{}'").format(asset_name, project_name)
silo = asset.get("silo")
subset = io.find_one({
"type": "subset",
"name": subset_name,
"parent": asset["_id"]
})
# assume there is no version yet, we start at `1`
version = None
version_number = 1
if subset is not None:
version = io.find_one(
{
"type": "version",
"parent": subset["_id"]
},
sort=[("name", -1)]
)
# if there is a subset there ought to be version
if version is not None:
version_number += version["name"]
if instance.data.get("version"):
version_number = int(instance.data.get("version"))
padding = int(a_template["render"]["padding"])
hierarchy = asset["data"]["parents"]
if hierarchy:
# hierarchy = os.path.sep.join(hierarchy)
hierarchy = "/".join(hierarchy)
template_data = {
"root": api.Session["AVALON_PROJECTS"],
"project": {"name": project_name, "code": project["data"]["code"]},
"silo": silo,
"family": instance.data["family"],
"asset": asset_name,
"subset": subset_name,
"frame": ("#" * padding),
"version": version_number,
"hierarchy": hierarchy,
"representation": "TEMP",
}
instance.data["assumedTemplateData"] = template_data
self.log.info(template_data)
instance.data["template"] = template
# We take the parent folder of representation 'filepath'
instance.data["assumedDestination"] = os.path.dirname(
anatomy.format(template_data)["publish"]["path"]
)
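# A minimal sketch of the replacement for the deleted helper above,
# assuming a collector has already filled instance.data["resourcesDir"]:
#     def resource_destination(self, instance, filepath, do_maketx):
#         resources_dir = instance.data["resourcesDir"]
#         basename, ext = os.path.splitext(os.path.basename(filepath))
#         if do_maketx:
#             ext = ".tx"
#         return os.path.join(resources_dir, basename + ext)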

View file

@@ -110,15 +110,7 @@ class ExtractYetiRig(pype.api.Extractor):
self.log.info("Writing metadata file")
# Create assumed destination folder for imageSearchPath
assumed_temp_data = instance.data["assumedTemplateData"]
template = instance.data["template"]
template_formatted = template.format(**assumed_temp_data)
destination_folder = os.path.dirname(template_formatted)
image_search_path = os.path.join(destination_folder, "resources")
image_search_path = os.path.normpath(image_search_path)
image_search_path = resources_dir = instance.data["resourcesDir"]
settings = instance.data.get("rigsettings", None)
if settings:

View file

@@ -52,9 +52,9 @@ class CollectNukeWrites(pyblish.api.InstancePlugin):
output_dir = os.path.dirname(path)
self.log.debug('output dir: {}'.format(output_dir))
# get version to instance for integration
instance.data['version'] = instance.context.data.get(
"version", pype.get_version_from_path(nuke.root().name()))
# # get version to instance for integration
# instance.data['version'] = instance.context.data.get(
# "version", pype.get_version_from_path(nuke.root().name()))
self.log.debug('Write Version: %s' % instance.data('version'))
@@ -92,16 +92,7 @@ class CollectNukeWrites(pyblish.api.InstancePlugin):
# Add version data to instance
version_data = {
"handles": handle_start,
"handleStart": handle_start,
"handleEnd": handle_end,
"frameStart": first_frame + handle_start,
"frameEnd": last_frame - handle_end,
"version": int(instance.data['version']),
"colorspace": node["colorspace"].value(),
"families": ["render"],
"subset": instance.data["subset"],
"fps": instance.context.data["fps"]
}
instance.data["family"] = "write"

View file

@@ -1,5 +1,5 @@
from pyblish import api
import os
class CollectAudio(api.InstancePlugin):
"""Collect audio from tags.
@@ -12,7 +12,7 @@ class CollectAudio(api.InstancePlugin):
"""
# Run just before CollectSubsets
order = api.CollectorOrder + 0.1025
order = api.CollectorOrder + 0.1021
label = "Collect Audio"
hosts = ["nukestudio"]
families = ["clip"]
@@ -21,8 +21,10 @@ class CollectAudio(api.InstancePlugin):
# Exclude non-tagged instances.
tagged = False
for tag in instance.data["tags"]:
family = dict(tag["metadata"]).get("tag.family", "")
tag_data = dict(tag["metadata"])
family = tag_data.get("tag.family", "")
if family.lower() == "audio":
subset = tag_data.get("tag.subset", "Main")
tagged = True
if not tagged:
@@ -40,14 +42,14 @@ class CollectAudio(api.InstancePlugin):
data["family"] = "audio"
data["families"] = ["ftrack"]
subset = ""
for tag in instance.data["tags"]:
tag_data = dict(tag["metadata"])
if "tag.subset" in tag_data:
subset = tag_data["tag.subset"]
data["subset"] = "audio" + subset.title()
data["source"] = data["sourcePath"]
data["label"] = "{} - {} - ({})".format(
data['asset'], data["subset"], os.path.splitext(data["sourcePath"])[
1]
)
self.log.debug("Creating instance with data: {}".format(data))
instance.context.create_instance(**data)
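# A hedged sketch of the tag convention this collector relies on: family
# and subset live in the hiero tag metadata under "tag."-prefixed keys.
#     for tag in instance.data["tags"]:
#         tag_data = dict(tag["metadata"])
#         if tag_data.get("tag.family", "").lower() == "audio":
#             subset = tag_data.get("tag.subset", "Main")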

View file

@@ -1,7 +1,7 @@
import os
from pyblish import api
import hiero
import nuke
class CollectClips(api.ContextPlugin):
@@ -17,7 +17,7 @@ class CollectClips(api.ContextPlugin):
self.log.debug("Created `assetsShared` in context")
context.data["assetsShared"] = dict()
projectdata = context.data["projectData"]
projectdata = context.data["projectEntity"]["data"]
version = context.data.get("version", "001")
sequence = context.data.get("activeSequence")
selection = context.data.get("selection")
@@ -48,7 +48,9 @@ class CollectClips(api.ContextPlugin):
track = item.parent()
source = item.source().mediaSource()
source_path = source.firstpath()
effects = [f for f in item.linkedItems() if f.isEnabled()]
effects = [f for f in item.linkedItems()
if f.isEnabled()
if isinstance(f, hiero.core.EffectTrackItem)]
# If source is *.nk its a comp effect and we need to fetch the
# write node output. This should be improved by parsing the script
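# The stricter filter above, spelled out (hiero API as used in this repo):
#     effects = [
#         f for f in item.linkedItems()
#         if f.isEnabled()
#         and isinstance(f, hiero.core.EffectTrackItem)
#     ]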

View file

@@ -14,7 +14,7 @@ class CollectPlates(api.InstancePlugin):
"""
# Run just before CollectSubsets
order = api.CollectorOrder + 0.1025
order = api.CollectorOrder + 0.1021
label = "Collect Plates"
hosts = ["nukestudio"]
families = ["clip"]
@@ -23,8 +23,10 @@ class CollectPlates(api.InstancePlugin):
# Exclude non-tagged instances.
tagged = False
for tag in instance.data["tags"]:
family = dict(tag["metadata"]).get("tag.family", "")
tag_data = dict(tag["metadata"])
family = tag_data.get("tag.family", "")
if family.lower() == "plate":
subset = tag_data.get("tag.subset", "Main")
tagged = True
break
@@ -34,29 +36,27 @@ class CollectPlates(api.InstancePlugin):
"\"plate\"".format(instance)
)
return
self.log.debug("__ subset: `{}`".format(instance.data["subset"]))
# if "audio" in instance.data["subset"]:
# return
# Collect data.
data = {}
for key, value in instance.data.iteritems():
data[key] = value
self.log.debug("__ family: `{}`".format(family))
self.log.debug("__ subset: `{}`".format(subset))
data["family"] = family.lower()
data["families"] = ["ftrack"] + instance.data["families"][1:]
data["source"] = data["sourcePath"]
subset = ""
for tag in instance.data["tags"]:
tag_data = dict(tag["metadata"])
if "tag.subset" in tag_data:
subset = tag_data["tag.subset"]
data["subset"] = data["family"] + subset.title()
data["subset"] = family + subset.title()
data["name"] = data["subset"] + "_" + data["asset"]
data["label"] = "{} - {} - ({})".format(
data['asset'], data["subset"], os.path.splitext(data["sourcePath"])[
1]
)
data['asset'], data["subset"], os.path.splitext(
data["sourcePath"])[1])
if "review" in instance.data["families"]:
data["label"] += " - review"
@@ -146,6 +146,7 @@ class CollectPlatesData(api.InstancePlugin):
head, padding = os.path.splitext(basename)
ext = ext[1:]
padding = padding[1:]
self.log.debug("_ padding: `{}`".format(padding))
# head, padding, ext = source_file.split('.')
source_first_frame = int(padding)
padding = len(padding)
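# A worked example of the split above, with a hypothetical source file
# "plate_main.0995.exr":
#     basename = "plate_main.0995"; ext = "exr"
#     head, padding = os.path.splitext(basename)  # "plate_main", ".0995"
#     padding = padding[1:]                        # "0995"
#     source_first_frame = int(padding)            # 995
#     padding = len(padding)                       # 4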

View file

@@ -16,7 +16,7 @@ class CollectReviews(api.InstancePlugin):
order = api.CollectorOrder + 0.1022
label = "Collect Reviews"
hosts = ["nukestudio"]
families = ["clip"]
families = ["plate"]
def process(self, instance):
# Exclude non-tagged instances.

View file

@@ -30,9 +30,12 @@ class CollectClipTagFrameStart(api.InstancePlugin):
except ValueError:
if "source" in t_value:
source_first = instance.data["sourceFirst"]
if source_first == 0:
source_first = 1
self.log.info("Start frame on `{0}`".format(source_first))
source_in = instance.data["sourceIn"]
handle_start = instance.data["handleStart"]
start_frame = (source_first + source_in) - handle_start
self.log.info("Start frame on `{0}`".format(source_in))
start_frame = source_first + source_in
instance.data["startingFrame"] = start_frame
self.log.info("Start frame on `{0}` set to `{1}`".format(

View file

@@ -10,8 +10,6 @@ class ExtractAudioFile(pype.api.Extractor):
hosts = ["nukestudio"]
families = ["clip", "audio"]
match = api.Intersection
optional = True
active = False
def process(self, instance):
import os

View file

@@ -2,6 +2,7 @@
import os
import json
import re
import copy
import pyblish.api
import tempfile
from avalon import io, api
@@ -75,9 +76,11 @@ class ExtractVideoTracksLuts(pyblish.api.InstancePlugin):
)
data["source"] = data["sourcePath"]
# WARNING instance should not be created in Extractor!
# create new instance
instance = instance.context.create_instance(**data)
# TODO replace line below with `instance.data["resourcesDir"]`
# when instance is created during collection part
dst_dir = self.resource_destination_dir(instance)
# change paths in effects to files
@@ -144,103 +147,109 @@ class ExtractVideoTracksLuts(pyblish.api.InstancePlugin):
return (v, dst)
def resource_destination_dir(self, instance):
anatomy = instance.context.data['anatomy']
self.create_destination_template(instance, anatomy)
# WARNING this is from `collect_instance_anatomy_data.py`
anatomy_data = copy.deepcopy(instance.context.data["anatomyData"])
project_entity = instance.context.data["projectEntity"]
context_asset_entity = instance.context.data["assetEntity"]
return os.path.join(
instance.data["assumedDestination"],
"resources"
)
def create_destination_template(self, instance, anatomy):
"""Create a filepath based on the current data available
Example template:
{root}/{project}/{silo}/{asset}/publish/{subset}/v{version:0>3}/
{subset}.{representation}
Args:
instance: the instance to publish
Returns:
file path (str)
"""
# get all the stuff from the database
subset_name = instance.data["subset"]
self.log.info(subset_name)
asset_name = instance.data["asset"]
project_name = api.Session["AVALON_PROJECT"]
a_template = anatomy.templates
if context_asset_entity["name"] == asset_name:
asset_entity = context_asset_entity
project = io.find_one(
{
"type": "project",
"name": project_name
},
projection={"config": True, "data": True}
)
else:
asset_entity = io.find_one({
"type": "asset",
"name": asset_name,
"parent": project_entity["_id"]
})
template = a_template['publish']['path']
# anatomy = instance.context.data['anatomy']
subset_name = instance.data["subset"]
version_number = instance.data.get("version")
latest_version = None
asset = io.find_one({
"type": "asset",
"name": asset_name,
"parent": project["_id"]
if asset_entity:
subset_entity = io.find_one({
"type": "subset",
"name": subset_name,
"parent": asset_entity["_id"]
})
if subset_entity is None:
self.log.debug("Subset entity does not exist yet.")
else:
version_entity = io.find_one(
{
"type": "version",
"parent": subset_entity["_id"]
},
sort=[("name", -1)]
)
if version_entity:
latest_version = version_entity["name"]
if version_number is None:
version_number = 1
if latest_version is not None:
version_number += int(latest_version)
anatomy_data.update({
"asset": asset_name,
"family": instance.data["family"],
"subset": subset_name,
"version": version_number
})
assert asset, ("No asset found by the name '{}' "
"in project '{}'".format(asset_name, project_name))
silo = asset.get('silo')
resolution_width = instance.data.get("resolutionWidth")
if resolution_width:
anatomy_data["resolution_width"] = resolution_width
subset = io.find_one({
"type": "subset",
"name": subset_name,
"parent": asset["_id"]
resolution_height = instance.data.get("resolutionHeight")
if resolution_height:
anatomy_data["resolution_height"] = resolution_height
fps = instance.data.get("fps")
if fps:
anatomy_data["fps"] = fps
instance.data["projectEntity"] = project_entity
instance.data["assetEntity"] = asset_entity
instance.data["anatomyData"] = anatomy_data
instance.data["latestVersion"] = latest_version
instance.data["version"] = version_number
# WARNING this is from `collect_resources_path.py`
anatomy = instance.context.data["anatomy"]
template_data = copy.deepcopy(instance.data["anatomyData"])
# This is for cases of Deprecated anatomy without `folder`
# TODO remove when all clients have solved this issue
template_data.update({
"frame": "FRAME_TEMP",
"representation": "TEMP"
})
# assume there is no version yet, we start at `1`
version = None
version_number = 1
if subset is not None:
version = io.find_one(
{
"type": "version",
"parent": subset["_id"]
},
sort=[("name", -1)]
)
anatomy_filled = anatomy.format(template_data)
# if there is a subset there ought to be version
if version is not None:
version_number += version["name"]
if "folder" in anatomy.templates["publish"]:
publish_folder = anatomy_filled["publish"]["folder"]
else:
# solve deprecated situation when `folder` key is not underneath
# `publish` anatomy
project_name = api.Session["AVALON_PROJECT"]
self.log.warning((
"Deprecation warning: Anatomy does not have set `folder`"
" key underneath `publish` (in global of for project `{}`)."
).format(project_name))
if instance.data.get('version'):
version_number = int(instance.data.get('version'))
file_path = anatomy_filled["publish"]["path"]
# Directory
publish_folder = os.path.dirname(file_path)
padding = int(a_template['render']['padding'])
publish_folder = os.path.normpath(publish_folder)
resources_folder = os.path.join(publish_folder, "resources")
hierarchy = asset['data']['parents']
if hierarchy:
# hierarchy = os.path.sep.join(hierarchy)
hierarchy = "/".join(hierarchy)
instance.data["publishDir"] = publish_folder
instance.data["resourcesDir"] = resources_folder
template_data = {"root": api.Session["AVALON_PROJECTS"],
"project": {"name": project_name,
"code": project['data']['code']},
"silo": silo,
"family": instance.data['family'],
"asset": asset_name,
"subset": subset_name,
"frame": ('#' * padding),
"version": version_number,
"hierarchy": hierarchy,
"representation": "TEMP"}
instance.data["assumedTemplateData"] = template_data
self.log.info(template_data)
instance.data["template"] = template
# We take the parent folder of representation 'filepath'
instance.data["assumedDestination"] = os.path.dirname(
anatomy.format(template_data)["publish"]["path"]
)
return resources_folder
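# A condensed sketch of the deprecated-anatomy fallback above, assuming
# `anatomy_filled` came from anatomy.format(template_data):
#     if "folder" in anatomy.templates["publish"]:
#         publish_folder = anatomy_filled["publish"]["folder"]
#     else:
#         # old anatomy: derive the folder from the filled file path
#         publish_folder = os.path.dirname(
#             anatomy_filled["publish"]["path"]
#         )
#     resources_folder = os.path.join(publish_folder, "resources")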