Merge branch 'develop' into feature/PYPE-653_master_version

This commit is contained in:
iLLiCiTiT 2020-02-24 16:43:28 +01:00
commit c4d766b60b
7 changed files with 203 additions and 20 deletions

View file

@@ -35,7 +35,17 @@ class CollectFtrackApi(pyblish.api.ContextPlugin):
# Find project entity
project_query = 'Project where full_name is "{0}"'.format(project_name)
self.log.debug("Project query: < {0} >".format(project_query))
project_entity = session.query(project_query).one()
project_entity = list(session.query(project_query).all())
if len(project_entity) == 0:
raise AssertionError(
"Project \"{0}\" not found in Ftrack.".format(project_name)
)
# QUESTION Is it possible for this to happen?
elif len(project_entity) > 1:
raise AssertionError((
"Found more than one project with name \"{0}\" in Ftrack."
).format(project_name))
self.log.debug("Project found: {0}".format(project_entity))
# Find asset entity
@@ -44,7 +54,25 @@ class CollectFtrackApi(pyblish.api.ContextPlugin):
' and name is "{1}"'
).format(project_entity["id"], asset_name)
self.log.debug("Asset entity query: < {0} >".format(entity_query))
asset_entity = session.query(entity_query).one()
asset_entities = []
for entity in session.query(entity_query).all():
# Skip tasks
if entity.entity_type.lower() != "task":
asset_entities.append(entity)
if len(asset_entities) == 0:
raise AssertionError((
"Entity with name \"{0}\" not found"
" in Ftrack project \"{1}\"."
).format(asset_name, project_name))
elif len(asset_entities) > 1:
raise AssertionError((
"Found more than one entity with name \"{0}\""
" in Ftrack project \"{1}\"."
).format(asset_name, project_name))
asset_entity = asset_entities[0]
self.log.debug("Asset found: {0}".format(asset_entity))
# Find task entity if task is set
@@ -53,8 +81,15 @@ class CollectFtrackApi(pyblish.api.ContextPlugin):
'Task where name is "{0}" and parent_id is "{1}"'
).format(task_name, asset_entity["id"])
self.log.debug("Task entity query: < {0} >".format(task_query))
task_entity = session.query(task_query).one()
self.log.debug("Task entity found: {0}".format(task_entity))
task_entity = session.query(task_query).first()
if not task_entity:
self.log.warning(
"Task entity with name \"{0}\" was not found.".format(
task_name
)
)
else:
self.log.debug("Task entity found: {0}".format(task_entity))
else:
task_entity = None
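
For context, the hunks above replace ftrack_api's .one() lookups with explicit result handling. A minimal sketch of the query pattern this moves to, assuming a standard ftrack_api session (the project and task names below are illustrative, not from the commit):

import ftrack_api

session = ftrack_api.Session()  # credentials come from the FTRACK_* env vars

projects = session.query(
    'Project where full_name is "demo_project"'
).all()

# .one() raises on zero or multiple matches; fetching .all() first lets the
# plugin raise its own, more descriptive AssertionError instead.
if not projects:
    raise AssertionError("Project \"demo_project\" not found in Ftrack.")
if len(projects) > 1:
    raise AssertionError("Found more than one project named \"demo_project\".")
project_entity = projects[0]

# .first() returns None instead of raising when nothing matches, which is
# why the missing-task case can be downgraded to a warning.
task_entity = session.query(
    'Task where name is "modeling" and parent_id is "{0}"'.format(
        project_entity["id"]
    )
).first()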

View file

@@ -19,6 +19,7 @@ class CollectAvalonEntities(pyblish.api.ContextPlugin):
label = "Collect Avalon Entities"
def process(self, context):
io.install()
project_name = api.Session["AVALON_PROJECT"]
asset_name = api.Session["AVALON_ASSET"]

View file

@@ -256,10 +256,16 @@ class CollectRenderedFrames(pyblish.api.ContextPlugin):
)
ext = collection.tail.lstrip(".")
detected_start = min(collection.indexes)
detected_end = max(collection.indexes)
representation = {
"name": ext,
"ext": "{}".format(ext),
"files": list(collection),
"frameStart": frame_start,
"detectedStart": detected_start,
"detectedEnd": detected_end,
"stagingDir": root,
"anatomy_template": "render",
"fps": fps,
@@ -323,12 +329,17 @@ class CollectRenderedFrames(pyblish.api.ContextPlugin):
if "slate" in instance.data["families"]:
frame_start += 1
detected_start = min(collection.indexes)
detected_end = max(collection.indexes)
representation = {
"name": ext,
"ext": "{}".format(ext),
"files": list(collection),
"frameStart": frame_start,
"frameEnd": frame_end,
"detectedStart": detected_start,
"detectedEnd": detected_end,
"stagingDir": root,
"anatomy_template": "render",
"fps": fps,
@@ -394,6 +405,9 @@ class CollectRenderedFrames(pyblish.api.ContextPlugin):
if "review" not in families:
families.append("review")
detected_start = min(collection.indexes)
detected_end = max(collection.indexes)
instance.data.update(
{
"name": str(collection),
@@ -428,6 +442,8 @@ class CollectRenderedFrames(pyblish.api.ContextPlugin):
"files": list(collection),
"frameStart": start,
"frameEnd": end,
"detectedStart": detected_start,
"detectedEnd": detected_end,
"stagingDir": root,
"anatomy_template": "render",
"fps": fps,

View file

@@ -149,6 +149,9 @@ class ExtractReview(pyblish.api.InstancePlugin):
# necessary input data
# adds start arg only if image sequence
if isinstance(repre["files"], list):
if start_frame != repre.get("detectedStart", start_frame):
start_frame = repre.get("detectedStart")
input_args.append(
"-start_number {0} -framerate {1}".format(
start_frame, fps))
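
The switch to detectedStart matters because -start_number tells FFmpeg which index the image sequence starts at on disk, which may differ from the publish frame range. Illustrative only, with a made-up path, fps and frame number:

import subprocess

detected_start = 1001
fps = 25
input_args = ["-start_number", str(detected_start), "-framerate", str(fps)]

cmd = ["ffmpeg"] + input_args + ["-i", "render.%04d.exr", "review.mp4"]
subprocess.call(cmd)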

View file

@@ -111,15 +111,16 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
project_entity = instance.data["projectEntity"]
context_asset_name = context.data["assetEntity"]["name"]
asset_name = instance.data["asset"]
asset_entity = instance.data.get("assetEntity")
if not asset_entity:
if not asset_entity or asset_entity["name"] != context_asset_name:
asset_entity = io.find_one({
"type": "asset",
"name": asset_name,
"parent": project_entity["_id"]
})
assert asset_entity, (
"No asset found by the name \"{0}\" in project \"{1}\""
).format(asset_name, project_entity["name"])
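
A minimal sketch of that fallback lookup, assuming avalon-core's io module is installed and pointed at the active project (the asset name is a placeholder, not from the commit):

from avalon import io

io.install()
project_entity = io.find_one({"type": "project"})
asset_entity = io.find_one({
    "type": "asset",
    "name": "sh010",
    "parent": project_entity["_id"],
})
assert asset_entity, "No asset named \"sh010\" found in the project"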

View file

@@ -0,0 +1,97 @@
import os
import types
import maya.cmds as cmds
import pyblish.api
import pype.api
import pype.maya.action
class ValidateAssRelativePaths(pyblish.api.InstancePlugin):
"""Ensure exporting ass file has set relative texture paths"""
order = pype.api.ValidateContentsOrder
hosts = ['maya']
families = ['ass']
label = "ASS has relative texture paths"
actions = [pype.api.RepairAction]
def process(self, instance):
# we cannot ask this until the user opens the render settings, because
# `defaultArnoldRenderOptions` does not exist until then
try:
relative_texture = cmds.getAttr(
"defaultArnoldRenderOptions.absolute_texture_paths")
relative_procedural = cmds.getAttr(
"defaultArnoldRenderOptions.absolute_procedural_paths")
texture_search_path = cmds.getAttr(
"defaultArnoldRenderOptions.tspath"
)
procedural_search_path = cmds.getAttr(
"defaultArnoldRenderOptions.pspath"
)
except ValueError:
assert False, ("Cannot validate: render settings were not opened "
"yet, so the Arnold settings cannot be validated")
scene_dir, scene_basename = os.path.split(cmds.file(q=True, loc=True))
scene_name, _ = os.path.splitext(scene_basename)
project_root = "{}{}{}".format(
os.environ.get("AVALON_PROJECTS"),
os.path.sep,
os.environ.get("AVALON_PROJECT")
)
assert self.maya_is_true(relative_texture) is not True, \
("Texture path is set to be absolute")
assert self.maya_is_true(relative_procedural) is not True, \
("Procedural path is set to be absolute")
texture_search_path = texture_search_path.replace("\\", "/")
procedural_search_path = procedural_search_path.replace("\\", "/")
project_root = project_root.replace("\\", "/")
assert project_root in texture_search_path, \
("Project root is not in texture_search_path")
assert project_root in procedural_search_path, \
("Project root is not in procedural_search_path")
@classmethod
def repair(cls, instance):
texture_search_path = cmds.getAttr(
"defaultArnoldRenderOptions.tspath"
)
procedural_search_path = cmds.getAttr(
"defaultArnoldRenderOptions.pspath"
)
project_root = "{}{}{}".format(
os.environ.get("AVALON_PROJECTS"),
os.path.sep,
os.environ.get("AVALON_PROJECT"),
).replace("\\", "/")
cmds.setAttr("defaultArnoldRenderOptions.tspath",
project_root + os.pathsep + texture_search_path,
type="string")
cmds.setAttr("defaultArnoldRenderOptions.pspath",
project_root + os.pathsep + procedural_search_path,
type="string")
cmds.setAttr("defaultArnoldRenderOptions.absolute_procedural_paths",
False)
cmds.setAttr("defaultArnoldRenderOptions.absolute_texture_paths",
False)
def maya_is_true(self, attr_val):
"""
Whether a Maya attr evaluates to True.
When querying an attribute value from an ambiguous object, the
Maya API will return a list of values, which needs to be handled
to evaluate correctly.
"""
if isinstance(attr_val, types.BooleanType):
return attr_val
elif isinstance(attr_val, (types.ListType, types.GeneratorType)):
return any(attr_val)
else:
return bool(attr_val)
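
A hedged usage sketch of how a validator like this is typically exercised through pyblish inside Maya; the plugin path is an assumption, not part of the commit:

import pyblish.api
import pyblish.util

pyblish.api.register_plugin_path("/path/to/publish/plugins")  # illustrative
context = pyblish.util.publish()  # collect -> validate -> extract -> integrate

for result in context.data["results"]:
    if result["error"]:
        print(result["plugin"].__name__, result["error"])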

View file

@@ -1,17 +1,14 @@
import os
import sys
import re
import datetime
import subprocess
import json
import opentimelineio_contrib.adapters.ffmpeg_burnins as ffmpeg_burnins
from pypeapp.lib import config
from pype import api as pype
from subprocess import Popen, PIPE
# FFmpeg in PATH is required
from pypeapp import Logger
log = pype.Logger().get_logger("BurninWrapper", "burninwrap")
log = Logger().get_logger("BurninWrapper", "burninwrap")
ffmpeg_path = os.environ.get("FFMPEG_PATH")
@@ -41,6 +38,7 @@ TIMECODE = (
MISSING_KEY_VALUE = "N/A"
CURRENT_FRAME_KEY = "{current_frame}"
CURRENT_FRAME_SPLITTER = "_-_CURRENT_FRAME_-_"
TIME_CODE_KEY = "{timecode}"
@@ -136,7 +134,9 @@ class ModifiedBurnins(ffmpeg_burnins.Burnins):
if options_init:
self.options_init.update(options_init)
def add_text(self, text, align, frame_start=None, options=None):
def add_text(
self, text, align, frame_start=None, frame_end=None, options=None
):
"""
Adding static text to a filter.
@@ -152,11 +152,15 @@ class ModifiedBurnins(ffmpeg_burnins.Burnins):
if frame_start:
options["frame_offset"] = frame_start
# `frame_end` is only for measurement of text position
if frame_end:
options["frame_end"] = frame_end
self._add_burnin(text, align, options, DRAWTEXT)
def add_timecode(
self, align, frame_start=None, frame_start_tc=None, text=None,
options=None
self, align, frame_start=None, frame_end=None, frame_start_tc=None,
text=None, options=None
):
"""
Convenience method to create the frame number expression.
@@ -174,6 +178,10 @@ class ModifiedBurnins(ffmpeg_burnins.Burnins):
if frame_start:
options["frame_offset"] = frame_start
# `frame_end` is only for measurement of text position
if frame_end:
options["frame_end"] = frame_end
if not frame_start_tc:
frame_start_tc = options["frame_offset"]
@@ -197,10 +205,31 @@ class ModifiedBurnins(ffmpeg_burnins.Burnins):
:param enum align: alignment, must use provided enum flags
:param dict options:
"""
final_text = text
text_for_size = text
if CURRENT_FRAME_SPLITTER in text:
frame_start = options["frame_offset"]
frame_end = options.get("frame_end", frame_start)
if not frame_start:
replacement_final = replacement_size = str(MISSING_KEY_VALUE)
else:
replacement_final = "\\'{}\\'".format(
r'%%{eif\:n+%d\:d}' % frame_start
)
replacement_size = str(frame_end)
final_text = final_text.replace(
CURRENT_FRAME_SPLITTER, replacement_final
)
text_for_size = text_for_size.replace(
CURRENT_FRAME_SPLITTER, replacement_size
)
resolution = self.resolution
data = {
'text': (
text
final_text
.replace(",", r"\,")
.replace(':', r'\:')
),
@@ -208,7 +237,7 @@ class ModifiedBurnins(ffmpeg_burnins.Burnins):
'size': options['font_size']
}
timecode_text = options.get("timecode") or ""
text_for_size = text + timecode_text
text_for_size += timecode_text
data.update(options)
data.update(
ffmpeg_burnins._drawtext(align, resolution, text_for_size, options)
@@ -272,7 +301,7 @@ class ModifiedBurnins(ffmpeg_burnins.Burnins):
)
print(command)
proc = Popen(command, shell=True)
proc = subprocess.Popen(command, shell=True)
proc.communicate()
if proc.returncode != 0:
raise RuntimeError("Failed to render '%s': %s'"
@@ -368,6 +397,7 @@ def burnins_from_data(
burnin = ModifiedBurnins(input_path, options_init=options_init)
frame_start = data.get("frame_start")
frame_end = data.get("frame_end")
frame_start_tc = data.get('frame_start_tc', frame_start)
stream = burnin._streams[0]
@@ -382,7 +412,7 @@ def burnins_from_data(
# Check frame start and add expression if is available
if frame_start is not None:
data[CURRENT_FRAME_KEY[1:-1]] = r'%%{eif\:n+%d\:d}' % frame_start
data[CURRENT_FRAME_KEY[1:-1]] = CURRENT_FRAME_SPLITTER
if frame_start_tc is not None:
data[TIME_CODE_KEY[1:-1]] = TIME_CODE_KEY
@@ -432,7 +462,7 @@ def burnins_from_data(
# Handle timecode differently
if has_timecode:
args = [align, frame_start, frame_start_tc]
args = [align, frame_start, frame_end, frame_start_tc]
if not value.startswith(TIME_CODE_KEY):
value_items = value.split(TIME_CODE_KEY)
text = value_items[0].format(**data)
@@ -442,7 +472,7 @@ def burnins_from_data(
continue
text = value.format(**data)
burnin.add_text(text, align, frame_start)
burnin.add_text(text, align, frame_start, frame_end)
codec_args = ""
if codec_data:
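
To summarize the burnin change above: the current-frame key is first replaced with a neutral splitter string, and only when the drawtext burnin is built is it expanded into two different strings. One carries the drawtext expression FFmpeg evaluates per frame, the other carries the last frame number so text-size measurement reserves room for the widest value. A short sketch of those two strings (frame numbers made up):

CURRENT_FRAME_SPLITTER = "_-_CURRENT_FRAME_-_"

text = "Frame: " + CURRENT_FRAME_SPLITTER
frame_start, frame_end = 1001, 1099

final_text = text.replace(
    CURRENT_FRAME_SPLITTER,
    "\\'{}\\'".format(r'%%{eif\:n+%d\:d}' % frame_start)
)
text_for_size = text.replace(CURRENT_FRAME_SPLITTER, str(frame_end))

print(final_text)     # Frame: \'%{eif\:n+1001\:d}\'
print(text_for_size)  # Frame: 1099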