Mirror of https://github.com/ynput/ayon-core.git (synced 2026-01-02 00:44:52 +01:00)

Merge remote-tracking branch 'origin/develop' into feature/PYPE-570-maya-renderlayer-creator
Commit fc635c3ab0

11 changed files with 170 additions and 44 deletions

@@ -9,7 +9,7 @@ class SeedDebugProject(BaseAction):
     #: Action identifier.
     identifier = "seed.debug.project"
     #: Action label.
-    label = "SeedDebugProject"
+    label = "Seed Debug Project"
     #: Action description.
     description = "Description"
     #: priority
@@ -265,6 +265,15 @@ class SeedDebugProject(BaseAction):
     def create_assets(self, project, asset_count):
         self.log.debug("*** Creating assets:")
 
+        try:
+            asset_count = int(asset_count)
+        except ValueError:
+            asset_count = 0
+
+        if asset_count <= 0:
+            self.log.debug("No assets to create")
+            return
+
         main_entity = self.session.create("Folder", {
             "name": "Assets",
             "parent": project
@@ -305,6 +314,31 @@ class SeedDebugProject(BaseAction):
 
     def create_shots(self, project, seq_count, shots_count):
         self.log.debug("*** Creating shots:")
+
+        # Convert counts to integers
+        try:
+            seq_count = int(seq_count)
+        except ValueError:
+            seq_count = 0
+
+        try:
+            shots_count = int(shots_count)
+        except ValueError:
+            shots_count = 0
+
+        # Check if both are higher than 0
+        missing = []
+        if seq_count <= 0:
+            missing.append("sequences")
+
+        if shots_count <= 0:
+            missing.append("shots")
+
+        if missing:
+            self.log.debug("No {} to create".format(" and ".join(missing)))
+            return
+
+        # Create Folder "Shots"
         main_entity = self.session.create("Folder", {
             "name": "Shots",
             "parent": project
@@ -62,9 +62,12 @@ class VersionToTaskStatus(BaseEvent):
 
             # Lower version status name and check if has mapping
             version_status = version_status_orig.lower()
-            new_status_names = status_mapping.get(version_status)
-            if not new_status_names:
-                continue
+            new_status_names = []
+            mapped = status_mapping.get(version_status)
+            if mapped:
+                new_status_names.extend(list(mapped))
+
+            new_status_names.append(version_status)
 
             self.log.debug(
                 "Processing AssetVersion status change: [ {} ]".format(
@@ -72,10 +75,6 @@ class VersionToTaskStatus(BaseEvent):
                 )
             )
 
-            # Backwards compatibility (convert string to list)
-            if isinstance(new_status_names, str):
-                new_status_names = [new_status_names]
-
             # Lower all names from presets
             new_status_names = [name.lower() for name in new_status_names]
 
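Note: the hunks above change the status lookup so the version's own status name is always appended as a fallback candidate after any mapped names, and the string-to-list backwards compatibility branch becomes unnecessary. A small standalone sketch of that lookup, using an invented status_mapping preset (not taken from the repository):

# Invented example preset; only the lookup logic mirrors the hunks above.
status_mapping = {
    "render complete": ["to review"],
}

def candidate_status_names(version_status_orig):
    version_status = version_status_orig.lower()

    new_status_names = []
    mapped = status_mapping.get(version_status)
    if mapped:
        new_status_names.extend(list(mapped))

    # the version status itself is now always tried as a fallback
    new_status_names.append(version_status)

    # lower all names, as the second hunk still does
    return [name.lower() for name in new_status_names]

print(candidate_status_names("Render Complete"))  # ['to review', 'render complete']
print(candidate_status_names("Approved"))         # ['approved']
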
pype/lib.py (33 changes)
@@ -14,24 +14,35 @@ log = logging.getLogger(__name__)
 
 
 # Special naming case for subprocess since its a built-in method.
-def _subprocess(args):
+def _subprocess(*args, **kwargs):
     """Convenience method for getting output errors for subprocess."""
 
     # make sure environment contains only strings
-    env = {k: str(v) for k, v in os.environ.items()}
+    filtered_env = {k: str(v) for k, v in os.environ.items()}
 
-    proc = subprocess.Popen(
-        args,
-        stdout=subprocess.PIPE,
-        stderr=subprocess.STDOUT,
-        stdin=subprocess.PIPE,
-        env=env
-    )
+    # set overrides
+    kwargs['stdout'] = kwargs.get('stdout', subprocess.PIPE)
+    kwargs['stderr'] = kwargs.get('stderr', subprocess.STDOUT)
+    kwargs['stdin'] = kwargs.get('stdin', subprocess.PIPE)
+    kwargs['env'] = kwargs.get('env', filtered_env)
 
-    output = proc.communicate()[0]
+    proc = subprocess.Popen(*args, **kwargs)
+
+    output, error = proc.communicate()
+
+    if output:
+        output = output.decode("utf-8")
+        output += "\n"
+        for line in output.strip().split("\n"):
+            log.info(line)
+
+    if error:
+        error = error.decode("utf-8")
+        error += "\n"
+        for line in error.strip().split("\n"):
+            log.error(line)
 
     if proc.returncode != 0:
         log.error(output)
         raise ValueError("\"{}\" was not successful: {}".format(args, output))
     return output
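Note: a hedged usage sketch of the reworked helper, not part of the commit. Positional arguments are forwarded to subprocess.Popen, and stdout/stderr/stdin/env are only defaulted when the caller does not pass them; output is decoded, logged line by line and returned, and a non-zero return code raises ValueError. The ffmpeg command is only an illustrative example and assumes an environment where pype.lib is importable.

import subprocess
from pype.lib import _subprocess

# Defaults kick in: stdout=PIPE, stderr=STDOUT, stdin=PIPE and a
# stringified copy of os.environ.
out = _subprocess(["ffmpeg", "-version"])

# Caller-supplied keyword arguments take precedence; here stderr gets
# its own pipe, so it is logged via log.error instead of being merged
# into stdout.
out = _subprocess(["ffmpeg", "-version"], stderr=subprocess.PIPE)
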
@@ -144,8 +144,11 @@ class IntegrateFtrackApi(pyblish.api.InstancePlugin):
             "version": 0,
             "asset": asset_entity,
         }
 
-        assetversion_data.update(data.get("assetversion_data", {}))
+        _assetversion_data = data.get("assetversion_data", {})
+        assetversion_cust_attrs = _assetversion_data.pop(
+            "custom_attributes", {}
+        )
+        assetversion_data.update(_assetversion_data)
 
         assetversion_entity = session.query(
             self.query("AssetVersion", assetversion_data)
@@ -182,6 +185,18 @@ class IntegrateFtrackApi(pyblish.api.InstancePlugin):
         existing_assetversion_metadata.update(assetversion_metadata)
         assetversion_entity["metadata"] = existing_assetversion_metadata
 
+        # Adding Custom Attributes
+        for attr, val in assetversion_cust_attrs.items():
+            if attr in assetversion_entity["custom_attributes"]:
+                assetversion_entity["custom_attributes"][attr] = val
+                continue
+
+            self.log.warning((
+                "Custom Attrubute \"{0}\""
+                " is not available for AssetVersion."
+                " Can't set it's value to: \"{1}\""
+            ).format(attr, str(val)))
+
         # Have to commit the version and asset, because location can't
         # determine the final location without.
         try:
@@ -125,6 +125,16 @@ class IntegrateFtrackInstance(pyblish.api.InstancePlugin):
                 "thumbnail": comp['thumbnail']
             }
 
+            # Add custom attributes for AssetVersion
+            assetversion_cust_attrs = {}
+            intent_val = instance.context.data.get("intent")
+            if intent_val:
+                assetversion_cust_attrs["intent"] = intent_val
+
+            component_item["assetversion_data"]["custom_attributes"] = (
+                assetversion_cust_attrs
+            )
+
             componentList.append(component_item)
             # Create copy with ftrack.unmanaged location if thumb or prev
             if comp.get('thumbnail') or comp.get('preview') \
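Note: the two ftrack hunks above work as a pair. IntegrateFtrackInstance now attaches a custom_attributes dict (currently just the publish intent) to each component's assetversion_data, and IntegrateFtrackApi pops that dict and writes every key onto the AssetVersion, warning when the attribute is unknown. A minimal sketch of the hand-off, with invented values and a plain dict standing in for the ftrack entity:

# Producer side (IntegrateFtrackInstance); intent value is invented.
intent_val = "Client review"
component_item = {"assetversion_data": {"version": 3}}

assetversion_cust_attrs = {}
if intent_val:
    assetversion_cust_attrs["intent"] = intent_val
component_item["assetversion_data"]["custom_attributes"] = (
    assetversion_cust_attrs
)

# Consumer side (IntegrateFtrackApi); a dict stands in for the real
# ftrack AssetVersion entity and its custom_attributes mapping.
assetversion_entity = {"custom_attributes": {"intent": None}}

_assetversion_data = component_item["assetversion_data"]
cust_attrs = _assetversion_data.pop("custom_attributes", {})
for attr, val in cust_attrs.items():
    if attr in assetversion_entity["custom_attributes"]:
        assetversion_entity["custom_attributes"][attr] = val
    else:
        print("Custom attribute not available:", attr)

print(assetversion_entity)  # {'custom_attributes': {'intent': 'Client review'}}
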
@@ -75,7 +75,7 @@ class CollectTemplates(pyblish.api.InstancePlugin):
                     "asset": asset_name,
                     "subset": subset_name,
                     "version": version_number,
-                    "hierarchy": hierarchy,
+                    "hierarchy": hierarchy.replace("\\", "/"),
                     "representation": "TEMP"}
 
         instance.data["template"] = template
@@ -1,5 +1,6 @@
 import os
 import json
+import copy
 
 import pype.api
 import pyblish
@@ -32,6 +33,7 @@ class ExtractBurnin(pype.api.Extractor):
         frame_start = int(instance.data.get("frameStart") or 0)
         frame_end = int(instance.data.get("frameEnd") or 1)
         duration = frame_end - frame_start + 1
+
         prep_data = {
             "username": instance.context.data['user'],
             "asset": os.environ['AVALON_ASSET'],
@@ -39,8 +41,17 @@ class ExtractBurnin(pype.api.Extractor):
             "frame_start": frame_start,
             "frame_end": frame_end,
             "duration": duration,
-            "version": version
+            "version": version,
+            "comment": instance.context.data.get("comment"),
+            "intent": instance.context.data.get("intent")
         }
+        # Update data with template data
+        template_data = instance.data.get("assumedTemplateData") or {}
+        prep_data.update(template_data)
+
+        # get anatomy project
+        anatomy = instance.context.data['anatomy']
+
         self.log.debug("__ prep_data: {}".format(prep_data))
         for i, repre in enumerate(instance.data["representations"]):
             self.log.debug("__ i: `{}`, repre: `{}`".format(i, repre))
@@ -62,11 +73,17 @@ class ExtractBurnin(pype.api.Extractor):
             )
             self.log.debug("__ full_burnin_path: {}".format(full_burnin_path))
 
+            # create copy of prep_data for anatomy formatting
+            _prep_data = copy.deepcopy(prep_data)
+            _prep_data["representation"] = repre["name"]
+            _prep_data["anatomy"] = (
+                anatomy.format_all(_prep_data).get("solved") or {}
+            )
             burnin_data = {
                 "input": full_movie_path.replace("\\", "/"),
                 "codec": repre.get("codec", []),
                 "output": full_burnin_path.replace("\\", "/"),
-                "burnin_data": prep_data
+                "burnin_data": _prep_data
             }
 
             self.log.debug("__ burnin_data2: {}".format(burnin_data))
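Note: the ExtractBurnin hunks add the publish comment and intent to the burnin data and, per representation, pass a deep copy of that data extended with the representation name and the solved anatomy templates. A hedged sketch of the per-representation step; the anatomy result is mocked here, only copy.deepcopy and the dictionary shape follow the hunks above, and all values are invented.

import copy

prep_data = {  # invented values, same keys as above
    "asset": "sh010",
    "version": 3,
    "comment": "fixed lighting",
    "intent": "Client review",
}
repre = {"name": "h264"}

_prep_data = copy.deepcopy(prep_data)
_prep_data["representation"] = repre["name"]
# In the plugin this is anatomy.format_all(_prep_data).get("solved") or {}
_prep_data["anatomy"] = {}

burnin_data = {
    "input": "path/to/input.mov",
    "codec": [],
    "output": "path/to/input_burnin.mov",
    "burnin_data": _prep_data,
}
print(burnin_data["burnin_data"]["representation"])  # h264
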
@@ -86,11 +86,13 @@ class CollectNukeInstances(pyblish.api.ContextPlugin):
                 node.end()
 
             family = avalon_knob_data["family"]
-            families = avalon_knob_data.get("families")
-            if families:
-                families = [families]
+            families = list()
+            families_ak = avalon_knob_data.get("families")
+
+            if families_ak:
+                families.append(families_ak)
             else:
-                families = [family]
+                families.append(family)
 
             # Get format
             format = root['format'].value()
@@ -100,7 +102,7 @@ class CollectNukeInstances(pyblish.api.ContextPlugin):
 
             if node.Class() not in "Read":
                 if "render" not in node.knobs().keys():
-                    families.insert(0, family)
+                    pass
                 elif node["render"].value():
                     self.log.info("flagged for render")
                     add_family = "render.local"
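Note: the Nuke collector now always builds families as a list seeded from the avalon knob data, and no longer re-inserts the base family when a non-Read node has no render knob. A tiny standalone sketch of the new list construction (knob values invented):

# Invented knob data; only the branching mirrors the hunk above.
avalon_knob_data = {"family": "write", "families": "render"}

family = avalon_knob_data["family"]
families = list()
families_ak = avalon_knob_data.get("families")

if families_ak:
    families.append(families_ak)
else:
    families.append(family)

print(families)  # ['render']
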
@@ -1,5 +1,7 @@
 import os
+import datetime
+import subprocess
 import json
 import opentimelineio_contrib.adapters.ffmpeg_burnins as ffmpeg_burnins
 from pypeapp.lib import config
 from pype import api as pype
@@ -9,6 +11,34 @@ from pype import api as pype
 log = pype.Logger().get_logger("BurninWrapper", "burninwrap")
 
 
+ffmpeg_path = os.environ.get("FFMPEG_PATH")
+if ffmpeg_path and os.path.exists(ffmpeg_path):
+    # add separator "/" or "\" to be prepared for next part
+    ffmpeg_path += os.path.sep
+else:
+    ffmpeg_path = ""
+
+FFMPEG = (
+    '{} -loglevel panic -i %(input)s %(filters)s %(args)s%(output)s'
+).format(os.path.normpath(ffmpeg_path + "ffmpeg"))
+FFPROBE = (
+    '{} -v quiet -print_format json -show_format -show_streams %(source)s'
+).format(os.path.normpath(ffmpeg_path + "ffprobe"))
+
+
+def _streams(source):
+    """Reimplemented from otio burnins to be able use full path to ffprobe
+    :param str source: source media file
+    :rtype: [{}, ...]
+    """
+    command = FFPROBE % {'source': source}
+    proc = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE)
+    out = proc.communicate()[0]
+    if proc.returncode != 0:
+        raise RuntimeError("Failed to run: %s" % command)
+    return json.loads(out)['streams']
+
+
 class ModifiedBurnins(ffmpeg_burnins.Burnins):
     '''
     This is modification of OTIO FFmpeg Burnin adapter.
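Note: the burnin wrapper now builds its own FFMPEG/FFPROBE command templates from an optional FFMPEG_PATH environment variable instead of using the templates bundled with the otio adapter, and _streams/ModifiedBurnins use them for probing and rendering. A standalone sketch of the prefix resolution (the example path is invented):

import os

def ffmpeg_prefix():
    # Same decision as above: use FFMPEG_PATH when it points to an
    # existing directory, otherwise rely on the executables being on PATH.
    ffmpeg_path = os.environ.get("FFMPEG_PATH")
    if ffmpeg_path and os.path.exists(ffmpeg_path):
        return ffmpeg_path + os.path.sep
    return ""

FFPROBE = (
    '{} -v quiet -print_format json -show_format -show_streams %(source)s'
).format(os.path.normpath(ffmpeg_prefix() + "ffprobe"))

# With FFMPEG_PATH=/opt/ffmpeg/bin (invented) the command starts with
# /opt/ffmpeg/bin/ffprobe, otherwise with plain ffprobe.
print(FFPROBE % {"source": "clip.mov"})
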
@@ -61,6 +91,9 @@ class ModifiedBurnins(ffmpeg_burnins.Burnins):
     }
 
     def __init__(self, source, streams=None, options_init=None):
+        if not streams:
+            streams = _streams(source)
+
         super().__init__(source, streams)
         if options_init:
             self.options_init.update(options_init)
@@ -187,7 +220,7 @@ class ModifiedBurnins(ffmpeg_burnins.Burnins):
         if self.filter_string:
             filters = '-vf "{}"'.format(self.filter_string)
 
-        return (ffmpeg_burnins.FFMPEG % {
+        return (FFMPEG % {
             'input': self.source,
             'output': output,
             'args': '%s ' % args if args else '',
@@ -368,7 +401,7 @@ def burnins_from_data(input_path, codec_data, output_path, data, overwrite=True)
     codec_args = ''
     if codec_data is not []:
        codec_args = " ".join(codec_data)
 
     burnin.render(output_path, args=codec_args, overwrite=overwrite)
 
@@ -4,7 +4,16 @@ import os
 import logging
 import subprocess
 import platform
-from shutil import which
+try:
+    from shutil import which
+except ImportError:
+    # we are in python < 3.3
+    def which(command):
+        path = os.getenv('PATH')
+        for p in path.split(os.path.pathsep):
+            p = os.path.join(p, command)
+            if os.path.exists(p) and os.access(p, os.X_OK):
+                return p
 
 handler = logging.basicConfig()
 log = logging.getLogger("Publish Image Sequences")
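Note: the publish script keeps working on Python < 3.3, where shutil.which does not exist, by binding a minimal PATH scan to the same name. A short self-contained version of the same pattern; "ffmpeg" is only an example executable, and the fallback, like the one above, does not handle Windows extensions such as .exe.

import os

try:
    from shutil import which
except ImportError:  # Python < 3.3
    def which(command):
        path = os.getenv('PATH')
        for p in path.split(os.path.pathsep):
            p = os.path.join(p, command)
            if os.path.exists(p) and os.access(p, os.X_OK):
                return p

print(which("ffmpeg"))  # full path, or None when not found
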
@@ -299,14 +299,15 @@ class ComponentItem(QtWidgets.QFrame):
 class LightingButton(QtWidgets.QPushButton):
     lightingbtnstyle = """
     QPushButton {
+        font: %(font_size_pt)spt;
         text-align: center;
         color: #777777;
         background-color: transparent;
         border-width: 1px;
         border-color: #777777;
         border-style: solid;
-        padding-top: 2px;
-        padding-bottom: 2px;
+        padding-top: 0px;
+        padding-bottom: 0px;
         padding-left: 3px;
         padding-right: 3px;
         border-radius: 3px;
@@ -343,18 +344,13 @@ class LightingButton(QtWidgets.QPushButton):
     }
     """
 
-    def __init__(self, text, *args, **kwargs):
-        super().__init__(text, *args, **kwargs)
-        self.setStyleSheet(self.lightingbtnstyle)
-
+    def __init__(self, text, font_size_pt=8, *args, **kwargs):
+        super(LightingButton, self).__init__(text, *args, **kwargs)
+        self.setStyleSheet(self.lightingbtnstyle % {
+            "font_size_pt": font_size_pt
+        })
         self.setCheckable(True)
-
-        preview_font_metrics = self.fontMetrics().boundingRect(text)
-        width = preview_font_metrics.width() + 16
-        height = preview_font_metrics.height() + 5
-        self.setMaximumWidth(width)
-        self.setMaximumHeight(height)
 
 
 class PngFactory:
     png_names = {
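Note: LightingButton now takes a font_size_pt argument (default 8) that is substituted into its stylesheet with %-formatting, while the hard-coded maximum width/height derived from font metrics is dropped. A pure-Python illustration of the substitution, with the style trimmed to the new line; no Qt required:

# Trimmed, invented stylesheet; only the %-substitution mirrors the change.
lightingbtnstyle = """
QPushButton {
    font: %(font_size_pt)spt;
}
"""

font_size_pt = 10  # example value; the constructor defaults to 8
print(lightingbtnstyle % {"font_size_pt": font_size_pt})
# The QPushButton rule now reads "font: 10pt;".
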