Mirror of https://github.com/ynput/ayon-core.git (synced 2025-12-24 21:04:40 +01:00)
Merge branch 'develop' into feature/PYPE-81-nuke-write-render-workflow
# Conflicts:
#	pype/plugins/nuke/create/create_write.py
#	pype/templates.py
commit d10e14c6db
45 changed files with 996 additions and 917 deletions
@@ -1,13 +1,14 @@
from pype.ftrack import BaseAction
import os
import sys
import json
import subprocess
from pype.vendor import ftrack_api
import logging
import operator
import re
import traceback
import json

from pypeapp import Logger, config
from pype.ftrack import BaseAction
from pype.vendor import ftrack_api
from avalon import io, api

log = Logger().get_logger(__name__)

@@ -53,14 +54,7 @@ class RVAction(BaseAction):
    def discover(self, session, entities, event):
        """Return available actions based on *event*."""
        selection = event["data"].get("selection", [])
        if len(selection) != 1:
            return False

        entityType = selection[0].get("entityType", None)
        if entityType in ["assetversion", "task"]:
            return True
        return False
        return True

    def set_rv_path(self):
        self.rv_path = self.config_data.get("rv_path")

@@ -72,144 +66,265 @@ class RVAction(BaseAction):
        )
        super().register()

||||
def get_components_from_entity(self, session, entity, components):
|
||||
"""Get components from various entity types.
|
||||
|
||||
The components dictionary is modifid in place, so nothing is returned.
|
||||
|
||||
Args:
|
||||
entity (Ftrack entity)
|
||||
components (dict)
|
||||
"""
|
||||
|
||||
if entity.entity_type.lower() == "assetversion":
|
||||
for component in entity["components"]:
|
||||
if component["file_type"][1:] not in self.allowed_types:
|
||||
continue
|
||||
|
||||
try:
|
||||
components[entity["asset"]["parent"]["name"]].append(
|
||||
component
|
||||
)
|
||||
except KeyError:
|
||||
components[entity["asset"]["parent"]["name"]] = [component]
|
||||
|
||||
return
|
||||
|
||||
if entity.entity_type.lower() == "task":
|
||||
query = "AssetVersion where task_id is '{0}'".format(entity["id"])
|
||||
for assetversion in session.query(query):
|
||||
self.get_components_from_entity(
|
||||
session, assetversion, components
|
||||
)
|
||||
|
||||
return
|
||||
|
||||
if entity.entity_type.lower() == "shot":
|
||||
query = "AssetVersion where asset.parent.id is '{0}'".format(
|
||||
entity["id"]
|
||||
)
|
||||
for assetversion in session.query(query):
|
||||
self.get_components_from_entity(
|
||||
session, assetversion, components
|
||||
)
|
||||
|
||||
return
|
||||
|
||||
raise NotImplementedError(
|
||||
"\"{}\" entity type is not implemented yet.".format(
|
||||
entity.entity_type
|
||||
)
|
||||
)
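A usage note on this new collector: it mutates the dictionary passed in rather than returning one, so the caller owns the accumulator across several selected entities. A minimal sketch, assuming `action`, `session` and `entities` objects that are not part of this diff:

    components = {}
    for entity in entities:  # e.g. the Ftrack selection handed to the action
        action.get_components_from_entity(session, entity, components)
    # components now maps parent (shot/asset) names to Component lists,
    # e.g. {"sh010": [<Component exr>, <Component mov>], ...}
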
    def interface(self, session, entities, event):
        if event['data'].get('values', {}):
            return

        entity = entities[0]
        versions = []

        entity_type = entity.entity_type.lower()
        if entity_type == "assetversion":
            if (
                entity[
                    'components'
                ][0]['file_type'][1:] in self.allowed_types
            ):
                versions.append(entity)
        else:
            master_entity = entity
            if entity_type == "task":
                master_entity = entity['parent']

            for asset in master_entity['assets']:
                for version in asset['versions']:
                    # Get only AssetVersion of selected task
                    if (
                        entity_type == "task" and
                        version['task']['id'] != entity['id']
                    ):
                        continue
                    # Get only components with allowed type
                    filetype = version['components'][0]['file_type']
                    if filetype[1:] in self.allowed_types:
                        versions.append(version)

        if len(versions) < 1:
            return {
                'success': False,
                'message': 'There are no Asset Versions to open.'
        user = session.query(
            "User where username is '{0}'".format(
                os.environ["FTRACK_API_USER"]
            )
        ).one()
        job = session.create(
            "Job",
            {
                "user": user,
                "status": "running",
                "data": json.dumps({
                    "description": "RV: Collecting components."
                })
            }
        )
        # Commit to feedback to user.
        session.commit()

        items = []
        base_label = "v{0} - {1} - {2}"
        default_component = self.config_data.get(
            'default_component', None
        )
        last_available = None
        select_value = None
        for version in versions:
            for component in version['components']:
                label = base_label.format(
                    str(version['version']).zfill(3),
                    version['asset']['type']['name'],
                    component['name']
                )

                try:
                    location = component[
                        'component_locations'
                    ][0]['location']
                    file_path = location.get_filesystem_path(component)
                except Exception:
                    file_path = component[
                        'component_locations'
                    ][0]['resource_identifier']

                if os.path.isdir(os.path.dirname(file_path)):
                    last_available = file_path
                    if component['name'] == default_component:
                        select_value = file_path
                    items.append(
                        {'label': label, 'value': file_path}
                    )

        if len(items) == 0:
            return {
                'success': False,
                'message': (
                    'There are no Asset Versions with accessible path.'
                )
            }

        item = {
            'label': 'Items to view',
            'type': 'enumerator',
            'name': 'path',
            'data': sorted(
                items,
                key=operator.itemgetter('label'),
                reverse=True
            )
        }
        if select_value is not None:
            item['value'] = select_value
        try:
            items = self.get_interface_items(session, entities)
        except Exception:
            log.error(traceback.format_exc())
            job["status"] = "failed"
        else:
            item['value'] = last_available
            job["status"] = "done"

        return {'items': [item]}
        # Commit to end job.
        session.commit()

        return {"items": items}

    def get_interface_items(self, session, entities):

        components = {}
        for entity in entities:
            self.get_components_from_entity(session, entity, components)

        # Sort by version
        for parent_name, entities in components.items():
            version_mapping = {}
            for entity in entities:
                try:
                    version_mapping[entity["version"]["version"]].append(
                        entity
                    )
                except KeyError:
                    version_mapping[entity["version"]["version"]] = [entity]

            # Sort same versions by date.
            for version, entities in version_mapping.items():
                version_mapping[version] = sorted(
                    entities, key=lambda x: x["version"]["date"], reverse=True
                )

            components[parent_name] = []
            for version in reversed(sorted(version_mapping.keys())):
                components[parent_name].extend(version_mapping[version])

        # Items to present to user.
        items = []
        label = "{} - v{} - {}"
        for parent_name, entities in components.items():
            data = []
            for entity in entities:
                data.append(
                    {
                        "label": label.format(
                            entity["version"]["asset"]["name"],
                            str(entity["version"]["version"]).zfill(3),
                            entity["file_type"][1:]
                        ),
                        "value": entity["id"]
                    }
                )

            items.append(
                {
                    "label": parent_name,
                    "type": "enumerator",
                    "name": parent_name,
                    "data": data,
                    "value": data[0]["value"]
                }
            )

        return items

    def launch(self, session, entities, event):
        """Callback method for RV action."""
        # Launching application
        if "values" not in event["data"]:
            return
        filename = event['data']['values']['path']

        fps = entities[0].get('custom_attributes', {}).get('fps', None)

        cmd = []
        # change frame number to padding string for RV to play sequence
        try:
            frame = re.findall(r'(\d+).', filename)[-1]
        except KeyError:
            # we didn't detect a frame number
            pass
        else:
            padding = '#' * len(frame)
            pos = filename.rfind(frame)
            filename = filename[:pos] + padding + filename[
                filename.rfind('.'):]
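Two quirks in this removed padding block are worth flagging: the regex `r'(\d+).'` leaves the dot unescaped (it matches any character), and indexing an empty `findall` result raises IndexError, so the `except KeyError` guard can never fire. A corrected, self-contained sketch of the same idea:

    import re

    def frame_to_padding(filename):
        """Turn 'shot.0101.exr' into 'shot.####.exr' so RV plays the sequence.

        Sketch only: unlike the code above it escapes the dot and catches
        IndexError, which is what an empty findall result actually raises.
        """
        try:
            frame = re.findall(r'(\d+)\.', filename)[-1]
        except IndexError:
            return filename  # no frame number detected
        pos = filename.rfind(frame)
        return filename[:pos] + '#' * len(frame) + filename[filename.rfind('.'):]

    print(frame_to_padding("/renders/shot010.0101.exr"))
    # /renders/shot010.####.exr
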
        # RV path
        cmd.append(os.path.normpath(self.rv_path))
        if fps is not None:
            cmd.append("-fps {}".format(int(fps)))
        cmd.append(os.path.normpath(filename))
        log.info('Running rv: {}'.format(' '.join(cmd)))
        try:
            # Run RV with these commands
            subprocess.Popen(' '.join(cmd), shell=True)
        except Exception as e:
            return {
                'success': False,
                'message': 'File "{}" was not found.'.format(
                    e
                )
        user = session.query(
            "User where username is '{0}'".format(
                os.environ["FTRACK_API_USER"]
            )
        ).one()
        job = session.create(
            "Job",
            {
                "user": user,
                "status": "running",
                "data": json.dumps({
                    "description": "RV: Collecting file paths."
                })
            }
        )
        # Commit to feedback to user.
        session.commit()

        paths = []
        try:
            paths = self.get_file_paths(session, event)
        except Exception:
            log.error(traceback.format_exc())
            job["status"] = "failed"
        else:
            job["status"] = "done"

        # Commit to end job.
        session.commit()

        args = [os.path.normpath(self.rv_path)]

        fps = entities[0].get("custom_attributes", {}).get("fps", None)
        if fps is not None:
            args.extend(["-fps", str(fps)])

        args.extend(paths)

        log.info("Running rv: {}".format(args))

        subprocess.Popen(args)

        return True

    def get_file_paths(self, session, event):
        """Get file paths from selected components."""

        link = session.get(
            "Component", list(event["data"]["values"].values())[0]
        )["version"]["asset"]["parent"]["link"][0]
        project = session.get(link["type"], link["id"])
        os.environ["AVALON_PROJECT"] = project["name"]
        api.Session["AVALON_PROJECT"] = project["name"]
        io.install()

        location = ftrack_api.Session().pick_location()

        paths = []
        for parent_name in sorted(event["data"]["values"].keys()):
            component = session.get(
                "Component", event["data"]["values"][parent_name]
            )

            # Newer publishes have the source referenced in Ftrack.
            online_source = False
            for neighbour_component in component["version"]["components"]:
                if neighbour_component["name"] != "ftrackreview-mp4_src":
                    continue

                paths.append(
                    location.get_filesystem_path(neighbour_component)
                )
                online_source = True

            if online_source:
                continue

            asset = io.find_one({"type": "asset", "name": parent_name})
            subset = io.find_one(
                {
                    "type": "subset",
                    "name": component["version"]["asset"]["name"],
                    "parent": asset["_id"]
                }
            )
            version = io.find_one(
                {
                    "type": "version",
                    "name": component["version"]["version"],
                    "parent": subset["_id"]
                }
            )
            representation = io.find_one(
                {
                    "type": "representation",
                    "parent": version["_id"],
                    "name": component["file_type"][1:]
                }
            )
            if representation is None:
                representation = io.find_one(
                    {
                        "type": "representation",
                        "parent": version["_id"],
                        "name": "preview"
                    }
                )
            paths.append(api.get_representation_path(representation))

        return paths
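The avalon lookup above walks one level at a time by parent id, with a fallback to the "preview" representation. A condensed sketch of that chain; names such as "sh010" and "renderMain" are invented for illustration:

    from avalon import io

    asset = io.find_one({"type": "asset", "name": "sh010"})
    subset = io.find_one({"type": "subset", "name": "renderMain",
                          "parent": asset["_id"]})
    version = io.find_one({"type": "version", "name": 3,
                           "parent": subset["_id"]})
    # missing representation falls back to the "preview" one
    repre = (io.find_one({"type": "representation", "parent": version["_id"],
                          "name": "exr"})
             or io.find_one({"type": "representation", "parent": version["_id"],
                             "name": "preview"}))
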
def register(session):
    """Register hooks."""

@@ -257,249 +372,3 @@ def main(arguments=None):
if __name__ == '__main__':
    raise SystemExit(main(sys.argv[1:]))

"""
Usage: RV movie and image sequence viewer

  One File:                   rv foo.jpg
  This Directory:             rv .
  Other Directory:            rv /path/to/dir
  Image Sequence w/Audio:     rv [ in.#.tif in.wav ]
  Stereo w/Audio:             rv [ left.#.tif right.#.tif in.wav ]
  Stereo Movies:              rv [ left.mov right.mov ]
  Stereo Movie (from rvio):   rv stereo.mov
  Cuts Sequenced:             rv cut1.mov cut2.#.exr cut3.mov
  Stereo Cuts Sequenced:      rv [ l1.mov r1.mov ] [ l2.mov r2.mov ]
  Forced Anamorphic:          rv [ -pa 2.0 fullaperture.#.dpx ]
  Compare:                    rv -wipe a.exr b.exr
  Difference:                 rv -diff a.exr b.exr
  Slap Comp Over:             rv -over a.exr b.exr
  Tile Images:                rv -tile *.jpg
  Cache + Play Movie:         rv -l -play foo.mov
  Cache Images to Examine:    rv -c big.#.exr
  Fullscreen on 2nd monitor:  rv -fullscreen -screen 1
  Select Source View:         rv [ in.exr -select view right ]
  Select Source Layer:        rv [ in.exr -select layer light1.diffuse ]
                              (single-view source)
  Select Source Layer:        rv [ in.exr -select layer left,light1.diffuse ]
                              (multi-view source)
  Select Source Channel:      rv [ in.exr -select channel R ]
                              (single-view, single-layer source)
  Select Source Channel:      rv [ in.exr -select channel left,Diffuse,R ]
                              (multi-view, multi-layer source)

Image Sequence Numbering

  Frames 1 to 100 no padding:     image.1-100@.jpg
  Frames 1 to 100 padding 4:      image.1-100#.jpg -or- image.1-100@@@@.jpg
  Frames 1 to 100 padding 5:      image.1-100@@@@@.jpg
  Frames -100 to -200 padding 4:  image.-100--200#jpg
  printf style padding 4:         image.%04d.jpg
  printf style w/range:           image.%04d.jpg 1-100
  printf no padding w/range:      image.%d.jpg 1-100
  Complicated no pad 1 to 100:    image_887f1-100@_982.tif
  Stereo pair (left,right):       image.#.%V.tif
  Stereo pair (L,R):              image.#.%v.tif
  All Frames, padding 4:          image.#.jpg
  All Frames in Sequence:         image.*.jpg
  All Frames in Directory:        /path/to/directory
  All Frames in current dir:      .

Per-source arguments (inside [ and ] restricts to that source only)

  -pa %f                  Per-source pixel aspect ratio
  -ro %d                  Per-source range offset
  -rs %d                  Per-source range start
  -fps %f                 Per-source or global fps
  -ao %f                  Per-source audio offset in seconds
  -so %f                  Per-source stereo relative eye offset
  -rso %f                 Per-source stereo right eye offset
  -volume %f              Per-source or global audio volume (default=1)
  -fcdl %S                Per-source file CDL
  -lcdl %S                Per-source look CDL
  -flut %S                Per-source file LUT
  -llut %S                Per-source look LUT
  -pclut %S               Per-source pre-cache software LUT
  -cmap %S                Per-source channel mapping
                          (channel names, separated by ',')
  -select %S %S           Per-source view/layer/channel selection
  -crop %d %d %d %d       Per-source crop (xmin, ymin, xmax, ymax)
  -uncrop %d %d %d %d     Per-source uncrop (width, height, xoffset, yoffset)
  -in %d                  Per-source cut-in frame
  -out %d                 Per-source cut-out frame
  -noMovieAudio           Disable source movie's baked-in audio
  -inparams ...           Source specific input parameters

  ...                     Input sequence patterns, images, movies, or directories
  -c                      Use region frame cache
  -l                      Use look-ahead cache
  -nc                     Use no caching
  -s %f                   Image scale reduction
  -ns                     Nuke style sequence notation
                          (deprecated and ignored -- no longer needed)
  -noRanges               No separate frame ranges
                          (i.e. 1-10 will be considered a file)
  -sessionType %S         Session type (sequence, stack) (deprecated, use -view)
  -stereo %S              Stereo mode
                          (hardware, checker, scanline, anaglyph, lumanaglyph,
                          left, right, pair, mirror, hsqueezed, vsqueezed)
  -stereoSwap %d          Swap left and right eyes stereo display
                          (0 == no, 1 == yes, default=0)
  -vsync %d               Video Sync (1 = on, 0 = off, default = 1)
  -comp %S                Composite mode
                          (over, add, difference, replace, topmost)
  -layout %S              Layout mode (packed, row, column, manual)
  -over                   Same as -comp over -view defaultStack
  -diff                   Same as -comp difference -view defaultStack
  -replace                Same as -comp replace -view defaultStack
  -topmost                Same as -comp topmost -view defaultStack
  -layer                  Same as -comp topmost -view defaultStack, with strict
                          frame ranges
  -tile                   Same as -layout packed -view defaultLayout
  -wipe                   Same as -over with wipes enabled
  -view %S                Start with a particular view
  -noSequence             Don't contract files into sequences
  -inferSequence          Infer sequences from one file
  -autoRetime %d          Automatically retime conflicting media fps in
                          sequences and stacks (1 = on, 0 = off, default = 1)
  -rthreads %d            Number of reader threads (default=1)
  -fullscreen             Start in fullscreen mode
  -present                Start in presentation mode (using presentation device)
  -presentAudio %d        Use presentation audio device in presentation mode
                          (1 = on, 0 = off)
  -presentDevice %S       Presentation mode device
  -presentVideoFormat %S  Presentation mode override video format
                          (device specific)
  -presentDataFormat %S   Presentation mode override data format
                          (device specific)
  -screen %d              Start on screen (0, 1, 2, ...)
  -noBorders              No window manager decorations
  -geometry %d %d [%d %d] Start geometry X, Y, W, H
  -fitMedia               Fit the window to the first media shown
  -init %S                Override init script
  -nofloat                Turn off floating point by default
  -maxbits %d             Maximum default bit depth (default=32)
  -gamma %f               Set display gamma (default=1)
  -sRGB                   Display using linear -> sRGB conversion
  -rec709                 Display using linear -> Rec 709 conversion
  -dlut %S                Apply display LUT
  -brightness %f          Set display relative brightness in stops (default=0)
  -resampleMethod %S      Resampling method
                          (area, linear, cubic, nearest, default=area)
  -eval %S                Evaluate Mu expression at every session start
  -pyeval %S              Evaluate Python expression at every session start
  -nomb                   Hide menu bar on start up
  -play                   Play on startup
  -playMode %d            Playback mode (0=Context dependent, 1=Play all frames,
                          2=Realtime, default=0)
  -loopMode %d            Playback loop mode
                          (0=Loop, 1=Play Once, 2=Ping-Pong, default=0)
  -cli                    Mu command line interface
  -vram %f                VRAM usage limit in Mb, default = 64.000000
  -cram %f                Max region cache RAM usage in Gb,
                          (6.4Gb available, default 1Gb)
  -lram %f                Max look-ahead cache RAM usage in Gb,
                          (6.4Gb available, default 0.2Gb)
  -noPBO                  Prevent use of GL PBOs for pixel transfer
  -prefetch               Prefetch images for rendering
  -useAppleClientStorage  Use APPLE_client_storage extension
  -useThreadedUpload      Use threading for texture uploading/downloading
                          if possible
  -bwait %f               Max buffer wait time in cached seconds, default 5.0
  -lookback %f            Percentage of the lookahead cache reserved for
                          frames behind the playhead, default 25
  -yuv                    Assume YUV hardware conversion
  -noaudio                Turn off audio
  -audiofs %d             Use fixed audio frame size
                          (results are hardware dependent ... try 512)
  -audioCachePacket %d    Audio cache packet size in samples (default=2048)
  -audioMinCache %f       Audio cache min size in seconds (default=0.300000)
  -audioMaxCache %f       Audio cache max size in seconds (default=0.600000)
  -audioModule %S         Use specific audio module
  -audioDevice %S         Use specific audio device
  -audioRate %f           Use specific output audio rate (default=ask hardware)
  -audioPrecision %d      Use specific output audio precision (default=16)
  -audioNice %d           Close audio device when not playing
                          (may cause problems on some hardware) default=0
  -audioNoLock %d         Do not use hardware audio/video synchronization
                          (use software instead, default=0)
  -audioPreRoll %d        Preroll audio on device open (Linux only; default=0)
  -audioGlobalOffset %f   Global audio offset in seconds
  -audioDeviceLatency %f  Audio device latency compensation in milliseconds
  -bg %S                  Background pattern (default=black, white, grey18,
                          grey50, checker, crosshatch)
  -formats                Show all supported image and movie formats
  -apple                  Use Quicktime and NSImage libraries (on OS X)
  -cinalt                 Use alternate Cineon/DPX readers
  -exrcpus %d             EXR thread count (default=0)
  -exrRGBA                EXR Always read as RGBA (default=false)
  -exrInherit             EXR guess channel inheritance (default=false)
  -exrNoOneChannel        EXR never use one channel planar images (default=false)
  -exrIOMethod %d [%d]    EXR I/O Method (0=standard, 1=buffered, 2=unbuffered,
                          3=MemoryMap, 4=AsyncBuffered, 5=AsyncUnbuffered,
                          default=1) and optional chunk size (default=61440)
  -exrReadWindowIsDisplayWindow
                          EXR read window is display window (default=false)
  -exrReadWindow %d       EXR Read Window Method (0=Data, 1=Display,
                          2=Union, 3=Data inside Display, default=3)
  -jpegRGBA               Make JPEG four channel RGBA on read
                          (default=no, use RGB or YUV)
  -jpegIOMethod %d [%d]   JPEG I/O Method (0=standard, 1=buffered,
                          2=unbuffered, 3=MemoryMap, 4=AsyncBuffered,
                          5=AsyncUnbuffered, default=1) and optional
                          chunk size (default=61440)
  -cinpixel %S            Cineon pixel storage (default=RGB8_PLANAR)
  -cinchroma              Use Cineon chromaticity values
                          (for default reader only)
  -cinIOMethod %d [%d]    Cineon I/O Method (0=standard, 1=buffered,
                          2=unbuffered, 3=MemoryMap, 4=AsyncBuffered,
                          5=AsyncUnbuffered, default=1) and optional
                          chunk size (default=61440)
  -dpxpixel %S            DPX pixel storage (default=RGB8_PLANAR)
  -dpxchroma              Use DPX chromaticity values (for default reader only)
  -dpxIOMethod %d [%d]    DPX I/O Method (0=standard, 1=buffered, 2=unbuffered,
                          3=MemoryMap, 4=AsyncBuffered, 5=AsyncUnbuffered,
                          default=1) and optional chunk size (default=61440)
  -tgaIOMethod %d [%d]    TARGA I/O Method (0=standard, 1=buffered,
                          2=unbuffered, 3=MemoryMap, 4=AsyncBuffered,
                          5=AsyncUnbuffered, default=1)
                          and optional chunk size (default=61440)
  -tiffIOMethod %d [%d]   TIFF I/O Method (0=standard, 1=buffered,
                          2=unbuffered, 3=MemoryMap, 4=AsyncBuffered,
                          5=AsyncUnbuffered, default=1) and optional
                          chunk size (default=61440)
  -lic %S                 Use specific license file
  -noPrefs                Ignore preferences
  -resetPrefs             Reset preferences to default values
  -qtcss %S               Use QT style sheet for UI
  -qtstyle %S             Use QT style
  -qtdesktop %d           QT desktop aware, default=1 (on)
  -xl                     Aggressively absorb screen space for large media
  -mouse %d               Force tablet/stylus events to be treated as
                          mouse events, default=0 (off)
  -network                Start networking
  -networkPort %d         Port for networking
  -networkHost %S         Alternate host/address for incoming connections
  -networkTag %S          Tag to mark automatically saved port file
  -networkConnect %S [%d] Start networking and connect to host at port
  -networkPerm %d         Default network connection permission
                          (0=Ask, 1=Allow, 2=Deny, default=0)
  -reuse %d               Try to re-use the current session for
                          incoming URLs (1 = reuse session,
                          0 = new session, default = 1)
  -nopackages             Don't load any packages at startup (for debugging)
  -encodeURL              Encode the command line as
                          an rvlink URL, print, and exit
  -bakeURL                Fully bake the command line as an
                          rvlink URL, print, and exit
  -sendEvent ...          Send external events e.g. -sendEvent 'name' 'content'
  -flags ...              Arbitrary flags (flag, or 'name=value')
                          for use in Mu code
  -debug ...              Debug category
  -version                Show RV version number
  -strictlicense          Exit rather than consume an rv license if no rvsolo
                          licenses are available
  -prefsPath %S           Alternate path to preferences directory
  -sleep %d               Sleep (in seconds) before starting to
                          allow attaching debugger
"""

@@ -16,7 +16,7 @@ class Sync_to_Avalon(BaseEvent):
        # If mongo_id textfield has changed: RETURN!
        # - infinite loop
        for ent in event['data']['entities']:
            if 'keys' in ent:
            if ent.get('keys') is not None:
                if ca_mongoid in ent['keys']:
                    return

@@ -109,7 +109,7 @@ class Sync_to_Avalon(BaseEvent):
            ' for more information.'
        )
        items = [
            {'type': 'label', 'value':'# Fatal Error'},
            {'type': 'label', 'value': '# Fatal Error'},
            {'type': 'label', 'value': '<p>{}</p>'.format(ftrack_message)}
        ]
        self.show_interface(event, items, title)

@@ -507,11 +507,17 @@ def get_project_apps(entity):
    apps = []
    for app in entity['custom_attributes']['applications']:
        try:
            app_config = {}
            app_config['name'] = app
            app_config['label'] = toml.load(avalon.lib.which_app(app))['label']
            toml_path = avalon.lib.which_app(app)
            if not toml_path:
                log.warning((
                    'Missing config file for application "{}"'
                ).format(app))
                continue

            apps.append(app_config)
            apps.append({
                'name': app,
                'label': toml.load(toml_path)['label']
            })

        except Exception as e:
            log.warning('Error with application {0} - {1}'.format(app, e))
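The fix here is the `which_app` guard: the old line fed a possible None straight into `toml.load`. A minimal sketch of the guarded lookup; the app name and .toml layout are illustrative, not from this diff:

    import toml
    import avalon.lib

    def app_item(app):
        toml_path = avalon.lib.which_app(app)  # None when no config exists
        if not toml_path:
            return None
        return {'name': app, 'label': toml.load(toml_path)['label']}

    print(app_item("maya_2019"))  # e.g. {'name': 'maya_2019', 'label': 'Maya 2019'}
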
@@ -94,6 +94,9 @@ class AppAction(BaseHandler):
        ):
            return False

        if entities[0]['parent'].entity_type.lower() == 'project':
            return False

        ft_project = entities[0]['project']

        database = pypelib.get_avalon_database()

@@ -43,9 +43,20 @@ class BaseEvent(BaseHandler):
        self.session.rollback()
        self.session._local_cache.clear()

        self.launch(
            self.session, event
        )
        try:
            self.launch(
                self.session, event
            )
        except Exception as e:
            exc_type, exc_obj, exc_tb = sys.exc_info()
            fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
            log_message = "{}/{}/Line: {}".format(
                exc_type, fname, exc_tb.tb_lineno
            )
            self.log.error(
                'Error during syncToAvalon: {}'.format(log_message),
                exc_info=True
            )

        return
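The new except branch condenses a traceback into a "type/file/line" summary before logging. A standalone sketch of that pattern:

    import os
    import sys

    try:
        1 / 0
    except Exception:
        exc_type, exc_obj, exc_tb = sys.exc_info()
        fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
        print("{}/{}/Line: {}".format(exc_type, fname, exc_tb.tb_lineno))
        # e.g. <class 'ZeroDivisionError'>/sketch.py/Line: 5
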
@@ -1,28 +1,15 @@
import os
import sys
import logging

import nuke

from avalon import api as avalon
from avalon.tools import workfiles
from pyblish import api as pyblish

from .. import api

from pype.nuke import menu
import logging

from .lib import (
    create_write_node
)

import nuke

from pypeapp import Logger

# #removing logger handler created in avalon_core
# for name, handler in [(handler.get_name(), handler)
#                       for handler in Logger.logging.root.handlers[:]]:
#     if "pype" not in str(name).lower():
#         Logger.logging.root.removeHandler(handler)
from . import lib


self = sys.modules[__name__]

@@ -138,6 +125,9 @@ def install():
    if launch_workfiles:
        nuke.addOnCreate(launch_workfiles_app, nodeClass="Root")

    # Set context settings.
    nuke.addOnCreate(lib.set_context_settings, nodeClass="Root")


def launch_workfiles_app():
    if not self.workfiles_launched:

@@ -447,8 +447,6 @@ def reset_frame_range_handles():
    """Set frame range to current asset"""

    root = nuke.root()
    fps = float(api.Session.get("AVALON_FPS", 25))
    root["fps"].setValue(fps)
    name = api.Session["AVALON_ASSET"]
    asset = io.find_one({"name": name, "type": "asset"})

@@ -460,7 +458,7 @@ def reset_frame_range_handles():
    data = asset["data"]

    missing_cols = []
    check_cols = ["fstart", "fend", "handle_start", "handle_end"]
    check_cols = ["fps", "fstart", "fend", "handle_start", "handle_end"]

    for col in check_cols:
        if col not in data:

@@ -477,33 +475,37 @@ def reset_frame_range_handles():
    handles = avalon.nuke.get_handles(asset)
    handle_start, handle_end = pype.get_handle_irregular(asset)

    log.info("__ handles: `{}`".format(handles))
    log.info("__ handle_start: `{}`".format(handle_start))
    log.info("__ handle_end: `{}`".format(handle_end))

    fps = asset["data"]["fps"]
    edit_in = int(asset["data"]["fstart"]) - handle_start
    edit_out = int(asset["data"]["fend"]) + handle_end

    root["fps"].setValue(fps)
    root["first_frame"].setValue(edit_in)
    root["last_frame"].setValue(edit_out)

    log.info("__ handles: `{}`".format(handles))
    log.info("__ handle_start: `{}`".format(handle_start))
    log.info("__ handle_end: `{}`".format(handle_end))
    log.info("__ edit_in: `{}`".format(edit_in))
    log.info("__ edit_out: `{}`".format(edit_out))
    log.info("__ fps: `{}`".format(fps))

    # setting active viewers
    nuke.frame(int(asset["data"]["fstart"]))

    vv = nuke.activeViewer().node()

    range = '{0}-{1}'.format(
        int(asset["data"]["fstart"]),
        int(asset["data"]["fend"]))

    vv['frame_range'].setValue(range)
    vv['frame_range_lock'].setValue(True)
    for node in nuke.allNodes(filter="Viewer"):
        node['frame_range'].setValue(range)
        node['frame_range_lock'].setValue(True)

        log.info("_frameRange: {}".format(range))
        log.info("frameRange: {}".format(vv['frame_range'].value()))
        log.info("_frameRange: {}".format(range))
        log.info("frameRange: {}".format(node['frame_range'].value()))

        vv['frame_range'].setValue(range)
        vv['frame_range_lock'].setValue(True)
        node['frame_range'].setValue(range)
        node['frame_range_lock'].setValue(True)

    # adding handle_start/end to root avalon knob
    if not avalon.nuke.set_avalon_knob_data(root, {
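To make the handle arithmetic in this hunk concrete, a worked sketch with invented asset values:

    fstart, fend = 1001, 1100
    handle_start, handle_end = 10, 12
    edit_in = int(fstart) - handle_start     # 991  -> root["first_frame"]
    edit_out = int(fend) + handle_end        # 1112 -> root["last_frame"]
    # viewers lock to the range without handles:
    viewer_range = '{0}-{1}'.format(fstart, fend)   # "1001-1100"
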
@@ -18,6 +18,9 @@ class CollectFtrackApi(pyblish.api.ContextPlugin):

        ftrack_log = logging.getLogger('ftrack_api')
        ftrack_log.setLevel(logging.WARNING)
        ftrack_log = logging.getLogger('ftrack_api_old')
        ftrack_log.setLevel(logging.WARNING)

        # Collect session
        session = ftrack_api.Session()
        context.data["ftrackSession"] = session

@@ -26,7 +26,9 @@ class IntegrateFtrackInstance(pyblish.api.InstancePlugin):
        'write': 'render',
        'review': 'mov',
        'plate': 'img',
        'audio': 'audio'
        'audio': 'audio',
        'workfile': 'scene',
        'animation': 'cache'
    }

    def process(self, instance):

@@ -49,14 +51,15 @@ class IntegrateFtrackInstance(pyblish.api.InstancePlugin):
        for comp in instance.data['representations']:
            self.log.debug('component {}'.format(comp))

            if comp.get('thumbnail'):
            if comp.get('thumbnail') or ("thumbnail" in comp.get('tags', [])):
                location = self.get_ftrack_location(
                    'ftrack.server', ft_session
                )
                component_data = {
                    "name": "thumbnail"  # Default component name is "main".
                }
            elif comp.get('preview'):
                comp['thumbnail'] = True
            elif comp.get('preview') or ("preview" in comp.get('tags', [])):
                '''
                Ftrack bug requirement:
                - Start frame must be 0

@@ -120,7 +123,9 @@ class IntegrateFtrackInstance(pyblish.api.InstancePlugin):

            componentList.append(component_item)
            # Create copy with ftrack.unmanaged location if thumb or prev
            if comp.get('thumbnail') or comp.get('preview'):
            if comp.get('thumbnail') or comp.get('preview') \
                    or ("preview" in comp.get('tags', [])) \
                    or ("thumbnail" in comp.get('tags', [])):
                unmanaged_loc = self.get_ftrack_location(
                    'ftrack.unmanaged', ft_session
                )

@@ -148,7 +153,6 @@ class IntegrateFtrackInstance(pyblish.api.InstancePlugin):

            componentList.append(component_item_src)


        self.log.debug('componentsList: {}'.format(str(componentList)))
        instance.data["ftrackComponentsList"] = componentList

pype/plugins/global/_publish_unused/extract_quicktime.py (new file, 86 lines)
@@ -0,0 +1,86 @@
import os
import pyblish.api
import subprocess
from pype.vendor import clique


class ExtractQuicktimeEXR(pyblish.api.InstancePlugin):
    """Resolve any dependency issues

    This plug-in resolves any paths which, if not updated might break
    the published file.

    The order of families is important, when working with lookdev you want to
    first publish the texture, update the texture paths in the nodes and then
    publish the shading network. Same goes for file dependent assets.
    """

    label = "Extract Quicktime"
    order = pyblish.api.ExtractorOrder
    families = ["imagesequence", "render", "write", "source"]
    hosts = ["shell"]

    def process(self, instance):
        # fps = instance.data.get("fps")
        # start = instance.data.get("startFrame")
        # stagingdir = os.path.normpath(instance.data.get("stagingDir"))
        #
        # collected_frames = os.listdir(stagingdir)
        # collections, remainder = clique.assemble(collected_frames)
        #
        # full_input_path = os.path.join(
        #     stagingdir, collections[0].format('{head}{padding}{tail}')
        # )
        # self.log.info("input {}".format(full_input_path))
        #
        # filename = collections[0].format('{head}')
        # if not filename.endswith('.'):
        #     filename += "."
        # movFile = filename + "mov"
        # full_output_path = os.path.join(stagingdir, movFile)
        #
        # self.log.info("output {}".format(full_output_path))
        #
        # config_data = instance.context.data['output_repre_config']
        #
        # proj_name = os.environ.get('AVALON_PROJECT', '__default__')
        # profile = config_data.get(proj_name, config_data['__default__'])
        #
        # input_args = []
        # # overrides output file
        # input_args.append("-y")
        # # preset's input data
        # input_args.extend(profile.get('input', []))
        # # necessary input data
        # input_args.append("-start_number {}".format(start))
        # input_args.append("-i {}".format(full_input_path))
        # input_args.append("-framerate {}".format(fps))
        #
        # output_args = []
        # # preset's output data
        # output_args.extend(profile.get('output', []))
        # # output filename
        # output_args.append(full_output_path)
        # mov_args = [
        #     "ffmpeg",
        #     " ".join(input_args),
        #     " ".join(output_args)
        # ]
        # subprocess_mov = " ".join(mov_args)
        # sub_proc = subprocess.Popen(subprocess_mov)
        # sub_proc.wait()
        #
        # if not os.path.isfile(full_output_path):
        #     raise("Quicktime wasn't created successfully")
        #
        # if "representations" not in instance.data:
        #     instance.data["representations"] = []
        #
        # representation = {
        #     'name': 'mov',
        #     'ext': 'mov',
        #     'files': movFile,
        #     "stagingDir": stagingdir,
        #     "preview": True
        # }
        # instance.data["representations"].append(representation)
@@ -1,92 +0,0 @@
# import os
# import pyblish.api
# import subprocess
# from pype.vendor import clique
# from pypeapp import config
#
#
# class ExtractReview(pyblish.api.InstancePlugin):
#     """Resolve any dependency issues
#
#     This plug-in resolves any paths which, if not updated might break
#     the published file.
#
#     The order of families is important, when working with lookdev you want to
#     first publish the texture, update the texture paths in the nodes and then
#     publish the shading network. Same goes for file dependent assets.
#     """
#
#     label = "Extract Review"
#     order = pyblish.api.ExtractorOrder
#     # families = ["imagesequence", "render", "write", "source"]
#     # hosts = ["shell"]
#
#     def process(self, instance):
#         # adding plugin attributes from presets
#         publish_presets = config.get_presets()["plugins"]["global"]["publish"]
#         plugin_attrs = publish_presets[self.__class__.__name__]
#
#
#         fps = instance.data.get("fps")
#         start = instance.data.get("startFrame")
#         stagingdir = os.path.normpath(instance.data.get("stagingDir"))
#
#         collected_frames = os.listdir(stagingdir)
#         collections, remainder = clique.assemble(collected_frames)
#
#         full_input_path = os.path.join(
#             stagingdir, collections[0].format('{head}{padding}{tail}')
#         )
#         self.log.info("input {}".format(full_input_path))
#
#         filename = collections[0].format('{head}')
#         if not filename.endswith('.'):
#             filename += "."
#         movFile = filename + "mov"
#         full_output_path = os.path.join(stagingdir, movFile)
#
#         self.log.info("output {}".format(full_output_path))
#
#         config_data = instance.context.data['output_repre_config']
#
#         proj_name = os.environ.get('AVALON_PROJECT', '__default__')
#         profile = config_data.get(proj_name, config_data['__default__'])
#
#         input_args = []
#         # overrides output file
#         input_args.append("-y")
#         # preset's input data
#         input_args.extend(profile.get('input', []))
#         # necessary input data
#         input_args.append("-start_number {}".format(start))
#         input_args.append("-i {}".format(full_input_path))
#         input_args.append("-framerate {}".format(fps))
#
#         output_args = []
#         # preset's output data
#         output_args.extend(profile.get('output', []))
#         # output filename
#         output_args.append(full_output_path)
#         mov_args = [
#             "ffmpeg",
#             " ".join(input_args),
#             " ".join(output_args)
#         ]
#         subprocess_mov = " ".join(mov_args)
#         sub_proc = subprocess.Popen(subprocess_mov)
#         sub_proc.wait()
#
#         if not os.path.isfile(full_output_path):
#             raise("Quicktime wasn't created successfully")
#
#         if "representations" not in instance.data:
#             instance.data["representations"] = []
#
#         representation = {
#             'name': 'mov',
#             'ext': 'mov',
#             'files': movFile,
#             "stagingDir": stagingdir,
#             "preview": True
#         }
#         instance.data["representations"].append(representation)
@@ -6,6 +6,7 @@ from pprint import pformat

import pyblish.api
from avalon import api
import pype.api as pype


def collect(root,

@@ -64,7 +65,7 @@ def collect(root,
    return collections


class CollectFileSequences(pyblish.api.ContextPlugin):
class CollectRenderedFrames(pyblish.api.ContextPlugin):
    """Gather file sequences from working directory

    When "FILESEQUENCE" environment variable is set these paths (folders or

@@ -87,7 +88,7 @@ class CollectFileSequences(pyblish.api.ContextPlugin):

    order = pyblish.api.CollectorOrder
    targets = ["filesequence"]
    label = "File Sequences"
    label = "RenderedFrames"

    def process(self, context):
        if os.environ.get("PYPE_PUBLISH_PATHS"):

@@ -128,6 +129,7 @@ class CollectFileSequences(pyblish.api.ContextPlugin):
            self.log.info("setting session using metadata")
            api.Session.update(session)
            os.environ.update(session)

        else:
            # Search in directory
            data = dict()

@@ -161,6 +163,7 @@ class CollectFileSequences(pyblish.api.ContextPlugin):
        assert isinstance(families, (list, tuple)), "Must be iterable"
        assert families, "Must have at least a single family"
        families.append("ftrack")
        families.append("review")
        for collection in collections:
            instance = context.create_instance(str(collection))
            self.log.info("Collection: %s" % list(collection))

@@ -205,7 +208,8 @@ class CollectFileSequences(pyblish.api.ContextPlugin):
                'files': list(collection),
                "stagingDir": root,
                "anatomy_template": "render",
                "frameRate": fps
                "frameRate": fps,
                "tags": ['review']
            }
            instance.data["representations"].append(representation)
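For orientation, a representation emitted by this collector now looks roughly like the sketch below (values invented); the added "tags": ['review'] is what routes the instance into ExtractReview further down the publish:

    representation = {
        "name": "exr",
        "ext": "exr",
        "files": ["shot010.1001.exr", "shot010.1002.exr"],
        "stagingDir": "/renders/shot010",
        "anatomy_template": "render",
        "frameRate": 25,
        "tags": ["review"],
    }
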
@@ -2,6 +2,7 @@ import os
import subprocess
import pype.api
import json
import pyblish


class ExtractBurnin(pype.api.Extractor):

@@ -14,7 +15,8 @@ class ExtractBurnin(pype.api.Extractor):
    """

    label = "Quicktime with burnins"
    families = ["burnin"]
    order = pyblish.api.ExtractorOrder + 0.03
    families = ["review", "burnin"]
    optional = True

    def process(self, instance):

@@ -22,42 +24,68 @@ class ExtractBurnin(pype.api.Extractor):
            raise RuntimeError("Burnin needs already created mov to work on.")

        # TODO: expand burnin data list to include all useful keys
        burnin_data = {
        version = ''
        if instance.context.data.get('version'):
            version = "v" + str(instance.context.data['version'])

        prep_data = {
            "username": instance.context.data['user'],
            "asset": os.environ['AVALON_ASSET'],
            "task": os.environ['AVALON_TASK'],
            "start_frame": int(instance.data['startFrame']),
            "version": "v" + str(instance.context.data['version'])
            "version": version
        }
        self.log.debug("__ prep_data: {}".format(prep_data))
        for i, repre in enumerate(instance.data["representations"]):
            self.log.debug("__ i: `{}`, repre: `{}`".format(i, repre))

        for repre in instance.data["representations"]:
            if (not repre.get("burnin", False) or
                    "burnin" not in repre.get("tags", [])):
            if "burnin" not in repre.get("tags", []):
                continue

            stagingdir = self.staging_dir(instance)
            stagingdir = repre["stagingDir"]
            filename = "{0}".format(repre["files"])

            movieFileBurnin = filename + "Burn" + ".mov"
            name = "_burnin"
            movieFileBurnin = filename.replace(".mov", "") + name + ".mov"

            full_movie_path = os.path.join(stagingdir, repre["files"])
            full_burnin_path = os.path.join(stagingdir, movieFileBurnin)
            full_movie_path = os.path.join(os.path.normpath(stagingdir), repre["files"])
            full_burnin_path = os.path.join(os.path.normpath(stagingdir), movieFileBurnin)
            self.log.debug("__ full_burnin_path: {}".format(full_burnin_path))

            burnin_data = {
                "input": full_movie_path.replace("\\", "/"),
                "output": full_burnin_path.replace("\\", "/"),
                "burnin_data": burnin_data
            }
                "burnin_data": prep_data
            }

            self.log.debug("__ burnin_data2: {}".format(burnin_data))

            json_data = json.dumps(burnin_data)
            scriptpath = os.path.join(os.environ['PYPE_MODULE_ROOT'],
            scriptpath = os.path.normpath(os.path.join(os.environ['PYPE_MODULE_ROOT'],
                                          "pype",
                                          "scripts",
                                          "otio_burnin.py")
                                          "otio_burnin.py"))

            p = subprocess.Popen(
                ['python', scriptpath, json_data]
            )
            p.wait()
            self.log.debug("__ scriptpath: {}".format(scriptpath))
            self.log.debug("__ EXE: {}".format(os.getenv("PYPE_PYTHON_EXE")))

            repre['files']: movieFileBurnin
            try:
                p = subprocess.Popen(
                    [os.getenv("PYPE_PYTHON_EXE"), scriptpath, json_data]
                )
                p.wait()
                if not os.path.isfile(full_burnin_path):
                    raise RuntimeError("File not existing: {}".format(full_burnin_path))
            except Exception as e:
                raise RuntimeError("Burnin script didn't work: `{}`".format(e))

            if os.path.exists(full_burnin_path):
                repre_update = {
                    "files": movieFileBurnin,
                    "name": repre["name"]
                }
                instance.data["representations"][i].update(repre_update)

                # removing the source mov file
                os.remove(full_movie_path)
                self.log.debug("Removed: `{}`".format(full_movie_path))
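The contract with otio_burnin.py is a single JSON argument. A sketch of the payload and invocation with made-up paths and values; the keys ("input", "output", "burnin_data") follow the plugin above:

    import json
    import os
    import subprocess

    burnin_job = {
        "input": "C:/staging/shot010_review.mov",
        "output": "C:/staging/shot010_review_burnin.mov",
        "burnin_data": {
            "username": "jdoe",
            "asset": "shot010",
            "task": "comp",
            "start_frame": 1001,
            "version": "v003",
        },
    }
    args = [os.getenv("PYPE_PYTHON_EXE", "python"),
            "otio_burnin.py", json.dumps(burnin_job)]
    subprocess.Popen(args).wait()
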
@@ -1,86 +0,0 @@
import os
import pyblish.api
import subprocess
from pype.vendor import clique


class ExtractQuicktimeEXR(pyblish.api.InstancePlugin):
    """Resolve any dependency issues

    This plug-in resolves any paths which, if not updated might break
    the published file.

    The order of families is important, when working with lookdev you want to
    first publish the texture, update the texture paths in the nodes and then
    publish the shading network. Same goes for file dependent assets.
    """

    label = "Extract Quicktime"
    order = pyblish.api.ExtractorOrder
    families = ["imagesequence", "render", "write", "source"]
    hosts = ["shell"]

    def process(self, instance):
        fps = instance.data.get("fps")
        start = instance.data.get("startFrame")
        stagingdir = os.path.normpath(instance.data.get("stagingDir"))

        collected_frames = os.listdir(stagingdir)
        collections, remainder = clique.assemble(collected_frames)

        full_input_path = os.path.join(
            stagingdir, collections[0].format('{head}{padding}{tail}')
        )
        self.log.info("input {}".format(full_input_path))

        filename = collections[0].format('{head}')
        if not filename.endswith('.'):
            filename += "."
        movFile = filename + "mov"
        full_output_path = os.path.join(stagingdir, movFile)

        self.log.info("output {}".format(full_output_path))

        config_data = instance.context.data['output_repre_config']

        proj_name = os.environ.get('AVALON_PROJECT', '__default__')
        profile = config_data.get(proj_name, config_data['__default__'])

        input_args = []
        # overrides output file
        input_args.append("-y")
        # preset's input data
        input_args.extend(profile.get('input', []))
        # necessary input data
        input_args.append("-start_number {}".format(start))
        input_args.append("-i {}".format(full_input_path))
        input_args.append("-framerate {}".format(fps))

        output_args = []
        # preset's output data
        output_args.extend(profile.get('output', []))
        # output filename
        output_args.append(full_output_path)
        mov_args = [
            "ffmpeg",
            " ".join(input_args),
            " ".join(output_args)
        ]
        subprocess_mov = " ".join(mov_args)
        sub_proc = subprocess.Popen(subprocess_mov)
        sub_proc.wait()

        if not os.path.isfile(full_output_path):
            raise("Quicktime wasn't created successfully")

        if "representations" not in instance.data:
            instance.data["representations"] = []

        representation = {
            'name': 'mov',
            'ext': 'mov',
            'files': movFile,
            "stagingDir": stagingdir,
            "preview": True
        }
        instance.data["representations"].append(representation)

pype/plugins/global/publish/extract_review.py (new file, 183 lines)
@@ -0,0 +1,183 @@
import os
import pyblish.api
import subprocess
from pype.vendor import clique
from pypeapp import config


class ExtractReview(pyblish.api.InstancePlugin):
    """Extracting Review mov file for Ftrack

    Compulsory attribute of representation is tags list with "review",
    otherwise the representation is ignored.

    All new representations are created and encoded by ffmpeg following
    presets found in `pype-config/presets/plugins/global/publish.json:ExtractReview:outputs`.
    To change the file extension filter values use preset's attribute `ext_filter`.
    """

    label = "Extract Review"
    order = pyblish.api.ExtractorOrder + 0.02
    families = ["review"]

    def process(self, instance):
        # adding plugin attributes from presets
        publish_presets = config.get_presets()["plugins"]["global"]["publish"]
        plugin_attrs = publish_presets[self.__class__.__name__]
        output_profiles = plugin_attrs.get("outputs", {})

        inst_data = instance.data
        fps = inst_data.get("fps")
        start_frame = inst_data.get("startFrame")

        self.log.debug("Families In: `{}`".format(instance.data["families"]))

        # get representations and loop them
        representations = instance.data["representations"]

        # filter out mov and img sequences
        representations_new = representations[:]
        for repre in representations:
            if repre['ext'] in plugin_attrs["ext_filter"]:
                tags = repre.get("tags", [])

                self.log.info("Try repre: {}".format(repre))

                if "review" in tags:
                    staging_dir = repre["stagingDir"]
                    for name, profile in output_profiles.items():
                        self.log.debug("Profile name: {}".format(name))

                        ext = profile.get("ext", None)
                        if not ext:
                            ext = "mov"
                            self.log.warning(
                                "`ext` attribute not in output profile. Setting to default ext: `mov`")

                        self.log.debug("instance.families: {}".format(instance.data['families']))
                        self.log.debug("profile.families: {}".format(profile['families']))

                        if any(item in instance.data['families'] for item in profile['families']):
                            if isinstance(repre["files"], list):
                                collections, remainder = clique.assemble(
                                    repre["files"])

                                full_input_path = os.path.join(
                                    staging_dir, collections[0].format(
                                        '{head}{padding}{tail}')
                                )

                                filename = collections[0].format('{head}')
                                if filename.endswith('.'):
                                    filename = filename[:-1]
                            else:
                                full_input_path = os.path.join(
                                    staging_dir, repre["files"])
                                filename = repre["files"].split(".")[0]

                            repr_file = filename + "_{0}.{1}".format(name, ext)

                            full_output_path = os.path.join(
                                staging_dir, repr_file)

                            self.log.info("input {}".format(full_input_path))
                            self.log.info("output {}".format(full_output_path))

                            repre_new = repre.copy()

                            new_tags = tags[:]
                            p_tags = profile.get('tags', [])
                            self.log.info("p_tags: `{}`".format(p_tags))
                            # add families
                            [instance.data["families"].append(t)
                             for t in p_tags
                             if t not in instance.data["families"]]
                            # add to tags
                            [new_tags.append(t) for t in p_tags
                             if t not in new_tags]

                            self.log.info("new_tags: `{}`".format(new_tags))

                            input_args = []

                            # overrides output file
                            input_args.append("-y")

                            # preset's input data
                            input_args.extend(profile.get('input', []))

                            # necessary input data
                            # adds start arg only if image sequence
                            if "mov" not in repre_new['ext']:
                                input_args.append("-start_number {0} -framerate {1}".format(
                                    start_frame, fps))

                            input_args.append("-i {}".format(full_input_path))

                            output_args = []
                            # preset's output data
                            output_args.extend(profile.get('output', []))

                            # letter_box
                            # TODO: add to documentation
                            lb = profile.get('letter_box', None)
                            if lb:
                                output_args.append(
                                    "-filter:v drawbox=0:0:iw:round((ih-(iw*(1/{0})))/2):t=fill:c=black,drawbox=0:ih-round((ih-(iw*(1/{0})))/2):iw:round((ih-(iw*(1/{0})))/2):t=fill:c=black".format(lb))

                            # output filename
                            output_args.append(full_output_path)
                            mov_args = [
                                "ffmpeg",
                                " ".join(input_args),
                                " ".join(output_args)
                            ]
                            subprcs_cmd = " ".join(mov_args)

                            # run subprocess
                            self.log.debug("{}".format(subprcs_cmd))
                            sub_proc = subprocess.Popen(
                                subprcs_cmd,
                                stdout=subprocess.PIPE,
                                stderr=subprocess.STDOUT,
                                stdin=subprocess.PIPE,
                                cwd=os.path.dirname(output_args[-1])
                            )

                            output = sub_proc.communicate()[0]

                            if not os.path.isfile(full_output_path):
                                raise ValueError(
                                    "Quicktime wasn't created successfully: "
                                    "{}".format(output)
                                )

                            # create representation data
                            repre_new.update({
                                'name': name,
                                'ext': ext,
                                'files': repr_file,
                                "tags": new_tags,
                                "outputName": name
                            })
                            if repre_new.get('preview'):
                                repre_new.pop("preview")
                            if repre_new.get('thumbnail'):
                                repre_new.pop("thumbnail")

                            # adding representation
                            representations_new.append(repre_new)
                            # if "delete" in tags:
                            #     if "mov" in full_input_path:
                            #         os.remove(full_input_path)
                            #         self.log.debug("Removed: `{}`".format(full_input_path))
                        else:
                            continue
                else:
                    continue

        self.log.debug(
            "new representations: {}".format(representations_new))
        instance.data["representations"] = representations_new

        self.log.debug("Families Out: `{}`".format(instance.data["families"]))
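The preset block this plugin reads could look like the following sketch, written as the equivalent Python structure. The key names ("outputs", "ext_filter", "input", "output", "tags", "families", "ext", "letter_box") follow the code above; every concrete value is invented for illustration:

    extract_review_presets = {
        "ext_filter": ["exr", "dpx", "jpg", "mov"],
        "outputs": {
            "h264": {
                "ext": "mov",
                "families": ["review"],
                "tags": ["review", "burnin"],
                "input": ["-gamma 2.2"],          # extra ffmpeg input args
                "output": ["-pix_fmt yuv420p"],   # extra ffmpeg output args
                "letter_box": 2.35,               # optional aspect for drawbox
            },
        },
    }
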
@@ -99,18 +99,18 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
        #       \  /
        #        o __/
        #
        for result in context.data["results"]:
            if not result["success"]:
                self.log.debug(result)
                exc_type, exc_value, exc_traceback = result["error_info"]
                extracted_traceback = traceback.extract_tb(exc_traceback)[-1]
                self.log.debug(
                    "Error at line {}: \"{}\"".format(
                        extracted_traceback[1], result["error"]
                    )
                )
        assert all(result["success"] for result in context.data["results"]), (
            "Atomicity not held, aborting.")
        # for result in context.data["results"]:
        #     if not result["success"]:
        #         self.log.debug(result)
        #         exc_type, exc_value, exc_traceback = result["error_info"]
        #         extracted_traceback = traceback.extract_tb(exc_traceback)[-1]
        #         self.log.debug(
        #             "Error at line {}: \"{}\"".format(
        #                 extracted_traceback[1], result["error"]
        #             )
        #         )
        # assert all(result["success"] for result in context.data["results"]), (
        #     "Atomicity not held, aborting.")

        # Assemble
        #

@@ -225,17 +225,6 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
        # hierarchy = os.path.sep.join(hierarchy)
        hierarchy = os.path.join(*parents)

        template_data = {"root": root,
                         "project": {"name": PROJECT,
                                     "code": project['data']['code']},
                         "silo": asset['silo'],
                         "task": TASK,
                         "asset": ASSET,
                         "family": instance.data['family'],
                         "subset": subset["name"],
                         "version": int(version["name"]),
                         "hierarchy": hierarchy}

        anatomy = instance.context.data['anatomy']

        # Find the representations to transfer amongst the files

@@ -257,6 +246,17 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
        #     |  ||
        #     |_______|
        #
        # create template data for Anatomy
        template_data = {"root": root,
                         "project": {"name": PROJECT,
                                     "code": project['data']['code']},
                         "silo": asset['silo'],
                         "task": TASK,
                         "asset": ASSET,
                         "family": instance.data['family'],
                         "subset": subset["name"],
                         "version": int(version["name"]),
                         "hierarchy": hierarchy}

        files = repre['files']
        if repre.get('stagingDir'):

@@ -271,47 +271,54 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
                self.log.debug(
                    "src_tail_collections: {}".format(str(src_collections)))
                src_collection = src_collections[0]

                # Assert that each member has identical suffix
                src_head = src_collection.format("{head}")
                src_tail = src_collection.format("{tail}")

                # fix dst_padding
                valid_files = [x for x in files if src_collection.match(x)]
                padd_len = len(
                    valid_files[0].replace(src_head, "").replace(src_tail, "")
                )
                src_padding_exp = "%0{}d".format(padd_len)

                test_dest_files = list()
                for i in [1, 2]:
                    template_data["representation"] = repre['ext']
                    template_data["frame"] = src_collection.format(
                        "{padding}") % i
                    template_data["frame"] = src_padding_exp % i
                    anatomy_filled = anatomy.format(template_data)

                    test_dest_files.append(
                        os.path.normpath(
                            anatomy_filled[template_name]["path"])
                    )
                self.log.debug(
                    "test_dest_files: {}".format(str(test_dest_files)))

                self.log.debug(
                    "test_dest_files: {}".format(str(test_dest_files)))

                dst_collections, remainder = clique.assemble(test_dest_files)
                dst_collection = dst_collections[0]
                dst_head = dst_collection.format("{head}")
                dst_tail = dst_collection.format("{tail}")

                repre['published_path'] = dst_collection.format()

                index_frame_start = None
                if repre.get('startFrame'):
                    frame_start_padding = len(str(
                        repre.get('endFrame')))
                    index_frame_start = repre.get('startFrame')

                dst_padding_exp = src_padding_exp
                for i in src_collection.indexes:
                    src_padding = src_collection.format("{padding}") % i
                    src_padding = src_padding_exp % i
                    src_file_name = "{0}{1}{2}".format(
                        src_head, src_padding, src_tail)

                    dst_padding = dst_collection.format("{padding}") % i
                    dst_padding = src_padding_exp % i

                    if index_frame_start:
                        dst_padding = "%0{}d".format(
                            frame_start_padding) % index_frame_start
                        dst_padding_exp = "%0{}d".format(frame_start_padding)
                        dst_padding = dst_padding_exp % index_frame_start
                        index_frame_start += 1

                    dst = "{0}{1}{2}".format(dst_head, dst_padding, dst_tail)
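For readers unfamiliar with clique, the padding logic in the hunk above is easier to follow in isolation. A minimal standalone sketch (the file names are made up and are not part of this commit):

import clique

files = ["shot_v01.1001.exr", "shot_v01.1002.exr", "shot_v01.1003.exr"]
collections, remainder = clique.assemble(files)
src_collection = collections[0]

src_head = src_collection.format("{head}")  # "shot_v01."
src_tail = src_collection.format("{tail}")  # ".exr"

# Measure the padding from a real member, mirroring the hunk above.
valid_files = [f for f in files if src_collection.match(f)]
padd_len = len(valid_files[0].replace(src_head, "").replace(src_tail, ""))
src_padding_exp = "%0{}d".format(padd_len)  # "%04d"

for i in src_collection.indexes:
    # Rebuild each file name from head + padded index + tail.
    print(src_head + (src_padding_exp % i) + src_tail)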
@@ -320,6 +327,7 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
                    self.log.debug("source: {}".format(src))
                    instance.data["transfers"].append([src, dst])

                repre['published_path'] = "{0}{1}{2}".format(dst_head, dst_padding_exp, dst_tail)
                # for imagesequence version data
                hashes = '#' * len(dst_padding)
                dst = os.path.normpath("{0}{1}{2}".format(
@@ -342,6 +350,9 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):

                template_data["representation"] = repre['ext']

                if repre.get("outputName"):
                    template_data["output"] = repre['outputName']

                src = os.path.join(stagingdir, fname)
                anatomy_filled = anatomy.format(template_data)
                dst = os.path.normpath(
@@ -376,7 +387,7 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
                "representation": repre['ext']
            }
            }
            self.log.debug("__ _representation: {}".format(representation))
            self.log.debug("__ representation: {}".format(representation))
            destination_list.append(dst)
            self.log.debug("__ destination_list: {}".format(destination_list))
            instance.data['destination_list'] = destination_list
@@ -276,7 +276,7 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
            "families": ["render"],
            "source": source,
            "user": context.data["user"],

            "version": context.data["version"],
            # Optional metadata (for debugging)
            "metadata": {
                "instance": data,
@@ -8,7 +8,7 @@ class CreateModel(avalon.maya.Creator):
    label = "Model"
    family = "model"
    icon = "cube"
    defaults = [ "_MD", "_HD", "_LD", "Main", "Proxy",]
    defaults = ["Main", "Proxy", "_MD", "_HD", "_LD"]

    def __init__(self, *args, **kwargs):
        super(CreateModel, self).__init__(*args, **kwargs)
@@ -7,9 +7,9 @@ from Qt import QtWidgets
class ImagePlaneLoader(api.Loader):
    """Specific loader of plate for image planes on selected camera."""

    families = ["plate"]
    families = ["plate", "render"]
    label = "Create imagePlane on selected camera."
    representations = ["mov"]
    representations = ["mov", "exr"]
    icon = "image"
    color = "orange"
@@ -58,12 +58,10 @@ class ImagePlaneLoader(api.Loader):
            camera=camera, showInAllViews=False
        )
        image_plane_shape.depth.set(image_plane_depth)
        # Need to get "type" by string, because it's a method as well.
        pc.Attribute(image_plane_shape + ".type").set(2)

        image_plane_shape.imageName.set(
            context["representation"]["data"]["path"]
        )
        image_plane_shape.useFrameExtension.set(1)

        start_frame = pc.playbackOptions(q=True, min=True)
        end_frame = pc.playbackOptions(q=True, max=True)
@@ -71,6 +69,29 @@ class ImagePlaneLoader(api.Loader):
        image_plane_shape.frameOffset.set(1 - start_frame)
        image_plane_shape.frameIn.set(start_frame)
        image_plane_shape.frameOut.set(end_frame)
        image_plane_shape.useFrameExtension.set(1)

        if context["representation"]["name"] == "mov":
            # Need to get "type" by string, because it's a method as well.
            pc.Attribute(image_plane_shape + ".type").set(2)

        # Ask user whether to use sequence or still image.
        if context["representation"]["name"] == "exr":
            reply = QtWidgets.QMessageBox.information(
                None,
                "Frame Hold.",
                "Hold image sequence on first frame?",
                QtWidgets.QMessageBox.Ok,
                QtWidgets.QMessageBox.Cancel
            )
            if reply == QtWidgets.QMessageBox.Ok:
                pc.delete(
                    image_plane_shape.listConnections(type="expression")[0]
                )
                image_plane_shape.frameExtension.set(start_frame)

        # Ensure OpenEXRLoader plugin is loaded.
        pc.loadPlugin("OpenEXRLoader.mll", quiet=True)

        new_nodes.extend(
            [image_plane_transform.name(), image_plane_shape.name()]
@@ -211,6 +211,7 @@ class CollectLook(pyblish.api.InstancePlugin):
    families = ["look"]
    label = "Collect Look"
    hosts = ["maya"]
    maketx = True

    def process(self, instance):
        """Collect the Look in the instance with the correct layer settings"""
@@ -219,8 +220,8 @@ class CollectLook(pyblish.api.InstancePlugin):
        self.collect(instance)

        # make ftrack publishable
        instance.data["families"] = ['ftrack']
        instance.data['maketx'] = True
        instance.data['maketx'] = self.maketx
        self.log.info('maketx: {}'.format(self.maketx))

    def collect(self, instance):
@@ -8,7 +8,8 @@ from pype.maya import lib
class CollectRenderableCamera(pyblish.api.InstancePlugin):
    """Collect the renderable camera(s) for the render layer"""

    order = pyblish.api.CollectorOrder + 0.01
    # Offset to be after renderlayer collection.
    order = pyblish.api.CollectorOrder + 0.02
    label = "Collect Renderable Camera(s)"
    hosts = ["maya"]
    families = ["vrayscene",
@@ -56,6 +56,8 @@ class CollectReview(pyblish.api.InstancePlugin):
        # data["publish"] = False
        data['startFrameReview'] = instance.data['startFrame']
        data['endFrameReview'] = instance.data['endFrame']
        data['startFrame'] = instance.data['startFrame']
        data['endFrame'] = instance.data['endFrame']
        data['handles'] = instance.data['handles']
        data['step'] = instance.data['step']
        data['fps'] = instance.data['fps']
@@ -99,7 +99,6 @@ class ExtractQuicktime(pype.api.Extractor):
        playblast = capture_gui.lib.capture_scene(preset)

        self.log.info("file list {}".format(playblast))
        # self.log.info("Calculating HUD data overlay")

        collected_frames = os.listdir(stagingdir)
        collections, remainder = clique.assemble(collected_frames)
@@ -107,61 +106,19 @@ class ExtractQuicktime(pype.api.Extractor):
            stagingdir, collections[0].format('{head}{padding}{tail}'))
        self.log.info("input {}".format(input_path))

        movieFile = filename + ".mov"
        movieFileBurnin = filename + "Burn" + ".mov"

        full_movie_path = os.path.join(stagingdir, movieFile)
        full_burnin_path = os.path.join(stagingdir, movieFileBurnin)
        self.log.info("output {}".format(full_movie_path))
        with avalon.maya.suspended_refresh():
            try:
                (
                    ffmpeg
                    .input(input_path, framerate=fps, start_number=int(start))
                    .output(full_movie_path)
                    .run(overwrite_output=True,
                         capture_stdout=True,
                         capture_stderr=True)
                )
            except ffmpeg.Error as e:
                ffmpeg_error = 'ffmpeg error: {}'.format(e.stderr)
                self.log.error(ffmpeg_error)
                raise RuntimeError(ffmpeg_error)

        version = instance.context.data['version']

        burnin_data = {
            "input": full_movie_path.replace("\\", "/"),
            "output": full_burnin_path.replace("\\", "/"),
            "burnin_data": {
                "username": instance.context.data['user'],
                "asset": os.environ['AVALON_ASSET'],
                "task": os.environ['AVALON_TASK'],
                "start_frame": int(start),
                "version": "v" + str(version)
            }
        }

        json_data = json.dumps(burnin_data)
        scriptpath = os.path.join(os.environ['PYPE_MODULE_ROOT'], "pype", "scripts", "otio_burnin.py")

        p = subprocess.Popen(
            ['python', scriptpath, json_data]
        )
        p.wait()

        if "representations" not in instance.data:
            instance.data["representations"] = []

        representation = {
            'name': 'mov',
            'ext': 'mov',
            'files': movieFileBurnin,
            'files': collected_frames,
            "stagingDir": stagingdir,
            'startFrame': start,
            'endFrame': end,
            'frameRate': fps,
            'preview': True
            'preview': True,
            'tags': ['review']
        }
        instance.data["representations"].append(representation)
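The removed block above drives ffmpeg through the vendored ffmpeg-python bindings. As a self-contained sketch of the same call pattern (the paths, frame rate and start number here are invented for illustration):

import ffmpeg

input_path = "/tmp/playblast.%04d.png"   # hypothetical image sequence
output_path = "/tmp/review.mov"          # hypothetical output

try:
    (
        ffmpeg
        .input(input_path, framerate=25, start_number=1001)
        .output(output_path, pix_fmt="yuv420p")
        .run(overwrite_output=True, capture_stdout=True, capture_stderr=True)
    )
except ffmpeg.Error as exc:
    # stderr is available on the exception because capture_stderr=True
    # was passed to run().
    raise RuntimeError("ffmpeg error: {}".format(exc.stderr))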
@@ -40,6 +40,10 @@ class ValidateSingleShader(pyblish.api.InstancePlugin):
            shading_engines = cmds.listConnections(shape,
                                                   destination=True,
                                                   type="shadingEngine") or []

            # Only interested in unique shading engines.
            shading_engines = list(set(shading_engines))

            if not shading_engines:
                no_shaders.append(shape)
            elif len(shading_engines) > 1:
@@ -1,4 +1,5 @@
import maya.mel as mel
import pymel.core as pm

import pyblish.api
import pype.api
@@ -18,9 +19,15 @@ class ValidateRenderImageRule(pyblish.api.InstancePlugin):
    label = "Images File Rule (Workspace)"
    hosts = ["maya"]
    families = ["renderlayer"]
    actions = [pype.api.RepairAction]

    def process(self, instance):

        assert get_file_rule("images") == "renders", (
            "Workspace's `images` file rule must be set to: renders"
        )

    @classmethod
    def repair(cls, instance):
        pm.workspace.fileRules["images"] = "renders"
        pm.system.Workspace.save()
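The repair added above goes through pymel; the same rule can also be set with maya.cmds. A sketch, assuming it runs inside a Maya session:

from maya import cmds

# fileRule takes a (rule, value) pair; this mirrors the pymel repair above.
cmds.workspace(fileRule=("images", "renders"))
cmds.workspace(saveWorkspace=True)

# Verify the rule the validator asserts on.
assert cmds.workspace(fileRuleEntry="images") == "renders"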
@@ -35,7 +35,7 @@ class ValidateRenderSettings(pyblish.api.InstancePlugin):

    DEFAULT_PADDING = 4
    RENDERER_PREFIX = {"vray": "<Scene>/<Layer>/<Layer>"}
    DEFAULT_PREFIX = "<Scene>/<RenderLayer>/<RenderLayer>"
    DEFAULT_PREFIX = "<Scene>/<RenderLayer>/<RenderLayer>_<RenderPass>"

    def process(self, instance):
46 pype/plugins/nuke/_publish_unused/collect_render_target.py Normal file
@@ -0,0 +1,46 @@
import pyblish.api


@pyblish.api.log
class CollectRenderTarget(pyblish.api.InstancePlugin):
    """Collect families for all instances"""

    order = pyblish.api.CollectorOrder + 0.2
    label = "Collect Render Target"
    hosts = ["nuke", "nukeassist"]
    families = ['write']

    def process(self, instance):

        node = instance[0]

        self.log.info('processing {}'.format(node))

        families = []
        if instance.data.get('families'):
            families += instance.data['families']

        # set for ftrack to accept
        # instance.data["families"] = ["ftrack"]

        if node["render"].value():
            # dealing with local/farm rendering
            if node["render_farm"].value():
                families.append("render.farm")
            else:
                families.append("render.local")
        else:
            families.append("render.frames")
            # to ignore staging dir op in integrate
            instance.data['transfer'] = False

        families.append('ftrack')

        instance.data["families"] = families

        # Sort/grouped by family (preserving local index)
        instance.context[:] = sorted(instance.context, key=self.sort_by_family)

    def sort_by_family(self, instance):
        """Sort by family"""
        return instance.data.get("families", instance.data.get("family"))
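The knob checks in process() reduce to a small routing table. A standalone distillation with plain booleans standing in for the Nuke knobs (illustrative only, not part of the new file):

def route_families(render, render_farm, families=None):
    # Mirrors the branching above: local/farm when rendering, frames otherwise.
    families = list(families or [])
    if render:
        families.append("render.farm" if render_farm else "render.local")
    else:
        families.append("render.frames")
    families.append("ftrack")
    return families

print(route_families(render=True, render_farm=False))   # ['render.local', 'ftrack']
print(route_families(render=False, render_farm=False))  # ['render.frames', 'ftrack']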
@@ -27,8 +27,8 @@ class ExtractScript(pype.api.Extractor):
        shutil.copy(current_script, path)

        if "representations" not in instance.data:
            instance.data["representations"] = []

        instance.data["representations"] = list()

        representation = {
            'name': 'nk',
            'ext': '.nk',
@@ -1,9 +1,7 @@
from collections import OrderedDict
import avalon.api
import avalon.nuke
from pype.nuke import (
    create_write_node
)
from pype.nuke.lib import create_write_node
from pype import api as pype
from pypeapp import config
@@ -94,9 +94,9 @@ class LoadSequence(api.Loader):

        first = version_data.get("startFrame", None)
        last = version_data.get("endFrame", None)
        handles = version_data.get("handles", None)
        handle_start = version_data.get("handleStart", None)
        handle_end = version_data.get("handleEnd", None)
        handles = version_data.get("handles", 0)
        handle_start = version_data.get("handleStart", 0)
        handle_end = version_data.get("handleEnd", 0)

        # fix handle start and end if none are available
        if not handle_start and not handle_end:
@@ -130,10 +130,10 @@ class LoadSequence(api.Loader):
            r["colorspace"].setValue(str(colorspace))

            loader_shift(r, first, relative=True)
            r["origfirst"].setValue(first)
            r["first"].setValue(first)
            r["origlast"].setValue(last)
            r["last"].setValue(last)
            r["origfirst"].setValue(int(first))
            r["first"].setValue(int(first))
            r["origlast"].setValue(int(last))
            r["last"].setValue(int(last))

            # add additional metadata from the version to imprint to Avalon knob
            add_keys = ["startFrame", "endFrame", "handles",
@@ -65,11 +65,11 @@ class CollectNukeInstances(pyblish.api.ContextPlugin):
                "name": node.name(),
                "subset": subset,
                "family": avalon_knob_data["family"],
                "families": [family],
                "families": [avalon_knob_data["family"], family],
                "avalonKnob": avalon_knob_data,
                "publish": node.knob('publish').value(),
                "step": 1,
                "fps": int(nuke.root()['fps'].value())
                "fps": nuke.root()['fps'].value()

            })
@@ -11,7 +11,7 @@ class CollectNukeWrites(pyblish.api.InstancePlugin):
    order = pyblish.api.CollectorOrder + 0.1
    label = "Collect Writes"
    hosts = ["nuke", "nukeassist"]
    families = ["render.local", "render", "render.farm"]
    families = ["render", "render.local", "render.farm"]

    def process(self, instance):
@@ -66,19 +66,20 @@ class CollectNukeWrites(pyblish.api.InstancePlugin):
            instance.data['families'].append('ftrack')
        if "representations" not in instance.data:
            instance.data["representations"] = list()
        try:
            collected_frames = os.listdir(output_dir)

        representation = {
            'name': ext,
            'ext': ext,
            'files': collected_frames,
            "stagingDir": output_dir,
            "anatomy_template": "render"
        }
        instance.data["representations"].append(representation)

        try:
            collected_frames = os.listdir(output_dir)
            representation['files'] = collected_frames
            instance.data["representations"].append(representation)
        except Exception:
            instance.data["representations"].append(representation)
            self.log.debug("couldn't collect frames: {}".format(label))

        if 'render.local' in instance.data['families']:
@@ -96,5 +97,4 @@ class CollectNukeWrites(pyblish.api.InstancePlugin):
            "colorspace": node["colorspace"].value(),
        })


        self.log.debug("instance.data: {}".format(instance.data))
@@ -21,7 +21,6 @@ class NukeRenderLocal(pype.api.Extractor):

    def process(self, instance):
        node = instance[0]
        context = instance.context

        self.log.debug("instance collected: {}".format(instance.data))
@@ -29,12 +28,6 @@ class NukeRenderLocal(pype.api.Extractor):
        last_frame = instance.data.get("endFrame", None)
        node_subset_name = instance.data.get("name", None)

        # swap path to stageDir
        temp_dir = self.staging_dir(instance).replace("\\", "/")
        output_dir = instance.data.get("outputDir")
        path = node['file'].value()
        node['file'].setValue(path.replace(output_dir, temp_dir))

        self.log.info("Starting render")
        self.log.info("Start frame: {}".format(first_frame))
        self.log.info("End frame: {}".format(last_frame))
@@ -46,27 +39,26 @@ class NukeRenderLocal(pype.api.Extractor):
            int(last_frame)
        )

        # swap path back to publish path
        path = node['file'].value()
        node['file'].setValue(path.replace(temp_dir, output_dir))
        out_dir = os.path.dirname(path)
        ext = node["file_type"].value()

        if "representations" not in instance.data:
            instance.data["representations"] = []

        collected_frames = os.listdir(temp_dir)
        collected_frames = os.listdir(out_dir)
        repre = {
            'name': ext,
            'ext': ext,
            'files': collected_frames,
            "stagingDir": temp_dir,
            "stagingDir": out_dir,
            "anatomy_template": "render"
        }
        instance.data["representations"].append(repre)

        self.log.info("Extracted instance '{0}' to: {1}".format(
            instance.name,
            temp_dir
            out_dir
        ))

        instance.data['family'] = 'render'
@@ -2,10 +2,9 @@ import os
import nuke
import pyblish.api
import pype
from pype.vendor import ffmpeg


class ExtractDataForReview(pype.api.Extractor):
class ExtractReviewData(pype.api.Extractor):
    """Extracts movie and thumbnail with baked in luts

    must be run after extract_render_local.py
@@ -13,8 +12,7 @@ class ExtractDataForReview(pype.api.Extractor):
    """

    order = pyblish.api.ExtractorOrder + 0.01
    label = "Extract Review"
    optional = True
    label = "Extract Review Data"

    families = ["review"]
    hosts = ["nuke"]
@@ -35,63 +33,15 @@ class ExtractDataForReview(pype.api.Extractor):
        if "still" not in instance.data["families"]:
            self.render_review_representation(instance,
                                              representation="mov")
            self.log.debug("review mov:")
            self.transcode_mov(instance)
            self.log.debug("instance.data: {}".format(instance.data))
            self.render_review_representation(instance,
                                              representation="jpeg")
        else:
            self.log.debug("instance: {}".format(instance))
            self.render_review_representation(instance, representation="jpeg")

        # Restore selection
        [i["selected"].setValue(False) for i in nuke.allNodes()]
        [i["selected"].setValue(True) for i in selection]

    def transcode_mov(self, instance):
        collection = instance.data["collection"]
        stagingDir = instance.data["stagingDir"].replace("\\", "/")
        file_name = collection.format("{head}mov")

        review_mov = os.path.join(stagingDir, file_name).replace("\\", "/")

        self.log.info("transcoding review mov: {0}".format(review_mov))
        if instance.data.get("baked_colorspace_movie"):
            input_movie = instance.data["baked_colorspace_movie"]
            out, err = (
                ffmpeg
                .input(input_movie)
                .output(
                    review_mov,
                    pix_fmt='yuv420p',
                    crf=18,
                    timecode="00:00:00:01"
                )
                .overwrite_output()
                .run()
            )

            self.log.debug("Removing `{0}`...".format(
                instance.data["baked_colorspace_movie"]))
            os.remove(instance.data["baked_colorspace_movie"])

        if "representations" not in instance.data:
            instance.data["representations"] = []

        representation = {
            'name': 'review',
            'ext': 'mov',
            'files': file_name,
            "stagingDir": stagingDir,
            "anatomy_template": "render",
            "thumbnail": False,
            "preview": True,
            'startFrameReview': instance.data['startFrame'],
            'endFrameReview': instance.data['endFrame'],
            'frameRate': instance.context.data["framerate"]
        }
        instance.data["representations"].append(representation)

    def render_review_representation(self,
                                     instance,
                                     representation="mov"):
@@ -132,10 +82,15 @@ class ExtractDataForReview(pype.api.Extractor):
            temporary_nodes.append(node)

        reformat_node = nuke.createNode("Reformat")
        reformat_node["format"].setValue("HD_1080")
        reformat_node["resize"].setValue("fit")
        reformat_node["filter"].setValue("Lanczos6")
        reformat_node["black_outside"].setValue(True)

        ref_node = self.nodes.get("Reformat", None)
        if ref_node:
            for k, v in ref_node:
                self.log.debug("k,v: {0}:{1}".format(k, v))
                if isinstance(v, unicode):
                    v = str(v)
                reformat_node[k].setValue(v)

        reformat_node.setInput(0, previous_node)
        previous_node = reformat_node
        temporary_nodes.append(reformat_node)
@@ -162,6 +117,7 @@ class ExtractDataForReview(pype.api.Extractor):

        if representation in "mov":
            file = fhead + "baked.mov"
            name = "baked"
            path = os.path.join(stagingDir, file).replace("\\", "/")
            self.log.debug("Path: {}".format(path))
            instance.data["baked_colorspace_movie"] = path
@@ -170,11 +126,11 @@ class ExtractDataForReview(pype.api.Extractor):
            write_node["raw"].setValue(1)
            write_node.setInput(0, previous_node)
            temporary_nodes.append(write_node)
            thumbnail = False
            preview = True
            tags = ["review", "delete"]

        elif representation in "jpeg":
            file = fhead + "jpeg"
            name = "thumbnail"
            path = os.path.join(stagingDir, file).replace("\\", "/")
            instance.data["thumbnail"] = path
            write_node["file"].setValue(path)
@@ -182,31 +138,29 @@ class ExtractDataForReview(pype.api.Extractor):
            write_node["raw"].setValue(1)
            write_node.setInput(0, previous_node)
            temporary_nodes.append(write_node)
            thumbnail = True
            preview = False
            tags = ["thumbnail"]

            # retime for
            first_frame = int(last_frame) / 2
            last_frame = int(last_frame) / 2
        # add into files for integration as representation

        if "representations" not in instance.data:
            instance.data["representations"] = []

        repre = {
            'name': representation,
            'ext': representation,
            'files': file,
            "stagingDir": stagingDir,
            "anatomy_template": "render",
            "thumbnail": thumbnail,
            "preview": preview
        }
        instance.data["representations"].append(repre)
        repre = {
            'name': name,
            'ext': representation,
            'files': file,
            "stagingDir": stagingDir,
            "startFrame": first_frame,
            "endFrame": last_frame,
            "anatomy_template": "render",
            "tags": tags
        }
        instance.data["representations"].append(repre)

        # Render frames
        nuke.execute(write_node.name(), int(first_frame), int(last_frame))

        self.log.debug("representations: {}".format(instance.data["representations"]))

        # Clean up
        for node in temporary_nodes:
            nuke.delete(node)
@@ -100,6 +100,9 @@ class NukeSubmitDeadline(pyblish.api.InstancePlugin):

            # Resolve relative references
            "ProjectPath": workspace,

            # Only the specific write node is rendered.
            "WriteNode": instance[0].name()
        },

        # Mandatory for Deadline, may be empty
@@ -11,9 +11,12 @@ class RepairCollectionAction(pyblish.api.Action):
    icon = "wrench"

    def process(self, context, plugin):

        self.log.info(context[0])
        files_remove = [os.path.join(context[0].data["outputDir"], f)
                        for f in context[0].data["files"]]
                        for r in context[0].data.get("representations", [])
                        for f in r.get("files", [])
                        ]
        self.log.info(files_remove)
        for f in files_remove:
            os.remove(f)
            self.log.debug("removing file: {}".format(f))
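The rewritten comprehension flattens files across all representations instead of reading a single flat list. With dummy data (hypothetical paths, not from the commit) it behaves like this:

import os

data = {
    "outputDir": "/tmp/renders",
    "representations": [
        {"files": ["beauty.1001.exr", "beauty.1002.exr"]},
        {"files": ["beauty.mov"]},
    ],
}
files_remove = [
    os.path.join(data["outputDir"], f)
    for r in data.get("representations", [])
    for f in r.get("files", [])
]
# ['/tmp/renders/beauty.1001.exr', '/tmp/renders/beauty.1002.exr',
#  '/tmp/renders/beauty.mov']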
@@ -38,7 +41,7 @@ class ValidateRenderedFrames(pyblish.api.InstancePlugin):
            if not repre.get('files'):
                msg = ("no frames were collected, "
                       "you need to render them")
                self.log.error(msg)
                self.log.warning(msg)
                raise ValidationException(msg)

            collections, remainder = clique.assemble(repre["files"])
@@ -23,12 +23,24 @@ class ValidateScript(pyblish.api.InstancePlugin):

        # These attributes will be checked
        attributes = [
            "fps", "fstart", "fend",
            "resolution_width", "resolution_height", "pixel_aspect", "handle_start", "handle_end"
            "fps",
            "fstart",
            "fend",
            "resolution_width",
            "resolution_height",
            "handle_start",
            "handle_end"
        ]

        # Value of these attributes can be found on parents
        hierarchical_attributes = ["fps", "resolution_width", "resolution_height", "pixel_aspect", "handle_start", "handle_end"]
        hierarchical_attributes = [
            "fps",
            "resolution_width",
            "resolution_height",
            "pixel_aspect",
            "handle_start",
            "handle_end"
        ]

        missing_attributes = []
        asset_attributes = {}
@@ -84,8 +96,9 @@ class ValidateScript(pyblish.api.InstancePlugin):
        # Compare asset's values Nukescript X Database
        not_matching = []
        for attr in attributes:
            self.log.debug("asset vs script attribute: {0}, {1}".format(
                asset_attributes[attr], script_attributes[attr]))
            self.log.debug("asset vs script attribute \"{}\": {}, {}".format(
                attr, asset_attributes[attr], script_attributes[attr])
            )
            if asset_attributes[attr] != script_attributes[attr]:
                not_matching.append(attr)
@@ -32,16 +32,21 @@ class CollectClipHandles(api.ContextPlugin):
            if instance.data.get("main"):
                name = instance.data["asset"]
                if assets_shared.get(name):
                    self.log.debug("Adding to shared assets: `{}`".format(
                        instance.data["name"]))
                    assets_shared[name].update({
                        "handles": handles,
                        "handleStart": handle_start,
                        "handleEnd": handle_end
                    })
                    asset_shared = assets_shared.get(name)
                else:
                    asset_shared = assets_shared[name]

                self.log.debug("Adding to shared assets: `{}`".format(
                    instance.data["name"]))
                asset_shared.update({
                    "handles": handles,
                    "handleStart": handle_start,
                    "handleEnd": handle_end
                })

        for instance in filtered_instances:
            if not instance.data.get("main") or not instance.data.get("handleTag"):
            if not instance.data.get("main") and not instance.data.get("handleTag"):
                self.log.debug("Synchronize handles on: `{}`".format(
                    instance.data["name"]))
                name = instance.data["asset"]
@@ -34,6 +34,7 @@ class CollectHierarchyInstance(pyblish.api.ContextPlugin):
    def process(self, context):

        for instance in context[:]:
            assets_shared = context.data.get("assetsShared")
            tags = instance.data.get("tags", None)
            clip = instance.data["item"]
            asset = instance.data.get("asset")
@@ -139,19 +140,33 @@ class CollectHierarchyInstance(pyblish.api.ContextPlugin):
                "Clip: `{}`".format(asset)
            )

            assetsShared = {
                asset: {
                    "asset": instance.data["asset"],
                    "hierarchy": hierarchy,
                    "parents": parents,
                    "tasks": instance.data['tasks']
                }}
            self.log.debug("__ assetsShared: {}".format(assetsShared))
            # add formatted hierarchy path into instance data
            instance.data["hierarchy"] = hierarchy
            instance.data["parents"] = parents
            context.data["assetsShared"].update(
                assetsShared)

            # adding to asset shared dict
            self.log.debug("__ assets_shared: {}".format(assets_shared))
            if assets_shared.get(asset):
                self.log.debug("Adding to shared assets: `{}`".format(
                    instance.data["name"]))
                asset_shared = assets_shared.get(asset)
            else:
                asset_shared = assets_shared[asset]

            asset_shared.update({
                "asset": instance.data["asset"],
                "hierarchy": hierarchy,
                "parents": parents,
                "tasks": instance.data["tasks"]
            })

            # adding frame start if any on instance
            start_frame = instance.data.get("frameStart")
            if start_frame:
                asset_shared.update({
                    "frameStart": start_frame
                })


class CollectHierarchyContext(pyblish.api.ContextPlugin):
@@ -176,6 +191,7 @@ class CollectHierarchyContext(pyblish.api.ContextPlugin):

    def process(self, context):
        instances = context[:]
        sequence = context.data['activeSequence']
        # create hierarchyContext attr if context has none

        temp_context = {}
@@ -201,6 +217,12 @@ class CollectHierarchyContext(pyblish.api.ContextPlugin):
                instance.data["hierarchy"] = s_asset_data["hierarchy"]
                instance.data["tasks"] = s_asset_data["tasks"]

                # adding frame start if any on instance
                start_frame = s_asset_data.get("frameStart")
                if start_frame:
                    instance.data["frameStart"] = start_frame

                self.log.debug(
                    "__ instance.data[parents]: {}".format(
                        instance.data["parents"]
@@ -226,8 +248,6 @@ class CollectHierarchyContext(pyblish.api.ContextPlugin):

            # get custom attributes of the shot
            if instance.data.get("main"):
                start_frame = instance.data.get("frameStart", 0)

                in_info['custom_attributes'] = {
                    'handles': int(instance.data.get('handles')),
                    'handle_start': handle_start,
@@ -238,27 +258,30 @@ class CollectHierarchyContext(pyblish.api.ContextPlugin):
                    "edit_in": int(instance.data["startFrame"]),
                    "edit_out": int(instance.data["endFrame"])
                }
                if start_frame is not 0:
                    in_info['custom_attributes'].update({
                        'fstart': start_frame,
                        'fend': start_frame + (
                            instance.data["endFrame"] - instance.data["startFrame"])
                    })

                # adding SourceResolution if Tag was present
                s_res = instance.data.get("sourceResolution")
                if s_res and instance.data.get("main"):
                    item = instance.data["item"]
                    self.log.debug("TrackItem: `{0}`".format(
                        item))
                    width = int(item.source().mediaSource().width())
                    height = int(item.source().mediaSource().height())
                    self.log.info("Source Width and Height are: `{0} x {1}`".format(
                        width, height))
                if instance.data.get("main"):
                    width = int(sequence.format().width())
                    height = int(sequence.format().height())
                    pixel_aspect = sequence.format().pixelAspect()
                    self.log.info("Sequence Width,Height,PixelAspect are: `{0},{1},{2}`".format(
                        width, height, pixel_aspect))

                    in_info['custom_attributes'].update({
                        "resolution_width": width,
                        "resolution_height": height
                        "resolution_height": height,
                        "pixel_aspect": pixel_aspect
                    })

                    start_frame = instance.data.get("frameStart")
                    if start_frame:
                        in_info['custom_attributes'].update({
                            'fstart': start_frame,
                            'fend': start_frame + (
                                instance.data["endFrame"] -
                                instance.data["startFrame"])
                        })

                in_info['tasks'] = instance.data['tasks']

                parents = instance.data.get('parents', [])
@@ -25,50 +25,55 @@ class CollectShots(api.ContextPlugin):
            )
            continue

        # Collect data.
        data = {}
        for key, value in instance.data.iteritems():
            data[key] = value
        if instance.data.get("main"):
            # Collect data.
            data = {}
            for key, value in instance.data.iteritems():
                if key in "main":
                    continue
                data[key] = value

        data["family"] = "shot"
        data["families"] = []
        data["frameStart"] = instance.data.get("frameStart", 1)
            data["family"] = "shot"
            data["families"] = []
            data["frameStart"] = instance.data.get("frameStart", 1)

        data["subset"] = data["family"] + "Main"
            data["subset"] = data["family"] + "Main"

        data["name"] = data["subset"] + "_" + data["asset"]
            data["name"] = data["subset"] + "_" + data["asset"]

        data["label"] = data["asset"] + " - " + data["subset"] + " - tasks: {} - assetbuilds: {}".format(
            data["tasks"], [x["name"] for x in data.get("assetbuilds", [])]
        )
            data["label"] = data["asset"] + " - " + data["subset"] + " - tasks: {} - assetbuilds: {}".format(
                data["tasks"], [x["name"] for x in data.get("assetbuilds", [])]
            )

        # Get handles.
        data["handleStart"] = instance.data["handleStart"]
        data["handleEnd"] = instance.data["handleEnd"]
            # Get handles.
            data["handleStart"] = instance.data["handleStart"]
            data["handleEnd"] = instance.data["handleEnd"]

        # Frame-ranges with handles.
        data["sourceInH"] = data["sourceIn"] - data["handleStart"]
        data["sourceOutH"] = data["sourceOut"] + data["handleEnd"]
            # Frame-ranges with handles.
            data["sourceInH"] = data["sourceIn"] - data["handleStart"]
            data["sourceOutH"] = data["sourceOut"] + data["handleEnd"]

        # Get timeline frames.
        data["timelineIn"] = int(data["item"].timelineIn())
        data["timelineOut"] = int(data["item"].timelineOut())
            # Get timeline frames.
            data["timelineIn"] = int(data["item"].timelineIn())
            data["timelineOut"] = int(data["item"].timelineOut())

        # Frame-ranges with handles.
        data["timelineInHandles"] = data["timelineIn"]
        data["timelineInHandles"] -= data["handleStart"]
        data["timelineOutHandles"] = data["timelineOut"]
        data["timelineOutHandles"] += data["handleEnd"]
            # Frame-ranges with handles.
            data["timelineInHandles"] = data["timelineIn"]
            data["timelineInHandles"] -= data["handleStart"]
            data["timelineOutHandles"] = data["timelineOut"]
            data["timelineOutHandles"] += data["handleEnd"]

        # Creating comp frame range.
        data["endFrame"] = (
            data["frameStart"] + (data["sourceOut"] - data["sourceIn"])
        )
            # Creating comp frame range.
            data["endFrame"] = (
                data["frameStart"] + (data["sourceOut"] - data["sourceIn"])
            )

        # Get fps.
        sequence = instance.context.data["activeSequence"]
        data["fps"] = sequence.framerate()
            # Get fps.
            sequence = instance.context.data["activeSequence"]
            data["fps"] = sequence.framerate()

        # Create instance.
        self.log.debug("Creating instance with: {}".format(data["name"]))
        instance.context.create_instance(**data)
            # Create instance.
            self.log.debug("Creating instance with: {}".format(data["name"]))
            instance.context.create_instance(**data)

        self.log.debug("_ context: {}".format(context[:]))
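A worked example of the handle arithmetic in the block above, with made-up numbers:

# Handles extend the cut range outwards on both sides.
source_in, source_out = 1001, 1050
handle_start, handle_end = 10, 10

source_in_h = source_in - handle_start    # 991
source_out_h = source_out + handle_end    # 1060

# The comp range is rebased to frameStart.
frame_start = 1
end_frame = frame_start + (source_out - source_in)  # 50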
@@ -20,4 +20,5 @@ class CollectClipTagFrameStart(api.InstancePlugin):
            # gets only task family tags and collect labels
            if "frameStart" in t_family:
                t_number = t_metadata.get("tag.number", "")
                instance.data["frameStart"] = int(t_number)
                start_frame = int(t_number)
                instance.data["frameStart"] = start_frame
@@ -5,7 +5,7 @@ class CollectClipTagTypes(api.InstancePlugin):
    """Collect Types from Tags of selected track items."""

    order = api.CollectorOrder + 0.012
    label = "Collect Plate Type from Tag"
    label = "Collect main flag"
    hosts = ["nukestudio"]
    families = ['clip']
@@ -25,7 +25,8 @@ class CollectClipTagTypes(api.InstancePlugin):
                t_subset.capitalize())

            if "plateMain" in subset_name:
                instance.data["main"] = True
                if not instance.data.get("main"):
                    instance.data["main"] = True
                self.log.info("`plateMain` found in instance.name: `{}`".format(
                    instance.data["name"]))
                return
@@ -51,11 +51,16 @@ def __main__():
    elif platform.system().lower() == "windows":
        pype_command = "pype.bat"

    args = [os.path.join(pype_root, pype_command),
            "--node", "--publish", "--paths", " ".join(paths)]
    args = [
        os.path.join(pype_root, pype_command),
        "publish",
        " ".join(paths)
    ]

    print("Pype command: {}".format(" ".join(args)))
    subprocess.call(args, shell=True)
    exit_code = subprocess.call(args, shell=True)
    if exit_code != 0:
        raise ValueError("Publishing failed.")


if __name__ == '__main__':
@@ -78,7 +78,15 @@ class TimersManager(metaclass=Singleton):
            'task_name': 'Lookdev BG'
        }
        '''
        if len(data['hierarchy']) < 1:
            self.log.error((
                'Not allowed action in Pype!!'
                ' Timer has been launched on task which is child of Project.'
            ))
            return

        self.last_task = data

        for module in self.modules:
            module.start_timer_manager(data)
        self.is_running = True
@@ -1,18 +1,25 @@
import os
import sys
from pypeapp import config
from pype.maya import lib
import pype.maya.lib as mlib
from maya import cmds


def build_shelf():
    presets = config.get_presets()
    shelf_preset = presets['maya'].get('project_shelf')
    if shelf_preset:
        project = os.environ["AVALON_PROJECT"]

        for k, v in shelf_preset['imports'].items():
            sys.modules[k] = __import__(v, fromlist=[project])
print("starting PYPE usersetup")

        lib.shelf(name=shelf_preset['name'], preset=shelf_preset)
# build a shelf
presets = config.get_presets()
shelf_preset = presets['maya'].get('project_shelf')

cmds.evalDeferred("build_shelf()")

if shelf_preset:
    project = os.environ["AVALON_PROJECT"]

    for i in shelf_preset['imports']:
        import_string = "from {} import {}".format(project, i)
        print(import_string)
        exec(import_string)

    cmds.evalDeferred("mlib.shelf(name=shelf_preset['name'], preset=shelf_preset)")


print("finished PYPE usersetup")
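A note on the evalDeferred calls used in both versions of this file: cmds.evalDeferred runs its argument once Maya becomes idle after startup, and when given a string the code is evaluated later in the namespace userSetup.py executes in, so the names it references must already be defined at module level. A minimal sketch (the function body is a placeholder, not the project code):

from maya import cmds

def build_shelf():
    # Placeholder body; the real function builds the project shelf.
    print("shelf built")

# Deferred string evaluation; build_shelf must exist when Maya becomes idle.
cmds.evalDeferred("build_shelf()")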