Merged in Aardschok/config/render (pull request #13)

Render plugin updates
This commit is contained in:
Wijnand Koreman 2017-09-05 16:04:06 +00:00
commit de5a83cef8
26 changed files with 562 additions and 91 deletions

View file

@ -1,8 +1,7 @@
import os
from maya import cmds
from avalon import api, maya
from avalon import api
class AbcLoader(api.Loader):
@ -18,6 +17,9 @@ class AbcLoader(api.Loader):
def process(self, name, namespace, context, data):
import maya.cmds as cmds
from avalon import maya
cmds.loadPlugin("AbcImport.mll", quiet=True)
# Prevent identical alembic nodes from being shared
# Create unique namespace for the cameras

View file

@ -0,0 +1,49 @@
import sys
import os
import subprocess
from avalon import api
def open(filepath):
    """Launch *filepath* with the platform's default application.

    macOS is tested first because it also reports ``os.name == 'posix'``;
    falling through would incorrectly invoke ``xdg-open`` there.

    Note: this deliberately shadows the builtin ``open`` within this
    module.
    """
    if sys.platform.startswith('darwin'):
        # macOS: the system `open` command delegates to the OS
        launcher = ('open', filepath)
        subprocess.call(launcher)
    elif os.name == 'nt':
        # Windows: launch the associated application via the shell
        os.startfile(filepath)
    elif os.name == 'posix':
        # Linux / other unix: freedesktop's xdg-open
        launcher = ('xdg-open', filepath)
        subprocess.call(launcher)
class OpenImageSequence(api.Loader):
    """Open Image Sequence with system default"""

    families = ["colorbleed.imagesequence"]
    representations = ["*"]

    label = "Open sequence"
    order = -10
    icon = "play-circle"
    color = "orange"

    def process(self, name, namespace, context, data):
        """Open the first frame of the image sequence in `self.fname`.

        Assembles the files of the loaded directory into frame
        collections with `clique` and hands the first frame of the
        first collection to the module-level `open` helper.
        """
        from avalon.vendor import clique

        directory = self.fname

        pattern = clique.PATTERNS["frames"]
        files = os.listdir(directory)
        collections, remainder = clique.assemble(files,
                                                 patterns=[pattern],
                                                 minimum_items=1)

        assert not remainder, ("There shouldn't have been a remainder for "
                               "'%s': %s" % (directory, remainder))
        # Guard against a directory without any image sequence; without
        # this, `collections[0]` raises an opaque IndexError.
        assert collections, ("No image sequence found in "
                             "'%s'" % directory)

        # Fixed typo: was `seqeunce`
        sequence = collections[0]
        first_image = list(sequence)[0]
        filepath = os.path.normpath(os.path.join(directory, first_image))

        self.log.info("Opening : {}".format(filepath))

        open(filepath)

View file

@ -1,9 +1,7 @@
import os
import json
from maya import cmds
from avalon import api, maya
import colorbleed.maya.lib as lib
from avalon import api
class LookLoader(api.Loader):
@ -30,6 +28,10 @@ class LookLoader(api.Loader):
"""
import maya.cmds as cmds
from avalon import maya
import colorbleed.maya.lib as lib
# improve readability of the namespace
assetname = context["asset"]["name"]
ns_assetname = "{}_".format(assetname)

View file

@ -1,7 +1,4 @@
import maya.cmds as cmds
from avalon import api
import avalon.maya
class ModelLoader(api.Loader):
@ -17,13 +14,16 @@ class ModelLoader(api.Loader):
def process(self, name, namespace, context, data):
import maya.cmds as cmds
from avalon import maya
# Create a readable namespace
# Namespace should contain asset name and counter
# TEST_001{_descriptor} where `descriptor` can be `_abc` for example
assetname = "{}_".format(namespace.split("_")[0])
namespace = avalon.maya.unique_namespace(assetname, format="%03d")
namespace = maya.unique_namespace(assetname, format="%03d")
with avalon.maya.maintained_selection():
with maya.maintained_selection():
nodes = cmds.file(self.fname,
namespace=namespace,
reference=True,

View file

@ -0,0 +1,30 @@
import os
import pyblish.api
from maya import cmds
class CollectMayaWorkspace(pyblish.api.ContextPlugin):
    """Inject the current workspace into context"""

    order = pyblish.api.CollectorOrder - 0.5
    label = "Maya Workspace"

    hosts = ['maya']
    version = (0, 1, 0)

    def process(self, context):
        # Prefer the project root; when no project has been set, fall
        # back to the current working directory so files end up next
        # to the working file.
        root = cmds.workspace(rootDirectory=True, query=True)
        workspace = root or cmds.workspace(dir=True, query=True)

        # Maya returns forward-slashes by default; normalise for the OS
        normalised = os.path.normpath(workspace)

        context.set_data('workspaceDir', value=normalised)
        # For backwards compatibility
        context.set_data('workspace_dir', value=normalised)

View file

@ -0,0 +1,102 @@
import os
from maya import cmds
import pyblish.api
from avalon import maya, api
class CollectMindbenderMayaRenderlayers(pyblish.api.ContextPlugin):
    """Gather instances by active render layers"""

    order = pyblish.api.CollectorOrder
    hosts = ["maya"]
    label = "Render Layers"

    def process(self, context):
        registered_root = api.registered_root()
        asset_name = os.environ["AVALON_ASSET"]

        # Store the scene path relative to the registered root as
        # a forward-slash "{root}"-templated path
        current_file = context.data["currentFile"]
        relative_file = current_file.replace(registered_root, "{root}")
        source_file = relative_file.replace("\\", "/")

        renderlayers = cmds.ls(type="renderLayer")
        for layer in renderlayers:
            # The default render layer is never published
            if layer.endswith("defaultRenderLayer"):
                continue

            data = {"family": "Render Layers",
                    "families": ["colorbleed.renderlayer"],
                    "publish": cmds.getAttr("{}.renderable".format(layer)),
                    "startFrame": self.get_render_attribute("startFrame"),
                    "endFrame": self.get_render_attribute("endFrame"),
                    "byFrameStep": self.get_render_attribute("byFrameStep"),
                    "renderer": self.get_render_attribute("currentRenderer"),

                    # instance subset
                    "asset": asset_name,
                    "subset": layer,
                    "setMembers": layer,
                    "time": api.time(),
                    "author": context.data["user"],
                    "source": source_file}

            # Apply each user defined attribute as data
            for attr in cmds.listAttr(layer, userDefined=True) or list():
                try:
                    value = cmds.getAttr("{}.{}".format(layer, attr))
                except Exception:
                    # Some attributes cannot be read directly,
                    # such as mesh and color attributes. These
                    # are considered non-essential to this
                    # particular publishing pipeline.
                    value = None

                data[attr] = value

            # Include (optional) global settings
            # TODO(marcus): Take into account layer overrides
            try:
                avalon_globals = maya.lsattr("id", "avalon.renderglobals")[0]
            except IndexError:
                pass
            else:
                _globals = maya.read(avalon_globals)
                data["renderGlobals"] = self.get_global_overrides(_globals)

            instance = context.create_instance(layer)
            instance.data.update(data)

    def get_render_attribute(self, attr):
        """Read *attr* from the scene's defaultRenderGlobals node."""
        return cmds.getAttr("defaultRenderGlobals.{}".format(attr))

    def get_global_overrides(self, render_globals):
        """Get all overrides with a value, skip those without.

        These globals override defaults in the submission integrator,
        but an empty value means no overriding is made. Passing empty
        values on would e.g. have "Frames" override the default frames
        set under globals.

        The parameter was renamed from ``globals`` to avoid shadowing
        the builtin of that name; it is only called positionally from
        ``process`` above.

        Args:
            render_globals (dict): collection of render globals

        Returns:
            dict: only overrides with values, keyed by capitalized
                name (e.g. "pool" -> "Pool" — presumably the job-info
                key names expected downstream; confirm with the
                submission integrator)
        """
        keys = ["pool", "group", "frames", "priority"]

        read_globals = {}
        for key in keys:
            value = render_globals[key]
            if not value:
                continue
            read_globals[key.capitalize()] = value

        if not read_globals:
            self.log.info("Submitting without overrides")

        return read_globals

View file

@ -45,7 +45,6 @@ class ExtractColorbleedAlembic(colorbleed.api.Extractor):
# force elect items to ensure all items get exported by Alembic
members = instance.data("setMembers")
print "Members : {}".format(members)
cmds.select(members)
with avalon.maya.suspended_refresh():
@ -54,4 +53,9 @@ class ExtractColorbleedAlembic(colorbleed.api.Extractor):
cmds.select(nodes, replace=True, noExpand=True)
extract_alembic(file=path, **options)
if "files" not in instance.data:
instance.data["files"] = list()
instance.data["files"].append(filename)
cmds.select(clear=True)

View file

@ -34,8 +34,6 @@ class ExtractColorbleedAnimation(colorbleed.api.Extractor):
allDescendents=True,
fullPath=True) or []
print("Exporting {} as alembic".format(nodes))
# Collect the start and end including handles
start = instance.data["startFrame"]
end = instance.data["endFrame"]
@ -62,4 +60,9 @@ class ExtractColorbleedAnimation(colorbleed.api.Extractor):
**{"step": instance.data.get("step", 1.0),
"attr": ["cbId"]})
if "files" not in instance.data:
instance.data["files"] = list()
instance.data["files"].append(filename)
self.log.info("Extracted {} to {}".format(instance, dirname))

View file

@ -50,8 +50,9 @@ class ExtractCameraBaked(colorbleed.api.Extractor):
families = ["colorbleed.camera"]
def process(self, instance):
nodetype = 'camera'
file_names = []
nodetype = 'camera'
# Define extract output file path
dir_path = self.staging_dir(instance)
alembic_as_baked = instance.data("cameraBakedAlembic", True)
@ -80,6 +81,7 @@ class ExtractCameraBaked(colorbleed.api.Extractor):
# Perform maya ascii extraction
filename = "{0}.ma".format(instance.name)
file_names.append(filename)
path = os.path.join(dir_path, filename)
self.log.info("Performing extraction..")
@ -100,6 +102,7 @@ class ExtractCameraBaked(colorbleed.api.Extractor):
# Perform alembic extraction
filename = "{0}.abc".format(instance.name)
file_names.append(filename)
path = os.path.join(dir_path, filename)
if alembic_as_baked:
@ -136,5 +139,10 @@ class ExtractCameraBaked(colorbleed.api.Extractor):
# Delete the baked camera (using transform to leave no trace)
cmds.delete(baked)
if "files" not in instance.data:
instance.data["files"] = list()
instance.data["files"].extend(file_names)
self.log.info("Extracted instance '{0}' to: {1}".format(
instance.name, path))

View file

@ -43,4 +43,9 @@ class ExtractCameraRaw(colorbleed.api.Extractor):
shader=False,
expressions=False)
if "files" not in instance.data:
instance.data["files"] = list()
instance.data["files"].append(filename)
self.log.info("Extracted instance '%s' to: %s" % (instance.name, path))

View file

@ -119,6 +119,11 @@ class ExtractInstancerMayaAscii(colorbleed.api.Extractor):
shader=False,
expressions=False)
if "files" not in instance.data:
instance.data["files"] = list()
instance.data["files"].append(filename)
self.log.info("Extracted instance '{0}' to: {1}".format(
instance.name, path))

View file

@ -34,5 +34,10 @@ class ExtractLayoutMayaAscii(colorbleed.api.Extractor):
expressions=True,
constructionHistory=True)
if "files" not in instance.data:
instance.data["files"] = list()
instance.data["files"].append(filename)
self.log.info("Extracted instance '{0}' to: {1}".format(
instance.name, path))

View file

@ -77,5 +77,11 @@ class ExtractLook(colorbleed.api.Extractor):
with open(json_path, "w") as f:
json.dump(data, f)
if "files" not in instance.data:
instance.data["files"] = list()
instance.data["files"].append(maya_fname)
instance.data["files"].append(json_fname)
self.log.info("Extracted instance '%s' to: %s" % (instance.name,
maya_path))

View file

@ -46,4 +46,9 @@ class ExtractMayaAsciiRaw(colorbleed.api.Extractor):
preserveReferences=True,
constructionHistory=True)
if "files" not in instance.data:
instance.data["files"] = list()
instance.data["files"].append(filename)
self.log.info("Extracted instance '%s' to: %s" % (instance.name, path))

View file

@ -39,5 +39,10 @@ class ExtractParticlesMayaAscii(colorbleed.api.Extractor):
shader=False,
expressions=False)
if "files" not in instance.data:
instance.data["files"] = list()
instance.data["files"].append(filename)
self.log.info("Extracted instance '{0}' to: {1}".format(
instance.name, path))

View file

@ -34,4 +34,9 @@ class ExtractColorbleedRig(colorbleed.api.Extractor):
expressions=True,
constructionHistory=True)
if "files" not in instance.data:
instance.data["files"] = list()
instance.data["files"].append(filename)
self.log.info("Extracted instance '%s' to: %s" % (instance.name, path))

View file

@ -59,5 +59,7 @@ class ExtractFurYeti(colorbleed.api.Extractor):
constructionHistory=False,
shader=False)
instance.data["files"] = [filename]
self.log.info("Extracted instance '{0}' to: {1}".format(
instance.name, path))

View file

@ -1,6 +1,99 @@
import os
import re
import json
import shutil
import getpass
from maya import cmds
from avalon import api
from avalon.vendor import requests
import pyblish.api
def get_padding_length(filename):
    """Return the frame-number padding length found in *filename*.

    The padding is the character count of the first ``.<digits>``
    component. A leading minus sign counts toward the length.

    >>> get_padding_length("sequence.v004.0001.exr")
    4
    >>> get_padding_length("sequence.-001.exr")
    4

    Args:
        filename (str): the explicit filename, e.g.: sequence.0001.exr

    Returns:
        int: length of the frame-number component

    Raises:
        AttributeError: when no frame number could be found
    """
    padding_match = re.search(r"\.(-?\d+)", filename)
    if not padding_match:
        raise AttributeError("Could not find padding length in "
                             "'{}'".format(filename))

    # Use group(1), not group(): the full match includes the leading
    # dot, which previously inflated the length by one (".0001" -> 5).
    return len(padding_match.group(1))
def get_renderer_variables():
    """Collect the output extension and filename prefix of the
    currently active renderer.

    Raises:
        AttributeError: when the renderer is VRay but no settings node
            exists in the scene.

    Returns:
        dict: ``{"ext": str, "filename_prefix": str}`` — either value
        may be an empty string when it could not be determined.
    """
    extension = ""
    prefix_value = ""

    active_renderer = cmds.getAttr("defaultRenderGlobals.currentRenderer")

    if active_renderer == "vray":
        # VRay keeps its output settings on a dedicated node; fail
        # loudly when it is missing so the user knows to open the
        # Render Settings window (which creates the node).
        if not cmds.ls("vraySettings", type="VRaySettingsNode"):
            raise AttributeError("Could not find a VRay Settings Node, "
                                 "to ensure the node exists open the "
                                 "Render Settings window")

        image_format = cmds.getAttr("vraySettings.imageFormatStr")
        if image_format:
            # First token of the format string is the extension
            extension = "{}".format(image_format.split(" ")[0])

        vray_prefix = cmds.getAttr("vraySettings.fileNamePrefix")
        if vray_prefix:
            prefix_value = vray_prefix

    # insert other renderer logic here

    # Fall back to the default Maya renderers
    if active_renderer.lower().startswith("maya"):
        # getAttr defaultRenderGlobals.imageFormat returns an index
        # number, so derive the extension from the first image name
        first_filename = cmds.renderSettings(fullPath=True,
                                             firstImageName=True)[0]
        basename = os.path.basename(first_filename)
        extension = os.path.splitext(basename)[-1].strip(".")

        maya_prefix = cmds.getAttr("defaultRenderGlobals.fileNamePrefix")
        if maya_prefix:
            prefix_value = maya_prefix

    return {"ext": extension, "filename_prefix": prefix_value}
class MindbenderSubmitDeadline(pyblish.api.InstancePlugin):
"""Submit available render layers to Deadline
@ -12,20 +105,13 @@ class MindbenderSubmitDeadline(pyblish.api.InstancePlugin):
label = "Submit to Deadline"
order = pyblish.api.IntegratorOrder
hosts = ["maya"]
families = ["mindbender.renderlayer"]
families = ["colorbleed.renderlayer"]
def process(self, instance):
import os
import json
import shutil
import getpass
from maya import cmds
from avalon import api
from avalon.vendor import requests
assert api.Session["AVALON_DEADLINE"], "Requires AVALON_DEADLINE"
AVALON_DEADLINE = api.Session.get("AVALON_DEADLINE",
"http://localhost:8082")
assert AVALON_DEADLINE is not None, "Requires AVALON_DEADLINE"
context = instance.context
workspace = context.data["workspaceDir"]
@ -40,8 +126,15 @@ class MindbenderSubmitDeadline(pyblish.api.InstancePlugin):
except OSError:
pass
# get the variables depending on the renderer
render_variables = get_renderer_variables()
output_file_prefix = render_variables["filename_prefix"]
output_filename_0 = self.preview_fname(instance,
dirname,
render_variables["ext"])
# E.g. http://192.168.0.1:8082/api/jobs
url = api.Session["AVALON_DEADLINE"] + "/api/jobs"
url = "{}/api/jobs".format(AVALON_DEADLINE)
# Documentation for keys available at:
# https://docs.thinkboxsoftware.com
@ -69,7 +162,7 @@ class MindbenderSubmitDeadline(pyblish.api.InstancePlugin):
# Optional, enable double-click to preview rendered
# frames from Deadline Monitor
"OutputFilename0": self.preview_fname(instance),
"OutputFilename0": output_filename_0,
},
"PluginInfo": {
# Input
@ -77,7 +170,7 @@ class MindbenderSubmitDeadline(pyblish.api.InstancePlugin):
# Output directory and filename
"OutputFilePath": dirname,
"OutputFilePrefix": "<RenderLayer>/<RenderLayer>",
"OutputFilePrefix": output_file_prefix,
# Mandatory for Deadline
"Version": cmds.about(version=True),
@ -119,22 +212,18 @@ class MindbenderSubmitDeadline(pyblish.api.InstancePlugin):
})
# Include optional render globals
payload["JobInfo"].update(
instance.data.get("renderGlobals", {})
)
payload["JobInfo"].update(instance.data.get("renderGlobals", {}))
self.preflight_check(instance)
self.log.info("Submitting..")
self.log.info(json.dumps(
payload, indent=4, sort_keys=True)
)
self.log.info(json.dumps(payload, indent=4, sort_keys=True))
response = requests.post(url, json=payload)
if response.ok:
# Write metadata for publish
fname = os.path.join(dirname, instance.name + ".json")
fname = os.path.join(dirname, "{}.json".format(instance.name))
data = {
"submission": payload,
"session": api.Session,
@ -156,7 +245,7 @@ class MindbenderSubmitDeadline(pyblish.api.InstancePlugin):
raise Exception(response.text)
def preview_fname(self, instance):
def preview_fname(self, instance, dirname, extension):
"""Return outputted filename with #### for padding
Passing the absolute path to Deadline enables Deadline Monitor
@ -171,29 +260,31 @@ class MindbenderSubmitDeadline(pyblish.api.InstancePlugin):
"""
from maya import cmds
# We'll need to take tokens into account
fname = cmds.renderSettings(
firstImageName=True,
fullPath=True,
layer=instance.name
)[0]
fname = cmds.renderSettings(firstImageName=True,
fullPath=True,
layer=instance.name)[0]
try:
# Assume `c:/some/path/filename.0001.exr`
# TODO(marcus): Bulletproof this, the user may have
# chosen a different format for the outputted filename.
fname, padding, suffix = fname.rsplit(".", 2)
fname = ".".join([fname, "#" * len(padding), suffix])
basename = os.path.basename(fname)
name, padding, ext = basename.rsplit(".", 2)
padding_format = "#" * len(padding)
fname = ".".join([name, padding_format, extension])
self.log.info("Assuming renders end up @ %s" % fname)
file_name = os.path.join(dirname, instance.name, fname)
except ValueError:
fname = ""
file_name = ""
self.log.info("Couldn't figure out where renders go")
return fname
return file_name
def preflight_check(self, instance):
"""Ensure the startFrame, endFrame and byFrameStep are integers"""
for key in ("startFrame", "endFrame", "byFrameStep"):
value = instance.data[key]

View file

@ -0,0 +1,20 @@
import os
from pyblish import api
class CleanUp(api.InstancePlugin):
    """Cleans up the staging directory after a successful publish

    Ordered after integration (IntegratorOrder + 10) so published
    files are transferred before anything would be removed.
    """

    order = api.IntegratorOrder + 10

    def process(self, instance):
        # Intentionally a no-op for now: clean-up is disabled and the
        # dormant implementation lives in `clean_up` below.
        return

    def clean_up(self, instance):
        # NOTE(review): pyblish instances usually expose data through
        # `instance.data.get(...)` — confirm `instance.get` is valid here.
        staging_dir = instance.get("stagingDir", None)
        if staging_dir and os.path.exists(staging_dir):
            self.log.info("Removing temporary folder ...")
            # NOTE(review): os.rmdir only removes *empty* directories; a
            # populated staging dir would presumably need shutil.rmtree —
            # confirm before enabling this plug-in.
            os.rmdir(staging_dir)

View file

@ -5,7 +5,7 @@ import avalon.io as io
class CollectAssumedDestination(pyblish.api.InstancePlugin):
"""This plug-ins displays the comment dialog box per default"""
"""Generate the assumed destination path where the file will be stored"""
label = "Collect Assumed Destination"
order = pyblish.api.CollectorOrder + 0.499
@ -81,15 +81,17 @@ class CollectAssumedDestination(pyblish.api.InstancePlugin):
"parent": asset["_id"]})
# assume there is no version yet, we start at `1`
version = None
version_number = 1
if subset is not None:
version = io.find_one({"type": "version",
"parent": subset["_id"]},
sort=[("name", -1)])
# if there is a subset there ought to be version
# if there is a subset there ought to be version
if version is not None:
version_number += version["name"]
template_data = {"root": os.environ["AVALON_ROOT"],
template_data = {"root": os.environ["AVALON_PROJECTS"],
"project": project_name,
"silo": os.environ["AVALON_SILO"],
"asset": asset_name,

View file

@ -0,0 +1,14 @@
import os
import pyblish.api
class CollectCurrentShellFile(pyblish.api.ContextPlugin):
    """Inject the current working file into context"""

    order = pyblish.api.CollectorOrder - 0.5
    label = "Current File"
    hosts = ["shell"]

    def process(self, context):
        """Inject the current working file"""
        # A shell session has no document; record a "<shell>"
        # placeholder inside the current working directory instead.
        current = os.path.join(os.getcwd(), "<shell>")
        context.data["currentFile"] = current

View file

@ -0,0 +1,61 @@
import pyblish.api
class CollectMindbenderImageSequences(pyblish.api.ContextPlugin):
    """Gather image sequences from working directory"""

    order = pyblish.api.CollectorOrder
    hosts = ["shell"]
    label = "Image Sequences"

    def process(self, context):
        import os
        import json

        from avalon.vendor import clique

        workspace = context.data["workspaceDir"]
        base, dirs, _ = next(os.walk(workspace))
        for renderlayer in dirs:
            abspath = os.path.join(base, renderlayer)
            files = os.listdir(abspath)
            pattern = clique.PATTERNS["frames"]
            collections, remainder = clique.assemble(files,
                                                     patterns=[pattern],
                                                     minimum_items=1)

            assert not remainder, (
                "There shouldn't have been a remainder for '%s': "
                "%s" % (renderlayer, remainder))

            # Maya 2017 compatibility, it inexplicably prefixes layers
            # with "rs_" without warning.
            compatpath = os.path.join(base, renderlayer.split("rs_", 1)[-1])
            for fname in (abspath, compatpath):
                try:
                    with open("{}.json".format(fname)) as f:
                        metadata = json.load(f)
                        break
                # On Python 2, `open` raises IOError (not OSError) for
                # a missing file; catching only OSError crashed instead
                # of falling through to the compat path. Keep OSError
                # for Python 3 forward compatibility.
                except (IOError, OSError):
                    continue
            else:
                raise Exception("%s was not published correctly "
                                "(missing metadata)" % renderlayer)

            for collection in collections:
                instance = context.create_instance(str(collection))
                self.log.info("Collection: %s" % list(collection))

                data = dict(metadata["instance"], **{
                    "name": instance.name,
                    "family": "Image Sequences",
                    "families": ["colorbleed.imagesequence"],
                    "subset": collection.head[:-1],
                    "stagingDir": os.path.join(workspace, renderlayer),
                    "files": [list(collection)],
                    "metadata": metadata
                })

                instance.data.update(data)

View file

@ -0,0 +1,14 @@
import os
import pyblish.api
class CollectShellWorkspace(pyblish.api.ContextPlugin):
    """Inject the current workspace into context"""

    order = pyblish.api.CollectorOrder - 0.5
    label = "Shell Workspace"

    hosts = ["shell"]

    def process(self, context):
        # In a shell host, the workspace is simply the directory the
        # process was started from.
        workspace = os.getcwd()
        context.data["workspaceDir"] = workspace

View file

@ -1,4 +1,5 @@
import pyblish.api
from avalon import api
class CollectMindbenderTime(pyblish.api.ContextPlugin):
@ -8,5 +9,4 @@ class CollectMindbenderTime(pyblish.api.ContextPlugin):
order = pyblish.api.CollectorOrder
def process(self, context):
from avalon import api
context.data["time"] = api.time()

View file

@ -30,7 +30,8 @@ class IntegrateAsset(pyblish.api.InstancePlugin):
"colorbleed.lookdev",
"colorbleed.texture",
"colorbleed.historyLookdev",
"colorbleed.group"]
"colorbleed.group",
"colorbleed.imagesequence"]
def process(self, instance):
@ -39,9 +40,10 @@ class IntegrateAsset(pyblish.api.InstancePlugin):
self.register(instance)
self.integrate(instance)
self.log.info("Removing temporary files and folders ...")
stagingdir = instance.data["stagingDir"]
shutil.rmtree(stagingdir)
# TODO: Decide how to clean up? And when?
# self.log.info("Removing temporary files and folders ...")
# stagingdir = instance.data["stagingDir"]
# shutil.rmtree(stagingdir)
def register(self, instance):
@ -51,11 +53,6 @@ class IntegrateAsset(pyblish.api.InstancePlugin):
SILO = os.environ["AVALON_SILO"]
LOCATION = os.getenv("AVALON_LOCATION")
# todo(marcus): avoid hardcoding labels in the integrator
representation_labels = {".ma": "Maya Ascii",
".source": "Original source file",
".abc": "Alembic"}
context = instance.context
# Atomicity
#
@ -150,30 +147,66 @@ class IntegrateAsset(pyblish.api.InstancePlugin):
template_publish = project["config"]["template"]["publish"]
# Find the representations to transfer amongst the files
# Each should be a single representation (as such, a single extension)
representations = []
staging_content = os.listdir(stagingdir)
for v, fname in enumerate(staging_content):
for files in instance.data["files"]:
name, ext = os.path.splitext(fname)
template_data["representation"] = ext[1:]
# Collection
# _______
# |______|\
# | |\|
# | ||
# | ||
# | ||
# |_______|
#
if isinstance(files, list):
collection = files
src = os.path.join(stagingdir, fname)
dst = template_publish.format(**template_data)
# Assert that each member has identical suffix
_, ext = os.path.splitext(collection[0])
assert all(ext == os.path.splitext(name)[1]
for name in collection), (
"Files had varying suffixes, this is a bug"
)
# Backwards compatibility
if fname == ".metadata.json":
dirname = os.path.dirname(dst)
dst = os.path.join(dirname, fname)
template_data["representation"] = ext[1:]
# copy source to destination (library)
instance.data["transfers"].append([src, dst])
for fname in collection:
src = os.path.join(stagingdir, fname)
dst = os.path.join(
template_publish.format(**template_data),
fname
)
instance.data["transfers"].append([src, dst])
else:
# Single file
# _______
# | |\
# | |
# | |
# | |
# |_______|
#
fname = files
_, ext = os.path.splitext(fname)
template_data["representation"] = ext[1:]
src = os.path.join(stagingdir, fname)
dst = template_publish.format(**template_data)
instance.data["transfers"].append([src, dst])
representation = {
"schema": "avalon-core:representation-2.0",
"type": "representation",
"parent": version_id,
"name": ext[1:],
"data": {"label": representation_labels.get(ext)},
"data": {},
"dependencies": instance.data.get("dependencies", "").split(),
# Imprint shortcut to context
@ -189,33 +222,25 @@ class IntegrateAsset(pyblish.api.InstancePlugin):
}
representations.append(representation)
# store data for database and source / destinations
instance.data["representations"] = representations
self.log.info("Registering {} items".format(len(representations)))
return representations
io.insert_many(representations)
def integrate(self, instance):
"""Register the representations and move the files
"""Move the files
Through the stored `representations` and `transfers`
Through `instance.data["transfers"]`
Args:
instance: the instance to integrate
"""
# get needed data
traffic = instance.data["transfers"]
representations = instance.data["representations"]
transfers = instance.data["transfers"]
self.log.info("Registering {} items".format(len(representations)))
io.insert_many(representations)
# moving files
for src, dest in traffic:
for src, dest in transfers:
self.log.info("Copying file .. {} -> {}".format(src, dest))
self.copy_file(src, dest)
def copy_file(self, src, dst):
""" Copy given source to destination
@ -280,7 +305,7 @@ class IntegrateAsset(pyblish.api.InstancePlugin):
"data": data}
def create_version_data(self, context, instance):
"""Create the data collection for th version
"""Create the data collection for the version
Args:
context: the current context

View file

@ -26,15 +26,21 @@ class ValidateMindbenderDeadlineDone(pyblish.api.InstancePlugin):
6: "Pending",
}
url = api.Session["AVALON_DEADLINE"] + "/api/jobs?JobID=%s"
assert "AVALON_DEADLINE" in api.Session, ("Environment variable "
"missing: 'AVALON_DEADLINE'")
AVALON_DEADLINE = api.Session["AVALON_DEADLINE"]
url = "{}/api/jobs?JobID=%s".format(AVALON_DEADLINE)
for job in instance.data["metadata"]["jobs"]:
response = requests.get(url % job["_id"])
if response.ok:
data = response.json()[0]
state = states.get(data["Stat"])
data = response.json()
assert data, ValueError("Can't find information about "
"this Deadline job: "
"{}".format(job["_id"]))
state = states.get(data[0]["Stat"])
if state in (None, "Unknown"):
raise Exception("State of this render is unknown")