Merge branch 'hotfix/psd-batch-publisher' into develop

This commit is contained in:
Milan Kolar 2020-08-12 22:36:58 +02:00
commit b784e4bfb1
11 changed files with 874 additions and 82 deletions

View file

@ -25,14 +25,14 @@ class DropEmpty(QtWidgets.QWidget):
self._label = QtWidgets.QLabel('Drag & Drop')
self._label.setFont(font)
self._label.setStyleSheet(
'background-color: rgb(255, 255, 255, 0);'
'background-color: transparent;'
)
font.setPointSize(12)
self._sub_label = QtWidgets.QLabel('(drop files here)')
self._sub_label.setFont(font)
self._sub_label.setStyleSheet(
'background-color: rgb(255, 255, 255, 0);'
'background-color: transparent;'
)
layout.addWidget(self._label, alignment=BottomCenterAlignment)

View file

@ -37,6 +37,7 @@ class DropDataFrame(QtWidgets.QFrame):
"image_file": image_extensions,
"video_file": video_extensions
}
ffprobe_ignore_extensions = [".psd"]
def __init__(self, parent):
super().__init__()
@ -284,8 +285,10 @@ class DropDataFrame(QtWidgets.QFrame):
file_info = data['file_info']
if (
ext in self.image_extensions
or ext in self.video_extensions
ext not in self.ffprobe_ignore_extensions
and (
ext in self.image_extensions or ext in self.video_extensions
)
):
probe_data = self.load_data_with_probe(filepath)
if 'fps' not in data:

View file

@ -39,11 +39,21 @@ class CollectAnatomyInstanceData(pyblish.api.InstancePlugin):
anatomy_data = copy.deepcopy(instance.context.data["anatomyData"])
project_entity = instance.context.data["projectEntity"]
context_asset_entity = instance.context.data["assetEntity"]
instance_asset_entity = instance.data.get("assetEntity")
asset_name = instance.data["asset"]
# There is possibility that assetEntity on instance is already set
# which can happen in standalone publisher
if (
instance_asset_entity
and instance_asset_entity["name"] == asset_name
):
asset_entity = instance_asset_entity
# Check if asset name is the same as what is in context
# - they may be different, e.g. in NukeStudio
if context_asset_entity["name"] == asset_name:
elif context_asset_entity["name"] == asset_name:
asset_entity = context_asset_entity
else:
@ -92,6 +102,12 @@ class CollectAnatomyInstanceData(pyblish.api.InstancePlugin):
"subset": subset_name,
"version": version_number
}
if (
asset_entity
and asset_entity["_id"] != context_asset_entity["_id"]
):
parents = asset_entity["data"].get("parents") or list()
anatomy_updates["hierarchy"] = "/".join(parents)
task_name = instance.data.get("task")
if task_name:

View file

@ -84,7 +84,8 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
"action",
"harmony.template",
"harmony.palette",
"editorial"
"editorial",
"background"
]
exclude_families = ["clip"]
db_representation_context_keys = [

View file

@ -17,10 +17,9 @@ import os
import pyblish.api
from avalon import io
import json
import logging
import copy
import clique
log = logging.getLogger("collector")
from pprint import pformat
class CollectContextDataSAPublish(pyblish.api.ContextPlugin):
@ -33,104 +32,97 @@ class CollectContextDataSAPublish(pyblish.api.ContextPlugin):
order = pyblish.api.CollectorOrder - 0.49
hosts = ["standalonepublisher"]
# presets
batch_extensions = ["edl", "xml", "psd"]
default_families = ["ftrack"]
def process(self, context):
# get json paths from os and load them
io.install()
# Load presets
presets = context.data.get("presets")
if not presets:
from pype.api import config
presets = config.get_presets()
project = io.find_one({"type": "project"})
context.data["project"] = project
# get json file context
input_json_path = os.environ.get("SAPUBLISH_INPATH")
with open(input_json_path, "r") as f:
in_data = json.load(f)
self.log.debug(f"_ in_data: {in_data}")
self.asset_name = in_data["asset"]
self.family = in_data["family"]
asset = io.find_one({"type": "asset", "name": self.asset_name})
context.data["asset"] = asset
self.log.debug(f"_ in_data: {pformat(in_data)}")
# exception for editorial
if "editorial" in self.family:
# avoid subset name duplicity
if not context.data.get("subsetNamesCheck"):
context.data["subsetNamesCheck"] = list()
in_data_list = list()
representations = in_data.pop("representations")
for repr in representations:
in_data_copy = in_data.copy()
ext = repr["ext"][1:]
subset = in_data_copy["subset"]
# filter out non editorial files
if ext not in ["edl", "xml"]:
in_data_copy["representations"] = [repr]
in_data_copy["subset"] = f"{ext}{subset}"
in_data_list.append(in_data_copy)
files = repr.pop("files")
# delete unneeded keys
delete_repr_keys = ["frameStart", "frameEnd"]
for k in delete_repr_keys:
if repr.get(k):
repr.pop(k)
# convert files to list if it isnt
if not isinstance(files, list):
files = [files]
self.log.debug(f"_ files: {files}")
for index, f in enumerate(files):
index += 1
# copy dictionaries
in_data_copy = in_data_copy.copy()
repr_new = repr.copy()
repr_new["files"] = f
repr_new["name"] = ext
in_data_copy["representations"] = [repr_new]
# create subset Name
new_subset = f"{ext}{index}{subset}"
while new_subset in context.data["subsetNamesCheck"]:
index += 1
new_subset = f"{ext}{index}{subset}"
context.data["subsetNamesCheck"].append(new_subset)
in_data_copy["subset"] = new_subset
in_data_list.append(in_data_copy)
self.log.info(f"Creating subset: {ext}{index}{subset}")
if in_data["family"] in ["editorial", "background_batch"]:
in_data_list = self.multiple_instances(context, in_data)
else:
in_data_list = [in_data]
self.log.debug(f"_ in_data_list: {in_data_list}")
self.log.debug(f"_ in_data_list: {pformat(in_data_list)}")
for in_data in in_data_list:
# create instance
self.create_instance(context, in_data)
def multiple_instances(self, context, in_data):
# avoid subset name duplicity
if not context.data.get("subsetNamesCheck"):
context.data["subsetNamesCheck"] = list()
in_data_list = list()
representations = in_data.pop("representations")
for repr in representations:
in_data_copy = copy.deepcopy(in_data)
ext = repr["ext"][1:]
subset = in_data_copy["subset"]
# filter out non editorial files
if ext not in self.batch_extensions:
in_data_copy["representations"] = [repr]
in_data_copy["subset"] = f"{ext}{subset}"
in_data_list.append(in_data_copy)
files = repr.get("files")
# delete unneeded keys
delete_repr_keys = ["frameStart", "frameEnd"]
for k in delete_repr_keys:
if repr.get(k):
repr.pop(k)
# convert files to list if it isnt
if not isinstance(files, (tuple, list)):
files = [files]
self.log.debug(f"_ files: {files}")
for index, f in enumerate(files):
index += 1
# copy dictionaries
in_data_copy = copy.deepcopy(in_data_copy)
repr_new = copy.deepcopy(repr)
repr_new["files"] = f
repr_new["name"] = ext
in_data_copy["representations"] = [repr_new]
# create subset Name
new_subset = f"{ext}{index}{subset}"
while new_subset in context.data["subsetNamesCheck"]:
index += 1
new_subset = f"{ext}{index}{subset}"
context.data["subsetNamesCheck"].append(new_subset)
in_data_copy["subset"] = new_subset
in_data_list.append(in_data_copy)
self.log.info(f"Creating subset: {ext}{index}{subset}")
return in_data_list
def create_instance(self, context, in_data):
subset = in_data["subset"]
instance = context.create_instance(subset)
instance.data.update(
{
"subset": subset,
"asset": self.asset_name,
"asset": in_data["asset"],
"label": subset,
"name": subset,
"family": self.family,
"family": in_data["family"],
"version": in_data.get("version", 1),
"frameStart": in_data.get("representations", [None])[0].get(
"frameStart", None
@ -138,11 +130,11 @@ class CollectContextDataSAPublish(pyblish.api.ContextPlugin):
"frameEnd": in_data.get("representations", [None])[0].get(
"frameEnd", None
),
"families": [self.family, "ftrack"],
"families": self.default_families or [],
}
)
self.log.info("collected instance: {}".format(instance.data))
self.log.info("parsing data: {}".format(in_data))
self.log.info("collected instance: {}".format(pformat(instance.data)))
self.log.info("parsing data: {}".format(pformat(in_data)))
instance.data["destination_list"] = list()
instance.data["representations"] = list()
@ -165,4 +157,8 @@ class CollectContextDataSAPublish(pyblish.api.ContextPlugin):
component["tags"] = ["review"]
self.log.debug("Adding review family")
if "psd" in component["name"]:
instance.data["source"] = component["files"]
self.log.debug("Adding image:background_batch family")
instance.data["representations"].append(component)

View file

@ -162,6 +162,7 @@ class CollectHierarchyContext(pyblish.api.ContextPlugin):
label = "Collect Hierarchy Context"
order = pyblish.api.CollectorOrder + 0.102
hosts = ["standalonepublisher"]
families = ["shot"]
def update_dict(self, ex_dict, new_dict):
for key in ex_dict:

View file

@ -0,0 +1,82 @@
import os
import collections
import pyblish.api
from avalon import io
from pprint import pformat
class CollectMatchingAssetToInstance(pyblish.api.InstancePlugin):
    """Match an instance's source filename to an asset document.

    The basename of ``instance.data["source"]`` is compared (lowercased)
    against the names of asset documents underneath the selected context
    asset. On success the matched document is stored to
    ``instance.data["assetEntity"]`` and its name to
    ``instance.data["asset"]``; otherwise an AssertionError is raised.
    """

    label = "Collect Matching Asset to Instance"
    order = pyblish.api.CollectorOrder - 0.05
    hosts = ["standalonepublisher"]
    # BUGFIX: pyblish filters InstancePlugin subclasses by the plural
    # `families` attribute; the original singular `family` attribute was
    # never read, so this plugin ran for every instance.
    families = ["background_batch"]

    def process(self, instance):
        source_file = os.path.basename(instance.data["source"]).lower()
        self.log.info("Looking for asset document for file \"{}\"".format(
            instance.data["source"]
        ))

        asset_docs_by_name = self.selection_children_by_name(instance)

        # Exact (lowercased) name match first, substring match as fallback.
        matching_asset_doc = asset_docs_by_name.get(source_file)
        if matching_asset_doc is None:
            for asset_name_low, asset_doc in asset_docs_by_name.items():
                if asset_name_low in source_file:
                    matching_asset_doc = asset_doc
                    break

        if matching_asset_doc:
            instance.data["asset"] = matching_asset_doc["name"]
            instance.data["assetEntity"] = matching_asset_doc
            self.log.info(
                f"Matching asset found: {pformat(matching_asset_doc)}"
            )
        else:
            # TODO better error message
            raise AssertionError((
                "Filename \"{}\" does not match"
                " any name of asset documents in database for your selection."
            ).format(instance.data["source"]))

    def selection_children_by_name(self, instance):
        """Return {lowercased asset name: asset doc} for the selection subtree.

        The mapping is cached on context data so the database traversal
        happens only once per publish, not once per instance.
        """
        storing_key = "childrenDocsForSelection"
        children_docs = instance.context.data.get(storing_key)
        if children_docs is None:
            top_asset_doc = instance.context.data["assetEntity"]
            assets_by_parent_id = self._asset_docs_by_parent_id(instance)
            _children_docs = self._children_docs(
                assets_by_parent_id, top_asset_doc
            )
            children_docs = {
                children_doc["name"].lower(): children_doc
                for children_doc in _children_docs
            }
            instance.context.data[storing_key] = children_docs
        return children_docs

    def _children_docs(self, documents_by_parent_id, parent_doc):
        """Flatten the subtree rooted at `parent_doc`, deepest children first."""
        # Find all children in reverse order, last children is at first place.
        output = []
        children = documents_by_parent_id.get(parent_doc["_id"]) or tuple()
        for child in children:
            output.extend(
                self._children_docs(documents_by_parent_id, child)
            )
        output.append(parent_doc)
        return output

    def _asset_docs_by_parent_id(self, instance):
        """Query all project assets grouped by their visual parent id."""
        asset_docs_by_parent_id = collections.defaultdict(list)
        for asset_doc in io.find({"type": "asset"}):
            # Use .get so an asset document missing "visualParent" does not
            # abort the whole collection phase.
            parent_id = asset_doc["data"].get("visualParent")
            asset_docs_by_parent_id[parent_id].append(asset_doc)
        return asset_docs_by_parent_id

View file

@ -0,0 +1,76 @@
import copy
import pyblish.api
from pprint import pformat
class CollectPsdInstances(pyblish.api.InstancePlugin):
    """Expand one psd batch instance into the preset-defined subsets.

    Each entry of `subsets` produces a new instance carrying a deep copy
    of the source instance's data; the source instance is then removed
    from the context.
    """

    label = "Collect Psd Instances"
    order = pyblish.api.CollectorOrder + 0.492
    hosts = ["standalonepublisher"]
    families = ["background_batch"]

    # presets
    subsets = {
        "backgroundLayout": {
            "task": "background",
            "family": "backgroundLayout"
        },
        "backgroundComp": {
            "task": "background",
            "family": "backgroundComp"
        },
        "workfileBackground": {
            "task": "background",
            "family": "workfile"
        }
    }
    unchecked_by_default = []

    def process(self, instance):
        context = instance.context
        asset_doc = instance.data["assetEntity"]
        asset_name = instance.data["asset"]
        src_anatomy_data = instance.data["anatomyData"]

        for subset_name, subset_data in self.subsets.items():
            instance_name = f"{asset_name}_{subset_name}"
            task_name = subset_data.get("task", "background")

            new_instance = context.create_instance(instance_name)

            # Deep-copy everything except the name so mutable values are
            # not shared across the created instances.
            for key, value in instance.data.items():
                if key == "name":
                    continue
                new_instance.data[key] = copy.deepcopy(value)

            # Apply the preset values (task, family, ...) on top.
            new_instance.data.update(subset_data)
            new_instance.data["label"] = f"{instance_name}"
            new_instance.data["subset"] = subset_name

            # Rebuild anatomy data with the subset specific values.
            anatomy_data = copy.deepcopy(src_anatomy_data)
            anatomy_data["asset"] = asset_doc["name"]
            anatomy_data["task"] = task_name
            anatomy_data["subset"] = subset_name
            new_instance.data["anatomyData"] = anatomy_data

            if subset_name in self.unchecked_by_default:
                new_instance.data["publish"] = False

            self.log.info(f"Created new instance: {instance_name}")
            self.log.debug(f"_ inst_data: {pformat(new_instance.data)}")

        # The batch instance is fully replaced by the created ones.
        context.remove(instance)

View file

@ -0,0 +1,238 @@
import os
import json
import copy
import pype.api
from avalon import io
PSDImage = None
class ExtractBGForComp(pype.api.Extractor):
    """Export layers of a background .psd into PNGs for compositing.

    For every ``.psd`` representation the visible top level groups whose
    name is in `allowed_group_names` are exported layer by layer as PNG
    files, together with a JSON metadata file describing the hierarchy.
    The psd representation is replaced by the json representation and the
    PNG files are registered as integrator transfers.
    """

    label = "Extract Background for Compositing"
    families = ["backgroundComp"]
    hosts = ["standalonepublisher"]

    # Family the instance is switched to after extraction.
    new_instance_family = "background"

    # Presetable
    allowed_group_names = [
        "OL", "BG", "MG", "FG", "UL", "SKY", "Field Guide", "Field_Guide",
        "ANIM"
    ]

    def process(self, instance):
        # Check if python module `psd_tools` is installed
        try:
            global PSDImage
            from psd_tools import PSDImage
        except Exception:
            raise AssertionError(
                "BUG: Python module `psd-tools` is not installed!"
            )

        # Group names are compared case-insensitively.
        self.allowed_group_names = [
            name.lower()
            for name in self.allowed_group_names
        ]
        self.redo_global_plugins(instance)

        repres = instance.data.get("representations")
        if not repres:
            self.log.info("There are no representations on instance.")
            return

        if not instance.data.get("transfers"):
            instance.data["transfers"] = []

        # Prepare staging dir
        staging_dir = self.staging_dir(instance)
        if not os.path.exists(staging_dir):
            os.makedirs(staging_dir)

        for repre in tuple(repres):
            # Skip all files without .psd extension
            if repre["ext"] != ".psd":
                continue

            # Prepare publish dir for transfers
            publish_dir = instance.data["publishDir"]

            # Prepare json filepath where extracted metadata are stored
            json_filename = "{}.json".format(instance.name)
            json_full_path = os.path.join(staging_dir, json_filename)

            self.log.debug(f"`staging_dir` is \"{staging_dir}\"")

            # Prepare new repre data
            new_repre = {
                "name": "json",
                "ext": "json",
                "files": json_filename,
                "stagingDir": staging_dir
            }

            # TODO add check of list
            psd_filename = repre["files"]
            psd_folder_path = repre["stagingDir"]
            psd_filepath = os.path.join(psd_folder_path, psd_filename)
            self.log.debug(f"psd_filepath: \"{psd_filepath}\"")
            psd_object = PSDImage.open(psd_filepath)

            json_data, transfers = self.export_compositing_images(
                psd_object, staging_dir, publish_dir
            )
            self.log.info("Json file path: {}".format(json_full_path))
            with open(json_full_path, "w") as json_filestream:
                json.dump(json_data, json_filestream, indent=4)

            instance.data["transfers"].extend(transfers)
            # Swap the psd representation for the json metadata repre.
            instance.data["representations"].remove(repre)
            instance.data["representations"].append(new_repre)

    def export_compositing_images(self, psd_object, output_dir, publish_dir):
        """Export layers of allowed, visible groups to `output_dir`.

        Returns (json_data, transfers): json_data describes the exported
        hierarchy, transfers contains (src, dst) pairs for the integrator.
        """
        json_data = {
            "__schema_version__": 1,
            "children": []
        }
        transfers = []
        for main_idx, main_layer in enumerate(psd_object):
            # BUGFIX: psd-tools `is_group` is a method (note `is_visible()`
            # is called with parens above) — the original
            # `not main_layer.is_group` negated a bound-method object which
            # is always truthy, so non-group layers were never skipped.
            if (
                not main_layer.is_visible()
                or main_layer.name.lower() not in self.allowed_group_names
                or not main_layer.is_group()
            ):
                continue

            # Flatten "ADJ" groups into their parent's layer list.
            export_layers = []
            layers_idx = 0
            for layer in main_layer:
                # TODO this way may be added also layers next to "ADJ"
                if layer.name.lower() == "adj":
                    for _layer in layer:
                        export_layers.append((layers_idx, _layer))
                        layers_idx += 1
                else:
                    export_layers.append((layers_idx, layer))
                    layers_idx += 1

            if not export_layers:
                continue

            main_layer_data = {
                "index": main_idx,
                "name": main_layer.name,
                "children": []
            }

            for layer_idx, layer in export_layers:
                has_size = layer.width > 0 and layer.height > 0
                if not has_size:
                    self.log.debug((
                        "Skipping layer \"{}\" because does "
                        "not have any content."
                    ).format(layer.name))
                    continue

                main_layer_name = main_layer.name.replace(" ", "_")
                layer_name = layer.name.replace(" ", "_")
                filename = "{:0>2}_{}_{:0>2}_{}.png".format(
                    main_idx + 1, main_layer_name, layer_idx + 1, layer_name
                )
                layer_data = {
                    "index": layer_idx,
                    "name": layer.name,
                    "filename": filename
                }
                output_filepath = os.path.join(output_dir, filename)
                dst_filepath = os.path.join(publish_dir, filename)
                transfers.append((output_filepath, dst_filepath))

                # Render the layer in the document's viewport so all PNGs
                # share the same canvas size.
                pil_object = layer.composite(viewport=psd_object.viewbox)
                pil_object.save(output_filepath, "PNG")

                main_layer_data["children"].append(layer_data)

            if main_layer_data["children"]:
                json_data["children"].append(main_layer_data)

        return json_data, transfers

    def redo_global_plugins(self, instance):
        """Re-apply family, version and publish dirs for the new family.

        TODO do this in collection phase
        """
        # Copy `families` and check if `family` is not in current families
        families = instance.data.get("families") or list()
        if families:
            families = list(set(families))

        if self.new_instance_family in families:
            families.remove(self.new_instance_family)

        self.log.debug(
            "Setting new instance families {}".format(str(families))
        )
        instance.data["families"] = families

        # Override instance data with new information
        instance.data["family"] = self.new_instance_family
        subset_name = instance.data["anatomyData"]["subset"]
        asset_doc = instance.data["assetEntity"]
        latest_version = self.find_last_version(subset_name, asset_doc)
        version_number = 1
        if latest_version is not None:
            version_number += latest_version

        instance.data["latestVersion"] = latest_version
        instance.data["version"] = version_number

        # Same data apply to anatomy data
        instance.data["anatomyData"].update({
            "family": self.new_instance_family,
            "version": version_number
        })

        # Redo publish and resources dir
        anatomy = instance.context.data["anatomy"]
        template_data = copy.deepcopy(instance.data["anatomyData"])
        template_data.update({
            "frame": "FRAME_TEMP",
            "representation": "TEMP"
        })
        anatomy_filled = anatomy.format(template_data)
        if "folder" in anatomy.templates["publish"]:
            publish_folder = anatomy_filled["publish"]["folder"]
        else:
            publish_folder = os.path.dirname(anatomy_filled["publish"]["path"])

        publish_folder = os.path.normpath(publish_folder)
        resources_folder = os.path.join(publish_folder, "resources")

        instance.data["publishDir"] = publish_folder
        instance.data["resourcesDir"] = resources_folder

        self.log.debug("publishDir: \"{}\"".format(publish_folder))
        self.log.debug("resourcesDir: \"{}\"".format(resources_folder))

    def find_last_version(self, subset_name, asset_doc):
        """Return the highest published version of `subset_name` or None."""
        subset_doc = io.find_one({
            "type": "subset",
            "name": subset_name,
            "parent": asset_doc["_id"]
        })

        if subset_doc is None:
            self.log.debug("Subset entity does not exist yet.")
        else:
            version_doc = io.find_one(
                {
                    "type": "version",
                    "parent": subset_doc["_id"]
                },
                sort=[("name", -1)]
            )
            if version_doc:
                return int(version_doc["name"])
        return None

View file

@ -0,0 +1,213 @@
import os
import copy
import json
import pype.api
import pyblish.api
from avalon import io
PSDImage = None
class ExtractBGMainGroups(pype.api.Extractor):
    """Export the top level groups of a background .psd, one PNG per group.

    For every ``.psd`` representation the visible top level layers whose
    (underscored) name is in `allowed_group_names` are composited to PNG
    files, described in a JSON metadata file. The psd representation is
    replaced by the json representation and the PNG files are registered
    as integrator transfers.
    """

    label = "Extract Background Main Groups"
    order = pyblish.api.ExtractorOrder + 0.02
    families = ["backgroundLayout"]
    hosts = ["standalonepublisher"]

    # Family the instance is switched to after extraction.
    new_instance_family = "background"

    # Presetable
    allowed_group_names = [
        "OL", "BG", "MG", "FG", "UL", "SKY", "Field Guide", "Field_Guide",
        "ANIM"
    ]

    def process(self, instance):
        # Check if python module `psd_tools` is installed
        try:
            global PSDImage
            from psd_tools import PSDImage
        except Exception:
            raise AssertionError(
                "BUG: Python module `psd-tools` is not installed!"
            )

        # Group names are compared case-insensitively.
        self.allowed_group_names = [
            name.lower()
            for name in self.allowed_group_names
        ]

        repres = instance.data.get("representations")
        if not repres:
            self.log.info("There are no representations on instance.")
            return

        self.redo_global_plugins(instance)
        # NOTE: the original code checked `representations` a second time
        # here; `redo_global_plugins` does not modify representations, so
        # the duplicated guard was removed.

        if not instance.data.get("transfers"):
            instance.data["transfers"] = []

        # Prepare staging dir
        staging_dir = self.staging_dir(instance)
        if not os.path.exists(staging_dir):
            os.makedirs(staging_dir)

        # Prepare publish dir for transfers
        publish_dir = instance.data["publishDir"]

        for repre in tuple(repres):
            # Skip all files without .psd extension
            if repre["ext"] != ".psd":
                continue

            # Prepare json filepath where extracted metadata are stored
            json_filename = "{}.json".format(instance.name)
            json_full_path = os.path.join(staging_dir, json_filename)

            self.log.debug(f"`staging_dir` is \"{staging_dir}\"")

            # Prepare new repre data
            new_repre = {
                "name": "json",
                "ext": "json",
                "files": json_filename,
                "stagingDir": staging_dir
            }

            # TODO add check of list
            psd_filename = repre["files"]
            psd_folder_path = repre["stagingDir"]
            psd_filepath = os.path.join(psd_folder_path, psd_filename)
            self.log.debug(f"psd_filepath: \"{psd_filepath}\"")
            psd_object = PSDImage.open(psd_filepath)

            json_data, transfers = self.export_compositing_images(
                psd_object, staging_dir, publish_dir
            )
            self.log.info("Json file path: {}".format(json_full_path))
            with open(json_full_path, "w") as json_filestream:
                json.dump(json_data, json_filestream, indent=4)

            instance.data["transfers"].extend(transfers)
            # Swap the psd representation for the json metadata repre.
            instance.data["representations"].remove(repre)
            instance.data["representations"].append(new_repre)

    def export_compositing_images(self, psd_object, output_dir, publish_dir):
        """Composite allowed, visible top level layers to PNGs in `output_dir`.

        Returns (json_data, transfers): json_data describes the exported
        layers, transfers contains (src, dst) pairs for the integrator.
        """
        json_data = {
            "__schema_version__": 1,
            "children": []
        }
        transfers = []
        for layer_idx, layer in enumerate(psd_object):
            layer_name = layer.name.replace(" ", "_")
            if (
                not layer.is_visible()
                or layer_name.lower() not in self.allowed_group_names
            ):
                continue

            has_size = layer.width > 0 and layer.height > 0
            if not has_size:
                self.log.debug((
                    "Skipping layer \"{}\" because does not have any content."
                ).format(layer.name))
                continue

            filename = "{}.png".format(layer_name)
            layer_data = {
                "index": layer_idx,
                "name": layer.name,
                "filename": filename
            }
            output_filepath = os.path.join(output_dir, filename)
            dst_filepath = os.path.join(publish_dir, filename)
            transfers.append((output_filepath, dst_filepath))

            # Render the layer in the document's viewport so all PNGs share
            # the same canvas size.
            pil_object = layer.composite(viewport=psd_object.viewbox)
            pil_object.save(output_filepath, "PNG")

            json_data["children"].append(layer_data)

        return json_data, transfers

    def redo_global_plugins(self, instance):
        """Re-apply family, version and publish dirs for the new family.

        TODO do this in collection phase
        """
        # Copy `families` and check if `family` is not in current families
        families = instance.data.get("families") or list()
        if families:
            families = list(set(families))

        if self.new_instance_family in families:
            families.remove(self.new_instance_family)

        self.log.debug(
            "Setting new instance families {}".format(str(families))
        )
        instance.data["families"] = families

        # Override instance data with new information
        instance.data["family"] = self.new_instance_family
        subset_name = instance.data["anatomyData"]["subset"]
        asset_doc = instance.data["assetEntity"]
        latest_version = self.find_last_version(subset_name, asset_doc)
        version_number = 1
        if latest_version is not None:
            version_number += latest_version

        instance.data["latestVersion"] = latest_version
        instance.data["version"] = version_number

        # Same data apply to anatomy data
        instance.data["anatomyData"].update({
            "family": self.new_instance_family,
            "version": version_number
        })

        # Redo publish and resources dir
        anatomy = instance.context.data["anatomy"]
        template_data = copy.deepcopy(instance.data["anatomyData"])
        template_data.update({
            "frame": "FRAME_TEMP",
            "representation": "TEMP"
        })
        anatomy_filled = anatomy.format(template_data)
        if "folder" in anatomy.templates["publish"]:
            publish_folder = anatomy_filled["publish"]["folder"]
        else:
            publish_folder = os.path.dirname(anatomy_filled["publish"]["path"])

        publish_folder = os.path.normpath(publish_folder)
        resources_folder = os.path.join(publish_folder, "resources")

        instance.data["publishDir"] = publish_folder
        instance.data["resourcesDir"] = resources_folder

        self.log.debug("publishDir: \"{}\"".format(publish_folder))
        self.log.debug("resourcesDir: \"{}\"".format(resources_folder))

    def find_last_version(self, subset_name, asset_doc):
        """Return the highest published version of `subset_name` or None."""
        subset_doc = io.find_one({
            "type": "subset",
            "name": subset_name,
            "parent": asset_doc["_id"]
        })

        if subset_doc is None:
            self.log.debug("Subset entity does not exist yet.")
        else:
            version_doc = io.find_one(
                {
                    "type": "version",
                    "parent": subset_doc["_id"]
                },
                sort=[("name", -1)]
            )
            if version_doc:
                return int(version_doc["name"])
        return None

View file

@ -0,0 +1,166 @@
import os
import copy
import pype.api
import pyblish.api
from avalon import io
PSDImage = None
class ExtractImagesFromPSD(pype.api.Extractor):
    """Create one "image" instance per allowed top level layer of a .psd.

    For each ``.psd`` representation the visible layers whose name is in
    `allowed_group_names` are composited to a PNG and wrapped in a newly
    created instance; the original instance is removed from the context.
    """

    # PLUGIN is not currently enabled because was decided to use different
    # approach
    enabled = False
    active = False

    label = "Extract Images from PSD"
    order = pyblish.api.ExtractorOrder + 0.02
    families = ["backgroundLayout"]
    hosts = ["standalonepublisher"]

    # Family assigned to each newly created per-layer instance.
    new_instance_family = "image"
    # Keys NOT copied from the source instance to the new instances.
    ignored_instance_data_keys = ("name", "label", "stagingDir", "version")

    # Presetable
    allowed_group_names = [
        "OL", "BG", "MG", "FG", "UL", "SKY", "Field Guide", "Field_Guide",
        "ANIM"
    ]

    def process(self, instance):
        # Check if python module `psd_tools` is installed
        try:
            global PSDImage
            from psd_tools import PSDImage
        except Exception:
            raise AssertionError(
                "BUG: Python module `psd-tools` is not installed!"
            )

        # Group names are compared case-insensitively.
        self.allowed_group_names = [
            name.lower()
            for name in self.allowed_group_names
        ]
        repres = instance.data.get("representations")
        if not repres:
            self.log.info("There are no representations on instance.")
            return

        for repre in tuple(repres):
            # Skip all files without .psd extension
            if repre["ext"] != ".psd":
                continue

            # TODO add check of list of "files" value
            psd_filename = repre["files"]
            psd_folder_path = repre["stagingDir"]
            psd_filepath = os.path.join(psd_folder_path, psd_filename)
            self.log.debug(f"psd_filepath: \"{psd_filepath}\"")
            psd_object = PSDImage.open(psd_filepath)

            self.create_new_instances(instance, psd_object)

            # Remove the instance from context
            # NOTE(review): removal happens inside the repre loop — a second
            # .psd representation would trigger a second remove() on an
            # already-removed instance; confirm a single-psd assumption.
            instance.context.remove(instance)

    def create_new_instances(self, instance, psd_object):
        """Create one new context instance per allowed visible layer.

        Each new instance copies the source instance's data (minus
        `ignored_instance_data_keys`), gets the next version for its
        subset and a single PNG representation in a fresh staging dir.
        """
        asset_doc = instance.data["assetEntity"]
        for layer in psd_object:
            if (
                not layer.is_visible()
                or layer.name.lower() not in self.allowed_group_names
            ):
                continue

            # Skip layers without pixel content.
            has_size = layer.width > 0 and layer.height > 0
            if not has_size:
                self.log.debug((
                    "Skipping layer \"{}\" because does "
                    "not have any content."
                ).format(layer.name))
                continue

            layer_name = layer.name.replace(" ", "_")
            instance_name = subset_name = f"image{layer_name}"
            self.log.info(
                f"Creating new instance with name \"{instance_name}\""
            )
            new_instance = instance.context.create_instance(instance_name)
            for key, value in instance.data.items():
                if key not in self.ignored_instance_data_keys:
                    # Deep copy so mutable values are not shared between
                    # the created instances.
                    new_instance.data[key] = copy.deepcopy(value)

            new_instance.data["label"] = " ".join(
                (new_instance.data["asset"], instance_name)
            )

            # Find latest version
            latest_version = self.find_last_version(subset_name, asset_doc)
            version_number = 1
            if latest_version is not None:
                version_number += latest_version

            self.log.info(
                "Next version of instance \"{}\" will be {}".format(
                    instance_name, version_number
                )
            )

            # Set family and subset
            new_instance.data["family"] = self.new_instance_family
            new_instance.data["subset"] = subset_name
            new_instance.data["version"] = version_number
            new_instance.data["latestVersion"] = latest_version

            new_instance.data["anatomyData"].update({
                "subset": subset_name,
                "family": self.new_instance_family,
                "version": version_number
            })

            # Copy `families` and check if `family` is not in current families
            families = new_instance.data.get("families") or list()
            if families:
                families = list(set(families))

            if self.new_instance_family in families:
                families.remove(self.new_instance_family)
            new_instance.data["families"] = families

            # Prepare staging dir for new instance
            staging_dir = self.staging_dir(new_instance)

            output_filename = "{}.png".format(layer_name)
            output_filepath = os.path.join(staging_dir, output_filename)
            # Composite in the document's viewport so all PNGs share the
            # same canvas size.
            pil_object = layer.composite(viewport=psd_object.viewbox)
            pil_object.save(output_filepath, "PNG")

            new_repre = {
                "name": "png",
                "ext": "png",
                "files": output_filename,
                "stagingDir": staging_dir
            }
            self.log.debug(
                "Creating new representation: {}".format(new_repre)
            )
            new_instance.data["representations"] = [new_repre]

    def find_last_version(self, subset_name, asset_doc):
        """Return the highest published version of `subset_name` or None."""
        subset_doc = io.find_one({
            "type": "subset",
            "name": subset_name,
            "parent": asset_doc["_id"]
        })

        if subset_doc is None:
            self.log.debug("Subset entity does not exist yet.")
        else:
            version_doc = io.find_one(
                {
                    "type": "version",
                    "parent": subset_doc["_id"]
                },
                sort=[("name", -1)]
            )
            if version_doc:
                return int(version_doc["name"])
        return None