Merge branch 'release/2.8'

This commit is contained in:
Milan Kolar 2020-04-20 23:20:22 +02:00
commit a734ade82e
42 changed files with 3701 additions and 100 deletions

31
.github/ISSUE_TEMPLATE/bug_report.md vendored Normal file
View file

@ -0,0 +1,31 @@
---
name: Bug report
about: Create a report to help us improve
title: ''
labels: bug
assignees: ''
---
**Describe the bug**
A clear and concise description of what the bug is.
**To Reproduce**
Steps to reproduce the behavior:
1. Go to '...'
2. Click on '....'
3. Scroll down to '....'
4. See error
**Expected behavior**
A clear and concise description of what you expected to happen.
**Screenshots**
If applicable, add screenshots to help explain your problem.
**Desktop (please complete the following information):**
- OS: [e.g. windows]
- Host: [e.g. Maya, Nuke, Houdini]
**Additional context**
Add any other context about the problem here.

View file

@ -0,0 +1,20 @@
---
name: Feature request
about: Suggest an idea for this project
title: ''
labels: enhancement
assignees: ''
---
**Is your feature request related to a problem? Please describe.**
A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]
**Describe the solution you'd like**
A clear and concise description of what you want to happen.
**Describe alternatives you've considered**
A clear and concise description of any alternative solutions or features you've considered.
**Additional context**
Add any other context or screenshots about the feature request here.

View file

@ -258,14 +258,6 @@ class AppAction(BaseHandler):
env = acre.merge(env, current_env=dict(os.environ))
env = acre.append(dict(os.environ), env)
#
# tools_env = acre.get_tools(tools)
# env = acre.compute(dict(tools_env))
# env = acre.merge(env, dict(os.environ))
# os.environ = acre.append(dict(os.environ), env)
# os.environ = acre.compute(os.environ)
# Get path to execute
st_temp_path = os.environ['PYPE_CONFIG']
os_plat = platform.system().lower()
@ -275,6 +267,18 @@ class AppAction(BaseHandler):
# Full path to executable launcher
execfile = None
if application.get("launch_hook"):
hook = application.get("launch_hook")
self.log.info("launching hook: {}".format(hook))
ret_val = pypelib.execute_hook(
application.get("launch_hook"), env=env)
if not ret_val:
return {
'success': False,
'message': "Hook didn't finish successfully {0}"
.format(self.label)
}
if sys.platform == "win32":
for ext in os.environ["PATHEXT"].split(os.pathsep):
@ -286,7 +290,10 @@ class AppAction(BaseHandler):
# Run SW if was found executable
if execfile is not None:
popen = avalonlib.launch(
# Store subprocess to variable. This is due to a Blender launch
# bug. Please make sure Blender >=2.81 can be launched before
# removing the `_popen` variable.
_popen = avalonlib.launch(
executable=execfile, args=[], environment=env
)
else:
@ -294,8 +301,7 @@ class AppAction(BaseHandler):
'success': False,
'message': "We didn't found launcher for {0}"
.format(self.label)
}
pass
}
if sys.platform.startswith('linux'):
execfile = os.path.join(path.strip('"'), self.executable)
@ -320,7 +326,7 @@ class AppAction(BaseHandler):
'message': "No executable permission - {}".format(
execfile)
}
pass
else:
self.log.error('Launcher doesn\'t exist - {}'.format(
execfile))
@ -328,10 +334,13 @@ class AppAction(BaseHandler):
'success': False,
'message': "Launcher doesn't exist - {}".format(execfile)
}
pass
# Run SW if was found executable
if execfile is not None:
avalonlib.launch(
# Store subprocess to variable. This is due to a Blender launch
# bug. Please make sure Blender >=2.81 can be launched before
# removing the `_popen` variable.
_popen = avalonlib.launch(
'/usr/bin/env', args=['bash', execfile], environment=env
)
else:
@ -340,7 +349,6 @@ class AppAction(BaseHandler):
'message': "We didn't found launcher for {0}"
.format(self.label)
}
pass
# Change status of task to In progress
presets = config.get_presets()["ftrack"]["ftrack_config"]

View file

@ -0,0 +1,83 @@
import logging
import os
from pype.lib import PypeHook
from pype.unreal import lib as unreal_lib
from pypeapp import Logger
log = logging.getLogger(__name__)
class UnrealPrelaunch(PypeHook):
    """Prepare an Unreal project before the launcher starts Unreal.

    This hook checks whether the current workfile path contains an Unreal
    project. If not, it initializes one, and finally it passes the path
    to the project to the Unreal launcher shell script through an
    environment variable.
    """
    def __init__(self, logger=None):
        # Fall back to a class-named pype logger when none is injected.
        if not logger:
            self.log = Logger().get_logger(self.__class__.__name__)
        else:
            self.log = logger
        # Prefix used in log messages to identify this hook.
        self.signature = "( {} )".format(self.__class__.__name__)
    def execute(self, *args, env: dict = None) -> bool:
        # When no environment mapping is given, operate on (and mutate)
        # the real process environment.
        if not env:
            env = os.environ
        asset = env["AVALON_ASSET"]
        task = env["AVALON_TASK"]
        workdir = env["AVALON_WORKDIR"]
        # App name is expected like `unreal_4.24`; take version suffix.
        engine_version = env["AVALON_APP_NAME"].split("_")[-1]
        project_name = f"{asset}_{task}"
        # Unreal is sensitive about project names longer than 20 chars
        if len(project_name) > 20:
            self.log.warning((f"Project name exceed 20 characters "
                              f"({project_name})!"))
        # Unreal doesn't accept non alphabet characters at the start
        # of the project name. This is because project name is then used
        # in various places inside c++ code and there variable names cannot
        # start with non-alpha. We prepend 'P' to the project name to solve
        # it.
        # 😱
        if not project_name[:1].isalpha():
            self.log.warning(f"Project name doesn't start with alphabet "
                             f"character ({project_name}). Appending 'P'")
            project_name = f"P{project_name}"
        project_path = os.path.join(workdir, project_name)
        self.log.info((f"{self.signature} requested UE4 version: "
                       f"[ {engine_version} ]"))
        # Map of installed engine versions -> install paths.
        detected = unreal_lib.get_engine_versions()
        detected_str = ', '.join(detected.keys()) or 'none'
        self.log.info((f"{self.signature} detected UE4 versions: "
                       f"[ {detected_str} ]"))
        del(detected_str)
        # Compare on major.minor only (e.g. `4.24`).
        engine_version = ".".join(engine_version.split(".")[:2])
        if engine_version not in detected.keys():
            self.log.error((f"{self.signature} requested version not "
                            f"detected [ {engine_version} ]"))
            return False
        os.makedirs(project_path, exist_ok=True)
        project_file = os.path.join(project_path, f"{project_name}.uproject")
        engine_path = detected[engine_version]
        # Create the project only when the .uproject file is missing.
        if not os.path.isfile(project_file):
            self.log.info((f"{self.signature} creating unreal "
                           f"project [ {project_name} ]"))
            if env.get("AVALON_UNREAL_PLUGIN"):
                # Forward plugin location to the real process environment so
                # project creation can pick it up.
                os.environ["AVALON_UNREAL_PLUGIN"] = env.get("AVALON_UNREAL_PLUGIN")  # noqa: E501
            unreal_lib.create_unreal_project(project_name,
                                             engine_version,
                                             project_path,
                                             engine_path=engine_path)
        # Expose results for the Unreal launcher shell script.
        env["PYPE_UNREAL_PROJECT_FILE"] = project_file
        env["AVALON_CURRENT_UNREAL_ENGINE"] = engine_path
        return True

View file

@ -1,14 +1,21 @@
import os
import sys
import types
import re
import uuid
import json
import collections
import logging
import itertools
import contextlib
import subprocess
import inspect
from abc import ABCMeta, abstractmethod
from avalon import io
from avalon import io, pipeline
import six
import avalon.api
import avalon
from pypeapp import config
log = logging.getLogger(__name__)
@ -177,7 +184,8 @@ def modified_environ(*remove, **update):
is sure to work in all situations.
:param remove: Environment variables to remove.
:param update: Dictionary of environment variables and values to add/update.
:param update: Dictionary of environment variables
and values to add/update.
"""
env = os.environ
update = update or {}
@ -403,8 +411,8 @@ def switch_item(container,
"parent": version["_id"]}
)
assert representation, ("Could not find representation in the database with"
" the name '%s'" % representation_name)
assert representation, ("Could not find representation in the database "
"with the name '%s'" % representation_name)
avalon.api.switch(container, representation)
@ -491,7 +499,6 @@ def filter_pyblish_plugins(plugins):
`discover()` method.
:type plugins: Dict
"""
from pypeapp import config
from pyblish import api
host = api.current_host()
@ -537,7 +544,9 @@ def get_subsets(asset_name,
"""
Query subsets with filter on name.
The method will return all found subsets and its defined version and subsets. Version could be specified with number. Representation can be filtered.
The method will return all found subsets and its defined version
and subsets. Version could be specified with number. Representation
can be filtered.
Arguments:
asset_name (str): asset (shot) name
@ -548,14 +557,13 @@ def get_subsets(asset_name,
Returns:
dict: subsets with version and representations in keys
"""
from avalon import io
# query asset from db
asset_io = io.find_one({"type": "asset", "name": asset_name})
# check if anything returned
assert asset_io, "Asset not existing. \
Check correct name: `{}`".format(asset_name)
assert asset_io, (
"Asset not existing. Check correct name: `{}`").format(asset_name)
# create subsets query filter
filter_query = {"type": "subset", "parent": asset_io["_id"]}
@ -569,7 +577,9 @@ def get_subsets(asset_name,
# query all assets
subsets = [s for s in io.find(filter_query)]
assert subsets, "No subsets found. Check correct filter. Try this for start `r'.*'`: asset: `{}`".format(asset_name)
assert subsets, ("No subsets found. Check correct filter. "
"Try this for start `r'.*'`: "
"asset: `{}`").format(asset_name)
output_dict = {}
# Process subsets
@ -622,7 +632,6 @@ class CustomNone:
def __init__(self):
"""Create uuid as identifier for custom None."""
import uuid
self.identifier = str(uuid.uuid4())
def __bool__(self):
@ -643,3 +652,678 @@ class CustomNone:
def __repr__(self):
"""Representation of custom None."""
return "<CustomNone-{}>".format(str(self.identifier))
def execute_hook(hook, *args, **kwargs):
    """Load a hook file, instantiate its class and call ``execute`` on it.

    Hook must be in a form:
    `$PYPE_ROOT/repos/pype/path/to/hook.py/HookClass`
    This will load `hook.py`, instantiate HookClass and then call
    `execute(*args, **kwargs)` on it.

    :param hook: path to hook class
    :type hook: str
    :returns: whatever the hook's ``execute`` returns, or ``False`` when
        the hook is not a python file or failed to load
    """
    # Last path component is the class name, the rest is the file path
    # relative to `$PYPE_ROOT/repos/pype`.
    class_name = hook.split("/")[-1]
    abspath = os.path.join(os.getenv('PYPE_ROOT'),
                           'repos', 'pype', *hook.split("/")[:-1])
    mod_name, mod_ext = os.path.splitext(os.path.basename(abspath))
    # Only python files can act as hooks.
    if mod_ext != ".py":
        return False
    module = types.ModuleType(mod_name)
    module.__file__ = abspath
    try:
        with open(abspath) as f:
            # Execute hook source inside the fresh module namespace.
            # `compile` with the real path gives useful tracebacks.
            exec(compile(f.read(), abspath, "exec"), module.__dict__)
        sys.modules[abspath] = module
    except Exception as exp:
        # `log.exception` already records the traceback.
        log.exception("loading hook failed: {}".format(exp))
        return False
    obj = getattr(module, class_name)
    hook_obj = obj()
    ret_val = hook_obj.execute(*args, **kwargs)
    return ret_val
@six.add_metaclass(ABCMeta)
class PypeHook:
    """Abstract base class for pype launch hooks.

    Subclasses must implement :meth:`execute`; instances are created and
    run by :func:`execute_hook`.
    """
    def __init__(self):
        pass
    @abstractmethod
    def execute(self, *args, **kwargs):
        """Run the hook. Must be implemented by subclasses."""
        pass
def get_linked_assets(asset_entity):
    """Return assets linked to `asset_entity`.

    Linked-asset resolution is not implemented yet, so an empty list is
    returned for every input.
    """
    # TODO implement
    linked_assets = []
    return linked_assets
def map_subsets_by_family(subsets):
    """Group subset documents by their main family.

    The single "family" key in a subset's data wins; otherwise the first
    entry of "families" is used. Subsets carrying neither are skipped.
    """
    grouped = collections.defaultdict(list)
    for doc in subsets:
        data = doc["data"]
        main_family = data.get("family")
        if not main_family:
            fallback_families = data.get("families")
            if not fallback_families:
                # No family information at all - leave this subset out.
                continue
            main_family = fallback_families[0]
        grouped[main_family].append(doc)
    return grouped
class BuildWorkfile:
    """Wrapper for build workfile process.

    Load representations for current context by build presets. Build presets
    are host related, since each host has its loaders.
    """

    def process(self):
        """Main method of this wrapper.

        Building of workfile is triggered and is possible to implement
        post processing of loaded containers if necessary.
        """
        containers = self.build_workfile()
        return containers

    def build_workfile(self):
        """Prepares and load containers into workfile.

        Loads latest versions of current and linked assets to workfile by logic
        stored in Workfile profiles from presets. Profiles are set by host,
        filtered by current task name and used by families.

        Each family can specify representation names and loaders for
        representations and first available and successfully loaded
        representation is returned as container.

        At the end you'll get list of loaded containers per each asset.

        loaded_containers [{
            "asset_entity": <AssetEntity1>,
            "containers": [<Container1>, <Container2>, ...]
        }, {
            "asset_entity": <AssetEntity2>,
            "containers": [<Container3>, ...]
        }, {
            ...
        }]
        """
        # Get current asset name and entity
        current_asset_name = io.Session["AVALON_ASSET"]
        current_asset_entity = io.find_one({
            "type": "asset",
            "name": current_asset_name
        })
        # Skip if asset was not found
        if not current_asset_entity:
            print("Asset entity with name `{}` was not found".format(
                current_asset_name
            ))
            return

        # Prepare available loaders
        loaders_by_name = {}
        for loader in avalon.api.discover(avalon.api.Loader):
            loader_name = loader.__name__
            if loader_name in loaders_by_name:
                raise KeyError(
                    "Duplicated loader name {0}!".format(loader_name)
                )
            loaders_by_name[loader_name] = loader

        # Skip if there are no loaders
        if not loaders_by_name:
            log.warning("There are no registered loaders.")
            return

        # Get current task name
        current_task_name = io.Session["AVALON_TASK"]

        # Load workfile presets for task
        build_presets = self.get_build_presets(current_task_name)

        # Skip if there are no presets for task
        if not build_presets:
            log.warning(
                "Current task `{}` does not have any loading preset.".format(
                    current_task_name
                )
            )
            return

        # Get presets for loading current asset
        current_context_profiles = build_presets.get("current_context")
        # Get presets for loading linked assets
        link_context_profiles = build_presets.get("linked_assets")
        # Skip if both are missing
        if not current_context_profiles and not link_context_profiles:
            log.warning("Current task `{}` has empty loading preset.".format(
                current_task_name
            ))
            return
        elif not current_context_profiles:
            log.warning((
                "Current task `{}` doesn't have any loading"
                " preset for it's context."
            ).format(current_task_name))
        elif not link_context_profiles:
            # NOTE: missing space between the two string parts used to
            # render as "anyloading".
            log.warning((
                "Current task `{}` doesn't have any"
                " loading preset for it's linked assets."
            ).format(current_task_name))

        # Prepare assets to process by workfile presets
        assets = []
        current_asset_id = None
        if current_context_profiles:
            # Add current asset entity if preset has current context set
            assets.append(current_asset_entity)
            current_asset_id = current_asset_entity["_id"]

        if link_context_profiles:
            # Find and append linked assets if preset has set linked mapping
            link_assets = get_linked_assets(current_asset_entity)
            if link_assets:
                assets.extend(link_assets)

        # Skip if there are no assets. This can happen if only linked mapping
        # is set and there are no links for his asset.
        if not assets:
            log.warning(
                "Asset does not have linked assets. Nothing to process."
            )
            return

        # Prepare entities from database for assets
        prepared_entities = self._collect_last_version_repres(assets)

        # Load containers by prepared entities and presets
        loaded_containers = []
        # - Current asset containers
        if current_asset_id and current_asset_id in prepared_entities:
            current_context_data = prepared_entities.pop(current_asset_id)
            loaded_data = self.load_containers_by_asset_data(
                current_context_data, current_context_profiles, loaders_by_name
            )
            if loaded_data:
                loaded_containers.append(loaded_data)

        # - Linked assets container
        for linked_asset_data in prepared_entities.values():
            loaded_data = self.load_containers_by_asset_data(
                linked_asset_data, link_context_profiles, loaders_by_name
            )
            if loaded_data:
                loaded_containers.append(loaded_data)

        # Return list of loaded containers
        return loaded_containers

    def get_build_presets(self, task_name):
        """ Returns presets to build workfile for task name.

        Presets are loaded for current project set in
        io.Session["AVALON_PROJECT"], filtered by registered host
        and entered task name.

        :param task_name: Task name used for filtering build presets.
        :type task_name: str
        :return: preset per entered task
        :rtype: dict | None
        """
        host_name = avalon.api.registered_host().__name__.rsplit(".", 1)[-1]
        presets = config.get_presets(io.Session["AVALON_PROJECT"])
        # Get presets for host
        build_presets = (
            presets["plugins"]
            .get(host_name, {})
            .get("workfile_build")
        )
        if not build_presets:
            return

        # Match task case-insensitively against each preset's task list.
        task_name_low = task_name.lower()
        per_task_preset = None
        for preset in build_presets:
            preset_tasks = preset.get("tasks") or []
            preset_tasks_low = [task.lower() for task in preset_tasks]
            if task_name_low in preset_tasks_low:
                per_task_preset = preset
                break
        return per_task_preset

    def _filter_build_profiles(self, build_profiles, loaders_by_name):
        """ Filter build profiles by loaders and prepare process data.

        Valid profile must have "loaders", "families" and "repre_names" keys
        with valid values.
        - "loaders" expects list of strings representing possible loaders.
        - "families" expects list of strings for filtering
                     by main subset family.
        - "repre_names" expects list of strings for filtering by
                        representation name.

        Lowered "families" and "repre_names" are prepared for each profile with
        all required keys.

        :param build_profiles: Profiles for building workfile.
        :type build_profiles: dict
        :param loaders_by_name: Available loaders per name.
        :type loaders_by_name: dict
        :return: Filtered and prepared profiles.
        :rtype: list
        """
        valid_profiles = []
        for profile in build_profiles:
            # Check loaders
            profile_loaders = profile.get("loaders")
            if not profile_loaders:
                log.warning((
                    "Build profile has missing loaders configuration: {0}"
                ).format(json.dumps(profile, indent=4)))
                continue

            # Check if any loader is available
            loaders_match = False
            for loader_name in profile_loaders:
                if loader_name in loaders_by_name:
                    loaders_match = True
                    break

            if not loaders_match:
                log.warning((
                    "All loaders from Build profile are not available: {0}"
                ).format(json.dumps(profile, indent=4)))
                continue

            # Check families
            profile_families = profile.get("families")
            if not profile_families:
                log.warning((
                    "Build profile is missing families configuration: {0}"
                ).format(json.dumps(profile, indent=4)))
                continue

            # Check representation names
            profile_repre_names = profile.get("repre_names")
            if not profile_repre_names:
                log.warning((
                    "Build profile is missing"
                    " representation names filtering: {0}"
                ).format(json.dumps(profile, indent=4)))
                continue

            # Prepare lowered families and representation names
            profile["families_lowered"] = [
                fam.lower() for fam in profile_families
            ]
            profile["repre_names_lowered"] = [
                name.lower() for name in profile_repre_names
            ]
            valid_profiles.append(profile)

        return valid_profiles

    def _prepare_profile_for_subsets(self, subsets, profiles):
        """Select profile for each subset by its data.

        Profiles are filtered for each subset individually.
        Profile is filtered by subset's family, optionally by name regex and
        representation names set in profile.
        It is possible to not find matching profile for subset, in that case
        subset is skipped and it is possible that none of subsets have
        matching profile.

        :param subsets: Subset documents.
        :type subsets: list
        :param profiles: Build profiles.
        :type profiles: dict
        :return: Profile by subset's id.
        :rtype: dict
        """
        # Prepare subsets
        subsets_by_family = map_subsets_by_family(subsets)

        profiles_per_subset_id = {}
        for family, family_subsets in subsets_by_family.items():
            family_low = family.lower()
            for profile in profiles:
                # Skip profile if does not contain family
                if family_low not in profile["families_lowered"]:
                    continue

                # Precompile name filters as regexes
                profile_regexes = profile.get("subset_name_filters")
                if profile_regexes:
                    _profile_regexes = []
                    for regex in profile_regexes:
                        _profile_regexes.append(re.compile(regex))
                    profile_regexes = _profile_regexes

                for subset in family_subsets:
                    # Verify regex filtering (optional)
                    if profile_regexes:
                        valid = False
                        for pattern in profile_regexes:
                            if re.match(pattern, subset["name"]):
                                valid = True
                                break
                        if not valid:
                            continue

                    profiles_per_subset_id[subset["_id"]] = profile

                # break profiles loop on finding the first matching profile
                break
        return profiles_per_subset_id

    def load_containers_by_asset_data(
        self, asset_entity_data, build_profiles, loaders_by_name
    ):
        """Load containers for entered asset entity by Build profiles.

        :param asset_entity_data: Prepared data with subsets, last version
            and representations for specific asset.
        :type asset_entity_data: dict
        :param build_profiles: Build profiles.
        :type build_profiles: dict
        :param loaders_by_name: Available loaders per name.
        :type loaders_by_name: dict
        :return: Output contains asset document and loaded containers.
        :rtype: dict
        """
        # Make sure all data are not empty
        if not asset_entity_data or not build_profiles or not loaders_by_name:
            return

        asset_entity = asset_entity_data["asset_entity"]

        valid_profiles = self._filter_build_profiles(
            build_profiles, loaders_by_name
        )
        if not valid_profiles:
            log.warning(
                "There are not valid Workfile profiles. Skipping process."
            )
            return

        log.debug("Valid Workfile profiles: {}".format(valid_profiles))

        subsets_by_id = {}
        version_by_subset_id = {}
        repres_by_version_id = {}
        for subset_id, in_data in asset_entity_data["subsets"].items():
            subset_entity = in_data["subset_entity"]
            subsets_by_id[subset_entity["_id"]] = subset_entity

            version_data = in_data["version"]
            version_entity = version_data["version_entity"]
            version_by_subset_id[subset_id] = version_entity
            repres_by_version_id[version_entity["_id"]] = (
                version_data["repres"]
            )

        if not subsets_by_id:
            log.warning("There are not subsets for asset {0}".format(
                asset_entity["name"]
            ))
            return

        profiles_per_subset_id = self._prepare_profile_for_subsets(
            subsets_by_id.values(), valid_profiles
        )
        if not profiles_per_subset_id:
            log.warning("There are not valid subsets.")
            return

        # Keep only representations whose name is allowed by the subset's
        # matched profile.
        valid_repres_by_subset_id = collections.defaultdict(list)
        for subset_id, profile in profiles_per_subset_id.items():
            profile_repre_names = profile["repre_names_lowered"]

            version_entity = version_by_subset_id[subset_id]
            version_id = version_entity["_id"]
            repres = repres_by_version_id[version_id]
            for repre in repres:
                repre_name_low = repre["name"].lower()
                if repre_name_low in profile_repre_names:
                    valid_repres_by_subset_id[subset_id].append(repre)

        # DEBUG message
        msg = "Valid representations for Asset: `{}`".format(
            asset_entity["name"]
        )
        for subset_id, repres in valid_repres_by_subset_id.items():
            subset = subsets_by_id[subset_id]
            msg += "\n# Subset Name/ID: `{}`/{}".format(
                subset["name"], subset_id
            )
            for repre in repres:
                msg += "\n## Repre name: `{}`".format(repre["name"])

        log.debug(msg)

        containers = self._load_containers(
            valid_repres_by_subset_id, subsets_by_id,
            profiles_per_subset_id, loaders_by_name
        )

        return {
            "asset_entity": asset_entity,
            "containers": containers
        }

    def _load_containers(
        self, repres_by_subset_id, subsets_by_id,
        profiles_per_subset_id, loaders_by_name
    ):
        """Real load by collected data happens here.

        Loading of representations per subset happens here. Each subset can
        load one representation. Loading is tried in specific order.
        Representations are tried to load by names defined in configuration.
        If subset has representation matching representation name each loader
        is tried to load it until any is successful. If none of them was
        successful then next representation name is tried.
        Subset process loop ends when any representation is loaded or
        all matching representations were already tried.

        :param repres_by_subset_id: Available representations mapped
            by their parent (subset) id.
        :type repres_by_subset_id: dict
        :param subsets_by_id: Subset documents mapped by their id.
        :type subsets_by_id: dict
        :param profiles_per_subset_id: Build profiles mapped by subset id.
        :type profiles_per_subset_id: dict
        :param loaders_by_name: Available loaders per name.
        :type loaders_by_name: dict
        :return: Objects of loaded containers.
        :rtype: list
        """
        loaded_containers = []
        for subset_id, repres in repres_by_subset_id.items():
            subset_name = subsets_by_id[subset_id]["name"]

            profile = profiles_per_subset_id[subset_id]
            loaders_last_idx = len(profile["loaders"]) - 1
            repre_names_last_idx = len(profile["repre_names_lowered"]) - 1

            repre_by_low_name = {
                repre["name"].lower(): repre for repre in repres
            }

            is_loaded = False
            for repre_name_idx, profile_repre_name in enumerate(
                profile["repre_names_lowered"]
            ):
                # Break iteration if representation was already loaded
                if is_loaded:
                    break

                repre = repre_by_low_name.get(profile_repre_name)
                if not repre:
                    continue

                for loader_idx, loader_name in enumerate(profile["loaders"]):
                    if is_loaded:
                        break
                    loader = loaders_by_name.get(loader_name)
                    if not loader:
                        continue
                    try:
                        container = avalon.api.load(
                            loader,
                            repre["_id"],
                            name=subset_name
                        )
                        loaded_containers.append(container)
                        is_loaded = True

                    except Exception as exc:
                        # BUGFIX: original compared the exception instance
                        # to the class with `==`, which is never true.
                        if isinstance(exc, pipeline.IncompatibleLoaderError):
                            log.info((
                                "Loader `{}` is not compatible with"
                                " representation `{}`"
                            ).format(loader_name, repre["name"]))

                        else:
                            log.error(
                                "Unexpected error happened during loading",
                                exc_info=True
                            )

                        msg = "Loading failed."
                        # BUGFIX: the "next representation" and "not
                        # successful" branches were swapped.
                        if loader_idx < loaders_last_idx:
                            msg += " Trying next loader."
                        elif repre_name_idx < repre_names_last_idx:
                            msg += " Trying next representation."
                        else:
                            msg += (
                                " Loading of subset `{}` was not successful."
                            ).format(subset_name)
                        log.info(msg)

        return loaded_containers

    def _collect_last_version_repres(self, asset_entities):
        """Collect subsets, versions and representations for asset_entities.

        :param asset_entities: Asset entities for which want to find data
        :type asset_entities: list
        :return: collected entities
        :rtype: dict

        Example output:
        ```
        {
            {Asset ID}: {
                "asset_entity": <AssetEntity>,
                "subsets": {
                    {Subset ID}: {
                        "subset_entity": <SubsetEntity>,
                        "version": {
                            "version_entity": <VersionEntity>,
                            "repres": [
                                <RepreEntity1>, <RepreEntity2>, ...
                            ]
                        }
                    },
                    ...
                }
            },
            ...
        }
        output[asset_id]["subsets"][subset_id]["version"]["repres"]
        ```
        """
        if not asset_entities:
            return {}

        asset_entity_by_ids = {asset["_id"]: asset for asset in asset_entities}

        # NOTE: `$in` values are wrapped in `list()` because BSON cannot
        # encode `dict_keys` views on python 3.
        subsets = list(io.find({
            "type": "subset",
            "parent": {"$in": list(asset_entity_by_ids.keys())}
        }))
        subset_entity_by_ids = {subset["_id"]: subset for subset in subsets}

        sorted_versions = list(io.find({
            "type": "version",
            "parent": {"$in": list(subset_entity_by_ids.keys())}
        }).sort("name", -1))

        # Versions are sorted by name descending so the first version seen
        # per subset is its latest one.
        subset_ids_with_latest_version = set()
        last_versions_by_id = {}
        for version in sorted_versions:
            subset_id = version["parent"]
            if subset_id in subset_ids_with_latest_version:
                continue
            subset_ids_with_latest_version.add(subset_id)
            last_versions_by_id[version["_id"]] = version

        repres = io.find({
            "type": "representation",
            "parent": {"$in": list(last_versions_by_id.keys())}
        })

        output = {}
        for repre in repres:
            version_id = repre["parent"]
            version = last_versions_by_id[version_id]

            subset_id = version["parent"]
            subset = subset_entity_by_ids[subset_id]

            asset_id = subset["parent"]
            asset = asset_entity_by_ids[asset_id]

            if asset_id not in output:
                output[asset_id] = {
                    "asset_entity": asset,
                    "subsets": {}
                }

            if subset_id not in output[asset_id]["subsets"]:
                output[asset_id]["subsets"][subset_id] = {
                    "subset_entity": subset,
                    "version": {
                        "version_entity": version,
                        "repres": []
                    }
                }

            output[asset_id]["subsets"][subset_id]["version"]["repres"].append(
                repre
            )

        return output

View file

@ -2,8 +2,9 @@ import sys
import os
import logging
from avalon.vendor.Qt import QtWidgets, QtCore, QtGui
from avalon.vendor.Qt import QtWidgets, QtGui
from avalon.maya import pipeline
from ..lib import BuildWorkfile
import maya.cmds as cmds
self = sys.modules[__name__]
@ -21,8 +22,15 @@ def _get_menu():
return menu
def deferred():
def add_build_workfiles_item():
# Add build first workfile
cmds.menuItem(divider=True, parent=pipeline._menu)
cmds.menuItem(
"Build First Workfile",
parent=pipeline._menu,
command=lambda *args: BuildWorkfile().process()
)
log.info("Attempting to install scripts menu..")
@ -30,8 +38,11 @@ def deferred():
import scriptsmenu.launchformaya as launchformaya
import scriptsmenu.scriptsmenu as scriptsmenu
except ImportError:
log.warning("Skipping studio.menu install, because "
"'scriptsmenu' module seems unavailable.")
log.warning(
"Skipping studio.menu install, because "
"'scriptsmenu' module seems unavailable."
)
add_build_workfiles_item()
return
# load configuration of custom menu
@ -39,15 +50,16 @@ def deferred():
config = scriptsmenu.load_configuration(config_path)
# run the launcher for Maya menu
studio_menu = launchformaya.main(title=self._menu.title(),
objectName=self._menu)
studio_menu = launchformaya.main(
title=self._menu.title(),
objectName=self._menu
)
# apply configuration
studio_menu.build_from_configuration(studio_menu, config)
def uninstall():
menu = _get_menu()
if menu:
log.info("Attempting to uninstall..")
@ -60,9 +72,8 @@ def uninstall():
def install():
if cmds.about(batch=True):
print("Skipping pype.menu initialization in batch mode..")
log.info("Skipping pype.menu initialization in batch mode..")
return
uninstall()

View file

@ -28,7 +28,7 @@ self = sys.modules[__name__]
self._project = None
def onScriptLoad():
def on_script_load():
''' Callback for ffmpeg support
'''
if nuke.env['LINUX']:
@ -39,7 +39,7 @@ def onScriptLoad():
nuke.tcl('load movWriter')
def checkInventoryVersions():
def check_inventory_versions():
"""
Actual version identifier of loaded containers
@ -180,8 +180,8 @@ def format_anatomy(data):
padding = int(anatomy.templates['render']['padding'])
except KeyError as e:
msg = ("`padding` key is not in `render` "
"Anatomy template. Please, add it there and restart "
"the pipeline (padding: \"4\"): `{}`").format(e)
"Anatomy template. Please, add it there and restart "
"the pipeline (padding: \"4\"): `{}`").format(e)
log.error(msg)
nuke.message(msg)
@ -717,7 +717,8 @@ class WorkfileSettings(object):
def set_reads_colorspace(self, reads):
""" Setting colorspace to Read nodes
Looping trought all read nodes and tries to set colorspace based on regex rules in presets
Looping through all read nodes and tries to set colorspace based
on regex rules in presets
"""
changes = dict()
for n in nuke.allNodes():
@ -889,10 +890,10 @@ class WorkfileSettings(object):
if any(x for x in data.values() if x is None):
msg = ("Missing set shot attributes in DB."
"\nContact your supervisor!."
"\n\nWidth: `{width}`"
"\nHeight: `{height}`"
"\nPixel Asspect: `{pixel_aspect}`").format(**data)
"\nContact your supervisor!."
"\n\nWidth: `{width}`"
"\nHeight: `{height}`"
"\nPixel Asspect: `{pixel_aspect}`").format(**data)
log.error(msg)
nuke.message(msg)
@ -911,8 +912,9 @@ class WorkfileSettings(object):
)
except Exception as e:
bbox = None
msg = ("{}:{} \nFormat:Crop need to be set with dots, example: "
"0.0.1920.1080, /nSetting to default").format(__name__, e)
msg = ("{}:{} \nFormat:Crop need to be set with dots, "
"example: 0.0.1920.1080, "
"/nSetting to default").format(__name__, e)
log.error(msg)
nuke.message(msg)
@ -1053,7 +1055,8 @@ class BuildWorkfile(WorkfileSettings):
"""
Building first version of workfile.
Settings are taken from presets and db. It will add all subsets in last version for defined representaions
Settings are taken from presets and db. It will add all subsets
in last version for defined representations
Arguments:
variable (type): description
@ -1281,8 +1284,6 @@ class BuildWorkfile(WorkfileSettings):
representation (dict): avalon db entity
"""
context = representation["context"]
loader_name = "LoadLuts"
loader_plugin = None

View file

@ -16,6 +16,9 @@ class CollectSceneVersion(pyblish.api.ContextPlugin):
if "standalonepublisher" in context.data.get("host", []):
return
if "unreal" in pyblish.api.registered_hosts():
return
filename = os.path.basename(context.data.get('currentFile'))
if '<shell>' in filename:

View file

@ -132,13 +132,14 @@ class ExtractBurnin(pype.api.Extractor):
slate_duration = duration_cp
# exception for slate workflow
if ("slate" in instance.data["families"]):
if "slate" in instance.data["families"]:
if "slate-frame" in repre.get("tags", []):
slate_frame_start = frame_start_cp - 1
slate_frame_end = frame_end_cp
slate_duration = duration_cp + 1
self.log.debug("__1 slate_frame_start: {}".format(slate_frame_start))
self.log.debug("__1 slate_frame_start: {}".format(
slate_frame_start))
_prep_data.update({
"slate_frame_start": slate_frame_start,
@ -192,7 +193,6 @@ class ExtractBurnin(pype.api.Extractor):
self.log.debug("Output: {}".format(output))
repre_update = {
"anatomy_template": "render",
"files": movieFileBurnin,
"name": repre["name"],
"tags": [x for x in repre["tags"] if x != "delete"]

View file

@ -82,7 +82,8 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
"image"
"source",
"assembly",
"textures"
"fbx",
"textures",
"action"
]
exclude_families = ["clip"]

View file

@ -0,0 +1,11 @@
import avalon.maya
class CreateUnrealStaticMesh(avalon.maya.Creator):
    """Creator for Unreal static mesh instances in Maya.

    Produces an instance of family ``unrealStaticMesh`` that is later
    collected and exported as FBX for Unreal Engine.
    """

    # defaults shown in the avalon Creator UI
    name = "staticMeshMain"
    label = "Unreal - Static Mesh"
    family = "unrealStaticMesh"
    icon = "cube"

    def __init__(self, *args, **kwargs):
        # no extra behavior on top of the base Creator; kept as an
        # explicit hook for future instance-data tweaks
        super(CreateUnrealStaticMesh, self).__init__(*args, **kwargs)

View file

@ -6,10 +6,11 @@ from collections import defaultdict
from maya import cmds
from avalon import api
from avalon import api, io
from avalon.maya import lib as avalon_lib, pipeline
from pype.maya import lib
from pypeapp import config
from pprint import pprint
class YetiCacheLoader(api.Loader):
@ -101,12 +102,23 @@ class YetiCacheLoader(api.Loader):
def update(self, container, representation):
io.install()
namespace = container["namespace"]
container_node = container["objectName"]
fur_settings = io.find_one(
{"parent": representation["parent"], "name": "fursettings"}
)
pprint({"parent": representation["parent"], "name": "fursettings"})
pprint(fur_settings)
assert fur_settings is not None, (
"cannot find fursettings representation"
)
settings_fname = api.get_representation_path(fur_settings)
path = api.get_representation_path(representation)
# Get all node data
fname, ext = os.path.splitext(path)
settings_fname = "{}.fursettings".format(fname)
with open(settings_fname, "r") as fp:
settings = json.load(fp)
@ -147,13 +159,14 @@ class YetiCacheLoader(api.Loader):
cmds.delete(to_remove)
# replace frame in filename with %04d
RE_frame = re.compile(r"(\d+)(\.fur)$")
file_name = re.sub(RE_frame, r"%04d\g<2>", os.path.basename(path))
for cb_id, data in meta_data_lookup.items():
# Update cache file name
file_name = data["name"].replace(":", "_")
cache_file_path = "{}.%04d.fur".format(file_name)
data["attrs"]["cacheFileName"] = os.path.join(
path, cache_file_path)
os.path.dirname(path), file_name)
if cb_id not in scene_lookup:
@ -197,6 +210,12 @@ class YetiCacheLoader(api.Loader):
yeti_node = yeti_nodes[0]
for attr, value in data["attrs"].items():
# handle empty attribute strings. Those are reported
# as None, so their type is NoneType and this is not
# supported on attributes in Maya. We change it to
# empty string.
if value is None:
value = ""
lib.set_attribute(attr, value, yeti_node)
cmds.setAttr("{}.representation".format(container_node),

View file

@ -0,0 +1,33 @@
# -*- coding: utf-8 -*-
from maya import cmds
import pyblish.api
class CollectUnrealStaticMesh(pyblish.api.InstancePlugin):
    """Collect data for Unreal static mesh export.

    Ensures always only a single frame is extracted (the current frame)
    and sets the FBX export overrides used by the FBX extractor later in
    the publish process.
    """

    order = pyblish.api.CollectorOrder + 0.2
    label = "Collect Model Data"
    families = ["unrealStaticMesh"]

    def process(self, instance):
        # adding the fbx family triggers the FBX extractor
        instance.data["families"].append("fbx")

        current_frame = cmds.currentTime(query=True)

        # FBX export overrides plus a single-frame range (current frame)
        instance.data.update({
            "smoothingGroups": True,
            "smoothMesh": True,
            "triangulate": True,
            "frameStart": current_frame,
            "frameEnd": current_frame,
        })

View file

@ -49,6 +49,10 @@ class CollectYetiCache(pyblish.api.InstancePlugin):
attr_data = {}
for attr in SETTINGS:
current = cmds.getAttr("%s.%s" % (shape, attr))
# change None to empty string as Maya doesn't support
# NoneType in attributes
if current is None:
current = ""
attr_data[attr] = current
# Get transform data

View file

@ -212,12 +212,11 @@ class ExtractFBX(pype.api.Extractor):
instance.data["representations"] = []
representation = {
'name': 'mov',
'ext': 'mov',
'name': 'fbx',
'ext': 'fbx',
'files': filename,
"stagingDir": stagingDir,
}
instance.data["representations"].append(representation)
self.log.info("Extract FBX successful to: {0}".format(path))

View file

@ -3,24 +3,23 @@ import glob
import contextlib
import clique
import capture
#
import pype.maya.lib as lib
import pype.api
#
from maya import cmds, mel
from maya import cmds
import pymel.core as pm
# TODO: move codec settings to presets
class ExtractQuicktime(pype.api.Extractor):
"""Extract Quicktime from viewport capture.
class ExtractPlayblast(pype.api.Extractor):
"""Extract viewport playblast.
Takes review camera and creates review Quicktime video based on viewport
capture.
"""
label = "Quicktime"
label = "Extract Playblast"
hosts = ["maya"]
families = ["review"]
optional = True
@ -29,7 +28,7 @@ class ExtractQuicktime(pype.api.Extractor):
self.log.info("Extracting capture..")
# get scene fps
fps = mel.eval('currentTimeUnitToFPS()')
fps = instance.data.get("fps") or instance.context.data.get("fps")
# if start and end frames cannot be determined, get them
# from Maya timeline
@ -39,6 +38,7 @@ class ExtractQuicktime(pype.api.Extractor):
start = cmds.playbackOptions(query=True, animationStartTime=True)
if end is None:
end = cmds.playbackOptions(query=True, animationEndTime=True)
self.log.info("start: {}, end: {}".format(start, end))
# get cameras
@ -47,7 +47,7 @@ class ExtractQuicktime(pype.api.Extractor):
try:
preset = lib.load_capture_preset(data=capture_preset)
except:
except Exception:
preset = {}
self.log.info('using viewport preset: {}'.format(preset))
@ -55,21 +55,12 @@ class ExtractQuicktime(pype.api.Extractor):
preset['format'] = "image"
# preset['compression'] = "qt"
preset['quality'] = 95
preset['compression'] = "jpg"
preset['compression'] = "png"
preset['start_frame'] = start
preset['end_frame'] = end
preset['camera_options'] = {
"displayGateMask": False,
"displayResolution": False,
"displayFilmGate": False,
"displayFieldChart": False,
"displaySafeAction": False,
"displaySafeTitle": False,
"displayFilmPivot": False,
"displayFilmOrigin": False,
"overscan": 1.0,
"depthOfField": cmds.getAttr("{0}.depthOfField".format(camera)),
}
camera_option = preset.get("camera_option", {})
camera_option["depthOfField"] = cmds.getAttr(
"{0}.depthOfField".format(camera))
stagingdir = self.staging_dir(instance)
filename = "{0}".format(instance.name)
@ -90,8 +81,8 @@ class ExtractQuicktime(pype.api.Extractor):
filename = preset.get("filename", "%TEMP%")
# Force viewer to False in call to capture because we have our own
# viewer opening call to allow a signal to trigger between playblast
# and viewer
# viewer opening call to allow a signal to trigger between
# playblast and viewer
preset['viewer'] = False
# Remove panel key since it's internal value to capture_gui
@ -112,8 +103,8 @@ class ExtractQuicktime(pype.api.Extractor):
instance.data["representations"] = []
representation = {
'name': 'mov',
'ext': 'mov',
'name': 'png',
'ext': 'png',
'files': collected_frames,
"stagingDir": stagingdir,
"frameStart": start,
@ -133,7 +124,6 @@ class ExtractQuicktime(pype.api.Extractor):
To workaround this we just glob.glob() for any file extensions and
assume the latest modified file is the correct file and return it.
"""
# Catch cancelled playblast
if filepath is None:
@ -164,7 +154,6 @@ class ExtractQuicktime(pype.api.Extractor):
return filepath
@contextlib.contextmanager
def maintained_time():
ct = cmds.currentTime(query=True)

View file

@ -38,7 +38,8 @@ class ValidateRigOutputIds(pyblish.api.InstancePlugin):
if compute:
out_set = next(x for x in instance if x.endswith("out_SET"))
instance_nodes = pc.sets(out_set, query=True)
instance_nodes.extend([x.getShape() for x in instance_nodes])
instance_nodes.extend(
[x.getShape() for x in instance_nodes if x.getShape()])
scene_nodes = pc.ls(type="transform") + pc.ls(type="mesh")
scene_nodes = set(scene_nodes) - set(instance_nodes)

View file

@ -0,0 +1,33 @@
# -*- coding: utf-8 -*-
from maya import cmds

import pyblish.api

import pype.api
import pype.maya.action
class ValidateUnrealMeshTriangulated(pyblish.api.InstancePlugin):
    """Validate that meshes are fully triangulated for Unreal Engine.

    A mesh whose face count differs from its triangle count still
    contains quads or n-gons and is reported as invalid.
    """

    # NOTE(review): "ValidateMeshOder" looks like a typo of
    # "ValidateMeshOrder" -- confirm against pype.api before renaming.
    order = pype.api.ValidateMeshOder
    hosts = ["maya"]
    families = ["unrealStaticMesh"]
    category = "geometry"
    label = "Mesh is Triangulated"
    actions = [pype.maya.action.SelectInvalidAction]

    @classmethod
    def get_invalid(cls, instance):
        """Return meshes whose face count differs from triangle count."""
        invalid = []
        meshes = cmds.ls(instance, type="mesh", long=True)
        for mesh in meshes:
            faces = cmds.polyEvaluate(mesh, f=True)
            tris = cmds.polyEvaluate(mesh, t=True)
            # equal counts mean every face is already a triangle
            if faces != tris:
                invalid.append(mesh)
        return invalid

    def process(self, instance):
        invalid = self.get_invalid(instance)
        # report the offending meshes instead of a generic message
        assert len(invalid) == 0, (
            "Found non-triangulated meshes: {}".format(invalid))

View file

@ -0,0 +1,120 @@
# -*- coding: utf-8 -*-
from maya import cmds
import pyblish.api
import pype.api
import pype.maya.action
import re
class ValidateUnrealStaticmeshName(pyblish.api.InstancePlugin):
    """Validate name of Unreal Static Mesh.

    Unreal's naming convention states that a static mesh should start
    with the `SM` prefix - SM_[Name]_## (e.g. SM_cube_01). This plugin
    also validates collision mesh prefixes:

        UBX_[RenderMeshName]_##:
            box collision - created from the Box/Cube primitive; must
            stay an undeformed rectangular prism.
        UCP_[RenderMeshName]_##:
            capsule collision - converted into a true capsule, so few
            segments (8) are enough; do not move individual vertices.
        USP_[RenderMeshName]_##:
            sphere collision - converted into a true sphere; same
            restrictions as capsules.
        UCX_[RenderMeshName]_##:
            convex collision - any completely closed convex 3D shape.

    The [RenderMeshName] part of every collision mesh must match one of
    the SM_[RenderMeshName] meshes in the instance.
    """

    optional = True
    order = pype.api.ValidateContentsOrder
    hosts = ["maya"]
    families = ["unrealStaticMesh"]
    label = "Unreal StaticMesh Name"
    actions = [pype.maya.action.SelectInvalidAction]

    regex_mesh = r"SM_(?P<renderName>.*)_(\d{2})"
    regex_collision = r"((UBX)|(UCP)|(USP)|(UCX))_(?P<renderName>.*)_(\d{2})"

    @classmethod
    def get_invalid(cls, instance):
        """Return nodes violating the naming convention."""

        def is_group(group_name):
            # a transform is considered a group when all of its children
            # are transforms themselves (no shape nodes)
            try:
                children = cmds.listRelatives(group_name, children=True)
                for child in children:
                    if not cmds.ls(child, transforms=True):
                        return False
                return True
            except Exception:
                return False

        invalid = []
        content_instance = instance.data.get("setMembers", None)
        if not content_instance:
            cls.log.error("Instance has no nodes!")
            # fix: originally returned True (not a list), which broke
            # SelectInvalidAction and the process() contract
            return invalid

        descendants = cmds.listRelatives(content_instance,
                                         allDescendents=True,
                                         fullPath=True) or []
        descendants = cmds.ls(descendants, noIntermediate=True, long=True)
        transforms = cmds.ls(descendants, long=False, type="transform")
        # filter out groups, keep only mesh-bearing transforms
        mesh_nodes = [node for node in transforms if not is_group(node)]

        # compile regexes for testing names
        sm_r = re.compile(cls.regex_mesh)
        cl_r = re.compile(cls.regex_collision)

        sm_names = []
        col_names = []
        for obj in mesh_nodes:
            sm_m = sm_r.match(obj)
            if sm_m is None:
                # fix: original re-ran sm_r here and clobbered the
                # compiled collision regex (cl_r) with a match object
                cl_m = cl_r.match(obj)
                if cl_m is None:
                    cls.log.error("invalid mesh name on: {}".format(obj))
                    invalid.append(obj)
                else:
                    col_names.append((cl_m.group("renderName"), obj))
            else:
                sm_names.append(sm_m.group("renderName"))

        for render_name, node in col_names:
            if render_name not in sm_names:
                # fix: original formatted the message with the stale
                # loop variable ``obj`` instead of the collision node
                cls.log.error(("collision name {} doesn't match any "
                               "static mesh names.").format(node))
                invalid.append(node)

        return invalid

    def process(self, instance):
        invalid = self.get_invalid(instance)
        if invalid:
            raise RuntimeError("Model naming is invalid. See log.")

View file

@ -0,0 +1,25 @@
# -*- coding: utf-8 -*-
from maya import cmds
import pyblish.api
import pype.api
class ValidateUnrealUpAxis(pyblish.api.ContextPlugin):
    """Validate if Z is set as up axis in Maya"""

    optional = True
    order = pype.api.ValidateContentsOrder
    hosts = ["maya"]
    families = ["unrealStaticMesh"]
    label = "Unreal Up-Axis check"
    actions = [pype.api.RepairAction]

    def process(self, context):
        # Unreal uses a Z-up coordinate system, so the Maya scene must
        # match before export
        assert cmds.upAxis(q=True, axis=True) == "z", (
            "Invalid axis set as up axis"
        )

    @classmethod
    def repair(cls, instance):
        # switch the scene up axis to Z; rotateView keeps the current
        # viewport orientation visually stable
        cmds.upAxis(axis="z", rotateView=True)

View file

@ -151,13 +151,16 @@ class CollectReviews(api.InstancePlugin):
"handleStart", "handleEnd", "sourceIn", "sourceOut",
"frameStart", "frameEnd", "sourceInH", "sourceOutH",
"clipIn", "clipOut", "clipInH", "clipOutH", "asset",
"track", "version"
"track"
]
version_data = dict()
# pass data to version
version_data.update({k: instance.data[k] for k in transfer_data})
if 'version' in instance.data:
version_data["version"] = instance.data[version]
# add to data of representation
version_data.update({
"colorspace": item.sourceMediaColourTransform(),

View file

@ -0,0 +1,33 @@
import unreal
from pype.unreal.plugin import Creator
from avalon.unreal import (
instantiate,
)
class CreateStaticMeshFBX(Creator):
    """Creator for static FBX geometry in Unreal.

    Instantiates an avalon publish instance container under ``/Game``,
    optionally seeded with the currently selected assets.
    """

    name = "unrealStaticMeshMain"
    label = "Unreal - Static Mesh"
    family = "unrealStaticMesh"
    icon = "cube"
    asset_types = ["StaticMesh"]

    root = "/Game"
    suffix = "_INS"

    def __init__(self, *args, **kwargs):
        super(CreateStaticMeshFBX, self).__init__(*args, **kwargs)

    def process(self):
        subset_name = self.data["subset"]

        selection = []
        use_selection = (self.options or {}).get("useSelection")
        if use_selection:
            selected_assets = unreal.EditorUtilityLibrary.get_selected_assets()
            selection = [asset.get_path_name() for asset in selected_assets]

        unreal.log("selection: {}".format(selection))
        instantiate(self.root, subset_name, self.data, selection, self.suffix)

View file

@ -0,0 +1,101 @@
from avalon import api
from avalon import unreal as avalon_unreal
import unreal
class StaticMeshFBXLoader(api.Loader):
    """Load Unreal StaticMesh from FBX"""

    families = ["unrealStaticMesh"]
    label = "Import FBX Static Mesh"
    representations = ["fbx"]
    icon = "cube"
    color = "orange"

    def load(self, context, name, namespace, data):
        """
        Load and containerise representation into Content Browser.

        This is two step process. First, import FBX to temporary path and
        then call `containerise()` on it - this moves all content to new
        directory and then it will create AssetContainer there and imprint it
        with metadata. This will mark this path as container.

        Args:
            context (dict): application context
            name (str): subset name
            namespace (str): in Unreal this is basically path to container.
                             This is not passed here, so namespace is set
                             by `containerise()` because only then we know
                             real path.
            data (dict): Those would be data to be imprinted. This is not used
                         now, data are imprinted by `containerise()`.

        Returns:
            list(str): list of container content
        """
        tools = unreal.AssetToolsHelpers().get_asset_tools()

        # import into a unique temporary folder first so that only the
        # freshly imported assets get moved into the final container
        temp_dir, temp_name = tools.create_unique_asset_name(
            "/Game/{}".format(name), "_TMP"
        )

        unreal.EditorAssetLibrary.make_directory(temp_dir)

        task = unreal.AssetImportTask()

        task.filename = self.fname
        task.destination_path = temp_dir
        task.destination_name = name
        task.replace_existing = False
        task.automated = True
        task.save = True

        # set import options here
        task.options = unreal.FbxImportUI()
        task.options.import_animations = False

        unreal.AssetToolsHelpers.get_asset_tools().import_asset_tasks([task])  # noqa: E501

        imported_assets = unreal.EditorAssetLibrary.list_assets(
            temp_dir, recursive=True, include_folder=True
        )
        new_dir = avalon_unreal.containerise(
            name, namespace, imported_assets, context, self.__class__.__name__)

        asset_content = unreal.EditorAssetLibrary.list_assets(
            new_dir, recursive=True, include_folder=True
        )

        # the temporary import folder is no longer needed
        unreal.EditorAssetLibrary.delete_directory(temp_dir)

        return asset_content

    def update(self, container, representation):
        """Re-import the FBX of the new representation over the container.

        Args:
            container (dict): container data of existing content
            representation (dict): avalon representation entity to update to
        """
        node = container["objectName"]
        source_path = api.get_representation_path(representation)
        destination_path = container["namespace"]

        task = unreal.AssetImportTask()

        task.filename = source_path
        task.destination_path = destination_path
        # strip suffix
        # NOTE(review): assumes the container name ends with a four
        # character suffix -- confirm against the naming convention
        task.destination_name = node[:-4]
        task.replace_existing = True
        task.automated = True
        task.save = True

        task.options = unreal.FbxImportUI()
        task.options.import_animations = False

        # do import fbx and replace existing data
        unreal.AssetToolsHelpers.get_asset_tools().import_asset_tasks([task])
        container_path = "{}/{}".format(container["namespace"],
                                        container["objectName"])
        # update metadata
        avalon_unreal.imprint(
            container_path, {"_id": str(representation["_id"])})

    def remove(self, container):
        """Delete the whole container directory from the Content Browser."""
        unreal.EditorAssetLibrary.delete_directory(container["namespace"])

View file

@ -0,0 +1,59 @@
import unreal
import pyblish.api
class CollectInstances(pyblish.api.ContextPlugin):
    """Gather instances by AvalonPublishInstance class

    This collector finds all paths containing `AvalonPublishInstance` class
    asset

    Identifier:
        id (str): "pyblish.avalon.instance"
    """

    label = "Collect Instances"
    order = pyblish.api.CollectorOrder
    hosts = ["unreal"]

    def process(self, context):
        ar = unreal.AssetRegistryHelpers.get_asset_registry()
        # every AvalonPublishInstance asset in the project marks one
        # publishable instance
        instance_containers = ar.get_assets_by_class(
            "AvalonPublishInstance", True)

        for container_data in instance_containers:
            asset = container_data.get_asset()
            # metadata tags were imprinted by the creator
            data = unreal.EditorAssetLibrary.get_metadata_tag_values(asset)
            data["objectName"] = container_data.asset_name
            # convert to strings
            data = {str(key): str(value) for (key, value) in data.items()}
            assert data.get("family"), (
                "instance has no family"
            )

            # content of container
            members = unreal.EditorAssetLibrary.list_assets(
                asset.get_path_name(), recursive=True, include_folder=True
            )
            self.log.debug(members)
            self.log.debug(asset.get_path_name())
            # remove instance container
            members.remove(asset.get_path_name())
            self.log.info("Creating instance for {}".format(asset.get_name()))
            instance = context.create_instance(asset.get_name())
            instance[:] = members

            # Store the exact members of the object set
            instance.data["setMembers"] = members
            instance.data["families"] = [data.get("family")]
            # NOTE(review): label strips the last four characters of the
            # asset name -- presumably an "_INS" suffix; confirm convention
            label = "{0} ({1})".format(asset.get_name()[:-4],
                                       data["asset"])
            instance.data["label"] = label
            instance.data.update(data)

View file

@ -0,0 +1,2 @@
from . import slate_base
from .slate_base import api

View file

@ -0,0 +1,18 @@
import sys
import json
from slate_base import api
def main(in_args=None):
    """Command line entry point for slate creation.

    The last argument is expected to be a JSON encoded string with keys:
        fill_data (dict): values used to fill slate placeholders
        slate_name (str, optional): name of slate preset to use
        slate_data (dict, optional): slate preset definition
        data_output_json (str, optional): path for output metadata

    Args:
        in_args (list, optional): argument list; defaults to ``sys.argv``.

    Raises:
        ValueError: if the last argument is not valid JSON.
    """
    # fix: the default ``None`` previously crashed with TypeError on
    # ``in_args[-1]`` when main() was called without arguments
    if in_args is None:
        in_args = sys.argv
    data_arg = in_args[-1]
    in_data = json.loads(data_arg)

    api.create_slates(
        in_data["fill_data"],
        in_data.get("slate_name"),
        in_data.get("slate_data"),
        in_data.get("data_output_json")
    )


if __name__ == "__main__":
    main(sys.argv)

View file

@ -0,0 +1,15 @@
from .font_factory import FontFactory
from .base import BaseObj, load_default_style
from .main_frame import MainFrame
from .layer import Layer
from .items import (
BaseItem,
ItemImage,
ItemRectangle,
ItemPlaceHolder,
ItemText,
ItemTable,
TableField
)
from .lib import create_slates
from .example import example

View file

@ -0,0 +1,373 @@
import os
import re
import logging
import copy
import json
from uuid import uuid4
def load_default_style():
    """Load the default slate style definition shipped next to this module.

    Returns:
        dict: parsed content of ``default_style.json``.
    """
    module_folder = os.path.dirname(os.path.abspath(__file__))
    style_path = os.path.join(module_folder, "default_style.json")
    with open(style_path, "r") as json_file:
        return json.load(json_file)
class BaseObj:
"""Base Object for slates."""
obj_type = None
available_parents = []
all_style_keys = [
"font-family", "font-size", "font-color", "font-bold", "font-italic",
"bg-color", "bg-alter-color",
"alignment-horizontal", "alignment-vertical",
"padding", "padding-left", "padding-right",
"padding-top", "padding-bottom",
"margin", "margin-left", "margin-right",
"margin-top", "margin-bottom", "width", "height",
"fill", "word-wrap", "ellide", "max-lines"
]
fill_data_regex = r"{[^}]+}"
def __init__(self, parent, style={}, name=None, pos_x=None, pos_y=None):
if not self.obj_type:
raise NotImplementedError(
"Class don't have set object type <{}>".format(
self.__class__.__name__
)
)
parent_obj_type = None
if parent:
parent_obj_type = parent.obj_type
if parent_obj_type not in self.available_parents:
expected_parents = ", ".join(self.available_parents)
raise Exception((
"Invalid parent <{}> for <{}>. Expected <{}>"
).format(
parent.__class__.__name__, self.obj_type, expected_parents
))
self.parent = parent
self._style = style
self.id = uuid4()
self.name = name
self.items = {}
self._pos_x = pos_x or 0
self._pos_y = pos_y or 0
log_parts = []
module = self.__class__.__module__
if module and module != "__main__":
log_parts.append(module)
log_parts.append(self.__class__.__name__)
self.log = logging.getLogger(".".join(log_parts))
if parent:
parent.add_item(self)
    def fill_data_format(self):
        """Hook called when the item is added to a parent (no-op here)."""
        return

    @property
    def fill_data(self):
        # data used to resolve "{key}" placeholders; delegated upwards,
        # the root object is expected to own the actual dict
        return self.parent.fill_data

    @property
    def main_style(self):
        """Default style loaded from ``default_style.json``."""
        return load_default_style()
def height(self):
raise NotImplementedError(
"Attribute `height` is not implemented for <{}>".format(
self.__clas__.__name__
)
)
def width(self):
raise NotImplementedError(
"Attribute `width` is not implemented for <{}>".format(
self.__clas__.__name__
)
)
def collect_data(self):
return None
def find_item(self, obj_type=None, name=None):
obj_type_fits = False
name_fits = False
if obj_type is None or self.obj_type == obj_type:
obj_type_fits = True
if name is None or self.name != name:
name_fits = True
output = []
if obj_type_fits and name_fits:
output.append(self)
if not self.items:
return output
for item in self.items.values():
output.extend(
item.find_item(obj_type=obj_type, name=name)
)
return output
    @property
    def full_style(self):
        """Style dict inherited from the parent chain with own overrides.

        Starts from the parent's ``full_style`` (or ``main_style`` at the
        root) and overlays keys from ``self._style``: flat style keys go
        into this object's type section, anything else is treated as a
        nested selector section and merged.
        """
        if self.parent is not None:
            # copy so the parent's style dict is not mutated below
            style = dict(val for val in self.parent.full_style.items())
        else:
            style = self.main_style

        for key, value in self._style.items():
            if key in self.all_style_keys:
                # TODO which variant is right?
                style[self.obj_type][key] = value
                # style["*"][key] = value
            else:
                if key not in style:
                    style[key] = {}
                if isinstance(style[key], dict):
                    style[key].update(value)
                else:
                    style[key] = value
        return style
    def get_style_for_obj_type(self, obj_type, style=None):
        """Resolve the effective flat style for ``obj_type``.

        Precedence (lowest to highest): "*" base section, object-type
        section, "#name" section. For "table-item" objects the
        col/row/field selectors (e.g. ``table-item-col[0-2]``) are also
        merged into the object-type layer.

        Args:
            obj_type (str): object type key to resolve.
            style (dict, optional): pre-resolved full style; defaults to
                a deep copy of ``self.full_style``.

        Returns:
            dict: flat style dict for this object.
        """
        if not style:
            style = copy.deepcopy(self.full_style)
        base = style.get("*") or {}
        obj_specific = style.get(obj_type) or {}
        name_specific = {}
        if self.name:
            name = str(self.name)
            if not name.startswith("#"):
                name = "#" + name
            name_specific = style.get(name) or {}

        if obj_type == "table-item":
            # selector indexes may be single ("1"), ranges ("1-3") or
            # comma separated combinations; field selectors are "row:col"
            col_regex = r"table-item-col\[([\d\-, ]+)*\]"
            row_regex = r"table-item-row\[([\d\-, ]+)*\]"
            field_regex = (
                r"table-item-field\[(([ ]+)?\d+([ ]+)?:([ ]+)?\d+([ ]+)?)*\]"
            )
            # STRICT field regex (not allowed spaces)
            # fild_regex = r"table-item-field\[(\d+:\d+)*\]"

            def get_indexes_from_regex_match(result, field=False):
                # parse matched selector content into a list of indexes,
                # or [row, col] when ``field`` is True
                group = result.group(1)
                indexes = []
                if field:
                    return [
                        int(part.strip()) for part in group.strip().split(":")
                    ]

                parts = group.strip().split(",")
                for part in parts:
                    part = part.strip()
                    if "-" not in part:
                        indexes.append(int(part))
                        continue

                    sub_parts = [
                        int(sub.strip()) for sub in part.split("-")
                    ]
                    if len(sub_parts) != 2:
                        # TODO logging
                        self.log.warning("Invalid range '{}'".format(part))
                        continue
                    for idx in range(sub_parts[0], sub_parts[1]+1):
                        indexes.append(idx)
                return indexes

            for key, value in style.items():
                if not key.startswith(obj_type):
                    continue

                result = re.search(col_regex, key)
                if result:
                    indexes = get_indexes_from_regex_match(result)
                    # NOTE(review): ``col_idx``/``row_idx`` are expected
                    # to exist on table-item objects -- confirm subclass
                    if self.col_idx in indexes:
                        obj_specific.update(value)
                    continue

                result = re.search(row_regex, key)
                if result:
                    indexes = get_indexes_from_regex_match(result)
                    if self.row_idx in indexes:
                        obj_specific.update(value)
                    continue

                result = re.search(field_regex, key)
                if result:
                    row_idx, col_idx = get_indexes_from_regex_match(
                        result, True
                    )
                    if self.col_idx == col_idx and self.row_idx == row_idx:
                        obj_specific.update(value)

        output = {}
        output.update(base)
        output.update(obj_specific)
        output.update(name_specific)
        return output
    @property
    def style(self):
        """Resolved flat style dict for this object's type and name."""
        return self.get_style_for_obj_type(self.obj_type)

    @property
    def item_pos_x(self):
        # only direct children of the main frame carry absolute
        # positions; nested items are positioned by their parent
        if self.parent.obj_type == "main_frame":
            return int(self._pos_x)
        return 0

    @property
    def item_pos_y(self):
        if self.parent.obj_type == "main_frame":
            return int(self._pos_y)
        return 0
@property
def content_pos_x(self):
pos_x = self.item_pos_x
margin = self.style["margin"]
margin_left = self.style.get("margin-left") or margin
pos_x += margin_left
return pos_x
@property
def content_pos_y(self):
pos_y = self.item_pos_y
margin = self.style["margin"]
margin_top = self.style.get("margin-top") or margin
return pos_y + margin_top
    @property
    def value_pos_x(self):
        """X of the value area: content position plus left padding."""
        pos_x = int(self.content_pos_x)
        padding = self.style["padding"]
        padding_left = self.style.get("padding-left")
        if padding_left is None:
            padding_left = padding
        pos_x += padding_left
        return pos_x

    @property
    def value_pos_y(self):
        """Y of the value area: content position plus top padding."""
        pos_y = int(self.content_pos_y)
        padding = self.style["padding"]
        padding_top = self.style.get("padding-top")
        if padding_top is None:
            padding_top = padding
        pos_y += padding_top
        return pos_y

    @property
    def value_pos_start(self):
        """(x, y) top-left corner of the value area."""
        return (self.value_pos_x, self.value_pos_y)

    @property
    def value_pos_end(self):
        # NOTE(review): uses full width()/height() (margins included)
        # rather than value dimensions -- confirm this is intended
        pos_x, pos_y = self.value_pos_start
        pos_x += self.width()
        pos_y += self.height()
        return (pos_x, pos_y)

    @property
    def content_pos_start(self):
        """(x, y) top-left corner of the content (padded) box."""
        return (self.content_pos_x, self.content_pos_y)

    @property
    def content_pos_end(self):
        """(x, y) bottom-right corner of the content box."""
        pos_x, pos_y = self.content_pos_start
        pos_x += self.content_width()
        pos_y += self.content_height()
        return (pos_x, pos_y)
def value_width(self):
raise NotImplementedError(
"Attribute <content_width> is not implemented <{}>".format(
self.__class__.__name__
)
)
def value_height(self):
raise NotImplementedError(
"Attribute <content_width> is not implemented for <{}>".format(
self.__class__.__name__
)
)
def content_width(self):
width = self.value_width()
padding = self.style["padding"]
padding_left = self.style.get("padding-left")
if padding_left is None:
padding_left = padding
padding_right = self.style.get("padding-right")
if padding_right is None:
padding_right = padding
return width + padding_left + padding_right
def content_height(self):
height = self.value_height()
padding = self.style["padding"]
padding_top = self.style.get("padding-top")
if padding_top is None:
padding_top = padding
padding_bottom = self.style.get("padding-bottom")
if padding_bottom is None:
padding_bottom = padding
return height + padding_top + padding_bottom
def width(self):
width = self.content_width()
margin = self.style["margin"]
margin_left = self.style.get("margin-left") or margin
margin_right = self.style.get("margin-right") or margin
return width + margin_left + margin_right
def height(self):
height = self.content_height()
margin = self.style["margin"]
margin_top = self.style.get("margin-top") or margin
margin_bottom = self.style.get("margin-bottom") or margin
return height + margin_bottom + margin_top
def add_item(self, item):
self.items[item.id] = item
item.fill_data_format()
def reset(self):
for item in self.items.values():
item.reset()

View file

@ -0,0 +1,58 @@
{
"*": {
"font-family": "arial",
"font-size": 26,
"font-color": "#ffffff",
"font-bold": false,
"font-italic": false,
"bg-color": "#0077ff",
"alignment-horizontal": "left",
"alignment-vertical": "top",
"word-wrap": true,
"ellide": true,
"max-lines": null
},
"layer": {
"padding": 0,
"margin": 0
},
"rectangle": {
"padding": 0,
"margin": 0,
"fill": true
},
"image": {
"padding": 0,
"margin": 0,
"fill": true
},
"placeholder": {
"padding": 0,
"margin": 0,
"fill": true
},
"main_frame": {
"padding": 0,
"margin": 0,
"bg-color": "#252525"
},
"table": {
"padding": 0,
"margin": 0,
"bg-color": "transparent"
},
"table-item": {
"padding": 0,
"margin": 0,
"bg-color": "#212121",
"bg-alter-color": "#272727",
"font-color": "#dcdcdc",
"font-bold": false,
"font-italic": false,
"alignment-horizontal": "left",
"alignment-vertical": "top",
"word-wrap": false,
"ellide": true,
"max-lines": 1
}
}

View file

@ -0,0 +1,254 @@
# import sys
# sys.path.append(r"PATH/TO/PILLOW/PACKAGE")
from . import api
def example():
"""Example data to demontrate function.
It is required to fill "destination_path", "thumbnail_path"
and "color_bar_path" in `example_fill_data` to be able to execute.
"""
example_fill_data = {
"destination_path": "PATH/TO/OUTPUT/FILE",
"project": {
"name": "Testing project"
},
"intent": "WIP",
"version_name": "seq01_sh0100_compositing_v01",
"date": "2019-08-09",
"shot_type": "2d comp",
"submission_note": (
"Lorem ipsum dolor sit amet, consectetuer adipiscing elit."
" Aenean commodo ligula eget dolor. Aenean massa."
" Cum sociis natoque penatibus et magnis dis parturient montes,"
" nascetur ridiculus mus. Donec quam felis, ultricies nec,"
" pellentesque eu, pretium quis, sem. Nulla consequat massa quis"
" enim. Donec pede justo, fringilla vel,"
" aliquet nec, vulputate eget, arcu."
),
"thumbnail_path": "PATH/TO/THUMBNAIL/FILE",
"color_bar_path": "PATH/TO/COLOR/BAR/FILE",
"vendor": "Our Studio",
"shot_name": "sh0100",
"frame_start": 1001,
"frame_end": 1004,
"duration": 3
}
example_presets = {"example_HD": {
"width": 1920,
"height": 1080,
"destination_path": "{destination_path}",
"style": {
"*": {
"font-family": "arial",
"font-color": "#ffffff",
"font-bold": False,
"font-italic": False,
"bg-color": "#0077ff",
"alignment-horizontal": "left",
"alignment-vertical": "top"
},
"layer": {
"padding": 0,
"margin": 0
},
"rectangle": {
"padding": 0,
"margin": 0,
"bg-color": "#E9324B",
"fill": True
},
"main_frame": {
"padding": 0,
"margin": 0,
"bg-color": "#252525"
},
"table": {
"padding": 0,
"margin": 0,
"bg-color": "transparent"
},
"table-item": {
"padding": 5,
"padding-bottom": 10,
"margin": 0,
"bg-color": "#212121",
"bg-alter-color": "#272727",
"font-color": "#dcdcdc",
"font-bold": False,
"font-italic": False,
"alignment-horizontal": "left",
"alignment-vertical": "top",
"word-wrap": False,
"ellide": True,
"max-lines": 1
},
"table-item-col[0]": {
"font-size": 20,
"font-color": "#898989",
"font-bold": True,
"ellide": False,
"word-wrap": True,
"max-lines": None
},
"table-item-col[1]": {
"font-size": 40,
"padding-left": 10
},
"#colorbar": {
"bg-color": "#9932CC"
}
},
"items": [{
"type": "layer",
"direction": 1,
"name": "MainLayer",
"style": {
"#MainLayer": {
"width": 1094,
"height": 1000,
"margin": 25,
"padding": 0
},
"#LeftSide": {
"margin-right": 25
}
},
"items": [{
"type": "layer",
"name": "LeftSide",
"items": [{
"type": "layer",
"direction": 1,
"style": {
"table-item": {
"bg-color": "transparent",
"padding-bottom": 20
},
"table-item-col[0]": {
"font-size": 20,
"font-color": "#898989",
"alignment-horizontal": "right"
},
"table-item-col[1]": {
"alignment-horizontal": "left",
"font-bold": True,
"font-size": 40
}
},
"items": [{
"type": "table",
"values": [
["Show:", "{project[name]}"]
],
"style": {
"table-item-field[0:0]": {
"width": 150
},
"table-item-field[0:1]": {
"width": 580
}
}
}, {
"type": "table",
"values": [
["Submitting For:", "{intent}"]
],
"style": {
"table-item-field[0:0]": {
"width": 160
},
"table-item-field[0:1]": {
"width": 218,
"alignment-horizontal": "right"
}
}
}]
}, {
"type": "rectangle",
"style": {
"bg-color": "#bc1015",
"width": 1108,
"height": 5,
"fill": True
}
}, {
"type": "table",
"use_alternate_color": True,
"values": [
["Version name:", "{version_name}"],
["Date:", "{date}"],
["Shot Types:", "{shot_type}"],
["Submission Note:", "{submission_note}"]
],
"style": {
"table-item": {
"padding-bottom": 20
},
"table-item-field[0:1]": {
"font-bold": True
},
"table-item-field[3:0]": {
"word-wrap": True,
"ellide": True,
"max-lines": 4
},
"table-item-col[0]": {
"alignment-horizontal": "right",
"width": 150
},
"table-item-col[1]": {
"alignment-horizontal": "left",
"width": 958
}
}
}]
}, {
"type": "layer",
"name": "RightSide",
"items": [{
"type": "placeholder",
"name": "thumbnail",
"path": "{thumbnail_path}",
"style": {
"width": 730,
"height": 412
}
}, {
"type": "placeholder",
"name": "colorbar",
"path": "{color_bar_path}",
"return_data": True,
"style": {
"width": 730,
"height": 55
}
}, {
"type": "table",
"use_alternate_color": True,
"values": [
["Vendor:", "{vendor}"],
["Shot Name:", "{shot_name}"],
["Frames:", "{frame_start} - {frame_end} ({duration})"]
],
"style": {
"table-item-col[0]": {
"alignment-horizontal": "left",
"width": 200
},
"table-item-col[1]": {
"alignment-horizontal": "right",
"width": 530,
"font-size": 30
}
}
}]
}]
}]
}}
api.create_slates(example_fill_data, "example_HD", example_presets)

View file

@ -0,0 +1,93 @@
import os
import sys
import collections
from PIL import ImageFont
class FontFactory:
    """Cache of system fonts keyed by family (lowercase) and style name."""

    # Lazily populated mapping: {family(lower): {style: ImageFont instance}}
    fonts = None
    # Fallback PIL bitmap font used when a family cannot be found.
    default = None

    @classmethod
    def get_font(cls, family, font_size=None, italic=False, bold=False):
        """Return a font for `family` in the requested style and size.

        Falls back to any available style of the family, then to the
        default font when the family is unknown.
        """
        if cls.fonts is None:
            cls.load_fonts()

        styles = []
        if bold:
            styles.append("Bold")
        if italic:
            styles.append("Italic")
        if not styles:
            styles.append("Regular")
        style = " ".join(styles)

        family = family.lower()
        family_styles = cls.fonts.get(family)
        if not family_styles:
            return cls.default

        font = family_styles.get(style)
        if font:
            if font_size:
                font = font.font_variant(size=font_size)
            return font

        # Requested style missing - return the first style found.
        # BUGFIX: iterate `.values()`; iterating the dict directly yields
        # style-name strings, which have no `font_variant` method.
        for font in family_styles.values():
            if font_size:
                font = font.font_variant(size=font_size)
            return font
        return cls.default

    @classmethod
    def load_fonts(cls):
        """Scan platform font directories and populate `cls.fonts`."""
        cls.default = ImageFont.load_default()

        available_font_ext = [".ttf", ".ttc"]
        dirs = []
        if sys.platform == "win32":
            # check the windows font repository
            # NOTE: must use uppercase WINDIR, to work around bugs in
            # 1.5.2's os.environ.get()
            windir = os.environ.get("WINDIR")
            if windir:
                dirs.append(os.path.join(windir, "fonts"))

        elif sys.platform in ("linux", "linux2"):
            lindirs = os.environ.get("XDG_DATA_DIRS", "")
            if not lindirs:
                # According to the freedesktop spec, XDG_DATA_DIRS should
                # default to /usr/share
                lindirs = "/usr/share"
            dirs += [
                os.path.join(lindir, "fonts") for lindir in lindirs.split(":")
            ]

        elif sys.platform == "darwin":
            dirs += [
                "/Library/Fonts",
                "/System/Library/Fonts",
                os.path.expanduser("~/Library/Fonts")
            ]

        available_fonts = collections.defaultdict(dict)
        for directory in dirs:
            for walkroot, walkdir, walkfilenames in os.walk(directory):
                for walkfilename in walkfilenames:
                    ext = os.path.splitext(walkfilename)[1]
                    if ext.lower() not in available_font_ext:
                        continue

                    fontpath = os.path.join(walkroot, walkfilename)
                    # Skip unreadable/corrupt font files instead of
                    # aborting the whole scan.
                    try:
                        font_obj = ImageFont.truetype(fontpath)
                    except OSError:
                        continue
                    family = font_obj.font.family.lower()
                    style = font_obj.font.style
                    available_fonts[family][style] = font_obj

        cls.fonts = available_fonts

View file

@ -0,0 +1,667 @@
import os
import re
from PIL import Image
from .base import BaseObj
from .font_factory import FontFactory
class BaseItem(BaseObj):
    """Base for drawable leaf items parented to a main frame or layer."""

    available_parents = ["main_frame", "layer"]

    @property
    def item_pos_x(self):
        # Items placed directly on the main frame use their explicit
        # position; otherwise the parent layer computes it.
        if self.parent.obj_type == "main_frame":
            return self._pos_x
        return self.parent.child_pos_x(self.id)

    @property
    def item_pos_y(self):
        if self.parent.obj_type == "main_frame":
            return self._pos_y
        return self.parent.child_pos_y(self.id)

    def add_item(self, *args, **kwargs):
        """Leaf items cannot contain children."""
        raise Exception("Can't add item to an item, use layers instead.")

    def draw(self, image, drawer):
        """Subclasses must implement actual drawing.

        BUGFIX: `self.__clas__` typo would raise AttributeError before
        the intended NotImplementedError message could be built.
        """
        raise NotImplementedError(
            "Method `draw` is not implemented for <{}>".format(
                self.__class__.__name__
            )
        )
class ItemImage(BaseItem):
    # Draws an external image file resized to the style's width/height.
    obj_type = "image"

    def __init__(self, image_path, *args, **kwargs):
        # Path may contain `{key}` placeholders resolved in
        # `fill_data_format`.
        self.image_path = image_path
        super(ItemImage, self).__init__(*args, **kwargs)

    def fill_data_format(self):
        """Expand `{key}` formatting fields in the image path, if any."""
        if re.match(self.fill_data_regex, self.image_path):
            self.image_path = self.image_path.format(**self.fill_data)

    def draw(self, image, drawer):
        """Paste the source image, resized to styled size, onto the slate."""
        source_image = Image.open(os.path.normpath(self.image_path))
        paste_image = source_image.resize(
            (self.value_width(), self.value_height()),
            Image.ANTIALIAS
        )
        image.paste(
            paste_image,
            (self.value_pos_x, self.value_pos_y)
        )

    def value_width(self):
        # `width` is a mandatory style key for image items.
        return int(self.style["width"])

    def value_height(self):
        # `height` is a mandatory style key for image items.
        return int(self.style["height"])
class ItemRectangle(BaseItem):
    """Solid or outlined rectangle sized by the style's width/height."""

    obj_type = "rectangle"

    def draw(self, image, drawer):
        """Draw the rectangle; filled when the `fill` style key is truthy."""
        color = self.style["bg-color"]
        draw_kwargs = (
            {"fill": color}
            if self.style.get("fill", False)
            else {"outline": color}
        )
        left = self.value_pos_x
        top = self.value_pos_y
        right = left + self.value_width()
        bottom = top + self.value_height()
        drawer.rectangle(((left, top), (right, bottom)), **draw_kwargs)

    def value_width(self):
        """Rectangle width from the mandatory style key."""
        return int(self.style["width"])

    def value_height(self):
        """Rectangle height from the mandatory style key."""
        return int(self.style["height"])
class ItemPlaceHolder(BaseItem):
    """Reserved area for later compositing (e.g. thumbnail).

    Draws only a background rectangle; the actual content is composited
    afterwards using the geometry reported by `collect_data`.
    """

    obj_type = "placeholder"

    def __init__(self, image_path, *args, **kwargs):
        # Path may contain `{key}` placeholders resolved in
        # `fill_data_format`.
        self.image_path = image_path
        super(ItemPlaceHolder, self).__init__(*args, **kwargs)

    def fill_data_format(self):
        """Expand `{key}` formatting fields in the placeholder path."""
        if re.match(self.fill_data_regex, self.image_path):
            self.image_path = self.image_path.format(**self.fill_data)

    def draw(self, image, drawer):
        """Draw the placeholder's background rectangle (if not transparent).

        BUGFIX: the check compared against the misspelled string
        "tranparent", so a "transparent" background was still filled.
        Now case-insensitive and consistent with the other item types.
        """
        bg_color = self.style["bg-color"]
        kwargs = {}
        if bg_color and bg_color.lower() != "transparent":
            kwargs["fill"] = bg_color

        start_pos_x = self.value_pos_x
        start_pos_y = self.value_pos_y
        end_pos_x = start_pos_x + self.value_width()
        end_pos_y = start_pos_y + self.value_height()

        drawer.rectangle(
            (
                (start_pos_x, start_pos_y),
                (end_pos_x, end_pos_y)
            ),
            **kwargs
        )

    def value_width(self):
        # `width` is a mandatory style key for placeholders.
        return int(self.style["width"])

    def value_height(self):
        # `height` is a mandatory style key for placeholders.
        return int(self.style["height"])

    def collect_data(self):
        """Return geometry and path so the caller can composite content."""
        return {
            "pos_x": self.value_pos_x,
            "pos_y": self.value_pos_y,
            "width": self.value_width(),
            "height": self.value_height(),
            "path": self.image_path
        }
class ItemText(BaseItem):
    """Single piece of text drawn with the font described in the style."""

    obj_type = "text"

    def __init__(self, value, *args, **kwargs):
        self.value = value
        super(ItemText, self).__init__(*args, **kwargs)

    def _font(self):
        # Resolve the font from style keys once; previously this lookup
        # was duplicated in draw/value_width/value_height.
        return FontFactory.get_font(
            self.style["font-family"],
            self.style["font-size"],
            self.style.get("font-italic", False),
            self.style.get("font-bold", False)
        )

    def draw(self, image, drawer):
        """Draw an optional background rectangle, then the text itself."""
        bg_color = self.style["bg-color"]
        if bg_color and bg_color.lower() != "transparent":
            # TODO border outline styles
            drawer.rectangle(
                (self.content_pos_start, self.content_pos_end),
                fill=bg_color,
                outline=None
            )

        drawer.text(
            self.value_pos_start,
            self.value,
            font=self._font(),
            fill=self.style["font-color"]
        )

    def value_width(self):
        """Pixel width of the rendered text."""
        return int(self._font().getsize(self.value)[0])

    def value_height(self):
        """Pixel height of the rendered text."""
        return int(self._font().getsize(self.value)[1])
class ItemTable(BaseItem):
    # Grid of TableField cells built from a 2D list of values.
    obj_type = "table"

    def __init__(self, values, use_alternate_color=False, *args, **kwargs):
        # Coordinate-addressable matrix of TableField objects.
        self.values_by_cords = None
        # NOTE(review): cells are created with `parent=self` before
        # super().__init__() runs - assumes BaseObj tolerates a partially
        # initialized parent; confirm.
        self.prepare_values(values)
        super(ItemTable, self).__init__(*args, **kwargs)
        # (row_heights, col_widths) computed from cell sizes.
        self.size_values = None
        self.calculate_sizes()
        # When True, odd rows get the alternate background color.
        self.use_alternate_color = use_alternate_color

    def add_item(self, item):
        # Table cells are managed internally via `prepare_values`;
        # silently ignore them here.
        if item.obj_type == "table-item":
            return
        super(ItemTable, self).add_item(item)

    def fill_data_format(self):
        """Expand `{key}` formatting fields in every cell value."""
        for item in self.values:
            item.fill_data_format()

    def prepare_values(self, _values):
        """Build TableField cells from a 2D value list.

        Short rows are padded with empty-string cells so every row has
        the same column count.
        """
        values = []
        values_by_cords = []
        row_count = 0
        col_count = 0
        # First pass: find the grid dimensions.
        for row in _values:
            row_count += 1
            if len(row) > col_count:
                col_count = len(row)
        # Second pass: create one TableField per coordinate.
        for row_idx in range(row_count):
            values_by_cords.append([])
            for col_idx in range(col_count):
                values_by_cords[row_idx].append([])
                if col_idx <= len(_values[row_idx]) - 1:
                    col = _values[row_idx][col_idx]
                else:
                    col = ""
                col_item = TableField(row_idx, col_idx, col, parent=self)
                values_by_cords[row_idx][col_idx] = col_item
                values.append(col_item)
        self.values = values
        self.values_by_cords = values_by_cords

    def calculate_sizes(self):
        """Compute per-row heights and per-column widths from cell sizes."""
        row_heights = []
        col_widths = []
        for row_idx, row in enumerate(self.values_by_cords):
            row_heights.append(0)
            for col_idx, col_item in enumerate(row):
                if len(col_widths) < col_idx + 1:
                    col_widths.append(0)
                # Column width is the maximum cell width in the column.
                _width = col_widths[col_idx]
                item_width = col_item.width()
                if _width < item_width:
                    col_widths[col_idx] = item_width
                # Row height is the maximum cell height in the row.
                _height = row_heights[row_idx]
                item_height = col_item.height()
                if _height < item_height:
                    row_heights[row_idx] = item_height
        self.size_values = (row_heights, col_widths)

    def draw(self, image, drawer):
        """Draw optional table background, then every cell."""
        bg_color = self.style["bg-color"]
        if bg_color and bg_color.lower() != "transparent":
            # TODO border outline styles
            drawer.rectangle(
                (self.content_pos_start, self.content_pos_end),
                fill=bg_color,
                outline=None
            )
        for value in self.values:
            value.draw(image, drawer)

    def value_width(self):
        """Total table width: sum of column widths."""
        row_heights, col_widths = self.size_values
        width = 0
        for _width in col_widths:
            width += _width
        # NOTE(review): the -1 presumably compensates for a shared border
        # pixel between cells; confirm.
        if width != 0:
            width -= 1
        return width

    def value_height(self):
        """Total table height: sum of row heights."""
        row_heights, col_widths = self.size_values
        height = 0
        for _height in row_heights:
            height += _height
        if height != 0:
            height -= 1
        return height

    def content_pos_info_by_cord(self, row_idx, col_idx):
        """Return (pos_x, pos_y, width, height) of the cell at a coordinate.

        Position is the table origin offset by the widths/heights of all
        preceding columns/rows.
        """
        row_heights, col_widths = self.size_values
        pos_x = int(self.value_pos_x)
        pos_y = int(self.value_pos_y)
        width = 0
        height = 0
        for idx, value in enumerate(col_widths):
            if col_idx == idx:
                width = value
                break
            pos_x += value
        for idx, value in enumerate(row_heights):
            if row_idx == idx:
                height = value
                break
            pos_y += value
        return (pos_x, pos_y, width, height)
class TableField(BaseItem):
    """Single table cell; its geometry is computed by the parent table."""

    obj_type = "table-item"
    available_parents = ["table"]
    # Suffix appended when text is shortened.
    ellide_text = "..."

    def __init__(self, row_idx, col_idx, value, *args, **kwargs):
        super(TableField, self).__init__(*args, **kwargs)
        self.row_idx = row_idx
        self.col_idx = col_idx
        self.value = value

    def recalculate_by_width(self, value, max_width):
        """Fit `value` into `max_width` pixels.

        Depending on the `word-wrap`, `ellide` and `max-lines` style keys
        the text is wrapped to multiple lines and/or shortened with an
        ellipsis. Returns the (possibly multi-line) string to draw, or ""
        when the text cannot be drawn at all.
        """
        # Usable width excludes horizontal paddings.
        padding = self.style["padding"]
        padding_left = self.style.get("padding-left")
        if padding_left is None:
            padding_left = padding
        padding_right = self.style.get("padding-right")
        if padding_right is None:
            padding_right = padding
        max_width -= (padding_left + padding_right)
        if not value:
            return ""

        word_wrap = self.style.get("word-wrap")
        ellide = self.style.get("ellide")
        max_lines = self.style.get("max-lines")
        font_family = self.style["font-family"]
        font_size = self.style["font-size"]
        font_bold = self.style.get("font-bold", False)
        font_italic = self.style.get("font-italic", False)
        font = FontFactory.get_font(
            font_family, font_size, font_italic, font_bold
        )
        val_width = font.getsize(value)[0]
        if val_width <= max_width:
            # Fits as-is.
            return value
        if not ellide and not word_wrap:
            # TODO logging
            self.log.warning((
                "Can't draw text because is too long with"
                " `word-wrap` and `ellide` turned off <{}>"
            ).format(value))
            return ""
        elif ellide and not word_wrap:
            # Ellipsis without wrapping means a single shortened line.
            max_lines = 1

        # Greedy word-wrap: pack as many words per line as fit.
        words = [word for word in value.split()]
        words_len = len(words)
        lines = []
        last_index = None
        while True:
            start_index = 0
            if last_index is not None:
                start_index = int(last_index) + 1
            line = ""
            for idx in range(start_index, words_len):
                _word = words[idx]
                connector = " "
                if line == "":
                    connector = ""
                _line = connector.join([line, _word])
                _line_width = font.getsize(_line)[0]
                if _line_width > max_width:
                    break
                line = _line
                last_index = idx

            if line:
                lines.append(line)

            if last_index == words_len - 1:
                # All words consumed.
                break
            elif last_index is None:
                # Not even one word fits: truncate the word by characters.
                add_message = ""
                if ellide:
                    add_message = " String was shortened to `{}`."
                line = ""
                for idx, char in enumerate(words[idx]):
                    _line = line + char + self.ellide_text
                    _line_width = font.getsize(_line)[0]
                    if _line_width > max_width:
                        if idx == 0:
                            line = _line
                        break
                    line = line + char
                lines.append(line)
                # TODO logging
                self.log.warning((
                    "Font size is too big.{} <{}>"
                ).format(add_message, value))
                break

        output = ""
        if not lines:
            return output

        over_max_lines = (max_lines and len(lines) > max_lines)
        if not over_max_lines:
            return "\n".join([line for line in lines])

        # Too many lines: keep the first `max_lines` and, when elliding,
        # append/construct an ellipsis on the last kept line.
        lines = [lines[idx] for idx in range(max_lines)]
        if not ellide:
            return "\n".join(lines)

        last_line = lines[-1]
        last_line_width = font.getsize(last_line + self.ellide_text)[0]
        if last_line_width <= max_width:
            # Ellipsis fits right after the last line.
            lines[-1] += self.ellide_text
            return "\n".join([line for line in lines])

        last_line_words = last_line.split()
        if len(last_line_words) == 1:
            if max_lines > 1:
                # TODO try previous line?
                lines[-1] = self.ellide_text
                return "\n".join([line for line in lines])
            # Single word on a single allowed line: character-truncate it.
            line = ""
            for idx, word in enumerate(last_line_words):
                _line = line + word + self.ellide_text
                _line_width = font.getsize(_line)[0]
                if _line_width > max_width:
                    if idx == 0:
                        line = _line
                    break
                line = _line
            lines[-1] = line
            return "\n".join([line for line in lines])

        # Multiple words on the last line: drop trailing words until the
        # ellipsis fits; fall back to character truncation if needed.
        line = ""
        for idx, _word in enumerate(last_line_words):
            connector = " "
            if line == "":
                connector = ""
            _line = connector.join([line, _word + self.ellide_text])
            _line_width = font.getsize(_line)[0]
            if _line_width <= max_width:
                line = connector.join([line, _word])
                continue
            if idx != 0:
                line += self.ellide_text
                break
            if max_lines != 1:
                # TODO try previous line?
                line = self.ellide_text
                break
            for idx, char in enumerate(_word):
                _line = line + char + self.ellide_text
                _line_width = font.getsize(_line)[0]
                if _line_width > max_width:
                    if idx == 0:
                        line = _line
                    break
                line = line + char
            break
        lines[-1] = line
        return "\n".join([line for line in lines])

    def fill_data_format(self):
        """Expand `{key}` fields, then re-fit the text to the cell width."""
        value = self.value
        if re.match(self.fill_data_regex, value):
            value = value.format(**self.fill_data)
        self.orig_value = value
        max_width = self.style.get("max-width")
        max_width = self.style.get("width") or max_width
        if max_width:
            value = self.recalculate_by_width(value, max_width)
        self.value = value

    def content_width(self):
        # Explicit style width wins over the computed content width.
        width = self.style.get("width")
        if width:
            return int(width)
        return super(TableField, self).content_width()

    def content_height(self):
        return super(TableField, self).content_height()

    def value_width(self):
        """Pixel width of the (possibly multi-line) cell text."""
        if not self.value:
            return 0
        font_family = self.style["font-family"]
        font_size = self.style["font-size"]
        font_bold = self.style.get("font-bold", False)
        font_italic = self.style.get("font-italic", False)
        font = FontFactory.get_font(
            font_family, font_size, font_italic, font_bold
        )
        width = font.getsize_multiline(self.value)[0] + 1
        # BUGFIX: minimum width was read from the "min-height" style key
        # (copy-paste from value_height); use "min-width".
        min_width = self.style.get("min-width")
        if min_width and min_width > width:
            width = min_width
        return int(width)

    def value_height(self):
        """Pixel height of the (possibly multi-line) cell text."""
        if not self.value:
            return 0
        height = self.style.get("height")
        if height:
            return int(height)
        font_family = self.style["font-family"]
        font_size = self.style["font-size"]
        font_bold = self.style.get("font-bold", False)
        font_italic = self.style.get("font-italic", False)
        font = FontFactory.get_font(
            font_family, font_size, font_italic, font_bold
        )
        height = font.getsize_multiline(self.value)[1] + 1
        min_height = self.style.get("min-height")
        if min_height and min_height > height:
            height = min_height
        return int(height)

    @property
    def item_pos_x(self):
        # Cell position is fully determined by the parent table.
        pos_x, pos_y, width, height = (
            self.parent.content_pos_info_by_cord(self.row_idx, self.col_idx)
        )
        return pos_x

    @property
    def item_pos_y(self):
        pos_x, pos_y, width, height = (
            self.parent.content_pos_info_by_cord(self.row_idx, self.col_idx)
        )
        return pos_y

    @property
    def value_pos_x(self):
        """X of the text inside the cell, honoring horizontal alignment."""
        pos_x, pos_y, width, height = (
            self.parent.content_pos_info_by_cord(self.row_idx, self.col_idx)
        )
        alignment_hor = self.style["alignment-horizontal"].lower()
        if alignment_hor in ["center", "centre"]:
            pos_x += (width - self.value_width()) / 2
        elif alignment_hor == "right":
            pos_x += width - self.value_width()
        else:
            padding = self.style["padding"]
            padding_left = self.style.get("padding-left")
            if padding_left is None:
                padding_left = padding
            pos_x += padding_left
        return int(pos_x)

    @property
    def value_pos_y(self):
        """Y of the text inside the cell, honoring vertical alignment."""
        pos_x, pos_y, width, height = (
            self.parent.content_pos_info_by_cord(self.row_idx, self.col_idx)
        )
        alignment_ver = self.style["alignment-vertical"].lower()
        if alignment_ver in ["center", "centre"]:
            pos_y += (height - self.value_height()) / 2
        elif alignment_ver == "bottom":
            pos_y += height - self.value_height()
        else:
            padding = self.style["padding"]
            padding_top = self.style.get("padding-top")
            if padding_top is None:
                padding_top = padding
            pos_y += padding_top
        return int(pos_y)

    def draw(self, image, drawer):
        """Draw the cell background (plain or alternate) and its text."""
        pos_x, pos_y, width, height = (
            self.parent.content_pos_info_by_cord(self.row_idx, self.col_idx)
        )
        pos_start = (pos_x, pos_y)
        pos_end = (pos_x + width, pos_y + height)

        bg_color = self.style["bg-color"]
        # Alternate background color on odd rows when the table asks for it.
        if self.parent.use_alternate_color and (self.row_idx % 2) == 1:
            bg_color = self.style["bg-alter-color"]

        if bg_color and bg_color.lower() != "transparent":
            # TODO border outline styles
            drawer.rectangle(
                (pos_start, pos_end),
                fill=bg_color,
                outline=None
            )

        font_color = self.style["font-color"]
        font_family = self.style["font-family"]
        font_size = self.style["font-size"]
        font_bold = self.style.get("font-bold", False)
        font_italic = self.style.get("font-italic", False)
        font = FontFactory.get_font(
            font_family, font_size, font_italic, font_bold
        )
        alignment_hor = self.style["alignment-horizontal"].lower()
        if alignment_hor == "centre":
            # PIL only understands "center".
            alignment_hor = "center"
        drawer.multiline_text(
            self.value_pos_start,
            self.value,
            font=font,
            fill=font_color,
            align=alignment_hor
        )

View file

@ -0,0 +1,139 @@
from .base import BaseObj
class Layer(BaseObj):
    """Container stacking child items vertically (0) or horizontally (1)."""

    obj_type = "layer"
    available_parents = ["main_frame", "layer"]

    # Direction can be 0=vertical / 1=horizontal
    def __init__(self, direction=0, *args, **kwargs):
        super(Layer, self).__init__(*args, **kwargs)
        self._direction = direction

    @property
    def item_pos_x(self):
        # Nested layers ask the parent layer; top-level layers use their
        # explicit position; any other parent supplies its content origin.
        if self.parent.obj_type == self.obj_type:
            pos_x = self.parent.child_pos_x(self.id)
        elif self.parent.obj_type == "main_frame":
            pos_x = self._pos_x
        else:
            pos_x = self.parent.value_pos_x
        return int(pos_x)

    @property
    def item_pos_y(self):
        if self.parent.obj_type == self.obj_type:
            pos_y = self.parent.child_pos_y(self.id)
        elif self.parent.obj_type == "main_frame":
            pos_y = self._pos_y
        else:
            pos_y = self.parent.value_pos_y
        return int(pos_y)

    @property
    def direction(self):
        """Validated stacking direction; invalid values fall back to 0."""
        if self._direction not in (0, 1):
            # BUGFIX: message previously said "0 is horizontal / 1 is
            # vertical", contradicting the actual semantics used below.
            self.log.warning((
                "Direction of Layer must be 0 or 1 "
                "(0 is vertical / 1 is horizontal)! Setting to 0."
            ))
            return 0
        return self._direction

    def child_pos_x(self, item_id):
        """X position for the child identified by `item_id`."""
        pos_x = self.value_pos_x
        alignment_hor = self.style["alignment-horizontal"].lower()
        item = None
        for _id, _item in self.items.items():
            if item_id == _id:
                item = _item
                break
        if self.direction == 1:
            # Horizontal stacking: offset by widths of preceding children
            # (plus a 1px separator for non-image items).
            for _id, _item in self.items.items():
                if item_id == _id:
                    break
                pos_x += _item.width()
                if _item.obj_type not in ["image", "placeholder"]:
                    pos_x += 1
        else:
            # Vertical stacking: align the child horizontally in the layer.
            if alignment_hor in ["center", "centre"]:
                pos_x += (self.content_width() - item.content_width()) / 2
            elif alignment_hor == "right":
                pos_x += self.content_width() - item.content_width()
            else:
                margin = self.style["margin"]
                margin_left = self.style.get("margin-left") or margin
                pos_x += margin_left
        return int(pos_x)

    def child_pos_y(self, item_id):
        """Y position for the child identified by `item_id`."""
        pos_y = self.value_pos_y
        # BUGFIX: vertical alignment was read from "alignment-horizontal";
        # it must come from "alignment-vertical" (the key the table cells
        # also use for vertical alignment).
        alignment_ver = self.style["alignment-vertical"].lower()
        item = None
        for _id, _item in self.items.items():
            if item_id == _id:
                item = _item
                break
        if self.direction != 1:
            # Vertical stacking: offset by heights of preceding children.
            for _id, item in self.items.items():
                if item_id == _id:
                    break
                pos_y += item.height()
                if item.obj_type not in ["image", "placeholder"]:
                    pos_y += 1
        else:
            # Horizontal stacking: align the child vertically in the layer.
            if alignment_ver in ["center", "centre"]:
                pos_y += (self.content_height() - item.content_height()) / 2
            elif alignment_ver == "bottom":
                pos_y += self.content_height() - item.content_height()
        return int(pos_y)

    def value_height(self):
        """Max child height (horizontal) or sum of heights (vertical)."""
        height = 0
        for item in self.items.values():
            if self.direction == 1:
                if height > item.height():
                    continue
                height = item.height()
            else:
                height += item.height()
        # TODO this is not right
        min_height = self.style.get("min-height")
        if min_height and min_height > height:
            return min_height
        return height

    def value_width(self):
        """Max child width (vertical) or sum of widths (horizontal)."""
        width = 0
        for item in self.items.values():
            if self.direction == 0:
                if width > item.width():
                    continue
                width = item.width()
            else:
                width += item.width()
        min_width = self.style.get("min-width")
        if min_width and min_width > width:
            return min_width
        return width

    def draw(self, image, drawer):
        """Draw every child item."""
        for item in self.items.values():
            item.draw(image, drawer)

View file

@ -0,0 +1,152 @@
import os
import json
import logging
try:
from queue import Queue
except Exception:
from Queue import Queue
from .main_frame import MainFrame
from .layer import Layer
from .items import (
ItemTable, ItemImage, ItemRectangle, ItemPlaceHolder
)
try:
from pypeapp.config import get_presets
except Exception:
get_presets = dict
log = logging.getLogger(__name__)
RequiredSlateKeys = ["width", "height", "destination_path"]
# TODO proper documentation
def create_slates(
    fill_data, slate_name=None, slate_data=None, data_output_json=None
):
    """Implementation for command line executing.

    Data for slates are by default taken from presets. That requires to
    enter `slate_name`. If `slate_data` are entered then they are used.
    `data_output_json` should be path to json file where data will be
    collected.
    """
    if slate_data is None and slate_name is None:
        raise TypeError(
            "`create_slates` expects to enter data for slates or name"
            " of slate preset."
        )
    elif slate_data is None:
        # Look the slate definition up in studio presets by name.
        slate_presets = (
            get_presets()
            .get("tools", {})
            .get("slates")
        ) or {}
        slate_data = slate_presets.get(slate_name)
        if slate_data is None:
            raise ValueError(
                "Preset name \"{}\" was not found in slate presets.".format(
                    slate_name
                )
            )

    # Validate mandatory top-level keys before building anything.
    missing_keys = []
    for key in RequiredSlateKeys:
        if key not in slate_data:
            missing_keys.append("`{}`".format(key))
    if missing_keys:
        log.error("Slate data of <{}> miss required keys: {}".format(
            slate_name, ", ".join(missing_keys)
        ))
        return False

    width = slate_data["width"]
    height = slate_data["height"]
    dst_path = slate_data["destination_path"]
    style = slate_data.get("style") or {}

    main = MainFrame(width, height, dst_path, fill_data, style=style)

    # Breadth-first build of the item tree; children of layers are queued
    # together with their parent object.
    load_queue = Queue()
    for item in slate_data["items"]:
        load_queue.put((item, main))

    while not load_queue.empty():
        item_data, parent = load_queue.get()

        item_type = item_data["type"].lower()
        item_style = item_data.get("style", {})
        item_name = item_data.get("name")

        pos_x = item_data.get("pos_x")
        pos_y = item_data.get("pos_y")
        if parent.obj_type != "main_frame":
            # Explicit positions are only honored for top-level items.
            if pos_x or pos_y:
                # TODO logging
                log.warning((
                    "You have specified `pos_x` and `pos_y` but won't be used."
                    " Possible only if parent of an item is `main_frame`."
                ))
            pos_x = None
            pos_y = None

        kwargs = {
            "parent": parent,
            "style": item_style,
            "name": item_name,
            "pos_x": pos_x,
            "pos_y": pos_y
        }

        if item_type == "layer":
            direction = item_data.get("direction", 0)
            item_obj = Layer(direction, **kwargs)
            # Queue children with the freshly created layer as parent.
            for item in item_data.get("items", []):
                load_queue.put((item, item_obj))

        elif item_type == "table":
            use_alternate_color = item_data.get("use_alternate_color", False)
            values = item_data.get("values") or []
            ItemTable(values, use_alternate_color, **kwargs)

        elif item_type == "image":
            path = item_data["path"]
            ItemImage(path, **kwargs)

        elif item_type == "rectangle":
            ItemRectangle(**kwargs)

        elif item_type == "placeholder":
            path = item_data["path"]
            ItemPlaceHolder(path, **kwargs)

        else:
            # TODO logging
            log.warning(
                "Not implemented object type `{}` - skipping".format(item_type)
            )

    main.draw()
    log.debug("Slate creation finished")

    if not data_output_json:
        return

    if not data_output_json.endswith(".json"):
        raise ValueError("Output path must be .json file.")

    data_output_json_dir = os.path.dirname(data_output_json)
    if not os.path.exists(data_output_json_dir):
        log.info("Creating folder \"{}\"".format(data_output_json_dir))
        os.makedirs(data_output_json_dir)

    # Collected metadata (e.g. placeholder geometry) for later compositing.
    output_data = main.collect_data()
    with open(data_output_json, "w") as json_file:
        json_file.write(json.dumps(output_data, indent=4))

    log.info("Metadata collected in \"{}\".".format(data_output_json))

View file

@ -0,0 +1,77 @@
import os
import re
from PIL import Image, ImageDraw
from .base import BaseObj
class MainFrame(BaseObj):
    """Root canvas of a slate: holds items, renders and saves the image."""

    obj_type = "main_frame"
    available_parents = [None]

    def __init__(
        self, width, height, destination_path, fill_data=None, *args, **kwargs
    ):
        # BUGFIX: `fill_data` previously defaulted to a mutable `{}`
        # shared across calls; default to None and create a fresh dict.
        kwargs["parent"] = None
        super(MainFrame, self).__init__(*args, **kwargs)
        self._width = width
        self._height = height
        self.dst_path = destination_path
        self._fill_data = fill_data if fill_data is not None else {}
        self.fill_data_format()

    def fill_data_format(self):
        """Expand `{key}` formatting fields in the destination path."""
        if re.match(self.fill_data_regex, self.dst_path):
            self.dst_path = self.dst_path.format(**self.fill_data)

    @property
    def fill_data(self):
        # Root of the fill-data lookup chain used by child items.
        return self._fill_data

    def value_width(self):
        """Sum of children widths (the frame's own size is fixed)."""
        width = 0
        for item in self.items.values():
            width += item.width()
        return width

    def value_height(self):
        """Sum of children heights (the frame's own size is fixed)."""
        height = 0
        for item in self.items.values():
            height += item.height()
        return height

    def width(self):
        return self._width

    def height(self):
        return self._height

    def draw(self, path=None):
        """Render all items into a new image and save it to `dst_path`.

        NOTE(review): the `path` argument is accepted but unused - kept
        for interface compatibility; confirm whether it should override
        `dst_path`.
        """
        dir_path = os.path.dirname(self.dst_path)
        if not os.path.exists(dir_path):
            os.makedirs(dir_path)
        bg_color = self.style["bg-color"]
        image = Image.new("RGB", (self.width(), self.height()), color=bg_color)
        drawer = ImageDraw.Draw(image)
        for item in self.items.values():
            item.draw(image, drawer)
        image.save(self.dst_path)
        self.reset()

    def collect_data(self):
        """Return slate metadata incl. geometry of all placeholders."""
        output = {}
        output["width"] = self.width()
        output["height"] = self.height()
        output["slate_path"] = self.dst_path

        placeholders = self.find_item(obj_type="placeholder")
        placeholders_data = []
        for placeholder in placeholders:
            placeholders_data.append(placeholder.collect_data())
        output["placeholders"] = placeholders_data

        return output

45
pype/unreal/__init__.py Normal file
View file

@ -0,0 +1,45 @@
import os
import logging
from avalon import api as avalon
from pyblish import api as pyblish
logger = logging.getLogger("pype.unreal")
PARENT_DIR = os.path.dirname(__file__)
PACKAGE_DIR = os.path.dirname(PARENT_DIR)
PLUGINS_DIR = os.path.join(PACKAGE_DIR, "plugins")
PUBLISH_PATH = os.path.join(PLUGINS_DIR, "unreal", "publish")
LOAD_PATH = os.path.join(PLUGINS_DIR, "unreal", "load")
CREATE_PATH = os.path.join(PLUGINS_DIR, "unreal", "create")
def install():
    """Install Unreal configuration for Avalon.

    Registers Pype's Unreal publish/load/create plugin paths with
    pyblish and avalon.
    """
    print("-=" * 40)
    logo = '''.
.
____________
/ \\ __ \\
\\ \\ \\/_\\ \\
\\ \\ _____/ ______
\\ \\ \\___// \\ \\
\\ \\____\\ \\ \\_____\\
\\/_____/ \\/______/ PYPE Club .
.
'''
    print(logo)
    print("installing Pype for Unreal ...")
    print("-=" * 40)
    logger.info("installing Pype for Unreal")
    # Register plugin discovery paths for publishing, loading, creating.
    pyblish.register_plugin_path(str(PUBLISH_PATH))
    avalon.register_plugin_path(avalon.Loader, str(LOAD_PATH))
    avalon.register_plugin_path(avalon.Creator, str(CREATE_PATH))
def uninstall():
    """Uninstall Unreal configuration for Avalon.

    Deregisters the plugin paths registered by `install`.
    """
    pyblish.deregister_plugin_path(str(PUBLISH_PATH))
    avalon.deregister_plugin_path(avalon.Loader, str(LOAD_PATH))
    avalon.deregister_plugin_path(avalon.Creator, str(CREATE_PATH))

425
pype/unreal/lib.py Normal file
View file

@ -0,0 +1,425 @@
import sys
import os
import platform
import json
from distutils import dir_util
import subprocess
from pypeapp import config
def get_engine_versions():
    """Detect location and versions of installed Unreal Engine.

    Location can be overridden by `UNREAL_ENGINE_LOCATION` environment
    variable.

    Returns:
        dict: dictionary with version as a key and dir as value.

    Example:
        >>> get_engine_versions()
        {
            "4.23": "C:/Epic Games/UE_4.23",
            "4.24": "C:/Epic Games/UE_4.24"
        }
    """
    engine_locations = {}
    try:
        root, dirs, _files = next(
            os.walk(os.environ["UNREAL_ENGINE_LOCATION"])
        )
        # Versioned installs live in folders named `UE_<version>`.
        for dir_name in dirs:
            if dir_name.startswith("UE_"):
                version = dir_name.split("_")[1]
                engine_locations[version] = os.path.join(root, dir_name)
    except KeyError:
        # environment variable not set
        pass
    except StopIteration:
        # BUGFIX: os.walk() ignores errors by default and simply yields
        # nothing for a nonexistent directory, so `next()` raises
        # StopIteration (not OSError) - previously uncaught.
        pass
    except OSError:
        # specified directory doesn't exists
        pass

    # if we've got something, terminate autodetection process
    if engine_locations:
        return engine_locations

    # else kick in platform specific detection
    system = platform.system().lower()
    if system == "windows":
        return _win_get_engine_versions()
    elif system == "linux":
        # on linux, there is no installation and getting Unreal Engine
        # involves git clone. So we'll probably depend on
        # `UNREAL_ENGINE_LOCATION`.
        pass
    elif system == "darwin":
        return _darwin_get_engine_version()

    return {}
def _win_get_engine_versions():
    """Read engine versions from Epic Games Launcher data on Windows.

    If engines are installed via Epic Games Launcher then
    `%PROGRAMDATA%/Epic/UnrealEngineLauncher/LauncherInstalled.dat`
    is a JSON file listing installed applications; Unreal engines are
    marked with `"AppName" = "UE_X.XX"` like `UE_4.24`.
    """
    launcher_dat = os.path.join(
        os.environ.get("PROGRAMDATA"),
        "Epic",
        "UnrealEngineLauncher",
        "LauncherInstalled.dat",
    )
    return _parse_launcher_locations(launcher_dat)
def _darwin_get_engine_version() -> dict:
    """Read engine versions from Epic Games Launcher data on macOS.

    It works the same as on Windows, just the JSON file location differs.
    """
    launcher_dat = os.path.join(
        os.environ.get("HOME"),
        "Library",
        "Application Support",
        "Epic",
        "UnrealEngineLauncher",
        "LauncherInstalled.dat",
    )
    return _parse_launcher_locations(launcher_dat)
def _parse_launcher_locations(install_json_path: str) -> dict:
    """Parse engine locations from `LauncherInstalled.dat`.

    :param install_json_path: path to `LauncherInstalled.dat`
    :type install_json_path: str
    :returns: returns dict with unreal engine versions as keys and
              paths to those engine installations as value.
    :rtype: dict
    """
    engine_locations = {}
    if os.path.isfile(install_json_path):
        with open(install_json_path, "r") as ilf:
            try:
                install_data = json.load(ilf)
            except json.JSONDecodeError:
                raise Exception(
                    "Invalid `LauncherInstalled.dat file. `"
                    "Cannot determine Unreal Engine location."
                )

        for installation in install_data.get("InstallationList", []):
            # BUGFIX: `AppName` may be absent on non-engine entries;
            # treat a missing name as a non-match instead of crashing
            # on `None.startswith`.
            app_name = installation.get("AppName") or ""
            if app_name.startswith("UE_"):
                version = app_name.split("_")[1]
                engine_locations[version] = installation.get("InstallLocation")

    return engine_locations
def create_unreal_project(project_name: str,
                          ue_version: str,
                          pr_dir: str,
                          engine_path: str,
                          dev_mode: bool = False) -> None:
    """Create `.uproject` file at specified location.

    As there is no known way to create a project via command line, this is
    the easiest option. An Unreal project file is basically a JSON file. If
    we find the `AVALON_UNREAL_PLUGIN` environment variable we assume this
    is the location of the Avalon Integration Plugin and we copy its
    content to the project folder and enable this plugin.

    :param project_name: project name
    :type project_name: str
    :param ue_version: unreal engine version (like 4.23)
    :type ue_version: str
    :param pr_dir: path to directory where project will be created
    :type pr_dir: str
    :param engine_path: Path to Unreal Engine installation
    :type engine_path: str
    :param dev_mode: Flag to trigger C++ style Unreal project needing
                     Visual Studio and other tools to compile plugins from
                     sources. This will trigger automatically if `Binaries`
                     directory is not found in plugin folders as this
                     indicates this is only source distribution of the
                     plugin. Dev mode is also set by preset file
                     `unreal/project_setup.json` in **PYPE_CONFIG**.
    :type dev_mode: bool
    :returns: None
    """
    preset = config.get_presets()["unreal"]["project_setup"]

    # BUGFIX: defined up-front - the UnrealEnginePython branch below uses
    # it even when the Avalon plugin is not copied (was a NameError).
    plugins_path = os.path.join(pr_dir, "Plugins")

    if os.path.isdir(os.environ.get("AVALON_UNREAL_PLUGIN", "")):
        # copy plugin to correct path under project
        avalon_plugin_path = os.path.join(plugins_path, "Avalon")
        if not os.path.isdir(avalon_plugin_path):
            os.makedirs(avalon_plugin_path, exist_ok=True)
            dir_util._path_created = {}
            dir_util.copy_tree(os.environ.get("AVALON_UNREAL_PLUGIN"),
                               avalon_plugin_path)

        # Missing `Binaries`/`Intermediate` indicates a source-only plugin
        # distribution that must be compiled -> dev mode.
        # BUGFIX: the second condition was `not os.path.join(...)`, which
        # is always False; it must test `os.path.isdir`.
        if (not os.path.isdir(os.path.join(avalon_plugin_path, "Binaries"))
                or not os.path.isdir(
                    os.path.join(avalon_plugin_path, "Intermediate"))):
            dev_mode = True

    # data for project file
    data = {
        "FileVersion": 3,
        "EngineAssociation": ue_version,
        "Category": "",
        "Description": "",
        "Plugins": [
            {"Name": "PythonScriptPlugin", "Enabled": True},
            {"Name": "EditorScriptingUtilities", "Enabled": True},
            {"Name": "Avalon", "Enabled": True}
        ]
    }

    if preset["install_unreal_python_engine"]:
        # If `PYPE_UNREAL_ENGINE_PYTHON_PLUGIN` is set, copy it from there to
        # support offline installation.
        # Otherwise clone UnrealEnginePython to Plugins directory
        # https://github.com/20tab/UnrealEnginePython.git
        uep_path = os.path.join(plugins_path, "UnrealEnginePython")
        if os.environ.get("PYPE_UNREAL_ENGINE_PYTHON_PLUGIN"):
            os.makedirs(uep_path, exist_ok=True)
            dir_util._path_created = {}
            dir_util.copy_tree(
                os.environ.get("PYPE_UNREAL_ENGINE_PYTHON_PLUGIN"),
                uep_path)
        else:
            # WARNING: this will trigger dev_mode, because we need to compile
            # this plugin.
            dev_mode = True
            import git
            git.Repo.clone_from(
                "https://github.com/20tab/UnrealEnginePython.git",
                uep_path)

        data["Plugins"].append(
            {"Name": "UnrealEnginePython", "Enabled": True})

        # BUGFIX: same always-False `not os.path.join(...)` condition as
        # above - now tests directory existence.
        if (not os.path.isdir(os.path.join(uep_path, "Binaries"))
                or not os.path.isdir(
                    os.path.join(uep_path, "Intermediate"))):
            dev_mode = True

    if dev_mode or preset["dev_mode"]:
        # this will add project module and necessary source file to make it
        # C++ project and to (hopefully) make Unreal Editor to compile all
        # sources at start
        data["Modules"] = [{
            "Name": project_name,
            "Type": "Runtime",
            "LoadingPhase": "Default",
            "AdditionalDependencies": ["Engine"],
        }]

        if preset["install_unreal_python_engine"]:
            # now we need to fix python path in:
            # `UnrealEnginePython.Build.cs`
            # to point to our python
            with open(os.path.join(
                    uep_path, "Source",
                    "UnrealEnginePython",
                    "UnrealEnginePython.Build.cs"), mode="r") as f:
                build_file = f.read()

            fix = build_file.replace(
                'private string pythonHome = "";',
                'private string pythonHome = "{}";'.format(
                    sys.base_prefix.replace("\\", "/")))

            with open(os.path.join(
                    uep_path, "Source",
                    "UnrealEnginePython",
                    "UnrealEnginePython.Build.cs"), mode="w") as f:
                f.write(fix)

    # write project file
    project_file = os.path.join(pr_dir, "{}.uproject".format(project_name))
    with open(project_file, mode="w") as pf:
        json.dump(data, pf, indent=4)

    # ensure we have PySide installed in engine
    # TODO: make it work for other platforms 🍎 🐧
    if platform.system().lower() == "windows":
        python_path = os.path.join(engine_path, "Engine", "Binaries",
                                   "ThirdParty", "Python", "Win64",
                                   "python.exe")
        subprocess.run([python_path, "-m",
                        "pip", "install", "pyside"])

    if dev_mode or preset["dev_mode"]:
        _prepare_cpp_project(project_file, engine_path)
def _prepare_cpp_project(project_file: str, engine_path: str) -> None:
    """Add C++ sources to the project and run Unreal's build tooling.

    This writes the target/build/module source files needed for the
    project to be rebuilt along with the Avalon integration plugin and
    then invokes UnrealBuildTool to generate project files and build the
    editor modules.

    There seems to be no automated way to do this from the command line.
    There might be a way to create at least those target and build files
    by some generator; this needs more research as manually writing
    those files is rather hackish. :skull_and_crossbones:

    :param project_file: path to .uproject file
    :type project_file: str
    :param engine_path: path to unreal engine associated with project
    :type engine_path: str
    :returns: None
    """
    project_name = os.path.splitext(os.path.basename(project_file))[0]
    project_dir = os.path.dirname(project_file)
    targets_dir = os.path.join(project_dir, "Source")
    sources_dir = os.path.join(targets_dir, project_name)

    os.makedirs(sources_dir, exist_ok=True)
    os.makedirs(os.path.join(project_dir, "Content"), exist_ok=True)

    # C# target rules for the game target.
    module_target = '''
using UnrealBuildTool;
using System.Collections.Generic;

public class {0}Target : TargetRules
{{
    public {0}Target( TargetInfo Target) : base(Target)
    {{
        Type = TargetType.Game;
        ExtraModuleNames.AddRange( new string[] {{ "{0}" }} );
    }}
}}
'''.format(project_name)

    # C# target rules for the editor target.
    editor_module_target = '''
using UnrealBuildTool;
using System.Collections.Generic;

public class {0}EditorTarget : TargetRules
{{
    public {0}EditorTarget( TargetInfo Target) : base(Target)
    {{
        Type = TargetType.Editor;
        ExtraModuleNames.AddRange( new string[] {{ "{0}" }} );
    }}
}}
'''.format(project_name)

    # C# module build rules.
    module_build = '''
using UnrealBuildTool;

public class {0} : ModuleRules
{{
    public {0}(ReadOnlyTargetRules Target) : base(Target)
    {{
        PCHUsage = PCHUsageMode.UseExplicitOrSharedPCHs;
        PublicDependencyModuleNames.AddRange(new string[] {{ "Core",
            "CoreUObject", "Engine", "InputCore" }});
        PrivateDependencyModuleNames.AddRange(new string[] {{ }});
    }}
}}
'''.format(project_name)

    # Primary game module implementation and header.
    module_cpp = '''
#include "{0}.h"
#include "Modules/ModuleManager.h"

IMPLEMENT_PRIMARY_GAME_MODULE( FDefaultGameModuleImpl, {0}, "{0}" );
'''.format(project_name)

    module_header = '''
#pragma once
#include "CoreMinimal.h"
'''

    # Minimal GameModeBase subclass so UnrealHeaderTool has something to
    # generate code for.
    game_mode_cpp = '''
#include "{0}GameModeBase.h"
'''.format(project_name)

    game_mode_h = '''
#pragma once

#include "CoreMinimal.h"
#include "GameFramework/GameModeBase.h"
#include "{0}GameModeBase.generated.h"

UCLASS()
class {1}_API A{0}GameModeBase : public AGameModeBase
{{
    GENERATED_BODY()
}};
'''.format(project_name, project_name.upper())

    # Map destination paths to their content and write them in one pass.
    files_to_write = {
        os.path.join(targets_dir,
                     "{}.Target.cs".format(project_name)): module_target,
        os.path.join(targets_dir,
                     "{}Editor.Target.cs".format(
                         project_name)): editor_module_target,
        os.path.join(sources_dir,
                     "{}.Build.cs".format(project_name)): module_build,
        os.path.join(sources_dir,
                     "{}.cpp".format(project_name)): module_cpp,
        os.path.join(sources_dir,
                     "{}.h".format(project_name)): module_header,
        os.path.join(sources_dir,
                     "{}GameModeBase.cpp".format(project_name)): game_mode_cpp,
        os.path.join(sources_dir,
                     "{}GameModeBase.h".format(project_name)): game_mode_h,
    }
    for file_path, content in files_to_write.items():
        with open(file_path, mode="w") as f:
            f.write(content)

    # Default to empty so unknown platforms fall through the guard below
    # instead of raising NameError.
    u_build_tool = ""
    u_header_tool = ""
    if platform.system().lower() == "windows":
        u_build_tool = (f"{engine_path}/Engine/Binaries/DotNET/"
                        "UnrealBuildTool.exe")
        u_header_tool = (f"{engine_path}/Engine/Binaries/Win64/"
                         f"UnrealHeaderTool.exe")
    elif platform.system().lower() == "linux":
        # WARNING: there is no UnrealBuildTool on linux?
        pass
    elif platform.system().lower() == "darwin":
        # WARNING: there is no UnrealBuildTool on Mac?
        pass

    if not u_build_tool:
        # No known build tool location on this platform; running
        # subprocess with an empty executable would only raise a
        # confusing error, so stop here.
        return

    u_build_tool = u_build_tool.replace("\\", "/")
    u_header_tool = u_header_tool.replace("\\", "/")

    # First pass: let UnrealBuildTool generate project files.
    command1 = [u_build_tool, "-projectfiles", f"-project={project_file}",
                "-progress"]
    subprocess.run(command1)

    # Second pass: build the editor module. Original code was missing
    # commas here, which fused adjacent string literals into broken
    # arguments (e.g. "-TargetType=Editor-Project=...").
    # NOTE(review): the ",3555" module suffix is kept from the original;
    # its exact meaning is unconfirmed.
    command2 = [u_build_tool, f"-ModuleWithSuffix={project_name},3555",
                "Win64", "Development", "-TargetType=Editor",
                f'-Project="{project_file}"', f'"{project_file}"',
                "-IgnoreJunk"]
    subprocess.run(command2)

    # UnrealHeaderTool invocation is intentionally disabled; kept for
    # reference:
    # uhtmanifest = os.path.join(os.path.dirname(project_file),
    #                            f"{project_name}.uhtmanifest")
    # command3 = [u_header_tool, f'"{project_file}"', f'"{uhtmanifest}"',
    #             "-Unattended", "-WarningsAsErrors", "-installed"]
    # subprocess.run(command3)

11
pype/unreal/plugin.py Normal file
View file

@ -0,0 +1,11 @@
from avalon import api
class Creator(api.Creator):
    """Pype-specific creator placeholder.

    Subclasses :class:`avalon.api.Creator` without overriding anything;
    behaviour is inherited unchanged until Pype-specific functionality
    is added here.
    """
class Loader(api.Loader):
    """Pype-specific loader placeholder.

    Subclasses :class:`avalon.api.Loader` without overriding anything;
    behaviour is inherited unchanged until Pype-specific functionality
    is added here.
    """

View file

@ -1 +1 @@
__version__ = "2.7.0"
__version__ = "2.8.0"

BIN
res/app_icons/ue4.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 45 KiB

View file

@ -4,8 +4,8 @@ import KnobScripter
from pype.nuke.lib import (
writes_version_sync,
onScriptLoad,
checkInventoryVersions
on_script_load,
check_inventory_versions
)
import nuke
@ -15,9 +15,9 @@ log = Logger().get_logger(__name__, "nuke")
# nuke.addOnScriptSave(writes_version_sync)
nuke.addOnScriptSave(onScriptLoad)
nuke.addOnScriptLoad(checkInventoryVersions)
nuke.addOnScriptSave(checkInventoryVersions)
nuke.addOnScriptSave(on_script_load)
nuke.addOnScriptLoad(check_inventory_versions)
nuke.addOnScriptSave(check_inventory_versions)
# nuke.addOnScriptSave(writes_version_sync)
log.info('Automatic syncing of write file knob to script version')