Merge branch 'develop' into feature/OP-3926_gpu-cache

# Conflicts:
#	website/docs/admin_hosts_maya.md
Toke Stuart Jepsen 2023-03-21 07:58:26 +00:00
commit 216c99d839
77 changed files with 2907 additions and 755 deletions

View file

@@ -14,6 +14,7 @@ class AddLastWorkfileToLaunchArgs(PreLaunchHook):
# Execute after workfile template copy
order = 10
app_groups = [
"3dsmax",
"maya",
"nuke",
"nukex",

View file

@@ -31,10 +31,13 @@ from .lib import (
lsattrs,
read,
maintained_selection,
maintained_time,
get_selection,
# unique_name,
)
from .capture import capture
__all__ = [
"install",
@@ -56,9 +59,11 @@ __all__ = [
# Utility functions
"maintained_selection",
"maintained_time",
"lsattr",
"lsattrs",
"read",
"get_selection",
"capture",
# "unique_name",
]

View file

@@ -0,0 +1,278 @@
"""Blender Capture
Playblasting with independent viewport, camera and display options
"""
import contextlib
import bpy
from .lib import maintained_time
from .plugin import deselect_all, create_blender_context
def capture(
camera=None,
width=None,
height=None,
filename=None,
start_frame=None,
end_frame=None,
step_frame=None,
sound=None,
isolate=None,
maintain_aspect_ratio=True,
overwrite=False,
image_settings=None,
display_options=None
):
"""Playblast in an independent windows
Arguments:
camera (str, optional): Name of camera, defaults to "Camera"
width (int, optional): Width of output in pixels
height (int, optional): Height of output in pixels
filename (str, optional): Name of output file path. Defaults to current
render output path.
start_frame (int, optional): Defaults to current start frame.
end_frame (int, optional): Defaults to current end frame.
step_frame (int, optional): Defaults to 1.
sound (str, optional): Specify the sound node to be used during
playblast. When None (default) no sound will be used.
isolate (list): List of nodes to isolate upon capturing
maintain_aspect_ratio (bool, optional): Modify height in order to
maintain aspect ratio.
overwrite (bool, optional): Whether or not to overwrite if the file
already exists. If disabled and the file exists, an error will be
raised.
image_settings (dict, optional): Supplied image settings for render,
using `ImageSettings`
display_options (dict, optional): Supplied display options for render
"""
scene = bpy.context.scene
camera = camera or "Camera"
# Ensure camera exists.
if camera not in scene.objects and camera != "AUTO":
raise RuntimeError("Camera does not exist: {0}".format(camera))
# Ensure resolution.
if width and height:
maintain_aspect_ratio = False
width = width or scene.render.resolution_x
height = height or scene.render.resolution_y
if maintain_aspect_ratio:
ratio = scene.render.resolution_x / scene.render.resolution_y
height = round(width / ratio)
# Get frame range.
if start_frame is None:
start_frame = scene.frame_start
if end_frame is None:
end_frame = scene.frame_end
if step_frame is None:
step_frame = 1
frame_range = (start_frame, end_frame, step_frame)
if filename is None:
filename = scene.render.filepath
render_options = {
"filepath": "{}.".format(filename.rstrip(".")),
"resolution_x": width,
"resolution_y": height,
"use_overwrite": overwrite,
}
with _independent_window() as window:
applied_view(window, camera, isolate, options=display_options)
with contextlib.ExitStack() as stack:
stack.enter_context(maintain_camera(window, camera))
stack.enter_context(applied_frame_range(window, *frame_range))
stack.enter_context(applied_render_options(window, render_options))
stack.enter_context(applied_image_settings(window, image_settings))
stack.enter_context(maintained_time())
bpy.ops.render.opengl(
animation=True,
render_keyed_only=False,
sequencer=False,
write_still=False,
view_context=True
)
return filename
ImageSettings = {
"file_format": "FFMPEG",
"color_mode": "RGB",
"ffmpeg": {
"format": "QUICKTIME",
"use_autosplit": False,
"codec": "H264",
"constant_rate_factor": "MEDIUM",
"gopsize": 18,
"use_max_b_frames": False,
},
}
def isolate_objects(window, objects):
"""Isolate selection"""
deselect_all()
for obj in objects:
obj.select_set(True)
context = create_blender_context(selected=objects, window=window)
bpy.ops.view3d.view_axis(context, type="FRONT")
bpy.ops.view3d.localview(context)
deselect_all()
def _apply_options(entity, options):
for option, value in options.items():
if isinstance(value, dict):
_apply_options(getattr(entity, option), value)
else:
setattr(entity, option, value)
def applied_view(window, camera, isolate=None, options=None):
"""Apply view options to window."""
area = window.screen.areas[0]
space = area.spaces[0]
area.ui_type = "VIEW_3D"
meshes = [obj for obj in window.scene.objects if obj.type == "MESH"]
if camera == "AUTO":
space.region_3d.view_perspective = "ORTHO"
isolate_objects(window, isolate or meshes)
else:
isolate_objects(window, isolate or meshes)
space.camera = window.scene.objects.get(camera)
space.region_3d.view_perspective = "CAMERA"
if isinstance(options, dict):
_apply_options(space, options)
else:
space.shading.type = "SOLID"
space.shading.color_type = "MATERIAL"
space.show_gizmo = False
space.overlay.show_overlays = False
@contextlib.contextmanager
def applied_frame_range(window, start, end, step):
"""Context manager for setting frame range."""
# Store current frame range
current_frame_start = window.scene.frame_start
current_frame_end = window.scene.frame_end
current_frame_step = window.scene.frame_step
# Apply frame range
window.scene.frame_start = start
window.scene.frame_end = end
window.scene.frame_step = step
try:
yield
finally:
# Restore frame range
window.scene.frame_start = current_frame_start
window.scene.frame_end = current_frame_end
window.scene.frame_step = current_frame_step
@contextlib.contextmanager
def applied_render_options(window, options):
"""Context manager for setting render options."""
render = window.scene.render
# Store current settings
original = {}
for opt in options.copy():
try:
original[opt] = getattr(render, opt)
except ValueError:
options.pop(opt)
# Apply settings
_apply_options(render, options)
try:
yield
finally:
# Restore previous settings
_apply_options(render, original)
@contextlib.contextmanager
def applied_image_settings(window, options):
"""Context manager to override image settings."""
options = options or ImageSettings.copy()
ffmpeg = options.pop("ffmpeg", {})
render = window.scene.render
# Store current image settings
original = {}
for opt in options.copy():
try:
original[opt] = getattr(render.image_settings, opt)
except ValueError:
options.pop(opt)
# Store current ffmpeg settings
original_ffmpeg = {}
for opt in ffmpeg.copy():
try:
original_ffmpeg[opt] = getattr(render.ffmpeg, opt)
except ValueError:
ffmpeg.pop(opt)
# Apply image settings
for opt, value in options.items():
setattr(render.image_settings, opt, value)
# Apply ffmpeg settings
for opt, value in ffmpeg.items():
setattr(render.ffmpeg, opt, value)
try:
yield
finally:
# Restore previous settings
for opt, value in original.items():
setattr(render.image_settings, opt, value)
for opt, value in original_ffmpeg.items():
setattr(render.ffmpeg, opt, value)
@contextlib.contextmanager
def maintain_camera(window, camera):
"""Context manager to override camera."""
current_camera = window.scene.camera
if camera in window.scene.objects:
window.scene.camera = window.scene.objects.get(camera)
try:
yield
finally:
window.scene.camera = current_camera
@contextlib.contextmanager
def _independent_window():
"""Create capture-window context."""
context = create_blender_context()
current_windows = set(bpy.context.window_manager.windows)
bpy.ops.wm.window_new(context)
window = list(set(bpy.context.window_manager.windows) - current_windows)[0]
context["window"] = window
try:
yield window
finally:
bpy.ops.wm.window_close(context)
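
For reference, a minimal usage sketch of the capture() API above, run inside a Blender session; the camera name and output path are hypothetical:

from openpype.hosts.blender.api import capture

# Playblast frames 1-50 through the scene camera. Passing both width and
# height disables maintain_aspect_ratio, so both values are used as-is.
output = capture(
    camera="Camera",
    width=1920,
    height=1080,
    filename="/tmp/review/shot010",  # hypothetical output path
    start_frame=1,
    end_frame=50,
    overwrite=True,
)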

View file

@@ -284,3 +284,13 @@ def maintained_selection():
# This could happen if the active node was deleted during the
# context.
log.exception("Failed to set active object.")
@contextlib.contextmanager
def maintained_time():
"""Maintain current frame during context."""
current_time = bpy.context.scene.frame_current
try:
yield
finally:
bpy.context.scene.frame_current = current_time
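
A small sketch of how maintained_time() is meant to be used inside a Blender session; the frame number is arbitrary:

import bpy
from openpype.hosts.blender.api.lib import maintained_time

with maintained_time():
    bpy.context.scene.frame_set(10)  # scrub freely inside the context
# the previous current frame is restored on exit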

View file

@@ -62,7 +62,8 @@ def prepare_data(data, container_name=None):
def create_blender_context(active: Optional[bpy.types.Object] = None,
selected: Optional[bpy.types.Object] = None,):
selected: Optional[bpy.types.Object] = None,
window: Optional[bpy.types.Window] = None):
"""Create a new Blender context. If an object is passed as
parameter, it is set as selected and active.
"""
@@ -72,7 +73,9 @@ def create_blender_context(active: Optional[bpy.types.Object] = None,
override_context = bpy.context.copy()
for win in bpy.context.window_manager.windows:
windows = [window] if window else bpy.context.window_manager.windows
for win in windows:
for area in win.screen.areas:
if area.type == 'VIEW_3D':
for region in area.regions:

View file

@@ -0,0 +1,47 @@
"""Create review."""
import bpy
from openpype.pipeline import legacy_io
from openpype.hosts.blender.api import plugin, lib, ops
from openpype.hosts.blender.api.pipeline import AVALON_INSTANCES
class CreateReview(plugin.Creator):
"""Single baked camera"""
name = "reviewDefault"
label = "Review"
family = "review"
icon = "video-camera"
def process(self):
""" Run the creator on Blender main thread"""
mti = ops.MainThreadItem(self._process)
ops.execute_in_main_thread(mti)
def _process(self):
# Get Instance Container or create it if it does not exist
instances = bpy.data.collections.get(AVALON_INSTANCES)
if not instances:
instances = bpy.data.collections.new(name=AVALON_INSTANCES)
bpy.context.scene.collection.children.link(instances)
# Create instance object
asset = self.data["asset"]
subset = self.data["subset"]
name = plugin.asset_name(asset, subset)
asset_group = bpy.data.collections.new(name=name)
instances.children.link(asset_group)
self.data['task'] = legacy_io.Session.get('AVALON_TASK')
lib.imprint(asset_group, self.data)
if (self.options or {}).get("useSelection"):
selected = lib.get_selection()
for obj in selected:
asset_group.objects.link(obj)
elif (self.options or {}).get("asset_group"):
obj = (self.options or {}).get("asset_group")
asset_group.objects.link(obj)
return asset_group

View file

@@ -0,0 +1,64 @@
import bpy
import pyblish.api
from openpype.pipeline import legacy_io
class CollectReview(pyblish.api.InstancePlugin):
"""Collect Review data
"""
order = pyblish.api.CollectorOrder + 0.3
label = "Collect Review Data"
families = ["review"]
def process(self, instance):
self.log.debug(f"instance: {instance}")
# get cameras
cameras = [
obj
for obj in instance
if isinstance(obj, bpy.types.Object) and obj.type == "CAMERA"
]
assert len(cameras) == 1, (
f"Expected a single camera in the instance, found: {cameras}"
)
camera = cameras[0].name
self.log.debug(f"camera: {camera}")
# get isolate objects list from mesh instance members.
isolate_objects = [
obj
for obj in instance
if isinstance(obj, bpy.types.Object) and obj.type == "MESH"
]
if not instance.data.get("remove"):
task = legacy_io.Session.get("AVALON_TASK")
instance.data.update({
"subset": f"{task}Review",
"review_camera": camera,
"frameStart": instance.context.data["frameStart"],
"frameEnd": instance.context.data["frameEnd"],
"fps": instance.context.data["fps"],
"isolate": isolate_objects,
})
self.log.debug(f"instance data: {instance.data}")
# TODO : Collect audio
audio_tracks = []
instance.data["audio"] = []
for track in audio_tracks:
instance.data["audio"].append(
{
"offset": track.offset.get(),
"filename": track.filename.get(),
}
)

View file

@@ -0,0 +1,123 @@
import os
import clique
import bpy
import pyblish.api
from openpype.pipeline import publish
from openpype.hosts.blender.api import capture
from openpype.hosts.blender.api.lib import maintained_time
class ExtractPlayblast(publish.Extractor):
"""
Extract viewport playblast.
Takes review camera and creates review Quicktime video based on viewport
capture.
"""
label = "Extract Playblast"
hosts = ["blender"]
families = ["review"]
optional = True
order = pyblish.api.ExtractorOrder + 0.01
def process(self, instance):
self.log.info("Extracting capture..")
self.log.info(instance.data)
# get scene fps
fps = instance.data.get("fps")
if fps is None:
fps = bpy.context.scene.render.fps
instance.data["fps"] = fps
self.log.info(f"fps: {fps}")
# If start and end frames cannot be determined,
# get them from Blender timeline.
start = instance.data.get("frameStart", bpy.context.scene.frame_start)
end = instance.data.get("frameEnd", bpy.context.scene.frame_end)
self.log.info(f"start: {start}, end: {end}")
assert end > start, "Invalid time range!"
# get review camera
camera = instance.data.get("review_camera")
# get isolate objects list
isolate = instance.data.get("isolate")
# get output path
stagingdir = self.staging_dir(instance)
filename = instance.name
path = os.path.join(stagingdir, filename)
self.log.info(f"Outputting images to {path}")
project_settings = instance.context.data["project_settings"]["blender"]
presets = project_settings["publish"]["ExtractPlayblast"]["presets"]
preset = presets.get("default")
preset.update({
"camera": camera,
"start_frame": start,
"end_frame": end,
"filename": path,
"overwrite": True,
"isolate": isolate,
})
preset.setdefault(
"image_settings",
{
"file_format": "PNG",
"color_mode": "RGB",
"color_depth": "8",
"compression": 15,
},
)
with maintained_time():
path = capture(**preset)
self.log.debug(f"playblast path {path}")
collected_files = os.listdir(stagingdir)
collections, remainder = clique.assemble(
collected_files,
patterns=[f"{filename}\\.{clique.DIGITS_PATTERN}\\.png$"],
)
if len(collections) > 1:
raise RuntimeError(
f"More than one collection found in stagingdir: {stagingdir}"
)
elif len(collections) == 0:
raise RuntimeError(
f"No collection found in stagingdir: {stagingdir}"
)
frame_collection = collections[0]
self.log.info(f"We found collection of interest {frame_collection}")
instance.data.setdefault("representations", [])
tags = ["review"]
if not instance.data.get("keepImages"):
tags.append("delete")
representation = {
"name": "png",
"ext": "png",
"files": list(frame_collection),
"stagingDir": stagingdir,
"frameStart": start,
"frameEnd": end,
"fps": fps,
"preview": True,
"tags": tags,
"camera_name": camera
}
instance.data["representations"].append(representation)

View file

@@ -0,0 +1,99 @@
import os
import glob
import pyblish.api
from openpype.pipeline import publish
from openpype.hosts.blender.api import capture
from openpype.hosts.blender.api.lib import maintained_time
import bpy
class ExtractThumbnail(publish.Extractor):
"""Extract viewport thumbnail.
Takes review camera and creates a thumbnail based on viewport
capture.
"""
label = "Extract Thumbnail"
hosts = ["blender"]
families = ["review"]
order = pyblish.api.ExtractorOrder + 0.01
presets = {}
def process(self, instance):
self.log.info("Extracting capture..")
stagingdir = self.staging_dir(instance)
filename = instance.name
path = os.path.join(stagingdir, filename)
self.log.info(f"Outputting images to {path}")
camera = instance.data.get("review_camera", "AUTO")
start = instance.data.get("frameStart", bpy.context.scene.frame_start)
family = instance.data.get("family")
isolate = instance.data("isolate", None)
preset = self.presets.get(family, {})
preset.update({
"camera": camera,
"start_frame": start,
"end_frame": start,
"filename": path,
"overwrite": True,
"isolate": isolate,
})
preset.setdefault(
"image_settings",
{
"file_format": "JPEG",
"color_mode": "RGB",
"quality": 100,
},
)
with maintained_time():
path = capture(**preset)
thumbnail = os.path.basename(self._fix_output_path(path))
self.log.info(f"thumbnail: {thumbnail}")
instance.data.setdefault("representations", [])
representation = {
"name": "thumbnail",
"ext": "jpg",
"files": thumbnail,
"stagingDir": stagingdir,
"thumbnail": True
}
instance.data["representations"].append(representation)
def _fix_output_path(self, filepath):
""""Workaround to return correct filepath.
To workaround this we just glob.glob() for any file extensions and
assume the latest modified file is the correct file and return it.
"""
# Catch cancelled playblast
if filepath is None:
self.log.warning(
"Playblast did not result in output path. "
"Playblast is probably interrupted."
)
return None
if not os.path.exists(filepath):
files = glob.glob(f"{filepath}.*.jpg")
if not files:
raise RuntimeError(f"Couldn't find playblast from: {filepath}")
filepath = max(files, key=os.path.getmtime)
return filepath

View file

@@ -1,10 +1,14 @@
from .addon import (
get_fusion_version,
FusionAddon,
FUSION_HOST_DIR,
FUSION_VERSIONS_DICT,
)
__all__ = (
"get_fusion_version",
"FusionAddon",
"FUSION_HOST_DIR",
"FUSION_VERSIONS_DICT",
)

View file

@@ -1,8 +1,52 @@
import os
import re
from openpype.modules import OpenPypeModule, IHostAddon
from openpype.lib import Logger
FUSION_HOST_DIR = os.path.dirname(os.path.abspath(__file__))
# FUSION_VERSIONS_DICT is used by the pre-launch hooks
# The keys correspond to all currently supported Fusion versions
# Each value is a list of corresponding Python home variables and a profile
# number, which is used by the profile hook to set Fusion profile variables.
FUSION_VERSIONS_DICT = {
9: ("FUSION_PYTHON36_HOME", 9),
16: ("FUSION16_PYTHON36_HOME", 16),
17: ("FUSION16_PYTHON36_HOME", 16),
18: ("FUSION_PYTHON3_HOME", 16),
}
def get_fusion_version(app_name):
"""
The function is triggered by the prelaunch hooks to get the Fusion version.
`app_name` is obtained by the prelaunch hooks from
`launch_context.env.get("AVALON_APP_NAME")`.
To get a correct Fusion version, a version number should be present
in the `applications/fusion/variants` key
of the Blackmagic Fusion Application Settings.
"""
log = Logger.get_logger(__name__)
if not app_name:
return
app_version_candidates = re.findall(r"\d+", app_name)
if not app_version_candidates:
return
for app_version in app_version_candidates:
if int(app_version) in FUSION_VERSIONS_DICT:
return int(app_version)
else:
log.info(
"Unsupported Fusion version: {app_version}".format(
app_version=app_version
)
)
class FusionAddon(OpenPypeModule, IHostAddon):
name = "fusion"
@@ -14,15 +58,11 @@ class FusionAddon(OpenPypeModule, IHostAddon):
def get_launch_hook_paths(self, app):
if app.host_name != self.host_name:
return []
return [
os.path.join(FUSION_HOST_DIR, "hooks")
]
return [os.path.join(FUSION_HOST_DIR, "hooks")]
def add_implementation_envs(self, env, _app):
# Set default values if they are not already set via settings
defaults = {
"OPENPYPE_LOG_NO_COLORS": "Yes"
}
defaults = {"OPENPYPE_LOG_NO_COLORS": "Yes"}
for key, value in defaults.items():
if not env.get(key):
env[key] = value
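
How the version lookup behaves for a few variant names (names hypothetical; only the digits in them matter), plus the mapping lookup the hooks perform:

from openpype.hosts.fusion import get_fusion_version, FUSION_VERSIONS_DICT

assert get_fusion_version("fusion/18") == 18
assert get_fusion_version("fusion/16.2") == 16       # first supported digit wins
assert get_fusion_version("fusion/nightly") is None  # no digits at all

py3_var, profile_version = FUSION_VERSIONS_DICT[18]
# py3_var == "FUSION_PYTHON3_HOME", profile_version == 16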

View file

@@ -303,10 +303,18 @@ def get_frame_path(path):
return filename, padding, ext
def get_current_comp():
"""Hack to get current comp in this session"""
def get_fusion_module():
"""Get current Fusion instance"""
fusion = getattr(sys.modules["__main__"], "fusion", None)
return fusion.CurrentComp if fusion else None
return fusion
def get_current_comp():
"""Get current comp in this session"""
fusion = get_fusion_module()
if fusion is not None:
comp = fusion.CurrentComp
return comp
@contextlib.contextmanager

View file

@@ -1,19 +1,19 @@
{
Locked = true,
Global = {
Paths = {
Map = {
["OpenPype:"] = "$(OPENPYPE_FUSION)/deploy",
["Reactor:"] = "$(REACTOR)",
["Config:"] = "UserPaths:Config;OpenPype:Config",
["Scripts:"] = "UserPaths:Scripts;Reactor:System/Scripts;OpenPype:Scripts",
["UserPaths:"] = "UserData:;AllData:;Fusion:;Reactor:Deploy"
},
},
Script = {
PythonVersion = 3,
Python3Forced = true
},
Paths = {
Map = {
["OpenPype:"] = "$(OPENPYPE_FUSION)/deploy",
["Config:"] = "UserPaths:Config;OpenPype:Config",
["Scripts:"] = "UserPaths:Scripts;Reactor:System/Scripts;OpenPype:Scripts",
},
}
},
Script = {
PythonVersion = 3,
Python3Forced = true
},
UserInterface = {
Language = "en_US"
},
},
}

View file

@@ -0,0 +1,161 @@
import os
import shutil
import platform
from pathlib import Path
from openpype.lib import PreLaunchHook, ApplicationLaunchFailed
from openpype.hosts.fusion import (
FUSION_HOST_DIR,
FUSION_VERSIONS_DICT,
get_fusion_version,
)
class FusionCopyPrefsPrelaunch(PreLaunchHook):
"""
Prepares local Fusion profile directory, copies existing Fusion profile.
This also sets the FUSION{VERSION}_MasterPrefs variable, which is used
to apply Master.prefs file to override some Fusion profile settings to:
- enable the OpenPype menu
- force Python 3 over Python 2
- force English interface
Master.prefs is defined in openpype/hosts/fusion/deploy/fusion_shared.prefs
"""
app_groups = ["fusion"]
order = 2
def get_fusion_profile_name(self, profile_version) -> str:
# Returns 'Default', unless FUSION16_PROFILE is set
return os.getenv(f"FUSION{profile_version}_PROFILE", "Default")
def get_fusion_profile_dir(self, profile_version) -> Path:
# Get FUSION_PROFILE_DIR variable
fusion_profile = self.get_fusion_profile_name(profile_version)
fusion_var_prefs_dir = os.getenv(
f"FUSION{profile_version}_PROFILE_DIR"
)
# Check if FUSION_PROFILE_DIR exists
if fusion_var_prefs_dir and Path(fusion_var_prefs_dir).is_dir():
fu_prefs_dir = Path(fusion_var_prefs_dir, fusion_profile)
self.log.info(f"{fusion_var_prefs_dir} is set to {fu_prefs_dir}")
return fu_prefs_dir
def get_profile_source(self, profile_version) -> Path:
"""Get Fusion preferences profile location.
See Per-User_Preferences_and_Paths on VFXpedia for reference.
"""
fusion_profile = self.get_fusion_profile_name(profile_version)
profile_source = self.get_fusion_profile_dir(profile_version)
if profile_source:
return profile_source
# otherwise get default location of the profile folder
fu_prefs_dir = f"Blackmagic Design/Fusion/Profiles/{fusion_profile}"
if platform.system() == "Windows":
profile_source = Path(os.getenv("AppData"), fu_prefs_dir)
elif platform.system() == "Darwin":
profile_source = Path(
"~/Library/Application Support/", fu_prefs_dir
).expanduser()
elif platform.system() == "Linux":
profile_source = Path("~/.fusion", fu_prefs_dir).expanduser()
self.log.info(
f"Locating source Fusion prefs directory: {profile_source}"
)
return profile_source
def get_copy_fusion_prefs_settings(self):
# Get copy preferences options from the global application settings
copy_fusion_settings = self.data["project_settings"]["fusion"].get(
"copy_fusion_settings", {}
)
if not copy_fusion_settings:
self.log.error("Copy prefs settings not found")
copy_status = copy_fusion_settings.get("copy_status", False)
force_sync = copy_fusion_settings.get("force_sync", False)
copy_path = copy_fusion_settings.get("copy_path") or None
if copy_path:
copy_path = Path(copy_path).expanduser()
return copy_status, copy_path, force_sync
def copy_fusion_profile(
self, copy_from: Path, copy_to: Path, force_sync: bool
) -> None:
"""On the first Fusion launch copy the contents of Fusion profile
directory to the working predefined location. If the Openpype profile
folder exists, skip copying, unless re-sync is checked.
If the prefs were not copied on the first launch,
clean Fusion profile will be created in fu_profile_dir.
"""
if copy_to.exists() and not force_sync:
self.log.info(
"Destination Fusion preferences folder already exists: "
f"{copy_to} "
)
return
self.log.info("Starting copying Fusion preferences")
self.log.debug(f"force_sync option is set to {force_sync}")
try:
copy_to.mkdir(exist_ok=True, parents=True)
except PermissionError:
self.log.warning(f"Creating the folder not permitted at {copy_to}")
return
if not copy_from.exists():
self.log.warning(f"Fusion preferences not found in {copy_from}")
return
for file in copy_from.iterdir():
if file.suffix in (
".prefs",
".def",
".blocklist",
".fu",
".toolbars",
):
# convert Path to str for shutil.copy compatibility below Python 3.6
shutil.copy(str(file), str(copy_to))
self.log.info(
f"Successfully copied preferences: {copy_from} to {copy_to}"
)
def execute(self):
(
copy_status,
fu_profile_dir,
force_sync,
) = self.get_copy_fusion_prefs_settings()
# Get launched application context and return correct app version
app_name = self.launch_context.env.get("AVALON_APP_NAME")
app_version = get_fusion_version(app_name)
if app_version is None:
version_names = ", ".join(str(x) for x in FUSION_VERSIONS_DICT)
raise ApplicationLaunchFailed(
"Unable to detect valid Fusion version number from app "
f"name: {app_name}.\nMake sure to include at least a digit "
"to indicate the Fusion version like '18'.\n"
f"Detectable Fusion versions are: {version_names}"
)
_, profile_version = FUSION_VERSIONS_DICT[app_version]
fu_profile = self.get_fusion_profile_name(profile_version)
# do a copy of Fusion profile if copy_status toggle is enabled
if copy_status and fu_profile_dir is not None:
profile_source = self.get_profile_source(profile_version)
dest_folder = Path(fu_profile_dir, fu_profile)
self.copy_fusion_profile(profile_source, dest_folder, force_sync)
# Add temporary profile directory variables to customize Fusion
# to define where it can read custom scripts and tools from
fu_profile_dir_variable = f"FUSION{profile_version}_PROFILE_DIR"
self.log.info(f"Setting {fu_profile_dir_variable}: {fu_profile_dir}")
self.launch_context.env[fu_profile_dir_variable] = str(fu_profile_dir)
# Add custom Fusion Master Prefs and the temporary
# profile directory variables to customize Fusion
# to define where it can read custom scripts and tools from
master_prefs_variable = f"FUSION{profile_version}_MasterPrefs"
master_prefs = Path(FUSION_HOST_DIR, "deploy", "fusion_shared.prefs")
self.log.info(f"Setting {master_prefs_variable}: {master_prefs}")
self.launch_context.env[master_prefs_variable] = str(master_prefs)
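
A sketch of the environment this hook ends up defining for Fusion 18 (whose profile_version maps to 16); the profile directory is a hypothetical copy_path from project settings:

profile_version = 16  # FUSION_VERSIONS_DICT[18] -> ("FUSION_PYTHON3_HOME", 16)
fu_profile_dir = "/home/user/openpype/fusion_profiles"  # hypothetical copy_path
launch_env = {
    f"FUSION{profile_version}_PROFILE_DIR": fu_profile_dir,
    f"FUSION{profile_version}_MasterPrefs":
        "<FUSION_HOST_DIR>/deploy/fusion_shared.prefs",
}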

View file

@@ -1,32 +1,43 @@
import os
from openpype.lib import PreLaunchHook, ApplicationLaunchFailed
from openpype.hosts.fusion import FUSION_HOST_DIR
from openpype.hosts.fusion import (
FUSION_HOST_DIR,
FUSION_VERSIONS_DICT,
get_fusion_version,
)
class FusionPrelaunch(PreLaunchHook):
"""Prepares OpenPype Fusion environment
Requires FUSION_PYTHON3_HOME to be defined in the environment for Fusion
to point at a valid Python 3 build for Fusion. That is Python 3.3-3.10
for Fusion 18 and Python 3.6 for Fusion 16 and 17.
This also sets FUSION16_MasterPrefs to apply the fusion master prefs
as set in openpype/hosts/fusion/deploy/fusion_shared.prefs to enable
the OpenPype menu and force Python 3 over Python 2.
"""
Prepares OpenPype Fusion environment.
Requires correct Python home variable to be defined in the environment
settings for Fusion to point at a valid Python 3 build for Fusion.
Python 3 versions supported by Fusion:
Fusion 9, 16, 17 : Python 3.6
Fusion 18 : Python 3.6 - 3.10
"""
app_groups = ["fusion"]
order = 1
def execute(self):
# making sure python 3 is installed at provided path
# Py 3.3-3.10 for Fusion 18+ or Py 3.6 for Fu 16-17
py3_var = "FUSION_PYTHON3_HOME"
app_data = self.launch_context.env.get("AVALON_APP_NAME")
app_version = get_fusion_version(app_data)
if not app_version:
raise ApplicationLaunchFailed(
"Fusion version information not found in System settings.\n"
"The key field in the 'applications/fusion/variants' should "
"consist a number, corresponding to major Fusion version."
)
py3_var, _ = FUSION_VERSIONS_DICT[app_version]
fusion_python3_home = self.launch_context.env.get(py3_var, "")
self.log.info(f"Looking for Python 3 in: {fusion_python3_home}")
for path in fusion_python3_home.split(os.pathsep):
# Allow defining multiple paths to allow "fallback" to other
# path. But make sure to set only a single path as final variable.
# Allow defining multiple paths, separated by os.pathsep,
# to allow "fallback" to other paths.
# But make sure to set only a single path as the final variable.
py3_dir = os.path.normpath(path)
if os.path.isdir(py3_dir):
break
@@ -43,19 +54,10 @@ class FusionPrelaunch(PreLaunchHook):
self.launch_context.env[py3_var] = py3_dir
# Fusion 18+ requires FUSION_PYTHON3_HOME to also be on PATH
self.launch_context.env["PATH"] += ";" + py3_dir
if app_version >= 18:
self.launch_context.env["PATH"] += os.pathsep + py3_dir
# Fusion 16 and 17 use FUSION16_PYTHON36_HOME instead of
# FUSION_PYTHON3_HOME and will only work with a Python 3.6 version
# TODO: Detect Fusion version to only set for specific Fusion build
self.launch_context.env["FUSION16_PYTHON36_HOME"] = py3_dir
self.launch_context.env[py3_var] = py3_dir
# Add our Fusion Master Prefs which is the only way to customize
# Fusion to define where it can read custom scripts and tools from
self.log.info(f"Setting OPENPYPE_FUSION: {FUSION_HOST_DIR}")
self.launch_context.env["OPENPYPE_FUSION"] = FUSION_HOST_DIR
pref_var = "FUSION16_MasterPrefs" # used by Fusion 16, 17 and 18
prefs = os.path.join(FUSION_HOST_DIR, "deploy", "fusion_shared.prefs")
self.log.info(f"Setting {pref_var}: {prefs}")
self.launch_context.env[pref_var] = prefs
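
A sketch of the fallback behaviour in the loop above: the first existing directory in the pathsep-separated value wins (paths hypothetical):

import os

fusion_python3_home = os.pathsep.join(["/opt/python310", "/usr/local/python39"])
py3_dir = None
for path in fusion_python3_home.split(os.pathsep):
    candidate = os.path.normpath(path)
    if os.path.isdir(candidate):
        py3_dir = candidate  # first existing directory wins
        break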

View file

@@ -8,6 +8,7 @@ from openpype.hosts.max.api.lib import (
get_current_renderer,
get_default_render_folder
)
from openpype.pipeline.context_tools import get_current_project_asset
from openpype.settings import get_project_settings
from openpype.pipeline import legacy_io
@@ -34,14 +35,20 @@ class RenderProducts(object):
filename,
container)
context = get_current_project_asset()
startFrame = context["data"].get("frameStart")
endFrame = context["data"].get("frameEnd") + 1
img_fmt = self._project_settings["max"]["RenderSettings"]["image_format"] # noqa
full_render_list = []
beauty = self.beauty_render_product(output_file, img_fmt)
full_render_list.append(beauty)
full_render_list = self.beauty_render_product(output_file,
startFrame,
endFrame,
img_fmt)
renderer_class = get_current_renderer()
renderer = str(renderer_class).split(":")[0]
if renderer == "VUE_File_Renderer":
return full_render_list
@@ -54,6 +61,8 @@ class RenderProducts(object):
"Quicksilver_Hardware_Renderer",
]:
render_elem_list = self.render_elements_product(output_file,
startFrame,
endFrame,
img_fmt)
if render_elem_list:
full_render_list.extend(iter(render_elem_list))
@@ -61,18 +70,24 @@ class RenderProducts(object):
if renderer == "Arnold":
aov_list = self.arnold_render_product(output_file,
startFrame,
endFrame,
img_fmt)
if aov_list:
full_render_list.extend(iter(aov_list))
return full_render_list
def beauty_render_product(self, folder, fmt):
beauty_output = f"{folder}.####.{fmt}"
beauty_output = beauty_output.replace("\\", "/")
return beauty_output
def beauty_render_product(self, folder, startFrame, endFrame, fmt):
beauty_frame_range = []
for f in range(startFrame, endFrame):
beauty_output = f"{folder}.{f}.{fmt}"
beauty_output = beauty_output.replace("\\", "/")
beauty_frame_range.append(beauty_output)
return beauty_frame_range
# TODO: Get the arnold render product
def arnold_render_product(self, folder, fmt):
def arnold_render_product(self, folder, startFrame, endFrame, fmt):
"""Get all the Arnold AOVs"""
aovs = []
@@ -85,15 +100,17 @@ class RenderProducts(object):
for i in range(aov_group_num):
# get the specific AOV group
for aov in aov_mgr.drivers[i].aov_list:
render_element = f"{folder}_{aov.name}.####.{fmt}"
render_element = render_element.replace("\\", "/")
aovs.append(render_element)
for f in range(startFrame, endFrame):
render_element = f"{folder}_{aov.name}.{f}.{fmt}"
render_element = render_element.replace("\\", "/")
aovs.append(render_element)
# close the AOVs manager window
amw.close()
return aovs
def render_elements_product(self, folder, fmt):
def render_elements_product(self, folder, startFrame, endFrame, fmt):
"""Get all the render element output files. """
render_dirname = []
@@ -104,9 +121,10 @@ class RenderProducts(object):
renderlayer_name = render_elem.GetRenderElement(i)
target, renderpass = str(renderlayer_name).split(":")
if renderlayer_name.enabled:
render_element = f"{folder}_{renderpass}.####.{fmt}"
render_element = render_element.replace("\\", "/")
render_dirname.append(render_element)
for f in range(startFrame, endFrame):
render_element = f"{folder}_{renderpass}.{f}.{fmt}"
render_element = render_element.replace("\\", "/")
render_dirname.append(render_element)
return render_dirname
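
The net effect of this change is that each render product expands from a single "####"-padded pattern into one explicit path per frame. A sketch with hypothetical values (endFrame was already incremented by one, so range() is exclusive):

folder = "C:\\renders\\shot010\\beauty"
start_frame, end_frame, fmt = 1, 4, "png"
beauty = [
    "{}.{}.{}".format(folder, f, fmt).replace("\\", "/")
    for f in range(start_frame, end_frame)
]
# -> ["C:/renders/shot010/beauty.1.png", ..., "C:/renders/shot010/beauty.3.png"]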

View file

@@ -0,0 +1,26 @@
# -*- coding: utf-8 -*-
"""Creator plugin for creating raw max scene."""
from openpype.hosts.max.api import plugin
from openpype.pipeline import CreatedInstance
class CreateMaxScene(plugin.MaxCreator):
identifier = "io.openpype.creators.max.maxScene"
label = "Max Scene"
family = "maxScene"
icon = "gear"
def create(self, subset_name, instance_data, pre_create_data):
from pymxs import runtime as rt
sel_obj = list(rt.selection)
instance = super(CreateMaxScene, self).create(
subset_name,
instance_data,
pre_create_data) # type: CreatedInstance
container = rt.getNodeByName(instance.data.get("instance_node"))
# TODO: Disable "Add to Containers?" Panel
# parent the selected objects to the container
for obj in sel_obj:
obj.parent = container
# for additional work on the node:
# instance_node = rt.getNodeByName(instance.get("instance_node"))

View file

@@ -0,0 +1,26 @@
# -*- coding: utf-8 -*-
"""Creator plugin for creating point cloud."""
from openpype.hosts.max.api import plugin
from openpype.pipeline import CreatedInstance
class CreatePointCloud(plugin.MaxCreator):
identifier = "io.openpype.creators.max.pointcloud"
label = "Point Cloud"
family = "pointcloud"
icon = "gear"
def create(self, subset_name, instance_data, pre_create_data):
from pymxs import runtime as rt
sel_obj = list(rt.selection)
instance = super(CreatePointCloud, self).create(
subset_name,
instance_data,
pre_create_data) # type: CreatedInstance
container = rt.getNodeByName(instance.data.get("instance_node"))
# TODO: Disable "Add to Containers?" Panel
# parent the selected objects to the container
for obj in sel_obj:
obj.parent = container
# for additional work on the node:
# instance_node = rt.getNodeByName(instance.get("instance_node"))

View file

@@ -9,7 +9,8 @@ from openpype.hosts.max.api import lib
class MaxSceneLoader(load.LoaderPlugin):
"""Max Scene Loader"""
families = ["camera"]
families = ["camera",
"maxScene"]
representations = ["max"]
order = -8
icon = "code-fork"
@@ -46,8 +47,7 @@ class MaxSceneLoader(load.LoaderPlugin):
path = get_representation_path(representation)
node = rt.getNodeByName(container["instance_node"])
max_objects = self.get_container_children(node)
max_objects = node.Children
for max_object in max_objects:
max_object.source = path

View file

@@ -0,0 +1,51 @@
import os
from openpype.pipeline import (
load, get_representation_path
)
from openpype.hosts.max.api.pipeline import containerise
from openpype.hosts.max.api import lib
class PointCloudLoader(load.LoaderPlugin):
"""Point Cloud Loader"""
families = ["pointcloud"]
representations = ["prt"]
order = -8
icon = "code-fork"
color = "green"
def load(self, context, name=None, namespace=None, data=None):
"""load point cloud by tyCache"""
from pymxs import runtime as rt
filepath = os.path.normpath(self.fname)
obj = rt.tyCache()
obj.filename = filepath
prt_container = rt.getNodeByName(f"{obj.name}")
return containerise(
name, [prt_container], context, loader=self.__class__.__name__)
def update(self, container, representation):
"""update the container"""
from pymxs import runtime as rt
path = get_representation_path(representation)
node = rt.getNodeByName(container["instance_node"])
prt_objects = self.get_container_children(node)
for prt_object in prt_objects:
prt_object.source = path
lib.imprint(container["instance_node"], {
"representation": str(representation["_id"])
})
def remove(self, container):
"""remove the container"""
from pymxs import runtime as rt
node = rt.getNodeByName(container["instance_node"])
rt.delete(node)

View file

@@ -20,7 +20,8 @@ class ExtractMaxSceneRaw(publish.Extractor,
order = pyblish.api.ExtractorOrder - 0.2
label = "Extract Max Scene (Raw)"
hosts = ["max"]
families = ["camera"]
families = ["camera",
"maxScene"]
optional = True
def process(self, instance):

View file

@@ -0,0 +1,207 @@
import os
import pyblish.api
from openpype.pipeline import publish
from pymxs import runtime as rt
from openpype.hosts.max.api import (
maintained_selection
)
from openpype.settings import get_project_settings
from openpype.pipeline import legacy_io
def get_setting(project_setting=None):
project_setting = get_project_settings(
legacy_io.Session["AVALON_PROJECT"]
)
return (project_setting["max"]["PointCloud"])
class ExtractPointCloud(publish.Extractor):
"""
Extract PRT format with tyFlow operators.
Notes:
Currently only works for the default partition setting.
Methods:
export_particle(): sets up all job arguments for attributes
to be exported in MAXScript
get_operators(): get the export_particle operator
get_custom_attr(): get all custom channel attributes from the OpenPype
settings and set them as job arguments before exporting
get_files(): get the files with the tyFlow naming convention
before publishing
partition_output_name(): get the naming with partition settings
get_partition(): get the partition value
"""
order = pyblish.api.ExtractorOrder - 0.2
label = "Extract Point Cloud"
hosts = ["max"]
families = ["pointcloud"]
def process(self, instance):
start = int(instance.context.data.get("frameStart"))
end = int(instance.context.data.get("frameEnd"))
container = instance.data["instance_node"]
self.log.info("Extracting PRT...")
stagingdir = self.staging_dir(instance)
filename = "{name}.prt".format(**instance.data)
path = os.path.join(stagingdir, filename)
with maintained_selection():
job_args = self.export_particle(container,
start,
end,
path)
for job in job_args:
rt.execute(job)
self.log.info("Performing Extraction ...")
if "representations" not in instance.data:
instance.data["representations"] = []
self.log.info("Writing PRT with TyFlow Plugin...")
filenames = self.get_files(container, path, start, end)
self.log.debug("filenames: {0}".format(filenames))
partition = self.partition_output_name(container)
representation = {
'name': 'prt',
'ext': 'prt',
'files': filenames if len(filenames) > 1 else filenames[0],
"stagingDir": stagingdir,
"outputName": partition # partition value
}
instance.data["representations"].append(representation)
self.log.info("Extracted instance '%s' to: %s" % (instance.name,
path))
def export_particle(self,
container,
start,
end,
filepath):
job_args = []
opt_list = self.get_operators(container)
for operator in opt_list:
start_frame = "{0}.frameStart={1}".format(operator,
start)
job_args.append(start_frame)
end_frame = "{0}.frameEnd={1}".format(operator,
end)
job_args.append(end_frame)
filepath = filepath.replace("\\", "/")
prt_filename = '{0}.PRTFilename="{1}"'.format(operator,
filepath)
job_args.append(prt_filename)
# Partition
mode = "{0}.PRTPartitionsMode=2".format(operator)
job_args.append(mode)
additional_args = self.get_custom_attr(operator)
for args in additional_args:
job_args.append(args)
prt_export = "{0}.exportPRT()".format(operator)
job_args.append(prt_export)
return job_args
def get_operators(self, container):
"""Get Export Particles Operator"""
opt_list = []
node = rt.getNodebyName(container)
selection_list = list(node.Children)
for sel in selection_list:
obj = sel.baseobject
# TODO: see if this can be done in MAXScript instead
anim_names = rt.getsubanimnames(obj)
for anim_name in anim_names:
sub_anim = rt.getsubanim(obj, anim_name)
boolean = rt.isProperty(sub_anim, "Export_Particles")
event_name = sub_anim.name
if boolean:
opt = "${0}.{1}.export_particles".format(sel.name,
event_name)
opt_list.append(opt)
return opt_list
def get_custom_attr(self, operator):
"""Get Custom Attributes"""
custom_attr_list = []
attr_settings = get_setting()["attribute"]
for key, value in attr_settings.items():
custom_attr = "{0}.PRTChannels_{1}=True".format(operator,
value)
self.log.debug(
"{0} will be added as custom attribute".format(key)
)
custom_attr_list.append(custom_attr)
return custom_attr_list
def get_files(self,
container,
path,
start_frame,
end_frame):
"""
Note:
Set the filenames according to the tyFlow file
naming convention for publishing.
Actual File Output from tyFlow:
<SceneFile>__part<PartitionStart>of<PartitionCount>.<frame>.prt
e.g. tyFlow_cloth_CCCS_blobbyFill_001__part1of1_00004.prt
"""
filenames = []
filename = os.path.basename(path)
orig_name, ext = os.path.splitext(filename)
partition_count, partition_start = self.get_partition(container)
for frame in range(int(start_frame), int(end_frame) + 1):
actual_name = "{}__part{:03}of{}_{:05}".format(orig_name,
partition_start,
partition_count,
frame)
actual_filename = path.replace(orig_name, actual_name)
filenames.append(os.path.basename(actual_filename))
return filenames
def partition_output_name(self, container):
"""
Notes:
Get the partition-based output name used for mapping
the published file output.
TODO:
Make the partition setting customizable.
"""
partition_count, partition_start = self.get_partition(container)
partition = "_part{:03}of{}".format(partition_start,
partition_count)
return partition
def get_partition(self, container):
"""
Get Partition Value
"""
opt_list = self.get_operators(container)
for operator in opt_list:
count = rt.execute(f'{operator}.PRTPartitionsCount')
start = rt.execute(f'{operator}.PRTPartitionsFrom')
return count, start
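
A worked sketch of the get_files() naming above, with hypothetical values:

orig_name = "pointcloudMain"
partition_count, partition_start = 1, 1
names = [
    "{}__part{:03}of{}_{:05}.prt".format(
        orig_name, partition_start, partition_count, frame)
    for frame in (1, 2)
]
# -> ["pointcloudMain__part001of1_00001.prt",
#     "pointcloudMain__part001of1_00002.prt"]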

View file

@@ -0,0 +1,23 @@
# -*- coding: utf-8 -*-
import pyblish.api
from openpype.pipeline import PublishValidationError
from pymxs import runtime as rt
class ValidateMaxContents(pyblish.api.InstancePlugin):
"""Validates Max contents.
Check if MaxScene container includes any contents underneath.
"""
order = pyblish.api.ValidatorOrder
families = ["camera",
"maxScene",
"maxrender"]
hosts = ["max"]
label = "Max Scene Contents"
def process(self, instance):
container = rt.getNodeByName(instance.data["instance_node"])
if not list(container.Children):
raise PublishValidationError("No content found in the container")

View file

@@ -0,0 +1,191 @@
import pyblish.api
from openpype.pipeline import PublishValidationError
from pymxs import runtime as rt
from openpype.settings import get_project_settings
from openpype.pipeline import legacy_io
def get_setting(project_setting=None):
project_setting = get_project_settings(
legacy_io.Session["AVALON_PROJECT"]
)
return (project_setting["max"]["PointCloud"])
class ValidatePointCloud(pyblish.api.InstancePlugin):
"""Validate that workfile was saved."""
order = pyblish.api.ValidatorOrder
families = ["pointcloud"]
hosts = ["max"]
label = "Validate Point Cloud"
def process(self, instance):
"""
Notes:
1. Validate the container only include tyFlow objects
2. Validate if tyFlow operator Export Particle exists
3. Validate if the export mode of Export Particle is at PRT format
4. Validate the partition count and range set as default value
Partition Count : 100
Partition Range : 1 to 1
5. Validate if the custom attribute(s) exist as parameter(s)
of export_particle operator
"""
invalid = self.get_tyFlow_object(instance)
if invalid:
raise PublishValidationError("Non tyFlow object "
"found: {}".format(invalid))
invalid = self.get_tyFlow_operator(instance)
if invalid:
raise PublishValidationError("tyFlow ExportParticle operator "
"not found: {}".format(invalid))
invalid = self.validate_export_mode(instance)
if invalid:
raise PublishValidationError("The export mode is not at PRT")
invalid = self.validate_partition_value(instance)
if invalid:
raise PublishValidationError("tyFlow Partition setting is "
"not at the default value")
invalid = self.validate_custom_attribute(instance)
if invalid:
raise PublishValidationError("Custom Attribute not found "
":{}".format(invalid))
def get_tyFlow_object(self, instance):
invalid = []
container = instance.data["instance_node"]
self.log.info("Validating tyFlow container "
"for {}".format(container))
con = rt.getNodeByName(container)
selection_list = list(con.Children)
for sel in selection_list:
sel_tmp = str(sel)
if rt.classOf(sel) in [rt.tyFlow,
rt.Editable_Mesh]:
if "tyFlow" not in sel_tmp:
invalid.append(sel)
else:
invalid.append(sel)
return invalid
def get_tyFlow_operator(self, instance):
invalid = []
container = instance.data["instance_node"]
self.log.info("Validating tyFlow object "
"for {}".format(container))
con = rt.getNodeByName(container)
selection_list = list(con.Children)
bool_list = []
for sel in selection_list:
obj = sel.baseobject
anim_names = rt.getsubanimnames(obj)
for anim_name in anim_names:
# get all the names of the related tyFlow nodes
sub_anim = rt.getsubanim(obj, anim_name)
# check if there is export particle operator
boolean = rt.isProperty(sub_anim, "Export_Particles")
bool_list.append(str(boolean))
# if the export_particles property is not there
# it means there is not a "Export Particle" operator
if "True" not in bool_list:
self.log.error("Operator 'Export Particles' not found!")
invalid.append(sel)
return invalid
def validate_custom_attribute(self, instance):
invalid = []
container = instance.data["instance_node"]
self.log.info("Validating tyFlow custom "
"attributes for {}".format(container))
con = rt.getNodeByName(container)
selection_list = list(con.Children)
for sel in selection_list:
obj = sel.baseobject
anim_names = rt.getsubanimnames(obj)
for anim_name in anim_names:
# get all the names of the related tyFlow nodes
sub_anim = rt.getsubanim(obj, anim_name)
# check if there is export particle operator
boolean = rt.isProperty(sub_anim, "Export_Particles")
event_name = sub_anim.name
if boolean:
opt = "${0}.{1}.export_particles".format(sel.name,
event_name)
attributes = get_setting()["attribute"]
for key, value in attributes.items():
custom_attr = "{0}.PRTChannels_{1}".format(opt,
value)
try:
rt.execute(custom_attr)
except RuntimeError:
invalid.append(key)
return invalid
def validate_partition_value(self, instance):
invalid = []
container = instance.data["instance_node"]
self.log.info("Validating tyFlow partition "
"value for {}".format(container))
con = rt.getNodeByName(container)
selection_list = list(con.Children)
for sel in selection_list:
obj = sel.baseobject
anim_names = rt.getsubanimnames(obj)
for anim_name in anim_names:
# get all the names of the related tyFlow nodes
sub_anim = rt.getsubanim(obj, anim_name)
# check if there is export particle operator
boolean = rt.isProperty(sub_anim, "Export_Particles")
event_name = sub_anim.name
if boolean:
opt = "${0}.{1}.export_particles".format(sel.name,
event_name)
count = rt.execute(f'{opt}.PRTPartitionsCount')
if count != 100:
invalid.append(count)
start = rt.execute(f'{opt}.PRTPartitionsFrom')
if start != 1:
invalid.append(start)
end = rt.execute(f'{opt}.PRTPartitionsTo')
if end != 1:
invalid.append(end)
return invalid
def validate_export_mode(self, instance):
invalid = []
container = instance.data["instance_node"]
self.log.info("Validating tyFlow export "
"mode for {}".format(container))
con = rt.getNodeByName(container)
selection_list = list(con.Children)
for sel in selection_list:
obj = sel.baseobject
anim_names = rt.getsubanimnames(obj)
for anim_name in anim_names:
# get all the names of the related tyFlow nodes
sub_anim = rt.getsubanim(obj, anim_name)
# check if there is export particle operator
boolean = rt.isProperty(sub_anim, "Export_Particles")
event_name = sub_anim.name
if boolean:
opt = "${0}.{1}.export_particles".format(sel.name,
event_name)
export_mode = rt.execute(f'{opt}.exportMode')
if export_mode != 1:
invalid.append(export_mode)
return invalid

View file

@@ -2099,29 +2099,40 @@ def get_frame_range():
}
def reset_frame_range():
"""Set frame range to current asset"""
def reset_frame_range(playback=True, render=True, fps=True):
"""Set frame range to current asset
fps = convert_to_maya_fps(
float(legacy_io.Session.get("AVALON_FPS", 25))
)
set_scene_fps(fps)
Args:
playback (bool, Optional): Whether to set the maya timeline playback
frame range. Defaults to True.
render (bool, Optional): Whether to set the maya render frame range.
Defaults to True.
fps (bool, Optional): Whether to set scene FPS. Defaults to True.
"""
if fps:
fps = convert_to_maya_fps(
float(legacy_io.Session.get("AVALON_FPS", 25))
)
set_scene_fps(fps)
frame_range = get_frame_range()
frame_start = frame_range["frameStart"] - int(frame_range["handleStart"])
frame_end = frame_range["frameEnd"] + int(frame_range["handleEnd"])
cmds.playbackOptions(minTime=frame_start)
cmds.playbackOptions(maxTime=frame_end)
cmds.playbackOptions(animationStartTime=frame_start)
cmds.playbackOptions(animationEndTime=frame_end)
cmds.playbackOptions(minTime=frame_start)
cmds.playbackOptions(maxTime=frame_end)
cmds.currentTime(frame_start)
if playback:
cmds.playbackOptions(minTime=frame_start)
cmds.playbackOptions(maxTime=frame_end)
cmds.playbackOptions(animationStartTime=frame_start)
cmds.playbackOptions(animationEndTime=frame_end)
cmds.playbackOptions(minTime=frame_start)
cmds.playbackOptions(maxTime=frame_end)
cmds.currentTime(frame_start)
cmds.setAttr("defaultRenderGlobals.startFrame", frame_start)
cmds.setAttr("defaultRenderGlobals.endFrame", frame_end)
if render:
cmds.setAttr("defaultRenderGlobals.startFrame", frame_start)
cmds.setAttr("defaultRenderGlobals.endFrame", frame_end)
def reset_scene_resolution():

View file

@@ -158,7 +158,7 @@ class RenderSettings(object):
cmds.setAttr(
"defaultArnoldDriver.mergeAOVs", multi_exr)
self._additional_attribs_setter(additional_options)
reset_frame_range()
reset_frame_range(playback=False, fps=False, render=True)
def _set_redshift_settings(self, width, height):
"""Sets settings for Redshift."""

View file

@@ -13,6 +13,7 @@ class CreateAnimation(plugin.Creator):
icon = "male"
write_color_sets = False
write_face_sets = False
include_parent_hierarchy = False
include_user_defined_attributes = False
def __init__(self, *args, **kwargs):
@@ -37,7 +38,7 @@ class CreateAnimation(plugin.Creator):
self.data["visibleOnly"] = False
# Include the groups above the out_SET content
self.data["includeParentHierarchy"] = False # Include parent groups
self.data["includeParentHierarchy"] = self.include_parent_hierarchy
# Default to exporting world-space
self.data["worldSpace"] = True

View file

@@ -0,0 +1,178 @@
import os
import json
from collections import defaultdict
from maya import cmds
from openpype.pipeline import (
InventoryAction, get_representation_context, get_representation_path
)
from openpype.hosts.maya.api.lib import get_container_members, get_id
class ConnectYetiRig(InventoryAction):
"""Connect Yeti Rig with an animation or pointcache."""
label = "Connect Yeti Rig"
icon = "link"
color = "white"
def process(self, containers):
# Validate that more than one container is selected.
message = (
"Only 1 container selected. 2+ containers needed for this action."
)
if len(containers) == 1:
self.display_warning(message)
return
# Categorize containers by family.
containers_by_family = defaultdict(list)
for container in containers:
family = get_representation_context(
container["representation"]
)["subset"]["data"]["family"]
containers_by_family[family].append(container)
# Validate to only 1 source container.
source_containers = containers_by_family.get("animation", [])
source_containers += containers_by_family.get("pointcache", [])
source_container_namespaces = [
x["namespace"] for x in source_containers
]
message = (
"{} animation containers selected:\n\n{}\n\nOnly select 1 of type "
"\"animation\" or \"pointcache\".".format(
len(source_containers), source_container_namespaces
)
)
if len(source_containers) != 1:
self.display_warning(message)
return
source_container = source_containers[0]
source_ids = self.nodes_by_id(source_container)
# Target containers.
target_ids = {}
inputs = []
yeti_rig_containers = containers_by_family.get("yetiRig")
if not yeti_rig_containers:
self.display_warning(
"Select at least one yetiRig container"
)
return
for container in yeti_rig_containers:
target_ids.update(self.nodes_by_id(container))
maya_file = get_representation_path(
get_representation_context(
container["representation"]
)["representation"]
)
_, ext = os.path.splitext(maya_file)
settings_file = maya_file.replace(ext, ".rigsettings")
if not os.path.exists(settings_file):
continue
with open(settings_file) as f:
inputs.extend(json.load(f)["inputs"])
# Compare loaded connections to scene.
for input in inputs:
source_node = source_ids.get(input["sourceID"])
target_node = target_ids.get(input["destinationID"])
if not source_node or not target_node:
self.log.debug(
"Could not find nodes for input:\n" +
json.dumps(input, indent=4, sort_keys=True)
)
continue
source_attr, target_attr = input["connections"]
if not cmds.attributeQuery(
source_attr, node=source_node, exists=True
):
self.log.debug(
"Could not find attribute {} on node {} for "
"input:\n{}".format(
source_attr,
source_node,
json.dumps(input, indent=4, sort_keys=True)
)
)
continue
if not cmds.attributeQuery(
target_attr, node=target_node, exists=True
):
self.log.debug(
"Could not find attribute {} on node {} for "
"input:\n{}".format(
target_attr,
target_node,
json.dumps(input, indent=4, sort_keys=True)
)
)
continue
source_plug = "{}.{}".format(
source_node, source_attr
)
target_plug = "{}.{}".format(
target_node, target_attr
)
if cmds.isConnected(
source_plug, target_plug, ignoreUnitConversion=True
):
self.log.debug(
"Connection already exists: {} -> {}".format(
source_plug, target_plug
)
)
continue
cmds.connectAttr(source_plug, target_plug, force=True)
self.log.debug(
"Connected attributes: {} -> {}".format(
source_plug, target_plug
)
)
def nodes_by_id(self, container):
ids = {}
for member in get_container_members(container):
id = get_id(member)
if not id:
continue
ids[id] = member
return ids
def display_warning(self, message, show_cancel=False):
"""Show feedback to user.
Returns:
bool
"""
from qtpy import QtWidgets
accept = QtWidgets.QMessageBox.Ok
if show_cancel:
buttons = accept | QtWidgets.QMessageBox.Cancel
else:
buttons = accept
state = QtWidgets.QMessageBox.warning(
None,
"",
message,
buttons=buttons,
defaultButton=accept
)
return state == accept
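
For reference, the shape of the .rigsettings data this action consumes, sketched as a Python literal; the IDs and attribute names are hypothetical:

rigsettings = {
    "inputs": [
        {
            "sourceID": "5e9c3a...",       # cbId on the animation/pointcache node
            "destinationID": "5e9c3b...",  # cbId on the yetiRig node
            "connections": ["worldMesh", "inputGeometry"],  # source, target attrs
        },
    ],
}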

View file

@@ -0,0 +1,332 @@
import os
import copy
from openpype.lib import EnumDef
from openpype.pipeline import (
load,
get_representation_context
)
from openpype.pipeline.load.utils import get_representation_path_from_context
from openpype.pipeline.colorspace import (
get_imageio_colorspace_from_filepath,
get_imageio_config,
get_imageio_file_rules
)
from openpype.settings import get_project_settings
from openpype.hosts.maya.api.pipeline import containerise
from openpype.hosts.maya.api.lib import (
unique_namespace,
namespaced
)
from maya import cmds
def create_texture():
"""Create place2dTexture with file node with uv connections
Mimics Maya "file [Texture]" creation.
"""
place = cmds.shadingNode("place2dTexture", asUtility=True, name="place2d")
file = cmds.shadingNode("file", asTexture=True, name="file")
connections = ["coverage", "translateFrame", "rotateFrame", "rotateUV",
"mirrorU", "mirrorV", "stagger", "wrapV", "wrapU",
"repeatUV", "offset", "noiseUV", "vertexUvThree",
"vertexUvTwo", "vertexUvOne", "vertexCameraOne"]
for attr in connections:
src = "{}.{}".format(place, attr)
dest = "{}.{}".format(file, attr)
cmds.connectAttr(src, dest)
cmds.connectAttr(place + '.outUV', file + '.uvCoord')
cmds.connectAttr(place + '.outUvFilterSize', file + '.uvFilterSize')
return file, place
def create_projection():
"""Create texture with place3dTexture and projection
Mimics Maya "file [Projection]" creation.
"""
file, place = create_texture()
projection = cmds.shadingNode("projection", asTexture=True,
name="projection")
place3d = cmds.shadingNode("place3dTexture", asUtility=True,
name="place3d")
cmds.connectAttr(place3d + '.worldInverseMatrix[0]',
projection + ".placementMatrix")
cmds.connectAttr(file + '.outColor', projection + ".image")
return file, place, projection, place3d
def create_stencil():
"""Create texture with extra place2dTexture offset and stencil
Mimics Maya "file [Stencil]" creation.
"""
file, place = create_texture()
place_stencil = cmds.shadingNode("place2dTexture", asUtility=True,
name="place2d_stencil")
stencil = cmds.shadingNode("stencil", asTexture=True, name="stencil")
for src_attr, dest_attr in [
("outUV", "uvCoord"),
("outUvFilterSize", "uvFilterSize")
]:
src_plug = "{}.{}".format(place_stencil, src_attr)
cmds.connectAttr(src_plug, "{}.{}".format(place, dest_attr))
cmds.connectAttr(src_plug, "{}.{}".format(stencil, dest_attr))
return file, place, stencil, place_stencil
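
A sketch of how these helpers are used inside a Maya session; the returned names are Maya's auto-numbered node names, so the actual values may differ:

file_node, place2d = create_texture()
file_node, place2d, projection, place3d = create_projection()
file_node, place2d, stencil, place2d_stencil = create_stencil()
# e.g. file_node -> "file1", projection -> "projection1" (hypothetical names)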
class FileNodeLoader(load.LoaderPlugin):
"""File node loader."""
families = ["image", "plate", "render"]
label = "Load file node"
representations = ["exr", "tif", "png", "jpg"]
icon = "image"
color = "orange"
order = 2
options = [
EnumDef(
"mode",
items={
"texture": "Texture",
"projection": "Projection",
"stencil": "Stencil"
},
default="texture",
label="Texture Mode"
)
]
def load(self, context, name, namespace, data):
asset = context['asset']['name']
namespace = namespace or unique_namespace(
asset + "_",
prefix="_" if asset[0].isdigit() else "",
suffix="_",
)
with namespaced(namespace, new=True) as namespace:
# Create the nodes within the namespace
nodes = {
"texture": create_texture,
"projection": create_projection,
"stencil": create_stencil
}[data.get("mode", "texture")]()
file_node = cmds.ls(nodes, type="file")[0]
self._apply_representation_context(context, file_node)
# For the user's convenience select all the nodes, with the file
# node last so that the UI shows its attributes by default
cmds.select(list(nodes) + [file_node], replace=True)
return containerise(
name=name,
namespace=namespace,
nodes=nodes,
context=context,
loader=self.__class__.__name__
)
def update(self, container, representation):
members = cmds.sets(container['objectName'], query=True)
file_node = cmds.ls(members, type="file")[0]
context = get_representation_context(representation)
self._apply_representation_context(context, file_node)
# Update representation
cmds.setAttr(
container["objectName"] + ".representation",
str(representation["_id"]),
type="string"
)
def switch(self, container, representation):
self.update(container, representation)
def remove(self, container):
members = cmds.sets(container['objectName'], query=True)
cmds.lockNode(members, lock=False)
cmds.delete([container['objectName']] + members)
# Clean up the namespace
try:
cmds.namespace(removeNamespace=container['namespace'],
deleteNamespaceContent=True)
except RuntimeError:
pass
def _apply_representation_context(self, context, file_node):
"""Update the file node to match the context.
This sets the file node's attributes for:
- file path
- UDIM tiling mode (if it is a UDIM tile)
- use frame extension (if it is a sequence)
- colorspace
"""
repre_context = context["representation"]["context"]
has_frames = repre_context.get("frame") is not None
has_udim = repre_context.get("udim") is not None
# Set UV tiling mode if UDIM tiles
if has_udim:
cmds.setAttr(file_node + ".uvTilingMode", 3) # UDIM-tiles
else:
cmds.setAttr(file_node + ".uvTilingMode", 0) # off
# Enable sequence if the publish has `frameStart` and `frameEnd`
# data and `frameStart != frameEnd`
if has_frames and self._is_sequence(context):
# When enabling useFrameExtension maya automatically
# connects an expression to <file>.frameExtension to set
# the current frame. However, this expression is generated
# with some delay and thus it'll show a warning if frame 0
# doesn't exist because we're explicitly setting the <f>
# token.
cmds.setAttr(file_node + ".useFrameExtension", True)
else:
cmds.setAttr(file_node + ".useFrameExtension", False)
# Set the file node path attribute
path = self._format_path(context)
cmds.setAttr(file_node + ".fileTextureName", path, type="string")
# Set colorspace
colorspace = self._get_colorspace(context)
if colorspace:
cmds.setAttr(file_node + ".colorSpace", colorspace, type="string")
else:
self.log.debug("Unknown colorspace - setting colorspace skipped.")
def _is_sequence(self, context):
"""Check whether frameStart and frameEnd are not the same."""
version = context.get("version", {})
representation = context.get("representation", {})
for doc in [representation, version]:
# Frame range can be set on version or representation.
# When set on representation it overrides version data.
data = doc.get("data", {})
start = data.get("frameStartHandle", data.get("frameStart", None))
end = data.get("frameEndHandle", data.get("frameEnd", None))
if start is None or end is None:
continue
if start != end:
return True
else:
return False
return False
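# For example (hypothetical data): frameStart=1001 and frameEnd=1050 on
# the version yields True (a sequence), while frameStart == frameEnd
# yields False (a single frame). Representation data, when set, wins.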
def _get_colorspace(self, context):
"""Return colorspace of the file to load.
Retrieves the explicit colorspace from the publish. If no colorspace
data is stored with published content then project imageio settings
are used to make an assumption of the colorspace based on the file
rules. If no file rules match then None is returned.
Returns:
str or None: The colorspace of the file or None if not detected.
"""
# We can't apply color spaces if management is not enabled
if not cmds.colorManagementPrefs(query=True, cmEnabled=True):
return
representation = context["representation"]
colorspace_data = representation.get("data", {}).get("colorspaceData")
if colorspace_data:
return colorspace_data["colorspace"]
# Assume colorspace from filepath based on project settings
project_name = context["project"]["name"]
host_name = os.environ.get("AVALON_APP")
project_settings = get_project_settings(project_name)
config_data = get_imageio_config(
project_name, host_name,
project_settings=project_settings
)
file_rules = get_imageio_file_rules(
project_name, host_name,
project_settings=project_settings
)
path = get_representation_path_from_context(context)
colorspace = get_imageio_colorspace_from_filepath(
path=path,
host_name=host_name,
project_name=project_name,
config_data=config_data,
file_rules=file_rules,
project_settings=project_settings
)
return colorspace
def _format_path(self, context):
"""Format the path with correct tokens for frames and udim tiles."""
context = copy.deepcopy(context)
representation = context["representation"]
template = representation.get("data", {}).get("template")
if not template:
# No template to find token locations for
return get_representation_path_from_context(context)
def _placeholder(key):
# Substitute with a long placeholder value so that potential
# custom formatting with padding doesn't find its way into
# our formatting, so that <f> wouldn't be padded as 0<f>
return "___{}___".format(key)
# We format UDIM and Frame numbers with their specific tokens. To do so
# we in-place change the representation context data to format the path
# with our own data
tokens = {
"frame": "<f>",
"udim": "<UDIM>"
}
has_tokens = False
repre_context = representation["context"]
for key, _token in tokens.items():
if key in repre_context:
repre_context[key] = _placeholder(key)
has_tokens = True
# Replace with our custom template that has the tokens set
representation["data"]["template"] = template
path = get_representation_path_from_context(context)
if has_tokens:
for key, token in tokens.items():
if key in repre_context:
path = path.replace(_placeholder(key), token)
return path
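# A minimal sketch of the placeholder trick above with a hypothetical
# template; formatting with long placeholders first means a custom format
# spec such as {frame:0>4} cannot pad the literal tokens:
#
#     template = "render.{frame:0>4}.{udim}.exr"
#     path = template.format(frame="___frame___", udim="___udim___")
#     for key, token in {"frame": "<f>", "udim": "<UDIM>"}.items():
#         path = path.replace("___{}___".format(key), token)
#     # path == "render.<f>.<UDIM>.exr"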

View file

@ -1,17 +1,12 @@
import os
from collections import defaultdict
import maya.cmds as cmds
from openpype.settings import get_project_settings
from openpype.settings import get_current_project_settings
import openpype.hosts.maya.api.plugin
from openpype.hosts.maya.api import lib
class YetiRigLoader(openpype.hosts.maya.api.plugin.ReferenceLoader):
"""
This loader will load Yeti rig. You can select something in scene and if it
has same ID as mesh published with rig, their shapes will be linked
together.
"""
"""This loader will load Yeti rig."""
families = ["yetiRig"]
representations = ["ma"]
@ -22,72 +17,31 @@ class YetiRigLoader(openpype.hosts.maya.api.plugin.ReferenceLoader):
color = "orange"
def process_reference(
self, context, name=None, namespace=None, options=None):
self, context, name=None, namespace=None, options=None
):
import maya.cmds as cmds
# get roots of selected hierarchies
selected_roots = []
for sel in cmds.ls(sl=True, long=True):
selected_roots.append(sel.split("|")[1])
# get all objects under those roots
selected_hierarchy = []
for root in selected_roots:
selected_hierarchy.append(cmds.listRelatives(
root,
allDescendents=True) or [])
# flatten the list and filter only shapes
shapes_flat = []
for root in selected_hierarchy:
shapes = cmds.ls(root, long=True, type="mesh") or []
for shape in shapes:
shapes_flat.append(shape)
# create dictionary of cbId and shape nodes
scene_lookup = defaultdict(list)
for node in shapes_flat:
cb_id = lib.get_id(node)
scene_lookup[cb_id] = node
# load rig
group_name = "{}:{}".format(namespace, name)
with lib.maintained_selection():
file_url = self.prepare_root_value(self.fname,
context["project"]["name"])
nodes = cmds.file(file_url,
namespace=namespace,
reference=True,
returnNewNodes=True,
groupReference=True,
groupName="{}:{}".format(namespace, name))
file_url = self.prepare_root_value(
self.fname, context["project"]["name"]
)
nodes = cmds.file(
file_url,
namespace=namespace,
reference=True,
returnNewNodes=True,
groupReference=True,
groupName=group_name
)
# for every shape node we've just loaded find matching shape by its
# cbId in selection. If found outMesh of scene shape will connect to
# inMesh of loaded shape.
for destination_node in nodes:
source_node = scene_lookup[lib.get_id(destination_node)]
if source_node:
self.log.info("found: {}".format(source_node))
self.log.info(
"creating connection to {}".format(destination_node))
cmds.connectAttr("{}.outMesh".format(source_node),
"{}.inMesh".format(destination_node),
force=True)
groupName = "{}:{}".format(namespace, name)
settings = get_project_settings(os.environ['AVALON_PROJECT'])
colors = settings['maya']['load']['colors']
c = colors.get('yetiRig')
settings = get_current_project_settings()
colors = settings["maya"]["load"]["colors"]
c = colors.get("yetiRig")
if c is not None:
cmds.setAttr(groupName + ".useOutlinerColor", 1)
cmds.setAttr(groupName + ".outlinerColor",
(float(c[0])/255),
(float(c[1])/255),
(float(c[2])/255)
cmds.setAttr(group_name + ".useOutlinerColor", 1)
cmds.setAttr(
group_name + ".outlinerColor",
(float(c[0]) / 255), (float(c[1]) / 255), (float(c[2]) / 255)
)
self[:] = nodes
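# For example, a hypothetical settings color c = [255, 128, 0] results
# in an orange outliner color of roughly (1.0, 0.502, 0.0).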

View file

@ -24,7 +24,9 @@ class CollectReview(pyblish.api.InstancePlugin):
task = legacy_io.Session["AVALON_TASK"]
# Get panel.
instance.data["panel"] = cmds.playblast(activeEditor=True)
instance.data["panel"] = cmds.playblast(
activeEditor=True
).split("|")[-1]
# get cameras
members = instance.data['setMembers']

View file

@ -1,12 +1,10 @@
# -*- coding: utf-8 -*-
"""Maya look extractor."""
import os
import sys
import json
import tempfile
import platform
import contextlib
import subprocess
from collections import OrderedDict
from maya import cmds # noqa
@ -23,6 +21,15 @@ COPY = 1
HARDLINK = 2
def _has_arnold():
"""Return whether the arnold package is available and can be imported."""
try:
import arnold # noqa: F401
return True
except (ImportError, ModuleNotFoundError):
return False
def escape_space(path):
"""Ensure path is enclosed by quotes to allow paths with spaces"""
return '"{}"'.format(path) if " " in path else path
@ -548,7 +555,7 @@ class ExtractLook(publish.Extractor):
color_space = cmds.getAttr(color_space_attr)
except ValueError:
# node doesn't have color space attribute
if cmds.loadPlugin("mtoa", quiet=True):
if _has_arnold():
img_info = image_info(filepath)
color_space = guess_colorspace(img_info)
else:
@ -560,7 +567,7 @@ class ExtractLook(publish.Extractor):
render_colorspace])
else:
if cmds.loadPlugin("mtoa", quiet=True):
if _has_arnold():
img_info = image_info(filepath)
color_space = guess_colorspace(img_info)
if color_space == "sRGB":

View file

@ -13,6 +13,22 @@ from openpype.pipeline.publish import (
from openpype.hosts.maya.api import lib
def convert_to_int_or_float(string_value):
# Order of types are important here since float can convert string
# representation of integer.
types = [int, float]
for t in types:
try:
result = t(string_value)
except ValueError:
continue
else:
return result
# Neither integer nor float.
return string_value
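# Expected behaviour (hypothetical inputs):
#   convert_to_int_or_float("25")  -> 25     (int, since int is tried first)
#   convert_to_int_or_float("2.5") -> 2.5
#   convert_to_int_or_float("on")  -> "on"   (returned unchanged)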
def get_redshift_image_format_labels():
"""Return nice labels for Redshift image formats."""
var = "$g_redshiftImageFormatLabels"
@ -242,10 +258,6 @@ class ValidateRenderSettings(pyblish.api.InstancePlugin):
cls.DEFAULT_PADDING, "0" * cls.DEFAULT_PADDING))
# load validation definitions from settings
validation_settings = (
instance.context.data["project_settings"]["maya"]["publish"]["ValidateRenderSettings"].get( # noqa: E501
"{}_render_attributes".format(renderer)) or []
)
settings_lights_flag = instance.context.data["project_settings"].get(
"maya", {}).get(
"RenderSettings", {}).get(
@ -253,17 +265,67 @@ class ValidateRenderSettings(pyblish.api.InstancePlugin):
instance_lights_flag = instance.data.get("renderSetupIncludeLights")
if settings_lights_flag != instance_lights_flag:
cls.log.warning('Instance flag for "Render Setup Include Lights" is set to {0} and Settings flag is set to {1}'.format(instance_lights_flag, settings_lights_flag)) # noqa
cls.log.warning(
"Instance flag for \"Render Setup Include Lights\" is set to "
"{} and Settings flag is set to {}".format(
instance_lights_flag, settings_lights_flag
)
)
# go through definitions and test if such node.attribute exists.
# if so, compare its value from the one required.
for attr, value in OrderedDict(validation_settings).items():
cls.log.debug("{}: {}".format(attr, value))
if "." not in attr:
cls.log.warning("Skipping invalid attribute defined in "
"validation settings: '{}'".format(attr))
for attribute, data in cls.get_nodes(instance, renderer).items():
# Validate the settings have values.
if not data["values"]:
cls.log.error(
"Settings for attribute \"{}\" are missing values.".format(
attribute
)
)
continue
for node in data["nodes"]:
try:
render_value = cmds.getAttr(
"{}.{}".format(node, attribute)
)
except RuntimeError:
invalid = True
cls.log.error(
"Cannot get value of {}.{}".format(node, attribute)
)
else:
if render_value not in data["values"]:
invalid = True
cls.log.error(
"Invalid value {} set on {}.{}. Expecting "
"{}".format(
render_value, node, attribute, data["values"]
)
)
return invalid
@classmethod
def get_nodes(cls, instance, renderer):
maya_settings = instance.context.data["project_settings"]["maya"]
validation_settings = (
maya_settings["publish"]["ValidateRenderSettings"].get(
"{}_render_attributes".format(renderer)
) or []
)
result = {}
for attr, values in OrderedDict(validation_settings).items():
cls.log.debug("{}: {}".format(attr, values))
if "." not in attr:
cls.log.warning(
"Skipping invalid attribute defined in validation "
"settings: \"{}\"".format(attr)
)
continue
values = [convert_to_int_or_float(v) for v in values]
node_type, attribute_name = attr.split(".", 1)
# first get node of that type
@ -271,28 +333,13 @@ class ValidateRenderSettings(pyblish.api.InstancePlugin):
if not nodes:
cls.log.warning(
"No nodes of type '{}' found.".format(node_type))
"No nodes of type \"{}\" found.".format(node_type)
)
continue
for node in nodes:
try:
render_value = cmds.getAttr(
"{}.{}".format(node, attribute_name))
except RuntimeError:
invalid = True
cls.log.error(
"Cannot get value of {}.{}".format(
node, attribute_name))
else:
if str(value) != str(render_value):
invalid = True
cls.log.error(
("Invalid value {} set on {}.{}. "
"Expecting {}").format(
render_value, node, attribute_name, value)
)
result[attribute_name] = {"nodes": nodes, "values": values}
return invalid
return result
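# The returned mapping has one entry per validated attribute, e.g. with
# a hypothetical Redshift rule:
#   {"imageFormat": {"nodes": ["redshiftOptions"], "values": [1, 2]}}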
@classmethod
def repair(cls, instance):
@ -305,6 +352,12 @@ class ValidateRenderSettings(pyblish.api.InstancePlugin):
"{aov_separator}", instance.data.get("aovSeparator", "_")
)
for attribute, data in cls.get_nodes(instance, renderer).items():
if not data["values"]:
continue
for node in data["nodes"]:
lib.set_attribute(attribute, data["values"][0], node)
with lib.renderlayer(layer_node):
default = lib.RENDER_ATTRS['default']
render_attrs = lib.RENDER_ATTRS.get(renderer, default)

View file

@ -48,6 +48,18 @@ class ValidateYetiRenderScriptCallbacks(pyblish.api.InstancePlugin):
yeti_loaded = cmds.pluginInfo("pgYetiMaya", query=True, loaded=True)
if not yeti_loaded and not cmds.ls(type="pgYetiMaya"):
# The yeti plug-in is not loaded and the scene has no yeti
# nodes, so we don't care whether any yeti callback is set:
# without the plug-in and yeti nodes the callbacks never run
# and nothing can fail.
cls.log.info(
"Yeti plug-in is not loaded and no yeti nodes were found. "
"Callback validation skipped."
)
return False
renderer = instance.data["renderer"]
if renderer == "redshift":
cls.log.info("Redshift ignores any pre and post render callbacks")

View file

@ -375,7 +375,7 @@ def get_ffmpeg_tool_path(tool="ffmpeg"):
# Look to PATH for the tool
if not tool_executable_path:
from_path = find_executable(tool)
if from_path and _oiio_executable_validation(from_path):
if from_path and _ffmpeg_executable_validation(from_path):
tool_executable_path = from_path
CachedToolPaths.cache_executable_path(tool, tool_executable_path)

View file

@ -6,34 +6,22 @@ import datetime
import requests
from .constants import (
CLOCKIFY_ENDPOINT,
ADMIN_PERMISSION_NAMES
ADMIN_PERMISSION_NAMES,
)
from openpype.lib.local_settings import OpenPypeSecureRegistry
def time_check(obj):
if obj.request_counter < 10:
obj.request_counter += 1
return
wait_time = 1 - (time.time() - obj.request_time)
if wait_time > 0:
time.sleep(wait_time)
obj.request_time = time.time()
obj.request_counter = 0
from openpype.lib import Logger
class ClockifyAPI:
log = Logger.get_logger(__name__)
def __init__(self, api_key=None, master_parent=None):
self.workspace_name = None
self.workspace_id = None
self.master_parent = master_parent
self.api_key = api_key
self.request_counter = 0
self.request_time = time.time()
self._workspace_id = None
self._user_id = None
self._secure_registry = None
@property
@ -44,11 +32,19 @@ class ClockifyAPI:
@property
def headers(self):
return {"X-Api-Key": self.api_key}
return {"x-api-key": self.api_key}
@property
def workspace_id(self):
return self._workspace_id
@property
def user_id(self):
return self._user_id
def verify_api(self):
for key, value in self.headers.items():
if value is None or value.strip() == '':
if value is None or value.strip() == "":
return False
return True
@ -59,65 +55,55 @@ class ClockifyAPI:
if api_key is not None and self.validate_api_key(api_key) is True:
self.api_key = api_key
self.set_workspace()
self.set_user_id()
if self.master_parent:
self.master_parent.signed_in()
return True
return False
def validate_api_key(self, api_key):
test_headers = {'X-Api-Key': api_key}
action_url = 'workspaces/'
time_check(self)
test_headers = {"x-api-key": api_key}
action_url = "user"
response = requests.get(
CLOCKIFY_ENDPOINT + action_url,
headers=test_headers
CLOCKIFY_ENDPOINT + action_url, headers=test_headers
)
if response.status_code != 200:
return False
return True
def validate_workspace_perm(self, workspace_id=None):
user_id = self.get_user_id()
def validate_workspace_permissions(self, workspace_id=None, user_id=None):
if user_id is None:
self.log.info("No user_id found during validation")
return False
if workspace_id is None:
workspace_id = self.workspace_id
action_url = "/workspaces/{}/users/{}/permissions".format(
workspace_id, user_id
)
time_check(self)
action_url = f"workspaces/{workspace_id}/users?includeRoles=1"
response = requests.get(
CLOCKIFY_ENDPOINT + action_url,
headers=self.headers
CLOCKIFY_ENDPOINT + action_url, headers=self.headers
)
user_permissions = response.json()
for perm in user_permissions:
if perm['name'] in ADMIN_PERMISSION_NAMES:
data = response.json()
for user in data:
if user.get("id") == user_id:
roles_data = user.get("roles")
for entities in roles_data:
if entities.get("role") in ADMIN_PERMISSION_NAMES:
return True
return False
def get_user_id(self):
action_url = 'v1/user/'
time_check(self)
action_url = "user"
response = requests.get(
CLOCKIFY_ENDPOINT + action_url,
headers=self.headers
CLOCKIFY_ENDPOINT + action_url, headers=self.headers
)
# this regex is necessary: UNICODE strings are crashing
# during json serialization
id_regex = '\"{1}id\"{1}\:{1}\"{1}\w+\"{1}'
result = re.findall(id_regex, str(response.content))
if len(result) != 1:
# replace with log and better message?
print('User ID was not found (this is a BUG!!!)')
return None
return json.loads('{'+result[0]+'}')['id']
result = response.json()
user_id = result.get("id", None)
return user_id
def set_workspace(self, name=None):
if name is None:
name = os.environ.get('CLOCKIFY_WORKSPACE', None)
name = os.environ.get("CLOCKIFY_WORKSPACE", None)
self.workspace_name = name
self.workspace_id = None
if self.workspace_name is None:
return
try:
@ -125,7 +111,7 @@ class ClockifyAPI:
except Exception:
result = False
if result is not False:
self.workspace_id = result
self._workspace_id = result
if self.master_parent is not None:
self.master_parent.start_timer_check()
return True
@ -139,6 +125,14 @@ class ClockifyAPI:
return all_workspaces[name]
return False
def set_user_id(self):
try:
user_id = self.get_user_id()
except Exception:
user_id = None
if user_id is not None:
self._user_id = user_id
def get_api_key(self):
return self.secure_registry.get_item("api_key", None)
@ -146,11 +140,9 @@ class ClockifyAPI:
self.secure_registry.set_item("api_key", api_key)
def get_workspaces(self):
action_url = 'workspaces/'
time_check(self)
action_url = "workspaces/"
response = requests.get(
CLOCKIFY_ENDPOINT + action_url,
headers=self.headers
CLOCKIFY_ENDPOINT + action_url, headers=self.headers
)
return {
workspace["name"]: workspace["id"] for workspace in response.json()
@ -159,27 +151,22 @@ class ClockifyAPI:
def get_projects(self, workspace_id=None):
if workspace_id is None:
workspace_id = self.workspace_id
action_url = 'workspaces/{}/projects/'.format(workspace_id)
time_check(self)
action_url = f"workspaces/{workspace_id}/projects"
response = requests.get(
CLOCKIFY_ENDPOINT + action_url,
headers=self.headers
CLOCKIFY_ENDPOINT + action_url, headers=self.headers
)
return {
project["name"]: project["id"] for project in response.json()
}
if response.status_code != 403:
result = response.json()
return {project["name"]: project["id"] for project in result}
def get_project_by_id(self, project_id, workspace_id=None):
if workspace_id is None:
workspace_id = self.workspace_id
action_url = 'workspaces/{}/projects/{}/'.format(
action_url = "workspaces/{}/projects/{}".format(
workspace_id, project_id
)
time_check(self)
response = requests.get(
CLOCKIFY_ENDPOINT + action_url,
headers=self.headers
CLOCKIFY_ENDPOINT + action_url, headers=self.headers
)
return response.json()
@ -187,32 +174,24 @@ class ClockifyAPI:
def get_tags(self, workspace_id=None):
if workspace_id is None:
workspace_id = self.workspace_id
action_url = 'workspaces/{}/tags/'.format(workspace_id)
time_check(self)
action_url = "workspaces/{}/tags".format(workspace_id)
response = requests.get(
CLOCKIFY_ENDPOINT + action_url,
headers=self.headers
CLOCKIFY_ENDPOINT + action_url, headers=self.headers
)
return {
tag["name"]: tag["id"] for tag in response.json()
}
return {tag["name"]: tag["id"] for tag in response.json()}
def get_tasks(self, project_id, workspace_id=None):
if workspace_id is None:
workspace_id = self.workspace_id
action_url = 'workspaces/{}/projects/{}/tasks/'.format(
action_url = "workspaces/{}/projects/{}/tasks".format(
workspace_id, project_id
)
time_check(self)
response = requests.get(
CLOCKIFY_ENDPOINT + action_url,
headers=self.headers
CLOCKIFY_ENDPOINT + action_url, headers=self.headers
)
return {
task["name"]: task["id"] for task in response.json()
}
return {task["name"]: task["id"] for task in response.json()}
def get_workspace_id(self, workspace_name):
all_workspaces = self.get_workspaces()
@ -236,48 +215,64 @@ class ClockifyAPI:
return None
return all_tasks[tag_name]
def get_task_id(
self, task_name, project_id, workspace_id=None
):
def get_task_id(self, task_name, project_id, workspace_id=None):
if workspace_id is None:
workspace_id = self.workspace_id
all_tasks = self.get_tasks(
project_id, workspace_id
)
all_tasks = self.get_tasks(project_id, workspace_id)
if task_name not in all_tasks:
return None
return all_tasks[task_name]
def get_current_time(self):
return str(datetime.datetime.utcnow().isoformat())+'Z'
return str(datetime.datetime.utcnow().isoformat()) + "Z"
def start_time_entry(
self, description, project_id, task_id=None, tag_ids=[],
workspace_id=None, billable=True
self,
description,
project_id,
task_id=None,
tag_ids=None,
workspace_id=None,
user_id=None,
billable=True,
):
# Workspace
if workspace_id is None:
workspace_id = self.workspace_id
# User ID
if user_id is None:
user_id = self._user_id
# get running timer to check if we need to start it
current_timer = self.get_in_progress()
# Check if another timer is currently running with the same values
current = self.get_in_progress(workspace_id)
if current is not None:
# Do not restart the timer if it is already running for the current task
if current_timer:
current_timer_hierarchy = current_timer.get("description")
current_project_id = current_timer.get("projectId")
current_task_id = current_timer.get("taskId")
if (
current.get("description", None) == description and
current.get("projectId", None) == project_id and
current.get("taskId", None) == task_id
description == current_timer_hierarchy
and project_id == current_project_id
and task_id == current_task_id
):
self.log.info(
"Timer for the current project is already running"
)
self.bool_timer_run = True
return self.bool_timer_run
self.finish_time_entry(workspace_id)
self.finish_time_entry()
# Convert billable to strings
if billable:
billable = 'true'
billable = "true"
else:
billable = 'false'
billable = "false"
# Rest API Action
action_url = 'workspaces/{}/timeEntries/'.format(workspace_id)
action_url = "workspaces/{}/user/{}/time-entries".format(
workspace_id, user_id
)
start = self.get_current_time()
body = {
"start": start,
@ -285,169 +280,135 @@ class ClockifyAPI:
"description": description,
"projectId": project_id,
"taskId": task_id,
"tagIds": tag_ids
"tagIds": tag_ids,
}
time_check(self)
response = requests.post(
CLOCKIFY_ENDPOINT + action_url,
headers=self.headers,
json=body
CLOCKIFY_ENDPOINT + action_url, headers=self.headers, json=body
)
success = False
if response.status_code < 300:
success = True
return success
return True
return False
def get_in_progress(self, workspace_id=None):
if workspace_id is None:
workspace_id = self.workspace_id
action_url = 'workspaces/{}/timeEntries/inProgress'.format(
workspace_id
)
time_check(self)
response = requests.get(
CLOCKIFY_ENDPOINT + action_url,
headers=self.headers
)
def _get_current_timer_values(self, response):
if response is None:
return
try:
output = response.json()
except json.decoder.JSONDecodeError:
output = None
return output
return None
if output and isinstance(output, list):
return output[0]
return None
def finish_time_entry(self, workspace_id=None):
def get_in_progress(self, user_id=None, workspace_id=None):
if workspace_id is None:
workspace_id = self.workspace_id
current = self.get_in_progress(workspace_id)
if current is None:
return
if user_id is None:
user_id = self.user_id
current_id = current["id"]
action_url = 'workspaces/{}/timeEntries/{}'.format(
workspace_id, current_id
action_url = (
f"workspaces/{workspace_id}/user/"
f"{user_id}/time-entries?in-progress=1"
)
body = {
"start": current["timeInterval"]["start"],
"billable": current["billable"],
"description": current["description"],
"projectId": current["projectId"],
"taskId": current["taskId"],
"tagIds": current["tagIds"],
"end": self.get_current_time()
}
time_check(self)
response = requests.put(
CLOCKIFY_ENDPOINT + action_url,
headers=self.headers,
json=body
response = requests.get(
CLOCKIFY_ENDPOINT + action_url, headers=self.headers
)
return self._get_current_timer_values(response)
def finish_time_entry(self, workspace_id=None, user_id=None):
if workspace_id is None:
workspace_id = self.workspace_id
if user_id is None:
user_id = self.user_id
current_timer = self.get_in_progress()
if not current_timer:
return
action_url = "workspaces/{}/user/{}/time-entries".format(
workspace_id, user_id
)
body = {"end": self.get_current_time()}
response = requests.patch(
CLOCKIFY_ENDPOINT + action_url, headers=self.headers, json=body
)
return response.json()
def get_time_entries(
self, workspace_id=None, quantity=10
):
def get_time_entries(self, workspace_id=None, user_id=None, quantity=10):
if workspace_id is None:
workspace_id = self.workspace_id
action_url = 'workspaces/{}/timeEntries/'.format(workspace_id)
time_check(self)
if user_id is None:
user_id = self.user_id
action_url = "workspaces/{}/user/{}/time-entries".format(
workspace_id, user_id
)
response = requests.get(
CLOCKIFY_ENDPOINT + action_url,
headers=self.headers
CLOCKIFY_ENDPOINT + action_url, headers=self.headers
)
return response.json()[:quantity]
def remove_time_entry(self, tid, workspace_id=None):
def remove_time_entry(self, tid, workspace_id=None, user_id=None):
if workspace_id is None:
workspace_id = self.workspace_id
if user_id is None:
user_id = self.user_id
action_url = 'workspaces/{}/timeEntries/{}'.format(
workspace_id, tid
action_url = "workspaces/{}/user/{}/time-entries/{}".format(
workspace_id, user_id, tid
)
time_check(self)
response = requests.delete(
CLOCKIFY_ENDPOINT + action_url,
headers=self.headers
CLOCKIFY_ENDPOINT + action_url, headers=self.headers
)
return response.json()
def add_project(self, name, workspace_id=None):
if workspace_id is None:
workspace_id = self.workspace_id
action_url = 'workspaces/{}/projects/'.format(workspace_id)
action_url = "workspaces/{}/projects".format(workspace_id)
body = {
"name": name,
"clientId": "",
"isPublic": "false",
"estimate": {
"estimate": 0,
"type": "AUTO"
},
"estimate": {"estimate": 0, "type": "AUTO"},
"color": "#f44336",
"billable": "true"
"billable": "true",
}
time_check(self)
response = requests.post(
CLOCKIFY_ENDPOINT + action_url,
headers=self.headers,
json=body
CLOCKIFY_ENDPOINT + action_url, headers=self.headers, json=body
)
return response.json()
def add_workspace(self, name):
action_url = 'workspaces/'
action_url = "workspaces/"
body = {"name": name}
time_check(self)
response = requests.post(
CLOCKIFY_ENDPOINT + action_url,
headers=self.headers,
json=body
CLOCKIFY_ENDPOINT + action_url, headers=self.headers, json=body
)
return response.json()
def add_task(
self, name, project_id, workspace_id=None
):
def add_task(self, name, project_id, workspace_id=None):
if workspace_id is None:
workspace_id = self.workspace_id
action_url = 'workspaces/{}/projects/{}/tasks/'.format(
action_url = "workspaces/{}/projects/{}/tasks".format(
workspace_id, project_id
)
body = {
"name": name,
"projectId": project_id
}
time_check(self)
body = {"name": name, "projectId": project_id}
response = requests.post(
CLOCKIFY_ENDPOINT + action_url,
headers=self.headers,
json=body
CLOCKIFY_ENDPOINT + action_url, headers=self.headers, json=body
)
return response.json()
def add_tag(self, name, workspace_id=None):
if workspace_id is None:
workspace_id = self.workspace_id
action_url = 'workspaces/{}/tags'.format(workspace_id)
body = {
"name": name
}
time_check(self)
action_url = "workspaces/{}/tags".format(workspace_id)
body = {"name": name}
response = requests.post(
CLOCKIFY_ENDPOINT + action_url,
headers=self.headers,
json=body
CLOCKIFY_ENDPOINT + action_url, headers=self.headers, json=body
)
return response.json()
def delete_project(
self, project_id, workspace_id=None
):
def delete_project(self, project_id, workspace_id=None):
if workspace_id is None:
workspace_id = self.workspace_id
action_url = '/workspaces/{}/projects/{}'.format(
action_url = "/workspaces/{}/projects/{}".format(
workspace_id, project_id
)
time_check(self)
response = requests.delete(
CLOCKIFY_ENDPOINT + action_url,
headers=self.headers,
@ -455,12 +416,12 @@ class ClockifyAPI:
return response.json()
def convert_input(
self, entity_id, entity_name, mode='Workspace', project_id=None
self, entity_id, entity_name, mode="Workspace", project_id=None
):
if entity_id is None:
error = False
error_msg = 'Missing information "{}"'
if mode.lower() == 'workspace':
if mode.lower() == "workspace":
if entity_id is None and entity_name is None:
if self.workspace_id is not None:
entity_id = self.workspace_id
@ -471,14 +432,14 @@ class ClockifyAPI:
else:
if entity_id is None and entity_name is None:
error = True
elif mode.lower() == 'project':
elif mode.lower() == "project":
entity_id = self.get_project_id(entity_name)
elif mode.lower() == 'task':
elif mode.lower() == "task":
entity_id = self.get_task_id(
task_name=entity_name, project_id=project_id
)
else:
raise TypeError('Unknown type')
raise TypeError("Unknown type")
# Raise error
if error:
raise ValueError(error_msg.format(mode))

View file

@ -2,24 +2,13 @@ import os
import threading
import time
from openpype.modules import (
OpenPypeModule,
ITrayModule,
IPluginPaths
)
from openpype.modules import OpenPypeModule, ITrayModule, IPluginPaths
from openpype.client import get_asset_by_name
from .clockify_api import ClockifyAPI
from .constants import (
CLOCKIFY_FTRACK_USER_PATH,
CLOCKIFY_FTRACK_SERVER_PATH
)
from .constants import CLOCKIFY_FTRACK_USER_PATH, CLOCKIFY_FTRACK_SERVER_PATH
class ClockifyModule(
OpenPypeModule,
ITrayModule,
IPluginPaths
):
class ClockifyModule(OpenPypeModule, ITrayModule, IPluginPaths):
name = "clockify"
def initialize(self, modules_settings):
@ -33,18 +22,23 @@ class ClockifyModule(
self.timer_manager = None
self.MessageWidgetClass = None
self.message_widget = None
self.clockapi = ClockifyAPI(master_parent=self)
self._clockify_api = None
# TimersManager attributes
# - set `timers_manager_connector` only in `tray_init`
self.timers_manager_connector = None
self._timers_manager_module = None
@property
def clockify_api(self):
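# Lazily import and construct the API wrapper so the module can load
# without pulling in ClockifyAPI (and its HTTP stack) until first use.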
if self._clockify_api is None:
from .clockify_api import ClockifyAPI
self._clockify_api = ClockifyAPI(master_parent=self)
return self._clockify_api
def get_global_environments(self):
return {
"CLOCKIFY_WORKSPACE": self.workspace_name
}
return {"CLOCKIFY_WORKSPACE": self.workspace_name}
def tray_init(self):
from .widgets import ClockifySettings, MessageWidget
@ -52,7 +46,7 @@ class ClockifyModule(
self.MessageWidgetClass = MessageWidget
self.message_widget = None
self.widget_settings = ClockifySettings(self.clockapi)
self.widget_settings = ClockifySettings(self.clockify_api)
self.widget_settings_required = None
self.thread_timer_check = None
@ -61,7 +55,7 @@ class ClockifyModule(
self.bool_api_key_set = False
self.bool_workspace_set = False
self.bool_timer_run = False
self.bool_api_key_set = self.clockapi.set_api()
self.bool_api_key_set = self.clockify_api.set_api()
# Define itself as TimersManager connector
self.timers_manager_connector = self
@ -71,12 +65,11 @@ class ClockifyModule(
self.show_settings()
return
self.bool_workspace_set = self.clockapi.workspace_id is not None
self.bool_workspace_set = self.clockify_api.workspace_id is not None
if self.bool_workspace_set is False:
return
self.start_timer_check()
self.set_menu_visibility()
def tray_exit(self, *_a, **_kw):
@ -85,23 +78,19 @@ class ClockifyModule(
def get_plugin_paths(self):
"""Implementaton of IPluginPaths to get plugin paths."""
actions_path = os.path.join(
os.path.dirname(os.path.abspath(__file__)),
"launcher_actions"
os.path.dirname(os.path.abspath(__file__)), "launcher_actions"
)
return {
"actions": [actions_path]
}
return {"actions": [actions_path]}
def get_ftrack_event_handler_paths(self):
"""Function for Ftrack module to add ftrack event handler paths."""
return {
"user": [CLOCKIFY_FTRACK_USER_PATH],
"server": [CLOCKIFY_FTRACK_SERVER_PATH]
"server": [CLOCKIFY_FTRACK_SERVER_PATH],
}
def clockify_timer_stopped(self):
self.bool_timer_run = False
# Call `ITimersManager` method
self.timer_stopped()
def start_timer_check(self):
@ -122,45 +111,44 @@ class ClockifyModule(
def check_running(self):
while self.bool_thread_check_running is True:
bool_timer_run = False
if self.clockapi.get_in_progress() is not None:
if self.clockify_api.get_in_progress() is not None:
bool_timer_run = True
if self.bool_timer_run != bool_timer_run:
if self.bool_timer_run is True:
self.clockify_timer_stopped()
elif self.bool_timer_run is False:
actual_timer = self.clockapi.get_in_progress()
if not actual_timer:
current_timer = self.clockify_api.get_in_progress()
if current_timer is None:
continue
current_proj_id = current_timer.get("projectId")
if not current_proj_id:
continue
actual_proj_id = actual_timer["projectId"]
if not actual_proj_id:
continue
project = self.clockapi.get_project_by_id(actual_proj_id)
project = self.clockify_api.get_project_by_id(
current_proj_id
)
if project and project.get("code") == 501:
continue
project_name = project["name"]
project_name = project.get("name")
actual_timer_hierarchy = actual_timer["description"]
hierarchy_items = actual_timer_hierarchy.split("/")
current_timer_hierarchy = current_timer.get("description")
if not current_timer_hierarchy:
continue
hierarchy_items = current_timer_hierarchy.split("/")
# Each pype timer must have at least 2 items!
if len(hierarchy_items) < 2:
continue
task_name = hierarchy_items[-1]
hierarchy = hierarchy_items[:-1]
task_type = None
if len(actual_timer.get("tags", [])) > 0:
task_type = actual_timer["tags"][0].get("name")
data = {
"task_name": task_name,
"hierarchy": hierarchy,
"project_name": project_name,
"task_type": task_type
}
# Call `ITimersManager` method
self.timer_started(data)
self.bool_timer_run = bool_timer_run
@ -184,6 +172,7 @@ class ClockifyModule(
def tray_menu(self, parent_menu):
# Menu for Tray App
from qtpy import QtWidgets
menu = QtWidgets.QMenu("Clockify", parent_menu)
menu.setProperty("submenu", "on")
@ -204,7 +193,9 @@ class ClockifyModule(
parent_menu.addMenu(menu)
def show_settings(self):
self.widget_settings.input_api_key.setText(self.clockapi.get_api_key())
self.widget_settings.input_api_key.setText(
self.clockify_api.get_api_key()
)
self.widget_settings.show()
def set_menu_visibility(self):
@ -218,72 +209,82 @@ class ClockifyModule(
def timer_started(self, data):
"""Tell TimersManager that timer started."""
if self._timers_manager_module is not None:
self._timers_manager_module.timer_started(self._module.id, data)
self._timers_manager_module.timer_started(self.id, data)
def timer_stopped(self):
"""Tell TimersManager that timer stopped."""
if self._timers_manager_module is not None:
self._timers_manager_module.timer_stopped(self._module.id)
self._timers_manager_module.timer_stopped(self.id)
def stop_timer(self):
"""Called from TimersManager to stop timer."""
self.clockapi.finish_time_entry()
self.clockify_api.finish_time_entry()
def start_timer(self, input_data):
"""Called from TimersManager to start timer."""
# If the API key is not entered then skip
if not self.clockapi.get_api_key():
return
actual_timer = self.clockapi.get_in_progress()
actual_timer_hierarchy = None
actual_project_id = None
if actual_timer is not None:
actual_timer_hierarchy = actual_timer.get("description")
actual_project_id = actual_timer.get("projectId")
# Concatenate hierarchy and task to get description
desc_items = [val for val in input_data.get("hierarchy", [])]
desc_items.append(input_data["task_name"])
description = "/".join(desc_items)
# Check project existence
project_name = input_data["project_name"]
project_id = self.clockapi.get_project_id(project_name)
def _verify_project_exists(self, project_name):
project_id = self.clockify_api.get_project_id(project_name)
if not project_id:
self.log.warning((
"Project \"{}\" was not found in Clockify. Timer won't start."
).format(project_name))
self.log.warning(
'Project "{}" was not found in Clockify. '
"Timer won't start.".format(project_name)
)
if not self.MessageWidgetClass:
return
msg = (
"Project <b>\"{}\"</b> is not"
" in Clockify Workspace <b>\"{}\"</b>."
'Project <b>"{}"</b> is not'
' in Clockify Workspace <b>"{}"</b>.'
"<br><br>Please inform your Project Manager."
).format(project_name, str(self.clockapi.workspace_name))
).format(project_name, str(self.clockify_api.workspace_name))
self.message_widget = self.MessageWidgetClass(
msg, "Clockify - Info Message"
)
self.message_widget.closed.connect(self.on_message_widget_close)
self.message_widget.show()
return False
return project_id
def start_timer(self, input_data):
"""Called from TimersManager to start timer."""
# If the API key is not entered then skip
if not self.clockify_api.get_api_key():
return
if (
actual_timer is not None and
description == actual_timer_hierarchy and
project_id == actual_project_id
):
task_name = input_data.get("task_name")
# Concatenate hierarchy and task to get description
description_items = list(input_data.get("hierarchy", []))
description_items.append(task_name)
description = "/".join(description_items)
# Check project existence
project_name = input_data.get("project_name")
project_id = self._verify_project_exists(project_name)
if not project_id:
return
# Setup timer tags
tag_ids = []
task_tag_id = self.clockapi.get_tag_id(input_data["task_type"])
tag_name = input_data.get("task_type")
if not tag_name:
# No task_type in the input data; this can happen when the timer
# is restarted after idle time (possibly a bug), so look the task
# type up from the asset document instead.
asset_name = input_data["hierarchy"][-1]
asset_doc = get_asset_by_name(project_name, asset_name)
task_info = asset_doc["data"]["tasks"][task_name]
tag_name = task_info.get("type", "")
if not tag_name:
self.log.info("No tag information found for the timer")
task_tag_id = self.clockify_api.get_tag_id(tag_name)
if task_tag_id is not None:
tag_ids.append(task_tag_id)
self.clockapi.start_time_entry(
description, project_id, tag_ids=tag_ids
# Start timer
self.clockify_api.start_time_entry(
description,
project_id,
tag_ids=tag_ids,
workspace_id=self.clockify_api.workspace_id,
user_id=self.clockify_api.user_id,
)

View file

@ -9,4 +9,4 @@ CLOCKIFY_FTRACK_USER_PATH = os.path.join(
)
ADMIN_PERMISSION_NAMES = ["WORKSPACE_OWN", "WORKSPACE_ADMIN"]
CLOCKIFY_ENDPOINT = "https://api.clockify.me/api/"
CLOCKIFY_ENDPOINT = "https://api.clockify.me/api/v1/"

View file

@ -4,7 +4,7 @@ from openpype_modules.ftrack.lib import ServerAction
from openpype_modules.clockify.clockify_api import ClockifyAPI
class SyncClocifyServer(ServerAction):
class SyncClockifyServer(ServerAction):
'''Synchronise project names and task types.'''
identifier = "clockify.sync.server"
@ -14,12 +14,12 @@ class SyncClocifyServer(ServerAction):
role_list = ["Pypeclub", "Administrator", "project Manager"]
def __init__(self, *args, **kwargs):
super(SyncClocifyServer, self).__init__(*args, **kwargs)
super(SyncClockifyServer, self).__init__(*args, **kwargs)
workspace_name = os.environ.get("CLOCKIFY_WORKSPACE")
api_key = os.environ.get("CLOCKIFY_API_KEY")
self.clockapi = ClockifyAPI(api_key)
self.clockapi.set_workspace(workspace_name)
self.clockify_api = ClockifyAPI(api_key)
self.clockify_api.set_workspace(workspace_name)
if api_key is None:
modified_key = "None"
else:
@ -48,13 +48,16 @@ class SyncClocifyServer(ServerAction):
return True
def launch(self, session, entities, event):
if self.clockapi.workspace_id is None:
self.clockify_api.set_api()
if self.clockify_api.workspace_id is None:
return {
"success": False,
"message": "Clockify Workspace or API key are not set!"
}
if self.clockapi.validate_workspace_perm() is False:
if not self.clockify_api.validate_workspace_permissions(
self.clockify_api.workspace_id, self.clockify_api.user_id
):
return {
"success": False,
"message": "Missing permissions for this action!"
@ -88,9 +91,9 @@ class SyncClocifyServer(ServerAction):
task_type["name"] for task_type in task_types
]
try:
clockify_projects = self.clockapi.get_projects()
clockify_projects = self.clockify_api.get_projects()
if project_name not in clockify_projects:
response = self.clockapi.add_project(project_name)
response = self.clockify_api.add_project(project_name)
if "id" not in response:
self.log.warning(
"Project \"{}\" can't be created. Response: {}".format(
@ -105,7 +108,7 @@ class SyncClocifyServer(ServerAction):
).format(project_name)
}
clockify_workspace_tags = self.clockapi.get_tags()
clockify_workspace_tags = self.clockify_api.get_tags()
for task_type_name in task_type_names:
if task_type_name in clockify_workspace_tags:
self.log.debug(
@ -113,7 +116,7 @@ class SyncClocifyServer(ServerAction):
)
continue
response = self.clockapi.add_tag(task_type_name)
response = self.clockify_api.add_tag(task_type_name)
if "id" not in response:
self.log.warning(
"Task \"{}\" can't be created. Response: {}".format(
@ -138,4 +141,4 @@ class SyncClocifyServer(ServerAction):
def register(session, **kw):
SyncClocifyServer(session).register()
SyncClockifyServer(session).register()

View file

@ -3,7 +3,7 @@ from openpype_modules.ftrack.lib import BaseAction, statics_icon
from openpype_modules.clockify.clockify_api import ClockifyAPI
class SyncClocifyLocal(BaseAction):
class SyncClockifyLocal(BaseAction):
'''Synchronise project names and task types.'''
#: Action identifier.
@ -18,9 +18,9 @@ class SyncClocifyLocal(BaseAction):
icon = statics_icon("app_icons", "clockify-white.png")
def __init__(self, *args, **kwargs):
super(SyncClocifyLocal, self).__init__(*args, **kwargs)
super(SyncClockifyLocal, self).__init__(*args, **kwargs)
#: ClockifyAPI
self.clockapi = ClockifyAPI()
self.clockify_api = ClockifyAPI()
def discover(self, session, entities, event):
if (
@ -31,14 +31,18 @@ class SyncClocifyLocal(BaseAction):
return False
def launch(self, session, entities, event):
self.clockapi.set_api()
if self.clockapi.workspace_id is None:
self.clockify_api.set_api()
if self.clockify_api.workspace_id is None:
return {
"success": False,
"message": "Clockify Workspace or API key are not set!"
}
if self.clockapi.validate_workspace_perm() is False:
if (
self.clockify_api.validate_workspace_permissions(
self.clockify_api.workspace_id, self.clockify_api.user_id)
is False
):
return {
"success": False,
"message": "Missing permissions for this action!"
@ -74,9 +78,9 @@ class SyncClocifyLocal(BaseAction):
task_type["name"] for task_type in task_types
]
try:
clockify_projects = self.clockapi.get_projects()
clockify_projects = self.clockify_api.get_projects()
if project_name not in clockify_projects:
response = self.clockapi.add_project(project_name)
response = self.clockify_api.add_project(project_name)
if "id" not in response:
self.log.warning(
"Project \"{}\" can't be created. Response: {}".format(
@ -91,7 +95,7 @@ class SyncClocifyLocal(BaseAction):
).format(project_name)
}
clockify_workspace_tags = self.clockapi.get_tags()
clockify_workspace_tags = self.clockify_api.get_tags()
for task_type_name in task_type_names:
if task_type_name in clockify_workspace_tags:
self.log.debug(
@ -99,7 +103,7 @@ class SyncClocifyLocal(BaseAction):
)
continue
response = self.clockapi.add_tag(task_type_name)
response = self.clockify_api.add_tag(task_type_name)
if "id" not in response:
self.log.warning(
"Task \"{}\" can't be created. Response: {}".format(
@ -121,4 +125,4 @@ class SyncClocifyLocal(BaseAction):
def register(session, **kw):
SyncClocifyLocal(session).register()
SyncClockifyLocal(session).register()

View file

@ -6,9 +6,9 @@ from openpype_modules.clockify.clockify_api import ClockifyAPI
class ClockifyStart(LauncherAction):
name = "clockify_start_timer"
label = "Clockify - Start Timer"
icon = "clockify_icon"
icon = "app_icons/clockify.png"
order = 500
clockapi = ClockifyAPI()
clockify_api = ClockifyAPI()
def is_compatible(self, session):
"""Return whether the action is compatible with the session"""
@ -17,23 +17,39 @@ class ClockifyStart(LauncherAction):
return False
def process(self, session, **kwargs):
self.clockify_api.set_api()
user_id = self.clockify_api.user_id
workspace_id = self.clockify_api.workspace_id
project_name = session["AVALON_PROJECT"]
asset_name = session["AVALON_ASSET"]
task_name = session["AVALON_TASK"]
description = asset_name
asset_doc = get_asset_by_name(
project_name, asset_name, fields=["data.parents"]
)
if asset_doc is not None:
desc_items = asset_doc.get("data", {}).get("parents", [])
desc_items.append(asset_name)
desc_items.append(task_name)
description = "/".join(desc_items)
project_id = self.clockapi.get_project_id(project_name)
tag_ids = []
tag_ids.append(self.clockapi.get_tag_id(task_name))
self.clockapi.start_time_entry(
description, project_id, tag_ids=tag_ids
# fetch asset docs
asset_doc = get_asset_by_name(project_name, asset_name)
# get task type to fill the timer tag
task_info = asset_doc["data"]["tasks"][task_name]
task_type = task_info["type"]
# check if the asset has a parent hierarchy and build the description
parents_data = asset_doc["data"]
if parents_data is not None:
description_items = parents_data.get("parents", [])
description_items.append(asset_name)
description_items.append(task_name)
description = "/".join(description_items)
project_id = self.clockify_api.get_project_id(
project_name, workspace_id
)
tag_ids = []
tag_name = task_type
tag_ids.append(self.clockify_api.get_tag_id(tag_name, workspace_id))
self.clockify_api.start_time_entry(
description,
project_id,
tag_ids=tag_ids,
workspace_id=workspace_id,
user_id=user_id,
)
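# The resulting timer description joins the asset parents, the asset
# and the task, e.g. a hypothetical hierarchy gives
# "shots/sq01/sh010/animation".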

View file

@ -3,20 +3,39 @@ from openpype_modules.clockify.clockify_api import ClockifyAPI
from openpype.pipeline import LauncherAction
class ClockifySync(LauncherAction):
class ClockifyPermissionsCheckFailed(Exception):
"""Timer start failed due to user permissions check.
Message should be self explanatory as traceback won't be shown.
"""
pass
class ClockifySync(LauncherAction):
name = "sync_to_clockify"
label = "Sync to Clockify"
icon = "clockify_white_icon"
icon = "app_icons/clockify-white.png"
order = 500
clockapi = ClockifyAPI()
have_permissions = clockapi.validate_workspace_perm()
clockify_api = ClockifyAPI()
def is_compatible(self, session):
"""Return whether the action is compatible with the session"""
return self.have_permissions
"""Check if there's some projects to sync"""
try:
next(get_projects())
return True
except StopIteration:
return False
def process(self, session, **kwargs):
self.clockify_api.set_api()
workspace_id = self.clockify_api.workspace_id
user_id = self.clockify_api.user_id
if not self.clockify_api.validate_workspace_permissions(
workspace_id, user_id
):
raise ClockifyPermissionsCheckFailed(
"Current CLockify user is missing permissions for this action!"
)
project_name = session.get("AVALON_PROJECT") or ""
projects_to_sync = []
@ -30,24 +49,28 @@ class ClockifySync(LauncherAction):
task_types = project["config"]["tasks"].keys()
projects_info[project["name"]] = task_types
clockify_projects = self.clockapi.get_projects()
clockify_projects = self.clockify_api.get_projects(workspace_id)
for project_name, task_types in projects_info.items():
if project_name in clockify_projects:
continue
response = self.clockapi.add_project(project_name)
response = self.clockify_api.add_project(
project_name, workspace_id
)
if "id" not in response:
self.log.error("Project {} can't be created".format(
project_name
))
self.log.error(
"Project {} can't be created".format(project_name)
)
continue
clockify_workspace_tags = self.clockapi.get_tags()
clockify_workspace_tags = self.clockify_api.get_tags(workspace_id)
for task_type in task_types:
if task_type not in clockify_workspace_tags:
response = self.clockapi.add_tag(task_type)
response = self.clockify_api.add_tag(
task_type, workspace_id
)
if "id" not in response:
self.log.error('Task {} can\'t be created'.format(
task_type
))
self.log.error(
"Task {} can't be created".format(task_type)
)
continue

View file

@ -77,15 +77,15 @@ class MessageWidget(QtWidgets.QWidget):
class ClockifySettings(QtWidgets.QWidget):
SIZE_W = 300
SIZE_W = 500
SIZE_H = 130
loginSignal = QtCore.Signal(object, object, object)
def __init__(self, clockapi, optional=True):
def __init__(self, clockify_api, optional=True):
super(ClockifySettings, self).__init__()
self.clockapi = clockapi
self.clockify_api = clockify_api
self.optional = optional
self.validated = False
@ -162,17 +162,17 @@ class ClockifySettings(QtWidgets.QWidget):
def click_ok(self):
api_key = self.input_api_key.text().strip()
if self.optional is True and api_key == '':
self.clockapi.save_api_key(None)
self.clockapi.set_api(api_key)
self.clockify_api.save_api_key(None)
self.clockify_api.set_api(api_key)
self.validated = False
self._close_widget()
return
validation = self.clockapi.validate_api_key(api_key)
validation = self.clockify_api.validate_api_key(api_key)
if validation:
self.clockapi.save_api_key(api_key)
self.clockapi.set_api(api_key)
self.clockify_api.save_api_key(api_key)
self.clockify_api.set_api(api_key)
self.validated = True
self._close_widget()
else:

View file

@ -422,6 +422,7 @@ class MayaSubmitDeadline(abstract_submit_deadline.AbstractSubmitDeadline):
assembly_job_info.Priority = instance.data.get(
"tile_priority", self.tile_priority
)
assembly_job_info.TileJob = False
pool = instance.context.data["project_settings"]["deadline"]
pool = pool["publish"]["ProcessSubmittedJobOnFarm"]["deadline_pool"]
@ -450,15 +451,14 @@ class MayaSubmitDeadline(abstract_submit_deadline.AbstractSubmitDeadline):
frame_assembly_job_info.ExtraInfo[0] = file_hash
frame_assembly_job_info.ExtraInfo[1] = file
frame_assembly_job_info.JobDependencies = tile_job_id
frame_assembly_job_info.Frames = frame
# write assembly job config files
now = datetime.now()
config_file = os.path.join(
output_dir,
"{}_config_{}.txt".format(
os.path.splitext(file)[0],
now.strftime("%Y_%m_%d_%H_%M_%S")
datetime.now().strftime("%Y_%m_%d_%H_%M_%S")
)
)
try:
@ -469,6 +469,8 @@ class MayaSubmitDeadline(abstract_submit_deadline.AbstractSubmitDeadline):
self.log.warning("Path is unreachable: "
"`{}`".format(output_dir))
assembly_plugin_info["ConfigFile"] = config_file
with open(config_file, "w") as cf:
print("TileCount={}".format(tiles_count), file=cf)
print("ImageFileName={}".format(file), file=cf)
@ -477,25 +479,30 @@ class MayaSubmitDeadline(abstract_submit_deadline.AbstractSubmitDeadline):
print("ImageHeight={}".format(
instance.data.get("resolutionHeight")), file=cf)
with open(config_file, "a") as cf:
# Need to reverse the order of the y tiles, because image
# coordinates are calculated from bottom left corner.
tiles = _format_tiles(
file, 0,
instance.data.get("tilesX"),
instance.data.get("tilesY"),
instance.data.get("resolutionWidth"),
instance.data.get("resolutionHeight"),
payload_plugin_info["OutputFilePrefix"]
payload_plugin_info["OutputFilePrefix"],
reversed_y=True
)[1]
for k, v in sorted(tiles.items()):
print("{}={}".format(k, v), file=cf)
payload = self.assemble_payload(
job_info=frame_assembly_job_info,
plugin_info=assembly_plugin_info.copy(),
# todo: aux file transfers don't work with deadline webservice
# add config file as job auxFile
# aux_files=[config_file]
assembly_payloads.append(
self.assemble_payload(
job_info=frame_assembly_job_info,
plugin_info=assembly_plugin_info.copy(),
# This would fail if the client machine and webservice are
# using different storage paths.
aux_files=[config_file]
)
)
assembly_payloads.append(payload)
# Submit assembly jobs
assembly_job_ids = []
@ -505,6 +512,7 @@ class MayaSubmitDeadline(abstract_submit_deadline.AbstractSubmitDeadline):
"submitting assembly job {} of {}".format(i + 1,
num_assemblies)
)
self.log.info(payload)
assembly_job_id = self.submit(payload)
assembly_job_ids.append(assembly_job_id)
@ -764,8 +772,15 @@ class MayaSubmitDeadline(abstract_submit_deadline.AbstractSubmitDeadline):
def _format_tiles(
filename, index, tiles_x, tiles_y,
width, height, prefix):
filename,
index,
tiles_x,
tiles_y,
width,
height,
prefix,
reversed_y=False
):
"""Generate tile entries for Deadline tile job.
Returns two dictionaries - one that can be directly used in Deadline
@ -802,6 +817,7 @@ def _format_tiles(
width (int): Width resolution of final image.
height (int): Height resolution of final image.
prefix (str): Image prefix.
reversed_y (bool): Reverses the order of the y tiles.
Returns:
(dict, dict): Tuple of two dictionaries - first can be used to
@ -824,12 +840,16 @@ def _format_tiles(
cfg["TilesCropped"] = "False"
tile = 0
range_y = range(1, tiles_y + 1)
reversed_y_range = list(reversed(range_y))
for tile_x in range(1, tiles_x + 1):
for tile_y in reversed(range(1, tiles_y + 1)):
for i, tile_y in enumerate(range_y):
tile_y_index = tile_y
if reversed_y:
tile_y_index = reversed_y_range[i]
tile_prefix = "_tile_{}x{}_{}x{}_".format(
tile_x, tile_y,
tiles_x,
tiles_y
tile_x, tile_y_index, tiles_x, tiles_y
)
new_filename = "{}/{}{}".format(
@ -844,11 +864,14 @@ def _format_tiles(
right = (tile_x * w_space) - 1
# Job info
out["JobInfo"]["OutputFilename{}Tile{}".format(index, tile)] = new_filename # noqa: E501
key = "OutputFilename{}".format(index)
out["JobInfo"][key] = new_filename
# Plugin Info
out["PluginInfo"]["RegionPrefix{}".format(str(tile))] = \
"/{}".format(tile_prefix).join(prefix.rsplit("/", 1))
key = "RegionPrefix{}".format(str(tile))
out["PluginInfo"][key] = "/{}".format(
tile_prefix
).join(prefix.rsplit("/", 1))
out["PluginInfo"]["RegionTop{}".format(tile)] = top
out["PluginInfo"]["RegionBottom{}".format(tile)] = bottom
out["PluginInfo"]["RegionLeft{}".format(tile)] = left

View file

@ -68,8 +68,15 @@ class ValidateExpectedFiles(pyblish.api.InstancePlugin):
# files to be in the folder that we might not want to use.
missing = expected_files - existing_files
if missing:
raise RuntimeError("Missing expected files: {}".format(
sorted(missing)))
raise RuntimeError(
"Missing expected files: {}\n"
"Expected files: {}\n"
"Existing files: {}".format(
sorted(missing),
sorted(expected_files),
sorted(existing_files)
)
)
def _get_frame_list(self, original_job_id):
"""Returns list of frame ranges from all render job.

View file

@ -16,6 +16,10 @@ from Deadline.Scripting import (
FileUtils, RepositoryUtils, SystemUtils)
version_major = 1
version_minor = 0
version_patch = 0
version_string = "{}.{}.{}".format(version_major, version_minor, version_patch)
STRING_TAGS = {
"format"
}
@ -264,6 +268,7 @@ class OpenPypeTileAssembler(DeadlinePlugin):
def initialize_process(self):
"""Initialization."""
self.LogInfo("Plugin version: {}".format(version_string))
self.SingleFramesOnly = True
self.StdoutHandling = True
self.renderer = self.GetPluginInfoEntryWithDefault(
@@ -320,12 +325,7 @@ class OpenPypeTileAssembler(DeadlinePlugin):
output_file = data["ImageFileName"]
output_file = RepositoryUtils.CheckPathMapping(output_file)
output_file = self.process_path(output_file)
"""
_, ext = os.path.splitext(output_file)
if "exr" not in ext:
self.FailRender(
"[{}] Only EXR format is supported for now.".format(ext))
"""
tile_info = []
for tile in range(int(data["TileCount"])):
tile_info.append({
@@ -336,11 +336,6 @@ class OpenPypeTileAssembler(DeadlinePlugin):
"width": int(data["Tile{}Width".format(tile)])
})
# FFMpeg doesn't support tile coordinates at the moment.
# arguments = self.tile_completer_ffmpeg_args(
# int(data["ImageWidth"]), int(data["ImageHeight"]),
# tile_info, output_file)
arguments = self.tile_oiio_args(
int(data["ImageWidth"]), int(data["ImageHeight"]),
tile_info, output_file)
@@ -362,20 +357,20 @@ class OpenPypeTileAssembler(DeadlinePlugin):
    def pre_render_tasks(self):
        """Load config file and do remapping."""
        self.LogInfo("OpenPype Tile Assembler starting...")
        config_file = self.GetPluginInfoEntry("ConfigFile")
        temp_scene_directory = self.CreateTempDirectory(
            "thread" + str(self.GetThreadNumber()))
        temp_scene_filename = Path.GetFileName(config_file)
        self.config_file = Path.Combine(
            temp_scene_directory, temp_scene_filename)
        if SystemUtils.IsRunningOnWindows():
            RepositoryUtils.CheckPathMappingInFileAndReplaceSeparator(
                config_file, self.config_file, "/", "\\")
        else:
            RepositoryUtils.CheckPathMappingInFileAndReplaceSeparator(
                config_file, self.config_file, "\\", "/")
        os.chmod(self.config_file, os.stat(self.config_file).st_mode)
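Setting Deadline's path-mapping rules aside, the separator rewrite that `CheckPathMappingInFileAndReplaceSeparator` performs on the config file amounts to something like this plain-Python sketch (a stand-in for illustration only, not the Deadline API):

```python
def remap_config_separators(src_path, dst_path, old_sep, new_sep):
    # Read the submitted config file and rewrite it with the
    # platform-appropriate path separator.
    with open(src_path) as src:
        content = src.read()
    with open(dst_path, "w") as dst:
        dst.write(content.replace(old_sep, new_sep))
```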
def post_render_tasks(self):
@@ -459,75 +454,3 @@ class OpenPypeTileAssembler(DeadlinePlugin):
args.append(output_path)
return args
def tile_completer_ffmpeg_args(
self, output_width, output_height, tiles_info, output_path):
"""Generate ffmpeg arguments for tile assembly.
Expected inputs are tiled images.
Args:
output_width (int): Width of output image.
output_height (int): Height of output image.
tiles_info (list): List of tile items, each item must be
dictionary with `filepath`, `pos_x` and `pos_y` keys
representing path to file and x, y coordinates on output
image where top-left point of tile item should start.
output_path (str): Path to file where should be output stored.
Returns:
(list): ffmpeg arguments.
"""
previous_name = "base"
ffmpeg_args = []
filter_complex_strs = []
filter_complex_strs.append("nullsrc=size={}x{}[{}]".format(
output_width, output_height, previous_name
))
new_tiles_info = {}
for idx, tile_info in enumerate(tiles_info):
# Add input and store input index
filepath = tile_info["filepath"]
ffmpeg_args.append("-i \"{}\"".format(filepath.replace("\\", "/")))
# Prepare initial filter complex arguments
index_name = "input{}".format(idx)
filter_complex_strs.append(
"[{}]setpts=PTS-STARTPTS[{}]".format(idx, index_name)
)
tile_info["index"] = idx
new_tiles_info[index_name] = tile_info
# Set frames to 1
ffmpeg_args.append("-frames 1")
# Concatenation filter complex arguments
global_index = 1
total_index = len(new_tiles_info)
for index_name, tile_info in new_tiles_info.items():
item_str = (
"[{previous_name}][{index_name}]overlay={pos_x}:{pos_y}"
).format(
previous_name=previous_name,
index_name=index_name,
pos_x=tile_info["pos_x"],
pos_y=tile_info["pos_y"]
)
new_previous = "tmp{}".format(global_index)
if global_index != total_index:
item_str += "[{}]".format(new_previous)
filter_complex_strs.append(item_str)
previous_name = new_previous
global_index += 1
joined_parts = ";".join(filter_complex_strs)
filter_complex_str = "-filter_complex \"{}\"".format(joined_parts)
ffmpeg_args.append(filter_complex_str)
ffmpeg_args.append("-y")
ffmpeg_args.append("\"{}\"".format(output_path))
return ffmpeg_args

View file

@@ -7,23 +7,22 @@ Provides:
"""
import pyblish.api
from openpype.pipeline import legacy_io
from openpype.lib import filter_profiles
class CollectFtrackFamily(pyblish.api.InstancePlugin):
    """Adds explicitly 'ftrack' to families to upload instance to FTrack.

    Uses selection by combination of hosts/families/tasks names via
    profiles resolution.

    Triggered everywhere, checks instance against configured.

    Checks advanced filtering which works on 'families' not on main
    'family', as some variants dynamically resolves addition of ftrack
    based on 'families' (editorial drives it by presence of 'review')
    """
label = "Collect Ftrack Family"
order = pyblish.api.CollectorOrder + 0.4990
@@ -34,68 +33,64 @@ class CollectFtrackFamily(pyblish.api.InstancePlugin):
            self.log.warning("No profiles present for adding Ftrack family")
            return

        host_name = instance.context.data["hostName"]
        family = instance.data["family"]
        task_name = instance.data.get("task")

        filtering_criteria = {
            "hosts": host_name,
            "families": family,
            "tasks": task_name
        }
        profile = filter_profiles(
            self.profiles,
            filtering_criteria,
            logger=self.log
        )

        add_ftrack_family = False
        families = instance.data.setdefault("families", [])

        if profile:
            add_ftrack_family = profile["add_ftrack_family"]
            additional_filters = profile.get("advanced_filtering")
            if additional_filters:
                families_set = set(families) | {family}
                self.log.info(
                    "'{}' families used for additional filtering".format(
                        families_set))
                add_ftrack_family = self._get_add_ftrack_f_from_addit_filters(
                    additional_filters,
                    families_set,
                    add_ftrack_family
                )

        result_str = "Not adding"
        if add_ftrack_family:
            result_str = "Adding"
            if "ftrack" not in families:
                families.append("ftrack")

        self.log.info("{} 'ftrack' family for instance with '{}'".format(
            result_str, family
        ))
    def _get_add_ftrack_f_from_addit_filters(
        self, additional_filters, families, add_ftrack_family
    ):
        """Compares additional filters - working on instance's families.

        Triggered for more detailed filtering when main family matches,
        but content of 'families' actually matter.
        (For example 'review' in 'families' should result in adding to
        Ftrack)

        Args:
            additional_filters (dict) - from Setting
            families (set[str]) - subfamilies
            add_ftrack_family (bool) - add ftrack to families if True
        """
override_filter = None
override_filter_value = -1
for additional_filter in additional_filters:
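The remainder of this method is truncated by the hunk. As a rough illustration of the idea, not the plugin's exact code, an advanced filter could be resolved by how specifically its required families match the instance (all names below are assumptions):

```python
def resolve_add_ftrack(additional_filters, families, default):
    # Pick the filter whose "families" requirement matches the instance
    # most specifically; fall back to the profile default otherwise.
    best_filter, best_score = None, -1
    for additional_filter in additional_filters:
        required = set(additional_filter.get("families", []))
        if not required.issubset(families):
            continue
        if len(required) > best_score:
            best_filter, best_score = additional_filter, len(required)
    if best_filter is None:
        return default
    return best_filter["add_ftrack_family"]
```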

View file

@@ -141,7 +141,9 @@ class TimersManager(
signal_handler = SignalHandler(self)
idle_manager = IdleManager()
widget_user_idle = WidgetUserIdle(self)
        widget_user_idle.set_countdown_start(
            self.time_stop_timer - self.time_show_message
        )
idle_manager.signal_reset_timer.connect(
widget_user_idle.reset_countdown
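The fix makes the dialog count down the time remaining between showing the warning and stopping the timers, rather than the message delay itself. A worked example with assumed values:

```python
# Assumed illustrative values, not OpenPype defaults.
time_stop_timer = 300    # stop timers after 300 s of idle time
time_show_message = 240  # show the warning once 240 s of idle time passed
countdown_start = time_stop_timer - time_show_message
print(countdown_start)   # 60 -> the dialog counts down the last 60 seconds
```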

View file

@@ -29,7 +29,7 @@ from openpype.pipeline.publish import OpenPypePyblishPluginMixin
class CollectInstanceCommentDef(
    pyblish.api.InstancePlugin,
OpenPypePyblishPluginMixin
):
label = "Comment per instance"

View file

@@ -42,6 +42,7 @@ class ExtractReview(pyblish.api.InstancePlugin):
hosts = [
"nuke",
"maya",
"blender",
"shell",
"hiero",
"premiere",

View file

@@ -80,10 +80,12 @@ class IntegrateAsset(pyblish.api.InstancePlugin):
order = pyblish.api.IntegratorOrder
families = ["workfile",
"pointcache",
"pointcloud",
"proxyAbc",
"camera",
"animation",
"model",
"maxScene",
"mayaAscii",
"mayaScene",
"setdress",

View file

@@ -76,10 +76,12 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
order = pyblish.api.IntegratorOrder + 0.00001
families = ["workfile",
"pointcache",
"pointcloud",
"proxyAbc",
"camera",
"animation",
"model",
"maxScene",
"mayaAscii",
"mayaScene",
"setdress",

View file

@@ -80,6 +80,94 @@
"enabled": true,
"optional": true,
"active": false
},
"ExtractThumbnail": {
"enabled": true,
"optional": true,
"active": true,
"presets": {
"model": {
"image_settings": {
"file_format": "JPEG",
"color_mode": "RGB",
"quality": 100
},
"display_options": {
"shading": {
"light": "STUDIO",
"studio_light": "Default",
"type": "SOLID",
"color_type": "OBJECT",
"show_xray": false,
"show_shadows": false,
"show_cavity": true
},
"overlay": {
"show_overlays": false
}
}
},
"rig": {
"image_settings": {
"file_format": "JPEG",
"color_mode": "RGB",
"quality": 100
},
"display_options": {
"shading": {
"light": "STUDIO",
"studio_light": "Default",
"type": "SOLID",
"color_type": "OBJECT",
"show_xray": true,
"show_shadows": false,
"show_cavity": false
},
"overlay": {
"show_overlays": true,
"show_ortho_grid": false,
"show_floor": false,
"show_axis_x": false,
"show_axis_y": false,
"show_axis_z": false,
"show_text": false,
"show_stats": false,
"show_cursor": false,
"show_annotation": false,
"show_extras": false,
"show_relationship_lines": false,
"show_outline_selected": false,
"show_motion_paths": false,
"show_object_origins": false,
"show_bones": true
}
}
}
}
},
"ExtractPlayblast": {
"enabled": true,
"optional": true,
"active": true,
"presets": {
"default": {
"image_settings": {
"file_format": "PNG",
"color_mode": "RGB",
"color_depth": "8",
"compression": 15
},
"display_options": {
"shading": {
"type": "MATERIAL",
"render_pass": "COMBINED"
},
"overlay": {
"show_overlays": false
}
}
}
}
}
}
}
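These presets mirror Blender's RNA property names, so applying them can be as simple as a `setattr` loop. A minimal sketch, assuming the preset dicts above and a `VIEW_3D` space (the helper names are illustrative, not OpenPype API):

```python
import bpy

def apply_image_settings(scene, image_settings):
    # e.g. {"file_format": "JPEG", "color_mode": "RGB", "quality": 100}
    for key, value in image_settings.items():
        setattr(scene.render.image_settings, key, value)

def apply_display_options(space, display_options):
    # Enum values are Blender identifiers ("STUDIO", "SOLID", "OBJECT", ...).
    for key, value in display_options.get("shading", {}).items():
        setattr(space.shading, key, value)
    for key, value in display_options.get("overlay", {}).items():
        setattr(space.overlay, key, value)
```

A `VIEW_3D` space can be found by iterating `bpy.context.window.screen.areas` and taking `area.spaces.active` where `area.type == "VIEW_3D"`.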

View file

@@ -23,7 +23,7 @@
"enabled": true,
"optional": false,
"active": true,
"tile_assembler_plugin": "OpenPypeTileAssembler",
"tile_assembler_plugin": "DraftTileAssembler",
"use_published": true,
"import_reference": false,
"asset_dependencies": true,

View file

@@ -16,5 +16,10 @@
"linux": []
}
}
},
"copy_fusion_settings": {
"copy_path": "~/.openpype/hosts/fusion/profiles",
"copy_status": false,
"force_sync": false
}
}
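The intent of `copy_fusion_settings` is a one-time (or forced) sync of a Fusion profile directory at launch. A hedged sketch of that behaviour, assuming plain directory copies (function and argument names are illustrative):

```python
import os
import shutil

def copy_fusion_profile(source_dir, copy_path, force_sync):
    # copy_path may contain "~", e.g. "~/.openpype/hosts/fusion/profiles".
    destination = os.path.expanduser(copy_path)
    if force_sync and os.path.isdir(destination):
        shutil.rmtree(destination)  # resync on each launch
    if not os.path.isdir(destination):
        shutil.copytree(source_dir, destination)  # copy on first launch
```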

View file

@@ -4,5 +4,20 @@
"aov_separator": "underscore",
"image_format": "exr",
"multipass": true
},
"PointCloud":{
"attribute":{
"Age": "age",
"Radius": "radius",
"Position": "position",
"Rotation": "rotation",
"Scale": "scale",
"Velocity": "velocity",
"Color": "color",
"TextureCoordinate": "texcoord",
"MaterialID": "matid",
"custFloats": "custFloats",
"custVecs": "custVecs"
}
}
}
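These settings define how particle channel names map to PRT attribute names. A tiny, hypothetical illustration of applying such a map at export time (not the 3ds Max integration's actual code):

```python
attribute_map = {
    "Age": "age",
    "Radius": "radius",
    "Velocity": "velocity",
}

def remap_channels(channels, attribute_map):
    # channels: {source_channel_name: per-particle data}
    return {
        attribute_map.get(name, name): data
        for name, data in channels.items()
    }

print(remap_channels({"Age": [0.1, 0.5]}, attribute_map))  # {'age': [0.1, 0.5]}
```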

View file

@@ -147,6 +147,7 @@
"enabled": true,
"write_color_sets": false,
"write_face_sets": false,
"include_parent_hierarchy": false,
"include_user_defined_attributes": false,
"defaults": [
"Main"

View file

@@ -121,7 +121,7 @@
"DraftTileAssembler": "Draft Tile Assembler"
},
{
"OpenPypeTileAssembler": "Open Image IO"
"OpenPypeTileAssembler": "OpenPype Tile Assembler"
}
]
},

View file

@@ -45,6 +45,29 @@
]
}
]
},
{
"type": "dict",
"key": "copy_fusion_settings",
"collapsible": true,
"label": "Local Fusion profile settings",
"children": [
{
"key": "copy_path",
"type": "path",
"label": "Local Fusion profile directory"
},
{
"type": "boolean",
"key": "copy_status",
"label": "Copy profile on first launch"
},
{
"key":"force_sync",
"type": "boolean",
"label": "Resync profile on each launch"
}
]
}
]
}

View file

@@ -51,6 +51,28 @@
"label": "multipass"
}
]
},
{
"type": "dict",
"collapsible": true,
"key": "PointCloud",
"label": "Point Cloud",
"children": [
{
"type": "label",
"label": "Define the channel attribute names before exporting as PRT"
},
{
"type": "dict-modifiable",
"collapsible": true,
"key": "attribute",
"label": "Channel Attribute",
"use_label_wrap": true,
"object_type": {
"type": "text"
}
}
]
}
]
}
}

View file

@@ -112,6 +112,66 @@
"label": "Extract Layout as JSON"
}
]
},
{
"type": "dict",
"collapsible": true,
"key": "ExtractThumbnail",
"label": "ExtractThumbnail",
"checkbox_key": "enabled",
"is_group": true,
"children": [
{
"type": "boolean",
"key": "enabled",
"label": "Enabled"
},
{
"type": "boolean",
"key": "optional",
"label": "Optional"
},
{
"type": "boolean",
"key": "active",
"label": "Active"
},
{
"type": "raw-json",
"key": "presets",
"label": "Presets"
}
]
},
{
"type": "dict",
"collapsible": true,
"key": "ExtractPlayblast",
"label": "ExtractPlayblast",
"checkbox_key": "enabled",
"is_group": true,
"children": [
{
"type": "boolean",
"key": "enabled",
"label": "Enabled"
},
{
"type": "boolean",
"key": "optional",
"label": "Optional"
},
{
"type": "boolean",
"key": "active",
"label": "Active"
},
{
"type": "raw-json",
"key": "presets",
"label": "Presets"
}
]
}
]
}
}

View file

@@ -132,6 +132,11 @@
"key": "write_face_sets",
"label": "Write Face Sets"
},
{
"type": "boolean",
"key": "include_parent_hierarchy",
"label": "Include Parent Hierarchy"
},
{
"type": "boolean",
"key": "include_user_defined_attributes",

View file

@@ -369,7 +369,8 @@
"label": "Arnold Render Attributes",
"use_label_wrap": true,
"object_type": {
"type": "text"
"type": "list",
"object_type": "text"
}
},
{
@@ -379,7 +380,8 @@
"label": "Vray Render Attributes",
"use_label_wrap": true,
"object_type": {
"type": "text"
"type": "list",
"object_type": "text"
}
},
{
@@ -389,7 +391,8 @@
"label": "Redshift Render Attributes",
"use_label_wrap": true,
"object_type": {
"type": "text"
"type": "list",
"object_type": "text"
}
},
{
@@ -399,7 +402,8 @@
"label": "Renderman Render Attributes",
"use_label_wrap": true,
"object_type": {
"type": "text"
"type": "list",
"object_type": "text"
}
}
]

View file

@@ -1,3 +1,3 @@
# -*- coding: utf-8 -*-
"""Package declaring Pype version."""
__version__ = "3.15.3-nightly.1"
__version__ = "3.15.3-nightly.2"

View file

@@ -135,7 +135,7 @@ class TestPipelinePublishPlugins(TestPipeline):
}
# load plugin function for testing
    plugin = publish_plugins.ColormanagedPyblishPluginMixin()
plugin.log = log
config_data, file_rules = plugin.get_colorspace_settings(context)
@@ -175,14 +175,14 @@ class TestPipelinePublishPlugins(TestPipeline):
}
# load plugin function for testing
    plugin = publish_plugins.ColormanagedPyblishPluginMixin()
plugin.log = log
plugin.set_representation_colorspace(
representation_nuke, context,
colorspace_settings=(config_data_nuke, file_rules_nuke)
)
# load plugin function for testing
    plugin = publish_plugins.ColormanagedPyblishPluginMixin()
plugin.log = log
plugin.set_representation_colorspace(
representation_hiero, context,

View file

@@ -11,8 +11,8 @@ sidebar_label: Maya
`ValidateRenderSettings`
Render Settings Validator is here to make sure artists will submit renders
with the correct settings. Some of these settings are needed by OpenPype but some
can be defined by the admin using [OpenPype Settings UI](admin_settings.md).
OpenPype enforced settings include:
@@ -36,10 +36,9 @@ For **Renderman**:
For **Arnold**:
- there shouldn't be `<renderpass>` token when merge AOVs option is turned on
Additional check can be added via Settings - **Project Settings > Maya > Publish plugin > ValidateRenderSettings**.
You can add as many options as you want for every supported renderer. In the first field put the node type and attribute
and in the second the required value. You can create multiple values for an attribute, but when repairing it'll be the first value in the list that gets selected.
![Settings example](assets/maya-admin_render_settings_validator.png)
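For example, a multi-value entry could look like the following (a hypothetical illustration of the setting, shown as a Python dict; the attribute name and values are assumptions):

```python
# "node_type.attribute" -> list of accepted values; repair applies the first.
arnold_render_attributes = {
    "aiOptions.AA_samples": ["6", "8"],  # either value passes validation
}
```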

View file

@@ -516,6 +516,22 @@ In the scene from where you want to publish your model create *Render subset*. P
model subset (Maya set node) under the corresponding `LAYER_` set under the *Render instance*. During publish, it will submit this render to the farm and
after it is rendered, it will be attached to your model subset.
### Tile Rendering
:::note Deadline
This feature is only supported when using Deadline. See [here](module_deadline#openpypetileassembler-plugin) for setup.
:::
On the render instance objectset you'll find:
* `Tile Rendering` - for enabling tile rendering.
* `Tile X` - number of tiles in the X axis.
* `Tile Y` - number of tiles in the Y axis.
When submitting to Deadline, you'll get:
- for each frame, a tile rendering job to render the tiles from Maya.
- for each frame, a tile assembly job to assemble the rendered tiles.
- a job to publish the assembled frames.
## Render Setups
### Publishing Render Setups

View file

@@ -9,7 +9,9 @@ sidebar_label: Yeti
OpenPype can work with [Yeti](https://peregrinelabs.com/yeti/) in two data modes.
It can handle Yeti caches and Yeti rigs.
## Yeti Caches

### Creating and publishing
Let's start by creating a simple Yeti setup: just one object and a Yeti node. Open a new
empty scene in Maya and create a sphere. Then select the sphere and go **Yeti → Create Yeti Node on Mesh**
@@ -44,7 +46,15 @@ You can now publish Yeti cache as any other types. **OpenPype → Publish**. It
create a sequence of `.fur` files and a `.fursettings` metadata file with the Yeti node
settings.
:::note Collect Yeti Cache failure
If you encounter **Collect Yeti Cache** failure during collecting phase, and the error is like
```fix
No object matches name: pgYetiMaya1Shape.cbId
```
then it is probably caused by the scene not being saved before publishing.
:::
### Loading
You can load a Yeti cache via **OpenPype → Load ...**. Select your cache, right+click on
it and select **Load Yeti cache**. This will create a Yeti node in the scene and set its
@@ -52,26 +62,39 @@ cache path to point to your published cache files. Note that this Yeti node will
be named with the same name as the one you've used to publish the cache. Also notice that
when you open the graph on this Yeti node, all nodes are as they were in the publishing node.
## Yeti Rigs

Yeti Rigs work in a similar way to caches, but are more complex and deal with
other data used by Yeti, like geometry and textures.

### Creating and publishing

Yeti Rigs are designed to connect to published models or animation rigs. The workflow gives the Yeti Rig full control over that geometry to do additional things on top of whatever input comes in, e.g. deleting faces, pushing faces in/out, subdividing, etc.

Let's start with a [model](artist_hosts_maya.md#loading-model) or [rig](artist_hosts_maya.md#loading-rigs) loaded into the scene. Here we are using a simple rig.

![Maya - Yeti Simple Rig](assets/maya-yeti_simple_rig.png)

We'll need to prepare the scene a bit. We want some Yeti hair on the ball geometry, so duplicate the geometry, add the Yeti hair and group it all together.

![Maya - Yeti Hair Setup](assets/maya-yeti_hair_setup.png)

:::note yeti nodes and types
You can use any number of Yeti nodes and types, but they have to have unique names.
:::

Now we need to connect the Yeti Rig with the animation rig. Yeti Rigs work by publishing the attribute connections from their input nodes and reconnecting them later in the pipeline. This means we can only use attribute connections from outside of the Yeti Rig hierarchy. Internal to the Yeti Rig hierarchy, we can use any complexity of node connections. We'll connect the Yeti Rig geometry to the animation rig, with the transform and mesh attributes.

![Maya - Yeti Rig Setup](assets/maya-yeti_rig_setup.png)

Now we are ready for publishing. Select the Yeti Rig group (`rig_GRP`) and
create a *Yeti Rig instance* - **OpenPype → Create...** and select **Yeti Rig**.
Leave `Use selection` checked.

The last step is to add our geometry to the rig instance, so middle+drag its
geometry to `input_SET` under the `yetiRigMain` set representing the rig instance.
Note that its name can differ and is based on your subset name.

![Maya - Yeti Publish Setup](assets/maya-yeti_publish_setup.png)

You can have any number of nodes in the Yeti Rig, but only nodes with incoming attribute connections from outside of the Yeti Rig hierarchy are needed in the `input_SET`.
Save your scene and we're ready to publish our new simple Yeti Rig!
@@ -81,28 +104,14 @@ the beginning of your timeline. It will also collect all textures used in Yeti
node, copy them to the publish folder `resource` directory and set the *Image search path*
of the published node to this location.
:::note Collect Yeti Cache failure
If you encounter **Collect Yeti Cache** failure during collecting phase, and the error is like
```fix
No object matches name: pgYetiMaya1Shape.cbId
```
then it is probably caused by scene not being saved before publishing.
:::
### Loading
You can load published Yeti Rigs in OpenPype with **OpenPype → Load ...**,
select your Yeti rig and right+click on it. In the context menu you should see
the **Load Yeti Rig** item (among others).
Notice that although we put only geometry into `input_SET`, whole hierarchy was
pulled inside also. This allows you to store complex scene element along Yeti
node.
To connect the Yeti Rig with published animation, we'll load in the animation and use the Inventory to establish the connections.
:::tip auto-connecting rig mesh to existing one
If you select some objects before loading rig it will try to find shapes
under selected hierarchies and match them with shapes loaded with rig (published
under `input_SET`). This mechanism uses *cbId* attribute on those shapes.
If match is found shapes are connected using their `outMesh` and `outMesh`. Thus you can easily connect existing animation to loaded rig.
:::
![Maya - Yeti Publish Setup](assets/maya-yeti_load_connections.png)
The Yeti Rig should now be following the animation. :tada:

(Binary image files not shown: one documentation screenshot updated, five added, one removed.)

View file

@@ -63,7 +63,7 @@ It's up to the Loaders to read these values and apply the correct expected color
- set the `OCIO` environment variable before launching the host via a prelaunch hook
- or (if the host allows) to set the workfile OCIO config path using the host's API
3. Each Extractor exporting pixel data (e.g. image or video) has to inherit from the mixin class `openpype.pipeline.publish.publish_plugins.ColormanagedPyblishPluginMixin` and use `self.set_representation_colorspace` on the representations to be integrated.
The **set_representation_colorspace** method adds `colorspaceData` to the representation. If the `colorspace` passed is not `None`, it is added directly to the representation with a resolved config path; otherwise a color space is assumed using the configured file rules. If no file rule matches, the `colorspaceData` is **not** added to the representation.
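For illustration, a minimal sketch of an extractor using the mixin (the plugin name, representation contents and colorspace value are assumptions, not OpenPype code):

```python
import pyblish.api
from openpype.pipeline.publish import publish_plugins

class ExtractExampleImage(pyblish.api.InstancePlugin,
                          publish_plugins.ColormanagedPyblishPluginMixin):
    label = "Extract Example Image"
    order = pyblish.api.ExtractorOrder

    def process(self, instance):
        representation = {
            "name": "exr",
            "ext": "exr",
            "files": "render.0001.exr",
        }
        # Adds `colorspaceData` to the representation; pass colorspace=None
        # to fall back to the configured file rules instead.
        self.set_representation_colorspace(
            representation, instance.context, colorspace="ACES - ACEScg"
        )
        instance.data.setdefault("representations", []).append(representation)
```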

View file

@@ -45,6 +45,10 @@ executable. It is recommended to use the `openpype_console` executable as it pro
![Configure plugin](assets/deadline_configure_plugin.png)
### OpenPypeTileAssembler Plugin
To set up tile rendering, copy the `OpenPypeTileAssembler` plugin to the repository:
`[OpenPype]\openpype\modules\deadline\repository\custom\plugins\OpenPypeTileAssembler` > `[DeadlineRepository]\custom\plugins\OpenPypeTileAssembler`
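On a machine with filesystem access to both locations, this copy could be scripted like so (a sketch; the bracketed segments are placeholders for your actual installation and repository roots):

```python
import shutil

# Replace the bracketed placeholders with real paths before running.
src = r"[OpenPype]\openpype\modules\deadline\repository\custom\plugins\OpenPypeTileAssembler"
dst = r"[DeadlineRepository]\custom\plugins\OpenPypeTileAssembler"
shutil.copytree(src, dst)
```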
### Pools
The main pools can be configured at `project_settings/deadline/publish/CollectDeadlinePools/primary_pool`, which is applied to the rendering jobs.