[Automated] Merged develop into main

pypebot 2022-07-02 05:52:18 +02:00 committed by GitHub
commit b5d9d5db5f
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
51 changed files with 2000 additions and 1397 deletions


@ -1,11 +1,10 @@
from openpype.api import Anatomy
from openpype.lib import (
PreLaunchHook,
EnvironmentPrepData,
prepare_app_environments,
prepare_context_environments
)
from openpype.pipeline import AvalonMongoDB
from openpype.pipeline import AvalonMongoDB, Anatomy
class GlobalHostDataHook(PreLaunchHook):
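
The dominant refactor in this merge, visible in the hunk above and repeated across most files below, moves `Anatomy` from `openpype.api`/`openpype.lib` to `openpype.pipeline`. A minimal before/after sketch of the import change (the project name is a placeholder):

# Before this commit:
# from openpype.api import Anatomy
# After this commit:
from openpype.pipeline import Anatomy

anatomy = Anatomy("my_project")  # placeholder project name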


@ -93,7 +93,7 @@ def set_start_end_frames():
# Default scene settings
frameStart = scene.frame_start
frameEnd = scene.frame_end
fps = scene.render.fps
fps = scene.render.fps / scene.render.fps_base
resolution_x = scene.render.resolution_x
resolution_y = scene.render.resolution_y
@ -116,7 +116,8 @@ def set_start_end_frames():
scene.frame_start = frameStart
scene.frame_end = frameEnd
scene.render.fps = fps
scene.render.fps = round(fps)
scene.render.fps_base = round(fps) / fps
scene.render.resolution_x = resolution_x
scene.render.resolution_y = resolution_y
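
The fps changes above reflect how Blender stores frame rate: an integer `scene.render.fps` divided by a float `scene.render.fps_base`, so fractional rates survive the round trip. A quick sketch of the arithmetic with an assumed rate:

fps = 23.976                        # assumed fractional frame rate
render_fps = round(fps)             # 24 -> scene.render.fps
fps_base = round(fps) / fps         # ~1.001 -> scene.render.fps_base
effective = render_fps / fps_base   # 23.976 again when read back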


@ -19,8 +19,9 @@ from openpype.client import (
get_last_versions,
get_representations,
)
from openpype.pipeline import legacy_io
from openpype.api import (Logger, Anatomy, get_anatomy_settings)
from openpype.settings import get_anatomy_settings
from openpype.pipeline import legacy_io, Anatomy
from openpype.api import Logger
from . import tags
try:


@ -5,8 +5,7 @@ import husdoutputprocessors.base as base
import colorbleed.usdlib as usdlib
from openpype.client import get_asset_by_name
from openpype.api import Anatomy
from openpype.pipeline import legacy_io
from openpype.pipeline import legacy_io, Anatomy
class AvalonURIOutputProcessor(base.OutputProcessorBase):


@ -1908,7 +1908,7 @@ def iter_parents(node):
"""
while True:
split = node.rsplit("|", 1)
if len(split) == 1:
if len(split) == 1 or not split[0]:
return
node = split[0]
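
The added `not split[0]` guard stops the iteration at the root for absolute DAG paths, where the final rsplit leaves an empty first element; a quick trace with a hypothetical path:

"|grp|child".rsplit("|", 1)  # ["|grp", "child"] -> next parent is "|grp"
"|grp".rsplit("|", 1)        # ["", "grp"] -> split[0] is empty, stop here
# Without the guard, the empty string would be yielded as a "parent".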
@ -3213,3 +3213,209 @@ def parent_nodes(nodes, parent=None):
node[0].setParent(node[1])
if delete_parent:
pm.delete(parent_node)
@contextlib.contextmanager
def maintained_time():
ct = cmds.currentTime(query=True)
try:
yield
finally:
cmds.currentTime(ct, edit=True)
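
A minimal usage sketch of `maintained_time` (node name hypothetical); the try/finally guarantees the original time is restored even if the body raises:

with maintained_time():
    cmds.currentTime(1001, edit=True)           # jump around freely
    value = cmds.getAttr("pCube1.visibility")   # hypothetical node
# back on the original frame here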
def iter_visible_nodes_in_range(nodes, start, end):
"""Yield nodes that are visible in start-end frame range.
- Ignores intermediateObjects completely.
- Considers animated visibility attributes + upstream visibilities.
    This is optimized for large scenes where some nodes in the parent
    hierarchy might have input connections to their visibilities,
    e.g. keys, driven keys, connections to other attributes, etc.

    This steps the time to `start` only once, and only if the current
    frame is outside the queried range, on the assumption that a single
    frame change is cheaper than querying all visibility plugs through
    MDGContext on another frame.
Args:
nodes (list): List of node names to consider.
start (int, float): Start frame.
end (int, float): End frame.
    Yields:
        str: Node names. These will be long full path names, so they
            may be longer than the input node names.
"""
# States we consider per node
VISIBLE = 1 # always visible
INVISIBLE = 0 # always invisible
ANIMATED = -1 # animated visibility
# Ensure integers
start = int(start)
end = int(end)
# Consider only non-intermediate dag nodes and use the "long" names.
nodes = cmds.ls(nodes, long=True, noIntermediate=True, type="dagNode")
if not nodes:
return
with maintained_time():
# Go to first frame of the range if the current time is outside
# the queried range so can directly query all visible nodes on
# that frame.
current_time = cmds.currentTime(query=True)
if not (start <= current_time <= end):
cmds.currentTime(start)
visible = cmds.ls(nodes, long=True, visible=True)
for node in visible:
yield node
if len(visible) == len(nodes) or start == end:
            # All nodes are visible on the start frame (or there is only
            # a single frame), so every node is visible at least once
            # inside the frame range.
return
    # For the invisible ones, check whether their visibility and/or
    # any of their parents' visibility attributes are animated. If so,
    # they might become visible on other frames in the range.
def memodict(f):
"""Memoization decorator for a function taking a single argument.
See: http://code.activestate.com/recipes/
578231-probably-the-fastest-memoization-decorator-in-the-/
"""
class memodict(dict):
def __missing__(self, key):
ret = self[key] = f(key)
return ret
return memodict().__getitem__
@memodict
def get_state(node):
plug = node + ".visibility"
connections = cmds.listConnections(plug,
source=True,
destination=False)
if connections:
return ANIMATED
else:
return VISIBLE if cmds.getAttr(plug) else INVISIBLE
visible = set(visible)
invisible = [node for node in nodes if node not in visible]
always_invisible = set()
    # Iterate over the nodes from shortest to longest name so that
    # nodes highest in the hierarchy are processed first; their cached
    # states can then be reused for parent queries in later iterations.
node_dependencies = dict()
for node in sorted(invisible, key=len):
state = get_state(node)
if state == INVISIBLE:
always_invisible.add(node)
continue
        # If the node is not always invisible by itself, go through and
        # check its parents to see if any of them are always invisible.
        # Parents whose state is "ANIMATED" are stored as dependencies,
        # since this node's visibility depends on those attributes.
dependencies = set()
if state == ANIMATED:
dependencies.add(node)
traversed_parents = list()
for parent in iter_parents(node):
if parent in always_invisible or get_state(parent) == INVISIBLE:
                # When a parent is always invisible, consider that parent,
                # the node we started from and any parents traversed
                # in-between to be *always invisible*.
always_invisible.add(parent)
always_invisible.add(node)
always_invisible.update(traversed_parents)
break
            # If we have traversed this parent before and its visibility
            # depends on animated visibilities, we can just extend this
            # node's dependencies with the parent's and stop iterating
            # upwards.
parent_dependencies = node_dependencies.get(parent, None)
if parent_dependencies is not None:
dependencies.update(parent_dependencies)
break
state = get_state(parent)
if state == ANIMATED:
dependencies.add(parent)
traversed_parents.append(parent)
if node not in always_invisible and dependencies:
node_dependencies[node] = dependencies
if not node_dependencies:
return
# Now we only have to check the visibilities for nodes that have animated
# visibility dependencies upstream. The fastest way to check these
# visibility attributes across different frames is with Python api 2.0
# so we do that.
@memodict
def get_visibility_mplug(node):
"""Return api 2.0 MPlug with cached memoize decorator"""
sel = om.MSelectionList()
sel.add(node)
dag = sel.getDagPath(0)
return om.MFnDagNode(dag).findPlug("visibility", True)
@contextlib.contextmanager
def dgcontext(mtime):
"""MDGContext context manager"""
context = om.MDGContext(mtime)
try:
previous = context.makeCurrent()
yield context
finally:
previous.makeCurrent()
    # Skip the first frame, since that frame was already used to check
    # the overall visibilities; use end + 1 to include the end frame.
scene_units = om.MTime.uiUnit()
for frame in range(start + 1, end + 1):
mtime = om.MTime(frame, unit=scene_units)
        # Build a little cache so we don't query the same MPlug's value
        # again if it was already checked on this frame as a dependency
        # of another node.
frame_visibilities = {}
with dgcontext(mtime) as context:
for node, dependencies in list(node_dependencies.items()):
for dependency in dependencies:
dependency_visible = frame_visibilities.get(dependency,
None)
if dependency_visible is None:
mplug = get_visibility_mplug(dependency)
dependency_visible = mplug.asBool(context)
frame_visibilities[dependency] = dependency_visible
if not dependency_visible:
# One dependency is not visible, thus the
# node is not visible.
break
else:
# All dependencies are visible.
yield node
# Remove node with dependencies for next frame iterations
# because it was visible at least once.
node_dependencies.pop(node)
                # If there are no more nodes to process, stop iterating frames.
if not node_dependencies:
break
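
A minimal usage sketch of the generator above, with an assumed frame range, mirroring how the Alembic extractors later in this commit consume it:

nodes = cmds.ls(selection=True, long=True)
visible = list(iter_visible_nodes_in_range(nodes, start=1001, end=1050))
# `visible` holds long node paths visible on at least one frame in range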


@ -9,8 +9,8 @@ from openpype.pipeline import (
LoaderPlugin,
get_representation_path,
AVALON_CONTAINER_ID,
Anatomy,
)
from openpype.api import Anatomy
from openpype.settings import get_project_settings
from .pipeline import containerise
from . import lib


@ -0,0 +1,139 @@
import os
from openpype.api import get_project_settings
from openpype.pipeline import (
load,
get_representation_path
)
# TODO aiVolume doesn't automatically set velocity fps correctly, set manual?
class LoadVDBtoArnold(load.LoaderPlugin):
"""Load OpenVDB for Arnold in aiVolume"""
families = ["vdbcache"]
representations = ["vdb"]
label = "Load VDB to Arnold"
icon = "cloud"
color = "orange"
def load(self, context, name, namespace, data):
from maya import cmds
from openpype.hosts.maya.api.pipeline import containerise
from openpype.hosts.maya.api.lib import unique_namespace
        try:
            family = context["representation"]["context"]["family"]
        except KeyError:
            family = "vdbcache"
        # Check if the Arnold plugin (mtoa) is available on this machine
try:
cmds.loadPlugin("mtoa", quiet=True)
except Exception as exc:
self.log.error("Encountered exception:\n%s" % exc)
return
asset = context['asset']
asset_name = asset["name"]
namespace = namespace or unique_namespace(
asset_name + "_",
prefix="_" if asset_name[0].isdigit() else "",
suffix="_",
)
# Root group
label = "{}:{}".format(namespace, name)
root = cmds.group(name=label, empty=True)
settings = get_project_settings(os.environ['AVALON_PROJECT'])
colors = settings['maya']['load']['colors']
c = colors.get(family)
if c is not None:
cmds.setAttr(root + ".useOutlinerColor", 1)
cmds.setAttr(root + ".outlinerColor",
(float(c[0]) / 255),
(float(c[1]) / 255),
(float(c[2]) / 255)
)
        # Create aiVolume
grid_node = cmds.createNode("aiVolume",
name="{}Shape".format(root),
parent=root)
self._set_path(grid_node,
path=self.fname,
representation=context["representation"])
# Lock the shape node so the user can't delete the transform/shape
# as if it was referenced
cmds.lockNode(grid_node, lock=True)
nodes = [root, grid_node]
self[:] = nodes
return containerise(
name=name,
namespace=namespace,
nodes=nodes,
context=context,
loader=self.__class__.__name__)
def update(self, container, representation):
from maya import cmds
path = get_representation_path(representation)
        # Find the aiVolume
members = cmds.sets(container['objectName'], query=True)
grid_nodes = cmds.ls(members, type="aiVolume", long=True)
assert len(grid_nodes) == 1, "This is a bug"
        # Update the aiVolume
self._set_path(grid_nodes[0], path=path, representation=representation)
# Update container representation
cmds.setAttr(container["objectName"] + ".representation",
str(representation["_id"]),
type="string")
def switch(self, container, representation):
self.update(container, representation)
def remove(self, container):
from maya import cmds
# Get all members of the avalon container, ensure they are unlocked
# and delete everything
members = cmds.sets(container['objectName'], query=True)
cmds.lockNode(members, lock=False)
cmds.delete([container['objectName']] + members)
# Clean up the namespace
try:
cmds.namespace(removeNamespace=container['namespace'],
deleteNamespaceContent=True)
except RuntimeError:
pass
@staticmethod
def _set_path(grid_node,
path,
representation):
"""Apply the settings for the VDB path to the aiVolume node"""
from maya import cmds
if not os.path.exists(path):
raise RuntimeError("Path does not exist: %s" % path)
is_sequence = bool(representation["context"].get("frame"))
cmds.setAttr(grid_node + ".useFrameExtension", is_sequence)
# Set file path
cmds.setAttr(grid_node + ".filename", path, type="string")


@ -1,11 +1,21 @@
import os
from openpype.api import get_project_settings
from openpype.pipeline import load
from openpype.pipeline import (
load,
get_representation_path
)
class LoadVDBtoRedShift(load.LoaderPlugin):
"""Load OpenVDB in a Redshift Volume Shape"""
"""Load OpenVDB in a Redshift Volume Shape
Note that the RedshiftVolumeShape is created without a RedshiftVolume
    shader assigned. To get the Redshift volume to render correctly, assign
a RedshiftVolume shader (in the Hypershade) and set the density, scatter
and emission channels to the channel names of the volumes in the VDB file.
"""
families = ["vdbcache"]
representations = ["vdb"]
@ -55,7 +65,7 @@ class LoadVDBtoRedShift(load.LoaderPlugin):
# Root group
label = "{}:{}".format(namespace, name)
root = cmds.group(name=label, empty=True)
root = cmds.createNode("transform", name=label)
settings = get_project_settings(os.environ['AVALON_PROJECT'])
colors = settings['maya']['load']['colors']
@ -74,9 +84,9 @@ class LoadVDBtoRedShift(load.LoaderPlugin):
name="{}RVSShape".format(label),
parent=root)
cmds.setAttr("{}.fileName".format(volume_node),
self.fname,
type="string")
self._set_path(volume_node,
path=self.fname,
representation=context["representation"])
nodes = [root, volume_node]
self[:] = nodes
@ -87,3 +97,56 @@ class LoadVDBtoRedShift(load.LoaderPlugin):
nodes=nodes,
context=context,
loader=self.__class__.__name__)
def update(self, container, representation):
from maya import cmds
path = get_representation_path(representation)
        # Find the RedshiftVolumeShape
members = cmds.sets(container['objectName'], query=True)
grid_nodes = cmds.ls(members, type="RedshiftVolumeShape", long=True)
assert len(grid_nodes) == 1, "This is a bug"
        # Update the RedshiftVolumeShape
self._set_path(grid_nodes[0], path=path, representation=representation)
# Update container representation
cmds.setAttr(container["objectName"] + ".representation",
str(representation["_id"]),
type="string")
def remove(self, container):
from maya import cmds
# Get all members of the avalon container, ensure they are unlocked
# and delete everything
members = cmds.sets(container['objectName'], query=True)
cmds.lockNode(members, lock=False)
cmds.delete([container['objectName']] + members)
# Clean up the namespace
try:
cmds.namespace(removeNamespace=container['namespace'],
deleteNamespaceContent=True)
except RuntimeError:
pass
def switch(self, container, representation):
self.update(container, representation)
@staticmethod
def _set_path(grid_node,
path,
representation):
"""Apply the settings for the VDB path to the RedshiftVolumeShape"""
from maya import cmds
if not os.path.exists(path):
raise RuntimeError("Path does not exist: %s" % path)
is_sequence = bool(representation["context"].get("frame"))
cmds.setAttr(grid_node + ".useFrameExtension", is_sequence)
# Set file path
cmds.setAttr(grid_node + ".fileName", path, type="string")


@ -6,7 +6,8 @@ import openpype.api
from openpype.hosts.maya.api.lib import (
extract_alembic,
suspended_refresh,
maintained_selection
maintained_selection,
iter_visible_nodes_in_range
)
@ -76,6 +77,16 @@ class ExtractAnimation(openpype.api.Extractor):
# Since Maya 2017 alembic supports multiple uv sets - write them.
options["writeUVSets"] = True
if instance.data.get("visibleOnly", False):
            # If we only want to include nodes that are visible in the
            # frame range then we need to do our own check, because
            # Alembic's `visibleOnly` flag does not filter out nodes that
            # are hidden on only some frames: it treats "animated" or
            # "connected" visibilities as always visible.
nodes = list(iter_visible_nodes_in_range(nodes,
start=start,
end=end))
with suspended_refresh():
with maintained_selection():
cmds.select(nodes, noExpand=True)


@ -6,7 +6,8 @@ import openpype.api
from openpype.hosts.maya.api.lib import (
extract_alembic,
suspended_refresh,
maintained_selection
maintained_selection,
iter_visible_nodes_in_range
)
@ -79,6 +80,16 @@ class ExtractAlembic(openpype.api.Extractor):
# Since Maya 2017 alembic supports multiple uv sets - write them.
options["writeUVSets"] = True
if instance.data.get("visibleOnly", False):
            # If we only want to include nodes that are visible in the
            # frame range then we need to do our own check, because
            # Alembic's `visibleOnly` flag does not filter out nodes that
            # are hidden on only some frames: it treats "animated" or
            # "connected" visibilities as always visible.
nodes = list(iter_visible_nodes_in_range(nodes,
start=start,
end=end))
with suspended_refresh():
with maintained_selection():
cmds.select(nodes, noExpand=True)


@ -0,0 +1,51 @@
import pyblish.api
import openpype.api
from openpype.hosts.maya.api.lib import iter_visible_nodes_in_range
import openpype.hosts.maya.api.action
class ValidateAlembicVisibleOnly(pyblish.api.InstancePlugin):
"""Validates at least a single node is visible in frame range.
This validation only validates if the `visibleOnly` flag is enabled
on the instance - otherwise the validation is skipped.
"""
order = openpype.api.ValidateContentsOrder + 0.05
label = "Alembic Visible Only"
hosts = ["maya"]
families = ["pointcache", "animation"]
actions = [openpype.hosts.maya.api.action.SelectInvalidAction]
def process(self, instance):
if not instance.data.get("visibleOnly", False):
self.log.debug("Visible only is disabled. Validation skipped..")
return
invalid = self.get_invalid(instance)
if invalid:
start, end = self.get_frame_range(instance)
raise RuntimeError("No visible nodes found in "
"frame range {}-{}.".format(start, end))
@classmethod
def get_invalid(cls, instance):
if instance.data["family"] == "animation":
# Special behavior to use the nodes in out_SET
nodes = instance.data["out_hierarchy"]
else:
nodes = instance[:]
start, end = cls.get_frame_range(instance)
if not any(iter_visible_nodes_in_range(nodes, start, end)):
# Return the nodes we have considered so the user can identify
# them with the select invalid action
return nodes
@staticmethod
def get_frame_range(instance):
data = instance.data
return data["frameStartHandle"], data["frameEndHandle"]


@ -20,21 +20,23 @@ from openpype.client import (
)
from openpype.api import (
Logger,
Anatomy,
BuildWorkfile,
get_version_from_path,
get_anatomy_settings,
get_workdir_data,
get_asset,
get_current_project_settings,
)
from openpype.tools.utils import host_tools
from openpype.lib.path_tools import HostDirmap
from openpype.settings import get_project_settings
from openpype.settings import (
get_project_settings,
get_anatomy_settings,
)
from openpype.modules import ModulesManager
from openpype.pipeline import (
discover_legacy_creator_plugins,
legacy_io,
Anatomy,
)
from . import gizmo_menu


@ -104,7 +104,10 @@ class ExtractReviewDataMov(openpype.api.Extractor):
self, instance, o_name, o_data["extension"],
multiple_presets)
if "render.farm" in families:
if (
"render.farm" in families or
"prerender.farm" in families
):
if "review" in instance.data["families"]:
instance.data["families"].remove("review")


@ -4,6 +4,7 @@ import nuke
import copy
import pyblish.api
import six
import openpype
from openpype.hosts.nuke.api import (
@ -12,7 +13,6 @@ from openpype.hosts.nuke.api import (
get_view_process_node
)
class ExtractSlateFrame(openpype.api.Extractor):
"""Extracts movie and thumbnail with baked in luts
@ -236,6 +236,48 @@ class ExtractSlateFrame(openpype.api.Extractor):
int(slate_first_frame)
)
# Add file to representation files
# - get write node
write_node = instance.data["writeNode"]
# - evaluate filepaths for first frame and slate frame
first_filename = os.path.basename(
write_node["file"].evaluate(first_frame))
slate_filename = os.path.basename(
write_node["file"].evaluate(slate_first_frame))
# Find matching representation based on first filename
matching_repre = None
is_sequence = None
for repre in instance.data["representations"]:
files = repre["files"]
if (
not isinstance(files, six.string_types)
and first_filename in files
):
matching_repre = repre
is_sequence = True
break
elif files == first_filename:
matching_repre = repre
is_sequence = False
break
if not matching_repre:
self.log.info((
"Matching reresentaion was not found."
" Representation files were not filled with slate."
))
return
# Add frame to matching representation files
if not is_sequence:
matching_repre["files"] = [first_filename, slate_filename]
elif slate_filename not in matching_repre["files"]:
matching_repre["files"].insert(0, slate_filename)
self.log.warning("Added slate frame to representation files")
def add_comment_slate_node(self, instance, node):
comment = instance.context.data.get("comment")


@ -35,6 +35,7 @@ class CollectNukeWrites(pyblish.api.InstancePlugin):
if node is None:
return
instance.data["writeNode"] = node
self.log.debug("checking instance: {}".format(instance))
# Determine defined file type


@ -10,8 +10,8 @@ from openpype.lib import (
from openpype.pipeline import (
registered_host,
legacy_io,
Anatomy,
)
from openpype.api import Anatomy
from openpype.hosts.tvpaint.api import lib, pipeline, plugin


@ -2,7 +2,7 @@ import os
import unreal
from openpype.api import Anatomy
from openpype.pipeline import Anatomy
from openpype.hosts.unreal.api import pipeline


@ -3,7 +3,7 @@ from pathlib import Path
import unreal
from openpype.api import Anatomy
from openpype.pipeline import Anatomy
from openpype.hosts.unreal.api import pipeline
import pyblish.api

File diff suppressed because it is too large


@ -20,10 +20,7 @@ from openpype.settings.constants import (
METADATA_KEYS,
M_DYNAMIC_KEY_LABEL
)
from . import (
PypeLogger,
Anatomy
)
from . import PypeLogger
from .profiles_filtering import filter_profiles
from .local_settings import get_openpype_username
from .avalon_context import (
@ -1305,7 +1302,7 @@ def get_app_environments_for_context(
dict: Environments for passed context and application.
"""
from openpype.pipeline import AvalonMongoDB
from openpype.pipeline import AvalonMongoDB, Anatomy
# Avalon database connection
dbcon = AvalonMongoDB()


@ -14,7 +14,6 @@ from openpype.settings import (
get_project_settings,
get_system_settings
)
from .anatomy import Anatomy
from .profiles_filtering import filter_profiles
from .events import emit_event
from .path_templates import StringTemplate
@ -593,6 +592,7 @@ def get_workdir_with_workdir_data(
))
if not anatomy:
from openpype.pipeline import Anatomy
anatomy = Anatomy(project_name)
if not template_key:
@ -604,7 +604,10 @@ def get_workdir_with_workdir_data(
anatomy_filled = anatomy.format(workdir_data)
# Output is TemplateResult object which contain useful data
return anatomy_filled[template_key]["folder"]
path = anatomy_filled[template_key]["folder"]
if path:
path = os.path.normpath(path)
return path
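
The added `os.path.normpath` call collapses redundant separators and up-level references before the workdir path is returned; a quick illustration:

import os

# On POSIX, both normalize to "proj/work/shots"
print(os.path.normpath("proj/work//shots"))
print(os.path.normpath("proj/work/./shots/../shots"))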
def get_workdir(
@ -635,6 +638,7 @@ def get_workdir(
TemplateResult: Workdir path.
"""
if not anatomy:
from openpype.pipeline import Anatomy
anatomy = Anatomy(project_doc["name"])
workdir_data = get_workdir_data(
@ -747,6 +751,8 @@ def compute_session_changes(
@with_pipeline_io
def get_workdir_from_session(session=None, template_key=None):
from openpype.pipeline import Anatomy
if session is None:
session = legacy_io.Session
project_name = session["AVALON_PROJECT"]
@ -762,7 +768,10 @@ def get_workdir_from_session(session=None, template_key=None):
host_name,
project_name=project_name
)
return anatomy_filled[template_key]["folder"]
path = anatomy_filled[template_key]["folder"]
if path:
path = os.path.normpath(path)
return path
@with_pipeline_io
@ -853,6 +862,8 @@ def create_workfile_doc(asset_doc, task_name, filename, workdir, dbcon=None):
dbcon (AvalonMongoDB): Optionally enter avalon AvalonMongoDB object and
`legacy_io` is used if not entered.
"""
from openpype.pipeline import Anatomy
# Use legacy_io if dbcon is not entered
if not dbcon:
dbcon = legacy_io
@ -1673,6 +1684,7 @@ def _get_task_context_data_for_anatomy(
"""
if anatomy is None:
from openpype.pipeline import Anatomy
anatomy = Anatomy(project_doc["name"])
asset_name = asset_doc["name"]
@ -1741,6 +1753,7 @@ def get_custom_workfile_template_by_context(
"""
if anatomy is None:
from openpype.pipeline import Anatomy
anatomy = Anatomy(project_doc["name"])
# get project, asset, task anatomy context data


@ -9,7 +9,6 @@ import platform
from openpype.client import get_project
from openpype.settings import get_project_settings
from .anatomy import Anatomy
from .profiles_filtering import filter_profiles
log = logging.getLogger(__name__)
@ -227,6 +226,7 @@ def fill_paths(path_list, anatomy):
def create_project_folders(basic_paths, project_name):
from openpype.pipeline import Anatomy
anatomy = Anatomy(project_name)
concat_paths = concatenate_splitted_paths(basic_paths, anatomy)


@ -5,7 +5,12 @@ from bson.objectid import ObjectId
from aiohttp.web_response import Response
from openpype.pipeline import AvalonMongoDB
from openpype.client import (
get_projects,
get_project,
get_assets,
get_asset_by_name,
)
from openpype_modules.webserver.base_routes import RestApiEndpoint
@ -14,19 +19,13 @@ class _RestApiEndpoint(RestApiEndpoint):
self.resource = resource
super(_RestApiEndpoint, self).__init__()
@property
def dbcon(self):
return self.resource.dbcon
class AvalonProjectsEndpoint(_RestApiEndpoint):
async def get(self) -> Response:
output = []
for project_name in self.dbcon.database.collection_names():
project_doc = self.dbcon.database[project_name].find_one({
"type": "project"
})
output.append(project_doc)
output = [
project_doc
for project_doc in get_projects()
]
return Response(
status=200,
body=self.resource.encode(output),
@ -36,9 +35,7 @@ class AvalonProjectsEndpoint(_RestApiEndpoint):
class AvalonProjectEndpoint(_RestApiEndpoint):
async def get(self, project_name) -> Response:
project_doc = self.dbcon.database[project_name].find_one({
"type": "project"
})
project_doc = get_project(project_name)
if project_doc:
return Response(
status=200,
@ -53,9 +50,7 @@ class AvalonProjectEndpoint(_RestApiEndpoint):
class AvalonAssetsEndpoint(_RestApiEndpoint):
async def get(self, project_name) -> Response:
asset_docs = list(self.dbcon.database[project_name].find({
"type": "asset"
}))
asset_docs = list(get_assets(project_name))
return Response(
status=200,
body=self.resource.encode(asset_docs),
@ -65,10 +60,7 @@ class AvalonAssetsEndpoint(_RestApiEndpoint):
class AvalonAssetEndpoint(_RestApiEndpoint):
async def get(self, project_name, asset_name) -> Response:
asset_doc = self.dbcon.database[project_name].find_one({
"type": "asset",
"name": asset_name
})
asset_doc = get_asset_by_name(project_name, asset_name)
if asset_doc:
return Response(
status=200,
@ -88,9 +80,6 @@ class AvalonRestApiResource:
self.module = avalon_module
self.server_manager = server_manager
self.dbcon = AvalonMongoDB()
self.dbcon.install()
self.prefix = "/avalon"
self.endpoint_defs = (


@ -1,16 +1,9 @@
from openpype.api import Logger
from openpype.pipeline import (
legacy_io,
LauncherAction,
)
from openpype.client import get_asset_by_name
from openpype.pipeline import LauncherAction
from openpype_modules.clockify.clockify_api import ClockifyAPI
log = Logger.get_logger(__name__)
class ClockifyStart(LauncherAction):
name = "clockify_start_timer"
label = "Clockify - Start Timer"
icon = "clockify_icon"
@ -24,20 +17,19 @@ class ClockifyStart(LauncherAction):
return False
def process(self, session, **kwargs):
project_name = session['AVALON_PROJECT']
asset_name = session['AVALON_ASSET']
task_name = session['AVALON_TASK']
project_name = session["AVALON_PROJECT"]
asset_name = session["AVALON_ASSET"]
task_name = session["AVALON_TASK"]
description = asset_name
asset = legacy_io.find_one({
'type': 'asset',
'name': asset_name
})
if asset is not None:
desc_items = asset.get('data', {}).get('parents', [])
asset_doc = get_asset_by_name(
project_name, asset_name, fields=["data.parents"]
)
if asset_doc is not None:
desc_items = asset_doc.get("data", {}).get("parents", [])
desc_items.append(asset_name)
desc_items.append(task_name)
description = '/'.join(desc_items)
description = "/".join(desc_items)
project_id = self.clockapi.get_project_id(project_name)
tag_ids = []


@ -1,11 +1,6 @@
from openpype.client import get_projects, get_project
from openpype_modules.clockify.clockify_api import ClockifyAPI
from openpype.api import Logger
from openpype.pipeline import (
legacy_io,
LauncherAction,
)
log = Logger.get_logger(__name__)
from openpype.pipeline import LauncherAction
class ClockifySync(LauncherAction):
@ -22,39 +17,36 @@ class ClockifySync(LauncherAction):
return self.have_permissions
def process(self, session, **kwargs):
project_name = session.get('AVALON_PROJECT', None)
project_name = session.get("AVALON_PROJECT") or ""
projects_to_sync = []
if project_name.strip() == '' or project_name is None:
for project in legacy_io.projects():
projects_to_sync.append(project)
if project_name.strip():
projects_to_sync = [get_project(project_name)]
else:
project = legacy_io.find_one({'type': 'project'})
projects_to_sync.append(project)
projects_to_sync = get_projects()
projects_info = {}
for project in projects_to_sync:
task_types = project['config']['tasks'].keys()
projects_info[project['name']] = task_types
task_types = project["config"]["tasks"].keys()
projects_info[project["name"]] = task_types
clockify_projects = self.clockapi.get_projects()
for project_name, task_types in projects_info.items():
if project_name not in clockify_projects:
response = self.clockapi.add_project(project_name)
if 'id' not in response:
self.log.error('Project {} can\'t be created'.format(
project_name
))
continue
project_id = response['id']
else:
project_id = clockify_projects[project_name]
if project_name in clockify_projects:
continue
response = self.clockapi.add_project(project_name)
if "id" not in response:
self.log.error("Project {} can't be created".format(
project_name
))
continue
clockify_workspace_tags = self.clockapi.get_tags()
for task_type in task_types:
if task_type not in clockify_workspace_tags:
response = self.clockapi.add_tag(task_type)
if 'id' not in response:
if "id" not in response:
self.log.error('Task {} can\'t be created'.format(
task_type
))


@ -710,7 +710,9 @@ class MayaSubmitDeadline(pyblish.api.InstancePlugin):
new_payload["JobInfo"].update(tiles_data["JobInfo"])
new_payload["PluginInfo"].update(tiles_data["PluginInfo"])
job_hash = hashlib.sha256("{}_{}".format(file_index, file))
self.log.info("hashing {} - {}".format(file_index, file))
job_hash = hashlib.sha256(
("{}_{}".format(file_index, file)).encode("utf-8"))
frame_jobs[frame] = job_hash.hexdigest()
new_payload["JobInfo"]["ExtraInfo0"] = job_hash.hexdigest()
new_payload["JobInfo"]["ExtraInfo1"] = file


@ -1045,7 +1045,7 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
get publish_path
Args:
anatomy (pype.lib.anatomy.Anatomy):
anatomy (openpype.pipeline.anatomy.Anatomy):
template_data (dict): pre-calculated collected data for process
asset (string): asset name
subset (string): subset name (actually group name of subset)


@ -2,11 +2,11 @@ import re
import subprocess
from openpype.client import get_asset_by_id, get_asset_by_name
from openpype.settings import get_project_settings
from openpype.pipeline import Anatomy
from openpype_modules.ftrack.lib import BaseEvent
from openpype_modules.ftrack.lib.avalon_sync import CUST_ATTR_ID_KEY
from openpype.api import Anatomy, get_project_settings
class UserAssigmentEvent(BaseEvent):
"""


@ -1,7 +1,7 @@
import os
import collections
import copy
from openpype.api import Anatomy
from openpype.pipeline import Anatomy
from openpype_modules.ftrack.lib import BaseAction, statics_icon


@ -11,9 +11,8 @@ from openpype.client import (
get_versions,
get_representations
)
from openpype.api import Anatomy
from openpype.lib import StringTemplate, TemplateUnsolved
from openpype.pipeline import AvalonMongoDB
from openpype.pipeline import AvalonMongoDB, Anatomy
from openpype_modules.ftrack.lib import BaseAction, statics_icon


@ -10,12 +10,13 @@ from openpype.client import (
get_versions,
get_representations
)
from openpype.api import Anatomy, config
from openpype.pipeline import Anatomy
from openpype_modules.ftrack.lib import BaseAction, statics_icon
from openpype_modules.ftrack.lib.avalon_sync import CUST_ATTR_ID_KEY
from openpype_modules.ftrack.lib.custom_attributes import (
query_custom_attributes
)
from openpype.lib import config
from openpype.lib.delivery import (
path_from_representation,
get_format_dict,


@ -11,13 +11,13 @@ from openpype.client import (
get_project,
get_assets,
)
from openpype.api import get_project_settings
from openpype.settings import get_project_settings
from openpype.lib import (
get_workfile_template_key,
get_workdir_data,
Anatomy,
StringTemplate,
)
from openpype.pipeline import Anatomy
from openpype_modules.ftrack.lib import BaseAction, statics_icon
from openpype_modules.ftrack.lib.avalon_sync import create_chunks


@ -11,10 +11,10 @@ from openpype.client import (
get_version_by_name,
get_representation_by_name
)
from openpype.api import Anatomy
from openpype.pipeline import (
get_representation_path,
AvalonMongoDB,
Anatomy,
)
from openpype_modules.ftrack.lib import BaseAction, statics_icon


@ -14,8 +14,7 @@ from openpype.client import (
get_representations
)
from openpype_modules.ftrack.lib import BaseAction, statics_icon
from openpype.api import Anatomy
from openpype.pipeline import AvalonMongoDB
from openpype.pipeline import AvalonMongoDB, Anatomy
from openpype_modules.ftrack.lib.avalon_sync import CUST_ATTR_ID_KEY


@ -1,3 +1,4 @@
import html
from Qt import QtCore, QtWidgets
import qtawesome
from .models import LogModel, LogsFilterProxy
@ -286,7 +287,7 @@ class OutputWidget(QtWidgets.QWidget):
if level == "debug":
line_f = (
"<font color=\"Yellow\"> -"
" <font color=\"Lime\">{{ {loggerName} }}: ["
" <font color=\"Lime\">{{ {logger_name} }}: ["
" <font color=\"White\">{message}"
" <font color=\"Lime\">]"
)
@ -299,7 +300,7 @@ class OutputWidget(QtWidgets.QWidget):
elif level == "warning":
line_f = (
"<font color=\"Yellow\">*** WRN:"
" <font color=\"Lime\"> >>> {{ {loggerName} }}: ["
" <font color=\"Lime\"> >>> {{ {logger_name} }}: ["
" <font color=\"White\">{message}"
" <font color=\"Lime\">]"
)
@ -307,16 +308,25 @@ class OutputWidget(QtWidgets.QWidget):
line_f = (
"<font color=\"Red\">!!! ERR:"
" <font color=\"White\">{timestamp}"
" <font color=\"Lime\">>>> {{ {loggerName} }}: ["
" <font color=\"Lime\">>>> {{ {logger_name} }}: ["
" <font color=\"White\">{message}"
" <font color=\"Lime\">]"
)
logger_name = log["loggerName"]
timestamp = ""
if not show_timecode:
timestamp = log["timestamp"]
message = log["message"]
exc = log.get("exception")
if exc:
log["message"] = exc["message"]
message = exc["message"]
line = line_f.format(**log)
line = line_f.format(
message=html.escape(message),
logger_name=logger_name,
timestamp=timestamp
)
if show_timecode:
timestamp = log["timestamp"]


@ -4,10 +4,11 @@ import shutil
import threading
import time
from openpype.api import Logger, Anatomy
from openpype.api import Logger
from openpype.pipeline import Anatomy
from .abstract_provider import AbstractProvider
log = Logger().get_logger("SyncServer")
log = Logger.get_logger("SyncServer")
class LocalDriveHandler(AbstractProvider):


@ -9,14 +9,12 @@ from collections import deque, defaultdict
from openpype.modules import OpenPypeModule
from openpype_interfaces import ITrayModule
from openpype.api import (
Anatomy,
from openpype.settings import (
get_project_settings,
get_system_settings,
get_local_site_id
)
from openpype.lib import PypeLogger
from openpype.pipeline import AvalonMongoDB
from openpype.lib import PypeLogger, get_local_site_id
from openpype.pipeline import AvalonMongoDB, Anatomy
from openpype.settings.lib import (
get_default_anatomy_settings,
get_anatomy_settings
@ -28,7 +26,7 @@ from .providers import lib
from .utils import time_function, SyncStatus, SiteAlreadyPresentError
log = PypeLogger().get_logger("SyncServer")
log = PypeLogger.get_logger("SyncServer")
class SyncServerModule(OpenPypeModule, ITrayModule):


@ -6,6 +6,7 @@ from .constants import (
from .mongodb import (
AvalonMongoDB,
)
from .anatomy import Anatomy
from .create import (
BaseCreator,
@ -96,6 +97,9 @@ __all__ = (
# --- MongoDB ---
"AvalonMongoDB",
# --- Anatomy ---
"Anatomy",
# --- Create ---
"BaseCreator",
"Creator",

openpype/pipeline/anatomy.py (new file, 1257 lines)

File diff suppressed because it is too large


@ -14,11 +14,8 @@ from pyblish.lib import MessageHandler
import openpype
from openpype.modules import load_modules, ModulesManager
from openpype.settings import get_project_settings
from openpype.lib import (
Anatomy,
filter_pyblish_plugins,
)
from openpype.lib import filter_pyblish_plugins
from .anatomy import Anatomy
from . import (
legacy_io,
register_loader_plugin_path,


@ -9,10 +9,10 @@ import numbers
import six
from bson.objectid import ObjectId
from openpype.lib import Anatomy
from openpype.pipeline import (
schema,
legacy_io,
Anatomy,
)
log = logging.getLogger(__name__)


@ -9,9 +9,8 @@ import qargparse
from Qt import QtWidgets, QtCore
from openpype import style
from openpype.pipeline import load, AvalonMongoDB
from openpype.pipeline import load, AvalonMongoDB, Anatomy
from openpype.lib import StringTemplate
from openpype.api import Anatomy
class DeleteOldVersions(load.SubsetLoaderPlugin):


@ -3,8 +3,8 @@ from collections import defaultdict
from Qt import QtWidgets, QtCore, QtGui
from openpype.pipeline import load, AvalonMongoDB
from openpype.api import Anatomy, config
from openpype.lib import config
from openpype.pipeline import load, AvalonMongoDB, Anatomy
from openpype import resources, style
from openpype.lib.delivery import (


@ -4,11 +4,11 @@ Requires:
os.environ -> AVALON_PROJECT
Provides:
context -> anatomy (pype.api.Anatomy)
context -> anatomy (openpype.pipeline.anatomy.Anatomy)
"""
import os
from openpype.api import Anatomy
import pyblish.api
from openpype.pipeline import Anatomy
class CollectAnatomyObject(pyblish.api.ContextPlugin):


@ -19,7 +19,7 @@ class ExtractThumbnail(pyblish.api.InstancePlugin):
label = "Extract Thumbnail"
order = pyblish.api.ExtractorOrder
families = [
"imagesequence", "render", "render2d",
"imagesequence", "render", "render2d", "prerender",
"source", "plate", "take"
]
hosts = ["shell", "fusion", "resolve"]


@ -1,4 +1,20 @@
{
"load": {
"ImageSequenceLoader": {
"family": [
"shot",
"render",
"image",
"plate",
"reference"
],
"representations": [
"jpeg",
"png",
"jpg"
]
}
},
"publish": {
"CollectPalettes": {
"allowed_tasks": [


@ -5,6 +5,34 @@
"label": "Harmony",
"is_file": true,
"children": [
{
"type": "dict",
"collapsible": true,
"key": "load",
"label": "Loader plugins",
"children": [
{
"type": "dict",
"collapsible": true,
"key": "ImageSequenceLoader",
"label": "Load Image Sequence",
"children": [
{
"type": "list",
"key": "family",
"label": "Families",
"object_type": "text"
},
{
"type": "list",
"key": "representations",
"label": "Representations",
"object_type": "text"
}
]
}
]
},
{
"type": "dict",
"collapsible": true,


@ -17,8 +17,7 @@ from openpype.client import (
get_thumbnail_id_from_source,
get_thumbnail,
)
from openpype.api import Anatomy
from openpype.pipeline import HeroVersionType
from openpype.pipeline import HeroVersionType, Anatomy
from openpype.pipeline.thumbnail import get_thumbnail_binary
from openpype.pipeline.load import (
discover_loader_plugins,


@ -6,8 +6,7 @@ import speedcopy
from openpype.client import get_project, get_asset_by_name
from openpype.lib import Terminal
from openpype.api import Anatomy
from openpype.pipeline import legacy_io
from openpype.pipeline import legacy_io, Anatomy
t = Terminal()


@ -11,7 +11,6 @@ from openpype.tools.utils import PlaceholderLineEdit
from openpype.tools.utils.delegates import PrettyTimeDelegate
from openpype.lib import (
emit_event,
Anatomy,
get_workfile_template_key,
create_workdir_extra_folders,
)
@ -22,6 +21,7 @@ from openpype.lib.avalon_context import (
from openpype.pipeline import (
registered_host,
legacy_io,
Anatomy,
)
from .model import (
WorkAreaFilesModel,


@ -105,6 +105,10 @@ save it in a secure way to your system's keyring - on Windows it is **Credential Manager**
This can also be set beforehand with the environment variable `OPENPYPE_MONGO`. If set, it takes precedence
over the one stored in the keyring.
:::tip Minimal permissions for DB user
- `readWrite` role on the `openpype` and `avalon` databases
- `find` permission on `openpype`, `avalon` and `local`
:::
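
A minimal sketch of creating such a user with `pymongo` (connection string, user name and password are placeholders; the `read` role on `local` provides the `find` permission):

```python
from pymongo import MongoClient

client = MongoClient("mongodb://admin:admin-pass@localhost:27017")
client["admin"].command(
    "createUser", "openpype-user", pwd="strong-password",
    roles=[
        {"role": "readWrite", "db": "openpype"},
        {"role": "readWrite", "db": "avalon"},
        {"role": "read", "db": "local"},
    ],
)
```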
#### Check for OpenPype version path
When the connection to MongoDB is made, OpenPype will get various settings from there - one of them is the
directory location where OpenPype versions are stored. If this directory exists OpenPype tries to