Merge branch 'develop' into extract_burnin_fix_env_pythonhome

Jakub Trllo 2023-03-30 19:59:38 +02:00 committed by GitHub
commit ac4ed94a97
13 changed files with 354 additions and 37 deletions

.github/pr-branch-labeler.yml (new file, 15 additions)
View file

@@ -0,0 +1,15 @@
# Apply label "feature" if head matches "feature/*"
'type: feature':
head: "feature/*"
# Apply label "feature" if head matches "feature/*"
'type: enhancement':
head: "enhancement/*"
# Apply label "bugfix" if head matches one of "bugfix/*" or "hotfix/*"
'type: bug':
head: ["bugfix/*", "hotfix/*"]
# Apply label "release" if base matches "release/*"
'Bump Minor':
base: "release/next-minor"

.github/pr-glob-labeler.yml (new file, 102 additions)
View file

@@ -0,0 +1,102 @@
# Add type: unittest label if any changes in tests folders
'type: unittest':
- '*/*tests*/**/*'
# any changes in documentation structure
'type: documentation':
- '*/**/*website*/**/*'
- '*/**/*docs*/**/*'
# hosts triage
'host: Nuke':
- '*/**/*nuke*'
- '*/**/*nuke*/**/*'
'host: Photoshop':
- '*/**/*photoshop*'
- '*/**/*photoshop*/**/*'
'host: Harmony':
- '*/**/*harmony*'
- '*/**/*harmony*/**/*'
'host: UE':
- '*/**/*unreal*'
- '*/**/*unreal*/**/*'
'host: Houdini':
- '*/**/*houdini*'
- '*/**/*houdini*/**/*'
'host: Maya':
- '*/**/*maya*'
- '*/**/*maya*/**/*'
'host: Resolve':
- '*/**/*resolve*'
- '*/**/*resolve*/**/*'
'host: Blender':
- '*/**/*blender*'
- '*/**/*blender*/**/*'
'host: Hiero':
- '*/**/*hiero*'
- '*/**/*hiero*/**/*'
'host: Fusion':
- '*/**/*fusion*'
- '*/**/*fusion*/**/*'
'host: Flame':
- '*/**/*flame*'
- '*/**/*flame*/**/*'
'host: TrayPublisher':
- '*/**/*traypublisher*'
- '*/**/*traypublisher*/**/*'
'host: 3dsmax':
- '*/**/*max*'
- '*/**/*max*/**/*'
'host: TV Paint':
- '*/**/*tvpaint*'
- '*/**/*tvpaint*/**/*'
'host: CelAction':
- '*/**/*celaction*'
- '*/**/*celaction*/**/*'
'host: After Effects':
- '*/**/*aftereffects*'
- '*/**/*aftereffects*/**/*'
'host: Substance Painter':
- '*/**/*substancepainter*'
- '*/**/*substancepainter*/**/*'
# modules triage
'module: Deadline':
- '*/**/*deadline*'
- '*/**/*deadline*/**/*'
'module: RoyalRender':
- '*/**/*royalrender*'
- '*/**/*royalrender*/**/*'
'module: Sitesync':
- '*/**/*sync_server*'
- '*/**/*sync_server*/**/*'
'module: Ftrack':
- '*/**/*ftrack*'
- '*/**/*ftrack*/**/*'
'module: Shotgrid':
- '*/**/*shotgrid*'
- '*/**/*shotgrid*/**/*'
'module: Kitsu':
- '*/**/*kitsu*'
- '*/**/*kitsu*/**/*'
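
The same idea in Python terms, as a hedged illustration only: a label is suggested when any changed file path matches one of its globs. The standard fnmatch module is used as a rough stand-in and does not reproduce the minimatch '**' semantics the labeler action actually uses; the sample path and the subset of labels below are just examples.

# Illustration of glob-based labeling; fnmatch only approximates the real matcher.
from fnmatch import fnmatch

LABEL_GLOBS = {
    "host: Maya": ["*/**/*maya*", "*/**/*maya*/**/*"],
    "module: Deadline": ["*/**/*deadline*", "*/**/*deadline*/**/*"],
    "type: unittest": ["*/*tests*/**/*"],
}

def labels_for_paths(changed_paths):
    """Collect every label with at least one glob matching a changed path."""
    return sorted(
        label
        for label, globs in LABEL_GLOBS.items()
        if any(fnmatch(path, glob) for path in changed_paths for glob in globs)
    )

print(labels_for_paths(["openpype/hosts/maya/api/lib.py"]))  # ['host: Maya']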

View file

@@ -1,8 +1,8 @@
name: project-actions
on:
pull_request:
types: [review_requested]
pull_request_target:
types: [opened, synchronize, assigned, review_requested]
pull_request_review:
types: [submitted]
@@ -20,3 +20,53 @@ jobs:
project_id: 11
resource_node_id: ${{ github.event.pull_request.node_id }}
status_value: Change Requested
size-label:
name: pr_size_label
runs-on: ubuntu-latest
if: |
${{(github.event_name == 'pull_request' && github.event.action == 'assigned')
|| (github.event_name == 'pull_request' && github.event.action == 'opened')}}
steps:
- name: Add size label
uses: "pascalgn/size-label-action@v0.4.3"
env:
GITHUB_TOKEN: "${{ secrets.YNPUT_BOT_TOKEN }}"
IGNORED: ".gitignore\n*.md\n*.json"
with:
sizes: >
{
"0": "XS",
"100": "S",
"500": "M",
"1000": "L",
"1500": "XL",
"2500": "XXL"
}
label_prs_branch:
name: pr_branch_label
runs-on: ubuntu-latest
if: |
${{(github.event_name == 'pull_request' && github.event.action == 'assigned')
|| (github.event_name == 'pull_request' && github.event.action == 'opened')}}
steps:
- name: Label PRs - Branch name detection
uses: ffittschen/pr-branch-labeler@v1
with:
repo-token: ${{ secrets.YNPUT_BOT_TOKEN }}
label_prs_glob:
name: pr_glob_label
runs-on: ubuntu-latest
if: |
${{(github.event_name == 'pull_request' && github.event.action == 'assigned')
|| (github.event_name == 'pull_request' && github.event.action == 'opened')}}
steps:
- name: Label PRs - Glob detection
uses: actions/labeler@v4.0.3
with:
repo-token: ${{ secrets.YNPUT_BOT_TOKEN }}
configuration-path: ".github/pr-glob-labeler.yml"
sync-labels: false
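
The size-label job turns the pull request's changed line count into one of the labels configured above. A minimal sketch of that mapping, assuming the action counts added plus deleted lines (minus IGNORED paths) and applies the label of the largest threshold the count reaches; the exact label text is up to the action itself.

# Hedged sketch of the threshold-to-label mapping configured in 'sizes'.
SIZES = {0: "XS", 100: "S", 500: "M", 1000: "L", 1500: "XL", 2500: "XXL"}

def size_label(changed_lines):
    """Return the size name for a given number of changed lines."""
    label = None
    for threshold in sorted(SIZES):
        if changed_lines >= threshold:
            label = SIZES[threshold]
    return label

print(size_label(354 + 37))  # the 391 lines changed in this commit map to "S"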

View file

@@ -476,6 +476,25 @@ function start() {
action.triggered.connect(self.onSubsetManage);
}
/**
* Set scene settings from DB to the scene
*/
self.onSetSceneSettings = function() {
app.avalonClient.send(
{
"module": "openpype.hosts.harmony.api",
"method": "ensure_scene_settings",
"args": []
},
false
);
};
// add Set Scene Settings
if (app.avalonMenu == null) {
action = menu.addAction('Set Scene Settings...');
action.triggered.connect(self.onSetSceneSettings);
}
/**
* Show Experimental dialog
*/

View file

@@ -142,7 +142,7 @@ def application_launch(event):
harmony.send({"script": script})
inject_avalon_js()
ensure_scene_settings()
# ensure_scene_settings()
check_inventory()

View file

@@ -25,8 +25,9 @@ class ExtractRender(pyblish.api.InstancePlugin):
application_path = instance.context.data.get("applicationPath")
scene_path = instance.context.data.get("scenePath")
frame_rate = instance.context.data.get("frameRate")
frame_start = instance.context.data.get("frameStart")
frame_end = instance.context.data.get("frameEnd")
# real value from timeline
frame_start = instance.context.data.get("frameStartHandle")
frame_end = instance.context.data.get("frameEndHandle")
audio_path = instance.context.data.get("audioPath")
if audio_path and os.path.exists(audio_path):
@@ -55,9 +56,13 @@ class ExtractRender(pyblish.api.InstancePlugin):
# Execute rendering. Ignoring error cause Harmony returns error code
# always.
self.log.info(f"running [ {application_path} -batch {scene_path}")
args = [application_path, "-batch",
"-frames", str(frame_start), str(frame_end),
"-scene", scene_path]
self.log.info(f"running [ {application_path} {' '.join(args)}")
proc = subprocess.Popen(
[application_path, "-batch", scene_path],
args,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
stdin=subprocess.PIPE
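
With the new arguments the Harmony batch render is limited to the timeline range collected from frameStartHandle/frameEndHandle. A small sketch with made-up paths and frame values, only to show where the -frames range lands on the command line:

# Illustrative values; application_path and scene_path are assumptions.
application_path = "/opt/toonboom/harmony/HarmonyPremium"
scene_path = "/projects/show/ep01/sc010/sc010.xstage"
frame_start, frame_end = 1, 110

args = [application_path, "-batch",
        "-frames", str(frame_start), str(frame_end),
        "-scene", scene_path]
print(" ".join(args))
# /opt/toonboom/harmony/HarmonyPremium -batch -frames 1 110 -scene /projects/show/ep01/sc010/sc010.xstage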

View file

@@ -60,7 +60,8 @@ class ValidateSceneSettings(pyblish.api.InstancePlugin):
# which is available on 'context.data["assetEntity"]'
# - the same approach can be used in 'ValidateSceneSettingsRepair'
expected_settings = harmony.get_asset_settings()
self.log.info("scene settings from DB:".format(expected_settings))
self.log.info("scene settings from DB:{}".format(expected_settings))
expected_settings.pop("entityType") # not useful for the validation
expected_settings = _update_frames(dict.copy(expected_settings))
expected_settings["frameEndHandle"] = expected_settings["frameEnd"] +\
@@ -68,21 +69,32 @@ class ValidateSceneSettings(pyblish.api.InstancePlugin):
if (any(re.search(pattern, os.getenv('AVALON_TASK'))
for pattern in self.skip_resolution_check)):
self.log.info("Skipping resolution check because of "
"task name and pattern {}".format(
self.skip_resolution_check))
expected_settings.pop("resolutionWidth")
expected_settings.pop("resolutionHeight")
entity_type = expected_settings.get("entityType")
if (any(re.search(pattern, entity_type)
if (any(re.search(pattern, os.getenv('AVALON_TASK'))
for pattern in self.skip_timelines_check)):
self.log.info("Skipping frames check because of "
"task name and pattern {}".format(
self.skip_timelines_check))
expected_settings.pop('frameStart', None)
expected_settings.pop('frameEnd', None)
expected_settings.pop("entityType") # not useful after the check
expected_settings.pop('frameStartHandle', None)
expected_settings.pop('frameEndHandle', None)
asset_name = instance.context.data['anatomyData']['asset']
if any(re.search(pattern, asset_name)
for pattern in self.frame_check_filter):
expected_settings.pop("frameEnd")
self.log.info("Skipping frames check because of "
"task name and pattern {}".format(
self.frame_check_filter))
expected_settings.pop('frameStart', None)
expected_settings.pop('frameEnd', None)
expected_settings.pop('frameStartHandle', None)
expected_settings.pop('frameEndHandle', None)
# handle case where ftrack uses only two decimal places
# 23.976023976023978 vs. 23.98
@@ -99,6 +111,7 @@ class ValidateSceneSettings(pyblish.api.InstancePlugin):
"frameEnd": instance.context.data["frameEnd"],
"handleStart": instance.context.data.get("handleStart"),
"handleEnd": instance.context.data.get("handleEnd"),
"frameStartHandle": instance.context.data.get("frameStartHandle"),
"frameEndHandle": instance.context.data.get("frameEndHandle"),
"resolutionWidth": instance.context.data.get("resolutionWidth"),
"resolutionHeight": instance.context.data.get("resolutionHeight"),

View file

@@ -1,7 +1,6 @@
import os
import hou
from openpype.pipeline import legacy_io
import pyblish.api
@@ -11,7 +10,7 @@ class CollectHoudiniCurrentFile(pyblish.api.InstancePlugin):
order = pyblish.api.CollectorOrder - 0.01
label = "Houdini Current File"
hosts = ["houdini"]
family = ["workfile"]
families = ["workfile"]
def process(self, instance):
"""Inject the current working file"""
@@ -21,7 +20,7 @@ class CollectHoudiniCurrentFile(pyblish.api.InstancePlugin):
# By default, Houdini will even point a new scene to a path.
# However if the file is not saved at all and does not exist,
# we assume the user never set it.
filepath = ""
current_file = ""
elif os.path.basename(current_file) == "untitled.hip":
# Due to even a new file being called 'untitled.hip' we are unable

View file

@@ -2478,8 +2478,8 @@ def load_capture_preset(data=None):
float(value[2]) / 255
]
disp_options[key] = value
else:
disp_options['displayGradient'] = True
elif key == "displayGradient":
disp_options[key] = value
options['display_options'] = disp_options

View file

@@ -1,4 +1,6 @@
import os
import difflib
import contextlib
from maya import cmds
from openpype.settings import get_project_settings
@@ -8,7 +10,82 @@ from openpype.pipeline.create import (
get_legacy_creator_by_name,
)
import openpype.hosts.maya.api.plugin
from openpype.hosts.maya.api.lib import maintained_selection
from openpype.hosts.maya.api.lib import (
maintained_selection,
get_container_members
)
@contextlib.contextmanager
def preserve_modelpanel_cameras(container, log=None):
"""Preserve camera members of container in the modelPanels.
This is used to ensure a camera remains in the modelPanels after updating
to a new version.
"""
# Get the modelPanels that used the old camera
members = get_container_members(container)
old_cameras = set(cmds.ls(members, type="camera", long=True))
if not old_cameras:
# No need to manage anything
yield
return
panel_cameras = {}
for panel in cmds.getPanel(type="modelPanel"):
cam = cmds.ls(cmds.modelPanel(panel, query=True, camera=True),
long=True)
# Often but not always maya returns the transform from the
# modelPanel as opposed to the camera shape, so we convert it
# to explicitly be the camera shape
if cmds.nodeType(cam) != "camera":
cam = cmds.listRelatives(cam,
children=True,
fullPath=True,
type="camera")[0]
if cam in old_cameras:
panel_cameras[panel] = cam
if not panel_cameras:
# No need to manage anything
yield
return
try:
yield
finally:
new_members = get_container_members(container)
new_cameras = set(cmds.ls(new_members, type="camera", long=True))
if not new_cameras:
return
for panel, cam_name in panel_cameras.items():
new_camera = None
if cam_name in new_cameras:
new_camera = cam_name
elif len(new_cameras) == 1:
new_camera = next(iter(new_cameras))
else:
# Multiple cameras in the updated container but not an exact
# match detected by name. Find the closest match
matches = difflib.get_close_matches(word=cam_name,
possibilities=new_cameras,
n=1)
if matches:
new_camera = matches[0] # best match
if log:
log.info("Camera in '{}' restored with "
"closest match camera: {} (before: {})"
.format(panel, new_camera, cam_name))
if not new_camera:
# Unable to find the camera to re-apply in the modelpanel
continue
cmds.modelPanel(panel, edit=True, camera=new_camera)
class ReferenceLoader(openpype.hosts.maya.api.plugin.ReferenceLoader):
@@ -68,6 +145,9 @@ class ReferenceLoader(openpype.hosts.maya.api.plugin.ReferenceLoader):
new_nodes = (list(set(nodes) - set(shapes)))
# if there are cameras, try to lock their transforms
self._lock_camera_transforms(new_nodes)
current_namespace = pm.namespaceInfo(currentNamespace=True)
if current_namespace != ":":
@@ -136,6 +216,15 @@ class ReferenceLoader(openpype.hosts.maya.api.plugin.ReferenceLoader):
def switch(self, container, representation):
self.update(container, representation)
def update(self, container, representation):
with preserve_modelpanel_cameras(container, log=self.log):
super(ReferenceLoader, self).update(container, representation)
# We also want to lock camera transforms on any new cameras in the
# reference or for a camera which might have changed names.
members = get_container_members(container)
self._lock_camera_transforms(members)
def _post_process_rig(self, name, namespace, context, options):
output = next((node for node in self if
@@ -168,3 +257,18 @@ class ReferenceLoader(openpype.hosts.maya.api.plugin.ReferenceLoader):
options={"useSelection": True},
data={"dependencies": dependency}
)
def _lock_camera_transforms(self, nodes):
cameras = cmds.ls(nodes, type="camera")
if not cameras:
return
# Check the Maya version, lockTransform has been introduced since
# Maya 2016.5 Ext 2
version = int(cmds.about(version=True))
if version >= 2016:
for camera in cameras:
cmds.camera(camera, edit=True, lockTransform=True)
else:
self.log.warning("This version of Maya does not support locking of"
" transforms of cameras.")

View file

@@ -388,22 +388,27 @@ class IntegrateHeroVersion(pyblish.api.InstancePlugin):
old_repre, repre)
# Keep previously synchronized sites up-to-date
# by comparing old and new sites and adding old sites
# if missing in new ones
old_repre_files_sites = [
f.get("sites", []) for f in old_repre.get("files", [])
]
for i, file in enumerate(repre.get("files", [])):
repre_sites_names = {
s["name"] for s in file.get("sites", [])
# by comparing old and new sites and adding old sites
# if missing in new ones
# Prepare all sites from all files in old representation
old_site_names = set()
for file_info in old_repre.get("files", []):
old_site_names |= {
site["name"]
for site in file_info["sites"]
}
for site in old_repre_files_sites[i]:
if site["name"] not in repre_sites_names:
# Pop the date to tag for sync
site.pop("created_dt", None)
file["sites"].append(site)
update_data["files"][i] = file
for file_info in update_data.get("files", []):
file_info.setdefault("sites", [])
file_info_site_names = {
site["name"]
for site in file_info["sites"]
}
for site_name in old_site_names:
if site_name not in file_info_site_names:
file_info["sites"].append({
"name": site_name
})
op_session.update_entity(
project_name,
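
The rewritten site merge can be exercised on its own; the representation documents below are simplified, made-up stand-ins, but the merging lines mirror the new code above. Old sites missing from the new files are appended by name only, so they are picked up for synchronization again.

# Standalone walk-through of the site merging with dummy data.
old_repre = {
    "files": [
        {"path": "v001/render.exr", "sites": [{"name": "studio"}, {"name": "gdrive"}]}
    ]
}
update_data = {
    "files": [
        {"path": "v002/render.exr", "sites": [{"name": "studio"}]}
    ]
}

old_site_names = set()
for file_info in old_repre.get("files", []):
    old_site_names |= {site["name"] for site in file_info["sites"]}

for file_info in update_data.get("files", []):
    file_info.setdefault("sites", [])
    file_info_site_names = {site["name"] for site in file_info["sites"]}
    for site_name in old_site_names:
        if site_name not in file_info_site_names:
            file_info["sites"].append({"name": site_name})

print([site["name"] for site in update_data["files"][0]["sites"]])  # ['studio', 'gdrive']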

View file

@@ -795,6 +795,7 @@
"quality": 95
},
"Display Options": {
"override_display": true,
"background": [
125,
125,
@@ -813,7 +814,7 @@
125,
255
],
"override_display": true
"displayGradient": true
},
"Generic": {
"isolate_view": true,

View file

@@ -48,7 +48,11 @@
"type": "label",
"label": "<b>Display Options</b>"
},
{
"type": "boolean",
"key": "override_display",
"label": "Override display options"
},
{
"type": "color",
"key": "background",
@@ -66,8 +70,8 @@
},
{
"type": "boolean",
"key": "override_display",
"label": "Override display options"
"key": "displayGradient",
"label": "Display background gradient"
}
]
},