Merge branch 'develop' into feature/AY-971_Use-custom-staging-dir-functions

Jakub Jezek 2024-12-02 15:38:22 +01:00
commit 76bd193213
No known key found for this signature in database
GPG key ID: 06DBD609ADF27FD9
10 changed files with 115 additions and 35 deletions

View file

@@ -1,15 +1,48 @@
name: 🔸Auto assign pr
on:
  workflow_dispatch:
    inputs:
      pr_number:
        type: string
        description: "Run workflow for this PR number"
        required: true
      project_id:
        type: string
        description: "Github Project Number"
        required: true
        default: "16"
  pull_request:
    types:
      - opened

env:
  GH_TOKEN: ${{ github.token }}

jobs:
  get-pr-repo:
    runs-on: ubuntu-latest
    outputs:
      pr_repo_name: ${{ steps.get-repo-name.outputs.repo_name || github.event.pull_request.head.repo.full_name }}
      # INFO `github.event.pull_request.head.repo.full_name` is not available on manually triggered (dispatched) runs
    steps:
      - name: Get PR repo name
        if: ${{ github.event_name == 'workflow_dispatch' }}
        id: get-repo-name
        run: |
          repo_name=$(gh pr view ${{ inputs.pr_number }} --json headRepository,headRepositoryOwner --repo ${{ github.repository }} | jq -r '.headRepositoryOwner.login + "/" + .headRepository.name')
          echo "repo_name=$repo_name" >> $GITHUB_OUTPUT

  auto-assign-pr:
    uses: ynput/ops-repo-automation/.github/workflows/pr_to_project.yml@develop
    needs:
      - get-pr-repo
    if: ${{ needs.get-pr-repo.outputs.pr_repo_name == github.repository }}
    uses: ynput/ops-repo-automation/.github/workflows/pr_to_project.yml@main
    with:
      repo: "${{ github.repository }}"
      project_id: 16
      pull_request_number: ${{ github.event.pull_request.number }}
      project_id: ${{ inputs.project_id != '' && fromJSON(inputs.project_id) || 16 }}
      pull_request_number: ${{ github.event.pull_request.number || fromJSON(inputs.pr_number) }}
    secrets:
      token: ${{ secrets.YNPUT_BOT_TOKEN }}
      # INFO fallback to default `github.token` is required for PRs from forks
      # INFO organization secrets won't be available to forks
      token: ${{ secrets.YNPUT_BOT_TOKEN || github.token}}

View file

@@ -9,7 +9,7 @@ on:
jobs:
  validate-type-label:
    uses: ynput/ops-repo-automation/.github/workflows/validate_pr_labels.yml@develop
    uses: ynput/ops-repo-automation/.github/workflows/validate_pr_labels.yml@main
    with:
      repo: "${{ github.repository }}"
      pull_request_number: ${{ github.event.pull_request.number }}

View file

@@ -387,7 +387,7 @@ def get_representations_delivery_template_data(
        # convert representation entity. Fixed in 'ayon_api' 1.0.10.
        if isinstance(template_data, str):
            con = ayon_api.get_server_api_connection()
            repre_entity = con._representation_conversion(repre_entity)
            con._representation_conversion(repre_entity)
            template_data = repre_entity["context"]

        template_data.update(copy.deepcopy(general_template_data))
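
The hunk above only swaps a reassignment for an in-place call: newer `ayon_api` releases mutate the passed representation entity rather than returning a converted copy. A minimal sketch of the same guarded pattern, assuming (as the hunk's comment implies) that pre-1.0.10 `ayon_api` can leave the representation "context" as a serialized string; the helper name `ensure_repre_context` is hypothetical and not part of this commit:

import ayon_api


def ensure_repre_context(repre_entity):
    """Return the representation 'context' as a dict (hypothetical helper)."""
    if isinstance(repre_entity.get("context"), str):
        con = ayon_api.get_server_api_connection()
        # The private helper converts the entity in place and returns nothing,
        # so the result must be read back from the entity itself.
        con._representation_conversion(repre_entity)
    return repre_entity["context"]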

View file

@@ -3,6 +3,7 @@ from .constants import (
    ValidateContentsOrder,
    ValidateSceneOrder,
    ValidateMeshOrder,
    FARM_JOB_ENV_DATA_KEY,
)

from .publish_plugins import (
@@ -59,6 +60,7 @@ __all__ = (
    "ValidateContentsOrder",
    "ValidateSceneOrder",
    "ValidateMeshOrder",
    "FARM_JOB_ENV_DATA_KEY",

    "AbstractMetaInstancePlugin",
    "AbstractMetaContextPlugin",

View file

@@ -8,3 +8,5 @@ ValidateMeshOrder = pyblish.api.ValidatorOrder + 0.3
DEFAULT_PUBLISH_TEMPLATE = "default"
DEFAULT_HERO_PUBLISH_TEMPLATE = "default"
FARM_JOB_ENV_DATA_KEY: str = "farmJobEnv"

View file

@@ -0,0 +1,43 @@
import os

import pyblish.api

from ayon_core.lib import get_ayon_username
from ayon_core.pipeline.publish import FARM_JOB_ENV_DATA_KEY


class CollectCoreJobEnvVars(pyblish.api.ContextPlugin):
    """Collect set of environment variables to submit with deadline jobs"""

    order = pyblish.api.CollectorOrder - 0.45
    label = "AYON core Farm Environment Variables"
    targets = ["local"]

    def process(self, context):
        env = context.data.setdefault(FARM_JOB_ENV_DATA_KEY, {})

        # Disable colored logs on farm
        for key, value in (
            ("AYON_LOG_NO_COLORS", "1"),
            ("AYON_PROJECT_NAME", context.data["projectName"]),
            ("AYON_FOLDER_PATH", context.data.get("folderPath")),
            ("AYON_TASK_NAME", context.data.get("task")),
            # NOTE we should use 'context.data["user"]' but that has higher
            #   order.
            ("AYON_USERNAME", get_ayon_username()),
        ):
            if value:
                self.log.debug(f"Setting job env: {key}: {value}")
                env[key] = value

        for key in [
            "AYON_BUNDLE_NAME",
            "AYON_DEFAULT_SETTINGS_VARIANT",
            "AYON_IN_TESTS",
            # NOTE Not sure why workdir is needed?
            "AYON_WORKDIR",
        ]:
            value = os.getenv(key)
            if value:
                self.log.debug(f"Setting job env: {key}: {value}")
                env[key] = value
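
The collector above only fills `context.data[FARM_JOB_ENV_DATA_KEY]`; a farm submission plugin is expected to read that mapping when it builds the render job. A minimal sketch of such a consumer, assuming a generic payload dict; the plugin name and payload shape are hypothetical and not part of this commit:

import pyblish.api

from ayon_core.pipeline.publish import FARM_JOB_ENV_DATA_KEY


class SubmitJobWithCollectedEnv(pyblish.api.InstancePlugin):
    """Hypothetical submitter forwarding the collected farm job environment."""

    order = pyblish.api.IntegratorOrder
    label = "Submit Job With Collected Env (sketch)"

    def process(self, instance):
        context = instance.context
        # Mapping filled by CollectCoreJobEnvVars; may be empty if the
        # collector did not run (for example on non-local targets).
        job_env = dict(context.data.get(FARM_JOB_ENV_DATA_KEY, {}))

        # Hypothetical payload; a real submitter (e.g. a Deadline addon)
        # defines its own job structure.
        job_payload = {
            "name": instance.name,
            "environment": job_env,
        }
        self.log.debug("Job payload with collected env: %s", job_payload)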

View file

@@ -71,20 +71,18 @@ class ExtractOtioAudioTracks(pyblish.api.ContextPlugin):
            name = inst.data["folderPath"]
            recycling_file = [f for f in created_files if name in f]

            # frameranges
            timeline_in_h = inst.data["clipInH"]
            timeline_out_h = inst.data["clipOutH"]
            fps = inst.data["fps"]

            # create duration
            duration = (timeline_out_h - timeline_in_h) + 1
            audio_clip = inst.data["otioClip"]
            audio_range = audio_clip.range_in_parent()
            duration = audio_range.duration.to_frames()

            # ffmpeg generates a new file only if it doesn't exist already
            if not recycling_file:
                # convert to seconds
                start_sec = float(timeline_in_h / fps)
                duration_sec = float(duration / fps)
                parent_track = audio_clip.parent()
                parent_track_start = parent_track.range_in_parent().start_time
                relative_start_time = (
                    audio_range.start_time - parent_track_start)
                start_sec = relative_start_time.to_seconds()
                duration_sec = audio_range.duration.to_seconds()

                # temp audio file
                audio_fpath = self.create_temp_file(name)
@@ -163,34 +161,36 @@ class ExtractOtioAudioTracks(pyblish.api.ContextPlugin):
        output = []

        # go through all audio tracks
        for otio_track in otio_timeline.tracks:
            if "Audio" not in otio_track.kind:
                continue
        for otio_track in otio_timeline.audio_tracks():
            self.log.debug("_" * 50)
            playhead = 0

            for otio_clip in otio_track:
                self.log.debug(otio_clip)
                if isinstance(otio_clip, otio.schema.Gap):
                    playhead += otio_clip.source_range.duration.value
                elif isinstance(otio_clip, otio.schema.Clip):
                    start = otio_clip.source_range.start_time.value
                    duration = otio_clip.source_range.duration.value
                    fps = otio_clip.source_range.start_time.rate
                if (isinstance(otio_clip, otio.schema.Clip) and
                        not otio_clip.media_reference.is_missing_reference):
                    media_av_start = otio_clip.available_range().start_time
                    clip_start = otio_clip.source_range.start_time
                    fps = clip_start.rate
                    conformed_av_start = media_av_start.rescaled_to(fps)

                    # ffmpeg ignores embedded tc
                    start = clip_start - conformed_av_start
                    duration = otio_clip.source_range.duration
                    media_path = otio_clip.media_reference.target_url

                    input = {
                        "mediaPath": media_path,
                        "delayFrame": playhead,
                        "startFrame": start,
                        "durationFrame": duration,
                        "startFrame": start.to_frames(),
                        "durationFrame": duration.to_frames(),
                        "delayMilSec": int(float(playhead / fps) * 1000),
                        "startSec": float(start / fps),
                        "durationSec": float(duration / fps),
                        "fps": fps
                        "startSec": start.to_seconds(),
                        "durationSec": duration.to_seconds(),
                        "fps": float(fps)
                    }

                    if input not in output:
                        output.append(input)
                        self.log.debug("__ input: {}".format(input))

                    playhead += otio_clip.source_range.duration.value
                    playhead += otio_clip.source_range.duration.value

        return output
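
The rewrite above replaces manual frame/fps arithmetic with OTIO's `RationalTime` helpers: a clip's `source_range.start_time` is expressed in the media's embedded timecode, so subtracting the media's `available_range()` start (rescaled to the clip rate) yields the zero-based offset that ffmpeg needs. A standalone sketch of that arithmetic with made-up values (24 fps media whose embedded timecode starts at one hour), not taken from this commit:

import opentimelineio as otio

fps = 24.0
# Media with one hour of embedded timecode before the first frame (01:00:00:00).
media_available_start = otio.opentime.RationalTime(86400, fps)
# Clip starts 48 frames into the media, expressed in the same timecode space,
# and lasts 240 frames.
clip_start = otio.opentime.RationalTime(86448, fps)
clip_duration = otio.opentime.RationalTime(240, fps)

# ffmpeg ignores embedded timecode, so work relative to the media start.
relative_start = clip_start - media_available_start.rescaled_to(fps)

print(relative_start.to_frames())    # 48
print(relative_start.to_seconds())   # 2.0
print(clip_duration.to_seconds())    # 10.0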

View file

@@ -1,3 +1,3 @@
# -*- coding: utf-8 -*-
"""Package declaring AYON addon 'core' version."""
__version__ = "1.0.9+dev"
__version__ = "1.0.10+dev"

View file

@@ -1,6 +1,6 @@
name = "core"
title = "Core"
version = "1.0.9+dev"
version = "1.0.10+dev"
client_dir = "ayon_core"

View file

@@ -5,7 +5,7 @@
[tool.poetry]
name = "ayon-core"
version = "1.0.9+dev"
version = "1.0.10+dev"
description = ""
authors = ["Ynput Team <team@ynput.io>"]
readme = "README.md"