Merge branch 'feature/AY-971_Use-custom-staging-dir-functions' of github.com:ynput/ayon-core into feature/AY-971_Use-custom-staging-dir-functions

robin@ynput.io 2024-12-02 14:58:54 -05:00
commit c01cfa4397
10 changed files with 115 additions and 35 deletions

View file

@@ -1,15 +1,48 @@
 name: 🔸Auto assign pr
 on:
+  workflow_dispatch:
+    inputs:
+      pr_number:
+        type: string
+        description: "Run workflow for this PR number"
+        required: true
+      project_id:
+        type: string
+        description: "Github Project Number"
+        required: true
+        default: "16"
   pull_request:
     types:
       - opened
 
+env:
+  GH_TOKEN: ${{ github.token }}
+
 jobs:
+  get-pr-repo:
+    runs-on: ubuntu-latest
+    outputs:
+      pr_repo_name: ${{ steps.get-repo-name.outputs.repo_name || github.event.pull_request.head.repo.full_name }}
+      # INFO `github.event.pull_request.head.repo.full_name` is not available on manual triggered (dispatched) runs
+    steps:
+      - name: Get PR repo name
+        if: ${{ github.event_name == 'workflow_dispatch' }}
+        id: get-repo-name
+        run: |
+          repo_name=$(gh pr view ${{ inputs.pr_number }} --json headRepository,headRepositoryOwner --repo ${{ github.repository }} | jq -r '.headRepositoryOwner.login + "/" + .headRepository.name')
+          echo "repo_name=$repo_name" >> $GITHUB_OUTPUT
+
   auto-assign-pr:
-    uses: ynput/ops-repo-automation/.github/workflows/pr_to_project.yml@develop
+    needs:
+      - get-pr-repo
+    if: ${{ needs.get-pr-repo.outputs.pr_repo_name == github.repository }}
+    uses: ynput/ops-repo-automation/.github/workflows/pr_to_project.yml@main
     with:
       repo: "${{ github.repository }}"
-      project_id: 16
-      pull_request_number: ${{ github.event.pull_request.number }}
+      project_id: ${{ inputs.project_id != '' && fromJSON(inputs.project_id) || 16 }}
+      pull_request_number: ${{ github.event.pull_request.number || fromJSON(inputs.pr_number) }}
     secrets:
-      token: ${{ secrets.YNPUT_BOT_TOKEN }}
+      # INFO fallback to default `github.token` is required for PRs from forks
+      # INFO organization secrets won't be available to forks
+      token: ${{ secrets.YNPUT_BOT_TOKEN || github.token}}

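For illustration only (not part of the commit): the new `get-pr-repo` job resolves the PR's head repository so that `auto-assign-pr` runs only for branches of the base repository, not for forks. A rough local equivalent in Python, assuming an authenticated `gh` CLI; the function names are hypothetical and the `gh`/`jq` logic is taken from the step above:

import json
import subprocess


def get_pr_head_repo(pr_number: int, repo: str) -> str:
    """Return the PR head repository as "owner/name", like the gh/jq step."""
    raw = subprocess.check_output([
        "gh", "pr", "view", str(pr_number),
        "--json", "headRepository,headRepositoryOwner",
        "--repo", repo,
    ])
    data = json.loads(raw)
    return "{}/{}".format(
        data["headRepositoryOwner"]["login"],
        data["headRepository"]["name"],
    )


def should_auto_assign(pr_number: int, repo: str) -> bool:
    """Mirror the workflow's 'if:' guard: only same-repo PRs get assigned."""
    return get_pr_head_repo(pr_number, repo) == repo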
View file

@@ -9,7 +9,7 @@ on:
 
 jobs:
   validate-type-label:
-    uses: ynput/ops-repo-automation/.github/workflows/validate_pr_labels.yml@develop
+    uses: ynput/ops-repo-automation/.github/workflows/validate_pr_labels.yml@main
    with:
      repo: "${{ github.repository }}"
      pull_request_number: ${{ github.event.pull_request.number }}

View file

@@ -387,7 +387,7 @@ def get_representations_delivery_template_data(
         # convert representation entity. Fixed in 'ayon_api' 1.0.10.
         if isinstance(template_data, str):
             con = ayon_api.get_server_api_connection()
-            repre_entity = con._representation_conversion(repre_entity)
+            con._representation_conversion(repre_entity)
             template_data = repre_entity["context"]
         template_data.update(copy.deepcopy(general_template_data))

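The change above drops the assignment, presumably because `ayon_api`'s private `_representation_conversion` mutates the passed entity in place rather than returning it, so the old code overwrote `repre_entity` with the method's return value. A minimal sketch of that in-place pattern, using a made-up `convert_context` helper:

import json


def convert_context(repre_entity: dict) -> None:
    """Deserialize a string 'context' field in place; returns nothing."""
    context = repre_entity.get("context")
    if isinstance(context, str):
        repre_entity["context"] = json.loads(context)


repre_entity = {"context": '{"folder": {"name": "sh010"}}'}
convert_context(repre_entity)
# Re-read from the mutated entity instead of using the (None) return value.
template_data = repre_entity["context"]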
View file

@@ -3,6 +3,7 @@ from .constants import (
     ValidateContentsOrder,
     ValidateSceneOrder,
     ValidateMeshOrder,
+    FARM_JOB_ENV_DATA_KEY,
 )
 
 from .publish_plugins import (
@@ -59,6 +60,7 @@ __all__ = (
     "ValidateContentsOrder",
     "ValidateSceneOrder",
     "ValidateMeshOrder",
+    "FARM_JOB_ENV_DATA_KEY",
 
     "AbstractMetaInstancePlugin",
     "AbstractMetaContextPlugin",

View file

@@ -8,3 +8,5 @@ ValidateMeshOrder = pyblish.api.ValidatorOrder + 0.3
 
 DEFAULT_PUBLISH_TEMPLATE = "default"
 DEFAULT_HERO_PUBLISH_TEMPLATE = "default"
+
+FARM_JOB_ENV_DATA_KEY: str = "farmJobEnv"

View file

@@ -0,0 +1,43 @@
+import os
+
+import pyblish.api
+
+from ayon_core.lib import get_ayon_username
+from ayon_core.pipeline.publish import FARM_JOB_ENV_DATA_KEY
+
+
+class CollectCoreJobEnvVars(pyblish.api.ContextPlugin):
+    """Collect set of environment variables to submit with deadline jobs"""
+
+    order = pyblish.api.CollectorOrder - 0.45
+    label = "AYON core Farm Environment Variables"
+    targets = ["local"]
+
+    def process(self, context):
+        env = context.data.setdefault(FARM_JOB_ENV_DATA_KEY, {})
+
+        # Disable colored logs on farm
+        for key, value in (
+            ("AYON_LOG_NO_COLORS", "1"),
+            ("AYON_PROJECT_NAME", context.data["projectName"]),
+            ("AYON_FOLDER_PATH", context.data.get("folderPath")),
+            ("AYON_TASK_NAME", context.data.get("task")),
+            # NOTE we should use 'context.data["user"]' but that has higher
+            # order.
+            ("AYON_USERNAME", get_ayon_username()),
+        ):
+            if value:
+                self.log.debug(f"Setting job env: {key}: {value}")
+                env[key] = value
+
+        for key in [
+            "AYON_BUNDLE_NAME",
+            "AYON_DEFAULT_SETTINGS_VARIANT",
+            "AYON_IN_TESTS",
+            # NOTE Not sure why workdir is needed?
+            "AYON_WORKDIR",
+        ]:
+            value = os.getenv(key)
+            if value:
+                self.log.debug(f"Setting job env: {key}: {value}")
+                env[key] = value

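A hypothetical consumer of the collected data (not part of this commit): a farm submission plugin could read the same context key and merge the variables into its job environment. The plugin name and the `job_info` payload below are illustrative only:

import pyblish.api

from ayon_core.pipeline.publish import FARM_JOB_ENV_DATA_KEY


class SubmitJobExample(pyblish.api.InstancePlugin):
    """Example only: copy collected farm env vars onto a job payload."""

    order = pyblish.api.IntegratorOrder
    label = "Submit Job (example)"

    def process(self, instance):
        # The collector stores the variables on the publish context.
        job_env = dict(instance.context.data.get(FARM_JOB_ENV_DATA_KEY, {}))
        # Stand-in for whatever structure the farm submitter builds,
        # e.g. a Deadline JobInfo "EnvironmentKeyValue" mapping.
        job_info = {"EnvironmentKeyValue": job_env}
        self.log.debug("Job environment: %s", job_info)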
View file

@@ -71,20 +71,18 @@ class ExtractOtioAudioTracks(pyblish.api.ContextPlugin):
             name = inst.data["folderPath"]
             recycling_file = [f for f in created_files if name in f]
+            audio_clip = inst.data["otioClip"]
 
-            # frameranges
-            timeline_in_h = inst.data["clipInH"]
-            timeline_out_h = inst.data["clipOutH"]
-            fps = inst.data["fps"]
-
-            # create duration
-            duration = (timeline_out_h - timeline_in_h) + 1
+            audio_range = audio_clip.range_in_parent()
+            duration = audio_range.duration.to_frames()
 
             # ffmpeg generate new file only if doesn't exists already
             if not recycling_file:
-                # convert to seconds
-                start_sec = float(timeline_in_h / fps)
-                duration_sec = float(duration / fps)
+                parent_track = audio_clip.parent()
+                parent_track_start = parent_track.range_in_parent().start_time
+                relative_start_time = (
+                    audio_range.start_time - parent_track_start)
+                start_sec = relative_start_time.to_seconds()
+                duration_sec = audio_range.duration.to_seconds()
 
                 # temp audio file
                 audio_fpath = self.create_temp_file(name)
@@ -163,34 +161,36 @@ class ExtractOtioAudioTracks(pyblish.api.ContextPlugin):
         output = []
         # go trough all audio tracks
-        for otio_track in otio_timeline.tracks:
-            if "Audio" not in otio_track.kind:
-                continue
-
+        for otio_track in otio_timeline.audio_tracks():
             self.log.debug("_" * 50)
             playhead = 0
             for otio_clip in otio_track:
                 self.log.debug(otio_clip)
-                if isinstance(otio_clip, otio.schema.Gap):
-                    playhead += otio_clip.source_range.duration.value
-                elif isinstance(otio_clip, otio.schema.Clip):
-                    start = otio_clip.source_range.start_time.value
-                    duration = otio_clip.source_range.duration.value
-                    fps = otio_clip.source_range.start_time.rate
+                if (isinstance(otio_clip, otio.schema.Clip) and
+                        not otio_clip.media_reference.is_missing_reference):
+                    media_av_start = otio_clip.available_range().start_time
+                    clip_start = otio_clip.source_range.start_time
+                    fps = clip_start.rate
+                    conformed_av_start = media_av_start.rescaled_to(fps)
+                    # ffmpeg ignores embedded tc
+                    start = clip_start - conformed_av_start
+                    duration = otio_clip.source_range.duration
                     media_path = otio_clip.media_reference.target_url
                     input = {
                         "mediaPath": media_path,
                         "delayFrame": playhead,
-                        "startFrame": start,
-                        "durationFrame": duration,
+                        "startFrame": start.to_frames(),
+                        "durationFrame": duration.to_frames(),
                         "delayMilSec": int(float(playhead / fps) * 1000),
-                        "startSec": float(start / fps),
-                        "durationSec": float(duration / fps),
-                        "fps": fps
+                        "startSec": start.to_seconds(),
+                        "durationSec": duration.to_seconds(),
+                        "fps": float(fps)
                     }
                     if input not in output:
                         output.append(input)
                         self.log.debug("__ input: {}".format(input))
 
-                    playhead += otio_clip.source_range.duration.value
+                playhead += otio_clip.source_range.duration.value
 
         return output

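A standalone sketch of the OpenTimelineIO time arithmetic the rewritten extractor relies on, with made-up example values: the media's available-range start is rescaled to the clip rate and subtracted from the clip's source start (since ffmpeg ignores embedded timecode), then the result is expressed in frames and seconds:

import opentimelineio as otio

fps = 25.0
# Example values only.
media_av_start = otio.opentime.RationalTime(86400, 24.0)  # media starts at 01:00:00:00 @ 24 fps
clip_start = otio.opentime.RationalTime(90060, fps)       # clip source_range.start_time
duration = otio.opentime.RationalTime(50, fps)            # clip source_range.duration

conformed_av_start = media_av_start.rescaled_to(fps)
start = clip_start - conformed_av_start

print(start.to_frames(), start.to_seconds())        # 60 2.4
print(duration.to_frames(), duration.to_seconds())  # 50 2.0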
View file

@@ -1,3 +1,3 @@
 # -*- coding: utf-8 -*-
 """Package declaring AYON addon 'core' version."""
-__version__ = "1.0.9+dev"
+__version__ = "1.0.10+dev"

View file

@@ -1,6 +1,6 @@
 name = "core"
 title = "Core"
-version = "1.0.9+dev"
+version = "1.0.10+dev"
 client_dir = "ayon_core"

View file

@@ -5,7 +5,7 @@
 [tool.poetry]
 name = "ayon-core"
-version = "1.0.9+dev"
+version = "1.0.10+dev"
 description = ""
 authors = ["Ynput Team <team@ynput.io>"]
 readme = "README.md"