Merge branch 'develop' into enhancement/tray_publisher_is_not_experimental

# Conflicts:
#	openpype/hosts/traypublisher/addon.py
Commit 694b2feef8 by Jakub Trllo, 2022-11-10 11:40:01 +01:00
905 changed files with 49237 additions and 14198 deletions


@ -6,6 +6,8 @@ labels: bug
assignees: ''
---
**Running version**
[ex. 3.14.1-nightly.2]
**Describe the bug**
A clear and concise description of what the bug is.

.github/workflows/milestone_assign.yml (new file, 28 lines)

@ -0,0 +1,28 @@
name: Milestone - assign to PRs
on:
  pull_request_target:
    types: [opened, reopened, edited, synchronize]
jobs:
  run_if_release:
    if: startsWith(github.base_ref, 'release/')
    runs-on: ubuntu-latest
    steps:
      - name: 'Assign Milestone [next-minor]'
        if: github.event.pull_request.milestone == null
        uses: zoispag/action-assign-milestone@v1
        with:
          repo-token: "${{ secrets.GITHUB_TOKEN }}"
          milestone: 'next-minor'
  run_if_develop:
    if: ${{ github.base_ref == 'develop' }}
    runs-on: ubuntu-latest
    steps:
      - name: 'Assign Milestone [next-patch]'
        if: github.event.pull_request.milestone == null
        uses: zoispag/action-assign-milestone@v1
        with:
          repo-token: "${{ secrets.GITHUB_TOKEN }}"
          milestone: 'next-patch'

.github/workflows/milestone_create.yml (new file, 62 lines)

@ -0,0 +1,62 @@
name: Milestone - create default
on:
  milestone:
    types: [closed, edited]
jobs:
  generate-next-patch:
    runs-on: ubuntu-latest
    steps:
      - name: 'Get Milestones'
        uses: "WyriHaximus/github-action-get-milestones@master"
        id: milestones
        env:
          GITHUB_TOKEN: "${{ secrets.GITHUB_TOKEN }}"
      - run: echo "::set-output name=number::$(printenv MILESTONES | jq --arg MILESTONE $(printenv MILESTONE) '.[] | select(.title == $MILESTONE) | .number')"
        id: querymilestone
        env:
          MILESTONES: ${{ steps.milestones.outputs.milestones }}
          MILESTONE: "next-patch"
      - name: Read output
        run: |
          echo "${{ steps.querymilestone.outputs.number }}"
      - name: 'Create `next-patch` milestone'
        if: steps.querymilestone.outputs.number == ''
        id: createmilestone
        uses: "WyriHaximus/github-action-create-milestone@v1"
        with:
          title: 'next-patch'
        env:
          GITHUB_TOKEN: "${{ secrets.GITHUB_TOKEN }}"
  generate-next-minor:
    runs-on: ubuntu-latest
    steps:
      - name: 'Get Milestones'
        uses: "WyriHaximus/github-action-get-milestones@master"
        id: milestones
        env:
          GITHUB_TOKEN: "${{ secrets.GITHUB_TOKEN }}"
      - run: echo "::set-output name=number::$(printenv MILESTONES | jq --arg MILESTONE $(printenv MILESTONE) '.[] | select(.title == $MILESTONE) | .number')"
        id: querymilestone
        env:
          MILESTONES: ${{ steps.milestones.outputs.milestones }}
          MILESTONE: "next-minor"
      - name: Read output
        run: |
          echo "${{ steps.querymilestone.outputs.number }}"
      - name: 'Create `next-minor` milestone'
        if: steps.querymilestone.outputs.number == ''
        id: createmilestone
        uses: "WyriHaximus/github-action-create-milestone@v1"
        with:
          title: 'next-minor'
        env:
          GITHUB_TOKEN: "${{ secrets.GITHUB_TOKEN }}"


@ -37,27 +37,27 @@ jobs:
echo ::set-output name=next_tag::$RESULT
- name: "✏️ Generate full changelog"
if: steps.version_type.outputs.type != 'skip'
id: generate-full-changelog
uses: heinrichreimer/github-changelog-generator-action@v2.2
with:
token: ${{ secrets.ADMIN_TOKEN }}
addSections: '{"documentation":{"prefix":"### 📖 Documentation","labels":["type: documentation"]},"tests":{"prefix":"### ✅ Testing","labels":["tests"]},"feature":{"prefix":"**🆕 New features**", "labels":["type: feature"]},"breaking":{"prefix":"**💥 Breaking**", "labels":["breaking"]},"enhancements":{"prefix":"**🚀 Enhancements**", "labels":["type: enhancement"]},"bugs":{"prefix":"**🐛 Bug fixes**", "labels":["type: bug"]},"deprecated":{"prefix":"**⚠️ Deprecations**", "labels":["depreciated"]}, "refactor":{"prefix":"**🔀 Refactored code**", "labels":["refactor"]}}'
issues: false
issuesWoLabels: false
sinceTag: "3.0.0"
maxIssues: 100
pullRequests: true
prWoLabels: false
author: false
unreleased: true
compareLink: true
stripGeneratorNotice: true
verbose: true
unreleasedLabel: ${{ steps.version.outputs.next_tag }}
excludeTagsRegex: "CI/.+"
releaseBranch: "main"
# - name: "✏️ Generate full changelog"
# if: steps.version_type.outputs.type != 'skip'
# id: generate-full-changelog
# uses: heinrichreimer/github-changelog-generator-action@v2.3
# with:
# token: ${{ secrets.ADMIN_TOKEN }}
# addSections: '{"documentation":{"prefix":"### 📖 Documentation","labels":["type: documentation"]},"tests":{"prefix":"### ✅ Testing","labels":["tests"]},"feature":{"prefix":"**🆕 New features**", "labels":["type: feature"]},"breaking":{"prefix":"**💥 Breaking**", "labels":["breaking"]},"enhancements":{"prefix":"**🚀 Enhancements**", "labels":["type: enhancement"]},"bugs":{"prefix":"**🐛 Bug fixes**", "labels":["type: bug"]},"deprecated":{"prefix":"**⚠️ Deprecations**", "labels":["depreciated"]}, "refactor":{"prefix":"**🔀 Refactored code**", "labels":["refactor"]}}'
# issues: false
# issuesWoLabels: false
# sinceTag: "3.12.0"
# maxIssues: 100
# pullRequests: true
# prWoLabels: false
# author: false
# unreleased: true
# compareLink: true
# stripGeneratorNotice: true
# verbose: true
# unreleasedLabel: ${{ steps.version.outputs.next_tag }}
# excludeTagsRegex: "CI/.+"
# releaseBranch: "main"
- name: "🖨️ Print changelog to console"
if: steps.version_type.outputs.type != 'skip'
@ -85,11 +85,11 @@ jobs:
tags: true
unprotect_reviews: true
- name: 🔨 Merge main back to develop
uses: everlytic/branch-merge@1.1.0
if: steps.version_type.outputs.type != 'skip'
with:
github_token: ${{ secrets.ADMIN_TOKEN }}
source_ref: 'main'
target_branch: 'develop'
commit_message_template: '[Automated] Merged {source_ref} into {target_branch}'


@ -2,7 +2,7 @@ name: Stable Release
on:
release:
types:
- prereleased
jobs:
@ -13,7 +13,7 @@ jobs:
steps:
- name: 🚛 Checkout Code
uses: actions/checkout@v2
with:
fetch-depth: 0
- name: Set up Python
@ -33,27 +33,27 @@ jobs:
echo ::set-output name=last_release::$LASTRELEASE
echo ::set-output name=release_tag::$RESULT
- name: "✏️ Generate full changelog"
if: steps.version.outputs.release_tag != 'skip'
id: generate-full-changelog
uses: heinrichreimer/github-changelog-generator-action@v2.2
with:
token: ${{ secrets.ADMIN_TOKEN }}
addSections: '{"documentation":{"prefix":"### 📖 Documentation","labels":["type: documentation"]},"tests":{"prefix":"### ✅ Testing","labels":["tests"]},"feature":{"prefix":"**🆕 New features**", "labels":["type: feature"]},"breaking":{"prefix":"**💥 Breaking**", "labels":["breaking"]},"enhancements":{"prefix":"**🚀 Enhancements**", "labels":["type: enhancement"]},"bugs":{"prefix":"**🐛 Bug fixes**", "labels":["type: bug"]},"deprecated":{"prefix":"**⚠️ Deprecations**", "labels":["depreciated"]}, "refactor":{"prefix":"**🔀 Refactored code**", "labels":["refactor"]}}'
issues: false
issuesWoLabels: false
sinceTag: "3.0.0"
maxIssues: 100
pullRequests: true
prWoLabels: false
author: false
unreleased: true
compareLink: true
stripGeneratorNotice: true
verbose: true
futureRelease: ${{ steps.version.outputs.release_tag }}
excludeTagsRegex: "CI/.+"
releaseBranch: "main"
# - name: "✏️ Generate full changelog"
# if: steps.version.outputs.release_tag != 'skip'
# id: generate-full-changelog
# uses: heinrichreimer/github-changelog-generator-action@v2.3
# with:
# token: ${{ secrets.ADMIN_TOKEN }}
# addSections: '{"documentation":{"prefix":"### 📖 Documentation","labels":["type: documentation"]},"tests":{"prefix":"### ✅ Testing","labels":["tests"]},"feature":{"prefix":"**🆕 New features**", "labels":["type: feature"]},"breaking":{"prefix":"**💥 Breaking**", "labels":["breaking"]},"enhancements":{"prefix":"**🚀 Enhancements**", "labels":["type: enhancement"]},"bugs":{"prefix":"**🐛 Bug fixes**", "labels":["type: bug"]},"deprecated":{"prefix":"**⚠️ Deprecations**", "labels":["depreciated"]}, "refactor":{"prefix":"**🔀 Refactored code**", "labels":["refactor"]}}'
# issues: false
# issuesWoLabels: false
# sinceTag: "3.12.0"
# maxIssues: 100
# pullRequests: true
# prWoLabels: false
# author: false
# unreleased: true
# compareLink: true
# stripGeneratorNotice: true
# verbose: true
# futureRelease: ${{ steps.version.outputs.release_tag }}
# excludeTagsRegex: "CI/.+"
# releaseBranch: "main"
- name: 💾 Commit and Tag
id: git_commit
@ -73,8 +73,8 @@ jobs:
token: ${{ secrets.ADMIN_TOKEN }}
branch: main
tags: true
unprotect_reviews: true
- name: "✏️ Generate last changelog"
if: steps.version.outputs.release_tag != 'skip'
id: generate-last-changelog
@ -114,11 +114,11 @@ jobs:
with:
tag: "${{ steps.version.outputs.current_version }}"
- name: 🔁 Merge main back to develop
if: steps.version.outputs.release_tag != 'skip'
uses: everlytic/branch-merge@1.1.0
with:
github_token: ${{ secrets.ADMIN_TOKEN }}
source_ref: 'main'
target_branch: 'develop'
commit_message_template: '[Automated] Merged release {source_ref} into {target_branch}'

.gitignore (5 changes)

@ -107,3 +107,8 @@ website/.docusaurus
mypy.ini
tools/run_eventserver.*
# Developer tools
tools/dev_*
.github_changelog_generator

File diff suppressed because it is too large.

HISTORY.md (1941 changes)

File diff suppressed because it is too large.


@ -41,7 +41,7 @@ It can be built and run on all common platforms. We develop and test on the foll
- **Linux**
- **Ubuntu** 20.04 LTS
- **Centos** 7
- **Mac OSX**
- **10.15** Catalina
- **11.1** Big Sur (using Rosetta2)
@ -287,6 +287,14 @@ To run tests, execute `.\tools\run_tests(.ps1|.sh)`.
**Note that it needs an existing virtual environment.**
Developer tools
-------------
In case you wish to add your own tools to the `.\tools` folder without git tracking, you can do so by adding them with a `dev_*` prefix (example: `dev_clear_pyc(.ps1|.sh)`).
## Contributors ✨
Thanks goes to these wonderful people ([emoji key](https://allcontributors.org/docs/en/emoji-key)):


@ -0,0 +1,18 @@
Addon distribution tool
------------------------
Code in this folder is the backend portion of the addon distribution logic for the v4 server.
Each host and module will become a separate addon in the future, and each v4 server can run a different set of addons.
As a first step, the client (running on an artist machine) asks the v4 server for the list of enabled addons.
(It expects a list of JSON documents matching the `addon_distribution.py:AddonInfo` object.)
Next it checks whether each enabled addon version is present in the local folder. If a version of
an addon is missing, the client uses the information in the addon record to download the zip file
(over http, from a shared local disk, or from git) and unzip it.
A required part of addon distribution will be the sharing of dependencies (Python libraries, utilities), which is not part of this folder.
The location of this folder might change in the future, as a client will need to add this folder to sys.path reliably.
This code needs to be as independent of OpenPype code as possible!
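
For illustration, a minimal sketch (assuming the modules added in this commit; the server endpoint and local folder below are placeholder values) of how the pieces in this folder fit together:

    from common.openpype_common.distribution.addon_distribution import (
        AddonDownloader,
        OSAddonDownloader,
        HTTPAddonDownloader,
        check_addons,
    )
    from common.openpype_common.distribution.addon_info import UrlType

    # Register one downloader per source type the server may return.
    factory = AddonDownloader()
    factory.register_format(UrlType.FILESYSTEM, OSAddonDownloader)
    factory.register_format(UrlType.HTTP, HTTPAddonDownloader)

    # Ask the server for enabled addons and synchronize the local folder;
    # raises RuntimeError if any addon fails to download or unzip.
    check_addons(
        "https://my-v4-server/api/addons",  # placeholder endpoint
        "/path/to/local/addons",            # placeholder addon folder
        factory,
    )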


@ -0,0 +1,208 @@
import os
from enum import Enum
from abc import abstractmethod
import attr
import logging
import requests
import platform
import shutil
from .file_handler import RemoteFileHandler
from .addon_info import AddonInfo
class UpdateState(Enum):
EXISTS = "exists"
UPDATED = "updated"
FAILED = "failed"
class AddonDownloader:
log = logging.getLogger(__name__)
def __init__(self):
self._downloaders = {}
def register_format(self, downloader_type, downloader):
self._downloaders[downloader_type.value] = downloader
def get_downloader(self, downloader_type):
downloader = self._downloaders.get(downloader_type)
if not downloader:
raise ValueError(f"{downloader_type} not implemented")
return downloader()
@classmethod
@abstractmethod
def download(cls, source, destination):
"""Returns url to downloaded addon zip file.
Args:
source (dict): {type:"http", "url":"https://} ...}
destination (str): local folder to unzip
Returns:
(str) local path to addon zip file
"""
pass
@classmethod
def check_hash(cls, addon_path, addon_hash):
"""Compares 'hash' of downloaded 'addon_url' file.
Args:
addon_path (str): local path to addon zip file
addon_hash (str): sha256 hash of zip file
Raises:
ValueError if hashes don't match
"""
if not os.path.exists(addon_path):
raise ValueError(f"{addon_path} doesn't exist.")
if not RemoteFileHandler.check_integrity(addon_path,
addon_hash,
hash_type="sha256"):
raise ValueError(f"{addon_path} doesn't match expected hash.")
@classmethod
def unzip(cls, addon_zip_path, destination):
"""Unzips local 'addon_zip_path' to 'destination'.
Args:
addon_zip_path (str): local path to addon zip file
destination (str): local folder to unzip
"""
RemoteFileHandler.unzip(addon_zip_path, destination)
os.remove(addon_zip_path)
@classmethod
def remove(cls, addon_url):
pass
class OSAddonDownloader(AddonDownloader):
@classmethod
def download(cls, source, destination):
# OS source doesn't need downloading, unzip directly
addon_url = source["path"].get(platform.system().lower())
if not os.path.exists(addon_url):
raise ValueError("{} is not accessible".format(addon_url))
return addon_url
class HTTPAddonDownloader(AddonDownloader):
CHUNK_SIZE = 100000
@classmethod
def download(cls, source, destination):
source_url = source["url"]
cls.log.debug(f"Downloading {source_url} to {destination}")
file_name = os.path.basename(destination)
_, ext = os.path.splitext(file_name)
if (ext.replace(".", '') not
in set(RemoteFileHandler.IMPLEMENTED_ZIP_FORMATS)):
file_name += ".zip"
RemoteFileHandler.download_url(source_url,
destination,
filename=file_name)
return os.path.join(destination, file_name)
def get_addons_info(server_endpoint):
"""Returns list of addon information from Server"""
# TODO temp
# addon_info = AddonInfo(
# **{"name": "openpype_slack",
# "version": "1.0.0",
# "addon_url": "c:/projects/openpype_slack_1.0.0.zip",
# "type": UrlType.FILESYSTEM,
# "hash": "4be25eb6215e91e5894d3c5475aeb1e379d081d3f5b43b4ee15b0891cf5f5658"}) # noqa
#
# http_addon = AddonInfo(
# **{"name": "openpype_slack",
# "version": "1.0.0",
# "addon_url": "https://drive.google.com/file/d/1TcuV8c2OV8CcbPeWi7lxOdqWsEqQNPYy/view?usp=sharing", # noqa
# "type": UrlType.HTTP,
# "hash": "4be25eb6215e91e5894d3c5475aeb1e379d081d3f5b43b4ee15b0891cf5f5658"}) # noqa
response = requests.get(server_endpoint)
if not response.ok:
raise Exception(response.text)
addons_info = []
for addon in response.json():
addons_info.append(AddonInfo(**addon))
return addons_info
def update_addon_state(addon_infos, destination_folder, factory,
log=None):
"""Loops through all 'addon_infos', compares local version, unzips.
Loops through server provided list of dictionaries with information about
available addons. Looks if each addon is already present and deployed.
If isn't, addon zip gets downloaded and unzipped into 'destination_folder'.
Args:
addon_infos (list of AddonInfo)
destination_folder (str): local path
factory (AddonDownloader): factory to get appropriate downloader per
addon type
log (logging.Logger)
Returns:
(dict): {"addon_full_name": UpdateState.value
(eg. "exists"|"updated"|"failed")
"""
if not log:
log = logging.getLogger(__name__)
download_states = {}
for addon in addon_infos:
full_name = "{}_{}".format(addon.name, addon.version)
addon_dest = os.path.join(destination_folder, full_name)
if os.path.isdir(addon_dest):
log.debug(f"Addon version folder {addon_dest} already exists.")
download_states[full_name] = UpdateState.EXISTS.value
continue
for source in addon.sources:
download_states[full_name] = UpdateState.FAILED.value
try:
downloader = factory.get_downloader(source.type)
zip_file_path = downloader.download(attr.asdict(source),
addon_dest)
downloader.check_hash(zip_file_path, addon.hash)
downloader.unzip(zip_file_path, addon_dest)
download_states[full_name] = UpdateState.UPDATED.value
break
except Exception:
log.warning(f"Error happened during updating {addon.name}",
exc_info=True)
if os.path.isdir(addon_dest):
log.debug(f"Cleaning {addon_dest}")
shutil.rmtree(addon_dest)
return download_states
def check_addons(server_endpoint, addon_folder, downloaders):
"""Main entry point to compare existing addons with those on server.
Args:
server_endpoint (str): url to v4 server endpoint
addon_folder (str): local dir path for addons
downloaders (AddonDownloader): factory of downloaders
Raises:
(RuntimeError) if any addon failed update
"""
addons_info = get_addons_info(server_endpoint)
result = update_addon_state(addons_info,
addon_folder,
downloaders)
if UpdateState.FAILED.value in result.values():
raise RuntimeError(f"Unable to update some addons {result}")
def cli(*args):
raise NotImplementedError


@ -0,0 +1,80 @@
import attr
from enum import Enum
class UrlType(Enum):
HTTP = "http"
GIT = "git"
FILESYSTEM = "filesystem"
@attr.s
class MultiPlatformPath(object):
windows = attr.ib(default=None)
linux = attr.ib(default=None)
darwin = attr.ib(default=None)
@attr.s
class AddonSource(object):
type = attr.ib()
@attr.s
class LocalAddonSource(AddonSource):
path = attr.ib(default=attr.Factory(MultiPlatformPath))
@attr.s
class WebAddonSource(AddonSource):
url = attr.ib(default=None)
@attr.s
class VersionData(object):
version_data = attr.ib(default=None)
@attr.s
class AddonInfo(object):
"""Object matching json payload from Server"""
name = attr.ib()
version = attr.ib()
title = attr.ib(default=None)
sources = attr.ib(default=attr.Factory(dict))
hash = attr.ib(default=None)
description = attr.ib(default=None)
license = attr.ib(default=None)
authors = attr.ib(default=None)
@classmethod
def from_dict(cls, data):
sources = []
production_version = data.get("productionVersion")
if not production_version:
return
# server payload contains info about all versions
# active addon must have 'productionVersion' and matching version info
version_data = data.get("versions", {})[production_version]
for source in version_data.get("clientSourceInfo", []):
if source.get("type") == UrlType.FILESYSTEM.value:
source_addon = LocalAddonSource(type=source["type"],
path=source["path"])
if source.get("type") == UrlType.HTTP.value:
source_addon = WebAddonSource(type=source["type"],
url=source["url"])
sources.append(source_addon)
return cls(name=data.get("name"),
version=production_version,
sources=sources,
hash=data.get("hash"),
description=data.get("description"),
title=data.get("title"),
license=data.get("license"),
authors=data.get("authors"))


@ -21,7 +21,7 @@ class RemoteFileHandler:
'tar.gz', 'tar.xz', 'tar.bz2']
@staticmethod
def calculate_md5(fpath, chunk_size):
def calculate_md5(fpath, chunk_size=10000):
md5 = hashlib.md5()
with open(fpath, 'rb') as f:
for chunk in iter(lambda: f.read(chunk_size), b''):
@ -33,17 +33,45 @@ class RemoteFileHandler:
return md5 == RemoteFileHandler.calculate_md5(fpath, **kwargs)
@staticmethod
def check_integrity(fpath, md5=None):
def calculate_sha256(fpath):
"""Calculate sha256 for content of the file.
Args:
fpath (str): Path to file.
Returns:
str: hex encoded sha256
"""
h = hashlib.sha256()
b = bytearray(128 * 1024)
mv = memoryview(b)
with open(fpath, 'rb', buffering=0) as f:
for n in iter(lambda: f.readinto(mv), 0):
h.update(mv[:n])
return h.hexdigest()
@staticmethod
def check_sha256(fpath, sha256, **kwargs):
return sha256 == RemoteFileHandler.calculate_sha256(fpath, **kwargs)
@staticmethod
def check_integrity(fpath, hash_value=None, hash_type=None):
if not os.path.isfile(fpath):
return False
if md5 is None:
if hash_value is None:
return True
return RemoteFileHandler.check_md5(fpath, md5)
if not hash_type:
raise ValueError("Provide hash type, md5 or sha256")
if hash_type == 'md5':
return RemoteFileHandler.check_md5(fpath, hash_value)
if hash_type == "sha256":
return RemoteFileHandler.check_sha256(fpath, hash_value)
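# Illustrative usage only (path and hash value below are hypothetical):
#     RemoteFileHandler.check_integrity(
#         "/tmp/openpype_slack_1.0.0.zip",
#         "4be25eb6215e91e5894d3c5475aeb1e379d081d3f5b43b4ee15b0891cf5f5658",
#         hash_type="sha256")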
@staticmethod
def download_url(
url, root, filename=None,
md5=None, max_redirect_hops=3
sha256=None, max_redirect_hops=3
):
"""Download a file from a url and place it in root.
Args:
@ -51,7 +79,7 @@ class RemoteFileHandler:
root (str): Directory to place downloaded file in
filename (str, optional): Name to save the file under.
If None, use the basename of the URL
md5 (str, optional): MD5 checksum of the download.
sha256 (str, optional): sha256 checksum of the download.
If None, do not check
max_redirect_hops (int, optional): Maximum number of redirect
hops allowed
@ -64,7 +92,8 @@ class RemoteFileHandler:
os.makedirs(root, exist_ok=True)
# check if file is already present locally
if RemoteFileHandler.check_integrity(fpath, md5):
if RemoteFileHandler.check_integrity(fpath,
sha256, hash_type="sha256"):
print('Using downloaded and verified file: ' + fpath)
return
@ -76,7 +105,7 @@ class RemoteFileHandler:
file_id = RemoteFileHandler._get_google_drive_file_id(url)
if file_id is not None:
return RemoteFileHandler.download_file_from_google_drive(
file_id, root, filename, md5)
file_id, root, filename, sha256)
# download the file
try:
@ -92,20 +121,21 @@ class RemoteFileHandler:
raise e
# check integrity of downloaded file
if not RemoteFileHandler.check_integrity(fpath, md5):
if not RemoteFileHandler.check_integrity(fpath,
sha256, hash_type="sha256"):
raise RuntimeError("File not found or corrupted.")
@staticmethod
def download_file_from_google_drive(file_id, root,
filename=None,
md5=None):
sha256=None):
"""Download a Google Drive file from and place it in root.
Args:
file_id (str): id of file to be downloaded
root (str): Directory to place downloaded file in
filename (str, optional): Name to save the file under.
If None, use the id of the file.
md5 (str, optional): MD5 checksum of the download.
sha256 (str, optional): sha256 checksum of the download.
If None, do not check
"""
# Based on https://stackoverflow.com/questions/38511444/python-download-files-from-google-drive-using-url # noqa
@ -119,8 +149,8 @@ class RemoteFileHandler:
os.makedirs(root, exist_ok=True)
if os.path.isfile(fpath) and RemoteFileHandler.check_integrity(fpath,
md5):
if os.path.isfile(fpath) and RemoteFileHandler.check_integrity(
fpath, sha256, hash_type="sha256"):
print('Using downloaded and verified file: ' + fpath)
else:
session = requests.Session()


@ -0,0 +1,167 @@
import pytest
import attr
import tempfile
from common.openpype_common.distribution.addon_distribution import (
AddonDownloader,
OSAddonDownloader,
HTTPAddonDownloader,
AddonInfo,
update_addon_state,
UpdateState
)
from common.openpype_common.distribution.addon_info import UrlType
@pytest.fixture
def addon_downloader():
addon_downloader = AddonDownloader()
addon_downloader.register_format(UrlType.FILESYSTEM, OSAddonDownloader)
addon_downloader.register_format(UrlType.HTTP, HTTPAddonDownloader)
yield addon_downloader
@pytest.fixture
def http_downloader(addon_downloader):
yield addon_downloader.get_downloader(UrlType.HTTP.value)
@pytest.fixture
def temp_folder():
yield tempfile.mkdtemp()
@pytest.fixture
def sample_addon_info():
addon_info = {
"versions": {
"1.0.0": {
"clientPyproject": {
"tool": {
"poetry": {
"dependencies": {
"nxtools": "^1.6",
"orjson": "^3.6.7",
"typer": "^0.4.1",
"email-validator": "^1.1.3",
"python": "^3.10",
"fastapi": "^0.73.0"
}
}
}
},
"hasSettings": True,
"clientSourceInfo": [
{
"type": "http",
"url": "https://drive.google.com/file/d/1TcuV8c2OV8CcbPeWi7lxOdqWsEqQNPYy/view?usp=sharing" # noqa
},
{
"type": "filesystem",
"path": {
"windows": ["P:/sources/some_file.zip",
"W:/sources/some_file.zip"], # noqa
"linux": ["/mnt/srv/sources/some_file.zip"],
"darwin": ["/Volumes/srv/sources/some_file.zip"]
}
}
],
"frontendScopes": {
"project": {
"sidebar": "hierarchy"
}
}
}
},
"description": "",
"title": "Slack addon",
"name": "openpype_slack",
"productionVersion": "1.0.0",
"hash": "4be25eb6215e91e5894d3c5475aeb1e379d081d3f5b43b4ee15b0891cf5f5658" # noqa
}
yield addon_info
def test_register(printer):
addon_downloader = AddonDownloader()
assert len(addon_downloader._downloaders) == 0, "Contains registered"
addon_downloader.register_format(UrlType.FILESYSTEM, OSAddonDownloader)
assert len(addon_downloader._downloaders) == 1, "Should contain one"
def test_get_downloader(printer, addon_downloader):
assert addon_downloader.get_downloader(UrlType.FILESYSTEM.value), "Should find" # noqa
with pytest.raises(ValueError):
addon_downloader.get_downloader("unknown"), "Shouldn't find"
def test_addon_info(printer, sample_addon_info):
"""Tests parsing of expected payload from v4 server into AadonInfo."""
valid_minimum = {
"name": "openpype_slack",
"productionVersion": "1.0.0",
"versions": {
"1.0.0": {
"clientSourceInfo": [
{
"type": "filesystem",
"path": {
"windows": [
"P:/sources/some_file.zip",
"W:/sources/some_file.zip"],
"linux": [
"/mnt/srv/sources/some_file.zip"],
"darwin": [
"/Volumes/srv/sources/some_file.zip"] # noqa
}
}
]
}
}
}
assert AddonInfo.from_dict(valid_minimum), "Missing required fields"
valid_minimum["versions"].pop("1.0.0")
with pytest.raises(KeyError):
assert not AddonInfo.from_dict(valid_minimum), "Must fail without version data" # noqa
valid_minimum.pop("productionVersion")
assert not AddonInfo.from_dict(
valid_minimum), "none if not productionVersion" # noqa
addon = AddonInfo.from_dict(sample_addon_info)
assert addon, "Should be created"
assert addon.name == "openpype_slack", "Incorrect name"
assert addon.version == "1.0.0", "Incorrect version"
with pytest.raises(TypeError):
assert addon["name"], "Dict approach not implemented"
addon_as_dict = attr.asdict(addon)
assert addon_as_dict["name"], "Dict approach should work"
def test_update_addon_state(printer, sample_addon_info,
temp_folder, addon_downloader):
"""Tests possible cases of addon update."""
addon_info = AddonInfo.from_dict(sample_addon_info)
orig_hash = addon_info.hash
addon_info.hash = "brokenhash"
result = update_addon_state([addon_info], temp_folder, addon_downloader)
assert result["openpype_slack_1.0.0"] == UpdateState.FAILED.value, \
"Update should failed because of wrong hash"
addon_info.hash = orig_hash
result = update_addon_state([addon_info], temp_folder, addon_downloader)
assert result["openpype_slack_1.0.0"] == UpdateState.UPDATED.value, \
"Addon should have been updated"
result = update_addon_state([addon_info], temp_folder, addon_downloader)
assert result["openpype_slack_1.0.0"] == UpdateState.EXISTS.value, \
"Addon should already exist"


@ -63,7 +63,7 @@ class OpenPypeVersion(semver.VersionInfo):
"""
staging = False
path = None
_VERSION_REGEX = re.compile(r"(?P<major>0|[1-9]\d*)\.(?P<minor>0|[1-9]\d*)\.(?P<patch>0|[1-9]\d*)(?:-(?P<prerelease>(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+(?P<buildmetadata>[0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$") # noqa: E501
_VERSION_REGEX = re.compile(r"(?P<major>0|[1-9]\d*)\.(?P<minor>0|[1-9]\d*)\.(?P<patch>0|[1-9]\d*)(?:-(?P<prerelease>(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+(?P<buildmetadata>[0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?") # noqa: E501
_installed_version = None
def __init__(self, *args, **kwargs):
@ -815,6 +815,13 @@ class BootstrapRepos:
except Exception as e:
self._print(str(e), LOG_ERROR, exc_info=True)
return None
if not destination_dir.exists():
destination_dir.mkdir(parents=True)
elif not destination_dir.is_dir():
self._print(
"Destination exists but is not directory.", LOG_ERROR)
return None
try:
shutil.move(zip_file.as_posix(), destination_dir.as_posix())
except shutil.Error as e:


@ -388,8 +388,11 @@ class InstallDialog(QtWidgets.QDialog):
install_thread.start()
def _installation_finished(self):
# TODO we should find out why status can be set to 'None'?
# - 'InstallThread.run' should handle all cases so not sure where
# that comes from
status = self._install_thread.result()
if status >= 0:
if status is not None and status >= 0:
self._update_progress(100)
QtWidgets.QApplication.processEvents()
self.done(3)


@ -1,44 +1,82 @@
# absolute_import is needed to counter the `module has no cmds error` in Maya
from __future__ import absolute_import
import warnings
import functools
import pyblish.api
def get_errored_instances_from_context(context):
instances = list()
for result in context.data["results"]:
if result["instance"] is None:
# When instance is None we are on the "context" result
continue
if result["error"]:
instances.append(result["instance"])
return instances
class ActionDeprecatedWarning(DeprecationWarning):
pass
def get_errored_plugins_from_data(context):
"""Get all failed validation plugins
Args:
context (object):
Returns:
list of plugins which failed during validation
def deprecated(new_destination):
"""Mark functions as deprecated.
It will result in a warning being emitted when the function is used.
"""
plugins = list()
results = context.data.get("results", [])
for result in results:
if result["success"] is True:
continue
plugins.append(result["plugin"])
func = None
if callable(new_destination):
func = new_destination
new_destination = None
return plugins
def _decorator(decorated_func):
if new_destination is None:
warning_message = (
" Please check content of deprecated function to figure out"
" possible replacement."
)
else:
warning_message = " Please replace your usage with '{}'.".format(
new_destination
)
@functools.wraps(decorated_func)
def wrapper(*args, **kwargs):
warnings.simplefilter("always", ActionDeprecatedWarning)
warnings.warn(
(
"Call to deprecated function '{}'"
"\nFunction was moved or removed.{}"
).format(decorated_func.__name__, warning_message),
category=ActionDeprecatedWarning,
stacklevel=4
)
return decorated_func(*args, **kwargs)
return wrapper
if func is None:
return _decorator
return _decorator(func)
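# Illustrative usage only ('openpype.new_location.func' is a made-up path):
# the decorator can be used bare or with an explicit replacement path.
#
#     @deprecated
#     def func():
#         ...
#
#     @deprecated("openpype.new_location.func")
#     def func():
#         ...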
@deprecated("openpype.pipeline.publish.get_errored_instances_from_context")
def get_errored_instances_from_context(context):
"""
Deprecated:
Since 3.14.* will be removed in 3.16.* or later.
"""
from openpype.pipeline.publish import get_errored_instances_from_context
return get_errored_instances_from_context(context)
@deprecated("openpype.pipeline.publish.get_errored_plugins_from_context")
def get_errored_plugins_from_data(context):
"""
Deprecated:
Since 3.14.* will be removed in 3.16.* or later.
"""
from openpype.pipeline.publish import get_errored_plugins_from_context
return get_errored_plugins_from_context(context)
# 'RepairAction' and 'RepairContextAction' were moved to
# 'openpype.pipeline.publish', please change your imports.
# There is no "reasonable" way to mark these classes as deprecated to show
# warning of wrong import.
# Deprecated since 3.14.* will be removed in 3.16.*
class RepairAction(pyblish.api.Action):
"""Repairs the action
@ -65,6 +103,7 @@ class RepairAction(pyblish.api.Action):
plugin.repair(instance)
# Deprecated since 3.14.* will be removed in 3.16.*
class RepairContextAction(pyblish.api.Action):
"""Repairs the action


@ -11,7 +11,6 @@ from .lib import (
PypeLogger,
Logger,
Anatomy,
config,
execute,
run_subprocess,
version_up,
@ -49,7 +48,6 @@ from .plugin import (
ValidateContentsOrder,
ValidateSceneOrder,
ValidateMeshOrder,
ValidationException
)
# temporary fix, might
@ -73,7 +71,6 @@ __all__ = [
"PypeLogger",
"Logger",
"Anatomy",
"config",
"execute",
"get_default_components",
"ApplicationManager",
@ -94,8 +91,6 @@ __all__ = [
"RepairAction",
"RepairContextAction",
"ValidationException",
# get contextual data
"version_up",
"get_asset",


@ -277,6 +277,13 @@ def projectmanager():
PypeCommands().launch_project_manager()
@main.command(context_settings={"ignore_unknown_options": True})
def publish_report_viewer():
from openpype.tools.publisher.publish_report_viewer import main
sys.exit(main())
@main.command()
@click.argument("output_path")
@click.option("--project", help="Define project context")


@ -45,6 +45,17 @@ from .entities import (
get_workfile_info,
)
from .entity_links import (
get_linked_asset_ids,
get_linked_assets,
get_linked_representation_id,
)
from .operations import (
create_project,
)
__all__ = (
"OpenPypeMongoConnection",
@ -88,4 +99,10 @@ __all__ = (
"get_thumbnail_id_from_source",
"get_workfile_info",
"get_linked_asset_ids",
"get_linked_assets",
"get_linked_representation_id",
"create_project",
)


@ -14,6 +14,8 @@ from bson.objectid import ObjectId
from .mongo import get_project_database, get_project_connection
PatternType = type(re.compile(""))
def _prepare_fields(fields, required_fields=None):
if not fields:
@ -32,17 +34,37 @@ def _prepare_fields(fields, required_fields=None):
return output
def _convert_id(in_id):
def convert_id(in_id):
"""Helper function for conversion of id from string to ObjectId.
Args:
in_id (Union[str, ObjectId, Any]): Entity id that should be converted
to right type for queries.
Returns:
Union[ObjectId, Any]: Id converted to ObjectId, or the input value unchanged.
"""
if isinstance(in_id, six.string_types):
return ObjectId(in_id)
return in_id
def _convert_ids(in_ids):
def convert_ids(in_ids):
"""Helper function for conversion of ids from string to ObjectId.
Args:
in_ids (Iterable[Union[str, ObjectId, Any]]): List of entity ids that
should be converted to right type for queries.
Returns:
List[ObjectId]: Converted ids to ObjectId.
"""
_output = set()
for in_id in in_ids:
if in_id is not None:
_output.add(_convert_id(in_id))
_output.add(convert_id(in_id))
return list(_output)
@ -58,7 +80,7 @@ def get_projects(active=True, inactive=False, fields=None):
yield project_doc
def get_project(project_name, active=True, inactive=False, fields=None):
def get_project(project_name, active=True, inactive=True, fields=None):
# Skip if both are disabled
if not active and not inactive:
return None
@ -115,7 +137,7 @@ def get_asset_by_id(project_name, asset_id, fields=None):
None: Asset was not found by id.
"""
asset_id = _convert_id(asset_id)
asset_id = convert_id(asset_id)
if not asset_id:
return None
@ -196,7 +218,7 @@ def _get_assets(
query_filter = {"type": {"$in": asset_types}}
if asset_ids is not None:
asset_ids = _convert_ids(asset_ids)
asset_ids = convert_ids(asset_ids)
if not asset_ids:
return []
query_filter["_id"] = {"$in": asset_ids}
@ -207,7 +229,7 @@ def _get_assets(
query_filter["name"] = {"$in": list(asset_names)}
if parent_ids is not None:
parent_ids = _convert_ids(parent_ids)
parent_ids = convert_ids(parent_ids)
if not parent_ids:
return []
query_filter["data.visualParent"] = {"$in": parent_ids}
@ -307,7 +329,7 @@ def get_asset_ids_with_subsets(project_name, asset_ids=None):
"type": "subset"
}
if asset_ids is not None:
asset_ids = _convert_ids(asset_ids)
asset_ids = convert_ids(asset_ids)
if not asset_ids:
return []
subset_query["parent"] = {"$in": asset_ids}
@ -347,7 +369,7 @@ def get_subset_by_id(project_name, subset_id, fields=None):
Dict: Subset document which can be reduced to specified 'fields'.
"""
subset_id = _convert_id(subset_id)
subset_id = convert_id(subset_id)
if not subset_id:
return None
@ -374,7 +396,7 @@ def get_subset_by_name(project_name, subset_name, asset_id, fields=None):
if not subset_name:
return None
asset_id = _convert_id(asset_id)
asset_id = convert_id(asset_id)
if not asset_id:
return None
@ -428,13 +450,13 @@ def get_subsets(
query_filter = {"type": {"$in": subset_types}}
if asset_ids is not None:
asset_ids = _convert_ids(asset_ids)
asset_ids = convert_ids(asset_ids)
if not asset_ids:
return []
query_filter["parent"] = {"$in": asset_ids}
if subset_ids is not None:
subset_ids = _convert_ids(subset_ids)
subset_ids = convert_ids(subset_ids)
if not subset_ids:
return []
query_filter["_id"] = {"$in": subset_ids}
@ -449,7 +471,7 @@ def get_subsets(
for asset_id, names in names_by_asset_ids.items():
if asset_id and names:
or_query.append({
"parent": _convert_id(asset_id),
"parent": convert_id(asset_id),
"name": {"$in": list(names)}
})
if not or_query:
@ -510,7 +532,7 @@ def get_version_by_id(project_name, version_id, fields=None):
Dict: Version document which can be reduced to specified 'fields'.
"""
version_id = _convert_id(version_id)
version_id = convert_id(version_id)
if not version_id:
return None
@ -537,7 +559,7 @@ def get_version_by_name(project_name, version, subset_id, fields=None):
Dict: Version document which can be reduced to specified 'fields'.
"""
subset_id = _convert_id(subset_id)
subset_id = convert_id(subset_id)
if not subset_id:
return None
@ -567,7 +589,7 @@ def version_is_latest(project_name, version_id):
bool: True if is latest version from subset else False.
"""
version_id = _convert_id(version_id)
version_id = convert_id(version_id)
if not version_id:
return False
version_doc = get_version_by_id(
@ -610,13 +632,13 @@ def _get_versions(
query_filter = {"type": {"$in": version_types}}
if subset_ids is not None:
subset_ids = _convert_ids(subset_ids)
subset_ids = convert_ids(subset_ids)
if not subset_ids:
return []
query_filter["parent"] = {"$in": subset_ids}
if version_ids is not None:
version_ids = _convert_ids(version_ids)
version_ids = convert_ids(version_ids)
if not version_ids:
return []
query_filter["_id"] = {"$in": version_ids}
@ -690,7 +712,7 @@ def get_hero_version_by_subset_id(project_name, subset_id, fields=None):
Dict: Hero version entity data.
"""
subset_id = _convert_id(subset_id)
subset_id = convert_id(subset_id)
if not subset_id:
return None
@ -720,7 +742,7 @@ def get_hero_version_by_id(project_name, version_id, fields=None):
Dict: Hero version entity data.
"""
version_id = _convert_id(version_id)
version_id = convert_id(version_id)
if not version_id:
return None
@ -786,7 +808,7 @@ def get_output_link_versions(project_name, version_id, fields=None):
links for passed version.
"""
version_id = _convert_id(version_id)
version_id = convert_id(version_id)
if not version_id:
return []
@ -812,7 +834,7 @@ def get_last_versions(project_name, subset_ids, fields=None):
dict[ObjectId, int]: Key is subset id and value is last version name.
"""
subset_ids = _convert_ids(subset_ids)
subset_ids = convert_ids(subset_ids)
if not subset_ids:
return {}
@ -898,7 +920,7 @@ def get_last_version_by_subset_id(project_name, subset_id, fields=None):
Dict: Version document which can be reduced to specified 'fields'.
"""
subset_id = _convert_id(subset_id)
subset_id = convert_id(subset_id)
if not subset_id:
return None
@ -971,7 +993,7 @@ def get_representation_by_id(project_name, representation_id, fields=None):
"type": {"$in": repre_types}
}
if representation_id is not None:
query_filter["_id"] = _convert_id(representation_id)
query_filter["_id"] = convert_id(representation_id)
conn = get_project_connection(project_name)
@ -996,7 +1018,7 @@ def get_representation_by_name(
to specified 'fields'.
"""
version_id = _convert_id(version_id)
version_id = convert_id(version_id)
if not version_id or not representation_name:
return None
repre_types = ["representation", "archived_representations"]
@ -1034,11 +1056,11 @@ def _regex_filters(filters):
for key, value in filters.items():
regexes = []
a_values = []
if isinstance(value, re.Pattern):
if isinstance(value, PatternType):
regexes.append(value)
elif isinstance(value, (list, tuple, set)):
for item in value:
if isinstance(item, re.Pattern):
if isinstance(item, PatternType):
regexes.append(item)
else:
a_values.append(item)
@ -1089,7 +1111,7 @@ def _get_representations(
query_filter = {"type": {"$in": repre_types}}
if representation_ids is not None:
representation_ids = _convert_ids(representation_ids)
representation_ids = convert_ids(representation_ids)
if not representation_ids:
return default_output
query_filter["_id"] = {"$in": representation_ids}
@ -1100,7 +1122,7 @@ def _get_representations(
query_filter["name"] = {"$in": list(representation_names)}
if version_ids is not None:
version_ids = _convert_ids(version_ids)
version_ids = convert_ids(version_ids)
if not version_ids:
return default_output
query_filter["parent"] = {"$in": version_ids}
@ -1111,7 +1133,7 @@ def _get_representations(
for version_id, names in names_by_version_ids.items():
if version_id and names:
or_query.append({
"parent": _convert_id(version_id),
"parent": convert_id(version_id),
"name": {"$in": list(names)}
})
if not or_query:
@ -1174,7 +1196,7 @@ def get_representations(
as filter. Filter ignored if 'None' is passed.
version_ids (Iterable[str]): Subset ids used as parent filter. Filter
ignored if 'None' is passed.
context_filters (Dict[str, List[str, re.Pattern]]): Filter by
context_filters (Dict[str, List[str, PatternType]]): Filter by
representation context fields.
names_by_version_ids (dict[ObjectId, list[str]]): Complex filtering
using version ids and list of names under the version.
@ -1220,7 +1242,7 @@ def get_archived_representations(
as filter. Filter ignored if 'None' is passed.
version_ids (Iterable[str]): Subset ids used as parent filter. Filter
ignored if 'None' is passed.
context_filters (Dict[str, List[str, re.Pattern]]): Filter by
context_filters (Dict[str, List[str, PatternType]]): Filter by
representation context fields.
names_by_version_ids (dict[ObjectId, List[str]]): Complex filtering
using version ids and list of names under the version.
@ -1361,7 +1383,7 @@ def get_thumbnail_id_from_source(project_name, src_type, src_id):
if not src_type or not src_id:
return None
query_filter = {"_id": _convert_id(src_id)}
query_filter = {"_id": convert_id(src_id)}
conn = get_project_connection(project_name)
src_doc = conn.find_one(query_filter, {"data.thumbnail_id"})
@ -1388,7 +1410,7 @@ def get_thumbnails(project_name, thumbnail_ids, fields=None):
"""
if thumbnail_ids:
thumbnail_ids = _convert_ids(thumbnail_ids)
thumbnail_ids = convert_ids(thumbnail_ids)
if not thumbnail_ids:
return []
@ -1416,7 +1438,7 @@ def get_thumbnail(project_name, thumbnail_id, fields=None):
if not thumbnail_id:
return None
query_filter = {"type": "thumbnail", "_id": _convert_id(thumbnail_id)}
query_filter = {"type": "thumbnail", "_id": convert_id(thumbnail_id)}
conn = get_project_connection(project_name)
return conn.find_one(query_filter, _prepare_fields(fields))
@ -1444,7 +1466,7 @@ def get_workfile_info(
query_filter = {
"type": "workfile",
"parent": _convert_id(asset_id),
"parent": convert_id(asset_id),
"task_name": task_name,
"filename": filename
}
@ -1455,7 +1477,7 @@ def get_workfile_info(
"""
## Custom data storage:
- Settings - OP settings overrides and local settings
- Logging - logs from PypeLogger
- Logging - logs from Logger
- Webpublisher - jobs
- Ftrack - events
- Maya - Shaders


@ -0,0 +1,241 @@
from .mongo import get_project_connection
from .entities import (
get_assets,
get_asset_by_id,
get_version_by_id,
get_representation_by_id,
convert_id,
)
def get_linked_asset_ids(project_name, asset_doc=None, asset_id=None):
"""Extract linked asset ids from asset document.
One of asset document or asset id must be passed.
Note:
Asset links currently work only from asset to assets.
Args:
asset_doc (dict): Asset document from DB.
Returns:
List[Union[ObjectId, str]]: Asset ids of input links.
"""
output = []
if not asset_doc and not asset_id:
return output
if not asset_doc:
asset_doc = get_asset_by_id(
project_name, asset_id, fields=["data.inputLinks"]
)
input_links = asset_doc["data"].get("inputLinks")
if not input_links:
return output
for item in input_links:
# Backwards compatibility for "_id" key which was replaced with
# "id"
if "_id" in item:
link_id = item["_id"]
else:
link_id = item["id"]
output.append(link_id)
return output
def get_linked_assets(
project_name, asset_doc=None, asset_id=None, fields=None
):
"""Return linked assets based on passed asset document.
One of asset document or asset id must be passed.
Args:
project_name (str): Name of project where to look for queried entities.
asset_doc (Dict[str, Any]): Asset document from database.
asset_id (Union[ObjectId, str]): Asset id. Can be used instead of
asset document.
fields (Iterable[str]): Fields that should be returned. All fields are
returned if 'None' is passed.
Returns:
List[Dict[str, Any]]: Asset documents of input links for passed
asset doc.
"""
if not asset_doc:
if not asset_id:
return []
asset_doc = get_asset_by_id(
project_name,
asset_id,
fields=["data.inputLinks"]
)
if not asset_doc:
return []
link_ids = get_linked_asset_ids(project_name, asset_doc=asset_doc)
if not link_ids:
return []
return list(get_assets(project_name, asset_ids=link_ids, fields=fields))
def get_linked_representation_id(
project_name, repre_doc=None, repre_id=None, link_type=None, max_depth=None
):
"""Returns list of linked ids of particular type (if provided).
One of representation document or representation id must be passed.
Note:
Representation links currently work only from representation through
version back to representations.
Args:
project_name (str): Name of project where look for links.
repre_doc (Dict[str, Any]): Representation document.
repre_id (Union[ObjectId, str]): Representation id.
link_type (str): Type of link (e.g. 'reference', ...).
max_depth (int): Limit recursion level. Default: 0
Returns:
List[ObjectId]: Linked representation ids.
"""
if repre_doc:
repre_id = repre_doc["_id"]
if repre_id:
repre_id = convert_id(repre_id)
if not repre_id and not repre_doc:
return []
version_id = None
if repre_doc:
version_id = repre_doc.get("parent")
if not version_id:
repre_doc = get_representation_by_id(
project_name, repre_id, fields=["parent"]
)
version_id = repre_doc["parent"]
if not version_id:
return []
version_doc = get_version_by_id(
project_name, version_id, fields=["type", "version_id"]
)
if version_doc["type"] == "hero_version":
version_id = version_doc["version_id"]
if max_depth is None:
max_depth = 0
match = {
"_id": version_id,
# Links are not stored to hero versions at this moment so filter
# is limited to just versions
"type": "version"
}
graph_lookup = {
"from": project_name,
"startWith": "$data.inputLinks.id",
"connectFromField": "data.inputLinks.id",
"connectToField": "_id",
"as": "outputs_recursive",
"depthField": "depth"
}
if max_depth != 0:
# We offset by -1 since 0 basically means no recursion
# but the recursion only happens after the initial lookup
# for outputs.
graph_lookup["maxDepth"] = max_depth - 1
query_pipeline = [
# Match
{"$match": match},
# Recursive graph lookup for inputs
{"$graphLookup": graph_lookup}
]
conn = get_project_connection(project_name)
result = conn.aggregate(query_pipeline)
referenced_version_ids = _process_referenced_pipeline_result(
result, link_type
)
if not referenced_version_ids:
return []
ref_ids = conn.distinct(
"_id",
filter={
"parent": {"$in": list(referenced_version_ids)},
"type": "representation"
}
)
return list(ref_ids)
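# Illustrative usage only (the representation id is a made-up value):
#     linked_repre_ids = get_linked_representation_id(
#         "my_project",
#         repre_id="570c6a3a7f40ca3b9a75d3f3",
#         link_type="reference",
#         max_depth=1)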
def _process_referenced_pipeline_result(result, link_type):
"""Filters result from pipeline for particular link_type.
Pipeline cannot use link_type directly in a query.
Returns:
(list)
"""
referenced_version_ids = set()
correctly_linked_ids = set()
for item in result:
input_links = item.get("data", {}).get("inputLinks")
if not input_links:
continue
_filter_input_links(
input_links,
link_type,
correctly_linked_ids
)
# outputs_recursive in random order, sort by depth
outputs_recursive = item.get("outputs_recursive")
if not outputs_recursive:
continue
for output in sorted(outputs_recursive, key=lambda o: o["depth"]):
output_links = output.get("data", {}).get("inputLinks")
if not output_links:
continue
# Leaf
if output["_id"] not in correctly_linked_ids:
continue
_filter_input_links(
output_links,
link_type,
correctly_linked_ids
)
referenced_version_ids.add(output["_id"])
return referenced_version_ids
def _filter_input_links(input_links, link_type, correctly_linked_ids):
for input_link in input_links:
if link_type and input_link["type"] != link_type:
continue
link_id = input_link.get("id") or input_link.get("_id")
if link_id is not None:
correctly_linked_ids.add(link_id)


@ -9,6 +9,7 @@ from bson.objectid import ObjectId
from pymongo import DeleteOne, InsertOne, UpdateOne
from .mongo import get_project_connection
from .entities import get_project
REMOVED_VALUE = object()
@ -22,8 +23,10 @@ CURRENT_PROJECT_CONFIG_SCHEMA = "openpype:config-2.0"
CURRENT_ASSET_DOC_SCHEMA = "openpype:asset-3.0"
CURRENT_SUBSET_SCHEMA = "openpype:subset-3.0"
CURRENT_VERSION_SCHEMA = "openpype:version-3.0"
CURRENT_HERO_VERSION_SCHEMA = "openpype:hero_version-1.0"
CURRENT_REPRESENTATION_SCHEMA = "openpype:representation-2.0"
CURRENT_WORKFILE_INFO_SCHEMA = "openpype:workfile-1.0"
CURRENT_THUMBNAIL_SCHEMA = "openpype:thumbnail-1.0"
def _create_or_convert_to_mongo_id(mongo_id):
@ -160,6 +163,34 @@ def new_version_doc(version, subset_id, data=None, entity_id=None):
}
def new_hero_version_doc(version_id, subset_id, data=None, entity_id=None):
"""Create skeleton data of hero version document.
Args:
version_id (ObjectId): Is considered as unique identifier of version
under subset.
subset_id (Union[str, ObjectId]): Id of parent subset.
data (Dict[str, Any]): Version document data.
entity_id (Union[str, ObjectId]): Predefined id of document. New id is
created if not passed.
Returns:
Dict[str, Any]: Skeleton of version document.
"""
if data is None:
data = {}
return {
"_id": _create_or_convert_to_mongo_id(entity_id),
"schema": CURRENT_HERO_VERSION_SCHEMA,
"type": "hero_version",
"version_id": version_id,
"parent": subset_id,
"data": data
}
def new_representation_doc(
name, version_id, context, data=None, entity_id=None
):
@ -195,6 +226,29 @@ def new_representation_doc(
}
def new_thumbnail_doc(data=None, entity_id=None):
"""Create skeleton data of thumbnail document.
Args:
data (Dict[str, Any]): Thumbnail document data.
entity_id (Union[str, ObjectId]): Predefined id of document. New id is
created if not passed.
Returns:
Dict[str, Any]: Skeleton of thumbnail document.
"""
if data is None:
data = {}
return {
"_id": _create_or_convert_to_mongo_id(entity_id),
"type": "thumbnail",
"schema": CURRENT_THUMBNAIL_SCHEMA,
"data": data
}
def new_workfile_info_doc(
filename, asset_id, task_name, files, data=None, entity_id=None
):
@ -268,6 +322,20 @@ def prepare_version_update_data(old_doc, new_doc, replace=True):
return _prepare_update_data(old_doc, new_doc, replace)
def prepare_hero_version_update_data(old_doc, new_doc, replace=True):
"""Compare two hero version documents and prepare update data.
Based on compared values will create update data for 'UpdateOperation'.
Empty output means that documents are identical.
Returns:
Dict[str, Any]: Changes between old and new document.
"""
return _prepare_update_data(old_doc, new_doc, replace)
def prepare_representation_update_data(old_doc, new_doc, replace=True):
"""Compare two representation documents and prepare update data.
@ -638,3 +706,89 @@ class OperationsSession(object):
operation = DeleteOperation(project_name, entity_type, entity_id)
self.add(operation)
return operation
def create_project(project_name, project_code, library_project=False):
"""Create project using OpenPype settings.
This project creation function is not validating project document on
creation. It is because project document is created blindly with only
minimum required information about project which is it's name, code, type
and schema.
Entered project name must be unique and project must not exist yet.
Note:
This function is here to be OP v4 ready but in v3 has more logic
to do. That's why inner imports are in the body.
Args:
project_name(str): New project name. Should be unique.
project_code(str): Project's code should be unique too.
library_project(bool): Project is library project.
Raises:
ValueError: When project name already exists in MongoDB.
Returns:
dict: Created project document.
"""
from openpype.settings import ProjectSettings, SaveWarningExc
from openpype.pipeline.schema import validate
if get_project(project_name, fields=["name"]):
raise ValueError("Project with name \"{}\" already exists".format(
project_name
))
if not PROJECT_NAME_REGEX.match(project_name):
raise ValueError((
"Project name \"{}\" contain invalid characters"
).format(project_name))
project_doc = {
"type": "project",
"name": project_name,
"data": {
"code": project_code,
"library_project": library_project
},
"schema": CURRENT_PROJECT_SCHEMA
}
op_session = OperationsSession()
# Insert document with basic data
create_op = op_session.create_entity(
project_name, project_doc["type"], project_doc
)
op_session.commit()
# Load ProjectSettings for the project and save it to store all attributes
# and Anatomy
try:
project_settings_entity = ProjectSettings(project_name)
project_settings_entity.save()
except SaveWarningExc as exc:
print(str(exc))
except Exception:
op_session.delete_entity(
project_name, project_doc["type"], create_op.entity_id
)
op_session.commit()
raise
project_doc = get_project(project_name)
try:
# Validate created project document
validate(project_doc)
except Exception:
# Remove project if is not valid
op_session.delete_entity(
project_name, project_doc["type"], create_op.entity_id
)
op_session.commit()
raise
return project_doc
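# Illustrative usage only (project name and code are made-up values):
#     project_doc = create_project("MyLibrary", "mylib", library_project=True)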


@ -19,6 +19,7 @@ class AddLastWorkfileToLaunchArgs(PreLaunchHook):
"hiero",
"houdini",
"nukestudio",
"fusion",
"blender",
"photoshop",
"tvpaint",


@ -1,8 +1,6 @@
import os
from openpype.lib import (
PreLaunchHook,
create_workdir_extra_folders
)
from openpype.lib import PreLaunchHook
from openpype.pipeline.workfile import create_workdir_extra_folders
class AddLastWorkfileToLaunchArgs(PreLaunchHook):


@ -1,13 +1,24 @@
from .host import (
HostBase,
)
from .interfaces import (
IWorkfileHost,
ILoadHost,
IPublishHost,
INewPublisher,
)
from .dirmap import HostDirmap
__all__ = (
"HostBase",
"IWorkfileHost",
"ILoadHost",
"IPublishHost",
"INewPublisher",
"HostDirmap",
)

openpype/host/dirmap.py (new file, 205 lines)

@ -0,0 +1,205 @@
"""Dirmap functionality used in host integrations inside DCCs.
The idea for the current dirmap implementation comes from Maya, where it is
possible to enter source and destination roots; Maya then tries to replace
each source found in a referenced file with each destination path. The first
path that exists is used.
"""
import os
from abc import ABCMeta, abstractmethod
import six
from openpype.lib import Logger
from openpype.modules import ModulesManager
from openpype.settings import get_project_settings
from openpype.settings.lib import get_site_local_overrides
@six.add_metaclass(ABCMeta)
class HostDirmap(object):
"""Abstract class for running dirmap on a workfile in a host.
Dirmap is used to translate paths inside of host workfile from one
OS to another. (Eg. arstist created workfile on Win, different artists
opens same file on Linux.)
Expects methods to be implemented inside of host:
on_dirmap_enabled: run host code for enabling dirmap
do_dirmap: run host code to do actual remapping
"""
def __init__(
self, host_name, project_name, project_settings=None, sync_module=None
):
self.host_name = host_name
self.project_name = project_name
self._project_settings = project_settings
self._sync_module = sync_module # to limit reinit of Modules
self._log = None
self._mapping = None # cache mapping
@property
def sync_module(self):
if self._sync_module is None:
manager = ModulesManager()
self._sync_module = manager["sync_server"]
return self._sync_module
@property
def project_settings(self):
if self._project_settings is None:
self._project_settings = get_project_settings(self.project_name)
return self._project_settings
@property
def log(self):
if self._log is None:
self._log = Logger.get_logger(self.__class__.__name__)
return self._log
@abstractmethod
def on_enable_dirmap(self):
"""Run host dependent operation for enabling dirmap if necessary."""
pass
@abstractmethod
def dirmap_routine(self, source_path, destination_path):
"""Run host dependent remapping from source_path to destination_path"""
pass
def process_dirmap(self):
# type: () -> None
"""Go through all paths in Settings and set them using `dirmap`.
If the artist has Site Sync enabled, the dirmap mapping is taken directly
from Local Settings when the artist is syncing the workfile locally.
"""
if not self._mapping:
self._mapping = self.get_mappings(self.project_settings)
if not self._mapping:
return
self.log.info("Processing directory mapping ...")
self.on_enable_dirmap()
self.log.info("mapping:: {}".format(self._mapping))
for k, sp in enumerate(self._mapping["source-path"]):
dst = self._mapping["destination-path"][k]
try:
print("{} -> {}".format(sp, dst))
self.dirmap_routine(sp, dst)
except IndexError:
# missing corresponding destination path
self.log.error((
"invalid dirmap mapping, missing corresponding"
" destination directory."
))
break
except RuntimeError:
self.log.error(
"invalid path {} -> {}, mapping not registered".format(
sp, dst
)
)
continue
def get_mappings(self, project_settings):
"""Get translation from source-path to destination-path.
It checks whether Site Sync is enabled and the user chose to use the local
site; in that case the configuration in Local Settings takes precedence.
"""
local_mapping = self._get_local_sync_dirmap(project_settings)
dirmap_label = "{}-dirmap".format(self.host_name)
if (
not self.project_settings[self.host_name].get(dirmap_label)
and not local_mapping
):
return {}
mapping_settings = self.project_settings[self.host_name][dirmap_label]
mapping_enabled = mapping_settings["enabled"] or bool(local_mapping)
if not mapping_enabled:
return {}
mapping = (
local_mapping
or mapping_settings["paths"]
or {}
)
if (
not mapping
or not mapping.get("destination-path")
or not mapping.get("source-path")
):
return {}
return mapping
def _get_local_sync_dirmap(self, project_settings):
"""
Returns dirmap if synch to local project is enabled.
Only valid mapping is from roots of remote site to local site set
in Local Settings.
Args:
project_settings (dict)
Returns:
dict : { "source-path": [XXX], "destination-path": [YYYY]}
"""
mapping = {}
if not project_settings["global"]["sync_server"]["enabled"]:
return mapping
project_name = os.getenv("AVALON_PROJECT")
active_site = self.sync_module.get_local_normalized_site(
self.sync_module.get_active_site(project_name))
remote_site = self.sync_module.get_local_normalized_site(
self.sync_module.get_remote_site(project_name))
self.log.debug(
"active {} - remote {}".format(active_site, remote_site)
)
if (
active_site == "local"
and project_name in self.sync_module.get_enabled_projects()
and active_site != remote_site
):
sync_settings = self.sync_module.get_sync_project_setting(
project_name,
exclude_locals=False,
cached=False)
active_overrides = get_site_local_overrides(
project_name, active_site)
remote_overrides = get_site_local_overrides(
project_name, remote_site)
self.log.debug("local overrides {}".format(active_overrides))
self.log.debug("remote overrides {}".format(remote_overrides))
for root_name, active_site_dir in active_overrides.items():
remote_site_dir = (
remote_overrides.get(root_name)
or sync_settings["sites"][remote_site]["root"][root_name]
)
if os.path.isdir(active_site_dir):
if "destination-path" not in mapping:
mapping["destination-path"] = []
mapping["destination-path"].append(active_site_dir)
if "source-path" not in mapping:
mapping["source-path"] = []
mapping["source-path"].append(remote_site_dir)
self.log.debug("local sync mapping:: {}".format(mapping))
return mapping
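
To make the abstract contract above concrete, here is a minimal, hypothetical subclass; the class name, host name, and the print-based routine are illustrative assumptions, not part of this commit:

from openpype.host.dirmap import HostDirmap

class ExampleDirmap(HostDirmap):
    """Hypothetical dirmap implementation for an imaginary host (sketch)."""

    def on_enable_dirmap(self):
        # Some DCCs need a feature toggled before remapping works;
        # this imaginary host does not, so there is nothing to do.
        pass

    def dirmap_routine(self, source_path, destination_path):
        # A real DCC would rewrite file references in the open scene here.
        print("remap {} -> {}".format(source_path, destination_path))

# Usage (requires a configured project):
# ExampleDirmap("example_host", "MyProject").process_dirmap()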

View file

@ -1,37 +1,12 @@
import logging
import contextlib
from abc import ABCMeta, abstractproperty, abstractmethod
from abc import ABCMeta, abstractproperty
import six
# NOTE can't import 'typing' because of issues in Maya 2020
# - shiboken crashes on 'typing' module import
class MissingMethodsError(ValueError):
"""Exception when host miss some required methods for specific workflow.
Args:
host (HostBase): Host implementation where are missing methods.
missing_methods (list[str]): List of missing methods.
"""
def __init__(self, host, missing_methods):
joined_missing = ", ".join(
['"{}"'.format(item) for item in missing_methods]
)
if isinstance(host, HostBase):
host_name = host.name
else:
try:
host_name = host.__file__.replace("\\", "/").split("/")[-3]
except Exception:
host_name = str(host)
message = (
"Host \"{}\" miss methods {}".format(host_name, joined_missing)
)
super(MissingMethodsError, self).__init__(message)
@six.add_metaclass(ABCMeta)
class HostBase(object):
"""Base of host implementation class.
@ -185,347 +160,3 @@ class HostBase(object):
yield
finally:
pass
class ILoadHost:
"""Implementation requirements to be able use reference of representations.
The load plugins can do referencing even without implementation of methods
here, but switch and removement of containers would not be possible.
Questions:
- Is list container dependency of host or load plugins?
- Should this be directly in HostBase?
- how to find out if referencing is available?
- do we need to know that?
"""
@staticmethod
def get_missing_load_methods(host):
"""Look for missing methods on "old type" host implementation.
Method is used for validation of implemented functions related to
loading. Checks only existence of methods.
Args:
Union[ModuleType, HostBase]: Object of host where to look for
required methods.
Returns:
list[str]: Missing method implementations for loading workflow.
"""
if isinstance(host, ILoadHost):
return []
required = ["ls"]
missing = []
for name in required:
if not hasattr(host, name):
missing.append(name)
return missing
@staticmethod
def validate_load_methods(host):
"""Validate implemented methods of "old type" host for load workflow.
Args:
Union[ModuleType, HostBase]: Object of host to validate.
Raises:
MissingMethodsError: If there are missing methods on host
implementation.
"""
missing = ILoadHost.get_missing_load_methods(host)
if missing:
raise MissingMethodsError(host, missing)
@abstractmethod
def get_containers(self):
"""Retreive referenced containers from scene.
This can be implemented in hosts where referencing can be used.
Todo:
Rename function to something more self explanatory.
Suggestion: 'get_containers'
Returns:
list[dict]: Information about loaded containers.
"""
pass
# --- Deprecated method names ---
def ls(self):
"""Deprecated variant of 'get_containers'.
Todo:
Remove when all usages are replaced.
"""
return self.get_containers()
@six.add_metaclass(ABCMeta)
class IWorkfileHost:
"""Implementation requirements to be able use workfile utils and tool."""
@staticmethod
def get_missing_workfile_methods(host):
"""Look for missing methods on "old type" host implementation.
Method is used for validation of implemented functions related to
workfiles. Checks only existence of methods.
Args:
Union[ModuleType, HostBase]: Object of host where to look for
required methods.
Returns:
list[str]: Missing method implementations for workfiles workflow.
"""
if isinstance(host, IWorkfileHost):
return []
required = [
"open_file",
"save_file",
"current_file",
"has_unsaved_changes",
"file_extensions",
"work_root",
]
missing = []
for name in required:
if not hasattr(host, name):
missing.append(name)
return missing
@staticmethod
def validate_workfile_methods(host):
"""Validate methods of "old type" host for workfiles workflow.
Args:
Union[ModuleType, HostBase]: Object of host to validate.
Raises:
MissingMethodsError: If there are missing methods on host
implementation.
"""
missing = IWorkfileHost.get_missing_workfile_methods(host)
if missing:
raise MissingMethodsError(host, missing)
@abstractmethod
def get_workfile_extensions(self):
"""Extensions that can be used as save.
Questions:
This could potentially use 'HostDefinition'.
"""
return []
@abstractmethod
def save_workfile(self, dst_path=None):
"""Save currently opened scene.
Args:
dst_path (str): Where the current scene should be saved. Or use
current path if 'None' is passed.
"""
pass
@abstractmethod
def open_workfile(self, filepath):
"""Open passed filepath in the host.
Args:
filepath (str): Path to workfile.
"""
pass
@abstractmethod
def get_current_workfile(self):
"""Retreive path to current opened file.
Returns:
str: Path to file which is currently opened.
None: If nothing is opened.
"""
return None
def workfile_has_unsaved_changes(self):
"""Currently opened scene is saved.
Not all hosts can know if current scene is saved because the API of
DCC does not support it.
Returns:
bool: True if scene is saved and False if has unsaved
modifications.
None: Can't tell if workfiles has modifications.
"""
return None
def work_root(self, session):
"""Modify workdir per host.
Default implementation keeps workdir untouched.
Warnings:
We must handle this modification with more sofisticated way because
this can't be called out of DCC so opening of last workfile
(calculated before DCC is launched) is complicated. Also breaking
defined work template is not a good idea.
Only place where it's really used and can make sense is Maya. There
workspace.mel can modify subfolders where to look for maya files.
Args:
session (dict): Session context data.
Returns:
str: Path to new workdir.
"""
return session["AVALON_WORKDIR"]
# --- Deprecated method names ---
def file_extensions(self):
"""Deprecated variant of 'get_workfile_extensions'.
Todo:
Remove when all usages are replaced.
"""
return self.get_workfile_extensions()
def save_file(self, dst_path=None):
"""Deprecated variant of 'save_workfile'.
Todo:
Remove when all usages are replaced.
"""
self.save_workfile(dst_path)
def open_file(self, filepath):
"""Deprecated variant of 'open_workfile'.
Todo:
Remove when all usages are replaced.
"""
return self.open_workfile(filepath)
def current_file(self):
"""Deprecated variant of 'get_current_workfile'.
Todo:
Remove when all usages are replaced.
"""
return self.get_current_workfile()
def has_unsaved_changes(self):
"""Deprecated variant of 'workfile_has_unsaved_changes'.
Todo:
Remove when all usages are replaced.
"""
return self.workfile_has_unsaved_changes()
class INewPublisher:
"""Functions related to new creation system in new publisher.
New publisher is not storing information only about each created instance
but also some global data. At this moment are data related only to context
publish plugins but that can extend in future.
"""
@staticmethod
def get_missing_publish_methods(host):
"""Look for missing methods on "old type" host implementation.
Method is used for validation of implemented functions related to
new publish creation. Checks only existence of methods.
Args:
Union[ModuleType, HostBase]: Host module where to look for
required methods.
Returns:
list[str]: Missing method implementations for new publisher
workflow.
"""
if isinstance(host, INewPublisher):
return []
required = [
"get_context_data",
"update_context_data",
]
missing = []
for name in required:
if not hasattr(host, name):
missing.append(name)
return missing
@staticmethod
def validate_publish_methods(host):
"""Validate implemented methods of "old type" host.
Args:
Union[ModuleType, HostBase]: Host module to validate.
Raises:
MissingMethodsError: If there are missing methods on host
implementation.
"""
missing = INewPublisher.get_missing_publish_methods(host)
if missing:
raise MissingMethodsError(host, missing)
@abstractmethod
def get_context_data(self):
"""Get global data related to creation-publishing from workfile.
These data are not related to any created instance but to whole
publishing context. Not saving/returning them will cause that each
reset of publishing resets all values to default ones.
Context data can contain information about enabled/disabled publish
plugins or other values that can be filled by artist.
Returns:
dict: Context data stored using 'update_context_data'.
"""
pass
@abstractmethod
def update_context_data(self, data, changes):
"""Store global context data to workfile.
Called when some values in context data has changed.
Without storing the values in a way that 'get_context_data' would
return them will each reset of publishing cause loose of filled values
by artist. Best practice is to store values into workfile, if possible.
Args:
data (dict): New data as are.
changes (dict): Only data that has been changed. Each value has
tuple with '(<old>, <new>)' value.
"""
pass

386
openpype/host/interfaces.py Normal file
View file

@ -0,0 +1,386 @@
from abc import ABCMeta, abstractmethod
import six
class MissingMethodsError(ValueError):
"""Exception when host miss some required methods for specific workflow.
Args:
host (HostBase): Host implementation where are missing methods.
missing_methods (list[str]): List of missing methods.
"""
def __init__(self, host, missing_methods):
joined_missing = ", ".join(
['"{}"'.format(item) for item in missing_methods]
)
host_name = getattr(host, "name", None)
if not host_name:
try:
host_name = host.__file__.replace("\\", "/").split("/")[-3]
except Exception:
host_name = str(host)
message = (
"Host \"{}\" miss methods {}".format(host_name, joined_missing)
)
super(MissingMethodsError, self).__init__(message)
class ILoadHost:
"""Implementation requirements to be able use reference of representations.
The load plugins can do referencing even without implementation of methods
here, but switch and removement of containers would not be possible.
Questions:
- Is list container dependency of host or load plugins?
- Should this be directly in HostBase?
- how to find out if referencing is available?
- do we need to know that?
"""
@staticmethod
def get_missing_load_methods(host):
"""Look for missing methods on "old type" host implementation.
Method is used for validation of implemented functions related to
loading. Checks only existence of methods.
Args:
Union[ModuleType, HostBase]: Object of host where to look for
required methods.
Returns:
list[str]: Missing method implementations for loading workflow.
"""
if isinstance(host, ILoadHost):
return []
required = ["ls"]
missing = []
for name in required:
if not hasattr(host, name):
missing.append(name)
return missing
@staticmethod
def validate_load_methods(host):
"""Validate implemented methods of "old type" host for load workflow.
Args:
Union[ModuleType, HostBase]: Object of host to validate.
Raises:
MissingMethodsError: If there are missing methods on host
implementation.
"""
missing = ILoadHost.get_missing_load_methods(host)
if missing:
raise MissingMethodsError(host, missing)
@abstractmethod
def get_containers(self):
"""Retreive referenced containers from scene.
This can be implemented in hosts where referencing can be used.
Todo:
Rename function to something more self explanatory.
Suggestion: 'get_containers'
Returns:
list[dict]: Information about loaded containers.
"""
pass
# --- Deprecated method names ---
def ls(self):
"""Deprecated variant of 'get_containers'.
Todo:
Remove when all usages are replaced.
"""
return self.get_containers()
@six.add_metaclass(ABCMeta)
class IWorkfileHost:
"""Implementation requirements to be able use workfile utils and tool."""
@staticmethod
def get_missing_workfile_methods(host):
"""Look for missing methods on "old type" host implementation.
Method is used for validation of implemented functions related to
workfiles. Checks only existence of methods.
Args:
Union[ModuleType, HostBase]: Object of host where to look for
required methods.
Returns:
list[str]: Missing method implementations for workfiles workflow.
"""
if isinstance(host, IWorkfileHost):
return []
required = [
"open_file",
"save_file",
"current_file",
"has_unsaved_changes",
"file_extensions",
"work_root",
]
missing = []
for name in required:
if not hasattr(host, name):
missing.append(name)
return missing
@staticmethod
def validate_workfile_methods(host):
"""Validate methods of "old type" host for workfiles workflow.
Args:
Union[ModuleType, HostBase]: Object of host to validate.
Raises:
MissingMethodsError: If there are missing methods on host
implementation.
"""
missing = IWorkfileHost.get_missing_workfile_methods(host)
if missing:
raise MissingMethodsError(host, missing)
@abstractmethod
def get_workfile_extensions(self):
"""Extensions that can be used as save.
Questions:
This could potentially use 'HostDefinition'.
"""
return []
@abstractmethod
def save_workfile(self, dst_path=None):
"""Save currently opened scene.
Args:
dst_path (str): Where the current scene should be saved. Or use
current path if 'None' is passed.
"""
pass
@abstractmethod
def open_workfile(self, filepath):
"""Open passed filepath in the host.
Args:
filepath (str): Path to workfile.
"""
pass
@abstractmethod
def get_current_workfile(self):
"""Retreive path to current opened file.
Returns:
str: Path to file which is currently opened.
None: If nothing is opened.
"""
return None
def workfile_has_unsaved_changes(self):
"""Currently opened scene is saved.
Not all hosts can know if current scene is saved because the API of
DCC does not support it.
Returns:
bool: True if scene is saved and False if has unsaved
modifications.
None: Can't tell if workfiles has modifications.
"""
return None
def work_root(self, session):
"""Modify workdir per host.
Default implementation keeps workdir untouched.
Warnings:
We must handle this modification with more sofisticated way because
this can't be called out of DCC so opening of last workfile
(calculated before DCC is launched) is complicated. Also breaking
defined work template is not a good idea.
Only place where it's really used and can make sense is Maya. There
workspace.mel can modify subfolders where to look for maya files.
Args:
session (dict): Session context data.
Returns:
str: Path to new workdir.
"""
return session["AVALON_WORKDIR"]
# --- Deprecated method names ---
def file_extensions(self):
"""Deprecated variant of 'get_workfile_extensions'.
Todo:
Remove when all usages are replaced.
"""
return self.get_workfile_extensions()
def save_file(self, dst_path=None):
"""Deprecated variant of 'save_workfile'.
Todo:
Remove when all usages are replaced.
"""
self.save_workfile(dst_path)
def open_file(self, filepath):
"""Deprecated variant of 'open_workfile'.
Todo:
Remove when all usages are replaced.
"""
return self.open_workfile(filepath)
def current_file(self):
"""Deprecated variant of 'get_current_workfile'.
Todo:
Remove when all usages are replaced.
"""
return self.get_current_workfile()
def has_unsaved_changes(self):
"""Deprecated variant of 'workfile_has_unsaved_changes'.
Todo:
Remove when all usages are replaced.
"""
return self.workfile_has_unsaved_changes()
class IPublishHost:
"""Functions related to new creation system in new publisher.
New publisher is not storing information only about each created instance
but also some global data. At this moment are data related only to context
publish plugins but that can extend in future.
"""
@staticmethod
def get_missing_publish_methods(host):
"""Look for missing methods on "old type" host implementation.
Method is used for validation of implemented functions related to
new publish creation. Checks only existence of methods.
Args:
Union[ModuleType, HostBase]: Host module where to look for
required methods.
Returns:
list[str]: Missing method implementations for new publisher
workflow.
"""
if isinstance(host, IPublishHost):
return []
required = [
"get_context_data",
"update_context_data",
"get_context_title",
"get_current_context",
]
missing = []
for name in required:
if not hasattr(host, name):
missing.append(name)
return missing
@staticmethod
def validate_publish_methods(host):
"""Validate implemented methods of "old type" host.
Args:
Union[ModuleType, HostBase]: Host module to validate.
Raises:
MissingMethodsError: If there are missing methods on host
implementation.
"""
missing = IPublishHost.get_missing_publish_methods(host)
if missing:
raise MissingMethodsError(host, missing)
@abstractmethod
def get_context_data(self):
"""Get global data related to creation-publishing from workfile.
These data are not related to any created instance but to whole
publishing context. Not saving/returning them will cause that each
reset of publishing resets all values to default ones.
Context data can contain information about enabled/disabled publish
plugins or other values that can be filled by artist.
Returns:
dict: Context data stored using 'update_context_data'.
"""
pass
@abstractmethod
def update_context_data(self, data, changes):
"""Store global context data to workfile.
Called when some values in context data has changed.
Without storing the values in a way that 'get_context_data' would
return them will each reset of publishing cause loose of filled values
by artist. Best practice is to store values into workfile, if possible.
Args:
data (dict): New data as are.
changes (dict): Only data that has been changed. Each value has
tuple with '(<old>, <new>)' value.
"""
pass
class INewPublisher(IPublishHost):
"""Legacy interface replaced by 'IPublishHost'.
Deprecated:
'INewPublisher' is replaced by 'IPublishHost' please change your
imports.
There is no "reasonable" way hot mark these classes as deprecated
to show warning of wrong import. Deprecated since 3.14.* will be
removed in 3.15.*
"""
pass
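
To illustrate the validation helpers above, here is a small self-contained sketch; the `legacy_host` module built below is a hypothetical stand-in for an "old type" host, not anything shipped with this commit:

from types import ModuleType

from openpype.host.interfaces import IWorkfileHost, MissingMethodsError

# Hypothetical "old type" host module implementing only part of
# the workfiles contract.
legacy_host = ModuleType("legacy_host")
legacy_host.open_file = lambda filepath: None
legacy_host.save_file = lambda dst_path=None: None
legacy_host.current_file = lambda: None
legacy_host.has_unsaved_changes = lambda: False
# "file_extensions" and "work_root" are intentionally missing.

print(IWorkfileHost.get_missing_workfile_methods(legacy_host))
# ['file_extensions', 'work_root']

try:
    IWorkfileHost.validate_workfile_methods(legacy_host)
except MissingMethodsError as exc:
    print(exc)  # reports the two missing methods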

View file

@ -1,9 +1,6 @@
def add_implementation_envs(env, _app):
"""Modify environments to contain all required for implementation."""
defaults = {
"OPENPYPE_LOG_NO_COLORS": "True",
"WEBSOCKET_URL": "ws://localhost:8097/ws/"
}
for key, value in defaults.items():
if not env.get(key):
env[key] = value
from .addon import AfterEffectsAddon
__all__ = (
"AfterEffectsAddon",
)

View file

@ -0,0 +1,22 @@
from openpype.modules import OpenPypeModule, IHostAddon
class AfterEffectsAddon(OpenPypeModule, IHostAddon):
name = "aftereffects"
host_name = "aftereffects"
def initialize(self, module_settings):
self.enabled = True
def add_implementation_envs(self, env, _app):
"""Modify environments to contain all required for implementation."""
defaults = {
"OPENPYPE_LOG_NO_COLORS": "True",
"WEBSOCKET_URL": "ws://localhost:8097/ws/"
}
for key, value in defaults.items():
if not env.get(key):
env[key] = value
def get_workfile_extensions(self):
return [".aep"]

View file

@ -12,6 +12,7 @@ from wsrpc_aiohttp import (
from Qt import QtCore
from openpype.lib import Logger
from openpype.pipeline import legacy_io
from openpype.tools.utils import host_tools
from openpype.tools.adobe_webserver.app import WebServerTool
@ -84,8 +85,6 @@ class ProcessLauncher(QtCore.QObject):
@property
def log(self):
if self._log is None:
from openpype.api import Logger
self._log = Logger.get_logger("{}-launcher".format(
self.route_name))
return self._log

View file

@ -1,13 +1,16 @@
import os
import sys
import re
import json
import contextlib
import traceback
import logging
from functools import partial
from Qt import QtWidgets
from openpype.pipeline import install_host
from openpype.lib.remote_publish import headless_publish
from openpype.modules import ModulesManager
from openpype.tools.utils import host_tools
from .launch_logic import ProcessLauncher, get_stub
@ -35,10 +38,18 @@ def main(*subprocess_args):
launcher.start()
if os.environ.get("HEADLESS_PUBLISH"):
launcher.execute_in_main_thread(lambda: headless_publish(
log,
"CloseAE",
os.environ.get("IS_TEST")))
manager = ModulesManager()
webpublisher_addon = manager["webpublisher"]
launcher.execute_in_main_thread(
partial(
webpublisher_addon.headless_publish,
log,
"CloseAE",
os.environ.get("IS_TEST")
)
)
elif os.environ.get("AVALON_PHOTOSHOP_WORKFILES_ON_LAUNCH", True):
save = False
if os.getenv("WORKFILES_SAVE_AS"):
@ -68,3 +79,57 @@ def get_extension_manifest_path():
"CSXS",
"manifest.xml"
)
def get_unique_layer_name(layers, name):
"""
Gets all layer names and, if 'name' is present among them, increases
the suffix by 1 (i.e. creates a unique layer name - for the Loader).
Args:
layers (list): of strings, names only
name (string): checked value
Returns:
(string): name_00X (without version)
"""
names = {}
for layer in layers:
layer_name = re.sub(r'_\d{3}$', '', layer)
if layer_name in names.keys():
names[layer_name] = names[layer_name] + 1
else:
names[layer_name] = 1
occurrences = names.get(name, 0)
return "{}_{:0>3d}".format(name, occurrences + 1)
def get_background_layers(file_url):
"""
Pulls file names from the background json file and enriches them with
the folder url so AE is able to import the files.
Order is important; it follows the order in the json.
Args:
file_url (str): abs url of background json
Returns:
(list): of abs paths to images
"""
with open(file_url) as json_file:
data = json.load(json_file)
layers = list()
bg_folder = os.path.dirname(file_url)
for child in data['children']:
if child.get("filename"):
layers.append(os.path.join(bg_folder, child.get("filename")).
replace("\\", "/"))
else:
for layer in child['children']:
if layer.get("filename"):
layers.append(os.path.join(bg_folder,
layer.get("filename")).
replace("\\", "/"))
return layers
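
The suffix logic of get_unique_layer_name is easiest to see with a quick usage example (the layer names below are hypothetical):

from openpype.hosts.aftereffects.api.lib import get_unique_layer_name

layers = ["Car_001", "Car_002", "Tree"]
print(get_unique_layer_name(layers, "Car"))    # Car_003
print(get_unique_layer_name(layers, "House"))  # House_001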

View file

@ -4,8 +4,7 @@ from Qt import QtWidgets
import pyblish.api
from openpype import lib
from openpype.api import Logger
from openpype.lib import Logger, register_event_callback
from openpype.pipeline import (
register_loader_plugin_path,
register_creator_plugin_path,
@ -16,9 +15,8 @@ from openpype.pipeline import (
)
from openpype.pipeline.load import any_outdated_containers
import openpype.hosts.aftereffects
from openpype.lib import register_event_callback
from .launch_logic import get_stub
from .launch_logic import get_stub, ConnectionNotEstablishedYet
log = Logger.get_logger(__name__)
@ -111,7 +109,7 @@ def ls():
"""
try:
stub = get_stub() # only after AfterEffects is up
except lib.ConnectionNotEstablishedYet:
except ConnectionNotEstablishedYet:
print("Not connected yet, ignoring")
return
@ -284,7 +282,7 @@ def _get_stub():
"""
try:
stub = get_stub() # only after Photoshop is up
except lib.ConnectionNotEstablishedYet:
except ConnectionNotEstablishedYet:
print("Not connected yet, ignoring")
return
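
The try/except pattern around get_stub is worth factoring out when writing new pipeline code; a minimal sketch using the same imports this file now uses:

from openpype.hosts.aftereffects.api.launch_logic import (
    get_stub,
    ConnectionNotEstablishedYet,
)

def try_get_stub():
    """Return the AfterEffects stub, or None before the connection exists."""
    try:
        return get_stub()
    except ConnectionNotEstablishedYet:
        print("Not connected yet, ignoring")
        return None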

View file

@ -1,12 +1,11 @@
"""Host API required Work Files tool"""
import os
from openpype.pipeline import HOST_WORKFILE_EXTENSIONS
from .launch_logic import get_stub
def file_extensions():
return HOST_WORKFILE_EXTENSIONS["aftereffects"]
return [".aep"]
def has_unsaved_changes():

View file

@ -1,14 +1,14 @@
import re
from openpype.lib import (
get_background_layers,
get_unique_layer_name
)
from openpype.pipeline import get_representation_path
from openpype.hosts.aftereffects.api import (
AfterEffectsLoader,
containerise
)
from openpype.hosts.aftereffects.api.lib import (
get_background_layers,
get_unique_layer_name,
)
class BackgroundLoader(AfterEffectsLoader):

View file

@ -1,12 +1,11 @@
import re
from openpype import lib
from openpype.pipeline import get_representation_path
from openpype.hosts.aftereffects.api import (
AfterEffectsLoader,
containerise
)
from openpype.hosts.aftereffects.api.lib import get_unique_layer_name
class FileLoader(AfterEffectsLoader):
@ -28,7 +27,7 @@ class FileLoader(AfterEffectsLoader):
stub = self.get_stub()
layers = stub.get_items(comps=True, folders=True, footages=True)
existing_layers = [layer.name for layer in layers]
comp_name = lib.get_unique_layer_name(
comp_name = get_unique_layer_name(
existing_layers, "{}_{}".format(context["asset"]["name"], name))
import_options = {}
@ -87,7 +86,7 @@ class FileLoader(AfterEffectsLoader):
if namespace_from_container != layer_name:
layers = stub.get_items(comps=True)
existing_layers = [layer.name for layer in layers]
layer_name = lib.get_unique_layer_name(
layer_name = get_unique_layer_name(
existing_layers,
"{}_{}".format(context["asset"], context["subset"]))
else: # switching version - keep same name

View file

@ -1,8 +1,8 @@
import os
import pyblish.api
from openpype.lib import get_subset_name_with_asset_doc
from openpype.pipeline import legacy_io
from openpype.pipeline.create import get_subset_name
class CollectWorkfile(pyblish.api.ContextPlugin):
@ -71,13 +71,14 @@ class CollectWorkfile(pyblish.api.ContextPlugin):
# workfile instance
family = "workfile"
subset = get_subset_name_with_asset_doc(
subset = get_subset_name(
family,
self.default_variant,
context.data["anatomyData"]["task"]["name"],
context.data["assetEntity"],
context.data["anatomyData"]["project"]["name"],
host_name=context.data["hostName"]
host_name=context.data["hostName"],
project_settings=context.data["project_settings"]
)
# Create instance
instance = context.create_instance(subset)
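
A hedged sketch of the new get_subset_name call with placeholder values; asset_doc and project_settings are assumed to be available from the publish context, and the variant, task, and project names are hypothetical:

from openpype.pipeline.create import get_subset_name

subset = get_subset_name(
    "workfile",                 # family
    "Main",                     # variant (hypothetical)
    "compositing",              # task name (hypothetical)
    asset_doc,                  # asset document from the database
    "MyProject",                # project name (hypothetical)
    host_name="aftereffects",
    project_settings=project_settings,
)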

View file

@ -2,14 +2,18 @@ import os
import sys
import six
import openpype.api
from openpype.lib import (
get_ffmpeg_tool_path,
run_subprocess,
)
from openpype.pipeline import publish
from openpype.hosts.aftereffects.api import get_stub
class ExtractLocalRender(openpype.api.Extractor):
class ExtractLocalRender(publish.Extractor):
"""Render RenderQueue locally."""
order = openpype.api.Extractor.order - 0.47
order = publish.Extractor.order - 0.47
label = "Extract Local Render"
hosts = ["aftereffects"]
families = ["renderLocal", "render.local"]
@ -53,7 +57,7 @@ class ExtractLocalRender(openpype.api.Extractor):
instance.data["representations"] = [repre_data]
ffmpeg_path = openpype.lib.get_ffmpeg_tool_path("ffmpeg")
ffmpeg_path = get_ffmpeg_tool_path("ffmpeg")
# Generate thumbnail.
thumbnail_path = os.path.join(staging_dir, "thumbnail.jpg")
@ -66,7 +70,7 @@ class ExtractLocalRender(openpype.api.Extractor):
]
self.log.debug("Thumbnail args:: {}".format(args))
try:
output = openpype.lib.run_subprocess(args)
output = run_subprocess(args)
except TypeError:
self.log.warning("Error in creating thumbnail")
six.reraise(*sys.exc_info())
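
The thumbnail step boils down to resolving the bundled ffmpeg and running it through run_subprocess; a sketch with hypothetical staging paths and file names:

import os

from openpype.lib import get_ffmpeg_tool_path, run_subprocess

ffmpeg_path = get_ffmpeg_tool_path("ffmpeg")
staging_dir = "/tmp/staging"  # hypothetical
args = [
    ffmpeg_path,
    "-y",                                        # overwrite output
    "-i", os.path.join(staging_dir, "render.0001.png"),
    "-vframes", "1",                             # grab a single frame
    os.path.join(staging_dir, "thumbnail.jpg"),
]
run_subprocess(args)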

View file

@ -1,13 +1,13 @@
import pyblish.api
import openpype.api
from openpype.pipeline import publish
from openpype.hosts.aftereffects.api import get_stub
class ExtractSaveScene(pyblish.api.ContextPlugin):
"""Save scene before extraction."""
order = openpype.api.Extractor.order - 0.48
order = publish.Extractor.order - 0.48
label = "Extract Save Scene"
hosts = ["aftereffects"]

View file

@ -1,6 +1,6 @@
import pyblish.api
from openpype.action import get_errored_plugins_from_data
from openpype.lib import version_up
from openpype.pipeline.publish import get_errored_plugins_from_context
from openpype.hosts.aftereffects.api import get_stub
@ -18,7 +18,7 @@ class IncrementWorkfile(pyblish.api.InstancePlugin):
optional = True
def process(self, instance):
errored_plugins = get_errored_plugins_from_data(instance.context)
errored_plugins = get_errored_plugins_from_context(instance.context)
if errored_plugins:
raise RuntimeError(
"Skipping incrementing current file because publishing failed."

View file

@ -1,8 +1,8 @@
import openpype.api
from openpype.pipeline import publish
from openpype.hosts.aftereffects.api import get_stub
class RemovePublishHighlight(openpype.api.Extractor):
class RemovePublishHighlight(publish.Extractor):
"""Clean utf characters which are not working in DL
Published compositions are marked with unicode icon which causes
@ -10,7 +10,7 @@ class RemovePublishHighlight(openpype.api.Extractor):
rendering; add it back later to avoid confusion.
"""
order = openpype.api.Extractor.order - 0.49 # just before save
order = publish.Extractor.order - 0.49 # just before save
label = "Clean render comp"
hosts = ["aftereffects"]
families = ["render.farm"]

View file

@ -1,9 +1,9 @@
import pyblish.api
import openpype.api
from openpype.pipeline import (
from openpype.pipeline import legacy_io
from openpype.pipeline.publish import (
ValidateContentsOrder,
PublishXmlValidationError,
legacy_io,
)
from openpype.hosts.aftereffects.api import get_stub
@ -50,7 +50,7 @@ class ValidateInstanceAsset(pyblish.api.InstancePlugin):
label = "Validate Instance Asset"
hosts = ["aftereffects"]
actions = [ValidateInstanceAssetRepair]
order = openpype.api.ValidateContentsOrder
order = ValidateContentsOrder
def process(self, instance):
instance_asset = instance.data["asset"]

View file

@ -1,52 +1,6 @@
import os
from .addon import BlenderAddon
def add_implementation_envs(env, _app):
"""Modify environments to contain all required for implementation."""
# Prepare path to implementation script
implementation_user_script_path = os.path.join(
os.path.dirname(os.path.abspath(__file__)),
"blender_addon"
)
# Add blender implementation script path to PYTHONPATH
python_path = env.get("PYTHONPATH") or ""
python_path_parts = [
path
for path in python_path.split(os.pathsep)
if path
]
python_path_parts.insert(0, implementation_user_script_path)
env["PYTHONPATH"] = os.pathsep.join(python_path_parts)
# Modify Blender user scripts path
previous_user_scripts = set()
# Implementation path is added to set for easier paths check inside loops
# - will be removed at the end
previous_user_scripts.add(implementation_user_script_path)
openpype_blender_user_scripts = (
env.get("OPENPYPE_BLENDER_USER_SCRIPTS") or ""
)
for path in openpype_blender_user_scripts.split(os.pathsep):
if path:
previous_user_scripts.add(os.path.normpath(path))
blender_user_scripts = env.get("BLENDER_USER_SCRIPTS") or ""
for path in blender_user_scripts.split(os.pathsep):
if path:
previous_user_scripts.add(os.path.normpath(path))
# Remove implementation path from user script paths as is set to
# `BLENDER_USER_SCRIPTS`
previous_user_scripts.remove(implementation_user_script_path)
env["BLENDER_USER_SCRIPTS"] = implementation_user_script_path
# Set custom user scripts env
env["OPENPYPE_BLENDER_USER_SCRIPTS"] = os.pathsep.join(
previous_user_scripts
)
# Define Qt binding if not defined
if not env.get("QT_PREFERRED_BINDING"):
env["QT_PREFERRED_BINDING"] = "PySide2"
__all__ = (
"BlenderAddon",
)

View file

@ -0,0 +1,72 @@
import os
from openpype.modules import OpenPypeModule, IHostAddon
BLENDER_ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
class BlenderAddon(OpenPypeModule, IHostAddon):
name = "blender"
host_name = "blender"
def initialize(self, module_settings):
self.enabled = True
def add_implementation_envs(self, env, _app):
"""Modify environments to contain all required for implementation."""
# Prepare path to implementation script
implementation_user_script_path = os.path.join(
BLENDER_ROOT_DIR,
"blender_addon"
)
# Add blender implementation script path to PYTHONPATH
python_path = env.get("PYTHONPATH") or ""
python_path_parts = [
path
for path in python_path.split(os.pathsep)
if path
]
python_path_parts.insert(0, implementation_user_script_path)
env["PYTHONPATH"] = os.pathsep.join(python_path_parts)
# Modify Blender user scripts path
previous_user_scripts = set()
# Implementation path is added to set for easier paths check inside
# loops - will be removed at the end
previous_user_scripts.add(implementation_user_script_path)
openpype_blender_user_scripts = (
env.get("OPENPYPE_BLENDER_USER_SCRIPTS") or ""
)
for path in openpype_blender_user_scripts.split(os.pathsep):
if path:
previous_user_scripts.add(os.path.normpath(path))
blender_user_scripts = env.get("BLENDER_USER_SCRIPTS") or ""
for path in blender_user_scripts.split(os.pathsep):
if path:
previous_user_scripts.add(os.path.normpath(path))
# Remove implementation path from user script paths as is set to
# `BLENDER_USER_SCRIPTS`
previous_user_scripts.remove(implementation_user_script_path)
env["BLENDER_USER_SCRIPTS"] = implementation_user_script_path
# Set custom user scripts env
env["OPENPYPE_BLENDER_USER_SCRIPTS"] = os.pathsep.join(
previous_user_scripts
)
# Define Qt binding if not defined
if not env.get("QT_PREFERRED_BINDING"):
env["QT_PREFERRED_BINDING"] = "PySide2"
def get_launch_hook_paths(self, app):
if app.host_name != self.host_name:
return []
return [
os.path.join(BLENDER_ROOT_DIR, "hooks")
]
def get_workfile_extensions(self):
return [".blend"]

View file

@ -2,7 +2,7 @@ import bpy
import pyblish.api
from openpype.api import get_errored_instances_from_context
from openpype.pipeline.publish import get_errored_instances_from_context
class SelectInvalidAction(pyblish.api.Action):

View file

@ -6,7 +6,7 @@ from typing import Dict, List, Union
import bpy
import addon_utils
from openpype.api import Logger
from openpype.lib import Logger
from . import pipeline
@ -234,7 +234,7 @@ def lsattrs(attrs: Dict) -> List:
def read(node: bpy.types.bpy_struct_meta_idprop):
"""Return user-defined attributes from `node`"""
data = dict(node.get(pipeline.AVALON_PROPERTY))
data = dict(node.get(pipeline.AVALON_PROPERTY, {}))
# Ignore hidden/internal data
data = {

View file

@ -26,7 +26,7 @@ PREVIEW_COLLECTIONS: Dict = dict()
# This seems like a good value to keep the Qt app responsive and doesn't slow
# down Blender. At least on macOS the interface of Blender gets very laggy if
# you make it smaller.
TIMER_INTERVAL: float = 0.01
TIMER_INTERVAL: float = 0.01 if platform.system() == "Windows" else 0.1
class BlenderApplication(QtWidgets.QApplication):
@ -164,6 +164,12 @@ def _process_app_events() -> Optional[float]:
dialog.setDetailedText(detail)
dialog.exec_()
# Refresh Manager
if GlobalClass.app:
manager = GlobalClass.app.get_window("WM_OT_avalon_manager")
if manager:
manager.refresh()
if not GlobalClass.is_windows:
if OpenFileCacher.opening_file:
return TIMER_INTERVAL
@ -192,10 +198,11 @@ class LaunchQtApp(bpy.types.Operator):
self._app = BlenderApplication.get_app()
GlobalClass.app = self._app
bpy.app.timers.register(
_process_app_events,
persistent=True
)
if not bpy.app.timers.is_registered(_process_app_events):
bpy.app.timers.register(
_process_app_events,
persistent=True
)
def execute(self, context):
"""Execute the operator.

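
The guarded registration pattern in isolation, as a minimal Blender-side sketch (the pump function here is a stand-in for the real Qt event processing):

import bpy

TIMER_INTERVAL = 0.01  # Windows value; 0.1 elsewhere per the change above

def _pump_qt_events():
    # Returning the interval re-schedules the timer;
    # returning None would stop it.
    return TIMER_INTERVAL

# The is_registered() guard prevents stacking duplicate timers when the
# operator is invoked repeatedly; persistent=True keeps the timer alive
# across .blend file loads.
if not bpy.app.timers.is_registered(_pump_qt_events):
    bpy.app.timers.register(_pump_qt_events, persistent=True)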
View file

@ -20,8 +20,8 @@ from openpype.pipeline import (
deregister_creator_plugin_path,
AVALON_CONTAINER_ID,
)
from openpype.api import Logger
from openpype.lib import (
Logger,
register_event_callback,
emit_event
)

View file

@ -5,8 +5,6 @@ from typing import List, Optional
import bpy
from openpype.pipeline import HOST_WORKFILE_EXTENSIONS
class OpenFileCacher:
"""Store information about opening file.
@ -78,7 +76,7 @@ def has_unsaved_changes() -> bool:
def file_extensions() -> List[str]:
"""Return the supported file extensions for Blender scene files."""
return HOST_WORKFILE_EXTENSIONS["blender"]
return [".blend"]
def work_root(session: dict) -> str:

View file

@ -1,4 +1,10 @@
from openpype.pipeline import install_host
from openpype.hosts.blender import api
install_host(api)
def register():
install_host(api)
def unregister():
pass

View file

@ -1,6 +1,19 @@
import os
import bpy
import pyblish.api
from openpype.pipeline import legacy_io
from openpype.hosts.blender.api import workio
class SaveWorkfiledAction(pyblish.api.Action):
"""Save Workfile."""
label = "Save Workfile"
on = "failed"
icon = "save"
def process(self, context, plugin):
bpy.ops.wm.avalon_workfiles()
class CollectBlenderCurrentFile(pyblish.api.ContextPlugin):
@ -8,12 +21,52 @@ class CollectBlenderCurrentFile(pyblish.api.ContextPlugin):
order = pyblish.api.CollectorOrder - 0.5
label = "Blender Current File"
hosts = ['blender']
hosts = ["blender"]
actions = [SaveWorkfiledAction]
def process(self, context):
"""Inject the current working file"""
current_file = bpy.data.filepath
context.data['currentFile'] = current_file
current_file = workio.current_file()
assert current_file != '', "Current file is empty. " \
"Save the file before continuing."
context.data["currentFile"] = current_file
assert current_file, (
"Current file is empty. Save the file before continuing."
)
folder, file = os.path.split(current_file)
filename, ext = os.path.splitext(file)
task = legacy_io.Session["AVALON_TASK"]
data = {}
# create instance
instance = context.create_instance(name=filename)
subset = "workfile" + task.capitalize()
data.update({
"subset": subset,
"asset": os.getenv("AVALON_ASSET", None),
"label": subset,
"publish": True,
"family": "workfile",
"families": ["workfile"],
"setMembers": [current_file],
"frameStart": bpy.context.scene.frame_start,
"frameEnd": bpy.context.scene.frame_end,
})
data["representations"] = [{
"name": ext.lstrip("."),
"ext": ext.lstrip("."),
"files": file,
"stagingDir": folder,
}]
instance.data.update(data)
self.log.info("Collected instance: {}".format(file))
self.log.info("Scene path: {}".format(current_file))
self.log.info("staging Dir: {}".format(folder))
self.log.info("subset: {}".format(subset))

View file

@ -2,12 +2,12 @@ import os
import bpy
from openpype import api
from openpype.pipeline import publish
from openpype.hosts.blender.api import plugin
from openpype.hosts.blender.api.pipeline import AVALON_PROPERTY
class ExtractABC(api.Extractor):
class ExtractABC(publish.Extractor):
"""Extract as ABC."""
label = "Extract ABC"

View file

@ -2,10 +2,10 @@ import os
import bpy
import openpype.api
from openpype.pipeline import publish
class ExtractBlend(openpype.api.Extractor):
class ExtractBlend(publish.Extractor):
"""Extract a blend file."""
label = "Extract Blend"

View file

@ -2,10 +2,10 @@ import os
import bpy
import openpype.api
from openpype.pipeline import publish
class ExtractBlendAnimation(openpype.api.Extractor):
class ExtractBlendAnimation(publish.Extractor):
"""Extract a blend file."""
label = "Extract Blend"

View file

@ -2,11 +2,11 @@ import os
import bpy
from openpype import api
from openpype.pipeline import publish
from openpype.hosts.blender.api import plugin
class ExtractCamera(api.Extractor):
class ExtractCamera(publish.Extractor):
"""Extract as the camera as FBX."""
label = "Extract Camera"

View file

@ -2,12 +2,12 @@ import os
import bpy
from openpype import api
from openpype.pipeline import publish
from openpype.hosts.blender.api import plugin
from openpype.hosts.blender.api.pipeline import AVALON_PROPERTY
class ExtractFBX(api.Extractor):
class ExtractFBX(publish.Extractor):
"""Extract as FBX."""
label = "Extract FBX"

View file

@ -5,12 +5,12 @@ import bpy
import bpy_extras
import bpy_extras.anim_utils
from openpype import api
from openpype.pipeline import publish
from openpype.hosts.blender.api import plugin
from openpype.hosts.blender.api.pipeline import AVALON_PROPERTY
class ExtractAnimationFBX(api.Extractor):
class ExtractAnimationFBX(publish.Extractor):
"""Extract as animation."""
label = "Extract FBX"

View file

@ -6,12 +6,12 @@ import bpy_extras
import bpy_extras.anim_utils
from openpype.client import get_representation_by_name
from openpype.pipeline import publish
from openpype.hosts.blender.api import plugin
from openpype.hosts.blender.api.pipeline import AVALON_PROPERTY
import openpype.api
class ExtractLayout(openpype.api.Extractor):
class ExtractLayout(publish.Extractor):
"""Extract a layout."""
label = "Extract Layout"

View file

@ -1,9 +1,11 @@
from typing import List
import mathutils
import bpy
import pyblish.api
import openpype.hosts.blender.api.action
from openpype.pipeline.publish import ValidateContentsOrder
class ValidateCameraZeroKeyframe(pyblish.api.InstancePlugin):
@ -14,21 +16,18 @@ class ValidateCameraZeroKeyframe(pyblish.api.InstancePlugin):
in Unreal and Blender.
"""
order = openpype.api.ValidateContentsOrder
order = ValidateContentsOrder
hosts = ["blender"]
families = ["camera"]
category = "geometry"
version = (0, 1, 0)
label = "Zero Keyframe"
actions = [openpype.hosts.blender.api.action.SelectInvalidAction]
_identity = mathutils.Matrix()
@classmethod
def get_invalid(cls, instance) -> List:
@staticmethod
def get_invalid(instance) -> List:
invalid = []
for obj in [obj for obj in instance]:
if obj.type == "CAMERA":
for obj in instance:
if isinstance(obj, bpy.types.Object) and obj.type == "CAMERA":
if obj.animation_data and obj.animation_data.action:
action = obj.animation_data.action
frames_set = set()
@ -45,4 +44,5 @@ class ValidateCameraZeroKeyframe(pyblish.api.InstancePlugin):
invalid = self.get_invalid(instance)
if invalid:
raise RuntimeError(
f"Object found in instance is not in Object Mode: {invalid}")
f"Camera must have a keyframe at frame 0: {invalid}"
)

View file

@ -3,13 +3,15 @@ from typing import List
import bpy
import pyblish.api
from openpype.pipeline.publish import ValidateContentsOrder
import openpype.hosts.blender.api.action
class ValidateMeshHasUvs(pyblish.api.InstancePlugin):
"""Validate that the current mesh has UV's."""
order = pyblish.api.ValidatorOrder
order = ValidateContentsOrder
hosts = ["blender"]
families = ["model"]
category = "geometry"
@ -25,7 +27,10 @@ class ValidateMeshHasUvs(pyblish.api.InstancePlugin):
for uv_layer in obj.data.uv_layers:
for polygon in obj.data.polygons:
for loop_index in polygon.loop_indices:
if not uv_layer.data[loop_index].uv:
if (
loop_index >= len(uv_layer.data)
or not uv_layer.data[loop_index].uv
):
return False
return True
@ -33,20 +38,20 @@ class ValidateMeshHasUvs(pyblish.api.InstancePlugin):
@classmethod
def get_invalid(cls, instance) -> List:
invalid = []
# TODO (jasper): only check objects in the collection that will be published?
for obj in [
obj for obj in instance]:
try:
if obj.type == 'MESH':
# Make sure we are in object mode.
bpy.ops.object.mode_set(mode='OBJECT')
if not cls.has_uvs(obj):
invalid.append(obj)
except:
continue
for obj in instance:
if isinstance(obj, bpy.types.Object) and obj.type == 'MESH':
if obj.mode != "OBJECT":
cls.log.warning(
f"Mesh object {obj.name} should be in 'OBJECT' mode"
" to be properly checked."
)
if not cls.has_uvs(obj):
invalid.append(obj)
return invalid
def process(self, instance):
invalid = self.get_invalid(instance)
if invalid:
raise RuntimeError(f"Meshes found in instance without valid UV's: {invalid}")
raise RuntimeError(
f"Meshes found in instance without valid UV's: {invalid}"
)

View file

@ -3,28 +3,28 @@ from typing import List
import bpy
import pyblish.api
from openpype.pipeline.publish import ValidateContentsOrder
import openpype.hosts.blender.api.action
class ValidateMeshNoNegativeScale(pyblish.api.Validator):
"""Ensure that meshes don't have a negative scale."""
order = pyblish.api.ValidatorOrder
order = ValidateContentsOrder
hosts = ["blender"]
families = ["model"]
category = "geometry"
label = "Mesh No Negative Scale"
actions = [openpype.hosts.blender.api.action.SelectInvalidAction]
@staticmethod
def get_invalid(instance) -> List:
invalid = []
# TODO (jasper): only check objects in the collection that will be published?
for obj in [
obj for obj in bpy.data.objects if obj.type == 'MESH'
]:
if any(v < 0 for v in obj.scale):
invalid.append(obj)
for obj in instance:
if isinstance(obj, bpy.types.Object) and obj.type == 'MESH':
if any(v < 0 for v in obj.scale):
invalid.append(obj)
return invalid
def process(self, instance):

View file

@ -1,7 +1,11 @@
from typing import List
import bpy
import pyblish.api
import openpype.hosts.blender.api.action
from openpype.pipeline.publish import ValidateContentsOrder
class ValidateNoColonsInName(pyblish.api.InstancePlugin):
@ -12,20 +16,20 @@ class ValidateNoColonsInName(pyblish.api.InstancePlugin):
"""
order = openpype.api.ValidateContentsOrder
order = ValidateContentsOrder
hosts = ["blender"]
families = ["model", "rig"]
version = (0, 1, 0)
label = "No Colons in names"
actions = [openpype.hosts.blender.api.action.SelectInvalidAction]
@classmethod
def get_invalid(cls, instance) -> List:
@staticmethod
def get_invalid(instance) -> List:
invalid = []
for obj in [obj for obj in instance]:
for obj in instance:
if ':' in obj.name:
invalid.append(obj)
if obj.type == 'ARMATURE':
if isinstance(obj, bpy.types.Object) and obj.type == 'ARMATURE':
for bone in obj.data.bones:
if ':' in bone.name:
invalid.append(obj)
@ -36,4 +40,5 @@ class ValidateNoColonsInName(pyblish.api.InstancePlugin):
invalid = self.get_invalid(instance)
if invalid:
raise RuntimeError(
f"Objects found with colon in name: {invalid}")
f"Objects found with colon in name: {invalid}"
)

View file

@ -1,5 +1,7 @@
from typing import List
import bpy
import pyblish.api
import openpype.hosts.blender.api.action
@ -10,26 +12,21 @@ class ValidateObjectIsInObjectMode(pyblish.api.InstancePlugin):
order = pyblish.api.ValidatorOrder - 0.01
hosts = ["blender"]
families = ["model", "rig", "layout"]
category = "geometry"
label = "Validate Object Mode"
actions = [openpype.hosts.blender.api.action.SelectInvalidAction]
optional = False
@classmethod
def get_invalid(cls, instance) -> List:
@staticmethod
def get_invalid(instance) -> List:
invalid = []
for obj in [obj for obj in instance]:
try:
if obj.type == 'MESH' or obj.type == 'ARMATURE':
# Check if the object is in object mode.
if not obj.mode == 'OBJECT':
invalid.append(obj)
except Exception:
continue
for obj in instance:
if isinstance(obj, bpy.types.Object) and obj.mode != "OBJECT":
invalid.append(obj)
return invalid
def process(self, instance):
invalid = self.get_invalid(instance)
if invalid:
raise RuntimeError(
f"Object found in instance is not in Object Mode: {invalid}")
f"Object found in instance is not in Object Mode: {invalid}"
)

View file

@ -1,9 +1,12 @@
from typing import List
import mathutils
import bpy
import pyblish.api
import openpype.hosts.blender.api.action
from openpype.pipeline.publish import ValidateContentsOrder
class ValidateTransformZero(pyblish.api.InstancePlugin):
@ -15,10 +18,9 @@ class ValidateTransformZero(pyblish.api.InstancePlugin):
"""
order = openpype.api.ValidateContentsOrder
order = ValidateContentsOrder
hosts = ["blender"]
families = ["model"]
category = "geometry"
version = (0, 1, 0)
label = "Transform Zero"
actions = [openpype.hosts.blender.api.action.SelectInvalidAction]
@ -28,8 +30,11 @@ class ValidateTransformZero(pyblish.api.InstancePlugin):
@classmethod
def get_invalid(cls, instance) -> List:
invalid = []
for obj in [obj for obj in instance]:
if obj.matrix_basis != cls._identity:
for obj in instance:
if (
isinstance(obj, bpy.types.Object)
and obj.matrix_basis != cls._identity
):
invalid.append(obj)
return invalid
@ -37,4 +42,6 @@ class ValidateTransformZero(pyblish.api.InstancePlugin):
invalid = self.get_invalid(instance)
if invalid:
raise RuntimeError(
f"Object found in instance is not in Object Mode: {invalid}")
"Object found in instance has not"
f" transform to zero: {invalid}"
)
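
All of these Blender validators share the same shape; here is a hypothetical validator distilled to the pattern (the no-spaces naming rule itself is made up for illustration):

from typing import List

import bpy
import pyblish.api

from openpype.pipeline.publish import ValidateContentsOrder
import openpype.hosts.blender.api.action


class ValidateExampleNaming(pyblish.api.InstancePlugin):
    """Hypothetical validator following the same get_invalid pattern."""

    order = ValidateContentsOrder
    hosts = ["blender"]
    families = ["model"]
    label = "Example Naming"
    actions = [openpype.hosts.blender.api.action.SelectInvalidAction]

    @staticmethod
    def get_invalid(instance) -> List:
        invalid = []
        for obj in instance:
            # Only real objects are checked; instances can also
            # contain collections and other data-blocks.
            if isinstance(obj, bpy.types.Object) and " " in obj.name:
                invalid.append(obj)
        return invalid

    def process(self, instance):
        invalid = self.get_invalid(instance)
        if invalid:
            raise RuntimeError(
                f"Objects found with spaces in name: {invalid}"
            )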

View file

@ -6,15 +6,14 @@ import argparse
import pyblish.api
import pyblish.util
from openpype.api import Logger
import openpype
import openpype.hosts.celaction
from openpype.lib import Logger
from openpype.hosts.celaction import api as celaction
from openpype.tools.utils import host_tools
from openpype.pipeline import install_openpype_plugins
log = Logger().get_logger("Celaction_cli_publisher")
log = Logger.get_logger("Celaction_cli_publisher")
publish_host = "celaction"

View file

@ -1,113 +0,0 @@
import os
import collections
from pprint import pformat
import pyblish.api
from openpype.client import (
get_subsets,
get_last_versions,
get_representations
)
from openpype.pipeline import legacy_io
class AppendCelactionAudio(pyblish.api.ContextPlugin):
label = "Colect Audio for publishing"
order = pyblish.api.CollectorOrder + 0.1
def process(self, context):
self.log.info('Collecting Audio Data')
asset_doc = context.data["assetEntity"]
# get all available representations
subsets = self.get_subsets(
asset_doc,
representations=["audio", "wav"]
)
self.log.info(f"subsets is: {pformat(subsets)}")
if not subsets.get("audioMain"):
raise AttributeError("`audioMain` subset does not exist")
reprs = subsets.get("audioMain", {}).get("representations", [])
self.log.info(f"reprs is: {pformat(reprs)}")
repr = next((r for r in reprs), None)
if not repr:
raise "Missing `audioMain` representation"
self.log.info(f"representation is: {repr}")
audio_file = repr.get('data', {}).get('path', "")
if os.path.exists(audio_file):
context.data["audioFile"] = audio_file
self.log.info(
'audio_file: {}, has been added to context'.format(audio_file))
else:
self.log.warning("Couldn't find any audio file on Ftrack.")
def get_subsets(self, asset_doc, representations):
"""
Query subsets with a filter on name.
The method will return all found subsets with their last version
and representations. Representations can be filtered.
Arguments:
asset_doc (dict): Asset (shot) mongo document
representations (list): list for all representations
Returns:
dict: subsets with version and representations in keys
"""
# Query all subsets for asset
project_name = legacy_io.active_project()
subset_docs = get_subsets(
project_name, asset_ids=[asset_doc["_id"]], fields=["_id"]
)
# Collect all subset ids
subset_ids = [
subset_doc["_id"]
for subset_doc in subset_docs
]
# Check if we found anything
assert subset_ids, (
"No subsets found. Check correct filter. "
"Try this for start `r'.*'`: asset: `{}`"
).format(asset_doc["name"])
last_versions_by_subset_id = get_last_versions(
project_name, subset_ids, fields=["_id", "parent"]
)
version_docs_by_id = {}
for version_doc in last_versions_by_subset_id.values():
version_docs_by_id[version_doc["_id"]] = version_doc
repre_docs = get_representations(
project_name,
version_ids=version_docs_by_id.keys(),
representation_names=representations
)
repre_docs_by_version_id = collections.defaultdict(list)
for repre_doc in repre_docs:
version_id = repre_doc["parent"]
repre_docs_by_version_id[version_id].append(repre_doc)
output_dict = {}
for version_id, repre_docs in repre_docs_by_version_id.items():
version_doc = version_docs_by_id[version_id]
subset_id = version_doc["parent"]
subset_doc = last_versions_by_subset_id[subset_id]
# Store queried docs by subset name
output_dict[subset_doc["name"]] = {
"representations": repre_docs,
"version": version_doc
}
return output_dict

View file

@ -1,22 +1,10 @@
import os
HOST_DIR = os.path.dirname(
os.path.abspath(__file__)
from .addon import (
HOST_DIR,
FlameAddon,
)
def add_implementation_envs(env, _app):
# Add requirements to DL_PYTHON_HOOK_PATH
pype_root = os.environ["OPENPYPE_REPOS_ROOT"]
env["DL_PYTHON_HOOK_PATH"] = os.path.join(
pype_root, "openpype", "hosts", "flame", "startup")
env.pop("QT_AUTO_SCREEN_SCALE_FACTOR", None)
# Set default values if are not already set via settings
defaults = {
"LOGLEVEL": "DEBUG"
}
for key, value in defaults.items():
if not env.get(key):
env[key] = value
__all__ = (
"HOST_DIR",
"FlameAddon",
)

View file

@ -0,0 +1,35 @@
import os
from openpype.modules import OpenPypeModule, IHostAddon
HOST_DIR = os.path.dirname(os.path.abspath(__file__))
class FlameAddon(OpenPypeModule, IHostAddon):
name = "flame"
host_name = "flame"
def initialize(self, module_settings):
self.enabled = True
def add_implementation_envs(self, env, _app):
# Add requirements to DL_PYTHON_HOOK_PATH
env["DL_PYTHON_HOOK_PATH"] = os.path.join(HOST_DIR, "startup")
env.pop("QT_AUTO_SCREEN_SCALE_FACTOR", None)
# Set default values if are not already set via settings
defaults = {
"LOGLEVEL": "DEBUG"
}
for key, value in defaults.items():
if not env.get(key):
env[key] = value
def get_launch_hook_paths(self, app):
if app.host_name != self.host_name:
return []
return [
os.path.join(HOST_DIR, "hooks")
]
def get_workfile_extensions(self):
return [".otoc"]

View file

@ -51,7 +51,8 @@ from .pipeline import (
)
from .menu import (
FlameMenuProjectConnect,
FlameMenuTimeline
FlameMenuTimeline,
FlameMenuUniversal
)
from .plugin import (
Creator,
@ -131,6 +132,7 @@ __all__ = [
# menu
"FlameMenuProjectConnect",
"FlameMenuTimeline",
"FlameMenuUniversal",
# plugin
"Creator",

View file

@ -12,6 +12,9 @@ import xml.etree.cElementTree as cET
from copy import deepcopy, copy
from xml.etree import ElementTree as ET
from pprint import pformat
from openpype.lib import Logger, run_subprocess
from .constants import (
MARKER_COLOR,
MARKER_DURATION,
@ -20,9 +23,7 @@ from .constants import (
MARKER_PUBLISH_DEFAULT
)
import openpype.api as openpype
log = openpype.Logger.get_logger(__name__)
log = Logger.get_logger(__name__)
FRAME_PATTERN = re.compile(r"[\._](\d+)[\.]")
@ -766,11 +767,11 @@ class MediaInfoFile(object):
_drop_mode = None
_file_pattern = None
def __init__(self, path, **kwargs):
def __init__(self, path, logger=None):
# replace log if any
if kwargs.get("logger"):
self.log = kwargs["logger"]
if logger:
self.log = logger
# test if `dl_get_media_info` path exists
self._validate_media_script_path()
@ -1016,7 +1017,7 @@ class MediaInfoFile(object):
try:
# execute creation of clip xml template data
openpype.run_subprocess(cmd_args)
run_subprocess(cmd_args)
except TypeError as error:
raise TypeError(
"Error creating `{}` due: {}".format(fpath, error))

View file

@ -201,3 +201,54 @@ class FlameMenuTimeline(_FlameMenuApp):
if self.flame:
self.flame.execute_shortcut('Rescan Python Hooks')
self.log.info('Rescan Python Hooks')
class FlameMenuUniversal(_FlameMenuApp):
# flameMenuProjectconnect app takes care of the preferences dialog as well
def __init__(self, framework):
_FlameMenuApp.__init__(self, framework)
def __getattr__(self, name):
def method(*args, **kwargs):
project = self.dynamic_menu_data.get(name)
if project:
self.link_project(project)
return method
def build_menu(self):
if not self.flame:
return []
menu = deepcopy(self.menu)
menu['actions'].append({
"name": "Load...",
"execute": lambda x: callback_selection(
x, self.tools_helper.show_loader)
})
menu['actions'].append({
"name": "Manage...",
"execute": lambda x: self.tools_helper.show_scene_inventory()
})
menu['actions'].append({
"name": "Library...",
"execute": lambda x: self.tools_helper.show_library_loader()
})
return menu
def refresh(self, *args, **kwargs):
self.rescan()
def rescan(self, *args, **kwargs):
if not self.flame:
try:
import flame
self.flame = flame
except ImportError:
self.flame = None
if self.flame:
self.flame.execute_shortcut('Rescan Python Hooks')
self.log.info('Rescan Python Hooks')

View file

@ -5,7 +5,7 @@ import os
import contextlib
from pyblish import api as pyblish
from openpype.api import Logger
from openpype.lib import Logger
from openpype.pipeline import (
register_loader_plugin_path,
register_creator_plugin_path,
@ -90,8 +90,7 @@ def containerise(flame_clip_segment,
def ls():
"""List available containers.
"""
# TODO: ls
pass
return []
def parse_container(tl_segment, validate=True):
@ -107,6 +106,7 @@ def update_container(tl_segment, data=None):
# TODO: update_container
pass
def on_pyblish_instance_toggled(instance, old_value, new_value):
"""Toggle node passthrough states on instance toggles."""

View file

@ -6,16 +6,17 @@ from xml.etree import ElementTree as ET
from Qt import QtCore, QtWidgets
import openpype.api as openpype
import qargparse
from openpype import style
from openpype.settings import get_current_project_settings
from openpype.lib import Logger
from openpype.pipeline import LegacyCreator, LoaderPlugin
from . import constants
from . import lib as flib
from . import pipeline as fpipeline
log = openpype.Logger.get_logger(__name__)
log = Logger.get_logger(__name__)
class CreatorWidget(QtWidgets.QDialog):
@ -305,7 +306,7 @@ class Creator(LegacyCreator):
def __init__(self, *args, **kwargs):
super(Creator, self).__init__(*args, **kwargs)
self.presets = openpype.get_current_project_settings()[
self.presets = get_current_project_settings()[
"flame"]["create"].get(self.__class__.__name__, {})
# adding basic current context flame objects
@ -361,6 +362,8 @@ class PublishableClip:
index_from_segment_default = False
use_shot_name_default = False
include_handles_default = False
retimed_handles_default = True
retimed_framerange_default = True
def __init__(self, segment, **kwargs):
self.rename_index = kwargs["rename_index"]
@ -496,6 +499,14 @@ class PublishableClip:
"audio", {}).get("value") or False
self.include_handles = self.ui_inputs.get(
"includeHandles", {}).get("value") or self.include_handles_default
self.retimed_handles = (
self.ui_inputs.get("retimedHandles", {}).get("value")
or self.retimed_handles_default
)
self.retimed_framerange = (
self.ui_inputs.get("retimedFramerange", {}).get("value")
or self.retimed_framerange_default
)
# build subset name from layer name
if self.subset_name == "[ track name ]":
@ -668,6 +679,7 @@ class ClipLoader(LoaderPlugin):
`update` logic.
"""
log = log
options = [
qargparse.Boolean(
@ -684,16 +696,20 @@ class OpenClipSolver(flib.MediaInfoFile):
log = log
def __init__(self, openclip_file_path, feed_data):
def __init__(self, openclip_file_path, feed_data, logger=None):
self.out_file = openclip_file_path
# replace log if any
if logger:
self.log = logger
# new feed variables:
feed_path = feed_data.pop("path")
# initialize parent class
super(OpenClipSolver, self).__init__(
feed_path,
**feed_data
logger=logger
)
# get other metadata
@ -741,17 +757,18 @@ class OpenClipSolver(flib.MediaInfoFile):
self.log.info("Building new openClip")
self.log.debug(">> self.clip_data: {}".format(self.clip_data))
# clip data coming from MediaInfoFile
tmp_xml_feeds = self.clip_data.find('tracks/track/feeds')
tmp_xml_feeds.set('currentVersion', self.feed_version_name)
for tmp_feed in tmp_xml_feeds:
tmp_feed.set('vuid', self.feed_version_name)
for tmp_xml_track in self.clip_data.iter("track"):
tmp_xml_feeds = tmp_xml_track.find('feeds')
tmp_xml_feeds.set('currentVersion', self.feed_version_name)
# add colorspace if any is set
if self.feed_colorspace:
self._add_colorspace(tmp_feed, self.feed_colorspace)
for tmp_feed in tmp_xml_track.iter("feed"):
tmp_feed.set('vuid', self.feed_version_name)
self._clear_handler(tmp_feed)
# add colorspace if any is set
if self.feed_colorspace:
self._add_colorspace(tmp_feed, self.feed_colorspace)
self._clear_handler(tmp_feed)
tmp_xml_versions_obj = self.clip_data.find('versions')
tmp_xml_versions_obj.set('currentVersion', self.feed_version_name)
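
For orientation, an illustrative sketch of the openClip XML shape these XPath lookups assume (element and attribute names come from the queries in this file; all values are hypothetical):

<clip>
  <tracks>
    <track uid="t0">
      <feeds currentVersion="v001">
        <feed vuid="v001">
          <startTimecode>
            <rate>25</rate>
            <nbTicks>1001</nbTicks>
            <dropMode>NDF</dropMode>
          </startTimecode>
          <spans><span><path>/path/to/media.exr</path></span></spans>
        </feed>
      </feeds>
    </track>
  </tracks>
  <versions currentVersion="v001"/>
</clip>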
@ -764,6 +781,17 @@ class OpenClipSolver(flib.MediaInfoFile):
self.write_clip_data_to_file(self.out_file, self.clip_data)
def _get_xml_track_obj_by_uid(self, xml_data, uid):
# loop all tracks of input xml data
for xml_track in xml_data.iter("track"):
track_uid = xml_track.get("uid")
self.log.debug(
">> track_uid:uid: {}:{}".format(track_uid, uid))
# get matching uids
if uid == track_uid:
return xml_track
def _update_open_clip(self):
self.log.info("Updating openClip ..")
@ -773,52 +801,81 @@ class OpenClipSolver(flib.MediaInfoFile):
self.log.debug(">> out_xml: {}".format(out_xml))
self.log.debug(">> self.clip_data: {}".format(self.clip_data))
# Get new feed from tmp file
tmp_xml_feed = self.clip_data.find('tracks/track/feeds/feed')
# loop tmp tracks
updated_any = False
for tmp_xml_track in self.clip_data.iter("track"):
# get tmp track uid
tmp_track_uid = tmp_xml_track.get("uid")
self.log.debug(">> tmp_track_uid: {}".format(tmp_track_uid))
self._clear_handler(tmp_xml_feed)
# get out data track by uid
out_track_element = self._get_xml_track_obj_by_uid(
out_xml, tmp_track_uid)
self.log.debug(
">> out_track_element: {}".format(out_track_element))
# update fps from MediaInfoFile class
if self.fps:
tmp_feed_fps_obj = tmp_xml_feed.find(
"startTimecode/rate")
tmp_feed_fps_obj.text = str(self.fps)
# loop tmp feeds
for tmp_xml_feed in tmp_xml_track.iter("feed"):
new_path_obj = tmp_xml_feed.find(
"spans/span/path")
new_path = new_path_obj.text
# update start_frame from MediaInfoFile class
if self.start_frame:
tmp_feed_nb_ticks_obj = tmp_xml_feed.find(
"startTimecode/nbTicks")
tmp_feed_nb_ticks_obj.text = str(self.start_frame)
# check if feed path already exists in track's feeds
if (
out_track_element is not None
and self._feed_exists(out_track_element, new_path)
):
continue
# update drop_mode from MediaInfoFile class
if self.drop_mode:
tmp_feed_drop_mode_obj = tmp_xml_feed.find(
"startTimecode/dropMode")
tmp_feed_drop_mode_obj.text = str(self.drop_mode)
# rename versions on feeds
tmp_xml_feed.set('vuid', self.feed_version_name)
self._clear_handler(tmp_xml_feed)
new_path_obj = tmp_xml_feed.find(
"spans/span/path")
new_path = new_path_obj.text
# update fps from MediaInfoFile class
if self.fps is not None:
tmp_feed_fps_obj = tmp_xml_feed.find(
"startTimecode/rate")
tmp_feed_fps_obj.text = str(self.fps)
feed_added = False
if not self._feed_exists(out_xml, new_path):
tmp_xml_feed.set('vuid', self.feed_version_name)
# Append new temp file feed to .clip source out xml
out_track = out_xml.find("tracks/track")
# add colorspace if any is set
if self.feed_colorspace:
self._add_colorspace(tmp_xml_feed, self.feed_colorspace)
# update start_frame from MediaInfoFile class
if self.start_frame is not None:
tmp_feed_nb_ticks_obj = tmp_xml_feed.find(
"startTimecode/nbTicks")
tmp_feed_nb_ticks_obj.text = str(self.start_frame)
out_feeds = out_track.find('feeds')
out_feeds.set('currentVersion', self.feed_version_name)
out_feeds.append(tmp_xml_feed)
# update drop_mode from MediaInfoFile class
if self.drop_mode is not None:
tmp_feed_drop_mode_obj = tmp_xml_feed.find(
"startTimecode/dropMode")
tmp_feed_drop_mode_obj.text = str(self.drop_mode)
self.log.info(
"Appending new feed: {}".format(
self.feed_version_name))
feed_added = True
# add colorspace if any is set
if self.feed_colorspace is not None:
self._add_colorspace(tmp_xml_feed, self.feed_colorspace)
if feed_added:
# then append/update feed to correct track in output
if out_track_element is not None:
self.log.debug("updating track element ..")
# update already present track
out_feeds = out_track_element.find('feeds')
out_feeds.set('currentVersion', self.feed_version_name)
out_feeds.append(tmp_xml_feed)
self.log.info(
"Appending new feed: {}".format(
self.feed_version_name))
else:
self.log.debug("adding new track element ..")
# create new track as it doesn't exist yet
# set current version to feeds on tmp
tmp_xml_feeds = tmp_xml_track.find('feeds')
tmp_xml_feeds.set('currentVersion', self.feed_version_name)
out_tracks = out_xml.find("tracks")
out_tracks.append(tmp_xml_track)
updated_any = True
if updated_any:
# Append vUID to versions
out_xml_versions_obj = out_xml.find('versions')
out_xml_versions_obj.set(

View file

@ -1,6 +1,6 @@
import os
from xml.etree import ElementTree as ET
from openpype.api import Logger
from openpype.lib import Logger
log = Logger.get_logger(__name__)

View file

@ -4,7 +4,7 @@ Flame utils for syncing scripts
import os
import shutil
from openpype.api import Logger
from openpype.lib import Logger
log = Logger.get_logger(__name__)

View file

@ -1,7 +1,7 @@
"""Host API required Work Files tool"""
import os
from openpype.api import Logger
from openpype.lib import Logger
# from .. import (
# get_project_manager,
# get_current_project

View file

@ -3,16 +3,17 @@ import json
import tempfile
import contextlib
import socket
from pprint import pformat
from openpype.lib import (
PreLaunchHook,
get_openpype_username
get_openpype_username,
run_subprocess,
)
from openpype.lib.applications import (
ApplicationLaunchFailed
)
from openpype.hosts import flame as opflame
import openpype
from pprint import pformat
class FlamePrelaunch(PreLaunchHook):
@ -22,6 +23,7 @@ class FlamePrelaunch(PreLaunchHook):
in environment var FLAME_SCRIPT_DIR.
"""
app_groups = ["flame"]
permissions = 0o777
wtc_script_path = os.path.join(
opflame.HOST_DIR, "api", "scripts", "wiretap_com.py")
@ -38,19 +40,12 @@ class FlamePrelaunch(PreLaunchHook):
"""Hook entry method."""
project_doc = self.data["project_doc"]
project_name = project_doc["name"]
volume_name = _env.get("FLAME_WIRETAP_VOLUME")
# get image io
project_anatomy = self.data["anatomy"]
project_settings = self.data["project_settings"]
# make sure anatomy settings are having flame key
if not project_anatomy["imageio"].get("flame"):
raise ApplicationLaunchFailed((
"Anatomy project settings are missing `flame` key. "
"Please make sure you remove project overides on "
"Anatomy Image io")
)
imageio_flame = project_anatomy["imageio"]["flame"]
imageio_flame = project_settings["flame"]["imageio"]
# get user name and host name
user_name = get_openpype_username()
@ -81,7 +76,7 @@ class FlamePrelaunch(PreLaunchHook):
data_to_script = {
# from settings
"host_name": _env.get("FLAME_WIRETAP_HOSTNAME") or hostname,
"volume_name": _env.get("FLAME_WIRETAP_VOLUME"),
"volume_name": volume_name,
"group_name": _env.get("FLAME_WIRETAP_GROUP"),
"color_policy": str(imageio_flame["project"]["colourPolicy"]),
@ -99,8 +94,40 @@ class FlamePrelaunch(PreLaunchHook):
app_arguments = self._get_launch_arguments(data_to_script)
# fix project data permission issue
self._fix_permissions(project_name, volume_name)
self.launch_context.launch_args.extend(app_arguments)
def _fix_permissions(self, project_name, volume_name):
"""Work around for project data permissions
Reported issue: when project is created locally on one machine,
it is impossible to migrate it to other machine. Autodesk Flame
is crating some unmanagable files which needs to be opened to 0o777.
Args:
project_name (str): project name
volume_name (str): studio volume
"""
dirs_to_modify = [
"/usr/discreet/project/{}".format(project_name),
"/opt/Autodesk/clip/{}/{}.prj".format(volume_name, project_name),
"/usr/discreet/clip/{}/{}.prj".format(volume_name, project_name)
]
for dirtm in dirs_to_modify:
for root, dirs, files in os.walk(dirtm):
try:
for name in set(dirs) | set(files):
path = os.path.join(root, name)
st = os.stat(path)
if (st.st_mode & 0o777) != self.permissions:
os.chmod(path, self.permissions)
except OSError as exc:
self.log.warning("Not able to open files: {}".format(exc))
def _get_flame_fps(self, fps_num):
fps_table = {
float(23.976): "23.976 fps",
@ -152,7 +179,7 @@ class FlamePrelaunch(PreLaunchHook):
"env": self.launch_context.env
}
openpype.api.run_subprocess(args, **process_kwargs)
run_subprocess(args, **process_kwargs)
# process returned json file to pass launch args
return_json_data = open(tmp_json_path).read()

View file

@ -23,10 +23,11 @@ class CreateShotClip(opfapi.Creator):
# nested dictionary (only one level allowed
# for sections and dict)
for _k, _v in v["value"].items():
if presets.get(_k):
if presets.get(_k) is not None:
gui_inputs[k][
"value"][_k]["value"] = presets[_k]
if presets.get(k):
if presets.get(k) is not None:
gui_inputs[k]["value"] = presets[k]
# open widget for plugins inputs
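
The switch to `is not None` matters because a falsy-but-valid preset such as False or 0 must still override the GUI default; a minimal illustration (key name borrowed from the inputs below):

presets = {"retimedHandles": False}
if presets.get("retimedHandles"):              # False gets skipped - wrong
    pass
if presets.get("retimedHandles") is not None:  # False still applies - correct
    pass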
@ -276,6 +277,22 @@ class CreateShotClip(opfapi.Creator):
"target": "tag",
"toolTip": "By default handles are excluded", # noqa
"order": 3
},
"retimedHandles": {
"value": True,
"type": "QCheckBox",
"label": "Retimed handles",
"target": "tag",
"toolTip": "By default handles are retimed.", # noqa
"order": 4
},
"retimedFramerange": {
"value": True,
"type": "QCheckBox",
"label": "Retimed framerange",
"target": "tag",
"toolTip": "By default framerange is retimed.", # noqa
"order": 5
}
}
}

View file

@ -4,6 +4,7 @@ from pprint import pformat
import openpype.hosts.flame.api as opfapi
from openpype.lib import StringTemplate
class LoadClip(opfapi.ClipLoader):
"""Load a subset to timeline as clip
@ -60,8 +61,6 @@ class LoadClip(opfapi.ClipLoader):
"path": self.fname.replace("\\", "/"),
"colorspace": colorspace,
"version": "v{:0>3}".format(version_name),
"logger": self.log
}
self.log.debug(pformat(
loading_context
@ -69,7 +68,8 @@ class LoadClip(opfapi.ClipLoader):
self.log.debug(openclip_path)
# make openpype clip file
opfapi.OpenClipSolver(openclip_path, loading_context).make()
opfapi.OpenClipSolver(
openclip_path, loading_context, logger=self.log).make()
# prepare Reel group in actual desktop
opc = self._get_clip(

View file

@ -1,3 +1,4 @@
from copy import deepcopy
import os
import flame
from pprint import pformat
@ -22,7 +23,7 @@ class LoadClipBatch(opfapi.ClipLoader):
# settings
reel_name = "OP_LoadedReel"
clip_name_template = "{asset}_{subset}<_{output}>"
clip_name_template = "{batch}_{asset}_{subset}<_{output}>"
def load(self, context, name, namespace, options):
@ -40,8 +41,11 @@ class LoadClipBatch(opfapi.ClipLoader):
if not context["representation"]["context"].get("output"):
self.clip_name_template = self.clip_name_template.replace(
"output", "representation")
formatting_data = deepcopy(context["representation"]["context"])
formatting_data["batch"] = self.batch.name.get_value()
clip_name = StringTemplate(self.clip_name_template).format(
context["representation"]["context"])
formatting_data)
# TODO: settings in imageio
# convert colorspace with ocio to flame mapping
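
A hedged sketch of how the new {batch} token resolves (data values are illustrative; the <_{output}> part is the template's optional section, assumed to drop out when "output" is missing):

from openpype.lib import StringTemplate

template = "{batch}_{asset}_{subset}<_{output}>"
data = {"batch": "Batch01", "asset": "sh010",
        "subset": "plateMain", "output": "exr"}
print(StringTemplate(template).format(data))
# e.g. Batch01_sh010_plateMain_exr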
@ -56,6 +60,7 @@ class LoadClipBatch(opfapi.ClipLoader):
openclip_path = os.path.join(
openclip_dir, clip_name + ".clip"
)
if not os.path.exists(openclip_dir):
os.makedirs(openclip_dir)
@ -64,8 +69,6 @@ class LoadClipBatch(opfapi.ClipLoader):
"path": self.fname.replace("\\", "/"),
"colorspace": colorspace,
"version": "v{:0>3}".format(version_name),
"logger": self.log
}
self.log.debug(pformat(
loading_context
@ -73,7 +76,8 @@ class LoadClipBatch(opfapi.ClipLoader):
self.log.debug(openclip_path)
# make openpype clip file
opfapi.OpenClipSolver(openclip_path, loading_context).make()
opfapi.OpenClipSolver(
openclip_path, loading_context, logger=self.log).make()
# prepare Reel group in actual desktop
opc = self._get_clip(

View file

@ -131,6 +131,9 @@ class CollectTimelineInstances(pyblish.api.ContextPlugin):
"fps": self.fps,
"workfileFrameStart": workfile_start,
"sourceFirstFrame": int(first_frame),
"retimedHandles": marker_data.get("retimedHandles"),
"shotDurationFromSource": (
not marker_data.get("retimedFramerange")),
"path": file_path,
"flameAddTasks": self.add_tasks,
"tasks": {

View file

@ -1,9 +1,9 @@
import pyblish.api
import openpype.lib as oplib
from openpype.pipeline import legacy_io
import openpype.hosts.flame.api as opfapi
from openpype.hosts.flame.otio import flame_export
from openpype.pipeline import legacy_io
from openpype.pipeline.create import get_subset_name
class CollecTimelineOTIO(pyblish.api.ContextPlugin):
@ -24,11 +24,14 @@ class CollecTimelineOTIO(pyblish.api.ContextPlugin):
sequence = opfapi.get_current_sequence(opfapi.CTX.selection)
# create subset name
subset_name = oplib.get_subset_name_with_asset_doc(
subset_name = get_subset_name(
family,
variant,
task_name,
asset_doc,
context.data["projectName"],
context.data["hostName"],
project_settings=context.data["project_settings"]
)
# adding otio timeline to context

View file

@ -1,10 +1,10 @@
import os
import pyblish.api
import openpype.api
import opentimelineio as otio
from openpype.pipeline import publish
class ExtractOTIOFile(openpype.api.Extractor):
class ExtractOTIOFile(publish.Extractor):
"""
Extractor exporting an OTIO file
"""

View file

@ -1,11 +1,11 @@
import os
import re
import tempfile
from pprint import pformat
from copy import deepcopy
import pyblish.api
import openpype.api
from openpype.pipeline import publish
from openpype.hosts.flame import api as opfapi
from openpype.hosts.flame.api import MediaInfoFile
from openpype.pipeline.editorial import (
@ -15,7 +15,7 @@ from openpype.pipeline.editorial import (
import flame
class ExtractSubsetResources(openpype.api.Extractor):
class ExtractSubsetResources(publish.Extractor):
"""
Extractor for transcoding files from Flame clip
"""
@ -80,40 +80,53 @@ class ExtractSubsetResources(openpype.api.Extractor):
retimed_data = self._get_retimed_attributes(instance)
# get individual keys
r_handle_start = retimed_data["handle_start"]
r_handle_end = retimed_data["handle_end"]
r_source_dur = retimed_data["source_duration"]
r_speed = retimed_data["speed"]
retimed_handle_start = retimed_data["handle_start"]
retimed_handle_end = retimed_data["handle_end"]
retimed_source_duration = retimed_data["source_duration"]
retimed_speed = retimed_data["speed"]
# get handles value - take only the max from both
handle_start = instance.data["handleStart"]
handle_end = instance.data["handleEnd"]
handles = max(handle_start, handle_end)
include_handles = instance.data.get("includeHandles")
retimed_handles = instance.data.get("retimedHandles")
# get media source range with handles
source_start_handles = instance.data["sourceStartH"]
source_end_handles = instance.data["sourceEndH"]
# retime if needed
if r_speed != 1.0:
source_start_handles = (
instance.data["sourceStart"] - r_handle_start)
source_end_handles = (
source_start_handles
+ (r_source_dur - 1)
+ r_handle_start
+ r_handle_end
)
if retimed_speed != 1.0:
if retimed_handles:
# handles are retimed
source_start_handles = (
instance.data["sourceStart"] - retimed_handle_start)
source_end_handles = (
source_start_handles
+ (retimed_source_duration - 1)
+ retimed_handle_start
+ retimed_handle_end
)
else:
# handles are not retimed
source_end_handles = (
source_start_handles
+ (retimed_source_duration - 1)
+ handle_start
+ handle_end
)
# get frame range with handles for representation range
frame_start_handle = frame_start - handle_start
repre_frame_start = frame_start_handle
if include_handles:
if r_speed == 1.0:
if retimed_speed == 1.0 or not retimed_handles:
frame_start_handle = frame_start
else:
frame_start_handle = (
frame_start - handle_start) + r_handle_start
frame_start - handle_start) + retimed_handle_start
self.log.debug("_ frame_start_handle: {}".format(
frame_start_handle))
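
A worked trace of the retimed-handles branch with illustrative numbers:

source_start = 1001             # instance.data["sourceStart"], hypothetical
retimed_handle_start = 10
retimed_handle_end = 10
retimed_source_duration = 50

source_start_handles = source_start - retimed_handle_start  # 991
source_end_handles = (
    source_start_handles
    + (retimed_source_duration - 1)
    + retimed_handle_start
    + retimed_handle_end
)  # 991 + 49 + 10 + 10 = 1060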
@ -124,6 +137,9 @@ class ExtractSubsetResources(openpype.api.Extractor):
source_duration_handles = (
source_end_handles - source_start_handles) + 1
self.log.debug("_ source_duration_handles: {}".format(
source_duration_handles))
# create staging dir path
staging_dir = self.staging_dir(instance)
@ -147,15 +163,30 @@ class ExtractSubsetResources(openpype.api.Extractor):
if version_data:
instance.data["versionData"].update(version_data)
if r_speed != 1.0:
instance.data["versionData"].update({
"frameStart": frame_start_handle,
"frameEnd": (
(frame_start_handle + source_duration_handles - 1)
- (r_handle_start + r_handle_end)
)
})
self.log.debug("_ i_version_data: {}".format(
# version data start frame
version_frame_start = frame_start
if include_handles:
version_frame_start = frame_start_handle
if retimed_speed != 1.0:
if retimed_handles:
instance.data["versionData"].update({
"frameStart": version_frame_start,
"frameEnd": (
(version_frame_start + source_duration_handles - 1)
- (retimed_handle_start + retimed_handle_end)
)
})
else:
instance.data["versionData"].update({
"handleStart": handle_start,
"handleEnd": handle_end,
"frameStart": version_frame_start,
"frameEnd": (
(version_frame_start + source_duration_handles - 1)
- (handle_start + handle_end)
)
})
self.log.debug("_ version_data: {}".format(
instance.data["versionData"]
))

View file

@ -73,6 +73,8 @@ def load_apps():
opfapi.FlameMenuProjectConnect(opfapi.CTX.app_framework))
opfapi.CTX.flame_apps.append(
opfapi.FlameMenuTimeline(opfapi.CTX.app_framework))
opfapi.CTX.flame_apps.append(
opfapi.FlameMenuUniversal(opfapi.CTX.app_framework))
opfapi.CTX.app_framework.log.info("Apps are loaded")
@ -191,3 +193,27 @@ def get_timeline_custom_ui_actions():
openpype_install()
return _build_app_menu("FlameMenuTimeline")
def get_batch_custom_ui_actions():
"""Hook to create submenu in batch
Returns:
list: menu object
"""
# install openpype and the host
openpype_install()
return _build_app_menu("FlameMenuUniversal")
def get_media_panel_custom_ui_actions():
"""Hook to create submenu in desktop
Returns:
list: menu object
"""
# install openpype and the host
openpype_install()
return _build_app_menu("FlameMenuUniversal")

View file

@ -0,0 +1,10 @@
from .addon import (
FusionAddon,
FUSION_HOST_DIR,
)
__all__ = (
"FusionAddon",
"FUSION_HOST_DIR",
)

View file

@ -0,0 +1,31 @@
import os
from openpype.modules import OpenPypeModule, IHostAddon
FUSION_HOST_DIR = os.path.dirname(os.path.abspath(__file__))
class FusionAddon(OpenPypeModule, IHostAddon):
name = "fusion"
host_name = "fusion"
def initialize(self, module_settings):
self.enabled = True
def get_launch_hook_paths(self, app):
if app.host_name != self.host_name:
return []
return [
os.path.join(FUSION_HOST_DIR, "hooks")
]
def add_implementation_envs(self, env, _app):
# Set default values if they are not already set via settings
defaults = {
"OPENPYPE_LOG_NO_COLORS": "Yes"
}
for key, value in defaults.items():
if not env.get(key):
env[key] = value
def get_workfile_extensions(self):
return [".comp"]

View file

@ -5,10 +5,7 @@ from .pipeline import (
ls,
imprint_container,
parse_container,
get_current_comp,
comp_lock_and_undo_chunk
parse_container
)
from .workio import (
@ -22,8 +19,10 @@ from .workio import (
from .lib import (
maintained_selection,
get_additional_data,
update_frame_range
update_frame_range,
set_asset_framerange,
get_current_comp,
comp_lock_and_undo_chunk
)
from .menu import launch_openpype_menu
@ -38,9 +37,6 @@ __all__ = [
"imprint_container",
"parse_container",
"get_current_comp",
"comp_lock_and_undo_chunk",
# workio
"open_file",
"save_file",
@ -51,8 +47,10 @@ __all__ = [
# lib
"maintained_selection",
"get_additional_data",
"update_frame_range",
"set_asset_framerange",
"get_current_comp",
"comp_lock_and_undo_chunk",
# menu
"launch_openpype_menu",

View file

@ -3,8 +3,7 @@ import sys
import re
import contextlib
from Qt import QtGui
from openpype.lib import Logger
from openpype.client import (
get_asset_by_name,
get_subset_by_name,
@ -17,13 +16,14 @@ from openpype.pipeline import (
switch_container,
legacy_io,
)
from .pipeline import get_current_comp, comp_lock_and_undo_chunk
from openpype.pipeline.context_tools import get_current_project_asset
self = sys.modules[__name__]
self._project = None
def update_frame_range(start, end, comp=None, set_render_range=True):
def update_frame_range(start, end, comp=None, set_render_range=True,
handle_start=0, handle_end=0):
"""Set Fusion comp's start and end frame range
Args:
@ -32,6 +32,8 @@ def update_frame_range(start, end, comp=None, set_render_range=True):
comp (object, Optional): comp object from fusion
set_render_range (bool, Optional): When True this will also set the
composition's render start and end frame.
handle_start (float, int, Optional): frame handles before start frame
handle_end (float, int, Optional): frame handles after end frame
Returns:
None
@ -41,11 +43,16 @@ def update_frame_range(start, end, comp=None, set_render_range=True):
if not comp:
comp = get_current_comp()
# Convert any potential None value to zero
handle_start = handle_start or 0
handle_end = handle_end or 0
attrs = {
"COMPN_GlobalStart": start,
"COMPN_GlobalEnd": end
"COMPN_GlobalStart": start - handle_start,
"COMPN_GlobalEnd": end + handle_end
}
# set frame range
if set_render_range:
attrs.update({
"COMPN_RenderStart": start,
@ -56,24 +63,122 @@ def update_frame_range(start, end, comp=None, set_render_range=True):
comp.SetAttrs(attrs)
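
A hedged usage sketch with illustrative numbers: the global range widens by the handles while the render range keeps the trimmed range.

# comp global range becomes 991-1110, render range stays 1001-1100
update_frame_range(1001, 1100, handle_start=10, handle_end=10)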
def get_additional_data(container):
"""Get Fusion related data for the container
def set_asset_framerange():
"""Set Comp's frame range based on current asset"""
asset_doc = get_current_project_asset()
start = asset_doc["data"]["frameStart"]
end = asset_doc["data"]["frameEnd"]
handle_start = asset_doc["data"]["handleStart"]
handle_end = asset_doc["data"]["handleEnd"]
update_frame_range(start, end, set_render_range=True,
handle_start=handle_start,
handle_end=handle_end)
Args:
container(dict): the container found by the ls() function
Returns:
dict
def set_asset_resolution():
"""Set Comp's resolution width x height default based on current asset"""
asset_doc = get_current_project_asset()
width = asset_doc["data"]["resolutionWidth"]
height = asset_doc["data"]["resolutionHeight"]
comp = get_current_comp()
print("Setting comp frame format resolution to {}x{}".format(width,
height))
comp.SetPrefs({
"Comp.FrameFormat.Width": width,
"Comp.FrameFormat.Height": height,
})
def validate_comp_prefs(comp=None, force_repair=False):
"""Validate current comp defaults with asset settings.
Validates fps, resolutionWidth, resolutionHeight, aspectRatio.
This does *not* validate frameStart, frameEnd, handleStart and handleEnd.
"""
tool = container["_tool"]
tile_color = tool.TileColor
if tile_color is None:
return {}
if comp is None:
comp = get_current_comp()
return {"color": QtGui.QColor.fromRgbF(tile_color["R"],
tile_color["G"],
tile_color["B"])}
log = Logger.get_logger("validate_comp_prefs")
fields = [
"name",
"data.fps",
"data.resolutionWidth",
"data.resolutionHeight",
"data.pixelAspect"
]
asset_doc = get_current_project_asset(fields=fields)
asset_data = asset_doc["data"]
comp_frame_format_prefs = comp.GetPrefs("Comp.FrameFormat")
# Pixel aspect ratio in Fusion is set as AspectX and AspectY so we convert
# the data to something that is more sensible to Fusion
asset_data["pixelAspectX"] = asset_data.pop("pixelAspect")
asset_data["pixelAspectY"] = 1.0
validations = [
("fps", "Rate", "FPS"),
("resolutionWidth", "Width", "Resolution Width"),
("resolutionHeight", "Height", "Resolution Height"),
("pixelAspectX", "AspectX", "Pixel Aspect Ratio X"),
("pixelAspectY", "AspectY", "Pixel Aspect Ratio Y")
]
invalid = []
for key, comp_key, label in validations:
asset_value = asset_data[key]
comp_value = comp_frame_format_prefs.get(comp_key)
if asset_value != comp_value:
invalid_msg = "{} {} should be {}".format(label,
comp_value,
asset_value)
invalid.append(invalid_msg)
if not force_repair:
# Do not log warning if we force repair anyway
log.warning(
"Comp {pref} {value} does not match asset "
"'{asset_name}' {pref} {asset_value}".format(
pref=label,
value=comp_value,
asset_name=asset_doc["name"],
asset_value=asset_value)
)
if invalid:
def _on_repair():
attributes = dict()
for key, comp_key, _label in validations:
value = asset_data[key]
comp_key_full = "Comp.FrameFormat.{}".format(comp_key)
attributes[comp_key_full] = value
comp.SetPrefs(attributes)
if force_repair:
log.info("Applying default Comp preferences..")
_on_repair()
return
from . import menu
from openpype.widgets import popup
from openpype.style import load_stylesheet
dialog = popup.Popup(parent=menu.menu)
dialog.setWindowTitle("Fusion comp has invalid configuration")
msg = "Comp preferences mismatches '{}'".format(asset_doc["name"])
msg += "\n" + "\n".join(invalid)
dialog.setMessage(msg)
dialog.setButtonText("Repair")
dialog.on_clicked.connect(_on_repair)
dialog.show()
dialog.raise_()
dialog.activateWindow()
dialog.setStyleSheet(load_stylesheet())
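
A hedged usage sketch mirroring how this changeset calls it elsewhere (see on_new further below); note the pixelAspect translation above means an anamorphic asset with pixelAspect 2.0 is expected as AspectX 2.0 and AspectY 1.0:

comp = get_current_comp()
validate_comp_prefs(comp, force_repair=True)  # repair silently, no dialog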
def switch_item(container,
@ -195,3 +300,21 @@ def get_frame_path(path):
padding = 4 # default Fusion padding
return filename, padding, ext
def get_current_comp():
"""Hack to get current comp in this session"""
fusion = getattr(sys.modules["__main__"], "fusion", None)
return fusion.CurrentComp if fusion else None
@contextlib.contextmanager
def comp_lock_and_undo_chunk(comp, undo_queue_name="Script CMD"):
"""Lock comp and open an undo chunk during the context"""
try:
comp.Lock()
comp.StartUndo(undo_queue_name)
yield
finally:
comp.Unlock()
comp.EndUndo()
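
A minimal usage sketch of the context manager (tool type and attribute names borrowed from elsewhere in this changeset):

comp = get_current_comp()
with comp_lock_and_undo_chunk(comp, "Toggle savers"):
    for tool in comp.GetToolList(False, "Saver").values():
        tool.SetAttrs({"TOOLB_PassThrough": True})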

View file

@ -1,43 +1,26 @@
import os
import sys
from Qt import QtWidgets, QtCore
from Qt import QtWidgets, QtCore, QtGui
from openpype import style
from openpype.tools.utils import host_tools
from openpype.style import load_stylesheet
from openpype.lib import register_event_callback
from openpype.hosts.fusion.scripts import (
set_rendermode,
duplicate_with_inputs
)
from openpype.hosts.fusion.api.lib import (
set_asset_framerange,
set_asset_resolution
)
from openpype.pipeline import legacy_io
from openpype.resources import get_openpype_icon_filepath
from .pipeline import FusionEventHandler
from .pulse import FusionPulse
def load_stylesheet():
path = os.path.join(os.path.dirname(__file__), "menu_style.qss")
if not os.path.exists(path):
print("Unable to load stylesheet, file not found in resources")
return ""
with open(path, "r") as file_stream:
stylesheet = file_stream.read()
return stylesheet
class Spacer(QtWidgets.QWidget):
def __init__(self, height, *args, **kwargs):
super(Spacer, self).__init__(*args, **kwargs)
self.setFixedHeight(height)
real_spacer = QtWidgets.QWidget(self)
real_spacer.setObjectName("Spacer")
real_spacer.setFixedHeight(height)
layout = QtWidgets.QVBoxLayout(self)
layout.setContentsMargins(0, 0, 0, 0)
layout.addWidget(real_spacer)
self.setLayout(layout)
self = sys.modules[__name__]
self.menu = None
class OpenPypeMenu(QtWidgets.QWidget):
@ -46,15 +29,29 @@ class OpenPypeMenu(QtWidgets.QWidget):
self.setObjectName("OpenPypeMenu")
icon_path = get_openpype_icon_filepath()
icon = QtGui.QIcon(icon_path)
self.setWindowIcon(icon)
self.setWindowFlags(
QtCore.Qt.Window
| QtCore.Qt.CustomizeWindowHint
| QtCore.Qt.WindowTitleHint
| QtCore.Qt.WindowMinimizeButtonHint
| QtCore.Qt.WindowCloseButtonHint
| QtCore.Qt.WindowStaysOnTopHint
)
self.render_mode_widget = None
self.setWindowTitle("OpenPype")
asset_label = QtWidgets.QLabel("Context", self)
asset_label.setStyleSheet("""QLabel {
font-size: 14px;
font-weight: 600;
color: #5f9fb8;
}""")
asset_label.setAlignment(QtCore.Qt.AlignHCenter)
workfiles_btn = QtWidgets.QPushButton("Workfiles...", self)
create_btn = QtWidgets.QPushButton("Create...", self)
publish_btn = QtWidgets.QPushButton("Publish...", self)
@ -62,77 +59,111 @@ class OpenPypeMenu(QtWidgets.QWidget):
manager_btn = QtWidgets.QPushButton("Manage...", self)
libload_btn = QtWidgets.QPushButton("Library...", self)
rendermode_btn = QtWidgets.QPushButton("Set render mode...", self)
set_framerange_btn = QtWidgets.QPushButton("Set Frame Range", self)
set_resolution_btn = QtWidgets.QPushButton("Set Resolution", self)
duplicate_with_inputs_btn = QtWidgets.QPushButton(
"Duplicate with input connections", self
)
reset_resolution_btn = QtWidgets.QPushButton(
"Reset Resolution from project", self
)
layout = QtWidgets.QVBoxLayout(self)
layout.setContentsMargins(10, 20, 10, 20)
layout.addWidget(asset_label)
layout.addSpacing(20)
layout.addWidget(workfiles_btn)
layout.addSpacing(20)
layout.addWidget(create_btn)
layout.addWidget(publish_btn)
layout.addWidget(load_btn)
layout.addWidget(publish_btn)
layout.addWidget(manager_btn)
layout.addWidget(Spacer(15, self))
layout.addSpacing(20)
layout.addWidget(libload_btn)
layout.addWidget(Spacer(15, self))
layout.addSpacing(20)
layout.addWidget(set_framerange_btn)
layout.addWidget(set_resolution_btn)
layout.addWidget(rendermode_btn)
layout.addWidget(Spacer(15, self))
layout.addSpacing(20)
layout.addWidget(duplicate_with_inputs_btn)
layout.addWidget(reset_resolution_btn)
self.setLayout(layout)
# Store reference so we can update the label
self.asset_label = asset_label
workfiles_btn.clicked.connect(self.on_workfile_clicked)
create_btn.clicked.connect(self.on_create_clicked)
publish_btn.clicked.connect(self.on_publish_clicked)
load_btn.clicked.connect(self.on_load_clicked)
manager_btn.clicked.connect(self.on_manager_clicked)
libload_btn.clicked.connect(self.on_libload_clicked)
rendermode_btn.clicked.connect(self.on_rendernode_clicked)
rendermode_btn.clicked.connect(self.on_rendermode_clicked)
duplicate_with_inputs_btn.clicked.connect(
self.on_duplicate_with_inputs_clicked)
reset_resolution_btn.clicked.connect(self.on_reset_resolution_clicked)
set_resolution_btn.clicked.connect(self.on_set_resolution_clicked)
set_framerange_btn.clicked.connect(self.on_set_framerange_clicked)
self._callbacks = []
self.register_callback("taskChanged", self.on_task_changed)
self.on_task_changed()
# Force close current process if Fusion is closed
self._pulse = FusionPulse(parent=self)
self._pulse.start()
# Detect Fusion events as OpenPype events
self._event_handler = FusionEventHandler(parent=self)
self._event_handler.start()
def on_task_changed(self):
# Update current context label
label = legacy_io.Session["AVALON_ASSET"]
self.asset_label.setText(label)
def register_callback(self, name, fn):
# Create a wrapper callback that we only store
# for as long as we want it to persist as callback
def _callback(*args):
fn()
self._callbacks.append(_callback)
register_event_callback(name, _callback)
def deregister_all_callbacks(self):
self._callbacks[:] = []
def on_workfile_clicked(self):
print("Clicked Workfile")
host_tools.show_workfiles()
def on_create_clicked(self):
print("Clicked Create")
host_tools.show_creator()
def on_publish_clicked(self):
print("Clicked Publish")
host_tools.show_publish()
def on_load_clicked(self):
print("Clicked Load")
host_tools.show_loader(use_context=True)
def on_manager_clicked(self):
print("Clicked Manager")
host_tools.show_scene_inventory()
def on_libload_clicked(self):
print("Clicked Library")
host_tools.show_library_loader()
def on_rendernode_clicked(self):
print("Clicked Set Render Mode")
def on_rendermode_clicked(self):
if self.render_mode_widget is None:
window = set_rendermode.SetRenderMode()
window.setStyleSheet(style.load_stylesheet())
window.setStyleSheet(load_stylesheet())
window.show()
self.render_mode_widget = window
else:
@ -140,15 +171,16 @@ class OpenPypeMenu(QtWidgets.QWidget):
def on_duplicate_with_inputs_clicked(self):
duplicate_with_inputs.duplicate_with_input_connections()
print("Clicked Set Colorspace")
def on_reset_resolution_clicked(self):
print("Clicked Reset Resolution")
def on_set_resolution_clicked(self):
set_asset_resolution()
def on_set_framerange_clicked(self):
set_asset_framerange()
def launch_openpype_menu():
app = QtWidgets.QApplication(sys.argv)
app.setQuitOnLastWindowClosed(False)
pype_menu = OpenPypeMenu()
@ -156,5 +188,8 @@ def launch_openpype_menu():
pype_menu.setStyleSheet(stylesheet)
pype_menu.show()
self.menu = pype_menu
sys.exit(app.exec_())
result = app.exec_()
print("Shutting down..")
sys.exit(result)

View file

@ -1,29 +0,0 @@
QWidget {
background-color: #282828;
border-radius: 3;
}
QPushButton {
border: 1px solid #090909;
background-color: #201f1f;
color: #ffffff;
padding: 5;
}
QPushButton:focus {
background-color: "#171717";
color: #d0d0d0;
}
QPushButton:hover {
background-color: "#171717";
color: #e64b3d;
}
#OpenPypeMenu {
border: 1px solid #fef9ef;
}
#Spacer {
background-color: #282828;
}

View file

@ -4,11 +4,15 @@ Basic avalon integration
import os
import sys
import logging
import contextlib
import pyblish.api
from Qt import QtCore
from openpype.api import Logger
from openpype.lib import (
Logger,
register_event_callback,
emit_event
)
from openpype.pipeline import (
register_loader_plugin_path,
register_creator_plugin_path,
@ -18,12 +22,19 @@ from openpype.pipeline import (
deregister_inventory_action_path,
AVALON_CONTAINER_ID,
)
import openpype.hosts.fusion
from openpype.pipeline.load import any_outdated_containers
from openpype.hosts.fusion import FUSION_HOST_DIR
from openpype.tools.utils import host_tools
log = Logger().get_logger(__name__)
from .lib import (
get_current_comp,
comp_lock_and_undo_chunk,
validate_comp_prefs
)
HOST_DIR = os.path.dirname(os.path.abspath(openpype.hosts.fusion.__file__))
PLUGINS_DIR = os.path.join(HOST_DIR, "plugins")
log = Logger.get_logger(__name__)
PLUGINS_DIR = os.path.join(FUSION_HOST_DIR, "plugins")
PUBLISH_PATH = os.path.join(PLUGINS_DIR, "publish")
LOAD_PATH = os.path.join(PLUGINS_DIR, "load")
@ -31,16 +42,32 @@ CREATE_PATH = os.path.join(PLUGINS_DIR, "create")
INVENTORY_PATH = os.path.join(PLUGINS_DIR, "inventory")
class CompLogHandler(logging.Handler):
class FusionLogHandler(logging.Handler):
# Keep a reference to fusion's Print function (Remote Object)
_print = None
@property
def print(self):
if self._print is not None:
# Use cached
return self._print
_print = getattr(sys.modules["__main__"], "fusion").Print
if _print is None:
# Backwards compatibility: Print method on Fusion instance was
# added around Fusion 17.4 and wasn't available on PyRemote Object
# before
_print = get_current_comp().Print
self._print = _print
return _print
def emit(self, record):
entry = self.format(record)
comp = get_current_comp()
if comp:
comp.Print(entry)
self.print(entry)
def install():
"""Install fusion-specific functionality of avalon-core.
"""Install fusion-specific functionality of OpenPype.
This is where you install menus and register families, data
and loaders into fusion.
@ -52,20 +79,18 @@ def install():
"""
# Remove all handlers associated with the root logger object, because
# that one sometimes logs as "warnings" incorrectly.
# that one always logs as "warnings" incorrectly.
for handler in logging.root.handlers[:]:
logging.root.removeHandler(handler)
# Attach default logging handler that prints to active comp
logger = logging.getLogger()
formatter = logging.Formatter(fmt="%(message)s\n")
handler = CompLogHandler()
handler = FusionLogHandler()
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.DEBUG)
log.info("openpype.hosts.fusion installed")
pyblish.api.register_host("fusion")
pyblish.api.register_plugin_path(PUBLISH_PATH)
log.info("Registering Fusion plug-ins..")
@ -78,6 +103,11 @@ def install():
"instanceToggled", on_pyblish_instance_toggled
)
# Register events
register_event_callback("open", on_after_open)
register_event_callback("save", on_save)
register_event_callback("new", on_new)
def uninstall():
"""Uninstall all that was installed
@ -103,7 +133,7 @@ def uninstall():
)
def on_pyblish_instance_toggled(instance, new_value, old_value):
def on_pyblish_instance_toggled(instance, old_value, new_value):
"""Toggle saver tool passthrough states on instance toggles."""
comp = instance.context.data.get("currentComp")
if not comp:
@ -126,6 +156,48 @@ def on_pyblish_instance_toggled(instance, new_value, old_value):
tool.SetAttrs({"TOOLB_PassThrough": passthrough})
def on_new(event):
comp = event["Rets"]["comp"]
validate_comp_prefs(comp, force_repair=True)
def on_save(event):
comp = event["sender"]
validate_comp_prefs(comp)
def on_after_open(event):
comp = event["sender"]
validate_comp_prefs(comp)
if any_outdated_containers():
log.warning("Scene has outdated content.")
# Find OpenPype menu to attach to
from . import menu
def _on_show_scene_inventory():
# ensure that comp is active
frame = comp.CurrentFrame
if not frame:
print("Comp is closed, skipping show scene inventory")
return
frame.ActivateFrame() # raise comp window
host_tools.show_scene_inventory()
from openpype.widgets import popup
from openpype.style import load_stylesheet
dialog = popup.Popup(parent=menu.menu)
dialog.setWindowTitle("Fusion comp has outdated content")
dialog.setMessage("There are outdated containers in "
"your Fusion comp.")
dialog.on_clicked.connect(_on_show_scene_inventory)
dialog.show()
dialog.raise_()
dialog.activateWindow()
dialog.setStyleSheet(load_stylesheet())
def ls():
"""List containers from active Fusion scene
@ -139,7 +211,7 @@ def ls():
"""
comp = get_current_comp()
tools = comp.GetToolList(False, "Loader").values()
tools = comp.GetToolList(False).values()
for tool in tools:
container = parse_container(tool)
@ -211,19 +283,114 @@ def parse_container(tool):
return container
def get_current_comp():
"""Hack to get current comp in this session"""
fusion = getattr(sys.modules["__main__"], "fusion", None)
return fusion.CurrentComp if fusion else None
class FusionEventThread(QtCore.QThread):
"""QThread which will periodically ping Fusion app for any events.
The fusion.UIManager must be set up to be notified of events before they'll
be reported by this thread, for example:
fusion.UIManager.AddNotify("Comp_Save", None)
"""
on_event = QtCore.Signal(dict)
def run(self):
app = getattr(sys.modules["__main__"], "app", None)
if app is None:
# No Fusion app found
return
# As optimization store the GetEvent method directly because every
# getattr of UIManager.GetEvent tries to resolve the Remote Function
# through the PyRemoteObject
get_event = app.UIManager.GetEvent
delay = int(os.environ.get("OPENPYPE_FUSION_CALLBACK_INTERVAL", 1000))
while True:
if self.isInterruptionRequested():
return
# Process all events that have been queued up until now
while True:
event = get_event(False)
if not event:
break
self.on_event.emit(event)
# Wait some time before processing events again
# to not keep blocking the UI
self.msleep(delay)
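
A hedged wiring sketch (the parent QObject is hypothetical); FusionEventHandler below does exactly this:

thread = FusionEventThread(parent=some_qobject)
thread.on_event.connect(lambda event: print(event["what"]))
thread.start()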
@contextlib.contextmanager
def comp_lock_and_undo_chunk(comp, undo_queue_name="Script CMD"):
"""Lock comp and open an undo chunk during the context"""
try:
comp.Lock()
comp.StartUndo(undo_queue_name)
yield
finally:
comp.Unlock()
comp.EndUndo()
class FusionEventHandler(QtCore.QObject):
"""Emits OpenPype events based on Fusion events captured in a QThread.
This will emit the following OpenPype events based on Fusion actions:
save: Comp_Save, Comp_SaveAs
open: Comp_Opened
new: Comp_New
To use this you can attach it to your Qt UI so it runs in the background.
E.g.
>>> handler = FusionEventHandler(parent=window)
>>> handler.start()
"""
ACTION_IDS = [
"Comp_Save",
"Comp_SaveAs",
"Comp_New",
"Comp_Opened"
]
def __init__(self, parent=None):
super(FusionEventHandler, self).__init__(parent=parent)
# Set up Fusion event callbacks
fusion = getattr(sys.modules["__main__"], "fusion", None)
ui = fusion.UIManager
# Add notifications for the ones we want to listen to
notifiers = []
for action_id in self.ACTION_IDS:
notifier = ui.AddNotify(action_id, None)
notifiers.append(notifier)
# TODO: Not entirely sure whether these must be kept to avoid
# garbage collection
self._notifiers = notifiers
self._event_thread = FusionEventThread(parent=self)
self._event_thread.on_event.connect(self._on_event)
def start(self):
self._event_thread.start()
def stop(self):
self._event_thread.requestInterruption()
def _on_event(self, event):
"""Handle Fusion events to emit OpenPype events"""
if not event:
return
what = event["what"]
# Comp Save
if what in {"Comp_Save", "Comp_SaveAs"}:
if not event["Rets"].get("success"):
# If the Save action is cancelled it will still emit an
# event but with "success": False so we ignore those cases
return
# Comp was saved
emit_event("save", data=event)
return
# Comp New
elif what in {"Comp_New"}:
emit_event("new", data=event)
# Comp Opened
elif what in {"Comp_Opened"}:
emit_event("open", data=event)

View file

@ -0,0 +1,63 @@
import os
import sys
from Qt import QtCore
class PulseThread(QtCore.QThread):
no_response = QtCore.Signal()
def __init__(self, parent=None):
super(PulseThread, self).__init__(parent=parent)
def run(self):
app = getattr(sys.modules["__main__"], "app", None)
# Interval in milliseconds
interval = int(os.environ.get("OPENPYPE_FUSION_PULSE_INTERVAL", 1000))
while True:
if self.isInterruptionRequested():
return
# We don't need to call Test because PyRemoteObject of the app
# will actually fail to even resolve the Test function if it has
# gone down. So we can actually already just check by confirming
# the method is still getting resolved. (Optimization)
if app.Test is None:
self.no_response.emit()
self.msleep(interval)
class FusionPulse(QtCore.QObject):
"""A Timer that checks whether host app is still alive.
This checks at a set interval whether the Fusion process is still
active. This is useful due to how Fusion runs its scripts: each script
runs in its own environment and process (a `fusionscript` process each).
If Fusion goes down while a UI process is still running, the
`fusionscript.exe` can remain running in limbo in the background,
e.g. because a Qt interface's QApplication keeps running
indefinitely.
Warning:
When the host is not detected this will automatically exit
the current process.
"""
def __init__(self, parent=None):
super(FusionPulse, self).__init__(parent=parent)
self._thread = PulseThread(parent=self)
self._thread.no_response.connect(self.on_no_response)
def on_no_response(self):
print("Pulse detected no response from Fusion..")
sys.exit(1)
def start(self):
self._thread.start()
def stop(self):
self._thread.requestInterruption()

View file

@ -2,13 +2,11 @@
import sys
import os
from openpype.pipeline import HOST_WORKFILE_EXTENSIONS
from .pipeline import get_current_comp
from .lib import get_current_comp
def file_extensions():
return HOST_WORKFILE_EXTENSIONS["fusion"]
return [".comp"]
def has_unsaved_changes():

View file

@ -0,0 +1,60 @@
{
Action
{
ID = "OpenPype_Menu",
Category = "OpenPype",
Name = "OpenPype Menu",
Targets =
{
Composition =
{
Execute = _Lua [=[
local scriptPath = app:MapPath("OpenPype:MenuScripts/openpype_menu.py")
if bmd.fileexists(scriptPath) == false then
print("[OpenPype Error] Can't run file: " .. scriptPath)
else
target:RunScript(scriptPath)
end
]=],
},
},
},
Action
{
ID = "OpenPype_Install_PySide2",
Category = "OpenPype",
Name = "Install PySide2",
Targets =
{
Composition =
{
Execute = _Lua [=[
local scriptPath = app:MapPath("OpenPype:MenuScripts/install_pyside2.py")
if bmd.fileexists(scriptPath) == false then
print("[OpenPype Error] Can't run file: " .. scriptPath)
else
target:RunScript(scriptPath)
end
]=],
},
},
},
Menus
{
Target = "ChildFrame",
Before "Help"
{
Sub "OpenPype"
{
"OpenPype_Menu{}",
"_",
Sub "Admin" {
"OpenPype_Install_PySide2{}"
}
}
},
},
}

View file

@ -0,0 +1,6 @@
### OpenPype deploy MenuScripts
Note that `MenuScripts` is not an official Fusion folder.
OpenPype only uses this folder in `{fusion}/deploy/` to trigger the OpenPype menu actions.
The scripts here are referenced by the actions defined in the `.fu` files in `{fusion}/deploy/Config`.

View file

@ -0,0 +1,29 @@
# This is just a quick hack for users running Py3 locally but having no
# Qt library installed
import os
import subprocess
import importlib
try:
from Qt import QtWidgets # noqa: F401
from Qt import __binding__
print(f"Qt binding: {__binding__}")
mod = importlib.import_module(__binding__)
print(f"Qt path: {mod.__file__}")
print("Qt library found, nothing to do..")
except ImportError:
print("Assuming no Qt library is installed..")
print('Installing PySide2 for Python 3.6: '
f'{os.environ["FUSION16_PYTHON36_HOME"]}')
# Get full path to python executable
exe = "python.exe" if os.name == 'nt' else "python"
python = os.path.join(os.environ["FUSION16_PYTHON36_HOME"], exe)
assert os.path.exists(python), f"Python doesn't exist: {python}"
# Do python -m pip install PySide2
args = [python, "-m", "pip", "install", "PySide2"]
print(f"Args: {args}")
subprocess.Popen(args)

View file

@ -0,0 +1,35 @@
import os
import sys
from openpype.lib import Logger
from openpype.pipeline import (
install_host,
registered_host,
)
def main(env):
# This script working directory starts in Fusion application folder.
# However the contents of that folder can conflict with Qt library dlls
# so we make sure to move out of it to avoid DLL Load Failed errors.
os.chdir("..")
from openpype.hosts.fusion import api
from openpype.hosts.fusion.api import menu
# install the Fusion host integration
install_host(api)
log = Logger.get_logger(__name__)
log.info(f"Registered host: {registered_host()}")
menu.launch_openpype_menu()
# Initiate a QTimer to check if Fusion is still alive every X interval
# If Fusion is not found - kill itself
# todo(roy): Implement timer that ensures UI doesn't remain when e.g.
# Fusion closes down
if __name__ == "__main__":
result = main(os.environ)
sys.exit(not bool(result))

Some files were not shown because too many files have changed in this diff.