Merge remote-tracking branch 'upstream/feature/OP-2479_color-v3-Colorspace-management-and-distribution' into substance_painter_ocio

Commit 9e8b6f8246 by Roy Nieterau, 2023-01-14 23:40:18 +01:00
215 changed files with 7020 additions and 2702 deletions

View file

@@ -17,7 +17,7 @@ jobs:
       - name: Set up Python
         uses: actions/setup-python@v2
         with:
-          python-version: 3.7
+          python-version: 3.9
       - name: Install Python requirements
         run: pip install gitpython semver PyGithub

View file

@@ -19,7 +19,7 @@ jobs:
       - name: Set up Python
         uses: actions/setup-python@v2
         with:
-          python-version: 3.7
+          python-version: 3.9
      - name: Install Python requirements
        run: pip install gitpython semver PyGithub

View file

@@ -18,7 +18,7 @@ jobs:
     runs-on: windows-latest
     strategy:
       matrix:
-        python-version: [3.7]
+        python-version: [3.9]
     steps:
       - name: 🚛 Checkout Code
@@ -45,7 +45,7 @@ jobs:
     runs-on: ubuntu-latest
     strategy:
       matrix:
-        python-version: [3.7]
+        python-version: [3.9]
     steps:
       - name: 🚛 Checkout Code
@@ -70,7 +70,7 @@ jobs:
 #    runs-on: macos-latest
 #    strategy:
 #      matrix:
-#        python-version: [3.7]
+#        python-version: [3.9]
 #    steps:
 #      - name: 🚛 Checkout Code
@@ -87,4 +87,4 @@ jobs:
 #      - name: 🔨 Build
 #        run: |
 #          ./tools/build.sh

View file

@@ -1,6 +1,6 @@
 # Build Pype docker image
 FROM ubuntu:focal AS builder
-ARG OPENPYPE_PYTHON_VERSION=3.7.12
+ARG OPENPYPE_PYTHON_VERSION=3.9.12
 ARG BUILD_DATE
 ARG VERSION

View file

@@ -1,6 +1,6 @@
 # Build Pype docker image
 FROM centos:7 AS builder
-ARG OPENPYPE_PYTHON_VERSION=3.7.12
+ARG OPENPYPE_PYTHON_VERSION=3.9.12
 LABEL org.opencontainers.image.name="pypeclub/openpype"
 LABEL org.opencontainers.image.title="OpenPype Docker Image"
@@ -96,11 +96,11 @@ RUN source $HOME/.bashrc \
 RUN source $HOME/.bashrc \
     && bash ./tools/build.sh
-RUN cp /usr/lib64/libffi* ./build/exe.linux-x86_64-3.7/lib \
-    && cp /usr/lib64/libssl* ./build/exe.linux-x86_64-3.7/lib \
-    && cp /usr/lib64/libcrypto* ./build/exe.linux-x86_64-3.7/lib \
-    && cp /root/.pyenv/versions/${OPENPYPE_PYTHON_VERSION}/lib/libpython* ./build/exe.linux-x86_64-3.7/lib \
-    && cp /usr/lib64/libxcb* ./build/exe.linux-x86_64-3.7/vendor/python/PySide2/Qt/lib
+RUN cp /usr/lib64/libffi* ./build/exe.linux-x86_64-3.9/lib \
+    && cp /usr/lib64/libssl* ./build/exe.linux-x86_64-3.9/lib \
+    && cp /usr/lib64/libcrypto* ./build/exe.linux-x86_64-3.9/lib \
+    && cp /root/.pyenv/versions/${OPENPYPE_PYTHON_VERSION}/lib/libpython* ./build/exe.linux-x86_64-3.9/lib \
+    && cp /usr/lib64/libxcb* ./build/exe.linux-x86_64-3.9/vendor/python/PySide2/Qt/lib
 RUN cd /opt/openpype \
     rm -rf ./vendor/bin

Dockerfile.debian (new file, 81 additions)
View file

@@ -0,0 +1,81 @@
# Build Pype docker image
FROM debian:bullseye AS builder
ARG OPENPYPE_PYTHON_VERSION=3.9.12
ARG BUILD_DATE
ARG VERSION
LABEL maintainer="info@openpype.io"
LABEL description="Docker Image to build and run OpenPype under Debian Bullseye"
LABEL org.opencontainers.image.name="pypeclub/openpype"
LABEL org.opencontainers.image.title="OpenPype Docker Image"
LABEL org.opencontainers.image.url="https://openpype.io/"
LABEL org.opencontainers.image.source="https://github.com/pypeclub/OpenPype"
LABEL org.opencontainers.image.documentation="https://openpype.io/docs/system_introduction"
LABEL org.opencontainers.image.created=$BUILD_DATE
LABEL org.opencontainers.image.version=$VERSION
USER root
ARG DEBIAN_FRONTEND=noninteractive
# update base
RUN apt-get update \
&& apt-get install -y --no-install-recommends \
ca-certificates \
bash \
git \
cmake \
make \
curl \
wget \
build-essential \
libssl-dev \
zlib1g-dev \
libbz2-dev \
libreadline-dev \
libsqlite3-dev \
llvm \
libncursesw5-dev \
xz-utils \
tk-dev \
libxml2-dev \
libxmlsec1-dev \
libffi-dev \
liblzma-dev \
patchelf
SHELL ["/bin/bash", "-c"]
RUN mkdir /opt/openpype
# download and install pyenv
RUN curl https://pyenv.run | bash \
&& echo 'export PATH="$HOME/.pyenv/bin:$PATH"'>> $HOME/init_pyenv.sh \
&& echo 'eval "$(pyenv init -)"' >> $HOME/init_pyenv.sh \
&& echo 'eval "$(pyenv virtualenv-init -)"' >> $HOME/init_pyenv.sh \
&& echo 'eval "$(pyenv init --path)"' >> $HOME/init_pyenv.sh
# install python with pyenv
RUN source $HOME/init_pyenv.sh \
&& pyenv install ${OPENPYPE_PYTHON_VERSION}
COPY . /opt/openpype/
RUN chmod +x /opt/openpype/tools/create_env.sh && chmod +x /opt/openpype/tools/build.sh
WORKDIR /opt/openpype
# set local python version
RUN cd /opt/openpype \
&& source $HOME/init_pyenv.sh \
&& pyenv local ${OPENPYPE_PYTHON_VERSION}
# fetch third party tools/libraries
RUN source $HOME/init_pyenv.sh \
&& ./tools/create_env.sh \
&& ./tools/fetch_thirdparty_libs.sh
# build openpype
RUN source $HOME/init_pyenv.sh \
&& bash ./tools/build.sh

View file

@@ -5,7 +5,7 @@
 OpenPype
 ====
-[![documentation](https://github.com/pypeclub/pype/actions/workflows/documentation.yml/badge.svg)](https://github.com/pypeclub/pype/actions/workflows/documentation.yml) ![GitHub VFX Platform](https://img.shields.io/badge/vfx%20platform-2021-lightgrey?labelColor=303846)
+[![documentation](https://github.com/pypeclub/pype/actions/workflows/documentation.yml/badge.svg)](https://github.com/pypeclub/pype/actions/workflows/documentation.yml) ![GitHub VFX Platform](https://img.shields.io/badge/vfx%20platform-2022-lightgrey?labelColor=303846)
@@ -31,7 +31,7 @@ The main things you will need to run and build OpenPype are:
 - **Terminal** in your OS
     - PowerShell 5.0+ (Windows)
     - Bash (Linux)
-- [**Python 3.7.8**](#python) or higher
+- [**Python 3.9.6**](#python) or higher
 - [**MongoDB**](#database) (needed only for local development)
@@ -50,13 +50,14 @@ For more details on requirements visit [requirements documentation](https://open
 Building OpenPype
 -------------
-To build OpenPype you currently need [Python 3.7](https://www.python.org/downloads/) as we are following
-[vfx platform](https://vfxplatform.com). Because of some Linux distros comes with newer Python version
-already, you need to install **3.7** version and make use of it. You can use perhaps [pyenv](https://github.com/pyenv/pyenv) for this on Linux.
+To build OpenPype you currently need [Python 3.9](https://www.python.org/downloads/) as we are following
+[vfx platform](https://vfxplatform.com). Because some Linux distros come with a newer Python version
+already, you need to install the **3.9** version and make use of it. You can use [pyenv](https://github.com/pyenv/pyenv) for this on Linux.
+
+**Note**: We do not support 3.9.0 because of [this bug](https://github.com/python/cpython/pull/22670). Please use higher versions of 3.9.x.
 ### Windows
-You will need [Python 3.7](https://www.python.org/downloads/) and [git](https://git-scm.com/downloads).
+You will need [Python >= 3.9.1](https://www.python.org/downloads/) and [git](https://git-scm.com/downloads).
 More tools might be needed for installing dependencies (for example for **OpenTimelineIO**) - mostly
 development tools like [CMake](https://cmake.org/) and [Visual Studio](https://visualstudio.microsoft.com/cs/downloads/)
@@ -82,7 +83,7 @@ OpenPype is build using [CX_Freeze](https://cx-freeze.readthedocs.io/en/latest)
 ### macOS
-You will need [Python 3.7](https://www.python.org/downloads/) and [git](https://git-scm.com/downloads). You'll need also other tools to build
+You will need [Python >= 3.9](https://www.python.org/downloads/) and [git](https://git-scm.com/downloads). You'll also need other tools to build
 some OpenPype dependencies like [CMake](https://cmake.org/) and **XCode Command Line Tools** (or some other build system).
 Easy way of installing everything necessary is to use [Homebrew](https://brew.sh):
@@ -106,19 +107,19 @@ exec "$SHELL"
 PATH=$(pyenv root)/shims:$PATH
 ```
-4) Pull in required Python version 3.7.x
+4) Pull in required Python version 3.9.x
 ```sh
 # install Python build dependences
 brew install openssl readline sqlite3 xz zlib
-# replace with up-to-date 3.7.x version
-pyenv install 3.7.9
+# replace with up-to-date 3.9.x version
+pyenv install 3.9.6
 ```
 5) Set local Python version
 ```sh
 # switch to OpenPype source directory
-pyenv local 3.7.9
+pyenv local 3.9.6
 ```
 #### To build OpenPype:
@@ -145,7 +146,7 @@ sudo ./tools/docker_build.sh centos7
 If all is successful, you'll find built OpenPype in `./build/` folder.
 #### Manual build
-You will need [Python 3.7](https://www.python.org/downloads/) and [git](https://git-scm.com/downloads). You'll also need [curl](https://curl.se) on systems that doesn't have one preinstalled.
+You will need [Python >= 3.9](https://www.python.org/downloads/) and [git](https://git-scm.com/downloads). You'll also need [curl](https://curl.se) on systems that don't have it preinstalled.
 To build Python related stuff, you need Python header files installed (`python3-dev` on Ubuntu for example).
@@ -222,14 +223,14 @@ eval "$(pyenv virtualenv-init -)"
 # reload shell
 exec $SHELL
-# install Python 3.7.9
-pyenv install -v 3.7.9
+# install Python 3.9.x
+pyenv install -v 3.9.6
 # change path to OpenPype 3
 cd /path/to/openpype-3
 # set local python version
-pyenv local 3.7.9
+pyenv local 3.9.6
 ```
 </details>
@@ -345,4 +346,4 @@ Thanks goes to these wonderful people ([emoji key](https://allcontributors.org/d
 <!-- ALL-CONTRIBUTORS-LIST:END -->
 This project follows the [all-contributors](https://github.com/all-contributors/all-contributors) specification. Contributions of any kind welcome!

View file

@@ -1,4 +1,3 @@
-import enlighten
 import os
 import re
 import urllib
@@ -252,6 +251,11 @@ class RemoteFileHandler:
             if key.startswith('download_warning'):
                 return value
+        # handle antivirus warning for big zips
+        found = re.search("(confirm=)([^&.+])", response.text)
+        if found:
+            return found.groups()[1]
+
         return None
     @staticmethod
@@ -259,15 +263,9 @@ class RemoteFileHandler:
         response_gen, destination,
     ):
         with open(destination, "wb") as f:
-            pbar = enlighten.Counter(
-                total=None, desc="Save content", units="%", color="green")
-            progress = 0
             for chunk in response_gen:
                 if chunk:  # filter out keep-alive new chunks
                     f.write(chunk)
-                    progress += len(chunk)
-            pbar.close()
     @staticmethod
     def _quota_exceeded(first_chunk):

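The HTML fallback added above is easiest to see in isolation. A minimal sketch of the resulting token lookup, with a hypothetical `_extract_confirm_token` helper standing in for the real `RemoteFileHandler` method and a `requests`-style `response` object assumed:

```python
import re


def _extract_confirm_token(response):
    # hypothetical helper mirroring the change above; `response` is assumed
    # to be a requests.Response from a Google Drive download request
    for key, value in response.cookies.items():
        # original path: the token arrives as a cookie
        if key.startswith("download_warning"):
            return value
    # new fallback: for big zips Google Drive returns an HTML antivirus
    # warning page and embeds the token in the body instead of a cookie
    found = re.search("(confirm=)([^&.+])", response.text)
    if found:
        return found.groups()[1]
    return None
```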
View file

@@ -24,7 +24,7 @@ def open_dialog():
     if os.getenv("OPENPYPE_HEADLESS_MODE"):
         print("!!! Can't open dialog in headless mode. Exiting.")
         sys.exit(1)
-    from Qt import QtWidgets, QtCore
+    from qtpy import QtWidgets, QtCore
     from .install_dialog import InstallDialog
     scale_attr = getattr(QtCore.Qt, "AA_EnableHighDpiScaling", None)
@@ -47,7 +47,7 @@ def open_update_window(openpype_version):
     if os.getenv("OPENPYPE_HEADLESS_MODE"):
         print("!!! Can't open dialog in headless mode. Exiting.")
         sys.exit(1)
-    from Qt import QtWidgets, QtCore
+    from qtpy import QtWidgets, QtCore
     from .update_window import UpdateWindow
     scale_attr = getattr(QtCore.Qt, "AA_EnableHighDpiScaling", None)
@@ -71,7 +71,7 @@ def show_message_dialog(title, message):
     if os.getenv("OPENPYPE_HEADLESS_MODE"):
         print("!!! Can't open dialog in headless mode. Exiting.")
         sys.exit(1)
-    from Qt import QtWidgets, QtCore
+    from qtpy import QtWidgets, QtCore
     from .message_dialog import MessageDialog
     scale_attr = getattr(QtCore.Qt, "AA_EnableHighDpiScaling", None)

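All three dialog openers in this file now follow the same shape. A condensed, self-contained sketch of that pattern; the dialog class here is a plain placeholder, not the real `InstallDialog`:

```python
import os
import sys


def open_dialog():
    # guard first: never import a Qt binding in headless mode
    if os.getenv("OPENPYPE_HEADLESS_MODE"):
        print("!!! Can't open dialog in headless mode. Exiting.")
        sys.exit(1)

    # deferred import via qtpy, so PySide2/PySide6/PyQt5 all work
    from qtpy import QtWidgets, QtCore

    # the high-DPI attribute does not exist in every binding, hence getattr
    scale_attr = getattr(QtCore.Qt, "AA_EnableHighDpiScaling", None)
    if scale_attr is not None:
        QtWidgets.QApplication.setAttribute(scale_attr)

    app = QtWidgets.QApplication(sys.argv)
    dialog = QtWidgets.QDialog()  # placeholder for the real dialog class
    dialog.show()
    app.exec_()
```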
View file

@@ -2,8 +2,7 @@
 """Open install dialog."""
 import sys
-from Qt import QtWidgets  # noqa
-from Qt.QtCore import Signal  # noqa
+from qtpy import QtWidgets
 from .install_dialog import InstallDialog

View file

@@ -57,11 +57,9 @@ class OpenPypeVersion(semver.VersionInfo):
     """Class for storing information about OpenPype version.

     Attributes:
-        staging (bool): True if it is staging version
         path (str): path to OpenPype

     """
-    staging = False
     path = None

     # this should match any string complying with https://semver.org/
     _VERSION_REGEX = re.compile(r"(?P<major>0|[1-9]\d*)\.(?P<minor>0|[1-9]\d*)\.(?P<patch>0|[1-9]\d*)(?:-(?P<prerelease>[a-zA-Z\d\-.]*))?(?:\+(?P<buildmetadata>[a-zA-Z\d\-.]*))?")  # noqa: E501
@@ -83,12 +81,10 @@ class OpenPypeVersion(semver.VersionInfo):
             build (str): an optional build string
             version (str): if set, it will be parsed and will override
                 parameters like `major`, `minor` and so on.
-            staging (bool): set to True if version is staging.
             path (Path): path to version location.

         """
         self.path = None
-        self.staging = False

         if "version" in kwargs.keys():
             if not kwargs.get("version"):
@@ -113,29 +109,8 @@ class OpenPypeVersion(semver.VersionInfo):
         if "path" in kwargs.keys():
             kwargs.pop("path")

-        if kwargs.get("staging"):
-            self.staging = kwargs.get("staging", False)
-            kwargs.pop("staging")
-
-        if "staging" in kwargs.keys():
-            kwargs.pop("staging")
-
-        if self.staging:
-            if kwargs.get("build"):
-                if "staging" not in kwargs.get("build"):
-                    kwargs["build"] = f"{kwargs.get('build')}-staging"
-            else:
-                kwargs["build"] = "staging"
-
-        if kwargs.get("build") and "staging" in kwargs.get("build", ""):
-            self.staging = True
-
         super().__init__(*args, **kwargs)

-    def __eq__(self, other):
-        result = super().__eq__(other)
-        return bool(result and self.staging == other.staging)
-
     def __repr__(self):
         return f"<{self.__class__.__name__}: {str(self)} - path={self.path}>"
@@ -150,43 +125,11 @@ class OpenPypeVersion(semver.VersionInfo):
             return True
         if self.finalize_version() == other.finalize_version() and \
-                self.prerelease == other.prerelease and \
-                self.is_staging() and not other.is_staging():
+                self.prerelease == other.prerelease:
             return True

         return result

-    def set_staging(self) -> OpenPypeVersion:
-        """Set version as staging and return it.
-
-        This will preserve current one.
-
-        Returns:
-            OpenPypeVersion: Set as staging.
-
-        """
-        if self.staging:
-            return self
-        return self.replace(parts={"build": f"{self.build}-staging"})
-
-    def set_production(self) -> OpenPypeVersion:
-        """Set version as production and return it.
-
-        This will preserve current one.
-
-        Returns:
-            OpenPypeVersion: Set as production.
-
-        """
-        if not self.staging:
-            return self
-        return self.replace(
-            parts={"build": self.build.replace("-staging", "")})
-
-    def is_staging(self) -> bool:
-        """Test if current version is staging one."""
-        return self.staging
-
     def get_main_version(self) -> str:
         """Return main version component.
@@ -218,21 +161,8 @@ class OpenPypeVersion(semver.VersionInfo):
         if not m:
             return None
         version = OpenPypeVersion.parse(string[m.start():m.end()])
-        if "staging" in string[m.start():m.end()]:
-            version.staging = True
         return version

-    @classmethod
-    def parse(cls, version):
-        """Extends parse to handle ta handle staging variant."""
-        v = super().parse(version)
-        openpype_version = cls(major=v.major, minor=v.minor,
-                               patch=v.patch, prerelease=v.prerelease,
-                               build=v.build)
-        if v.build and "staging" in v.build:
-            openpype_version.staging = True
-        return openpype_version
-
     def __hash__(self):
         return hash(self.path) if self.path else hash(str(self))
@@ -382,80 +312,28 @@ class OpenPypeVersion(semver.VersionInfo):
             return False

     @classmethod
-    def get_local_versions(
-            cls, production: bool = None,
-            staging: bool = None
-    ) -> List:
+    def get_local_versions(cls) -> List:
         """Get all versions available on this machine.

-        Arguments give ability to specify if filtering is needed. If both
-        arguments are set to None all found versions are returned.
-
-        Args:
-            production (bool): Return production versions.
-            staging (bool): Return staging versions.
-
         Returns:
             list: of compatible versions available on the machine.

         """
-        # Return all local versions if arguments are set to None
-        if production is None and staging is None:
-            production = True
-            staging = True
-        elif production is None and not staging:
-            production = True
-        elif staging is None and not production:
-            staging = True
-
-        # Just return empty output if both are disabled
-        if not production and not staging:
-            return []
-
         # DEPRECATED: backwards compatible way to look for versions in root
         dir_to_search = Path(user_data_dir("openpype", "pypeclub"))
         versions = OpenPypeVersion.get_versions_from_directory(dir_to_search)
-        filtered_versions = []
-        for version in versions:
-            if version.is_staging():
-                if staging:
-                    filtered_versions.append(version)
-            elif production:
-                filtered_versions.append(version)
-        return list(sorted(set(filtered_versions)))
+        return list(sorted(set(versions)))

     @classmethod
-    def get_remote_versions(
-            cls, production: bool = None,
-            staging: bool = None
-    ) -> List:
+    def get_remote_versions(cls) -> List:
         """Get all versions available in OpenPype Path.

-        Arguments give ability to specify if filtering is needed. If both
-        arguments are set to None all found versions are returned.
-
-        Args:
-            production (bool): Return production versions.
-            staging (bool): Return staging versions.
+        Returns:
+            list of OpenPypeVersions: Versions found in OpenPype path.

         """
-        # Return all local versions if arguments are set to None
-        if production is None and staging is None:
-            production = True
-            staging = True
-        elif production is None and not staging:
-            production = True
-        elif staging is None and not production:
-            staging = True
-
-        # Just return empty output if both are disabled
-        if not production and not staging:
-            return []
-
         dir_to_search = None
         if cls.openpype_path_is_accessible():
@@ -476,14 +354,7 @@ class OpenPypeVersion(semver.VersionInfo):
         versions = cls.get_versions_from_directory(dir_to_search)

-        filtered_versions = []
-        for version in versions:
-            if version.is_staging():
-                if staging:
-                    filtered_versions.append(version)
-            elif production:
-                filtered_versions.append(version)
-        return list(sorted(set(filtered_versions)))
+        return list(sorted(set(versions)))

     @staticmethod
     def get_versions_from_directory(
@@ -562,7 +433,6 @@ class OpenPypeVersion(semver.VersionInfo):
     @staticmethod
     def get_latest_version(
-            staging: bool = False,
             local: bool = None,
             remote: bool = None
     ) -> Union[OpenPypeVersion, None]:
@@ -571,7 +441,6 @@ class OpenPypeVersion(semver.VersionInfo):
         The version does not contain information about path and source.

         This is utility version to get the latest version from all found.
-        Build version is not listed if staging is enabled.

         Arguments 'local' and 'remote' define if local and remote repository
         versions are used. All versions are used if both are not set (or set
@@ -580,7 +449,6 @@ class OpenPypeVersion(semver.VersionInfo):
         'False' in that case only build version can be used.

         Args:
-            staging (bool, optional): List staging versions if True.
             local (bool, optional): List local versions if True.
             remote (bool, optional): List remote versions if True.
@@ -599,22 +467,9 @@ class OpenPypeVersion(semver.VersionInfo):
             remote = True

         installed_version = OpenPypeVersion.get_installed_version()
-        local_versions = []
-        remote_versions = []
-        if local:
-            local_versions = OpenPypeVersion.get_local_versions(
-                staging=staging
-            )
-        if remote:
-            remote_versions = OpenPypeVersion.get_remote_versions(
-                staging=staging
-            )
-        all_versions = local_versions + remote_versions
-        if not staging:
-            all_versions.append(installed_version)
-
-        if not all_versions:
-            return None
+        local_versions = OpenPypeVersion.get_local_versions() if local else []
+        remote_versions = OpenPypeVersion.get_remote_versions() if remote else []  # noqa: E501
+        all_versions = local_versions + remote_versions + [installed_version]

         all_versions.sort()
         return all_versions[-1]
@@ -705,7 +560,7 @@ class BootstrapRepos:
         """Get path for specific version in list of OpenPype versions.

         Args:
-            version (str): Version string to look for (1.2.4+staging)
+            version (str): Version string to look for (1.2.4-nightly.1+test)
             version_list (list of OpenPypeVersion): list of version to search.

         Returns:
@@ -807,6 +662,8 @@
         """
         version = OpenPypeVersion.version_in_str(zip_file.name)
         destination_dir = self.data_dir / f"{version.major}.{version.minor}"
+        if not destination_dir.exists():
+            destination_dir.mkdir(parents=True)
         destination = destination_dir / zip_file.name

         if destination.exists():
@@ -1131,14 +988,12 @@
     @staticmethod
     def find_openpype_version(
-        version: Union[str, OpenPypeVersion],
-        staging: bool
+        version: Union[str, OpenPypeVersion]
     ) -> Union[OpenPypeVersion, None]:
         """Find location of specified OpenPype version.

         Args:
             version (Union[str, OpenPypeVersion): Version to find.
-            staging (bool): Filter staging versions.

         Returns:
             requested OpenPypeVersion.
@@ -1151,9 +1006,7 @@
         if installed_version == version:
             return installed_version

-        local_versions = OpenPypeVersion.get_local_versions(
-            staging=staging, production=not staging
-        )
+        local_versions = OpenPypeVersion.get_local_versions()
         zip_version = None
         for local_version in local_versions:
             if local_version == version:
@@ -1165,37 +1018,25 @@
         if zip_version is not None:
             return zip_version

-        remote_versions = OpenPypeVersion.get_remote_versions(
-            staging=staging, production=not staging
-        )
-        for remote_version in remote_versions:
-            if remote_version == version:
-                return remote_version
-        return None
+        remote_versions = OpenPypeVersion.get_remote_versions()
+        return next(
+            (
+                remote_version for remote_version in remote_versions
+                if remote_version == version
+            ), None)

     @staticmethod
-    def find_latest_openpype_version(
-        staging: bool
-    ) -> Union[OpenPypeVersion, None]:
+    def find_latest_openpype_version() -> Union[OpenPypeVersion, None]:
         """Find the latest available OpenPype version in all location.

-        Args:
-            staging (bool): True to look for staging versions.
-
         Returns:
             Latest OpenPype version on None if nothing was found.

         """
         installed_version = OpenPypeVersion.get_installed_version()
-        local_versions = OpenPypeVersion.get_local_versions(
-            staging=staging
-        )
-        remote_versions = OpenPypeVersion.get_remote_versions(
-            staging=staging
-        )
-        all_versions = local_versions + remote_versions
-        if not staging:
-            all_versions.append(installed_version)
+        local_versions = OpenPypeVersion.get_local_versions()
+        remote_versions = OpenPypeVersion.get_remote_versions()
+        all_versions = local_versions + remote_versions + [installed_version]

         if not all_versions:
             return None
@@ -1215,7 +1056,6 @@
     def find_openpype(
             self,
             openpype_path: Union[Path, str] = None,
-            staging: bool = False,
             include_zips: bool = False
     ) -> Union[List[OpenPypeVersion], None]:
         """Get ordered dict of detected OpenPype version.
@@ -1229,8 +1069,6 @@
         Args:
             openpype_path (Path or str, optional): Try to find OpenPype on
                 the given path or url.
-            staging (bool, optional): Filter only staging version, skip them
-                otherwise.
             include_zips (bool, optional): If set True it will try to find
                 OpenPype in zip files in given directory.
@@ -1278,7 +1116,7 @@
         for dir_to_search in dirs_to_search:
             try:
                 openpype_versions += self.get_openpype_versions(
-                    dir_to_search, staging)
+                    dir_to_search)
             except ValueError:
                 # location is invalid, skip it
                 pass
@@ -1643,15 +1481,11 @@
             return False
         return True

-    def get_openpype_versions(
-            self,
-            openpype_dir: Path,
-            staging: bool = False) -> list:
+    def get_openpype_versions(self, openpype_dir: Path) -> list:
         """Get all detected OpenPype versions in directory.

         Args:
             openpype_dir (Path): Directory to scan.
-            staging (bool, optional): Find staging versions if True.

         Returns:
             list of OpenPypeVersion
@@ -1669,8 +1503,7 @@
         for item in openpype_dir.iterdir():
             # if the item is directory with major.minor version, dive deeper
             if item.is_dir() and re.match(r"^\d+\.\d+$", item.name):
-                _versions = self.get_openpype_versions(
-                    item, staging=staging)
+                _versions = self.get_openpype_versions(item)
                 if _versions:
                     openpype_versions += _versions
@@ -1693,11 +1526,7 @@
                 continue

             detected_version.path = item
-            if staging and detected_version.is_staging():
-                openpype_versions.append(detected_version)
-
-            if not staging and not detected_version.is_staging():
-                openpype_versions.append(detected_version)
+            openpype_versions.append(detected_version)

         return sorted(openpype_versions)

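Taken together, the changes in this file remove the staging flavour from `OpenPypeVersion` entirely, so every query API loses its `production`/`staging` parameters. A sketch of the simplified call sites; the module path and the version string are assumptions for illustration, not taken from the diff:

```python
# assumes the igniter package is importable from the current environment
from igniter.bootstrap_repos import BootstrapRepos, OpenPypeVersion

# both now take no filtering arguments and return every detected version
local_versions = OpenPypeVersion.get_local_versions()
remote_versions = OpenPypeVersion.get_remote_versions()

# latest version across installed + local + remote, no staging switch
latest = OpenPypeVersion.get_latest_version(local=True, remote=True)

# lookups are likewise flag-free now (example version string)
exact = BootstrapRepos.find_openpype_version("3.14.7")
newest = BootstrapRepos.find_latest_openpype_version()
```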
View file

@@ -5,9 +5,7 @@ import sys
 import re
 import collections

-from Qt import QtCore, QtGui, QtWidgets  # noqa
-from Qt.QtGui import QValidator  # noqa
-from Qt.QtCore import QTimer  # noqa
+from qtpy import QtCore, QtGui, QtWidgets

 from .install_thread import InstallThread
 from .tools import (

View file

@@ -4,7 +4,7 @@ import os
 import sys
 from pathlib import Path

-from Qt.QtCore import QThread, Signal, QObject  # noqa
+from qtpy import QtCore

 from .bootstrap_repos import (
     BootstrapRepos,
@@ -17,7 +17,7 @@ from .bootstrap_repos import (
 from .tools import validate_mongo_connection


-class InstallThread(QThread):
+class InstallThread(QtCore.QThread):
     """Install Worker thread.

     This class takes care of finding OpenPype version on user entered path
@@ -28,15 +28,14 @@ class InstallThread(QThread):
     user data dir.

     """
-    progress = Signal(int)
-    message = Signal((str, bool))
+    progress = QtCore.Signal(int)
+    message = QtCore.Signal((str, bool))

     def __init__(self, parent=None,):
         self._mongo = None
-        self._path = None
         self._result = None

-        QThread.__init__(self, parent)
+        super().__init__(parent)

     def result(self):
         """Result of finished installation."""
@@ -62,143 +61,117 @@
             progress_callback=self.set_progress, message=self.message)
         local_version = OpenPypeVersion.get_installed_version_str()

-        # if user did enter nothing, we install OpenPype from local version.
-        # zip content of `repos`, copy it to user data dir and append
-        # version to it.
-        if not self._path:
-            # user did not entered url
-            if not self._mongo:
-                # it not set in environment
-                if not os.getenv("OPENPYPE_MONGO"):
-                    # try to get it from settings registry
-                    try:
-                        self._mongo = bs.secure_registry.get_item(
-                            "openPypeMongo")
-                    except ValueError:
-                        self.message.emit(
-                            "!!! We need MongoDB URL to proceed.", True)
-                        self._set_result(-1)
-                        return
-                else:
-                    self._mongo = os.getenv("OPENPYPE_MONGO")
-            else:
-                self.message.emit("Saving mongo connection string ...", False)
-                bs.secure_registry.set_item("openPypeMongo", self._mongo)
-            os.environ["OPENPYPE_MONGO"] = self._mongo
-            self.message.emit(
-                f"Detecting installed OpenPype versions in {bs.data_dir}",
-                False)
-            detected = bs.find_openpype(include_zips=True)
-            if detected:
-                if not OpenPypeVersion.get_installed_version().is_compatible(
-                        detected[-1]):
-                    self.message.emit((
-                        f"Latest detected version {detected[-1]} "
-                        "is not compatible with the currently running "
-                        f"{local_version}"
-                    ), True)
-                    self.message.emit((
-                        "Filtering detected versions to compatible ones..."
-                    ), False)
-                    detected = [
-                        version for version in detected
-                        if version.is_compatible(
-                            OpenPypeVersion.get_installed_version())
-                    ]
-                if OpenPypeVersion(
-                        version=local_version, path=Path()) < detected[-1]:
-                    self.message.emit((
-                        f"Latest installed version {detected[-1]} is newer "
-                        f"then currently running {local_version}"
-                    ), False)
-                    self.message.emit("Skipping OpenPype install ...", False)
-                    if detected[-1].path.suffix.lower() == ".zip":
-                        bs.extract_openpype(detected[-1])
-                    self._set_result(0)
-                    return
-                if OpenPypeVersion(version=local_version).get_main_version() == detected[-1].get_main_version():  # noqa
-                    self.message.emit((
-                        f"Latest installed version is the same as "
-                        f"currently running {local_version}"
-                    ), False)
-                    self.message.emit("Skipping OpenPype install ...", False)
-                    self._set_result(0)
-                    return
-                self.message.emit((
-                    "All installed versions are older then "
-                    f"currently running one {local_version}"
-                ), False)
-            else:
-                if getattr(sys, 'frozen', False):
-                    self.message.emit("None detected.", True)
-                    self.message.emit(("We will use OpenPype coming with "
-                                       "installer."), False)
-                    openpype_version = bs.create_version_from_frozen_code()
-                    if not openpype_version:
-                        self.message.emit(
-                            f"!!! Install failed - {openpype_version}", True)
-                        self._set_result(-1)
-                        return
-                    self.message.emit(f"Using: {openpype_version}", False)
-                    bs.install_version(openpype_version)
-                    self.message.emit(f"Installed as {openpype_version}", False)
-                    self.progress.emit(100)
-                    self._set_result(1)
-                    return
-                else:
-                    self.message.emit("None detected.", False)
-            self.message.emit(
-                f"We will use local OpenPype version {local_version}", False)
-            local_openpype = bs.create_version_from_live_code()
-            if not local_openpype:
-                self.message.emit(
-                    f"!!! Install failed - {local_openpype}", True)
-                self._set_result(-1)
-                return
-            try:
-                bs.install_version(local_openpype)
-            except (OpenPypeVersionExists,
-                    OpenPypeVersionInvalid,
-                    OpenPypeVersionIOError) as e:
-                self.message.emit(f"Installed failed: ", True)
-                self.message.emit(str(e), True)
-                self._set_result(-1)
-                return
-            self.message.emit(f"Installed as {local_openpype}", False)
-            self.progress.emit(100)
-            self._set_result(1)
-            return
-        else:
-            # if we have mongo connection string, validate it, set it to
-            # user settings and get OPENPYPE_PATH from there.
-            if self._mongo:
-                if not validate_mongo_connection(self._mongo):
-                    self.message.emit(
-                        f"!!! invalid mongo url {self._mongo}", True)
-                    self._set_result(-1)
-                    return
-                bs.secure_registry.set_item("openPypeMongo", self._mongo)
-                os.environ["OPENPYPE_MONGO"] = self._mongo
-            self.message.emit(f"processing {self._path}", True)
-            repo_file = bs.process_entered_location(self._path)
-            if not repo_file:
-                self.message.emit("!!! Cannot install", True)
-                self._set_result(-1)
-                return
+        # user did not entered url
+        if self._mongo:
+            self.message.emit("Saving mongo connection string ...", False)
+            bs.secure_registry.set_item("openPypeMongo", self._mongo)
+        elif os.getenv("OPENPYPE_MONGO"):
+            self._mongo = os.getenv("OPENPYPE_MONGO")
+        else:
+            # try to get it from settings registry
+            try:
+                self._mongo = bs.secure_registry.get_item(
+                    "openPypeMongo")
+            except ValueError:
+                self.message.emit(
+                    "!!! We need MongoDB URL to proceed.", True)
+                self._set_result(-1)
+                return
+        os.environ["OPENPYPE_MONGO"] = self._mongo
+
+        self.message.emit(
+            f"Detecting installed OpenPype versions in {bs.data_dir}",
+            False)
+        detected = bs.find_openpype(include_zips=True)
+
+        if not detected and getattr(sys, 'frozen', False):
+            self.message.emit("None detected.", True)
+            self.message.emit(("We will use OpenPype coming with "
+                               "installer."), False)
+            openpype_version = bs.create_version_from_frozen_code()
+            if not openpype_version:
+                self.message.emit(
+                    f"!!! Install failed - {openpype_version}", True)
+                self._set_result(-1)
+                return
+            self.message.emit(f"Using: {openpype_version}", False)
+            bs.install_version(openpype_version)
+            self.message.emit(f"Installed as {openpype_version}", False)
+            self.progress.emit(100)
+            self._set_result(1)
+            return
+
+        if detected and not OpenPypeVersion.get_installed_version().is_compatible(detected[-1]):  # noqa: E501
+            self.message.emit((
+                f"Latest detected version {detected[-1]} "
+                "is not compatible with the currently running "
+                f"{local_version}"
+            ), True)
+            self.message.emit((
+                "Filtering detected versions to compatible ones..."
+            ), False)
+            # filter results to get only compatible versions
+            detected = [
+                version for version in detected
+                if version.is_compatible(
+                    OpenPypeVersion.get_installed_version())
+            ]
+
+        if detected:
+            if OpenPypeVersion(
+                    version=local_version, path=Path()) < detected[-1]:
+                self.message.emit((
+                    f"Latest installed version {detected[-1]} is newer "
+                    f"then currently running {local_version}"
+                ), False)
+                self.message.emit("Skipping OpenPype install ...", False)
+                if detected[-1].path.suffix.lower() == ".zip":
+                    bs.extract_openpype(detected[-1])
+                self._set_result(0)
+                return
+
+            if OpenPypeVersion(version=local_version).get_main_version() == detected[-1].get_main_version():  # noqa: E501
+                self.message.emit((
+                    f"Latest installed version is the same as "
+                    f"currently running {local_version}"
+                ), False)
+                self.message.emit("Skipping OpenPype install ...", False)
+                self._set_result(0)
+                return
+
+            self.message.emit((
+                "All installed versions are older then "
+                f"currently running one {local_version}"
+            ), False)
+
+        self.message.emit("None detected.", False)
+        self.message.emit(
+            f"We will use local OpenPype version {local_version}", False)
+
+        local_openpype = bs.create_version_from_live_code()
+        if not local_openpype:
+            self.message.emit(
+                f"!!! Install failed - {local_openpype}", True)
+            self._set_result(-1)
+            return
+
+        try:
+            bs.install_version(local_openpype)
+        except (OpenPypeVersionExists,
+                OpenPypeVersionInvalid,
+                OpenPypeVersionIOError) as e:
+            self.message.emit(f"Installed failed: ", True)
+            self.message.emit(str(e), True)
+            self._set_result(-1)
+            return
+
+        self.message.emit(f"Installed as {local_openpype}", False)
+        self.progress.emit(100)
+        self._set_result(1)
+        return
         self.progress.emit(100)
         self._set_result(1)
         return

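The structural part of this refactor, separate from the control-flow flattening, is the Qt binding migration. A minimal, self-contained sketch of the pattern the file settles on: subclass `QtCore.QThread` from qtpy, declare signals via `QtCore.Signal`, and initialize with `super().__init__(parent)`. The worker class here is hypothetical:

```python
from qtpy import QtCore


class WorkerThread(QtCore.QThread):
    # signals declared through the qtpy namespace instead of bare Signal
    progress = QtCore.Signal(int)
    message = QtCore.Signal(str, bool)

    def __init__(self, parent=None):
        self._result = None
        super().__init__(parent)  # replaces QThread.__init__(self, parent)

    def run(self):
        # emitted values reach slots connected from the GUI thread
        self.message.emit("working ...", False)
        self.progress.emit(100)
        self._result = 1
```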
View file

@@ -1,4 +1,4 @@
-from Qt import QtWidgets, QtGui
+from qtpy import QtWidgets, QtGui

 from .tools import (
     load_stylesheet,

View file

@@ -1,4 +1,4 @@
-from Qt import QtCore, QtGui, QtWidgets  # noqa
+from qtpy import QtWidgets


 class NiceProgressBar(QtWidgets.QProgressBar):

View file

@@ -153,7 +153,8 @@ def get_openpype_global_settings(url: str) -> dict:
     # Create mongo connection
     client = MongoClient(url, **kwargs)
     # Access settings collection
-    col = client["openpype"]["settings"]
+    openpype_db = os.environ.get("OPENPYPE_DATABASE_NAME") or "openpype"
+    col = client[openpype_db]["settings"]
     # Query global settings
     global_settings = col.find_one({"type": "global_settings"}) or {}
     # Close Mongo connection
@@ -184,11 +185,7 @@ def get_openpype_path_from_settings(settings: dict) -> Union[str, None]:
     if paths and isinstance(paths, str):
         paths = [paths]

-    # Loop over paths and return only existing
-    for path in paths:
-        if os.path.exists(path):
-            return path
-    return None
+    return next((path for path in paths if os.path.exists(path)), None)


 def get_expected_studio_version_str(
@@ -206,10 +203,7 @@ def get_expected_studio_version_str(
     mongo_url = os.environ.get("OPENPYPE_MONGO")
     if global_settings is None:
         global_settings = get_openpype_global_settings(mongo_url)
-    if staging:
-        key = "staging_version"
-    else:
-        key = "production_version"
+    key = "staging_version" if staging else "production_version"
     return global_settings.get(key) or ""

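The new `OPENPYPE_DATABASE_NAME` override makes the settings database selectable per process, which is useful for test isolation. A sketch of how a caller might use it; the import path is assumed and the connection string is a placeholder:

```python
import os

# opt-in override; the code above falls back to "openpype" when unset or empty
os.environ["OPENPYPE_DATABASE_NAME"] = "openpype_tests"

# imported after setting the variable so the override is in effect
from igniter.tools import get_openpype_global_settings  # assumed import path

settings = get_openpype_global_settings("mongodb://localhost:27017")
print(settings.get("openpype_path"))
```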
View file

@@ -1,6 +1,6 @@
 # -*- coding: utf-8 -*-
 """Working thread for update."""
-from Qt.QtCore import QThread, Signal, QObject  # noqa
+from qtpy import QtCore

 from .bootstrap_repos import (
     BootstrapRepos,
@@ -8,7 +8,7 @@ from .bootstrap_repos import (
 )


-class UpdateThread(QThread):
+class UpdateThread(QtCore.QThread):
     """Install Worker thread.

     This class takes care of finding OpenPype version on user entered path
@@ -19,13 +19,13 @@ class UpdateThread(QThread):
     user data dir.

     """
-    progress = Signal(int)
-    message = Signal((str, bool))
+    progress = QtCore.Signal(int)
+    message = QtCore.Signal((str, bool))

     def __init__(self, parent=None):
         self._result = None
         self._openpype_version = None
-        QThread.__init__(self, parent)
+        super().__init__(parent)

     def set_version(self, openpype_version: OpenPypeVersion):
         self._openpype_version = openpype_version

View file

@@ -1,8 +1,10 @@
 # -*- coding: utf-8 -*-
 """Progress window to show when OpenPype is updating/installing locally."""
 import os

+from qtpy import QtCore, QtGui, QtWidgets
+
 from .update_thread import UpdateThread
-from Qt import QtCore, QtGui, QtWidgets  # noqa
 from .bootstrap_repos import OpenPypeVersion
 from .nice_progress_bar import NiceProgressBar
 from .tools import load_stylesheet
@@ -47,7 +49,6 @@ class UpdateWindow(QtWidgets.QDialog):
         self._update_thread = None

-        self.resize(QtCore.QSize(self._width, self._height))
         self._init_ui()

         # Set stylesheet
@@ -79,6 +80,16 @@ class UpdateWindow(QtWidgets.QDialog):

         self._progress_bar = progress_bar

+    def showEvent(self, event):
+        super().showEvent(event)
+        current_size = self.size()
+        new_size = QtCore.QSize(
+            max(current_size.width(), self._width),
+            max(current_size.height(), self._height)
+        )
+        if current_size != new_size:
+            self.resize(new_size)
+
     def _run_update(self):
         """Start install process.

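Moving the `resize()` out of `__init__` and into `showEvent` means the dialog only grows toward its design size on first show, never shrinking a size the layout has already computed. The same pattern in isolation, on a hypothetical dialog rather than the real `UpdateWindow`:

```python
from qtpy import QtCore, QtWidgets


class SizedDialog(QtWidgets.QDialog):
    # design-time minimum size; placeholder values
    _width = 500
    _height = 300

    def showEvent(self, event):
        super().showEvent(event)
        current_size = self.size()
        # grow each dimension to the design minimum, keep anything larger
        new_size = QtCore.QSize(
            max(current_size.width(), self._width),
            max(current_size.height(), self._height)
        )
        if current_size != new_size:
            self.resize(new_size)
```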
View file

@@ -48,8 +48,8 @@ Source: "build\{#build}\*"; DestDir: "{app}"; Flags: ignoreversion recursesubdir
 ; NOTE: Don't use "Flags: ignoreversion" on any shared system files

 [Icons]
-Name: "{autoprograms}\{#MyAppName}"; Filename: "{app}\openpype_gui.exe"
-Name: "{autodesktop}\{#MyAppName}"; Filename: "{app}\openpype_gui.exe"; Tasks: desktopicon
+Name: "{autoprograms}\{#MyAppName} {#AppVer}"; Filename: "{app}\openpype_gui.exe"
+Name: "{autodesktop}\{#MyAppName} {#AppVer}"; Filename: "{app}\openpype_gui.exe"; Tasks: desktopicon

 [Run]
 Filename: "{app}\openpype_gui.exe"; Description: "{cm:LaunchProgram,OpenPype}"; Flags: nowait postinstall skipifsilent

View file

@@ -16,14 +16,15 @@ from .pype_commands import PypeCommands
 @click.option("--use-staging", is_flag=True,
               expose_value=False, help="use staging variants")
 @click.option("--list-versions", is_flag=True, expose_value=False,
-              help=("list all detected versions. Use With `--use-staging "
-                    "to list staging versions."))
+              help="list all detected versions.")
 @click.option("--validate-version", expose_value=False,
               help="validate given version integrity")
 @click.option("--debug", is_flag=True, expose_value=False,
-              help=("Enable debug"))
+              help="Enable debug")
 @click.option("--verbose", expose_value=False,
               help=("Change OpenPype log level (debug - critical or 0-50)"))
+@click.option("--automatic-tests", is_flag=True, expose_value=False,
+              help=("Run in automatic tests mode"))
 def main(ctx):
     """Pype is main command serving as entry point to pipeline system.
@@ -429,20 +430,18 @@ def unpack_project(zipfile, root):
 @main.command()
 def interactive():
-    """Interative (Python like) console.
+    """Interactive (Python like) console.

-    Helpfull command not only for development to directly work with python
+    Helpful command not only for development to directly work with python
     interpreter.

     Warning:
-        Executable 'openpype_gui' on windows won't work.
+        Executable 'openpype_gui' on Windows won't work.

     """
     from openpype.version import __version__

-    banner = "OpenPype {}\nPython {} on {}".format(
-        __version__, sys.version, sys.platform
-    )
+    banner = f"OpenPype {__version__}\nPython {sys.version} on {sys.platform}"
     code.interact(banner)

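A note on the option pattern above: with `expose_value=False`, click validates and accepts the flag but does not pass it to the command callback, so the surrounding application is expected to read such bootstrap flags earlier in startup (for example from `sys.argv`). A stand-alone sketch of that behavior:

```python
import click


@click.group(invoke_without_command=True)
@click.option("--automatic-tests", is_flag=True, expose_value=False,
              help="Run in automatic tests mode")
@click.pass_context
def main(ctx):
    """Sketch entry point: the flag is absent from the signature."""
    click.echo("flag was accepted but not passed to the callback")


if __name__ == "__main__":
    main()
```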
View file

@@ -10,30 +10,15 @@ from .launch_logic import (
 )

 from .pipeline import (
+    AfterEffectsHost,
     ls,
-    get_asset_settings,
-    install,
-    uninstall,
-    list_instances,
-    remove_instance,
-    containerise,
-    get_context_data,
-    update_context_data,
-    get_context_title
-)
-
-from .workio import (
-    file_extensions,
-    has_unsaved_changes,
-    save_file,
-    open_file,
-    current_file,
-    work_root,
+    containerise
 )

 from .lib import (
     maintained_selection,
-    get_extension_manifest_path
+    get_extension_manifest_path,
+    get_asset_settings
 )

 from .plugin import (
@@ -48,26 +33,12 @@ __all__ = [
     # pipeline
     "ls",
-    "get_asset_settings",
-    "install",
-    "uninstall",
-    "list_instances",
-    "remove_instance",
     "containerise",
-    "get_context_data",
-    "update_context_data",
-    "get_context_title",
-
-    "file_extensions",
-    "has_unsaved_changes",
-    "save_file",
-    "open_file",
-    "current_file",
-    "work_root",

     # lib
     "maintained_selection",
     "get_extension_manifest_path",
+    "get_asset_settings",

     # plugin
     "AfterEffectsLoader"

View file

@@ -1,5 +1,5 @@
 <?xml version="1.0" encoding="UTF-8"?>
-<ExtensionManifest Version="8.0" ExtensionBundleId="com.openpype.AE.panel" ExtensionBundleVersion="1.0.23"
+<ExtensionManifest Version="8.0" ExtensionBundleId="com.openpype.AE.panel" ExtensionBundleVersion="1.0.24"
 	ExtensionBundleName="openpype" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
 	<ExtensionList>
 		<Extension Id="com.openpype.AE.panel" Version="1.0" />

View file

@@ -38,17 +38,6 @@
         });
     </script>

-    <script type=text/javascript>
-        $(function() {
-            $("a#creator-button").bind("click", function() {
-                RPC.call('AfterEffects.creator_route').then(function (data) {
-                }, function (error) {
-                    alert(error);
-                });
-            });
-        });
-    </script>
-
     <script type=text/javascript>
         $(function() {
             $("a#loader-button").bind("click", function() {
@@ -82,17 +71,6 @@
         });
     </script>

-    <script type=text/javascript>
-        $(function() {
-            $("a#subsetmanager-button").bind("click", function() {
-                RPC.call('AfterEffects.subsetmanager_route').then(function (data) {
-                }, function (error) {
-                    alert(error);
-                });
-            });
-        });
-    </script>
-
     <script type=text/javascript>
         $(function() {
             $("a#experimental-button").bind("click", function() {
@@ -113,11 +91,9 @@
     <div>
     <div></div><a href=# id=workfiles-button><button class="hostFontSize">Workfiles...</button></a></div>
-    <div> <a href=# id=creator-button><button class="hostFontSize">Create...</button></a></div>
     <div><a href=# id=loader-button><button class="hostFontSize">Load...</button></a></div>
     <div><a href=# id=publish-button><button class="hostFontSize">Publish...</button></a></div>
     <div><a href=# id=sceneinventory-button><button class="hostFontSize">Manage...</button></a></div>
-    <div><a href=# id=subsetmanager-button><button class="hostFontSize">Subset Manager...</button></a></div>
     <div><a href=# id=experimental-button><button class="hostFontSize">Experimental Tools...</button></a></div>
     </div>

View file

@@ -237,7 +237,7 @@ function main(websocket_url){
     RPC.addRoute('AfterEffects.get_render_info', function (data) {
         log.warn('Server called client route "get_render_info":', data);
-        return runEvalScript("getRenderInfo()")
+        return runEvalScript("getRenderInfo(" + data.comp_id + ")")
             .then(function(result){
                 log.warn("get_render_info: " + result);
                 return result;
@@ -289,7 +289,7 @@ function main(websocket_url){
     RPC.addRoute('AfterEffects.render', function (data) {
         log.warn('Server called client route "render":', data);
         var escapedPath = EscapeStringForJSX(data.folder_url);
-        return runEvalScript("render('" + escapedPath + "')")
+        return runEvalScript("render('" + escapedPath + "', " + data.comp_id + ")")
             .then(function(result){
                 log.warn("render: " + result);
                 return result;

View file

@@ -395,41 +395,84 @@ function saveAs(path){
     app.project.save(fp = new File(path));
 }

-function getRenderInfo(){
+function getRenderInfo(comp_id){
     /***
         Get info from render queue.
         Currently pulls only file name to parse extension and
         if it is sequence in Python
+
+        Args:
+            comp_id (int): id of composition
+        Return:
+            (list) [{file_name:"xx.png", width:00, height:00}]
     **/
+    var item = app.project.itemByID(comp_id);
+    if (!item){
+        return _prepareError("Composition with '" + comp_id + "' wasn't found! Recreate publishable instance(s)")
+    }
+    var comp_name = item.name;
+    var output_metadata = []
     try{
-        var render_item = app.project.renderQueue.item(1);
-        if (render_item.status == RQItemStatus.DONE){
-            render_item.duplicate(); // create new, cannot change status if DONE
-            render_item.remove(); // remove existing to limit duplications
-            render_item = app.project.renderQueue.item(1);
-        }
-        render_item.render = true; // always set render queue to render
-        var item = render_item.outputModule(1);
+        // render_item.duplicate() should create new item on renderQueue
+        // BUT it works only sometimes, there are some weird synchronization issue
+        // this method will be called always before render, so prepare items here
+        // for render to spare the hassle
+        for (i = 1; i <= app.project.renderQueue.numItems; ++i){
+            var render_item = app.project.renderQueue.item(i);
+            if (render_item.comp.id != comp_id){
+                continue;
+            }
+            if (render_item.status == RQItemStatus.DONE){
+                render_item.duplicate(); // create new, cannot change status if DONE
+                render_item.remove(); // remove existing to limit duplications
+                continue;
+            }
+        }
+        // properly validate as `numItems` won't change magically
+        var comp_id_count = 0;
+        for (i = 1; i <= app.project.renderQueue.numItems; ++i){
+            var render_item = app.project.renderQueue.item(i);
+            if (render_item.comp.id != comp_id){
+                continue;
+            }
+            comp_id_count += 1;
+            var item = render_item.outputModule(1);
+            for (j = 1; j <= render_item.numOutputModules; ++j){
+                var file_url = item.file.toString();
+                output_metadata.push(
+                    JSON.stringify({
+                        "file_name": file_url,
+                        "width": render_item.comp.width,
+                        "height": render_item.comp.height
+                    })
+                );
+            }
+        }
     } catch (error) {
         return _prepareError("There is no render queue, create one");
     }
-    var file_url = item.file.toString();
-    return JSON.stringify({
-        "file_name": file_url,
-        "width": render_item.comp.width,
-        "height": render_item.comp.height
-    })
+
+    if (comp_id_count > 1){
+        return _prepareError("There cannot be more items in Render Queue for '" + comp_name + "'!")
+    }
+    if (comp_id_count == 0){
+        return _prepareError("There is no item in Render Queue for '" + comp_name + "'! Add composition to Render Queue.")
+    }
+
+    return '[' + output_metadata.join() + ']';
 }

 function getAudioUrlForComp(comp_id){
     /**
      * Searches composition for audio layer
      *
      * Only single AVLayer is expected!
      * Used for collecting Audio
      *
      * Args:
      *     comp_id (int): id of composition
      * Return:
@@ -457,7 +500,7 @@ function addItemAsLayerToComp(comp_id, item_id, found_comp){
     /**
      * Adds already imported FootageItem ('item_id') as a new
      * layer to composition ('comp_id').
      *
      * Args:
      *     comp_id (int): id of target composition
      *     item_id (int): FootageItem.id
@@ -480,17 +523,17 @@
 function importBackground(comp_id, composition_name, files_to_import){
     /**
      * Imports backgrounds images to existing or new composition.
      *
      * If comp_id is not provided, new composition is created, basic
      * values (width, heights, frameRatio) takes from first imported
      * image.
      *
      * Args:
      *     comp_id (int): id of existing composition (null if new)
      *     composition_name (str): used when new composition
      *     files_to_import (list): list of absolute paths to import and
      *         add as layers
      *
      * Returns:
      *     (str): json representation (id, name, members)
      */
@@ -512,7 +555,7 @@ function importBackground(comp_id, composition_name, files_to_import){
             }
         }
     }

     if (files_to_import){
         for (i = 0; i < files_to_import.length; ++i){
             item = _importItem(files_to_import[i]);
@@ -524,8 +567,8 @@
             if (!comp){
                 folder = app.project.items.addFolder(composition_name);
                 imported_ids.push(folder.id);
                 comp = app.project.items.addComp(composition_name, item.width,
                                                  item.height, item.pixelAspect,
                                                  1, 26.7); // hardcode defaults
                 imported_ids.push(comp.id);
                 comp.parentFolder = folder;
@@ -534,7 +577,7 @@
             item.parentFolder = folder;
             addItemAsLayerToComp(comp.id, item.id, comp);
         }
     }
     var item = {"name": comp.name,
                 "id": folder.id,
@@ -545,19 +588,19 @@
 function reloadBackground(comp_id, composition_name, files_to_import){
     /**
      * Reloads existing composition.
      *
      * It deletes complete composition with encompassing folder, recreates
      * from scratch via 'importBackground' functionality.
      *
      * Args:
      *     comp_id (int): id of existing composition (null if new)
      *     composition_name (str): used when new composition
      *     files_to_import (list): list of absolute paths to import and
      *         add as layers
      *
      * Returns:
      *     (str): json representation (id, name, members)
      *
      */
     var imported_ids = []; // keep track of members of composition
     comp = app.project.itemByID(comp_id);
@@ -620,7 +663,7 @@
 function _get_file_name(file_url){
     /**
      * Returns file name without extension from 'file_url'
      *
      * Args:
      *     file_url (str): full absolute url
      * Returns:
@@ -635,7 +678,7 @@ function _delete_obsolete_items(folder, new_filenames){
/*** /***
* Goes through 'folder' and removes layers not in new * Goes through 'folder' and removes layers not in new
* background * background
* *
* Args: * Args:
* folder (FolderItem) * folder (FolderItem)
* new_filenames (array): list of layer names in new bg * new_filenames (array): list of layer names in new bg
@ -660,14 +703,14 @@ function _delete_obsolete_items(folder, new_filenames){
function _importItem(file_url){ function _importItem(file_url){
/** /**
* Imports 'file_url' as new FootageItem * Imports 'file_url' as new FootageItem
* *
* Args: * Args:
* file_url (str): file url with content * file_url (str): file url with content
* Returns: * Returns:
* (FootageItem) * (FootageItem)
*/ */
file_name = _get_file_name(file_url); file_name = _get_file_name(file_url);
//importFile prepared previously to return json //importFile prepared previously to return json
item_json = importFile(file_url, file_name, JSON.stringify({"ImportAsType":"FOOTAGE"})); item_json = importFile(file_url, file_name, JSON.stringify({"ImportAsType":"FOOTAGE"}));
item_json = JSON.parse(item_json); item_json = JSON.parse(item_json);
@ -689,30 +732,42 @@ function isFileSequence (item){
return false; return false;
} }
function render(target_folder){
    var out_dir = new Folder(target_folder);
    var out_dir = out_dir.fsName;
    for (i = 1; i <= app.project.renderQueue.numItems; ++i){
        var render_item = app.project.renderQueue.item(i);
        var om1 = app.project.renderQueue.item(i).outputModule(1);
        var file_name = File.decode( om1.file.name ).replace('℗', ''); // Name contains special character, space?
        var omItem1_settable_str = app.project.renderQueue.item(i).outputModule(1).getSettings( GetSettingsFormat.STRING_SETTABLE );
        if (render_item.status == RQItemStatus.DONE){
            render_item.duplicate();
            render_item.remove();
            continue;
        }
        var targetFolder = new Folder(target_folder);
        if (!targetFolder.exists) {
            targetFolder.create();
        }
        om1.file = new File(targetFolder.fsName + '/' + file_name);
    }
    app.project.renderQueue.render();
}

function render(target_folder, comp_id){
    var out_dir = new Folder(target_folder);
    var out_dir = out_dir.fsName;
    for (i = 1; i <= app.project.renderQueue.numItems; ++i){
        var render_item = app.project.renderQueue.item(i);
        var composition = render_item.comp;
        if (composition.id == comp_id){
            if (render_item.status == RQItemStatus.DONE){
                var new_item = render_item.duplicate();
                render_item.remove();
                render_item = new_item;
            }
            render_item.render = true;
            var om1 = app.project.renderQueue.item(i).outputModule(1);
            var file_name = File.decode( om1.file.name ).replace('℗', ''); // Name contains special character, space?
            var omItem1_settable_str = app.project.renderQueue.item(i).outputModule(1).getSettings( GetSettingsFormat.STRING_SETTABLE );
            var targetFolder = new Folder(target_folder);
            if (!targetFolder.exists) {
                targetFolder.create();
            }
            om1.file = new File(targetFolder.fsName + '/' + file_name);
        }else{
            if (render_item.status != RQItemStatus.DONE){
                render_item.render = false;
            }
        }
    }
    app.beginSuppressDialogs();
    app.project.renderQueue.render();
    app.endSuppressDialogs(false);
}
function close(){ function close(){
View file
@ -284,9 +284,6 @@ class AfterEffectsRoute(WebSocketRoute):
return await self.socket.call('aftereffects.read') return await self.socket.call('aftereffects.read')
# panel routes for tools # panel routes for tools
async def creator_route(self):
self._tool_route("creator")
async def workfiles_route(self): async def workfiles_route(self):
self._tool_route("workfiles") self._tool_route("workfiles")
@ -294,14 +291,11 @@ class AfterEffectsRoute(WebSocketRoute):
self._tool_route("loader") self._tool_route("loader")
async def publish_route(self): async def publish_route(self):
self._tool_route("publish") self._tool_route("publisher")
async def sceneinventory_route(self): async def sceneinventory_route(self):
self._tool_route("sceneinventory") self._tool_route("sceneinventory")
async def subsetmanager_route(self):
self._tool_route("subsetmanager")
async def experimental_tools_route(self): async def experimental_tools_route(self):
self._tool_route("experimental_tools") self._tool_route("experimental_tools")
View file
@ -13,6 +13,7 @@ from openpype.pipeline import install_host
from openpype.modules import ModulesManager from openpype.modules import ModulesManager
from openpype.tools.utils import host_tools from openpype.tools.utils import host_tools
from openpype.tests.lib import is_in_tests
from .launch_logic import ProcessLauncher, get_stub from .launch_logic import ProcessLauncher, get_stub
log = logging.getLogger(__name__) log = logging.getLogger(__name__)
@ -26,9 +27,10 @@ def safe_excepthook(*args):
def main(*subprocess_args): def main(*subprocess_args):
sys.excepthook = safe_excepthook sys.excepthook = safe_excepthook
from openpype.hosts.aftereffects import api from openpype.hosts.aftereffects.api import AfterEffectsHost
install_host(api) host = AfterEffectsHost()
install_host(host)
os.environ["OPENPYPE_LOG_NO_COLORS"] = "False" os.environ["OPENPYPE_LOG_NO_COLORS"] = "False"
app = QtWidgets.QApplication([]) app = QtWidgets.QApplication([])
@ -46,7 +48,7 @@ def main(*subprocess_args):
webpublisher_addon.headless_publish, webpublisher_addon.headless_publish,
log, log,
"CloseAE", "CloseAE",
os.environ.get("IS_TEST") is_in_tests()
) )
) )
@ -133,3 +135,32 @@ def get_background_layers(file_url):
layer.get("filename")). layer.get("filename")).
replace("\\", "/")) replace("\\", "/"))
return layers return layers
def get_asset_settings(asset_doc):
"""Get settings on current asset from database.
Returns:
dict: Scene data.
"""
asset_data = asset_doc["data"]
fps = asset_data.get("fps")
frame_start = asset_data.get("frameStart")
frame_end = asset_data.get("frameEnd")
handle_start = asset_data.get("handleStart")
handle_end = asset_data.get("handleEnd")
resolution_width = asset_data.get("resolutionWidth")
resolution_height = asset_data.get("resolutionHeight")
duration = (frame_end - frame_start + 1) + handle_start + handle_end
return {
"fps": fps,
"frameStart": frame_start,
"frameEnd": frame_end,
"handleStart": handle_start,
"handleEnd": handle_end,
"resolutionWidth": resolution_width,
"resolutionHeight": resolution_height,
"duration": duration
}
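get_asset_settings mirrors the asset document fields, with duration derived as (frameEnd - frameStart + 1) + handleStart + handleEnd. A quick sanity check of that arithmetic, using an illustrative asset document (all field values made up):

    # Illustrative asset document; only the fields read above matter.
    asset_doc = {"data": {
        "fps": 25.0,
        "frameStart": 1001, "frameEnd": 1050,
        "handleStart": 10, "handleEnd": 10,
        "resolutionWidth": 1920, "resolutionHeight": 1080,
    }}
    settings = get_asset_settings(asset_doc)
    # 50 content frames + 10 + 10 handle frames
    assert settings["duration"] == (1050 - 1001 + 1) + 10 + 10 == 70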
View file
@ -16,6 +16,13 @@ from openpype.pipeline import (
from openpype.pipeline.load import any_outdated_containers from openpype.pipeline.load import any_outdated_containers
import openpype.hosts.aftereffects import openpype.hosts.aftereffects
from openpype.host import (
HostBase,
IWorkfileHost,
ILoadHost,
IPublishHost
)
from .launch_logic import get_stub, ConnectionNotEstablishedYet from .launch_logic import get_stub, ConnectionNotEstablishedYet
log = Logger.get_logger(__name__) log = Logger.get_logger(__name__)
@ -30,27 +37,142 @@ LOAD_PATH = os.path.join(PLUGINS_DIR, "load")
CREATE_PATH = os.path.join(PLUGINS_DIR, "create") CREATE_PATH = os.path.join(PLUGINS_DIR, "create")
def install():
    print("Installing Pype config...")
    pyblish.api.register_host("aftereffects")
    pyblish.api.register_plugin_path(PUBLISH_PATH)
    register_loader_plugin_path(LOAD_PATH)
    register_creator_plugin_path(CREATE_PATH)
    log.info(PUBLISH_PATH)
    pyblish.api.register_callback(
        "instanceToggled", on_pyblish_instance_toggled
    )
    register_event_callback("application.launched", application_launch)


def uninstall():
    pyblish.api.deregister_plugin_path(PUBLISH_PATH)
    deregister_loader_plugin_path(LOAD_PATH)
    deregister_creator_plugin_path(CREATE_PATH)

class AfterEffectsHost(HostBase, IWorkfileHost, ILoadHost, IPublishHost):
    name = "aftereffects"

    def __init__(self):
        self._stub = None
        super(AfterEffectsHost, self).__init__()

    @property
    def stub(self):
        """
            Handle pulling stub from AE to run operations on host

        Returns:
            (AEServerStub) or None
        """
        if self._stub:
            return self._stub

        try:
            stub = get_stub()  # only after After Effects is up
        except ConnectionNotEstablishedYet:
            print("Not connected yet, ignoring")
            return

        if not stub.get_active_document_name():
            return

        self._stub = stub
        return self._stub

    def install(self):
        print("Installing Pype config...")

        pyblish.api.register_host("aftereffects")
        pyblish.api.register_plugin_path(PUBLISH_PATH)

        register_loader_plugin_path(LOAD_PATH)
        register_creator_plugin_path(CREATE_PATH)
        log.info(PUBLISH_PATH)

        pyblish.api.register_callback(
            "instanceToggled", on_pyblish_instance_toggled
        )

        register_event_callback("application.launched", application_launch)

    def get_workfile_extensions(self):
        return [".aep"]

    def save_workfile(self, dst_path=None):
        self.stub.saveAs(dst_path, True)

    def open_workfile(self, filepath):
        self.stub.open(filepath)
        return True

    def get_current_workfile(self):
        try:
            full_name = get_stub().get_active_document_full_name()
            if full_name and full_name != "null":
                return os.path.normpath(full_name).replace("\\", "/")
        except ValueError:
            print("Nothing opened")
            pass

        return None

    def get_containers(self):
        return ls()

    def get_context_data(self):
        meta = self.stub.get_metadata()
        for item in meta:
            if item.get("id") == "publish_context":
                item.pop("id")
                return item
        return {}

    def update_context_data(self, data, changes):
        item = data
        item["id"] = "publish_context"
        self.stub.imprint(item["id"], item)

    # created instances section
    def list_instances(self):
        """List all created instances from current workfile which
        will be published.

        Pulls from File > File Info

        For SubsetManager

        Returns:
            (list) of dictionaries matching instances format
        """
        stub = self.stub
        if not stub:
            return []

        instances = []
        layers_meta = stub.get_metadata()
        for instance in layers_meta:
            if instance.get("id") == "pyblish.avalon.instance":
                instances.append(instance)
        return instances

    def remove_instance(self, instance):
        """Remove instance from current workfile metadata.

        Updates metadata of current file in File > File Info and removes
        icon highlight on group layer.

        For SubsetManager

        Args:
            instance (dict): instance representation from subsetmanager model
        """
        stub = self.stub
        if not stub:
            return

        inst_id = instance.get("instance_id") or instance.get("uuid")  # legacy
        if not inst_id:
            log.warning("No instance identifier for {}".format(instance))
            return

        stub.remove_instance(inst_id)

        if instance.get("members"):
            item = stub.get_item(instance["members"][0])
            if item:
                stub.rename_item(item.id,
                                 item.name.replace(stub.PUBLISH_ICON, ''))
def application_launch(): def application_launch():
@ -63,35 +185,6 @@ def on_pyblish_instance_toggled(instance, old_value, new_value):
instance[0].Visible = new_value instance[0].Visible = new_value
def get_asset_settings(asset_doc):
"""Get settings on current asset from database.
Returns:
dict: Scene data.
"""
asset_data = asset_doc["data"]
fps = asset_data.get("fps")
frame_start = asset_data.get("frameStart")
frame_end = asset_data.get("frameEnd")
handle_start = asset_data.get("handleStart")
handle_end = asset_data.get("handleEnd")
resolution_width = asset_data.get("resolutionWidth")
resolution_height = asset_data.get("resolutionHeight")
duration = (frame_end - frame_start + 1) + handle_start + handle_end
return {
"fps": fps,
"frameStart": frame_start,
"frameEnd": frame_end,
"handleStart": handle_start,
"handleEnd": handle_end,
"resolutionWidth": resolution_width,
"resolutionHeight": resolution_height,
"duration": duration
}
def ls(): def ls():
"""Yields containers from active AfterEffects document. """Yields containers from active AfterEffects document.
@ -191,102 +284,17 @@ def containerise(name,
return comp return comp
# created instances section
def list_instances():
    """
        List all created instances from current workfile which
        will be published.

        Pulls from File > File Info

        For SubsetManager

    Returns:
        (list) of dictionaries matching instances format
    """
    stub = _get_stub()

    if not stub:
        return []

    instances = []
    layers_meta = stub.get_metadata()

    for instance in layers_meta:
        if instance.get("id") == "pyblish.avalon.instance":
            instances.append(instance)
    return instances


def remove_instance(instance):
    """
        Remove instance from current workfile metadata.

        Updates metadata of current file in File > File Info and removes
        icon highlight on group layer.

        For SubsetManager

    Args:
        instance (dict): instance representation from subsetmanager model
    """
    stub = _get_stub()

    if not stub:
        return

    inst_id = instance.get("instance_id") or instance.get("uuid")  # legacy
    if not inst_id:
        log.warning("No instance identifier for {}".format(instance))
        return

    stub.remove_instance(inst_id)

    if instance.get("members"):
        item = stub.get_item(instance["members"][0])
        if item:
            stub.rename_item(item.id,
                             item.name.replace(stub.PUBLISH_ICON, ''))


# new publisher section
def get_context_data():
    meta = _get_stub().get_metadata()
    for item in meta:
        if item.get("id") == "publish_context":
            item.pop("id")
            return item
    return {}


def update_context_data(data, changes):
    item = data
    item["id"] = "publish_context"
    _get_stub().imprint(item["id"], item)


def get_context_title():
    """Returns title for Creator window"""
    project_name = legacy_io.Session["AVALON_PROJECT"]
    asset_name = legacy_io.Session["AVALON_ASSET"]
    task_name = legacy_io.Session["AVALON_TASK"]
    return "{}/{}/{}".format(project_name, asset_name, task_name)


def _get_stub():
    """
        Handle pulling stub from AE to run operations on host

    Returns:
        (AEServerStub) or None
    """
    try:
        stub = get_stub()  # only after After Effects is up
    except ConnectionNotEstablishedYet:
        print("Not connected yet, ignoring")
        return

    if not stub.get_active_document_name():
        return

    return stub

def cache_and_get_instances(creator):
    """Cache instances in shared data.

    Storing all instances as a list as legacy instances might be
    still present.

    Args:
        creator (Creator): Plugin which would like to get instances
            from host.

    Returns:
        List[]: list of all instances stored in metadata
    """
    shared_key = "openpype.photoshop.instances"
    if shared_key not in creator.collection_shared_data:
        creator.collection_shared_data[shared_key] = \
            creator.host.list_instances()
    return creator.collection_shared_data[shared_key]
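The shared-data cache above exists so that several Creator plugins collecting in the same publish session trigger only one metadata pull from the host. A minimal sketch of the consuming side, mirroring the collect_instances implementations later in this diff (the creator class and identifier are hypothetical, and CreatedInstance.from_existing is assumed from the new publisher API):

    class ExampleAECreator(Creator):  # hypothetical, for illustration only
        identifier = "aftereffects.example"

        def collect_instances(self):
            # First caller pays the host round-trip; the rest read the cache.
            for instance_data in cache_and_get_instances(self):
                if instance_data.get("creator_identifier") == self.identifier:
                    self._add_instance_to_context(
                        CreatedInstance.from_existing(instance_data, self))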
View file
@ -1,53 +0,0 @@
"""Host API required Work Files tool"""
import os
from .launch_logic import get_stub
def file_extensions():
return [".aep"]
def has_unsaved_changes():
if _active_document():
return not get_stub().is_saved()
return False
def save_file(filepath):
get_stub().saveAs(filepath, True)
def open_file(filepath):
get_stub().open(filepath)
return True
def current_file():
try:
full_name = get_stub().get_active_document_full_name()
if full_name and full_name != "null":
return os.path.normpath(full_name).replace("\\", "/")
except ValueError:
print("Nothing opened")
pass
return None
def work_root(session):
return os.path.normpath(session["AVALON_WORKDIR"]).replace("\\", "/")
def _active_document():
# TODO merge with current_file - even in extension
document_name = None
try:
document_name = get_stub().get_active_document_name()
except ValueError:
print("Nothing opened")
pass
return document_name
View file
@ -418,18 +418,18 @@ class AfterEffectsServerStub():
return self._handle_return(res) return self._handle_return(res)
def get_render_info(self): def get_render_info(self, comp_id):
""" Get render queue info for render purposes """ Get render queue info for render purposes
Returns: Returns:
(AEItem): with 'file_name' field (list) of (AEItem): with 'file_name' field
""" """
res = self.websocketserver.call(self.client.call res = self.websocketserver.call(self.client.call
('AfterEffects.get_render_info')) ('AfterEffects.get_render_info',
comp_id=comp_id))
records = self._to_records(self._handle_return(res)) records = self._to_records(self._handle_return(res))
if records: return records
return records.pop()
def get_audio_url(self, item_id): def get_audio_url(self, item_id):
""" Get audio layer absolute url for comp """ Get audio layer absolute url for comp
@ -522,7 +522,7 @@ class AfterEffectsServerStub():
if records: if records:
return records.pop() return records.pop()
def render(self, folder_url): def render(self, folder_url, comp_id):
""" """
Render all renderqueueitem to 'folder_url' Render all renderqueueitem to 'folder_url'
Args: Args:
@ -531,7 +531,8 @@ class AfterEffectsServerStub():
""" """
res = self.websocketserver.call(self.client.call res = self.websocketserver.call(self.client.call
('AfterEffects.render', ('AfterEffects.render',
folder_url=folder_url)) folder_url=folder_url,
comp_id=comp_id))
return self._handle_return(res) return self._handle_return(res)
def get_extension_version(self): def get_extension_version(self):
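Together with the ExtendScript changes above, render info is now scoped to a single composition, and get_render_info returns the full list of records (one per output module) instead of popping a single item. A hedged sketch of the new call shape (ids and paths illustrative):

    stub = get_stub()
    comp_id = 123  # illustrative composition id
    render_q = stub.get_render_info(comp_id)        # list of AEItem records
    file_names = [item.file_name for item in render_q]
    stub.render("C:/temp/staging", comp_id)         # queues only this comp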
View file
@ -1,13 +0,0 @@
from openpype.hosts.aftereffects.plugins.create import create_legacy_render
class CreateLocalRender(create_legacy_render.CreateRender):
""" Creator to render locally.
Created only after default render on farm. So family 'render.local' is
used for backward compatibility.
"""
name = "renderDefault"
label = "Render Locally"
family = "renderLocal"
View file
@ -1,62 +0,0 @@
from openpype.pipeline import create
from openpype.pipeline import CreatorError
from openpype.hosts.aftereffects.api import (
get_stub,
list_instances
)
class CreateRender(create.LegacyCreator):
"""Render folder for publish.
Creates subsets in format 'familyTaskSubsetname',
eg 'renderCompositingMain'.
Create only single instance from composition at a time.
"""
name = "renderDefault"
label = "Render on Farm"
family = "render"
defaults = ["Main"]
def process(self):
stub = get_stub() # only after After Effects is up
items = []
if (self.options or {}).get("useSelection"):
items = stub.get_selected_items(
comps=True, folders=False, footages=False
)
if len(items) > 1:
raise CreatorError(
"Please select only single composition at time."
)
if not items:
raise CreatorError((
"Nothing to create. Select composition "
"if 'useSelection' or create at least "
"one composition."
))
existing_subsets = [
instance['subset'].lower()
for instance in list_instances()
]
item = items.pop()
if self.name.lower() in existing_subsets:
txt = "Instance with name \"{}\" already exists.".format(self.name)
raise CreatorError(txt)
self.data["members"] = [item.id]
self.data["uuid"] = item.id # for SubsetManager
self.data["subset"] = (
self.data["subset"]
.replace(stub.PUBLISH_ICON, '')
.replace(stub.LOADED_ICON, '')
)
stub.imprint(item, self.data)
stub.set_label_color(item.id, 14) # Cyan options 0 - 16
stub.rename_item(item.id, stub.PUBLISH_ICON + self.data["subset"])
View file
@ -1,3 +1,5 @@
import re
from openpype import resources from openpype import resources
from openpype.lib import BoolDef, UISeparatorDef from openpype.lib import BoolDef, UISeparatorDef
from openpype.hosts.aftereffects import api from openpype.hosts.aftereffects import api
@ -7,6 +9,8 @@ from openpype.pipeline import (
CreatorError, CreatorError,
legacy_io, legacy_io,
) )
from openpype.hosts.aftereffects.api.pipeline import cache_and_get_instances
from openpype.lib import prepare_template_data
class RenderCreator(Creator): class RenderCreator(Creator):
@ -28,7 +32,7 @@ class RenderCreator(Creator):
return resources.get_openpype_splash_filepath() return resources.get_openpype_splash_filepath()
def collect_instances(self): def collect_instances(self):
for instance_data in api.list_instances(): for instance_data in cache_and_get_instances(self):
# legacy instances have family=='render' or 'renderLocal', use them # legacy instances have family=='render' or 'renderLocal', use them
creator_id = (instance_data.get("creator_identifier") or creator_id = (instance_data.get("creator_identifier") or
instance_data.get("family", '').replace("Local", '')) instance_data.get("family", '').replace("Local", ''))
@ -43,46 +47,71 @@ class RenderCreator(Creator):
for created_inst, _changes in update_list: for created_inst, _changes in update_list:
api.get_stub().imprint(created_inst.get("instance_id"), api.get_stub().imprint(created_inst.get("instance_id"),
created_inst.data_to_store()) created_inst.data_to_store())
subset_change = _changes.get("subset")
if subset_change:
api.get_stub().rename_item(created_inst.data["members"][0],
subset_change[1])
    def remove_instances(self, instances):
        for instance in instances:
            api.remove_instance(instance)
            self._remove_instance_from_context(instance)

    def create(self, subset_name, data, pre_create_data):
        stub = api.get_stub()  # only after After Effects is up
        if pre_create_data.get("use_selection"):
            items = stub.get_selected_items(
                comps=True, folders=False, footages=False
            )
        else:
            items = stub.get_items(comps=True, folders=False, footages=False)

        if len(items) > 1:
            raise CreatorError(
                "Please select only single composition at time."
            )
        if not items:
            raise CreatorError((
                "Nothing to create. Select composition "
                "if 'useSelection' or create at least "
                "one composition."
            ))

        for inst in self.create_context.instances:
            if subset_name == inst.subset_name:
                raise CreatorError("{} already exists".format(
                    inst.subset_name))

        data["members"] = [items[0].id]
        new_instance = CreatedInstance(self.family, subset_name, data, self)
        if "farm" in pre_create_data:
            use_farm = pre_create_data["farm"]
            new_instance.creator_attributes["farm"] = use_farm
        api.get_stub().imprint(new_instance.id,
                               new_instance.data_to_store())
        self._add_instance_to_context(new_instance)

    def remove_instances(self, instances):
        for instance in instances:
            self._remove_instance_from_context(instance)
            self.host.remove_instance(instance)

            subset = instance.data["subset"]
            comp_id = instance.data["members"][0]
            comp = api.get_stub().get_item(comp_id)
            if comp:
                new_comp_name = comp.name.replace(subset, '')
                if not new_comp_name:
                    new_comp_name = "dummyCompName"
                api.get_stub().rename_item(comp_id,
                                           new_comp_name)

    def create(self, subset_name_from_ui, data, pre_create_data):
        stub = api.get_stub()  # only after After Effects is up
        if pre_create_data.get("use_selection"):
            comps = stub.get_selected_items(
                comps=True, folders=False, footages=False
            )
        else:
            comps = stub.get_items(comps=True, folders=False, footages=False)

        if not comps:
            raise CreatorError(
                "Nothing to create. Select composition "
                "if 'useSelection' or create at least "
                "one composition."
            )

        for comp in comps:
            if pre_create_data.get("use_composition_name"):
                composition_name = comp.name
                dynamic_fill = prepare_template_data({"composition":
                                                      composition_name})
                subset_name = subset_name_from_ui.format(**dynamic_fill)
                data["composition_name"] = composition_name
            else:
                subset_name = subset_name_from_ui
                subset_name = re.sub(r"\{composition\}", '', subset_name,
                                     flags=re.IGNORECASE)

            for inst in self.create_context.instances:
                if subset_name == inst.subset_name:
                    raise CreatorError("{} already exists".format(
                        inst.subset_name))

            data["members"] = [comp.id]
            new_instance = CreatedInstance(self.family, subset_name, data,
                                           self)
            if "farm" in pre_create_data:
                use_farm = pre_create_data["farm"]
                new_instance.creator_attributes["farm"] = use_farm
            api.get_stub().imprint(new_instance.id,
                                   new_instance.data_to_store())
            self._add_instance_to_context(new_instance)
            stub.rename_item(comp.id, subset_name)
def get_default_variants(self): def get_default_variants(self):
return self._default_variants return self._default_variants
@ -93,6 +122,8 @@ class RenderCreator(Creator):
def get_pre_create_attr_defs(self): def get_pre_create_attr_defs(self):
output = [ output = [
BoolDef("use_selection", default=True, label="Use selection"), BoolDef("use_selection", default=True, label="Use selection"),
BoolDef("use_composition_name",
label="Use composition name in subset"),
UISeparatorDef(), UISeparatorDef(),
BoolDef("farm", label="Render on farm") BoolDef("farm", label="Render on farm")
] ]
@ -101,6 +132,18 @@ class RenderCreator(Creator):
def get_detail_description(self): def get_detail_description(self):
return """Creator for Render instances""" return """Creator for Render instances"""
def get_dynamic_data(self, variant, task_name, asset_doc,
project_name, host_name, instance):
dynamic_data = {}
if instance is not None:
composition_name = instance.get("composition_name")
if composition_name:
dynamic_data["composition"] = composition_name
else:
dynamic_data["composition"] = "{composition}"
return dynamic_data
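get_dynamic_data above fills the {composition} token only when an instance already carries composition_name; while no instance exists yet it deliberately returns the literal token so it survives until create() resolves it per composition. A small sketch of that resolution step, assuming prepare_template_data yields the usual case variants of the key (values illustrative):

    subset_name_from_ui = "render{composition}Main"
    dynamic_fill = prepare_template_data({"composition": "bgShot010"})
    subset_name = subset_name_from_ui.format(**dynamic_fill)
    # -> "renderbgShot010Main"; with use_composition_name off, the token
    # is stripped instead via re.sub(r"\{composition\}", '', ...,
    # flags=re.IGNORECASE)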
def _handle_legacy(self, instance_data): def _handle_legacy(self, instance_data):
"""Converts old instances to new format.""" """Converts old instances to new format."""
if not instance_data.get("members"): if not instance_data.get("members"):
View file
@ -5,6 +5,7 @@ from openpype.pipeline import (
CreatedInstance, CreatedInstance,
legacy_io, legacy_io,
) )
from openpype.hosts.aftereffects.api.pipeline import cache_and_get_instances
class AEWorkfileCreator(AutoCreator): class AEWorkfileCreator(AutoCreator):
@ -17,7 +18,7 @@ class AEWorkfileCreator(AutoCreator):
return [] return []
def collect_instances(self): def collect_instances(self):
for instance_data in api.list_instances(): for instance_data in cache_and_get_instances(self):
creator_id = instance_data.get("creator_identifier") creator_id = instance_data.get("creator_identifier")
if creator_id == self.identifier: if creator_id == self.identifier:
subset_name = instance_data["subset"] subset_name = instance_data["subset"]
@ -55,7 +56,7 @@ class AEWorkfileCreator(AutoCreator):
} }
data.update(self.get_dynamic_data( data.update(self.get_dynamic_data(
self.default_variant, task_name, asset_doc, self.default_variant, task_name, asset_doc,
project_name, host_name project_name, host_name, None
)) ))
new_instance = CreatedInstance( new_instance = CreatedInstance(
View file
@ -22,7 +22,7 @@ class AERenderInstance(RenderInstance):
stagingDir = attr.ib(default=None) stagingDir = attr.ib(default=None)
app_version = attr.ib(default=None) app_version = attr.ib(default=None)
publish_attributes = attr.ib(default={}) publish_attributes = attr.ib(default={})
file_name = attr.ib(default=None) file_names = attr.ib(default=[])
class CollectAERender(publish.AbstractCollectRender): class CollectAERender(publish.AbstractCollectRender):
@ -64,14 +64,13 @@ class CollectAERender(publish.AbstractCollectRender):
if family not in ["render", "renderLocal"]: # legacy if family not in ["render", "renderLocal"]: # legacy
continue continue
item_id = inst.data["members"][0] comp_id = int(inst.data["members"][0])
work_area_info = CollectAERender.get_stub().get_work_area( work_area_info = CollectAERender.get_stub().get_work_area(comp_id)
int(item_id))
if not work_area_info: if not work_area_info:
self.log.warning("Orphaned instance, deleting metadata") self.log.warning("Orphaned instance, deleting metadata")
inst_id = inst.get("instance_id") or item_id inst_id = inst.get("instance_id") or str(comp_id)
CollectAERender.get_stub().remove_instance(inst_id) CollectAERender.get_stub().remove_instance(inst_id)
continue continue
@ -84,9 +83,10 @@ class CollectAERender(publish.AbstractCollectRender):
task_name = inst.data.get("task") # legacy task_name = inst.data.get("task") # legacy
render_q = CollectAERender.get_stub().get_render_info() render_q = CollectAERender.get_stub().get_render_info(comp_id)
if not render_q: if not render_q:
raise ValueError("No file extension set in Render Queue") raise ValueError("No file extension set in Render Queue")
render_item = render_q[0]
subset_name = inst.data["subset"] subset_name = inst.data["subset"]
instance = AERenderInstance( instance = AERenderInstance(
@ -103,8 +103,8 @@ class CollectAERender(publish.AbstractCollectRender):
setMembers='', setMembers='',
publish=True, publish=True,
name=subset_name, name=subset_name,
resolutionWidth=render_q.width, resolutionWidth=render_item.width,
resolutionHeight=render_q.height, resolutionHeight=render_item.height,
pixelAspect=1, pixelAspect=1,
tileRendering=False, tileRendering=False,
tilesX=0, tilesX=0,
@ -115,16 +115,16 @@ class CollectAERender(publish.AbstractCollectRender):
fps=fps, fps=fps,
app_version=app_version, app_version=app_version,
publish_attributes=inst.data.get("publish_attributes", {}), publish_attributes=inst.data.get("publish_attributes", {}),
file_name=render_q.file_name file_names=[item.file_name for item in render_q]
) )
comp = compositions_by_id.get(int(item_id)) comp = compositions_by_id.get(comp_id)
if not comp: if not comp:
raise ValueError("There is no composition for item {}". raise ValueError("There is no composition for item {}".
format(item_id)) format(comp_id))
instance.outputDir = self._get_output_dir(instance) instance.outputDir = self._get_output_dir(instance)
instance.comp_name = comp.name instance.comp_name = comp.name
instance.comp_id = item_id instance.comp_id = comp_id
is_local = "renderLocal" in inst.data["family"] # legacy is_local = "renderLocal" in inst.data["family"] # legacy
if inst.data.get("creator_attributes"): if inst.data.get("creator_attributes"):
@ -163,28 +163,30 @@ class CollectAERender(publish.AbstractCollectRender):
start = render_instance.frameStart start = render_instance.frameStart
end = render_instance.frameEnd end = render_instance.frameEnd
        _, ext = os.path.splitext(os.path.basename(render_instance.file_name))
        base_dir = self._get_output_dir(render_instance)
        expected_files = []
        if "#" not in render_instance.file_name:  # single frame (mov)
            path = os.path.join(base_dir, "{}_{}_{}.{}".format(
                render_instance.asset,
                render_instance.subset,
                "v{:03d}".format(render_instance.version),
                ext.replace('.', '')
            ))
            expected_files.append(path)
        else:
            for frame in range(start, end + 1):
                path = os.path.join(base_dir, "{}_{}_{}.{}.{}".format(
                    render_instance.asset,
                    render_instance.subset,
                    "v{:03d}".format(render_instance.version),
                    str(frame).zfill(self.padding_width),
                    ext.replace('.', '')
                ))
                expected_files.append(path)

        base_dir = self._get_output_dir(render_instance)
        expected_files = []
        for file_name in render_instance.file_names:
            _, ext = os.path.splitext(os.path.basename(file_name))
            ext = ext.replace('.', '')
            version_str = "v{:03d}".format(render_instance.version)
            if "#" not in file_name:  # single frame (mov)
                path = os.path.join(base_dir, "{}_{}_{}.{}".format(
                    render_instance.asset,
                    render_instance.subset,
                    version_str,
                    ext
                ))
                expected_files.append(path)
            else:
                for frame in range(start, end + 1):
                    path = os.path.join(base_dir, "{}_{}_{}.{}.{}".format(
                        render_instance.asset,
                        render_instance.subset,
                        version_str,
                        str(frame).zfill(self.padding_width),
                        ext
                    ))
                    expected_files.append(path)
return expected_files return expected_files
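With file_names now a list, expected files are computed per output module: a '#'-free name is treated as a single file (e.g. a .mov), anything else as a frame sequence padded to padding_width. Illustrative output, assuming asset "sh010", subset "renderCompMain", version 3 and padding 4:

    # single-file module ("#" not in name):
    #   <outputDir>/sh010_renderCompMain_v003.mov
    # sequence module, frames 1001-1003:
    #   <outputDir>/sh010_renderCompMain_v003.1001.png
    #   <outputDir>/sh010_renderCompMain_v003.1002.png
    #   <outputDir>/sh010_renderCompMain_v003.1003.png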
def _get_output_dir(self, render_instance): def _get_output_dir(self, render_instance):
View file
@ -21,41 +21,55 @@ class ExtractLocalRender(publish.Extractor):
    def process(self, instance):
        stub = get_stub()
        staging_dir = instance.data["stagingDir"]
        self.log.info("staging_dir::{}".format(staging_dir))

        # pull file name from Render Queue Output module
        render_q = stub.get_render_info()
        stub.render(staging_dir)
        if not render_q:
            raise ValueError("No file extension set in Render Queue")
        _, ext = os.path.splitext(os.path.basename(render_q.file_name))
        ext = ext[1:]

        first_file_path = None
        files = []
        self.log.info("files::{}".format(os.listdir(staging_dir)))
        for file_name in os.listdir(staging_dir):
            files.append(file_name)
            if first_file_path is None:
                first_file_path = os.path.join(staging_dir,
                                               file_name)

        resulting_files = files
        if len(files) == 1:
            resulting_files = files[0]

        repre_data = {
            "frameStart": instance.data["frameStart"],
            "frameEnd": instance.data["frameEnd"],
            "name": ext,
            "ext": ext,
            "files": resulting_files,
            "stagingDir": staging_dir
        }
        if instance.data["review"]:
            repre_data["tags"] = ["review"]
        instance.data["representations"] = [repre_data]

    def process(self, instance):
        stub = get_stub()
        staging_dir = instance.data["stagingDir"]
        self.log.debug("staging_dir::{}".format(staging_dir))

        # pull file name collected value from Render Queue Output module
        if not instance.data["file_names"]:
            raise ValueError("No file extension set in Render Queue")

        comp_id = instance.data['comp_id']
        stub.render(staging_dir, comp_id)

        representations = []
        for file_name in instance.data["file_names"]:
            _, ext = os.path.splitext(os.path.basename(file_name))
            ext = ext[1:]

            first_file_path = None
            files = []
            for found_file_name in os.listdir(staging_dir):
                if not found_file_name.endswith(ext):
                    continue

                files.append(found_file_name)
                if first_file_path is None:
                    first_file_path = os.path.join(staging_dir,
                                                   found_file_name)

            if not files:
                self.log.info("no files")
                return

            # single file cannot be wrapped in array
            resulting_files = files
            if len(files) == 1:
                resulting_files = files[0]

            repre_data = {
                "frameStart": instance.data["frameStart"],
                "frameEnd": instance.data["frameEnd"],
                "name": ext,
                "ext": ext,
                "files": resulting_files,
                "stagingDir": staging_dir
            }
            first_repre = not representations
            if instance.data["review"] and first_repre:
                repre_data["tags"] = ["review"]
            representations.append(repre_data)

        instance.data["representations"] = representations
ffmpeg_path = get_ffmpeg_tool_path("ffmpeg") ffmpeg_path = get_ffmpeg_tool_path("ffmpeg")
# Generate thumbnail. # Generate thumbnail.
View file
@ -9,7 +9,7 @@ Context of the given subset doesn't match your current scene.
### How to repair? ### How to repair?
You can fix this with "repair" button on the right. You can fix this with "repair" button on the right and refresh Publish at the bottom right.
</description> </description>
<detail> <detail>
### __Detailed Info__ (optional) ### __Detailed Info__ (optional)
View file
@ -41,7 +41,7 @@ class ExtractThumnail(publish.Extractor):
track_item_name, thumb_frame, ".png") track_item_name, thumb_frame, ".png")
thumb_path = os.path.join(staging_dir, thumb_file) thumb_path = os.path.join(staging_dir, thumb_file)
thumbnail = track_item.thumbnail(thumb_frame).save( thumbnail = track_item.thumbnail(thumb_frame, "colour").save(
thumb_path, thumb_path,
format='png' format='png'
) )
View file
@ -28,7 +28,7 @@ class MayaTemplateBuilder(AbstractTemplateBuilder):
Args: Args:
path (str): A path to current template (usually given by path (str): A path to current template (usually given by
get_template_path implementation) get_template_preset implementation)
Returns: Returns:
bool: Whether the template was successfully imported or not
@ -240,7 +240,7 @@ class MayaPlaceholderLoadPlugin(PlaceholderPlugin, PlaceholderLoadMixin):
cmds.setAttr(node + ".hiddenInOutliner", True) cmds.setAttr(node + ".hiddenInOutliner", True)
def load_succeed(self, placeholder, container): def load_succeed(self, placeholder, container):
self._parent_in_hierarhchy(placeholder, container) self._parent_in_hierarchy(placeholder, container)
def _parent_in_hierarchy(self, placeholder, container): def _parent_in_hierarchy(self, placeholder, container):
"""Parent loaded container to placeholder's parent. """Parent loaded container to placeholder's parent.
View file
@ -72,15 +72,19 @@ class CreateRender(plugin.Creator):
def __init__(self, *args, **kwargs): def __init__(self, *args, **kwargs):
"""Constructor.""" """Constructor."""
super(CreateRender, self).__init__(*args, **kwargs) super(CreateRender, self).__init__(*args, **kwargs)
        deadline_settings = get_system_settings()["modules"]["deadline"]
        if not deadline_settings["enabled"]:
            self.deadline_servers = {}
            return
        self._project_settings = get_project_settings(
            legacy_io.Session["AVALON_PROJECT"])
        if self._project_settings["maya"]["RenderSettings"]["apply_render_settings"]:  # noqa
            lib_rendersettings.RenderSettings().set_default_renderer_settings()
        manager = ModulesManager()
        self.deadline_module = manager.modules_by_name["deadline"]

        # Defaults
        self._project_settings = get_project_settings(
            legacy_io.Session["AVALON_PROJECT"])
        if self._project_settings["maya"]["RenderSettings"]["apply_render_settings"]:  # noqa
            lib_rendersettings.RenderSettings().set_default_renderer_settings()

        # Deadline-only
        manager = ModulesManager()
        deadline_settings = get_system_settings()["modules"]["deadline"]
        if not deadline_settings["enabled"]:
            self.deadline_servers = {}
            return
        self.deadline_module = manager.modules_by_name["deadline"]
try: try:
default_servers = deadline_settings["deadline_urls"] default_servers = deadline_settings["deadline_urls"]
@ -193,8 +197,6 @@ class CreateRender(plugin.Creator):
pool_names = [] pool_names = []
default_priority = 50 default_priority = 50
self.server_aliases = list(self.deadline_servers.keys())
self.data["deadlineServers"] = self.server_aliases
self.data["suspendPublishJob"] = False self.data["suspendPublishJob"] = False
self.data["review"] = True self.data["review"] = True
self.data["extendFrames"] = False self.data["extendFrames"] = False
@ -233,6 +235,9 @@ class CreateRender(plugin.Creator):
raise RuntimeError("Both Deadline and Muster are enabled") raise RuntimeError("Both Deadline and Muster are enabled")
if deadline_enabled: if deadline_enabled:
self.server_aliases = list(self.deadline_servers.keys())
self.data["deadlineServers"] = self.server_aliases
try: try:
deadline_url = self.deadline_servers["default"] deadline_url = self.deadline_servers["default"]
except KeyError: except KeyError:
@ -254,6 +259,19 @@ class CreateRender(plugin.Creator):
default_priority) default_priority)
self.data["tile_priority"] = tile_priority self.data["tile_priority"] = tile_priority
pool_setting = (self._project_settings["deadline"]
["publish"]
["CollectDeadlinePools"])
primary_pool = pool_setting["primary_pool"]
self.data["primaryPool"] = self._set_default_pool(pool_names,
primary_pool)
# We add a string "-" to allow the user to not
# set any secondary pools
pool_names = ["-"] + pool_names
secondary_pool = pool_setting["secondary_pool"]
self.data["secondaryPool"] = self._set_default_pool(pool_names,
secondary_pool)
if muster_enabled: if muster_enabled:
self.log.info(">>> Loading Muster credentials ...") self.log.info(">>> Loading Muster credentials ...")
self._load_credentials() self._load_credentials()
@ -273,18 +291,6 @@ class CreateRender(plugin.Creator):
self.log.info(" - pool: {}".format(pool["name"])) self.log.info(" - pool: {}".format(pool["name"]))
pool_names.append(pool["name"]) pool_names.append(pool["name"])
pool_setting = (self._project_settings["deadline"]
["publish"]
["CollectDeadlinePools"])
primary_pool = pool_setting["primary_pool"]
self.data["primaryPool"] = self._set_default_pool(pool_names,
primary_pool)
# We add a string "-" to allow the user to not
# set any secondary pools
pool_names = ["-"] + pool_names
secondary_pool = pool_setting["secondary_pool"]
self.data["secondaryPool"] = self._set_default_pool(pool_names,
secondary_pool)
self.options = {"useSelection": False} # Force no content self.options = {"useSelection": False} # Force no content
def _set_default_pool(self, pool_names, pool_value): def _set_default_pool(self, pool_names, pool_value):
View file
@ -174,9 +174,6 @@ class CollectInstances(pyblish.api.ContextPlugin):
if "handles" in data: if "handles" in data:
data["handleStart"] = data["handles"] data["handleStart"] = data["handles"]
data["handleEnd"] = data["handles"] data["handleEnd"] = data["handles"]
else:
data["handleStart"] = 0
data["handleEnd"] = 0
data["frameStartHandle"] = data["frameStart"] - data["handleStart"] # noqa: E501 data["frameStartHandle"] = data["frameStart"] - data["handleStart"] # noqa: E501
data["frameEndHandle"] = data["frameEnd"] + data["handleEnd"] # noqa: E501 data["frameEndHandle"] = data["frameEnd"] + data["handleEnd"] # noqa: E501
View file
@ -5,6 +5,11 @@ from openpype.pipeline.publish import (
RepairAction, RepairAction,
ValidateContentsOrder, ValidateContentsOrder,
) )
from openpype.hosts.maya.api.lib_rendersetup import (
get_attr_overrides,
get_attr_in_layer,
)
from maya.app.renderSetup.model.override import AbsOverride
class ValidateFrameRange(pyblish.api.InstancePlugin): class ValidateFrameRange(pyblish.api.InstancePlugin):
@ -92,10 +97,86 @@ class ValidateFrameRange(pyblish.api.InstancePlugin):
""" """
Repair instance container to match asset data. Repair instance container to match asset data.
""" """
        cmds.setAttr(
            "{}.frameStart".format(instance.data["name"]),
            instance.context.data.get("frameStartHandle"))
        cmds.setAttr(
            "{}.frameEnd".format(instance.data["name"]),
            instance.context.data.get("frameEndHandle"))

        if "renderlayer" in instance.data.get("families"):
            # Special behavior for renderlayers
            cls.repair_renderlayer(instance)
            return
node = instance.data["name"]
context = instance.context
frame_start_handle = int(context.data.get("frameStartHandle"))
frame_end_handle = int(context.data.get("frameEndHandle"))
handle_start = int(context.data.get("handleStart"))
handle_end = int(context.data.get("handleEnd"))
frame_start = int(context.data.get("frameStart"))
frame_end = int(context.data.get("frameEnd"))
# Start
if cmds.attributeQuery("handleStart", node=node, exists=True):
cmds.setAttr("{}.handleStart".format(node), handle_start)
cmds.setAttr("{}.frameStart".format(node), frame_start)
else:
# Include start handle in frame start if no separate handleStart
# attribute exists on the node
cmds.setAttr("{}.frameStart".format(node), frame_start_handle)
# End
if cmds.attributeQuery("handleEnd", node=node, exists=True):
cmds.setAttr("{}.handleEnd".format(node), handle_end)
cmds.setAttr("{}.frameEnd".format(node), frame_end)
else:
# Include end handle in frame end if no separate handleEnd
# attribute exists on the node
cmds.setAttr("{}.frameEnd".format(node), frame_end_handle)
@classmethod
def repair_renderlayer(cls, instance):
"""Apply frame range in render settings"""
layer = instance.data["setMembers"]
context = instance.context
start_attr = "defaultRenderGlobals.startFrame"
end_attr = "defaultRenderGlobals.endFrame"
frame_start_handle = int(context.data.get("frameStartHandle"))
frame_end_handle = int(context.data.get("frameEndHandle"))
cls._set_attr_in_layer(start_attr, layer, frame_start_handle)
cls._set_attr_in_layer(end_attr, layer, frame_end_handle)
@classmethod
def _set_attr_in_layer(cls, node_attr, layer, value):
if get_attr_in_layer(node_attr, layer=layer) == value:
# Already ok. This can happen if you have multiple renderlayers
# validated and there are no frame range overrides. The first
# layer's repair would have fixed the global value already
return
overrides = list(get_attr_overrides(node_attr, layer=layer))
if overrides:
# We set the last absolute override if it is an absolute override
# otherwise we'll add an Absolute override
last_override = overrides[-1][1]
if not isinstance(last_override, AbsOverride):
collection = last_override.parent()
node, attr = node_attr.split(".", 1)
last_override = collection.createAbsoluteOverride(node, attr)
cls.log.debug("Setting {attr} absolute override in "
"layer '{layer}': {value}".format(layer=layer,
attr=node_attr,
value=value))
cmds.setAttr(last_override.name() + ".attrValue", value)
else:
# Set the attribute directly
# (Note that this will set the global attribute)
cls.log.debug("Setting global {attr}: {value}".format(
attr=node_attr,
value=value
))
cmds.setAttr(node_attr, value)
View file
@ -2865,10 +2865,11 @@ def get_group_io_nodes(nodes):
break break
if input_node is None: if input_node is None:
raise ValueError("No Input found") log.warning("No Input found")
if output_node is None: if output_node is None:
raise ValueError("No Output found") log.warning("No Output found")
return input_node, output_node return input_node, output_node
View file
@ -35,6 +35,7 @@ from .lib import (
) )
from .workfile_template_builder import ( from .workfile_template_builder import (
NukePlaceholderLoadPlugin, NukePlaceholderLoadPlugin,
NukePlaceholderCreatePlugin,
build_workfile_template, build_workfile_template,
update_workfile_template, update_workfile_template,
create_placeholder, create_placeholder,
@ -139,7 +140,8 @@ def _show_workfiles():
def get_workfile_build_placeholder_plugins(): def get_workfile_build_placeholder_plugins():
return [ return [
NukePlaceholderLoadPlugin NukePlaceholderLoadPlugin,
NukePlaceholderCreatePlugin
] ]
@ -217,10 +219,6 @@ def _install_menu():
"Build Workfile from template", "Build Workfile from template",
lambda: build_workfile_template() lambda: build_workfile_template()
) )
menu_template.addCommand(
"Update Workfile",
lambda: update_workfile_template()
)
menu_template.addSeparator() menu_template.addSeparator()
menu_template.addCommand( menu_template.addCommand(
"Create Place Holder", "Create Place Holder",
View file
@ -427,6 +427,8 @@ class ExporterReviewMov(ExporterReview):
# create nk path # create nk path
path = os.path.splitext(self.path)[0] + ".nk" path = os.path.splitext(self.path)[0] + ".nk"
# save file to the path # save file to the path
if not os.path.exists(os.path.dirname(path)):
os.makedirs(os.path.dirname(path))
shutil.copyfile(self.instance.context.data["currentFile"], path) shutil.copyfile(self.instance.context.data["currentFile"], path)
self.log.info("Nodes exported...") self.log.info("Nodes exported...")
View file
@ -7,7 +7,9 @@ from openpype.pipeline.workfile.workfile_template_builder import (
AbstractTemplateBuilder, AbstractTemplateBuilder,
PlaceholderPlugin, PlaceholderPlugin,
LoadPlaceholderItem, LoadPlaceholderItem,
CreatePlaceholderItem,
PlaceholderLoadMixin, PlaceholderLoadMixin,
PlaceholderCreateMixin
) )
from openpype.tools.workfile_template_build import ( from openpype.tools.workfile_template_build import (
WorkfileBuildPlaceholderDialog, WorkfileBuildPlaceholderDialog,
@ -32,7 +34,7 @@ PLACEHOLDER_SET = "PLACEHOLDERS_SET"
class NukeTemplateBuilder(AbstractTemplateBuilder): class NukeTemplateBuilder(AbstractTemplateBuilder):
"""Concrete implementation of AbstractTemplateBuilder for maya""" """Concrete implementation of AbstractTemplateBuilder for nuke"""
def import_template(self, path): def import_template(self, path):
"""Import template into current scene. """Import template into current scene.
@ -40,7 +42,7 @@ class NukeTemplateBuilder(AbstractTemplateBuilder):
Args: Args:
path (str): A path to current template (usually given by path (str): A path to current template (usually given by
get_template_path implementation) get_template_preset implementation)
Returns: Returns:
bool: Whether the template was successfully imported or not
@ -74,8 +76,7 @@ class NukePlaceholderPlugin(PlaceholderPlugin):
node_knobs = node.knobs() node_knobs = node.knobs()
if ( if (
"builder_type" not in node_knobs "is_placeholder" not in node_knobs
or "is_placeholder" not in node_knobs
or not node.knob("is_placeholder").value() or not node.knob("is_placeholder").value()
): ):
continue continue
@ -273,6 +274,15 @@ class NukePlaceholderLoadPlugin(NukePlaceholderPlugin, PlaceholderLoadMixin):
placeholder.data["nb_children"] += 1 placeholder.data["nb_children"] += 1
reset_selection() reset_selection()
# remove placeholders marked as delete
if (
placeholder.data.get("delete")
and not placeholder.data.get("keep_placeholder")
):
self.log.debug("Deleting node: {}".format(placeholder_node.name()))
nuke.delete(placeholder_node)
# go back to root group # go back to root group
nuke.root().begin() nuke.root().begin()
@ -454,12 +464,12 @@ class NukePlaceholderLoadPlugin(NukePlaceholderPlugin, PlaceholderLoadMixin):
) )
for node in placeholder_node.dependent(): for node in placeholder_node.dependent():
for idx in range(node.inputs()): for idx in range(node.inputs()):
if node.input(idx) == placeholder_node: if node.input(idx) == placeholder_node and output_node:
node.setInput(idx, output_node) node.setInput(idx, output_node)
for node in placeholder_node.dependencies(): for node in placeholder_node.dependencies():
for idx in range(placeholder_node.inputs()): for idx in range(placeholder_node.inputs()):
if placeholder_node.input(idx) == node: if placeholder_node.input(idx) == node and input_node:
input_node.setInput(0, node) input_node.setInput(0, node)
def _create_sib_copies(self, placeholder): def _create_sib_copies(self, placeholder):
@ -535,6 +545,408 @@ class NukePlaceholderLoadPlugin(NukePlaceholderPlugin, PlaceholderLoadMixin):
siblings_input.setInput(0, copy_output) siblings_input.setInput(0, copy_output)
class NukePlaceholderCreatePlugin(
NukePlaceholderPlugin, PlaceholderCreateMixin
):
identifier = "nuke.create"
label = "Nuke create"
def _parse_placeholder_node_data(self, node):
placeholder_data = super(
NukePlaceholderCreatePlugin, self
)._parse_placeholder_node_data(node)
node_knobs = node.knobs()
nb_children = 0
if "nb_children" in node_knobs:
nb_children = int(node_knobs["nb_children"].getValue())
placeholder_data["nb_children"] = nb_children
siblings = []
if "siblings" in node_knobs:
siblings = node_knobs["siblings"].values()
placeholder_data["siblings"] = siblings
node_full_name = node.fullName()
placeholder_data["group_name"] = node_full_name.rpartition(".")[0]
placeholder_data["last_loaded"] = []
placeholder_data["delete"] = False
return placeholder_data
def _before_instance_create(self, placeholder):
placeholder.data["nodes_init"] = nuke.allNodes()
def collect_placeholders(self):
output = []
scene_placeholders = self._collect_scene_placeholders()
for node_name, node in scene_placeholders.items():
plugin_identifier_knob = node.knob("plugin_identifier")
if (
plugin_identifier_knob is None
or plugin_identifier_knob.getValue() != self.identifier
):
continue
placeholder_data = self._parse_placeholder_node_data(node)
output.append(
CreatePlaceholderItem(node_name, placeholder_data, self)
)
return output
def populate_placeholder(self, placeholder):
self.populate_create_placeholder(placeholder)
def repopulate_placeholder(self, placeholder):
self.populate_create_placeholder(placeholder)
def get_placeholder_options(self, options=None):
return self.get_create_plugin_options(options)
def cleanup_placeholder(self, placeholder, failed):
# deselect all selected nodes
placeholder_node = nuke.toNode(placeholder.scene_identifier)
# getting the latest nodes added
nodes_init = placeholder.data["nodes_init"]
nodes_created = list(set(nuke.allNodes()) - set(nodes_init))
self.log.debug("Created nodes: {}".format(nodes_created))
if not nodes_created:
return
placeholder.data["delete"] = True
nodes_created = self._move_to_placeholder_group(
placeholder, nodes_created
)
placeholder.data["last_created"] = nodes_created
refresh_nodes(nodes_created)
# positioning of the created nodes
min_x, min_y, _, _ = get_extreme_positions(nodes_created)
for node in nodes_created:
xpos = (node.xpos() - min_x) + placeholder_node.xpos()
ypos = (node.ypos() - min_y) + placeholder_node.ypos()
node.setXYpos(xpos, ypos)
refresh_nodes(nodes_created)
# fix the problem of z_order for backdrops
self._fix_z_order(placeholder)
self._imprint_siblings(placeholder)
if placeholder.data["nb_children"] == 0:
# save initial nodes positions and dimensions, update them
# and set inputs and outputs of created nodes
self._imprint_inits()
self._update_nodes(placeholder, nuke.allNodes(), nodes_created)
self._set_created_connections(placeholder)
elif placeholder.data["siblings"]:
# create copies of placeholder siblings for the newly created nodes,
# set their inputs and outputs and update all nodes' positions,
# dimensions and sibling names
siblings = get_nodes_by_names(placeholder.data["siblings"])
refresh_nodes(siblings)
copies = self._create_sib_copies(placeholder)
new_nodes = list(copies.values()) # copies nodes
self._update_nodes(new_nodes, nodes_created)
placeholder_node.removeKnob(placeholder_node.knob("siblings"))
new_nodes_name = get_names_from_nodes(new_nodes)
imprint(placeholder_node, {"siblings": new_nodes_name})
self._set_copies_connections(placeholder, copies)
self._update_nodes(
nuke.allNodes(),
new_nodes + nodes_created,
20
)
new_siblings = get_names_from_nodes(new_nodes)
placeholder.data["siblings"] = new_siblings
else:
# if the placeholder doesn't have siblings, the created
# nodes will be placed in a free space
xpointer, ypointer = find_free_space_to_paste_nodes(
nodes_created, direction="bottom", offset=200
)
node = nuke.createNode("NoOp")
reset_selection()
nuke.delete(node)
for node in nodes_created:
xpos = (node.xpos() - min_x) + xpointer
ypos = (node.ypos() - min_y) + ypointer
node.setXYpos(xpos, ypos)
placeholder.data["nb_children"] += 1
reset_selection()
# remove placeholders marked as delete
if (
placeholder.data.get("delete")
and not placeholder.data.get("keep_placeholder")
):
self.log.debug("Deleting node: {}".format(placeholder_node.name()))
nuke.delete(placeholder_node)
# go back to root group
nuke.root().begin()
def _move_to_placeholder_group(self, placeholder, nodes_created):
"""
Opens the placeholder's group and copies created nodes into it.
Returns:
nodes_created (list): the new list of pasted nodes
"""
groups_name = placeholder.data["group_name"]
reset_selection()
select_nodes(nodes_created)
if groups_name:
with node_tempfile() as filepath:
nuke.nodeCopy(filepath)
for node in nuke.selectedNodes():
nuke.delete(node)
group = nuke.toNode(groups_name)
group.begin()
nuke.nodePaste(filepath)
nodes_created = nuke.selectedNodes()
return nodes_created
def _fix_z_order(self, placeholder):
"""Fix the problem of z_order when a backdrop is create."""
nodes_created = placeholder.data["last_created"]
created_backdrops = []
bd_orders = set()
for node in nodes_created:
if isinstance(node, nuke.BackdropNode):
created_backdrops.append(node)
bd_orders.add(node.knob("z_order").getValue())
if not bd_orders:
return
sib_orders = set()
for node_name in placeholder.data["siblings"]:
node = nuke.toNode(node_name)
if isinstance(node, nuke.BackdropNode):
sib_orders.add(node.knob("z_order").getValue())
if not sib_orders:
return
min_order = min(bd_orders)
max_order = max(sib_orders)
for backdrop_node in created_backdrops:
z_order = backdrop_node.knob("z_order").getValue()
backdrop_node.knob("z_order").setValue(
z_order + max_order - min_order + 1)
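# Worked example for the offset above (illustrative values only): if the
# created backdrops have z_orders {0, 1} (min_order == 0) and the sibling
# backdrops have z_orders {3, 5} (max_order == 5), every created backdrop
# is shifted by 5 - 0 + 1 == 6, so 0 -> 6 and 1 -> 7, which keeps the new
# backdrops stacked above the sibling ones.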
def _imprint_siblings(self, placeholder):
"""
- add siblings names to placeholder attributes (nodes created with it)
- add Id to the attributes of all the other nodes
"""
created_nodes = placeholder.data["last_created"]
created_nodes_set = set(created_nodes)
for node in created_nodes:
node_knobs = node.knobs()
if (
"is_placeholder" not in node_knobs
or (
"is_placeholder" in node_knobs
and node.knob("is_placeholder").value()
)
):
siblings = list(created_nodes_set - {node})
siblings_name = get_names_from_nodes(siblings)
siblings = {"siblings": siblings_name}
imprint(node, siblings)
def _imprint_inits(self):
"""Add initial positions and dimensions to the attributes"""
for node in nuke.allNodes():
refresh_node(node)
imprint(node, {"x_init": node.xpos(), "y_init": node.ypos()})
node.knob("x_init").setVisible(False)
node.knob("y_init").setVisible(False)
width = node.screenWidth()
height = node.screenHeight()
if "bdwidth" in node.knobs():
imprint(node, {"w_init": width, "h_init": height})
node.knob("w_init").setVisible(False)
node.knob("h_init").setVisible(False)
refresh_node(node)
def _update_nodes(
self, placeholder, nodes, considered_nodes, offset_y=None
):
"""Adjust backdrop nodes dimensions and positions.
Considering some nodes sizes.
Args:
nodes (list): list of nodes to update
considered_nodes (list): list of nodes to consider while updating
positions and dimensions
offset (int): distance between copies
"""
placeholder_node = nuke.toNode(placeholder.scene_identifier)
min_x, min_y, max_x, max_y = get_extreme_positions(considered_nodes)
diff_x = diff_y = 0
contained_nodes = [] # for backdrops
if offset_y is None:
width_ph = placeholder_node.screenWidth()
height_ph = placeholder_node.screenHeight()
diff_y = max_y - min_y - height_ph
diff_x = max_x - min_x - width_ph
contained_nodes = [placeholder_node]
min_x = placeholder_node.xpos()
min_y = placeholder_node.ypos()
else:
siblings = get_nodes_by_names(placeholder.data["siblings"])
minX, _, maxX, _ = get_extreme_positions(siblings)
diff_y = max_y - min_y + 20
diff_x = abs(max_x - min_x - maxX + minX)
contained_nodes = considered_nodes
if diff_y <= 0 and diff_x <= 0:
return
for node in nodes:
refresh_node(node)
if (
node == placeholder_node
or node in considered_nodes
):
continue
if (
not isinstance(node, nuke.BackdropNode)
or (
isinstance(node, nuke.BackdropNode)
and not set(contained_nodes) <= set(node.getNodes())
)
):
if offset_y is None and node.xpos() >= min_x:
node.setXpos(node.xpos() + diff_x)
if node.ypos() >= min_y:
node.setYpos(node.ypos() + diff_y)
else:
width = node.screenWidth()
height = node.screenHeight()
node.knob("bdwidth").setValue(width + diff_x)
node.knob("bdheight").setValue(height + diff_y)
refresh_node(node)
def _set_created_connections(self, placeholder):
"""
set inputs and outputs of created nodes"""
placeholder_node = nuke.toNode(placeholder.scene_identifier)
input_node, output_node = get_group_io_nodes(
placeholder.data["last_created"]
)
for node in placeholder_node.dependent():
for idx in range(node.inputs()):
if node.input(idx) == placeholder_node and output_node:
node.setInput(idx, output_node)
for node in placeholder_node.dependencies():
for idx in range(placeholder_node.inputs()):
if placeholder_node.input(idx) == node and input_node:
input_node.setInput(0, node)
def _create_sib_copies(self, placeholder):
""" creating copies of the palce_holder siblings (the ones who were
created with it) for the new nodes added
Returns :
copies (dict) : with copied nodes names and their copies
"""
copies = {}
siblings = get_nodes_by_names(placeholder.data["siblings"])
for node in siblings:
new_node = duplicate_node(node)
x_init = int(new_node.knob("x_init").getValue())
y_init = int(new_node.knob("y_init").getValue())
new_node.setXYpos(x_init, y_init)
if isinstance(new_node, nuke.BackdropNode):
w_init = new_node.knob("w_init").getValue()
h_init = new_node.knob("h_init").getValue()
new_node.knob("bdwidth").setValue(w_init)
new_node.knob("bdheight").setValue(h_init)
refresh_node(node)
if "repre_id" in node.knobs().keys():
node.removeKnob(node.knob("repre_id"))
copies[node.name()] = new_node
return copies
def _set_copies_connections(self, placeholder, copies):
"""Set inputs and outputs of the copies.
Args:
copies (dict): Copied nodes by their names.
"""
last_input, last_output = get_group_io_nodes(
placeholder.data["last_created"]
)
siblings = get_nodes_by_names(placeholder.data["siblings"])
siblings_input, siblings_output = get_group_io_nodes(siblings)
copy_input = copies[siblings_input.name()]
copy_output = copies[siblings_output.name()]
for node_init in siblings:
if node_init == siblings_output:
continue
node_copy = copies[node_init.name()]
for node in node_init.dependent():
for idx in range(node.inputs()):
if node.input(idx) != node_init:
continue
if node in siblings:
copies[node.name()].setInput(idx, node_copy)
else:
last_input.setInput(0, node_copy)
for node in node_init.dependencies():
for idx in range(node_init.inputs()):
if node_init.input(idx) != node:
continue
if node_init == siblings_input:
copy_input.setInput(idx, node)
elif node in siblings:
node_copy.setInput(idx, copies[node.name()])
else:
node_copy.setInput(idx, last_output)
siblings_input.setInput(0, copy_output)
def build_workfile_template(*args):
builder = NukeTemplateBuilder(registered_host())
builder.build_template()

View file

@@ -28,7 +28,7 @@ class LoadBackdropNodes(load.LoaderPlugin):
 representations = ["nk"]
 families = ["workfile", "nukenodes"]
-label = "Iport Nuke Nodes"
+label = "Import Nuke Nodes"
 order = 0
 icon = "eye"
 color = "white"

View file

@@ -7,8 +7,7 @@ import nuke
 from openpype.pipeline import publish
-class NukeRenderLocal(publish.Extractor):
-# TODO: rewrite docstring to nuke
+class NukeRenderLocal(publish.ExtractorColormanaged):
 """Render the current Nuke composition locally.
 Extract the result of savers by starting a comp render
@@ -67,6 +66,7 @@ class NukeRenderLocal(publish.Extractor):
 )
 ext = node["file_type"].value()
+colorspace = node["colorspace"].value()
 if "representations" not in instance.data:
 instance.data["representations"] = []
@@ -90,6 +90,13 @@ class NukeRenderLocal(publish.Extractor):
 'files': filenames,
 "stagingDir": out_dir
 }
+# inject colorspace data
+self.set_representation_colorspace(
+repre, instance.context,
+colorspace=colorspace
+)
 instance.data["representations"].append(repre)
 self.log.info("Extracted instance '{0}' to: {1}".format(

View file

@@ -7,28 +7,15 @@ Anything that isn't defined here is INTERNAL and unreliable for external use.
 from .launch_logic import stub
 from .pipeline import (
+PhotoshopHost,
 ls,
-list_instances,
-remove_instance,
-install,
-uninstall,
-containerise,
-get_context_data,
-update_context_data,
-get_context_title
+containerise
 )
 from .plugin import (
 PhotoshopLoader,
 get_unique_layer_name
 )
-from .workio import (
-file_extensions,
-has_unsaved_changes,
-save_file,
-open_file,
-current_file,
-work_root,
-)
 from .lib import (
 maintained_selection,
@@ -40,28 +27,14 @@ __all__ = [
 "stub",
 # pipeline
+"PhotoshopHost",
 "ls",
-"list_instances",
-"remove_instance",
-"install",
-"uninstall",
 "containerise",
-"get_context_data",
-"update_context_data",
-"get_context_title",
 # Plugin
 "PhotoshopLoader",
 "get_unique_layer_name",
-# workfiles
-"file_extensions",
-"has_unsaved_changes",
-"save_file",
-"open_file",
-"current_file",
-"work_root",
 # lib
 "maintained_selection",
 "maintained_visibility",

View file

@@ -1,5 +1,5 @@
 <?xml version='1.0' encoding='UTF-8'?>
-<ExtensionManifest ExtensionBundleId="com.openpype.PS.panel" ExtensionBundleVersion="1.0.11" Version="7.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
+<ExtensionManifest ExtensionBundleId="com.openpype.PS.panel" ExtensionBundleVersion="1.0.12" Version="7.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
 <ExtensionList>
 <Extension Id="com.openpype.PS.panel" Version="1.0.1" />
 </ExtensionList>

Binary file not shown.

View file

@@ -32,17 +32,6 @@
 });
 </script>
-<script type=text/javascript>
-$(function() {
-$("a#creator-button").bind("click", function() {
-RPC.call('Photoshop.creator_route').then(function (data) {
-}, function (error) {
-alert(error);
-});
-});
-});
-</script>
 <script type=text/javascript>
 $(function() {
 $("a#loader-button").bind("click", function() {
@@ -75,17 +64,6 @@
 });
 });
 </script>
-<script type=text/javascript>
-$(function() {
-$("a#subsetmanager-button").bind("click", function() {
-RPC.call('Photoshop.subsetmanager_route').then(function (data) {
-}, function (error) {
-alert(error);
-});
-});
-});
-</script>
 <script type=text/javascript>
 $(function() {
@@ -109,11 +87,9 @@
 <script type="text/javascript" src="./client/client.js"></script>
 <a href=# id=workfiles-button><button>Workfiles...</button></a>
-<a href=# id=creator-button><button>Create...</button></a>
 <a href=# id=loader-button><button>Load...</button></a>
 <a href=# id=publish-button><button>Publish...</button></a>
 <a href=# id=sceneinventory-button><button>Manage...</button></a>
-<a href=# id=subsetmanager-button><button>Subset Manager...</button></a>
 <a href=# id=experimental-button><button>Experimental Tools...</button></a>
 </body>
 </html>

View file

@@ -334,9 +334,6 @@ class PhotoshopRoute(WebSocketRoute):
 return await self.socket.call('photoshop.read')
 # panel routes for tools
-async def creator_route(self):
-self._tool_route("creator")
 async def workfiles_route(self):
 self._tool_route("workfiles")
@@ -344,14 +341,11 @@ class PhotoshopRoute(WebSocketRoute):
 self._tool_route("loader")
 async def publish_route(self):
-self._tool_route("publish")
+self._tool_route("publisher")
 async def sceneinventory_route(self):
 self._tool_route("sceneinventory")
-async def subsetmanager_route(self):
-self._tool_route("subsetmanager")
 async def experimental_tools_route(self):
 self._tool_route("experimental_tools")

View file

@@ -9,6 +9,7 @@ from openpype.lib import env_value_to_bool, Logger
 from openpype.modules import ModulesManager
 from openpype.pipeline import install_host
 from openpype.tools.utils import host_tools
+from openpype.tests.lib import is_in_tests
 from .launch_logic import ProcessLauncher, stub
@@ -20,9 +21,11 @@ def safe_excepthook(*args):
 def main(*subprocess_args):
-from openpype.hosts.photoshop import api
-install_host(api)
+from openpype.hosts.photoshop.api import PhotoshopHost
+host = PhotoshopHost()
+install_host(host)
 sys.excepthook = safe_excepthook
 # coloring in StdOutBroker
@@ -40,7 +43,7 @@ def main(*subprocess_args):
 webpublisher_addon.headless_publish,
 log,
 "ClosePS",
-os.environ.get("IS_TEST")
+is_in_tests()
 )
 elif env_value_to_bool("AVALON_PHOTOSHOP_WORKFILES_ON_LAUNCH",
 default=True):

View file

@@ -1,4 +1,5 @@
 import os
 from qtpy import QtWidgets
 import pyblish.api
@@ -12,6 +13,14 @@ from openpype.pipeline import (
 deregister_creator_plugin_path,
 AVALON_CONTAINER_ID,
 )
+from openpype.host import (
+HostBase,
+IWorkfileHost,
+ILoadHost,
+IPublishHost
+)
 from openpype.pipeline.load import any_outdated_containers
 from openpype.hosts.photoshop import PHOTOSHOP_HOST_DIR
@@ -26,6 +35,140 @@ CREATE_PATH = os.path.join(PLUGINS_DIR, "create")
 INVENTORY_PATH = os.path.join(PLUGINS_DIR, "inventory")
class PhotoshopHost(HostBase, IWorkfileHost, ILoadHost, IPublishHost):
name = "photoshop"
def install(self):
"""Install Photoshop-specific functionality needed for integration.
This function is called automatically when the Photoshop host is
installed via `install_host`.
"""
log.info("Installing OpenPype Photoshop...")
pyblish.api.register_host("photoshop")
pyblish.api.register_plugin_path(PUBLISH_PATH)
register_loader_plugin_path(LOAD_PATH)
register_creator_plugin_path(CREATE_PATH)
log.info(PUBLISH_PATH)
pyblish.api.register_callback(
"instanceToggled", on_pyblish_instance_toggled
)
register_event_callback("application.launched", on_application_launch)
def current_file(self):
try:
full_name = lib.stub().get_active_document_full_name()
if full_name and full_name != "null":
return os.path.normpath(full_name).replace("\\", "/")
except Exception:
pass
return None
def work_root(self, session):
return os.path.normpath(session["AVALON_WORKDIR"]).replace("\\", "/")
def open_workfile(self, filepath):
lib.stub().open(filepath)
return True
def save_workfile(self, filepath=None):
_, ext = os.path.splitext(filepath)
lib.stub().saveAs(filepath, ext[1:], True)
def get_current_workfile(self):
return self.current_file()
def workfile_has_unsaved_changes(self):
if self.current_file():
return not lib.stub().is_saved()
return False
def get_workfile_extensions(self):
return [".psd", ".psb"]
def get_containers(self):
return ls()
def get_context_data(self):
"""Get stored values for context (validation enable/disable etc)"""
meta = _get_stub().get_layers_metadata()
for item in meta:
if item.get("id") == "publish_context":
item.pop("id")
return item
return {}
def update_context_data(self, data, changes):
"""Store value needed for context"""
item = data
item["id"] = "publish_context"
_get_stub().imprint(item["id"], item)
def get_context_title(self):
"""Returns title for Creator window"""
project_name = legacy_io.Session["AVALON_PROJECT"]
asset_name = legacy_io.Session["AVALON_ASSET"]
task_name = legacy_io.Session["AVALON_TASK"]
return "{}/{}/{}".format(project_name, asset_name, task_name)
def list_instances(self):
"""List all created instances to publish from current workfile.
Pulls from File > File Info
Returns:
(list) of dictionaries matching instances format
"""
stub = _get_stub()
if not stub:
return []
instances = []
layers_meta = stub.get_layers_metadata()
if layers_meta:
for instance in layers_meta:
if instance.get("id") == "pyblish.avalon.instance":
instances.append(instance)
return instances
def remove_instance(self, instance):
"""Remove instance from current workfile metadata.
Updates metadata of current file in File > File Info and removes
icon highlight on group layer.
Args:
instance (dict): instance representation from subsetmanager model
"""
stub = _get_stub()
if not stub:
return
inst_id = instance.get("instance_id") or instance.get("uuid") # legacy
if not inst_id:
log.warning("No instance identifier for {}".format(instance))
return
stub.remove_instance(inst_id)
if instance.get("members"):
item = stub.get_layer(instance["members"][0])
if item:
stub.rename_layer(item.id,
item.name.replace(stub.PUBLISH_ICON, ''))
def check_inventory():
if not any_outdated_containers():
return
@@ -52,32 +195,6 @@ def on_pyblish_instance_toggled(instance, old_value, new_value):
 instance[0].Visible = new_value
def install():
"""Install Photoshop-specific functionality of avalon-core.
This function is called automatically on calling `api.install(photoshop)`.
"""
log.info("Installing OpenPype Photoshop...")
pyblish.api.register_host("photoshop")
pyblish.api.register_plugin_path(PUBLISH_PATH)
register_loader_plugin_path(LOAD_PATH)
register_creator_plugin_path(CREATE_PATH)
log.info(PUBLISH_PATH)
pyblish.api.register_callback(
"instanceToggled", on_pyblish_instance_toggled
)
register_event_callback("application.launched", on_application_launch)
def uninstall():
pyblish.api.deregister_plugin_path(PUBLISH_PATH)
deregister_loader_plugin_path(LOAD_PATH)
deregister_creator_plugin_path(CREATE_PATH)
def ls():
"""Yields containers from active Photoshop document
@@ -117,61 +234,6 @@ def ls():
 yield data
def list_instances():
"""List all created instances to publish from current workfile.
Pulls from File > File Info
For SubsetManager
Returns:
(list) of dictionaries matching instances format
"""
stub = _get_stub()
if not stub:
return []
instances = []
layers_meta = stub.get_layers_metadata()
if layers_meta:
for instance in layers_meta:
if instance.get("id") == "pyblish.avalon.instance":
instances.append(instance)
return instances
def remove_instance(instance):
"""Remove instance from current workfile metadata.
Updates metadata of current file in File > File Info and removes
icon highlight on group layer.
For SubsetManager
Args:
instance (dict): instance representation from subsetmanager model
"""
stub = _get_stub()
if not stub:
return
inst_id = instance.get("instance_id") or instance.get("uuid") # legacy
if not inst_id:
log.warning("No instance identifier for {}".format(instance))
return
stub.remove_instance(inst_id)
if instance.get("members"):
item = stub.get_layer(instance["members"][0])
if item:
stub.rename_layer(item.id,
item.name.replace(stub.PUBLISH_ICON, ''))
def _get_stub():
"""Handle pulling stub from PS to run operations on host
@@ -226,28 +288,17 @@ def containerise(
 return layer
-def get_context_data():
-"""Get stored values for context (validation enable/disable etc)"""
-meta = _get_stub().get_layers_metadata()
-for item in meta:
-if item.get("id") == "publish_context":
-item.pop("id")
-return item
-return {}
-def update_context_data(data, changes):
-"""Store value needed for context"""
-item = data
-item["id"] = "publish_context"
-_get_stub().imprint(item["id"], item)
-def get_context_title():
-"""Returns title for Creator window"""
-project_name = legacy_io.Session["AVALON_PROJECT"]
-asset_name = legacy_io.Session["AVALON_ASSET"]
-task_name = legacy_io.Session["AVALON_TASK"]
-return "{}/{}/{}".format(project_name, asset_name, task_name)
+def cache_and_get_instances(creator):
+"""Cache instances in shared data.
+Storing all instances as a list as legacy instances might be still present.
+Args:
+creator (Creator): Plugin which would like to get instances from host.
+Returns:
+List[]: list of all instances stored in metadata
+"""
+shared_key = "openpype.photoshop.instances"
+if shared_key not in creator.collection_shared_data:
+creator.collection_shared_data[shared_key] = \
+creator.host.list_instances()
+return creator.collection_shared_data[shared_key]

View file

@@ -1,49 +0,0 @@
"""Host API required Work Files tool"""
import os
from . import lib
def _active_document():
document_name = lib.stub().get_active_document_name()
if not document_name:
return None
return document_name
def file_extensions():
return [".psd", ".psb"]
def has_unsaved_changes():
if _active_document():
return not lib.stub().is_saved()
return False
def save_file(filepath):
_, ext = os.path.splitext(filepath)
lib.stub().saveAs(filepath, ext[1:], True)
def open_file(filepath):
lib.stub().open(filepath)
return True
def current_file():
try:
full_name = lib.stub().get_active_document_full_name()
if full_name and full_name != "null":
return os.path.normpath(full_name).replace("\\", "/")
except Exception:
pass
return None
def work_root(session):
return os.path.normpath(session["AVALON_WORKDIR"]).replace("\\", "/")

View file

@@ -9,6 +9,7 @@ from openpype.pipeline import (
 )
 from openpype.lib import prepare_template_data
 from openpype.pipeline.create import SUBSET_NAME_ALLOWED_SYMBOLS
+from openpype.hosts.photoshop.api.pipeline import cache_and_get_instances
 class ImageCreator(Creator):
@@ -19,7 +20,7 @@ class ImageCreator(Creator):
 description = "Image creator"
 def collect_instances(self):
-for instance_data in api.list_instances():
+for instance_data in cache_and_get_instances(self):
 # legacy instances have family=='image'
 creator_id = (instance_data.get("creator_identifier") or
 instance_data.get("family"))
@@ -97,6 +98,7 @@ class ImageCreator(Creator):
 data.update({"subset": subset_name})
 data.update({"members": [str(group.id)]})
+data.update({"layer_name": layer_name})
 data.update({"long_name": "_".join(layer_names_in_hierarchy)})
 new_instance = CreatedInstance(self.family, subset_name, data,
@@ -121,7 +123,7 @@ class ImageCreator(Creator):
 def remove_instances(self, instances):
 for instance in instances:
-api.remove_instance(instance)
+self.host.remove_instance(instance)
 self._remove_instance_from_context(instance)
 def get_default_variants(self):
@@ -163,6 +165,11 @@ class ImageCreator(Creator):
 def _clean_highlights(self, stub, item):
 return item.replace(stub.PUBLISH_ICON, '').replace(stub.LOADED_ICON,
 '')
-@classmethod
-def get_dynamic_data(cls, *args, **kwargs):
+def get_dynamic_data(self, variant, task_name, asset_doc,
+project_name, host_name, instance):
+if instance is not None:
+layer_name = instance.get("layer_name")
+if layer_name:
+return {"layer": layer_name}
 return {"layer": "{layer}"}

View file

@@ -1,120 +0,0 @@
import re
from qtpy import QtWidgets
from openpype.pipeline import create
from openpype.hosts.photoshop import api as photoshop
from openpype.lib import prepare_template_data
from openpype.pipeline.create import SUBSET_NAME_ALLOWED_SYMBOLS
class CreateImage(create.LegacyCreator):
"""Image folder for publish."""
name = "imageDefault"
label = "Image"
family = "image"
defaults = ["Main"]
def process(self):
groups = []
layers = []
create_group = False
stub = photoshop.stub()
if (self.options or {}).get("useSelection"):
multiple_instances = False
selection = stub.get_selected_layers()
self.log.info("selection {}".format(selection))
if len(selection) > 1:
# Ask user whether to create one image or image per selected
# item.
active_window = QtWidgets.QApplication.activeWindow()
msg_box = QtWidgets.QMessageBox(parent=active_window)
msg_box.setIcon(QtWidgets.QMessageBox.Warning)
msg_box.setText(
"Multiple layers selected."
"\nDo you want to make one image per layer?"
)
msg_box.setStandardButtons(
QtWidgets.QMessageBox.Yes |
QtWidgets.QMessageBox.No |
QtWidgets.QMessageBox.Cancel
)
ret = msg_box.exec_()
if ret == QtWidgets.QMessageBox.Yes:
multiple_instances = True
elif ret == QtWidgets.QMessageBox.Cancel:
return
if multiple_instances:
for item in selection:
if item.group:
groups.append(item)
else:
layers.append(item)
else:
group = stub.group_selected_layers(self.name)
groups.append(group)
elif len(selection) == 1:
# One selected item. Use group if its a LayerSet (group), else
# create a new group.
if selection[0].group:
groups.append(selection[0])
else:
layers.append(selection[0])
elif len(selection) == 0:
# No selection creates an empty group.
create_group = True
else:
group = stub.create_group(self.name)
groups.append(group)
if create_group:
group = stub.create_group(self.name)
groups.append(group)
for layer in layers:
stub.select_layers([layer])
group = stub.group_selected_layers(layer.name)
groups.append(group)
creator_subset_name = self.data["subset"]
layer_name = ''
for group in groups:
long_names = []
group.name = group.name.replace(stub.PUBLISH_ICON, ''). \
replace(stub.LOADED_ICON, '')
subset_name = creator_subset_name
if len(groups) > 1:
layer_name = re.sub(
"[^{}]+".format(SUBSET_NAME_ALLOWED_SYMBOLS),
"",
group.name
)
if "{layer}" not in subset_name.lower():
subset_name += "{Layer}"
layer_fill = prepare_template_data({"layer": layer_name})
subset_name = subset_name.format(**layer_fill)
if group.long_name:
for directory in group.long_name[::-1]:
name = directory.replace(stub.PUBLISH_ICON, '').\
replace(stub.LOADED_ICON, '')
long_names.append(name)
self.data.update({"subset": subset_name})
self.data.update({"uuid": str(group.id)})
self.data.update({"members": [str(group.id)]})
self.data.update({"long_name": "_".join(long_names)})
stub.imprint(group, self.data)
# reusing existing group, need to rename afterwards
if not create_group:
stub.rename_layer(group.id, stub.PUBLISH_ICON + group.name)
@classmethod
def get_dynamic_data(cls, *args, **kwargs):
return {"layer": "{layer}"}

View file

@@ -5,6 +5,7 @@ from openpype.pipeline import (
 CreatedInstance,
 legacy_io
 )
+from openpype.hosts.photoshop.api.pipeline import cache_and_get_instances
 class PSWorkfileCreator(AutoCreator):
@@ -17,7 +18,7 @@ class PSWorkfileCreator(AutoCreator):
 return []
 def collect_instances(self):
-for instance_data in api.list_instances():
+for instance_data in cache_and_get_instances(self):
 creator_id = instance_data.get("creator_identifier")
 if creator_id == self.identifier:
 subset_name = instance_data["subset"]
@@ -54,7 +55,7 @@ class PSWorkfileCreator(AutoCreator):
 }
 data.update(self.get_dynamic_data(
 self.default_variant, task_name, asset_doc,
-project_name, host_name
+project_name, host_name, None
 ))
 new_instance = CreatedInstance(

View file

@@ -22,6 +22,7 @@ from openpype_modules.webpublisher.lib import (
 get_batch_asset_task_info,
 parse_json
 )
+from openpype.tests.lib import is_in_tests
 class CollectBatchData(pyblish.api.ContextPlugin):
@@ -39,7 +40,7 @@ class CollectBatchData(pyblish.api.ContextPlugin):
 def process(self, context):
 self.log.info("CollectBatchData")
 batch_dir = os.environ.get("OPENPYPE_PUBLISH_DATA")
-if os.environ.get("IS_TEST"):
+if is_in_tests():
 self.log.debug("Automatic testing, no batch data, skipping")
 return

View file

@@ -6,6 +6,7 @@ import pyblish.api
 from openpype.lib import prepare_template_data
 from openpype.hosts.photoshop import api as photoshop
 from openpype.settings import get_project_settings
+from openpype.tests.lib import is_in_tests
 class CollectColorCodedInstances(pyblish.api.ContextPlugin):
@@ -46,7 +47,7 @@ class CollectColorCodedInstances(pyblish.api.ContextPlugin):
 def process(self, context):
 self.log.info("CollectColorCodedInstances")
 batch_dir = os.environ.get("OPENPYPE_PUBLISH_DATA")
-if (os.environ.get("IS_TEST") and
+if (is_in_tests() and
 (not batch_dir or not os.path.exists(batch_dir))):
 self.log.debug("Automatic testing, no batch data, skipping")
 return
return return

View file

@@ -43,7 +43,7 @@ class CollectExtensionVersion(pyblish.api.ContextPlugin):
 with open(manifest_url) as fp:
 content = fp.read()
-found = re.findall(r'(ExtensionBundleVersion=")([0-10\.]+)(")',
+found = re.findall(r'(ExtensionBundleVersion=")([0-9\.]+)(")',
 content)
 if found:
 expected_version = found[0][1]

View file

@@ -82,7 +82,7 @@ class CollectInstances(pyblish.api.ContextPlugin):
 if len(instance_names) != len(set(instance_names)):
 self.log.warning("Duplicate instances found. " +
-"Remove unwanted via SubsetManager")
+"Remove unwanted via Publisher")
 if len(instance_names) == 0 and self.flatten_subset_template:
 project_name = context.data["projectEntity"]["name"]

View file

@@ -0,0 +1,20 @@
<?xml version="1.0" encoding="UTF-8"?>
<root>
<error id="main">
<title>Asset does not match</title>
<description>
## Collected asset name is not the same as in the context
{msg}
### How to repair?
{repair_msg}
Refresh Publish afterwards (circle arrow at the bottom right).
If that's not the correct value, close the workfile and reopen it via
Workfiles to get the proper context asset name, OR disable this validator
and publish again if you are deliberately publishing to a different context.
(Context means the combination of project, asset name and task name.)
</description>
</error>
</root>
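The {msg} and {repair_msg} placeholders above are filled from the
validator's formatting_data when it raises PublishXmlValidationError, as
in the plugin change further below; a minimal sketch:

raise PublishXmlValidationError(
    self, msg,
    formatting_data={"msg": msg, "repair_msg": repair_msg}
)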

View file

@@ -10,7 +10,7 @@ Subset or layer name cannot contain specific characters (spaces etc) which could
 ### How to repair?
-You can fix this with "repair" button on the right.
+You can fix this with the "repair" button on the right, then press the Refresh publishing button at the bottom right.
 </description>
 <detail>
 ### __Detailed Info__ (optional)

View file

@@ -1,7 +1,11 @@
 import pyblish.api
 from openpype.pipeline import legacy_io
-from openpype.pipeline.publish import ValidateContentsOrder
+from openpype.pipeline.publish import (
+ValidateContentsOrder,
+PublishXmlValidationError,
+OptionalPyblishPluginMixin
+)
 from openpype.hosts.photoshop import api as photoshop
@@ -31,30 +35,38 @@ class ValidateInstanceAssetRepair(pyblish.api.Action):
 stub.imprint(instance[0], data)
-class ValidateInstanceAsset(pyblish.api.InstancePlugin):
+class ValidateInstanceAsset(OptionalPyblishPluginMixin,
+pyblish.api.InstancePlugin):
 """Validate the instance asset is the current selected context asset.
 As it might happen that multiple workfiles are opened, switching
 between them would mess with the selected context.
 In that case outputs might be written under the wrong asset!
 Repair action will use the Context asset value (from Workfiles or Launcher).
 Closing and reopening with Workfiles will refresh the Context value.
 """
 label = "Validate Instance Asset"
 hosts = ["photoshop"]
+optional = True
 actions = [ValidateInstanceAssetRepair]
 order = ValidateContentsOrder
 def process(self, instance):
 instance_asset = instance.data["asset"]
 current_asset = legacy_io.Session["AVALON_ASSET"]
-msg = (
-f"Instance asset {instance_asset} is not the same "
-f"as current context {current_asset}. PLEASE DO:\n"
-f"Repair with 'A' action to use '{current_asset}'.\n"
-f"If that's not correct value, close workfile and "
-f"reopen via Workfiles!"
-)
-assert instance_asset == current_asset, msg
+if instance_asset != current_asset:
+msg = (
+f"Instance asset {instance_asset} is not the same "
+f"as current context {current_asset}."
+)
+repair_msg = (
+f"Repair with 'Repair' button to use '{current_asset}'.\n"
+)
+formatting_data = {"msg": msg,
+"repair_msg": repair_msg}
+raise PublishXmlValidationError(self, msg,
+formatting_data=formatting_data)

View file

@@ -84,7 +84,7 @@ class ValidateNaming(pyblish.api.InstancePlugin):
 replace_char = ''
 def process(self, instance):
-help_msg = ' Use Repair action (A) in Pyblish to fix it.'
+help_msg = ' Use Repair button to fix it and then refresh publish.'
 layer = instance.data.get("layer")
 if layer:

View file

@@ -29,7 +29,7 @@ class ValidateSubsetUniqueness(pyblish.api.ContextPlugin):
 for item, count in collections.Counter(subset_names).items()
 if count > 1]
 msg = ("Instance subset names {} are not unique. ".format(non_unique) +
-"Remove duplicates via SubsetManager.")
+"Remove duplicates via Publisher.")
 formatting_data = {
 "non_unique": ",".join(non_unique)
 }

View file

@@ -10,7 +10,7 @@ STANDALONEPUBLISH_ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
 class StandAlonePublishAddon(OpenPypeModule, ITrayAction, IHostAddon):
-label = "Publish"
+label = "Publisher (legacy)"
 name = "standalonepublisher"
 host_name = "standalonepublisher"

View file

@@ -10,7 +10,7 @@ TRAYPUBLISH_ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
 class TrayPublishAddon(OpenPypeModule, IHostAddon, ITrayAction):
-label = "New Publish (beta)"
+label = "Publisher"
 name = "traypublisher"
 host_name = "traypublisher"
@@ -19,20 +19,9 @@ class TrayPublishAddon(OpenPypeModule, IHostAddon, ITrayAction):
 self.publish_paths = [
 os.path.join(TRAYPUBLISH_ROOT_DIR, "plugins", "publish")
 ]
-self._experimental_tools = None
 def tray_init(self):
-from openpype.tools.experimental_tools import ExperimentalTools
-self._experimental_tools = ExperimentalTools()
+return
-def tray_menu(self, *args, **kwargs):
-super(TrayPublishAddon, self).tray_menu(*args, **kwargs)
-traypublisher = self._experimental_tools.get("traypublisher")
-visible = False
-if traypublisher and traypublisher.enabled:
-visible = True
-self._action_item.setVisible(visible)
 def on_action_trigger(self):
 self.run_traypublisher()

View file

@@ -1368,6 +1368,7 @@ def get_app_environments_for_context(
 from openpype.modules import ModulesManager
 from openpype.pipeline import AvalonMongoDB, Anatomy
+from openpype.lib.openpype_version import is_running_staging
 # Avalon database connection
 dbcon = AvalonMongoDB()
@@ -1404,6 +1405,8 @@ def get_app_environments_for_context(
 "env": env
 })
 data["env"].update(anatomy.root_environments())
+if is_running_staging():
+data["env"]["OPENPYPE_IS_STAGING"] = "1"
 prepare_app_environments(data, env_group, modules_manager)
 prepare_context_environments(data, env_group, modules_manager)

View file

@@ -57,15 +57,66 @@ def is_running_from_build():
 return True
+def is_staging_enabled():
+return os.environ.get("OPENPYPE_USE_STAGING") == "1"
 def is_running_staging():
 """Currently used OpenPype is staging version.
+This function is not a 100% reliable check of the staging version. It is
+possible to have staging enabled yet be in a different version.
+The function is based on 4 factors:
+- env 'OPENPYPE_IS_STAGING' is set
+- current production version
+- current staging version
+- use staging is enabled
+First checks for the 'OPENPYPE_IS_STAGING' environment variable which can
+be set to '1'. The value should be set only when a process without access
+to OpenPypeVersion is launched (e.g. in DCCs). If the current version is
+the same as the production version it is expected not to be staging, and
+it doesn't matter what 'is_staging_enabled' would return. If the current
+version is the same as the staging version we are expected to be in
+staging. In all other cases 'is_staging_enabled' is used as the source of
+the output value.
+The function is used to decide which icon is used. To check e.g. updates
+the output should be combined with other functions from this file.
 Returns:
-bool: True if openpype version containt 'staging'.
+bool: Using staging version or not.
 """
-if "staging" in get_openpype_version():
+if os.environ.get("OPENPYPE_IS_STAGING") == "1":
 return True
-return False
+if not op_version_control_available():
+return False
+from openpype.settings import get_global_settings
+global_settings = get_global_settings()
+production_version = global_settings["production_version"]
+latest_version = None
+if not production_version or production_version == "latest":
+latest_version = get_latest_version(local=False, remote=True)
+production_version = latest_version
+current_version = get_openpype_version()
+if current_version == production_version:
+return False
+staging_version = global_settings["staging_version"]
+if not staging_version or staging_version == "latest":
+if latest_version is None:
+latest_version = get_latest_version(local=False, remote=True)
+staging_version = latest_version
+if current_version == staging_version:
+return True
+return is_staging_enabled()
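# Decision order implemented above (summary of the code, no new behavior):
# 1. OPENPYPE_IS_STAGING == "1"     -> True (explicit flag, e.g. in DCCs)
# 2. version control unavailable    -> False
# 3. current == production version  -> False
# 4. current == staging version     -> True
# 5. otherwise                      -> is_staging_enabled()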
# ----------------------------------------
@@ -131,13 +182,11 @@ def get_remote_versions(*args, **kwargs):
 return None
-def get_latest_version(staging=None, local=None, remote=None):
+def get_latest_version(local=None, remote=None):
 """Get latest version from repository path."""
-if staging is None:
-staging = is_running_staging()
 if op_version_control_available():
 return get_OpenPypeVersion().get_latest_version(
-staging=staging,
 local=local,
 remote=remote
 )
@@ -146,9 +195,9 @@ def get_latest_version(local=None, remote=None):
 def get_expected_studio_version(staging=None):
 """Expected production or staging version in studio."""
-if staging is None:
-staging = is_running_staging()
 if op_version_control_available():
+if staging is None:
+staging = is_staging_enabled()
 return get_OpenPypeVersion().get_expected_studio_version(staging)
 return None
@@ -158,7 +207,7 @@ def get_expected_version(staging=None):
 if expected_version is None:
 # Look for latest if expected version is not set in settings
 expected_version = get_latest_version(
-staging=staging,
+local=False,
 remote=True
 )
 return expected_version

View file

@@ -2,6 +2,7 @@ import os
 import attr
 import getpass
 import pyblish.api
+from datetime import datetime
 from openpype.lib import (
 env_value_to_bool,
@@ -10,6 +11,7 @@ from openpype.lib import (
 from openpype.pipeline import legacy_io
 from openpype_modules.deadline import abstract_submit_deadline
 from openpype_modules.deadline.abstract_submit_deadline import DeadlineJobInfo
+from openpype.tests.lib import is_in_tests
 @attr.s
@@ -48,9 +50,11 @@ class AfterEffectsSubmitDeadline(
 context = self._instance.context
+batch_name = os.path.basename(self._instance.data["source"])
+if is_in_tests():
+batch_name += datetime.now().strftime("%d%m%Y%H%M%S")
 dln_job_info.Name = self._instance.data["name"]
-dln_job_info.BatchName = os.path.basename(self._instance.
-data["source"])
+dln_job_info.BatchName = batch_name
 dln_job_info.Plugin = "AfterEffects"
 dln_job_info.UserName = context.data.get(
 "deadlineUser", getpass.getuser())
@@ -83,7 +87,8 @@ class AfterEffectsSubmitDeadline(
 "AVALON_APP_NAME",
 "OPENPYPE_DEV",
 "OPENPYPE_LOG_NO_COLORS",
-"OPENPYPE_VERSION"
+"OPENPYPE_VERSION",
+"IS_TEST"
 ]
 # Add mongo url if it's enabled
 if self._instance.context.data.get("deadlinePassMongoUrl"):
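# Illustrative example of the test-run suffix above (made-up values):
# a source of "shot010_comp.aep" submitted at 2023-01-14 23:40:18 yields
# BatchName "shot010_comp.aep14012023234018", so repeated automatic test
# runs land in separate Deadline batches instead of one shared batch.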

View file

@@ -5,6 +5,7 @@ from pathlib import Path
 from collections import OrderedDict
 from zipfile import ZipFile, is_zipfile
 import re
+from datetime import datetime
 import attr
 import pyblish.api
@@ -12,6 +13,7 @@ import pyblish.api
 from openpype.pipeline import legacy_io
 from openpype_modules.deadline import abstract_submit_deadline
 from openpype_modules.deadline.abstract_submit_deadline import DeadlineJobInfo
+from openpype.tests.lib import is_in_tests
 class _ZipFile(ZipFile):
@@ -261,7 +263,10 @@ class HarmonySubmitDeadline(
 job_info.Pool = self._instance.data.get("primaryPool")
 job_info.SecondaryPool = self._instance.data.get("secondaryPool")
 job_info.ChunkSize = self.chunk_size
-job_info.BatchName = os.path.basename(self._instance.data["source"])
+batch_name = os.path.basename(self._instance.data["source"])
+if is_in_tests():
+batch_name += datetime.now().strftime("%d%m%Y%H%M%S")
+job_info.BatchName = batch_name
 job_info.Department = self.department
 job_info.Group = self.group
@@ -275,7 +280,8 @@ class HarmonySubmitDeadline(
 "AVALON_APP_NAME",
 "OPENPYPE_DEV",
 "OPENPYPE_LOG_NO_COLORS",
-"OPENPYPE_VERSION"
+"OPENPYPE_VERSION",
+"IS_TEST"
 ]
 # Add mongo url if it's enabled
 if self._instance.context.data.get("deadlinePassMongoUrl"):

View file

@@ -1,5 +1,6 @@
 import os
 import json
+from datetime import datetime
 import requests
 import hou
@@ -7,6 +8,7 @@ import hou
 import pyblish.api
 from openpype.pipeline import legacy_io
+from openpype.tests.lib import is_in_tests
 class HoudiniSubmitPublishDeadline(pyblish.api.ContextPlugin):
@@ -60,6 +62,8 @@ class HoudiniSubmitPublishDeadline(pyblish.api.ContextPlugin):
 job_name = "{scene} [PUBLISH]".format(scene=scenename)
 batch_name = "{code} - {scene}".format(code=code, scene=scenename)
+if is_in_tests():
+batch_name += datetime.now().strftime("%d%m%Y%H%M%S")
 deadline_user = "roy"  # todo: get deadline user dynamically
 # Get only major.minor version of Houdini, ignore patch version

View file

@@ -1,6 +1,7 @@
 import os
 import json
 import getpass
+from datetime import datetime
 import requests
 import pyblish.api
@@ -8,6 +9,7 @@ import pyblish.api
 # import hou ???
 from openpype.pipeline import legacy_io
+from openpype.tests.lib import is_in_tests
 class HoudiniSubmitRenderDeadline(pyblish.api.InstancePlugin):
@@ -45,6 +47,9 @@ class HoudiniSubmitRenderDeadline(pyblish.api.InstancePlugin):
 if code:
 batch_name = "{0} - {1}".format(code, batch_name)
+if is_in_tests():
+batch_name += datetime.now().strftime("%d%m%Y%H%M%S")
 # Output driver to render
 driver = instance[0]

View file

@@ -37,6 +37,7 @@ from openpype.hosts.maya.api.lib import get_attr_in_layer
 from openpype_modules.deadline import abstract_submit_deadline
 from openpype_modules.deadline.abstract_submit_deadline import DeadlineJobInfo
+from openpype.tests.lib import is_in_tests
 def _validate_deadline_bool_value(instance, attribute, value):
@@ -121,6 +122,9 @@ class MayaSubmitDeadline(abstract_submit_deadline.AbstractSubmitDeadline):
 src_filepath = context.data["currentFile"]
 src_filename = os.path.basename(src_filepath)
+if is_in_tests():
+src_filename += datetime.now().strftime("%d%m%Y%H%M%S")
 job_info.Name = "%s - %s" % (src_filename, instance.name)
 job_info.BatchName = src_filename
 job_info.Plugin = instance.data.get("mayaRenderPlugin", "MayaBatch")
@@ -161,7 +165,8 @@ class MayaSubmitDeadline(abstract_submit_deadline.AbstractSubmitDeadline):
 "AVALON_TASK",
 "AVALON_APP_NAME",
 "OPENPYPE_DEV",
-"OPENPYPE_VERSION"
+"OPENPYPE_VERSION",
+"IS_TEST"
 ]
 # Add mongo url if it's enabled
 if self._instance.context.data.get("deadlinePassMongoUrl"):

View file

@@ -1,10 +1,12 @@
 import os
 import requests
+from datetime import datetime
 from maya import cmds
 from openpype.pipeline import legacy_io, PublishXmlValidationError
 from openpype.settings import get_project_settings
+from openpype.tests.lib import is_in_tests
 import pyblish.api
@@ -57,6 +59,8 @@ class MayaSubmitRemotePublishDeadline(pyblish.api.InstancePlugin):
 job_name = "{scene} [PUBLISH]".format(scene=scenename)
 batch_name = "{code} - {scene}".format(code=project_name,
 scene=scenename)
+if is_in_tests():
+batch_name += datetime.now().strftime("%d%m%Y%H%M%S")
 # Generate the payload for Deadline submission
 payload = {

View file

@@ -2,12 +2,14 @@ import os
 import re
 import json
 import getpass
+from datetime import datetime
 import requests
 import pyblish.api
 import nuke
 from openpype.pipeline import legacy_io
+from openpype.tests.lib import is_in_tests
 class NukeSubmitDeadline(pyblish.api.InstancePlugin):
@@ -141,8 +143,11 @@ class NukeSubmitDeadline(pyblish.api.InstancePlugin):
 responce_data=None
 ):
 render_dir = os.path.normpath(os.path.dirname(render_path))
-script_name = os.path.basename(script_path)
-jobname = "%s - %s" % (script_name, instance.name)
+batch_name = os.path.basename(script_path)
+jobname = "%s - %s" % (batch_name, instance.name)
+if is_in_tests():
+batch_name += datetime.now().strftime("%d%m%Y%H%M%S")
 output_filename_0 = self.preview_fname(render_path)
@@ -176,7 +181,7 @@ class NukeSubmitDeadline(pyblish.api.InstancePlugin):
 payload = {
 "JobInfo": {
 # Top-level group name
-"BatchName": script_name,
+"BatchName": batch_name,
 # Asset dependency to wait for at least the scene file to sync.
 # "AssetDependency0": script_path,

View file

@@ -18,6 +18,7 @@ from openpype.pipeline import (
 get_representation_path,
 legacy_io,
 )
+from openpype.tests.lib import is_in_tests
 from openpype.pipeline.farm.patterning import match_aov_pattern
@@ -206,6 +207,8 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
 more universal code. Muster post job is sent directly by Muster
 submitter, so this type of code isn't necessary for it.
+Returns:
+(str): deadline_publish_job_id
 """
 data = instance.data.copy()
 subset = data["subset"]
@@ -239,7 +242,8 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
 "OPENPYPE_PUBLISH_JOB": "1",
 "OPENPYPE_RENDER_JOB": "0",
 "OPENPYPE_REMOTE_JOB": "0",
-"OPENPYPE_LOG_NO_COLORS": "1"
+"OPENPYPE_LOG_NO_COLORS": "1",
+"IS_TEST": str(int(is_in_tests()))
 }
 # add environments from self.environ_keys
@@ -269,6 +273,9 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
 "--targets", "farm"
 ]
+if is_in_tests():
+args.append("--automatic-tests")
 # Generate the payload for Deadline submission
 payload = {
 "JobInfo": {
@@ -335,6 +342,10 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
 if not response.ok:
 raise Exception(response.text)
+deadline_publish_job_id = response.json()["_id"]
+return deadline_publish_job_id
 def _copy_extend_frames(self, instance, representation):
 """Copy existing frames from latest version.
@@ -996,7 +1007,8 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
 self.deadline_url = instance.data.get("deadlineUrl")
 assert self.deadline_url, "Requires Deadline Webservice URL"
-self._submit_deadline_post_job(instance, render_job, instances)
+deadline_publish_job_id = \
+self._submit_deadline_post_job(instance, render_job, instances)
 # publish job file
 publish_job = {
@@ -1014,6 +1026,9 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
 "instances": instances
 }
+if deadline_publish_job_id:
+publish_job["deadline_publish_job_id"] = deadline_publish_job_id
 # add audio to metadata file if available
 audio_file = context.data.get("audioFile")
 if audio_file and os.path.isfile(audio_file):

View file

@@ -333,10 +333,13 @@ def inject_openpype_environment(deadlinePlugin):
 "app": job.GetJobEnvironmentKeyValue("AVALON_APP_NAME"),
 "envgroup": "farm"
 }
+if job.GetJobEnvironmentKeyValue('IS_TEST'):
+args.append("--automatic-tests")
 if all(add_kwargs.values()):
 for key, value in add_kwargs.items():
 args.extend(["--{}".format(key), value])
 else:
 raise RuntimeError((
 "Missing required env vars: AVALON_PROJECT, AVALON_ASSET,"

View file

@@ -3,6 +3,7 @@ import socket
 import getpass
 from openpype_modules.ftrack.lib import BaseAction
+from openpype_modules.ftrack.ftrack_server.lib import get_host_ip
 class ActionWhereIRun(BaseAction):
@@ -53,8 +54,7 @@ class ActionWhereIRun(BaseAction):
 try:
 host_name = socket.gethostname()
 msgs["Hostname"] = host_name
-host_ip = socket.gethostbyname(host_name)
-msgs["IP"] = host_ip
+msgs["IP"] = get_host_ip() or "N/A"
 except Exception:
 pass

View file

@@ -26,6 +26,7 @@ from openpype_modules.ftrack import (
 )
 from openpype_modules.ftrack.lib import credentials
 from openpype_modules.ftrack.ftrack_server import socket_thread
+from openpype_modules.ftrack.ftrack_server.lib import get_host_ip
 class MongoPermissionsError(Exception):
@@ -245,11 +246,13 @@ def main_loop(ftrack_url):
 )
 host_name = socket.gethostname()
+host_ip = get_host_ip()
 main_info = [
 ["created_at", datetime.datetime.now().strftime("%Y.%m.%d %H:%M:%S")],
 ["Username", getpass.getuser()],
 ["Host Name", host_name],
-["Host IP", socket.gethostbyname(host_name)],
+["Host IP", host_ip or "N/A"],
 ["OpenPype executable", get_openpype_execute_args()[-1]],
 ["OpenPype version", get_openpype_version() or "N/A"],
 ["OpenPype build version", get_build_version() or "N/A"]

View file

@ -9,8 +9,9 @@ import time
import queue import queue
import collections import collections
import appdirs import appdirs
import pymongo import socket
import pymongo
import requests import requests
import ftrack_api import ftrack_api
import ftrack_api.session import ftrack_api.session
@ -32,6 +33,16 @@ TOPIC_STATUS_SERVER = "openpype.event.server.status"
TOPIC_STATUS_SERVER_RESULT = "openpype.event.server.status.result" TOPIC_STATUS_SERVER_RESULT = "openpype.event.server.status.result"
def get_host_ip():
host_name = socket.gethostname()
try:
return socket.gethostbyname(host_name)
except Exception:
pass
return None
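A minimal usage sketch for the helper above (the caller code is hypothetical):

    from openpype_modules.ftrack.ftrack_server.lib import get_host_ip

    # get_host_ip() swallows resolution errors and returns None,
    # so callers can fall back to a placeholder value.
    host_ip = get_host_ip()
    print("Host IP: {}".format(host_ip or "N/A"))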
class SocketBaseEventHub(ftrack_api.event.hub.EventHub): class SocketBaseEventHub(ftrack_api.event.hub.EventHub):
hearbeat_msg = b"hearbeat" hearbeat_msg = b"hearbeat"

View file

@ -15,7 +15,8 @@ from openpype_modules.ftrack.ftrack_server.lib import (
SocketSession, SocketSession,
StatusEventHub, StatusEventHub,
TOPIC_STATUS_SERVER, TOPIC_STATUS_SERVER,
TOPIC_STATUS_SERVER_RESULT TOPIC_STATUS_SERVER_RESULT,
get_host_ip
) )
from openpype.lib import ( from openpype.lib import (
Logger, Logger,
@ -29,10 +30,10 @@ log = Logger.get_logger("Event storer")
action_identifier = ( action_identifier = (
"event.server.status" + os.environ["FTRACK_EVENT_SUB_ID"] "event.server.status" + os.environ["FTRACK_EVENT_SUB_ID"]
) )
host_ip = socket.gethostbyname(socket.gethostname()) host_ip = get_host_ip()
action_data = { action_data = {
"label": "OpenPype Admin", "label": "OpenPype Admin",
"variant": "- Event server Status ({})".format(host_ip), "variant": "- Event server Status ({})".format(host_ip or "IP N/A"),
"description": "Get Infromation about event server", "description": "Get Infromation about event server",
"actionIdentifier": action_identifier "actionIdentifier": action_identifier
} }

View file

@ -199,7 +199,7 @@ class FtrackTrayWrapper:
failed_count = 0 failed_count = 0
# If thread failed test Ftrack and Mongo connection # If thread failed test Ftrack and Mongo connection
elif not self.thread_socket_server.isAlive(): elif not self.thread_socket_server.is_alive():
self.thread_socket_server.join() self.thread_socket_server.join()
self.thread_socket_server = None self.thread_socket_server = None
ftrack_accessible = False ftrack_accessible = False

View file

@ -1,10 +1,12 @@
import pyblish.api import pyblish.api
from openpype.lib.profiles_filtering import filter_profiles from openpype.lib.profiles_filtering import filter_profiles
from openpype.pipeline import legacy_io from openpype.lib import attribute_definitions
from openpype.pipeline import OpenPypePyblishPluginMixin
class CollectSlackFamilies(pyblish.api.InstancePlugin): class CollectSlackFamilies(pyblish.api.InstancePlugin,
OpenPypePyblishPluginMixin):
"""Collect family for Slack notification """Collect family for Slack notification
Expects configured profile in Expects configured profile in
@ -17,6 +19,18 @@ class CollectSlackFamilies(pyblish.api.InstancePlugin):
profiles = None profiles = None
@classmethod
def get_attribute_defs(cls):
return [
attribute_definitions.TextDef(
# Key under which it will be stored
"additional_message",
# Use plugin label as label for attribute
label="Additional Slack message",
placeholder="<Only if Slack is configured>"
)
]
def process(self, instance): def process(self, instance):
task_data = instance.data["anatomyData"].get("task", {}) task_data = instance.data["anatomyData"].get("task", {})
family = self.main_family_from_instance(instance) family = self.main_family_from_instance(instance)
@ -55,6 +69,11 @@ class CollectSlackFamilies(pyblish.api.InstancePlugin):
["token"]) ["token"])
instance.data["slack_token"] = slack_token instance.data["slack_token"] = slack_token
attribute_values = self.get_attr_values_from_data(instance.data)
additional_message = attribute_values.get("additional_message")
if additional_message:
instance.data["slack_additional_message"] = additional_message
def main_family_from_instance(self, instance): # TODO yank from integrate def main_family_from_instance(self, instance): # TODO yank from integrate
"""Returns main family of entered instance.""" """Returns main family of entered instance."""
family = instance.data.get("family") family = instance.data.get("family")

View file

@ -1,8 +1,11 @@
import os import os
import re
import six import six
import pyblish.api import pyblish.api
import copy import copy
from datetime import datetime from datetime import datetime
from abc import ABCMeta, abstractmethod
import time
from openpype.client import OpenPypeMongoConnection from openpype.client import OpenPypeMongoConnection
from openpype.lib.plugin_tools import prepare_template_data from openpype.lib.plugin_tools import prepare_template_data
@ -31,11 +34,15 @@ class IntegrateSlackAPI(pyblish.api.InstancePlugin):
review_path = self._get_review_path(instance) review_path = self._get_review_path(instance)
publish_files = set() publish_files = set()
message = ''
additional_message = instance.data.get("slack_additional_message")
token = instance.data["slack_token"]
if additional_message:
message = "{} \n".format(additional_message)
for message_profile in instance.data["slack_channel_message_profiles"]: for message_profile in instance.data["slack_channel_message_profiles"]:
message = self._get_filled_message(message_profile["message"], message += self._get_filled_message(message_profile["message"],
instance, instance,
review_path) review_path)
self.log.debug("message:: {}".format(message))
if not message: if not message:
return return
@ -49,18 +56,16 @@ class IntegrateSlackAPI(pyblish.api.InstancePlugin):
project = instance.context.data["anatomyData"]["project"]["code"] project = instance.context.data["anatomyData"]["project"]["code"]
for channel in message_profile["channels"]: for channel in message_profile["channels"]:
if six.PY2: if six.PY2:
msg_id, file_ids = \ client = SlackPython2Operations(token, self.log)
self._python2_call(instance.data["slack_token"],
channel,
message,
publish_files)
else: else:
msg_id, file_ids = \ client = SlackPython3Operations(token, self.log)
self._python3_call(instance.data["slack_token"],
channel,
message,
publish_files)
users, groups = client.get_users_and_groups()
message = self._translate_users(message, users, groups)
msg_id, file_ids = client.send_message(channel,
message,
publish_files)
if not msg_id: if not msg_id:
return return
@ -132,14 +137,14 @@ class IntegrateSlackAPI(pyblish.api.InstancePlugin):
fill_key = "task[{}]".format(key) fill_key = "task[{}]".format(key)
fill_pairs.append((fill_key, value)) fill_pairs.append((fill_key, value))
self.log.debug("fill_pairs ::{}".format(fill_pairs))
multiple_case_variants = prepare_template_data(fill_pairs) multiple_case_variants = prepare_template_data(fill_pairs)
fill_data.update(multiple_case_variants) fill_data.update(multiple_case_variants)
message = ''
message = None
try: try:
message = message_templ.format(**fill_data) message = self._escape_missing_keys(message_templ, fill_data).\
format(**fill_data)
except Exception: except Exception:
# shouldn't happen
self.log.warning( self.log.warning(
"Some keys are missing in {}".format(message_templ), "Some keys are missing in {}".format(message_templ),
exc_info=True) exc_info=True)
@ -162,27 +167,249 @@ class IntegrateSlackAPI(pyblish.api.InstancePlugin):
def _get_review_path(self, instance): def _get_review_path(self, instance):
"""Returns abs url for review if present in instance repres""" """Returns abs url for review if present in instance repres"""
published_path = None review_path = None
for repre in instance.data.get("representations", []): for repre in instance.data.get("representations", []):
tags = repre.get('tags', []) tags = repre.get('tags', [])
if (repre.get("review") if (repre.get("review")
or "review" in tags or "review" in tags
or "burnin" in tags): or "burnin" in tags):
if os.path.exists(repre["published_path"]): repre_review_path = (
published_path = repre["published_path"] repre.get("published_path") or
os.path.join(repre["stagingDir"], repre["files"])
)
if os.path.exists(repre_review_path):
review_path = repre_review_path
if "burnin" in tags: # burnin has precedence if exists if "burnin" in tags: # burnin has precedence if exists
break break
return published_path return review_path
def _python2_call(self, token, channel, message, publish_files): def _get_user_id(self, users, user_name):
from slackclient import SlackClient """Returns internal slack id for user name"""
user_id = None
user_name_lower = user_name.lower()
for user in users:
if (not user.get("deleted") and
(user_name_lower == user["name"].lower() or
# bots dont have display_name
user_name_lower == user["profile"].get("display_name",
'').lower() or
user_name_lower == user["profile"].get("real_name",
'').lower())):
user_id = user["id"]
break
return user_id
def _get_group_id(self, groups, group_name):
"""Returns internal group id for string name"""
group_id = None
for group in groups:
if (not group.get("date_delete") and
(group_name.lower() == group["name"].lower() or
group_name.lower() == group["handle"])):
group_id = group["id"]
break
return group_id
def _translate_users(self, message, users, groups):
"""Replace all occurences of @mentions with proper <@name> format."""
matches = re.findall(r"(?<!<)@[^ ]+", message)
in_quotes = re.findall(r"(?<!<)(['\"])(@[^'\"]+)", message)
for item in in_quotes:
matches.append(item[1])
if not matches:
return message
for orig_user in matches:
user_name = orig_user.replace("@", '')
slack_id = self._get_user_id(users, user_name)
mention = None
if slack_id:
mention = "<@{}>".format(slack_id)
else:
slack_id = self._get_group_id(groups, user_name)
if slack_id:
mention = "<!subteam^{}>".format(slack_id)
if mention:
message = message.replace(orig_user, mention)
return message
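A quick illustration of the mention translation above (user and group payloads are trimmed, IDs are made up):

    users = [{
        "id": "U123", "name": "jane",
        "profile": {"display_name": "Jane D", "real_name": "Jane Doe"},
    }]
    groups = [{"id": "S999", "name": "Compositors", "handle": "comp"}]
    # "@jane" resolves via the user list, "@comp" via the group handle:
    #   "@jane please check, cc @comp"
    #   -> "<@U123> please check, cc <!subteam^S999>"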
def _escape_missing_keys(self, message, fill_data):
"""Double escapes placeholder which are missing in 'fill_data'"""
placeholder_keys = re.findall(r"\{([^}]+)\}", message)
fill_keys = []
for key, value in fill_data.items():
fill_keys.append(key)
if isinstance(value, dict):
for child_key in value.keys():
fill_keys.append("{}[{}]".format(key, child_key))
not_matched = set(placeholder_keys) - set(fill_keys)
for not_matched_item in not_matched:
message = message.replace("{}".format(not_matched_item),
"{{{}}}".format(not_matched_item))
return message
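To make the escaping concrete, a standalone sketch with the same effect (it replaces the braced form directly instead of the bare key):

    import re

    def escape_missing_keys(message, fill_data):
        # double-brace placeholders without a matching key so that
        # str.format() keeps them verbatim instead of raising KeyError
        placeholder_keys = re.findall(r"\{([^}]+)\}", message)
        fill_keys = []
        for key, value in fill_data.items():
            fill_keys.append(key)
            if isinstance(value, dict):
                for child_key in value.keys():
                    fill_keys.append("{}[{}]".format(key, child_key))
        for key in set(placeholder_keys) - set(fill_keys):
            message = message.replace("{" + key + "}", "{{" + key + "}}")
        return message

    template = "{task[name]} done, review: {review_url}"
    escaped = escape_missing_keys(template, {"task": {"name": "comp"}})
    # escaped == "{task[name]} done, review: {{review_url}}"
    print(escaped.format(task={"name": "comp"}))
    # -> "comp done, review: {review_url}"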
@six.add_metaclass(ABCMeta)
class AbstractSlackOperations:
@abstractmethod
def _get_users_list(self):
"""Return response with user list, different methods Python 2 vs 3"""
raise NotImplementedError
@abstractmethod
def _get_usergroups_list(self):
"""Return response with user list, different methods Python 2 vs 3"""
raise NotImplementedError
@abstractmethod
def get_users_and_groups(self):
"""Return users and groups, different retry in Python 2 vs 3"""
raise NotImplementedError
@abstractmethod
def send_message(self, channel, message, publish_files):
"""Sends message to channel, different methods in Python 2 vs 3"""
pass
def _get_users(self):
"""Parse users.list response into list of users (dicts)"""
first = True
next_page = None
users = []
while first or next_page:
response = self._get_users_list()
first = False
next_page = response.get("response_metadata").get("next_cursor")
for user in response.get("members"):
users.append(user)
return users
def _get_groups(self):
"""Parses usergroups.list response into list of groups (dicts)"""
response = self._get_usergroups_list()
groups = []
for group in response.get("usergroups"):
groups.append(group)
return groups
def _enrich_error(self, error_str, channel):
"""Enhance known errors with more helpful notations."""
if 'not_in_channel' in error_str:
# there is no file.write.public scope, app must be explicitly in
# the channel
msg = " - application must added to channel '{}'.".format(channel)
error_str += msg + " Ask Slack admin."
return error_str
class SlackPython3Operations(AbstractSlackOperations):
def __init__(self, token, log):
from slack_sdk import WebClient
self.client = WebClient(token=token)
self.log = log
def _get_users_list(self):
return self.client.users_list()
def _get_usergroups_list(self):
return self.client.usergroups_list()
def get_users_and_groups(self):
from slack_sdk.errors import SlackApiError
while True:
try:
users = self._get_users()
groups = self._get_groups()
break
except SlackApiError as e:
retry_after = e.response.headers.get("Retry-After")
if retry_after:
print(
"Rate limit hit, sleeping for {}".format(retry_after))
time.sleep(int(retry_after))
else:
self.log.warning("Cannot pull user info, "
"mentions won't work", exc_info=True)
return [], []
return users, groups
def send_message(self, channel, message, publish_files):
from slack_sdk.errors import SlackApiError
try:
attachment_str = "\n\n Attachment links: \n"
file_ids = []
for published_file in publish_files:
response = self.client.files_upload(
file=published_file,
filename=os.path.basename(published_file))
attachment_str += "\n<{}|{}>".format(
response["file"]["permalink"],
os.path.basename(published_file))
file_ids.append(response["file"]["id"])
if publish_files:
message += attachment_str
response = self.client.chat_postMessage(
channel=channel,
text=message
)
return response.data["ts"], file_ids
except SlackApiError as e:
# You will get a SlackApiError if "ok" is False
error_str = self._enrich_error(str(e.response["error"]), channel)
self.log.warning("Error happened {}".format(error_str))
except Exception as e:
error_str = self._enrich_error(str(e), channel)
self.log.warning("Not SlackAPI error", exc_info=True)
return None, []
class SlackPython2Operations(AbstractSlackOperations):
def __init__(self, token, log):
from slackclient import SlackClient
self.client = SlackClient(token=token)
self.log = log
def _get_users_list(self):
return self.client.api_call("users.list")
def _get_usergroups_list(self):
return self.client.api_call("usergroups.list")
def get_users_and_groups(self):
while True:
try:
users = self._get_users()
groups = self._get_groups()
break
except Exception:
self.log.warning("Cannot pull user info, "
"mentions won't work", exc_info=True)
return [], []
return users, groups
def send_message(self, channel, message, publish_files):
try: try:
client = SlackClient(token)
attachment_str = "\n\n Attachment links: \n" attachment_str = "\n\n Attachment links: \n"
file_ids = [] file_ids = []
for p_file in publish_files: for p_file in publish_files:
with open(p_file, 'rb') as pf: with open(p_file, 'rb') as pf:
response = client.api_call( response = self.client.api_call(
"files.upload", "files.upload",
file=pf, file=pf,
channel=channel, channel=channel,
@ -203,7 +430,7 @@ class IntegrateSlackAPI(pyblish.api.InstancePlugin):
if publish_files: if publish_files:
message += attachment_str message += attachment_str
response = client.api_call( response = self.client.api_call(
"chat.postMessage", "chat.postMessage",
channel=channel, channel=channel,
text=message text=message
@ -220,46 +447,3 @@ class IntegrateSlackAPI(pyblish.api.InstancePlugin):
self.log.warning("Error happened: {}".format(error_str)) self.log.warning("Error happened: {}".format(error_str))
return None, [] return None, []
def _python3_call(self, token, channel, message, publish_files):
from slack_sdk import WebClient
from slack_sdk.errors import SlackApiError
try:
client = WebClient(token=token)
attachment_str = "\n\n Attachment links: \n"
file_ids = []
for published_file in publish_files:
response = client.files_upload(
file=published_file,
filename=os.path.basename(published_file))
attachment_str += "\n<{}|{}>".format(
response["file"]["permalink"],
os.path.basename(published_file))
file_ids.append(response["file"]["id"])
if publish_files:
message += attachment_str
response = client.chat_postMessage(
channel=channel,
text=message
)
return response.data["ts"], file_ids
except SlackApiError as e:
# You will get a SlackApiError if "ok" is False
error_str = self._enrich_error(str(e.response["error"]), channel)
self.log.warning("Error happened {}".format(error_str))
except Exception as e:
error_str = self._enrich_error(str(e), channel)
self.log.warning("Not SlackAPI error", exc_info=True)
return None, []
def _enrich_error(self, error_str, channel):
"""Enhance known errors with more helpful notations."""
if 'not_in_channel' in error_str:
# there is no file.write.public scope, app must be explicitly in
# the channel
msg = " - application must added to channel '{}'.".format(channel)
error_str += msg + " Ask Slack admin."
return error_str

View file

@ -165,7 +165,7 @@ class DropboxHandler(AbstractProvider):
Returns: Returns:
(boolean) (boolean)
""" """
return self.presets["enabled"] and self.dbx is not None return self.presets.get("enabled") and self.dbx is not None
@classmethod @classmethod
def get_configurable_items(cls): def get_configurable_items(cls):

View file

@ -119,7 +119,7 @@ class GDriveHandler(AbstractProvider):
Returns: Returns:
(boolean) (boolean)
""" """
return self.presets["enabled"] and self.service is not None return self.presets.get("enabled") and self.service is not None
@classmethod @classmethod
def get_system_settings_schema(cls): def get_system_settings_schema(cls):

View file

@ -169,7 +169,7 @@ def resolve_paths(module, file_path, project_name,
return local_file_path, remote_file_path return local_file_path, remote_file_path
def site_is_working(module, project_name, site_name): def _site_is_working(module, project_name, site_name, site_config):
""" """
Confirm that 'site_name' is configured correctly for 'project_name'. Confirm that 'site_name' is configured correctly for 'project_name'.
@ -179,54 +179,17 @@ def site_is_working(module, project_name, site_name):
module (SyncServerModule) module (SyncServerModule)
project_name(string): project_name(string):
site_name(string): site_name(string):
site_config (dict): configuration for site from Settings
Returns Returns
(bool) (bool)
""" """
if _get_configured_sites(module, project_name).get(site_name): provider = module.get_provider_for_site(site=site_name)
return True handler = lib.factory.get_provider(provider,
return False project_name,
site_name,
presets=site_config)
return handler.is_active()
def _get_configured_sites(module, project_name):
"""
Loops through settings and looks for configured sites and checks
its handlers for particular 'project_name'.
Args:
project_setting(dict): dictionary from Settings
only_project_name(string, optional): only interested in
particular project
Returns:
(dict of dict)
{'ProjectA': {'studio':True, 'gdrive':False}}
"""
settings = module.get_sync_project_setting(project_name)
return _get_configured_sites_from_setting(module, project_name, settings)
def _get_configured_sites_from_setting(module, project_name, project_setting):
if not project_setting.get("enabled"):
return {}
initiated_handlers = {}
configured_sites = {}
all_sites = module._get_default_site_configs()
all_sites.update(project_setting.get("sites"))
for site_name, config in all_sites.items():
provider = module.get_provider_for_site(site=site_name)
handler = initiated_handlers.get((provider, site_name))
if not handler:
handler = lib.factory.get_provider(provider,
project_name,
site_name,
presets=config)
initiated_handlers[(provider, site_name)] = \
handler
if handler.is_active():
configured_sites[site_name] = True
return configured_sites
class SyncServerThread(threading.Thread): class SyncServerThread(threading.Thread):
@ -288,7 +251,8 @@ class SyncServerThread(threading.Thread):
for project_name in enabled_projects: for project_name in enabled_projects:
preset = self.module.sync_project_settings[project_name] preset = self.module.sync_project_settings[project_name]
local_site, remote_site = self._working_sites(project_name) local_site, remote_site = self._working_sites(project_name,
preset)
if not all([local_site, remote_site]): if not all([local_site, remote_site]):
continue continue
@ -464,7 +428,7 @@ class SyncServerThread(threading.Thread):
self.timer.cancel() self.timer.cancel()
self.timer = None self.timer = None
def _working_sites(self, project_name): def _working_sites(self, project_name, sync_config):
if self.module.is_project_paused(project_name): if self.module.is_project_paused(project_name):
self.log.debug("Both sites same, skipping") self.log.debug("Both sites same, skipping")
return None, None return None, None
@ -476,9 +440,12 @@ class SyncServerThread(threading.Thread):
local_site, remote_site)) local_site, remote_site))
return None, None return None, None
configured_sites = _get_configured_sites(self.module, project_name) local_site_config = sync_config.get('sites')[local_site]
if not all([local_site in configured_sites, remote_site_config = sync_config.get('sites')[remote_site]
remote_site in configured_sites]): if not all([_site_is_working(self.module, project_name, local_site,
local_site_config),
_site_is_working(self.module, project_name, remote_site,
remote_site_config)]):
self.log.debug( self.log.debug(
"Some of the sites {} - {} is not working properly".format( "Some of the sites {} - {} is not working properly".format(
local_site, remote_site local_site, remote_site

View file

@ -0,0 +1,464 @@
from copy import deepcopy
import re
import os
import sys
import json
import platform
import contextlib
import tempfile
from openpype import PACKAGE_DIR
from openpype.settings import get_project_settings
from openpype.lib import (
StringTemplate,
run_openpype_process,
Logger
)
from openpype.pipeline import Anatomy
log = Logger.get_logger(__name__)
@contextlib.contextmanager
def _make_temp_json_file():
"""Wrapping function for json temp file
"""
try:
# Store dumped json to temporary file
temporary_json_file = tempfile.NamedTemporaryFile(
mode="w", suffix=".json", delete=False
)
temporary_json_file.close()
temporary_json_filepath = temporary_json_file.name.replace(
"\\", "/"
)
yield temporary_json_filepath
except IOError as _error:
raise IOError(
"Not able to create temp json file: {}".format(
_error
)
)
finally:
# Remove the temporary json
os.remove(temporary_json_filepath)
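A minimal usage sketch of the context manager above (normally the wrapped ocio script writes the JSON):

    import json

    with _make_temp_json_file() as tmp_json_path:
        # a subprocess would normally write its result here
        with open(tmp_json_path, "w") as tmp:
            json.dump({"ok": True}, tmp)
        with open(tmp_json_path) as tmp:
            data = json.load(tmp)
    # the temporary file is removed when the with-block exits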
def get_ocio_config_script_path():
"""Get path to ocio wrapper script
Returns:
str: path string
"""
return os.path.normpath(
os.path.join(
PACKAGE_DIR,
"scripts",
"ocio_wrapper.py"
)
)
def get_imageio_colorspace_from_filepath(
path, host_name, project_name,
config_data=None, file_rules=None,
project_settings=None,
validate=True
):
"""Get colorspace name from filepath
ImageIO Settings file rules are tested for matching rule.
Args:
path (str): path string, file rule pattern is tested on it
host_name (str): host name
project_name (str): project name
config_data (dict, optional): config path and template in dict.
Defaults to None.
file_rules (dict, optional): file rule data from settings.
Defaults to None.
project_settings (dict, optional): project settings. Defaults to None.
validate (bool, optional): should resulting colorspace be validated
with config file? Defaults to True.
Returns:
str: name of colorspace
"""
if not any([config_data, file_rules]):
project_settings = project_settings or get_project_settings(
project_name
)
config_data = get_imageio_config(
project_name, host_name, project_settings)
file_rules = get_imageio_file_rules(
project_name, host_name, project_settings)
# match file rule from path
colorspace_name = None
for _frule_name, file_rule in file_rules.items():
pattern = file_rule["pattern"]
extension = file_rule["ext"]
ext_match = re.match(
r".*(?=.{})".format(extension), path
)
file_match = re.search(
pattern, path
)
if ext_match and file_match:
colorspace_name = file_rule["colorspace"]
if not colorspace_name:
log.info("No imageio file rule matched input path: '{}'".format(
path
))
return None
# validate matching colorspace with config
if validate and config_data:
validate_imageio_colorspace_in_config(
config_data["path"], colorspace_name)
return colorspace_name
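A hypothetical call showing the rule matching above (project name, host name and path are placeholders):

    colorspace = get_imageio_colorspace_from_filepath(
        "/renders/sh010_beauty.1001.exr",
        host_name="nuke",
        project_name="demo_project",
    )
    # returns e.g. "ACES - ACEScg" when a file rule with ext "exr"
    # and a matching pattern exists, otherwise None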
def parse_colorspace_from_filepath(
path, host_name, project_name,
config_data=None,
project_settings=None
):
"""Parse colorspace name from filepath
An input path can contain a colorspace name as part of the file name
or as a folder name.
Args:
path (str): path string
host_name (str): host name
project_name (str): project name
config_data (dict, optional): config path and template in dict.
Defaults to None.
project_settings (dict, optional): project settings. Defaults to None.
Returns:
str: name of colorspace
"""
if not config_data:
project_settings = project_settings or get_project_settings(
project_name
)
config_data = get_imageio_config(
project_name, host_name, project_settings)
config_path = config_data["path"]
# match file rule from path
colorspace_name = None
colorspaces = get_ocio_config_colorspaces(config_path)
for colorspace_key in colorspaces:
# check underscored variant of colorspace name
# since we are reformatting it in integrate.py
if colorspace_key.replace(" ", "_") in path:
colorspace_name = colorspace_key
break
if colorspace_key in path:
colorspace_name = colorspace_key
break
if not colorspace_name:
log.info("No matching colorspace in config '{}' for path: '{}'".format(
config_path, path
))
return None
return colorspace_name
def validate_imageio_colorspace_in_config(config_path, colorspace_name):
"""Validator making sure colorspace name is used in config.ocio
Args:
config_path (str): path leading to config.ocio file
colorspace_name (str): tested colorspace name
Raises:
KeyError: missing colorspace name
Returns:
bool: True if exists
"""
colorspaces = get_ocio_config_colorspaces(config_path)
if colorspace_name not in colorspaces:
raise KeyError(
"Missing colorspace '{}' in config file '{}'".format(
colorspace_name, config_path)
)
return True
def get_ocio_config_colorspaces(config_path):
"""Get all colorspace data
Wrapper function for aggregating all names and its families.
Families can be used for building menu and submenus in gui.
Args:
config_path (str): path leading to config.ocio file
Returns:
dict: colorspace and family in couple
"""
if sys.version_info[0] == 2:
return get_colorspace_data_subprocess(config_path)
from ..scripts.ocio_wrapper import get_colorspace_data
return get_colorspace_data(config_path)
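The returned mapping pairs each colorspace name with its family, which is handy for grouping a menu (values are illustrative):

    colorspaces = get_ocio_config_colorspaces("/abs/path/to/config.ocio")
    # e.g. {"ACES - ACEScg": "ACES", "Utility - Raw": "Utility"}
    menu = {}
    for name, family in colorspaces.items():
        menu.setdefault(family, []).append(name)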
def get_colorspace_data_subprocess(config_path):
"""Get colorspace data via subprocess
Wrapper for Python 2 hosts.
Args:
config_path (str): path leading to config.ocio file
Returns:
dict: colorspace and family in couple
"""
with _make_temp_json_file() as tmp_json_path:
# Prepare subprocess arguments
args = [
"run", get_ocio_config_script_path(),
"config", "get_colorspace",
"--in_path", config_path,
"--out_path", tmp_json_path
]
log.info("Executing: {}".format(" ".join(args)))
process_kwargs = {
"logger": log,
"env": {}
}
run_openpype_process(*args, **process_kwargs)
# return all colorspaces
return_json_data = open(tmp_json_path).read()
return json.loads(return_json_data)
def get_ocio_config_views(config_path):
"""Get all viewer data
Wrapper function for aggregating all display and related viewers.
Key can be used for building gui menu with submenus.
Args:
config_path (str): path leading to config.ocio file
Returns:
dict: `display/viewer` and viewer data
"""
if sys.version_info[0] == 2:
return get_views_data_subprocess(config_path)
from ..scripts.ocio_wrapper import get_views_data
return get_views_data(config_path)
def get_views_data_subprocess(config_path):
"""Get viewers data via subprocess
Wrapper for Python 2 hosts.
Args:
config_path (str): path leading to config.ocio file
Returns:
dict: `display/viewer` and viewer data
"""
with _make_temp_json_file() as tmp_json_path:
# Prepare subprocess arguments
args = [
"run", get_ocio_config_script_path(),
"config", "get_views",
"--in_path", config_path,
"--out_path", tmp_json_path
]
log.info("Executing: {}".format(" ".join(args)))
process_kwargs = {
"logger": log,
"env": {}
}
run_openpype_process(*args, **process_kwargs)
# return all views
return_json_data = open(tmp_json_path).read()
return json.loads(return_json_data)
def get_imageio_config(
project_name, host_name,
project_settings=None,
anatomy_data=None,
anatomy=None
):
"""Returns config data from settings
Config path is formatted in `path` key
and original settings input is saved into `template` key.
Args:
project_name (str): project name
host_name (str): host name
project_settings (dict, optional): project settings.
Defaults to None.
anatomy_data (dict, optional): anatomy formatting data.
Defaults to None.
anatomy (lib.Anatomy, optional): Anatomy object.
Defaults to None.
Returns:
dict or bool: config path data or None
"""
project_settings = project_settings or get_project_settings(project_name)
anatomy = anatomy or Anatomy(project_name)
if not anatomy_data:
from openpype.pipeline.context_tools import (
get_template_data_from_session)
anatomy_data = get_template_data_from_session()
# add project roots to anatomy data
anatomy_data["root"] = anatomy.roots
anatomy_data["platform"] = platform.system().lower()
# get colorspace settings
imageio_global, imageio_host = _get_imageio_settings(
project_settings, host_name)
config_host = imageio_host["ocio_config"]
if config_host["enabled"]:
print(config_host["filepath"])
config_data = _get_config_data(
config_host["filepath"], anatomy_data
)
else:
config_data = None
if not config_data:
# get config path from either global or host_name
config_global = imageio_global["ocio_config"]
print(config_global["filepath"])
config_data = _get_config_data(
config_global["filepath"], anatomy_data
)
if not config_data:
raise FileExistsError(
"No OCIO config found in settings. It is "
"either missing or there is typo in path inputs"
)
return config_data
def _get_config_data(path_list, anatomy_data):
"""Path formating in interation
Args:
path_list (list[str]): list of abs paths
anatomy_data (dict): formatting data
Returns:
dict: config data
"""
# first try host config paths
for path_ in path_list:
formated_path = _format_path(path_, anatomy_data)
if not os.path.exists(formated_path):
continue
return {
"path": os.path.normpath(formated_path),
"template": path_
}
def _format_path(tempate_path, anatomy_data):
"""Single template path formating.
Args:
tempate_path (str): template string
anatomy_data (dict): formatting data
Returns:
str: absolute formatted path
"""
formatting_data = deepcopy(anatomy_data)
# format the path for potential env vars
formatting_data.update(dict(**os.environ))
# format path for anatomy keys
formatted_path = StringTemplate(tempate_path).format(
formatting_data)
return os.path.abspath(formatted_path)
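A worked example of the formatting above (the env var and template keys are hypothetical):

    import os

    os.environ["OCIO_ROOT"] = "/pipeline/ocio"
    path = _format_path(
        "{OCIO_ROOT}/{project[name]}/config.ocio",
        {"project": {"name": "demo"}}
    )
    # -> "/pipeline/ocio/demo/config.ocio" as an absolute path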
def get_imageio_file_rules(project_name, host_name, project_settings=None):
"""Get ImageIO File rules from project settings
Args:
project_name (str): project name
host_name (str): host name
project_settings (dict, optional): project settings.
Defaults to None.
Returns:
dict: file rules data
"""
project_settings = project_settings or get_project_settings(project_name)
imageio_global, imageio_host = _get_imageio_settings(
project_settings, host_name)
# get file rules from global and host_name
frules_global = imageio_global["file_rules"]
frules_host = imageio_host["file_rules"]
# compile file rules dictionary
file_rules = {}
if frules_global["enabled"]:
file_rules.update(frules_global["rules"])
if frules_host["enabled"]:
file_rules.update(frules_host["rules"])
return file_rules
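Note the merge order above: host rules are applied after global ones, so a host rule with the same key wins. Schematically (the settings content is made up):

    frules_global = {"exr": {"pattern": ".*", "ext": "exr",
                             "colorspace": "scene_linear"}}
    frules_host = {"exr": {"pattern": ".*", "ext": "exr",
                           "colorspace": "ACES - ACEScg"}}
    file_rules = {}
    file_rules.update(frules_global)
    file_rules.update(frules_host)
    # file_rules["exr"]["colorspace"] == "ACES - ACEScg"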
def _get_imageio_settings(project_settings, host_name):
"""Get ImageIO settings for global and host
Args:
project_settings (dict): project settings.
Defaults to None.
host_name (str): host name
Returns:
tuple[dict, dict]: image io settings for global and host
"""
# get image io from global and host_name
imageio_global = project_settings["global"]["imageio"]
imageio_host = project_settings[host_name]["imageio"]
return imageio_global, imageio_host

View file

@ -608,7 +608,7 @@ def discover_legacy_creator_plugins():
plugin.apply_settings(project_settings, system_settings) plugin.apply_settings(project_settings, system_settings)
except Exception: except Exception:
log.warning( log.warning(
"Failed to apply settings to loader {}".format( "Failed to apply settings to creator {}".format(
plugin.__name__ plugin.__name__
), ),
exc_info=True exc_info=True

View file

@ -19,6 +19,7 @@ from .publish_plugins import (
RepairContextAction, RepairContextAction,
Extractor, Extractor,
ExtractorColormanaged,
) )
from .lib import ( from .lib import (
@ -63,6 +64,7 @@ __all__ = (
"RepairContextAction", "RepairContextAction",
"Extractor", "Extractor",
"ExtractorColormanaged",
"get_publish_template_name", "get_publish_template_name",

View file

@ -1,6 +1,6 @@
import inspect import inspect
from abc import ABCMeta from abc import ABCMeta
from pprint import pformat
import pyblish.api import pyblish.api
from pyblish.plugin import MetaPlugin, ExplicitMetaPlugin from pyblish.plugin import MetaPlugin, ExplicitMetaPlugin
@ -13,6 +13,12 @@ from .lib import (
get_instance_staging_dir, get_instance_staging_dir,
) )
from openpype.pipeline.colorspace import (
get_imageio_colorspace_from_filepath,
get_imageio_config,
get_imageio_file_rules
)
class AbstractMetaInstancePlugin(ABCMeta, MetaPlugin): class AbstractMetaInstancePlugin(ABCMeta, MetaPlugin):
pass pass
@ -250,12 +256,12 @@ class RepairContextAction(pyblish.api.Action):
if not hasattr(plugin, "repair"): if not hasattr(plugin, "repair"):
raise RuntimeError("Plug-in does not have repair method.") raise RuntimeError("Plug-in does not have repair method.")
# Get the errored instances # Get the failed instances
self.log.info("Finding failed instances..") self.log.info("Finding failed instances..")
errored_plugins = get_errored_plugins_from_context(context) failed_plugins = get_errored_plugins_from_context(context)
# Apply pyblish.logic to get the instances for the plug-in # Apply pyblish.logic to get the instances for the plug-in
if plugin in errored_plugins: if plugin in failed_plugins:
self.log.info("Attempting fix ...") self.log.info("Attempting fix ...")
plugin.repair(context) plugin.repair(context)
@ -280,3 +286,143 @@ class Extractor(pyblish.api.InstancePlugin):
""" """
return get_instance_staging_dir(instance) return get_instance_staging_dir(instance)
class ExtractorColormanaged(Extractor):
"""Extractor base for color managed image data.
Each Extractor intended to export pixel data representation
should inherit from this class to allow color managed data.
Class implements "get_colorspace_settings" and
"set_representation_colorspace" functions used
for injecting colorspace data into representation data for further
integration into the db document.
"""
allowed_ext = [
"cin", "dpx", "avi", "dv", "gif", "flv", "mkv", "mov", "mpg", "mpeg",
"mp4", "m4v", "mxf", "iff", "z", "ifl", "jpeg", "jpg", "jfif", "lut",
"1dl", "exr", "pic", "png", "ppm", "pnm", "pgm", "pbm", "rla", "rpf",
"sgi", "rgba", "rgb", "bw", "tga", "tiff", "tif", "img"
]
@staticmethod
def get_colorspace_settings(context):
"""Retuns solved settings for the host context.
Args:
context (publish.Context): publishing context
Returns:
tuple[dict, dict]: config data and file rules
"""
if "imageioSettings" in context.data:
return context.data["imageioSettings"]
project_name = context.data["projectName"]
host_name = context.data["hostName"]
anatomy_data = context.data["anatomyData"]
project_settings_ = context.data["project_settings"]
config_data = get_imageio_config(
project_name, host_name,
project_settings=project_settings_,
anatomy_data=anatomy_data
)
file_rules = get_imageio_file_rules(
project_name, host_name,
project_settings=project_settings_
)
# caching settings for future instance processing
context.data["imageioSettings"] = (config_data, file_rules)
return config_data, file_rules
def set_representation_colorspace(
self, representation, context,
colorspace=None,
colorspace_settings=None
):
"""Sets colorspace data to representation.
Args:
representation (dict): publishing representation
context (publish.Context): publishing context
colorspace (str, optional): colorspace name. Defaults to None.
colorspace_settings (tuple[dict, dict], optional):
Settings for config_data and file_rules.
Defaults to None.
Example:
```
{
# for other publish plugins and loaders
"colorspace": "linear",
"configData": {
# for future reference in case it is needed
"path": "/abs/path/to/config.ocio",
# for other plugins within remote publish cases
"template": "{project[root]}/path/to/config.ocio"
}
}
```
"""
if colorspace_settings is None:
colorspace_settings = self.get_colorspace_settings(context)
# unpack colorspace settings
config_data, file_rules = colorspace_settings
if not config_data:
# warn in case no colorspace path was defined
self.log.warning("No colorspace management was defined")
return
self.log.info("Config data is : `{}`".format(
config_data))
ext = representation["ext"]
# check extension
self.log.debug("__ ext: `{}`".format(ext))
if ext.lower() not in self.allowed_ext:
return
project_name = context.data["projectName"]
host_name = context.data["hostName"]
project_settings = context.data["project_settings"]
# get one filename
filename = representation["files"]
if isinstance(filename, list):
filename = filename.pop()
self.log.debug("__ filename: `{}`".format(
filename))
# get matching colorspace from rules
colorspace = colorspace or get_imageio_colorspace_from_filepath(
filename, host_name, project_name,
config_data=config_data,
file_rules=file_rules,
project_settings=project_settings
)
self.log.debug("__ colorspace: `{}`".format(
colorspace))
# infuse data to representation
if colorspace:
colorspace_data = {
"colorspace": colorspace,
"configData": config_data
}
# update data key
representation["colorspaceData"] = colorspace_data
self.log.debug("__ colorspace_data: `{}`".format(
pformat(colorspace_data)))
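A minimal sketch of an extractor built on the class above (plugin name, family and file names are hypothetical):

    class ExtractReviewMov(ExtractorColormanaged):
        label = "Extract Review Mov"
        families = ["review"]

        def process(self, instance):
            staging_dir = self.staging_dir(instance)
            representation = {
                "name": "mov",
                "ext": "mov",
                "files": "review.mov",
                "stagingDir": staging_dir,
            }
            instance.data.setdefault("representations", [])
            instance.data["representations"].append(representation)
            # adds "colorspaceData" when config and file rules resolve
            self.set_representation_colorspace(
                representation, instance.context)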

View file

@ -42,7 +42,9 @@ from openpype.pipeline.load import (
get_contexts_for_repre_docs, get_contexts_for_repre_docs,
load_with_repre_context, load_with_repre_context,
) )
from openpype.pipeline.create import get_legacy_creator_by_name from openpype.pipeline.create import (
discover_legacy_creator_plugins
)
class TemplateNotFound(Exception): class TemplateNotFound(Exception):
@ -235,7 +237,14 @@ class AbstractTemplateBuilder(object):
def get_creators_by_name(self): def get_creators_by_name(self):
if self._creators_by_name is None: if self._creators_by_name is None:
self._creators_by_name = get_legacy_creator_by_name() self._creators_by_name = {}
for creator in discover_legacy_creator_plugins():
creator_name = creator.__name__
if creator_name in self._creators_by_name:
raise KeyError(
"Duplicated creator name {} !".format(creator_name)
)
self._creators_by_name[creator_name] = creator
return self._creators_by_name return self._creators_by_name
def get_shared_data(self, key): def get_shared_data(self, key):
@ -401,7 +410,12 @@ class AbstractTemplateBuilder(object):
key=lambda i: i.order key=lambda i: i.order
)) ))
def build_template(self, template_path=None, level_limit=None): def build_template(
self,
template_path=None,
level_limit=None,
keep_placeholders=None
):
"""Main callback for building workfile from template path. """Main callback for building workfile from template path.
Todo: Todo:
@ -410,16 +424,25 @@ class AbstractTemplateBuilder(object):
Args: Args:
template_path (str): Path to a template file with placeholders. template_path (str): Path to a template file with placeholders.
Template from settings 'get_template_path' used when not Template from settings 'get_template_preset' used when not
passed. passed.
level_limit (int): Limit of populate loops. Related to level_limit (int): Limit of populate loops. Related to
'populate_scene_placeholders' method. 'populate_scene_placeholders' method.
keep_placeholders (bool): Add flag to placeholder data for
hosts to decide if they want to remove
placeholder after it is used.
""" """
template_preset = self.get_template_preset()
if template_path is None: if template_path is None:
template_path = self.get_template_path() template_path = template_preset["path"]
if keep_placeholders is None:
keep_placeholders = template_preset["keep_placeholder"]
self.import_template(template_path) self.import_template(template_path)
self.populate_scene_placeholders(level_limit) self.populate_scene_placeholders(
level_limit, keep_placeholders)
def rebuild_template(self): def rebuild_template(self):
"""Go through existing placeholders in scene and update them. """Go through existing placeholders in scene and update them.
@ -489,7 +512,9 @@ class AbstractTemplateBuilder(object):
plugin = plugins_by_identifier[identifier] plugin = plugins_by_identifier[identifier]
plugin.prepare_placeholders(placeholders) plugin.prepare_placeholders(placeholders)
def populate_scene_placeholders(self, level_limit=None): def populate_scene_placeholders(
self, level_limit=None, keep_placeholders=None
):
"""Find placeholders in scene using plugins and process them. """Find placeholders in scene using plugins and process them.
This should happen after 'import_template'. This should happen after 'import_template'.
@ -505,6 +530,9 @@ class AbstractTemplateBuilder(object):
Args: Args:
level_limit (int): Level of loops that can happen. Default is 1000. level_limit (int): Level of loops that can happen. Default is 1000.
keep_placeholders (bool): Add flag to placeholder data for
hosts to decide if they want to remove
placeholder after it is used.
""" """
if not self.placeholder_plugins: if not self.placeholder_plugins:
@ -541,6 +569,11 @@ class AbstractTemplateBuilder(object):
" is already in progress." " is already in progress."
)) ))
continue continue
# add flag for keeping placeholders in scene
# after they are processed
placeholder.data["keep_placeholder"] = keep_placeholders
filtered_placeholders.append(placeholder) filtered_placeholders.append(placeholder)
self._prepare_placeholders(filtered_placeholders) self._prepare_placeholders(filtered_placeholders)
@ -599,8 +632,8 @@ class AbstractTemplateBuilder(object):
["profiles"] ["profiles"]
) )
def get_template_path(self): def get_template_preset(self):
"""Unified way how template path is received usign settings. """Unified way how template preset is received usign settings.
Method is dependent on '_get_build_profiles' which should return filter Method is dependent on '_get_build_profiles' which should return filter
profiles to resolve path to a template. Default implementation looks profiles to resolve path to a template. Default implementation looks
@ -637,6 +670,13 @@ class AbstractTemplateBuilder(object):
).format(task_name, task_type, host_name)) ).format(task_name, task_type, host_name))
path = profile["path"] path = profile["path"]
# switch to remove placeholders after they are used
keep_placeholder = profile.get("keep_placeholder")
# backward compatibility, since default is True
if keep_placeholder is None:
keep_placeholder = True
if not path: if not path:
raise TemplateLoadFailed(( raise TemplateLoadFailed((
"Template path is not set.\n" "Template path is not set.\n"
@ -650,14 +690,24 @@ class AbstractTemplateBuilder(object):
key: value key: value
for key, value in os.environ.items() for key, value in os.environ.items()
} }
fill_data["root"] = anatomy.roots fill_data["root"] = anatomy.roots
fill_data["project"] = {
"name": project_name,
"code": anatomy["attributes"]["code"]
}
result = StringTemplate.format_template(path, fill_data) result = StringTemplate.format_template(path, fill_data)
if result.solved: if result.solved:
path = result.normalized() path = result.normalized()
if path and os.path.exists(path): if path and os.path.exists(path):
self.log.info("Found template at: '{}'".format(path)) self.log.info("Found template at: '{}'".format(path))
return path return {
"path": path,
"keep_placeholder": keep_placeholder
}
solved_path = None solved_path = None
while True: while True:
@ -683,7 +733,10 @@ class AbstractTemplateBuilder(object):
self.log.info("Found template at: '{}'".format(solved_path)) self.log.info("Found template at: '{}'".format(solved_path))
return solved_path return {
"path": solved_path,
"keep_placeholder": keep_placeholder
}
@six.add_metaclass(ABCMeta) @six.add_metaclass(ABCMeta)
@ -1002,7 +1055,13 @@ class PlaceholderItem(object):
return self._log return self._log
def __repr__(self): def __repr__(self):
return "< {} {} >".format(self.__class__.__name__, self.name) name = None
if hasattr("name", self):
name = self.name
if hasattr("_scene_identifier ", self):
name = self._scene_identifier
return "< {} {} >".format(self.__class__.__name__, name)
@property @property
def order(self): def order(self):
@ -1426,6 +1485,173 @@ class PlaceholderLoadMixin(object):
pass pass
class PlaceholderCreateMixin(object):
"""Mixin prepared for creating placeholder plugins.
Implementation prepares options for placeholders with
'get_create_plugin_options'.
For placeholder population is implemented 'populate_create_placeholder'.
PlaceholderItem can have implemented methods:
- 'create_failed' - called when creating of an instance failed
- 'create_succeed' - called when creating of an instance succeeded
"""
def get_create_plugin_options(self, options=None):
"""Unified attribute definitions for create placeholder.
Common function for placeholder plugins used for creating of
publishable instances. Use it with 'get_placeholder_options'.
Args:
plugin (PlaceholderPlugin): Plugin used for creating of
publish instances.
options (Dict[str, Any]): Already available options which are used
as defaults for attributes.
Returns:
List[AbtractAttrDef]: Attribute definitions common for create
plugins.
"""
creators_by_name = self.builder.get_creators_by_name()
creator_items = [
(creator_name, creator.label or creator_name)
for creator_name, creator in creators_by_name.items()
]
creator_items.sort(key=lambda i: i[1])
options = options or {}
return [
attribute_definitions.UISeparatorDef(),
attribute_definitions.UILabelDef("Main attributes"),
attribute_definitions.UISeparatorDef(),
attribute_definitions.EnumDef(
"creator",
label="Creator",
default=options.get("creator"),
items=creator_items,
tooltip=(
"Creator"
"\nDefines what OpenPype creator will be used to"
" create publishable instance."
"\nUseable creator depends on current host's creator list."
"\nField is case sensitive."
)
),
attribute_definitions.TextDef(
"create_variant",
label="Variant",
default=options.get("create_variant"),
placeholder='Main',
tooltip=(
"Creator"
"\nDefines variant name which will be use for "
"\ncompiling of subset name."
)
),
attribute_definitions.UISeparatorDef(),
attribute_definitions.NumberDef(
"order",
label="Order",
default=options.get("order") or 0,
decimals=0,
minimum=0,
maximum=999,
tooltip=(
"Order"
"\nOrder defines creating instance priority (0 to 999)"
"\nPriority rule is : \"lowest is first to load\"."
)
)
]
def populate_create_placeholder(self, placeholder):
"""Create placeholder is going to create matching publishabe instance.
Args:
placeholder (PlaceholderItem): Placeholder item with information
about requested publishable instance.
"""
creator_name = placeholder.data["creator"]
create_variant = placeholder.data["create_variant"]
creator_plugin = self.builder.get_creators_by_name()[creator_name]
# create subset name
project_name = legacy_io.Session["AVALON_PROJECT"]
task_name = legacy_io.Session["AVALON_TASK"]
asset_name = legacy_io.Session["AVALON_ASSET"]
# get asset id
asset_doc = get_asset_by_name(project_name, asset_name, fields=["_id"])
assert asset_doc, "No current asset found in Session"
asset_id = asset_doc['_id']
subset_name = creator_plugin.get_subset_name(
create_variant,
task_name,
asset_id,
project_name
)
creator_data = {
"creator_name": creator_name,
"create_variant": create_variant,
"subset_name": subset_name,
"creator_plugin": creator_plugin
}
self._before_instance_create(placeholder)
# compile subset name from variant
try:
creator_instance = creator_plugin(
subset_name,
asset_name
).process()
except Exception:
failed = True
self.create_failed(placeholder, creator_data)
else:
failed = False
self.create_succeed(placeholder, creator_instance)
self.cleanup_placeholder(placeholder, failed)
def create_failed(self, placeholder, creator_data):
if hasattr(placeholder, "create_failed"):
placeholder.create_failed(creator_data)
def create_succeed(self, placeholder, creator_instance):
if hasattr(placeholder, "create_succeed"):
placeholder.create_succeed(creator_instance)
def cleanup_placeholder(self, placeholder, failed):
"""Cleanup placeholder after load of single representation.
Can be called multiple times during placeholder item populating and is
called even if loading failed.
Args:
placeholder (PlaceholderItem): Item which was just used to load
representation.
failed (bool): Loading of representation failed.
"""
pass
def _before_instance_create(self, placeholder):
"""Can be overriden. Is called before instance is created."""
pass
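A schematic host-side plugin combining the mixin above with a placeholder plugin (the base-class wiring follows the load-mixin pattern and is an assumption here):

    class MyCreatePlaceholderPlugin(PlaceholderPlugin,
                                    PlaceholderCreateMixin):
        identifier = "my.create.placeholder"  # hypothetical

        def get_placeholder_options(self, options=None):
            return self.get_create_plugin_options(options)

        def populate_placeholder(self, placeholder):
            self.populate_create_placeholder(placeholder)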
class LoadPlaceholderItem(PlaceholderItem): class LoadPlaceholderItem(PlaceholderItem):
"""PlaceholderItem for plugin which is loading representations. """PlaceholderItem for plugin which is loading representations.
@ -1449,3 +1675,28 @@ class LoadPlaceholderItem(PlaceholderItem):
def load_failed(self, representation): def load_failed(self, representation):
self._failed_representations.append(representation) self._failed_representations.append(representation)
class CreatePlaceholderItem(PlaceholderItem):
"""PlaceholderItem for plugin which is creating publish instance.
Connected to 'PlaceholderCreateMixin'.
"""
def __init__(self, *args, **kwargs):
super(CreatePlaceholderItem, self).__init__(*args, **kwargs)
self._failed_created_publish_instances = []
def get_errors(self):
if not self._failed_created_publish_instances:
return []
message = (
"Failed to create {} instance using Creator {}"
).format(
len(self._failed_created_publish_instances),
self.data["creator"]
)
return [message]
def create_failed(self, creator_data):
self._failed_created_publish_instances.append(creator_data)

View file

@ -5,6 +5,8 @@ import shutil
import pyblish.api import pyblish.api
import re import re
from openpype.tests.lib import is_in_tests
class CleanUp(pyblish.api.InstancePlugin): class CleanUp(pyblish.api.InstancePlugin):
"""Cleans up the staging directory after a successful publish. """Cleans up the staging directory after a successful publish.
@ -44,6 +46,9 @@ class CleanUp(pyblish.api.InstancePlugin):
def process(self, instance): def process(self, instance):
"""Plugin entry point.""" """Plugin entry point."""
if is_in_tests():
# let automatic test process clean up temporary data
return
# Get the errored instances # Get the errored instances
failed = [] failed = []
for result in instance.context.data["results"]: for result in instance.context.data["results"]:

View file

@ -4,7 +4,9 @@
import os import os
import pyblish.api import pyblish.api
from openpype.pipeline import legacy_io from openpype.host import IPublishHost
from openpype.pipeline import legacy_io, registered_host
from openpype.pipeline.create import CreateContext
class CollectFromCreateContext(pyblish.api.ContextPlugin): class CollectFromCreateContext(pyblish.api.ContextPlugin):
@ -15,7 +17,11 @@ class CollectFromCreateContext(pyblish.api.ContextPlugin):
def process(self, context): def process(self, context):
create_context = context.data.pop("create_context", None) create_context = context.data.pop("create_context", None)
# Skip if create context is not available if not create_context:
host = registered_host()
if isinstance(host, IPublishHost):
create_context = CreateContext(host)
if not create_context: if not create_context:
return return
@ -31,6 +37,7 @@ class CollectFromCreateContext(pyblish.api.ContextPlugin):
context.data["projectName"] = project_name context.data["projectName"] = project_name
for created_instance in create_context.instances: for created_instance in create_context.instances:
self.log.info(f"created_instance:: {created_instance}")
instance_data = created_instance.data_to_store() instance_data = created_instance.data_to_store()
if instance_data["active"]: if instance_data["active"]:
thumbnail_path = thumbnail_paths_by_instance_id.get( thumbnail_path = thumbnail_paths_by_instance_id.get(

Some files were not shown because too many files have changed in this diff